metadata (dict) | text (string, lengths 0 to 40.6M) | id (string, lengths 14 to 255)
---|---|---
{
"filename": "tests.py",
"repo_name": "ldolan05/ACID",
"repo_path": "ACID_extracted/ACID-main/tests/tests.py",
"type": "Python"
}
|
from astropy.io import fits
import ACID_code.ACID as acid
import numpy as np
import matplotlib.pyplot as plt
import glob
def test_run_e2ds():
e2ds_files = glob.glob('tests/data/*e2ds_A*.fits')
linelist = 'example/example_linelist.txt'
save_path = 'no save'
velocities = np.arange(-25, 25, 0.82)
# run ACID on e2ds files
ACID_results_e2ds = acid.ACID_HARPS(e2ds_files, linelist, vgrid = velocities, save_path = save_path, order_range = np.arange(41, 43))
def test_run_s1d():
s1d_files = glob.glob('tests/data/*s1d_A*.fits')
linelist = 'example/example_linelist.txt'
save_path = 'no save'
velocities = np.arange(-25, 25, 0.82)
# run ACID on s1d files
ACID_results_s1d = acid.ACID_HARPS(s1d_files, linelist, vgrid = velocities, save_path = save_path, order_range = np.arange(41, 43), file_type = 's1d')
test_run_e2ds()
test_run_s1d()
|
ldolan05REPO_NAMEACIDPATH_START.@ACID_extracted@ACID-main@[email protected]@.PATH_END.py
|
{
"filename": "uldm.py",
"repo_name": "sibirrer/lenstronomy",
"repo_path": "lenstronomy_extracted/lenstronomy-main/lenstronomy/LensModel/Profiles/uldm.py",
"type": "Python"
}
|
__author__ = "lucateo"
# this file contains a class to compute the Ultra Light Dark Matter soliton profile
import numpy as np
from scipy.special import gamma, hyp2f1
from lenstronomy.LensModel.Profiles.base_profile import LensProfileBase
__all__ = ["Uldm"]
class Uldm(LensProfileBase):
"""
This class contains functions concerning the ULDM soliton density profile,
whose good approximation is (see for example https://arxiv.org/pdf/1406.6586.pdf )
.. math::
\\rho = \\rho_0 (1 + a(\\theta/\\theta_c)^2)^{-\\beta}
    where :math:`\\theta_c` is the core radius, corresponding to the radius where the
    density drops to half of its central value, :math:`\\beta` is the slope (called just slope
    in the parameters of this model), :math:`\\rho_0 = \\kappa_0 \\Sigma_c/D_lens`,
    and :math:`a` is a parameter, dependent on :math:`\\beta`, chosen such
    that :math:`\\theta_c` indeed corresponds to the radius where the density drops to half
    (simple math gives :math:`a = 0.5^{-1/\\beta} - 1`).
    For a ULDM soliton profile without contributions to the background potential, it
    turns out that :math:`\\beta = 8, a = 0.091`. We allow :math:`\\beta` to be
    different from 8 to model solitons which feel the influence of the background
    potential (see 2105.10873).
The profile has, as parameters:
- kappa_0: central convergence
- theta_c: core radius (in arcseconds)
- slope: exponent entering the profile, default value is 8
"""
_s = 0.000001 # numerical limit for minimal radius
param_names = ["kappa_0", "theta_c", "slope", "center_x", "center_y"]
lower_limit_default = {
"kappa_0": 0,
"theta_c": 0,
"slope": 3.5,
"center_x": -100,
"center_y": -100,
}
upper_limit_default = {
"kappa_0": 1.0,
"theta_c": 100,
"slope": 10,
"center_x": 100,
"center_y": 100,
}
@staticmethod
def rhotilde(kappa_0, theta_c, slope=8):
"""Computes the central density in angular units.
:param kappa_0: central convergence of profile
:param theta_c: core radius (in arcsec)
:param slope: exponent entering the profile
:return: central density in 1/arcsec
"""
a_factor_sqrt = np.sqrt(0.5 ** (-1 / slope) - 1)
num_factor = (
gamma(slope) / gamma(slope - 1 / 2) * a_factor_sqrt / np.sqrt(np.pi)
)
return kappa_0 * num_factor / theta_c
def function(self, x, y, kappa_0, theta_c, center_x=0, center_y=0, slope=8):
"""
:param x: angular position (normally in units of arc seconds)
:param y: angular position (normally in units of arc seconds)
:param kappa_0: central convergence of profile
:param theta_c: core radius (in arcsec)
:param slope: exponent entering the profile
:param center_x: center of halo (in angular units)
:param center_y: center of halo (in angular units)
:return: lensing potential (in arcsec^2)
"""
from mpmath import hyp3f2
x_ = x - center_x
y_ = y - center_y
r = np.sqrt(x_**2 + y_**2)
r = np.maximum(r, self._s)
a_factor_sqrt = np.sqrt((0.5) ** (-1.0 / slope) - 1)
        if np.isscalar(r):
hypgeom = float(
kappa_0
/ 2
* r**2
* hyp3f2(1, 1, slope - 0.5, 2, 2, -((a_factor_sqrt * r / theta_c) ** 2))
)
else:
hypgeom = np.array(
[
kappa_0
/ 2.0
* r_i**2.0
* hyp3f2(
1,
1,
slope - 0.5,
2,
2,
-((a_factor_sqrt * r_i / theta_c) ** 2.0),
)
for r_i in r
],
dtype=float,
)
return hypgeom
@staticmethod
def alpha_radial(r, kappa_0, theta_c, slope=8):
"""Returns the radial part of the deflection angle.
:param kappa_0: central convergence of profile
:param theta_c: core radius (in arcsec)
:param slope: exponent entering the profile
:param r: radius where the deflection angle is computed
:return: radial deflection angle
"""
a_factor = 0.5 ** (-1.0 / slope) - 1
prefactor = 2.0 / (2 * slope - 3) * kappa_0 * theta_c**2 / a_factor
denominator_factor = (1 + a_factor * r**2 / theta_c**2) ** (slope - 3.0 / 2)
return prefactor / r * (1 - 1 / denominator_factor)
def derivatives(self, x, y, kappa_0, theta_c, center_x=0, center_y=0, slope=8):
"""Returns df/dx and df/dy of the function (lensing potential), which are the
deflection angles.
:param x: angular position (normally in units of arc seconds)
:param y: angular position (normally in units of arc seconds)
:param kappa_0: central convergence of profile
:param theta_c: core radius (in arcsec)
:param slope: exponent entering the profile
:param center_x: center of halo (in angular units)
:param center_y: center of halo (in angular units)
:return: deflection angle in x, deflection angle in y
"""
x_ = x - center_x
y_ = y - center_y
R = np.sqrt(x_**2 + y_**2)
R = np.maximum(R, 0.00000001)
f_x = self.alpha_radial(R, kappa_0, theta_c, slope) * x_ / R
f_y = self.alpha_radial(R, kappa_0, theta_c, slope) * y_ / R
return f_x, f_y
def hessian(self, x, y, kappa_0, theta_c, center_x=0, center_y=0, slope=8):
"""
:param x: angular position (normally in units of arc seconds)
:param y: angular position (normally in units of arc seconds)
:param kappa_0: central convergence of profile
:param theta_c: core radius (in arcsec)
:param slope: exponent entering the profile
:param center_x: center of halo (in angular units)
:param center_y: center of halo (in angular units)
        :return: Hessian matrix of the lensing potential: d^2f/dx^2, d^2f/dxdy, d^2f/dydx, d^2f/dy^2
"""
x_ = x - center_x
y_ = y - center_y
R = np.sqrt(x_**2 + y_**2)
R = np.maximum(R, 0.00000001)
a_factor = 0.5 ** (-1.0 / slope) - 1
prefactor = 2.0 / (2 * slope - 3) * kappa_0 * theta_c**2 / a_factor
# denominator factor
denominator = 1 + a_factor * R**2 / theta_c**2
factor1 = (
(2 * slope - 3)
* a_factor
* denominator ** (1.0 / 2 - slope)
/ (theta_c**2 * R**2)
)
factor2 = 1 / R**4 * (1 - denominator ** (3.0 / 2 - slope))
f_xx = prefactor * (factor1 * x_**2 + factor2 * (y_**2 - x_**2))
f_yy = prefactor * (factor1 * y_**2 + factor2 * (x_**2 - y_**2))
f_xy = prefactor * (factor1 * x_ * y_ - factor2 * 2 * x_ * y_)
return f_xx, f_xy, f_xy, f_yy
def density(self, R, kappa_0, theta_c, slope=8):
"""Three dimensional ULDM profile in angular units (rho0_physical = rho0_angular
Sigma_crit / D_lens)
:param R: radius of interest
:param kappa_0: central convergence of profile
:param theta_c: core radius (in arcsec)
:param slope: exponent entering the profile
:return: rho(R) density in angular units
"""
rhotilde = self.rhotilde(kappa_0, theta_c, slope)
a_factor = 0.5 ** (-1.0 / slope) - 1
return rhotilde / (1 + a_factor * (R / theta_c) ** 2) ** slope
def density_lens(self, r, kappa_0, theta_c, slope=8):
"""Computes the density at 3d radius r given lens model parameterization. The
integral in the LOS projection of this quantity results in the convergence
quantity.
:param r: 3d radius
:param kappa_0: central convergence of profile
:param theta_c: core radius (in arcsec)
:param slope: exponent entering the profile
:return: density rho(r)
"""
return self.density(r, kappa_0, theta_c, slope)
@staticmethod
def kappa_r(R, kappa_0, theta_c, slope=8):
"""Convergence of the cored density profile. This routine is also for testing.
:param R: radius (angular scale)
:param kappa_0: convergence in the core
:param theta_c: core radius
:param slope: exponent entering the profile
:return: convergence at r
"""
a_factor = (0.5) ** (-1.0 / slope) - 1
return kappa_0 * (1 + a_factor * (R / theta_c) ** 2) ** (1.0 / 2 - slope)
def density_2d(self, x, y, kappa_0, theta_c, center_x=0, center_y=0, slope=8):
"""Projected two dimensional ULDM profile (convergence * Sigma_crit), but given
our units convention for rho0, it is basically the convergence.
:param x: x-coordinate
:param y: y-coordinate
:param kappa_0: central convergence of profile
:param theta_c: core radius (in arcsec)
:param slope: exponent entering the profile
:return: Epsilon(R) projected density at radius R
"""
x_ = x - center_x
y_ = y - center_y
R = np.sqrt(x_**2 + y_**2)
return self.kappa_r(R, kappa_0, theta_c, slope)
def _mass_integral(self, x, slope=8):
"""Returns the analytic result of the integral appearing in mass expression.
:param slope: exponent entering the profile
:return: integral result
"""
hypF = np.real(hyp2f1(3.0 / 2, slope, 5.0 / 2, -(x**2)))
return 1.0 / 3 * x**3 * hypF
def mass_3d(self, R, kappa_0, theta_c, slope=8):
"""Mass enclosed a 3d sphere or radius r.
:param R: radius in arcseconds
:param kappa_0: central convergence of profile
:param theta_c: core radius (in arcsec)
:param slope: exponent entering the profile
:return: mass of soliton in angular units
"""
rhotilde = self.rhotilde(kappa_0, theta_c, slope)
a_factor = 0.5 ** (-1.0 / slope) - 1
prefactor = 4.0 * np.pi * rhotilde * theta_c**3 / (a_factor) ** (1.5)
m_3d = prefactor * (
self._mass_integral(R / theta_c * np.sqrt(a_factor), slope)
- self._mass_integral(0, slope)
)
return m_3d
def mass_3d_lens(self, r, kappa_0, theta_c, slope=8):
"""Mass enclosed a 3d sphere or radius r.
:param r: radius over which the mass is computed
:param kappa_0: central convergence of profile
:param theta_c: core radius (in arcsec)
:param slope: exponent entering the profile
:return: mass enclosed in 3D ball
"""
m_3d = self.mass_3d(r, kappa_0, theta_c, slope)
return m_3d
def mass_2d(self, R, kappa_0, theta_c, slope=8):
"""Mass enclosed a 2d sphere or radius r.
:param R: radius over which the mass is computed
:param kappa_0: central convergence of profile
:param theta_c: core radius (in arcsec)
:param slope: exponent entering the profile
:return: mass enclosed in 2d sphere
"""
m_2d = np.pi * R * self.alpha_radial(R, kappa_0, theta_c, slope)
return m_2d
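# The block below is a minimal usage sketch added for illustration only; it is not part
# of the original lenstronomy module. It assumes lenstronomy is importable and uses
# purely hypothetical parameter values to evaluate the convergence, deflection angles
# and projected mass of a soliton.
if __name__ == "__main__":
    uldm = Uldm()
    kappa_0, theta_c = 0.1, 0.5  # central convergence, core radius [arcsec] (hypothetical)
    x, y = np.array([0.1, 0.5, 1.0]), np.zeros(3)
    f_x, f_y = uldm.derivatives(x, y, kappa_0, theta_c)  # deflection angles [arcsec]
    kappa = uldm.density_2d(x, y, kappa_0, theta_c)  # convergence at (x, y)
    m_2d = uldm.mass_2d(1.0, kappa_0, theta_c)  # projected mass within 1 arcsec
    print(f_x, f_y, kappa, m_2d)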
|
sibirrerREPO_NAMElenstronomyPATH_START.@lenstronomy_extracted@lenstronomy-main@lenstronomy@LensModel@[email protected]@.PATH_END.py
|
{
"filename": "_dtickrange.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/icicle/marker/colorbar/tickformatstop/_dtickrange.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class DtickrangeValidator(_plotly_utils.basevalidators.InfoArrayValidator):
def __init__(
self,
plotly_name="dtickrange",
parent_name="icicle.marker.colorbar.tickformatstop",
**kwargs,
):
super(DtickrangeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
items=kwargs.pop(
"items",
[
{"editType": "colorbars", "valType": "any"},
{"editType": "colorbars", "valType": "any"},
],
),
**kwargs,
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@icicle@marker@colorbar@tickformatstop@[email protected]_END.py
|
{
"filename": "bulkDataNTtest.py",
"repo_name": "ACS-Community/ACS",
"repo_path": "ACS_extracted/ACS-master/LGPL/CommonSoftware/bulkDataNT/src/bulkDataNTtest.py",
"type": "Python"
}
|
#! /usr/bin/env python
#*******************************************************************************
# ALMA - Atacama Large Millimiter Array
# (c) Associated Universities Inc., 2010
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# "@(#) $Id: bulkDataNTtest.py,v 1.1 2012/10/23 09:44:16 bjeram Exp $"
#
# who when what
# -------- -------- ----------------------------------------------
#
from subprocess import Popen, PIPE
import os
import sys
import time
class bulkDataNTtestSuite(object):
def startSenders(self):
flow = 0
numOfHosts = len(sys.argv)
output = []
process = []
cmd = []
        print 'args: ' + str(sys.argv)
for i in range(1, numOfHosts):
flowString = format(flow, "02d")
command = ("ssh " + os.environ['USER']+ "@" + sys.argv[i] +
" 'source .bash_profile; export BULKDATA_NT_DEBUG=1; export ACS_LOG_STDOUT=2; bulkDataNTGenSender -l 50 -b 640000 -s TS -f "+
flowString+
" | grep -v lost | tee bulkDataNTGenSender.$HOST'")
print "#",i,' : '+ command
p = Popen(command, stdin=PIPE, stdout=PIPE, stderr=PIPE, shell=True)
# print p.pid, p.returncode, p.poll()
if p is not None:
process.append(p)
cmd.append(command)
flow+=1
else:
                print 'Problem executing: ' + command
print 'Going to start sending data'
time.sleep(3)
for i in range(1, numOfHosts):
p = process[i-1]
p.poll()
if p.returncode is None:
p.stdin.write('\n\n')
else:
print "#",i, 'command: '+cmd[i-1]+' exit with an error: ', p.returncode, p.stderr.read()
for i in range(1, numOfHosts):
p = process[i-1]
p.wait()
output.append(p.stdout.read().strip())
            if p.returncode == 0:
print "#",i," : ", p.returncode, " : "+output[i-1]
else:
print "#",i, 'command: '+cmd[i-1]+' exit with an error: ', p.returncode, p.stderr.read()
if __name__ == '__main__':
testSuit = bulkDataNTtestSuite()
testSuit.startSenders()
# ___oOo___
|
ACS-CommunityREPO_NAMEACSPATH_START.@ACS_extracted@ACS-master@LGPL@CommonSoftware@bulkDataNT@[email protected]@.PATH_END.py
|
{
"filename": "vimba_c.py",
"repo_name": "alliedvision/VimbaPython",
"repo_path": "VimbaPython_extracted/VimbaPython-master/vimba/c_binding/vimba_c.py",
"type": "Python"
}
|
"""BSD 2-Clause License
Copyright (c) 2019, Allied Vision Technologies GmbH
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import copy
import ctypes
from typing import Callable, Any, Tuple
from ctypes import c_void_p, c_char_p, byref, sizeof, POINTER as c_ptr, c_char_p as c_str
from ..util import TraceEnable
from ..error import VimbaSystemError
from .vimba_common import Uint32Enum, Int32Enum, VmbInt32, VmbUint32, VmbInt64, VmbUint64, \
VmbHandle, VmbBool, VmbDouble, VmbError, VimbaCError, VmbPixelFormat, \
fmt_enum_repr, fmt_repr, fmt_flags_repr, load_vimba_lib
__version__ = None
__all__ = [
'VmbPixelFormat',
'VmbInterface',
'VmbAccessMode',
'VmbFeatureData',
'VmbFeaturePersist',
'VmbFeatureVisibility',
'VmbFeatureFlags',
'VmbFrameStatus',
'VmbFrameFlags',
'VmbVersionInfo',
'VmbInterfaceInfo',
'VmbCameraInfo',
'VmbFeatureInfo',
'VmbFeatureEnumEntry',
'VmbFrame',
'VmbFeaturePersistSettings',
'G_VIMBA_C_HANDLE',
'VIMBA_C_VERSION',
'EXPECTED_VIMBA_C_VERSION',
'call_vimba_c',
'build_callback_type'
]
# Types
class VmbInterface(Uint32Enum):
"""
Camera Interface Types:
Unknown - Interface is not known to this version of the API
Firewire - 1394
Ethernet - GigE
Usb - USB 3.0
CL - Camera Link
CSI2 - CSI-2
"""
Unknown = 0
Firewire = 1
Ethernet = 2
Usb = 3
CL = 4
CSI2 = 5
def __str__(self):
return self._name_
class VmbAccessMode(Uint32Enum):
"""
Camera Access Mode:
None_ - No access
Full - Read and write access
Read - Read-only access
Config - Configuration access (GeV)
Lite - Read and write access without feature access (only addresses)
"""
None_ = 0
Full = 1
Read = 2
Config = 4
Lite = 8
def __str__(self):
return self._name_
class VmbFeatureData(Uint32Enum):
"""
Feature Data Types
Unknown - Unknown feature type
Int - 64 bit integer feature
Float - 64 bit floating point feature
Enum - Enumeration feature
String - String feature
Bool - Boolean feature
Command - Command feature
Raw - Raw (direct register access) feature
None_ - Feature with no data
"""
Unknown = 0
Int = 1
Float = 2
Enum = 3
String = 4
Bool = 5
Command = 6
Raw = 7
None_ = 8
def __str__(self):
return self._name_
class VmbFeaturePersist(Uint32Enum):
"""
Type of features that are to be saved (persisted) to the XML file
when using VmbCameraSettingsSave
All - Save all features to XML, including look-up tables
Streamable - Save only features marked as streamable, excluding
look-up tables
NoLUT - Save all features except look-up tables (default)
"""
All = 0
Streamable = 1
NoLUT = 2
def __str__(self):
return self._name_
class VmbFeatureVisibility(Uint32Enum):
"""
Feature Visibility
Unknown - Feature visibility is not known
Beginner - Feature is visible in feature list (beginner level)
Expert - Feature is visible in feature list (expert level)
Guru - Feature is visible in feature list (guru level)
Invisible - Feature is not visible in feature list
"""
Unknown = 0
Beginner = 1
Expert = 2
Guru = 3
Invisible = 4
def __str__(self):
return self._name_
class VmbFeatureFlags(Uint32Enum):
"""
Feature Flags
None_ - No additional information is provided
Read - Static info about read access.
Current status depends on access mode, check with
VmbFeatureAccessQuery()
Write - Static info about write access.
Current status depends on access mode, check with
VmbFeatureAccessQuery()
Volatile - Value may change at any time
ModifyWrite - Value may change after a write
"""
None_ = 0
Read = 1
Write = 2
Undocumented = 4
Volatile = 8
ModifyWrite = 16
def __str__(self):
return self._name_
class VmbFrameStatus(Int32Enum):
"""
Frame transfer status
Complete - Frame has been completed without errors
Incomplete - Frame could not be filled to the end
TooSmall - Frame buffer was too small
Invalid - Frame buffer was invalid
"""
Complete = 0
Incomplete = -1
TooSmall = -2
Invalid = -3
def __str__(self):
return self._name_
class VmbFrameFlags(Uint32Enum):
"""
Frame Flags
None_ - No additional information is provided
Dimension - Frame's dimension is provided
Offset - Frame's offset is provided (ROI)
FrameID - Frame's ID is provided
Timestamp - Frame's timestamp is provided
"""
None_ = 0
Dimension = 1
Offset = 2
FrameID = 4
Timestamp = 8
def __str__(self):
return self._name_
class VmbVersionInfo(ctypes.Structure):
"""
Version Information
Fields:
major - Type: VmbUint32, Info: Major version number
minor - Type: VmbUint32, Info: Minor version number
patch - Type: VmbUint32, Info: Patch version number
"""
_fields_ = [
("major", VmbUint32),
("minor", VmbUint32),
("patch", VmbUint32)
]
def __str__(self):
return '{}.{}.{}'.format(self.major, self.minor, self.patch)
def __repr__(self):
rep = 'VmbVersionInfo'
rep += '(major=' + repr(self.major)
rep += ',minor=' + repr(self.minor)
rep += ',patch=' + repr(self.patch)
rep += ')'
return rep
class VmbInterfaceInfo(ctypes.Structure):
"""
Interface information. Holds read-only information about an interface.
Fields:
interfaceIdString - Type: c_char_p
Info: Unique identifier for each interface
interfaceType - Type: VmbInterface (VmbUint32)
Info: Interface type, see VmbInterface
interfaceName - Type: c_char_p
Info: Interface name, given by transport layer
serialString - Type: c_char_p
Info: Serial number
permittedAccess - Type: VmbAccessMode (VmbUint32)
Info: Used access mode, see VmbAccessMode
"""
_fields_ = [
("interfaceIdString", c_char_p),
("interfaceType", VmbUint32),
("interfaceName", c_char_p),
("serialString", c_char_p),
("permittedAccess", VmbUint32)
]
def __repr__(self):
rep = 'VmbInterfaceInfo'
rep += fmt_repr('(interfaceIdString={}', self.interfaceIdString)
rep += fmt_enum_repr(',interfaceType={}', VmbInterface, self.interfaceType)
rep += fmt_repr(',interfaceName={}', self.interfaceName)
rep += fmt_repr(',serialString={}', self.serialString)
rep += fmt_flags_repr(',permittedAccess={}', VmbAccessMode, self.permittedAccess)
rep += ')'
return rep
class VmbCameraInfo(ctypes.Structure):
"""
Camera information. Holds read-only information about a camera.
Fields:
cameraIdString - Type: c_char_p
Info: Unique identifier for each camera
cameraName - Type: c_char_p
Info: Name of the camera
modelName - Type: c_char_p
Info: Model name
serialString - Type: c_char_p
Info: Serial number
permittedAccess - Type: VmbAccessMode (VmbUint32)
Info: Used access mode, see VmbAccessMode
interfaceIdString - Type: c_char_p
Info: Unique value for each interface or bus
"""
_fields_ = [
("cameraIdString", c_char_p),
("cameraName", c_char_p),
("modelName", c_char_p),
("serialString", c_char_p),
("permittedAccess", VmbUint32),
("interfaceIdString", c_char_p)
]
def __repr__(self):
rep = 'VmbCameraInfo'
rep += fmt_repr('(cameraIdString={}', self.cameraIdString)
rep += fmt_repr(',cameraName={}', self.cameraName)
rep += fmt_repr(',modelName={}', self.modelName)
rep += fmt_repr(',serialString={}', self.serialString)
rep += fmt_flags_repr(',permittedAccess={}', VmbAccessMode, self.permittedAccess)
rep += fmt_repr(',interfaceIdString={}', self.interfaceIdString)
rep += ')'
return rep
class VmbFeatureInfo(ctypes.Structure):
"""
Feature information. Holds read-only information about a feature.
Fields:
name - Type: c_char_p
Info: Name used in the API
featureDataType - Type: VmbFeatureData (VmbUint32)
Info: Data type of this feature
featureFlags - Type: VmbFeatureFlags (VmbUint32)
Info: Access flags for this feature
category - Type: c_char_p
Info: Category this feature can be found in
displayName - Type: c_char_p
Info: Feature name to be used in GUIs
pollingTime - Type: VmbUint32
Info: Predefined polling time for volatile
features
unit - Type: c_char_p
Info: Measuring unit as given in the XML file
representation - Type: c_char_p
Info: Representation of a numeric feature
visibility - Type: VmbFeatureVisibility (VmbUint32)
Info: GUI visibility
tooltip - Type: c_char_p
Info: Short description, e.g. for a tooltip
description - Type: c_char_p
Info: Longer description
sfncNamespace - Type: c_char_p
Info: Namespace this feature resides in
isStreamable - Type: VmbBool
Info: Indicates if a feature can be stored
to / loaded from a file
hasAffectedFeatures - Type: VmbBool
Info: Indicates if the feature potentially
affects other features
hasSelectedFeatures - Type: VmbBool
Info: Indicates if the feature selects other
features
"""
_fields_ = [
("name", c_char_p),
("featureDataType", VmbUint32),
("featureFlags", VmbUint32),
("category", c_char_p),
("displayName", c_char_p),
("pollingTime", VmbUint32),
("unit", c_char_p),
("representation", c_char_p),
("visibility", VmbUint32),
("tooltip", c_char_p),
("description", c_char_p),
("sfncNamespace", c_char_p),
("isStreamable", VmbBool),
("hasAffectedFeatures", VmbBool),
("hasSelectedFeatures", VmbBool)
]
def __repr__(self):
rep = 'VmbFeatureInfo'
rep += fmt_repr('(name={}', self.name)
rep += fmt_enum_repr(',featureDataType={}', VmbFeatureData, self.featureDataType)
rep += fmt_flags_repr(',featureFlags={}', VmbFeatureFlags, self.featureFlags)
rep += fmt_repr(',category={}', self.category)
rep += fmt_repr(',displayName={}', self.displayName)
rep += fmt_repr(',pollingTime={}', self.pollingTime)
rep += fmt_repr(',unit={}', self.unit)
rep += fmt_repr(',representation={}', self.representation)
rep += fmt_enum_repr(',visibility={}', VmbFeatureVisibility, self.visibility)
rep += fmt_repr(',tooltip={}', self.tooltip)
rep += fmt_repr(',description={}', self.description)
rep += fmt_repr(',sfncNamespace={}', self.sfncNamespace)
rep += fmt_repr(',isStreamable={}', self.isStreamable)
rep += fmt_repr(',hasAffectedFeatures={}', self.hasAffectedFeatures)
rep += fmt_repr(',hasSelectedFeatures={}', self.hasSelectedFeatures)
rep += ')'
return rep
class VmbFeatureEnumEntry(ctypes.Structure):
"""
Info about possible entries of an enumeration feature:
Fields:
name - Type: c_char_p
Info: Name used in the API
displayName - Type: c_char_p
Info: Enumeration entry name to be used in GUIs
visibility - Type: VmbFeatureVisibility (VmbUint32)
Info: GUI visibility
tooltip - Type: c_char_p
Info: Short description, e.g. for a tooltip
description - Type: c_char_p
Info: Longer description
sfncNamespace - Type: c_char_p
Info: Namespace this feature resides in
intValue - Type: VmbInt64
Info: Integer value of this enumeration entry
"""
_fields_ = [
("name", c_char_p),
("displayName", c_char_p),
("visibility", VmbUint32),
("tooltip", c_char_p),
("description", c_char_p),
("sfncNamespace", c_char_p),
("intValue", VmbInt64)
]
def __repr__(self):
rep = 'VmbFeatureEnumEntry'
rep += fmt_repr('(name={}', self.name)
rep += fmt_repr(',displayName={}', self.displayName)
rep += fmt_enum_repr(',visibility={}', VmbFeatureVisibility, self.visibility)
rep += fmt_repr(',tooltip={}', self.tooltip)
rep += fmt_repr(',description={}', self.description)
rep += fmt_repr(',sfncNamespace={}', self.sfncNamespace)
        rep += fmt_repr(',intValue={}', self.intValue)
rep += ')'
return rep
class VmbFrame(ctypes.Structure):
"""
Frame delivered by Camera
Fields (in):
buffer - Type: c_void_p
Info: Comprises image and ancillary data
bufferSize - Type: VmbUint32_t
Info: Size of the data buffer
context - Type: c_void_p[4]
Info: 4 void pointers that can be employed by the user
(e.g. for storing handles)
Fields (out):
receiveStatus - Type: VmbFrameStatus (VmbInt32)
Info: Resulting status of the receive operation
receiveFlags - Type: VmbFrameFlags (VmbUint32)
Info: Flags indicating which additional frame
information is available
imageSize - Type: VmbUint32
Info: Size of the image data inside the data buffer
ancillarySize - Type: VmbUint32
Info: Size of the ancillary data inside the
data buffer
pixelFormat - Type: VmbPixelFormat (VmbUint32)
Info: Pixel format of the image
width - Type: VmbUint32
Info: Width of an image
height - Type: VmbUint32
Info: Height of an image
offsetX - Type: VmbUint32
Info: Horizontal offset of an image
offsetY - Type: VmbUint32
Info: Vertical offset of an image
frameID - Type: VmbUint64
Info: Unique ID of this frame in this stream
timestamp - Type: VmbUint64
Info: Timestamp set by the camera
"""
_fields_ = [
("buffer", c_void_p),
("bufferSize", VmbUint32),
("context", c_void_p * 4),
("receiveStatus", VmbInt32),
("receiveFlags", VmbUint32),
("imageSize", VmbUint32),
("ancillarySize", VmbUint32),
("pixelFormat", VmbUint32),
("width", VmbUint32),
("height", VmbUint32),
("offsetX", VmbUint32),
("offsetY", VmbUint32),
("frameID", VmbUint64),
("timestamp", VmbUint64)
]
def __repr__(self):
rep = 'VmbFrame'
rep += fmt_repr('(buffer={}', self.buffer)
rep += fmt_repr(',bufferSize={}', self.bufferSize)
rep += fmt_repr(',context={}', self.context)
        rep += fmt_enum_repr(',receiveStatus={}', VmbFrameStatus, self.receiveStatus)
rep += fmt_flags_repr(',receiveFlags={}', VmbFrameFlags, self.receiveFlags)
rep += fmt_repr(',imageSize={}', self.imageSize)
rep += fmt_repr(',ancillarySize={}', self.ancillarySize)
rep += fmt_enum_repr(',pixelFormat={}', VmbPixelFormat, self.pixelFormat)
rep += fmt_repr(',width={}', self.width)
rep += fmt_repr(',height={}', self.height)
rep += fmt_repr(',offsetX={}', self.offsetX)
rep += fmt_repr(',offsetY={}', self.offsetY)
rep += fmt_repr(',frameID={}', self.frameID)
rep += fmt_repr(',timestamp={}', self.timestamp)
rep += ')'
return rep
def deepcopy_skip_ptr(self, memo):
result = VmbFrame()
memo[id(self)] = result
result.buffer = None
result.bufferSize = 0
result.context = (None, None, None, None)
setattr(result, 'receiveStatus', copy.deepcopy(self.receiveStatus, memo))
setattr(result, 'receiveFlags', copy.deepcopy(self.receiveFlags, memo))
setattr(result, 'imageSize', copy.deepcopy(self.imageSize, memo))
setattr(result, 'ancillarySize', copy.deepcopy(self.ancillarySize, memo))
setattr(result, 'pixelFormat', copy.deepcopy(self.pixelFormat, memo))
setattr(result, 'width', copy.deepcopy(self.width, memo))
setattr(result, 'height', copy.deepcopy(self.height, memo))
setattr(result, 'offsetX', copy.deepcopy(self.offsetX, memo))
setattr(result, 'offsetY', copy.deepcopy(self.offsetY, memo))
setattr(result, 'frameID', copy.deepcopy(self.frameID, memo))
setattr(result, 'timestamp', copy.deepcopy(self.timestamp, memo))
return result
class VmbFeaturePersistSettings(ctypes.Structure):
"""
Parameters determining the operation mode of VmbCameraSettingsSave
and VmbCameraSettingsLoad
Fields:
persistType - Type: VmbFeaturePersist (VmbUint32)
Info: Type of features that are to be saved
maxIterations - Type: VmbUint32
Info: Number of iterations when loading settings
loggingLevel - Type: VmbUint32
Info: Determines level of detail for load/save
settings logging
"""
_fields_ = [
("persistType", VmbUint32),
("maxIterations", VmbUint32),
("loggingLevel", VmbUint32)
]
def __repr__(self):
        rep = 'VmbFeaturePersistSettings'
rep += fmt_enum_repr('(persistType={}', VmbFeaturePersist, self.persistType)
rep += fmt_repr(',maxIterations={}', self.maxIterations)
rep += fmt_repr(',loggingLevel={}', self.loggingLevel)
rep += ')'
return rep
G_VIMBA_C_HANDLE = VmbHandle(1)
VIMBA_C_VERSION = None
EXPECTED_VIMBA_C_VERSION = '1.9.0'
# For detailed information on the signatures see "VimbaC.h"
# To improve readability, suppress 'E501 line too long (> 100 characters)'
# check of flake8
_SIGNATURES = {
'VmbVersionQuery': (VmbError, [c_ptr(VmbVersionInfo), VmbUint32]),
'VmbStartup': (VmbError, None),
'VmbShutdown': (None, None),
'VmbCamerasList': (VmbError, [c_ptr(VmbCameraInfo), VmbUint32, c_ptr(VmbUint32), VmbUint32]),
'VmbCameraInfoQuery': (VmbError, [c_str, c_ptr(VmbCameraInfo), VmbUint32]),
'VmbCameraOpen': (VmbError, [c_str, VmbAccessMode, c_ptr(VmbHandle)]),
'VmbCameraClose': (VmbError, [VmbHandle]),
'VmbFeaturesList': (VmbError, [VmbHandle, c_ptr(VmbFeatureInfo), VmbUint32, c_ptr(VmbUint32), VmbUint32]), # noqa: E501
'VmbFeatureInfoQuery': (VmbError, [VmbHandle, c_str, c_ptr(VmbFeatureInfo), VmbUint32]),
'VmbFeatureListAffected': (VmbError, [VmbHandle, c_str, c_ptr(VmbFeatureInfo), VmbUint32, c_ptr(VmbUint32), VmbUint32]), # noqa: E501
'VmbFeatureListSelected': (VmbError, [VmbHandle, c_str, c_ptr(VmbFeatureInfo), VmbUint32, c_ptr(VmbUint32), VmbUint32]), # noqa: E501
'VmbFeatureAccessQuery': (VmbError, [VmbHandle, c_str, c_ptr(VmbBool), c_ptr(VmbBool)]),
'VmbFeatureIntGet': (VmbError, [VmbHandle, c_str, c_ptr(VmbInt64)]),
'VmbFeatureIntSet': (VmbError, [VmbHandle, c_str, VmbInt64]),
'VmbFeatureIntRangeQuery': (VmbError, [VmbHandle, c_str, c_ptr(VmbInt64), c_ptr(VmbInt64)]), # noqa: E501
'VmbFeatureIntIncrementQuery': (VmbError, [VmbHandle, c_str, c_ptr(VmbInt64)]),
'VmbFeatureFloatGet': (VmbError, [VmbHandle, c_str, c_ptr(VmbDouble)]),
'VmbFeatureFloatSet': (VmbError, [VmbHandle, c_str, VmbDouble]),
'VmbFeatureFloatRangeQuery': (VmbError, [VmbHandle, c_str, c_ptr(VmbDouble), c_ptr(VmbDouble)]),
'VmbFeatureFloatIncrementQuery': (VmbError, [VmbHandle, c_str, c_ptr(VmbBool), c_ptr(VmbDouble)]), # noqa: E501
'VmbFeatureEnumGet': (VmbError, [VmbHandle, c_str, c_ptr(c_str)]),
'VmbFeatureEnumSet': (VmbError, [VmbHandle, c_str, c_str]),
'VmbFeatureEnumRangeQuery': (VmbError, [VmbHandle, c_str, c_ptr(c_str), VmbUint32, c_ptr(VmbUint32)]), # noqa: E501
'VmbFeatureEnumIsAvailable': (VmbError, [VmbHandle, c_str, c_str, c_ptr(VmbBool)]),
'VmbFeatureEnumAsInt': (VmbError, [VmbHandle, c_str, c_str, c_ptr(VmbInt64)]),
'VmbFeatureEnumAsString': (VmbError, [VmbHandle, c_str, VmbInt64, c_ptr(c_str)]),
'VmbFeatureEnumEntryGet': (VmbError, [VmbHandle, c_str, c_str, c_ptr(VmbFeatureEnumEntry), VmbUint32]), # noqa: E501
'VmbFeatureStringGet': (VmbError, [VmbHandle, c_str, c_str, VmbUint32, c_ptr(VmbUint32)]), # noqa: E501
'VmbFeatureStringSet': (VmbError, [VmbHandle, c_str, c_str]),
'VmbFeatureStringMaxlengthQuery': (VmbError, [VmbHandle, c_str, c_ptr(VmbUint32)]),
'VmbFeatureBoolGet': (VmbError, [VmbHandle, c_str, c_ptr(VmbBool)]),
'VmbFeatureBoolSet': (VmbError, [VmbHandle, c_str, VmbBool]),
'VmbFeatureCommandRun': (VmbError, [VmbHandle, c_str]),
'VmbFeatureCommandIsDone': (VmbError, [VmbHandle, c_str, c_ptr(VmbBool)]),
'VmbFeatureRawGet': (VmbError, [VmbHandle, c_str, c_str, VmbUint32, c_ptr(VmbUint32)]),
'VmbFeatureRawSet': (VmbError, [VmbHandle, c_str, c_str, VmbUint32]),
'VmbFeatureRawLengthQuery': (VmbError, [VmbHandle, c_str, c_ptr(VmbUint32)]),
'VmbFeatureInvalidationRegister': (VmbError, [VmbHandle, c_str, c_void_p, c_void_p]), # noqa: E501
'VmbFeatureInvalidationUnregister': (VmbError, [VmbHandle, c_str, c_void_p]),
'VmbFrameAnnounce': (VmbError, [VmbHandle, c_ptr(VmbFrame), VmbUint32]),
'VmbFrameRevoke': (VmbError, [VmbHandle, c_ptr(VmbFrame)]),
'VmbFrameRevokeAll': (VmbError, [VmbHandle]),
'VmbCaptureStart': (VmbError, [VmbHandle]),
'VmbCaptureEnd': (VmbError, [VmbHandle]),
'VmbCaptureFrameQueue': (VmbError, [VmbHandle, c_ptr(VmbFrame), c_void_p]),
'VmbCaptureFrameWait': (VmbError, [VmbHandle, c_ptr(VmbFrame), VmbUint32]),
'VmbCaptureQueueFlush': (VmbError, [VmbHandle]),
'VmbInterfacesList': (VmbError, [c_ptr(VmbInterfaceInfo), VmbUint32, c_ptr(VmbUint32), VmbUint32]), # noqa: E501
'VmbInterfaceOpen': (VmbError, [c_str, c_ptr(VmbHandle)]),
'VmbInterfaceClose': (VmbError, [VmbHandle]),
'VmbAncillaryDataOpen': (VmbError, [c_ptr(VmbFrame), c_ptr(VmbHandle)]),
'VmbAncillaryDataClose': (VmbError, [VmbHandle]),
'VmbMemoryRead': (VmbError, [VmbHandle, VmbUint64, VmbUint32, c_str, c_ptr(VmbUint32)]),
'VmbMemoryWrite': (VmbError, [VmbHandle, VmbUint64, VmbUint32, c_str, c_ptr(VmbUint32)]),
'VmbRegistersRead': (VmbError, [VmbHandle, VmbUint32, c_ptr(VmbUint64), c_ptr(VmbUint64), c_ptr(VmbUint32)]), # noqa: E501
'VmbRegistersWrite': (VmbError, [VmbHandle, VmbUint32, c_ptr(VmbUint64), c_ptr(VmbUint64), c_ptr(VmbUint32)]), # noqa: E501
'VmbCameraSettingsSave': (VmbError, [VmbHandle, c_str, c_ptr(VmbFeaturePersistSettings), VmbUint32]), # noqa: E501
'VmbCameraSettingsLoad': (VmbError, [VmbHandle, c_str, c_ptr(VmbFeaturePersistSettings), VmbUint32]) # noqa: E501
}
def _attach_signatures(lib_handle):
global _SIGNATURES
for function_name, signature in _SIGNATURES.items():
fn = getattr(lib_handle, function_name)
fn.restype, fn.argtypes = signature
fn.errcheck = _eval_vmberror
return lib_handle
def _check_version(lib_handle):
global EXPECTED_VIMBA_C_VERSION
global VIMBA_C_VERSION
v = VmbVersionInfo()
lib_handle.VmbVersionQuery(byref(v), sizeof(v))
VIMBA_C_VERSION = str(v)
loaded_version = (v.major, v.minor, v.patch)
expected_version = tuple(map(int, EXPECTED_VIMBA_C_VERSION.split(".")))
# major and minor version must be equal, patch version may be equal or greater
    if not (loaded_version[0:2] == expected_version[0:2] and
            loaded_version[2] >= expected_version[2]):
        msg = 'Invalid VimbaC Version: Expected: {}, Found: {}'
        raise VimbaSystemError(msg.format(EXPECTED_VIMBA_C_VERSION, VIMBA_C_VERSION))
return lib_handle
def _eval_vmberror(result: VmbError, func: Callable[..., Any], *args: Tuple[Any, ...]):
if result not in (VmbError.Success, None):
raise VimbaCError(result)
_lib_instance = _check_version(_attach_signatures(load_vimba_lib('VimbaC')))
@TraceEnable()
def call_vimba_c(func_name: str, *args):
"""This function encapsulates the entire VimbaC access.
For Details on valid function signatures see the 'VimbaC.h'.
Arguments:
func_name: The function name from VimbaC to be called.
args: Varargs passed directly to the underlaying C-Function.
Raises:
TypeError if given are do not match the signature of the function.
AttributeError if func with name 'func_name' does not exist.
VimbaCError if the function call is valid but neither None or VmbError.Success was returned.
The following functions of VimbaC can be executed:
VmbVersionQuery
VmbStartup
VmbShutdown
VmbCamerasList
VmbCameraInfoQuery
VmbCameraOpen
VmbCameraClose
VmbFeaturesList
VmbFeatureInfoQuery
VmbFeatureListAffected
VmbFeatureListSelected
VmbFeatureAccessQuery
VmbFeatureIntGet
VmbFeatureIntSet
VmbFeatureIntRangeQuery
VmbFeatureIntIncrementQuery
VmbFeatureFloatGet
VmbFeatureFloatSet
VmbFeatureFloatRangeQuery
VmbFeatureFloatIncrementQuery
VmbFeatureEnumGet
VmbFeatureEnumSet
VmbFeatureEnumRangeQuery
VmbFeatureEnumIsAvailable
VmbFeatureEnumAsInt
VmbFeatureEnumAsString
VmbFeatureEnumEntryGet
VmbFeatureStringGet
VmbFeatureStringSet
VmbFeatureStringMaxlengthQuery
VmbFeatureBoolGet
VmbFeatureBoolSet
VmbFeatureCommandRun
VmbFeatureCommandIsDone
VmbFeatureRawGet
VmbFeatureRawSet
VmbFeatureRawLengthQuery
VmbFeatureInvalidationRegister
VmbFeatureInvalidationUnregister
VmbFrameAnnounce
VmbFrameRevoke
VmbFrameRevokeAll
VmbCaptureStart
VmbCaptureEnd
VmbCaptureFrameQueue
VmbCaptureFrameWait
VmbCaptureQueueFlush
VmbInterfacesList
VmbInterfaceOpen
VmbInterfaceClose
VmbAncillaryDataOpen
VmbAncillaryDataClose
VmbMemoryRead
VmbMemoryWrite
VmbRegistersRead
VmbRegistersWrite
VmbCameraSettingsSave
VmbCameraSettingsLoad
"""
global _lib_instance
getattr(_lib_instance, func_name)(*args)
def build_callback_type(*args):
global _lib_instance
lib_type = type(_lib_instance)
if lib_type == ctypes.CDLL:
return ctypes.CFUNCTYPE(*args)
elif lib_type == ctypes.WinDLL:
return ctypes.WINFUNCTYPE(*args)
else:
raise VimbaSystemError('Unknown Library Type. Abort.')
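# The snippet below is an illustrative sketch added for documentation purposes; it is
# not part of the original VimbaPython binding. It shows how call_vimba_c() wraps the
# raw C entry points, here querying the VimbaC version the same way _check_version()
# does. Note that simply importing this module already requires a working VimbaC
# installation, since the library is loaded at import time.
if __name__ == '__main__':
    _info = VmbVersionInfo()
    call_vimba_c('VmbVersionQuery', byref(_info), sizeof(_info))
    print('Loaded VimbaC version: {}'.format(_info))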
|
alliedvisionREPO_NAMEVimbaPythonPATH_START.@VimbaPython_extracted@VimbaPython-master@vimba@c_binding@[email protected]_END.py
|
{
"filename": "_legendgroup.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/ohlc/_legendgroup.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class LegendgroupValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(self, plotly_name="legendgroup", parent_name="ohlc", **kwargs):
super(LegendgroupValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "style"),
**kwargs,
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@ohlc@[email protected]_END.py
|
{
"filename": "modeling_2D.py",
"repo_name": "gammapy/gammapy",
"repo_path": "gammapy_extracted/gammapy-main/examples/tutorials/analysis-2d/modeling_2D.py",
"type": "Python"
}
|
"""
2D map fitting
==============
Source modelling and fitting in stacked observations using the high level interface.
Prerequisites
-------------
- To understand how a general modelling and fitting works in gammapy,
please refer to the :doc:`/tutorials/analysis-3d/analysis_3d` tutorial.
Context
-------
We often want to determine the position and morphology of an object. To
do so, we don’t necessarily have to resort to a full 3D fitting but can
perform a simple image fitting, in particular, in an energy range where
the PSF does not vary strongly, or if we want to explore a possible
energy dependence of the morphology.
Objective
---------
To localize a source and/or constrain its morphology.
Proposed approach
-----------------
The first step here, as in most analyses with DL3 data, is to create
reduced datasets. For this, we will use the `Analysis` class to create
a single set of stacked maps with a single bin in energy (thus, an
*image* which behaves as a *cube*). We will then model this image with a
spatial model of our choice, while keeping the spectral model fixed to
an integrated power law.
"""
# %matplotlib inline
import astropy.units as u
import matplotlib.pyplot as plt
######################################################################
# Setup
# -----
#
# As usual, we’ll start with some general imports…
#
from IPython.display import display
from gammapy.analysis import Analysis, AnalysisConfig
######################################################################
# Check setup
# -----------
from gammapy.utils.check import check_tutorials_setup
check_tutorials_setup()
######################################################################
# Creating the config file
# ------------------------
#
# Now, we create a config file for our analysis. You may load this from
# disc if you have a pre-defined config file.
#
# Here, we use 3 simulated CTAO runs of the galactic center.
#
config = AnalysisConfig()
# Selecting the observations
config.observations.datastore = "$GAMMAPY_DATA/cta-1dc/index/gps/"
config.observations.obs_ids = [110380, 111140, 111159]
######################################################################
# Technically, gammapy implements 2D analysis as a special case of 3D
# analysis (one bin in energy). So, we must specify the type of
# analysis as *3D*, and define the geometry of the analysis.
#
config.datasets.type = "3d"
config.datasets.geom.wcs.skydir = {
"lon": "0 deg",
"lat": "0 deg",
"frame": "galactic",
} # The WCS geometry - centered on the galactic center
config.datasets.geom.wcs.width = {"width": "8 deg", "height": "6 deg"}
config.datasets.geom.wcs.binsize = "0.02 deg"
# The FoV radius to use for cutouts
config.datasets.geom.selection.offset_max = 2.5 * u.deg
config.datasets.safe_mask.methods = ["offset-max"]
config.datasets.safe_mask.parameters = {"offset_max": "2.5 deg"}
config.datasets.background.method = "fov_background"
config.fit.fit_range = {"min": "0.1 TeV", "max": "30.0 TeV"}
# We now fix the energy axis for the counts map - (the reconstructed energy binning)
config.datasets.geom.axes.energy.min = "0.1 TeV"
config.datasets.geom.axes.energy.max = "10 TeV"
config.datasets.geom.axes.energy.nbins = 1
config.datasets.geom.wcs.binsize_irf = "0.2 deg"
print(config)
######################################################################
# Getting the reduced dataset
# ---------------------------
#
######################################################################
# We now use the config file and create a single `MapDataset` containing
# `counts`, `background`, `exposure`, `psf` and `edisp` maps.
#
# %%time
analysis = Analysis(config)
analysis.get_observations()
analysis.get_datasets()
print(analysis.datasets["stacked"])
######################################################################
# The counts and background maps have only one bin in reconstructed
# energy. The exposure and IRF maps are in true energy, and hence, have a
# different binning based upon the binning of the IRFs. We need not bother
# about them presently.
#
print(analysis.datasets["stacked"].counts)
print(analysis.datasets["stacked"].background)
print(analysis.datasets["stacked"].exposure)
######################################################################
# We can have a quick look at these maps in the following way:
#
analysis.datasets["stacked"].counts.reduce_over_axes().plot(vmax=10, add_cbar=True)
plt.show()
######################################################################
# Modelling
# ---------
#
# Now, we define a model to be fitted to the dataset. **The important
# thing to note here is the dummy spectral model - an integrated power law
# with only the normalisation left free**. Here, we use its YAML definition to
# load it:
#
model_config = """
components:
- name: GC-1
type: SkyModel
spatial:
type: PointSpatialModel
frame: galactic
parameters:
- name: lon_0
value: 0.02
unit: deg
- name: lat_0
value: 0.01
unit: deg
spectral:
type: PowerLaw2SpectralModel
parameters:
- name: amplitude
value: 1.0e-12
unit: cm-2 s-1
- name: index
value: 2.0
unit: ''
frozen: true
- name: emin
value: 0.1
unit: TeV
frozen: true
- name: emax
value: 10.0
unit: TeV
frozen: true
"""
analysis.set_models(model_config)
######################################################################
# We will freeze the parameters of the background
#
analysis.datasets["stacked"].background_model.parameters["tilt"].frozen = True
# To run the fit
analysis.run_fit()
# To see the best fit values along with the errors
display(analysis.models.to_parameters_table())
|
gammapyREPO_NAMEgammapyPATH_START.@gammapy_extracted@gammapy-main@examples@tutorials@analysis-2d@[email protected]_END.py
|
{
"filename": "__init__.py",
"repo_name": "1313e/PRISM",
"repo_path": "PRISM_extracted/PRISM-master/prism/emulator/__init__.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
"""
Emulator
========
Contains the definition of *PRISM*'s :class:`~Emulator` base class and various
specialized :class:`~Emulator` subclasses.
"""
# %% IMPORTS
# Import base Emulator class
from ._emulator import Emulator
# Import specialized Emulator subclasses
# All declaration
__all__ = ['Emulator']
|
1313eREPO_NAMEPRISMPATH_START.@PRISM_extracted@PRISM-master@prism@emulator@[email protected]_END.py
|
{
"filename": "tempita.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/scipy/py3/scipy/_build_utils/tempita.py",
"type": "Python"
}
|
import sys
import os
import argparse
from Cython import Tempita as tempita
# XXX: If this import ever fails (does it really?), vendor either
# cython.tempita or numpy/npy_tempita.
def process_tempita(fromfile, outfile=None):
"""Process tempita templated file and write out the result.
The template file is expected to end in `.c.in` or `.pyx.in`:
E.g. processing `template.c.in` generates `template.c`.
"""
if outfile is None:
# We're dealing with a distutils build here, write in-place
outfile = os.path.splitext(fromfile)[0]
from_filename = tempita.Template.from_filename
template = from_filename(fromfile,
encoding=sys.getdefaultencoding())
content = template.substitute()
with open(outfile, 'w') as f:
f.write(content)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("infile", type=str,
help="Path to the input file")
parser.add_argument("-o", "--outdir", type=str,
help="Path to the output directory")
parser.add_argument("-i", "--ignore", type=str,
help="An ignored input - may be useful to add a "
"dependency between custom targets")
args = parser.parse_args()
if not args.infile.endswith('.in'):
raise ValueError(f"Unexpected extension: {args.infile}")
outdir_abs = os.path.join(os.getcwd(), args.outdir)
outfile = os.path.join(outdir_abs,
os.path.splitext(os.path.split(args.infile)[1])[0])
process_tempita(args.infile, outfile)
if __name__ == "__main__":
main()
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@scipy@py3@scipy@[email protected]@.PATH_END.py
|
{
"filename": "sampling.py",
"repo_name": "astro-informatics/QuantifAI",
"repo_path": "QuantifAI_extracted/QuantifAI-main/quantifai/sampling.py",
"type": "Python"
}
|
import torch
import math
import numpy as np
from typing import Callable
def ULA_kernel(
X: torch.Tensor, delta: float, grad_likelihood_prior: Callable
) -> torch.Tensor:
"""ULA sampling algorithm kernel
Args:
X (torch.Tensor): Tensor to update
delta (float): Step size for the ULA algorithm
grad_likelihood_prior (function): drift term or gradient of the likelihood and prior terms
Returns:
torch.Tensor: New generated sample
"""
return (
torch.clone(X)
- delta * grad_likelihood_prior(X)
+ math.sqrt(2 * delta) * torch.randn_like(X)
)
def MYULA_kernel(
X: torch.Tensor,
delta: float,
lmbd: float,
grad_likelihood: Callable,
prox_prior: Callable,
op_drift=lambda _x: torch.real(_x),
) -> torch.Tensor:
"""ULA sampling algorithm kernel
Args:
X (torch.Tensor): Tensor to update
delta (float): Step size for the MYULA algorithm
lmbd (float): Moreau-Yosida envelope parameter
grad_likelihood (function): gradient of the likelihood
prox_prior (function): prox of the non-smooth prior
op_drift (function): operator to apply to the drift term.
Defaults to the real projection.
Returns:
torch.Tensor: New generated sample
"""
return (
(1.0 - (delta / lmbd)) * torch.clone(X)
+ op_drift(
-delta * grad_likelihood(torch.clone(X))
+ (delta / lmbd) * prox_prior(X, lmbd)
)
+ math.sqrt(2 * delta) * torch.randn_like(X)
)
def SKROCK_kernel(
X: torch.Tensor,
Lipschitz_U: float,
nStages: int,
eta: float,
dt_perc: float,
grad_likelihood_prior: Callable,
) -> torch.Tensor:
"""SKROCK sampling algorithm kernel
Args:
X (torch.Tensor): Tensor to update
Lipschitz_U (float): Lipschitz constant of the likelihood and prior terms
        nStages (int): Number of internal stages (gradient evaluations) used for the update
        eta (float): Damping variable appearing in the max step-size calculation
dt_perc (float): Percentage of the step-size to be used
grad_likelihood_prior (function): drift term or gradient of the likelihood and prior terms
Returns:
Xts (torch.Tensor): New generated sample
"""
# SK-ROCK parameters
# First kind Chebyshev function
T_s = lambda s, x: np.cosh(s * np.arccosh(x))
# First derivative Chebyshev polynomial first kind
T_prime_s = lambda s, x: s * np.sinh(s * np.arccosh(x)) / np.sqrt(x**2 - 1)
# computing SK-ROCK stepsize given a number of stages
# and parameters needed in the algorithm
denNStag = 2 - (4 / 3) * eta
rhoSKROCK = ((nStages - 0.5) ** 2) * denNStag - 1.5 # stiffness ratio
dtSKROCK = dt_perc * rhoSKROCK / Lipschitz_U # step-size
w0 = 1 + eta / (nStages**2) # parameter \omega_0
w1 = T_s(nStages, w0) / T_prime_s(nStages, w0) # parameter \omega_1
mu1 = w1 / w0 # parameter \mu_1
nu1 = nStages * (w1 / 2) # parameter \nu_1
kappa1 = nStages * (w1 / w0) # parameter \kappa_1
# Sampling the variable X (SKROCK)
Q = math.sqrt(2 * dtSKROCK) * torch.randn_like(X) # diffusion term
# SKROCK
# SKROCK first internal iteration (s=1)
XtsMinus2 = X.clone()
Xts = X.clone() - mu1 * dtSKROCK * grad_likelihood_prior(X + nu1 * Q) + kappa1 * Q
for js in range(2, nStages + 1): # s=2,...,nStages SK-ROCK internal iterations
XprevSMinus2 = Xts.clone()
mu = 2 * w1 * T_s(js - 1, w0) / T_s(js, w0) # parameter \mu_js
nu = 2 * w0 * T_s(js - 1, w0) / T_s(js, w0) # parameter \nu_js
kappa = 1 - nu # parameter \kappa_js
Xts = -mu * dtSKROCK * grad_likelihood_prior(Xts) + nu * Xts + kappa * XtsMinus2
XtsMinus2 = XprevSMinus2
return Xts # new sample produced by the SK-ROCK algorithm
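# The block below is a small usage sketch added for illustration; it is not part of the
# original QuantifAI module. It draws a short ULA chain targeting a standard Gaussian,
# for which the drift (the gradient of the negative log-density) is simply x itself.
# The step size and iteration count are arbitrary choices for the demo.
if __name__ == "__main__":
    torch.manual_seed(0)
    grad_neg_log_density = lambda x: x  # N(0, 1) target: -log p(x) = x^2 / 2 + const
    x = torch.zeros(1000)  # 1000 independent chains, all started at zero
    for _ in range(500):
        x = ULA_kernel(x, delta=0.1, grad_likelihood_prior=grad_neg_log_density)
    print(float(x.mean()), float(x.std()))  # should be roughly 0 and 1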
|
astro-informaticsREPO_NAMEQuantifAIPATH_START.@QuantifAI_extracted@QuantifAI-main@[email protected]@.PATH_END.py
|
{
"filename": "drm_funcs.py",
"repo_name": "Swift-BAT/NITRATES",
"repo_path": "NITRATES_extracted/NITRATES-main/nitrates/lib/drm_funcs.py",
"type": "Python"
}
|
import numpy as np
from astropy.io import fits
import os
from ..lib.logllh_ebins_funcs import get_cnt_ebins_normed, get_cnt_ebins
def get_drm_arr(drm_dir):
drm_fnames = np.array([fn for fn in os.listdir(drm_dir) if "drm_" in fn])
imxs = np.array([float(fn.split("_")[1]) for fn in drm_fnames])
imys = np.array([float(fn.split("_")[2]) for fn in drm_fnames])
dtp = [("imx", np.float64), ("imy", np.float64), ("fname", drm_fnames.dtype)]
drm_arr = np.empty(len(imxs), dtype=dtp)
drm_arr["imx"] = imxs
drm_arr["imy"] = imys
drm_arr["fname"] = drm_fnames
return drm_arr
def get_ebin_ind_edges(drm, ebins0, ebins1):
# drm = fits.open(os.path.join(b_dir, drm_arr['fname'][0]))
drm_ebins0 = drm[2].data["E_MIN"]
drm_ebins1 = drm[2].data["E_MAX"]
ebin_ind_edges = [
(
np.argmin(np.abs(drm_ebins0 - ebins0[i])),
np.argmin(np.abs(drm_ebins1 - ebins1[i])),
)
for i in range(len(ebins0))
]
return ebin_ind_edges
class DRMs(object):
def __init__(self, drm_dir):
self.drm_dir = drm_dir
self.drm_arr = get_drm_arr(drm_dir)
def get_closest_ind(self, imx, imy):
return np.argmin(np.hypot(imx - self.drm_arr["imx"], imy - self.drm_arr["imy"]))
def get_drm(self, imx, imy, ret_pos=False):
ind = self.get_closest_ind(imx, imy)
fname = os.path.join(self.drm_dir, self.drm_arr["fname"][ind])
# print "Opening DRM ", fname
drm = fits.open(fname, memmap=False)
if ret_pos:
drm_imx = self.drm_arr["imx"][ind]
drm_imy = self.drm_arr["imy"][ind]
return drm, drm_imx, drm_imy
return drm
class cnts_norm_intp(object):
def __init__(self, cnt_ebins_norm_ind_mat, ind_ax):
self.ind_ax = ind_ax
self.cnt_ebins_norm_ind_mat = cnt_ebins_norm_ind_mat
self.ind0 = np.min(ind_ax)
self.ind1 = np.max(ind_ax)
def __call__(self, ind):
if (ind <= self.ind0) or (ind >= self.ind1):
return np.nan * np.ones(np.shape(self.cnt_ebins_norm_ind_mat)[1])
ind_ind0 = np.argmin(np.abs(ind - self.ind_ax))
ind_ind1 = ind_ind0 + 1 if ind > self.ind_ax[ind_ind0] else ind_ind0 - 1
A0 = np.abs(ind - self.ind_ax[ind_ind1]) / np.abs(
self.ind_ax[ind_ind0] - self.ind_ax[ind_ind1]
)
A1 = 1.0 - A0
cnts_norm = (
A0 * self.cnt_ebins_norm_ind_mat[ind_ind0]
+ A1 * self.cnt_ebins_norm_ind_mat[ind_ind1]
)
return cnts_norm
class cnts_intp(object):
def __init__(self, cnt_ebins_ind_mat, ind_ax):
self.ind_ax = ind_ax
self.cnt_ebins_ind_mat = cnt_ebins_ind_mat
self.ind0 = np.min(ind_ax)
self.ind1 = np.max(ind_ax)
def __call__(self, ind):
if (ind <= self.ind0) or (ind >= self.ind1):
return np.nan * np.ones(np.shape(self.cnt_ebins_ind_mat)[1])
ind_ind0 = np.argmin(np.abs(ind - self.ind_ax))
ind_ind1 = ind_ind0 + 1 if ind > self.ind_ax[ind_ind0] else ind_ind0 - 1
A0 = np.abs(ind - self.ind_ax[ind_ind1]) / np.abs(
self.ind_ax[ind_ind0] - self.ind_ax[ind_ind1]
)
A1 = 1 - A0
cnts = (
A0 * self.cnt_ebins_ind_mat[ind_ind0]
+ A1 * self.cnt_ebins_ind_mat[ind_ind1]
)
return cnts
def get_cnts_intp_obj(ind_ax, drm, ebin_ind_edges, abs_cor, E0=50.0, normed=True):
nebins = len(ebin_ind_edges)
cnt_ebins_ind_mat = np.zeros((len(ind_ax), nebins))
for i in range(len(ind_ax)):
if normed:
cnt_ebins_ind_mat[i] = get_cnt_ebins_normed(
ind_ax[i], drm, ebin_ind_edges, abs_cor=abs_cor, E0=E0
)
else:
cnt_ebins_ind_mat[i] = get_cnt_ebins(
1.0, ind_ax[i], drm, ebin_ind_edges, abs_cor=abs_cor, E0=E0
)
if np.any(np.isnan(cnt_ebins_ind_mat)):
print("Bad cnt_ebins_ind_mat")
print((np.sum(np.isnan(cnt_ebins_ind_mat))))
if normed:
intp_obj = cnts_norm_intp(cnt_ebins_ind_mat, ind_ax)
else:
intp_obj = cnts_intp(cnt_ebins_ind_mat, ind_ax)
return intp_obj
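# Illustrative usage sketch (not part of the original NITRATES module): it assumes a
# directory of pre-computed response files named like "drm_<imx>_<imy>_*.fits" exists
# at the hypothetical path below, and opens the DRM closest to a given detector-plane
# position (imx, imy).
if __name__ == "__main__":
    drm_dir = "/path/to/drm_dir"  # hypothetical location of the DRM files
    drms = DRMs(drm_dir)
    drm, drm_imx, drm_imy = drms.get_drm(0.1, 0.2, ret_pos=True)
    print("closest DRM is at imx=%.3f, imy=%.3f" % (drm_imx, drm_imy))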
|
Swift-BATREPO_NAMENITRATESPATH_START.@NITRATES_extracted@NITRATES-main@nitrates@lib@[email protected]_END.py
|
{
"filename": "recipe_divide_a0v.py",
"repo_name": "igrins/plp",
"repo_path": "plp_extracted/plp-master/igrins/recipes/recipe_divide_a0v.py",
"type": "Python"
}
|
from .argh_helper import argh
from .process_divide_a0v import process_band
from ..libs.recipe_factory import new_recipe_class, new_recipe_func
_recipe_class_divide_a0v = new_recipe_class("RecipeDivideA0V",
("EXTENDED_*", "STELLAR_*"),
process_band)
divide_a0v = new_recipe_func("divide_a0v",
_recipe_class_divide_a0v)
# FIXME: This is ugly.
divide_a0v = argh.arg('-a', '--a0v', default="GROUP2")(divide_a0v)
divide_a0v = argh.arg('--a0v-obsid', default=None, type=int)(divide_a0v)
divide_a0v = argh.arg('--basename-postfix', default=None, type=str)(divide_a0v)
divide_a0v = argh.arg('--outname-postfix', default=None, type=str)(divide_a0v)
divide_a0v = argh.arg("-g", "--groups", default=None)(divide_a0v)
__all__ = ["divide_a0v"]
|
igrinsREPO_NAMEplpPATH_START.@plp_extracted@plp-master@igrins@recipes@[email protected]_END.py
|
{
"filename": "gradDivVectorFieldListPairWiseInst.cc.py",
"repo_name": "LLNL/spheral",
"repo_path": "spheral_extracted/spheral-main/src/FieldOperations/gradDivVectorFieldListPairWiseInst.cc.py",
"type": "Python"
}
|
text = """
//------------------------------------------------------------------------------
// Explicit instantiation.
//------------------------------------------------------------------------------
#include "FieldOperations/gradDivVectorFieldListPairWise.cc"
#include "Geometry/Dimension.hh"
namespace Spheral {
//============================== gradDivVectorFieldList() ==============================
template
FieldList<Dim< %(ndim)s >, Dim< %(ndim)s >::Vector>
gradDivVectorFieldListPairWise< Dim< %(ndim)s > >
(const FieldList<Dim< %(ndim)s >, Dim< %(ndim)s >::Vector>& fieldList,
const FieldList<Dim< %(ndim)s >, Dim< %(ndim)s >::Vector>& position,
const FieldList<Dim< %(ndim)s >, Dim< %(ndim)s >::Scalar>& weight,
const FieldList<Dim< %(ndim)s >, Dim< %(ndim)s >::Scalar>& mass,
const FieldList<Dim< %(ndim)s >, Dim< %(ndim)s >::Scalar>& density,
const FieldList<Dim< %(ndim)s >, Dim< %(ndim)s >::SymTensor>& Hfield,
const TableKernel< Dim< %(ndim)s > >& kernel);
}
"""
|
LLNLREPO_NAMEspheralPATH_START.@spheral_extracted@spheral-main@src@[email protected]@.PATH_END.py
|
{
"filename": "result.py",
"repo_name": "waynebhayes/SpArcFiRe",
"repo_path": "SpArcFiRe_extracted/SpArcFiRe-master/scripts/SpArcFiRe-pyvenv/lib/python2.7/site-packages/astropy/io/votable/validator/result.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Contains a class to handle a validation result for a single VOTable
file.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from ....extern import six
from ....extern.six.moves import http_client, urllib
from ....extern.six.moves import cPickle as pickle
# STDLIB
from xml.parsers.expat import ExpatError
import hashlib
import os
import shutil
import socket
import subprocess
import warnings
# VO
from .. import table
from .. import exceptions
from .. import xmlutil
class Result(object):
def __init__(self, url, root='results', timeout=10):
self.url = url
m = hashlib.md5()
m.update(url)
self._hash = m.hexdigest()
self._root = root
self._path = os.path.join(
self._hash[0:2], self._hash[2:4], self._hash[4:])
if not os.path.exists(self.get_dirpath()):
os.makedirs(self.get_dirpath())
self.timeout = timeout
self.load_attributes()
def __enter__(self):
return self
def __exit__(self, *args):
self.save_attributes()
def get_dirpath(self):
return os.path.join(self._root, self._path)
def get_htmlpath(self):
return self._path
def get_attribute_path(self):
return os.path.join(self.get_dirpath(), "values.dat")
def get_vo_xml_path(self):
return os.path.join(self.get_dirpath(), "vo.xml")
# ATTRIBUTES
def load_attributes(self):
path = self.get_attribute_path()
if os.path.exists(path):
try:
with open(path, 'rb') as fd:
self._attributes = pickle.load(fd)
except Exception:
shutil.rmtree(self.get_dirpath())
os.makedirs(self.get_dirpath())
self._attributes = {}
else:
self._attributes = {}
def save_attributes(self):
path = self.get_attribute_path()
with open(path, 'wb') as fd:
pickle.dump(self._attributes, fd)
def __getitem__(self, key):
return self._attributes[key]
def __setitem__(self, key, val):
self._attributes[key] = val
def __contains__(self, key):
return key in self._attributes
# VO XML
def download_xml_content(self):
path = self.get_vo_xml_path()
if 'network_error' not in self._attributes:
self['network_error'] = None
if os.path.exists(path):
return
def fail(reason):
reason = str(reason)
with open(path, 'wb') as fd:
fd.write('FAILED: {0}\n'.format(reason).encode('utf-8'))
self['network_error'] = reason
r = None
try:
if six.PY2:
r = urllib.request.urlopen(self.url, timeout=self.timeout)
else:
r = urllib.request.urlopen(
self.url.decode('ascii'), timeout=self.timeout)
except urllib.error.URLError as e:
if hasattr(e, 'reason'):
reason = e.reason
else:
reason = e.code
fail(reason)
return
except http_client.HTTPException as e:
fail("HTTPException: {}".format(str(e)))
return
except (socket.timeout, socket.error) as e:
fail("Timeout")
return
if r is None:
fail("Invalid URL")
return
try:
content = r.read()
except socket.timeout as e:
fail("Timeout")
return
else:
r.close()
with open(path, 'wb') as fd:
fd.write(content)
def get_xml_content(self):
path = self.get_vo_xml_path()
if not os.path.exists(path):
self.download_xml_content()
with open(path, 'rb') as fd:
content = fd.read()
return content
def validate_vo(self):
path = self.get_vo_xml_path()
if not os.path.exists(path):
self.download_xml_content()
self['version'] = ''
if 'network_error' in self and self['network_error'] is not None:
self['nwarnings'] = 0
self['nexceptions'] = 0
self['warnings'] = []
self['xmllint'] = None
self['warning_types'] = set()
return
nexceptions = 0
nwarnings = 0
t = None
lines = []
with open(path, 'rb') as input:
with warnings.catch_warnings(record=True) as warning_lines:
try:
t = table.parse(input, pedantic=False, filename=path)
except (ValueError, TypeError, ExpatError) as e:
lines.append(str(e))
nexceptions += 1
lines = [str(x.message) for x in warning_lines] + lines
if t is not None:
self['version'] = version = t.version
else:
self['version'] = version = "1.0"
if 'xmllint' not in self:
# Now check the VO schema based on the version in
# the file.
try:
success, stdout, stderr = xmlutil.validate_schema(path, version)
# OSError is raised when XML file eats all memory and
# system sends kill signal.
except OSError as e:
self['xmllint'] = None
self['xmllint_content'] = str(e)
else:
self['xmllint'] = (success == 0)
self['xmllint_content'] = stderr
warning_types = set()
for line in lines:
w = exceptions.parse_vowarning(line)
if w['is_warning']:
nwarnings += 1
if w['is_exception']:
nexceptions += 1
warning_types.add(w['warning'])
self['nwarnings'] = nwarnings
self['nexceptions'] = nexceptions
self['warnings'] = lines
self['warning_types'] = warning_types
def has_warning(self, warning_code):
return warning_code in self['warning_types']
def match_expectations(self):
if 'network_error' not in self:
self['network_error'] = None
if self['expected'] == 'good':
return (not self['network_error'] and
self['nwarnings'] == 0 and
self['nexceptions'] == 0)
elif self['expected'] == 'incorrect':
return (not self['network_error'] and
(self['nwarnings'] > 0 or
self['nexceptions'] > 0))
elif self['expected'] == 'broken':
return self['network_error'] is not None
def validate_with_votlint(self, path_to_stilts_jar):
filename = self.get_vo_xml_path()
p = subprocess.Popen(
"java -jar {} votlint validate=false {}".format(
path_to_stilts_jar, filename),
shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if len(stdout) or p.returncode:
self['votlint'] = False
else:
self['votlint'] = True
self['votlint_content'] = stdout
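# Hedged usage sketch (not from the original file): Result is written to be
# used as a context manager so that its attribute dict is pickled back to disk
# on exit. The URL below is a placeholder and the 'expected' key is normally
# set by the validator harness that drives this class, so the lines are kept
# commented out and are illustrative only.
#
#   with Result('http://example.com/votable.xml', root='results') as r:
#       r['expected'] = 'good'       # what the harness expects to happen
#       r.validate_vo()              # download (if needed) and parse the table
#       ok = r.match_expectations()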
def get_result_subsets(results, root, s=None):
all_results = []
correct = []
not_expected = []
fail_schema = []
schema_mismatch = []
fail_votlint = []
votlint_mismatch = []
network_failures = []
version_10 = []
version_11 = []
version_12 = []
version_unknown = []
has_warnings = []
warning_set = {}
has_exceptions = []
exception_set = {}
for url in results:
if s:
six.next(s)
if isinstance(url, Result):
x = url
else:
x = Result(url, root=root)
all_results.append(x)
if (x['nwarnings'] == 0 and
x['nexceptions'] == 0 and
x['xmllint'] is True):
correct.append(x)
if not x.match_expectations():
not_expected.append(x)
if x['xmllint'] is False:
fail_schema.append(x)
if (x['xmllint'] is False and
x['nwarnings'] == 0 and
x['nexceptions'] == 0):
schema_mismatch.append(x)
if 'votlint' in x and x['votlint'] is False:
fail_votlint.append(x)
if 'network_error' not in x:
x['network_error'] = None
if (x['nwarnings'] == 0 and
x['nexceptions'] == 0 and
x['network_error'] is None):
votlint_mismatch.append(x)
if 'network_error' in x and x['network_error'] is not None:
network_failures.append(x)
version = x['version']
if version == '1.0':
version_10.append(x)
elif version == '1.1':
version_11.append(x)
elif version == '1.2':
version_12.append(x)
else:
version_unknown.append(x)
if x['nwarnings'] > 0:
has_warnings.append(x)
for warning in x['warning_types']:
if (warning is not None and
len(warning) == 3 and
warning.startswith('W')):
warning_set.setdefault(warning, [])
warning_set[warning].append(x)
if x['nexceptions'] > 0:
has_exceptions.append(x)
for exc in x['warning_types']:
if exc is not None and len(exc) == 3 and exc.startswith('E'):
exception_set.setdefault(exc, [])
exception_set[exc].append(x)
warning_set = list(warning_set.items())
warning_set.sort()
exception_set = list(exception_set.items())
exception_set.sort()
tables = [
('all', 'All tests', all_results),
('correct', 'Correct', correct),
('unexpected', 'Unexpected', not_expected),
('schema', 'Invalid against schema', fail_schema),
('schema_mismatch', 'Invalid against schema/Passed vo.table',
schema_mismatch, ['ul']),
('fail_votlint', 'Failed votlint', fail_votlint),
('votlint_mismatch', 'Failed votlint/Passed vo.table',
votlint_mismatch, ['ul']),
('network_failures', 'Network failures', network_failures),
('version1.0', 'Version 1.0', version_10),
('version1.1', 'Version 1.1', version_11),
('version1.2', 'Version 1.2', version_12),
('version_unknown', 'Version unknown', version_unknown),
('warnings', 'Warnings', has_warnings)]
for warning_code, warning in warning_set:
if s:
six.next(s)
warning_class = getattr(exceptions, warning_code, None)
if warning_class:
warning_descr = warning_class.get_short_name()
tables.append(
(warning_code,
'{}: {}'.format(warning_code, warning_descr),
warning, ['ul', 'li']))
tables.append(
('exceptions', 'Exceptions', has_exceptions))
for exception_code, exc in exception_set:
if s:
six.next(s)
exception_class = getattr(exceptions, exception_code, None)
if exception_class:
exception_descr = exception_class.get_short_name()
tables.append(
(exception_code,
'{}: {}'.format(exception_code, exception_descr),
exc, ['ul', 'li']))
return tables
|
waynebhayesREPO_NAMESpArcFiRePATH_START.@SpArcFiRe_extracted@SpArcFiRe-master@scripts@SpArcFiRe-pyvenv@[email protected]@site-packages@astropy@io@votable@[email protected]@.PATH_END.py
|
{
"filename": "sample.py",
"repo_name": "POSYDON-code/POSYDON",
"repo_path": "POSYDON_extracted/POSYDON-main/posydon/active_learning/psy_cris/sample.py",
"type": "Python"
}
|
"""The definition of the `Sampler` class in PSY-CRIS."""
__authors__ = [
"Kyle Akira Rocha <[email protected]>",
"Scott Coughlin <[email protected]>",
]
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import time
import sys
from collections import OrderedDict
import scipy.stats
from scipy.spatial.distance import pdist
class Sampler:
"""Class implementing PTMCMC and MCMC for PSY-CRIS algorith.
Modular implementation of PTMCMC and MCMC designed to
implement the PSY-CRIS algorithm of sampling points
in a target distribution constructed with a Classifier
and Regressor. After a posterior is generated, methods
in this class are also used to downsample.
"""
def __init__(self, classifier=None, regressor=None):
"""Initialize the sampler.
Parameters
----------
classifier : instance of <class, Classifier>
A trained classifier object.
regressor : instance of <class, Regressor>, optional
A trained regressor object.
"""
self._Classifier_ = classifier
self._Regressor_ = regressor
if (self._Classifier_ is not None) or (self._Regressor_ is not None):
if self._Classifier_ is not None:
self._TableData_ = self._Classifier_._TableData_
else:
self._TableData_ = self._Regressor_._TableData_
# Find the bounds of the walker - should be a TableData attribute
self._max_vals_ = self._TableData_._max_input_vals
self._min_vals_ = self._TableData_._min_input_vals
# You can save chains_history in here
self._chain_step_hist_holder_ = OrderedDict()
# Not fully implemented yet I'm pretty sure....
self._MAX_APC_str_ = []
def TD_2d_analytic(self, name, args, **kwargs):
r"""2-dimensional analytic target distribution for testing MCMC/PTMCMC.
The function:
$\frac{16}{3\pi} \left( \exp\left[-\mu^2 - (9 + 4\mu^2 + 8\nu)^2\right]
+ \frac{1}{2} \exp\left[- 8 \mu^2 - 8 (\nu-2)^2\right] \right)$
Parameters
----------
name : str
Name of algorithm to use. For this method None.
args : array
2D location to get the value of the function.
**kwargs
Kwargs for more complex target distributions.
Returns
-------
array or float
"""
mu, nu = args
arg1 = -(mu ** 2) - (9 + 4 * mu ** 2 + 8 * nu) ** 2
arg2 = -8 * mu ** 2 - 8 * (nu - 2) ** 2
return (16) / (3 * np.pi) * (np.exp(arg1) + 0.5 * np.exp(arg2))
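# Hedged standalone sketch (illustration only, not a method of Sampler):
# evaluate the same 2D analytic target on a small grid to see where its two
# Gaussian bumps sit. The grid ranges are arbitrary demo choices.
import numpy as np
demo_mu = np.linspace(-2.0, 2.0, 201)
demo_nu = np.linspace(-4.0, 4.0, 201)
MU, NU = np.meshgrid(demo_mu, demo_nu)
demo_td = (16 / (3 * np.pi)) * (
    np.exp(-(MU ** 2) - (9 + 4 * MU ** 2 + 8 * NU) ** 2)
    + 0.5 * np.exp(-8 * MU ** 2 - 8 * (NU - 2) ** 2)
)
print(demo_td.max(), MU.flat[demo_td.argmax()], NU.flat[demo_td.argmax()])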
def get_TD_classification_data(self, *args, **kwargs):
"""Get target-distribution classification data.
Calculate terms relevant for creating target distributions
with classification terms.
Parameters
----------
classifier_name : str
Trained classifier name to use for predictions.
position : array
Position in parameter space to eval
**kwargs
TD_verbose : bool
Print useful output
Returns
-------
max_probs : array
Maximum probabilities at each query point
position : array
Position in parameter space being queried
cls_key : array
Classification key predicted for each query position
"""
classifier_name, position = args
position = np.array(position)
if position.ndim == 1:
position = position[np.newaxis, :]
TD_verbose = kwargs.get("TD_verbose", False)
normalized_probs, where_not_nan = self._Classifier_.return_probs(
classifier_name, position, verbose=TD_verbose
)
where_nan = np.array(
[i for i in range(len(position)) if i not in where_not_nan]
)
max_probs = np.max(normalized_probs, axis=1)
cls_key = np.argmax(normalized_probs, axis=1)
position = np.array(position)
if len(where_nan) > 0:
max_probs = np.zeros(position.shape[0]) + 1e-16
cls_key = [None] * position.shape[0]
if TD_verbose:
print('\t', max_probs, position, cls_key)
return max_probs, position, cls_key
else:
max_probs = np.where(max_probs == 1, 1-1e-16, max_probs)
if TD_verbose:
print('\t', max_probs, position, cls_key)
return max_probs, position, cls_key
def TD_classification(self, classifier_name, position, **kwargs):
r"""Target distribution using classification.
$f(x) = 1 - max[P_{\rm class}(x)]$
Parameters
----------
classifier_name : str
String to specify the trained classification algorithm to use.
position : array
Single location in parameter space for the target distribution to
be evaluated at.
**kwargs
TD_BETA : float
Exponent of target distribution - $f(x)^{\rm TD_BETA}$
Used for smoothing or sharpening.
TD_verbose : bool
Extra print output every method call.
Returns
-------
array
If classification probability is Nan: f(x) = 1E-16
"""
# TD_verbose = kwargs.get("TD_verbose", False)
TD_BETA = kwargs.get("TD_BETA", 1.0)
max_probs, pos, cls_keys = self.get_TD_classification_data(
classifier_name, position, **kwargs
)
theoretical_max_TD_cls_term = 1 - 1 / self._TableData_.num_classes
return ((1 - max_probs) * 1 / theoretical_max_TD_cls_term) ** (TD_BETA)
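# Hedged numeric sketch (standalone, hypothetical classifier outputs): the
# classification term peaks where the classifier is least certain. With three
# classes the theoretical maximum of (1 - max prob) is 1 - 1/3, so dividing by
# that value normalizes the term to 1 on a maximally uncertain point, and
# TD_BETA then sharpens or smooths it.
import numpy as np
demo_num_classes = 3
demo_max_probs = np.array([0.34, 0.60, 0.99])   # made-up classifier outputs
demo_beta = 2.0
demo_theory_max = 1.0 - 1.0 / demo_num_classes
print(((1.0 - demo_max_probs) / demo_theory_max) ** demo_beta)
# -> roughly [0.98, 0.36, 0.0002]; uncertain points dominate the target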
def TD_classification_regression(self, names, args, **kwargs):
r"""Target distribution using both classification & regression.
Classification: $1 - max[P_{\rm class}(x)]$
Regression: $ A_0 \log( A_1* abs( max[APC_n [loc]]) + 1 )$
Parameters
----------
names : list like
Iterable containing the two strings specifying the classification
and regression algorithm to use.
args : array
Position in parameter space to evaluate the target distribution at.
**kwargs
TD_A1 : float, optional
Scaling factor inside the Log regression error term.
(Default = 0.5)
TD_TAU : float, optional
Relative weight of classification to regression term.
(Default = 0.5)
TD_BETA : float, optional
Exponent of the entire target distribution. Used
for smoothing or sharpening the distribution. Default is 1.
TD_verbose : bool, optional
Print more diagnostic information.
Returns
-------
array
"""
normalized_probs, where_not_nan = self._Classifier_.return_probs(
names[0], args, verbose=False
)
max_probs = np.max(normalized_probs, axis=1)
pred_class_ids = np.argmax(normalized_probs, axis=1)
cls_key = [self._Classifier_.class_id_mapping[i]
for i in pred_class_ids]
theoretical_max_TD_cls_term = (
1 - 1 / self._Classifier_._TableData_.num_classes)
if max_probs == 1:
max_probs = 1 - 1e-16
classification_term = (1 - max_probs) * 1 / theoretical_max_TD_cls_term
if len(where_not_nan) != len(normalized_probs):
return 1e-16
else:
if isinstance(
self._Regressor_.regr_dfs_per_class[cls_key[0]], pd.DataFrame
):
max_APC, which_col_max = self._Regressor_.get_max_APC_val(
names[1], cls_key[0], args
)
self._MAX_APC_str_.append(which_col_max)
if self._Regressor_.abs_max_APC is None:
raise ValueError("No max APC value found in TableData...")
A1 = kwargs.get("TD_A1", 0.5)
scaling_log_func = lambda A1, x: np.log10(A1 * np.abs(x) + 1)
A0 = 1 / scaling_log_func(A1, self._Regressor_.abs_max_APC)
regression_term = A0 * scaling_log_func(A1, max_APC)
else:
regression_term = 1e-16
TD_TAU = kwargs.get("TD_TAU", 0.5)
TD_BETA = kwargs.get("TD_BETA", 1.0)
if kwargs.get("TD_verbose", False):
print("TD_TAU: {0} | TD_BETA: {1}".format(TD_TAU, TD_BETA))
return (TD_TAU * classification_term
+ (1 - TD_TAU) * regression_term) ** TD_BETA
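# Hedged standalone sketch: the regression term rescales the absolute APC value
# through A0 * log10(A1*|x| + 1), with A0 chosen so the largest APC seen in the
# table maps to 1. The abs_max_APC and sample APC values below are made up.
import numpy as np
demo_A1, demo_abs_max_APC = 0.5, 40.0
demo_scaling = lambda a1, x: np.log10(a1 * np.abs(x) + 1)
demo_A0 = 1.0 / demo_scaling(demo_A1, demo_abs_max_APC)
for demo_apc in (0.0, 5.0, 40.0):
    print(demo_apc, float(demo_A0 * demo_scaling(demo_A1, demo_apc)))  # 0 .. 1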
def save_chain_step_history(self, key, chain_step_history,
overwrite=False):
"""Save PTMCMC output chain_step_history inside the sampler object."""
if not (key not in self._chain_step_hist_holder_.keys() or overwrite):
raise Exception(
"\nYou are about to overwrite an existing element in '{0}'\n\n"
"\tUse the option 'overwrite=True' to reassign.".format(key))
self._chain_step_hist_holder_[key] = chain_step_history
print("Saved chain to '{0}'.".format(key))
def get_saved_chain_step_history(self, key, return_all=False):
"""Return the saved chain step history."""
if return_all:
return self._chain_step_hist_holder_
else:
return self._chain_step_hist_holder_[key]
def run_PTMCMC(self, T_max, N_tot, target_dist, classifier_name,
init_pos=None, N_draws_per_swap=3, c_spacing=1.2,
alpha=None, upper_limit_reject=1e5, verbose=False,
trace_plots=False, **TD_kwargs):
"""Run a Paralel Tempered MCMC with user-specified target distribution.
Calls the method `run_MCMC`.
Parameters
----------
T_max : float
Sets the maximum temperature in the tempered MCMC chain ladder.
N_tot : int
The total number of iterations for the PTMCMC.
target_dist : callable
The target distribution to sample.
Must take arguments (method_name, location_to_eval)
(A 2D analytic function is provided - analytic_target_dist)
classifier_name : str, list
A single string or list of strings specifying the interpolator to
use for classification or classification & regression respectively.
init_pos : array
Initial position of walkers in each axis. Default is the median of
the input data in TableData.
N_draws_per_swap : int, optional
Number of draws to perform for each MCMC before swap proposals.
c_spacing : float, optional
Sets the spacing of temperatures in each chain.
T_{i+1} = T_{i}^{1/c}, range: [T_max , T=1]
alpha : float, optional
Sets the standard deviation of steps taken by the walkers. Default
is 1/5 the range of training data from TableData.
upper_limit_reject : float, optional
Sets the upper limit of rejected points.
verbose : bool, optional
Useful print statements during execution.
Returns
-------
chain_step_history : dict
Hold the step history for every chain. Keys are integers that range
from 0 (max T) to the total number of chains -1 (min T).
T_list : array
Array filled with the temperatures of each chain from max to min.
Notes
-----
There is a zero prior on the PTMCMC outside the range of training data.
"""
# create list of Temperatures: T_{i+1}=T_{i}^{1/c}, range: [T_max, T=1]
T_list = [T_max]
while T_list[-1] > 1.3:
T_list.append(T_list[-1] ** (1 / c_spacing))
T_list.append(1)
T_list = np.array(T_list)
num_chains = len(T_list)
if verbose:
print("Num chains: {0}\nTemperatures: {1}\n".format(num_chains,
T_list))
# data storage
chain_holder = OrderedDict()
for i in range(len(T_list)):
chain_holder[i] = []
N_loops = int(N_tot / N_draws_per_swap)
# Initial conditions for all links in chain
# ADD: init_pos can be a unitless position in the range of the axes of
# the data. This change should also be applied to alpha - user just
# gives num(0, 1]
if init_pos is None:
init_pos = np.median(self._TableData_._input_.values, axis=0)
if not isinstance(init_pos, np.ndarray):
init_pos = np.array(init_pos)
if init_pos.ndim > 1:
raise ValueError(
"init_pos has {0} dimensions, must be one dimensional.".format(
init_pos.ndim
)
)
if alpha is None:
alpha = [abs(max_val-min_val)/5 for min_val, max_val in
zip(self._min_vals_, self._max_vals_)]
# start chains in the same position
this_iter_step_loc = [init_pos] * num_chains
# Accept ratio tracker
total_acc = np.zeros(num_chains)
total_rej = np.zeros(num_chains)
acc_ratio_holder = np.zeros(num_chains)
start_time = time.time()
for counter in range(N_loops):
# Number of draws before swap
N_draws = N_draws_per_swap
last_step_holder = []
for i in range(num_chains):
# Run MCMC as f(T) N_draw times
step_history = [this_iter_step_loc[i]]
steps, acc, rej = self.run_MCMC(
N_draws,
alpha,
step_history,
target_dist,
classifier_name,
T=T_list[i],
upper_limit_reject=upper_limit_reject,
**TD_kwargs
)
# save 'current' params for each T
last_step_holder.append(steps[-1])
total_acc[i] += acc
total_rej[i] += rej
acc_ratio_holder[i] = total_acc[i] / (total_acc[i]
+ total_rej[i])
# data storage
chain_holder[i].append(np.array(steps))
if verbose:
# useful output during the PTMCMC
num_bars = 20
how_close = int((counter / (N_loops - 1)) * num_bars)
progress_bar = (
"|"
+ how_close * "="
+ ">"
+ abs(num_bars - how_close) * " "
+ "|"
+ "{0:.1f}%".format(counter / (N_loops - 1) * 100)
)
b = ("num_acc/total: Tmax={0:.4f} Tmin={1:.4f} loop #{2}, {3}".
format(acc_ratio_holder[0], acc_ratio_holder[-1],
counter, progress_bar))
sys.stdout.write("\r" + b)
# Calc H to see if chains SWAP
accept = 0
reject = 0
for i in range(len(T_list) - 1):
args_i = last_step_holder[i]
args_i_1 = last_step_holder[i + 1]
top = (target_dist(classifier_name, args_i_1, **TD_kwargs))**(
1 / T_list[i]
) * (target_dist(classifier_name, args_i, **TD_kwargs)) ** (
1 / T_list[i + 1]
)
bot = (target_dist(classifier_name, args_i, **TD_kwargs)) ** (
1 / T_list[i]
) * (target_dist(classifier_name, args_i_1, **TD_kwargs)) ** (
1 / T_list[i + 1]
)
# can get div 0 errors when using linear because of nans
if bot == 0:
ratio = 0
else:
ratio = top / bot
# inter-chain transition probability
H = min(1, ratio)
chance = np.random.uniform(low=0, high=1)
if chance <= H:
accept += 1
# SWAP the params between the two chains
last_step_holder[i] = args_i_1
last_step_holder[i + 1] = args_i
else:
reject += 1
# print(accept, reject); print(last_step_holder)
# Update current params (could be swapped)
# to read into MCMC on next iteration!!!
this_iter_step_loc = last_step_holder
chain_step_history = OrderedDict()
for pos, steps in chain_holder.items():
chain_step_history[pos] = np.concatenate(steps)
if verbose:
print("\nLength of chains: \n{0}".format(
np.array([len(chain_step_history[i])
for i in range(num_chains)])))
fin_time_s = time.time() - start_time
print(
"Finished in {0:.2f} seconds, {1:.2f} minutes.".format(
fin_time_s, fin_time_s / 60
)
)
if trace_plots:
self.make_trace_plot(chain_step_history, T_list,
0, save_fig=False)
self.make_trace_plot(chain_step_history, T_list,
num_chains - 1, save_fig=False)
return chain_step_history, T_list
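# Hedged standalone sketch: the temperature ladder built at the top of
# run_PTMCMC follows T_{i+1} = T_i**(1/c) down to T = 1. Reproducing it in
# isolation makes the role of c_spacing easy to see; the values used here are
# illustrative only.
import numpy as np
demo_T_max, demo_c = 50.0, 1.2
demo_T_list = [demo_T_max]
while demo_T_list[-1] > 1.3:
    demo_T_list.append(demo_T_list[-1] ** (1.0 / demo_c))
demo_T_list.append(1.0)
print(np.round(demo_T_list, 3))   # geometric-like spacing from T_max down to 1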
def run_MCMC(self, N_trials, alpha, step_history, target_dist,
classifier_name, T=1, upper_limit_reject=1e4, **TD_kwargs):
"""Run a Markov chain Monte Carlo given a target distribution.
Parameters
----------
N_trials : int
Number of proposals or trial steps to take before stopping.
alpha : float
Related to the step size of the MCMC walker.
Defines the standard deviation of a zero mean normal
from which the step is randomly drawn.
step_history : list
Initial starting location in parameter space.
Could contain an arbitrary number of previous steps
but a walker will start at the last step in the list.
target_dist : callable
The target distribution to sample.
Must take arguments ( method_name, element_of_step_history )
(A 2D analytic function is provided - TD_2d_analytic)
classifier_name : str
Name of interpolation technique used in the target_dist.
T : float, optional
Temperature of the MCMC.
upper_limit_reject : int, optional
Sets the maximum number of rejected steps before the MCMC
stops walking, avoiding a slowly converging walk with few
accepted points.
Returns
-------
step_history : array
An array containing all accepted steps of the MCMC.
accept : int
Total number of accepted steps.
reject : int
Total number of rejected steps.
Notes
-----
Assumes uniform priors and a symmetric jump proposal (Gaussian).
"""
if not isinstance(step_history, np.ndarray):
step_history = np.array(step_history)
if step_history.ndim == 1:
step_history = np.array([step_history])
# We will be appending to the list
step_history = list(step_history)
accept = 0
reject = 0
while (accept + reject < N_trials
and reject < abs(int(upper_limit_reject))):
current_step = step_history[-1]
# f(θ)
val = target_dist(classifier_name, current_step, **TD_kwargs)
# θ+Δθ
trial_step = current_step + np.random.normal(
0, alpha, size=len(current_step)
)
# f(θ+Δθ)
trial_val = target_dist(classifier_name, trial_step, **TD_kwargs)
# check if the trial step is in the range of data
for i, step_in_axis in enumerate(trial_step):
if (
step_in_axis <= self._max_vals_[i]
and step_in_axis >= self._min_vals_[i]
):
pass
else:
trial_val = 0 # effectively rejects points outside of the data range
if val == 0: # avoid div 0 errors
ratio = 0
else:
ratio = trial_val / val
accept_prob = min(1, (ratio) ** (1 / T))
chance = np.random.uniform(low=0, high=1)
if chance <= accept_prob:
accept += 1
step_history.append(trial_step)
else:
reject += 1
return np.array(step_history), accept, reject
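# Hedged standalone sketch: the core tempered Metropolis accept/reject rule of
# run_MCMC, applied to the 2D analytic target with a simple box prior. The step
# size, bounds and temperature are illustrative choices, not pipeline values.
import numpy as np

def demo_target(p):
    mu, nu = p
    return (16 / (3 * np.pi)) * (
        np.exp(-(mu ** 2) - (9 + 4 * mu ** 2 + 8 * nu) ** 2)
        + 0.5 * np.exp(-8 * mu ** 2 - 8 * (nu - 2) ** 2)
    )

demo_rng = np.random.default_rng(0)
demo_T, demo_alpha = 1.0, 0.3
demo_bounds = np.array([[-3.0, 3.0], [-4.0, 4.0]])
demo_chain = [np.array([0.0, 2.0])]
for _ in range(2000):
    cur = demo_chain[-1]
    trial = cur + demo_rng.normal(0.0, demo_alpha, size=2)
    val, tval = demo_target(cur), demo_target(trial)
    if np.any(trial < demo_bounds[:, 0]) or np.any(trial > demo_bounds[:, 1]):
        tval = 0.0                        # zero prior outside the box
    ratio = 0.0 if val == 0 else tval / val
    if demo_rng.uniform() <= min(1.0, ratio ** (1.0 / demo_T)):
        demo_chain.append(trial)
print(len(demo_chain), np.mean(demo_chain, axis=0))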
def normalize_step_history(self, step_history):
"""Take steps and normalize [0,1] according to min/max in each axis.
The max and min are taken from the original data set from TableData.
"""
normed_steps = np.copy(step_history)
for j, steps_in_axis in enumerate(step_history.T):
normed_steps.T[j] = (steps_in_axis - self._min_vals_[j]) / (
self._max_vals_[j] - self._min_vals_[j]
)
return normed_steps
def undo_normalize_step_history(self, normed_steps):
"""Rescale step history.
Take normed steps from [0,1] and return their value
in the original range of the axes based off the range in TableData.
"""
mapped_steps = np.copy(normed_steps)
for j, steps_in_axis in enumerate(normed_steps.T):
mapped_steps.T[j] = (
steps_in_axis * (self._max_vals_[j] - self._min_vals_[j])
+ self._min_vals_[j]
)
return mapped_steps
def do_simple_density_logic(self, step_history, N_points, Kappa,
var_mult=None, add_mvns_together=False,
include_training_data=True, verbose=False):
"""Perform multivariate normal density logic on a given step history.
This is a simplified version of the method 'do_density_logic'.
It assumes that every accepted point will have the same exact MVN.
Each proposal distribution starts with the training set from TableData
which keeps training data from being proposed again.
Parameters
----------
step_history : ndarray
List of points from a PTMCMC or MCMC. (posterior)
N_points : int
Number of points desired to be drawn from the posterior but may not
actually be the number of points accepted. Contributes to the
length scale of the MVN distribution of accepted points
(along with kappa).
Kappa : float
Scaling factor that sets the initial size of the MVN for accepted
points. This should be proportional to the filling factor of the
area of interest described by the target distribution used to
create the posterior.
var_mult : float, ndarray, optional
Variance multiplier for the MVN of accepted points.
add_mvns_together : bool, optional
Add MVNs together when creating the accepted point distribution.
include_training_data : bool, optional
Include the training data in the target distribution before
sampling.
verbose : bool, optional
Print useful diagnostic information.
Returns
-------
accepted_points : ndarray
Accepted points from the posterior to be labeled by the user.
(query points)
rejected_points : ndarray
Rejected points from the posterior.
Notes
-----
The 'accepted' language here refers to query points for the oracle
to label in an active learning scheme. It is not the accept/reject
step normally used in MCMC.
"""
if include_training_data:
if self._TableData_ is None:
raise ValueError("No TableData found in sampler. "
"Set `include_training_data` to false.")
original_training_data = self._TableData_.get_data("input")
else:
original_training_data = []
how_many_training_data = len(original_training_data)
n_dim = len(original_training_data[0])
# approximate scaling of the variance given a set of N proposal points
# the filling factor Kappa is not generally known a priori
if var_mult is None:
var_mult = 1
sigma = Kappa * 0.5 * (N_points) ** (-1.0 / n_dim) * var_mult
Covariance = sigma * np.identity(n_dim)
if verbose:
print("Covariance: \n{0}".format(Covariance))
single_MVN = scipy.stats.multivariate_normal(np.zeros(n_dim),
Covariance)
max_val = 1 / np.sqrt((
2 * np.pi) ** (n_dim) * np.linalg.det(Covariance))
# treat the training data as already accepted points
accepted_points = list(original_training_data)
rejected_points = []
for step in step_history:
if len(accepted_points) < 1:
accepted_points.append(step)
continue
# distance: how far is this point from all the accepted_points
dist_to_acc_pts = step - np.array(accepted_points)
pdf_val_at_point = single_MVN.pdf(dist_to_acc_pts)
if isinstance(pdf_val_at_point, float):
pdf_val_at_point = [pdf_val_at_point]
if add_mvns_together:
pdf_val_at_point = [np.sum(pdf_val_at_point)]
random_chances = np.random.uniform(
low=0, high=max_val, size=len(pdf_val_at_point)
)
chance_above_distr = random_chances > pdf_val_at_point
if chance_above_distr.all():
accepted_points.append(step)
else:
rejected_points.append(step)
# remove training data from accepted points
only_new_accepted_points = accepted_points[how_many_training_data:]
return np.array(only_new_accepted_points), np.array(rejected_points)
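# Hedged standalone sketch: the density-based thinning above accepts a new
# point only when uniform draws on [0, MVN peak] land above the kernel value
# contributed by every previously accepted point. Dimensions, kernel width and
# the fake posterior below are arbitrary illustration choices.
import numpy as np
import scipy.stats

demo_rng = np.random.default_rng(1)
demo_ndim, demo_sigma = 2, 0.05
demo_cov = demo_sigma * np.identity(demo_ndim)
demo_kernel = scipy.stats.multivariate_normal(np.zeros(demo_ndim), demo_cov)
demo_peak = 1.0 / np.sqrt((2 * np.pi) ** demo_ndim * np.linalg.det(demo_cov))
demo_posterior = demo_rng.uniform(0.0, 1.0, size=(500, demo_ndim))
demo_accepted = [demo_posterior[0]]
for demo_step in demo_posterior[1:]:
    pdf_vals = np.atleast_1d(demo_kernel.pdf(demo_step - np.array(demo_accepted)))
    chances = demo_rng.uniform(0.0, demo_peak, size=len(pdf_vals))
    if np.all(chances > pdf_vals):
        demo_accepted.append(demo_step)
print(len(demo_accepted), "well-separated points kept of", len(demo_posterior))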
def do_density_logic(self, step_history, N_points, Kappa, shuffle=False,
norm_steps=False, var_mult=None,
add_mvns_together=False, pre_acc_points=None,
verbose=False):
"""Do the density based of the normal gaussian kernel on each point.
This method automatically takes out the first 5% of steps of the MCMC
so that the initial starting points are not chosen automatically
(if you start in a non-ideal region). Wait for the burn in.
Parameters
----------
step_history : ndarray
N_points : int
Kappa : float
shuffle : bool, optional
norm_steps : bool, optional
var_mult : float, optional
add_mvns_together : bool, optional
verbose : bool, optional
Returns
-------
accepted_points : ndarray
rejected_points : ndarray
accepted_sigmas : ndarray
"""
if shuffle: # shuffle the order of the steps
if verbose:
print("Shuffling steps....")
np.random.shuffle(step_history) # returns none
if norm_steps: # normalize steps
if verbose:
print("Normalizing steps....")
step_history = self.normalize_step_history(step_history)
# Set the default average length scale
num_dim = len(self._Classifier_.input_data[0])
sigma = Kappa * 0.5 * (N_points) ** (-1.0 / num_dim)
# We assume the covariance is the identity - later we may pass the
# entire array but for now we just assume you pass a
# variance multiplier (var_mult)
if var_mult is None:
var_mult = np.array([1] * num_dim)
else:
var_mult = np.array(var_mult)
if len(var_mult) != num_dim:
raise ValueError(
"var_mult must have one entry per input-data dimension."
)
if verbose:
print("Num dims: {0}".format(num_dim))
print("length scale sigma: {0}".format(sigma))
print("var_mult: {0}".format(var_mult))
print("Kappa: {0}".format(Kappa))
# -> Forcing a few key points to always be accepted, for example
if pre_acc_points is None:
accepted_points = []
elif isinstance(pre_acc_points, np.ndarray):
accepted_points = list(pre_acc_points)
else:
accepted_points = []
accepted_sigmas = []
max_val_holder = []
accepted_mvn_holder = []
rejected_points = []
skip_steps = int(len(step_history) * 0.05)
good_steps = step_history[
skip_steps:
] # skip first 5% of steps to get into a good region
for i in range(len(good_steps)):
proposed_step = good_steps[i]
accept = False
if len(accepted_points) == 0:
accept = True
else:
# If you enter you must have accepted one point
Sigma = accepted_sigmas[-1]
k = len(Sigma)
max_val = 1 / np.sqrt((2 * np.pi)**k * np.linalg.det(Sigma))
max_val_holder.append(max_val)
rnd_chance = np.random.uniform(
low=0, high=np.max(max_val_holder), size=1
)
# we will choose the chance from [0, highest point in distr]
distr_holder = []
for point in accepted_mvn_holder:
eval_mvn_at_new_step = point.pdf(proposed_step)
distr_holder.append(eval_mvn_at_new_step)
if add_mvns_together:
# instead of checking each individual point keeping all
# mvns separate, we want to add them
# together and get upper bound
# IF we do this we need to change the MVN to not be
# normalized !!!!
# THE UNORMALIZED MVN IS NOT IMPLEMENTED
total_chance_above_distr = (
rnd_chance > np.sum(distr_holder))
else:
total_chance_above_distr = np.sum(
rnd_chance > distr_holder)
if len(accepted_points) == total_chance_above_distr:
accept = True
else:
pass # REJECT
if accept:
# https://stackoverflow.com/questions/619335
# corner = np.random.normal(0,0.1, 1)
# A = np.array( [ [sigma, corner], [corner, sigma] ] )
# Sigma = np.dot(A,A.transpose())
# Sigma = [ [sigma*var_mult[0], 0.], [0., sigma*var_mult[1]] ]
Sigma = (sigma * np.identity(len(var_mult))
* np.array([var_mult]))
mvn = scipy.stats.multivariate_normal(proposed_step, Sigma)
accepted_mvn_holder.append(mvn)
accepted_sigmas.append(Sigma)
accepted_points.append(proposed_step)
else:
rejected_points.append(proposed_step)
if verbose:
print("Num accepted: {0}".format(len(accepted_points)))
print("Num rejected: {0}".format(len(rejected_points)))
if norm_steps:
if verbose:
print("Unormalizing steps....")
accepted_points = self.undo_normalize_step_history(accepted_points)
rejected_points = self.undo_normalize_step_history(rejected_points)
return (
np.array(accepted_points),
np.array(rejected_points),
np.array(accepted_sigmas),
)
def get_proposed_points(self, step_history, N_points, Kappa, shuffle=False,
norm_steps=False, add_mvns_together=False,
include_training_data=True, var_mult=None,
seed=None, n_repeats=1, max_iters=1e3,
verbose=False, **kwargs):
"""Get proposed points in parameter space given a MCMC step history.
The density logic is not deterministic, so multiple iterations
may be needed to converge on a desired number of proposed points.
This method performs multiple calls to do_simple_density_logic while
changing Kappa in order to return the desired number of points. After
n_repeats instances of the correct number of N_points, the distribution
with the largest average distance is chosen.
Warning: This algorithm has not been tested for large N data sets and
may struggle to converge.
Parameters
----------
step_history : ndarray
Posterior from which to sample new query points.
N_points : int
N query points to converge to.
Kappa : float
Multiplies the length scale of MVNs and changes such that the
desired number of query points is found.
shuffle : bool, optional
Shuffle points in posterior in place before sampling.
norm_steps : bool, optional
Normalize steps before sampling.
add_mvns_together : bool, optional
Add MVNs of accepted point distribution together.
include_training_data : bool, optional
Include training data in the accepted point distribution before
sampling.
var_mult : ndarray, optional
Variance multiplier.
seed : float, optional
Random seed to use for random sampling.
n_repeats: int, optional
Number of times to converge to the correct number of points. Each
iteration may be a different realization of the posterior.
verbose : bool, optional
Print useful information.
**kwargs
show_plots : bool, optional
Show 2D plot of proposed points with step history & training data.
Returns
-------
acc_pts : ndarray
Array of proposed points to be used as initial
conditions in new simulations.
Kappa : float
Scaling factor which reproduced the desired number
of accepted points.
Notes
-----
Will automatically exit if it goes through max_iters iterations
without converging on the desired number of points.
"""
if seed is not None:
np.random.seed(seed=seed)
print("Setting seed: {0}".format(seed))
if verbose:
print(
"Converging to {0} points, {1} times.".format(N_points,
int(n_repeats))
)
enough_good_pts = False
how_many_good_pts = int(n_repeats)
good_n_points = []
avg_distances = []
good_kappas = []
iters = 1
start_time = time.time()
while not (enough_good_pts) and iters < max_iters:
acc_pts, rej_pts = self.do_simple_density_logic(
step_history,
N_points,
Kappa,
var_mult=var_mult,
add_mvns_together=add_mvns_together,
include_training_data=include_training_data,
verbose=False,
)
if len(acc_pts) == 0:
Kappa = Kappa * 0.5
iters += 1
continue
if len(acc_pts) == N_points:
average_dist_between_acc_points = np.mean(pdist(acc_pts))
good_n_points.append(acc_pts)
avg_distances.append(average_dist_between_acc_points)
good_kappas.append(Kappa)
if len(good_n_points) >= how_many_good_pts:
enough_good_pts = True
if verbose:
if len(acc_pts) == N_points:
print_str = " *{0}* {1:2.2f}s".format(
len(good_n_points), abs(start_time - time.time())
)
ending = "\n"
else:
print_str = ""
ending = "\r"
print(
"\t acc_pts: {0}, Kappa = {1:.3f}".format(len(acc_pts),
Kappa)
+ print_str,
end=ending,
)
diff = abs(len(acc_pts) - N_points)
change_factor = 0.01 / (max(1, np.log10(iters)))
if len(acc_pts) > N_points:
Kappa = Kappa * (1 + change_factor * diff) # increase kappa
elif len(acc_pts) < N_points:
if (1 - change_factor * diff) < 0:
Kappa = Kappa * 0.1
else:
Kappa = Kappa * (1 - change_factor*diff) # decrease kappa
iters += 1
if iters == max_iters:
print("Reached max iters before converging!")
if verbose:
print("\nFinal Kappa = {0}\nConverged in {1} iters.".format(Kappa,
iters))
# we want 1/r dependence to penalize closely spaced points
where_best_distribution = np.argmax(avg_distances)
best_acc_pts = good_n_points[where_best_distribution]
best_Kappa = good_kappas[where_best_distribution]
if verbose:
print("Average Distances: \n{0}".format(np.array(avg_distances)))
print("Kappas: \n{0}".format(np.array(good_kappas)))
print("loc: {0}".format(where_best_distribution))
if kwargs.get("show_plots", False):
self.make_prop_points_plots(step_history, best_acc_pts)
return best_acc_pts, best_Kappa
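# Hedged standalone sketch: the feedback rule in the loop above nudges Kappa up
# when too many points come back and down when too few, with a step size that
# shrinks as the iteration count grows. The numbers below are illustrative.
import numpy as np

def demo_update_kappa(kappa, n_acc, n_target, iters):
    change = 0.01 / max(1.0, np.log10(iters))
    diff = abs(n_acc - n_target)
    if n_acc > n_target:
        return kappa * (1 + change * diff)    # larger kernels -> fewer points
    if n_acc < n_target:
        factor = 1 - change * diff
        return kappa * 0.1 if factor < 0 else kappa * factor
    return kappa

print(demo_update_kappa(1.0, 40, 25, iters=3))   # too many accepted -> Kappa grows
print(demo_update_kappa(1.0, 10, 25, iters=3))   # too few accepted -> Kappa shrinks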
def make_prop_points_plots(self, step_hist, prop_points, axes=(0, 1),
show_fig=True, save_fig=False):
"""Plot the proposed / accepted points over the step history."""
axis1, axis2 = axes
fig, sub = plt.subplots(nrows=1, ncols=1, figsize=(4, 4), dpi=90)
training_data = self._Classifier_.input_data
sub.scatter(
training_data.T[axis1],
training_data.T[axis2],
color="k",
marker="x",
label="training data",
)
sub.scatter(
step_hist.T[axis1],
step_hist.T[axis2],
color="C0",
alpha=0.5,
label="step history",
)
sub.scatter(
prop_points.T[axis1],
prop_points.T[axis2],
color="pink",
label="new proposed points",
)
sub.set_xlim(self._min_vals_[axis1], self._max_vals_[axis1])
sub.set_ylim(self._min_vals_[axis2], self._max_vals_[axis2])
plt.legend(loc="best", bbox_to_anchor=[1, 0, 0.22, 1])
return fig, sub
def make_trace_plot(self, chain_holder, T_list, Temp, save_fig=False,
show_fig=True):
"""Make a `step number` vs. `position of a sampler in an axis` plot.
This function makes titles assuming you are using the data
from the classifier.
"""
if not show_fig and not save_fig:
return
which_temp = Temp # int? index
n_axis = len(chain_holder[which_temp].T)
steps = chain_holder[which_temp].T
axis_names = self._Classifier_._TableData_.get_data(
what_data="input", return_df=True
).keys()
fig, axs = plt.subplots(
nrows=n_axis,
ncols=2,
figsize=(10, 2.5 * n_axis),
dpi=100,
gridspec_kw={"width_ratios": [1.8, 1]},
)
for num, ax in enumerate(axs):
ax[0].plot(steps[num], "-", linewidth=0.5, color="C4")
ax[0].set_title("Input: {0}".format(axis_names[num]))
ax[0].set_ylabel("Axis {0}".format(num) + " , T={0:.2f}".
format(T_list[which_temp]), fontsize=12)
ax[0].set_xlabel("N steps", fontsize=12)
ax[1].hist(steps[num],
bins=50, histtype="step", density=True, color="C1")
ax[1].set_xlabel("Axis {0}".format(num), fontsize=12)
ax[1].set_ylabel("Posterior", fontsize=12)
fig.subplots_adjust(hspace=0.45)
if save_fig:
plt.savefig("trace_plot_T{0:.0f}.pdf".format(T_list[which_temp]))
if show_fig:
plt.show()
return None
|
POSYDON-codeREPO_NAMEPOSYDONPATH_START.@POSYDON_extracted@POSYDON-main@posydon@active_learning@[email protected]@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/scatterternary/selected/__init__.py",
"type": "Python"
}
|
import sys
if sys.version_info < (3, 7):
from ._textfont import TextfontValidator
from ._marker import MarkerValidator
else:
from _plotly_utils.importers import relative_import
__all__, __getattr__, __dir__ = relative_import(
__name__, [], ["._textfont.TextfontValidator", "._marker.MarkerValidator"]
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@scatterternary@selected@[email protected]_END.py
|
{
"filename": "densvar.py",
"repo_name": "desihub/LSS",
"repo_path": "LSS_extracted/LSS-main/Sandbox/imaging/densvar.py",
"type": "Python"
}
|
import fitsio
import astropy.io.fits as fits
import healpy as hp
import numpy as np
from matplotlib import pyplot as plt
pixfn = '/global/cfs/cdirs/desi/target/catalogs/dr9m/0.44.0/pixweight/main/resolve/dark/pixweight-dark.fits'
hdr = fits.getheader(pixfn,1)
nside,nest = hdr['HPXNSIDE'],hdr['HPXNEST']
print(nside,nest)
R_G=3.214 # http://legacysurvey.org/dr8/catalogs/#galactic-extinction-coefficients
R_R=2.165
R_Z=1.211
dr = '9'
fidf = 'targetDR9m44.fits'
ranf = '/global/cfs/cdirs/desi/target/catalogs/dr9m/0.44.0/randoms/resolve/randoms-1-0.fits'
if dr == '9':
#this will be needed no matter the sample, might want more
#rall = fitsio.read('/global/cfs/cdirs/desi/target/catalogs/dr9m/0.44.0/randoms/resolve/randoms-1-0.fits')
#print(len(rall))
sdir = '/project/projectdirs/desi/users/ajross/dr9/'
def mask(dd,mb=[1,5,6,7,11,12,13]):
keep = (dd['NOBS_G']>0) & (dd['NOBS_R']>0) & (dd['NOBS_Z']>0)
print(len(dd[keep]))
keepelg = keep
for bit in mb:
keepelg &= ((dd['MASKBITS'] & 2**bit)==0)
print(len(dd[keepelg]))
dd = dd[keepelg]
return dd
def sel_reg(ra,dec,reg):
wra = (ra > 100-dec)
wra &= (ra < 280 +dec)
if reg == 'DN':
w = dec < 32.375
w &= wra
if reg == 'DS':
w = ~wra
w &= dec > -25
return w
#def split_reg():
#rall = mask(rall)
def radec2thphi(ra,dec):
return (-dec+90.)*np.pi/180.,ra*np.pi/180.
def thphi2radec(theta,phi):
return 180./np.pi*phi,-(180./np.pi*theta-90)
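# Hedged standalone sketch: round trip of the RA/Dec <-> (theta, phi) <-> HEALPix
# conversions used throughout this module. The nside below is an arbitrary demo
# choice, not the resolution of the pixweight map read at the top of the file.
import numpy as np
import healpy as hp
demo_ra = np.array([10.0, 150.0, 250.0])
demo_dec = np.array([-20.0, 0.0, 45.0])
demo_th, demo_phi = (-demo_dec + 90.) * np.pi / 180., demo_ra * np.pi / 180.
demo_pix = hp.ang2pix(64, demo_th, demo_phi, nest=True)
demo_th2, demo_phi2 = hp.pix2ang(64, demo_pix, nest=True)
demo_ra2, demo_dec2 = 180. / np.pi * demo_phi2, -(180. / np.pi * demo_th2 - 90)
print(demo_pix, np.round(demo_ra2, 2), np.round(demo_dec2, 2))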
def plot_hpdens(type,reg=False,fnc=None,sz=.2,vx=1.5,vm=.5,weights=None):
if fnc is None:
ff = fidf
else:
ff = fnc
ft = fitsio.read(sdir+type+ff,columns=['RA','DEC','PHOTSYS','NOBS_G','NOBS_R','NOBS_Z','MASKBITS'])
print(len(ft))
ft = mask(ft)
print(len(ft))
rl = fitsio.read(ranf,columns=['RA','DEC','PHOTSYS','NOBS_G','NOBS_R','NOBS_Z','MASKBITS'])
print(len(rl))
rl = mask(rl)
print(len(rl))
if reg:
if reg == 'S' or reg == 'N':
wr = rl['PHOTSYS'] == reg
wd = ft['PHOTSYS'] == reg
else:
wr = sel_reg(rl['RA'],rl['DEC'],reg)
wd = sel_reg(ft['RA'],ft['DEC'],reg)
rl = rl[wr]
ft = ft[wd]
rth,rphi = radec2thphi(rl['RA'],rl['DEC'])
rpix = hp.ang2pix(nside,rth,rphi,nest=nest)
dth,dphi = radec2thphi(ft['RA'],ft['DEC'])
dpix = hp.ang2pix(nside,dth,dphi,nest=nest)
pixlr = np.zeros(12*nside*nside)
pixlg = np.zeros(12*nside*nside)
if weights is None:
weights = np.ones(len(pixlr))
for pix in rpix:
pixlr[pix] += 1.
print('randoms done')
for pix in dpix:
pixlg[pix] += 1.
wp = (pixlr > 0) & (weights*0 == 0)
pixls = []
for i in range(0,len(pixlr)):
if pixlr[i] > 0 and weights[i]*0 == 0:
pixls.append(i)
pixls = np.array(pixls).astype(int)
th,phi = hp.pix2ang(nside,pixls,nest=nest)
od = pixlg[wp]/pixlr[wp]*weights[wp]
od = od/np.mean(od)
ra,dec = thphi2radec(th,phi)
if reg == 'DS':
wr = ra > 250
ra[wr] -=360
if vx == None:
vx = np.max(od)
if vm == None:
vm = np.min(od)
plt.scatter(ra,np.sin(dec*np.pi/180),c=od,s=sz,vmax=vx,vmin=vm)#,vmin=1.,vmax=2)
plt.xlabel('RA')
plt.ylabel('sin(DEC)')
plt.colorbar()
plt.title('relative '+type+' density')
plt.show()
def plot_hpprop(par,type='ELG',reg=False,fnc=None,sz=.2,vx=None,vm=None,weights=None):
if fnc is None:
ff = fidf
else:
ff = fnc
ft = fitsio.read(sdir+type+ff,columns=['RA','DEC','PHOTSYS','NOBS_G','NOBS_R','NOBS_Z','MASKBITS'])
print(len(ft))
ft = mask(ft)
print(len(ft))
rl = fitsio.read(ranf,columns=['RA','DEC','PHOTSYS','NOBS_G','NOBS_R','NOBS_Z','MASKBITS'])
print(len(rl))
rl = mask(rl)
print(len(rl))
if reg:
if reg == 'S' or reg == 'N':
wr = rl['PHOTSYS'] == reg
wd = ft['PHOTSYS'] == reg
else:
wr = sel_reg(rl['RA'],rl['DEC'],reg)
wd = sel_reg(ft['RA'],ft['DEC'],reg)
rl = rl[wr]
ft = ft[wd]
rth,rphi = radec2thphi(rl['RA'],rl['DEC'])
rpix = hp.ang2pix(nside,rth,rphi,nest=nest)
dth,dphi = radec2thphi(ft['RA'],ft['DEC'])
dpix = hp.ang2pix(nside,dth,dphi,nest=nest)
pixlr = np.zeros(12*nside*nside)
pixlg = np.zeros(12*nside*nside)
if weights is None:
weights = np.ones(len(pixlr))
for pix in rpix:
pixlr[pix] += 1.
print('randoms done')
for pix in dpix:
pixlg[pix] += 1.
wp = (pixlr > 0) & (weights*0 == 0)
parv = fitsio.read(pixfn)
if par == 'PSFTOT':
parv = (parv[wp]['PSFSIZE_G'])*(parv[wp]['PSFSIZE_R'])*(parv[wp]['PSFSIZE_Z'])
elif par == 'SN2TOT_FLAT':
ebv = parv[wp]['EBV']
parv = 10.**(-0.4*R_G*ebv*2.)*parv[wp]['PSFDEPTH_G'] + 10.**(-0.4*R_R*ebv*2.)*parv[wp]['PSFDEPTH_R'] + 10.**(-0.4*R_Z*ebv*2.)*parv[wp]['PSFDEPTH_Z']
elif par == 'fracPSF':
wpsf = ft['MORPHTYPE'] == 'PSF'
pixlgp = np.zeros(12*nside*nside)
dpixp = dpix[wpsf]
for i in range(0,len(dpixp)):
pix = dpixp[i]
pixlgp[pix] += 1.
parv = pixlgp[wp]/pixlg[wp]
else:
parv = parv[wp][par]
pixls = []
for i in range(0,len(pixlr)):
if pixlr[i] > 0 and weights[i]*0 == 0:
pixls.append(i)
pixls = np.array(pixls).astype(int)
th,phi = hp.pix2ang(nside,pixls,nest=nest)
od = parv
if vx == None:
vx = np.max(od)
if vm == None:
vm = np.min(od)
ra,dec = thphi2radec(th,phi)
if reg == 'DS':
wr = ra > 250
ra[wr] -= 360
plt.scatter(ra,np.sin(dec*np.pi/180),c=od,s=sz,vmax=vx,vmin=vm)#,vmin=1.,vmax=2)
plt.xlabel('RA')
plt.ylabel('sin(DEC)')
plt.colorbar()
plt.title(par)
plt.show()
def plot_brickdens(type,reg=False,fnc=None,sz=.2,vx=2):
brickf = fitsio.read('/global/cfs/cdirs/cosmo/work/legacysurvey/dr9m/survey-bricks.fits.gz')
brickdictrd = {}
for i in range(0,len(brickf)):
brickdictrd[brickf[i]['BRICKID']] = (brickf[i]['RA'],brickf[i]['DEC'])
if fnc is None:
ff = fidf
else:
ff = fnc
ft = fitsio.read(sdir+type+ff)
print(len(ft))
rl = fitsio.read(ranf,columns=['RA','DEC','PHOTSYS','BRICKID'])
if reg:
wr = rl['PHOTSYS'] == reg
rl = rl[wr]
wd = ft['PHOTSYS'] == reg
ft = ft[wd]
nbr = np.max(rl['BRICKID'])
nbd = np.max(ft['BRICKID'])
nbx = np.max([nbr,nbd])+1
print('maximum brickid is '+str(nbx))
pixlr = np.zeros(nbx)
pixlg = np.zeros(nbx)
for i in range(0,len(rl)):
id = rl[i]['BRICKID']
pixlr[id] += 1.
print('randoms done')
for i in range(0,len(ft)):
id = ft[i]['BRICKID']
pixlg[id] += 1.
wp = pixlr > 0
pixls = []
for i in range(0,len(pixlr)):
if pixlr[i] > 0:
pixls.append(i)
pixls = np.array(pixls).astype(int)
rap = []
decp = []
for id in pixls:
rai,deci = brickdictrd[id]
rap.append(rai)
decp.append(deci)
od = pixlg[wp]/pixlr[wp]
od = od/np.mean(od)
decp = np.array(decp)
plt.scatter(rap,np.sin(decp*np.pi/180),c=od,s=sz,vmax=vx)#,vmin=1.,vmax=2)
plt.xlabel('RA')
plt.ylabel('sin(DEC)')
plt.show()
def plot_brickprop(type,prop,reg=False,fnc=None,sz=.2,vx=None,vm=None,decmin=-90,decmax=91,decsp=30):
brickf = fitsio.read('/global/cfs/cdirs/cosmo/work/legacysurvey/dr9m/survey-bricks.fits.gz')
brickdictrd = {}
for i in range(0,len(brickf)):
brickdictrd[brickf[i]['BRICKID']] = (brickf[i]['RA'],brickf[i]['DEC'])
if fnc is None:
ff = fidf
else:
ff = fnc
ft = fitsio.read(sdir+type+ff,columns=['RA','DEC','PHOTSYS','NOBS_G','NOBS_R','NOBS_Z','MASKBITS','BRICKID',prop])
print(len(ft))
if reg:
wd = ft['PHOTSYS'] == reg
ft = ft[wd]
nbd = np.max(ft['BRICKID'])
nbx = nbd+1
print('maximum brickid is '+str(nbx))
pixlr = np.zeros(nbx)
pixlg = np.zeros(nbx)
for i in range(0,len(ft)):
id = ft[i]['BRICKID']
pixlr[id] += 1.
pixlg[id] += ft[i][prop]
wp = pixlr > 0
pixls = []
for i in range(0,len(pixlr)):
if pixlr[i] > 0:
pixls.append(i)
pixls = np.array(pixls).astype(int)
rap = []
decp = []
for id in pixls:
rai,deci = brickdictrd[id]
rap.append(rai)
decp.append(deci)
od = pixlg[wp]/pixlr[wp]
if vx == None:
vx = np.max(od)
if vm == None:
vm = np.min(od)
#od = od/np.mean(od)
print(vm,vx)
decp = np.array(decp)
fig = plt.figure()
ax = fig.add_axes([0.1, 0.1, .8, .8])
sc = ax.scatter(rap,np.sin(decp*np.pi/180),c=od,s=sz,vmax=vx,vmin=vm)
ax.set_title(prop +' averaged in bricks')
ax.set_xlabel('RA')
ax.set_ylabel('DEC')
yt = np.arange(decmin,decmax,decsp)
yl = []
for i in range(0,len(yt)):
yl.append(str(yt[i]))
ax.set_yticks(np.sin(yt*np.pi/180.))
ax.set_yticklabels(yl)
rarn = np.max(rap)/90.-np.min(rap)/90.
ar = (np.sin(np.pi/180.*decmax)-np.sin(np.pi/180.*decmin))/rarn
print(ar,rarn,np.sin(np.pi/180.*decmax)-np.sin(np.pi/180.*decmin))
ax.set_aspect(ar*180)
fig.colorbar(sc)
plt.show()
def plot_brickpropvar(type,prop,reg=False,fnc=None,sz=.2,vx=None,vm=None):
brickf = fitsio.read('/global/cfs/cdirs/cosmo/work/legacysurvey/dr9m/survey-bricks.fits.gz')
brickdictrd = {}
for i in range(0,len(brickf)):
brickdictrd[brickf[i]['BRICKID']] = (brickf[i]['RA'],brickf[i]['DEC'])
if fnc is None:
ff = fidf
else:
ff = fnc
ft = fitsio.read(sdir+type+ff,columns=['RA','DEC','PHOTSYS','NOBS_G','NOBS_R','NOBS_Z','MASKBITS','BRICKID',prop])
print(len(ft))
if reg:
wd = ft['PHOTSYS'] == reg
ft = ft[wd]
nbd = np.max(ft['BRICKID'])
nbx = nbd+1
print('maximum brickid is '+str(nbx))
pixlr = np.zeros(nbx)
pixlg = np.zeros(nbx)
pixlv = np.zeros(nbx)
for i in range(0,len(ft)):
id = ft[i]['BRICKID']
pixlr[id] += 1.
pixlg[id] += ft[i][prop]
pixlv[id] += ft[i][prop]**2.
wp = pixlr > 0
pixls = []
for i in range(0,len(pixlr)):
if pixlr[i] > 0:
pixls.append(i)
pixls = np.array(pixls).astype(int)
rap = []
decp = []
for id in pixls:
rai,deci = brickdictrd[id]
rap.append(rai)
decp.append(deci)
od = pixlv[wp]/pixlr[wp]-(pixlg[wp]/pixlr[wp])**2.
if vx == None:
vx = np.max(od)
if vm == None:
vm = np.min(od)
#od = od/np.mean(od)
print(vm,vx)
decp = np.array(decp)
plt.scatter(rap,np.sin(decp*np.pi/180),c=od,s=sz,vmax=vx,vmin=vm)
plt.title('variance in ' +prop +' per brick')
plt.xlabel('RA')
plt.ylabel('sin(DEC)')
plt.colorbar()
plt.show()
def plot_brickprop_stdper(type,prop,reg=False,fnc=None,sz=.2,vx=None,vm=None,minn = 10):
brickf = fitsio.read('/global/cfs/cdirs/cosmo/work/legacysurvey/dr9m/survey-bricks.fits.gz')
brickdictrd = {}
for i in range(0,len(brickf)):
brickdictrd[brickf[i]['BRICKID']] = (brickf[i]['RA'],brickf[i]['DEC'])
if fnc is None:
ff = fidf
else:
ff = fnc
ft = fitsio.read(sdir+type+ff,columns=['RA','DEC','PHOTSYS','NOBS_G','NOBS_R','NOBS_Z','MASKBITS','BRICKID',prop])
print(len(ft))
if reg:
wd = ft['PHOTSYS'] == reg
ft = ft[wd]
nbd = np.max(ft['BRICKID'])
nbx = nbd+1
print('maximum brickid is '+str(nbx))
pixlr = np.zeros(nbx)
pixlg = np.zeros(nbx)
pixlv = np.zeros(nbx)
for i in range(0,len(ft)):
id = ft[i]['BRICKID']
pixlr[id] += 1.
pixlg[id] += ft[i][prop]
pixlv[id] += ft[i][prop]**2.
wp = pixlr > minn
pixls = []
for i in range(0,len(pixlr)):
if pixlr[i] > minn:
pixls.append(i)
pixls = np.array(pixls).astype(int)
rap = []
decp = []
for id in pixls:
rai,deci = brickdictrd[id]
rap.append(rai)
decp.append(deci)
od = (pixlv[wp]/pixlr[wp]-(pixlg[wp]/pixlr[wp])**2.)**.5/(pixlg[wp]/pixlr[wp])
wo = od*0 == 0
od = od[wo]
if vx == None:
vx = np.max(od)
if vm == None:
vm = np.min(od)
#od = od/np.mean(od)
print(vm,vx)
decp = np.array(decp)
rap = np.array(rap)
plt.scatter(rap[wo],np.sin(decp[wo]*np.pi/180),c=od,s=sz,vmax=vx,vmin=vm)
plt.title('variance/mean in ' +prop +' per brick')
plt.xlabel('RA')
plt.ylabel('sin(DEC)')
plt.colorbar()
plt.show()
def densvsimpar_ran(type,par,reg=None,fnc=None,vmin=None,vmax=None,nbin=10):
if fnc is None:
ff = fidf
else:
ff = fnc
print('using '+sdir+type+ff)
ft = fitsio.read(sdir+type+ff)
ft = mask(ft)
print(len(ft))
rl = fitsio.read(ranf,columns=['RA','DEC','PHOTSYS',par,'NOBS_G','NOBS_R','NOBS_Z','MASKBITS'])
rl = mask(rl)
print(len(rl))
if reg == None:
reg = 'All'
else:
wr = rl['PHOTSYS'] == reg
rl = rl[wr]
wd = ft['PHOTSYS'] == reg
ft = ft[wd]
if vmin is None:
vmin = np.min(rl[par])
if vmax is None:
vmax = np.max(rl[par])
rh,bn = np.histogram(rl[par],bins=nbin,range=(vmin,vmax))
dh,db = np.histogram(ft[par],bins=bn)
rf = len(rl)/len(ft)
sv = dh/rh*rf
ep = np.sqrt(dh)/rh*rf
bc = []
for i in range(0,len(bn)-1):
bc.append((bn[i]+bn[i+1])/2.)
plt.errorbar(bc,sv-1.,ep,fmt='ko')
plt.hist(rl[par],bins=nbin,range=(vmin,vmax),weights=0.2*np.ones(len(rl))/np.max(rh))
plt.ylim(-.3,.3)
plt.xlabel(par)
plt.ylabel('Ngal/<Ngal> - 1')
plt.title(type+' in '+reg + ' footprint')
plt.show()
wv = (rl[par]>vmin) & (rl[par] < vmax)
frac = len(rl[~wv])/len(rl)
print('fraction of randoms not included in plot: '+str(frac))
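# Hedged standalone sketch: the relative-density estimate above is the ratio of
# galaxy to random histograms in bins of an imaging property, rescaled by the
# overall random-to-galaxy ratio. Synthetic arrays stand in for the catalogs.
import numpy as np
demo_rng = np.random.default_rng(2)
demo_ran_par = demo_rng.uniform(0.5, 2.0, size=100000)   # property at randoms
demo_gal_par = demo_rng.uniform(0.5, 2.0, size=20000)    # property at galaxies
demo_rh, demo_bn = np.histogram(demo_ran_par, bins=10, range=(0.5, 2.0))
demo_dh, _ = np.histogram(demo_gal_par, bins=demo_bn)
demo_rf = len(demo_ran_par) / len(demo_gal_par)
demo_sv = demo_dh / demo_rh * demo_rf                    # Ngal/<Ngal> per bin
demo_ep = np.sqrt(demo_dh) / demo_rh * demo_rf
demo_bc = 0.5 * (demo_bn[:-1] + demo_bn[1:])
print(np.round(demo_bc, 2), np.round(demo_sv, 3), np.round(demo_ep, 3))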
def densvsinput_pix(type,parl,wsel,reg=None,fnc=None,xlab='',vmin=None,vmax=None,ebvcut=None,edscut=None,sn2cut=None,fpsfcut=None,gfluxcut=None,rfluxcut=None,gbcut=None,nbin=10,weights=None,titl=''):
#input custom map/mask
if fnc is None:
ff = fidf
else:
ff = fnc
ft = fitsio.read(sdir+type+ff)
print(len(ft))
rl = fitsio.read(ranf,columns=['RA','DEC','PHOTSYS'])
if reg:
wr = rl['PHOTSYS'] == reg
rl = rl[wr]
wd = ft['PHOTSYS'] == reg
ft = ft[wd]
if gfluxcut:
wg = ft['FLUX_G']/ft['MW_TRANSMISSION_G'] > gfluxcut
print(len(ft))
ft = ft[wg]
print(len(ft))
if rfluxcut:
wg = ft['FLUX_R']/ft['MW_TRANSMISSION_R'] > rfluxcut
ft = ft[wg]
rth,rphi = radec2thphi(rl['RA'],rl['DEC'])
rpix = hp.ang2pix(nside,rth,rphi,nest=nest)
dth,dphi = radec2thphi(ft['RA'],ft['DEC'])
dpix = hp.ang2pix(nside,dth,dphi,nest=nest)
pixlr = np.zeros(12*nside*nside)
pixlg = np.zeros(12*nside*nside)
if weights is None:
weights = np.ones(len(pixlr))
for pix in rpix:
pixlr[pix] += 1.
print('randoms done')
for i in range(0,len(dpix)):
pix = dpix[i]
pixlg[pix] += 1.
wp = wsel
wp &= (pixlr > 0) & (weights*0 == 0)
parv = fitsio.read(pixfn)
ebv = parv['EBV']
sn2tf = 10.**(-0.4*R_G*ebv*2.)*parv['PSFDEPTH_G'] + 10.**(-0.4*R_R*ebv*2.)*parv['PSFDEPTH_R'] + 10.**(-0.4*R_Z*ebv*2.)*parv['PSFDEPTH_Z']
print(len(parv[wp]))
if sn2cut:
wp &= (sn2tf > sn2cut)
if fpsfcut:
wpsf = ft['MORPHTYPE'] == 'PSF'
pixlgp = np.zeros(12*nside*nside)
dpixp = dpix[wpsf]
for i in range(0,len(dpixp)):
pix = dpixp[i]
pixlgp[pix] += 1.
fpsf = pixlgp/pixlg
wp &= (fpsf < fpsfcut)
if ebvcut:
wp &= (parv['EBV'] < ebvcut)
if edscut:
eds = parv['EBV']/parv['STARDENS']
wp &= (eds < edscut)
parv = parl
wp &= parv !=0
wp &= parv*0 == 0
print(len(parv[wp]))
if vmin is None:
vmin = np.min(parv[wp])
if vmax is None:
vmax = np.max(parv[wp])
parv = parv[wp]
rh,bn = np.histogram(parv,bins=nbin,range=(vmin,vmax),weights=pixlr[wp])
dh,db = np.histogram(parv,bins=bn,weights=pixlg[wp]*weights[wp])
norm = sum(rh)/sum(dh)
sv = dh/rh*norm
ep = np.sqrt(dh)/rh*norm
bc = []
for i in range(0,len(bn)-1):
bc.append((bn[i]+bn[i+1])/2.)
plt.errorbar(bc,sv-1.,ep,fmt='ko')
plt.hist(parv,bins=nbin,range=(vmin,vmax),weights=pixlr[wp]*0.2*np.ones(len(pixlr[wp]))/np.max(rh))
plt.ylim(-.3,.3)
plt.xlabel(xlab)
plt.ylabel('Ngal/<Ngal> - 1')
plt.title(type+' in '+reg + ' footprint, using pixelized map'+titl)
plt.show()
wv = (parv>=vmin) & (parv <=vmax)
frac = sum(pixlr[wp][~wv])/sum(pixlr[wp])
print('fraction of randoms not included in plot: '+str(frac))
return bc,sv,ep
def densvsskyres_pix(type,par,reg=None,fnc=None,vmin=None,vmax=None,ebvcut=None,edscut=None,sn2cut=None,fpsfcut=None,gfluxcut=None,rfluxcut=None,gbcut=None,nbin=10,weights=None,titl=''):
#test against Rongpu's residuals
if fnc is None:
ff = fidf
else:
ff = fnc
ft = fitsio.read(sdir+type+ff)
print(len(ft))
rl = fitsio.read(ranf,columns=['RA','DEC','PHOTSYS'])
if reg:
wr = rl['PHOTSYS'] == reg
rl = rl[wr]
wd = ft['PHOTSYS'] == reg
ft = ft[wd]
if gfluxcut:
wg = ft['FLUX_G']/ft['MW_TRANSMISSION_G'] > gfluxcut
print(len(ft))
ft = ft[wg]
print(len(ft))
if rfluxcut:
wg = ft['FLUX_R']/ft['MW_TRANSMISSION_R'] > rfluxcut
ft = ft[wg]
rth,rphi = radec2thphi(rl['RA'],rl['DEC'])
rpix = hp.ang2pix(nside,rth,rphi,nest=nest)
dth,dphi = radec2thphi(ft['RA'],ft['DEC'])
dpix = hp.ang2pix(nside,dth,dphi,nest=nest)
pixlr = np.zeros(12*nside*nside)
pixlg = np.zeros(12*nside*nside)
if weights is None:
weights = np.ones(len(pixlr))
for pix in rpix:
pixlr[pix] += 1.
print('randoms done')
for i in range(0,len(dpix)):
pix = dpix[i]
pixlg[pix] += 1.
wp = (pixlr > 0) & (weights*0 == 0)
parv = fitsio.read(pixfn)
ebv = parv['EBV']
sn2tf = 10.**(-0.4*R_G*ebv*2.)*parv['PSFDEPTH_G'] + 10.**(-0.4*R_R*ebv*2.)*parv['PSFDEPTH_R'] + 10.**(-0.4*R_Z*ebv*2.)*parv['PSFDEPTH_Z']
print(len(parv[wp]))
if sn2cut:
wp &= (sn2tf > sn2cut)
if fpsfcut:
wpsf = ft['MORPHTYPE'] == 'PSF'
pixlgp = np.zeros(12*nside*nside)
dpixp = dpix[wpsf]
for i in range(0,len(dpixp)):
pix = dpixp[i]
pixlgp[pix] += 1.
fpsf = pixlgp/pixlg
wp &= (fpsf < fpsfcut)
if ebvcut:
wp &= (parv['EBV'] < ebvcut)
if edscut:
eds = parv['EBV']/parv['STARDENS']
wp &= (eds < edscut)
rf = fitsio.read('/global/u2/r/rongpu/share/desi/sky_residual_dr9_partial/sky_residual_dr9_north_256.fits')
parv = np.zeros(12*nside*nside)
for i in range(0,len(rf)):
px = rf['hp_idx'][i]
parv[px] = rf[par][i]
parv = hp.reorder(parv,r2n=True)
wp &= parv !=0
wp &= parv*0 == 0
print(len(parv[wp]))
if vmin is None:
vmin = np.min(parv[wp])
if vmax is None:
vmax = np.max(parv[wp])
parv = parv[wp]
rh,bn = np.histogram(parv,bins=nbin,range=(vmin,vmax),weights=pixlr[wp])
dh,db = np.histogram(parv,bins=bn,weights=pixlg[wp]*weights[wp])
norm = sum(rh)/sum(dh)
sv = dh/rh*norm
ep = np.sqrt(dh)/rh*norm
bc = []
for i in range(0,len(bn)-1):
bc.append((bn[i]+bn[i+1])/2.)
plt.errorbar(bc,sv-1.,ep,fmt='ko')
plt.hist(parv,bins=nbin,range=(vmin,vmax),weights=pixlr[wp]*0.2*np.ones(len(pixlr[wp]))/np.max(rh))
plt.ylim(-.3,.3)
plt.xlabel(par)
plt.ylabel('Ngal/<Ngal> - 1')
plt.title(type+' in '+reg + ' footprint, using pixelized map'+titl)
plt.show()
wv = (parv>=vmin) & (parv <=vmax)
frac = sum(pixlr[wp][~wv])/sum(pixlr[wp])
print('fraction of randoms not included in plot: '+str(frac))
return bc,sv,ep
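# Usage sketch (target type and arguments are illustrative, not from this file):
# 'par' should be a column of the sky-residual file read inside the function, e.g.
#   bc, sv, ep = densvsskyres_pix('ELG', 'g_blobsky', reg='N', nbin=10)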
def densvsimpar_pix(type,par,reg=None,fnc=None,vmin=None,vmax=None,ebvcut=None,edscut=None,sn2cut=None,fpsfcut=None,gfluxcut=None,rfluxcut=None,gbcut=None,nbin=10,weights=None,titl=''):
if fnc is None:
ff = fidf
else:
ff = fnc
ft = fitsio.read(sdir+type+ff)
print(len(ft))
ft = mask(ft)
print(len(ft))
rl = fitsio.read(ranf,columns=['RA','DEC','PHOTSYS','NOBS_G','NOBS_R','NOBS_Z','MASKBITS'])
print(len(rl))
rl = mask(rl)
print(len(rl))
if reg:
if reg == 'S' or reg == 'N':
wr = rl['PHOTSYS'] == reg
wd = ft['PHOTSYS'] == reg
else:
wr = sel_reg(rl['RA'],rl['DEC'],reg)
wd = sel_reg(ft['RA'],ft['DEC'],reg)
rl = rl[wr]
ft = ft[wd]
if gfluxcut:
wg = ft['FLUX_G']/ft['MW_TRANSMISSION_G'] > gfluxcut
print(len(ft))
ft = ft[wg]
print(len(ft))
if rfluxcut:
wg = ft['FLUX_R']/ft['MW_TRANSMISSION_R'] > rfluxcut
ft = ft[wg]
rth,rphi = radec2thphi(rl['RA'],rl['DEC'])
rpix = hp.ang2pix(nside,rth,rphi,nest=nest)
dth,dphi = radec2thphi(ft['RA'],ft['DEC'])
dpix = hp.ang2pix(nside,dth,dphi,nest=nest)
pixlr = np.zeros(12*nside*nside)
pixlg = np.zeros(12*nside*nside)
if par.split('-')[0] == 'VAR' or par.split('-')[0] == 'STDPER':
pixlp = np.zeros(12*nside*nside)
pixlv = np.zeros(12*nside*nside)
if weights is None:
weights = np.ones(len(pixlr))
for pix in rpix:
pixlr[pix] += 1.
print('randoms done')
for i in range(0,len(dpix)):
pix = dpix[i]
pixlg[pix] += 1.
if par.split('-')[0] == 'VAR' or par.split('-')[0] == 'STDPER':
pixlp[pix] += ft[i][par.split('-')[1]]
pixlv[pix] += ft[i][par.split('-')[1]]**2.
wp = (pixlr > 0) & (weights*0 == 0)
parv = fitsio.read(pixfn)
ebv = parv['EBV']
sn2tf = 10.**(-0.4*R_G*ebv*2.)*parv['PSFDEPTH_G'] + 10.**(-0.4*R_R*ebv*2.)*parv['PSFDEPTH_R'] + 10.**(-0.4*R_Z*ebv*2.)*parv['PSFDEPTH_Z']
print(len(parv[wp]))
if sn2cut:
wp &= (sn2tf > sn2cut)
if fpsfcut:
wpsf = ft['MORPHTYPE'] == 'PSF'
pixlgp = np.zeros(12*nside*nside)
dpixp = dpix[wpsf]
for i in range(0,len(dpixp)):
pix = dpixp[i]
pixlgp[pix] += 1.
fpsf = pixlgp/pixlg
wp &= (fpsf < fpsfcut)
if ebvcut:
wp &= (parv['EBV'] < ebvcut)
if edscut:
eds = parv['EBV']/parv['STARDENS']
wp &= (eds < edscut)
if gbcut is not None:
print('applying background cut of '+str(gbcut))
rf = fitsio.read('/global/u2/r/rongpu/share/desi/sky_residual_dr9_partial/sky_residual_dr9_north_256.fits')
gb = np.zeros(12*nside*nside)
for i in range(0,len(rf)):
px = rf['hp_idx'][i]
gb[px] = rf['g_blobsky'][i]
gb = hp.reorder(gb,r2n=True)
wp &= (gb != 0)
wp &= (gb < gbcut)
print(len(parv[wp]))
if len(par.split('-')) > 1:
if par.split('-')[0] == 'VAR':
parv = pixlv[wp]/pixlg[wp]-(pixlp[wp]/pixlg[wp])**2.
elif par.split('-')[0] == 'STDPER':
var = pixlv[wp]/pixlg[wp]-(pixlp[wp]/pixlg[wp])**2.
parv = var**.5/(pixlp[wp]/pixlg[wp])
elif par.split('-')[1] == 'X':
parv = parv[wp][par.split('-')[0]]*parv[wp][par.split('-')[2]]
elif par.split('-')[1] == 'DIV':
parv = parv[wp][par.split('-')[0]]/parv[wp][par.split('-')[2]]
elif par == 'PSFTOT':
parv = (parv[wp]['PSFSIZE_G'])*(parv[wp]['PSFSIZE_R'])*(parv[wp]['PSFSIZE_Z'])
elif par == 'SN2TOT_FLAT':
ebv = parv[wp]['EBV']
parv = 10.**(-0.4*R_G*ebv*2.)*parv[wp]['PSFDEPTH_G'] + 10.**(-0.4*R_R*ebv*2.)*parv[wp]['PSFDEPTH_R'] + 10.**(-0.4*R_Z*ebv*2.)*parv[wp]['PSFDEPTH_Z']
elif par == 'SN2TOT_G':
ebv = parv[wp]['EBV']
parv = 10.**(-0.4*R_G*ebv*2.)*parv[wp]['PSFDEPTH_G']
elif par == 'fracPSF':
wpsf = ft['MORPHTYPE'] == 'PSF'
pixlgp = np.zeros(12*nside*nside)
dpixp = dpix[wpsf]
for i in range(0,len(dpixp)):
pix = dpixp[i]
pixlgp[pix] += 1.
parv = pixlgp[wp]/pixlg[wp]
else:
parv = parv[wp][par]
wo = parv*0 == 0
if vmin is None:
vmin = np.min(parv[wo])
if vmax is None:
vmax = np.max(parv[wo])
rh,bn = np.histogram(parv,bins=nbin,range=(vmin,vmax),weights=pixlr[wp])
dh,db = np.histogram(parv,bins=bn,weights=pixlg[wp]*weights[wp])
norm = sum(rh)/sum(dh)
sv = dh/rh*norm
ep = np.sqrt(dh)/rh*norm
bc = []
for i in range(0,len(bn)-1):
bc.append((bn[i]+bn[i+1])/2.)
plt.errorbar(bc,sv-1.,ep,fmt='ko')
plt.hist(parv,bins=nbin,range=(vmin,vmax),weights=pixlr[wp]*0.2*np.ones(len(pixlr[wp]))/np.max(rh))
plt.ylim(-.3,.3)
plt.xlabel(par)
plt.ylabel('Ngal/<Ngal> - 1')
plt.title(type+' in '+reg + ' footprint, using pixelized map'+titl)
plt.show()
wv = (parv>=vmin) & (parv <=vmax)
frac = sum(pixlr[wp][~wv])/sum(pixlr[wp])
print('fraction of randoms not included in plot: '+str(frac))
return bc,sv,ep
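# Usage sketch (target type and column names are illustrative): 'par' is split on '-'
# to select the quantity that the galaxy density is binned against, e.g.
#   densvsimpar_pix('ELG', 'GALDEPTH_G', reg='N')          # a single pixel-map column
#   densvsimpar_pix('ELG', 'VAR-FLUX_G', reg='N')          # per-pixel variance of a catalog column
#   densvsimpar_pix('ELG', 'EBV-DIV-STARDENS', reg='N')    # ratio of two pixel-map columns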
#plot density vs depth with healpix values
def plotvshp_compmc(type,sys,rng,mcl=None,ws=None,reg=None,fnc=None,gdzm=0,ebvm=100,title='',effac=1.,mingd=0,maxgd=1.e6,minpsfg=0,maxpsfg=100,south=True):
if fnc is None:
ff = fidf
else:
ff = fnc
ft = fitsio.read(sdir+type+ff)
print(len(ft))
rl = rall
if reg:
wr = rall['PHOTSYS'] == reg
rl = rl[wr]
wd = ft['PHOTSYS'] == reg
ft = ft[wd]
rth,rphi = radec2thphi(rl['RA'],rl['DEC'])
rpix = hp.ang2pix(nside,rth,rphi,nest=nest)
dth,dphi = radec2thphi(ft['RA'],ft['DEC'])
dpix = hp.ang2pix(nside,dth,dphi,nest=nest)
r1 = np.zeros(12*nside*nside)
d1= np.zeros(12*nside*nside)
for pix in rpix:
r1[pix] += 1.
print('randoms done')
for pix in dpix:
d1[pix] += 1.
hpq = fitsio.read(pixfn)
#hpq = parv[par]
w = r1 > 0
print(len(hpq[w]))
w &= hpq['GALDEPTH_Z'] > gdzm
w &= hpq['GALDEPTH_G'] > mingd
w &= hpq['GALDEPTH_G'] < maxgd
w &= hpq['EBV'] < ebvm
w &= hpq['PSFSIZE_G'] > minpsfg
w &= hpq['PSFSIZE_G'] < maxpsfg
if ws is not None:
w &= ws*0 == 0
if mcl is not None:
w &= mcl*0 == 0
w &= mcl > 0
print(len(hpq[w]))
#w
if sys != 'gdc' and sys != 'rdc' and sys != 'zdc' and sys != 'dg' and sys != 'dr' and sys != 'dz' and sys != 'dgr' and sys != 'drz' and sys != 'dgz':
sm = hpq[w][sys]
xlab = sys
else:
if sys == 'gdc':
print('g band depth, extinction corrected')
sm = hpq[w]['GALDEPTH_G']*10.**(-0.4*R_G*hpq[w]['EBV'])
xlab = 'g band depth, extinction corrected'
if sys == 'rdc':
sm = hpq[w]['GALDEPTH_R']*10.**(-0.4*R_R*hpq[w]['EBV'])
xlab = 'r band depth, extinction corrected'
if sys == 'zdc':
sm = hpq[w]['GALDEPTH_Z']*10.**(-0.4*R_Z*hpq[w]['EBV'])
xlab = 'z band depth, extinction corrected'
if sys == 'dg':
sm = dg[w]
xlab = 'g band PS1 residual'
if sys == 'dr':
sm = dr[w]
xlab = 'r band PS1 residual'
if sys == 'dz':
sm = dz[w]
xlab = 'z band PS1 residual'
if sys == 'dgr':
sm = dg[w]-dr[w]
xlab = 'g-r band PS1 residual'
if sys == 'drz':
sm = dr[w]-dz[w]
xlab = 'r-z band PS1 residual'
if sys == 'dgz':
sm = dg[w]-dz[w]
xlab = 'g-z band PS1 residual'
ds = np.ones(len(d1))
print(len(ds),len(d1),len(w),len(sm))
hdnoc = np.histogram(sm,weights=d1[w],range=rng)
#print(hd1)
hr1 = np.histogram(sm,weights=r1[w],bins=hdnoc[1],range=rng)
xl = []
for i in range(0,len(hr1[0])):
xl.append((hr1[1][i]+hr1[1][i+1])/2.)
plt.errorbar(xl,hdnoc[0]/hr1[0]/(sum(d1[w])/sum(r1[w])),np.sqrt(hdnoc[0])/hr1[0]/(sum(d1[w])/sum(r1[w])),fmt='ko',label='raw')
if ws is not None:
ds = ws
hd1 = np.histogram(sm,weights=d1[w]*ds[w],bins=hdnoc[1],range=rng)
plt.plot(xl,hd1[0]/hr1[0]/(sum(d1[w]*ds[w])/sum(r1[w])),'b-',label='+ EBV weights')
#hd1 = np.histogram(sm,weights=d1[w]*ds[w],bins=hdnoc[1],range=rng)
#plt.plot(xl,hd1[0]/hr1[0]/(sum(d1[w]*ds[w])/sum(r1[w])),'k--',label='with stellar density weights')
if mcl is not None:
dmcse = mcl**effac
hd1 = np.histogram(sm,weights=d1[w]/dmcse[w],bins=hdnoc[1],range=rng)
plt.plot(xl,hd1[0]/hr1[0]/(sum(d1[w]/dmcse[w])/sum(r1[w])),'r-',label='+MC weights')
if ws is not None and mcl is not None:
hd1 = np.histogram(sm,weights=d1[w]*ds[w]/dmcse[w],bins=hdnoc[1],range=rng)
plt.plot(xl,hd1[0]/hr1[0]/(sum(d1[w]*ds[w]/dmcse[w])/sum(r1[w])),'-',color='purple',label='+MC weights + EBV weights')
#dmcs = mcls**effac
#hd1 = np.histogram(sm,weights=d1[w]*ds[w]/dmcs[w],bins=hdnoc[1],range=rng)
#plt.plot(xl,hd1[0]/hr1[0]/(sum(d1[w]*ds[w]/dmcs[w])/sum(r1[w])),'b-',label='+MC; sed w ext sigma')
#dmco = mclo**effac
#hd1 = np.histogram(sm,weights=d1[w]*ds[w]/dmco[w],bins=hdnoc[1],range=rng)
#plt.plot(xl,hd1[0]/hr1[0]/(sum(d1[w]*ds[w]/dmco[w])/sum(r1[w])),'-',color='purple',label='old MC')
#plt.title(str(mp)+reg)
plt.plot(xl,np.ones(len(xl)),'k:',label='null')
plt.legend()#(['raw','with stellar density weights','+sed ext MC','just sed MC','old MC','null']))
plt.ylabel('relative density')
plt.xlabel(xlab)
plt.ylim(0.7,1.3)
plt.title(title)
plt.show()
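# Usage sketch (arguments are illustrative): compare the raw density trend against a
# HEALPix systematic with and without weights, e.g.
#   plotvshp_compmc('ELG', 'gdc', (300, 1500), mcl=mc_map, ws=ebv_weights, reg='N')
# where mc_map and ebv_weights are assumed to be per-pixel arrays of length 12*nside**2.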
|
desihubREPO_NAMELSSPATH_START.@LSS_extracted@LSS-main@Sandbox@[email protected]@.PATH_END.py
|
{
"filename": "_borderwidth.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scattermap/marker/colorbar/_borderwidth.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class BorderwidthValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self,
plotly_name="borderwidth",
parent_name="scattermap.marker.colorbar",
**kwargs,
):
super(BorderwidthValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
min=kwargs.pop("min", 0),
**kwargs,
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scattermap@marker@colorbar@[email protected]_END.py
|
{
"filename": "hou_config_funcs.py",
"repo_name": "meyeralexj/gubas",
"repo_path": "gubas_extracted/gubas-master/hou_config_funcs.py",
"type": "Python"
}
|
# Assumed imports for this module (not present in the extracted text; the aliases
# follow the usage below). The helper read_bench called further down is expected to
# be defined elsewhere in the package.
import configparser as cfp
import numpy as np
def hou_config_read(filename):
CFG=cfp.ConfigParser()
CFG.read(filename)
# get flags
fahnestock_flag=CFG.getboolean("Initial Conditions","Fahnestock Input File Flag")
C_flag=CFG.getboolean("Initial Conditions","B into A Euler Flag")
Cc_flag=CFG.getboolean("Initial Conditions","A into N Euler Flag")
integ=CFG.getint("Integration Settings", "Integrator Flag")
Tgen=CFG.getint("Body Model Definitions","Inertia Integral Generation Flag")
a_shape=CFG.getint("Body Model Definitions","Primary Shape Flag")
b_shape=CFG.getint("Body Model Definitions","Secondary Shape Flag")
    # get gravity expansion values
n=CFG.getint("Mutual Gravity Expansion Parameters","Gravity Expansion Truncation Order")
nA=CFG.getint("Mutual Gravity Expansion Parameters","Primary Inertia Integral Truncation Order")
nB=CFG.getint("Mutual Gravity Expansion Parameters","Secondary Inertia Integral Truncation Order")
# get integrator settings
t0=CFG.getfloat("Integration Settings","Start Time")
tf=CFG.getfloat("Integration Settings","Final Time")
h=CFG.getfloat("Integration Settings","Fixed Time Step")
tol=CFG.getfloat("Integration Settings","Absolute Tolerance")
# get shape settings - values converted to km
aA=CFG.getfloat("Body Model Definitions","Primary Semi-Major Axis")
bA=CFG.getfloat("Body Model Definitions","Primary Semi-Intermediate Axis")
cA=CFG.getfloat("Body Model Definitions","Primary Semi-Minor Axis")
aB=CFG.getfloat("Body Model Definitions","Secondary Semi-Major Axis")
bB=CFG.getfloat("Body Model Definitions","Secondary Semi-Intermediate Axis")
cB=CFG.getfloat("Body Model Definitions","Secondary Semi-Minor Axis")
tet_fileA=CFG.get("Body Model Definitions","Primary Tetrahedron File")
vert_fileA=CFG.get("Body Model Definitions","Primary Vertex File")
tet_fileB=CFG.get("Body Model Definitions","Secondary Tetrahedron File")
vert_fileB=CFG.get("Body Model Definitions","Secondary Vertex File")
# get output settings
postProcessing=CFG.getint("Output Settings", "Post Processing") #added by hagrusa, option to only output binaries
out_freq=CFG.getfloat("Output Settings", "Fixed Output Frequency")
out_time_name=CFG.get("Output Settings","Specified Time List Filename")
case=CFG.get("Output Settings","Case Name")
# get additional forces settings
flyby_toggle = CFG.getint("Additional Forces and Perturbations","Flyby")
sg_toggle = CFG.getint("Additional Forces and Perturbations","Solar Gravity")
tt_toggle = CFG.getint("Additional Forces and Perturbations","Tidal Torque")
helio_toggle = CFG.getint("Additional Forces and Perturbations","Heliocentric Orbit")
Mplanet = CFG.getfloat("Additional Forces and Perturbations","Planetary Mass")
a_hyp = CFG.getfloat("Additional Forces and Perturbations","Semimajor Axis")
e_hyp = CFG.getfloat("Additional Forces and Perturbations","Eccentricity")
i_hyp = CFG.getfloat("Additional Forces and Perturbations","Inclination")
RAAN_hyp = CFG.getfloat("Additional Forces and Perturbations","RAAN")
om_hyp = CFG.getfloat("Additional Forces and Perturbations","Argument of Periapsis")
tau_hyp = CFG.getfloat("Additional Forces and Perturbations","Flyby Time")
Msolar = CFG.getfloat("Additional Forces and Perturbations","Solar Mass")
a_helio = CFG.getfloat("Additional Forces and Perturbations","Heliocentric Semimajor Axis")
e_helio = CFG.getfloat("Additional Forces and Perturbations","Heliocentric Eccentricity")
i_helio = CFG.getfloat("Additional Forces and Perturbations","Heliocentric Inclination")
RAAN_helio = CFG.getfloat("Additional Forces and Perturbations","Heliocentric RAAN")
om_helio = CFG.getfloat("Additional Forces and Perturbations","Heliocentric Argument of Periapsis")
tau_helio = CFG.getfloat("Additional Forces and Perturbations","Time of periapsis passage")
sol_rad = CFG.getfloat("Additional Forces and Perturbations","Solar Orbit Radius")
au_def = CFG.getfloat("Additional Forces and Perturbations","AU Definition")/1000.
love1 = CFG.getfloat("Additional Forces and Perturbations","Primary Love Number")
love2 = CFG.getfloat("Additional Forces and Perturbations","Secondary Love Number")
refrad1 = CFG.getfloat("Additional Forces and Perturbations","Primary Reference Radius")
refrad2 = CFG.getfloat("Additional Forces and Perturbations","Secondary Reference Radius")
eps1 = CFG.getfloat("Additional Forces and Perturbations","Primary Tidal Lag Angle")
eps2 = CFG.getfloat("Additional Forces and Perturbations","Secondary Tidal Lag Angle")
Msun = CFG.getfloat("Additional Forces and Perturbations","Sun Mass")
# check initial conditions type with fahnestock flag
if fahnestock_flag==1:
(G,rhoA,rhoB,x0)=read_bench("systemdata_standard_MKS_units","initstate_standard_MKS_units")
# (G,rhoA,rhoB,x0)=read_bench("systemdata_standard_MKS_units","d3_1320")
else:
# get gravity parameter - convert to kg km s units
G=CFG.getfloat("Gravity Parameter","G")/1.e9
# get densities - convert to km kg s units
rhoA=CFG.getfloat("Body Model Definitions","Primary Density")*1.e12
rhoB=CFG.getfloat("Body Model Definitions","Secondary Density")*1.e12
# get initial conditions - convert to km kg s units
x0=np.zeros([30])
x0[0]=CFG.getfloat("Initial Conditions","Relative Position X")/1000.
x0[1]=CFG.getfloat("Initial Conditions","Relative Position Y")/1000.
x0[2]=CFG.getfloat("Initial Conditions","Relative Position Z")/1000.
x0[3]=CFG.getfloat("Initial Conditions","Relative Velocity X")/1000.
x0[4]=CFG.getfloat("Initial Conditions","Relative Velocity Y")/1000.
x0[5]=CFG.getfloat("Initial Conditions","Relative Velocity Z")/1000.
x0[6]=CFG.getfloat("Initial Conditions","Primary Angular Velocity X")
x0[7]=CFG.getfloat("Initial Conditions","Primary Angular Velocity Y")
x0[8]=CFG.getfloat("Initial Conditions","Primary Angular Velocity Z")
x0[9]=CFG.getfloat("Initial Conditions","Secondary Angular Velocity X")
x0[10]=CFG.getfloat("Initial Conditions","Secondary Angular Velocity Y")
x0[11]=CFG.getfloat("Initial Conditions","Secondary Angular Velocity Z")
if C_flag==0:
x0[21]=CFG.getfloat("Initial Conditions","B into A (1,1)")
x0[22]=CFG.getfloat("Initial Conditions","B into A (1,2)")
x0[23]=CFG.getfloat("Initial Conditions","B into A (1,3)")
x0[24]=CFG.getfloat("Initial Conditions","B into A (2,1)")
x0[25]=CFG.getfloat("Initial Conditions","B into A (2,2)")
x0[26]=CFG.getfloat("Initial Conditions","B into A (2,3)")
x0[27]=CFG.getfloat("Initial Conditions","B into A (3,1)")
x0[28]=CFG.getfloat("Initial Conditions","B into A (3,2)")
x0[29]=CFG.getfloat("Initial Conditions","B into A (3,3)")
C=np.reshape(x0[21:30],[3,3])
else:# if user defines euler angles use this rotation matrix definition - MAKE SURE INPUT EULER ANGLES MATCH THEIR DEFINITIONS
th1=CFG.getfloat("Initial Conditions","B into A Euler 1 X")
th2=CFG.getfloat("Initial Conditions","B into A Euler 2 Y")
th3=CFG.getfloat("Initial Conditions","B into A Euler 3 Z")
C=np.array([[np.cos(th2)*np.cos(th3),np.sin(th1)*np.sin(th2)*np.cos(th3)+np.cos(th1)*np.sin(th3),-np.cos(th1)*np.sin(th2)*np.cos(th3)+np.sin(th1)*np.sin(th3)],\
[-np.cos(th2)*np.sin(th3),-np.sin(th1)*np.sin(th2)*np.sin(th3)+np.cos(th1)*np.cos(th3),np.cos(th1)*np.sin(th2)*np.sin(th3)+np.sin(th1)*np.cos(th3)],\
[np.sin(th2),-np.sin(th1)*np.cos(th2),np.cos(th1)*np.cos(th2)]]).T
x0[21:30]=np.reshape(C,[1,9])
if Cc_flag==0:
x0[12]=CFG.getfloat("Initial Conditions","A into N (1,1)")
x0[13]=CFG.getfloat("Initial Conditions","A into N (1,2)")
x0[14]=CFG.getfloat("Initial Conditions","A into N (1,3)")
x0[15]=CFG.getfloat("Initial Conditions","A into N (2,1)")
x0[16]=CFG.getfloat("Initial Conditions","A into N (2,2)")
x0[17]=CFG.getfloat("Initial Conditions","A into N (2,3)")
x0[18]=CFG.getfloat("Initial Conditions","A into N (3,1)")
x0[19]=CFG.getfloat("Initial Conditions","A into N (3,2)")
x0[20]=CFG.getfloat("Initial Conditions","A into N (3,3)")
else:# if user defines euler angles use this rotation matrix definition - MAKE SURE INPUT EULER ANGLES MATCH THEIR DEFINITIONS
th1=CFG.getfloat("Initial Conditions","A into N Euler 1 X")
th2=CFG.getfloat("Initial Conditions","A into N Euler 2 Y")
th3=CFG.getfloat("Initial Conditions","A into N Euler 3 Z")
Cc=np.array([[np.cos(th2)*np.cos(th3),np.sin(th1)*np.sin(th2)*np.cos(th3)+np.cos(th1)*np.sin(th3),-np.cos(th1)*np.sin(th2)*np.cos(th3)+np.sin(th1)*np.sin(th3)],\
[-np.cos(th2)*np.sin(th3),-np.sin(th1)*np.sin(th2)*np.sin(th3)+np.cos(th1)*np.cos(th3),np.cos(th1)*np.sin(th2)*np.sin(th3)+np.sin(th1)*np.cos(th3)],\
[np.sin(th2),-np.sin(th1)*np.cos(th2),np.cos(th1)*np.cos(th2)]]).T
x0[12:21]=np.reshape(Cc,[1,9])
x0[9:12]=np.dot(C,np.array([x0[9:12]]).T).T[0]
return(G,n,nA,nB,aA,bA,cA,aB,bB,cB,a_shape,b_shape,rhoA,rhoB,t0,tf,tet_fileA,vert_fileA,tet_fileB,vert_fileB,x0,Tgen,integ,h,tol,out_freq,out_time_name,case,flyby_toggle,helio_toggle,sg_toggle,tt_toggle,Mplanet,a_hyp,e_hyp,i_hyp,RAAN_hyp,om_hyp,tau_hyp,Msolar,a_helio,e_helio,i_helio,RAAN_helio,om_helio,tau_helio,sol_rad,au_def,love1,love2,refrad1,refrad2,eps1,eps2,Msun,postProcessing)
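# Usage sketch (the file name is hypothetical): the returned tuple unpacks in the
# order listed in the return statement above, e.g.
#   cfg = hou_config_read('binary_system.cfg')
#   G, n, nA, nB = cfg[0:4]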
|
meyeralexjREPO_NAMEgubasPATH_START.@gubas_extracted@gubas-master@[email protected]_END.py
|
{
"filename": "_ticklen.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/histogram/marker/colorbar/_ticklen.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TicklenValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name="ticklen", parent_name="histogram.marker.colorbar", **kwargs
):
super(TicklenValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
min=kwargs.pop("min", 0),
**kwargs,
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@histogram@marker@colorbar@[email protected]_END.py
|
{
"filename": "6_paper_imaging_plots_SIM2.py",
"repo_name": "eogarvin/MLCCS",
"repo_path": "MLCCS_extracted/MLCCS-main/tests/6_paper_imaging_plots_SIM2.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 19 2023
@author: emily
This script intends to plot results for several experiments conducted on different alpha values.
"""
## LIBRARIES
import gc
import pandas as pd
import numpy as np
import seaborn as sns
from functools import partial
from itertools import chain, repeat
from matplotlib import pyplot
from matplotlib.lines import Line2D
import matplotlib.pyplot as plt
from astropy.io import fits
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_curve, roc_auc_score, confusion_matrix, f1_score, precision_recall_curve, auc
from multiprocessing import Pool, freeze_support
import concurrent.futures
import random
import time
import pickle
import os
import copy
import sys
# sys.path.append(code_path + "ml_spectroscopy/ml_spectroscopy")
# sys.path.append("C:/Users/emily/Documents/ML_spectroscopy_thesis/50_code/ml_spectroscopy")
from ml_spectroscopy.crosscorrNormVec import crosscorrRV_vec
from ml_spectroscopy.config import path_init
from ml_spectroscopy.DataPreprocessing_utils import image_reconstruct, image_deconstruct
from ml_spectroscopy.plottings_utils_results import ROC_curve_customplot, ROC_curve_saveplt ,PR_curve_customplot, \
PR_curve_saveplt
from ml_spectroscopy.utility_functions import flatten, Average, grid_search0
## SET SEED FOR REPRODUCIBILITY
random.seed(100)
## Settings
data_name = 'GQlupb'
planet = 'GQlupB'
alpha = 5 # [2,5,8,11,22,32,43,55]
# alpha=2
bal = 50
v = 6
frame = 'simple'
# folds = [7,13,14,16]
plotname = 'test'
methods_ls = ['SNR', 'PCT', 'CNN1', 'CNN2']
color_ls = {'SNR': 'red', 'SNR_auto': 'brown', 'ENET': 'forestgreen', 'RID': 'lime', 'PCT': 'lightblue', 'DNN': 'blue',
'CNN1': 'navy', 'CNN2': 'purple'}
title_ls = {'SNR': 'S/N', 'SNR_auto': 'SNR_auto', 'ENET': 'Elasticnet', 'RID': 'Ridge', 'PCT': 'Perceptron',
'DNN': 'DNN',
'CNN1': 'CNN', 'CNN2': 'CNN'}
planetlist = ['GQlupb0', 'GQlupb1', 'GQlupb2', 'GQlupb3', 'GQlupb4', 'GQlupb5', 'GQlupb6', 'GQlupb7', 'PZTel_10',
'PZTel_11', 'PZTel_12', 'PZTel_13', 'PZTel_20', 'PZTel_21', 'PZTel_22', 'PZTel_23', 'PZTel_24',
'PZTel_25', 'PZTel_26']
## ACTIVE SUBDIR
subdir = path_init()
# subdir = "C:/Users/emily/Documents/ML_spectroscopy_thesis/"
# PATHS
code_path = subdir + "50_code/"
data_path = subdir + "30_data/DataSets/"
plot_path = subdir + "60_plots/"
results_path = subdir + "70_results/"
# visual_path = subdir + "80_visualisation/"
# csv_res_path = subdir + "80_visualisation/"
# Directory to fetch results
dir_path = results_path + "export_CV/from_GPU_byfold/Res_SIM_planets_220324_CO/" # Depending on the way we pick the files, can't we directly point to the right directory from the start?
# Directory to store results
visualisation_path = subdir + "80_visualisation/Realistic_fakeplanets/Vis_07_220324_SIM/"
csv_path = subdir + "80_visualisation/Realistic_fakeplanets/Vis_07_220324_SIM/"
k2 = [0, 1, 2]
#alpha_set = ["alphanone", "alphaover3", "alphaover6", "alphamin"]
alpha_set = ["alphanone", "alphaover2", "alphaover3", "alphaover6"]
#alpha_set = ["alphanone", "alphanone2", "alphaover2", "alphaover3", "alphaover4", "alphaover6"]
cubes = ["Cube 0", "Cube 1", "Cube 2"]
###################### Try with TPR cnstraint
image_size = 56
hfov = 56*0.025/2
planet_rel_star = [0,0]
extent0 = [-hfov+planet_rel_star[0], hfov+planet_rel_star[0], -hfov+planet_rel_star[1], hfov+planet_rel_star[1]]
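# Reading of the numbers above (interpretation assumed from the arcsec axis labels used
# later): 56 px at 0.025 arcsec/px gives a 1.4 arcsec field of view, so hfov = 0.7 arcsec
# is the half-width, and extent0 centres the axes on the planet-relative-to-star offset [0, 0].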
# initiate the loop
for a in range(len(alpha_set)):
    # What was the base template used for the experiments?
template_characteristics = {'Temp': 2800, 'Surf_grav': 4.1, 'H2O': 0, 'CO': 1}
tp_path = '/home/ipa/quanz/user_accounts/egarvin/Thesis/70_results/export_CV/from_GPU_byfold/Res_SIM_planets_220324_CO/'
# keys = folds
# ls_results_realistic_fake = {key: None for key in keys}
# for i in folds:
with open(tp_path + 'results_SIM_CO_data_alpha_'+alpha_set[a]+'_CV_testfold.pkl', "rb") as f:
ls_results_realistic_fake = pickle.load(f) # i is the validation number but the proper set is at i+1
data00 = pd.read_pickle(
data_path + 'csv_inputs/intro_plot_dataset/PZ_Tel_signals_alphanone_temp2800_logg_4.1_H2O_0_CO_1.pkl')
data0 = data00.loc[data00.index.levels[0][k2]]
data01 = pd.read_pickle(
data_path + 'csv_inputs/intro_plot_dataset/PZ_Tel_signals_' + str(alpha_set[a]) + '_temp2800_logg_4.1_H2O_0_CO_1.pkl')
data1 = data01.loc[data01.index.levels[0][k2]]
# data1 = pd.read_pickle(
# data_path + 'csv_inputs/CCF_realistic_fakeplanets/final_test_sets/final_testset_H2O_crosscorr_data_alpha_' + str(
# alpha) + '_temp2800.0_sg4.1.pkl')
meta_data = pd.read_pickle(data_path + "csv_inputs/intro_plot_dataset/PZ_Tel_signals_alphanone_H2O_0_CO_1_meta.pkl")
# For each fold:
# get the results
# get the indices of the results
# Fill the gaps of the image with nans?
# reconstruct the image with the results
# Image should contain:
# Map of accuracy (correctly classified is green , badly classified is red)
# Map of True vs false positives?
# Map of true vs false negatives ?
# Could also have: Orange = negatives, true are dark, false are light; blue = true and false positives; true are dark, wrong are light
# for j in folds: # + 1):
# if j == 0:
# i = (len(planetlist) - 1)
# else:
# i = j - 1
data_test_original_sim = data0.loc[data0.index.levels[0][k2]].drop("planet", axis=1)
data_test = data1.loc[data1.index.levels[0][k2]].drop("planet", axis=1)
j = 12
# Get the data
dtpath0 = data_path + "csv_inputs/True_Spectrum_Data"
noise_temp_wl_shape = 2 # here we do not need the wavelength dimension
# Get the data where the planet is indicated
path0 = os.path.join(dtpath0, str(
        planetlist[j]) + '_spectrum_dt.csv')  # Untrimmed data; therefore, take the WL range from the trimmed data.
original_df = pd.read_csv(path0)
dir_file_planet = data_path + 'True_HCI_data'
dir_file_WR = data_path + 'wavelength_ranges'
# If templates have more molecules, remember to adapt the number of dropped end columns in the function
dir_file_mol_template = data_path + 'csv_inputs/Molecular_Templates_df2.csv'
savedirccf = data_path + 'csv_inputs/True_CCF_data'
savedirdata = data_path + 'csv_inputs/True_Spectrum_Data'
hdu_list0 = fits.open(dir_file_planet + '/res_' + planetlist[j] + '.fits')
hdu_list0.info()
Planet_HCI = hdu_list0[0].data
hdu_list0.close()
Planet_HCI = Planet_HCI[:, ::-1, :] # To get the north up, as python opens fits upside down
# Planet_WR = importWavelength_asList(dir_file_WR + '/WR_' + WRFilename, extension)
    # Transform the 3D cube into a 2D set of rows of spectra and columns of wavelengths. NaNs are removed, but their positions are stored in the last output
PlanetHCI_nanrm, Planet_vec_shape, Planet_position_nan = image_deconstruct(Planet_HCI)
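    # Shape contract implied by the calls in this script: image_deconstruct flattens the
    # (wavelength, y, x) cube into a 2D array of valid spaxels, with the NaN positions kept
    # separately, so that per-spaxel scores can later be mapped back onto the image grid by
    # image_reconstruct(..., Planet_vec_shape[0], Planet_vec_shape[1], Planet_position_nan).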
fig2, ax2 = plt.subplots(3, 4, figsize=(4.5 * 4, 4 * 3), layout='constrained')
fig2.suptitle("Grid of PZ Tel B simulations inserted in real SINFONI noise and resulting CO maps ("+str(int(a+1))+")", fontsize=20, fontweight="bold")
for i in range(3):
image_scores = {}
for m in methods_ls:
if m in ['ENET', 'LAS', 'RID', 'ENET2', 'XGB']:
prob_Y0 = ls_results_realistic_fake['results'][m]['Y_pred_prob']
# Y_pred = np.array(prob_Y > 0.5) * 1
elif m in ['SNR', 'SNR_auto']:
prob_Y0 = ls_results_realistic_fake['results'][m]['SNR']
# Y_pred = np.array(prob_Y > 3) * 1
else:
prob_Y0 = ls_results_realistic_fake['results'][m]['Y_pred_prob'][:, 1]
# Y_pred = np.array(prob_Y > 0.5) * 1
Y_pred0 = ls_results_realistic_fake['results'][m]['Y_pred']
# dt_true = pd.read_pickle(data_path + "csv_inputs/CCF_realistic_fakeplanets/noise_and_planets_spectra/injection_labels_set.pkl")
# Y_true = dt_true.iloc[0:3041]
# Y_pred[np.where(Y_true == 1)[0].tolist()]
#
# Positives_predictions = [Y_pred[index] for index in np.where(Y_true == 1)[0].tolist()]
# Positives_scores = [prob_Y[index] for index in np.where(Y_true == 1)[0].tolist()]
            # Deconstruct a full image (here we only use two frames as the wavelength dimension is not of interest - but the function was built for more than one dimension)
# PlanetHCI_nanrm, Planet_vec_shape, Planet_position_nan = image_deconstruct( )
if planetlist[j][:2] == 'PZ':
size = 1795
rv = 22
elif planetlist[j][:2] == 'GQ':
size = 1958
rv = 44
            fold_name = ['PZTel_20', 'PZTel_21', 'PZTel_22', 'PZTel_23']
Y_pred = np.array_split(Y_pred0, 3)[i]
prob_Y = np.array_split(prob_Y0, 3)[i]
reconstruct_prediction = np.tile(Y_pred, size).reshape(size, len(Y_pred)).T
reconstruct_scores = np.tile(prob_Y, size).reshape(size, len(prob_Y)).T
reconstruct_ccf = np.tile(data_test.loc[fold_name[i]][0], size).reshape(size, len(data_test.loc[fold_name[i]][0])).T
reconstruct_ccf_original_sim = np.tile(data_test_original_sim.loc[fold_name[i]][0], size).reshape(size, len(data_test_original_sim.loc[fold_name[i]][0])).T
img_prediction = image_reconstruct(reconstruct_prediction, Planet_vec_shape[0], Planet_vec_shape[1],Planet_position_nan)
img_scores = image_reconstruct(reconstruct_scores, Planet_vec_shape[0], Planet_vec_shape[1],Planet_position_nan)
img_ccf_original_sim = image_reconstruct(reconstruct_ccf_original_sim, Planet_vec_shape[0], Planet_vec_shape[1],Planet_position_nan)
img_ccf = image_reconstruct(reconstruct_ccf, Planet_vec_shape[0], Planet_vec_shape[1],Planet_position_nan)
image_scores[m] = img_scores
# prediction
#plt.imshow(img_prediction[1, :, :])
#plt.title(str(title_ls[m]), fontsize=18)
#plt.xlabel('[px]', fontsize=17)
#plt.ylabel('[px]', fontsize=17)
#plt.savefig(
# visualisation_path + 'img_realisticfake_' + planetlist[j] + '_method_' + str(m) + '_alpha_' + str(
# alpha) + '_Base_Prediction_areas.pdf', bbox_inches='tight')
#plt.show()
#plt.close()
#
# # CCF
# plt.imshow(img_ccf_original_sim[0, :, :], cmap='inferno')
# plt.title('Good Obs. Conditions', fontsize=18)
# clb = plt.colorbar()
# clb.set_label('Normalised CCF Values', fontsize=14)
# plt.set_xlabel('[px]', fontsize=17)
# plt.ylabel('[px]', fontsize=17)
# #plt.savefig(
# # visualisation_path + 'img_realisticfake_' + planetlist[j] + '_method_' + str(m) + '_alpha_' + str(
# # alpha) + '_CCF.pdf', bbox_inches='tight')
#
# # CCF
# plt.imshow(img_ccf[0, :, :], cmap='inferno')
# plt.title('Bad Conditions', fontsize=18)
# clb = plt.colorbar()
# clb.set_label('Normalised CCF Values', fontsize=14)
# plt.xlabel('[px]', fontsize=17)
# plt.ylabel('[px]', fontsize=17)
# #plt.savefig(
# # visualisation_path + 'img_realisticfake_' + planetlist[j] + '_method_' + str(m) + '_alpha_' + str(
# # alpha) + '_CCF.pdf', bbox_inches='tight')
#
# plt.imshow(image_scores[m][1, :, :], cmap='inferno')
# plt.set_title(str(title_ls[m]), fontsize=18)
# clb = plt.colorbar()
# if m in ['SNR', 'SNR_auto']:
# clb.set_label('Scores: S/N Values', fontsize=14)
# else:
# clb.set_label('Scores: Probabilities', fontsize=14)
#
# # Y_pred = np.array(prob_Y > 0.5) * 1
# plt.xlabel('[px]', fontsize=17)
# plt.ylabel('[px]', fontsize=17)
# #plt.savefig(
# # visualisation_path + 'img_realisticfake_' + planetlist[j] + '_method_' + str(m) + '_alpha_' + str(
# # alpha) + '_Scores.pdf', bbox_inches='tight')
# plt.show()
# plt.close()
#
#
fig1, ax1 = plt.subplots(1, 4, figsize=(4.5 * 4, 5 * 1), layout='constrained')
fig_title = fig1.suptitle("PZ Tel B simulation inserted in real SINFONI noise and recovery of CO map", fontsize=20, fontweight="bold")
# CCF
img0 = ax1[0].imshow(img_ccf_original_sim[0, :, :], extent=extent0, cmap='inferno')
ax1[0].set_title('Molecular Map \n Good Conditions', fontsize=20)
clb = plt.colorbar(img0, ax=ax1[0], shrink=0.68)
clb.set_label('Normalised CCF Values', fontsize=17)
clb.ax.tick_params(labelsize=14)
ax1[0].set_xlim([-hfov + planet_rel_star[0], hfov + planet_rel_star[0]])
ax1[0].set_ylim([-hfov + planet_rel_star[1], hfov + planet_rel_star[1]])
ax1[0].set_xlabel(r'$\Delta$ RA (arcsec)', fontsize=20)
ax1[0].set_ylabel(r'$\Delta$ Dec (arcsec)', fontsize=20)
ax1[0].xaxis.set_major_locator(plt.MaxNLocator(5))
ax1[0].yaxis.set_major_locator(plt.MaxNLocator(5))
ax1[0].tick_params(labelsize=16)
ax1[0].label_outer()
#plt.savefig(
# visualisation_path + 'img_realisticfake_' + planetlist[j] + '_method_' + str(m) + '_alpha_' + str(
# alpha) + '_CCF.pdf', bbox_inches='tight')
# CCF
img1= ax1[1].imshow(img_ccf[0, :, :], extent=extent0, cmap='inferno')
ax1[1].set_title('Molecular Map \n Bad Conditions', fontsize=20)
clb = plt.colorbar(img1, ax=ax1[1], shrink=0.68)
clb.set_label('Normalised CCF Values', fontsize=17)
clb.ax.tick_params(labelsize=14)
ax1[1].set_xlim([-hfov + planet_rel_star[0], hfov + planet_rel_star[0]])
ax1[1].set_ylim([-hfov + planet_rel_star[1], hfov + planet_rel_star[1]])
ax1[1].set_xlabel(r'$\Delta$ RA (arcsec)', fontsize=20)
ax1[1].set_ylabel(r'$\Delta$ Dec (arcsec)', fontsize=20)
ax1[1].xaxis.set_major_locator(plt.MaxNLocator(5))
ax1[1].yaxis.set_major_locator(plt.MaxNLocator(5))
ax1[1].tick_params(labelsize=16)
ax1[1].label_outer()
#plt.savefig(
# visualisation_path + 'img_realisticfake_' + planetlist[j] + '_method_' + str(m) + '_alpha_' + str(
# alpha) + '_CCF.pdf', bbox_inches='tight')
ms = ["SNR", "CNN2"]
for s in range(2):
# score
img2 = ax1[2+s].imshow(image_scores[ms[s]][1, :, :], extent=extent0, cmap='inferno')
ax1[2+s].set_title(str(title_ls[ms[s]]+' scores \n Bad Conditions'), fontsize=20)
clb = plt.colorbar(img2, ax=ax1[2+s], shrink=0.68)
clb.ax.tick_params(labelsize=14)
            if ms[s] in ['SNR', 'SNR_auto']:
                clb.set_label('Scores: S/N Values', fontsize=17)
            else:
                clb.set_label('Scores: Probabilities', fontsize=17)
# Y_pred = np.array(prob_Y > 0.5) * 1
ax1[2+s].set_xlim([-hfov + planet_rel_star[0], hfov + planet_rel_star[0]])
ax1[2+s].set_ylim([-hfov + planet_rel_star[1], hfov + planet_rel_star[1]])
ax1[2+s].set_xlabel(r'$\Delta$ RA (arcsec)', fontsize=20)
ax1[2+s].set_ylabel(r'$\Delta$ Dec (arcsec)', fontsize=20)
ax1[2+s].xaxis.set_major_locator(plt.MaxNLocator(5))
ax1[2+s].yaxis.set_major_locator(plt.MaxNLocator(5))
ax1[2+s].tick_params(labelsize=16)
ax1[2+s].label_outer()
fig1.savefig(visualisation_path + 'SIM_CO_alpha_' + str(alpha_set[a]) + '_Scores_'+str(fold_name[i])+'.pdf', bbox_inches='tight', dpi=600)
fig1.show()
#plt.savefig(
# visualisation_path + 'img_realisticfake_' + planetlist[j] + '_method_' + str(m) + '_alpha_' + str(
# alpha) + '_Scores.pdf', bbox_inches='tight')
# CCF
img0 = ax2[i,0].imshow(img_ccf_original_sim[0, :, :], extent=extent0, cmap='inferno')
if i == 0:
ax2[i,0].set_title('Good Conditions', fontsize=19)
#cax00 = fig2.add_axes([ax2[i,0].get_position().x1+0.01,ax2[i,0].get_position().y0,0.02,ax2[i,0].get_position().height])
clb = plt.colorbar(img0, ax=ax2[i,0], shrink=0.9)
clb.set_label('Normalised CCF Values', fontsize=17)
clb.ax.tick_params(labelsize=14)
clb.ax.tick_params(labelsize=14)
ax2[i,0].set_xlim([-hfov + planet_rel_star[0], hfov + planet_rel_star[0]])
ax2[i,0].set_ylim([-hfov + planet_rel_star[1], hfov + planet_rel_star[1]])
ax2[i,0].set_xlabel(r'$\Delta$ RA (arcsec)', fontsize=19)
ax2[i,0].set_ylabel(r'$\Delta$ Dec (arcsec)', fontsize=19)
ax2[i,0].xaxis.set_major_locator(plt.MaxNLocator(5))
ax2[i,0].yaxis.set_major_locator(plt.MaxNLocator(5))
ax2[i,0].tick_params(labelsize=15)
ax2[i,0].label_outer()
#plt.savefig(
# visualisation_path + 'img_realisticfake_' + planetlist[j] + '_method_' + str(m) + '_alpha_' + str(
# alpha) + '_CCF.pdf', bbox_inches='tight')
# CCF
img1= ax2[i,1].imshow(img_ccf[0, :, :], extent=extent0, cmap='inferno')
if i == 0:
ax2[i,1].set_title('Bad Conditions', fontsize=19)
#cax01 = fig2.add_axes([ax2[i,1].get_position().x1+0.01,ax2[i,1].get_position().y0,0.02,ax2[i,1].get_position().height])
clb = plt.colorbar(img1, ax=ax2[i,1], shrink=0.9)
clb.set_label('Normalised CCF Values', fontsize=17)
clb.ax.tick_params(labelsize=14)
ax2[i,1].set_xlim([-hfov + planet_rel_star[0], hfov + planet_rel_star[0]])
ax2[i,1].set_ylim([-hfov + planet_rel_star[1], hfov + planet_rel_star[1]])
ax2[i,1].set_xlabel(r'$\Delta$ RA (arcsec)', fontsize=19)
ax2[i,1].set_ylabel(r'$\Delta$ Dec (arcsec)', fontsize=19)
ax2[i,1].xaxis.set_major_locator(plt.MaxNLocator(5))
ax2[i,1].yaxis.set_major_locator(plt.MaxNLocator(5))
ax2[i,1].tick_params(labelsize=15)
ax2[i,1].label_outer()
#plt.savefig(
# visualisation_path + 'img_realisticfake_' + planetlist[j] + '_method_' + str(m) + '_alpha_' + str(
# alpha) + '_CCF.pdf', bbox_inches='tight')
mt = ["SNR","CNN2"]
for s in range(2):
# score
img2 = ax2[i,2+s].imshow(image_scores[mt[s]][1, :, :], extent=extent0, cmap='inferno')
if i == 0:
ax2[i,2+s].set_title(str(title_ls[mt[s]] + ' (Bad Conditions)'), fontsize=19)
#cax02 = fig2.add_axes([ax2[i, 2+s].get_position().x1 + 0.01, ax2[i, 2+s].get_position().y0, 0.02, ax2[i, 2+s].get_position().height])
clb = plt.colorbar(img2, ax=ax2[i,2+s], shrink=0.9)
            if mt[s] in ['SNR', 'SNR_auto']:
                clb.set_label('Scores: S/N Values', fontsize=17)
            else:
                clb.set_label('Scores: Probabilities', fontsize=17)
# Y_pred = np.array(prob_Y > 0.5) * 1
ax2[i,2+s].set_xlim([-hfov + planet_rel_star[0], hfov + planet_rel_star[0]])
ax2[i,2+s].set_ylim([-hfov + planet_rel_star[1], hfov + planet_rel_star[1]])
ax2[i,2+s].set_xlabel(r'$\Delta$ RA (arcsec)', fontsize=19)
ax2[i,2+s].set_ylabel(r'$\Delta$ Dec (arcsec)', fontsize=19)
ax2[i,2+s].tick_params(labelsize=15)
ax2[i,2+s].xaxis.set_major_locator(plt.MaxNLocator(5))
ax2[i,2+s].yaxis.set_major_locator(plt.MaxNLocator(5))
ax2[i,2+s].label_outer()
fig2.savefig(visualisation_path + 'SIM_CO_alpha_' + str(alpha_set[a]) + '_Scores_allfolds.pdf', bbox_inches='tight', dpi=400)
fig2.show()
plt.close()
#plt.savefig(
# visualisation_path + 'img_realisticfake_' + planetlist[j] + '_method_' + str(m) + '_alpha_' + str(
# alpha) + '_Scores.pdf', bbox_inches='tight')
|
eogarvinREPO_NAMEMLCCSPATH_START.@MLCCS_extracted@MLCCS-main@tests@[email protected]_END.py
|
{
"filename": "_sizesrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scattergeo/hoverlabel/font/_sizesrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class SizesrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="sizesrc", parent_name="scattergeo.hoverlabel.font", **kwargs
):
super(SizesrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs,
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scattergeo@hoverlabel@font@[email protected]_END.py
|
{
"filename": "test_memory.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/langchain/tests/unit_tests/schema/test_memory.py",
"type": "Python"
}
|
from langchain.schema.memory import __all__
EXPECTED_ALL = ["BaseMemory"]
def test_all_imports() -> None:
assert set(__all__) == set(EXPECTED_ALL)
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@langchain@tests@unit_tests@schema@[email protected]_END.py
|
{
"filename": "cli__approx-on-full-history__first-sentence.md",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/catboost/docs/en/_includes/work_src/reusage/cli__approx-on-full-history__first-sentence.md",
"type": "Markdown"
}
|
The principles for calculating the approximated values.
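A minimal sketch (not part of the original include) of where this option is set; the Python parameter name `approx_on_full_history` and the pairing with ordered boosting are assumptions inferred from this option's name and documented scope:

```python
from catboost import CatBoostRegressor

# Sketch only: approx_on_full_history controls whether all preceding objects in the
# fold are used when calculating the approximated values.
model = CatBoostRegressor(
    boosting_type="Ordered",
    approx_on_full_history=True,
    iterations=100,
)
```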
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@catboost@docs@en@_includes@work_src@reusage@[email protected]_END.py
|
{
"filename": "test_ufunc.py",
"repo_name": "numpy/numpy",
"repo_path": "numpy_extracted/numpy-main/numpy/_core/tests/test_ufunc.py",
"type": "Python"
}
|
import warnings
import itertools
import sys
import ctypes as ct
import pickle
import pytest
from pytest import param
import numpy as np
import numpy._core.umath as ncu
import numpy._core._umath_tests as umt
import numpy.linalg._umath_linalg as uml
import numpy._core._operand_flag_tests as opflag_tests
import numpy._core._rational_tests as _rational_tests
from numpy.exceptions import AxisError
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_array_equal,
assert_almost_equal, assert_array_almost_equal, assert_no_warnings,
assert_allclose, HAS_REFCOUNT, suppress_warnings, IS_WASM, IS_PYPY,
)
from numpy.testing._private.utils import requires_memory
UNARY_UFUNCS = [obj for obj in np._core.umath.__dict__.values()
if isinstance(obj, np.ufunc)]
UNARY_OBJECT_UFUNCS = [uf for uf in UNARY_UFUNCS if "O->O" in uf.types]
# Remove functions that do not support `floats`
UNARY_OBJECT_UFUNCS.remove(np.bitwise_count)
class TestUfuncKwargs:
def test_kwarg_exact(self):
assert_raises(TypeError, np.add, 1, 2, castingx='safe')
assert_raises(TypeError, np.add, 1, 2, dtypex=int)
assert_raises(TypeError, np.add, 1, 2, extobjx=[4096])
assert_raises(TypeError, np.add, 1, 2, outx=None)
assert_raises(TypeError, np.add, 1, 2, sigx='ii->i')
assert_raises(TypeError, np.add, 1, 2, signaturex='ii->i')
assert_raises(TypeError, np.add, 1, 2, subokx=False)
assert_raises(TypeError, np.add, 1, 2, wherex=[True])
def test_sig_signature(self):
assert_raises(TypeError, np.add, 1, 2, sig='ii->i',
signature='ii->i')
def test_sig_dtype(self):
assert_raises(TypeError, np.add, 1, 2, sig='ii->i',
dtype=int)
assert_raises(TypeError, np.add, 1, 2, signature='ii->i',
dtype=int)
def test_extobj_removed(self):
assert_raises(TypeError, np.add, 1, 2, extobj=[4096])
class TestUfuncGenericLoops:
"""Test generic loops.
The loops to be tested are:
PyUFunc_ff_f_As_dd_d
PyUFunc_ff_f
PyUFunc_dd_d
PyUFunc_gg_g
PyUFunc_FF_F_As_DD_D
PyUFunc_DD_D
PyUFunc_FF_F
PyUFunc_GG_G
PyUFunc_OO_O
PyUFunc_OO_O_method
PyUFunc_f_f_As_d_d
PyUFunc_d_d
PyUFunc_f_f
PyUFunc_g_g
PyUFunc_F_F_As_D_D
PyUFunc_F_F
PyUFunc_D_D
PyUFunc_G_G
PyUFunc_O_O
PyUFunc_O_O_method
PyUFunc_On_Om
Where:
f -- float
d -- double
g -- long double
F -- complex float
D -- complex double
G -- complex long double
O -- python object
It is difficult to assure that each of these loops is entered from the
Python level as the special cased loops are a moving target and the
corresponding types are architecture dependent. We probably need to
define C level testing ufuncs to get at them. For the time being, I've
just looked at the signatures registered in the build directory to find
relevant functions.
"""
np_dtypes = [
(np.single, np.single), (np.single, np.double),
(np.csingle, np.csingle), (np.csingle, np.cdouble),
(np.double, np.double), (np.longdouble, np.longdouble),
(np.cdouble, np.cdouble), (np.clongdouble, np.clongdouble)]
@pytest.mark.parametrize('input_dtype,output_dtype', np_dtypes)
def test_unary_PyUFunc(self, input_dtype, output_dtype, f=np.exp, x=0, y=1):
xs = np.full(10, input_dtype(x), dtype=output_dtype)
ys = f(xs)[::2]
assert_allclose(ys, y)
assert_equal(ys.dtype, output_dtype)
def f2(x, y):
return x**y
@pytest.mark.parametrize('input_dtype,output_dtype', np_dtypes)
def test_binary_PyUFunc(self, input_dtype, output_dtype, f=f2, x=0, y=1):
xs = np.full(10, input_dtype(x), dtype=output_dtype)
ys = f(xs, xs)[::2]
assert_allclose(ys, y)
assert_equal(ys.dtype, output_dtype)
# class to use in testing object method loops
class foo:
def conjugate(self):
return np.bool(1)
def logical_xor(self, obj):
return np.bool(1)
def test_unary_PyUFunc_O_O(self):
x = np.ones(10, dtype=object)
assert_(np.all(np.abs(x) == 1))
def test_unary_PyUFunc_O_O_method_simple(self, foo=foo):
x = np.full(10, foo(), dtype=object)
assert_(np.all(np.conjugate(x) == True))
def test_binary_PyUFunc_OO_O(self):
x = np.ones(10, dtype=object)
assert_(np.all(np.add(x, x) == 2))
def test_binary_PyUFunc_OO_O_method(self, foo=foo):
x = np.full(10, foo(), dtype=object)
assert_(np.all(np.logical_xor(x, x)))
def test_binary_PyUFunc_On_Om_method(self, foo=foo):
x = np.full((10, 2, 3), foo(), dtype=object)
assert_(np.all(np.logical_xor(x, x)))
def test_python_complex_conjugate(self):
# The conjugate ufunc should fall back to calling the method:
arr = np.array([1+2j, 3-4j], dtype="O")
assert isinstance(arr[0], complex)
res = np.conjugate(arr)
assert res.dtype == np.dtype("O")
assert_array_equal(res, np.array([1-2j, 3+4j], dtype="O"))
@pytest.mark.parametrize("ufunc", UNARY_OBJECT_UFUNCS)
def test_unary_PyUFunc_O_O_method_full(self, ufunc):
"""Compare the result of the object loop with non-object one"""
val = np.float64(np.pi/4)
class MyFloat(np.float64):
def __getattr__(self, attr):
try:
return super().__getattr__(attr)
except AttributeError:
return lambda: getattr(np._core.umath, attr)(val)
# Use 0-D arrays, to ensure the same element call
num_arr = np.array(val, dtype=np.float64)
obj_arr = np.array(MyFloat(val), dtype="O")
with np.errstate(all="raise"):
try:
res_num = ufunc(num_arr)
except Exception as exc:
with assert_raises(type(exc)):
ufunc(obj_arr)
else:
res_obj = ufunc(obj_arr)
assert_array_almost_equal(res_num.astype("O"), res_obj)
def _pickleable_module_global():
pass
class TestUfunc:
def test_pickle(self):
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
assert_(pickle.loads(pickle.dumps(np.sin,
protocol=proto)) is np.sin)
# Check that ufunc not defined in the top level numpy namespace
# such as numpy._core._rational_tests.test_add can also be pickled
res = pickle.loads(pickle.dumps(_rational_tests.test_add,
protocol=proto))
assert_(res is _rational_tests.test_add)
def test_pickle_withstring(self):
astring = (b"cnumpy.core\n_ufunc_reconstruct\np0\n"
b"(S'numpy._core.umath'\np1\nS'cos'\np2\ntp3\nRp4\n.")
assert_(pickle.loads(astring) is np.cos)
@pytest.mark.skipif(IS_PYPY, reason="'is' check does not work on PyPy")
def test_pickle_name_is_qualname(self):
# This tests that a simplification of our ufunc pickle code will
# lead to allowing qualnames as names. Future ufuncs should
        # possibly add a specific qualname, or a hook into pickling instead
# (dask+numba may benefit).
_pickleable_module_global.ufunc = umt._pickleable_module_global_ufunc
obj = pickle.loads(pickle.dumps(_pickleable_module_global.ufunc))
assert obj is umt._pickleable_module_global_ufunc
def test_reduceat_shifting_sum(self):
L = 6
x = np.arange(L)
idx = np.array(list(zip(np.arange(L - 2), np.arange(L - 2) + 2))).ravel()
assert_array_equal(np.add.reduceat(x, idx)[::2], [1, 3, 5, 7])
def test_all_ufunc(self):
"""Try to check presence and results of all ufuncs.
The list of ufuncs comes from generate_umath.py and is as follows:
===== ==== ============= =============== ========================
done args function types notes
===== ==== ============= =============== ========================
n 1 conjugate nums + O
n 1 absolute nums + O complex -> real
n 1 negative nums + O
n 1 sign nums + O -> int
n 1 invert bool + ints + O flts raise an error
n 1 degrees real + M cmplx raise an error
n 1 radians real + M cmplx raise an error
n 1 arccos flts + M
n 1 arccosh flts + M
n 1 arcsin flts + M
n 1 arcsinh flts + M
n 1 arctan flts + M
n 1 arctanh flts + M
n 1 cos flts + M
n 1 sin flts + M
n 1 tan flts + M
n 1 cosh flts + M
n 1 sinh flts + M
n 1 tanh flts + M
n 1 exp flts + M
n 1 expm1 flts + M
n 1 log flts + M
n 1 log10 flts + M
n 1 log1p flts + M
n 1 sqrt flts + M real x < 0 raises error
n 1 ceil real + M
n 1 trunc real + M
n 1 floor real + M
n 1 fabs real + M
n 1 rint flts + M
n 1 isnan flts -> bool
n 1 isinf flts -> bool
n 1 isfinite flts -> bool
n 1 signbit real -> bool
n 1 modf real -> (frac, int)
n 1 logical_not bool + nums + M -> bool
n 2 left_shift ints + O flts raise an error
n 2 right_shift ints + O flts raise an error
n 2 add bool + nums + O boolean + is ||
n 2 subtract bool + nums + O boolean - is ^
n 2 multiply bool + nums + O boolean * is &
n 2 divide nums + O
n 2 floor_divide nums + O
n 2 true_divide nums + O bBhH -> f, iIlLqQ -> d
n 2 fmod nums + M
n 2 power nums + O
n 2 greater bool + nums + O -> bool
n 2 greater_equal bool + nums + O -> bool
n 2 less bool + nums + O -> bool
n 2 less_equal bool + nums + O -> bool
n 2 equal bool + nums + O -> bool
n 2 not_equal bool + nums + O -> bool
n 2 logical_and bool + nums + M -> bool
n 2 logical_or bool + nums + M -> bool
n 2 logical_xor bool + nums + M -> bool
n 2 maximum bool + nums + O
n 2 minimum bool + nums + O
n 2 bitwise_and bool + ints + O flts raise an error
n 2 bitwise_or bool + ints + O flts raise an error
n 2 bitwise_xor bool + ints + O flts raise an error
n 2 arctan2 real + M
n 2 remainder ints + real + O
n 2 hypot real + M
===== ==== ============= =============== ========================
Types other than those listed will be accepted, but they are cast to
the smallest compatible type for which the function is defined. The
casting rules are:
bool -> int8 -> float32
ints -> double
"""
pass
# from include/numpy/ufuncobject.h
size_inferred = 2
can_ignore = 4
def test_signature0(self):
# the arguments to test_signature are: nin, nout, core_signature
enabled, num_dims, ixs, flags, sizes = umt.test_signature(
2, 1, "(i),(i)->()")
assert_equal(enabled, 1)
assert_equal(num_dims, (1, 1, 0))
assert_equal(ixs, (0, 0))
assert_equal(flags, (self.size_inferred,))
assert_equal(sizes, (-1,))
def test_signature1(self):
# empty core signature; treat as plain ufunc (with trivial core)
enabled, num_dims, ixs, flags, sizes = umt.test_signature(
2, 1, "(),()->()")
assert_equal(enabled, 0)
assert_equal(num_dims, (0, 0, 0))
assert_equal(ixs, ())
assert_equal(flags, ())
assert_equal(sizes, ())
def test_signature2(self):
# more complicated names for variables
enabled, num_dims, ixs, flags, sizes = umt.test_signature(
2, 1, "(i1,i2),(J_1)->(_kAB)")
assert_equal(enabled, 1)
assert_equal(num_dims, (2, 1, 1))
assert_equal(ixs, (0, 1, 2, 3))
assert_equal(flags, (self.size_inferred,)*4)
assert_equal(sizes, (-1, -1, -1, -1))
def test_signature3(self):
enabled, num_dims, ixs, flags, sizes = umt.test_signature(
2, 1, "(i1, i12), (J_1)->(i12, i2)")
assert_equal(enabled, 1)
assert_equal(num_dims, (2, 1, 2))
assert_equal(ixs, (0, 1, 2, 1, 3))
assert_equal(flags, (self.size_inferred,)*4)
assert_equal(sizes, (-1, -1, -1, -1))
def test_signature4(self):
# matrix_multiply signature from _umath_tests
enabled, num_dims, ixs, flags, sizes = umt.test_signature(
2, 1, "(n,k),(k,m)->(n,m)")
assert_equal(enabled, 1)
assert_equal(num_dims, (2, 2, 2))
assert_equal(ixs, (0, 1, 1, 2, 0, 2))
assert_equal(flags, (self.size_inferred,)*3)
assert_equal(sizes, (-1, -1, -1))
def test_signature5(self):
# matmul signature from _umath_tests
enabled, num_dims, ixs, flags, sizes = umt.test_signature(
2, 1, "(n?,k),(k,m?)->(n?,m?)")
assert_equal(enabled, 1)
assert_equal(num_dims, (2, 2, 2))
assert_equal(ixs, (0, 1, 1, 2, 0, 2))
assert_equal(flags, (self.size_inferred | self.can_ignore,
self.size_inferred,
self.size_inferred | self.can_ignore))
assert_equal(sizes, (-1, -1, -1))
def test_signature6(self):
enabled, num_dims, ixs, flags, sizes = umt.test_signature(
1, 1, "(3)->()")
assert_equal(enabled, 1)
assert_equal(num_dims, (1, 0))
assert_equal(ixs, (0,))
assert_equal(flags, (0,))
assert_equal(sizes, (3,))
def test_signature7(self):
enabled, num_dims, ixs, flags, sizes = umt.test_signature(
3, 1, "(3),(03,3),(n)->(9)")
assert_equal(enabled, 1)
assert_equal(num_dims, (1, 2, 1, 1))
assert_equal(ixs, (0, 0, 0, 1, 2))
assert_equal(flags, (0, self.size_inferred, 0))
assert_equal(sizes, (3, -1, 9))
def test_signature8(self):
enabled, num_dims, ixs, flags, sizes = umt.test_signature(
3, 1, "(3?),(3?,3?),(n)->(9)")
assert_equal(enabled, 1)
assert_equal(num_dims, (1, 2, 1, 1))
assert_equal(ixs, (0, 0, 0, 1, 2))
assert_equal(flags, (self.can_ignore, self.size_inferred, 0))
assert_equal(sizes, (3, -1, 9))
def test_signature9(self):
enabled, num_dims, ixs, flags, sizes = umt.test_signature(
1, 1, "( 3) -> ( )")
assert_equal(enabled, 1)
assert_equal(num_dims, (1, 0))
assert_equal(ixs, (0,))
assert_equal(flags, (0,))
assert_equal(sizes, (3,))
def test_signature10(self):
enabled, num_dims, ixs, flags, sizes = umt.test_signature(
3, 1, "( 3? ) , (3? , 3?) ,(n )-> ( 9)")
assert_equal(enabled, 1)
assert_equal(num_dims, (1, 2, 1, 1))
assert_equal(ixs, (0, 0, 0, 1, 2))
assert_equal(flags, (self.can_ignore, self.size_inferred, 0))
assert_equal(sizes, (3, -1, 9))
def test_signature_failure_extra_parenthesis(self):
with assert_raises(ValueError):
umt.test_signature(2, 1, "((i)),(i)->()")
def test_signature_failure_mismatching_parenthesis(self):
with assert_raises(ValueError):
umt.test_signature(2, 1, "(i),)i(->()")
def test_signature_failure_signature_missing_input_arg(self):
with assert_raises(ValueError):
umt.test_signature(2, 1, "(i),->()")
def test_signature_failure_signature_missing_output_arg(self):
with assert_raises(ValueError):
umt.test_signature(2, 2, "(i),(i)->()")
def test_get_signature(self):
assert_equal(np.vecdot.signature, "(n),(n)->()")
def test_forced_sig(self):
a = 0.5*np.arange(3, dtype='f8')
assert_equal(np.add(a, 0.5), [0.5, 1, 1.5])
with pytest.warns(DeprecationWarning):
assert_equal(np.add(a, 0.5, sig='i', casting='unsafe'), [0, 0, 1])
assert_equal(np.add(a, 0.5, sig='ii->i', casting='unsafe'), [0, 0, 1])
with pytest.warns(DeprecationWarning):
assert_equal(np.add(a, 0.5, sig=('i4',), casting='unsafe'),
[0, 0, 1])
assert_equal(np.add(a, 0.5, sig=('i4', 'i4', 'i4'),
casting='unsafe'), [0, 0, 1])
b = np.zeros((3,), dtype='f8')
np.add(a, 0.5, out=b)
assert_equal(b, [0.5, 1, 1.5])
b[:] = 0
with pytest.warns(DeprecationWarning):
np.add(a, 0.5, sig='i', out=b, casting='unsafe')
assert_equal(b, [0, 0, 1])
b[:] = 0
np.add(a, 0.5, sig='ii->i', out=b, casting='unsafe')
assert_equal(b, [0, 0, 1])
b[:] = 0
with pytest.warns(DeprecationWarning):
np.add(a, 0.5, sig=('i4',), out=b, casting='unsafe')
assert_equal(b, [0, 0, 1])
b[:] = 0
np.add(a, 0.5, sig=('i4', 'i4', 'i4'), out=b, casting='unsafe')
assert_equal(b, [0, 0, 1])
def test_signature_all_None(self):
# signature all None, is an acceptable alternative (since 1.21)
# to not providing a signature.
res1 = np.add([3], [4], sig=(None, None, None))
res2 = np.add([3], [4])
assert_array_equal(res1, res2)
res1 = np.maximum([3], [4], sig=(None, None, None))
res2 = np.maximum([3], [4])
assert_array_equal(res1, res2)
with pytest.raises(TypeError):
# special case, that would be deprecated anyway, so errors:
np.add(3, 4, signature=(None,))
def test_signature_dtype_type(self):
# Since that will be the normal behaviour (past NumPy 1.21)
# we do support the types already:
float_dtype = type(np.dtype(np.float64))
np.add(3, 4, signature=(float_dtype, float_dtype, None))
@pytest.mark.parametrize("get_kwarg", [
lambda dt: dict(dtype=dt),
lambda dt: dict(signature=(dt, None, None))])
def test_signature_dtype_instances_allowed(self, get_kwarg):
# We allow certain dtype instances when there is a clear singleton
# and the given one is equivalent; mainly for backcompat.
int64 = np.dtype("int64")
int64_2 = pickle.loads(pickle.dumps(int64))
# Relies on pickling behavior, if assert fails just remove test...
assert int64 is not int64_2
assert np.add(1, 2, **get_kwarg(int64_2)).dtype == int64
        td = np.timedelta64(2, "s")
assert np.add(td, td, **get_kwarg("m8")).dtype == "m8[s]"
@pytest.mark.parametrize("get_kwarg", [
param(lambda x: dict(dtype=x), id="dtype"),
param(lambda x: dict(signature=(x, None, None)), id="signature")])
    def test_signature_dtype_instances_rejected(self, get_kwarg):
msg = "The `dtype` and `signature` arguments to ufuncs"
with pytest.raises(TypeError, match=msg):
np.add(3, 5, **get_kwarg(np.dtype("int64").newbyteorder()))
with pytest.raises(TypeError, match=msg):
np.add(3, 5, **get_kwarg(np.dtype("m8[ns]")))
with pytest.raises(TypeError, match=msg):
np.add(3, 5, **get_kwarg("m8[ns]"))
@pytest.mark.parametrize("casting", ["unsafe", "same_kind", "safe"])
def test_partial_signature_mismatch(self, casting):
# If the second argument matches already, no need to specify it:
res = np.ldexp(np.float32(1.), np.int_(2), dtype="d")
assert res.dtype == "d"
res = np.ldexp(np.float32(1.), np.int_(2), signature=(None, None, "d"))
assert res.dtype == "d"
# ldexp only has a loop for long input as second argument, overriding
# the output cannot help with that (no matter the casting)
with pytest.raises(TypeError):
np.ldexp(1., np.uint64(3), dtype="d")
with pytest.raises(TypeError):
np.ldexp(1., np.uint64(3), signature=(None, None, "d"))
def test_partial_signature_mismatch_with_cache(self):
with pytest.raises(TypeError):
np.add(np.float16(1), np.uint64(2), sig=("e", "d", None))
# Ensure e,d->None is in the dispatching cache (double loop)
np.add(np.float16(1), np.float64(2))
# The error must still be raised:
with pytest.raises(TypeError):
np.add(np.float16(1), np.uint64(2), sig=("e", "d", None))
def test_use_output_signature_for_all_arguments(self):
# Test that providing only `dtype=` or `signature=(None, None, dtype)`
# is sufficient if falling back to a homogeneous signature works.
# In this case, the `intp, intp -> intp` loop is chosen.
res = np.power(1.5, 2.8, dtype=np.intp, casting="unsafe")
assert res == 1 # the cast happens first.
res = np.power(1.5, 2.8, signature=(None, None, np.intp),
casting="unsafe")
assert res == 1
with pytest.raises(TypeError):
# the unsafe casting would normally cause errors though:
np.power(1.5, 2.8, dtype=np.intp)
def test_signature_errors(self):
with pytest.raises(TypeError,
match="the signature object to ufunc must be a string or"):
np.add(3, 4, signature=123.) # neither a string nor a tuple
with pytest.raises(ValueError):
# bad symbols that do not translate to dtypes
np.add(3, 4, signature="%^->#")
with pytest.raises(ValueError):
np.add(3, 4, signature=b"ii-i") # incomplete and byte string
with pytest.raises(ValueError):
np.add(3, 4, signature="ii>i") # incomplete string
with pytest.raises(ValueError):
np.add(3, 4, signature=(None, "f8")) # bad length
with pytest.raises(UnicodeDecodeError):
np.add(3, 4, signature=b"\xff\xff->i")
def test_forced_dtype_times(self):
# Signatures only set the type numbers (not the actual loop dtypes)
# so using `M` in a signature/dtype should generally work:
a = np.array(['2010-01-02', '1999-03-14', '1833-03'], dtype='>M8[D]')
np.maximum(a, a, dtype="M")
np.maximum.reduce(a, dtype="M")
arr = np.arange(10, dtype="m8[s]")
np.add(arr, arr, dtype="m")
np.maximum(arr, arr, dtype="m")
@pytest.mark.parametrize("ufunc", [np.add, np.sqrt])
def test_cast_safety(self, ufunc):
"""Basic test for the safest casts, because ufuncs inner loops can
indicate a cast-safety as well (which is normally always "no").
"""
def call_ufunc(arr, **kwargs):
return ufunc(*(arr,) * ufunc.nin, **kwargs)
arr = np.array([1., 2., 3.], dtype=np.float32)
arr_bs = arr.astype(arr.dtype.newbyteorder())
expected = call_ufunc(arr)
# Normally, a "no" cast:
res = call_ufunc(arr, casting="no")
assert_array_equal(expected, res)
# Byte-swapping is not allowed with "no" though:
with pytest.raises(TypeError):
call_ufunc(arr_bs, casting="no")
# But is allowed with "equiv":
res = call_ufunc(arr_bs, casting="equiv")
assert_array_equal(expected, res)
# Casting to float64 is safe, but not equiv:
with pytest.raises(TypeError):
call_ufunc(arr_bs, dtype=np.float64, casting="equiv")
# but it is safe cast:
res = call_ufunc(arr_bs, dtype=np.float64, casting="safe")
expected = call_ufunc(arr.astype(np.float64)) # upcast
assert_array_equal(expected, res)
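    def test_cast_safety_dtype_kwarg_sketch(self):
        # Supplementary sketch, not part of the upstream suite: shows how the
        # `casting` keyword interacts with `dtype=` on an ordinary call.
        # The method name is our own; only public NumPy APIs are used.
        a = np.arange(3, dtype=np.int64)
        # int64 -> float64 is a safe cast, so this is fine:
        res = np.add(a, a, dtype=np.float64, casting="safe")
        assert res.dtype == np.float64
        # float64 -> int32 can lose information, so "safe" refuses it:
        with pytest.raises(TypeError):
            np.add(np.ones(3), np.ones(3), dtype=np.int32, casting="safe")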
@pytest.mark.parametrize("ufunc", [np.add, np.equal])
def test_cast_safety_scalar(self, ufunc):
# We test add and equal, because equal has special scalar handling
# Note that the "equiv" casting behavior should maybe be considered
# a current implementation detail.
with pytest.raises(TypeError):
# this picks an integer loop, which is not safe
ufunc(3., 4., dtype=int, casting="safe")
with pytest.raises(TypeError):
# We accept python float as float64 but not float32 for equiv.
ufunc(3., 4., dtype="float32", casting="equiv")
# Special case for object and equal (note that equiv implies safe)
ufunc(3, 4, dtype=object, casting="equiv")
# Picks a double loop for both, first is equiv, second safe:
ufunc(np.array([3.]), 3., casting="equiv")
ufunc(np.array([3.]), 3, casting="safe")
ufunc(np.array([3]), 3, casting="equiv")
def test_cast_safety_scalar_special(self):
# We allow this (and it succeeds) via object, although the equiv
# part may not be important.
np.equal(np.array([3]), 2**300, casting="equiv")
def test_true_divide(self):
a = np.array(10)
b = np.array(20)
tgt = np.array(0.5)
for tc in 'bhilqBHILQefdgFDG':
dt = np.dtype(tc)
aa = a.astype(dt)
bb = b.astype(dt)
# Check result value and dtype.
for x, y in itertools.product([aa, -aa], [bb, -bb]):
# Check with no output type specified
if tc in 'FDG':
tgt = complex(x)/complex(y)
else:
tgt = float(x)/float(y)
res = np.true_divide(x, y)
rtol = max(np.finfo(res).resolution, 1e-15)
assert_allclose(res, tgt, rtol=rtol)
if tc in 'bhilqBHILQ':
assert_(res.dtype.name == 'float64')
else:
assert_(res.dtype.name == dt.name )
# Check with output type specified. This also checks for the
# incorrect casts in issue gh-3484 because the unary '-' does
                # not change types, even for unsigned types. Hence casts in the
# ufunc from signed to unsigned and vice versa will lead to
# errors in the values.
for tcout in 'bhilqBHILQ':
dtout = np.dtype(tcout)
assert_raises(TypeError, np.true_divide, x, y, dtype=dtout)
for tcout in 'efdg':
dtout = np.dtype(tcout)
if tc in 'FDG':
# Casting complex to float is not allowed
assert_raises(TypeError, np.true_divide, x, y, dtype=dtout)
else:
tgt = float(x)/float(y)
rtol = max(np.finfo(dtout).resolution, 1e-15)
# The value of tiny for double double is NaN
with suppress_warnings() as sup:
sup.filter(UserWarning)
if not np.isnan(np.finfo(dtout).tiny):
atol = max(np.finfo(dtout).tiny, 3e-308)
else:
atol = 3e-308
# Some test values result in invalid for float16
# and the cast to it may overflow to inf.
with np.errstate(invalid='ignore', over='ignore'):
res = np.true_divide(x, y, dtype=dtout)
if not np.isfinite(res) and tcout == 'e':
continue
assert_allclose(res, tgt, rtol=rtol, atol=atol)
assert_(res.dtype.name == dtout.name)
for tcout in 'FDG':
dtout = np.dtype(tcout)
tgt = complex(x)/complex(y)
rtol = max(np.finfo(dtout).resolution, 1e-15)
# The value of tiny for double double is NaN
with suppress_warnings() as sup:
sup.filter(UserWarning)
if not np.isnan(np.finfo(dtout).tiny):
atol = max(np.finfo(dtout).tiny, 3e-308)
else:
atol = 3e-308
res = np.true_divide(x, y, dtype=dtout)
if not np.isfinite(res):
continue
assert_allclose(res, tgt, rtol=rtol, atol=atol)
assert_(res.dtype.name == dtout.name)
# Check booleans
a = np.ones((), dtype=np.bool)
res = np.true_divide(a, a)
assert_(res == 1.0)
assert_(res.dtype.name == 'float64')
res = np.true_divide(~a, a)
assert_(res == 0.0)
assert_(res.dtype.name == 'float64')
def test_sum_stability(self):
a = np.ones(500, dtype=np.float32)
assert_almost_equal((a / 10.).sum() - a.size / 10., 0, 4)
a = np.ones(500, dtype=np.float64)
assert_almost_equal((a / 10.).sum() - a.size / 10., 0, 13)
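    def test_sum_pairwise_vs_sequential_sketch(self):
        # Supplementary sketch, not part of the upstream suite: np.sum uses
        # pairwise summation over contiguous data, which keeps float32
        # round-off much smaller than the purely sequential accumulation
        # that cumsum performs. The method name is our own.
        a = np.full(10**6, 0.1, dtype=np.float32)
        exact = 10**6 * 0.1
        pairwise_err = abs(float(a.sum()) - exact)
        sequential_err = abs(float(a.cumsum()[-1]) - exact)
        assert pairwise_err < sequential_err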
@pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
def test_sum(self):
for dt in (int, np.float16, np.float32, np.float64, np.longdouble):
for v in (0, 1, 2, 7, 8, 9, 15, 16, 19, 127,
128, 1024, 1235):
# warning if sum overflows, which it does in float16
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", RuntimeWarning)
tgt = dt(v * (v + 1) / 2)
overflow = not np.isfinite(tgt)
assert_equal(len(w), 1 * overflow)
d = np.arange(1, v + 1, dtype=dt)
assert_almost_equal(np.sum(d), tgt)
assert_equal(len(w), 2 * overflow)
assert_almost_equal(np.sum(d[::-1]), tgt)
assert_equal(len(w), 3 * overflow)
d = np.ones(500, dtype=dt)
assert_almost_equal(np.sum(d[::2]), 250.)
assert_almost_equal(np.sum(d[1::2]), 250.)
assert_almost_equal(np.sum(d[::3]), 167.)
assert_almost_equal(np.sum(d[1::3]), 167.)
assert_almost_equal(np.sum(d[::-2]), 250.)
assert_almost_equal(np.sum(d[-1::-2]), 250.)
assert_almost_equal(np.sum(d[::-3]), 167.)
assert_almost_equal(np.sum(d[-1::-3]), 167.)
# sum with first reduction entry != 0
d = np.ones((1,), dtype=dt)
d += d
assert_almost_equal(d, 2.)
def test_sum_complex(self):
for dt in (np.complex64, np.complex128, np.clongdouble):
for v in (0, 1, 2, 7, 8, 9, 15, 16, 19, 127,
128, 1024, 1235):
tgt = dt(v * (v + 1) / 2) - dt((v * (v + 1) / 2) * 1j)
d = np.empty(v, dtype=dt)
d.real = np.arange(1, v + 1)
d.imag = -np.arange(1, v + 1)
assert_almost_equal(np.sum(d), tgt)
assert_almost_equal(np.sum(d[::-1]), tgt)
d = np.ones(500, dtype=dt) + 1j
assert_almost_equal(np.sum(d[::2]), 250. + 250j)
assert_almost_equal(np.sum(d[1::2]), 250. + 250j)
assert_almost_equal(np.sum(d[::3]), 167. + 167j)
assert_almost_equal(np.sum(d[1::3]), 167. + 167j)
assert_almost_equal(np.sum(d[::-2]), 250. + 250j)
assert_almost_equal(np.sum(d[-1::-2]), 250. + 250j)
assert_almost_equal(np.sum(d[::-3]), 167. + 167j)
assert_almost_equal(np.sum(d[-1::-3]), 167. + 167j)
# sum with first reduction entry != 0
d = np.ones((1,), dtype=dt) + 1j
d += d
assert_almost_equal(d, 2. + 2j)
def test_sum_initial(self):
# Integer, single axis
assert_equal(np.sum([3], initial=2), 5)
# Floating point
assert_almost_equal(np.sum([0.2], initial=0.1), 0.3)
# Multiple non-adjacent axes
assert_equal(np.sum(np.ones((2, 3, 5), dtype=np.int64), axis=(0, 2), initial=2),
[12, 12, 12])
def test_sum_where(self):
# More extensive tests done in test_reduction_with_where.
assert_equal(np.sum([[1., 2.], [3., 4.]], where=[True, False]), 4.)
assert_equal(np.sum([[1., 2.], [3., 4.]], axis=0, initial=5.,
where=[True, False]), [9., 5.])
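    def test_sum_initial_and_where_sketch(self):
        # Supplementary sketch, not part of the upstream suite: `initial`
        # seeds the reduction and `where` masks elements, and the two
        # compose. The method name is our own.
        a = np.array([[1., 2., 3.], [4., 5., 6.]])
        res = np.sum(a, axis=1, initial=10., where=[True, False, True])
        assert_array_equal(res, [14., 20.])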
def test_vecdot(self):
arr1 = np.arange(6).reshape((2, 3))
arr2 = np.arange(3).reshape((1, 3))
actual = np.vecdot(arr1, arr2)
expected = np.array([5, 14])
assert_array_equal(actual, expected)
actual2 = np.vecdot(arr1.T, arr2.T, axis=-2)
assert_array_equal(actual2, expected)
actual3 = np.vecdot(arr1.astype("object"), arr2)
assert_array_equal(actual3, expected.astype("object"))
def test_matvec(self):
arr1 = np.arange(6).reshape((2, 3))
arr2 = np.arange(3).reshape((1, 3))
actual = np.matvec(arr1, arr2)
expected = np.array([[5, 14]])
assert_array_equal(actual, expected)
actual2 = np.matvec(arr1.T, arr2.T, axes=[(-1, -2), -2, -1])
assert_array_equal(actual2, expected)
actual3 = np.matvec(arr1.astype("object"), arr2)
assert_array_equal(actual3, expected.astype("object"))
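    def test_vecmat_sketch(self):
        # Supplementary sketch, not part of the upstream suite: np.vecmat
        # conjugates its vector operand before contracting it with the
        # matrix from the left, i.e. it computes x†A. The method name is
        # our own.
        vec = np.array([1j, 0., 0.])
        mat = np.eye(3)
        assert_array_equal(np.vecmat(vec, mat), np.array([-1j, 0., 0.]))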
@pytest.mark.parametrize("vec", [
np.array([[1., 2., 3.], [4., 5., 6.]]),
np.array([[1., 2j, 3.], [4., 5., 6j]]),
np.array([[1., 2., 3.], [4., 5., 6.]], dtype=object),
np.array([[1., 2j, 3.], [4., 5., 6j]], dtype=object)])
@pytest.mark.parametrize("matrix", [
None,
np.array([[1.+1j, 0.5, -0.5j],
[0.25, 2j, 0.],
[4., 0., -1j]])])
def test_vecmatvec_identity(self, matrix, vec):
"""Check that (x†A)x equals x†(Ax)."""
mat = matrix if matrix is not None else np.eye(3)
matvec = np.matvec(mat, vec) # Ax
vecmat = np.vecmat(vec, mat) # x†A
if matrix is None:
assert_array_equal(matvec, vec)
assert_array_equal(vecmat.conj(), vec)
assert_array_equal(matvec, (mat @ vec[..., np.newaxis]).squeeze(-1))
assert_array_equal(vecmat, (vec[..., np.newaxis].mT.conj()
@ mat).squeeze(-2))
expected = np.einsum('...i,ij,...j', vec.conj(), mat, vec)
vec_matvec = (vec.conj() * matvec).sum(-1)
vecmat_vec = (vecmat * vec).sum(-1)
assert_array_equal(vec_matvec, expected)
assert_array_equal(vecmat_vec, expected)
@pytest.mark.parametrize("ufunc, shape1, shape2, conj", [
(np.vecdot, (3,), (3,), True),
(np.vecmat, (3,), (3, 1), True),
(np.matvec, (1, 3), (3,), False),
(np.matmul, (1, 3), (3, 1), False),
])
def test_vecdot_matvec_vecmat_complex(self, ufunc, shape1, shape2, conj):
arr1 = np.array([1, 2j, 3])
arr2 = np.array([1, 2, 3])
actual1 = ufunc(arr1.reshape(shape1), arr2.reshape(shape2))
expected1 = np.array(((arr1.conj() if conj else arr1) * arr2).sum(),
ndmin=min(len(shape1), len(shape2)))
assert_array_equal(actual1, expected1)
# This would fail for conj=True, since matmul omits the conjugate.
if not conj:
assert_array_equal(arr1.reshape(shape1) @ arr2.reshape(shape2),
expected1)
actual2 = ufunc(arr2.reshape(shape1), arr1.reshape(shape2))
expected2 = np.array(((arr2.conj() if conj else arr2) * arr1).sum(),
ndmin=min(len(shape1), len(shape2)))
assert_array_equal(actual2, expected2)
actual3 = ufunc(arr1.reshape(shape1).astype("object"),
arr2.reshape(shape2).astype("object"))
expected3 = expected1.astype(object)
assert_array_equal(actual3, expected3)
def test_vecdot_subclass(self):
class MySubclass(np.ndarray):
pass
arr1 = np.arange(6).reshape((2, 3)).view(MySubclass)
arr2 = np.arange(3).reshape((1, 3)).view(MySubclass)
result = np.vecdot(arr1, arr2)
assert isinstance(result, MySubclass)
def test_vecdot_object_no_conjugate(self):
arr = np.array(["1", "2"], dtype=object)
with pytest.raises(AttributeError, match="conjugate"):
np.vecdot(arr, arr)
def test_vecdot_object_breaks_outer_loop_on_error(self):
arr1 = np.ones((3, 3)).astype(object)
arr2 = arr1.copy()
arr2[1, 1] = None
out = np.zeros(3).astype(object)
with pytest.raises(TypeError, match=r"\*: 'float' and 'NoneType'"):
np.vecdot(arr1, arr2, out=out)
assert out[0] == 3
assert out[1] == out[2] == 0
def test_broadcast(self):
msg = "broadcast"
a = np.arange(4).reshape((2, 1, 2))
b = np.arange(4).reshape((1, 2, 2))
assert_array_equal(np.vecdot(a, b), np.sum(a*b, axis=-1), err_msg=msg)
msg = "extend & broadcast loop dimensions"
b = np.arange(4).reshape((2, 2))
assert_array_equal(np.vecdot(a, b), np.sum(a*b, axis=-1), err_msg=msg)
# Broadcast in core dimensions should fail
a = np.arange(8).reshape((4, 2))
b = np.arange(4).reshape((4, 1))
assert_raises(ValueError, np.vecdot, a, b)
# Extend core dimensions should fail
a = np.arange(8).reshape((4, 2))
b = np.array(7)
assert_raises(ValueError, np.vecdot, a, b)
# Broadcast should fail
a = np.arange(2).reshape((2, 1, 1))
b = np.arange(3).reshape((3, 1, 1))
assert_raises(ValueError, np.vecdot, a, b)
# Writing to a broadcasted array with overlap should warn, gh-2705
a = np.arange(2)
b = np.arange(4).reshape((2, 2))
u, v = np.broadcast_arrays(a, b)
assert_equal(u.strides[0], 0)
x = u + v
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
u += v
assert_equal(len(w), 1)
assert_(x[0, 0] != u[0, 0])
# Output reduction should not be allowed.
# See gh-15139
a = np.arange(6).reshape(3, 2)
b = np.ones(2)
out = np.empty(())
assert_raises(ValueError, np.vecdot, a, b, out)
out2 = np.empty(3)
c = np.vecdot(a, b, out2)
assert_(c is out2)
def test_out_broadcasts(self):
# For ufuncs and gufuncs (not for reductions), we currently allow
        # the output to cause broadcasting of the input arrays, both along
        # dimensions with shape 1 and along dimensions which do not
# exist at all in the inputs.
arr = np.arange(3).reshape(1, 3)
out = np.empty((5, 4, 3))
np.add(arr, arr, out=out)
assert (out == np.arange(3) * 2).all()
# The same holds for gufuncs (gh-16484)
np.vecdot(arr, arr, out=out)
# the result would be just a scalar `5`, but is broadcast fully:
assert (out == 5).all()
@pytest.mark.parametrize(["arr", "out"], [
([2], np.empty(())),
([1, 2], np.empty(1)),
(np.ones((4, 3)), np.empty((4, 1)))],
ids=["(1,)->()", "(2,)->(1,)", "(4, 3)->(4, 1)"])
def test_out_broadcast_errors(self, arr, out):
# Output is (currently) allowed to broadcast inputs, but it cannot be
# smaller than the actual result.
with pytest.raises(ValueError, match="non-broadcastable"):
np.positive(arr, out=out)
with pytest.raises(ValueError, match="non-broadcastable"):
np.add(np.ones(()), arr, out=out)
def test_type_cast(self):
msg = "type cast"
a = np.arange(6, dtype='short').reshape((2, 3))
assert_array_equal(np.vecdot(a, a), np.sum(a*a, axis=-1),
err_msg=msg)
msg = "type cast on one argument"
a = np.arange(6).reshape((2, 3))
b = a + 0.1
assert_array_almost_equal(np.vecdot(a, b), np.sum(a*b, axis=-1),
err_msg=msg)
def test_endian(self):
msg = "big endian"
a = np.arange(6, dtype='>i4').reshape((2, 3))
assert_array_equal(np.vecdot(a, a), np.sum(a*a, axis=-1),
err_msg=msg)
msg = "little endian"
a = np.arange(6, dtype='<i4').reshape((2, 3))
assert_array_equal(np.vecdot(a, a), np.sum(a*a, axis=-1),
err_msg=msg)
# Output should always be native-endian
Ba = np.arange(1, dtype='>f8')
La = np.arange(1, dtype='<f8')
assert_equal((Ba+Ba).dtype, np.dtype('f8'))
assert_equal((Ba+La).dtype, np.dtype('f8'))
assert_equal((La+Ba).dtype, np.dtype('f8'))
assert_equal((La+La).dtype, np.dtype('f8'))
assert_equal(np.absolute(La).dtype, np.dtype('f8'))
assert_equal(np.absolute(Ba).dtype, np.dtype('f8'))
assert_equal(np.negative(La).dtype, np.dtype('f8'))
assert_equal(np.negative(Ba).dtype, np.dtype('f8'))
def test_incontiguous_array(self):
msg = "incontiguous memory layout of array"
x = np.arange(64).reshape((2, 2, 2, 2, 2, 2))
a = x[:, 0,:, 0,:, 0]
b = x[:, 1,:, 1,:, 1]
a[0, 0, 0] = -1
msg2 = "make sure it references to the original array"
assert_equal(x[0, 0, 0, 0, 0, 0], -1, err_msg=msg2)
assert_array_equal(np.vecdot(a, b), np.sum(a*b, axis=-1), err_msg=msg)
x = np.arange(24).reshape(2, 3, 4)
a = x.T
b = x.T
a[0, 0, 0] = -1
assert_equal(x[0, 0, 0], -1, err_msg=msg2)
assert_array_equal(np.vecdot(a, b), np.sum(a*b, axis=-1), err_msg=msg)
def test_output_argument(self):
msg = "output argument"
a = np.arange(12).reshape((2, 3, 2))
b = np.arange(4).reshape((2, 1, 2)) + 1
c = np.zeros((2, 3), dtype='int')
np.vecdot(a, b, c)
assert_array_equal(c, np.sum(a*b, axis=-1), err_msg=msg)
c[:] = -1
np.vecdot(a, b, out=c)
assert_array_equal(c, np.sum(a*b, axis=-1), err_msg=msg)
msg = "output argument with type cast"
c = np.zeros((2, 3), dtype='int16')
np.vecdot(a, b, c)
assert_array_equal(c, np.sum(a*b, axis=-1), err_msg=msg)
c[:] = -1
np.vecdot(a, b, out=c)
assert_array_equal(c, np.sum(a*b, axis=-1), err_msg=msg)
msg = "output argument with incontiguous layout"
c = np.zeros((2, 3, 4), dtype='int16')
np.vecdot(a, b, c[..., 0])
assert_array_equal(c[..., 0], np.sum(a*b, axis=-1), err_msg=msg)
c[:] = -1
np.vecdot(a, b, out=c[..., 0])
assert_array_equal(c[..., 0], np.sum(a*b, axis=-1), err_msg=msg)
def test_axes_argument(self):
# vecdot signature: '(n),(n)->()'
a = np.arange(27.).reshape((3, 3, 3))
b = np.arange(10., 19.).reshape((3, 1, 3))
# basic tests on inputs (outputs tested below with matrix_multiply).
c = np.vecdot(a, b)
assert_array_equal(c, (a * b).sum(-1))
# default
c = np.vecdot(a, b, axes=[(-1,), (-1,), ()])
assert_array_equal(c, (a * b).sum(-1))
# integers ok for single axis.
c = np.vecdot(a, b, axes=[-1, -1, ()])
assert_array_equal(c, (a * b).sum(-1))
# mix fine
c = np.vecdot(a, b, axes=[(-1,), -1, ()])
assert_array_equal(c, (a * b).sum(-1))
# can omit last axis.
c = np.vecdot(a, b, axes=[-1, -1])
assert_array_equal(c, (a * b).sum(-1))
# can pass in other types of integer (with __index__ protocol)
c = np.vecdot(a, b, axes=[np.int8(-1), np.array(-1, dtype=np.int32)])
assert_array_equal(c, (a * b).sum(-1))
# swap some axes
c = np.vecdot(a, b, axes=[0, 0])
assert_array_equal(c, (a * b).sum(0))
c = np.vecdot(a, b, axes=[0, 2])
assert_array_equal(c, (a.transpose(1, 2, 0) * b).sum(-1))
# Check errors for improperly constructed axes arguments.
# should have list.
assert_raises(TypeError, np.vecdot, a, b, axes=-1)
# needs enough elements
assert_raises(ValueError, np.vecdot, a, b, axes=[-1])
# should pass in indices.
assert_raises(TypeError, np.vecdot, a, b, axes=[-1.0, -1.0])
assert_raises(TypeError, np.vecdot, a, b, axes=[(-1.0,), -1])
assert_raises(TypeError, np.vecdot, a, b, axes=[None, 1])
# cannot pass an index unless there is only one dimension
# (output is wrong in this case)
assert_raises(AxisError, np.vecdot, a, b, axes=[-1, -1, -1])
# or pass in generally the wrong number of axes
assert_raises(AxisError, np.vecdot, a, b, axes=[-1, -1, (-1,)])
assert_raises(AxisError, np.vecdot, a, b, axes=[-1, (-2, -1), ()])
# axes need to have same length.
assert_raises(ValueError, np.vecdot, a, b, axes=[0, 1])
# matrix_multiply signature: '(m,n),(n,p)->(m,p)'
mm = umt.matrix_multiply
a = np.arange(12).reshape((2, 3, 2))
b = np.arange(8).reshape((2, 2, 2, 1)) + 1
# Sanity check.
c = mm(a, b)
assert_array_equal(c, np.matmul(a, b))
# Default axes.
c = mm(a, b, axes=[(-2, -1), (-2, -1), (-2, -1)])
assert_array_equal(c, np.matmul(a, b))
# Default with explicit axes.
c = mm(a, b, axes=[(1, 2), (2, 3), (2, 3)])
assert_array_equal(c, np.matmul(a, b))
# swap some axes.
c = mm(a, b, axes=[(0, -1), (1, 2), (-2, -1)])
assert_array_equal(c, np.matmul(a.transpose(1, 0, 2),
b.transpose(0, 3, 1, 2)))
# Default with output array.
c = np.empty((2, 2, 3, 1))
d = mm(a, b, out=c, axes=[(1, 2), (2, 3), (2, 3)])
assert_(c is d)
assert_array_equal(c, np.matmul(a, b))
# Transposed output array
c = np.empty((1, 2, 2, 3))
d = mm(a, b, out=c, axes=[(-2, -1), (-2, -1), (3, 0)])
assert_(c is d)
assert_array_equal(c, np.matmul(a, b).transpose(3, 0, 1, 2))
# Check errors for improperly constructed axes arguments.
# wrong argument
assert_raises(TypeError, mm, a, b, axis=1)
# axes should be list
assert_raises(TypeError, mm, a, b, axes=1)
assert_raises(TypeError, mm, a, b, axes=((-2, -1), (-2, -1), (-2, -1)))
# list needs to have right length
assert_raises(ValueError, mm, a, b, axes=[])
assert_raises(ValueError, mm, a, b, axes=[(-2, -1)])
# list should not contain None, or lists
assert_raises(TypeError, mm, a, b, axes=[None, None, None])
assert_raises(TypeError,
mm, a, b, axes=[[-2, -1], [-2, -1], [-2, -1]])
assert_raises(TypeError,
mm, a, b, axes=[(-2, -1), (-2, -1), [-2, -1]])
assert_raises(TypeError, mm, a, b, axes=[(-2, -1), (-2, -1), None])
# single integers are AxisErrors if more are required
assert_raises(AxisError, mm, a, b, axes=[-1, -1, -1])
assert_raises(AxisError, mm, a, b, axes=[(-2, -1), (-2, -1), -1])
# tuples should not have duplicated values
assert_raises(ValueError, mm, a, b, axes=[(-2, -1), (-2, -1), (-2, -2)])
# arrays should have enough axes.
z = np.zeros((2, 2))
assert_raises(ValueError, mm, z, z[0])
assert_raises(ValueError, mm, z, z, out=z[:, 0])
assert_raises(ValueError, mm, z[1], z, axes=[0, 1])
assert_raises(ValueError, mm, z, z, out=z[0], axes=[0, 1])
# Regular ufuncs should not accept axes.
assert_raises(TypeError, np.add, 1., 1., axes=[0])
# should be able to deal with bad unrelated kwargs.
assert_raises(TypeError, mm, z, z, axes=[0, 1], parrot=True)
def test_axis_argument(self):
# vecdot signature: '(n),(n)->()'
a = np.arange(27.).reshape((3, 3, 3))
b = np.arange(10., 19.).reshape((3, 1, 3))
c = np.vecdot(a, b)
assert_array_equal(c, (a * b).sum(-1))
c = np.vecdot(a, b, axis=-1)
assert_array_equal(c, (a * b).sum(-1))
out = np.zeros_like(c)
d = np.vecdot(a, b, axis=-1, out=out)
assert_(d is out)
assert_array_equal(d, c)
c = np.vecdot(a, b, axis=0)
assert_array_equal(c, (a * b).sum(0))
# Sanity checks on innerwt and cumsum.
a = np.arange(6).reshape((2, 3))
b = np.arange(10, 16).reshape((2, 3))
w = np.arange(20, 26).reshape((2, 3))
assert_array_equal(umt.innerwt(a, b, w, axis=0),
np.sum(a * b * w, axis=0))
assert_array_equal(umt.cumsum(a, axis=0), np.cumsum(a, axis=0))
assert_array_equal(umt.cumsum(a, axis=-1), np.cumsum(a, axis=-1))
out = np.empty_like(a)
b = umt.cumsum(a, out=out, axis=0)
assert_(out is b)
assert_array_equal(b, np.cumsum(a, axis=0))
b = umt.cumsum(a, out=out, axis=1)
assert_(out is b)
assert_array_equal(b, np.cumsum(a, axis=-1))
# Check errors.
# Cannot pass in both axis and axes.
assert_raises(TypeError, np.vecdot, a, b, axis=0, axes=[0, 0])
# Not an integer.
assert_raises(TypeError, np.vecdot, a, b, axis=[0])
# more than 1 core dimensions.
mm = umt.matrix_multiply
assert_raises(TypeError, mm, a, b, axis=1)
# Output wrong size in axis.
out = np.empty((1, 2, 3), dtype=a.dtype)
assert_raises(ValueError, umt.cumsum, a, out=out, axis=0)
# Regular ufuncs should not accept axis.
assert_raises(TypeError, np.add, 1., 1., axis=0)
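    def test_gufunc_axis_keyword_sketch(self):
        # Supplementary sketch, not part of the upstream suite: for a gufunc
        # with a single core dimension per operand, `axis=` is shorthand for
        # the fully spelled-out `axes=` form. The method name is our own.
        a = np.arange(12.).reshape(3, 4)
        b = np.ones((3, 4))
        assert_array_equal(np.vecdot(a, b, axis=0),
                           np.vecdot(a, b, axes=[(0,), (0,), ()]))
        assert_array_equal(np.vecdot(a, b, axis=0), a.sum(axis=0))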
def test_keepdims_argument(self):
# vecdot signature: '(n),(n)->()'
a = np.arange(27.).reshape((3, 3, 3))
b = np.arange(10., 19.).reshape((3, 1, 3))
c = np.vecdot(a, b)
assert_array_equal(c, (a * b).sum(-1))
c = np.vecdot(a, b, keepdims=False)
assert_array_equal(c, (a * b).sum(-1))
c = np.vecdot(a, b, keepdims=True)
assert_array_equal(c, (a * b).sum(-1, keepdims=True))
out = np.zeros_like(c)
d = np.vecdot(a, b, keepdims=True, out=out)
assert_(d is out)
assert_array_equal(d, c)
# Now combined with axis and axes.
c = np.vecdot(a, b, axis=-1, keepdims=False)
assert_array_equal(c, (a * b).sum(-1, keepdims=False))
c = np.vecdot(a, b, axis=-1, keepdims=True)
assert_array_equal(c, (a * b).sum(-1, keepdims=True))
c = np.vecdot(a, b, axis=0, keepdims=False)
assert_array_equal(c, (a * b).sum(0, keepdims=False))
c = np.vecdot(a, b, axis=0, keepdims=True)
assert_array_equal(c, (a * b).sum(0, keepdims=True))
c = np.vecdot(a, b, axes=[(-1,), (-1,), ()], keepdims=False)
assert_array_equal(c, (a * b).sum(-1))
c = np.vecdot(a, b, axes=[(-1,), (-1,), (-1,)], keepdims=True)
assert_array_equal(c, (a * b).sum(-1, keepdims=True))
c = np.vecdot(a, b, axes=[0, 0], keepdims=False)
assert_array_equal(c, (a * b).sum(0))
c = np.vecdot(a, b, axes=[0, 0, 0], keepdims=True)
assert_array_equal(c, (a * b).sum(0, keepdims=True))
c = np.vecdot(a, b, axes=[0, 2], keepdims=False)
assert_array_equal(c, (a.transpose(1, 2, 0) * b).sum(-1))
c = np.vecdot(a, b, axes=[0, 2], keepdims=True)
assert_array_equal(c, (a.transpose(1, 2, 0) * b).sum(-1,
keepdims=True))
c = np.vecdot(a, b, axes=[0, 2, 2], keepdims=True)
assert_array_equal(c, (a.transpose(1, 2, 0) * b).sum(-1,
keepdims=True))
c = np.vecdot(a, b, axes=[0, 2, 0], keepdims=True)
assert_array_equal(c, (a * b.transpose(2, 0, 1)).sum(0, keepdims=True))
# Hardly useful, but should work.
c = np.vecdot(a, b, axes=[0, 2, 1], keepdims=True)
assert_array_equal(c, (a.transpose(1, 0, 2) * b.transpose(0, 2, 1))
.sum(1, keepdims=True))
# Check with two core dimensions.
a = np.eye(3) * np.arange(4.)[:, np.newaxis, np.newaxis]
expected = uml.det(a)
c = uml.det(a, keepdims=False)
assert_array_equal(c, expected)
c = uml.det(a, keepdims=True)
assert_array_equal(c, expected[:, np.newaxis, np.newaxis])
a = np.eye(3) * np.arange(4.)[:, np.newaxis, np.newaxis]
expected_s, expected_l = uml.slogdet(a)
cs, cl = uml.slogdet(a, keepdims=False)
assert_array_equal(cs, expected_s)
assert_array_equal(cl, expected_l)
cs, cl = uml.slogdet(a, keepdims=True)
assert_array_equal(cs, expected_s[:, np.newaxis, np.newaxis])
assert_array_equal(cl, expected_l[:, np.newaxis, np.newaxis])
# Sanity check on innerwt.
a = np.arange(6).reshape((2, 3))
b = np.arange(10, 16).reshape((2, 3))
w = np.arange(20, 26).reshape((2, 3))
assert_array_equal(umt.innerwt(a, b, w, keepdims=True),
np.sum(a * b * w, axis=-1, keepdims=True))
assert_array_equal(umt.innerwt(a, b, w, axis=0, keepdims=True),
np.sum(a * b * w, axis=0, keepdims=True))
# Check errors.
# Not a boolean
assert_raises(TypeError, np.vecdot, a, b, keepdims='true')
# More than 1 core dimension, and core output dimensions.
mm = umt.matrix_multiply
assert_raises(TypeError, mm, a, b, keepdims=True)
assert_raises(TypeError, mm, a, b, keepdims=False)
# Regular ufuncs should not accept keepdims.
assert_raises(TypeError, np.add, 1., 1., keepdims=False)
def test_innerwt(self):
a = np.arange(6).reshape((2, 3))
b = np.arange(10, 16).reshape((2, 3))
w = np.arange(20, 26).reshape((2, 3))
assert_array_equal(umt.innerwt(a, b, w), np.sum(a*b*w, axis=-1))
a = np.arange(100, 124).reshape((2, 3, 4))
b = np.arange(200, 224).reshape((2, 3, 4))
w = np.arange(300, 324).reshape((2, 3, 4))
assert_array_equal(umt.innerwt(a, b, w), np.sum(a*b*w, axis=-1))
def test_innerwt_empty(self):
"""Test generalized ufunc with zero-sized operands"""
a = np.array([], dtype='f8')
b = np.array([], dtype='f8')
w = np.array([], dtype='f8')
assert_array_equal(umt.innerwt(a, b, w), np.sum(a*b*w, axis=-1))
def test_cross1d(self):
"""Test with fixed-sized signature."""
a = np.eye(3)
assert_array_equal(umt.cross1d(a, a), np.zeros((3, 3)))
out = np.zeros((3, 3))
result = umt.cross1d(a[0], a, out)
assert_(result is out)
assert_array_equal(result, np.vstack((np.zeros(3), a[2], -a[1])))
assert_raises(ValueError, umt.cross1d, np.eye(4), np.eye(4))
assert_raises(ValueError, umt.cross1d, a, np.arange(4.))
# Wrong output core dimension.
assert_raises(ValueError, umt.cross1d, a, np.arange(3.), np.zeros((3, 4)))
# Wrong output broadcast dimension (see gh-15139).
assert_raises(ValueError, umt.cross1d, a, np.arange(3.), np.zeros(3))
def test_can_ignore_signature(self):
# Comparing the effects of ? in signature:
# matrix_multiply: (m,n),(n,p)->(m,p) # all must be there.
# matmul: (m?,n),(n,p?)->(m?,p?) # allow missing m, p.
mat = np.arange(12).reshape((2, 3, 2))
single_vec = np.arange(2)
col_vec = single_vec[:, np.newaxis]
col_vec_array = np.arange(8).reshape((2, 2, 2, 1)) + 1
# matrix @ single column vector with proper dimension
mm_col_vec = umt.matrix_multiply(mat, col_vec)
# matmul does the same thing
matmul_col_vec = umt.matmul(mat, col_vec)
assert_array_equal(matmul_col_vec, mm_col_vec)
# matrix @ vector without dimension making it a column vector.
# matrix multiply fails -> missing core dim.
assert_raises(ValueError, umt.matrix_multiply, mat, single_vec)
# matmul mimicker passes, and returns a vector.
matmul_col = umt.matmul(mat, single_vec)
assert_array_equal(matmul_col, mm_col_vec.squeeze())
# Now with a column array: same as for column vector,
# broadcasting sensibly.
mm_col_vec = umt.matrix_multiply(mat, col_vec_array)
matmul_col_vec = umt.matmul(mat, col_vec_array)
assert_array_equal(matmul_col_vec, mm_col_vec)
# As above, but for row vector
single_vec = np.arange(3)
row_vec = single_vec[np.newaxis, :]
row_vec_array = np.arange(24).reshape((4, 2, 1, 1, 3)) + 1
# row vector @ matrix
mm_row_vec = umt.matrix_multiply(row_vec, mat)
matmul_row_vec = umt.matmul(row_vec, mat)
assert_array_equal(matmul_row_vec, mm_row_vec)
# single row vector @ matrix
assert_raises(ValueError, umt.matrix_multiply, single_vec, mat)
matmul_row = umt.matmul(single_vec, mat)
assert_array_equal(matmul_row, mm_row_vec.squeeze())
# row vector array @ matrix
mm_row_vec = umt.matrix_multiply(row_vec_array, mat)
matmul_row_vec = umt.matmul(row_vec_array, mat)
assert_array_equal(matmul_row_vec, mm_row_vec)
# Now for vector combinations
# row vector @ column vector
col_vec = row_vec.T
col_vec_array = row_vec_array.swapaxes(-2, -1)
mm_row_col_vec = umt.matrix_multiply(row_vec, col_vec)
matmul_row_col_vec = umt.matmul(row_vec, col_vec)
assert_array_equal(matmul_row_col_vec, mm_row_col_vec)
# single row vector @ single col vector
assert_raises(ValueError, umt.matrix_multiply, single_vec, single_vec)
matmul_row_col = umt.matmul(single_vec, single_vec)
assert_array_equal(matmul_row_col, mm_row_col_vec.squeeze())
        # row vector array @ column vector array
mm_row_col_array = umt.matrix_multiply(row_vec_array, col_vec_array)
matmul_row_col_array = umt.matmul(row_vec_array, col_vec_array)
assert_array_equal(matmul_row_col_array, mm_row_col_array)
# Finally, check that things are *not* squeezed if one gives an
# output.
out = np.zeros_like(mm_row_col_array)
out = umt.matrix_multiply(row_vec_array, col_vec_array, out=out)
assert_array_equal(out, mm_row_col_array)
out[:] = 0
out = umt.matmul(row_vec_array, col_vec_array, out=out)
assert_array_equal(out, mm_row_col_array)
# And check one cannot put missing dimensions back.
out = np.zeros_like(mm_row_col_vec)
assert_raises(ValueError, umt.matrix_multiply, single_vec, single_vec,
out)
# But fine for matmul, since it is just a broadcast.
out = umt.matmul(single_vec, single_vec, out)
assert_array_equal(out, mm_row_col_vec.squeeze())
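    def test_matmul_optional_core_dims_sketch(self):
        # Supplementary sketch, not part of the upstream suite: np.matmul
        # itself uses the (m?,n),(n,p?)->(m?,p?) signature, so a 1-d operand
        # simply drops the corresponding core dimension instead of raising.
        # The method name is our own.
        mat = np.arange(6.).reshape(2, 3)
        vec = np.arange(3.)
        res = np.matmul(mat, vec)
        assert res.shape == (2,)
        assert_array_equal(res, mat @ vec)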
def test_matrix_multiply(self):
self.compare_matrix_multiply_results(np.int64)
self.compare_matrix_multiply_results(np.double)
def test_matrix_multiply_umath_empty(self):
res = umt.matrix_multiply(np.ones((0, 10)), np.ones((10, 0)))
assert_array_equal(res, np.zeros((0, 0)))
res = umt.matrix_multiply(np.ones((10, 0)), np.ones((0, 10)))
assert_array_equal(res, np.zeros((10, 10)))
def compare_matrix_multiply_results(self, tp):
d1 = np.array(np.random.rand(2, 3, 4), dtype=tp)
d2 = np.array(np.random.rand(2, 3, 4), dtype=tp)
msg = "matrix multiply on type %s" % d1.dtype.name
def permute_n(n):
if n == 1:
return ([0],)
ret = ()
base = permute_n(n-1)
for perm in base:
for i in range(n):
new = perm + [n-1]
new[n-1] = new[i]
new[i] = n-1
ret += (new,)
return ret
def slice_n(n):
if n == 0:
return ((),)
ret = ()
base = slice_n(n-1)
for sl in base:
ret += (sl+(slice(None),),)
ret += (sl+(slice(0, 1),),)
return ret
def broadcastable(s1, s2):
return s1 == s2 or s1 == 1 or s2 == 1
permute_3 = permute_n(3)
slice_3 = slice_n(3) + ((slice(None, None, -1),)*3,)
ref = True
for p1 in permute_3:
for p2 in permute_3:
for s1 in slice_3:
for s2 in slice_3:
a1 = d1.transpose(p1)[s1]
a2 = d2.transpose(p2)[s2]
ref = ref and a1.base is not None
ref = ref and a2.base is not None
if (a1.shape[-1] == a2.shape[-2] and
broadcastable(a1.shape[0], a2.shape[0])):
assert_array_almost_equal(
umt.matrix_multiply(a1, a2),
np.sum(a2[..., np.newaxis].swapaxes(-3, -1) *
a1[..., np.newaxis,:], axis=-1),
err_msg=msg + ' %s %s' % (str(a1.shape),
str(a2.shape)))
assert_equal(ref, True, err_msg="reference check")
def test_euclidean_pdist(self):
a = np.arange(12, dtype=float).reshape(4, 3)
out = np.empty((a.shape[0] * (a.shape[0] - 1) // 2,), dtype=a.dtype)
umt.euclidean_pdist(a, out)
b = np.sqrt(np.sum((a[:, None] - a)**2, axis=-1))
b = b[~np.tri(a.shape[0], dtype=bool)]
assert_almost_equal(out, b)
# An output array is required to determine p with signature (n,d)->(p)
assert_raises(ValueError, umt.euclidean_pdist, a)
def test_cumsum(self):
a = np.arange(10)
result = umt.cumsum(a)
assert_array_equal(result, a.cumsum())
def test_object_logical(self):
a = np.array([3, None, True, False, "test", ""], dtype=object)
assert_equal(np.logical_or(a, None),
np.array([x or None for x in a], dtype=object))
assert_equal(np.logical_or(a, True),
np.array([x or True for x in a], dtype=object))
assert_equal(np.logical_or(a, 12),
np.array([x or 12 for x in a], dtype=object))
assert_equal(np.logical_or(a, "blah"),
np.array([x or "blah" for x in a], dtype=object))
assert_equal(np.logical_and(a, None),
np.array([x and None for x in a], dtype=object))
assert_equal(np.logical_and(a, True),
np.array([x and True for x in a], dtype=object))
assert_equal(np.logical_and(a, 12),
np.array([x and 12 for x in a], dtype=object))
assert_equal(np.logical_and(a, "blah"),
np.array([x and "blah" for x in a], dtype=object))
assert_equal(np.logical_not(a),
np.array([not x for x in a], dtype=object))
assert_equal(np.logical_or.reduce(a), 3)
assert_equal(np.logical_and.reduce(a), None)
def test_object_comparison(self):
class HasComparisons:
def __eq__(self, other):
return '=='
arr0d = np.array(HasComparisons())
assert_equal(arr0d == arr0d, True)
assert_equal(np.equal(arr0d, arr0d), True) # normal behavior is a cast
arr1d = np.array([HasComparisons()])
assert_equal(arr1d == arr1d, np.array([True]))
assert_equal(np.equal(arr1d, arr1d), np.array([True])) # normal behavior is a cast
assert_equal(np.equal(arr1d, arr1d, dtype=object), np.array(['==']))
def test_object_array_reduction(self):
# Reductions on object arrays
a = np.array(['a', 'b', 'c'], dtype=object)
assert_equal(np.sum(a), 'abc')
assert_equal(np.max(a), 'c')
assert_equal(np.min(a), 'a')
a = np.array([True, False, True], dtype=object)
assert_equal(np.sum(a), 2)
assert_equal(np.prod(a), 0)
assert_equal(np.any(a), True)
assert_equal(np.all(a), False)
assert_equal(np.max(a), True)
assert_equal(np.min(a), False)
assert_equal(np.array([[1]], dtype=object).sum(), 1)
assert_equal(np.array([[[1, 2]]], dtype=object).sum((0, 1)), [1, 2])
assert_equal(np.array([1], dtype=object).sum(initial=1), 2)
assert_equal(np.array([[1], [2, 3]], dtype=object)
.sum(initial=[0], where=[False, True]), [0, 2, 3])
def test_object_array_accumulate_inplace(self):
# Checks that in-place accumulates work, see also gh-7402
arr = np.ones(4, dtype=object)
arr[:] = [[1] for i in range(4)]
# Twice reproduced also for tuples:
np.add.accumulate(arr, out=arr)
np.add.accumulate(arr, out=arr)
assert_array_equal(arr,
np.array([[1]*i for i in [1, 3, 6, 10]], dtype=object),
)
# And the same if the axis argument is used
arr = np.ones((2, 4), dtype=object)
arr[0, :] = [[2] for i in range(4)]
np.add.accumulate(arr, out=arr, axis=-1)
np.add.accumulate(arr, out=arr, axis=-1)
assert_array_equal(arr[0, :],
np.array([[2]*i for i in [1, 3, 6, 10]], dtype=object),
)
def test_object_array_accumulate_failure(self):
# Typical accumulation on object works as expected:
res = np.add.accumulate(np.array([1, 0, 2], dtype=object))
assert_array_equal(res, np.array([1, 1, 3], dtype=object))
# But errors are propagated from the inner-loop if they occur:
with pytest.raises(TypeError):
np.add.accumulate([1, None, 2])
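    def test_accumulate_sketch(self):
        # Supplementary sketch, not part of the upstream suite: accumulate
        # keeps every intermediate result of the reduction. The method name
        # is our own.
        a = np.array([2, 3, 4])
        assert_array_equal(np.multiply.accumulate(a), [2, 6, 24])
        assert_array_equal(np.add.accumulate(a), np.cumsum(a))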
def test_object_array_reduceat_inplace(self):
# Checks that in-place reduceats work, see also gh-7465
arr = np.empty(4, dtype=object)
arr[:] = [[1] for i in range(4)]
out = np.empty(4, dtype=object)
out[:] = [[1] for i in range(4)]
np.add.reduceat(arr, np.arange(4), out=arr)
np.add.reduceat(arr, np.arange(4), out=arr)
assert_array_equal(arr, out)
# And the same if the axis argument is used
arr = np.ones((2, 4), dtype=object)
arr[0, :] = [[2] for i in range(4)]
out = np.ones((2, 4), dtype=object)
out[0, :] = [[2] for i in range(4)]
np.add.reduceat(arr, np.arange(4), out=arr, axis=-1)
np.add.reduceat(arr, np.arange(4), out=arr, axis=-1)
assert_array_equal(arr, out)
def test_object_array_reduceat_failure(self):
# Reduceat works as expected when no invalid operation occurs (None is
# not involved in an operation here)
res = np.add.reduceat(np.array([1, None, 2], dtype=object), [1, 2])
assert_array_equal(res, np.array([None, 2], dtype=object))
# But errors when None would be involved in an operation:
with pytest.raises(TypeError):
np.add.reduceat([1, None, 2], [0, 2])
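    def test_reduceat_sketch(self):
        # Supplementary sketch, not part of the upstream suite: with strictly
        # increasing indices, reduceat reduces over the slices delimited by
        # consecutive indices, the last slice running to the end of the
        # array. The method name is our own.
        a = np.arange(8)
        res = np.add.reduceat(a, [0, 4, 6])
        assert_array_equal(res, [a[0:4].sum(), a[4:6].sum(), a[6:].sum()])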
def test_zerosize_reduction(self):
# Test with default dtype and object dtype
for a in [[], np.array([], dtype=object)]:
assert_equal(np.sum(a), 0)
assert_equal(np.prod(a), 1)
assert_equal(np.any(a), False)
assert_equal(np.all(a), True)
assert_raises(ValueError, np.max, a)
assert_raises(ValueError, np.min, a)
def test_axis_out_of_bounds(self):
a = np.array([False, False])
assert_raises(AxisError, a.all, axis=1)
a = np.array([False, False])
assert_raises(AxisError, a.all, axis=-2)
a = np.array([False, False])
assert_raises(AxisError, a.any, axis=1)
a = np.array([False, False])
assert_raises(AxisError, a.any, axis=-2)
def test_scalar_reduction(self):
# The functions 'sum', 'prod', etc allow specifying axis=0
# even for scalars
assert_equal(np.sum(3, axis=0), 3)
assert_equal(np.prod(3.5, axis=0), 3.5)
assert_equal(np.any(True, axis=0), True)
assert_equal(np.all(False, axis=0), False)
assert_equal(np.max(3, axis=0), 3)
assert_equal(np.min(2.5, axis=0), 2.5)
# Check scalar behaviour for ufuncs without an identity
assert_equal(np.power.reduce(3), 3)
# Make sure that scalars are coming out from this operation
assert_(type(np.prod(np.float32(2.5), axis=0)) is np.float32)
assert_(type(np.sum(np.float32(2.5), axis=0)) is np.float32)
assert_(type(np.max(np.float32(2.5), axis=0)) is np.float32)
assert_(type(np.min(np.float32(2.5), axis=0)) is np.float32)
# check if scalars/0-d arrays get cast
assert_(type(np.any(0, axis=0)) is np.bool)
# assert that 0-d arrays get wrapped
class MyArray(np.ndarray):
pass
a = np.array(1).view(MyArray)
assert_(type(np.any(a)) is MyArray)
def test_casting_out_param(self):
# Test that it's possible to do casts on output
a = np.ones((200, 100), np.int64)
b = np.ones((200, 100), np.int64)
c = np.ones((200, 100), np.float64)
np.add(a, b, out=c)
assert_equal(c, 2)
a = np.zeros(65536)
b = np.zeros(65536, dtype=np.float32)
np.subtract(a, 0, out=b)
assert_equal(b, 0)
def test_where_param(self):
# Test that the where= ufunc parameter works with regular arrays
a = np.arange(7)
b = np.ones(7)
c = np.zeros(7)
np.add(a, b, out=c, where=(a % 2 == 1))
assert_equal(c, [0, 2, 0, 4, 0, 6, 0])
a = np.arange(4).reshape(2, 2) + 2
np.power(a, [2, 3], out=a, where=[[0, 1], [1, 0]])
assert_equal(a, [[2, 27], [16, 5]])
# Broadcasting the where= parameter
np.subtract(a, 2, out=a, where=[True, False])
assert_equal(a, [[0, 27], [14, 5]])
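    def test_where_leaves_out_untouched_sketch(self):
        # Supplementary sketch, not part of the upstream suite: positions
        # excluded by `where=` are left untouched, so `out` should be
        # pre-initialized to get well-defined values there. The method name
        # is our own.
        a = np.array([1., 4., 9., 16.])
        out = np.full_like(a, -1.)
        np.sqrt(a, out=out, where=[True, False, True, False])
        assert_array_equal(out, [1., -1., 3., -1.])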
def test_where_param_buffer_output(self):
# This test is temporarily skipped because it requires
# adding masking features to the nditer to work properly
# With casting on output
a = np.ones(10, np.int64)
b = np.ones(10, np.int64)
c = 1.5 * np.ones(10, np.float64)
np.add(a, b, out=c, where=[1, 0, 0, 1, 0, 0, 1, 1, 1, 0])
assert_equal(c, [2, 1.5, 1.5, 2, 1.5, 1.5, 2, 2, 2, 1.5])
def test_where_param_alloc(self):
# With casting and allocated output
a = np.array([1], dtype=np.int64)
m = np.array([True], dtype=bool)
assert_equal(np.sqrt(a, where=m), [1])
# No casting and allocated output
a = np.array([1], dtype=np.float64)
m = np.array([True], dtype=bool)
assert_equal(np.sqrt(a, where=m), [1])
def test_where_with_broadcasting(self):
# See gh-17198
a = np.random.random((5000, 4))
b = np.random.random((5000, 1))
where = a > 0.3
out = np.full_like(a, 0)
np.less(a, b, where=where, out=out)
b_where = np.broadcast_to(b, a.shape)[where]
assert_array_equal((a[where] < b_where), out[where].astype(bool))
assert not out[~where].any() # outside mask, out remains all 0
@staticmethod
def identityless_reduce_arrs():
yield np.empty((2, 3, 4), order='C')
yield np.empty((2, 3, 4), order='F')
        # Mixed order (reduce order differs from the outer order)
yield np.empty((2, 4, 3), order='C').swapaxes(1, 2)
# Reversed order
yield np.empty((2, 3, 4), order='C')[::-1, ::-1, ::-1]
# Not contiguous
yield np.empty((3, 5, 4), order='C').swapaxes(1, 2)[1:, 1:, 1:]
# Not contiguous and not aligned
a = np.empty((3*4*5*8 + 1,), dtype='i1')
a = a[1:].view(dtype='f8')
a.shape = (3, 4, 5)
a = a[1:, 1:, 1:]
yield a
@pytest.mark.parametrize("a", identityless_reduce_arrs())
@pytest.mark.parametrize("pos", [(1, 0, 0), (0, 1, 0), (0, 0, 1)])
def test_identityless_reduction(self, a, pos):
# np.minimum.reduce is an identityless reduction
a[...] = 1
a[pos] = 0
for axis in [None, (0, 1), (0, 2), (1, 2), 0, 1, 2, ()]:
if axis is None:
axes = np.array([], dtype=np.intp)
else:
axes = np.delete(np.arange(a.ndim), axis)
expected_pos = tuple(np.array(pos)[axes])
expected = np.ones(np.array(a.shape)[axes])
expected[expected_pos] = 0
res = np.minimum.reduce(a, axis=axis)
assert_equal(res, expected, strict=True)
res = np.full_like(res, np.nan)
np.minimum.reduce(a, axis=axis, out=res)
assert_equal(res, expected, strict=True)
@requires_memory(6 * 1024**3)
@pytest.mark.skipif(sys.maxsize < 2**32,
reason="test array too large for 32bit platform")
def test_identityless_reduction_huge_array(self):
# Regression test for gh-20921 (copying identity incorrectly failed)
arr = np.zeros((2, 2**31), 'uint8')
arr[:, 0] = [1, 3]
arr[:, -1] = [4, 1]
res = np.maximum.reduce(arr, axis=0)
del arr
assert res[0] == 3
assert res[-1] == 4
def test_reduce_identity_depends_on_loop(self):
"""
The type of the result should always depend on the selected loop, not
necessarily the output (only relevant for object arrays).
"""
# For an object loop, the default value 0 with type int is used:
assert type(np.add.reduce([], dtype=object)) is int
out = np.array(None, dtype=object)
# When the loop is float64 but `out` is object this does not happen,
# the result is float64 cast to object (which gives Python `float`).
np.add.reduce([], out=out, dtype=np.float64)
assert type(out[()]) is float
def test_initial_reduction(self):
# np.minimum.reduce is an identityless reduction
# For cases like np.maximum(np.abs(...), initial=0)
# More generally, a supremum over non-negative numbers.
assert_equal(np.maximum.reduce([], initial=0), 0)
# For cases like reduction of an empty array over the reals.
assert_equal(np.minimum.reduce([], initial=np.inf), np.inf)
assert_equal(np.maximum.reduce([], initial=-np.inf), -np.inf)
# Random tests
assert_equal(np.minimum.reduce([5], initial=4), 4)
assert_equal(np.maximum.reduce([4], initial=5), 5)
assert_equal(np.maximum.reduce([5], initial=4), 5)
assert_equal(np.minimum.reduce([4], initial=5), 4)
# Check initial=None raises ValueError for both types of ufunc reductions
assert_raises(ValueError, np.minimum.reduce, [], initial=None)
assert_raises(ValueError, np.add.reduce, [], initial=None)
# Also in the somewhat special object case:
with pytest.raises(ValueError):
np.add.reduce([], initial=None, dtype=object)
# Check that np._NoValue gives default behavior.
assert_equal(np.add.reduce([], initial=np._NoValue), 0)
# Check that initial kwarg behaves as intended for dtype=object
a = np.array([10], dtype=object)
res = np.add.reduce(a, initial=5)
assert_equal(res, 15)
def test_empty_reduction_and_identity(self):
arr = np.zeros((0, 5))
        # OK, since the reduction itself is *not* empty, the result is an empty array:
assert np.true_divide.reduce(arr, axis=1).shape == (0,)
# Not OK, the reduction itself is empty and we have no identity
with pytest.raises(ValueError):
np.true_divide.reduce(arr, axis=0)
# Test that an empty reduction fails also if the result is empty
arr = np.zeros((0, 0, 5))
with pytest.raises(ValueError):
np.true_divide.reduce(arr, axis=1)
# Division reduction makes sense with `initial=1` (empty or not):
res = np.true_divide.reduce(arr, axis=1, initial=1)
assert_array_equal(res, np.ones((0, 5)))
@pytest.mark.parametrize('axis', (0, 1, None))
@pytest.mark.parametrize('where', (np.array([False, True, True]),
np.array([[True], [False], [True]]),
np.array([[True, False, False],
[False, True, False],
[False, True, True]])))
def test_reduction_with_where(self, axis, where):
a = np.arange(9.).reshape(3, 3)
a_copy = a.copy()
a_check = np.zeros_like(a)
np.positive(a, out=a_check, where=where)
res = np.add.reduce(a, axis=axis, where=where)
check = a_check.sum(axis)
assert_equal(res, check)
# Check we do not overwrite elements of a internally.
assert_array_equal(a, a_copy)
@pytest.mark.parametrize(('axis', 'where'),
((0, np.array([True, False, True])),
(1, [True, True, False]),
(None, True)))
@pytest.mark.parametrize('initial', (-np.inf, 5.))
def test_reduction_with_where_and_initial(self, axis, where, initial):
a = np.arange(9.).reshape(3, 3)
a_copy = a.copy()
a_check = np.full(a.shape, -np.inf)
np.positive(a, out=a_check, where=where)
res = np.maximum.reduce(a, axis=axis, where=where, initial=initial)
check = a_check.max(axis, initial=initial)
assert_equal(res, check)
def test_reduction_where_initial_needed(self):
a = np.arange(9.).reshape(3, 3)
m = [False, True, False]
assert_raises(ValueError, np.maximum.reduce, a, where=m)
def test_identityless_reduction_nonreorderable(self):
a = np.array([[8.0, 2.0, 2.0], [1.0, 0.5, 0.25]])
res = np.divide.reduce(a, axis=0)
assert_equal(res, [8.0, 4.0, 8.0])
res = np.divide.reduce(a, axis=1)
assert_equal(res, [2.0, 8.0])
res = np.divide.reduce(a, axis=())
assert_equal(res, a)
assert_raises(ValueError, np.divide.reduce, a, axis=(0, 1))
def test_reduce_zero_axis(self):
# If we have a n x m array and do a reduction with axis=1, then we are
# doing n reductions, and each reduction takes an m-element array. For
# a reduction operation without an identity, then:
# n > 0, m > 0: fine
# n = 0, m > 0: fine, doing 0 reductions of m-element arrays
# n > 0, m = 0: can't reduce a 0-element array, ValueError
# n = 0, m = 0: can't reduce a 0-element array, ValueError (for
# consistency with the above case)
# This test doesn't actually look at return values, it just checks to
        # make sure that we get an error in exactly those cases where we
# expect one, and assumes the calculations themselves are done
# correctly.
def ok(f, *args, **kwargs):
f(*args, **kwargs)
def err(f, *args, **kwargs):
assert_raises(ValueError, f, *args, **kwargs)
def t(expect, func, n, m):
expect(func, np.zeros((n, m)), axis=1)
expect(func, np.zeros((m, n)), axis=0)
expect(func, np.zeros((n // 2, n // 2, m)), axis=2)
expect(func, np.zeros((n // 2, m, n // 2)), axis=1)
expect(func, np.zeros((n, m // 2, m // 2)), axis=(1, 2))
expect(func, np.zeros((m // 2, n, m // 2)), axis=(0, 2))
expect(func, np.zeros((m // 3, m // 3, m // 3,
n // 2, n // 2)),
axis=(0, 1, 2))
# Check what happens if the inner (resp. outer) dimensions are a
# mix of zero and non-zero:
expect(func, np.zeros((10, m, n)), axis=(0, 1))
expect(func, np.zeros((10, n, m)), axis=(0, 2))
expect(func, np.zeros((m, 10, n)), axis=0)
expect(func, np.zeros((10, m, n)), axis=1)
expect(func, np.zeros((10, n, m)), axis=2)
# np.maximum is just an arbitrary ufunc with no reduction identity
assert_equal(np.maximum.identity, None)
t(ok, np.maximum.reduce, 30, 30)
t(ok, np.maximum.reduce, 0, 30)
t(err, np.maximum.reduce, 30, 0)
t(err, np.maximum.reduce, 0, 0)
err(np.maximum.reduce, [])
np.maximum.reduce(np.zeros((0, 0)), axis=())
# all of the combinations are fine for a reduction that has an
# identity
t(ok, np.add.reduce, 30, 30)
t(ok, np.add.reduce, 0, 30)
t(ok, np.add.reduce, 30, 0)
t(ok, np.add.reduce, 0, 0)
np.add.reduce([])
np.add.reduce(np.zeros((0, 0)), axis=())
# OTOH, accumulate always makes sense for any combination of n and m,
# because it maps an m-element array to an m-element array. These
# tests are simpler because accumulate doesn't accept multiple axes.
for uf in (np.maximum, np.add):
uf.accumulate(np.zeros((30, 0)), axis=0)
uf.accumulate(np.zeros((0, 30)), axis=0)
uf.accumulate(np.zeros((30, 30)), axis=0)
uf.accumulate(np.zeros((0, 0)), axis=0)
def test_safe_casting(self):
# In old versions of numpy, in-place operations used the 'unsafe'
# casting rules. In versions >= 1.10, 'same_kind' is the
        # default, and an exception is raised instead of a warning
        # when 'same_kind' is not satisfied.
a = np.array([1, 2, 3], dtype=int)
# Non-in-place addition is fine
assert_array_equal(assert_no_warnings(np.add, a, 1.1),
[2.1, 3.1, 4.1])
assert_raises(TypeError, np.add, a, 1.1, out=a)
def add_inplace(a, b):
a += b
assert_raises(TypeError, add_inplace, a, 1.1)
# Make sure that explicitly overriding the exception is allowed:
assert_no_warnings(np.add, a, 1.1, out=a, casting="unsafe")
assert_array_equal(a, [2, 3, 4])
def test_ufunc_custom_out(self):
# Test ufunc with built in input types and custom output type
a = np.array([0, 1, 2], dtype='i8')
b = np.array([0, 1, 2], dtype='i8')
c = np.empty(3, dtype=_rational_tests.rational)
# Output must be specified so numpy knows what
# ufunc signature to look for
result = _rational_tests.test_add(a, b, c)
target = np.array([0, 2, 4], dtype=_rational_tests.rational)
assert_equal(result, target)
# The new resolution means that we can (usually) find custom loops
# as long as they match exactly:
result = _rational_tests.test_add(a, b)
assert_equal(result, target)
        # This works even more generally, so long as the default common-dtype
# promoter works out:
result = _rational_tests.test_add(a, b.astype(np.uint16), out=c)
assert_equal(result, target)
# This scalar path used to go into legacy promotion, but doesn't now:
result = _rational_tests.test_add(a, np.uint16(2))
target = np.array([2, 3, 4], dtype=_rational_tests.rational)
assert_equal(result, target)
def test_operand_flags(self):
a = np.arange(16, dtype=int).reshape(4, 4)
b = np.arange(9, dtype=int).reshape(3, 3)
opflag_tests.inplace_add(a[:-1, :-1], b)
assert_equal(a, np.array([[0, 2, 4, 3], [7, 9, 11, 7],
[14, 16, 18, 11], [12, 13, 14, 15]]))
a = np.array(0)
opflag_tests.inplace_add(a, 3)
assert_equal(a, 3)
opflag_tests.inplace_add(a, [3, 4])
assert_equal(a, 10)
def test_struct_ufunc(self):
import numpy._core._struct_ufunc_tests as struct_ufunc
a = np.array([(1, 2, 3)], dtype='u8,u8,u8')
b = np.array([(1, 2, 3)], dtype='u8,u8,u8')
result = struct_ufunc.add_triplet(a, b)
assert_equal(result, np.array([(2, 4, 6)], dtype='u8,u8,u8'))
assert_raises(RuntimeError, struct_ufunc.register_fail)
def test_custom_ufunc(self):
a = np.array(
[_rational_tests.rational(1, 2),
_rational_tests.rational(1, 3),
_rational_tests.rational(1, 4)],
dtype=_rational_tests.rational)
b = np.array(
[_rational_tests.rational(1, 2),
_rational_tests.rational(1, 3),
_rational_tests.rational(1, 4)],
dtype=_rational_tests.rational)
result = _rational_tests.test_add_rationals(a, b)
expected = np.array(
[_rational_tests.rational(1),
_rational_tests.rational(2, 3),
_rational_tests.rational(1, 2)],
dtype=_rational_tests.rational)
assert_equal(result, expected)
def test_custom_ufunc_forced_sig(self):
# gh-9351 - looking for a non-first userloop would previously hang
with assert_raises(TypeError):
np.multiply(_rational_tests.rational(1), 1,
signature=(_rational_tests.rational, int, None))
def test_custom_array_like(self):
class MyThing:
__array_priority__ = 1000
rmul_count = 0
getitem_count = 0
def __init__(self, shape):
self.shape = shape
def __len__(self):
return self.shape[0]
def __getitem__(self, i):
MyThing.getitem_count += 1
if not isinstance(i, tuple):
i = (i,)
if len(i) > self.ndim:
raise IndexError("boo")
return MyThing(self.shape[len(i):])
def __rmul__(self, other):
MyThing.rmul_count += 1
return self
np.float64(5)*MyThing((3, 3))
assert_(MyThing.rmul_count == 1, MyThing.rmul_count)
assert_(MyThing.getitem_count <= 2, MyThing.getitem_count)
@pytest.mark.parametrize("a", (
np.arange(10, dtype=int),
np.arange(10, dtype=_rational_tests.rational),
))
def test_ufunc_at_basic(self, a):
aa = a.copy()
np.add.at(aa, [2, 5, 2], 1)
assert_equal(aa, [0, 1, 4, 3, 4, 6, 6, 7, 8, 9])
with pytest.raises(ValueError):
# missing second operand
np.add.at(aa, [2, 5, 3])
aa = a.copy()
np.negative.at(aa, [2, 5, 3])
assert_equal(aa, [0, 1, -2, -3, 4, -5, 6, 7, 8, 9])
aa = a.copy()
b = np.array([100, 100, 100])
np.add.at(aa, [2, 5, 2], b)
assert_equal(aa, [0, 1, 202, 3, 4, 105, 6, 7, 8, 9])
with pytest.raises(ValueError):
# extraneous second operand
np.negative.at(a, [2, 5, 3], [1, 2, 3])
with pytest.raises(ValueError):
# second operand cannot be converted to an array
np.add.at(a, [2, 5, 3], [[1, 2], 1])
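    def test_ufunc_at_vs_fancy_indexing_sketch(self):
        # Supplementary sketch, not part of the upstream suite: unlike
        # `a[idx] += vals`, ufunc.at is unbuffered and accumulates every
        # occurrence of a repeated index. The method name is our own.
        a = np.zeros(3)
        idx = [0, 0, 0]
        a[idx] += 1.           # buffered: the repeated index counts once
        assert_array_equal(a, [1., 0., 0.])
        np.add.at(a, idx, 1.)  # unbuffered: each occurrence is applied
        assert_array_equal(a, [4., 0., 0.])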
# ufuncs with indexed loops for performance in ufunc.at
indexed_ufuncs = [np.add, np.subtract, np.multiply, np.floor_divide,
np.maximum, np.minimum, np.fmax, np.fmin]
@pytest.mark.parametrize(
"typecode", np.typecodes['AllInteger'] + np.typecodes['Float'])
@pytest.mark.parametrize("ufunc", indexed_ufuncs)
def test_ufunc_at_inner_loops(self, typecode, ufunc):
        if ufunc is np.floor_divide and typecode in np.typecodes['AllInteger']:
# Avoid divide-by-zero and inf for integer divide
a = np.ones(100, dtype=typecode)
indx = np.random.randint(100, size=30, dtype=np.intp)
vals = np.arange(1, 31, dtype=typecode)
else:
a = np.ones(1000, dtype=typecode)
indx = np.random.randint(1000, size=3000, dtype=np.intp)
vals = np.arange(3000, dtype=typecode)
atag = a.copy()
# Do the calculation twice and compare the answers
with warnings.catch_warnings(record=True) as w_at:
warnings.simplefilter('always')
ufunc.at(a, indx, vals)
with warnings.catch_warnings(record=True) as w_loop:
warnings.simplefilter('always')
for i, v in zip(indx, vals):
# Make sure all the work happens inside the ufunc
# in order to duplicate error/warning handling
ufunc(atag[i], v, out=atag[i:i+1], casting="unsafe")
assert_equal(atag, a)
# If w_loop warned, make sure w_at warned as well
if len(w_loop) > 0:
assert len(w_at) > 0
assert w_at[0].category == w_loop[0].category
assert str(w_at[0].message)[:10] == str(w_loop[0].message)[:10]
@pytest.mark.parametrize("typecode", np.typecodes['Complex'])
@pytest.mark.parametrize("ufunc", [np.add, np.subtract, np.multiply])
def test_ufunc_at_inner_loops_complex(self, typecode, ufunc):
a = np.ones(10, dtype=typecode)
indx = np.concatenate([np.ones(6, dtype=np.intp),
np.full(18, 4, dtype=np.intp)])
value = a.dtype.type(1j)
ufunc.at(a, indx, value)
expected = np.ones_like(a)
if ufunc is np.multiply:
expected[1] = expected[4] = -1
else:
expected[1] += 6 * (value if ufunc is np.add else -value)
expected[4] += 18 * (value if ufunc is np.add else -value)
assert_array_equal(a, expected)
def test_ufunc_at_ellipsis(self):
# Make sure the indexed loop check does not choke on iters
# with subspaces
arr = np.zeros(5)
np.add.at(arr, slice(None), np.ones(5))
assert_array_equal(arr, np.ones(5))
def test_ufunc_at_negative(self):
arr = np.ones(5, dtype=np.int32)
indx = np.arange(5)
umt.indexed_negative.at(arr, indx)
# If it is [-1, -1, -1, -100, 0] then the regular strided loop was used
assert np.all(arr == [-1, -1, -1, -200, -1])
def test_ufunc_at_large(self):
# issue gh-23457
indices = np.zeros(8195, dtype=np.int16)
b = np.zeros(8195, dtype=float)
b[0] = 10
b[1] = 5
b[8192:] = 100
a = np.zeros(1, dtype=float)
np.add.at(a, indices, b)
assert a[0] == b.sum()
def test_cast_index_fastpath(self):
arr = np.zeros(10)
values = np.ones(100000)
# index must be cast, which may be buffered in chunks:
index = np.zeros(len(values), dtype=np.uint8)
np.add.at(arr, index, values)
assert arr[0] == len(values)
@pytest.mark.parametrize("value", [
np.ones(1), np.ones(()), np.float64(1.), 1.])
def test_ufunc_at_scalar_value_fastpath(self, value):
arr = np.zeros(1000)
        # repeated indices exercise the scalar-value fast path:
index = np.repeat(np.arange(1000), 2)
np.add.at(arr, index, value)
assert_array_equal(arr, np.full_like(arr, 2 * value))
def test_ufunc_at_multiD(self):
a = np.arange(9).reshape(3, 3)
b = np.array([[100, 100, 100], [200, 200, 200], [300, 300, 300]])
np.add.at(a, (slice(None), [1, 2, 1]), b)
assert_equal(a, [[0, 201, 102], [3, 404, 205], [6, 607, 308]])
a = np.arange(27).reshape(3, 3, 3)
b = np.array([100, 200, 300])
np.add.at(a, (slice(None), slice(None), [1, 2, 1]), b)
assert_equal(a,
[[[0, 401, 202],
[3, 404, 205],
[6, 407, 208]],
[[9, 410, 211],
[12, 413, 214],
[15, 416, 217]],
[[18, 419, 220],
[21, 422, 223],
[24, 425, 226]]])
a = np.arange(9).reshape(3, 3)
b = np.array([[100, 100, 100], [200, 200, 200], [300, 300, 300]])
np.add.at(a, ([1, 2, 1], slice(None)), b)
assert_equal(a, [[0, 1, 2], [403, 404, 405], [206, 207, 208]])
a = np.arange(27).reshape(3, 3, 3)
b = np.array([100, 200, 300])
np.add.at(a, (slice(None), [1, 2, 1], slice(None)), b)
assert_equal(a,
[[[0, 1, 2],
[203, 404, 605],
[106, 207, 308]],
[[9, 10, 11],
[212, 413, 614],
[115, 216, 317]],
[[18, 19, 20],
[221, 422, 623],
[124, 225, 326]]])
a = np.arange(9).reshape(3, 3)
b = np.array([100, 200, 300])
np.add.at(a, (0, [1, 2, 1]), b)
assert_equal(a, [[0, 401, 202], [3, 4, 5], [6, 7, 8]])
a = np.arange(27).reshape(3, 3, 3)
b = np.array([100, 200, 300])
np.add.at(a, ([1, 2, 1], 0, slice(None)), b)
assert_equal(a,
[[[0, 1, 2],
[3, 4, 5],
[6, 7, 8]],
[[209, 410, 611],
[12, 13, 14],
[15, 16, 17]],
[[118, 219, 320],
[21, 22, 23],
[24, 25, 26]]])
a = np.arange(27).reshape(3, 3, 3)
b = np.array([100, 200, 300])
np.add.at(a, (slice(None), slice(None), slice(None)), b)
assert_equal(a,
[[[100, 201, 302],
[103, 204, 305],
[106, 207, 308]],
[[109, 210, 311],
[112, 213, 314],
[115, 216, 317]],
[[118, 219, 320],
[121, 222, 323],
[124, 225, 326]]])
def test_ufunc_at_0D(self):
a = np.array(0)
np.add.at(a, (), 1)
assert_equal(a, 1)
assert_raises(IndexError, np.add.at, a, 0, 1)
assert_raises(IndexError, np.add.at, a, [], 1)
def test_ufunc_at_dtypes(self):
# Test mixed dtypes
a = np.arange(10)
np.power.at(a, [1, 2, 3, 2], 3.5)
assert_equal(a, np.array([0, 1, 4414, 46, 4, 5, 6, 7, 8, 9]))
def test_ufunc_at_boolean(self):
# Test boolean indexing and boolean ufuncs
a = np.arange(10)
index = a % 2 == 0
np.equal.at(a, index, [0, 2, 4, 6, 8])
assert_equal(a, [1, 1, 1, 3, 1, 5, 1, 7, 1, 9])
# Test unary operator
a = np.arange(10, dtype='u4')
np.invert.at(a, [2, 5, 2])
assert_equal(a, [0, 1, 2, 3, 4, 5 ^ 0xffffffff, 6, 7, 8, 9])
def test_ufunc_at_advanced(self):
# Test empty subspace
orig = np.arange(4)
a = orig[:, None][:, 0:0]
np.add.at(a, [0, 1], 3)
assert_array_equal(orig, np.arange(4))
# Test with swapped byte order
index = np.array([1, 2, 1], np.dtype('i').newbyteorder())
values = np.array([1, 2, 3, 4], np.dtype('f').newbyteorder())
np.add.at(values, index, 3)
assert_array_equal(values, [1, 8, 6, 4])
# Test exception thrown
values = np.array(['a', 1], dtype=object)
assert_raises(TypeError, np.add.at, values, [0, 1], 1)
assert_array_equal(values, np.array(['a', 1], dtype=object))
# Test multiple output ufuncs raise error, gh-5665
assert_raises(ValueError, np.modf.at, np.arange(10), [1])
# Test maximum
a = np.array([1, 2, 3])
np.maximum.at(a, [0], 0)
assert_equal(a, np.array([1, 2, 3]))
@pytest.mark.parametrize("dtype",
np.typecodes['AllInteger'] + np.typecodes['Float'])
@pytest.mark.parametrize("ufunc",
[np.add, np.subtract, np.divide, np.minimum, np.maximum])
def test_at_negative_indexes(self, dtype, ufunc):
a = np.arange(0, 10).astype(dtype)
indxs = np.array([-1, 1, -1, 2]).astype(np.intp)
vals = np.array([1, 5, 2, 10], dtype=a.dtype)
expected = a.copy()
for i, v in zip(indxs, vals):
expected[i] = ufunc(expected[i], v)
ufunc.at(a, indxs, vals)
assert_array_equal(a, expected)
assert np.all(indxs == [-1, 1, -1, 2])
def test_at_not_none_signature(self):
# Test ufuncs with non-trivial signature raise a TypeError
a = np.ones((2, 2, 2))
b = np.ones((1, 2, 2))
assert_raises(TypeError, np.matmul.at, a, [0], b)
a = np.array([[[1, 2], [3, 4]]])
assert_raises(TypeError, np.linalg._umath_linalg.det.at, a, [0])
def test_at_no_loop_for_op(self):
# str dtype does not have a ufunc loop for np.add
arr = np.ones(10, dtype=str)
with pytest.raises(np._core._exceptions._UFuncNoLoopError):
np.add.at(arr, [0, 1], [0, 1])
def test_at_output_casting(self):
arr = np.array([-1])
np.equal.at(arr, [0], [0])
assert arr[0] == 0
def test_at_broadcast_failure(self):
arr = np.arange(5)
with pytest.raises(ValueError):
np.add.at(arr, [0, 1], [1, 2, 3])
def test_reduce_arguments(self):
f = np.add.reduce
d = np.ones((5,2), dtype=int)
o = np.ones((2,), dtype=d.dtype)
r = o * 5
assert_equal(f(d), r)
# a, axis=0, dtype=None, out=None, keepdims=False
assert_equal(f(d, axis=0), r)
assert_equal(f(d, 0), r)
assert_equal(f(d, 0, dtype=None), r)
assert_equal(f(d, 0, dtype='i'), r)
assert_equal(f(d, 0, 'i'), r)
assert_equal(f(d, 0, None), r)
assert_equal(f(d, 0, None, out=None), r)
assert_equal(f(d, 0, None, out=o), r)
assert_equal(f(d, 0, None, o), r)
assert_equal(f(d, 0, None, None), r)
assert_equal(f(d, 0, None, None, keepdims=False), r)
assert_equal(f(d, 0, None, None, True), r.reshape((1,) + r.shape))
assert_equal(f(d, 0, None, None, False, 0), r)
assert_equal(f(d, 0, None, None, False, initial=0), r)
assert_equal(f(d, 0, None, None, False, 0, True), r)
assert_equal(f(d, 0, None, None, False, 0, where=True), r)
# multiple keywords
assert_equal(f(d, axis=0, dtype=None, out=None, keepdims=False), r)
assert_equal(f(d, 0, dtype=None, out=None, keepdims=False), r)
assert_equal(f(d, 0, None, out=None, keepdims=False), r)
assert_equal(f(d, 0, None, out=None, keepdims=False, initial=0,
where=True), r)
# too little
assert_raises(TypeError, f)
# too much
assert_raises(TypeError, f, d, 0, None, None, False, 0, True, 1)
# invalid axis
assert_raises(TypeError, f, d, "invalid")
assert_raises(TypeError, f, d, axis="invalid")
assert_raises(TypeError, f, d, axis="invalid", dtype=None,
keepdims=True)
# invalid dtype
assert_raises(TypeError, f, d, 0, "invalid")
assert_raises(TypeError, f, d, dtype="invalid")
assert_raises(TypeError, f, d, dtype="invalid", out=None)
# invalid out
assert_raises(TypeError, f, d, 0, None, "invalid")
assert_raises(TypeError, f, d, out="invalid")
assert_raises(TypeError, f, d, out="invalid", dtype=None)
# keepdims boolean, no invalid value
# assert_raises(TypeError, f, d, 0, None, None, "invalid")
# assert_raises(TypeError, f, d, keepdims="invalid", axis=0, dtype=None)
# invalid mix
assert_raises(TypeError, f, d, 0, keepdims="invalid", dtype="invalid",
out=None)
# invalid keyword
assert_raises(TypeError, f, d, axis=0, dtype=None, invalid=0)
assert_raises(TypeError, f, d, invalid=0)
assert_raises(TypeError, f, d, 0, keepdims=True, invalid="invalid",
out=None)
assert_raises(TypeError, f, d, axis=0, dtype=None, keepdims=True,
out=None, invalid=0)
assert_raises(TypeError, f, d, axis=0, dtype=None,
out=None, invalid=0)
def test_structured_equal(self):
# https://github.com/numpy/numpy/issues/4855
class MyA(np.ndarray):
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
return getattr(ufunc, method)(*(input.view(np.ndarray)
for input in inputs), **kwargs)
a = np.arange(12.).reshape(4,3)
ra = a.view(dtype=('f8,f8,f8')).squeeze()
mra = ra.view(MyA)
target = np.array([ True, False, False, False], dtype=bool)
assert_equal(np.all(target == (mra == ra[0])), True)
def test_scalar_equal(self):
# Scalar comparisons should always work, without deprecation warnings.
# even when the ufunc fails.
a = np.array(0.)
b = np.array('a')
assert_(a != b)
assert_(b != a)
assert_(not (a == b))
assert_(not (b == a))
def test_NotImplemented_not_returned(self):
# See gh-5964 and gh-2091. Some of these functions are not operator
# related and were fixed for other reasons in the past.
binary_funcs = [
np.power, np.add, np.subtract, np.multiply, np.divide,
np.true_divide, np.floor_divide, np.bitwise_and, np.bitwise_or,
np.bitwise_xor, np.left_shift, np.right_shift, np.fmax,
np.fmin, np.fmod, np.hypot, np.logaddexp, np.logaddexp2,
np.maximum, np.minimum, np.mod,
np.greater, np.greater_equal, np.less, np.less_equal,
np.equal, np.not_equal]
a = np.array('1')
b = 1
c = np.array([1., 2.])
for f in binary_funcs:
assert_raises(TypeError, f, a, b)
assert_raises(TypeError, f, c, a)
@pytest.mark.parametrize("ufunc",
[np.logical_and, np.logical_or]) # logical_xor object loop is bad
@pytest.mark.parametrize("signature",
[(None, None, object), (object, None, None),
(None, object, None)])
def test_logical_ufuncs_object_signatures(self, ufunc, signature):
a = np.array([True, None, False], dtype=object)
res = ufunc(a, a, signature=signature)
assert res.dtype == object
@pytest.mark.parametrize("ufunc",
[np.logical_and, np.logical_or, np.logical_xor])
@pytest.mark.parametrize("signature",
[(bool, None, object), (object, None, bool),
(None, object, bool)])
def test_logical_ufuncs_mixed_object_signatures(self, ufunc, signature):
# Most mixed signatures fail (except those with bool out, e.g. `OO->?`)
a = np.array([True, None, False])
with pytest.raises(TypeError):
ufunc(a, a, signature=signature)
@pytest.mark.parametrize("ufunc",
[np.logical_and, np.logical_or, np.logical_xor])
def test_logical_ufuncs_support_anything(self, ufunc):
# The logical ufuncs support even input that can't be promoted:
a = np.array(b'1', dtype="V3")
c = np.array([1., 2.])
assert_array_equal(ufunc(a, c), ufunc([True, True], True))
assert ufunc.reduce(a) == True
# check that the output has no effect:
out = np.zeros(2, dtype=np.int32)
expected = ufunc([True, True], True).astype(out.dtype)
assert_array_equal(ufunc(a, c, out=out), expected)
out = np.zeros((), dtype=np.int32)
assert ufunc.reduce(a, out=out) == True
# Last check, test reduction when out and a match (the complexity here
        # is that the "i,i->?" may seem right, but should not match).
a = np.array([3], dtype="i")
out = np.zeros((), dtype=a.dtype)
assert ufunc.reduce(a, out=out) == 1
@pytest.mark.parametrize("ufunc",
[np.logical_and, np.logical_or, np.logical_xor])
@pytest.mark.parametrize("dtype", ["S", "U"])
@pytest.mark.parametrize("values", [["1", "hi", "0"], ["", ""]])
def test_logical_ufuncs_supports_string(self, ufunc, dtype, values):
# note that values are either all true or all false
arr = np.array(values, dtype=dtype)
obj_arr = np.array(values, dtype=object)
res = ufunc(arr, arr)
expected = ufunc(obj_arr, obj_arr, dtype=bool)
assert_array_equal(res, expected)
res = ufunc.reduce(arr)
expected = ufunc.reduce(obj_arr, dtype=bool)
assert_array_equal(res, expected)
@pytest.mark.parametrize("ufunc",
[np.logical_and, np.logical_or, np.logical_xor])
def test_logical_ufuncs_out_cast_check(self, ufunc):
a = np.array('1')
c = np.array([1., 2.])
out = a.copy()
with pytest.raises(TypeError):
# It would be safe, but not equiv casting:
ufunc(a, c, out=out, casting="equiv")
def test_reducelike_byteorder_resolution(self):
# See gh-20699, byte-order changes need some extra care in the type
# resolution to make the following succeed:
arr_be = np.arange(10, dtype=">i8")
arr_le = np.arange(10, dtype="<i8")
assert np.add.reduce(arr_be) == np.add.reduce(arr_le)
assert_array_equal(np.add.accumulate(arr_be), np.add.accumulate(arr_le))
assert_array_equal(
np.add.reduceat(arr_be, [1]), np.add.reduceat(arr_le, [1]))
def test_reducelike_out_promotes(self):
# Check that the out argument to reductions is considered for
# promotion. See also gh-20455.
# Note that these paths could prefer `initial=` in the future and
# do not up-cast to the default integer for add and prod
arr = np.ones(1000, dtype=np.uint8)
out = np.zeros((), dtype=np.uint16)
assert np.add.reduce(arr, out=out) == 1000
arr[:10] = 2
assert np.multiply.reduce(arr, out=out) == 2**10
# For legacy dtypes, the signature currently has to be forced if `out=`
# is passed. The two paths below should differ, without `dtype=` the
# expected result should be: `np.prod(arr.astype("f8")).astype("f4")`!
arr = np.full(5, 2**25-1, dtype=np.int64)
        # float32 and int64 promote to float64:
        res = np.zeros((), dtype=np.float32)
        np.multiply.reduce(arr, out=res)
# If `dtype=` is passed, the calculation is forced to float32:
single_res = np.zeros((), dtype=np.float32)
np.multiply.reduce(arr, out=single_res, dtype=np.float32)
assert single_res != res
def test_reducelike_output_needs_identical_cast(self):
# Checks the case where a simple byte-swap works, mainly tests that
# this is not rejected directly.
# (interesting because we require descriptor identity in reducelikes).
arr = np.ones(20, dtype="f8")
out = np.empty((), dtype=arr.dtype.newbyteorder())
expected = np.add.reduce(arr)
np.add.reduce(arr, out=out)
assert_array_equal(expected, out)
# Check reduceat:
out = np.empty(2, dtype=arr.dtype.newbyteorder())
expected = np.add.reduceat(arr, [0, 1])
np.add.reduceat(arr, [0, 1], out=out)
assert_array_equal(expected, out)
# And accumulate:
out = np.empty(arr.shape, dtype=arr.dtype.newbyteorder())
expected = np.add.accumulate(arr)
np.add.accumulate(arr, out=out)
assert_array_equal(expected, out)
def test_reduce_noncontig_output(self):
# Check that reduction deals with non-contiguous output arrays
# appropriately.
#
# gh-8036
x = np.arange(7*13*8, dtype=np.int16).reshape(7, 13, 8)
x = x[4:6,1:11:6,1:5].transpose(1, 2, 0)
y_base = np.arange(4*4, dtype=np.int16).reshape(4, 4)
y = y_base[::2,:]
y_base_copy = y_base.copy()
r0 = np.add.reduce(x, out=y.copy(), axis=2)
r1 = np.add.reduce(x, out=y, axis=2)
# The results should match, and y_base shouldn't get clobbered
assert_equal(r0, r1)
assert_equal(y_base[1,:], y_base_copy[1,:])
assert_equal(y_base[3,:], y_base_copy[3,:])
@pytest.mark.parametrize("with_cast", [True, False])
def test_reduceat_and_accumulate_out_shape_mismatch(self, with_cast):
# Should raise an error mentioning "shape" or "size"
arr = np.arange(5)
out = np.arange(3) # definitely wrong shape
if with_cast:
# If a cast is necessary on the output, we can be sure to use
# the generic NpyIter (non-fast) path.
out = out.astype(np.float64)
with pytest.raises(ValueError, match="(shape|size)"):
np.add.reduceat(arr, [0, 3], out=out)
with pytest.raises(ValueError, match="(shape|size)"):
np.add.accumulate(arr, out=out)
@pytest.mark.parametrize('out_shape',
[(), (1,), (3,), (1, 1), (1, 3), (4, 3)])
@pytest.mark.parametrize('keepdims', [True, False])
@pytest.mark.parametrize('f_reduce', [np.add.reduce, np.minimum.reduce])
def test_reduce_wrong_dimension_output(self, f_reduce, keepdims, out_shape):
# Test that we're not incorrectly broadcasting dimensions.
# See gh-15144 (failed for np.add.reduce previously).
a = np.arange(12.).reshape(4, 3)
out = np.empty(out_shape, a.dtype)
correct_out = f_reduce(a, axis=0, keepdims=keepdims)
if out_shape != correct_out.shape:
with assert_raises(ValueError):
f_reduce(a, axis=0, out=out, keepdims=keepdims)
else:
check = f_reduce(a, axis=0, out=out, keepdims=keepdims)
assert_(check is out)
assert_array_equal(check, correct_out)
def test_reduce_output_does_not_broadcast_input(self):
# Test that the output shape cannot broadcast an input dimension
# (it never can add dimensions, but it might expand an existing one)
a = np.ones((1, 10))
out_correct = (np.empty((1, 1)))
out_incorrect = np.empty((3, 1))
np.add.reduce(a, axis=-1, out=out_correct, keepdims=True)
np.add.reduce(a, axis=-1, out=out_correct[:, 0], keepdims=False)
with assert_raises(ValueError):
np.add.reduce(a, axis=-1, out=out_incorrect, keepdims=True)
with assert_raises(ValueError):
np.add.reduce(a, axis=-1, out=out_incorrect[:, 0], keepdims=False)
def test_reduce_output_subclass_ok(self):
class MyArr(np.ndarray):
pass
out = np.empty(())
np.add.reduce(np.ones(5), out=out) # no subclass, all fine
out = out.view(MyArr)
assert np.add.reduce(np.ones(5), out=out) is out
assert type(np.add.reduce(out)) is MyArr
def test_no_doc_string(self):
# gh-9337
assert_('\n' not in umt.inner1d_no_doc.__doc__)
def test_invalid_args(self):
# gh-7961
exc = pytest.raises(TypeError, np.sqrt, None)
# minimally check the exception text
assert exc.match('loop of ufunc does not support')
@pytest.mark.parametrize('nat', [np.datetime64('nat'), np.timedelta64('nat')])
def test_nat_is_not_finite(self, nat):
try:
assert not np.isfinite(nat)
except TypeError:
pass # ok, just not implemented
@pytest.mark.parametrize('nat', [np.datetime64('nat'), np.timedelta64('nat')])
def test_nat_is_nan(self, nat):
try:
assert np.isnan(nat)
except TypeError:
pass # ok, just not implemented
@pytest.mark.parametrize('nat', [np.datetime64('nat'), np.timedelta64('nat')])
def test_nat_is_not_inf(self, nat):
try:
assert not np.isinf(nat)
except TypeError:
pass # ok, just not implemented
class TestGUFuncProcessCoreDims:
def test_conv1d_full_without_out(self):
x = np.arange(5.0)
y = np.arange(13.0)
w = umt.conv1d_full(x, y)
assert_equal(w, np.convolve(x, y, mode='full'))
def test_conv1d_full_with_out(self):
x = np.arange(5.0)
y = np.arange(13.0)
out = np.zeros(len(x) + len(y) - 1)
umt.conv1d_full(x, y, out=out)
assert_equal(out, np.convolve(x, y, mode='full'))
def test_conv1d_full_basic_broadcast(self):
# x.shape is (3, 6)
x = np.array([[1, 3, 0, -10, 2, 2],
[0, -1, 2, 2, 10, 4],
[8, 9, 10, 2, 23, 3]])
# y.shape is (2, 1, 7)
y = np.array([[[3, 4, 5, 20, 30, 40, 29]],
[[5, 6, 7, 10, 11, 12, -5]]])
# result should have shape (2, 3, 12)
result = umt.conv1d_full(x, y)
assert result.shape == (2, 3, 12)
for i in range(2):
for j in range(3):
assert_equal(result[i, j], np.convolve(x[j], y[i, 0]))
def test_bad_out_shape(self):
x = np.ones((1, 2))
y = np.ones((2, 3))
out = np.zeros((2, 3)) # Not the correct shape.
with pytest.raises(ValueError, match=r'does not equal m \+ n - 1'):
umt.conv1d_full(x, y, out=out)
def test_bad_input_both_inputs_length_zero(self):
with pytest.raises(ValueError,
match='both inputs have core dimension 0'):
umt.conv1d_full([], [])
@pytest.mark.parametrize('ufunc', [getattr(np, x) for x in dir(np)
if isinstance(getattr(np, x), np.ufunc)])
def test_ufunc_types(ufunc):
'''
    Check all ufuncs return the correct type. Avoid
    object and boolean types since many operations are not defined
    for them.
Choose the shape so even dot and matmul will succeed
'''
for typ in ufunc.types:
# types is a list of strings like ii->i
if 'O' in typ or '?' in typ:
continue
inp, out = typ.split('->')
args = [np.ones((3, 3), t) for t in inp]
with warnings.catch_warnings(record=True):
warnings.filterwarnings("always")
res = ufunc(*args)
if isinstance(res, tuple):
outs = tuple(out)
assert len(res) == len(outs)
for r, t in zip(res, outs):
assert r.dtype == np.dtype(t)
else:
assert res.dtype == np.dtype(out)
@pytest.mark.parametrize('ufunc', [getattr(np, x) for x in dir(np)
if isinstance(getattr(np, x), np.ufunc)])
def test_ufunc_noncontiguous(ufunc):
'''
Check that contiguous and non-contiguous calls to ufuncs
    have the same results for the same input values
'''
for typ in ufunc.types:
# types is a list of strings like ii->i
if any(set('O?mM') & set(typ)):
# bool, object, datetime are too irregular for this simple test
continue
inp, out = typ.split('->')
args_c = [np.empty((6, 6), t) for t in inp]
# non contiguous (2, 3 step on the two dimensions)
args_n = [np.empty((12, 18), t)[::2, ::3] for t in inp]
# alignment != itemsize is possible. So create an array with such
# an odd step manually.
args_o = []
for t in inp:
orig_dt = np.dtype(t)
off_dt = f"S{orig_dt.alignment}" # offset by alignment
dtype = np.dtype([("_", off_dt), ("t", orig_dt)], align=False)
args_o.append(np.empty((6, 6), dtype=dtype)["t"])
for a in args_c + args_n + args_o:
a.flat = range(1, 37)
with warnings.catch_warnings(record=True):
warnings.filterwarnings("always")
res_c = ufunc(*args_c)
res_n = ufunc(*args_n)
res_o = ufunc(*args_o)
if len(out) == 1:
res_c = (res_c,)
res_n = (res_n,)
res_o = (res_o,)
for c_ar, n_ar, o_ar in zip(res_c, res_n, res_o):
dt = c_ar.dtype
if np.issubdtype(dt, np.floating):
# for floating point results allow a small fuss in comparisons
# since different algorithms (libm vs. intrinsics) can be used
# for different input strides
res_eps = np.finfo(dt).eps
tol = 3*res_eps
assert_allclose(res_c, res_n, atol=tol, rtol=tol)
assert_allclose(res_c, res_o, atol=tol, rtol=tol)
else:
assert_equal(c_ar, n_ar)
assert_equal(c_ar, o_ar)
@pytest.mark.parametrize('ufunc', [np.sign, np.equal])
def test_ufunc_warn_with_nan(ufunc):
# issue gh-15127
# test that calling certain ufuncs with a non-standard `nan` value does not
# emit a warning
# `b` holds a 64 bit signaling nan: the most significant bit of the
# significand is zero.
b = np.array([0x7ff0000000000001], 'i8').view('f8')
assert np.isnan(b)
if ufunc.nin == 1:
ufunc(b)
elif ufunc.nin == 2:
ufunc(b, b.copy())
else:
raise ValueError('ufunc with more than 2 inputs')
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
def test_ufunc_out_casterrors():
# Tests that casting errors are correctly reported and buffers are
# cleared.
# The following array can be added to itself as an object array, but
# the result cannot be cast to an integer output:
value = 123 # relies on python cache (leak-check will still find it)
arr = np.array([value] * int(ncu.BUFSIZE * 1.5) +
["string"] +
[value] * int(1.5 * ncu.BUFSIZE), dtype=object)
out = np.ones(len(arr), dtype=np.intp)
count = sys.getrefcount(value)
with pytest.raises(ValueError):
# Output casting failure:
np.add(arr, arr, out=out, casting="unsafe")
assert count == sys.getrefcount(value)
# output is unchanged after the error, this shows that the iteration
# was aborted (this is not necessarily defined behaviour)
assert out[-1] == 1
with pytest.raises(ValueError):
# Input casting failure:
np.add(arr, arr, out=out, dtype=np.intp, casting="unsafe")
assert count == sys.getrefcount(value)
# output is unchanged after the error, this shows that the iteration
# was aborted (this is not necessarily defined behaviour)
assert out[-1] == 1
@pytest.mark.parametrize("bad_offset", [0, int(ncu.BUFSIZE * 1.5)])
def test_ufunc_input_casterrors(bad_offset):
value = 123
arr = np.array([value] * bad_offset +
["string"] +
[value] * int(1.5 * ncu.BUFSIZE), dtype=object)
with pytest.raises(ValueError):
# Force cast inputs, but the buffered cast of `arr` to intp fails:
np.add(arr, arr, dtype=np.intp, casting="unsafe")
@pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
@pytest.mark.parametrize("bad_offset", [0, int(ncu.BUFSIZE * 1.5)])
def test_ufunc_input_floatingpoint_error(bad_offset):
value = 123
arr = np.array([value] * bad_offset +
[np.nan] +
[value] * int(1.5 * ncu.BUFSIZE))
with np.errstate(invalid="raise"), pytest.raises(FloatingPointError):
# Force cast inputs, but the buffered cast of `arr` to intp fails:
np.add(arr, arr, dtype=np.intp, casting="unsafe")
def test_trivial_loop_invalid_cast():
# This tests the fast-path "invalid cast", see gh-19904.
with pytest.raises(TypeError,
match="cast ufunc 'add' input 0"):
# the void dtype definitely cannot cast to double:
np.add(np.array(1, "i,i"), 3, signature="dd->d")
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
@pytest.mark.parametrize("offset",
[0, ncu.BUFSIZE//2, int(1.5*ncu.BUFSIZE)])
def test_reduce_casterrors(offset):
# Test reporting of casting errors in reductions, we test various
# offsets to where the casting error will occur, since these may occur
# at different places during the reduction procedure. For example
# the first item may be special.
value = 123 # relies on python cache (leak-check will still find it)
arr = np.array([value] * offset +
["string"] +
[value] * int(1.5 * ncu.BUFSIZE), dtype=object)
out = np.array(-1, dtype=np.intp)
count = sys.getrefcount(value)
with pytest.raises(ValueError, match="invalid literal"):
# This is an unsafe cast, but we currently always allow that.
# Note that the double loop is picked, but the cast fails.
# `initial=None` disables the use of an identity here to test failures
# while copying the first values path (not used when identity exists).
np.add.reduce(arr, dtype=np.intp, out=out, initial=None)
assert count == sys.getrefcount(value)
# If an error occurred during casting, the operation is done at most until
# the error occurs (the result of which would be `value * offset`) and -1
# if the error happened immediately.
# This does not define behaviour, the output is invalid and thus undefined
assert out[()] < value * offset
def test_object_reduce_cleanup_on_failure():
# Test cleanup, including of the initial value (manually provided or not)
with pytest.raises(TypeError):
np.add.reduce([1, 2, None], initial=4)
with pytest.raises(TypeError):
np.add.reduce([1, 2, None])
@pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
@pytest.mark.parametrize("method",
[np.add.accumulate, np.add.reduce,
pytest.param(lambda x: np.add.reduceat(x, [0]), id="reduceat"),
pytest.param(lambda x: np.log.at(x, [2]), id="at")])
def test_ufunc_methods_floaterrors(method):
# adding inf and -inf (or log(-inf) creates an invalid float and warns
arr = np.array([np.inf, 0, -np.inf])
with np.errstate(all="warn"):
with pytest.warns(RuntimeWarning, match="invalid value"):
method(arr)
arr = np.array([np.inf, 0, -np.inf])
with np.errstate(all="raise"):
with pytest.raises(FloatingPointError):
method(arr)
def _check_neg_zero(value):
if value != 0.0:
return False
if not np.signbit(value.real):
return False
if value.dtype.kind == "c":
return np.signbit(value.imag)
return True
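# Background for the negative-zero tests below: under IEEE-754 round-to-nearest,
# (-0.0) + (-0.0) == -0.0 while (+0.0) + (-0.0) == +0.0, so a sum only keeps the
# negative sign bit if every contribution (including any identity or initial
# value) is itself -0.0.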
@pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])
def test_addition_negative_zero(dtype):
dtype = np.dtype(dtype)
if dtype.kind == "c":
neg_zero = dtype.type(complex(-0.0, -0.0))
else:
neg_zero = dtype.type(-0.0)
arr = np.array(neg_zero)
arr2 = np.array(neg_zero)
assert _check_neg_zero(arr + arr2)
# In-place ops may end up on a different path (reduce path) see gh-21211
arr += arr2
assert _check_neg_zero(arr)
@pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])
@pytest.mark.parametrize("use_initial", [True, False])
def test_addition_reduce_negative_zero(dtype, use_initial):
dtype = np.dtype(dtype)
if dtype.kind == "c":
neg_zero = dtype.type(complex(-0.0, -0.0))
else:
neg_zero = dtype.type(-0.0)
kwargs = {}
if use_initial:
kwargs["initial"] = neg_zero
else:
pytest.xfail("-0. propagation in sum currently requires initial")
# Test various length, in case SIMD paths or chunking play a role.
# 150 extends beyond the pairwise blocksize; probably not important.
for i in range(0, 150):
arr = np.array([neg_zero] * i, dtype=dtype)
res = np.sum(arr, **kwargs)
if i > 0 or use_initial:
assert _check_neg_zero(res)
else:
# `sum([])` should probably be 0.0 and not -0.0 like `sum([-0.0])`
assert not np.signbit(res.real)
assert not np.signbit(res.imag)
@pytest.mark.parametrize(["dt1", "dt2"],
[("S", "U"), ("U", "S"), ("S", "d"), ("S", "V"), ("U", "l")])
def test_addition_string_types(dt1, dt2):
arr1 = np.array([1234234], dtype=dt1)
arr2 = np.array([b"423"], dtype=dt2)
with pytest.raises(np._core._exceptions.UFuncTypeError) as exc:
np.add(arr1, arr2)
@pytest.mark.parametrize("order1,order2",
[(">", ">"), ("<", "<"), (">", "<"), ("<", ">")])
def test_addition_unicode_inverse_byte_order(order1, order2):
element = 'abcd'
arr1 = np.array([element], dtype=f"{order1}U4")
arr2 = np.array([element], dtype=f"{order2}U4")
result = arr1 + arr2
assert result == 2*element
@pytest.mark.parametrize("dtype", [np.int8, np.int16, np.int32, np.int64])
def test_find_non_long_args(dtype):
element = 'abcd'
start = dtype(0)
end = dtype(len(element))
arr = np.array([element])
result = np._core.umath.find(arr, "a", start, end)
assert result.dtype == np.dtype("intp")
assert result == 0
def test_find_access_past_buffer():
# This checks that no read past the string buffer occurs in
# string_fastsearch.h. The buffer class makes sure this is checked.
# To see it in action, you can remove the checks in the buffer and
# this test will produce an 'Invalid read' if run under valgrind.
arr = np.array([b'abcd', b'ebcd'])
result = np._core.umath.find(arr, b'cde', 0, np.iinfo(np.int64).max)
assert np.all(result == -1)
class TestLowlevelAPIAccess:
def test_resolve_dtypes_basic(self):
# Basic test for dtype resolution:
i4 = np.dtype("i4")
f4 = np.dtype("f4")
f8 = np.dtype("f8")
r = np.add.resolve_dtypes((i4, f4, None))
assert r == (f8, f8, f8)
# Signature uses the same logic to parse as ufunc (less strict)
# the following is "same-kind" casting so works:
r = np.add.resolve_dtypes((
i4, i4, None), signature=(None, None, "f4"))
assert r == (f4, f4, f4)
# Check NEP 50 "weak" promotion also:
r = np.add.resolve_dtypes((f4, int, None))
assert r == (f4, f4, f4)
with pytest.raises(TypeError):
np.add.resolve_dtypes((i4, f4, None), casting="no")
def test_resolve_dtypes_comparison(self):
i4 = np.dtype("i4")
i8 = np.dtype("i8")
b = np.dtype("?")
r = np.equal.resolve_dtypes((i4, i8, None))
assert r == (i8, i8, b)
def test_weird_dtypes(self):
S0 = np.dtype("S0")
# S0 is often converted by NumPy to S1, but not here:
r = np.equal.resolve_dtypes((S0, S0, None))
assert r == (S0, S0, np.dtype(bool))
# Subarray dtypes are weird and may not work fully, we preserve them
# leading to a TypeError (currently no equal loop for void/structured)
dts = np.dtype("10i")
with pytest.raises(TypeError):
np.equal.resolve_dtypes((dts, dts, None))
def test_resolve_dtypes_reduction(self):
i2 = np.dtype("i2")
default_int_ = np.dtype(np.int_)
# Check special addition resolution:
res = np.add.resolve_dtypes((None, i2, None), reduction=True)
assert res == (default_int_, default_int_, default_int_)
def test_resolve_dtypes_reduction_no_output(self):
i4 = np.dtype("i4")
with pytest.raises(TypeError):
# May be allowable at some point?
np.add.resolve_dtypes((i4, i4, i4), reduction=True)
@pytest.mark.parametrize("dtypes", [
(np.dtype("i"), np.dtype("i")),
(None, np.dtype("i"), np.dtype("f")),
(np.dtype("i"), None, np.dtype("f")),
("i4", "i4", None)])
def test_resolve_dtypes_errors(self, dtypes):
with pytest.raises(TypeError):
np.add.resolve_dtypes(dtypes)
def test_resolve_dtypes_reduction_errors(self):
i2 = np.dtype("i2")
with pytest.raises(TypeError):
np.add.resolve_dtypes((None, i2, i2))
with pytest.raises(TypeError):
np.add.signature((None, None, "i4"))
@pytest.mark.skipif(not hasattr(ct, "pythonapi"),
reason="`ctypes.pythonapi` required for capsule unpacking.")
def test_loop_access(self):
# This is a basic test for the full strided loop access
data_t = ct.c_char_p * 2
dim_t = ct.c_ssize_t * 1
strides_t = ct.c_ssize_t * 2
strided_loop_t = ct.CFUNCTYPE(
ct.c_int, ct.c_void_p, data_t, dim_t, strides_t, ct.c_void_p)
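        # This CFUNCTYPE mirrors the C-level strided-loop signature
        # ``int loop(context, char *const data[], npy_intp dimensions[],
        # npy_intp strides[], auxdata)`` that is exposed through the
        # "numpy_1.24_ufunc_call_info" capsule unpacked below.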
class call_info_t(ct.Structure):
_fields_ = [
("strided_loop", strided_loop_t),
("context", ct.c_void_p),
("auxdata", ct.c_void_p),
("requires_pyapi", ct.c_byte),
("no_floatingpoint_errors", ct.c_byte),
]
i4 = np.dtype("i4")
dt, call_info_obj = np.negative._resolve_dtypes_and_context((i4, i4))
assert dt == (i4, i4) # can be used without casting
# Fill in the rest of the information:
np.negative._get_strided_loop(call_info_obj)
ct.pythonapi.PyCapsule_GetPointer.restype = ct.c_void_p
call_info = ct.pythonapi.PyCapsule_GetPointer(
ct.py_object(call_info_obj),
ct.c_char_p(b"numpy_1.24_ufunc_call_info"))
call_info = ct.cast(call_info, ct.POINTER(call_info_t)).contents
arr = np.arange(10, dtype=i4)
call_info.strided_loop(
call_info.context,
data_t(arr.ctypes.data, arr.ctypes.data),
arr.ctypes.shape, # is a C-array with 10 here
strides_t(arr.ctypes.strides[0], arr.ctypes.strides[0]),
call_info.auxdata)
# We just directly called the negative inner-loop in-place:
assert_array_equal(arr, -np.arange(10, dtype=i4))
@pytest.mark.parametrize("strides", [1, (1, 2, 3), (1, "2")])
def test__get_strided_loop_errors_bad_strides(self, strides):
i4 = np.dtype("i4")
dt, call_info = np.negative._resolve_dtypes_and_context((i4, i4))
with pytest.raises(TypeError, match="fixed_strides.*tuple.*or None"):
np.negative._get_strided_loop(call_info, fixed_strides=strides)
def test__get_strided_loop_errors_bad_call_info(self):
i4 = np.dtype("i4")
dt, call_info = np.negative._resolve_dtypes_and_context((i4, i4))
with pytest.raises(ValueError, match="PyCapsule"):
np.negative._get_strided_loop("not the capsule!")
with pytest.raises(TypeError, match=".*incompatible context"):
np.add._get_strided_loop(call_info)
np.negative._get_strided_loop(call_info)
with pytest.raises(TypeError):
# cannot call it a second time:
np.negative._get_strided_loop(call_info)
def test_long_arrays(self):
t = np.zeros((1029, 917), dtype=np.single)
t[0][0] = 1
t[28][414] = 1
tc = np.cos(t)
assert_equal(tc[0][0], tc[28][414])
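# A minimal illustration of the property exercised by the ``ufunc.at`` tests
# above (editor's sketch, not from the upstream suite; the function name is
# arbitrary): ``at`` is unbuffered, so repeated indices accumulate, unlike
# buffered fancy-index assignment.
def test_ufunc_at_repeated_indices_accumulate_example():
    a = np.zeros(3)
    np.add.at(a, [0, 0, 1], 1.0)   # index 0 is hit twice and accumulates
    assert_array_equal(a, [2.0, 1.0, 0.0])
    b = np.zeros(3)
    b[[0, 0, 1]] += 1.0            # fancy-index in-place add does not accumulate duplicates
    assert_array_equal(b, [1.0, 1.0, 0.0])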
|
numpyREPO_NAMEnumpyPATH_START.@numpy_extracted@numpy-main@numpy@_core@tests@[email protected]_END.py
|
{
"filename": "decorators.py",
"repo_name": "gwpy/gwpy",
"repo_path": "gwpy_extracted/gwpy-main/gwpy/utils/decorators.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
# Copyright (C) Duncan Macleod (2014-2020)
#
# This file is part of GWpy.
#
# GWpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GWpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GWpy. If not, see <http://www.gnu.org/licenses/>.
"""Decorators for GWpy
"""
import warnings
from functools import wraps
__author__ = 'Duncan Macleod <[email protected]>'
DEPRECATED_FUNCTION_WARNING = (
"{0.__module__}.{0.__name__} has been deprecated, and will be "
"removed in a future release."
)
class deprecated_property(property): # pylint: disable=invalid-name
"""sub-class of `property` that invokes DeprecationWarning on every call
"""
def __init__(self, fget=None, fset=None, fdel=None, doc=None):
# get name of property
pname = fget.__name__
# build a wrapper that will spawn a DeprecationWarning for all calls
def _warn(func):
@wraps(func)
def _wrapped(self, *args, **kwargs):
parent = type(self).__name__ # parent class name
warnings.warn('the {0}.{1} property is deprecated, and will '
'be removed in a future release, please stop '
'using it.'.format(parent, pname),
DeprecationWarning)
return func(self, *args, **kwargs)
return _wrapped
# wrap the property methods
if fdel:
fdel = _warn(fdel)
if fset:
fset = _warn(fset)
if not fset and not fdel: # only wrap once
fget = _warn(fget)
super().__init__(fget, fset, fdel, doc)
def deprecated_function(func=None, message=DEPRECATED_FUNCTION_WARNING):
"""Adds a `DeprecationWarning` to a function
Parameters
----------
func : `callable`
the function to decorate with a `DeprecationWarning`
message : `str`, optional
the warning message to present
Notes
-----
The final warning message is formatted as ``message.format(func)``
so you can use attribute references to the function itself.
See the default message as an example.
"""
def _decorator(func):
@wraps(func)
def wrapped_func(*args, **kwargs):
warnings.warn(
message.format(func),
category=DeprecationWarning,
stacklevel=2,
)
return func(*args, **kwargs)
return wrapped_func
if func:
return _decorator(func)
return _decorator
def return_as(returntype):
"""Decorator to cast return of function as the given type
Parameters
----------
returntype : `type`
the desired return type of the decorated function
"""
def decorator(func):
@wraps(func)
def wrapped(*args, **kwargs):
result = func(*args, **kwargs)
try:
return returntype(result)
except (TypeError, ValueError) as exc:
exc.args = (
'failed to cast return from {0} as {1}: {2}'.format(
func.__name__, returntype.__name__, str(exc)),
)
raise
return wrapped
return decorator
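# Minimal usage sketch (editor's addition; ``old_name`` and ``half`` are
# hypothetical functions, not part of GWpy): ``deprecated_function`` may be
# used bare or with a custom message formatted against the decorated function,
# and ``return_as`` casts whatever the wrapped function returns.
if __name__ == '__main__':  # demo only
    @deprecated_function(message="{0.__name__} will be removed soon")
    def old_name(value):
        return value + 1
    @return_as(float)
    def half(value):
        return value / 2
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter('always')
        assert old_name(1) == 2
    assert caught and issubclass(caught[0].category, DeprecationWarning)
    assert isinstance(half(3), float)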
|
gwpyREPO_NAMEgwpyPATH_START.@gwpy_extracted@gwpy-main@gwpy@[email protected]@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/langchain/langchain/chains/qa_with_sources/__init__.py",
"type": "Python"
}
|
"""Load question answering with sources chains."""
from langchain.chains.qa_with_sources.loading import load_qa_with_sources_chain
__all__ = ["load_qa_with_sources_chain"]
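# Hedged usage sketch (editor's addition): the loader is typically called with
# an LLM instance and a chain type, e.g.
#     chain = load_qa_with_sources_chain(llm, chain_type="stuff")
#     chain({"input_documents": docs, "question": question})
# where ``llm``, ``docs`` and ``question`` are supplied by the caller.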
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@langchain@langchain@chains@qa_with_sources@[email protected]_END.py
|
{
"filename": "test_jplsbdb.py",
"repo_name": "D-arioSpace/astroquery",
"repo_path": "astroquery_extracted/astroquery-main/astroquery/jplsbdb/tests/test_jplsbdb.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import os
from astroquery.utils.mocks import MockResponse
import astropy.units as u
from astropy.tests.helper import assert_quantity_allclose
from .. import SBDB, SBDBClass
# files in data/ for different query types
DATA_FILES = {'1': 'ceres.dat',
'Apophis': 'apophis.dat',
'3200': 'phaethon.dat',
'67P': '67P.dat',
'Ceres': 'ceres_missing_value.dat'
}
SCHEMATICS = {'1': '| +-- n_del_obs_used: 405',
'Apophis': '| +-- albedo_note: http://www.esa.int/Our_',
'3200': '| | +-- A2_kind: EST',
'67P': '| | +-- name: Jupiter-family Comet',
'Ceres': '| +-- n_del_obs_used: 405'
}
SEMI_MAJOR = {'1': 2.767046248500289 * u.au,
'Apophis': .9224383019077086 * u.au,
'3200': 1.271196435728355 * u.au,
'67P': 3.46473701803964 * u.au,
'Ceres': 2.767046248500289 * u.au}
def data_path(filename):
data_dir = os.path.join(os.path.dirname(__file__), 'data')
return os.path.join(data_dir, filename)
# monkeypatch replacement request function
def nonremote_request(self, url, **kwargs):
targetname = kwargs['params']['sstr']
with open(data_path(DATA_FILES[targetname]), 'rb') as f:
response = MockResponse(content=f.read(), url=url)
return response
# use a pytest fixture to create a dummy 'requests.get' function,
# that mocks (monkeypatches) the actual 'requests.get' function:
@pytest.fixture
def patch_request(request):
mp = request.getfixturevalue("monkeypatch")
mp.setattr(SBDBClass, '_request',
nonremote_request)
return mp
# --------------------------------- actual test functions
def test_objects_numerically(patch_request):
for targetname in DATA_FILES.keys():
sbdb = SBDB.query(targetname, id_type='search', phys=True,
alternate_id=True, full_precision=True,
covariance='mat', validity=True,
alternate_orbit=True, close_approach=True,
virtual_impactor=True,
discovery=True, radar=True)
assert_quantity_allclose(sbdb['orbit']['elements']['a'],
SEMI_MAJOR[targetname])
def test_missing_value(patch_request):
"""test whether a missing value causes an error"""
sbdb = SBDB.query('Ceres', id_type='search', phys=True,
alternate_id=True, full_precision=True,
covariance='mat', validity=True,
alternate_orbit=True, close_approach=True,
virtual_impactor=True,
discovery=True, radar=True)
assert sbdb['orbit']['elements']['per'] is None
def test_quantities(patch_request):
"""Make sure query returns quantities.
Regression test for astroquery #2011.
"""
sbdb = SBDB.query('Ceres', id_type='search', phys=True,
alternate_id=True, full_precision=True,
covariance='mat', validity=True,
alternate_orbit=True, close_approach=True,
virtual_impactor=True,
discovery=True, radar=True)
assert isinstance(sbdb['phys_par']['H'], u.Quantity)
assert sbdb['phys_par']['H'].unit == u.mag
# def test_objects_against_schema(patch_request):
# for targetname in DATA_FILES.keys():
# sbdb = SBDB.query(targetname, id_type='search', phys=True,
# alternate_id=True, full_precision=True,
# covariance='mat', validity=True,
# alternate_orbit=True, close_approach=True,
# virtual_impactor=True,
# discovery=True, radar=True)
# assert SCHEMATICS[targetname] in SBDB.schematic(sbdb)
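# Editor's sketch building on the same mocked ``_request`` fixture (not part of
# the original module): because the fixture serves local data files, extra
# assertions are cheap, e.g. that the semi-major axis comes back as an astropy
# Quantity with a length unit.
def test_semi_major_axis_is_quantity(patch_request):
    sbdb = SBDB.query('1', id_type='search', phys=True,
                      alternate_id=True, full_precision=True,
                      covariance='mat', validity=True,
                      alternate_orbit=True, close_approach=True,
                      virtual_impactor=True,
                      discovery=True, radar=True)
    a = sbdb['orbit']['elements']['a']
    assert isinstance(a, u.Quantity)
    assert a.unit.is_equivalent(u.au)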
|
D-arioSpaceREPO_NAMEastroqueryPATH_START.@astroquery_extracted@astroquery-main@astroquery@jplsbdb@tests@[email protected]_END.py
|
{
"filename": "vis_formats_write.md",
"repo_name": "MWATelescope/mwa_hyperdrive",
"repo_path": "mwa_hyperdrive_extracted/mwa_hyperdrive-main/mdbook/src/defs/vis_formats_write.md",
"type": "Markdown"
}
|
# Supported visibility formats for writing
The following examples illustrate how to produce each of the supported
visibility file formats with `solutions-apply`, but other aspects of
`hyperdrive` are also able to produce these file formats, and all aspects are
able to perform averaging and write to multiple outputs.
~~~admonish info title="Measurement sets"
```shell
hyperdrive solutions-apply \
-d *gpubox*.fits *.metafits \
-s hyp_sols.fits \
-o hyp_cal.ms
```
~~~
~~~admonish info title="uvfits"
```shell
hyperdrive solutions-apply \
-d *gpubox*.fits *.metafits \
-s hyp_sols.fits \
-o hyp_cal.uvfits
```
A copy of the uvfits standard is
[here](https://library.nrao.edu/public/memos/aips/memos/AIPSM_117.pdf).
~~~
~~~admonish tip title="Visibility averaging"
When writing out visibilities, they can be averaged in time and frequency. Units
can be given to the averaging factors; e.g. using seconds and kilohertz:
```shell
hyperdrive solutions-apply \
-d *gpubox*.fits *.metafits *.mwaf \
-s hyp_sols.fits \
-o hyp_cal.ms \
--time-average 8s \
--freq-average 80kHz
```
Units are not required; in this case, these factors multiply the observation's
time and freq. resolutions:
```shell
hyperdrive solutions-apply \
-d *gpubox*.fits *.metafits *.mwaf \
-s hyp_sols.fits \
-o hyp_cal.ms \
--time-average 4 \
--freq-average 2
```
If the same observation is used in both examples, with a time resolution of 2s
and a freq. resolution of 40kHz, then both commands will yield the same result.
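(With those native resolutions, the unitless factors work out to 2 s × 4 = 8 s
and 40 kHz × 2 = 80 kHz, exactly the values given with explicit units above.)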
See [this page](blocks.md) for information on how visibilities are averaged in
time and frequency.
~~~
~~~admonish tip title="Writing to multiple visibility outputs"
All aspects of `hyperdrive` that can write visibilities can write to multiple
outputs. Note that it probably does not make sense to write out more than one of
each kind (e.g. two uvfits files), as each of these files will be exactly the
same, and a simple `cp` from one to the other is probably faster than writing to
two files simultaneously from `hyperdrive`.
Example (a measurement set and uvfits):
```shell
hyperdrive solutions-apply \
-d *gpubox*.fits *.metafits *.mwaf \
-s hyp_sols.fits \
-o hyp_cal.ms hyp_cal.uvfits \
--time-average 4 \
--freq-average 2
```
~~~
|
MWATelescopeREPO_NAMEmwa_hyperdrivePATH_START.@mwa_hyperdrive_extracted@mwa_hyperdrive-main@mdbook@src@defs@[email protected]_END.py
|
{
"filename": "time_weighted_retriever.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/langchain/langchain/retrievers/time_weighted_retriever.py",
"type": "Python"
}
|
import datetime
from copy import deepcopy
from typing import Any, Dict, List, Optional, Tuple
from langchain_core.callbacks import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
from langchain_core.vectorstores import VectorStore
from pydantic import ConfigDict, Field
def _get_hours_passed(time: datetime.datetime, ref_time: datetime.datetime) -> float:
"""Get the hours passed between two datetimes."""
return (time - ref_time).total_seconds() / 3600
class TimeWeightedVectorStoreRetriever(BaseRetriever):
"""Retriever that combines embedding similarity with
recency in retrieving values."""
vectorstore: VectorStore
"""The vectorstore to store documents and determine salience."""
search_kwargs: dict = Field(default_factory=lambda: dict(k=100))
"""Keyword arguments to pass to the vectorstore similarity search."""
# TODO: abstract as a queue
memory_stream: List[Document] = Field(default_factory=list)
"""The memory_stream of documents to search through."""
decay_rate: float = Field(default=0.01)
"""The exponential decay factor used as (1.0-decay_rate)**(hrs_passed)."""
k: int = 4
"""The maximum number of documents to retrieve in a given call."""
other_score_keys: List[str] = []
"""Other keys in the metadata to factor into the score, e.g. 'importance'."""
default_salience: Optional[float] = None
"""The salience to assign memories not retrieved from the vector store.
None assigns no salience to documents not fetched from the vector store.
"""
model_config = ConfigDict(
arbitrary_types_allowed=True,
)
def _document_get_date(self, field: str, document: Document) -> datetime.datetime:
"""Return the value of the date field of a document."""
if field in document.metadata:
if isinstance(document.metadata[field], float):
return datetime.datetime.fromtimestamp(document.metadata[field])
return document.metadata[field]
return datetime.datetime.now()
def _get_combined_score(
self,
document: Document,
vector_relevance: Optional[float],
current_time: datetime.datetime,
) -> float:
"""Return the combined score for a document."""
hours_passed = _get_hours_passed(
current_time,
self._document_get_date("last_accessed_at", document),
)
score = (1.0 - self.decay_rate) ** hours_passed
for key in self.other_score_keys:
if key in document.metadata:
score += document.metadata[key]
if vector_relevance is not None:
score += vector_relevance
return score
def get_salient_docs(self, query: str) -> Dict[int, Tuple[Document, float]]:
"""Return documents that are salient to the query."""
docs_and_scores: List[Tuple[Document, float]]
docs_and_scores = self.vectorstore.similarity_search_with_relevance_scores(
query, **self.search_kwargs
)
results = {}
for fetched_doc, relevance in docs_and_scores:
if "buffer_idx" in fetched_doc.metadata:
buffer_idx = fetched_doc.metadata["buffer_idx"]
doc = self.memory_stream[buffer_idx]
results[buffer_idx] = (doc, relevance)
return results
async def aget_salient_docs(self, query: str) -> Dict[int, Tuple[Document, float]]:
"""Return documents that are salient to the query."""
docs_and_scores: List[Tuple[Document, float]]
docs_and_scores = (
await self.vectorstore.asimilarity_search_with_relevance_scores(
query, **self.search_kwargs
)
)
results = {}
for fetched_doc, relevance in docs_and_scores:
if "buffer_idx" in fetched_doc.metadata:
buffer_idx = fetched_doc.metadata["buffer_idx"]
doc = self.memory_stream[buffer_idx]
results[buffer_idx] = (doc, relevance)
return results
def _get_rescored_docs(
self, docs_and_scores: Dict[Any, Tuple[Document, Optional[float]]]
) -> List[Document]:
current_time = datetime.datetime.now()
rescored_docs = [
(doc, self._get_combined_score(doc, relevance, current_time))
for doc, relevance in docs_and_scores.values()
]
rescored_docs.sort(key=lambda x: x[1], reverse=True)
result = []
# Ensure frequently accessed memories aren't forgotten
for doc, _ in rescored_docs[: self.k]:
# TODO: Update vector store doc once `update` method is exposed.
buffered_doc = self.memory_stream[doc.metadata["buffer_idx"]]
buffered_doc.metadata["last_accessed_at"] = current_time
result.append(buffered_doc)
return result
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> List[Document]:
docs_and_scores = {
doc.metadata["buffer_idx"]: (doc, self.default_salience)
for doc in self.memory_stream[-self.k :]
}
# If a doc is considered salient, update the salience score
docs_and_scores.update(self.get_salient_docs(query))
return self._get_rescored_docs(docs_and_scores)
async def _aget_relevant_documents(
self, query: str, *, run_manager: AsyncCallbackManagerForRetrieverRun
) -> List[Document]:
docs_and_scores = {
doc.metadata["buffer_idx"]: (doc, self.default_salience)
for doc in self.memory_stream[-self.k :]
}
# If a doc is considered salient, update the salience score
docs_and_scores.update(await self.aget_salient_docs(query))
return self._get_rescored_docs(docs_and_scores)
def add_documents(self, documents: List[Document], **kwargs: Any) -> List[str]:
"""Add documents to vectorstore."""
current_time = kwargs.get("current_time")
if current_time is None:
current_time = datetime.datetime.now()
# Avoid mutating input documents
dup_docs = [deepcopy(d) for d in documents]
for i, doc in enumerate(dup_docs):
if "last_accessed_at" not in doc.metadata:
doc.metadata["last_accessed_at"] = current_time
if "created_at" not in doc.metadata:
doc.metadata["created_at"] = current_time
doc.metadata["buffer_idx"] = len(self.memory_stream) + i
self.memory_stream.extend(dup_docs)
return self.vectorstore.add_documents(dup_docs, **kwargs)
async def aadd_documents(
self, documents: List[Document], **kwargs: Any
) -> List[str]:
"""Add documents to vectorstore."""
current_time = kwargs.get("current_time")
if current_time is None:
current_time = datetime.datetime.now()
# Avoid mutating input documents
dup_docs = [deepcopy(d) for d in documents]
for i, doc in enumerate(dup_docs):
if "last_accessed_at" not in doc.metadata:
doc.metadata["last_accessed_at"] = current_time
if "created_at" not in doc.metadata:
doc.metadata["created_at"] = current_time
doc.metadata["buffer_idx"] = len(self.memory_stream) + i
self.memory_stream.extend(dup_docs)
return await self.vectorstore.aadd_documents(dup_docs, **kwargs)
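# Editor's illustration (not part of the original module): the combined score
# used above is ``(1 - decay_rate) ** hours_passed`` plus the vector-store
# relevance and any ``other_score_keys``. A standalone sketch of that
# arithmetic with made-up numbers:
if __name__ == "__main__":
    decay_rate = 0.01
    hours_passed = 24.0          # memory last accessed a day ago
    vector_relevance = 0.75      # similarity score from the vector store
    importance = 0.1             # hypothetical ``other_score_keys`` entry
    score = (1.0 - decay_rate) ** hours_passed + vector_relevance + importance
    print(f"combined score after one day: {score:.3f}")  # ~1.636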
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@langchain@langchain@retrievers@[email protected]_END.py
|
{
"filename": "orbit.py",
"repo_name": "lucabaldini/ixpeobssim",
"repo_path": "ixpeobssim_extracted/ixpeobssim-main/docs/macro/orbit.py",
"type": "Python"
}
|
#!/urs/bin/env python
#
# Copyright (C) 2020, the ixpeobssim team.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from __future__ import print_function, division
import sys
import numpy
import matplotlib.dates
from ixpeobssim.instrument.traj import xIXPETrajectory, xSAABoundary, xTLE
from ixpeobssim.utils.matplotlib_ import plt, setup_gca, DEFAULT_COLORS
from ixpeobssim.utils.matplotlib_ import save_all_figures
from ixpeobssim.utils.time_ import string_to_met_utc, met_to_num
from ixpeobssim.core.hist import xHistogram1d
from ixpeobssim.utils.logging_ import logger
import ixpeobssim.core.pipeline as pipeline
from ixpeobssim import IXPEOBSSIM_DOC_FIG_MISC
if sys.flags.interactive:
plt.ion()
def plot(start_date = '2021-06-01', duration=86400, save=False):
"""
"""
start_met = string_to_met_utc(start_date, lazy=True)
stop_met = start_met + duration
met = numpy.linspace(start_met, stop_met, 5000)
trajectory = xIXPETrajectory()
saa = xSAABoundary()
geo_fmt = dict(xlabel='Longitude [deg]', ylabel='Latitude [deg]',
xmin=-180., xmax=180., grids=True)
print('%s\n%s' % xTLE.lines())
altitude = 600.
mean_motion = xTLE._mean_motion(altitude)
period = 86400. / mean_motion
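    # mean_motion is in revolutions per day, so the period above is in seconds
    # (roughly 5800 s, i.e. ~97 minutes, for a ~600 km circular orbit).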
print('Mean motion = %.5f, period = %.5f' % (mean_motion, period))
# Calculate the trajectory and altitude.
lon, lat = trajectory.position(met)
alt = trajectory.elevation(met)
    # Split the trajectory into segments to avoid annoying horizontal lines
# in the plot when passing from 180 to -180 in longitude.
idx = numpy.where(numpy.diff(lon) < 0.)[0] + 1
lon = numpy.split(lon, idx)
lat = numpy.split(lat, idx)
# Plain satellite orbit.
plt.figure('IXPE trajectory')
for _lon, _lat in zip(lon, lat):
plt.plot(_lon, _lat, color=DEFAULT_COLORS[0])
plt.plot([lon[0][0], lon[-1][-1]], [lat[0][0], lat[-1][-1]], 'o')
setup_gca(**geo_fmt)
# Altitude
plt.figure('IXPE altitude')
plt.plot(met_to_num(met), alt)
setup_gca(ylabel='Altitude [km]', ymin=599.6, ymax=600.4, grids=True)
locator = matplotlib.dates.AutoDateLocator()
formatter = matplotlib.dates.ConciseDateFormatter(locator)
plt.gca().xaxis.set_major_formatter(formatter)
# SAA
plt.figure('IXPE SAA polygon')
saa.plot()
setup_gca(ymin=-35., **geo_fmt)
# Satellite orbit with SAA.
plt.figure('IXPE trajectory SAA')
for _lon, _lat in zip(lon, lat):
plt.plot(_lon, _lat, color=DEFAULT_COLORS[0])
mask = saa.contains(_lon, _lat)
plt.plot(_lon[mask], _lat[mask], color='lightgray')
saa.plot()
setup_gca(ymin=-31., ymax=6., **geo_fmt)
axins = plt.gca().inset_axes([0.575, 0.35, 0.4, 0.4])
axins.set_xlim(-78., 0.)
axins.set_ylim(-0.55, 0.55)
axins.set_xticklabels('')
axins.set_yticklabels('')
for _lon, _lat in zip(lon, lat):
axins.plot(_lon, _lat, color=DEFAULT_COLORS[0])
mask = saa.contains(_lon, _lat)
axins.plot(_lon[mask], _lat[mask], color='lightgray')
fmt = dict(facecolor='orange', edgecolor='black', alpha=0.5)
patch = matplotlib.patches.PathPatch(saa, **fmt)
axins.add_patch(patch)
plt.plot(*numpy.hsplit(saa.vertices, 2), 'o', color=fmt.get('edgecolor'))
plt.gca().indicate_inset_zoom(axins)
axins.grid(which='both')
# SAA passages
epochs = trajectory.saa_epochs(start_met, start_met + 1000000, 700)
delta = numpy.array([t2 - t1 for (t1, t2) in epochs])
min_ = delta.min()
max_ = delta.max()
mean = delta.mean()
rms = delta.std(ddof=1)
plt.figure('IXPE SAA epochs')
hist = xHistogram1d(numpy.linspace(780., 840., 75)).fill(delta)
hist.plot()
setup_gca(ymax=17., grids=True, xlabel='SAA epoch duration [s]', ylabel='Entries per bin')
pipeline.xpvisibility(srcname='Crab', startdate='2021-01-01')
if save:
save_all_figures(IXPEOBSSIM_DOC_FIG_MISC, ('pdf', 'png'))
if __name__ == '__main__':
plot(save=True)
|
lucabaldiniREPO_NAMEixpeobssimPATH_START.@ixpeobssim_extracted@ixpeobssim-main@docs@[email protected]@.PATH_END.py
|
{
"filename": "test_fouriersqrt.py",
"repo_name": "GalSim-developers/GalSim",
"repo_path": "GalSim_extracted/GalSim-main/tests/test_fouriersqrt.py",
"type": "Python"
}
|
# Copyright (c) 2012-2023 by the GalSim developers team on GitHub
# https://github.com/GalSim-developers
#
# This file is part of GalSim: The modular galaxy image simulation toolkit.
# https://github.com/GalSim-developers/GalSim
#
# GalSim is free software: redistribution and use in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions, and the disclaimer given in the accompanying LICENSE
# file.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the disclaimer given in the documentation
# and/or other materials provided with the distribution.
#
import numpy as np
import os
import galsim
from galsim_test_helpers import *
imgdir = os.path.join(".", "SBProfile_comparison_images") # Directory containing the reference
# images.
@timer
def test_fourier_sqrt():
"""Test that the FourierSqrt operator is the inverse of auto-convolution.
"""
dx = 0.4
myImg1 = galsim.ImageF(80,80, scale=dx)
myImg1.setCenter(0,0)
myImg2 = galsim.ImageF(80,80, scale=dx)
myImg2.setCenter(0,0)
# Test trivial case, where we could (but don't) analytically collapse the
# chain of profiles by recognizing that FourierSqrt is the inverse of
# AutoConvolve.
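    # (In Fourier space, auto-convolution squares the transform, so FourierSqrt
    # takes the pointwise square root and AutoConvolve(FourierSqrt(psf)) should
    # reproduce psf.)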
psf = galsim.Moffat(beta=3.8, fwhm=1.3, flux=5)
psf.drawImage(myImg1, method='no_pixel')
sqrt1 = galsim.FourierSqrt(psf)
psf2 = galsim.AutoConvolve(sqrt1)
np.testing.assert_almost_equal(psf.stepk, psf2.stepk)
psf2.drawImage(myImg2, method='no_pixel')
printval(myImg1, myImg2)
np.testing.assert_array_almost_equal(
myImg1.array, myImg2.array, 4,
err_msg="Moffat sqrt convolved with self disagrees with original")
check_basic(sqrt1, "FourierSqrt", do_x=False)
# Test non-trivial case where we compare (in Fourier space) sqrt(a*a + b*b + 2*a*b) against (a + b)
a = galsim.Moffat(beta=3.8, fwhm=1.3, flux=5)
    a = a.shift(dx=0.5, dy=-0.3)  # need nonzero centroid to test
b = galsim.Moffat(beta=2.5, fwhm=1.6, flux=3)
check = galsim.Sum([a, b])
sqrt = galsim.FourierSqrt(
galsim.Sum([
galsim.AutoConvolve(a),
galsim.AutoConvolve(b),
2*galsim.Convolve([a, b])
])
)
np.testing.assert_almost_equal(check.stepk, sqrt.stepk)
check.drawImage(myImg1, method='no_pixel')
sqrt.drawImage(myImg2, method='no_pixel')
np.testing.assert_almost_equal(check.centroid.x, sqrt.centroid.x)
np.testing.assert_almost_equal(check.centroid.y, sqrt.centroid.y)
np.testing.assert_almost_equal(check.flux, sqrt.flux)
np.testing.assert_almost_equal(check.xValue(check.centroid), check.max_sb)
print('check.max_sb = ',check.max_sb)
print('sqrt.max_sb = ',sqrt.max_sb)
# This isn't super accurate...
np.testing.assert_allclose(check.max_sb, sqrt.max_sb, rtol=0.1)
printval(myImg1, myImg2)
np.testing.assert_array_almost_equal(
myImg1.array, myImg2.array, 4,
err_msg="Fourier square root of expanded square disagrees with original")
# Check picklability
check_pickle(sqrt1, lambda x: x.drawImage(method='no_pixel'))
check_pickle(sqrt1)
# Should raise an exception for invalid arguments
assert_raises(TypeError, galsim.FourierSqrt)
assert_raises(TypeError, galsim.FourierSqrt, myImg1)
assert_raises(TypeError, galsim.FourierSqrt, [psf])
assert_raises(TypeError, galsim.FourierSqrt, psf, psf)
assert_raises(TypeError, galsim.FourierSqrt, psf, real_space=False)
assert_raises(TypeError, galsim.FourierSqrtProfile)
assert_raises(TypeError, galsim.FourierSqrtProfile, myImg1)
assert_raises(TypeError, galsim.FourierSqrtProfile, [psf])
assert_raises(TypeError, galsim.FourierSqrtProfile, psf, psf)
assert_raises(TypeError, galsim.FourierSqrtProfile, psf, real_space=False)
assert_raises(NotImplementedError, sqrt1.xValue, galsim.PositionD(0,0))
assert_raises(NotImplementedError, sqrt1.drawReal, myImg1)
assert_raises(NotImplementedError, sqrt1.shoot, 1)
if __name__ == "__main__":
runtests(__file__)
|
GalSim-developersREPO_NAMEGalSimPATH_START.@GalSim_extracted@GalSim-main@tests@[email protected]_END.py
|
{
"filename": "synthetic.py",
"repo_name": "ander-son-almeida/DashboardOCmass",
"repo_path": "DashboardOCmass_extracted/DashboardOCmass-main/synthetic.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 23 15:33:17 2021
@author: Anderson Almeida
"""
# from ocs_functions import *
import numpy as np
from oc_tools_padova_edr3 import *
def synthetic(age, dist, Av, FeH, bin_frac, nstars, Mlim):
# read isochrones
mod_grid, age_grid, z_grid = load_mod_grid()
filters = ['Gmag','G_BPmag','G_RPmag']
refMag = 'Gmag'
seed= 2
met = (10.**FeH)*0.0152
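    # convert [Fe/H] to the metal fraction Z, assuming the PARSEC/Padova solar
    # value Z_sun = 0.0152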
mod_cluster = model_cluster(age,dist,FeH,Av,bin_frac,nstars,filters,
refMag,error=False,Mcut=Mlim,seed=seed,
imf='chabrier',alpha=2.1, beta=-3., gaia_ext = True)
# adding the Gaia filter errors
mod_cluster = get_phot_errors(mod_cluster,filters)
# simulating the cluster with the observational errors
mod_cluster_obs = np.copy(mod_cluster)
# random samples from a Gaussian distribution
mod_cluster_obs['Gmag'] = np.random.normal(mod_cluster['Gmag'],mod_cluster['e_Gmag'])
mod_cluster_obs['G_BPmag'] = np.random.normal(mod_cluster['G_BPmag'],mod_cluster['e_G_BPmag'])
mod_cluster_obs['G_RPmag'] = np.random.normal(mod_cluster['G_RPmag'],mod_cluster['e_G_RPmag'])
# assigning RA and DEC coordinates - King profile distribution
ra_cen, dec_cen = 232.45,-64.86
rcore, rtidal = 5., 10
cluster_ra, cluster_dec = gen_cluster_coordinates(ra_cen, dec_cen,nstars, rcore, rtidal, 0.85*nstars,mod_cluster['Mini'])
mod_cluster_obs = add_col(mod_cluster_obs,cluster_ra,'RA_ICRS')
mod_cluster_obs = add_col(mod_cluster_obs,cluster_dec,'DEC_ICRS')
# sorting according to magnitude
indV = np.argsort(mod_cluster[refMag])
# synthetic color
cor = mod_cluster['G_BPmag']-mod_cluster['G_RPmag']
absMag = mod_cluster[refMag]
# observable synthetic color
cor_obs = mod_cluster_obs['G_BPmag']-mod_cluster_obs['G_RPmag']
absMag_obs = mod_cluster_obs[refMag]
###############################################################################
# Getting the raw isochrone from the grid, given an age and metallicity
grid_iso = get_iso_from_grid(age,(10.**FeH)*0.0152,filters,refMag, nointerp=False)
# Builds an isochrone, taking the observational parameters into account
fit_iso = make_obs_iso(filters, grid_iso, dist, Av, gaia_ext = True)
total_mass = np.around((np.sum(mod_cluster_obs['Mass'])) +
(np.sum(mod_cluster_obs['comp_mass'])), decimals=2)
return mod_cluster_obs, mod_cluster, cor_obs, absMag_obs, fit_iso, total_mass
|
ander-son-almeidaREPO_NAMEDashboardOCmassPATH_START.@DashboardOCmass_extracted@[email protected]@.PATH_END.py
|
{
"filename": "_hoverinfo.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/histogram/_hoverinfo.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class HoverinfoValidator(_plotly_utils.basevalidators.FlaglistValidator):
def __init__(self, plotly_name="hoverinfo", parent_name="histogram", **kwargs):
super(HoverinfoValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "none"),
extras=kwargs.pop("extras", ["all", "none", "skip"]),
flags=kwargs.pop("flags", ["x", "y", "z", "text", "name"]),
**kwargs,
)
|
[email protected][email protected]@packages@python@plotly@plotly@validators@histogram@[email protected]_END.py
|
{
"filename": "calculate_calibration_coefficients.ipynb",
"repo_name": "cta-observatory/cta-lstchain",
"repo_path": "cta-lstchain_extracted/cta-lstchain-main/notebooks/calculate_calibration_coefficients.ipynb",
"type": "Jupyter Notebook"
}
|
```python
%load_ext autoreload
%autoreload 2
from ctapipe.io import EventSource
import sys
from matplotlib import pyplot as plt
import numpy as np
%matplotlib inline
import sys
from scipy.stats import norm
from traitlets.config.loader import Config
from ctapipe import utils
# ctapipe modules
from ctapipe.visualization import CameraDisplay
from ctapipe.plotting.camera import CameraPlotter
from ctapipe.image.extractor import *
from ctapipe.containers import PedestalContainer
from ctapipe.io.hdf5tableio import HDF5TableWriter, HDF5TableReader
from lstchain.calib.camera.r0 import LSTR0Corrections
r0calib = LSTR0Corrections(
pedestal_path="../../cta-lstchain-extra/calib/camera/pedestal_file_run446_0000.fits",
r1_sample_start=2,r1_sample_end=38)
# flat field run with interleaved flatfield and pedestal events (for the moment too big for cta-lstchain-extra)
run = 472
#datadir = '/ctadata/franca/LST'
datadir = '/fefs/onsite/data/20190527'
file = f'{datadir}/LST-1.1.Run00{run}.0000.fits.fz'
reader = EventSource(file, max_events=None)
print(f"\n Read {len(reader.multi_file)} total events in files\n")
print(f"{reader.file_list} ")
channel=['HG','LG']
# use the tool to write calibration coefficients
from lstchain.tools.calc_camera_calibration import CalibrationHDF5Writer
tel_id=0 # LST1 for the moment
```
The autoreload extension is already loaded. To reload it, use:
%reload_ext autoreload
Read 53004 total events in files
['/ctadata/franca/LST/LST-1.1.Run00472.0000.fits.fz', '/ctadata/franca/LST/LST-1.2.Run00472.0000.fits.fz', '/ctadata/franca/LST/LST-1.3.Run00472.0000.fits.fz', '/ctadata/franca/LST/LST-1.4.Run00472.0000.fits.fz']
```python
# read first flatfield event
for i, event in enumerate(reader):
# calibrate r0 --> r1
r0calib.calibrate(event)
# select only flatfield events
if event.r0.tel[0].trigger_type == 32:
continue
break
print(f"read event id: {event.r0.event_id}, trigger {event.r0.tel[0].trigger_type}")
```
read event id: 2, trigger 1
```python
# plot R1 waveform of module [module]
def view_waveform(chan=0, pix_id=6,i=0):
waveform = event.r1.tel[tel_id].waveform
plt.plot(waveform[chan, pix_id], label=f'pixel {pix_id}')
plt.title(f"module {module}, pixel {pix_id}, channel {channel[chan]}",)
max_now=waveform[chan, pix_id].max()
min_now=waveform[chan, pix_id].min()
plt.legend()
plt.ylabel('DC',fontsize=15)
plt.xlabel('ns',fontsize=15)
# module number
module=0
# channel
chan=0
# ids of pixel in module
pixels_mod=event.lst.tel[0].svc.pixel_ids[module*7:module*7+7]
fig = plt.figure(num=0,figsize=(12,12))
for i,pix in enumerate(pixels_mod):
view_waveform(chan=chan, pix_id=pix,i=i)
#plt.savefig(f"Run{run}_low_level_correction_{channel[chan]}_mod{modu}.png")
```

```python
# plot effect of low-level calibration on module
def view_waveform(chan=0, pix_id=6,i=0):
plot_id=i*2+1
plt.subplot(7,2,plot_id)
plt.plot(event.r0.tel[tel_id].waveform[chan, pix_id,2:38], label='not corrected')
plt.plot(event.r1.tel[tel_id].waveform[chan, pix_id], label='corrected')
plt.title(f"pixel {pix_id}, channel {channel[chan]}",)
mymax=max(newwaveform[chan, pix_id].max(),oldwaveform[chan, pix_id].max()) + 50
mymin=min(newwaveform[chan, pix_id].min(),oldwaveform[chan, pix_id].min()) - 50
plt.ylim(mymin,mymax)
plt.legend()
plot_id=(i*2)+2
plt.subplot(7,2,plot_id)
plt.plot(newwaveform[chan, pix_id]-oldwaveform[chan, pix_id])
plt.ylabel('corrections',fontsize=10)
# module number
module=0
# ids of pixel in module
pixels_mod=event.lst.tel[0].svc.pixel_ids[module*7:module*7+7]
# R1 (corrected)
newwaveform = event.r1.tel[tel_id].waveform
# R0 (not corrected)
oldwaveform = event.r0.tel[tel_id].waveform[:,:,2:38]
for i,pix in enumerate(pixels_mod):
for chan in(np.arange(2)):
plt.figure(num=chan,figsize=(12,24))
# plot waveform of selected channel
view_waveform(chan=chan, pix_id=pix,i=i)
#plt.savefig(f"Run{run}_low_level_correction_{channel[chan]}_mod{modu}.png")
```
```python
# integrate the charge on 12 ns around the peak value
config = Config({
"LocalPeakWindowSum": {
"window_shift": 5,
"window_width": 11
}
})
integrator = LocalPeakWindowSum(config=config)
waveform=event.r1.tel[0].waveform
image, peakpos = integrator(waveform)
fig = plt.figure(figsize=(16, 16))
for chan in(np.arange(2)):
ax = plt.subplot(2, 2, chan+1)
disp = CameraDisplay(event.inst.subarray.tels[0].camera)
disp.image = image[chan]
#disp.set_limits_minmax(2000,4000)
disp.cmap = plt.cm.coolwarm
disp.axes.text(2.0, 0, f'{channel[chan]} charge (DC)', rotation=90)
disp.add_colorbar()
ax = plt.subplot(2, 2, chan+3)
disp = CameraDisplay(event.inst.subarray.tels[0].camera)
disp.image = peakpos[chan]
disp.cmap = plt.cm.coolwarm
disp.set_limits_minmax(0,35)
disp.axes.text(2.0, 0, f'{channel[chan]} time (ns)', rotation=90)
disp.add_colorbar()
disp.update()
#plt.savefig(f"Run{run}_event_{event.lst.tel[0].evt.event_id}_charge_time.png")
```

```python
# Plot the part of the waveform that is integrated
# (this works only after running the cell above)
fig = plt.figure(0,figsize=(12,12))
# consider only 36 samples
samples=np.arange(0,36)
# chose the module
module=0
module_rank=np.where(event.lst.tel[0].svc.module_ids==module)
# find pixel index in module
pix_in_mod=event.lst.tel[0].svc.pixel_ids[module_rank[0][0]*7:module_rank[0][0]*7+7]
for chan in(np.arange(2)):
plt.subplot(1,2,chan+1)
for i,pix in enumerate(pix_in_mod):
# samples used to calculate the charge
start=int(peakpos[chan,pix]-integrator.window_shift)
stop=int(start+integrator.window_width)
used_samples=np.arange(start,stop)
used=waveform[chan,pix,start:stop]
plt.plot(waveform[chan,pix,], color='b', label='all samples')
plt.plot(used_samples,used, color='r', label='integrated samples')
if i==0:
plt.legend()
plt.ylabel("[DC]")
plt.xlabel(f"{channel[chan]} waveforms in module {module}")
plt.ylim(-100,2500)
fig.savefig(f"Run{run}_waverforms_module_{module}.png")
```

```python
# flat field calculations
from ctapipe.calib.camera.pedestals import PedestalIntegrator
from ctapipe.calib.camera.flatfield import FlasherFlatFieldCalculator
# configuration for the pedestal charge integrator
ped_config = Config({
"FixedWindowSum": {
"window_start": 11,
"window_width": 11,
}
})
# configuration for the flatfield charge integrator
ff_config = Config({
"LocalPeakWindowSum": {
"window_shift": 4,
"window_width": 11,
}
})
ped_calculator = PedestalIntegrator(tel_id=0,
sample_size=100,
charge_median_cut_outliers = [-4,4],
charge_std_cut_outliers = [-4,4],
charge_product="FixedWindowSum",
config=ped_config)
ff_calculator = FlasherFlatFieldCalculator(tel_id = 0,
sample_size=100,
sample_duration = 1000,
charge_cut_outliers = [-0.4,0.4],
time_cut_outliers = [0,30],
charge_product = "LocalPeakWindowSum",
config=ff_config)
calib_event=0
ped_event = False
ped_initialized = False
initialized = False
for i, event in enumerate(reader):
# create r1
r0calib.calibrate(event)
# get link to monitoring containers
if not initialized:
ped_data = event.mon.tel[tel_id].pedestal
ff_data = event.mon.tel[tel_id].flatfield
status_data = event.mon.tel[tel_id].pixel_status
calib_data = event.mon.tel[tel_id].calibration
# if new pedestal calculation
if event.lst.tel[0].evt.tib_masked_trigger == 32:
if ped_calculator.calculate_pedestals(event):
ped_event = True
print(f"new pedestal at event n. {event.r0.event_id} ({i+1})")
# consider flat field events only after first pedestal event (for pedestal mask initialization)
elif event.lst.tel[0].evt.tib_masked_trigger == 1 and event.r1.tel[tel_id].waveform.max()>1000:
if ff_calculator.calculate_relative_gain(event):
calib_event+=1
print(f"new flatfield at event n. {event.r0.event_id} ({i+1})")
# consider values only after first flat field event (for flat field mask initialization)
if calib_event > 1:
# mask from pedestal and flat-field data
monitoring_unusable_pixels= np.logical_or(status_data.pedestal_failing_pixels,
status_data.flatfield_failing_pixels)
# calibration unusable pixels are an OR of all masks
calib_data.unusable_pixels = np.logical_or(monitoring_unusable_pixels,status_data.hardware_failing_pixels)
# Extract calibration coefficients with F-factor method
# Assume fixed F2 factor; F2 = 1 + Var(gain)/Mean(gain)**2 must be known from elsewhere
F2 =1.2
# calculate photon-electrons
pe = F2*(ff_data.charge_median - ped_data.charge_median)**2/(ff_data.charge_std**2 - ped_data.charge_std**2)
masked_pe = np.ma.array(pe, mask=calib_data.unusable_pixels)
break
```
new pedestal at event n. 201 (199)
new flatfield at event n. 202 (200)
new pedestal at event n. 399 (397)
new flatfield at event n. 404 (402)
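For reference, the photo-electron estimate computed in the cell above is just the F-factor relation already stated in the code comments, restated here in display form:

$$N_{pe} = F^2 \, \frac{\left(\langle Q_{FF}\rangle - \langle Q_{ped}\rangle\right)^2}{\sigma_{FF}^2 - \sigma_{ped}^2}, \qquad F^2 = 1 + \frac{\mathrm{Var}(\mathrm{gain})}{\langle \mathrm{gain}\rangle^2}$$

where $\langle Q\rangle$ and $\sigma$ are the per-pixel median charge and charge standard deviation of the flat-field (FF) and pedestal (ped) samples.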
```python
# plot results
mask = calib_data.unusable_pixels
# charge
fig = plt.figure(10,figsize=(16, 5))
image = ff_data.charge_median
for chan in(np.arange(2)):
ax = plt.subplot(1, 2, chan+1)
disp = CameraDisplay(event.inst.subarray.tels[tel_id].camera)
disp.highlight_pixels(mask[chan])
disp.image = image[chan]
disp.cmap = plt.cm.coolwarm
disp.axes.text(2.4, 0, 'charge median', rotation=90)
disp.add_colorbar()
# time
fig = plt.figure(11,figsize=(16, 5))
image = ff_data.time_median
for chan in(np.arange(2)):
ax = plt.subplot(1, 2, chan+1)
disp = CameraDisplay(event.inst.subarray.tels[tel_id].camera)
disp.highlight_pixels(mask[chan])
disp.image = image[chan]
disp.cmap = plt.cm.coolwarm
disp.axes.text(2.4, 0, 'time', rotation=90)
disp.add_colorbar()
#pe
fig = plt.figure(12,figsize=(16, 5))
image = pe
for chan in(np.arange(2)):
ax = plt.subplot(1, 2, chan+1)
disp = CameraDisplay(event.inst.subarray.tels[tel_id].camera)
disp.highlight_pixels(mask[chan])
disp.set_limits_minmax(0,150)
disp.image = image[chan]
disp.cmap = plt.cm.coolwarm
disp.axes.text(2.4, 0, 'pe', rotation=90)
disp.add_colorbar()
# plot some histograms
for chan in np.arange(2):
n_pe = pe[chan]
gain_median = ff_data.relative_gain_median[chan]
charge_median = ff_data.charge_median[chan]
charge_std = ff_data.charge_std[chan]
median_ped = ped_data.charge_median[chan]
ped_std = ped_data.charge_std[chan]
# select good pixels
select = np.logical_not(mask[chan])
#select = mask[chan]
fig = plt.figure(chan,figsize=(12,18))
fig.suptitle(f"channel: {channel[chan]}", fontsize=25)
# charge
plt.subplot(321)
median= int(np.median(charge_median[select]))
rms= np.std(charge_median[select])
plt.title(f"Median {median:3.2f}, std {rms:5.0f}")
plt.xlabel('charge (ADC)',fontsize=20)
plt.ylabel('pixels',fontsize=20)
plt.hist(charge_median[select])
# signal std
plt.subplot(322)
plt.ylabel('pixels',fontsize=20)
plt.xlabel('charge std',fontsize=20)
median= np.median(charge_std[select])
rms= np.std(charge_std[select])
plt.title(f"Median {median:3.2f}, std {rms:3.2f}")
plt.hist(charge_std[select])
# pedestal charge
plt.subplot(323)
plt.ylabel('pixels',fontsize=20)
plt.xlabel('pedestal',fontsize=20)
median= np.median(median_ped[select])
rms= np.std(median_ped[select])
plt.title(f"Median {median:3.2f}, std {rms:3.2f}")
plt.hist(median_ped[select])
# pedestal std
plt.subplot(324)
plt.ylabel('pixels',fontsize=20)
plt.xlabel('pedestal std',fontsize=20)
median= np.median(ped_std[select])
rms= np.std(ped_std[select])
plt.title(f"Median {median:3.2f}, std {rms:3.2f}")
plt.hist(ped_std[select])
# relative gain
plt.subplot(325)
plt.ylabel('pixels',fontsize=20)
plt.xlabel('relative gain',fontsize=20)
plt.hist(gain_median[select])
median= np.median(gain_median[select])
rms= np.std(gain_median[select])
plt.title(f"Relative gain {median:3.2f}, std {rms:5.2f}")
# photon electrons
plt.subplot(326)
plt.ylabel('pixels',fontsize=20)
plt.xlabel('pe',fontsize=20)
median= np.median(n_pe[select])
rms= np.std(n_pe[select])
plt.title(f"Median {median:3.2f}, std {rms:3.2f}")
plt.hist(n_pe[select],range=(0,200))
```
```python
# use the tool to write calibration coefficients
from lstchain.tools.calc_camera_calibration import CalibrationHDF5Writer
calibration_tool= CalibrationHDF5Writer()
calibration_tool.print_help()
```
Generate a HDF5 file with camera calibration coefficients
Options
-------
Arguments that take values are actually convenience aliases to full
Configurables, whose aliases are listed on the help line. For more information
on full configurables, see '--help-all'.
--input_file=<Unicode> (EventSource.input_url)
Default: ''
Path to the input file containing events.
--output_file=<Unicode> (CalibrationHDF5Writer.output_file)
Default: 'calibration.hdf5'
Name of the output file
--log_file=<Unicode> (CalibrationHDF5Writer.log_file)
Default: 'None'
Name of the log file
--max_events=<Int> (EventSource.max_events)
Default: None
Maximum number of events that will be read from the file
--pedestal_file=<Unicode> (LSTR0Corrections.pedestal_path)
Default: ''
Path to the LST pedestal binary file
--flatfield_product=<CaselessStrEnum> (CalibrationHDF5Writer.flatfield_product)
Default: 'FlasherFlatFieldCalculator'
Choices: ['FlasherFlatFieldCalculator', 'FlasherFlatFieldCalculator']
FlatFieldCalculator to use.
--pedestal_product=<CaselessStrEnum> (CalibrationHDF5Writer.pedestal_product)
Default: 'PedestalIntegrator'
Choices: ['PedestalIntegrator', 'PedestalIntegrator']
PedestalCalculator to use.
--r0calibrator_product=<CaselessStrEnum> (CalibrationHDF5Writer.r0calibrator_product)
Default: 'NullR0Calibrator'
Choices: ['LSTR0Corrections', 'NullR0Calibrator']
CameraR0Calibrator to use.
--log-level=<Enum> (Application.log_level)
Default: 30
Choices: (0, 10, 20, 30, 40, 50, 'DEBUG', 'INFO', 'WARN', 'ERROR', 'CRITICAL')
Set the log level by value or name.
--config=<Unicode> (Tool.config_file)
Default: ''
name of a configuration file with parameters to load in addition to command-
line parameters
To see all available configurables, use `--help-all`
```python
#
calibration_tool.run(argv=['--config','/astro/users/cassol/soft/python/cta-lstchain/lstchain/tools/camera_calibration_param.json'])
```
INFO [CalibrationHDF5Writer] (tool/initialize): ctapipe version 0.6.2.post180+git5a1a6d4
INFO [CalibrationHDF5Writer] (tool/run): Starting: CalibrationHDF5Writer
DEBUG [CalibrationHDF5Writer] (calc_camera_calibration/setup): Input file has file 53004 events
INFO [CalibrationHDF5Writer.FlasherFlatFieldCalculator] (flatfield/__init__): extractor <ctapipe.image.extractor.LocalPeakWindowSum object at 0x7fd7463a4128>
INFO [CalibrationHDF5Writer.FlasherFlatFieldCalculator] (flatfield/__init__): Used events statistics : 100
INFO [CalibrationHDF5Writer.PedestalIntegrator] (pedestals/__init__): extractor <ctapipe.image.extractor.FixedWindowSum object at 0x7fd7442c3908>
INFO [CalibrationHDF5Writer.PedestalIntegrator] (pedestals/__init__): Used events statistics : 100
DEBUG [CalibrationHDF5Writer] (calc_camera_calibration/setup): Open output file calibration.hdf5
INFO [CalibrationHDF5Writer] (tool/run): CONFIG: {'CalibrationHDF5Writer': {'config_file': '/astro/users/cassol/soft/python/cta-lstchain/lstchain/tools/camera_calibration_param.json', 'flatfield_product': 'FlasherFlatFieldCalculator', 'log_datefmt': '%Y-%m-%d %H:%M:%S', 'log_file': 'log.txt', 'log_format': '%(levelname)s [%(name)s] (%(module)s/%(funcName)s): %(message)s', 'log_level': 10, 'minimum_charge': 800.0, 'output_file': 'calibration.hdf5', 'pedestal_product': 'PedestalIntegrator', 'r0calibrator_product': 'LSTR0Corrections'}}
DEBUG [CalibrationHDF5Writer] (calc_camera_calibration/start): Start loop
DEBUG [CalibrationHDF5Writer] (calc_camera_calibration/start): Event 0
DEBUG [CalibrationHDF5Writer] (calc_camera_calibration/start): Write pedestal data at event n. 199, id 199 stat = 100 events
DEBUG [CalibrationHDF5Writer] (calc_camera_calibration/start): Write flatfield data at event n. 200, id 200 stat = 100 events
DEBUG [CalibrationHDF5Writer] (calc_camera_calibration/start): Write pixel_status data
DEBUG [CalibrationHDF5Writer] (calc_camera_calibration/start): Write calibration data
DEBUG [CalibrationHDF5Writer] (calc_camera_calibration/start): Write pedestal data at event n. 397, id 397 stat = 100 events
DEBUG [CalibrationHDF5Writer] (calc_camera_calibration/start): Write flatfield data at event n. 402, id 402 stat = 100 events
DEBUG [CalibrationHDF5Writer] (calc_camera_calibration/start): Write pixel_status data
DEBUG [CalibrationHDF5Writer] (calc_camera_calibration/start): Write calibration data
DEBUG [CalibrationHDF5Writer] (calc_camera_calibration/start): Write pedestal data at event n. 597, id 597 stat = 100 events
DEBUG [CalibrationHDF5Writer] (calc_camera_calibration/start): Write flatfield data at event n. 602, id 602 stat = 100 events
DEBUG [CalibrationHDF5Writer] (calc_camera_calibration/start): Write pixel_status data
DEBUG [CalibrationHDF5Writer] (calc_camera_calibration/start): Write calibration data
DEBUG [CalibrationHDF5Writer] (calc_camera_calibration/start): Write pedestal data at event n. 795, id 795 stat = 100 events
DEBUG [CalibrationHDF5Writer] (calc_camera_calibration/start): Write flatfield data at event n. 804, id 804 stat = 100 events
DEBUG [CalibrationHDF5Writer] (calc_camera_calibration/start): Write pixel_status data
DEBUG [CalibrationHDF5Writer] (calc_camera_calibration/start): Write calibration data
DEBUG [CalibrationHDF5Writer] (calc_camera_calibration/start): Write pedestal data at event n. 995, id 995 stat = 100 events
DEBUG [CalibrationHDF5Writer] (calc_camera_calibration/start): Last event, count = 999
DEBUG [CalibrationHDF5Writer] (calc_camera_calibration/start): Write pedestal data at event n. 1000, id 1000 stat = 2 events
DEBUG [CalibrationHDF5Writer] (calc_camera_calibration/start): Write flatfield data at event n. 1000, id 1000 stat = 98 events
DEBUG [CalibrationHDF5Writer] (calc_camera_calibration/start): Write pixel_status data
DEBUG [CalibrationHDF5Writer] (calc_camera_calibration/start): Write calibration data
INFO [CalibrationHDF5Writer] (tool/run): Finished: CalibrationHDF5Writer
INFO [CalibrationHDF5Writer] (tool/run): Output: /scratch4/franca/soft/cta-lstchain/notebooks/calibration.hdf5
DEBUG [CalibrationHDF5Writer] (tool/run): PROVENANCE: '[
{
"activity_name": "CalibrationHDF5Writer",
"activity_uuid": "e3dc5342-00df-4e24-8f80-11cda00ae71b",
"start": {
"time_utc": "2019-07-30T08:04:58.560"
},
"stop": {
"time_utc": "2019-07-30T08:05:25.191"
},
"system": {
"ctapipe_version": "0.6.2.post180+git5a1a6d4",
"ctapipe_resources_version": "0.2.15",
"pyhessio_version": "2.1.1",
"eventio_version": "0.20.3",
"ctapipe_svc_path": ":/astro/users/cassol/soft/python/ctapipe_io_lst:/astro/users/cassol/soft/python/WriteCameraGeometry",
"executable": "/scratch4/CTA_soft/anaconda3/envs/cta-dev/bin/python",
"platform": {
"architecture_bits": "64bit",
"architecture_linkage": "",
"machine": "x86_64",
"processor": "x86_64",
"node": "marcta2.in2p3.fr",
"version": "#1 SMP Tue May 14 15:23:27 CDT 2019",
"system": "Linux",
"release": "3.10.0-957.12.2.el7.x86_64",
"libcver": [
"glibc",
"2.9"
],
"num_cpus": 24,
"boot_time": "2019-06-28T12:00:52.000"
},
"python": {
"version_string": "3.6.4 |Anaconda, Inc.| (default, Jan 16 2018, 18:10:19) \n[GCC 7.2.0]",
"version": [
"3",
"6",
"4"
],
"compiler": "GCC 7.2.0",
"implementation": "CPython"
},
"environment": {
"CONDA_DEFAULT_ENV": "cta-dev",
"CONDA_PREFIX": "/scratch4/CTA_soft/anaconda3/envs/cta-dev",
"CONDA_PYTHON_EXE": "/scratch4/CTA_soft/anaconda3/bin/python",
"CONDA_EXE": "/scratch4/CTA_soft/anaconda3/bin/conda",
"CONDA_PROMPT_MODIFIER": "(cta-dev) ",
"CONDA_SHLVL": "1",
"PATH": "/scratch4/CTA_soft/anaconda3/envs/cta-dev/bin:/scratch4/CTA_soft/anaconda3/envs/cta-dev/bin:/cern/root/root_v6.12.04/bin:/scratch4/CTA_soft/eclipse/eclipse:/scratch4/CTA_soft/anaconda3/bin:/scratch4/CTA_soft/bin:/cta/soft/SL7/CamerasToACTL/Build.Release/bin:/cta/soft/SL7/bin:/usr/lib64/qt-3.3/bin:/usr/bin:/bin:/usr/sbin:/sbin:/usr/local/openssh/bin:/usr/local/sbin:/astro/users/cassol/bin",
"LD_LIBRARY_PATH": null,
"DYLD_LIBRARY_PATH": null,
"USER": "cassol",
"HOME": "/astro/users/cassol",
"SHELL": "/bin/bash"
},
"arguments": [
"/scratch4/CTA_soft/anaconda3/envs/cta-dev/lib/python3.6/site-packages/ipykernel_launcher.py",
"-f",
"/run/user/911/jupyter/kernel-289b4332-cf01-4183-afde-2bf35847fcd8.json"
],
"start_time_utc": "2019-07-30T08:04:58.664"
},
"input": [
{
"url": "/ctadata/franca/LST/LST-1.1.Run00472.0000.fits.fz",
"role": "dl0.sub.evt"
},
{
"url": "/ctadata/franca/LST/LST-1.1.Run00472.0000.fits.fz",
"role": "r0.sub.evt"
},
{
"url": "/ctadata/franca/LST/LST-1.2.Run00472.0000.fits.fz",
"role": "r0.sub.evt"
},
{
"url": "/ctadata/franca/LST/LST-1.3.Run00472.0000.fits.fz",
"role": "r0.sub.evt"
},
{
"url": "/ctadata/franca/LST/LST-1.4.Run00472.0000.fits.fz",
"role": "r0.sub.evt"
},
{
"url": "/scratch4/franca/soft/ctapipe-extra/ctapipe_resources/optics.ecsv.txt",
"role": "dl0.tel.svc.optics"
},
{
"url": "/astro/users/cassol/soft/python/WriteCameraGeometry/LSTCam-002.camgeom.fits.gz",
"role": "dl0.tel.svc.camera"
}
],
"output": [
{
"url": "/scratch4/franca/soft/cta-lstchain/notebooks/calibration.hdf5",
"role": "mon.tel.calibration"
}
],
"config": {
"CalibrationHDF5Writer": {
"config_file": "/astro/users/cassol/soft/python/cta-lstchain/lstchain/tools/camera_calibration_param.json",
"flatfield_product": "FlasherFlatFieldCalculator",
"log_datefmt": "%Y-%m-%d %H:%M:%S",
"log_file": "log.txt",
"log_format": "%(levelname)s [%(name)s] (%(module)s/%(funcName)s): %(message)s",
"log_level": 10,
"minimum_charge": 800.0,
"output_file": "calibration.hdf5",
"pedestal_product": "PedestalIntegrator",
"r0calibrator_product": "LSTR0Corrections"
}
},
"status": "completed",
"duration_min": 0.44384999999993013
}
]'
```python
# read back the monitoring containers written with the tool calc_camera_calibration.py
from ctapipe.containers import FlatFieldContainer, WaveformCalibrationContainer
from ctapipe.io.hdf5tableio import HDF5TableWriter, HDF5TableReader
ff_data = FlatFieldContainer()
cal_data = WaveformCalibrationContainer()
#with HDF5TableReader('/astro/users/cassol/soft/python/cta-lstchain/lstchain/tools/calibration.hdf5') as h5_table:
with HDF5TableReader('/astro/users/cassol/soft/python/lstchain-test/calibration.hdf5') as h5_table:
assert h5_table._h5file.isopen == True
for cont in h5_table.read('/tel_0/flatfield', ff_data):
print(cont.as_dict())
for calib in h5_table.read('/tel_0/calibration', cal_data):
print(calib.as_dict())
#plt.hist(1/calib.dc_to_pe[0], color='r', histtype='step', bins = 50, stacked=True, fill=False)
h5_table.close()
# Perform some plots
fig = plt.figure(13,figsize=(16, 5))
disp = CameraDisplay(event.inst.subarray.tels[0].camera)
disp.image = calib.unusable_pixels[chan]
disp.set_limits_minmax(0,1)
disp.cmap = plt.cm.coolwarm
disp.axes.text(2.4, 0, 'failing pixels', rotation=90)
disp.add_colorbar()
#
select=np.logical_not(calib.unusable_pixels[0])
values=1/calib.dc_to_pe[0]
fig = plt.figure(12,figsize=(16, 5))
plt.hist(values[select], color='r', histtype='step', bins = 50, stacked=True, fill=False)
plt.title(f"ADC per photon-electrons, mean={np.mean(values[select]):5.0f} ADC")
```
Table '/tel_0/flatfield' is missing column 'charge_std_outliers' that is in container FlatFieldContainer. It will be skipped.
Table '/tel_0/calibration' is missing column 'pedestal_per_sample' that is in container WaveformCalibrationContainer. It will be skipped.
{'sample_time': <Quantity 1.61459295 s>, 'sample_time_range': <Quantity [140.7604873, 143.9896732] s>, 'n_events': 100, 'charge_mean': array([[8750.65, 8023.01, 6412.18, ..., 6019.34, 8470.88, 8905.34],
[ 463.79, 0. , 337.79, ..., 0. , 445.92, 461.43]]), 'charge_median': array([[9251.5, 8535. , 6624.5, ..., 6354.5, 8844. , 9329.5],
[ 491.5, 0. , 354. , ..., 0. , 469.5, 483.5]]), 'charge_std': array([[2597.08472859, 2335.80559763, 1917.29261397, ..., 1813.91913943,
2659.01584155, 2688.28660012],
[ 143.03358312, 0. , 109.52984023, ..., 0. ,
148.550172 , 144.58770729]]), 'time_mean': array([[25.29471524, 25.01204876, 25.68331372, ..., 24.86632861,
23.4798646 , 24.21229397],
[25.28042316, 0. , 26.63137701, ..., 0. ,
24.41452561, 24.07448368]]), 'time_median': array([[25.6315467 , 25.55059458, 26.6625068 , ..., 25.61345969,
24.25807469, 24.7955816 ],
[25.68022675, 0. , 27.17320141, ..., 0. ,
24.93063072, 24.9121641 ]]), 'time_std': array([[2.83058873, 3.16934295, 4.51880939, ..., 3.76996467, 4.98322383,
4.05328374],
[3.55216273, 0. , 3.9482311 , ..., 0. , 4.29526504,
5.01458122]]), 'relative_gain_mean': array([[1.30451001, 1.22760165, 0.88209902, ..., 0.88034753, 1.17002552,
1.18932563],
[1.17155036, 0. , 0.84799366, ..., 0. , 1.14570761,
1.23091316]]), 'relative_gain_median': array([[1.22092021, 1.1501553 , 0.87969747, ..., 0.8484229 , 1.1807245 ,
1.23374739],
[1.23678959, 0. , 0.90915423, ..., 0. , 1.16321198,
1.21151003]]), 'relative_gain_std': array([[0.82198046, 0.50933086, 0.37406142, ..., 0.4902031 , 0.27347099,
0.42770867],
[0.67131925, 0. , 1.04037136, ..., 0. , 0.46297653,
0.24645082]]), 'relative_time_median': array([[ 0.32426494, 0.24331283, 1.35522504, ..., 0.30617793,
-1.04920707, -0.51170015],
[-0.0236355 , 0. , 1.46933916, ..., 0. ,
-0.77323153, -0.79169815]]), 'charge_median_outliers': array([[False, False, False, ..., False, False, False],
[False, False, False, ..., False, False, False]]), 'charge_std_outliers': None, 'time_median_outliers': array([[False, False, False, ..., False, False, False],
[False, True, False, ..., True, False, False]])}
{'time': <Quantity 1.61459295 s>, 'time_range': <Quantity [140.7604873, 143.9896732] s>, 'dc_to_pe': array([[0.00167079, 0.00193604, 0.00221365, ..., 0.0024197 , 0.00152392,
0.00157557],
[0.03598056, -inf, 0.0554121 , ..., -inf, 0.03557958,
0.03805101]]), 'pedestal_per_sample': None, 'time_correction': array([[-0.32426494, -0.24331283, -1.35522504, ..., -0.30617793,
1.04920707, 0.51170015],
[ 0.0236355 , -0. , -1.46933916, ..., -0. ,
0.77323153, 0.79169815]]), 'n_pe': array([[15.45729144, 16.52410704, 14.6643551 , ..., 15.37598668,
13.47751264, 14.69926461],
[17.68444642, -0.33549366, 19.61588304, ..., -8.08172832,
16.7046134 , 18.39766368]]), 'unusable_pixels': array([[False, False, False, ..., False, False, False],
[False, True, False, ..., True, False, False]])}
Text(0.5,1,'ADC per photon-electrons, mean= 495 ADC')


|
cta-observatoryREPO_NAMEcta-lstchainPATH_START.@cta-lstchain_extracted@cta-lstchain-main@notebooks@[email protected]_END.py
|
{
"filename": "Compatibility-Policy.md",
"repo_name": "crossbario/crossbar",
"repo_path": "crossbar_extracted/crossbar-master/docs-old/pages/about/Compatibility-Policy.md",
"type": "Markdown"
}
|
title: Compatibility Policy
toc: [Documentation, Compatibility Policy]
# Compatibility Policy
This document describes our compatibility policies for Crossbar.io:
1. [Backward Compatibility of Releases](#backward-compatibility-of-releases)
2. [Compatibility with WAMP Client Libraries](#compatibility-with-wamp-client-libraries)
## Backward Compatibility of Releases
It is important to first define the scope of "backward compatibility". Crossbar.io has a backward compatibility policy defined with respect to the following aspects and areas:
1. WAMP protocol (the wire level)
2. WAMP meta API
3. Crossbar.io specific WAMP meta API
4. Crossbar.io node configuration file format
5. Crossbar.io command line
Crossbar.io (after the first official, stable release 2016.3) follows a strict backwards compatibility policy. **We promise to do our best to not break any of the above.**
We consider the above list to be an exhaustive description of the **public API** of Crossbar.io. Anything else isn't public, and you should avoid relying on private things.
> **IMPORTANT**: Everything not listed above is subject to change at any time. In particular, this applies to functions and classes from the Crossbar.io code base. You absolutely MUST NOT import any of these directly in your own code and applications. The Crossbar.io code base itself should be considered fully internal, private and an implementation artifact. Of course, since Crossbar.io is fully open-source, we cannot technically stop you from ignoring this advice. However, you should note that due to the AGPL license of Crossbar.io, you would need to follow the requirements of the AGPL if you were to import and use the Crossbar.io source code directly. Also not covered by our compatibility policy is the *internal management API* of Crossbar.io. This API isn't for public consumption, but for management via the upcoming Crossbar.io DevOps Center. The management API is also covered by an API license that comes with some strings attached that effectively disallow any third-party use. Please see the `LICENSE-FOR-API` document in the Crossbar.io repository for complete details.
**Details**
If you have a WAMP client component that is connecting to Crossbar.io via the WAMP protocol, we ensure that this component will continue to work as new Crossbar.io releases are published.
We think this is an extremely important aspect. Consider an embedded device with a WAMP component burned into the firmware. Even if you have a way of updating and upgrading the device firmware in the field (which you totally should have!), we believe doing so is often complex and requires coordination specific to the device and the application or solution it is part of. That needs to be under your control, and Crossbar.io should not impose additional requirements and restrictions.
What if new features are introduced in the WAMP protocol? Firstly, the WAMP protocol now (2016) has been stable for quite some time. Secondly, if there are new developments at the WAMP protocol level, we ensure that these changes to the WAMP protocol are made in a backwards compatible way (that is, Crossbar.io will be able to talk to "old" and "new" clients at the same time).
We make the same promises for the WAMP meta API as implemented in Crossbar.io, and the Crossbar.io specific WAMP meta API.
**In other words, our policy is: existing WAMP clients MUST NOT break.**
> IMPORTANT: We do make these promises only for WAMP clients talking over the WAMP protocol, and connecting via an actual WAMP transport (like TCP). Whether the client is started externally or started by Crossbar.io as **guest workers** doesn't matter. We do NOT make these promises for WAMP Python based components running side-by-side in **router workers**, or started in separate **container workers**. In fact, the possibility to start Python components in such a way (in router/container workers, rather than in guest workers) might be deprecated or made private in the future.
Regarding the Crossbar.io node configuration file format, our backwards compatibility policy works slightly differently. An "old" node configuration file can be upgraded to a "new" format from the Crossbar.io command line (`crossbar upgrade`). The node configuration file has an embedded version number, and Crossbar.io uses this to upgrade the configuration file stepwise (one version increment at a time) until the final, current format has been reached.
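To make the "stepwise" behaviour concrete, here is a minimal, purely illustrative Python sketch of the idea; the names (`CURRENT_VERSION`, `MIGRATIONS`, `upgrade_config`) are hypothetical and are not Crossbar.io's actual implementation, which you invoke via `crossbar upgrade`:

```python
# Illustrative sketch only -- not Crossbar.io code. It shows the idea of
# upgrading a version-tagged configuration one increment at a time.
CURRENT_VERSION = 3

def _v1_to_v2(config):
    # hypothetical migration step: restructure whatever changed between format v1 and v2
    config["version"] = 2
    return config

def _v2_to_v3(config):
    # hypothetical migration step: add a new section with a sensible default
    config.setdefault("controller", {})
    config["version"] = 3
    return config

MIGRATIONS = {1: _v1_to_v2, 2: _v2_to_v3}

def upgrade_config(config):
    """Apply one-version-at-a-time migrations until the current format is reached."""
    while config.get("version", 1) < CURRENT_VERSION:
        config = MIGRATIONS[config["version"]](config)
    return config
```

The point of the stepwise design is that an arbitrarily old configuration file can still be brought forward, because each published format version only needs a migration to the next one.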
Regarding the Crossbar.io command line arguments and parameters: these are simply "extend only". That is, a command line option available today will continue to be supported "as is". New commands, arguments or parameter values might be added from time to time (though we don't expect much here), but nothing will be removed.
## Compatibility with WAMP Client Libraries
WAMP is an open standard, and a major focus is on interoperability between implementations from different parties. We also believe that we have a track record of being open and supportive towards third parties. We deeply believe in open standards, open source and not discriminating between implementations. We don't like vendor lock-in.
On the other hand, as there are now literally dozens of WAMP client library implementations out there, most not under our control, we cannot guarantee interoperability or long-term support of these particular clients.
**We are committed to maintaining and supporting the Autobahn family of WAMP client libraries,** and we work on a best-effort basis to work nice with other WAMP client library implementors.
|
crossbarioREPO_NAMEcrossbarPATH_START.@crossbar_extracted@crossbar-master@docs-old@pages@[email protected]@.PATH_END.py
|
{
"filename": "audio_classifier.md",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/lite/g3doc/api_docs/python/tflite_model_maker/audio_classifier.md",
"type": "Markdown"
}
|
page_type: reference
description: APIs to train an audio classification model.
<link rel="stylesheet" href="/site-assets/css/style.css">
<!-- DO NOT EDIT! Automatically generated file. -->
<div itemscope itemtype="http://developers.google.com/ReferenceObject">
<meta itemprop="name" content="tflite_model_maker.audio_classifier" />
<meta itemprop="path" content="Stable" />
</div>
# Module: tflite_model_maker.audio_classifier
<!-- Insert buttons and diff -->
<table class="tfo-notebook-buttons tfo-api nocontent" align="left">
<td>
<a target="_blank" href="https://github.com/tensorflow/examples/blob/tflmm/v0.4.2/tensorflow_examples/lite/model_maker/public/audio_classifier/__init__.py">
<img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />
View source on GitHub
</a>
</td>
</table>
APIs to train an audio classification model.
#### Tutorial:
<a href="https://colab.research.google.com/github/googlecodelabs/odml-pathways/blob/main/audio_classification/colab/model_maker_audio_colab.ipynb">https://colab.research.google.com/github/googlecodelabs/odml-pathways/blob/main/audio_classification/colab/model_maker_audio_colab.ipynb</a>
#### Demo code:
<a href="https://github.com/tensorflow/examples/blob/master/tensorflow_examples/lite/model_maker/demo/audio_classification_demo.py">https://github.com/tensorflow/examples/blob/master/tensorflow_examples/lite/model_maker/demo/audio_classification_demo.py</a>
## Classes
[`class AudioClassifier`](../tflite_model_maker/audio_classifier/AudioClassifier): Audio classifier for training/inference and exporting.
[`class BrowserFftSpec`](../tflite_model_maker/audio_classifier/BrowserFftSpec): Model good at detecting speech commands, using Browser FFT spectrum.
[`class DataLoader`](../tflite_model_maker/audio_classifier/DataLoader): DataLoader for audio tasks.
[`class YamNetSpec`](../tflite_model_maker/audio_classifier/YamNetSpec): Model good at detecting environmental sounds, using YAMNet embedding.
## Functions
[`create(...)`](../tflite_model_maker/audio_classifier/create): Loads data and retrains the model.
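As a rough, hedged usage sketch (only `YamNetSpec`, `DataLoader`, `AudioClassifier` and `create` are documented on this page; `DataLoader.from_folder`, `split`, `evaluate` and `export` are assumed from the general Model Maker workflow and may differ in name or signature -- see the tutorial linked above for the authoritative version):

```python
# Hedged sketch of a typical training/export flow; helper names marked
# "assumed" are not documented on this page and may differ.
from tflite_model_maker import audio_classifier

spec = audio_classifier.YamNetSpec()                              # documented above
data = audio_classifier.DataLoader.from_folder(spec, 'sounds/')   # assumed helper
train_data, test_data = data.split(0.8)                           # assumed helper
model = audio_classifier.create(train_data, spec)                 # documented above
model.evaluate(test_data)                                         # assumed helper
model.export(export_dir='exported_model/')                        # assumed helper
```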
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@lite@g3doc@api_docs@python@tflite_model_maker@[email protected]_END.py
|
{
"filename": "async_helpers.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/ipython/py3/IPython/core/async_helpers.py",
"type": "Python"
}
|
"""
Async helper functions that are invalid syntax on Python 3.5 and below.
This code is best effort, and may have edge cases not behaving as expected. In
particular it contains a number of heuristics to detect whether code is
effectively async and needs to run in an event loop or not.
Some constructs (like top-level `return`, or `yield`) are taken care of
explicitly to actually raise a SyntaxError and stay as close as possible to
Python semantics.
"""
import ast
import asyncio
import inspect
from functools import wraps
_asyncio_event_loop = None
def get_asyncio_loop():
"""asyncio has deprecated get_event_loop
Replicate it here, with our desired semantics:
- always returns a valid, not-closed loop
- not thread-local like asyncio's,
because we only want one loop for IPython
- if called from inside a coroutine (e.g. in ipykernel),
return the running loop
.. versionadded:: 8.0
"""
try:
return asyncio.get_running_loop()
except RuntimeError:
# not inside a coroutine,
# track our own global
pass
# not thread-local like asyncio's,
# because we only track one event loop to run for IPython itself,
# always in the main thread.
global _asyncio_event_loop
if _asyncio_event_loop is None or _asyncio_event_loop.is_closed():
_asyncio_event_loop = asyncio.new_event_loop()
return _asyncio_event_loop
class _AsyncIORunner:
def __call__(self, coro):
"""
Handler for asyncio autoawait
"""
return get_asyncio_loop().run_until_complete(coro)
def __str__(self):
return "asyncio"
_asyncio_runner = _AsyncIORunner()
class _AsyncIOProxy:
"""Proxy-object for an asyncio
Any coroutine methods will be wrapped in event_loop.run_
"""
def __init__(self, obj, event_loop):
self._obj = obj
self._event_loop = event_loop
def __repr__(self):
return f"<_AsyncIOProxy({self._obj!r})>"
def __getattr__(self, key):
attr = getattr(self._obj, key)
if inspect.iscoroutinefunction(attr):
# if it's a coroutine method,
# return a threadsafe wrapper onto the _current_ asyncio loop
@wraps(attr)
def _wrapped(*args, **kwargs):
concurrent_future = asyncio.run_coroutine_threadsafe(
attr(*args, **kwargs), self._event_loop
)
return asyncio.wrap_future(concurrent_future)
return _wrapped
else:
return attr
def __dir__(self):
return dir(self._obj)
def _curio_runner(coroutine):
"""
handler for curio autoawait
"""
import curio
return curio.run(coroutine)
def _trio_runner(async_fn):
import trio
async def loc(coro):
"""
We need the dummy no-op async def to protect from
trio's internal. See https://github.com/python-trio/trio/issues/89
"""
return await coro
return trio.run(loc, async_fn)
def _pseudo_sync_runner(coro):
"""
A runner that does not really allow async execution, and just advance the coroutine.
See discussion in https://github.com/python-trio/trio/issues/608,
Credit to Nathaniel Smith
"""
try:
coro.send(None)
except StopIteration as exc:
return exc.value
else:
# TODO: do not raise but return an execution result with the right info.
raise RuntimeError(
"{coro_name!r} needs a real async loop".format(coro_name=coro.__name__)
)
def _should_be_async(cell: str) -> bool:
"""Detect if a block of code need to be wrapped in an `async def`
Attempt to parse the block of code, it it compile we're fine.
Otherwise we wrap if and try to compile.
If it works, assume it should be async. Otherwise Return False.
Not handled yet: If the block of code has a return statement as the top
level, it will be seen as async. This is a know limitation.
"""
try:
code = compile(
cell, "<>", "exec", flags=getattr(ast, "PyCF_ALLOW_TOP_LEVEL_AWAIT", 0x0)
)
return inspect.CO_COROUTINE & code.co_flags == inspect.CO_COROUTINE
except (SyntaxError, MemoryError):
return False
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@ipython@py3@IPython@core@[email protected]_END.py
|
{
"filename": "test_results.py",
"repo_name": "ConorMacBride/mcalf",
"repo_path": "mcalf_extracted/mcalf-main/src/mcalf/tests/models/test_results.py",
"type": "Python"
}
|
import numpy as np
import pytest
from mcalf.models import FitResult, FitResults
from mcalf.models import ModelBase as DummyModel
fitted_parameters = [1, 2, 1000.2, 1001.8, 5]
fit_info = {'chi2': 1.4, 'classification': 2, 'profile': 'abc',
'success': True, 'index': [123, 456, 789]}
def test_fitresult_passthrough():
fit = FitResult(fitted_parameters, fit_info)
assert fit.parameters == [1, 2, 1000.2, 1001.8, 5]
assert len(fit) == 5
assert fit.chi2 == 1.4
assert fit.classification == 2
assert fit.profile == 'abc'
assert isinstance(fit.success, bool) and fit.success
assert fit.index == [123, 456, 789]
# Test that the string representation can be formed without error
repr(fit)
fit.index = [None]*3
repr(fit)
def test_fitresult_velocity():
m = DummyModel(original_wavelengths=[1000.4, 1000.6])
m.stationary_line_core = 1000.5
m.quiescent_wavelength = 2
m.active_wavelength = 3
fit = FitResult(fitted_parameters, fit_info)
assert fit.velocity(m, vtype='quiescent') == pytest.approx(-89.95502249)
assert fit.velocity(m, vtype='active') == pytest.approx(389.80509745)
# Ensure nan is returned if no active component fitted
fitted_parameters_trim = fitted_parameters[:3]
fit = FitResult(fitted_parameters_trim, fit_info)
vel = fit.velocity(m, vtype='active')
assert vel != vel # assert is nan
# Ensure an invalid velocity type is detected
with pytest.raises(ValueError):
vel = fit.velocity(m, vtype='unknown-vtype')
def test_fitresults_init():
fits = FitResults((49, 52), 4, time=12)
assert fits.parameters.shape == (49, 52, 4)
assert fits.chi2.shape == (49, 52)
assert fits.classifications.shape == (49, 52)
assert fits.profile.shape == (49, 52)
assert fits.success.shape == (49, 52)
assert fits.time == 12
with pytest.raises(TypeError): # Should be a tuple
fits = FitResults(10, 3)
with pytest.raises(TypeError): # Should be a tuple of length 2
fits = FitResults((10, 32, 53), 8)
with pytest.raises(ValueError): # Should be an integer >= 1
fits = FitResults((10, 5), 5.5)
with pytest.raises(ValueError): # Should be an integer >= 1
fits = FitResults((10, 5), 0)
def test_fitresults_append():
# Create dummy fit results
fit1 = FitResult(
[2, 6, 254.6, 963.4],
{'chi2': 7.43, 'classification': 4, 'profile': 'absorption',
'success': True, 'index': [12, 34, 81]}
)
fit2 = FitResult(
[9, 2, 724.32, 134.8],
{'chi2': 1.34, 'classification': 2, 'profile': 'emission',
'success': True, 'index': [12, 0, 99]}
)
fit3 = FitResult(
[1, 8, 932.1, 327.5, 3.7, 9, 2, 0.2],
{'chi2': 0.79, 'classification': 1, 'profile': 'both',
'success': False, 'index': [12, 99, 0]}
)
fit4 = FitResult( # With incorrect time index
[6, 4, 356.2, 738.5],
{'chi2': 8.2, 'classification': 3, 'profile': 'absorption',
'success': True, 'index': [3, 0, 25]}
)
fit5 = FitResult( # With unknown profile name
[5, 3, 256.2, 628.5],
{'chi2': 8.1, 'classification': 3, 'profile': 'continuum',
'success': True, 'index': [12, 10, 40]}
)
# Initialise empty FitResults object
fits = FitResults((100, 100), 8, time=12)
# Append dummy fits
fits.append(fit1)
fits.append(fit2)
fits.append(fit3)
with pytest.raises(ValueError): # Time index does not match
fits.append(fit4)
with pytest.raises(ValueError): # Unknown profile
fits.append(fit5)
assert all([a == b for a, b in zip(fits.parameters[34, 81][:4], fit1.parameters)])
assert fits.chi2[34, 81] == fit1.chi2
assert fits.classifications[34, 81] == fit1.classification
assert fits.profile[34, 81] == fit1.profile
assert fits.success[34, 81] == fit1.success
assert all([a == b for a, b in zip(fits.parameters[0, 99][4:], fit2.parameters)])
assert fits.chi2[0, 99] == fit2.chi2
assert fits.classifications[0, 99] == fit2.classification
assert fits.profile[0, 99] == fit2.profile
assert fits.success[0, 99] == fit2.success
assert all([a == b for a, b in zip(fits.parameters[99, 0], fit3.parameters)])
assert fits.chi2[99, 0] == fit3.chi2
assert fits.classifications[99, 0] == fit3.classification
assert fits.profile[99, 0] == fit3.profile
assert fits.success[99, 0] == fit3.success
def test_fitresults_velocities():
m = DummyModel(original_wavelengths=[1000.4, 1000.6])
m.stationary_line_core = 1000.5
m.quiescent_wavelength = 0
m.active_wavelength = 1
fits = FitResults((4, 4), 2)
fits.parameters = np.array([
[[1000.2, 192.4], [826.5, 534.23], [8365.86, 1252.32], [1532.3, 2152.3]],
[[978.73, 753.52], [1253.5, 1329.3], [6423.4, 2355.45], [12.53, 2523.3]],
[[825.8, 862.5], [1759.5, 1000.9], [2633.4, 234.43], [2535.353, 152.34]],
[[896.53, 153.2], [1224.3, 1111.11], [634.54, 2353.97], [242.35, 763.4]]
])
truth_quiescent = np.array([[-8.99550225e+01, -5.21739130e+04, 2.20850375e+06, 1.59460270e+05],
[-6.52773613e+03, 7.58620690e+04, 1.62605697e+06, -2.96242879e+05],
[-5.23838081e+04, 2.27586207e+05, 4.89625187e+05, 4.60225787e+05],
[-3.11754123e+04, 6.71064468e+04, -1.09733133e+05, -2.27331334e+05]])
truth_active = np.array([[-2.42308846e+05, -1.39811094e+05, 7.55082459e+04, 3.45367316e+05],
[-7.40569715e+04, 9.85907046e+04, 4.06281859e+05, 4.56611694e+05],
[-4.13793103e+04, 1.19940030e+02, -2.29706147e+05, -2.54320840e+05],
[-2.54062969e+05, 3.31664168e+04, 4.05838081e+05, -7.10944528e+04]])
result_quiescent = fits.velocities(m, vtype='quiescent')
result_active = fits.velocities(m, vtype='active')
with pytest.raises(ValueError):
fits.velocities(m, vtype='unknown-vtype')
assert result_quiescent == pytest.approx(truth_quiescent)
assert result_active == pytest.approx(truth_active)
|
ConorMacBrideREPO_NAMEmcalfPATH_START.@mcalf_extracted@mcalf-main@src@mcalf@tests@models@[email protected]_END.py
|
{
"filename": "_blend.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/pointcloud/marker/_blend.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class BlendValidator(_plotly_utils.basevalidators.BooleanValidator):
def __init__(self, plotly_name="blend", parent_name="pointcloud.marker", **kwargs):
super(BlendValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
**kwargs
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@pointcloud@marker@[email protected]_END.py
|
{
"filename": "CHANGELOG.md",
"repo_name": "NannyML/nannyml",
"repo_path": "nannyml_extracted/nannyml-main/CHANGELOG.md",
"type": "Markdown"
}
|
# Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [0.12.1] - 2024-09-06
### Fixed
- Fixed component filtering misbehaving for CBPE results. [(#423)](https://github.com/NannyML/nannyml/issues/423)
## [0.12.0] - 2024-09-06
### Fixed
- Fixed broken links in usage logging docs. Cheers once more to [@NeoKish](https://github.com/neokish)! [(#417)](https://github.com/NannyML/nannyml/issues/417)
- Fixed issues with runner type validation due to changes in Pydantic 2 behavior. [(#421)](https://github.com/NannyML/nannyml/issues/421)
- Fixed a typo in one the plotting blueprint modules. Eagle eyes [@nikml](https://github.com/nikml)! [(#418)](https://github.com/NannyML/nannyml/issues/418)
### Added
- Added multiclass support for estimated and realized performance metrics `average_precision` and `business_value`. [(#409)](https://github.com/NannyML/nannyml/issues/409)
- Added threshold value limits for multiclass metrics. [(#411)](https://github.com/NannyML/nannyml/issues/411)
### Changed
- Made the dependencies required for database access optional. Big thanks to [@Duncan-Hunter](https://github.com/Duncan-Hunter)
- Improved denominator checks in CBPE base estimation functions. [(#416)](https://github.com/NannyML/nannyml/issues/416)
- Relaxed constraints for the `rich` dependency. [(#422)](https://github.com/NannyML/nannyml/issues/422)
### Removed
- Dropped support for Python 3.7 as it was causing major issues with dependencies. [(#410)](https://github.com/NannyML/nannyml/issues/410)
## [0.11.0] - 2024-07-19
### Changed
- Updated `Pydantic` to `^2.7.4`, `SQLModel` to `^0.0.19`. [(#401)](https://github.com/NannyML/nannyml/issues/401)
- Removed the `drop_duplicates` step from the `DomainClassifier` for a further speedup. [(#402)](https://github.com/NannyML/nannyml/issues/402)
- Reverted to previous working dependency configuration for `matplotlib` as the current one causes issues in `conda`. [(#403)](https://github.com/NannyML/nannyml/issues/403)
### Fixed
- Added `DomainClassifier` method for drift detection to be run in the CLI.
- Fixed `NaN` handling for multiclass confusion matrix estimation in CBPE. [(#400)](https://github.com/NannyML/nannyml/issues/400)
- Fixed incorrect handling of columns marked as categorical in Wasserstein and Hellinger drift detection methods.
The `treat_as_categorical` value was ignored. We've also added a `treat_as_continuous` column to explicitly mark columns as continuous.
[(#404)](https://github.com/NannyML/nannyml/issues/404)
- Fixed an issue with multiclass `AUROC` calculation and estimation when not all classes are available in a
reference chunk during fitting. [(#405)](https://github.com/NannyML/nannyml/issues/405)
### Added
- Added a new data quality calculator to check if continuous values in analysis data are within the ranges
encountered in the reference data. Big thanks to [@jnesfield](https://github.com/jnesfield)! Still needs some documentation...
[(#408)](https://github.com/NannyML/nannyml/issues/408)
## [0.10.7] - 2024-06-07
### Changed
- Optimized summary stats and overall performance by avoiding unnecessary copy operations and index resets during chunking
[(#390)](https://github.com/NannyML/nannyml/issues/390)
- Optimized performance of `nannyml.base.PerMetricPerColumnResult` filter operations by adding a short-circuit path
when only filtering on period. [(#391)](https://github.com/NannyML/nannyml/issues/391)
- Optimized performance of all data quality calculators by avoiding unnecessary evaluations and avoiding copy and index reset operations
[(#392)](https://github.com/NannyML/nannyml/issues/392)
### Fixed
- Fixed an issue in the Wasserstein "big data heuristic" where outliers caused the binning to cause out-of-memory errors. Thanks! [@nikml](https://github.com/nikml)!
[(#393)](https://github.com/NannyML/nannyml/issues/393)
- Fixed a typo in the `salary_range` values of the synthetic car loan example dataset. `20K - 20K €` is now `20K - 40K €`.
[(#395)](https://github.com/NannyML/nannyml/issues/395)
## [0.10.6] - 2024-05-16
### Changed
- Make predictions optional for performance calculation. When not provided, only AUROC and average precision will be calculated. [(#380)](https://github.com/NannyML/nannyml/issues/380)
- Small DLE docs updates
- Combed through and optimized the reconstruction error calculation with PCA resulting in a nice speedup. Cheers [@nikml](https://github.com/nikml)! [(#385)](https://github.com/NannyML/nannyml/issues/385)
- Updated summary stats value limits to be in line with the rest of the library. Changed from `np.nan` to `None`. [(#387)](https://github.com/NannyML/nannyml/issues/387)
### Fixed
- Fixed a breaking issue in the sampling error calculation for the median summary statistic when there is only a single value for a column. [(#377)](https://github.com/NannyML/nannyml/issues/377)
- Drop `identifier` column from the documentation example for reconstruction error calculation with PCA. [(#382)](https://github.com/NannyML/nannyml/issues/382)
- Fix an issue where default threshold configurations would get changed when setting custom thresholds, bad mutables! [(#386)](https://github.com/NannyML/nannyml/issues/386)
## [0.10.5] - 2024-03-08
### Changed
- Updated dependencies for Python 3.8 and up. [(#375)](https://github.com/NannyML/nannyml/issues/375)
### Added
- Support for the *average precision* metric for binary classification in realized and estimated performance. [(#374)](https://github.com/NannyML/nannyml/issues/374)
## [0.10.4] - 2024-03-04
### Changed
- We've changed the defaults for the `incomplete` parameter in the `SizeBasedChunker` and `CountBasedChunker`
to `keep` from the previous `append`. This means that from now on, by default, you might have an additional
"incomplete" final chunk. Previously these records would have been appended to the last "complete" chunk.
This change was required for some internal developments, and we also felt it made more sense when looking at
continuous monitoring (as the incomplete chunk will be filled up later as more data is appended). [(#367)](https://github.com/NannyML/nannyml/issues/367)
- We've renamed the *Classifier for Drift Detection (CDD)* to the more appropriate *Domain Classifier*. [(#368)](https://github.com/NannyML/nannyml/issues/368)
- Bumped the version of the `pyarrow` dependency to `^14.0.0` if you're running on Python 3.8 or up.
Congrats on your first contribution here [@amrit110](https://github.com/amrit110), much appreciated!
### Fixed
- Continuous distribution plots will now be scaled per chunk, as opposed to globally. [(#369)](https://github.com/NannyML/nannyml/issues/369)
## [0.10.3] - 2024-02-17
### Fixed
- Handle median summary stat calculation failing due to NaN values
- Fix standard deviation summary stat sampling error calculation occasionally returning infinity [(#363)](https://github.com/NannyML/nannyml/issues/363)
- Fix plotting confidence bands when value gaps occur [(#364)](https://github.com/NannyML/nannyml/issues/364)
### Added
- New multivariate drift detection method using a classifier and density ratio estimation.
## [0.10.2] - 2024-02-13
### Changed
- Removed p-value based thresholds for Chi2 univariate drift detection [(#349)](https://github.com/NannyML/nannyml/issues/349)
- Change default thresholds for univariate drift methods to standard deviation based thresholds.
- Add summary stats support to the Runner and CLI [(#353)](https://github.com/NannyML/nannyml/issues/353)
- Add unique identifier columns to included datasets for better joining [(#348)](https://github.com/NannyML/nannyml/issues/348)
- Remove unused `confidence_deviation` properties in CBPE metrics [(#357)](https://github.com/NannyML/nannyml/issues/357)
- Improved error handling: failing metric calculation for a single chunk will no longer stop an entire calculator.
### Added
- Add feature distribution calculators [(#352)](https://github.com/NannyML/nannyml/issues/352)
### Fixed
- Fix join column settings for CLI [(#356)](https://github.com/NannyML/nannyml/issues/356)
- Fix crashes in `UnseenValuesCalculator`
## [0.10.1] - 2023-11-21
- Various small fixes to the docs, thanks once again ghostwriter [@NeoKish](https://github.com/NeoKish)! [(#345)](https://github.com/NannyML/nannyml/issues/345)
- Fixed an issue with estimated accuracy for multiclass classification in CBPE. [(#346)](https://github.com/NannyML/nannyml/issues/346)
## [0.10.0] - 2023-11-21
### Changed
- Telemetry now detects AKS and EKS and NannyML Cloud runtimes. [(#325)](https://github.com/NannyML/nannyml/issues/325)
- Runner was refactored, so it can be extended with premium NannyML calculators and estimators. [(#325)](https://github.com/NannyML/nannyml/issues/325)
- Sped up telemetry reporting to ensure it doesn't hinder performance.
- Some love for the docs as [@santiviquez](https://github.com/santiviquez) tediously standardized variable names. [(#338)](https://github.com/NannyML/nannyml/issues/338)
- Optimize calculations for L-infinity method. [(#340)](https://github.com/NannyML/nannyml/issues/340)
- Refactored the `CalibratorFactory` to align with our other factory implementations. [(#341)](https://github.com/NannyML/nannyml/issues/341)
- Updated the `Calibrator` interface with `*args` and `**kwargs` for easier extension.
- Small refactor to the `ResultComparisonMixin` to allow easier extension.
### Added
- Added support for directly estimating the confusion matrix of multiclass classification models using CBPE.
Big thanks to our appreciated alumnus [@cartgr](https://github.com/cartgr) for the effort (and sorry it took soooo long). [(#287)](https://github.com/NannyML/nannyml/issues/287)
- Added `DatabaseWriter` support for results from `MissingValuesCalculator` and `UnseenValuesCalculator`. Some
excellent work by [@bgalvao](https://github.com/bgalvao), thanks for being a long-time user and supporter!
### Fixed
- Fix issues with calculation and filtering in performance calculation and estimation. [(#321)](https://github.com/NannyML/nannyml/issues/321)
- Fix multivariate reconstruction error plot labels. [(#323)](https://github.com/NannyML/nannyml/issues/323)
- Log a warning when performance metrics for a chunk will return `NaN` value. [(#326)](https://github.com/NannyML/nannyml/issues/326)
- Fix issues with ReadTheDocs build failing
- Fix erroneous `specificity` calculation, both realized and estimated. Well spotted [@nikml](https://github.com/nikml)! [(#334)](https://github.com/NannyML/nannyml/issues/334)
- Fix threshold computation when dealing with `NaN` values. Major thanks to the eagle-eyed [@giodavoli](https://github.com/giodavoli). [(#333)](https://github.com/NannyML/nannyml/issues/333)
- Fix exports for confusion matrix metrics using the `DatabaseWriter`. An inspiring commit that led to some other changes.
Great job [@shezadkhan137](https://github.com/shezadkhan137)! [(#335)](https://github.com/NannyML/nannyml/issues/335)
- Fix incorrect normalization for the business value metric in realized and estimated performance. [(#337)](https://github.com/NannyML/nannyml/issues/337)
- Fix handling `NaN` values when fitting univariate drift. [(#340)](https://github.com/NannyML/nannyml/issues/340)
## [0.9.1] - 2023-07-12
### Changed
- Updated Mendable client library version to deal with styling overrides in the RTD documentation theme
- Removed superfluous limits for confidence bands in the CBPE class (these are present in the metric classes instead)
- Threshold value limiting behaviour (e.g. overriding a value and emitting a warning) will be triggered not only when
the value crosses the threshold but also when it is equal to the threshold value. This is because we interpret the
threshold as a theoretical maximum.
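As a small illustration of the threshold behaviour described above, a custom threshold can be attached per metric; the `nannyml.thresholds.ConstantThreshold` class and the `thresholds` argument below follow the threshold documentation but should be read as assumptions here:

```python
import nannyml as nml
from nannyml.thresholds import ConstantThreshold

# An upper value of 1.0 sits exactly on the theoretical maximum of ROC AUC, so it
# will be limited and a warning emitted, per the entry above.
estimator = nml.CBPE(
    y_true='target', y_pred='y_pred', y_pred_proba='y_pred_proba',
    problem_type='classification_binary',
    metrics=['roc_auc'],
    thresholds={'roc_auc': ConstantThreshold(lower=0.7, upper=1.0)},  # assumed argument
    chunk_size=5000,
)
```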
### Added
- Added a new example notebook walking through a full use case using the NYC Green Taxi dataset, based on the blog of [@santiviquez](https://github.com/santiviquez)
### Fixed
- Fixed broken Docker container build due to changes in public Poetry installation procedure
- Fixed broken image source link in the README, thanks [@NeoKish](https://github.com/NeoKish)!
## [0.9.0] - 2023-06-26
### Changed
- Updated API docs for the `nannyml.io` package, thanks [@maciejbalawejder](https://github.com/maciejbalawejder) [(#286)](https://github.com/NannyML/nannyml/issues/286)
- Restricted versions of `numpy` to be `<1.25`, since there seems to be a change in the `roc_auc` calculation somehow [(#301)](https://github.com/NannyML/nannyml/issues/301)
### Added
- Support for Data Quality calculators in the CLI runner
- Support for Data Quality results in `Ranker` implementations [(#297)](https://github.com/NannyML/nannyml/issues/297)
- Support `mendable` in the docs [(#295)](https://github.com/NannyML/nannyml/issues/295)
- Documentation landing page [(#303)](https://github.com/NannyML/nannyml/issues/303)
- Support for calculations with delayed targets [(#306)](https://github.com/NannyML/nannyml/issues/306)
### Fixed
- Small changes to quickstart, thanks [@NeoKish](https://github.com/NeoKish) [(#291)](https://github.com/NannyML/nannyml/issues/291)
- Fix an issue passing `*args` and `**kwargs` in `Result.filter()` and subclasses [(#298)](https://github.com/NannyML/nannyml/issues/298)
- Double listing of the binary dataset documentation page
- Add missing thresholds to `roc_auc` in `CBPE` [(#294)](https://github.com/NannyML/nannyml/issues/294)
- Fix plotting issue due to introduction of additional values in the 'display names tuple' [(#305)](https://github.com/NannyML/nannyml/issues/305)
- Fix broken exception handling due to inheriting from `BaseException` and not `Exception` [(#307)](https://github.com/NannyML/nannyml/issues/307)
## [0.8.6] - 2023-05-24
### Changed
- Significant QA work on all the documentation, thanks [@santiviquez](https://github.com/santiviquez) and
[@maciejbalawejder](https://github.com/maciejbalawejder)
- Reworked the [`nannyml.runner`](nannyml/runner.py) and the accompanying configuration format to improve flexibility (e.g. setting
custom initialization parameters, running a calculator multiple times, excluding a calculator, ...).
- Added support for custom thresholds to the [`nannyml.runner`](nannyml/runner.py)
- Simplified some of the `nannyml.io` interfaces, especially the [`nannyml.io.RawFilesWriter`](nannyml/io/raw_files_writer.py)
- Reworked the [`nannyml.base.Result`](nannyml/base.py)
- Totally revamped [quickstart documentation](docs/quick.rst) based on a real life dataset, thanks [@jakubnml](https://github.com/jakubnml)
### Added
- Added new calculators to support simple data quality metrics such as counting missing or unseen values.
For more information, check out the [data quality tutorials](https://nannyml.readthedocs.io/en/main/tutorials/data_quality.html).
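A minimal sketch of the new data quality calculators, assuming they are exposed at the package root and that `reference_df`/`analysis_df` are pandas DataFrames (column names are illustrative):

```python
import nannyml as nml

calc = nml.MissingValuesCalculator(
    column_names=['salary_range', 'repaid_loan_on_prev_car'],  # illustrative columns
    chunk_size=5000,
)
calc.fit(reference_df)
results = calc.calculate(analysis_df)
print(results.filter(period='analysis').to_df())
```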
### Fixed
- Fixed an issue where x-axis titles would appear on top of plots
- Removed erroneous checks during calculation of realized regression performance metrics. [(#279)](https://github.com/NannyML/nannyml/issues/279)
- Fixed an issue dealing with `az://` URLs in the CLI, thanks [@michael-nml](https://github.com/michael-nml) [(#283)](https://github.com/NannyML/nannyml/issues/283)
## [0.8.5] - 2023-03-29
### Changed
- Applied new rules for visualizations. Estimated values will be the color indigo and represented with a dashed line.
Calculated values will be blue and have a solid line. This color coding might be overridden in comparison plots.
Data periods will no longer have different colors, we've added some additional text fields to the plot to indicate the data period.
- Cleaned up legends in plots, since there will no longer be a different entry for reference and analysis periods of metrics.
- Removed the lower threshold for default thresholds of the KS and Wasserstein drift detection methods.
### Added
- We've added the `business_value` metric for both estimated and realized binary classification performance. It allows
you to assign a value (or cost) to true positive, true negative, false positive and false negative occurrences.
This can help you track something like a monetary value or business impact of a model as a metric. Read more in the
business value tutorials ([estimated](https://nannyml.readthedocs.io/en/latest/tutorials/performance_estimation/binary_performance_estimation/business_value_estimation.html)
or [realized](https://nannyml.readthedocs.io/en/latest/tutorials/performance_calculation/binary_performance_calculation/business_value_calculation.html))
or the [how it works](https://nannyml.readthedocs.io/en/latest/how_it_works/business_value.html) page.
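A rough sketch of estimating the new metric with CBPE; the `business_value_matrix` argument and its `[[value_TN, value_FP], [value_FN, value_TP]]` layout are assumptions here, see the linked tutorials for the authoritative form:

```python
import nannyml as nml

estimator = nml.CBPE(
    y_true='repaid', y_pred='y_pred', y_pred_proba='y_pred_proba',
    problem_type='classification_binary',
    metrics=['business_value'],
    business_value_matrix=[[0, -100], [-200, 1000]],  # assumed layout, illustrative values
    chunk_size=5000,
)
estimator.fit(reference_df)                # reference_df / analysis_df: placeholder DataFrames
results = estimator.estimate(analysis_df)
results.plot().show()
```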
### Fixed
- Sync quickstart of the README with the dedicated quickstart page. [(#256)](https://github.com/NannyML/nannyml/issues/256)
Thanks [@NeoKish](https://github.com/NeoKish)!
- Fixed incorrect code snippet order in the thresholding tutorial. [(#258)](https://github.com/NannyML/nannyml/issues/258)
Thanks once more to the one and only [@NeoKish](https://github.com/NeoKish)!
- Fixed broken container build that had sneakily been going on for a while
- Fixed incorrect confidence band color in comparison plots [(#259)](https://github.com/NannyML/nannyml/issues/259)
- Fixed incorrect titles and missing legends in comparison plots [(#264)](https://github.com/NannyML/nannyml/issues/264)
- Fixed an issue where numerical series marked as category would cause issues during Chi2 calculation
## [0.8.4] - 2023-03-17
### Changed
- Updated univariate drift methods to no longer store all reference data by default [(#182)](https://github.com/NannyML/nannyml/issues/182)
- Updated univariate drift methods to deal better with missing data [(#202)](https://github.com/NannyML/nannyml/issues/202)
- Updated the included example datasets
- Critical security updates for dependencies
- Updated visualization of multi-level table headers in the docs [(#242)](https://github.com/NannyML/nannyml/issues/242)
- Improved typing support for Result classes using generics
### Added
- Support for estimating the confusion matrix for binary classification [(#191)](https://github.com/NannyML/nannyml/issues/191)
- Added `treat_as_categorical` parameter to univariate drift calculator, see the sketch after this list [(#239)](https://github.com/NannyML/nannyml/issues/239)
- Added comparison plots to help visualize two different metrics at once
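The `treat_as_categorical` option mentioned above can be sketched as follows; the method and column names are illustrative and the exact signature should be checked against the API docs:

```python
import nannyml as nml

calc = nml.UnivariateDriftCalculator(
    column_names=['loan_length', 'driver_tenure', 'salary_range'],
    treat_as_categorical=['loan_length'],           # numeric column handled as categorical
    continuous_methods=['jensen_shannon'],
    categorical_methods=['chi2', 'jensen_shannon'],
    chunk_size=5000,
)
```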
### Fixed
- Fix missing confidence boundaries in some plots [(#193)](https://github.com/NannyML/nannyml/issues/193)
- Fix incorrect metric names on plot y-axes [(#195)](https://github.com/NannyML/nannyml/issues/195)
- Fix broken links to external docs [(#196)](https://github.com/NannyML/nannyml/issues/196)
- Fix missing display name to performance calculation and estimation charts [(#200)](https://github.com/NannyML/nannyml/issues/200)
- Fix missing confidence boundaries for single metric plots [(#203)](https://github.com/NannyML/nannyml/issues/203)
- Fix incorrect code in example notebook for ranking
- Fix result corruption when re-using calculators [(#206)](https://github.com/NannyML/nannyml/issues/206)
- Fix unintentional period filtering [(#199)](https://github.com/NannyML/nannyml/issues/199)
- Fixed some typing issues [(#213)](https://github.com/NannyML/nannyml/issues/213)
- Fixed missing data requirements documentation on regression [(#215)](https://github.com/NannyML/nannyml/issues/215)
- Corrections in the glossary [(#214)](https://github.com/NannyML/nannyml/issues/214), thanks [@sebasmos](https://github.com/sebasmos)!
- Fix missing threshold in plotting legend [(#219)](https://github.com/NannyML/nannyml/issues/219)
- Fix missing annotation in single row & column charts [(#221)](https://github.com/NannyML/nannyml/issues/221)
- Fix outdated performance estimation and calculation docs [(#223)](https://github.com/NannyML/nannyml/issues/223)
- Fix categorical encoding of unseen values for DLE [(#224)](https://github.com/NannyML/nannyml/issues/224)
- Fix incorrect legend for None timeseries [(#235)](https://github.com/NannyML/nannyml/issues/235)
## [0.8.3] - 2023-01-31
### Added
- Added some extra semantic methods on results for easy property access. No dealing with multilevel indexes required.
- Added functionality to compare results and plot that comparison. Early release version.
### Fixed
- Pinned Sphinx version to 4.5.0 in the [documentation requirements](docs/requirements.txt).
Version selector, copy toggle buttons and some styling were broken on RTD due to unintended usage of Sphinx 6 which
treats jQuery in a different way.
## [0.8.2] - 2023-01-24
### Changed
- Log `Ranker` usage
- Remove some redundant parameters in `plot()` function calls for data reconstruction results, univariate drift results,
CBPE results and DLE results.
- Support "single metric/column" arguments in addition to lists in class creation [(#165)](https://github.com/NannyML/nannyml/issues/165)
- Fix incorrect 'None' checks when dealing with defaults in univariate drift calculator
- Multiple updates and corrections to the docs (thanks [@nikml](https://github.com/nikml)!), including:
- Updating univariate drift tutorial
- Updating README
- Update PCA: How it works
- Fix incorrect plots
- Fix quickstart [(#171)](https://github.com/NannyML/nannyml/issues/171)
- Update chunker docstrings to match parameter names, thanks [@jrggementiza](https://github.com/jrggementiza)!
- Make sequence 'None' checks more readable, thanks [@jrggementiza](https://github.com/jrggementiza)!
- Ensure error handling in usage logging does not cause errors...
- Start using `OrdinalEncoder` instead of `LabelEncoder` in DLE. This allows us to deal with "unseen" values in the
analysis period.
### Added
- Added a Store to provide persistence for objects. Main use case for now is storing fitted calculators to be reused
later without needing to fit on reference again. Current store implementation uses a local or remote filesystem as a
persistence layer. Check out the documentation on [persisting calculators](https://nannyml.readthedocs.io/en/latest/tutorials/persisting_calculators.html).
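A hedged sketch of the store described above; the `FilesystemStore` class and the `store`/`load` signatures follow the linked tutorial but are assumptions as far as this entry goes:

```python
import nannyml as nml

store = nml.io.store.FilesystemStore(root_path='/tmp/nml-store')

calc = nml.UnivariateDriftCalculator(column_names=['feature_1'], chunk_size=5000)
calc.fit(reference_df)                                   # reference_df: placeholder DataFrame
store.store(calc, filename='udc.pkl')                    # assumed signature
calc = store.load(filename='udc.pkl', as_type=nml.UnivariateDriftCalculator)
```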
### Fixed
- Fix incorrect interpretation of `y_pred` column as continuous values for the included sample binary classification data.
Converting the column explicitly to "category" data type for now, update of the dataset to follow soon.
[(#171)](https://github.com/NannyML/nannyml/issues/171)
- Fix broken image link in README, thanks [@jrggementiza](https://github.com/jrggementiza)!
- Fix missing key in the CLI section on raw files output, thanks [@CoffiDev](https://github.com/CoffiDev)!
- Fix upper and lower thresholds for data reconstruction being swapped [(#179)](https://github.com/NannyML/nannyml/issues/179)
- Fix stacked bar chart plots (missing bars + too many categories shown)
## [0.8.1] - 2022-12-01
### Changed
- Thorough refactor of the `nannyml.drift.ranker` module. The abstract base class and factory have been dropped in favor
of a more flexible approach.
- Thorough refactor of our Plotly-based plotting modules. These have been rewritten from scratch to make them more
modular and composable. This will allow us to deliver more powerful and meaningful visualizations faster.
### Added
- Added a new univariate drift method. The [`Hellinger distance`](https://nannyml.readthedocs.io/en/v0.8.1/how_it_works/univariate_drift_detection.html#hellinger-distance), used for continuous variables.
- Added an [extensive write-up]() on when to use which univariate drift method.
- Added a new way to rank the results of univariate drift calculation. The `CorrelationRanker` ranks columns based on
the correlation between the drift value and the change in realized or estimated performance. Read all about it in the
[ranking documentation](https://nannyml.readthedocs.io/en/v0.8.1/how_it_works/ranking.html)
### Fixed
- Disabled usage logging for our GitHub workflows
- Allow passing a single string to the `metrics` parameter of the `result.filter()` function, as per special request.
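A small sketch of the relaxed `filter()` signature, assuming `results` is e.g. a CBPE `Result`:

```python
# Both calls are now equivalent; a bare string is accepted for convenience.
subset = results.filter(period='analysis', metrics='roc_auc')
subset = results.filter(period='analysis', metrics=['roc_auc'])
subset.plot().show()
```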
## [0.8.0] - 2022-11-22
### Changed
- Updated `mypy` to a new version, immediately resulting in some new checks that failed.
### Added
- Added new univariate drift methods. The [`Wasserstein distance`](https://nannyml.readthedocs.io/en/latest/how_it_works/univariate_drift_detection.html#wasserstein-distance) for continuous variables,
and the [`L-Infinity distance`](https://nannyml.readthedocs.io/en/main/how_it_works/univariate_drift_detection.html#l-infinity-distance) for categorical variables.
- Added usage logging to our key functions. Check out the [docs](https://nannyml.readthedocs.io/en/latest/usage_logging.html#providing-a-env-file) to find out more on what, why, how, and how to
disable it if you want to.
### Fixed
- Fixed and updated various parts of the docs, reported at warp speed! Thanks [@NeoKish](https://github.com/NeoKish)!
- Fixed `mypy` issues concerning 'implicit optionals'.
## [0.7.0] - 2022-11-07
### Changed
- Updated the handling of "leftover" observations when using the `SizeBasedChunker` and `CountBasedChunker`.
Renamed the parameter for tweaking that behavior to `incomplete`, that can be set to `keep`, `drop` or `append`.
Default behavior for both is now to append leftover observations to the last _full_ chunk.
- Refactored the `nannyml.drift` module. The intermediate structural level (`model_inputs`, `model_outputs`, `targets`)
has been removed and turned into a single unified `UnivariateDriftCalculator`. The old built-in statistics have been
re-implemented as `Methods`, allowing us to add new methods to detect univariate drift.
- Simplified a lot of the codebase (but also complicated some bits) by storing results internally as multilevel-indexed
DataFrames. This means we no longer have to 'convey information' by encoding data column names and method names in
the names of result columns. We've introduced a new paradigm to deal with results. Drill down to the data you really
need by using the `filter` method, which returns a new `Result` instance, with a smaller 'scope'. Then turn this
`Result` into a DataFrame using the `to_df` method.
- Changed the structure of the [pyproject.toml](pyproject.toml) file due to a Poetry upgrade to version 1.2.1.
### Added
- Expanded the `nannyml.io` module with new `Writer` implementations: `DatabaseWriter` that exports data into multiple
tables in a relational database and the `PickleFileWriter` which stores the
pickled `Results` on local/remote/cloud disk.
- Added a new univariate drift detection method based on the Jensen-Shannon distance.
Used within the `UnivariateDriftCalculator`.
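A minimal sketch tying together the new `UnivariateDriftCalculator`, the Jensen-Shannon method and the `filter`/`to_df` result paradigm from this release (column names, chunking and the placeholder DataFrames are illustrative):

```python
import nannyml as nml

calc = nml.UnivariateDriftCalculator(
    column_names=['feature_1', 'feature_2'],
    continuous_methods=['jensen_shannon'],
    categorical_methods=['jensen_shannon'],
    chunk_size=5000,
)
calc.fit(reference_df)                            # reference_df / analysis_df: placeholders
results = calc.calculate(analysis_df)
df = results.filter(period='analysis').to_df()    # drill down, then export
```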
### Fixed
- Added [lightgbm](https://github.com/microsoft/LightGBM) installation instructions to our installation guide.
## [0.6.3] - 2022-09-22
### Changed
- `dependencybot` dependency updates
- `stalebot` setup
### Fixed
- CBPE now uses uncalibrated `y_pred_proba` values to calculate realized performance. Fixed for both binary and
multiclass use cases [(#98)](https://github.com/NannyML/nannyml/issues/98)
- Fix an issue where reference data was rendered incorrectly on joy plots
- Updated the 'California Housing' example docs, thanks for the help [@NeoKish](https://github.com/NeoKish)
- Fix lower confidence bounds and thresholds under zero for regression cases. When the lower limit is set to 0,
the lower threshold will not be plotted. [(#127)](https://github.com/NannyML/nannyml/issues/127)
## [0.6.2] - 2022-09-16
### Changed
- Made the `timestamp_column_name`, previously required by all calculators and estimators, optional. The main consequences
are that plots now use a chunk-index based x-axis when no timestamp column name is given, and that chunking by
period is not possible when the timestamp column name is not specified.
### Fixed
- Added missing `s3fs` dependency
- Fixed outdated plotting kind constants in the runner (used by CLI)
- Fixed some missing images and incorrect version numbers in the README, thanks [@NeoKish](https://github.com/NeoKish)!
### Added
- Added a lot of additional tests, mainly concerning plotting and the [`Runner`](nannyml/runner.py) class
## [0.6.1] - 2022-09-09
### Changed
- Use the `problem_type` parameter to determine the correct graph to output when plotting model output drift
### Fixed
- Showing the wrong plot title for DLE estimation result plots, thanks [@NeoKish](https://github.com/NeoKish)
- Fixed incorrect plot kinds in some error feedback for the model output drift calculator
- Fixed missing `problem_type` argument in the Quickstart guide
- Fix incorrect visualization of confidence bands on reference data in DEE and CBPE result plots
## [0.6.0] - 2022-09-07
### Added
- Added support for regression problems across all calculators and estimators.
In some cases a `problem_type` parameter is now required during calculator/estimator initialization; this
is a breaking change. Read more about using regression in our
[tutorials](https://nannyml.readthedocs.io/en/main/tutorials.html) and about our new performance estimation
for regression using the [Direct Loss Estimation (DLE)](https://nannyml.readthedocs.io/en/main/how_it_works/performance_estimation.html#direct-loss-estimation-dle) algorithm.
### Changed
- Improved `tox` running speed by skipping some unnecessary package installations.
Thanks [@baskervilski](https://github.com/baskervilski)!
### Fixed
- Fixed an issue where some Pandas column datatypes were not recognized as continuous by NannyML, causing them to be
dropped in calculations. Thanks for reporting [@Dbhasin1](https://github.com/Dbhasin1)!
- Fixed an issue where some helper columns for visualization crept into the stored reference results. Good catch
[@Dbhasin1](https://github.com/Dbhasin1)!
- Fixed an issue where a `Reader` instance would raise a `WriteException`. Thanks for those eagle eyes
[@baskervilski](https://github.com/baskervilski)!
## [0.5.3] - 2022-08-30
### Changed
- We've completely overhauled the way we determine the "stability" of our estimations. We've moved on from determining
a minimum `Chunk` size to estimating the *sampling error* for an operation on a `Chunk`.
- A **sampling error** value will be provided per metric per `Chunk` in the result data for
**reconstruction error multivariate drift calculator**, all **performance calculation metrics** and
all **performance estimation metrics**.
- Confidence bounds are now also based on this *sampling error* and will display a range around an estimation +/- 3
times the *sampling error* in **CBPE** and **reconstruction error multivariate drift calculator**.
Be sure to check out our [in-depth documentation](https://nannyml.readthedocs.io/en/main/how_it_works/estimation_of_standard_error.html#estimation-of-standard-error)
on how it works or dive right into the [implementation](nannyml/sampling_error).
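As a back-of-the-envelope illustration of the band construction described above (NannyML's actual sampling-error estimators are metric-specific; the plain standard error of a mean below is only a stand-in):

```python
import numpy as np

rng = np.random.default_rng(0)
reference = rng.normal(loc=0.85, scale=0.05, size=50_000)  # per-row metric contributions
chunk = rng.normal(loc=0.84, scale=0.05, size=5_000)       # one analysis chunk

sampling_error = reference.std() / np.sqrt(len(chunk))     # sampling error of a chunk-level mean
estimate = chunk.mean()
lower, upper = estimate - 3 * sampling_error, estimate + 3 * sampling_error
print(f"{estimate:.4f} +/- {3 * sampling_error:.4f} -> [{lower:.4f}, {upper:.4f}]")
```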
### Fixed
- Fixed issue where an outdated version of Numpy caused Pandas to fail reading string columns in some scenarios
[(#93)](https://github.com/NannyML/nannyml/issues/93). Thank you, [@bernhardbarker](https://github.com/bernhardbarker) and
[@ga-tardochisalles](https://github.com/ga-tardochisalles) for the investigative work!
## [0.5.2] - 2022-08-17
### Changed
- Swapped out ASCII art library from 'art' to 'PyFiglet' because the former was not yet present in conda-forge.
### Fixed
- Some leftover parameter was forgotten during cleanup, breaking CLI functionality
- CLI progressbar was broken due to a boolean check with task ID 0.
## [0.5.1] - 2022-08-16
### Added
- Added simple CLI implementation to support automation and MLOps toolchain use cases. Supports reading/writing to
cloud storage using S3, GCS, ADL, ABFS and AZ protocols. Containerized version available at
[dockerhub](https://hub.docker.com/repository/docker/nannyml/nannyml).
### Changed
- `make clean` now also clears `__pycache__`
- Fixed some inconsistencies in docstrings (they still need some additional love though)
## [0.5.0] - 2022-07-07
### Changed
- Replaced the whole Metadata system by a more intuitive approach.
### Fixed
- Fix docs [(#87)](https://github.com/NannyML/nannyml/issues/79) and [(#89)](https://github.com/NannyML/nannyml/issues/89), thanks [@NeoKish](https://github.com/NeoKish)
- Fix confidence bounds for binary settings [(#86)](https://github.com/NannyML/nannyml/issues/86), thanks [@rfrenoy](https://github.com/rfrenoy)
- Fix README [(#87)](https://github.com/NannyML/nannyml/issues/79), thanks [@NeoKish](https://github.com/NeoKish)
- Fix index misalignment on calibration [(#79)](https://github.com/NannyML/nannyml/issues/79)
- Fix Poetry dev-dependencies issues [(#78)](https://github.com/NannyML/nannyml/issues/78), thanks [@rfrenoy](https://github.com/rfrenoy)
- Fix incorrect documentation links [(#76)](https://github.com/NannyML/nannyml/issues/76), thanks [@SoyGema](https://github.com/SoyGema)
## [0.4.1] - 2022-05-19
### Added
- Added limited support for ``regression`` use cases: create or extract ``RegressionMetadata`` and use it for drift
detection. Performance estimation and calculation require more research.
### Changed
- ``DefaultChunker`` splits into 10 chunks of equal size.
- ``SizeBasedChunker`` no longer drops incomplete last chunk by default, but this is now configurable behavior.
## [0.4.0] - 2022-05-13
### Added
- Added support for new metrics in the Confidence Based Performance Estimator (CBPE). It now estimates ``roc_auc``,
``f1``, ``precision``, ``recall`` and ``accuracy``.
- Added support for **multiclass classification**. This includes
- Specifying ``multiclass classification metadata`` + support in automated metadata extraction (by introducing a
``model_type`` parameter).
- Support for all ``CBPE`` metrics.
- Support for realized performance calculation using the ``PerformanceCalculator``.
- Support for all types of drift detection (model inputs, model output, target distribution).
- A new synthetic toy dataset.
### Changed
- Removed the ``identifier`` property from the ``ModelMetadata`` class. Joining ``analysis`` data and
``analysis target`` values should be done upfront or index-based.
- Added an ``exclude_columns`` parameter to the ``extract_metadata`` function. Use it to specify the columns that should
not be considered as model metadata or features.
- All ``fit`` methods now return the fitted object. This allows chaining ``Calculator``/``Estimator`` instantiation
and fitting into a single line.
- Custom metrics are no longer supported in the ``PerformanceCalculator``. Only the predefined metrics remain supported.
- Big documentation revamp: we've tweaked overall structure, page structure and incorporated lots of feedback.
- Improvements to consistency and readability for the 'hover' visualization in the step plots, including consistent
color usage, conditional formatting, icon usage etc.
- Improved indication of "realized" and "estimated" performance in all ``CBPE`` step plots
(changes to hover, axes and legends)
### Fixed
- Updated homepage in project metadata
- Added missing metadata modification to the *quickstart*
- Perform some additional check on reference data during preprocessing
- Various documentation suggestions [(#58)](https://github.com/NannyML/nannyml/issues/58)
## [0.3.2] - 2022-05-03
### Fixed
- Deal with out-of-time-order data when chunking
- Fix reversed Y-axis and plot labels in continuous distribution plots
## [0.3.1] - 2022-04-11
### Changed
- Publishing to PyPi did not like raw sections in ReST, replaced by Markdown version.
## [0.3.0] - 2022-04-08
### Added
- Added support for both predicted labels and predicted probabilities in ``ModelMetadata``.
- Support for monitoring model performance metrics using the ``PerformanceCalculator``.
- Support for monitoring target distribution using the ``TargetDistributionCalculator``
### Changed
- Plotting will default to using step plots.
- Restructured the ``nannyml.drift`` package and subpackages. *Breaking changes*!
- Metadata completeness check will now fail when there are features of ``FeatureType.UNKNOWN``.
- Chunk date boundaries are now calculated differently for a ``PeriodBasedChunker``, using the
theoretical period for boundaries as opposed to the observed boundaries within the chunk observations.
- Updated version of the ``black`` pre-commit hook due to breaking changes in its ``click`` dependency.
- The *minimum chunk size* will now be provided by each individual ``calculator`` / ``estimator`` / ``metric``,
allowing for each of them to warn the end user when chunk sizes are suboptimal.
### Fixed
- Restrict version of the ``scipy`` dependency to be ``>=1.7.3, <1.8.0``. Planned to be relaxed ASAP.
- Deal with missing values in chunks causing ``NaN`` values when concatenating.
- Crash when estimating CBPE without a target column present
- Incorrect label in ``ModelMetadata`` printout
## [0.2.1] - 2022-03-22
### Changed
- Allow calculators/estimators to provide appropriate ``min_chunk_size`` upon splitting into ``chunks``.
### Fixed
- Data reconstruction drift calculation failing when there are no categorical or continuous features
[(#36)](https://github.com/NannyML/nannyml/issues/36)
- Incorrect scaling on continuous feature distribution plot [(#39)](https://github.com/NannyML/nannyml/issues/39)
- Missing ``needs_calibration`` checks before performing score calibration in CBPE
- Fix crash on chunking when missing target values in reference data
## [0.2.0] - 2022-03-03
### Added
- Result classes for Calculators and Estimators.
### Changed
- Updated the documentation to reflect the changes introduced by result classes,
specifically to plotting functionality.
- Added support for imputing missing values in the ``DataReconstructionDriftCalculator``.
### Removed
- ``nannyml.plots.plots`` was removed.
Plotting is now meant to be done using ``DriftResult.plot()`` or ``EstimatorResult.plot()``.
## [0.1.1] - 2022-03-03
### Fixed
- Fixed an issue where data reconstruction drift calculation also used model predictions during decomposition.
## [0.1.0] - 2022-03-03
### Added
- Chunking base classes and implementations
- Metadata definitions and utilities
- Drift calculator base classes and implementations
- Univariate statistical drift calculator
- Multivariate data reconstruction drift calculator
- Drifted feature ranking base classes and implementations
- Alert count based ranking
- Performance estimator base classes and implementations
- Certainty based performance estimator
- Plotting utilities with support for
- Stacked bar plots
- Line plots
- Joy plots
- Documentation
- Quick start guide
- User guides
- Deep dives
- Example notebooks
- Technical reference documentation
|
NannyMLREPO_NAMEnannymlPATH_START.@nannyml_extracted@[email protected]@.PATH_END.py
|
{
"filename": "check.py",
"repo_name": "pandas-dev/pandas",
"repo_path": "pandas_extracted/pandas-main/pandas/core/computation/check.py",
"type": "Python"
}
|
from __future__ import annotations
from pandas.compat._optional import import_optional_dependency
ne = import_optional_dependency("numexpr", errors="warn")
NUMEXPR_INSTALLED = ne is not None
__all__ = ["NUMEXPR_INSTALLED"]
|
pandas-devREPO_NAMEpandasPATH_START.@pandas_extracted@pandas-main@pandas@core@[email protected]@.PATH_END.py
|
{
"filename": "mcmc.py",
"repo_name": "annadeg/jwst-msafit",
"repo_path": "jwst-msafit_extracted/jwst-msafit-main/msafit/fitting/mcmc.py",
"type": "Python"
}
|
import numpy as np
import matplotlib.pyplot as plt
import emcee
__all__ = ["make_init_ball","get_scale_estimate","plot_autocorr"]
def get_scale_estimate(prior_func):
"""find a reasonable typical value for the prior distribution.
For uniform priors this is somewhere around the middle, for other priors
it is the sigma or scale.
Parameters
----------
prior_func : Prior
prior class instance from msafit.fitting.priors
Returns
-------
float
value from the prior distribution
"""
pf_name = str(type(prior_func))
params = prior_func.params
if 'Uniform' in pf_name:
diff = params['maxi']-params['mini']
if 'Log' in pf_name:
return np.exp(diff/2.)
else: return diff/10.
elif 'Normal' in pf_name:
if 'Log' in pf_name:
return np.exp(params['mode']+params['sigma'])-np.exp(params['mode'])
else:
return params['sigma']
elif 'Beta' in pf_name:
return prior_func.loc + np.sqrt( (params['alpha']*params['beta']) / ((params['alpha']+params['beta'])**2 * (params['alpha']+params['beta']+1)) )
elif 'StudentT' in pf_name:
return params['scale']
else: return 1.
def make_init_ball(init_point,fit_model,nwalkers,e=0.01):
"""creates a small Gaussian ball around the initial parameter vector
Parameters
----------
init_point : 1D array
parameter values of the initial point
fit_model : FitModel instance
holds fit details, the physical model and psf library
nwalkers : int
number of walkers for emcee
e : float, optional
value used to scale the dispersion of the Gaussian
Returns
-------
ndarray
array of size (nwalkers,nparams)
"""
ball = np.zeros((len(init_point),nwalkers))
for i, ival in enumerate(init_point):
pn,pn_mod = fit_model.free_params[i]
prior_func = fit_model.fit_config["fit_params"][pn]["prior"]
pval = get_scale_estimate(prior_func)
ball[i] = np.random.normal(loc=ival,scale=np.abs(e*pval),size=nwalkers)
return ball.T
def plot_autocorr(tau_mean,niter_check,fdir):
"""make basic plot of the autocorrelation timescale vs the iteration number
Parameters
----------
    tau_mean : array-like
        mean autocorrelation time estimate recorded at each iteration
    niter_check : int
        interval (in iterations) at which the autocorrelation time is checked and plotted
    fdir : str
        output directory in which 'autocorr.pdf' is saved
"""
plt.figure()
plt.plot(np.arange(len(tau_mean))[niter_check::niter_check],tau_mean[niter_check::niter_check],ls='solid')
plt.xlabel('iteration number',fontsize=14)
plt.ylabel(r'$\tau$',fontsize=14)
plt.tick_params(direction='in',top=True,right=True)
plt.tight_layout()
plt.savefig(fdir+'autocorr.pdf')
plt.close()
|
annadegREPO_NAMEjwst-msafitPATH_START.@jwst-msafit_extracted@jwst-msafit-main@msafit@[email protected]@.PATH_END.py
|
{
"filename": "hst.py",
"repo_name": "natashabatalha/PandExo",
"repo_path": "PandExo_extracted/PandExo-master/pandexo/engine/hst.py",
"type": "Python"
}
|
import numpy as np
import pandas as pd
from .create_input import hst_spec
import batman
from .RECTE import RECTE
from scipy import optimize
def wfc3_GuessNOrbits(trdur):
'''Predict number of HST orbits
Predict number of HST orbits for transit observation when not provided by the user.
Parameters
----------
trdur : float
transit duration in days
Returns
-------
float
number of requested orbits per transit (including discarded thermal-settling orbit)
'''
# Compute # of HST orbits during transit
# ~96 minutes per HST orbit
orbitsTr = trdur*24.*60/96
if orbitsTr <= 1.5:
norbits = 4.
elif orbitsTr <= 2.0:
norbits = 5.
else:
norbits = np.ceil(orbitsTr*2+1)
return norbits
def wfc3_GuessParams(jmag, disperser, scanDirection, subarray, obsTime, maxScanHeight=180., maxExptime=150., targetFluence=30000., hmag=None):
'''Predict nsamp and samp_seq when values not provided by the user.
Parameters
----------
jmag : float
J-band magnitude
disperser : str
grism ('G141' or 'G102')
scanDirection : str
spatial scan direction ('Forward' or 'Round Trip')
subarray : str
Subarray aperture ('grism256' or 'grism512')
obsTime : float
Available observing time per HST orbit in seconds
maxScanHeight : float
(optional) maximum scan height in pixels
maxExptime : float
(Optional) default=150.0, maximum exposure time in seconds
targetFluence : float
(Optional) Desired fluence in electrons per pixel
hmag : float
(Optional) H-band magnitude
Returns
-------
float
nsamp--number of up-the-ramp samples (1..15)
str
samp_seq--time between non-destructive reads
'''
allnsamp = np.arange(1, 16)
allsampseq = ['spars5', 'spars10', 'spars25']
maxDutyCycle = 0
for samp_seq in allsampseq:
for nsamp in allnsamp:
exptime, tottime, scanRate, scanHeight, fluence = wfc3_obs(jmag, disperser, scanDirection,
subarray, nsamp, samp_seq, targetFluence, hmag)
# Compute duty cycle and compare
# Exposure time should be less than 2.5 minutes to achieve good time resolution
ptsOrbit = np.floor(obsTime/tottime)
dutyCycle = (exptime*(ptsOrbit-1))/50./60*100
if (dutyCycle > maxDutyCycle) and (exptime < maxExptime) and (scanHeight < maxScanHeight):
maxDutyCycle = dutyCycle
bestsampseq = samp_seq
bestnsamp = nsamp
return bestnsamp, bestsampseq
def wfc3_obs(jmag, disperser, scanDirection, subarray, nsamp, samp_seq, targetFluence=30000., hmag=None):
'''Determine the recommended exposure time, scan rate, scan height, and overheads.
Parameters
----------
jmag : float
J-band magnitude
disperser : str
Grism ('G141' or 'G102')
scanDirection : str
spatial scan direction ('Forward' or 'Round Trip')
subarray : str
Subarray aperture ('grism256' or 'grism512')
nsamp : float
Number of up-the-ramp samples (1..15)
samp_seq : str
Time between non-destructive reads ('SPARS5', 'SPARS10', or 'SPARS25')
targetFluence : float
(Optional) Desired fluence in electrons per pixel
hmag : float
(Optional) H-band magnitude
Returns
-------
float
exptime--exposure time in seconds
float
tottime--total frame time including overheads in seconds
float
scanRate--recommented scan rate in arcsec/s
float
scanHeight--scan height in pixels
float
fluence--maximum pixel fluence in electrons
'''
# Estimate exposure time
if subarray == 'grism512':
# GRISM512
if samp_seq == 'spars5':
exptime = 0.853 + (nsamp-1)*2.9215 # SPARS5
elif samp_seq == 'spars10':
exptime = 0.853 + (nsamp-1)*7.9217 # SPARS10
elif samp_seq == 'spars25':
exptime = 0.853 + (nsamp-1)*22.9213 # SPARS25
else:
print(("****HALTED: Unknown SAMP_SEQ: %s" % samp_seq))
return
else:
# GRISM256
if samp_seq == 'spars5':
exptime = 0.280 + (nsamp-1)*2.349 # SPARS5
elif samp_seq == 'spars10':
exptime = 0.278 + (nsamp-1)*7.3465 # SPARS10
elif samp_seq == 'spars25':
exptime = 0.278 + (nsamp-1)*22.346 # SPARS25
else:
print(("****HALTED: Unknown SAMP_SEQ: %s" % samp_seq))
return
# Recommended scan rate
if hmag == None:
hmag = jmag
#scanRate = np.round(1.9*10**(-0.4*(hmag-5.9)), 3) # arcsec/s
#scanRate = np.round(2363./targetFluence*10**(-0.4*(jmag-9.75)), 3) # arcsec/s
scanRate = (2491./targetFluence)*10**(-0.4*(jmag-9.75)) - (161/targetFluence)*10**(-0.4*(jmag-hmag))
if disperser == 'g102':
# G102/G141 flux ratio is ~0.8
scanRate *= 0.8
# Max fluence in electrons/pixel
#fluence = (5.5/scanRate)*10**(-0.4*(hmag-15))*2.4 # electrons
#fluence = (2363./scanRate)*10**(-0.4*(jmag-9.75)) # electrons
fluence = (2491./scanRate)*10**(-0.4*(jmag-9.75)) - (161/scanRate)*10**(-0.4*(jmag-hmag))
if disperser == 'g102':
# WFC3_ISR_2012-08 states that the G102/G141 scale factor is 0.96 DN/electron
fluence *= 0.96
# G102/G141 flux ratio is ~0.8
fluence *= 0.8
# Scan height in pixels
scanRatep = scanRate/0.121 # pixels/s
scanHeight = scanRatep*exptime # pixels
'''
#Quadratic correlation between scanRate and read overhead
foo = np.array([0.0,0.1,0.3,0.5,1.0,2.0,3.0,4.0])/0.121
bar = np.array([ 40, 40, 41, 42, 45, 50, 56, 61])
c = np.polyfit(foo,bar,2)
model = c[2] + c[1]*foo + c[0]*foo**2
#c = [ 6.12243227e-04, 6.31621064e-01, 3.96040946e+01]
'''
# Define instrument overheads (in seconds)
c = [6.12243227e-04, 6.31621064e-01, 3.96040946e+01]
read = c[2] + c[1]*scanRatep + c[0]*scanRatep**2
# Correlation between scanHeight/scanRate and pointing overhead was determined elsewhere
if scanDirection == 'Round Trip':
# Round Trip scan direction doesn't have to return to starting point, therefore no overhead
pointing = 0.
elif scanDirection == 'Forward':
c = [3.18485340e+01, 3.32968829e-02, 1.65687590e-02,
7.65510038e-01, -6.24504499e+01, 5.51452028e-03]
pointing = c[0]*(1 - np.exp(-c[2]*(scanHeight-c[4]))) + \
c[1]*scanHeight + c[3]*scanRatep + c[5]*scanRatep**2
else:
print(("****HALTED: Unknown scan direction: %s" % scanDirection))
return
# Estimate total frame time including overheads
tottime = exptime+read+pointing # seconds
return exptime, tottime, scanRate, scanHeight, fluence
def wfc3_TExoNS(dictinput):
'''Compute Transit depth uncertainty
Compute the transit depth uncertainty for a defined system and number of spectrophotometric channels.
Written by Kevin Stevenson October 2016
Parameters
----------
dictinput : dict
dictionary containing instrument parameters and exoplanet specific parameters. {"pandeia_input":dict1, "pandexo_input":dict1}
Returns
-------
float
deptherr--transit depth uncertainty per spectrophotometric channel
float
        chanrms--light curve root-mean-square (rms)
float
ptsOrbit--number of HST frames per orbit
'''
pandeia_input = dictinput['pandeia_input']
pandexo_input = dictinput['pandexo_input']
jmag = pandexo_input['star']['jmag']
if np.ma.is_masked(jmag):
print("Jmag not found.")
try:
hmag = pandexo_input['star']['hmag']
except:
hmag = jmag
print("Hmag not found. Assuming no color dependence in the stellar type.")
trdur = pandexo_input['planet']['transit_duration']
numTr = pandexo_input['observation']['noccultations']
schedulability = pandeia_input['strategy']['schedulability']
scanDirection = pandeia_input['strategy']['scanDirection']
nchan = pandeia_input['strategy']['nchan']
norbits = pandeia_input['strategy']['norbits']
useFirstOrbit = pandeia_input['strategy']['useFirstOrbit']
try:
targetFluence = pandeia_input['strategy']['targetFluence']
except:
targetFluence = 30000.
print("Assuming a target fluence of 30,000 electrons.")
disperser = pandeia_input['configuration']['instrument']['disperser'].lower(
)
subarray = pandeia_input['configuration']['detector']['subarray'].lower()
nsamp = pandeia_input['configuration']['detector']['nsamp']
samp_seq = pandeia_input['configuration']['detector']['samp_seq']
try:
samp_seq = samp_seq.lower()
except:
pass
if disperser == 'g141':
# Define reference Jmag, flux, variance, and exposure time for GJ1214
refmag = 9.750
refflux = 2.32e8
refvar = 2.99e8
refexptime = 88.436
elif disperser == 'g102':
# Define reference Jmag, flux, variance, and exposure time for WASP12
refmag = 10.477
refflux = 8.26e7
refvar = 9.75e7
refexptime = 103.129
else:
print(("****HALTED: Unknown disperser: %s" % disperser))
return
# Determine max recommended scan height
if subarray == 'grism512':
maxScanHeight = 430
elif subarray == 'grism256':
maxScanHeight = 180
else:
print(("****HALTED: Unknown subarray aperture: %s" % subarray))
return
# Define maximum frame time
maxExptime = 150.
# Define available observing time per HST orbit in seconds
if str(schedulability) == '30':
obsTime = 51.3*60
elif str(schedulability) == '100':
obsTime = 46.3*60
else:
print(("****HALTED: Unknown schedulability: %s" % schedulability))
return
# Compute recommended number of HST orbits and compare to user specified value
guessorbits = wfc3_GuessNOrbits(trdur)
if norbits == None:
norbits = guessorbits
elif norbits != guessorbits:
print(("****WARNING: Number of specified HST orbits does not match number of recommended orbits: %0.0f" % guessorbits))
if nsamp == 0 or nsamp == None or samp_seq == None or samp_seq == "none":
# Estimate reasonable values
nsamp, samp_seq = wfc3_GuessParams(jmag, disperser, scanDirection, subarray, obsTime, maxScanHeight, maxExptime, targetFluence, hmag)
# Calculate observation parameters
exptime, tottime, scanRate, scanHeight, fluence = wfc3_obs(jmag, disperser, scanDirection, subarray,
nsamp, samp_seq, targetFluence, hmag=hmag)
if scanHeight > maxScanHeight:
print(("****WARNING: Computed scan height exceeds maximum recommended height of %0.0f pixels." % maxScanHeight))
if exptime > maxExptime:
print(("****WARNING: Computed frame time (%0.0f seconds) exceeds maximum recommended duration of %0.0f seconds." % (exptime, maxExptime)))
# Compute number of data points (frames) per orbit
ptsOrbit = np.floor(obsTime/tottime)
# First point (frame) is always low, ignore when computing duty cycle
dutyCycle = (exptime*(ptsOrbit-1))/50./60*100
# Compute number of non-destructive reads per orbit
readsOrbit = ptsOrbit*(nsamp+1)
# Look for mid-orbit buffer dumps
if (subarray == 'grism256') and (readsOrbit >= 300) and (exptime <= 43):
print(
"****WARNING: Observing plan may incur mid-orbit buffer dumps. Check with APT.")
if (subarray == 'grism512') and (readsOrbit >= 120) and (exptime <= 100):
print(
"****WARNING: Observing plan may incur mid-orbit buffer dumps. Check with APT.")
# Compute number of HST orbits per transit
# ~96 minutes per HST orbit
orbitsTr = trdur*24.*60/96
# Estimate number of good points during planet transit
# First point in each HST orbit is flagged as bad; therefore, subtract from total
if orbitsTr < 0.5:
# Entire transit fits within one HST orbit
ptsInTr = ptsOrbit * orbitsTr/0.5 - 1
elif orbitsTr <= 1.5:
# Assume one orbit centered on mid-transit time
ptsInTr = ptsOrbit - 1
elif orbitsTr < 2.:
# Assume one orbit during full transit and one orbit during ingress/egress
ptsInTr = ptsOrbit * \
(np.floor(orbitsTr) +
np.min((1, np.remainder(orbitsTr-np.floor(orbitsTr)-0.5, 1)/0.5))) - 2
else:
# Assume transit contains 2+ orbits timed to maximize # of data points.
ptsInTr = ptsOrbit * (np.floor(orbitsTr) + np.min(
(1, np.remainder(orbitsTr-np.floor(orbitsTr), 1)/0.5))) - np.ceil(orbitsTr)
# Estimate number of good points outside of transit
# Discard first HST orbit
ptsOutTr = (ptsOrbit-1) * (norbits-1) - ptsInTr
# Compute transit depth uncertainty per spectrophotometric channel
ratio = 10**((refmag - jmag)/2.5)
flux = ratio*refflux*exptime/refexptime
fluxvar = ratio*refvar*exptime/refexptime
chanflux = flux/nchan
chanvar = fluxvar/nchan
chanrms = np.sqrt(chanvar)/chanflux*1e6 # ppm
inTrrms = chanrms/np.sqrt(ptsInTr*numTr) # ppm
outTrrms = chanrms/np.sqrt(ptsOutTr*numTr) # ppm
deptherr = np.sqrt(inTrrms**2 + outTrrms**2) # ppm
info = {"Number of HST orbits": norbits,
"Use first orbit": useFirstOrbit,
"WFC3 parameters: NSAMP": nsamp,
"WFC3 parameters: SAMP_SEQ": samp_seq.upper(),
"Scan Direction": scanDirection,
"Recommended scan rate (arcsec/s)": scanRate,
"Scan height (pixels)": scanHeight,
"Maximum pixel fluence (electrons)": fluence,
"exposure time": exptime,
"Estimated duty cycle (outside of Earth occultation)": dutyCycle,
"Transit depth uncertainty(ppm)": deptherr,
"Number of channels": nchan,
"Number of Transits": numTr}
return {"spec_error": deptherr/1e6,
"light_curve_rms": chanrms/1e6,
"nframes_per_orb": ptsOrbit,
"info": info}
def calc_start_window(eventType, rms, ptsOrbit, numOrbits, depth, inc, aRs, period, windowSize, ecc=0, w=90., duration=None, offset=0., useFirstOrbit=False):
'''Calculate earliest and latest start times
Plot earliest and latest possible spectroscopic light curves for given start window size
Parameters
----------
eventType : str
'transit' or 'eclipse'
rms : float
light curve root-mean-square
ptsOrbit : int
number of frames per HST orbit
numOrbits : int
number of HST orbits per visit
depth : float
transit/eclipse depth
inc : float
orbital inclination in degrees
aRs : float
Semi-major axis in units of stellar radii (a/R*)
period : float
orbital period in days
windowSize : float
observation start window size in minutes
ecc : float
(Optional) eccentricity
w : float
(Optional) longitude of periastron in degrees
duration : float
(Optional) full transit/eclipse duration in days
offset : float
(Optional) manual offset in observation start time, in minutes
useFirstOrbit : bool
(Optional) whether to use first orbit
Returns
-------
float
minphase--earliest observation start phase
float
maxphase--latest observation start phase
'''
ptsOrbit = int(ptsOrbit)
numOrbits = int(numOrbits)
hstperiod = 96./60/24 # HST orbital period, days
punc = windowSize/120./24/period # Half start window size, in phase
cosi = np.cos(inc*np.pi/180) # Cosine of the inclination
rprs = np.sqrt(depth) # Planet-star radius ratio
params = batman.TransitParams()
if eventType == 'transit':
midpt = period
b = aRs*cosi*(1-ecc**2)/(1+ecc*np.sin(w*np.pi/180)) # Impact parameter
# Account for planet speed on eccentric orbits
sfactor = np.sqrt(1-ecc**2)/(1+ecc*np.sin(w*np.pi/180))
# limb darkening coefficients
params.u = [0.1, 0.1]
elif eventType == 'eclipse':
#midpt = period/2*(1+4*ecc*np.cos(w*np.pi/180)/np.pi)
midpt = calculate_tsec(period, ecc, w*np.pi/180, inc*np.pi/180, t0=0, tperi=None, winn_approximation=False)
b = aRs*cosi*(1-ecc**2)/(1-ecc*np.sin(w*np.pi/180)) # Impact parameter
# Account for planet speed on eccentric orbits
sfactor = np.sqrt(1-ecc**2)/(1-ecc*np.sin(w*np.pi/180))
# limb darkening coefficients
params.u = [0.0, 0.0]
else:
print(("****HALTED: Unknown event type: %s" % eventType))
return
params.t0 = midpt/period # phase of transit/eclipse
params.per = 1. # orbital period, units are orbital phase
# planet radius (in units of stellar radii)
params.rp = rprs
# semi-major axis (in units of stellar radii)
params.a = aRs
params.inc = inc # orbital inclination (in degrees)
params.ecc = ecc # eccentricity
params.w = w # longitude of periastron (in degrees)
params.limb_dark = "quadratic" # limb darkening model
# Transit/eclipse duration (in days)
if duration == None:
duration = period/np.pi * \
np.arcsin(
1./aRs*np.sqrt(((1+rprs)**2-(aRs*cosi)**2)/(1-cosi**2)))*sfactor
phase1 = (midpt + duration/2. - hstperiod*(numOrbits-2 -
useFirstOrbit) - hstperiod/2 + offset/24./60)/period
phase2 = (midpt - duration/2. - hstperiod*2 + offset/24./60)/period
minphase = (phase1+phase2)/2-punc
maxphase = (phase1+phase2)/2+punc
# Compute light curves at extremes of HST start window
npts = int(4 * ptsOrbit * numOrbits)
phdur = duration/period
phase1 = np.linspace(minphase+(1-useFirstOrbit)*hstperiod/period,
minphase+hstperiod/period*(numOrbits-1)+hstperiod/period/2, int(npts))
phase2 = np.linspace(maxphase+(1-useFirstOrbit)*hstperiod/period,
maxphase+hstperiod/period*(numOrbits-1)+hstperiod/period/2, int(npts))
m = batman.TransitModel(params, phase1)
trmodel1 = m.light_curve(params)
m = batman.TransitModel(params, phase2)
trmodel2 = m.light_curve(params)
obsphase1 = []
obsphase2 = []
for i in range(numOrbits):
obsphase1 = np.r_[obsphase1, np.linspace(
minphase+hstperiod/period*i, minphase+hstperiod/period*i+hstperiod/period/2, int(ptsOrbit))]
obsphase2 = np.r_[obsphase2, np.linspace(
maxphase+hstperiod/period*i, maxphase+hstperiod/period*i+hstperiod/period/2, int(ptsOrbit))]
m = batman.TransitModel(params, obsphase1)
obstr1 = m.light_curve(params) + np.random.normal(0, rms, obsphase1.shape)
m = batman.TransitModel(params, obsphase2)
obstr2 = m.light_curve(params) + np.random.normal(0, rms, obsphase2.shape)
return {'obsphase1': obsphase1, 'light_curve_rms': rms, 'obstr1': obstr1, 'obsphase2': obsphase2,
'obstr2': obstr2, 'minphase': minphase, 'maxphase': maxphase, 'phase1': phase1,
'phase2': phase2, 'trmodel1': trmodel1, 'trmodel2': trmodel2, 'eventType': eventType, 'planet period':period}
def planet_spec(planet, star, w_unit, disperser, deptherr, nchan, smooth=None):
'''Plot exoplanet transmission/emission spectrum
Parameters
----------
planet: dict
planet dictionary from exo_input
star : dict
star dictionary from exo_input
w_unit : str
wavelength unit (um or nm)
disperser :
grism (g102 or g141)
deptherr : float
simulated transit/eclipse depth uncertainty
nchan : float
number of spectrophotometric channels
smooth : float
(Optional)length of smoothing kernel
Returns
-------
dict
contains following keys {'model_wave','model_spec','binwave','binspec',
'error','wmin','wmax'}
'''
# Load model wavelengths and spectrum
mwave, mspec = hst_spec(planet, star) # np.loadtxt(specfile, unpack=True)
# Convert wavelength to microns
# if w_unit == 'um':
# pass
# elif w_unit == 'nm':
# mwave /= 1000.
# else:
# print(("****HALTED: Unrecognized wavelength unit: '%s'" % w_unit))
# return
# Smooth model spectrum (optional)
if smooth != None:
from .hst_smooth import smooth as sm
mspec = sm(mspec, smooth)
# Determine disperser wavelength boundaries
if disperser == 'g141':
wmin = 1.125
wmax = 1.650
elif disperser == 'g102':
wmin = 0.84
wmax = 1.13
else:
print(("****HALTED: Unrecognized disperser name: '%s'" % disperser))
return
# Determine wavelength bins
binsize = (wmax - wmin)/nchan
wave_low = np.round([i for i in np.linspace(wmin, wmax-binsize, nchan)], 3)
wave_hi = np.round([i for i in np.linspace(wmin+binsize, wmax, nchan)], 3)
binwave = (wave_low + wave_hi)/2.
# Create simulated spectrum by binning model spectrum and addding uncertainty
binspec = np.zeros(nchan)
for i in range(nchan):
ispec = np.where((mwave >= wave_low[i])*(mwave <= wave_hi[i]))
binspec[i] = np.mean(mspec[ispec])
binspec += np.random.normal(0, deptherr, nchan)
return {'model_wave': mwave, 'model_spec': mspec, 'binwave': binwave, 'binspec': binspec,
'error': deptherr, 'wmin': wmin, 'wmax': wmax}
def compute_sim_lightcurve(exposureDict, lightCurveDict, calRamp=False):
"""Compute simulated HST light curves
Function to take the fluence and error estimates from wfc3_TExoNS
and the model light curve from calc_start_window to simulate observed
light curves. Ramp effect systemacts simulated by RECTE can be
included by turning on the calRamp switch
Parameters
----------
exposureDict : dict
Includes information for the observation. This dictionary is
returned by function wfc3_TExoNS. Relevant keys in the dictionary
are ['info']["Maximum pixel fluence (electrons)"] and
['info']['exposure time']
lightCurveDict : dict
        Includes the model light curve. The light curves are estimated by
calc_start_window
calRamp : bool (default: False)
Switch to turn on/off RECTE ramp calculation. Calculate realistic
ramp effect systematics when the switch is turned on.
Returns
-------
dict
Resulting light curve (unit: e/pixel) for the earliest and latest time
"""
fluence = exposureDict['info']["Maximum pixel fluence (electrons)"]
exptime = exposureDict['info']['exposure time']
obst1 = (lightCurveDict['obsphase1'] -
lightCurveDict['obsphase1'][0]) *\
lightCurveDict['planet period'] * 86400 # in seconds
obst2 = (lightCurveDict['obsphase2'] -
lightCurveDict['obsphase2'][0]) *\
lightCurveDict['planet period'] * 86400 # in seconds
counts1 = lightCurveDict['obstr1'] * fluence
counts2 = lightCurveDict['obstr2'] * fluence
# use RECTE to calculate the ramp if calRamp option is turned on
if calRamp:
counts1 = RECTE(counts1 / exptime,
obst1,
exptime)
counts2 = RECTE(counts2 / exptime,
obst2,
exptime)
count_noise = lightCurveDict['light_curve_rms'] * fluence
resultDict = lightCurveDict.copy()
resultDict['counts1'] = counts1
resultDict['counts2'] = counts2
resultDict['count_noise'] = count_noise
resultDict['ramp_included'] = calRamp
resultDict['model_counts1'] = resultDict['trmodel1'] * fluence
resultDict['model_counts2'] = resultDict['trmodel2'] * fluence
return resultDict
def compute_sim_hst(dictinput,verbose=False):
"""Sets up HST simulations
Function to set up explanet observations for HST only and
compute simulated spectrum.
Parameters
----------
dictinput : dict
instrument and pandexo dictionaries in format {"pandeia_input":dict1, "pandexo_input":dict2}
Returns
-------
dict
All hst output info needed to plot simulated data, light curves timing info
"""
pandexo_input = dictinput['pandexo_input']
pandeia_input = dictinput['pandeia_input']
disperser = pandeia_input['configuration']['instrument']['disperser'].lower()
# add a switch for ramp calculation
calRamp = pandeia_input['strategy']['calculateRamp']
if not pandeia_input['strategy']['useFirstOrbit']:
if verbose:print("Dropping first orbit designed by observation strategy")
if verbose:print("Do not calculate ramp profile")
calRamp = False
numorbits = pandeia_input['strategy']['norbits']
nchan = pandeia_input['strategy']['nchan']
windowSize = pandeia_input['strategy']['windowSize']
useFirstOrbit = pandeia_input['strategy']['useFirstOrbit']
calc_type = pandexo_input['planet']['type']
jmag = pandexo_input['star']['jmag']
hmag = pandexo_input['star']['hmag']
specfile = pandexo_input['planet']['exopath']
w_unit = pandexo_input['planet']['w_unit']
f_unit = pandexo_input['planet']['f_unit']
depth = pandexo_input['planet']['depth']
inc = pandexo_input['planet']['i']
aRs = pandexo_input['planet']['ars']
period = pandexo_input['planet']['period']
ecc = pandexo_input['planet']['ecc']
w = pandexo_input['planet']['w']
try:
offset = pandeia_input['strategy']['offset']
except:
offset = 0.
# check to see if ecc or w was provided
if (type(ecc) != float) and (type(ecc) != int):
ecc = 0.0
if (type(w) != float) and (type(w) != int):
w = 90.0
if (type(windowSize) != float) and (type(windowSize) != int):
windowSize = 20.0
if f_unit == "rp^2/r*^2":
eventType = 'transit'
elif f_unit == "fp/f*":
eventType = 'eclipse'
elif calc_type == 'grid':
eventType = 'transit'
else:
raise Exception('Units are not correct. Pick rp^2/r*^2 or fp/f*')
a = wfc3_TExoNS(dictinput)
b = calc_start_window(eventType, a['light_curve_rms'], a['nframes_per_orb'], a['info']['Number of HST orbits'],
depth, inc, aRs, period, windowSize, ecc, w, useFirstOrbit=useFirstOrbit, offset=offset)
c = planet_spec(pandexo_input['planet'], pandexo_input['star'],
w_unit, disperser, a['spec_error'], nchan, smooth=20)
info_div = create_out_div(a['info'], b['minphase'], b['maxphase'])
simLightCurve = compute_sim_lightcurve(a, b, calRamp=calRamp)
return {"wfc3_TExoNS": a,
"calc_start_window": b,
"planet_spec": c,
"light_curve": simLightCurve,
"info_div": info_div}
def create_out_div(input_dict, minphase, maxphase):
"""Function to render input dicts in html format for web front end
Parameters
----------
input_dict : dict
any input dictionary
Returns
-------
div
html rendered table
"""
input_dict['Start observations between orbital phases'] = str(
minphase)+'-'+str(maxphase)
input_div = pd.DataFrame.from_dict(input_dict, orient='index')
input_div.columns = ['Value']
input_div = input_div.to_html()
input_div = '<table class="table table-striped"> \n' + \
input_div[36:len(input_div)]
input_div = input_div.encode()
return input_div
def calculate_tsec(period, ecc, omega, inc, t0 = None, tperi = None, winn_approximation = False):
''' Function to calculate the time of secondary eclipse.
This uses Halley's method (Newton-Raphson, but using second derivatives) to first find the true anomaly (f) at which secondary eclipse occurs,
then uses this to get the eccentric anomaly (E) at secondary eclipse, which gives the mean anomaly (M) at secondary
eclipse using Kepler's equation. This finally leads to the time of secondary eclipse using the definition of the mean
anomaly (M = n*(t - tau) --- here tau is the time of pericenter passage, n = 2*pi/period the mean motion).
Time inputs can be either the time of periastron passage directly or the time of transit center. If the latter, the
true anomaly for primary transit will be calculated using Halley's method as well, and this will be used to get the
time of periastron passage.
Parameters
----------
period : float
The period of the transit in days.
ecc : float
Eccentricity of the orbit
omega : float
Argument of periastron passage (in radians)
    inc : float
Inclination of the orbit (in radians)
t0 : float
The transit time in BJD or HJD (will be used to get time of periastron passage).
tperi : float
The time of periastron passage in BJD or HJD (needed if t0 is not supplied).
winn_approximation : boolean
        If True, the approximation in Winn (2010) is used --- (only valid for orbits that are not very eccentric and are close to edge-on).
Returns
-------
tsec : float
The time of secondary eclipse '''
    # Check the user is not playing a trick on us:
if period<0:
raise Exception('Period cannot be a negative number.')
if ecc<0 or ecc>1:
raise Exception('Eccentricity (e) is out of bounds (0 < e < 1).')
# Use true anomaly approximation given in Winn (2010) as starting point:
f_occ_0 = (-0.5*np.pi) - omega
if not winn_approximation:
f_occ = optimize.newton(drsky, f_occ_0, fprime = drsky_prime, fprime2 = drsky_2prime, args = (ecc, omega, inc,))
else:
f_occ = f_occ_0
# Define the mean motion, n:
n = 2.*np.pi/period
    # If neither the time of periastron nor the time-of-transit center is given, raise an error.
    # Otherwise, if only the time-of-transit center is given, use it to calculate the time of periastron passage:
    if (tperi is None) and (t0 is None):
        raise ValueError('The time of periastron passage or time-of-transit center has to be supplied for the calculation to work.')
    if tperi is None:
        # For this, find true anomaly during transit. Use Winn (2010) as starting point:
        f_tra_0 = (np.pi/2.) - omega
        if not winn_approximation:
            f_tra = optimize.newton(drsky, f_tra_0, fprime = drsky_prime, fprime2 = drsky_2prime, args = (ecc, omega, inc,))
        else:
            f_tra = f_tra_0
        # Get eccentric anomaly during transit:
        E = getE(f_tra, ecc)
        # Get mean anomaly during transit:
        M = getM(E, ecc)
        # Get time of periastron passage from mean anomaly definition:
        tperi = t0 - (M/n)
# Get eccentric anomaly:
E = getE(f_occ, ecc)
# Get mean anomaly during secondary eclipse:
M = getM(E, ecc)
# Get the time of secondary eclipse using the definition of the mean anomaly:
tsec = (M/n) + tperi
# Note returned time-of-secondary eclipse is the closest to the time of periastron passage and/or time-of-transit center. Check that
# the returned tsec is the *next* tsec to the time of periastron or t0 (i.e., the closest *future* tsec):
if t0 is not None:
tref = t0
else:
tref = tperi
if tref > tsec:
while True:
tsec += period
if tsec > tref:
break
return tsec
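# Added illustrative sanity check (not part of the original PandExo code; the numbers are hypothetical).
# For a nearly circular, edge-on orbit (ecc -> 0, omega = 90 deg) the secondary eclipse should land
# roughly half a period after the transit center, which gives a quick check of calculate_tsec:
def _example_tsec_sanity_check():
    period, t0 = 3.0, 2458000.0
    tsec = calculate_tsec(period, ecc=1e-4, omega=np.pi / 2., inc=np.pi / 2., t0=t0)
    # For ecc -> 0 and omega = 90 deg, tsec - t0 should be very close to period / 2
    return abs((tsec - t0) - period / 2.) < 1e-3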
def drsky_2prime(x, ecc, omega, inc):
''' Second derivative of function drsky. This is the second derivative with respect to f of the drsky function.
Parameters
----------
x : float
True anomaly
ecc : float
Eccentricity of the orbit
omega : float
Argument of periastron passage (in radians)
inc : float
Inclination of the orbit (in radians)
Returns
-------
drsky_2prime : float
Function evaluated at x, ecc, omega, inc'''
sq_sini = np.sin(inc)**2
sin_o_p_f = np.sin(x+omega)
cos_o_p_f = np.cos(x+omega)
ecosf = ecc*np.cos(x)
esinf = ecc*np.sin(x)
f1 = esinf - esinf*sq_sini*(sin_o_p_f**2)
f2 = -sq_sini*(ecosf + 4.)*(sin_o_p_f*cos_o_p_f)
return f1+f2
def drsky_prime(x, ecc, omega, inc):
''' Derivative of function drsky. This is the first derivative with respect to f of the drsky function.
Parameters
----------
x : float
True anomaly
ecc : float
Eccentricity of the orbit
omega : float
Argument of periastron passage (in radians)
inc : float
Inclination of the orbit (in radians)
Returns
-------
drsky_prime : float
Function evaluated at x, ecc, omega, inc'''
sq_sini = np.sin(inc)**2
sin_o_p_f = np.sin(x+omega)
cos_o_p_f = np.cos(x+omega)
ecosf = ecc*np.cos(x)
esinf = ecc*np.sin(x)
f1 = (cos_o_p_f**2 - sin_o_p_f**2)*(sq_sini)*(1. + ecosf)
f2 = -ecosf*(1 - (sin_o_p_f**2)*(sq_sini))
f3 = esinf*sin_o_p_f*cos_o_p_f*sq_sini
return f1+f2+f3
def drsky(x, ecc, omega, inc):
''' Function whose roots we wish to find to obtain time of secondary (and primary) eclipse(s)
When one takes the derivative of equation (5) in Winn (2010; https://arxiv.org/abs/1001.2010v5), and equates that to zero (to find the
minimum/maximum of said function), one gets to an equation of the form g(x) = 0. This function (drsky) is g(x), where x is the true
anomaly.
Parameters
----------
x : float
True anomaly
ecc : float
Eccentricity of the orbit
omega : float
Argument of periastron passage (in radians)
inc : float
Inclination of the orbit (in radians)
Returns
-------
drsky : float
Function evaluated at x, ecc, omega, inc '''
sq_sini = np.sin(inc)**2
sin_o_p_f = np.sin(x+omega)
cos_o_p_f = np.cos(x+omega)
f1 = sin_o_p_f*cos_o_p_f*sq_sini*(1. + ecc*np.cos(x))
f2 = ecc*np.sin(x)*(1. - sin_o_p_f**2 * sq_sini)
return f1 - f2
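# Reference note (added; not in the original): Winn (2010), eq. (5), gives the projected star-planet
# separation as r_sky = r * sqrt(1 - sin^2(omega + f) * sin^2(i)), with r = a(1 - e^2)/(1 + e*cos(f)).
# The drsky function above equals d(r_sky^2)/df up to a nonzero factor, so its roots are the true
# anomalies of inferior and superior conjunction (primary transit and secondary eclipse).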
def getE(f,ecc):
""" Function that returns the eccentric anomaly
    Note normally this is defined in terms of cosines (see, e.g., Section 2.4 in Murray and Dermott), but numerically
    that form is troublesome because the arccosine only returns values in [0, pi] and so loses the sign of E (equation 2.43).
    That's why the arctan version is better, as signs are preserved (derivation is also in the same section, equation 2.46).
Parameters
----------
f : float
True anomaly
ecc : float
Eccentricity
Returns
-------
E : float
Eccentric anomaly """
return 2. * np.arctan(np.sqrt((1.-ecc)/(1.+ecc))*np.tan(f/2.))
def getM(E, ecc):
""" Function that returns the mean anomaly using Kepler's equation
Parameters
----------
E : float
Eccentric anomaly
ecc: float
Eccentricity
Returns
-------
M : float
Mean anomaly """
return E - ecc*np.sin(E)
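# Added illustrative check (not part of the original code; values are hypothetical). The arctangent
# form used in getE is equivalent to the cosine form cos(E) = (cos(f) + e) / (1 + e*cos(f))
# (Murray and Dermott, eq. 2.43), while also preserving the sign of E:
def _example_check_getE():
    f, ecc = np.pi / 3., 0.1
    E = getE(f, ecc)
    return abs(np.cos(E) - (np.cos(f) + ecc) / (1. + ecc * np.cos(f))) < 1e-12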
|
natashabatalhaREPO_NAMEPandExoPATH_START.@PandExo_extracted@PandExo-master@pandexo@[email protected]@.PATH_END.py
|
{
"filename": "_offsetgroup.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/box/_offsetgroup.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class OffsetgroupValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(self, plotly_name="offsetgroup", parent_name="box", **kwargs):
super(OffsetgroupValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "info"),
**kwargs
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@box@[email protected]_END.py
|
{
"filename": "__init__.py",
"repo_name": "yt-project/yt",
"repo_path": "yt_extracted/yt-main/yt/frontends/eagle/__init__.py",
"type": "Python"
}
|
yt-projectREPO_NAMEytPATH_START.@yt_extracted@yt-main@yt@frontends@eagle@[email protected]_END.py
|
|
{
"filename": "test_arxiv.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/community/tests/integration_tests/document_loaders/test_arxiv.py",
"type": "Python"
}
|
import shutil
from http.client import HTTPMessage
from pathlib import Path
from typing import List, Union
from unittest.mock import patch
from urllib.error import HTTPError
import pytest
from langchain_core.documents import Document
from langchain_community.document_loaders.arxiv import ArxivLoader
EXAMPLE_HELLO_PDF_PATH = Path(__file__).parents[1] / "examples" / "hello.pdf"
def assert_docs(docs: List[Document]) -> None:
for doc in docs:
assert doc.page_content
assert doc.metadata
assert set(doc.metadata) == {"Published", "Title", "Authors", "Summary"}
def test_load_success() -> None:
"""Test that returns one document"""
loader = ArxivLoader(query="1605.08386", load_max_docs=2)
docs = loader.load()
assert len(docs) == 1
print(docs[0].metadata) # noqa: T201
print(docs[0].page_content) # noqa: T201
assert_docs(docs)
def test_load_returns_no_result() -> None:
"""Test that returns no docs"""
loader = ArxivLoader(query="1605.08386WWW", load_max_docs=2)
docs = loader.load()
assert len(docs) == 0
def test_load_returns_limited_docs() -> None:
"""Test that returns several docs"""
expected_docs = 2
loader = ArxivLoader(query="ChatGPT", load_max_docs=expected_docs)
docs = loader.load()
assert len(docs) == expected_docs
assert_docs(docs)
def test_load_returns_full_set_of_metadata() -> None:
"""Test that returns several docs"""
loader = ArxivLoader(query="ChatGPT", load_max_docs=1, load_all_available_meta=True)
docs = loader.load()
assert len(docs) == 1
for doc in docs:
assert doc.page_content
assert doc.metadata
assert set(doc.metadata).issuperset(
{"Published", "Title", "Authors", "Summary"}
)
print(doc.metadata) # noqa: T201
assert len(set(doc.metadata)) > 4
def test_skip_http_error() -> None:
"""Test skipping unexpected Http 404 error of a single doc"""
tmp_hello_pdf_path = Path(__file__).parent / "hello.pdf"
def first_download_fails() -> Union[HTTPError, str]:
if not hasattr(first_download_fails, "firstCall"):
first_download_fails.__setattr__("firstCall", False)
raise HTTPError(
url="", code=404, msg="Not Found", hdrs=HTTPMessage(), fp=None
)
else:
# Return temporary example pdf path
shutil.copy(EXAMPLE_HELLO_PDF_PATH, tmp_hello_pdf_path)
return str(tmp_hello_pdf_path.absolute())
with patch("arxiv.Result.download_pdf") as mock_download_pdf:
# Set up the mock to raise HTTP 404 error
mock_download_pdf.side_effect = first_download_fails
# Load documents
loader = ArxivLoader(
query="ChatGPT",
load_max_docs=2,
load_all_available_meta=True,
continue_on_failure=True,
)
docs = loader.load()
# Only 1 of 2 documents should be loaded
assert len(docs) == 1
@pytest.mark.skip(reason="test could be flaky")
def test_load_issue_9046() -> None:
"""Test for the fixed issue 9046"""
expected_docs = 3
# ":" character could not be an issue
loader = ArxivLoader(
query="MetaGPT: Meta Programming for Multi-Agent Collaborative Framework",
load_max_docs=expected_docs,
)
docs = loader.load()
assert_docs(docs)
assert "MetaGPT" in docs[0].metadata["Title"]
# "-" character could not be an issue
loader = ArxivLoader(
query="MetaGPT - Meta Programming for Multi-Agent Collaborative Framework",
load_max_docs=expected_docs,
)
docs = loader.load()
assert_docs(docs)
assert "MetaGPT" in docs[0].metadata["Title"]
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@community@tests@integration_tests@document_loaders@[email protected]_END.py
|
{
"filename": "python-reference_sum_models.md",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/catboost/docs/en/concepts/python-reference_sum_models.md",
"type": "Markdown"
}
|
# sum_models
## {{ dl--purpose }} {#purpose}
{% include [sum_limits-python__sum-limits__desc](../_includes/work_src/reusage-python/python__sum-limits__desc.md) %}
## {{ dl--invoke-format }} {#call-format}
```python
sum_models(models,
weights=None,
ctr_merge_policy='IntersectingCountersAverage')
```
## {{ dl--parameters }} {#parameters}
{% include [sum_limits-python__sum-limits__parameters](../_includes/work_src/reusage-python/python__sum-limits__parameters.md) %}
{% note info %}
- The bias of the models sum is equal to the weighted sum of the models' biases.
- The scale of the models sum is equal to 1; leaf values are scaled before the summation.
{% endnote %}
## {{ dl--output-format }} {#usage-example}
{{ product }} model
## {{ input_data__title__example }} {#example}
```python
from catboost import CatBoostClassifier, Pool, sum_models
from catboost.datasets import amazon
import numpy as np
from sklearn.model_selection import train_test_split
train_df, _ = amazon()
y = train_df.ACTION
X = train_df.drop('ACTION', axis=1)
categorical_features_indices = np.where(X.dtypes != np.float64)[0]
X_train, X_validation, y_train, y_validation = train_test_split(X,
y,
train_size=0.8,
random_state=42)
train_pool = Pool(X_train,
y_train,
cat_features=categorical_features_indices)
validate_pool = Pool(X_validation,
y_validation,
cat_features=categorical_features_indices)
models = []
for i in range(5):
model = CatBoostClassifier(iterations=100,
random_seed=i)
model.fit(train_pool,
eval_set=validate_pool)
models.append(model)
models_avrg = sum_models(models,
weights=[1.0/len(models)] * len(models))
```
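The summed model can then be scored like any other trained model. A minimal follow-up sketch (assuming the example above has been run; it uses the generic `predict` call with `prediction_type='Probability'`):
```python
# Class probabilities from the averaged model on the validation pool
avrg_probs = models_avrg.predict(validate_pool, prediction_type='Probability')
print(avrg_probs[:3])
```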
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@catboost@docs@en@concepts@[email protected]_END.py
|
{
"filename": "line_prob.py",
"repo_name": "HETDEX/elixer",
"repo_path": "elixer_extracted/elixer-main/elixer/line_prob.py",
"type": "Python"
}
|
#import Bayes.bayesian
#import Bayes.nb
import math
try:
from ConfigParser import RawConfigParser
except:
from configparser import RawConfigParser
import os
import sys
import astropy.stats.biweight as biweight
import matplotlib.pyplot as plt
from scipy.stats import bayes_mvs
try:
from elixer import global_config as G
from elixer import weighted_biweight as elixer_biweight
import elixer.line_classifier.probs.classification_prob as LineClassifierPro
# import elixer.line_classifier.probs.classification_prob_leung as LineClassifierPro_Leung
except:
import global_config as G
import line_classifier.probs.classification_prob as LineClassifierPro
import weighted_biweight as elixer_biweight
# import line_classifier.probs.classification_prob_leung as LineClassifierPro_Leung
import numpy as np
#numpy import array
#don't need to do this ... is performed in source_prob call
#from line_classifier.misc.tools import generate_cosmology_from_config, read_flim_file
MAX_PLAE_POII = 1000.
MIN_PLAE_POII = 0.001
UNIVERSE_CONFIG = None
FLUX_LIMIT_FN = None
COSMOLOGY = None
#log = G.logging.getLogger('line_prob_logger')
#log.setLevel(G.logging.DEBUG)
log = G.Global_Logger('line_prob_logger')
log.setlevel(G.LOG_LEVEL)
def conf_interval_asym(data,avg,conf=0.68):
"""
:param data:
:param avg:
:param conf:
:return:
"""
high, low = None, None
try:
size = len(data)
if size < 10:
log.info(f"conf_interval_asym, data size too small {size}")
return None, None
step = int(round(conf/2. * size))
s = np.array(sorted(data))
idx = (np.abs(s - avg)).argmin()
if (idx == 0) or (idx == size-1):
#there is a problem or the list is all essentially identical
if np.std(s) < 1e-5: #effectively zero, the average has no error and the ci is avg +/- 0
return s[idx], s[idx]
            #if many entries share the same value, put our position in the center of that run
same_idx = np.where(s==s[idx])[0]
if len(same_idx) > 1:
log.debug(f"conf_interval_asym, multiple matches ({len(same_idx)}) to avg {avg}")
idx = int(np.nanmedian(same_idx))
low = s[max(0,idx-step)]
high = s[min(size-1,idx+step)]
log.debug(f"Asym Confidence interval: high: {high}, low: {low}, ci: {conf}, len: {size}")
except:
log.debug("Exception in conf_interval_asym",exc_info=True)
return high, low
def conf_interval(num_samples,sd,conf=0.95):
"""
mean +/- error ... this is the +/- error part as 95% (or other) confidence interval (assuming normal distro)
:param num_samples:
:param sd: standard deviation
:param conf:
:return:
"""
if num_samples < 30:
return None
#todo: put in other values
if conf == 0.68:
t = 1.0
elif conf == 0.95:
t = 1.96
elif conf == 0.99:
t = 2.576
else:
log.debug("todo: need to handle other confidence intervals: ", conf)
return None
return t * sd / np.sqrt(num_samples)
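# Added illustrative sketch (not part of the original module): for 100 samples with a standard
# deviation of 2.0, the 95% half-width from conf_interval should be 1.96 * 2.0 / sqrt(100) = 0.392.
def _example_conf_interval():
    ci = conf_interval(100, 2.0, conf=0.95)
    return abs(ci - 1.96 * 2.0 / np.sqrt(100)) < 1e-12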
# def example_prob_LAE():
# Cosmology = {'omega_M_0': 0.3, 'omega_lambda_0': 0.7, 'h': 0.70, 'omega_k_0': 0}
# LAE_Priors = 1 # case 1
# alpha_LAE = -1.65 # but can have other values based on LAE_Priors case adopted (-1.36, -1.3, etc?)
#
# # def main():
#
# # initialization taken directly from Andrew's code, but I don't know what they mean
# Bayes.nb.init(_alpha_LAE=-1.65, _mult_LStar_LAE=1, _mult_PhiStar_LAE=1, _mult_w0_LAE=1,
# _alpha_OII=-1.2, _mult_LStar_OII=1, _mult_PhiStar_OII=1, _mult_w0_OII=1)
#
# ratio_LAE, plgd, pogd = Bayes.bayesian.prob_ratio(wl_obs=3900.0, lineFlux=1.6e-17, ew_obs=-20.0,
# c_obs=None, which_color=None, addl_fluxes=None,
# sky_area=5.5e-07, cosmo=Cosmology, LAE_priors=LAE_Priors,
# EW_case=1, W_0=None, z_OII=None, sigma=None)
#
# print(ratio_LAE, plgd, pogd)
def fiber_area_in_sqdeg(num_fibers=1):
#assumes no overlap
return num_fibers*(math.pi*G.Fiber_Radius**2)/(3600.)**2
#
#
# #wrapper for Andrew Leung's base code
# def prob_LAE_old(wl_obs,lineFlux,ew_obs,c_obs, which_color=None, addl_wavelengths=None, addl_fluxes=None,addl_errors=None,
# sky_area=None, cosmo=None, lae_priors=None, ew_case=None, W_0=None, z_OII=None, sigma=None):
# '''
#
# :param wl_obs:
# :param lineFlux:
# :param ew_obs:
# :param c_obs:
# :param which_color:
# :param addl_fluxes: cgs flux
# :param addl_wavelengths: wavelength(observed) for each addl_flux at same index
# :param sky_area:
# :param cosmo:
# :param lae_priors:
# :param ew_case:
# :param W_0:
# :param z_OII:
# :param sigma:
# :return:
# '''
#
# #sanity check
# if (ew_obs is None) or (ew_obs == -300) or (ew_obs == 666) or (lineFlux < 0):
# #bsically, sentinel (null) values from Karl's input file
# return 0.0,0.0,0.0
#
# #what about different equivalent width calculations for LAE vs OII (different rest wavelength ... or is the ew_obs
# #not rest_frame --- the more likely ... in which case, don't divide by the ratio of observed wavelength/rest)?
#
# #addl_fluxes should be a dictionary: {wavlength,flux} (need to update downstream code though)
# #to be consistent with Andrew's code, made addl_fluxex and addl_wavelengths, index matched arrays
#
# #sky_area in square degrees (should this be the overlapped sum of all fibers? each at 0.75" radius?
# #makes very little difference in the output
#
# #LAE call needs LAE_Priors, but OII does not
# #OII calls need the last four parameters (EW_case, z_OII, W_0, sigma) but the LAE calls do not
#
# Cosmology_default = {'omega_M_0': 0.3, 'omega_lambda_0': 0.7, 'h': 0.70, 'omega_k_0': 0}
# LAE_Priors_default = 1 # case 1
# EW_case_default = 1
# alpha_LAE_default = -1.65 # but can have other values based on LAE_Priors case adopted (-1.36, -1.3, etc?)
# sky_area_default = 1.36e-7 # one fiber in square degrees (has very little effect on probabilities anyway and
# # really is only used in the simulation
# #looks like z_OII is meant to be a list that is used by the simulation, so not needed here
# #the actual z_OII that is used is calculated as you would expect from the wavelength, assuming it is OII
#
# # def main():
# if cosmo is None:
# cosmo = Cosmology_default
#
# if lae_priors is None:
# lae_priors = LAE_Priors_default
#
# if ew_case is None:
# ew_case = EW_case_default
#
# if sky_area is None:
# sky_area = sky_area_default
#
#
# #looks like lineFlux should be in cgs units (despite the paper showing luminosity func in Jy)
# #convert lineFlux from cgs to Jansky
# #lineFlux = lineFlux / (1e-23) * (wl_obs**2)/(3e18)
#
# #suppress sign of EW (always wants positive)
# ew_obs = abs(ew_obs)
#
# #todo: addl_errors=None
# try:
# # initialization taken directly from Andrew's code, but I don't know what they mean
# Bayes.nb.init(_alpha_LAE=alpha_LAE_default, _mult_LStar_LAE=1, _mult_PhiStar_LAE=1, _mult_w0_LAE=1,
# _alpha_OII=-1.2, _mult_LStar_OII=1, _mult_PhiStar_OII=1, _mult_w0_OII=1)
#
# #plgd == Probability of lae given the data?
# #pogd = Probability of OII given the data ?
# ratio_LAE, plgd, pogd = Bayes.bayesian.prob_ratio(wl_obs=wl_obs, lineFlux=lineFlux, ew_obs=ew_obs,
# c_obs=c_obs, which_color=which_color,
# addl_wavelengths=addl_wavelengths,
# addl_fluxes=addl_fluxes,
# sky_area=sky_area, cosmo=cosmo, LAE_priors=lae_priors,
# EW_case=ew_case, W_0=W_0, z_OII=z_OII, sigma=sigma)
#
#
# #ratio_LAE is plgd/pogd
# #slightly different representation of ratio_LAE (so recomputed for elixer use)
# if (plgd is not None) and (plgd > 0.0):
# if (pogd is not None) and (pogd > 0.0):
# ratio_LAE = float(plgd) / pogd
# else:
# ratio_LAE = float('inf')
# else:
# ratio_LAE = 0.0
#
# ratio_LAE = min(MAX_PLAE_POII,ratio_LAE)
# except:
# ratio_LAE = 0
# plgd = 0
# pogd = 0
# log.error("Exception calling into Bayes: ", exc_info=True)
#
#
# return ratio_LAE, plgd, pogd
#
#def source_prob(config, ra, dec, zs, fluxes, flux_errs, ews_obs, ew_err, c_obs, which_color, addl_fluxes,
# addl_fluxes_error, addl_line_names, flim_file, extended_output=False):
# """
# Return P(LAE) = P(LAE|DATA)/P(DATA) and evidence ratio P(DATA|LAE)P(LAE)/(P(DATA|OII)P(OII))
# given input information about the sources
#
# Parameters
# ----------
# config : ConfigParser
# configuration object
# ra, dec, zs, fluxes, flux_errs, ews_obs, ew_err, c_obs : array
# positions, redshifts (assuming LAE), line fluxes, errors on
# line fluxes, equivalent widths, errors on EW and
# colours of the sources (latter two not used!)
# which_color : str
# which colour is given
# addl_fluxes, addl_fluxes_error : 2D array
# each i of addl_fluxes[i, :] should correspond,
# in order, to the fluxes measured for each source
# for the emission line named at position i in
# addl_line_names. To not use set to None (not an
# array of None!)
# addl_line_names : array
# names of emission lines stored in addl_fluxes, in the
# correct order (see above). To not use set to None (not an array of None!)
# flim_file : str
# file containing the flux limits
# h : float
# Hubbles constant/100
# extended_output : bool
# Return extended output
#
# Returns
# -------
# posterior_odds, prob_lae_given_data : float arrays
# posterior_odds = P(DATA|LAE)P(LAE)/(P(DATA|OII)P(OII))
# P(LAE|DATA) = P(DATA|LAE)*P(LAE)/(P(DATA|LAE)*P(LAE) + P(DATA|OII)*P(OII))
#
# """
#
# def xxx_bootstrap_prob_LAE(wl_obs,lineFlux,lineFlux_err=None, continuum=None, continuum_err=None, c_obs=None, which_color=None,
# addl_wavelengths=None, addl_fluxes=None,addl_errors=None, sky_area=None, cosmo=None, lae_priors=None, ew_case=None, W_0=None,
# z_OII=None, sigma=None, num_bootstraps=10000, confidence=0.68):
# """
#
# :param wl_obs:
# :param lineFlux:
# :param lineFlux_err:
# :param continuum:
# :param continuum_err:
# :param c_obs:
# :param which_color:
# :param addl_wavelengths:
# :param addl_fluxes:
# :param addl_errors:
# :param sky_area:
# :param cosmo:
# :param lae_priors:
# :param ew_case:
# :param W_0:
# :param z_OII:
# :param sigma:
# :param num_bootstraps:
# :param confidence: confidence interval ... commonly 0.68 or 0.95 or 0.99, etc
# :return:
# """
# #sanity check
# if confidence < 0 or confidence > 1.0:
# log.debug("Nonsense confidence (%f) in bootstrap_prob_LAE" %(confidence))
# return None, None, None, None
#
# lineflux_array = np.random.normal(lineFlux,lineFlux_err,num_bootstraps)
# continuum_array = np.random.normal(continuum,continuum_err,num_bootstraps)
# ew_obs_array = lineflux_array / continuum_array
#
# lae_oii_ratio_list = []
# p_lae_list = []
# p_oii_list = []
#
# for lf,ew in zip(lineflux_array,ew_obs_array):
# try:
# lae_oii_ratio, p_lae, p_oii = prob_LAE(wl_obs=wl_obs,
# lineFlux=lf,
# ew_obs=ew,
# lineFlux_err=0,
# ew_obs_err=0,
# c_obs=None, which_color=None, addl_wavelengths=addl_wavelengths,
# addl_fluxes=addl_fluxes, addl_errors=addl_errors, sky_area=None,
# cosmo=None, lae_priors=None,
# ew_case=None, W_0=None,
# z_OII=None, sigma=None, estimate_error=False)
#
# lae_oii_ratio_list.append(lae_oii_ratio)
# p_lae_list.append(p_lae)
# p_oii_list.append(p_oii)
#
# except:
# log.debug("Exception calling prob_LAE in bootstrap_prob_LAE",exc_info=True)
#
# #now the "original" single call at the "exact" values for the flux and ew
# try:
# lae_oii_ratio, p_lae, p_oii = prob_LAE(wl_obs=wl_obs,
# lineFlux=lineFlux,
# ew_obs=lineFlux/continuum,
# lineFlux_err=0,
# ew_obs_err=0,
# c_obs=None, which_color=None, addl_wavelengths=addl_wavelengths,
# addl_fluxes=addl_fluxes, addl_errors=addl_errors, sky_area=None,
# cosmo=None, lae_priors=None,
# ew_case=None, W_0=None,
# z_OII=None, sigma=None, estimate_error=False)
# except:
# log.debug("Exception calling standard prob_LAE in bootstrap_prob_LAE", exc_info=True)
# return None, None, None, None
#
# try:
# #using biweight
# loc = biweight.biweight_location(lae_oii_ratio_list) #the "average"
# scale = biweight.biweight_scale(lae_oii_ratio_list)
# ci = conf_interval(len(lae_oii_ratio_list),scale,conf=0.68)
# ratio_LAE_list = [loc,loc-ci,loc+ci]
# # temp:
# #import matplotlib.pyplot as plt
# plt.close('all')
# plt.figure()
# vals, bins, _ = plt.hist(lae_oii_ratio_list, bins="auto")
# plt.title("%0.3g (%0.3g, %0.3g) bins (%d)\n min (%0.3g) max (%0.3g)"
# % (ratio_LAE_list[0], ratio_LAE_list[1], ratio_LAE_list[2], len(bins) - 1, min(vals), max(vals)))
# plt.savefig("plae_bw_hist_%d.png" %num_bootstraps)
# except:
# log.debug("Exception calling biweight or conf_interval in bootstap_prob_LAE", exc_info=True)
# return lae_oii_ratio, p_lae, p_oii, None
#
#
#
# try:
# if True:
# mean_cntr, var_cntr, std_cntr = bayes_mvs(lae_oii_ratio_list, alpha=0.68)
# ratio_LAE_list = [lae_oii_ratio, None, None]
# if not np.isnan(mean_cntr[0]):
# ratio_LAE_list[0] = mean_cntr[0]
# ratio_LAE_list[1] = mean_cntr[1][0]
# ratio_LAE_list[2] = mean_cntr[1][1]
# #temp:
# #import matplotlib.pyplot as plt
# plt.close('all')
# plt.figure()
# vals, bins, _ = plt.hist(lae_oii_ratio_list,bins="auto")
# plt.title("%0.3g (%0.3g, %0.3g) bins (%d)\n min (%0.3g) max (%0.3g)"
# % (ratio_LAE_list[0], ratio_LAE_list[1], ratio_LAE_list[2],len(bins)-1,min(vals),max(vals)))
# plt.savefig("plae_mean_hist_%d.png" %num_bootstraps)
# else:
# ratio_LAE_list[1] = lae_oii_ratio
# ratio_LAE_list[2] = lae_oii_ratio
#
# mean_cntr, var_cntr, std_cntr = bayes_mvs(p_lae_list, alpha=0.68)
# plgd_list = [p_lae,None,None]
# if not np.isnan(mean_cntr[0]):
# plgd_list[0] = mean_cntr[0]
# plgd_list[1] = mean_cntr[1][0]
# plgd_list[2] = mean_cntr[1][1]
# else:
# plgd_list[1] = p_lae
# plgd_list[2] = p_lae
#
# mean_cntr, var_cntr, std_cntr = bayes_mvs(p_oii_list, alpha=0.68)
# pogd_list = [p_oii,None,None]
# if not np.isnan(mean_cntr[0]):
# pogd_list[0] = mean_cntr[0]
# pogd_list[1] = mean_cntr[1][0]
# pogd_list[2] = mean_cntr[1][1]
# else:
# pogd_list[1] = p_oii
# pogd_list[2] = p_oii
# except:
# log.debug("Exception calling bayes_mvs in bootstrap_prob_LAE", exc_info=True)
# return lae_oii_ratio, p_lae, p_oii, None
#
# log.info("Bootstrap PLAE: %0.4g (%0.4g, %0.4g) " %(ratio_LAE_list[0],ratio_LAE_list[1],ratio_LAE_list[2]))
#
# return lae_oii_ratio, p_lae, p_oii, {'ratio':ratio_LAE_list,'plgd':plgd_list,'pogd':pogd_list}
#
#
def prob_LAE(wl_obs,lineFlux,lineFlux_err=None, ew_obs=None, ew_obs_err=None, c_obs=None, which_color=None, addl_wavelengths=None,
addl_fluxes=None,addl_errors=None, sky_area=None, cosmo=None, lae_priors=None, ew_case=None, W_0=None,
z_OII=None, sigma=None,estimate_error=False):
#temporarary ... call both and compare
# old_ratio_LAE, old_plgd, old_pogd = prob_LAE_old(wl_obs,lineFlux,ew_obs,c_obs, which_color=which_color,
# addl_wavelengths=addl_wavelengths, addl_fluxes=addl_fluxes,addl_errors=addl_errors,
# sky_area=sky_area, cosmo=cosmo, lae_priors=lae_priors, ew_case=ew_case, W_0=W_0, z_OII=z_OII, sigma=sigma)
#
global UNIVERSE_CONFIG, FLUX_LIMIT_FN
if UNIVERSE_CONFIG is None:
try:
config_fn = os.path.join(os.path.dirname(os.path.realpath(__file__)), G.RELATIVE_PATH_UNIVERSE_CONFIG)
UNIVERSE_CONFIG = RawConfigParser()
UNIVERSE_CONFIG.read(config_fn)
log.debug("Load universe config for LAE/OII discriminator: %s" %config_fn)
FLUX_LIMIT_FN = os.path.join(os.path.dirname(os.path.realpath(__file__)),G.RELATIVE_PATH_FLUX_LIM_FN)
log.debug("Load flux limit filename for LAE/OII discriminator: %s" % FLUX_LIMIT_FN)
# don't need to do this ... is performed in source_prob call
#COSMOLOGY = generate_cosmology_from_config(UNIVERSE_CONFIG)
except:
log.warning("Exception loading LAE/OII discriminator config",exc_info=True)
print("Exception loading LAE/OII discriminator config")
# posterior_odds = 0.0
# prob_lae_given_data = 0.0
#build up parameters (need to be numpy arrays for the call)
ra = None #have no meaning in this case? could set to [100.0] and [0.0] per example?
dec = None
z_LyA = wl_obs / G.LyA_rest - 1.0
z_OII = wl_obs / G.OII_rest - 1.0
#suppress sign of EW (always wants positive)
# (and, note this is the OBSERVERED EqW, not EW/(1+z_LAE) ... that calc occurs inside the calls)
ew_obs = abs(ew_obs)
if lineFlux_err is None:
lineFlux_err = 0.0
if ew_obs_err is None:
ew_obs_err = 0.0
#convert additional wavelengths into names for the call
#from the UNIVERSE_CONFIG file
known_lines = UNIVERSE_CONFIG.items("wavelengths") #array of tuples (name,wavelength)
extra_fluxes = []
extra_fluxes_err = []
extra_fluxes_name = []
#all the extra lines used by the Bayes code are visible in our range only if OII is the primary
#so assume OII and shift to rest frame
# LAE = 1215.668
# OII = 3727.45
# NeIII = 3869.00
# H_beta = 4861.32
# OIII4959 = 4958.91
# OIII5007 = 5006.84
#iterate over all in addl_wavelengths, if +/- (1? 2? AA ... what are we using elsewhere?) assign name
#if no match, toss the additional flux, etc
wl_unc = 2.0 #AA
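    # Worked example (illustrative, added for clarity): for a detection at wl_obs = 3900 AA,
    # z_OII = 3900/3727.45 - 1 ~ 0.046, so NeIII would be matched near 3869.00 * 1.046 ~ 4048 AA
    # (within +/- wl_unc of an entry in addl_wavelengths).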
if (addl_wavelengths is None) or (addl_fluxes is None) or (addl_errors is None):
addl_wavelengths = []
addl_fluxes = []
addl_errors = []
try:
for n, w in known_lines:
w_oii = float(w) * (z_OII + 1.)
for i in range(len(addl_fluxes)):
if abs(w_oii-addl_wavelengths[i]) < wl_unc:
extra_fluxes.append(addl_fluxes[i])
extra_fluxes_name.append(n)
try:
extra_fluxes_err.append(addl_errors[i])
except:
extra_fluxes_err.append(0.0)
log.warning("Exception (non-fatal) building extra line fluxes in line_prob.py. " + \
"Unable to set flux uncertainty.", exc_info=True)
break
except:
log.error("Exception building extra line fluxes in line_prob.py.", exc_info=True)
if estimate_error:
return 0,0,0,{}
else:
return 0,0,0
plae_errors = {} #call classifier multiple times and get an error estimate on the PLAE/POII ratio
#at least for now, just call for EqW ... that is the biggest error source
#flux_array_range = [lineFlux]
if estimate_error and ew_obs_err:
ew_list = [ew_obs,ew_obs-ew_obs_err,ew_obs+ew_obs_err] # so: value, -error, +error
else:
ew_list = [ew_obs]
posterior_odds_list = []
prob_lae_given_data_list = []
for e in ew_list:
try:
posterior_odds, prob_lae_given_data = LineClassifierPro.source_prob(UNIVERSE_CONFIG,
np.array([ra]), np.array([dec]), np.array([z_LyA]),
np.array([lineFlux]), np.array([lineFlux_err]),
np.array([e]), np.array([ew_obs_err]),
c_obs=None, which_color=None,
addl_fluxes=np.array(extra_fluxes),
addl_fluxes_error=np.array(extra_fluxes_err),
addl_line_names=np.array(extra_fluxes_name),
flim_file=FLUX_LIMIT_FN,extended_output=False)
if isinstance(posterior_odds,list) or isinstance(posterior_odds,np.ndarray):
if len(posterior_odds) == 1:
posterior_odds = posterior_odds[0]
else:
log.info("Weird. posterior_odds %s" %(posterior_odds))
if isinstance(prob_lae_given_data,list) or isinstance(prob_lae_given_data,np.ndarray):
if len(prob_lae_given_data) == 1:
prob_lae_given_data = prob_lae_given_data[0]
else:
log.info("Weird. prob_lae_given_data %s" %(prob_lae_given_data))
posterior_odds_list.append(posterior_odds)
prob_lae_given_data_list.append(prob_lae_given_data)
except:
log.error("Exception calling LineClassifierPro::source_prob()", exc_info=True)
###############################
#just testing ....
###############################
if False:
# @pytest.mark.parametrize("z, fluxes, ew_obs, addl_fluxes, addl_names, e_ratio, e_prob_lae",
# [
# (1.9, 9e-17, 40, None, None, 1e+32, 1.0),
# (2.48, 9e-17, 40, None, None, 9.0769011810393501, 0.90076314314944406),
# (3.18, 9e-17, 40, None, None, 0.17790889751426178, 0.151037909544365),
# (2.08, 9e-17, 40, [[5e-17]], ["NeIII"], 10.917948575339162, 0.91609294219734949),
# (2.12, 9e-17, 40, [[6e-17]], ["H_beta"], 2.2721726484396545e-09,
# 2.2721726536024229e-09),
# (2.08, 9e-17, 40, [[7e-17], [9e-17 * 4.752 / 1.791]], ["OIII4959", "OIII5007"], 0.0,
# 0.0)
# ])
#flim_file = '/home/dustin/code/python/elixer/line_classifier_install/tests/data/Line_flux_limit_5_sigma_baseline.dat'
#posterior_odds, prob_lae_given_data = LineClassifierPro.source_prob(UNIVERSE_CONFIG, [100.0], [0.0],
# [2.08], array([9e-17]),
# array([0.0]), [40.], [0.0], None, None, None, None, None,
# FLUX_LIMIT_FN)
try:
from elixer.line_classifier.misc.tools import read_flim_file
except:
from line_classifier.misc.tools import read_flim_file
flims = read_flim_file(FLUX_LIMIT_FN)
errors = []
for x in ["NeIII"]:
zoii = (1.0 + 2.08) * UNIVERSE_CONFIG.getfloat("wavelengths", "LAE") / UNIVERSE_CONFIG.getfloat("wavelengths", "OII") - 1.0
lambda_ = UNIVERSE_CONFIG.getfloat("wavelengths", x) * (1.0 + zoii)
errors.append(0.2 * flims(lambda_ / UNIVERSE_CONFIG.getfloat("wavelengths", "LAE") - 1.0))
errors = np.array(errors)
posterior_odds, prob_lae_given_data = LineClassifierPro.source_prob(UNIVERSE_CONFIG, ra,dec,
np.array([2.08]), np.array([9e-17]), np.array([0.0]),
[40.0], [0.0], None, None,
np.array([5e-17]), errors, np.array(["NeIII"]), FLUX_LIMIT_FN)
#LineClassifierPro.source_prob(extended_output=False)
################################
# end testing
###############################
pogd_list = []
plgd_list = []
ratio_LAE_list = []
    for posterior_odds, prob_lae_given_data in zip(posterior_odds_list, prob_lae_given_data_list):
if (posterior_odds is not None) and (posterior_odds != 0):
pogd = float(prob_lae_given_data) / posterior_odds
else:
pogd = 0.
plgd = float(prob_lae_given_data)
ratio_LAE = float(min(MAX_PLAE_POII, posterior_odds))
ratio_LAE = float(max(ratio_LAE,MIN_PLAE_POII))
ratio_LAE_list.append(ratio_LAE)
plgd_list.append(plgd)
if type(pogd) != float:
pogd_list.append(pogd.value)
else:
pogd_list.append(pogd)
#temporary -- compare results and note if the new method disagrees with the old
# if old_ratio_LAE + ratio_LAE > 0.2: #if they are both small, don't bother
# if abs((old_ratio_LAE - ratio_LAE)/(0.5*(old_ratio_LAE+ratio_LAE))) > 0.01:
# msg = "Warning! Difference in P(LAE)/P(OII).\n Original: P(LAE|data) = %f, P(OII|data) = %f, ratio = %f" \
# "\n New: P(LAE|data) = %f, P(OII|data) = %f, ratio = %f" \
# %(old_plgd,old_pogd,old_ratio_LAE,plgd,pogd,ratio_LAE)
#
# log.warning("***" + msg)
# #print(msg)
if estimate_error:
return ratio_LAE_list[0], plgd_list[0], pogd_list[0], {'ratio':ratio_LAE_list,'plgd':plgd_list,'pogd':pogd_list}
else:
return ratio_LAE_list[0], plgd_list[0], pogd_list[0]
def mc_prob_LAE(wl_obs,lineFlux,lineFlux_err=None, continuum=None, continuum_err=None, ew_obs=None, ew_obs_err=None,
c_obs=None, which_color=None, addl_wavelengths=None, addl_fluxes=None,addl_errors=None, sky_area=None,
cosmo=None, lae_priors=None, ew_case=None, W_0=None, z_OII=None, sigma=None,
num_mc=G.MC_PLAE_SAMPLE_SIZE, confidence=G.MC_PLAE_CONF_INTVL, continuum_err_statistical=None):
"""
:param wl_obs:
:param lineFlux:
:param lineFlux_err:
:param continuum:
:param continuum_err:
    :param ew_obs: used to reconstruct continuum if continuum is not provided
    :param ew_obs_err: used to reconstruct continuum_err if continuum_err is not provided
:param c_obs:
:param which_color:
:param addl_wavelengths:
:param addl_fluxes:
:param addl_errors:
:param sky_area:
:param cosmo:
:param lae_priors:
:param ew_case:
:param W_0:
:param z_OII:
:param sigma:
:param num_mc:
:param confidence: confidence interval ... commonly 0.68 or 0.95 or 0.99, etc
:return:
"""
try:
#sanity check
if confidence < 0 or confidence > 1.0:
log.debug("Nonsense confidence (%f) in mc_prob_LAE" %(confidence))
return None, None, None, None
if (continuum is None):
if (ew_obs is None):
log.debug("Insufficient info for mc_prob_LAE, continuum and ew_obs not provided")
return 0, 0, 0, {'ratio':[0,0,0],'plgd':[0],'pogd':[0]}
else: #build continuum and continuum error from ew
if ew_obs > 0:
continuum = lineFlux / ew_obs
if (ew_obs_err is not None) and (ew_obs_err != 0):
#this can be negative if the ew error is unreasonably small compared to the lineflux error
#in which case the sqrt fails, but set it to 0.0 and the error will be dominated by the lineflux anyway
continuum_err = continuum * np.sqrt(max(0.0,(ew_obs_err/ew_obs)**2 - (lineFlux_err/lineFlux)**2))
else:
continuum_err = 0.0
else:
log.debug("Invalid lineflux or continuum or ew_obs")
return 0, 0, 0, {'ratio':[0,0,0],'plgd':[0],'pogd':[0]}
if (lineFlux <= 0) or (continuum <= 0) or (np.isnan(lineFlux)) or (np.isnan(continuum)):
log.debug("Invalid lineflux or continuum")
return 0, 0, 0, {'ratio':[0,0,0],'plgd':[0],'pogd':[0]}
if (lineFlux_err is None):
log.debug("LineFlux error is None")
lineFlux_err = 0
if (lineFlux_err < 0):
log.debug("LineFlux error < 0")
lineFlux_err = 0
#if lineFlux_err > lineFlux:
# log.debug(f"LineFlux error large: {lineFlux_err:0.4g}. Set to lineflux: {lineFlux:0.4g}")
# lineFlux_err = lineFlux
if (continuum_err is None):
log.debug("Continuum error is None")
continuum_err = 0
if (continuum_err < 0):
log.debug("Continuum error < 0")
continuum_err = 0
#if continuum_err > continuum:
# log.debug(f"continuum error large: {continuum_err:0.4g}. Set to continuum: {continuum:0.4g}")
# continuum_err = continuum
if continuum_err == lineFlux_err == 0:
log.debug("Continuum error and Lineflux error set to zero. Single run only (no mc).")
num_mc = 1
        _max_sample_retry = 10 #number of attempts to get a valid lineflux and continuum (both must be positive)
log.debug(f"Sampling {num_mc} PLAE/POII. Lf {lineFlux} +/- {lineFlux_err}, Cont {continuum} +/- {continuum_err}")
# lineflux_array = np.random.normal(lineFlux,lineFlux_err,num_mc)
# continuum_array = np.random.normal(continuum,continuum_err,num_mc)
# ew_obs_array = lineflux_array / continuum_array
lae_oii_ratio_list = []
p_lae_list = []
p_oii_list = []
continuum_bright_limit = min(continuum+continuum_err, continuum * (1. + G.CONTINUUM_BRIGHT_REL_ERR_LIMIT))
global UNIVERSE_CONFIG, FLUX_LIMIT_FN
if UNIVERSE_CONFIG is None:
try:
config_fn = os.path.join(os.path.dirname(os.path.realpath(__file__)), G.RELATIVE_PATH_UNIVERSE_CONFIG)
UNIVERSE_CONFIG = RawConfigParser()
UNIVERSE_CONFIG.read(config_fn)
log.debug("Load universe config for LAE/OII discriminator: %s" %config_fn)
FLUX_LIMIT_FN = os.path.join(os.path.dirname(os.path.realpath(__file__)),G.RELATIVE_PATH_FLUX_LIM_FN)
log.debug("Load flux limit filename for LAE/OII discriminator: %s" % FLUX_LIMIT_FN)
# don't need to do this ... is performed in source_prob call
#COSMOLOGY = generate_cosmology_from_config(UNIVERSE_CONFIG)
except:
log.warning("Exception loading LAE/OII discriminator config",exc_info=True)
print("Exception loading LAE/OII discriminator config")
# posterior_odds = 0.0
# prob_lae_given_data = 0.0
#build up parameters (need to be numpy arrays for the call)
ra = None #have no meaning in this case? could set to [100.0] and [0.0] per example?
dec = None
z_LyA = wl_obs / G.LyA_rest - 1.0
z_OII = wl_obs / G.OII_rest - 1.0
#convert additional wavelengths into names for the call
#from the UNIVERSE_CONFIG file
known_lines = UNIVERSE_CONFIG.items("wavelengths") #array of tuples (name,wavelength)
extra_fluxes = []
extra_fluxes_err = []
extra_fluxes_name = []
#all the extra lines used by the Bayes code are visible in our range only if OII is the primary
#so assume OII and shift to rest frame
# LAE = 1215.668
# OII = 3727.45
# NeIII = 3869.00
# H_beta = 4861.32
# OIII4959 = 4958.91
# OIII5007 = 5006.84
#iterate over all in addl_wavelengths, if +/- (1? 2? AA ... what are we using elsewhere?) assign name
#if no match, toss the additional flux, etc
wl_unc = 2.0 #AA
if (addl_wavelengths is None) or (addl_fluxes is None) or (addl_errors is None):
addl_wavelengths = []
addl_fluxes = []
addl_errors = []
try:
for n, w in known_lines:
w_oii = float(w) * (z_OII + 1.)
for i in range(len(addl_fluxes)):
if abs(w_oii-addl_wavelengths[i]) < wl_unc:
extra_fluxes.append(addl_fluxes[i])
extra_fluxes_name.append(n)
try:
extra_fluxes_err.append(addl_errors[i])
except:
extra_fluxes_err.append(0.0)
log.warning("Exception (non-fatal) building extra line fluxes in line_prob.py. " + \
"Unable to set flux uncertainty.", exc_info=True)
break
except:
log.error("Exception building extra line fluxes in line_prob.py.", exc_info=True)
            # mc_prob_LAE always returns four values (there is no estimate_error parameter here)
            return 0, 0, 0, {'ratio':[0,0,0],'plgd':[0],'pogd':[0]}
plae_errors = {} #call classifier multiple times and get an error estimate on the PLAE/POII ratio
#at least for now, just call for EqW ... that is the biggest error source
#flux_array_range = [lineFlux]
posterior_odds_list = []
prob_lae_given_data_list = []
        setup = {} #first run setup data for the LineClassifierPro ... will be populated by source_prob on first call
#then passed in on subsequent calls to speed up processing
#ct = 0
#for lf,ew in zip(lineflux_array,ew_obs_array):
# if continuum_err_statistical is not None:
# continuum_err_use = continuum_err_statistical
# else:
# continuum_err_use = continuum_err
for i in range(num_mc):
tryagain = 0
lf = 0
cn = 0
ew = 0
while tryagain < _max_sample_retry:
lf = np.random.normal(lineFlux, lineFlux_err)
cn = np.random.normal(continuum, continuum_err)
#if cn > continuum_bright_limit:
# cn = np.random.normal(continuum, continuum * G.CONTINUUM_BRIGHT_REL_ERR_LIMIT) #ontinuum_bright_limit
if lf > 0 and cn > 0:
ew = lf / cn
break
else:
tryagain += 1
if not (tryagain < _max_sample_retry):
log.info("Failed to properly sample lineflux and/or continuum. Cannot continue.")
break
try:
#z_LyA = 2.2
#ct += 1
#log.debug("%d"%ct)
posterior_odds, prob_lae_given_data,setup = LineClassifierPro.source_prob(UNIVERSE_CONFIG,
np.array([ra]), np.array([dec]),
np.array([z_LyA]),
np.array([lf]),
np.array([0.0]),
np.array([ew]), np.array([0.0]),
c_obs=None, which_color=None,
addl_fluxes=np.array(extra_fluxes),
addl_fluxes_error=np.array(
extra_fluxes_err),
addl_line_names=np.array(
extra_fluxes_name),
flim_file=FLUX_LIMIT_FN,
extended_output=False,
setup=setup)
#log.debug(f"LF {lf}, Cont {cn}, z {z_LyA:0.4f}, EW {ew:0.2f}, EWr {ew/(z_LyA+1):0.2f} PLAE/POII {float(posterior_odds):0.2f}")
if isinstance(posterior_odds,list) or isinstance(posterior_odds,np.ndarray):
if len(posterior_odds) == 1:
posterior_odds = posterior_odds[0]
else:
log.info("Weird. posterior_odds %s" %(posterior_odds))
if isinstance(prob_lae_given_data,list) or isinstance(prob_lae_given_data,np.ndarray):
if len(prob_lae_given_data) == 1:
prob_lae_given_data = prob_lae_given_data[0]
else:
log.info("Weird. prob_lae_given_data %s" %(prob_lae_given_data))
if (posterior_odds is not None) and (posterior_odds != 0):
pogd = float(prob_lae_given_data) / posterior_odds
else:
pogd = 0.
plgd = float(prob_lae_given_data)
pogd = float(pogd)
#the base code can limit this to 1000.0 (explicitly) if P(OII|Data) == 0,
#so we DO need to force these to the max of 1000.0 (which could otherwise be exceeded
#if P(OII|data) > 0 but very small)
posterior_odds = float(posterior_odds)
posterior_odds = max(MIN_PLAE_POII,min(MAX_PLAE_POII,posterior_odds))
lae_oii_ratio_list.append(float(posterior_odds))
p_lae_list.append(plgd)
p_oii_list.append(pogd)
except:
log.debug("Exception calling prob_LAE in mc_prob_LAE",exc_info=True)
#we were unable to get a sampling, so just call once with the exact values
if len(lae_oii_ratio_list) == 0:
try:
lf = lineFlux
ew = lineFlux/continuum
posterior_odds, prob_lae_given_data,setup = LineClassifierPro.source_prob(UNIVERSE_CONFIG,
np.array([ra]), np.array([dec]),
np.array([z_LyA]),
np.array([lf]),
np.array([0.0]),
np.array([ew]), np.array([0.0]),
c_obs=None, which_color=None,
addl_fluxes=np.array(extra_fluxes),
addl_fluxes_error=np.array(
extra_fluxes_err),
addl_line_names=np.array(
extra_fluxes_name),
flim_file=FLUX_LIMIT_FN,
extended_output=False,
setup=setup)
if isinstance(posterior_odds,list) or isinstance(posterior_odds,np.ndarray):
if len(posterior_odds) == 1:
posterior_odds = posterior_odds[0]
else:
log.info("Weird. posterior_odds %s" %(posterior_odds))
if isinstance(prob_lae_given_data,list) or isinstance(prob_lae_given_data,np.ndarray):
if len(prob_lae_given_data) == 1:
prob_lae_given_data = prob_lae_given_data[0]
else:
log.info("Weird. prob_lae_given_data %s" %(prob_lae_given_data))
if (posterior_odds is not None) and (posterior_odds != 0):
pogd = float(prob_lae_given_data) / posterior_odds
else:
pogd = 0.
plgd = float(prob_lae_given_data)
pogd = float(pogd)
log.debug("Sampling (%d) PLAE/POII ... done. Unable to sample. No details returned." % (num_mc))
return float(posterior_odds), plgd, pogd, None
except:
log.debug("Exception calling prob_LAE in mc_prob_LAE",exc_info=True)
try:
#lae_oii_ratio_list = np.array(lae_oii_ratio_list)
#using biweight
log.debug("Biweight ...")
try:
loc = biweight.biweight_location(lae_oii_ratio_list)
hi,lo = conf_interval_asym(lae_oii_ratio_list,loc)
#the actual std can be huge and is dodgy to compute since we are capped 1000 - 0.001
            #so, use the half-width of the middle 0.68 quantile range (roughly +/- 1sd IF this were a normal distro)
            adj_std = 0.5 * (np.quantile(lae_oii_ratio_list,0.84) - np.quantile(lae_oii_ratio_list,0.16))
if (hi is None) or (lo is None):
log.debug("Unable to perform direct asym confidence interval. Reverting to old method.")
loc = biweight.biweight_location(lae_oii_ratio_list) # the "average"
scale = biweight.biweight_scale(lae_oii_ratio_list)
ci = conf_interval(len(lae_oii_ratio_list), scale * np.sqrt(num_mc), conf=confidence)
if ci is not None:
ratio_LAE_list = [loc, loc - ci, loc + ci, adj_std] # np.nanstd(lae_oii_ratio_list)]
else:
log.warning("Confidence Interval is None in line_prob::mc_prob_LAE (p1)")
ratio_LAE_list = [loc, 0.001, 1000.0, adj_std]
else:
ratio_LAE_list = [loc, lo, hi,adj_std] #np.nanstd(lae_oii_ratio_list)]
except:
log.debug("Unable to perform direct asym confidence interval. Reverting to old method.")
loc = biweight.biweight_location(lae_oii_ratio_list) # the "average"
scale = biweight.biweight_scale(lae_oii_ratio_list)
ci = conf_interval(len(lae_oii_ratio_list), scale * np.sqrt(num_mc), conf=confidence)
            adj_std = 0.5 * (np.quantile(lae_oii_ratio_list, 0.84) - np.quantile(lae_oii_ratio_list, 0.16))
if ci is not None:
ratio_LAE_list = [loc, loc - ci, loc + ci,adj_std]#np.nanstd(lae_oii_ratio_list)]
else:
log.warning("Confidence Interval is None in line_prob::mc_prob_LAE (p2)")
ratio_LAE_list = [loc,0.001,1000.0,adj_std]
if False:
try: #this data is often skewed, so run bootstraps to normalize and take the confidence interval there
loc,ci = elixer_biweight.bootstrap_confidence_interval(lae_oii_ratio_list,confidence=confidence)
if (loc is None) or (ci is None):
log.debug("Unable to perform confidence interval via bootstrap. Reverting to old method.")
loc = biweight.biweight_location(lae_oii_ratio_list) # the "average"
scale = biweight.biweight_scale(lae_oii_ratio_list)
ci = conf_interval(len(lae_oii_ratio_list), scale * np.sqrt(num_mc), conf=confidence)
ratio_LAE_list = [loc, loc - ci, loc + ci,np.nanstd(lae_oii_ratio_list)]
except: #if it fails, fall back to the old way (and assume a normal distribution)
loc = biweight.biweight_location(lae_oii_ratio_list) # the "average"
scale = biweight.biweight_scale(lae_oii_ratio_list)
ci = conf_interval(len(lae_oii_ratio_list), scale * np.sqrt(num_mc), conf=confidence)
ratio_LAE_list = [loc, loc - ci, loc + ci,np.nanstd(lae_oii_ratio_list)]
#??? should the 'scale' by multiplied by sqrt(# samples) to be consistent?
#??? I think the sigma_mc == true sigma / sqrt(# samples) (kind of backward from sample vs population)
#ci = conf_interval(len(lae_oii_ratio_list), scale, conf=confidence)
#ci = conf_interval(len(lae_oii_ratio_list),scale*np.sqrt(num_mc),conf=confidence)
log.debug("Raw Biweight: %0.4g (%0.4g, %0.4g), min (%0.4g) max (%0.4g) std (%0.4g), Q1 (%0.4g) Q2 (%0.4g) Q3 (%0.4g)"
% (ratio_LAE_list[0], ratio_LAE_list[1], ratio_LAE_list[2], min(lae_oii_ratio_list), max(lae_oii_ratio_list),ratio_LAE_list[3],
np.quantile(lae_oii_ratio_list,0.25),np.quantile(lae_oii_ratio_list,0.50),np.quantile(lae_oii_ratio_list,0.75))
)
try:
mean_cntr, var_cntr, std_cntr = bayes_mvs(lae_oii_ratio_list, alpha=confidence)
log.debug("Bayes MVS: %0.4g (%0.4g, %0.4g), min (%0.4g) max (%0.4g) std(%0.4g), Q1 (%0.4g) Q2 (%0.4g) Q3 (%0.4g)"
% (mean_cntr[0], mean_cntr[1][0], mean_cntr[1][1], min(lae_oii_ratio_list), max(lae_oii_ratio_list), std_cntr[0],
np.quantile(lae_oii_ratio_list,0.25),np.quantile(lae_oii_ratio_list,0.50),np.quantile(lae_oii_ratio_list,0.75))
)
except:
pass
# for i in range(len(ratio_LAE_list)): #force the range to be between MIN_PLAE_POII and MAX_PLAE_POII
# ratio_LAE_list[i] = max(min(MAX_PLAE_POII,ratio_LAE_list[i]),MIN_PLAE_POII)
# log.debug("Limited Biweight: %0.3g (%0.3g, %0.3g) min (%0.3g) max (%0.3g)"
# % (ratio_LAE_list[0], ratio_LAE_list[1], ratio_LAE_list[2], min(lae_oii_ratio_list),
# max(lae_oii_ratio_list)))
# temp:
if False:
log.debug("plotting ..." )
plt.close('all')
#plt.figure()
vals, bins, _ = plt.hist(lae_oii_ratio_list, bins="auto")
plt.title("%0.3g (%0.3g, %0.3g) bins (%d)\n min (%0.3g) max (%0.3g) "
% (ratio_LAE_list[0], ratio_LAE_list[1], ratio_LAE_list[2], len(bins) - 1, min(vals), max(vals)))
plt.savefig("plae_bw_hist_%d.png" %num_mc)
except:
log.debug("Exception calling biweight or conf_interval in mc_prob_LAE", exc_info=True)
return None, None, None, None
log.debug("Sampling (%d) PLAE/POII ... done" % (num_mc))
return ratio_LAE_list[0], p_lae_list[0], p_oii_list[0], {'ratio':ratio_LAE_list,'plgd':p_lae_list,'pogd':p_oii_list}
except:
log.debug("Exception calling mc_prob_LAE", exc_info=True)
return None, None, None, None
|
HETDEXREPO_NAMEelixerPATH_START.@elixer_extracted@elixer-main@elixer@[email protected]_END.py
|
{
"filename": "test_from_texts.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/partners/qdrant/tests/integration_tests/test_from_texts.py",
"type": "Python"
}
|
import tempfile
import uuid
from typing import Optional
import pytest # type: ignore[import-not-found]
from langchain_core.documents import Document
from langchain_qdrant import Qdrant
from langchain_qdrant.vectorstores import QdrantException
from tests.integration_tests.common import (
ConsistentFakeEmbeddings,
assert_documents_equals,
)
from tests.integration_tests.fixtures import qdrant_locations
def test_qdrant_from_texts_stores_duplicated_texts() -> None:
"""Test end to end Qdrant.from_texts stores duplicated texts separately."""
from qdrant_client import QdrantClient
collection_name = uuid.uuid4().hex
with tempfile.TemporaryDirectory() as tmpdir:
vec_store = Qdrant.from_texts(
["abc", "abc"],
ConsistentFakeEmbeddings(),
collection_name=collection_name,
path=str(tmpdir),
)
del vec_store
client = QdrantClient(path=str(tmpdir))
assert 2 == client.count(collection_name).count
@pytest.mark.parametrize("batch_size", [1, 64])
@pytest.mark.parametrize("vector_name", [None, "my-vector"])
def test_qdrant_from_texts_stores_ids(
batch_size: int, vector_name: Optional[str]
) -> None:
"""Test end to end Qdrant.from_texts stores provided ids."""
from qdrant_client import QdrantClient
collection_name = uuid.uuid4().hex
with tempfile.TemporaryDirectory() as tmpdir:
ids = [
"fa38d572-4c31-4579-aedc-1960d79df6df",
"cdc1aa36-d6ab-4fb2-8a94-56674fd27484",
]
vec_store = Qdrant.from_texts(
["abc", "def"],
ConsistentFakeEmbeddings(),
ids=ids,
collection_name=collection_name,
path=str(tmpdir),
batch_size=batch_size,
vector_name=vector_name,
)
del vec_store
client = QdrantClient(path=str(tmpdir))
assert 2 == client.count(collection_name).count
stored_ids = [point.id for point in client.scroll(collection_name)[0]]
assert set(ids) == set(stored_ids)
@pytest.mark.parametrize("vector_name", ["custom-vector"])
def test_qdrant_from_texts_stores_embeddings_as_named_vectors(vector_name: str) -> None:
"""Test end to end Qdrant.from_texts stores named vectors if name is provided."""
from qdrant_client import QdrantClient
collection_name = uuid.uuid4().hex
with tempfile.TemporaryDirectory() as tmpdir:
vec_store = Qdrant.from_texts(
["lorem", "ipsum", "dolor", "sit", "amet"],
ConsistentFakeEmbeddings(),
collection_name=collection_name,
path=str(tmpdir),
vector_name=vector_name,
)
del vec_store
client = QdrantClient(path=str(tmpdir))
assert 5 == client.count(collection_name).count
assert all(
vector_name in point.vector # type: ignore[operator]
for point in client.scroll(collection_name, with_vectors=True)[0]
)
@pytest.mark.parametrize("vector_name", [None, "custom-vector"])
def test_qdrant_from_texts_reuses_same_collection(vector_name: Optional[str]) -> None:
"""Test if Qdrant.from_texts reuses the same collection"""
from qdrant_client import QdrantClient
collection_name = uuid.uuid4().hex
embeddings = ConsistentFakeEmbeddings()
with tempfile.TemporaryDirectory() as tmpdir:
vec_store = Qdrant.from_texts(
["lorem", "ipsum", "dolor", "sit", "amet"],
embeddings,
collection_name=collection_name,
path=str(tmpdir),
vector_name=vector_name,
)
del vec_store
vec_store = Qdrant.from_texts(
["foo", "bar"],
embeddings,
collection_name=collection_name,
path=str(tmpdir),
vector_name=vector_name,
)
del vec_store
client = QdrantClient(path=str(tmpdir))
assert 7 == client.count(collection_name).count
@pytest.mark.parametrize("vector_name", [None, "custom-vector"])
def test_qdrant_from_texts_raises_error_on_different_dimensionality(
vector_name: Optional[str],
) -> None:
"""Test if Qdrant.from_texts raises an exception if dimensionality does not match"""
collection_name = uuid.uuid4().hex
with tempfile.TemporaryDirectory() as tmpdir:
vec_store = Qdrant.from_texts(
["lorem", "ipsum", "dolor", "sit", "amet"],
ConsistentFakeEmbeddings(dimensionality=10),
collection_name=collection_name,
path=str(tmpdir),
vector_name=vector_name,
)
del vec_store
with pytest.raises(QdrantException):
Qdrant.from_texts(
["foo", "bar"],
ConsistentFakeEmbeddings(dimensionality=5),
collection_name=collection_name,
path=str(tmpdir),
vector_name=vector_name,
)
@pytest.mark.parametrize(
["first_vector_name", "second_vector_name"],
[
(None, "custom-vector"),
("custom-vector", None),
("my-first-vector", "my-second_vector"),
],
)
def test_qdrant_from_texts_raises_error_on_different_vector_name(
first_vector_name: Optional[str],
second_vector_name: Optional[str],
) -> None:
"""Test if Qdrant.from_texts raises an exception if vector name does not match"""
collection_name = uuid.uuid4().hex
with tempfile.TemporaryDirectory() as tmpdir:
vec_store = Qdrant.from_texts(
["lorem", "ipsum", "dolor", "sit", "amet"],
ConsistentFakeEmbeddings(dimensionality=10),
collection_name=collection_name,
path=str(tmpdir),
vector_name=first_vector_name,
)
del vec_store
with pytest.raises(QdrantException):
Qdrant.from_texts(
["foo", "bar"],
ConsistentFakeEmbeddings(dimensionality=5),
collection_name=collection_name,
path=str(tmpdir),
vector_name=second_vector_name,
)
def test_qdrant_from_texts_raises_error_on_different_distance() -> None:
"""Test if Qdrant.from_texts raises an exception if distance does not match"""
collection_name = uuid.uuid4().hex
with tempfile.TemporaryDirectory() as tmpdir:
vec_store = Qdrant.from_texts(
["lorem", "ipsum", "dolor", "sit", "amet"],
ConsistentFakeEmbeddings(),
collection_name=collection_name,
path=str(tmpdir),
distance_func="Cosine",
)
del vec_store
with pytest.raises(QdrantException) as excinfo:
Qdrant.from_texts(
["foo", "bar"],
ConsistentFakeEmbeddings(),
collection_name=collection_name,
path=str(tmpdir),
distance_func="Euclid",
)
expected_message = (
"configured for COSINE similarity, but requested EUCLID. Please set "
"`distance_func` parameter to `COSINE`"
)
assert expected_message in str(excinfo.value)
@pytest.mark.parametrize("vector_name", [None, "custom-vector"])
def test_qdrant_from_texts_recreates_collection_on_force_recreate(
vector_name: Optional[str],
) -> None:
"""Test if Qdrant.from_texts recreates the collection even if config mismatches"""
from qdrant_client import QdrantClient
collection_name = uuid.uuid4().hex
with tempfile.TemporaryDirectory() as tmpdir:
vec_store = Qdrant.from_texts(
["lorem", "ipsum", "dolor", "sit", "amet"],
ConsistentFakeEmbeddings(dimensionality=10),
collection_name=collection_name,
path=str(tmpdir),
vector_name=vector_name,
)
del vec_store
vec_store = Qdrant.from_texts(
["foo", "bar"],
ConsistentFakeEmbeddings(dimensionality=5),
collection_name=collection_name,
path=str(tmpdir),
vector_name=vector_name,
force_recreate=True,
)
del vec_store
client = QdrantClient(path=str(tmpdir))
assert 2 == client.count(collection_name).count
@pytest.mark.parametrize("batch_size", [1, 64])
@pytest.mark.parametrize("content_payload_key", [Qdrant.CONTENT_KEY, "foo"])
@pytest.mark.parametrize("metadata_payload_key", [Qdrant.METADATA_KEY, "bar"])
def test_qdrant_from_texts_stores_metadatas(
batch_size: int, content_payload_key: str, metadata_payload_key: str
) -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": i} for i in range(len(texts))]
docsearch = Qdrant.from_texts(
texts,
ConsistentFakeEmbeddings(),
metadatas=metadatas,
location=":memory:",
content_payload_key=content_payload_key,
metadata_payload_key=metadata_payload_key,
batch_size=batch_size,
)
output = docsearch.similarity_search("foo", k=1)
assert_documents_equals(
output, [Document(page_content="foo", metadata={"page": 0})]
)
@pytest.mark.parametrize("location", qdrant_locations(use_in_memory=False))
def test_from_texts_passed_optimizers_config_and_on_disk_payload(location: str) -> None:
from qdrant_client import models
collection_name = uuid.uuid4().hex
texts = ["foo", "bar", "baz"]
metadatas = [{"page": i} for i in range(len(texts))]
optimizers_config = models.OptimizersConfigDiff(memmap_threshold=1000)
vec_store = Qdrant.from_texts(
texts,
ConsistentFakeEmbeddings(),
metadatas=metadatas,
optimizers_config=optimizers_config,
on_disk_payload=True,
on_disk=True,
collection_name=collection_name,
location=location,
)
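    # Inspect the stored collection config to confirm the on-disk vector
    # storage, optimizer settings and on-disk payload flag were applied.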
collection_info = vec_store.client.get_collection(collection_name)
assert collection_info.config.params.vectors.on_disk is True # type: ignore
assert collection_info.config.optimizer_config.memmap_threshold == 1000
assert collection_info.config.params.on_disk_payload is True
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@partners@qdrant@tests@integration_tests@[email protected]_END.py
|
{
"filename": "_marker.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/graph_objs/choropleth/unselected/_marker.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Marker(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "choropleth.unselected"
_path_str = "choropleth.unselected.marker"
_valid_props = {"opacity"}
# opacity
# -------
@property
def opacity(self):
"""
Sets the marker opacity of unselected points, applied only when
a selection exists.
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["opacity"]
@opacity.setter
def opacity(self, val):
self["opacity"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
opacity
Sets the marker opacity of unselected points, applied
only when a selection exists.
"""
def __init__(self, arg=None, opacity=None, **kwargs):
"""
Construct a new Marker object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.choropleth.unselected.Marker`
opacity
Sets the marker opacity of unselected points, applied
only when a selection exists.
Returns
-------
Marker
"""
super(Marker, self).__init__("marker")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.choropleth.unselected.Marker
constructor must be a dict or
an instance of :class:`plotly.graph_objs.choropleth.unselected.Marker`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("opacity", None)
_v = opacity if opacity is not None else _v
if _v is not None:
self["opacity"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@graph_objs@choropleth@unselected@[email protected]_END.py
|
{
"filename": "test.py",
"repo_name": "wmpg/WesternMeteorPyLib",
"repo_path": "WesternMeteorPyLib_extracted/WesternMeteorPyLib-master/test.py",
"type": "Python"
}
|
""" Working script for testing chunks of code. Nothing really of value here. """
import numpy as np
from wmpl.Utils.Math import findClosestPoints, vectNorm, vectMag
from wmpl.Utils.TrajConversions import date2JD, ecef2ENU, enu2ECEF, cartesian2Geo, geo2Cartesian
def calcSpatialResidual(jd, state_vect, radiant_eci, stat, meas):
""" Calculate horizontal and vertical residuals from the radiant line, for the given observed point.
Arguments:
jd: [float] Julian date
state_vect: [3 element ndarray] ECI position of the state vector
radiant_eci: [3 element ndarray] radiant direction vector in ECI
stat: [3 element ndarray] position of the station in ECI
meas: [3 element ndarray] line of sight from the station, in ECI
Return:
        (hres, vres): [tuple of floats] residuals in horizontal and vertical direction from the radiant line
"""
meas = vectNorm(meas)
# Calculate closest points of approach (observed line of sight to radiant line) from the state vector
obs_cpa, rad_cpa, d = findClosestPoints(stat, meas, state_vect, radiant_eci)
# Vector pointing from the point on the trajectory to the point on the line of sight
p = obs_cpa - rad_cpa
# Calculate geographical coordinates of the state vector
lat, lon, elev = cartesian2Geo(jd, *state_vect)
# Calculate ENU (East, North, Up) vector at the position of the state vector, and direction of the radiant
nn = np.array(ecef2ENU(lat, lon, *radiant_eci))
# Convert the vector to polar coordinates
theta = np.arctan2(nn[1], nn[0])
phi = np.arccos(nn[2]/vectMag(nn))
# Local reference frame unit vectors
hx = np.array([ -np.cos(theta), np.sin(theta), 0.0])
vz = np.array([-np.cos(phi)*np.sin(theta), -np.cos(phi)*np.cos(theta), np.sin(phi)])
hy = np.array([ np.sin(phi)*np.sin(theta), np.sin(phi)*np.cos(theta), np.cos(phi)])
# Calculate local reference frame unit vectors in ECEF coordinates
ehorzx = enu2ECEF(lat, lon, *hx)
ehorzy = enu2ECEF(lat, lon, *hy)
evert = enu2ECEF(lat, lon, *vz)
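    # Project the offset vector p onto the local-frame unit vectors (converted
    # to ECEF) to split it into horizontal and vertical components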
ehx = np.dot(p, ehorzx)
ehy = np.dot(p, ehorzy)
# Calculate vertical residuals
vres = np.sign(ehx)*np.hypot(ehx, ehy)
# Calculate horizontal residuals
hres = np.dot(p, evert)
return hres, vres
if __name__ == "__main__":
import sys
import matplotlib.pyplot as plt
import pyximport
pyximport.install(setup_args={'include_dirs':[np.get_include()]})
from wmpl.MetSim.MetSimErosionCyTools import luminousEfficiency, ionizationEfficiency
### Plot different lum effs ###
# Range of velocities
vel_range = np.linspace(2000, 72000, 100)
# Range of masses
masses = [1e-11, 1e-9, 1e-7, 1e-5, 0.001, 0.01, 0.1, 1, 10, 100, 1000]
lum_eff_types = [1, 2, 3, 4, 5, 6, 7]
lum_eff_labels = ["Revelle & Ceplecha (2001) - Type I",
"Revelle & Ceplecha (2001) - Type II",
"Revelle & Ceplecha (2001) - Type III",
"Borovicka et al. (2013) - Kosice",
"CAMO faint meteors",
"Ceplecha & McCrosky (1976)",
"Borovicka et al. (2020) - Two strengths"]
for i, lum_type in enumerate(lum_eff_types):
for mass in masses:
lum_list = []
for vel in vel_range:
lum = luminousEfficiency(lum_type, 0.0, vel, mass)
lum_list.append(lum)
plt.plot(vel_range/1000, 100*np.array(lum_list), label="{:s} kg".format(str(mass)), zorder=4)
plt.title(lum_eff_labels[i])
plt.xlabel("Velocity (km/s)")
plt.ylabel("Tau (%)")
plt.legend()
plt.grid(color='0.9')
plt.show()
# Plot the ionization efficiency
beta_arr = np.array([ionizationEfficiency(vel) for vel in vel_range])
plt.semilogy(vel_range/1000, 100*beta_arr, label="Jones (1997)")
plt.xlabel("Velocity (km/s)")
plt.ylabel("Beta (%)")
plt.show()
sys.exit()
### ###
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
jd = date2JD(2018, 2, 14, 10, 30, 0)
lat = np.radians(45.0)
lon = np.radians(13.0)
h = 100000
state_vect = np.array(geo2Cartesian(lat, lon, h, jd))
radiant_eci = np.array([0.0, 1.0, 0.0])
stat = np.array(geo2Cartesian(lat, lon, h + 10, jd))
meas = np.array([0.0, 0.0, 1.0])
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
print(calcSpatialResidual(jd, state_vect, radiant_eci, stat, meas))
# Plot the origin
#ax.scatter(0, 0, 0)
# Plot the first point
ax.scatter(*state_vect)
# Plot the line from the origin
rad_x, rad_y, rad_z = -vectNorm(state_vect)
rst_x, rst_y, rst_z = state_vect
meteor_len = 1000000
ax.quiver(rst_x, rst_y, rst_z, rad_x, rad_y, rad_z, length=meteor_len, normalize=True, color='b',
arrow_length_ratio=0.1)
# Plot the radiant direction line
rad_x, rad_y, rad_z = -radiant_eci
rst_x, rst_y, rst_z = state_vect
meteor_len = 1000000
ax.quiver(rst_x, rst_y, rst_z, rad_x, rad_y, rad_z, length=meteor_len, normalize=True, color='r',
arrow_length_ratio=0.1)
# Plot the second point
ax.scatter(*stat)
# Plot the direction of the second vector
rad_x, rad_y, rad_z = -meas
rst_x, rst_y, rst_z = stat
meteor_len = 1000000
ax.quiver(rst_x, rst_y, rst_z, rad_x, rad_y, rad_z, length=meteor_len, normalize=True, color='g',
arrow_length_ratio=0.1)
# Calculate closest points of approach (observed line of sight to radiant line) from the state vector
obs_cpa, rad_cpa, d = findClosestPoints(stat, meas, state_vect, radiant_eci)
# Plot the closest points
ax.scatter(*obs_cpa)
ax.scatter(*rad_cpa)
# Set a constant aspect ratio
ax.set_aspect('equal', adjustable='box-forced')
plt.show()
|
wmpgREPO_NAMEWesternMeteorPyLibPATH_START.@WesternMeteorPyLib_extracted@[email protected]@.PATH_END.py
|
{
"filename": "test_verbosity.py",
"repo_name": "wdpozzo/raynest",
"repo_path": "raynest_extracted/raynest-main/raynest/tests/test_verbosity.py",
"type": "Python"
}
|
import unittest
import numpy as np
import raynest.model
class GaussianModel(raynest.model.Model):
"""
A simple gaussian model with parameters mean and sigma
"""
names=['mean','sigma']
bounds=[[-5,5],[0.05,1]]
data = np.array([x for x in np.random.normal(0.5,0.5,size=10)])
analyticZ = np.log(0.05)
@classmethod
def log_likelihood(cls,x):
return -0.5*x['mean']**2/x['sigma']**2 - np.log(x['sigma']) - 0.5*np.log(2.0*np.pi)
def log_prior(self,p):
if not self.in_bounds(p): return -np.inf
return -np.log(p['sigma']) - np.log(10) - np.log(0.95)
def force(self,x):
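        # Force term (presumably consumed by raynest's gradient-based samplers);
        # this toy model simply returns zeros in a structured array keyed by
        # parameter name.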
return np.zeros(1, dtype = {'names':x.names, 'formats':['f8' for _ in x.names]})
class GaussianTestCase(unittest.TestCase):
"""
Test the gaussian model with different verbose levels
"""
def setUp(self):
self.model = GaussianModel()
self.runs=[]
for v in range(4):
self.runs.append(raynest.raynest(self.model,verbose=v,nensemble=8,nlive=100,maxmcmc=100))
def test_run(self):
for r in self.runs:
r.run()
print('Analytic evidence: {0}'.format(self.model.analyticZ))
def test_all():
unittest.main(verbosity=2)
if __name__=='__main__':
unittest.main(verbosity=2)
|
wdpozzoREPO_NAMEraynestPATH_START.@raynest_extracted@raynest-main@raynest@tests@[email protected]_END.py
|
{
"filename": "test_sdsstractor.py",
"repo_name": "dstndstn/tractor",
"repo_path": "tractor_extracted/tractor-main/test/not-real-tests/test_sdsstractor.py",
"type": "Python"
}
|
from __future__ import print_function
import sys
import logging
from math import pi
import numpy as np
import pylab as plt
from astrometry.util.sip import Tan
from sdsstractor import *
#from compiled_profiles import *
#from galaxy_profiles import *
class FitsWcs(object):
def __init__(self, wcs):
self.wcs = wcs
def positionToPixel(self, pos, src=None):
x,y = self.wcs.radec2pixelxy(pos.ra, pos.dec)
return x,y
def pixelToPosition(self, x, y, src=None):
r,d = self.wcs.pixelxy2radec(x, y)
return RaDecPos(r,d)
def cdAtPixel(self, x, y):
cd = self.wcs.cd
return np.array([[cd[0], cd[1]], [cd[2],cd[3]]])
def main():
from optparse import OptionParser
parser = OptionParser()
parser.add_option('-v', '--verbose', dest='verbose', action='count', default=0,
help='Make more verbose')
opt,args = parser.parse_args()
print('Opt.verbose = ', opt.verbose)
if opt.verbose == 0:
lvl = logging.INFO
else: # opt.verbose == 1:
lvl = logging.DEBUG
logging.basicConfig(level=lvl, format='%(message)s', stream=sys.stdout)
(images, simplexys, rois, zrange, nziv, footradecs
) = prepareTractor(False, False, rcfcut=[0])
print('Creating tractor...')
tractor = SDSSTractor(images, debugnew=False, debugchange=True)
'''
step 19, change-011
Changing source [131] PointSource at RA,Dec (120.5813, 9.3017) with SdssFlux: 2512.0 with scalar 373541.300971
Pixel position in image 1: 162.38751309 22.0818652585
* NCircularGaussianPSF: sigmas [ 0.911, 2.687, 9.871, 7.172, 18.284 ], weights [ 1.005, 0.083, -0.032, -0.007, 0.138 ]
* NCircularGaussianPSF: sigmas [ 1.014, 1.507, 3.778, 4.812 ], weights [ 1.002, 0.037, 0.050, 0.065 ]
'''
'''
to [ExpGalaxy(pos=RaDecPos(120.5813, 9.3017), flux=SdssFlux(2260.2), re=1.0, ab=0.50, phi=0.0)]
-->
ExpGalaxy at RA,Dec (120.5813, 9.3017) with SdssFlux: 3452.9, re=0.6, ab=0.88, phi=-0.1
to [DevGalaxy(pos=RaDecPos(120.5813, 9.3017), flux=SdssFlux(2260.2), re=1.0, ab=0.50, phi=0.0)]
DevGalaxy at RA,Dec (120.5813, 9.3018) with SdssFlux: 5292.4, re=1.0, ab=1.24, phi=-0.0
'''
pos = RaDecPos(120.5813, 9.3017)
flux = SdssFlux(3452.9 / SdssPhotoCal.scale)
#src = ExpGalaxy(pos, flux, 0.6, 0.88, -0.1)
src = ExpGalaxy(pos, flux, 0.6, 0.5, -0.1)
# After optimizing from ab=0.5:
pos = RaDecPos(120.58129, 9.30171)
flux = SdssFlux(4570. / SdssPhotoCal.scale)
src = ExpGalaxy(pos, flux, 0.9, 0.75, -35.7)
# small test galaxy (img 1)
src.setParams([120.5812843997946, 9.3017035130757009, 0.0049199047382452428, 1.2052972074376225, 0.55429578631330578, -47.611423262836823])
# big honkin' galaxy (img 0)
src = DevGalaxy(pos, flux, 0.9, 0.75, -35.7)
src.setParams([120.60275, 9.41350, 5.2767052, 22.8, 0.81, 5.0])
tractor.catalog.append(src)
def makePlots(tractor, fnpat, title1='', title2=''):
mods = tractor.getModelImages()
imgs = tractor.getImages()
chis = tractor.getChiImages()
for i,(mod,img,chi) in enumerate(zip(mods,imgs,chis)):
zr = zrange[i]
imargs = dict(interpolation='nearest', origin='lower',
vmin=zr[0], vmax=zr[1])
srcpatch = src.getModelPatch(img)
slc = srcpatch.getSlice(img)
plt.clf()
plt.subplot(2,2,1)
plotimage(img.getImage()[slc], **imargs)
plt.title(title1)
plt.subplot(2,2,2)
plotimage(mod[slc], **imargs)
plt.title(title2)
plt.subplot(2,2,3)
plotimage(chi[slc], interpolation='nearest', origin='lower',
vmin=-5, vmax=5)
plt.title('chi')
#plt.subplot(2,2,4)
#plotimage(img.getInvError()[slc],
# interpolation='nearest', origin='lower',
# vmin=0, vmax=0.1)
#plt.title('inv err')
fn = fnpat % i
plt.savefig(fn)
print('wrote', fn)
makePlots(tractor, 'opt-s00-%02i.png', #'pre-%02i.png',
title2='re %.1f, ab %.2f, phi %.1f' % (src.re, src.ab, src.phi))
if False:
p0 = src.getParams()
lnp0 = tractor.getLogProb()
for i,step in enumerate([3e-5, 3e-5, 1e-3, 0.1, 0.1, 15.]):
src.setParams(p0)
for j in range(5):
src.stepParam(i, -step)
for j in range(9):
src.stepParam(i, step)
lnp = tractor.getLogProb()
dlnp = lnp - lnp0
makePlots(tractor, 'step-%i-%02i-%%02i.png' % (i,j),
title1='dlnp=%.1f' % dlnp,
title2='re %.1f, ab %.2f, phi %.1f' % (src.re, src.ab, src.phi))
src.setParams(p0)
for ostep in range(10):
print()
print('Optimizing...')
#alphas = [1., 0.5, 0.25, 0.1, 0.01]
alphas=None
ppre = src.getParams()
lnppre = tractor.getLogProb()
dlnp,X,alpha = tractor.optimizeCatalogAtFixedComplexityStep(alphas=alphas)
ppost = src.getParams()
makePlots(tractor, 'opt-s%02i-%%02i.png' % (ostep+1),
title1='dlnp = %.1f' % dlnp,
title2='re %.1f, ab %.2f, phi %.1f' % (src.re, src.ab, src.phi))
print()
src.setParams(ppre)
print('Pre :', src)
src.setParams(ppost)
print('Post:', src)
src.setParams(ppre)
src.stepParams(X * 0.001)
dlnptiny = tractor.getLogProb() - lnppre
print('1e-3:', src)
makePlots(tractor, 'opt-s%02i-%%02ib.png' % (ostep+1),
title1='dlnp = %.1f' % dlnptiny,
title2='re %.1f, ab %.2f, phi %.1f' % (src.re, src.ab, src.phi))
src.setParams(ppre)
src.stepParams(X)
dlnpfull = tractor.getLogProb() - lnppre
print('Full:', src)
makePlots(tractor, 'opt-s%02i-%%02ic.png' % (ostep+1),
title1='dlnp = %.1f' % dlnpfull,
title2='re %.1f, ab %.2f, phi %.1f' % (src.re, src.ab, src.phi))
src.setParams(ppost)
print()
if dlnp < 1e-3:
break
print('Final source:', src)
print('Params:', src.getParams())
sys.exit(0)
imgi = 1
img = images[imgi]
patch = src.getModelPatch(img)
imargs1 = dict(interpolation='nearest', origin='lower')
plt.clf()
plt.imshow(patch.getImage(), **imargs1)
plt.colorbar()
plt.title('model')
plt.savefig('eg-1.png')
derivs = src.getParamDerivatives(img)
for i,deriv in enumerate(derivs):
plt.clf()
plt.imshow(deriv.getImage(), **imargs1)
plt.colorbar()
plt.title('derivative ' + deriv.getName())
plt.savefig('eg-deriv%i-0a.png' % i)
chi = tractor.getChiImage(imgi)
sl = patch.getSlice(img)
plt.clf()
plt.imshow(img.getImage()[sl], **imargs1)
plt.colorbar()
plt.title('image')
plt.savefig('eg-image.png')
plt.clf()
plt.imshow(chi[sl], **imargs1)
plt.colorbar()
plt.title('chi')
plt.savefig('eg-chi.png')
for i,deriv in enumerate(derivs):
plt.clf()
(H,W) = chi.shape
deriv.clipTo(W,H)
sl = deriv.getSlice(chi)
print('slice', sl)
print('deriv:', deriv)
print('chi sliced:', chi[sl].shape)
print('deriv:', deriv.getImage().shape)
plt.imshow(chi[sl] * deriv.getImage(), **imargs1)
plt.colorbar()
plt.title('chi * derivative ' + deriv.getName())
plt.savefig('eg-chideriv%i-0a.png' % i)
if False:
src = PointSource(pos, SdssFlux(2512.0 / SdssPhotoCal.scale))
tractor.catalog.append(src)
x,y = images[1].getWcs().positionToPixel(pos, src=src)
print('Pixel position in image 1:', x,y)
tractor.changeSourceTypes(srcs=[src])
if __name__ == '__main__':
main()
sys.exit(0)
angles = np.linspace(0, 2.*pi, 360)
x,y = np.cos(angles), np.sin(angles)
re = 3600./2. # arcsec
ab = 0.5
phi = 30. # deg
abfactor = ab
re_deg = re / 3600.
phi = np.deg2rad(phi)
# units of degrees
G = np.array([[ re_deg * np.cos(phi), re_deg * np.sin(phi) ],
[ re_deg * abfactor * -np.sin(phi), re_deg * abfactor * np.cos(phi) ]])
R = np.array([[ np.cos(phi), np.sin(phi) ],
[-np.sin(phi), np.cos(phi) ]])
S = re_deg * np.array([[ 1., 0 ],
[ 0, abfactor ]])
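    # GG is the single-matrix equivalent of np.dot(R, S): rotation by phi plus
    # axis-ratio scaling, both scaled by re_deg; RS is printed below for
    # comparison.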
cp = np.cos(phi)
sp = np.sin(phi)
GG = re_deg * np.array([[ cp, sp * abfactor],
[-sp, cp * abfactor]])
print('R', R)
print('S', S)
RS = np.dot(R, S)
print('RS', RS)
print('G', G)
#G = RS
G = GG
rd = np.dot(G, np.vstack((x,y)))
print('rd', rd.shape)
r = rd[0,:]
d = rd[1,:]
plt.clf()
plt.plot(r, d, 'b-')
plt.axis('equal')
plt.savefig('g.png')
width = (2./7.2) # in deg
W,H = 500,500
scale = width / float(W)
cd = np.array([[-scale, 0],[0,-scale]])
    cdi = np.linalg.inv(cd)
pg = np.dot(cdi, G)
pxy = np.dot(pg, np.vstack((x,y)))
px = pxy[0,:]
py = pxy[1,:]
plt.clf()
plt.plot(px, py, 'b-')
plt.axis('equal')
plt.savefig('g2.png')
    T = np.dot(np.linalg.inv(G), cd)
XX,YY = np.meshgrid(np.arange(-1000,1200, 200),
np.arange( -600, 800, 200))
XX = XX.ravel()
YY = YY.ravel()
    XY = np.vstack((XX,YY))
Tij = np.dot(T, XY)
print('Tij', Tij.shape)
for i in range(len(XX)):
plt.text(XX[i], YY[i], '(%.1f,%.1f)' % (Tij[0,i], Tij[1,i]),
fontsize=8, ha='center', va='center')
plt.savefig('g3.png')
profile = CompiledProfile(modelname='exp', profile_func=profile_exp, re=100, nrad=4)
#re_deg = 0.005 # 9 pix
re_deg = 0.002 #
repix = re_deg / scale
print('repix', repix)
cp = np.cos(phi)
sp = np.sin(phi)
G = re_deg * np.array([[ cp, sp * abfactor],
[-sp, cp * abfactor]])
    T = np.dot(np.linalg.inv(G), cd)
X = profile.sample_transform(T, repix, ab, W/2, H/2, W, H, 1,
debugstep=1)
(xlo,xhi,ylo,yhi, cre,cn, cpixw, cpixh, re_factor, ab_factor,
Tij, ii, jj) = X
print('box size', cpixw, cpixh)
print('re_factor', re_factor)
print('ab_factor', ab_factor)
plt.clf()
plt.plot(Tij[0,:], Tij[1,:], 'b.')
plt.title('Tij')
plt.savefig('g4.png')
plt.clf()
plt.plot(ii, jj, 'b.')
plt.savefig('g5.png')
plt.clf()
print('boxes:', len(xlo))
plt.plot(np.vstack((xlo,xhi,xhi,xlo,xlo)),
np.vstack((ylo,ylo,yhi,yhi,ylo)), 'b-')
plt.savefig('g6.png')
plt.axis([0,1000,0,1000])
plt.savefig('g7.png')
#sys.exit(0)
ra,dec = 1.,45.
width = (2./7.2) # in deg
W,H = 500,500
wcs = Tan()
wcs.crval[0] = ra
wcs.crval[1] = dec
wcs.crpix[0] = W/2.
wcs.crpix[1] = H/2.
scale = width / float(W)
wcs.cd[0] = -scale
wcs.cd[1] = 0
wcs.cd[2] = 0
wcs.cd[3] = -scale
wcs.imagew = W
wcs.imageh = H
wcs = FitsWcs(wcs)
pos = RaDecPos(ra, dec)
flux = SdssFlux(1e4)
# arcsec
repix = 25.
re = 3600. * scale * repix
ab = 0.5
phi = 30.0
eg = ExpGalaxy(pos, flux, re, ab, phi)
image = np.zeros((H,W))
invvar = np.zeros_like(image) + 1.
photocal = SdssPhotoCal(SdssPhotoCal.scale)
psf = NCircularGaussianPSF([1.5], [1.0])
sky = 0.
img = Image(data=image, invvar=invvar, psf=psf, wcs=wcs, sky=sky,
photocal=photocal)
patch = eg.getModelPatch(img)
imargs1 = dict(interpolation='nearest', origin='lower')
plt.clf()
plt.imshow(patch.getImage(), **imargs1)
plt.colorbar()
plt.savefig('eg-1.png')
|
dstndstnREPO_NAMEtractorPATH_START.@tractor_extracted@tractor-main@test@not-real-tests@[email protected]_END.py
|
{
"filename": "_familysrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/area/hoverlabel/font/_familysrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class FamilysrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="familysrc", parent_name="area.hoverlabel.font", **kwargs
):
super(FamilysrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@area@hoverlabel@font@[email protected]_END.py
|
{
"filename": "prepare_internet.ipynb",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/catboost/benchmarks/quality_benchmarks/prepare_internet/prepare_internet.ipynb",
"type": "Jupyter Notebook"
}
|
### Instructions
To obtain the Internet dataset used for the algorithm comparisons:
1) Download the `kdd_internet_usage.arff` file from http://www.cs.odu.edu/~mukka/cs795sum10dm/datasets/uci-20070111/nominal/kdd_internet_usage.arff.
2) Put it in the same directory as this notebook.
3) Run all the cells of this notebook successively to produce the files for training and testing.
```python
resulting_train_filename = "train"
resulting_test_filename = "test"
```
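For step 1 above, a minimal download sketch (illustrative only; it assumes the URL is still reachable and uses only the Python standard library):
```python
import urllib.request

# Fetch the ARFF file next to this notebook (hypothetical helper snippet)
url = ("http://www.cs.odu.edu/~mukka/cs795sum10dm/datasets/"
       "uci-20070111/nominal/kdd_internet_usage.arff")
urllib.request.urlretrieve(url, "kdd_internet_usage.arff")
```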
### Preparing the data
```python
import pandas as pd
import re
import scipy.io.arff
```
```python
with open("kdd_internet_usage.arff", "rb") as fin:
data, meta = scipy.io.arff.loadarff(fin)
data = pd.DataFrame(data)
```
```python
data.head()
```
Output (preview): 5 rows × 72 columns. The displayed columns include `Actual_Time`, `Age`, `Community_Building`, the `Community_Membership_*` flags, ..., `Web_Page_Creation`, the `Who_Pays_for_Access_*` flags, `Willingness_to_Pay_Fees`, `Years_on_Internet`, and `who`.
```python
target = data["Who_Pays_for_Access_Work"].apply(lambda x: 1 if x == '0' else -1)
data.drop(["Who_Pays_for_Access_Work", "Willingness_to_Pay_Fees", "Years_on_Internet", "who"], axis=1, inplace=True)
```
```python
data.shape
```
(10108, 68)
### Preparing train/test split
```python
train_idx = pd.read_csv("stratified_train_idx.txt", header=None)
test_idx = pd.read_csv("stratified_test_idx.txt", header=None)
```
```python
Xtrain = data.iloc[train_idx[0]]
Ytrain = target.iloc[train_idx[0]]
Xtest = data.iloc[test_idx[0]]
Ytest = target.iloc[test_idx[0]]
```
```python
# creating file with features
def prepare_pool(data, labels, filename):
X = data.values
y = labels.values
with open(filename, "w") as fout:
for i in range(data.shape[0]):
fout.write(str(y[i]) + "\t" + "\t".join(map(str, X[i])) + "\n")
```
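Each line of the resulting pool file is the label followed by the tab-separated raw feature values; categorical columns are written as-is and are declared in the `.cd` column-description file created below.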
```python
prepare_pool(Xtrain, Ytrain, resulting_train_filename)
prepare_pool(Xtest, Ytest, resulting_test_filename)
```
```python
categorical_features = {0, 1, 2, 11, 12, 18, 19, 20, 21, 31, 32, 33, 34, 36, 37, 38, 39, 59, 60, 61, 62}
with open(resulting_train_filename + '.cd', 'w') as fout:
fout.write('0\tTarget\n')
for cat_f_id in sorted(categorical_features):
fout.write('{}\tCateg\n'.format(cat_f_id + 1))
```
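The `train.cd` file written above is a CatBoost column-description file: each line pairs a zero-based column index of the tab-separated pool with its role, and the indices are shifted by one because column 0 holds the target. Given the categorical indices above, its first lines should look like:
```
0	Target
1	Categ
2	Categ
3	Categ
12	Categ
```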
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@catboost@benchmarks@quality_benchmarks@prepare_internet@[email protected]_END.py
|
{
"filename": "obslog.py",
"repo_name": "plazar/coast_guard",
"repo_path": "coast_guard_extracted/coast_guard-master/database/obslog.py",
"type": "Python"
}
|
import sqlalchemy as sa
# Create metadata object
metadata = sa.MetaData()
# Define the metadata object for the obsinfo table
sa.Table('obsinfo', metadata,
sa.Column('object', sa.String(32), nullable=False),
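         # Float(53) requests a double-precision float, needed for MJD/LST-style
         # timestamp columns below.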
sa.Column('mjd', sa.Float(53), nullable=False),
sa.Column('equinox', sa.Float, nullable=False),
sa.Column('equinox_obs', sa.Float, nullable=False),
sa.Column('exposuretime', sa.Float, nullable=False),
sa.Column('projectid', sa.String(32), nullable=False),
sa.Column('observer', sa.String(32), nullable=False),
sa.Column('scan', sa.Integer, nullable=False),
sa.Column('obstimestamp', sa.String(32), nullable=False),
sa.Column('lst', sa.Float(53), nullable=False),
sa.Column('nobs', sa.Integer, nullable=False),
sa.Column('nsubs', sa.Integer, nullable=False),
sa.Column('subsnum', sa.Integer, nullable=False),
sa.Column('obsnum', sa.Integer, nullable=False),
sa.Column('subfirstmjd', sa.Float(53), nullable=False),
sa.Column('sublastmjd', sa.Float(53), nullable=False),
sa.Column('ctype1', sa.String(32), nullable=False),
sa.Column('ctype2', sa.String(32), nullable=False),
sa.Column('wcsname', sa.String(32), nullable=False),
sa.Column('lon', sa.Float, nullable=False),
sa.Column('lat', sa.Float, nullable=False),
sa.Column('longoff', sa.Float, nullable=False),
sa.Column('latoff', sa.Float, nullable=False),
sa.Column('scantype', sa.String(32), nullable=False),
sa.Column('scanmode', sa.String(32), nullable=False),
sa.Column('scandir', sa.String(8), nullable=False),
sa.Column('scanlen', sa.Float, nullable=False),
sa.Column('scanrot', sa.Float, nullable=False),
sa.Column('scanxvel', sa.Float, nullable=False),
sa.Column('nuseband', sa.Integer, nullable=False),
sa.Column('nusefeed', sa.Integer, nullable=False),
sa.Column('phases', sa.Integer, nullable=False),
sa.Column('freqthrow', sa.Float, nullable=False),
sa.Column('azcorr', sa.Float, nullable=False),
sa.Column('elcorr', sa.Float, nullable=False),
sa.Column('refraction', sa.Float, nullable=False),
sa.Column('nula', sa.Float, nullable=False),
sa.Column('nule', sa.Float, nullable=False),
sa.Column('cols', sa.Float, nullable=False),
sa.Column('linx', sa.Float, nullable=False),
sa.Column('liny', sa.Float, nullable=False),
sa.Column('linz', sa.Float, nullable=False),
sa.Column('rotx', sa.Float, nullable=False),
sa.Column('roty', sa.Float, nullable=False),
sa.Column('rotz', sa.Float, nullable=False),
sa.Column('temperature', sa.Float, nullable=False),
sa.Column('pressure', sa.Float, nullable=False),
sa.Column('humidity', sa.Float, nullable=False),
sa.Column('windspeed', sa.Float, nullable=False),
sa.Column('winddir', sa.Float, nullable=False),
sa.Column('vlsr', sa.Float, nullable=False),
sa.Column('vblsr', sa.Float, nullable=False),
sa.Column('vhel', sa.Float, nullable=False),
sa.Column('vbar', sa.Float, nullable=False),
sa.Column('azim', sa.Float, nullable=False),
sa.Column('elev', sa.Float, nullable=False),
sa.Column('focus1', sa.Float, nullable=False),
sa.Column('focus2', sa.Float, nullable=False),
sa.Column('focus3', sa.Float, nullable=False),
sa.Column('focus4', sa.Float, nullable=False),
sa.Column('focus5', sa.Float, nullable=False),
sa.Column('focus6', sa.Float, nullable=False),
sa.Column('focus7', sa.Float, nullable=False),
sa.Column('focus8', sa.Float, nullable=False),
sa.Column('febe', sa.String(32), nullable=False),
sa.Column('feversion', sa.String(32), nullable=False),
sa.Column('ifinvert', sa.String(1), nullable=False),
sa.Column('obstype', sa.String(32), nullable=False),
sa.Column('scantime', sa.Float, nullable=False),
sa.Column('scanxspacing', sa.Float, nullable=False),
sa.Column('scanyspacing', sa.Float, nullable=False),
sa.Column('scanskew', sa.Float, nullable=False),
sa.Column('posgnp', sa.Float, nullable=False),
sa.Column('dewang', sa.Float, nullable=False),
sa.Column('channels', sa.Integer, nullable=False),
sa.Column('freqres', sa.Float, nullable=False),
sa.Column('bandwidth', sa.Float, nullable=False),
sa.Column('molecule', sa.String(32), nullable=False),
sa.Column('restfreq', sa.Float(53), nullable=False),
sa.Column('sideband', sa.String(32), nullable=False),
sa.Column('velwcsname', sa.String(32), nullable=False),
sa.Column('velsys', sa.String(32), nullable=False),
sa.Column('refchan', sa.Float, nullable=False),
sa.Column('velrefchan', sa.Float, nullable=False),
sa.Column('velchansep', sa.Float, nullable=False),
sa.Column('velrestframe', sa.String(32), nullable=False),
sa.Column('velobsframe', sa.String(32), nullable=False),
sa.Column('velsource', sa.Float, nullable=False),
sa.Column('velobserver', sa.Float, nullable=False),
sa.Column('utc2ut1', sa.Float, nullable=False),
sa.Column('tblank', sa.Float, nullable=False),
sa.Column('tsync', sa.Float, nullable=False),
sa.Column('dayofyear', sa.Integer, nullable=False),
sa.Column('skyfreq', sa.Float(53), nullable=False),
sa.Column('movefoc', sa.String(1), nullable=False),
sa.Column('ctrlbuttons', sa.Integer, nullable=False),
sa.Column('obsstatus', sa.String(8), nullable=False),
mysql_engine='InnoDB', mysql_charset='ascii')
|
plazarREPO_NAMEcoast_guardPATH_START.@coast_guard_extracted@coast_guard-master@[email protected]@.PATH_END.py
|
{
"filename": "proxy.py",
"repo_name": "crossbario/crossbar",
"repo_path": "crossbar_extracted/crossbar-master/crossbar/master/api/proxy.py",
"type": "Python"
}
|
###############################################################################
#
# Crossbar.io Master
# Copyright (c) typedef int GmbH. Licensed under EUPLv1.2.
#
###############################################################################
from crossbar.master.api.remote import RemoteApi
__all__ = ('RemoteProxyApi', )
class RemoteProxyApi(RemoteApi):
PREFIX = 'crossbarfabriccenter.remote.proxy.'
PROCS = {
# these are worker level procedures
'worker': [
'get_proxy_transports',
'get_proxy_transport',
'start_proxy_transport',
'stop_proxy_transport',
'get_web_transport_services',
'get_web_transport_service',
'start_web_transport_service',
'stop_web_transport_service',
'get_proxy_routes',
'get_proxy_realm_route',
'list_proxy_realm_routes',
'start_proxy_realm_route',
'stop_proxy_realm_route',
'get_proxy_connections',
'get_proxy_connection',
'start_proxy_connection',
'stop_proxy_connection',
],
}
EVENTS = {
# these are worker level topics
'worker': [
'on_proxy_transport_starting',
'on_proxy_transport_started',
'on_proxy_transport_stopping',
'on_proxy_transport_stopped',
]
}
|
crossbarioREPO_NAMEcrossbarPATH_START.@crossbar_extracted@crossbar-master@crossbar@master@[email protected]@.PATH_END.py
|
{
"filename": "_dual_annealing.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/scipy/py3/scipy/optimize/_dual_annealing.py",
"type": "Python"
}
|
# Dual Annealing implementation.
# Copyright (c) 2018 Sylvain Gubian <[email protected]>,
# Yang Xiang <[email protected]>
# Author: Sylvain Gubian, Yang Xiang, PMP S.A.
"""
A Dual Annealing global optimization algorithm
"""
import numpy as np
from scipy.optimize import OptimizeResult
from scipy.optimize import minimize, Bounds
from scipy.special import gammaln
from scipy._lib._util import check_random_state
from scipy.optimize._constraints import new_bounds_to_old
__all__ = ['dual_annealing']
class VisitingDistribution:
"""
Class used to generate new coordinates based on the distorted
Cauchy-Lorentz distribution. Depending on the steps within the strategy
chain, the class implements the strategy for generating new location
changes.
Parameters
----------
lb : array_like
A 1-D NumPy ndarray containing lower bounds of the generated
        components. Neither NaN nor inf are allowed.
ub : array_like
A 1-D NumPy ndarray containing upper bounds for the generated
        components. Neither NaN nor inf are allowed.
visiting_param : float
Parameter for visiting distribution. Default value is 2.62.
Higher values give the visiting distribution a heavier tail, this
makes the algorithm jump to a more distant region.
The value range is (1, 3]. Its value is fixed for the life of the
object.
rand_gen : {`~numpy.random.RandomState`, `~numpy.random.Generator`}
A `~numpy.random.RandomState`, `~numpy.random.Generator` object
for using the current state of the created random generator container.
"""
TAIL_LIMIT = 1.e8
MIN_VISIT_BOUND = 1.e-10
def __init__(self, lb, ub, visiting_param, rand_gen):
# if you wish to make _visiting_param adjustable during the life of
# the object then _factor2, _factor3, _factor5, _d1, _factor6 will
# have to be dynamically calculated in `visit_fn`. They're factored
# out here so they don't need to be recalculated all the time.
self._visiting_param = visiting_param
self.rand_gen = rand_gen
self.lower = lb
self.upper = ub
self.bound_range = ub - lb
# these are invariant numbers unless visiting_param changes
self._factor2 = np.exp((4.0 - self._visiting_param) * np.log(
self._visiting_param - 1.0))
self._factor3 = np.exp((2.0 - self._visiting_param) * np.log(2.0)
/ (self._visiting_param - 1.0))
self._factor4_p = np.sqrt(np.pi) * self._factor2 / (self._factor3 * (
3.0 - self._visiting_param))
self._factor5 = 1.0 / (self._visiting_param - 1.0) - 0.5
self._d1 = 2.0 - self._factor5
self._factor6 = np.pi * (1.0 - self._factor5) / np.sin(
np.pi * (1.0 - self._factor5)) / np.exp(gammaln(self._d1))
def visiting(self, x, step, temperature):
""" Based on the step in the strategy chain, new coordinates are
generated by changing all components is the same time or only
one of them, the new values are computed with visit_fn method
"""
dim = x.size
if step < dim:
# Changing all coordinates with a new visiting value
visits = self.visit_fn(temperature, dim)
upper_sample, lower_sample = self.rand_gen.uniform(size=2)
visits[visits > self.TAIL_LIMIT] = self.TAIL_LIMIT * upper_sample
visits[visits < -self.TAIL_LIMIT] = -self.TAIL_LIMIT * lower_sample
x_visit = visits + x
a = x_visit - self.lower
b = np.fmod(a, self.bound_range) + self.bound_range
x_visit = np.fmod(b, self.bound_range) + self.lower
x_visit[np.fabs(
x_visit - self.lower) < self.MIN_VISIT_BOUND] += 1.e-10
else:
# Changing only one coordinate at a time based on strategy
# chain step
x_visit = np.copy(x)
visit = self.visit_fn(temperature, 1)[0]
if visit > self.TAIL_LIMIT:
visit = self.TAIL_LIMIT * self.rand_gen.uniform()
elif visit < -self.TAIL_LIMIT:
visit = -self.TAIL_LIMIT * self.rand_gen.uniform()
index = step - dim
x_visit[index] = visit + x[index]
a = x_visit[index] - self.lower[index]
b = np.fmod(a, self.bound_range[index]) + self.bound_range[index]
x_visit[index] = np.fmod(b, self.bound_range[
index]) + self.lower[index]
if np.fabs(x_visit[index] - self.lower[
index]) < self.MIN_VISIT_BOUND:
x_visit[index] += self.MIN_VISIT_BOUND
return x_visit
def visit_fn(self, temperature, dim):
""" Formula Visita from p. 405 of reference [2] """
x, y = self.rand_gen.normal(size=(dim, 2)).T
factor1 = np.exp(np.log(temperature) / (self._visiting_param - 1.0))
factor4 = self._factor4_p * factor1
# sigmax
x *= np.exp(-(self._visiting_param - 1.0) * np.log(
self._factor6 / factor4) / (3.0 - self._visiting_param))
den = np.exp((self._visiting_param - 1.0) * np.log(np.fabs(y)) /
(3.0 - self._visiting_param))
return x / den
class EnergyState:
"""
    Class used to record the energy state. At any time, it tracks the
    currently used coordinates and the most recent best location.
Parameters
----------
    lower : array_like
        A 1-D NumPy ndarray containing lower bounds for generating the initial
        random components in the `reset` method. Neither NaN nor inf are
        allowed.
    upper : array_like
        A 1-D NumPy ndarray containing upper bounds for generating the initial
        random components in the `reset` method. Neither NaN nor inf are
        allowed.
callback : callable, ``callback(x, f, context)``, optional
A callback function which will be called for all minima found.
``x`` and ``f`` are the coordinates and function value of the
latest minimum found, and `context` has value in [0, 1, 2]
"""
# Maximum number of trials for generating a valid starting point
MAX_REINIT_COUNT = 1000
def __init__(self, lower, upper, callback=None):
self.ebest = None
self.current_energy = None
self.current_location = None
self.xbest = None
self.lower = lower
self.upper = upper
self.callback = callback
def reset(self, func_wrapper, rand_gen, x0=None):
"""
        Initialize the current location in the search domain. If `x0` is not
provided, a random location within the bounds is generated.
"""
if x0 is None:
self.current_location = rand_gen.uniform(self.lower, self.upper,
size=len(self.lower))
else:
self.current_location = np.copy(x0)
init_error = True
reinit_counter = 0
while init_error:
self.current_energy = func_wrapper.fun(self.current_location)
if self.current_energy is None:
raise ValueError('Objective function is returning None')
if (not np.isfinite(self.current_energy) or np.isnan(
self.current_energy)):
if reinit_counter >= EnergyState.MAX_REINIT_COUNT:
init_error = False
message = (
                        'Stopping algorithm because the objective function '
                        'returns NaN or (+/-) infinity values even when '
                        'trying new random parameters'
)
raise ValueError(message)
self.current_location = rand_gen.uniform(self.lower,
self.upper,
size=self.lower.size)
reinit_counter += 1
else:
init_error = False
# If first time reset, initialize ebest and xbest
if self.ebest is None and self.xbest is None:
self.ebest = self.current_energy
self.xbest = np.copy(self.current_location)
# Otherwise, we keep them in case of reannealing reset
def update_best(self, e, x, context):
self.ebest = e
self.xbest = np.copy(x)
if self.callback is not None:
val = self.callback(x, e, context)
if val is not None:
if val:
return ('Callback function requested to stop early by '
'returning True')
def update_current(self, e, x):
self.current_energy = e
self.current_location = np.copy(x)
class StrategyChain:
"""
    Class that implements, within a Markov chain, the strategy for location
    acceptance and local search decision making.
Parameters
----------
acceptance_param : float
Parameter for acceptance distribution. It is used to control the
probability of acceptance. The lower the acceptance parameter, the
smaller the probability of acceptance. Default value is -5.0 with
a range (-1e4, -5].
visit_dist : VisitingDistribution
Instance of `VisitingDistribution` class.
func_wrapper : ObjectiveFunWrapper
Instance of `ObjectiveFunWrapper` class.
minimizer_wrapper: LocalSearchWrapper
Instance of `LocalSearchWrapper` class.
rand_gen : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
energy_state: EnergyState
Instance of `EnergyState` class.
"""
def __init__(self, acceptance_param, visit_dist, func_wrapper,
minimizer_wrapper, rand_gen, energy_state):
# Local strategy chain minimum energy and location
self.emin = energy_state.current_energy
self.xmin = np.array(energy_state.current_location)
# Global optimizer state
self.energy_state = energy_state
# Acceptance parameter
self.acceptance_param = acceptance_param
# Visiting distribution instance
self.visit_dist = visit_dist
# Wrapper to objective function
self.func_wrapper = func_wrapper
# Wrapper to the local minimizer
self.minimizer_wrapper = minimizer_wrapper
self.not_improved_idx = 0
self.not_improved_max_idx = 1000
self._rand_gen = rand_gen
self.temperature_step = 0
self.K = 100 * len(energy_state.current_location)
def accept_reject(self, j, e, x_visit):
r = self._rand_gen.uniform()
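        # Generalized acceptance probability from the dual_annealing docstring:
        # p_qa = [1 - (1 - q_a) * beta * dE]^(1/(1 - q_a)), with
        # q_a = acceptance_param and beta = 1 / temperature_step; a negative
        # base yields zero acceptance probability.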
pqv_temp = 1.0 - ((1.0 - self.acceptance_param) *
(e - self.energy_state.current_energy) / self.temperature_step)
if pqv_temp <= 0.:
pqv = 0.
else:
pqv = np.exp(np.log(pqv_temp) / (
1. - self.acceptance_param))
if r <= pqv:
# We accept the new location and update state
self.energy_state.update_current(e, x_visit)
self.xmin = np.copy(self.energy_state.current_location)
# No improvement for a long time
if self.not_improved_idx >= self.not_improved_max_idx:
if j == 0 or self.energy_state.current_energy < self.emin:
self.emin = self.energy_state.current_energy
self.xmin = np.copy(self.energy_state.current_location)
def run(self, step, temperature):
self.temperature_step = temperature / float(step + 1)
self.not_improved_idx += 1
for j in range(self.energy_state.current_location.size * 2):
if j == 0:
if step == 0:
self.energy_state_improved = True
else:
self.energy_state_improved = False
x_visit = self.visit_dist.visiting(
self.energy_state.current_location, j, temperature)
# Calling the objective function
e = self.func_wrapper.fun(x_visit)
if e < self.energy_state.current_energy:
# We have got a better energy value
self.energy_state.update_current(e, x_visit)
if e < self.energy_state.ebest:
val = self.energy_state.update_best(e, x_visit, 0)
if val is not None:
if val:
return val
self.energy_state_improved = True
self.not_improved_idx = 0
else:
# We have not improved but do we accept the new location?
self.accept_reject(j, e, x_visit)
if self.func_wrapper.nfev >= self.func_wrapper.maxfun:
return ('Maximum number of function call reached '
'during annealing')
# End of StrategyChain loop
def local_search(self):
# Decision making for performing a local search
# based on strategy chain results
# If energy has been improved or no improvement since too long,
# performing a local search with the best strategy chain location
if self.energy_state_improved:
# Global energy has improved, let's see if LS improves further
e, x = self.minimizer_wrapper.local_search(self.energy_state.xbest,
self.energy_state.ebest)
if e < self.energy_state.ebest:
self.not_improved_idx = 0
val = self.energy_state.update_best(e, x, 1)
if val is not None:
if val:
return val
self.energy_state.update_current(e, x)
if self.func_wrapper.nfev >= self.func_wrapper.maxfun:
return ('Maximum number of function call reached '
'during local search')
# Check probability of a need to perform a LS even if no improvement
do_ls = False
if self.K < 90 * len(self.energy_state.current_location):
pls = np.exp(self.K * (
self.energy_state.ebest - self.energy_state.current_energy) /
self.temperature_step)
if pls >= self._rand_gen.uniform():
do_ls = True
# Global energy not improved, let's see what LS gives
# on the best strategy chain location
if self.not_improved_idx >= self.not_improved_max_idx:
do_ls = True
if do_ls:
e, x = self.minimizer_wrapper.local_search(self.xmin, self.emin)
self.xmin = np.copy(x)
self.emin = e
self.not_improved_idx = 0
self.not_improved_max_idx = self.energy_state.current_location.size
if e < self.energy_state.ebest:
val = self.energy_state.update_best(
self.emin, self.xmin, 2)
if val is not None:
if val:
return val
self.energy_state.update_current(e, x)
if self.func_wrapper.nfev >= self.func_wrapper.maxfun:
return ('Maximum number of function call reached '
'during dual annealing')
class ObjectiveFunWrapper:
def __init__(self, func, maxfun=1e7, *args):
self.func = func
self.args = args
# Number of objective function evaluations
self.nfev = 0
# Number of gradient function evaluation if used
self.ngev = 0
# Number of hessian of the objective function if used
self.nhev = 0
self.maxfun = maxfun
def fun(self, x):
self.nfev += 1
return self.func(x, *self.args)
class LocalSearchWrapper:
"""
Class used to wrap around the minimizer used for local search
Default local minimizer is SciPy minimizer L-BFGS-B
"""
LS_MAXITER_RATIO = 6
LS_MAXITER_MIN = 100
LS_MAXITER_MAX = 1000
def __init__(self, search_bounds, func_wrapper, *args, **kwargs):
self.func_wrapper = func_wrapper
self.kwargs = kwargs
self.jac = self.kwargs.get('jac', None)
self.minimizer = minimize
bounds_list = list(zip(*search_bounds))
self.lower = np.array(bounds_list[0])
self.upper = np.array(bounds_list[1])
# If no minimizer specified, use SciPy minimize with 'L-BFGS-B' method
if not self.kwargs:
n = len(self.lower)
ls_max_iter = min(max(n * self.LS_MAXITER_RATIO,
self.LS_MAXITER_MIN),
self.LS_MAXITER_MAX)
self.kwargs['method'] = 'L-BFGS-B'
self.kwargs['options'] = {
'maxiter': ls_max_iter,
}
self.kwargs['bounds'] = list(zip(self.lower, self.upper))
elif callable(self.jac):
def wrapped_jac(x):
return self.jac(x, *args)
self.kwargs['jac'] = wrapped_jac
def local_search(self, x, e):
# Run local search from the given x location where energy value is e
x_tmp = np.copy(x)
mres = self.minimizer(self.func_wrapper.fun, x, **self.kwargs)
if 'njev' in mres:
self.func_wrapper.ngev += mres.njev
if 'nhev' in mres:
self.func_wrapper.nhev += mres.nhev
# Check if is valid value
is_finite = np.all(np.isfinite(mres.x)) and np.isfinite(mres.fun)
in_bounds = np.all(mres.x >= self.lower) and np.all(
mres.x <= self.upper)
is_valid = is_finite and in_bounds
# Use the new point only if it is valid and return a better results
if is_valid and mres.fun < e:
return mres.fun, mres.x
else:
return e, x_tmp
def dual_annealing(func, bounds, args=(), maxiter=1000,
minimizer_kwargs=None, initial_temp=5230.,
restart_temp_ratio=2.e-5, visit=2.62, accept=-5.0,
maxfun=1e7, seed=None, no_local_search=False,
callback=None, x0=None):
"""
Find the global minimum of a function using Dual Annealing.
Parameters
----------
func : callable
The objective function to be minimized. Must be in the form
``f(x, *args)``, where ``x`` is the argument in the form of a 1-D array
and ``args`` is a tuple of any additional fixed parameters needed to
completely specify the function.
bounds : sequence or `Bounds`
Bounds for variables. There are two ways to specify the bounds:
1. Instance of `Bounds` class.
2. Sequence of ``(min, max)`` pairs for each element in `x`.
args : tuple, optional
Any additional fixed parameters needed to completely specify the
objective function.
maxiter : int, optional
The maximum number of global search iterations. Default value is 1000.
minimizer_kwargs : dict, optional
Extra keyword arguments to be passed to the local minimizer
(`minimize`). Some important options could be:
``method`` for the minimizer method to use and ``args`` for
objective function additional arguments.
initial_temp : float, optional
        The initial temperature; use higher values to facilitate a wider
search of the energy landscape, allowing dual_annealing to escape
local minima that it is trapped in. Default value is 5230. Range is
(0.01, 5.e4].
restart_temp_ratio : float, optional
During the annealing process, temperature is decreasing, when it
reaches ``initial_temp * restart_temp_ratio``, the reannealing process
is triggered. Default value of the ratio is 2e-5. Range is (0, 1).
visit : float, optional
Parameter for visiting distribution. Default value is 2.62. Higher
        values give the visiting distribution a heavier tail, which makes
the algorithm jump to a more distant region. The value range is (1, 3].
accept : float, optional
Parameter for acceptance distribution. It is used to control the
probability of acceptance. The lower the acceptance parameter, the
smaller the probability of acceptance. Default value is -5.0 with
a range (-1e4, -5].
maxfun : int, optional
Soft limit for the number of objective function calls. If the
algorithm is in the middle of a local search, this number will be
exceeded, the algorithm will stop just after the local search is
done. Default value is 1e7.
seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
Specify `seed` for repeatable minimizations. The random numbers
generated with this seed only affect the visiting distribution function
and new coordinates generation.
no_local_search : bool, optional
If `no_local_search` is set to True, a traditional Generalized
Simulated Annealing will be performed with no local search
strategy applied.
callback : callable, optional
A callback function with signature ``callback(x, f, context)``,
which will be called for all minima found.
``x`` and ``f`` are the coordinates and function value of the
latest minimum found, and ``context`` has value in [0, 1, 2], with the
following meaning:
- 0: minimum detected in the annealing process.
- 1: detection occurred in the local search process.
- 2: detection done in the dual annealing process.
If the callback implementation returns True, the algorithm will stop.
x0 : ndarray, shape(n,), optional
Coordinates of a single N-D starting point.
Returns
-------
res : OptimizeResult
The optimization result represented as a `OptimizeResult` object.
Important attributes are: ``x`` the solution array, ``fun`` the value
of the function at the solution, and ``message`` which describes the
cause of the termination.
See `OptimizeResult` for a description of other attributes.
Notes
-----
This function implements the Dual Annealing optimization. This stochastic
approach derived from [3]_ combines the generalization of CSA (Classical
Simulated Annealing) and FSA (Fast Simulated Annealing) [1]_ [2]_ coupled
to a strategy for applying a local search on accepted locations [4]_.
An alternative implementation of this same algorithm is described in [5]_
and benchmarks are presented in [6]_. This approach introduces an advanced
method to refine the solution found by the generalized annealing
process. This algorithm uses a distorted Cauchy-Lorentz visiting
distribution, with its shape controlled by the parameter :math:`q_{v}`
.. math::
g_{q_{v}}(\\Delta x(t)) \\propto \\frac{ \\
\\left[T_{q_{v}}(t) \\right]^{-\\frac{D}{3-q_{v}}}}{ \\
\\left[{1+(q_{v}-1)\\frac{(\\Delta x(t))^{2}} { \\
\\left[T_{q_{v}}(t)\\right]^{\\frac{2}{3-q_{v}}}}}\\right]^{ \\
\\frac{1}{q_{v}-1}+\\frac{D-1}{2}}}
Where :math:`t` is the artificial time. This visiting distribution is used
to generate a trial jump distance :math:`\\Delta x(t)` of variable
:math:`x(t)` under artificial temperature :math:`T_{q_{v}}(t)`.
From the starting point, after calling the visiting distribution
function, the acceptance probability is computed as follows:
.. math::
p_{q_{a}} = \\min{\\{1,\\left[1-(1-q_{a}) \\beta \\Delta E \\right]^{ \\
\\frac{1}{1-q_{a}}}\\}}
    Where :math:`q_{a}` is an acceptance parameter. For :math:`q_{a}<1`, zero
acceptance probability is assigned to the cases where
.. math::
[1-(1-q_{a}) \\beta \\Delta E] < 0
The artificial temperature :math:`T_{q_{v}}(t)` is decreased according to
.. math::
T_{q_{v}}(t) = T_{q_{v}}(1) \\frac{2^{q_{v}-1}-1}{\\left( \\
1 + t\\right)^{q_{v}-1}-1}
Where :math:`q_{v}` is the visiting parameter.
.. versionadded:: 1.2.0
References
----------
.. [1] Tsallis C. Possible generalization of Boltzmann-Gibbs
statistics. Journal of Statistical Physics, 52, 479-487 (1998).
.. [2] Tsallis C, Stariolo DA. Generalized Simulated Annealing.
Physica A, 233, 395-406 (1996).
.. [3] Xiang Y, Sun DY, Fan W, Gong XG. Generalized Simulated
Annealing Algorithm and Its Application to the Thomson Model.
Physics Letters A, 233, 216-220 (1997).
.. [4] Xiang Y, Gong XG. Efficiency of Generalized Simulated
Annealing. Physical Review E, 62, 4473 (2000).
.. [5] Xiang Y, Gubian S, Suomela B, Hoeng J. Generalized
Simulated Annealing for Efficient Global Optimization: the GenSA
Package for R. The R Journal, Volume 5/1 (2013).
.. [6] Mullen, K. Continuous Global Optimization in R. Journal of
Statistical Software, 60(6), 1 - 45, (2014).
:doi:`10.18637/jss.v060.i06`
Examples
--------
The following example is a 10-D problem, with many local minima.
The function involved is called Rastrigin
(https://en.wikipedia.org/wiki/Rastrigin_function)
>>> import numpy as np
>>> from scipy.optimize import dual_annealing
>>> func = lambda x: np.sum(x*x - 10*np.cos(2*np.pi*x)) + 10*np.size(x)
>>> lw = [-5.12] * 10
>>> up = [5.12] * 10
>>> ret = dual_annealing(func, bounds=list(zip(lw, up)))
>>> ret.x
array([-4.26437714e-09, -3.91699361e-09, -1.86149218e-09, -3.97165720e-09,
-6.29151648e-09, -6.53145322e-09, -3.93616815e-09, -6.55623025e-09,
-6.05775280e-09, -5.00668935e-09]) # random
>>> ret.fun
0.000000
"""
if isinstance(bounds, Bounds):
bounds = new_bounds_to_old(bounds.lb, bounds.ub, len(bounds.lb))
# noqa: E501
if x0 is not None and not len(x0) == len(bounds):
raise ValueError('Bounds size does not match x0')
lu = list(zip(*bounds))
lower = np.array(lu[0])
upper = np.array(lu[1])
# Check that restart temperature ratio is correct
if restart_temp_ratio <= 0. or restart_temp_ratio >= 1.:
raise ValueError('Restart temperature ratio has to be in range (0, 1)')
# Checking bounds are valid
if (np.any(np.isinf(lower)) or np.any(np.isinf(upper)) or np.any(
np.isnan(lower)) or np.any(np.isnan(upper))):
raise ValueError('Some bounds values are inf values or nan values')
# Checking that bounds are consistent
if not np.all(lower < upper):
raise ValueError('Bounds are not consistent min < max')
# Checking that bounds are the same length
if not len(lower) == len(upper):
raise ValueError('Bounds do not have the same dimensions')
# Wrapper for the objective function
func_wrapper = ObjectiveFunWrapper(func, maxfun, *args)
# minimizer_kwargs has to be a dict, not None
minimizer_kwargs = minimizer_kwargs or {}
minimizer_wrapper = LocalSearchWrapper(
bounds, func_wrapper, *args, **minimizer_kwargs)
# Initialization of random Generator for reproducible runs if seed provided
rand_state = check_random_state(seed)
# Initialization of the energy state
energy_state = EnergyState(lower, upper, callback)
energy_state.reset(func_wrapper, rand_state, x0)
# Minimum value of annealing temperature reached to perform
# re-annealing
temperature_restart = initial_temp * restart_temp_ratio
# VisitingDistribution instance
visit_dist = VisitingDistribution(lower, upper, visit, rand_state)
# Strategy chain instance
strategy_chain = StrategyChain(accept, visit_dist, func_wrapper,
minimizer_wrapper, rand_state, energy_state)
need_to_stop = False
iteration = 0
message = []
# OptimizeResult object to be returned
optimize_res = OptimizeResult()
optimize_res.success = True
optimize_res.status = 0
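    # Temperature schedule T(t) = T(1) * (2**(qv-1) - 1) / ((1 + t)**(qv-1) - 1):
    # t1 below is the constant numerator 2**(visit - 1) - 1; t2 (computed each
    # iteration) is the denominator for artificial time t = i + 1.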
t1 = np.exp((visit - 1) * np.log(2.0)) - 1.0
# Run the search loop
while not need_to_stop:
for i in range(maxiter):
# Compute temperature for this step
s = float(i) + 2.0
t2 = np.exp((visit - 1) * np.log(s)) - 1.0
temperature = initial_temp * t1 / t2
if iteration >= maxiter:
message.append("Maximum number of iteration reached")
need_to_stop = True
break
# Need a re-annealing process?
if temperature < temperature_restart:
energy_state.reset(func_wrapper, rand_state)
break
# starting strategy chain
val = strategy_chain.run(i, temperature)
if val is not None:
message.append(val)
need_to_stop = True
optimize_res.success = False
break
# Possible local search at the end of the strategy chain
if not no_local_search:
val = strategy_chain.local_search()
if val is not None:
message.append(val)
need_to_stop = True
optimize_res.success = False
break
iteration += 1
# Setting the OptimizeResult values
optimize_res.x = energy_state.xbest
optimize_res.fun = energy_state.ebest
optimize_res.nit = iteration
optimize_res.nfev = func_wrapper.nfev
optimize_res.njev = func_wrapper.ngev
optimize_res.nhev = func_wrapper.nhev
optimize_res.message = message
return optimize_res
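# ---------------------------------------------------------------------------
# Illustrative sketch (not part of SciPy): the annealing temperature computed
# in the loop above follows the generalised visiting schedule
#     T(i) = T0 * (2**(qv - 1) - 1) / ((i + 2)**(qv - 1) - 1),
# where qv is the `visit` parameter, so T(0) == T0 and the temperature decays
# polynomially with the iteration index. A quick standalone check, assuming
# the documented defaults qv = 2.62 and T0 = 5230:
if __name__ == "__main__":
    qv, T0 = 2.62, 5230.0
    t1 = 2.0 ** (qv - 1.0) - 1.0
    for i in range(5):
        t2 = float(i + 2) ** (qv - 1.0) - 1.0
        print(f"iteration {i}: temperature = {T0 * t1 / t2:.1f}")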
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@scipy@py3@scipy@optimize@[email protected]_END.py
|
{
"filename": "README.md",
"repo_name": "deepskies/deeplenstronomy",
"repo_path": "deeplenstronomy_extracted/deeplenstronomy-master/README.md",
"type": "Markdown"
}
|
# Welcome to `deeplenstronomy`!
[](https://joss.theoj.org/papers/e978dd566d1f290055a02d76288e95e1)
[](http://arxiv.org/abs/2102.02830)
[](https://pypi.org/project/deeplenstronomy/)
[](https://github.com/deepskies/deeplenstronomy/blob/master/LICENSE)
`deeplenstronomy` is a tool for simulating large datasets for applying deep learning to strong gravitational lensing.
It works by wrapping the functionalities of [`lenstronomy`](https://github.com/sibirrer/lenstronomy) in a convenient yaml-style interface, allowing users to embrace the astronomer part of their brain rather than their programmer part when generating training datasets.
## Installation
**With conda (Recommended)**
- Step 0: Set up an environment. This can be done straightforwardly with a `conda` installation:
```
conda create -n deeplens python=3.7 jupyter scipy pandas numpy matplotlib astropy h5py PyYAML mpmath future
conda activate deeplens
```
- Step 1: `pip install lenstronomy`
- Step 2: `pip install deeplenstronomy`
**With pip**
- Step 1: `pip install deeplenstronomy`
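Once installed, dataset generation is driven by a single yaml configuration file. A minimal sketch of the workflow (see the Getting Started Guide below for the authoritative interface; the `make_dataset` entry point and the `demo.yaml` filename are assumptions used here for illustration):

```python
import deeplenstronomy.deeplenstronomy as dl

# Simulate the dataset described by the configuration file
dataset = dl.make_dataset("demo.yaml")
```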
## [Getting Started and Example Notebooks](https://deepskies.github.io/deeplenstronomy/Notebooks/)
Start by reading the [Getting Started Guide](https://deepskies.github.io/deeplenstronomy/Notebooks/GettingStarted.html) to familiarize yourself with the `deeplenstronomy` style.
After that, check out the example notebooks below:
### Notebooks for `deeplenstronomy` Utilities
- [Creating `deeplenstronomy` Configuration Files](https://deepskies.github.io/deeplenstronomy/Notebooks/ConfigFiles.html)
- [Generating Datasets](https://deepskies.github.io/deeplenstronomy/Notebooks/DeepLenstronomyDemo.html)
- [Visualizing `deeplenstronomy` Images](https://deepskies.github.io/deeplenstronomy/Notebooks/Visualization.html)
- [Utilizing Astronomical Surveys](https://deepskies.github.io/deeplenstronomy/Notebooks/Surveys.html)
- [Defining Your Own Probability Distributions](https://deepskies.github.io/deeplenstronomy/Notebooks/UserDistributions.html)
- [Using Your Own Images as Backgrounds](https://deepskies.github.io/deeplenstronomy/Notebooks/BackgroundsDemo.html)
- [Simulating Time-Series Datasets](https://deepskies.github.io/deeplenstronomy/Notebooks/TimeSeriesDemo.html)
### Notebooks for Applying `deeplenstronomy` to Machine Learning Analyses
- [Using `deeplenstronomy` for Active Learning](https://deepskies.github.io/deeplenstronomy/Notebooks/ActiveUpdateDemo.html)
- [Using `deeplenstronomy` for Classification and Regression](https://deepskies.github.io/deeplenstronomy/Notebooks/Metrics.html)
### Notebooks for Suggested Science Cases
- [A Walkthrough of Using `deeplenstronomy` for Science](https://deepskies.github.io/deeplenstronomy/Notebooks/FullExample.html)
## API Documentation
`deeplenstronomy` is designed so that users only need to work with their personal configuration files and the dataset generation and image visualization functions.
However, if you would like to view the full API documentation, you can visit the [docs](https://deepskies.github.io/deeplenstronomy/docs/) page.
## Citation
If you use `deeplenstronomy` in your work, please include the following citations:
```
@article{deeplenstronomy,
doi = {10.21105/joss.02854},
url = {https://doi.org/10.21105/joss.02854},
year = {2021},
publisher = {The Open Journal},
volume = {6},
number = {58},
pages = {2854},
author = {Robert Morgan and Brian Nord and Simon Birrer and Joshua Yao-Yu Lin and Jason Poh},
title = {deeplenstronomy: A dataset simulation package for strong gravitational lensing},
journal = {Journal of Open Source Software}
}
@article{lenstronomy,
title = "lenstronomy: Multi-purpose gravitational lens modelling software package",
journal = "Physics of the Dark Universe",
volume = "22",
pages = "189 - 201",
year = "2018",
issn = "2212-6864",
doi = "10.1016/j.dark.2018.11.002",
url = "http://www.sciencedirect.com/science/article/pii/S2212686418301869",
author = "Simon Birrer and Adam Amara",
keywords = "Gravitational lensing, Software, Image simulations"
}
```
## Contact
If you have any questions or run into any errors with the beta release of `deeplenstronomy`, please don't hesitate to reach out:
Rob Morgan
<br>
robert [dot] morgan [at] wisc.edu
You can also message me on the DES, DELVE, LSSTC, deepskies, or lenstronomers Slack workspaces
<!---
.. image:: https://badge.fury.io/py/deeplenstronomy.png
:target: http://badge.fury.io/py/deeplenstronomy
.. image:: https://travis-ci.org/bnord/deeplenstronomy.png?branch=master
:target: https://travis-ci.org/bnord/deeplenstronomy
--->
|
deepskiesREPO_NAMEdeeplenstronomyPATH_START.@deeplenstronomy_extracted@[email protected]@.PATH_END.py
|
{
"filename": "_center.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/graph_objs/layout/geo/_center.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseLayoutHierarchyType as _BaseLayoutHierarchyType
import copy as _copy
class Center(_BaseLayoutHierarchyType):
# class properties
# --------------------
_parent_path_str = "layout.geo"
_path_str = "layout.geo.center"
_valid_props = {"lat", "lon"}
# lat
# ---
@property
def lat(self):
"""
Sets the latitude of the map's center. For all projection
types, the map's latitude center lies at the middle of the
latitude range by default.
The 'lat' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["lat"]
@lat.setter
def lat(self, val):
self["lat"] = val
# lon
# ---
@property
def lon(self):
"""
Sets the longitude of the map's center. By default, the map's
longitude center lies at the middle of the longitude range for
scoped projection and above `projection.rotation.lon`
otherwise.
The 'lon' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["lon"]
@lon.setter
def lon(self, val):
self["lon"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
lat
Sets the latitude of the map's center. For all
projection types, the map's latitude center lies at the
middle of the latitude range by default.
lon
Sets the longitude of the map's center. By default, the
map's longitude center lies at the middle of the
longitude range for scoped projection and above
`projection.rotation.lon` otherwise.
"""
def __init__(self, arg=None, lat=None, lon=None, **kwargs):
"""
Construct a new Center object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.layout.geo.Center`
lat
Sets the latitude of the map's center. For all
projection types, the map's latitude center lies at the
middle of the latitude range by default.
lon
Sets the longitude of the map's center. By default, the
map's longitude center lies at the middle of the
longitude range for scoped projection and above
`projection.rotation.lon` otherwise.
Returns
-------
Center
"""
super(Center, self).__init__("center")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.layout.geo.Center
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.geo.Center`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("lat", None)
_v = lat if lat is not None else _v
if _v is not None:
self["lat"] = _v
_v = arg.pop("lon", None)
_v = lon if lon is not None else _v
if _v is not None:
self["lon"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
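# ---------------------------------------------------------------------------
# Usage sketch (not part of the generated plotly module): layout.geo.center is
# normally set through the public figure API rather than by instantiating
# Center directly. A minimal, assumed-typical example:
if __name__ == "__main__":
    import plotly.graph_objects as go

    fig = go.Figure(go.Scattergeo(lat=[48.86], lon=[2.35], text=["Paris"]))
    # Equivalent to setting fig.layout.geo.center = Center(lat=48.86, lon=2.35)
    fig.update_geos(center=dict(lat=48.86, lon=2.35))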
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@graph_objs@layout@geo@[email protected]_END.py
|
{
"filename": "michi2_filter_flux_2sigma.py",
"repo_name": "1054/Crab.Toolkit.michi2",
"repo_path": "Crab.Toolkit.michi2_extracted/Crab.Toolkit.michi2-master/bin/michi2_filter_flux_2sigma.py",
"type": "Python"
}
|
#!/usr/bin/env python
#
import os
import sys
import numpy
import astropy
import astropy.io.ascii as asciitable
from copy import copy
####################################
# MAIN #
####################################
if not len(sys.argv) > 2:
print('Usage: michi2_filter_flux_2sigma.py input_flux.txt output_flux.txt')
sys.exit()
data_table = asciitable.read(sys.argv[1])
if not len(data_table.colnames) >= 3:
print('Error! The input flux data table does not have at least three columns: wavelength, flux density and error in flux density.')
sys.exit()
#
w = data_table.field(data_table.colnames[0])
f = data_table.field(data_table.colnames[1])
ferr = data_table.field(data_table.colnames[2])
mask = (f<2.0*ferr) | (w<=0)
isel = numpy.argwhere(mask).flatten()
if len(isel) > 0:
#print(isel)
#print(data_table)
data_table.remove_rows(isel)
#print(data_table)
# set the error of zero-error data points to 1/3 of their flux
w = data_table.field(data_table.colnames[0])
f = data_table.field(data_table.colnames[1])
ferr = data_table.field(data_table.colnames[2])
mask = (ferr==0)
isel = numpy.argwhere(mask).flatten()
if len(isel) > 0:
ferr[mask] = f[mask] / 3.0
for iseli in isel:
print('Limited row %d error from zero to 1/3: w = %s, f = %s, ferr = %s'%(iseli, w[iseli], f[iseli], ferr[iseli]))
# deal with duplicated w
i = 0
while i < len(data_table):
w = data_table.field(data_table.colnames[0])
f = data_table.field(data_table.colnames[1])
ferr = data_table.field(data_table.colnames[2])
# identify duplicated w
mask2 = (w==w[i])
isel2 = numpy.argwhere(mask2).flatten()
if len(isel2) >= 2:
# found duplicated w
print('Found wavelength-duplicated rows: %s'%(isel2))
print(data_table[isel2])
f_to_average = f[mask2]
ferr_to_average = ferr[mask2]
f_averaged = numpy.sum(f_to_average/ferr_to_average**2)/numpy.sum(1/ferr_to_average**2)
ferr_averaged = numpy.sqrt(1/numpy.sum(ferr_to_average**(-2))) # error propagation of weighted mean, see -- http://www.physics.umd.edu/courses/Phys261/F06/ErrorPropagation.pdf
# limit S/N not larger than 10
#if ferr_averaged < f_averaged/10.0:
# ferr_averaged = f_averaged/10.0
# store into data_table
f[i] = f_averaged # change f will directly change data_table!
ferr[i] = ferr_averaged # change ferr will directly change data_table!
print('Averaged wavelength-duplicated rows: w = %s, f = %s, ferr = %s'%(w[i], f_averaged, ferr_averaged))
# remove those duplicated rows, but keep current i row.
isel3 = isel2[(isel2 != i)]
for iseli in isel3:
print('data_table.remove_rows(%d)'%(iseli))
data_table.remove_rows(isel3)
i = i+1
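# A minimal, self-contained sketch of the inverse-variance weighting applied
# above (assumes independent Gaussian errors); defined here for illustration
# only and not called by this script.
def weighted_mean_and_error(flux, flux_err):
    # weights w_i = 1/sigma_i^2, mean = sum(w_i*f_i)/sum(w_i),
    # error of the weighted mean = sqrt(1/sum(w_i))
    weights = 1.0 / flux_err**2
    mean = numpy.sum(weights * flux) / numpy.sum(weights)
    error = numpy.sqrt(1.0 / numpy.sum(weights))
    return mean, error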
# limit S/N to be not larger than 10
w = data_table.field(data_table.colnames[0])
f = data_table.field(data_table.colnames[1])
ferr = data_table.field(data_table.colnames[2])
mask = (ferr<f/10.0)
isel = numpy.argwhere(mask).flatten()
if len(isel) > 0:
ferr[mask] = f[mask] / 10.0
for iseli in isel:
print('Limited row %d S/N no larger than 10: w = %s, f = %s, ferr = %s'%(iseli, w[iseli], f[iseli], ferr[iseli]))
# sort
data_table.sort(data_table.colnames[0])
# output
out_file = sys.argv[2]
asciitable.write(data_table, out_file, Writer=asciitable.Ipac, delimiter=' ', overwrite=True)
#asciitable.write(data_table, sys.stdout, Writer=asciitable.Ipac, delimiter=' ')
with open(out_file, 'r+') as fp:
out_content = fp.readlines() # read everything in the file
out_iline = 0
out_header = [] # Ipac format has multiple comment lines (commented by the char '\\') and 4 header lines.
fp.seek(0)
while out_iline < len(out_content):
if out_content[out_iline][0] == '\\':
# if this is a commented line, then we change the comment mark to '#'
out_content[out_iline] = '#' + out_content[out_iline][1:]
fp.write(out_content[out_iline])
else:
if len(out_header) == 0:
# if this is the first header line, then replace the first white space by '#', or if there is no white space, prepend '#'.
if out_content[out_iline][0] == ' ':
out_content[out_iline] = '#' + out_content[out_iline][1:]
else:
out_content[out_iline] = '#' + out_content[out_iline]
# append header to 'out_header' list
out_header.append(out_content[out_iline])
# write only one header line
fp.write(out_content[out_iline])
#
elif len(out_header) < 4:
# append header to 'out_header' list
out_header.append(out_content[out_iline])
# skip the 2nd to 4th header line
pass
else:
# write data line
fp.write(out_content[out_iline])
#
out_iline = out_iline + 1
fp.truncate()
fp.close()
#os.system('sed -i.bak -e "$(grep \"\\\" %s | wc -l)s/^ /#/" "%s"'%(out_file, out_file))
#os.system('sed -i.bak -e "2d;3d;4d" "%s"'%(out_file))
#if os.path.isfile(out_file+'.bak'):
# os.system('rm "%s"'%(out_file+'.bak'))
print('Output to "%s"!'%(out_file))
| |
{
"filename": "test_functional.py",
"repo_name": "radiocosmology/driftscan",
"repo_path": "driftscan_extracted/driftscan-master/tests/test_functional.py",
"type": "Python"
}
|
"""Functional test suite for checking integrity of the analysis product generation.
The tests using the KL spectrum are affected by changes to the default cosmology/power
spectrum in cora, and there is no easy way to pin this during the tests. Best option is
to just update the products *if* the defaults change.
Also, due to problems with double forking MPI processes this test suite can only run
once within a single process. In particular you can't run this test suite followed by
`test_functional_skip.py`
"""
import shutil
import os
import subprocess
import tarfile
import numpy as np
import pytest
import h5py
from pathlib import Path
from platform import python_version
from urllib.request import urlretrieve
# Ensure we're using the correct package
_basedir = Path(__file__).parent.resolve()
def approx(x, rel=1e-4, abs=1e-8):
"""Pytest approx with changed defaults."""
return pytest.approx(x, rel=rel, abs=abs)
def orth_equal_approx(x, y, abs=1e-8):
"""Tet if two basis sets are roughly equal."""
overlap = np.dot(x, y.T.conj())
d = np.abs(np.abs(overlap) - np.identity(y.shape[0]))
return (d < abs).all()
@pytest.fixture(scope="module")
def test_dir(tmpdir_factory):
# If DRIFT_TESTDIR is set then use that
if "DRIFT_TESTDIR" in os.environ:
_base = Path(os.environ["DRIFT_TESTDIR"])
else:
_base = Path(str(tmpdir_factory.mktemp("testdrift")))
return _base
# NOTE: we can't run this twice in the same test run. I think this is because
# MPI refuses to startup if you try to fork and MPI process from within an MPI
# process. It works once because the MPI env is not initialised until the
# `ProductManager` call which occurs *after* the product generation.
def _gen_prod(output_dir: Path, config: Path):
# If the data already exists then we don't need to re-run the tests
if not output_dir.exists():
output_dir.mkdir(parents=True)
shutil.copy(config, output_dir / "params.yaml")
cmd = "drift-makeproducts run params.yaml"
# If we're not on macOS try running under MPI
# On macOS this has recently been giving problems when running the MPI
# job from within pytest
if "DRIFT_NO_MPI" not in os.environ:
nproc = 2 # Use a fixed number to check that the MPI code works
cmd = ("mpirun -np %i --oversubscribe -bind-to none " % nproc) + cmd
print(f"Running test in: {output_dir}")
print("Generating products:", cmd)
proc = subprocess.run(cmd.split(), cwd=output_dir)
print("Done.")
retval = proc.returncode
else:
retval = 0
# Can't import this until the subprocess call is done, otherwise the nested
# MPI environments will fail
from drift.core import manager as pm
manager = pm.ProductManager.from_config(output_dir / "params.yaml")
return retval, output_dir, manager
@pytest.fixture(scope="module")
def products_run(test_dir):
# Generate the standard test products
return _gen_prod(
test_dir / f"prod_python_{python_version()}", _basedir / "testparams.yaml"
)
@pytest.fixture()
def return_code(products_run):
return products_run[0]
@pytest.fixture()
def testdir(products_run):
return products_run[1]
@pytest.fixture()
def manager(products_run):
return products_run[2]
@pytest.fixture(scope="module")
def saved_products(test_dir: Path):
_base = test_dir / "saved_products"
_base.mkdir(parents=True, exist_ok=True)
# Download the products into the root directory such that they don't need
# to be downloaded on each test run
prodfile = _basedir / "drift_testproducts.tar.gz"
# Download the test products if they don't exist locally
if not prodfile.exists():
print("Downloading test verification data.")
url = "http://bao.chimenet.ca/testcache/drift_testproducts.tar.gz"
urlretrieve(url, prodfile)
with tarfile.open(prodfile, "r:gz") as tf:
tf.extractall(path=_base)
def _load(fname):
path = _base / fname
if not path.exists():
raise ValueError("Saved product %s does not exist" % path)
return h5py.File(path, "r")
return _load
def test_return_code(return_code):
"""Test that the products exited cleanly."""
code = return_code // 256
assert code == 0
def test_signal_exit(return_code):
"""Test that the products exited cleanly."""
signal = return_code % 256
assert signal == 0
def test_manager(manager, testdir):
"""Check that the product manager code loads properly."""
mfile = Path(manager.directory)
tfile = testdir / "testdir"
assert mfile.samefile(tfile) # Manager does not see same directory
# Add padding to start of the btm dataset to account for the compact storage
def _pad_btm_m(fh):
bm_saved = fh["beam_m"][:]
m = int(fh.attrs["m"])
final_pad = [(0, 0)] * (bm_saved.ndim - 1) + [(m, 0)]
return np.pad(bm_saved, final_pad, mode="constant", constant_values=0)
# This works despite the non-determinism because the elements are small.
def test_beam_m(manager, saved_products):
"""Check the consistency of the m-ordered beams."""
# Load cached beam transfer and insert the zeros that are missing from the beginning
# of the l axis
with saved_products("beam_m_14.hdf5") as f:
bm_saved = _pad_btm_m(f)
bm = manager.beamtransfer.beam_m(14)
assert bm_saved.shape == bm.shape # Beam matrix (m=14) shape has changed
assert bm == approx(bm_saved) # Beam matrix (m=14) is incorrect
def test_svd_spectrum(manager, saved_products):
"""Test the SVD spectrum."""
with saved_products("svdspectrum.hdf5") as f:
svd_saved = f["singularvalues"][:]
svd = manager.beamtransfer.svd_all()
assert svd_saved.shape == svd.shape # SVD spectrum shapes not equal
assert svd == approx(svd_saved, rel=1e-3, abs=400) # SVD spectrum is incorrect
def test_kl_spectrum(manager, saved_products):
"""Check the KL spectrum (for the foregroundless model)."""
with saved_products("evals_kl.hdf5") as f:
ev_saved = f["evals"][:]
ev = manager.kltransforms["kl"].evals_all()
assert ev_saved.shape == ev.shape # KL spectrum shapes not equal
assert ev == approx(ev_saved) # KL spectrum is incorrect
@pytest.mark.skip(reason="Non determinstic SHT (libsharp), means this doesn't work")
def test_kl_mode(manager, saved_products):
"""Check a KL mode (m=26) for the foregroundless model."""
with saved_products("ev_kl_m_26.hdf5") as f:
evecs_saved = f["evecs"][:]
evals, evecs = manager.kltransforms["kl"].modes_m(26)
assert evecs_saved.shape == evecs.shape # KL mode shapes not equal
assert orth_equal_approx(evecs, evecs_saved, abs=1e-5) # KL mode is incorrect
@pytest.mark.skip(reason="Non determinstic SHT (libsharp), means this doesn't work")
def test_dk_mode(manager, saved_products):
"""Check a KL mode (m=38) for the model with foregrounds."""
with saved_products("ev_dk_m_38.hdf5") as f:
evecs_saved = f["evecs"][:]
evals, evecs = manager.kltransforms["dk"].modes_m(38)
assert evecs_saved.shape == evecs.shape # DK mode shapes not equal
assert evecs == approx(evecs_saved) # DK mode is incorrect
def test_kl_fisher(manager, saved_products):
"""Test the Fisher matrix consistency. Use an approximate test as Monte-Carlo."""
with saved_products("fisher_kl.hdf5") as f:
fisher_saved = f["fisher"][:]
bias_saved = f["bias"][:]
ps = manager.psestimators["ps1"]
fisher, bias = ps.fisher_bias()
assert fisher_saved.shape == fisher.shape # KL Fisher shapes not equal
assert fisher == approx(fisher_saved, rel=3e-2, abs=1) # KL Fisher is incorrect
assert bias_saved.shape == bias.shape # KL bias shapes not equal
assert bias == approx(bias_saved, rel=3e-2, abs=1) # KL bias is incorrect.
def test_dk_fisher(manager, saved_products):
"""Test the DK Fisher matrix consistency. Use an approximate test as Monte-Carlo."""
with saved_products("fisher_dk.hdf5") as f:
fisher_saved = f["fisher"][:]
bias_saved = f["bias"][:]
ps = manager.psestimators["ps2"]
fisher, bias = ps.fisher_bias()
assert fisher_saved.shape == fisher.shape # DK Fisher shapes not equal
assert fisher == approx(fisher_saved, rel=3e-2, abs=1) # DK Fisher is incorrect
assert bias_saved.shape == bias.shape # DK bias shapes not equal
assert bias == approx(bias_saved, rel=3e-2, abs=1) # DK bias is incorrect.
@pytest.mark.skip(reason="Non determinstic SHT (libsharp), means this doesn't work")
def test_svd_mode(manager, saved_products):
"""Test that the SVD modes are correct."""
with saved_products("svd_m_14.hdf5") as f:
svd_saved = f["beam_svd"][:]
invsvd_saved = f["invbeam_svd"][:]
ut_saved = f["beam_ut"][:]
svd = manager.beamtransfer.beam_svd(14)
invsvd = manager.beamtransfer.invbeam_svd(14)
ut = manager.beamtransfer.beam_ut(14)
assert svd_saved.shape == svd.shape # SVD beam matrix (m=14) shape has changed
assert svd == approx(svd_saved) # SVD beam matrix (m=14) is incorrect
assert invsvd == approx(invsvd_saved) # Inverse SVD beam matrix (m=14) is incorrect
assert ut == approx(ut_saved) # SVD UT matrix (m=14) is incorrect
def test_dk_spectrum(manager, saved_products):
"""Check the KL spectrum (for the model with foregrounds)."""
with saved_products("evals_dk.hdf5") as f:
ev_saved = f["evals"][:]
ev = manager.kltransforms["dk"].evals_all()
assert ev_saved.shape == ev.shape # DK spectrum shapes not equal
assert ev == approx(ev_saved, rel=1e-2) # DK spectrum is incorrect
|
radiocosmologyREPO_NAMEdriftscanPATH_START.@driftscan_extracted@driftscan-master@tests@[email protected]_END.py
|
{
"filename": "instrument.py",
"repo_name": "trident-project/trident",
"repo_path": "trident_extracted/trident-main/trident/instrument.py",
"type": "Python"
}
|
"""
Instrument class and member functions.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2016, Trident Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
#-----------------------------------------------------------------------------
from yt.units.yt_array import \
YTQuantity
from trident.absorption_spectrum.absorption_spectrum import \
_bin_space_units
class Instrument(object):
"""
An instrument class for specifying a spectrograph/telescope pair
**Parameters**
:lambda_min: float or YTQuantity
Minimum desired wavelength for generated spectrum in angstroms
:lambda_max: float or YTQuantity
Maximum desired wavelength for generated spectrum in angstroms
:n_lambda: int
Number of desired wavelength bins for the spectrum
Setting dlambda overrides n_lambda value
Default: None
:dlambda: float or YTQuantity
Desired bin width for the spectrum in angstroms
Setting dlambda overrides n_lambda value
Default: None
:lsf_kernel: string
The filename for the :class:`~trident.LSF` kernel
Default: None
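:bin_space: string
Sets the quantity in which the spectral bins are defined
(see `_bin_space_units` for the recognized options)
Default: 'wavelength'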
:name: string
Name assigned to the :class:`~trident.Instrument` object
Default: None
"""
def __init__(self, lambda_min, lambda_max, n_lambda=None,
dlambda=None, lsf_kernel=None, bin_space='wavelength',
name=None):
self.bin_space = bin_space
if str(lambda_min) != 'auto' and not isinstance(lambda_min, YTQuantity):
lambda_min = YTQuantity(lambda_min, _bin_space_units[self.bin_space])
self.lambda_min = lambda_min
if str(lambda_max) != 'auto' and not isinstance(lambda_max, YTQuantity):
lambda_max = YTQuantity(lambda_max, _bin_space_units[self.bin_space])
self.lambda_max = lambda_max
self.lsf_kernel = lsf_kernel
self.name = name
if n_lambda is None and dlambda is None:
raise RuntimeError("Either n_lambda or dlambda must be set to "
"specify the binsize")
elif dlambda is not None:
if not isinstance(dlambda, YTQuantity):
dlambda = YTQuantity(dlambda, _bin_space_units[self.bin_space])
if str(lambda_min) == 'auto' or str(lambda_max) == 'auto':
n_lambda = 'auto'
else:
# adding 1 here to assure we cover full lambda range
n_lambda = (lambda_max - lambda_min) / dlambda + 1
self.n_lambda = n_lambda
if dlambda is None:
# adding 1 here to assure we cover full lambda range
dlambda = (lambda_max - lambda_min) / float(n_lambda - 1)
self.dlambda = dlambda
def __repr__(self):
disp = "<Instrument>:\n"
disp += " name: %s\n" % self.name
disp += " lambda_min: %s\n" % self.lambda_min
disp += " lambda_max: %s\n" % self.lambda_max
disp += " n_lambda: %s\n" % self.n_lambda
disp += " dlambda: %s\n" % self.dlambda
disp += " lsf_kernel: %s\n" % self.lsf_kernel
return disp
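# Usage sketch (not part of the original module; the numbers are purely
# illustrative): build a custom instrument with a fixed 0.01 angstrom bin
# width and print its summary.
if __name__ == "__main__":
    custom = Instrument(1150., 1450., dlambda=0.01, name='custom-FUV')
    print(custom)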
|
trident-projectREPO_NAMEtridentPATH_START.@trident_extracted@trident-main@[email protected]@.PATH_END.py
|
{
"filename": "functions.py",
"repo_name": "justinread/gravsphere",
"repo_path": "gravsphere_extracted/gravsphere-master/functions.py",
"type": "Python"
}
|
import sys
import numpy as np
from scipy.integrate import simps as integrator
from scipy.misc.common import derivative
from scipy.special import gamma
from scipy.integrate import quad, dblquad
from constants import *
multimode = 'normal'
###########################################################
#For setting cosmology priors on coreNFWtides parameters.
def cosmo_cfunc(M200,h):
#From Dutton & Maccio 2014. Requires as input masses
#defined in 200c system in units of Msun:
c = 10.**(0.905 - 0.101 * (np.log10(M200*h)-12.))
return c
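#Illustrative check (not part of the original code): for a Milky-Way-like
#halo, cosmo_cfunc(1.0e12, 0.7) evaluates to c200 ~ 8.3 from the relation
#above.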
def cosmo_cfunc_WDM(M200,h,OmegaM,rhocrit,mWDM):
#Use formula in https://arxiv.org/pdf/1112.0330.pdf
#to modify CDM M200-c200 relation to the WDM
#one. Assumes mWDM in keV, dimensionless h
#M200 in Msun and rhocrit in Msun kpc^-3.
cCDM = cosmo_cfunc(M200,h)
gamma1 = 15.0
gamma2 = 0.3
lamfseff = 0.049*(mWDM)**(-1.11)*\
(OmegaM/0.25)**(0.11)*(h/0.7)**(1.22)*1000.0
lamhm = 13.93*lamfseff
Mhm = 4.0/3.0*np.pi*rhocrit*(lamhm/2.0)**3.0
cWDM = cCDM * (1.0 + gamma1*Mhm / M200)**(-gamma2)
return cWDM
###########################################################
#For constraining particle DM models:
def rhoNFW(r,rhos,rs):
return rhos/((r/rs)*(1.+(r/rs))**2.)
def sidm_novel(rc,M200,c,oden,rhocrit):
#Calculate SIDM model parameters from the coreNFWtides
#model fit. For this to be valid, the coreNFWtides fit
#should assume a pure-core model, with n=1. See
#Read et al. 2018 for further details.
#Returns cross section/particle mass in cm^2 / g.
GammaX = 0.005/(1e9*year)
Guse = G*Msun/kpc
rho_unit = Msun/kpc**3.0
rc = np.abs(rc)*10.0
gcon=1./(np.log(1.+c)-c/(1.+c))
deltachar=oden*c**3.*gcon/3.
rv=(3./4.*M200/(np.pi*oden*rhocrit))**(1./3.)
rs=rv/c
rhos=rhocrit*deltachar
rhorc = rhoNFW(rc,rhos,rs)
r = np.logspace(np.log10(rc),np.log10(rs*5000.0),50000)
rho = rhoNFW(r,rhos,rs)
mass = M200*gcon*(np.log(1.0 + r/rs)-r/rs/(1.0+r/rs))
sigvtworc = Guse/rhorc*integrator(mass*rho/r**2.0,r)
sigvrc = np.sqrt(sigvtworc)
sigm = np.sqrt(np.pi)*GammaX/(4.0*rhorc*rho_unit*sigvrc)
return sigm*100.0**2.0/1000.0
def radius_dsph(s, b, distance):
return np.sqrt((distance * np.sin(b))**2. + s*s)
def integrand(s, b, distance, rho, Mpars):
value = np.sin(b) * rho(np.array([radius_dsph(s, b, distance)]), Mpars)**2
return value
def integrand_D(s, b, distance, rho, Mpars):
value = np.sin(b) * rho(np.array([radius_dsph(s, b, distance)]), Mpars)
return value
def get_J(rho, Mpars, distance, r_max):
"""
Compute the J factor.
:param distance: the distance of the galaxy in kpc
:param r_max: the maximum radius over which to integrate
[this gives an integration angle of
alpha = r_max/distance (rads)]
:param r: the radius array for the density profile in kpc
:param rho: the density array for the density profile in Msun/kpc^3
:return: the J factor in in GeV c^-4 cm^-5
"""
#Min/max integration angles in radians:
b_min = 0.0
b_max = np.arcsin(r_max/distance)
#This is an appropriate choice for Dwarf galaxies but
#should be reconsidered for large mass systems:
Rmaximum = 250.0
#Upper/lower limits:
s_min_bound = lambda b : -(Rmaximum**2 - (distance*np.sin(b))**2 )**0.5
s_max_bound = lambda b : (Rmaximum**2 - (distance*np.sin(b))**2 )**0.5
#Computation J_max:
Acc_arr = 1.0e-8
J_max = dblquad(integrand,b_min,b_max,s_min_bound,\
s_max_bound,args=(distance,rho,Mpars),\
epsabs=Acc_arr,epsrel=Acc_arr)
J_max = J_max[0]*kpccm*2.*np.pi*Msunkpc3toGeVcm3**2.0
#Error checking:
if (J_max == np.inf):
print('Argh! Infinite J_max!! Bye bye...')
sys.exit(0)
if (J_max < 0):
print('Argh! Negative J_max!! Bye bye...')
sys.exit(0)
return J_max # in GeV^2 c^-4 cm^-5
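#Note (added for clarity, not in the original): this is the standard
#annihilation J factor, J = int dOmega int_los rho^2 dl, evaluated with the
#line of sight truncated at Rmaximum and the azimuthal integral supplying
#the factor of 2*pi.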
def get_D(rho, Mpars, distance, r_max):
"""
Compute the D factor.
:param distance: the distance of the galaxy in kpc
:param r_max: the maximum radius over which to integrate
[this gives an integration angle of
alpha = r_max/distance (rads)]
:param r: the radius array for the density profile in kpc
:param rho: the density array for the density profile in Msun/kpc^3
:return: the D factor in in GeV c^-2 cm^-2
"""
# Min/max integration angles in radians:
r_min = 0.0
b_min = np.arcsin(r_min/distance)
b_max = np.arcsin(r_max/distance)
#This is an appropriate choice for Dwarf galaxies but
#should be reconsidered for large mass systems:
Rmaximum = 250.0
#Upper/lower limits:
s_min_bound = lambda b : -(Rmaximum**2 - (distance*np.sin(b))**2 )**0.5
s_max_bound = lambda b : (Rmaximum**2 - (distance*np.sin(b))**2 )**0.5
#Computation J_max:
Acc_arr = 1.0e-8
D_max = dblquad(integrand_D,b_min,b_max,s_min_bound,\
s_max_bound,args=(distance,rho,Mpars),\
epsabs=Acc_arr,epsrel=Acc_arr)
D_max = D_max[0]*kpccm*2.*np.pi*Msunkpc3toGeVcm3
#Error checking:
if (D_max == np.inf):
print('Argh! Infinite D_max!! Bye bye...')
sys.exit(0)
if (D_max < 0):
print('Argh! Negative D_max!! Bye bye...')
sys.exit(0)
return D_max # in GeV c^-2 cm^-2
###########################################################
#For DM mass profile:
def corenfw_tides_den(r,M200,c,rc,n,rt,delta):
gcon=1./(np.log(1.+c)-c/(1.+c))
deltachar=oden*c**3.*gcon/3.
rv=(3./4.*M200/(np.pi*oden*rhocrit))**(1./3.)
rs=rv/c
rhos=rhocrit*deltachar
rhoanal = rhos/((r/rs)*(1.+(r/rs))**2.)
manal = M200 * gcon * (np.log(1.0 + r/rs)-r/rs/(1.0+r/rs))
x = r/np.abs(rc)
f = np.tanh(x)
my_manal = manal*f**n
my_rhoanal = rhoanal*f**n + \
1.0/(4.*np.pi*r**2.*np.abs(rc))*manal*(1.0-f**2.)*n*f**(n-1.0)
frt = np.tanh(rt/np.abs(rc))
manal_rt = M200 * gcon * (np.log(1.0 + rt/rs)-rt/rs/(1.0+rt/rs))
my_rhoanal_rt = rhos/((rt/rs)*(1.+(rt/rs))**2.)*frt**n + \
1.0/(4.*np.pi*rt**2.*np.abs(rc))*manal_rt*(1.0-frt**2.)*n*frt**(n-1.0)
my_rhoanal[r > rt] = my_rhoanal_rt * (r[r > rt]/rt)**(-delta)
return my_rhoanal
def corenfw_tides_mass(r,M200,c,rc,n,rt,delta):
gcon=1./(np.log(1.+c)-c/(1.+c))
deltachar=oden*c**3.*gcon/3.
rv=(3./4.*M200/(np.pi*oden*rhocrit))**(1./3.)
rs=rv/c
rhos=rhocrit*deltachar
rhoanal = rhos/((r/rs)*(1.+(r/rs))**2.)
manal = M200 * gcon * (np.log(1.0 + r/rs)-r/rs/(1.0+r/rs))
x = r/np.abs(rc)
f = np.tanh(x)
my_manal = manal*f**n
frt = np.tanh(rt/np.abs(rc))
manal_rt = M200 * gcon * (np.log(1.0 + rt/rs)-rt/rs/(1.0+rt/rs))
my_rhoanal_rt = rhos/((rt/rs)*(1.+(rt/rs))**2.)*frt**n + \
1.0/(4.*np.pi*rt**2.*np.abs(rc))*manal_rt*(1.0-frt**2.)*n*frt**(n-1.0)
Mrt = manal_rt*frt**n
my_manal[r > rt] = Mrt + \
4.0*np.pi*my_rhoanal_rt*rt**3.0/(3.0-delta)*\
((r[r > rt]/rt)**(3.0-delta)-1.0)
return my_manal
def corenfw_tides_dlnrhodlnr(r,M200,c,rc,n,rt,delta):
dden = derivative(\
lambda x: corenfw_tides_den(x,M200,c,rc,n,rt,delta),\
r,dx=1e-6)
dlnrhodlnr = dden / corenfw_tides_den(r,M200,c,rc,n,rt,delta) * r
return dlnrhodlnr
def vmax_func(M200,c200,h):
oden = 200.0
Guse = G*Msun/kpc
r200=(3./4.*M200/(np.pi*oden*rhocrit))**(1./3.)
#This from Sigad et al. 2000 (via Schneider et al. 2017):
vmax = 0.465*np.sqrt(Guse*M200/r200)/\
np.sqrt(1.0/c200*np.log(1.0+c200)-(1.0+c200)**(-1.0))
return vmax/kms
###########################################################
#For DM mass profile and VSPs of GC mocks (for overlaying
#true solution):
def alpbetgamden(r,rho0,r0,alp,bet,gam):
return rho0*(r/r0)**(-gam)*(1.0+(r/r0)**alp)**((gam-bet)/alp)
def alpbetgamdlnrhodlnr(r,rho0,r0,alp,bet,gam):
return -gam + (gam-bet)*(r/r0)**alp*(1.0+(r/r0)**alp)**(-1.0)
def alpbetgammass(r,rho0,r0,alp,bet,gam):
den = rho0*(r/r0)**(-gam)*(1.0+(r/r0)**alp)**((gam-bet)/alp)
mass = np.zeros(len(r))
for i in range(3,len(r)):
mass[i] = integrator(4.0*np.pi*r[:i]**2.*den[:i],r[:i])
return mass
def alpbetgamsigr(r,rho0s,r0s,alps,bets,gams,rho0,r0,alp,bet,gam,ra):
nu = alpbetgamden(r,rho0s,r0s,alps,bets,gams)
mass = alpbetgammass(r,rho0,r0,alp,bet,gam)
gf = gfunc_osipkov(r,ra)
sigr = np.zeros(len(r))
for i in range(len(r)-3):
sigr[i] = 1.0/nu[i]/gf[i] * \
integrator(Guse*mass[i:]*nu[i:]/r[i:]**2.0*\
gf[i:],r[i:])
return sigr
def osipkov(r,r0):
return r**2.0/(r**2.0+r0**2.0)
def gfunc_osipkov(r,r0):
n0 = 2.0
bet0 = 0.0
betinf = 1.0
gfunc = r**(2.0*betinf)*\
((r0/r)**n0+1.0)**(2.0/n0*(betinf-bet0))
return gfunc
def alpbetgamvsp(rho0s,r0s,alps,bets,gams,rho0,r0,alp,bet,gam,ra):
intpnts = np.int(1e4)
r = np.logspace(np.log10(r0s/50.0),np.log10(500.0*r0s),\
np.int(intpnts))
nu = alpbetgamden(r,rho0s,r0s,alps,bets,gams)
massnu = alpbetgamden(r,rho0s,r0s,alps,bets,gams)
mass = alpbetgammass(r,rho0,r0,alp,bet,gam)
sigr = alpbetgamsigr(r,rho0s,r0s,alps,bets,gams,rho0,\
r0,alp,bet,gam,ra)
bet = osipkov(r,ra)
sigstar = np.zeros(len(r))
for i in range(1,len(r)-3):
sigstar[i] = 2.0*integrator(nu[i:]*r[i:]/\
np.sqrt(r[i:]**2.0-r[i-1]**2.0),\
r[i:])
#Normalise similarly to the data:
norm = integrator(sigstar*2.0*np.pi*r,r)
nu = nu / norm
sigstar = sigstar / norm
#VSPs:
vsp1 = \
integrator(2.0/5.0*Guse*mass*nu*(5.0-2.0*bet)*\
sigr*r,r)/1.0e12
vsp2 = \
integrator(4.0/35.0*Guse*mass*nu*(7.0-6.0*bet)*\
sigr*r**3.0,r)/1.0e12
#Richardson & Fairbairn zeta parameters:
Ntotuse = integrator(sigstar*r,r)
sigint = integrator(sigstar*r**3.0,r)
zeta_A = 9.0/10.0*Ntotuse*integrator(Guse*mass*nu*(\
5.0-2.0*bet)*sigr*r,r)/\
(integrator(Guse*mass*nu*r,r))**2.0
zeta_B = 9.0/35.0*Ntotuse**2.0*\
integrator(Guse*mass*nu*(7.0-6.0*bet)*sigr*r**3.0,r)/\
((integrator(Guse*mass*nu*r,r))**2.0*sigint)
return vsp1, vsp2, zeta_A, zeta_B
#Richardson-Fairbairn VSP estimators:
def richfair_vsp(vz,Rkin,mskin):
vsp1_RF = 1.0/(np.pi*2.0)*\
np.sum(vz**4.0*mskin)/np.sum(mskin)
vsp2_RF = 1.0/(np.pi*2.0)*\
np.sum(vz**4.0*mskin*Rkin**2.0)/np.sum(mskin*Rkin**2.0)
return vsp1_RF, vsp2_RF
###########################################################
#For optional central dark mass (e.g. remnants, black hole):
def plumden(r,pars):
return 3.0*pars[0]/(4.*np.pi*pars[1]**3.)*\
(1.0+r**2./pars[1]**2.)**(-5./2.)
def plummass(r,pars):
return pars[0]*r**3./(r**2.+pars[1]**2.)**(3./2.)
###########################################################
#For Jeans modelling:
def sigp(r1,r2,nu,Sigfunc,M,Mcentral,beta,betaf,nupars,Mpars,\
betpars,\
Mstar_rad,Mstar_prof,Mstar,Arot,Rhalf,G,rmin,rmax):
#Calculate projected velocity dispersion profiles
#given input *functions* nu(r); M(r); beta(r); betaf(r).
#Also input is an array Mstar_prof(Mstar_rad) describing the 3D
#cumulative stellar mass profile. This should be normalised
#so that it peaks at 1.0. The total stellar mass is passed in Mstar.
#Set up theta integration array:
intpnts = np.int(150)
thmin = 0.
bit = 1.e-5
thmax = np.pi/2.-bit
th = np.linspace(thmin,thmax,intpnts)
sth = np.sin(th)
cth = np.cos(th)
cth2 = cth**2.
#Set up rint interpolation array:
rint = np.logspace(np.log10(rmin),np.log10(rmax),intpnts)
#First calc sigr2(rint):
sigr2 = np.zeros(len(rint))
nur = nu(rint,nupars)
betafunc = betaf(rint,betpars,Rhalf,Arot)
for i in range(len(rint)):
rq = rint[i]/cth
Mq = M(rq,Mpars)+Mcentral(rq,Mpars)
if (Mstar > 0):
Mq = Mq+Mstar*np.interp(rq,Mstar_rad,Mstar_prof)
nuq = nu(rq,nupars)
betafuncq = betaf(rq,betpars,Rhalf,Arot)
sigr2[i] = 1./nur[i]/rint[i]/betafunc[i] * \
integrator(G*Mq*nuq*betafuncq*sth,th)
#And now the sig_LOS projection:
Sig = Sigfunc(rint,nupars)
sigLOS2 = np.zeros(len(rint))
for i in range(len(rint)):
rq = rint[i]/cth
nuq = nu(rq,nupars)
sigr2q = np.interp(rq,rint,sigr2,left=0,right=0)
betaq = beta(rq,betpars)
sigLOS2[i] = 2.0*rint[i]/Sig[i]*\
integrator((1.0-betaq*cth2)*nuq*sigr2q/cth2,th)
sigr2out = np.interp(r2,rint,sigr2,left=0,right=0)
sigLOS2out = np.interp(r2,rint,sigLOS2,left=0,right=0)
Sigout = np.interp(r1,rint,Sig,left=0,right=0)
return sigr2out,Sigout,sigLOS2out
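#Note (added for clarity, not in the original): the sigLOS2 loop above is the
#line-of-sight projection
# Sigma(R)*sigLOS^2(R) = 2*int_R^inf (1 - beta*R^2/r^2)*nu*sigr^2*r*dr/sqrt(r^2-R^2)
#evaluated with the substitution r = R/cos(theta), which maps the integral
#onto theta in [0, pi/2) and produces the (1 - beta*cth2)/cth2 weights.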
def sigp_vs(r1,r2,nu,Sigfunc,M,Mcentral,beta,betaf,nupars,Mpars,\
betpars,\
Mstar_rad,Mstar_prof,Mstar,Arot,Rhalf,G,rmin,rmax):
#Calculate projected velocity dispersion profiles
#given input *functions* nu(r); M(r); beta(r); betaf(r).
#Also input is an array Mstar_prof(Mstar_rad) describing the 3D
#cumulative stellar mass profile. This should be normalised
#so that it peaks at 1.0. The total stellar mass is passed in Mstar.
#Finally, the routine calculates a dimensional version of the
#fourth order "virial shape" parameters in Richardson & Fairbairn 2014
#described in their equations 8 and 9.
#Set up theta integration array:
intpnts = np.int(150)
thmin = 0.
bit = 1.e-5
thmax = np.pi/2.-bit
th = np.linspace(thmin,thmax,intpnts)
sth = np.sin(th)
cth = np.cos(th)
cth2 = cth**2.
#Set up rint interpolation array:
rintpnts = np.int(150)
rint = np.logspace(np.log10(rmin),\
np.log10(rmax),rintpnts)
#First calc sigr2(rint):
sigr2 = np.zeros(len(rint))
nur = nu(rint,nupars)
betafunc = betaf(rint,betpars,Rhalf,Arot)
for i in range(len(rint)):
rq = rint[i]/cth
Mq = M(rq,Mpars)+Mcentral(rq,Mpars)
if (Mstar > 0):
Mq = Mq+Mstar*np.interp(rq,Mstar_rad,Mstar_prof)
nuq = nu(rq,nupars)
betafuncq = betaf(rq,betpars,Rhalf,Arot)
sigr2[i] = 1./nur[i]/rint[i]/betafunc[i] * \
integrator(G*Mq*nuq*betafuncq*sth,th)
#And now the sig_LOS projection:
Sig = Sigfunc(rint,nupars)
sigLOS2 = np.zeros(len(rint))
for i in range(len(rint)):
rq = rint[i]/cth
nuq = nu(rq,nupars)
sigr2q = np.interp(rq,rint,sigr2,left=0,right=0)
betaq = beta(rq,betpars)
sigLOS2[i] = 2.0*rint[i]/Sig[i]*\
integrator((1.0-betaq*cth2)*nuq*sigr2q/cth2,th)
#And now the dimensional fourth order "virial shape"
#parameters:
betar = beta(rint,betpars)
Mr = M(rint,Mpars)+Mstar*np.interp(rint,Mstar_rad,Mstar_prof)
vs1 = 2.0/5.0*integrator(nur*(5.0-2.0*betar)*sigr2*\
G*Mr*rint,rint)
vs2 = 4.0/35.0*integrator(nur*(7.0-6.0*betar)*sigr2*\
G*Mr*rint**3.0,rint)
sigr2out = np.interp(r2,rint,sigr2,left=0,right=0)
sigLOS2out = np.interp(r2,rint,sigLOS2,left=0,right=0)
Sigout = np.interp(r1,rint,Sig,left=0,right=0)
return sigr2out,Sigout,sigLOS2out,vs1,vs2
def sigp_prop(r1,r2,r3,nu,Sigfunc,M,Mcentral,beta,betaf,nupars,Mpars,\
betpars,\
Mstar_rad,Mstar_prof,Mstar,Arot,Rhalf,G,rmin,rmax):
#Calculate projected velocity dispersion profiles
#given input *functions* nu(r); M(r); beta(r); betaf(r).
#Also input is an array Mstar_prof(Mstar_rad) describing the 3D
#cumulative stellar mass profile. This should be normalised
#so that it peaks at 1.0. The total stellar mass is passed in Mstar.
#Set up theta integration array:
intpnts = np.int(150)
thmin = 0.
bit = 1.e-5
thmax = np.pi/2.-bit
th = np.linspace(thmin,thmax,intpnts)
sth = np.sin(th)
cth = np.cos(th)
cth2 = cth**2.
rint = np.logspace(np.log10(rmin),np.log10(rmax),intpnts)
sigr2 = np.zeros(len(rint))
nur = nu(rint,nupars)
betafunc = betaf(rint,betpars,Rhalf,Arot)
for i in range(len(rint)):
rq = rint[i]/cth
Mq = M(rq,Mpars)+Mcentral(rq,Mpars)
if (Mstar > 0):
Mq = Mq+Mstar*np.interp(rq,Mstar_rad,Mstar_prof)
nuq = nu(rq,nupars)
betafuncq = betaf(rq,betpars,Rhalf,Arot)
sigr2[i] = 1./nur[i]/rint[i]/betafunc[i] * \
integrator(G*Mq*nuq*betafuncq*sth,th)
Sig = Sigfunc(rint,nupars)
sigLOS2 = np.zeros(len(rint))
sigpmr2 = np.zeros(len(rint))
sigpmt2 = np.zeros(len(rint))
for i in range(len(rint)):
rq = rint[i]/cth
nuq = nu(rq,nupars)
sigr2q = np.interp(rq,rint,sigr2,left=0,right=0)
betaq = beta(rq,betpars)
sigLOS2[i] = 2.0*rint[i]/Sig[i]*\
integrator((1.0-betaq*cth2)*nuq*sigr2q/cth2,th)
sigpmr2[i] = 2.0*rint[i]/Sig[i]*\
integrator((1.0-betaq+betaq*cth2)*nuq*sigr2q/cth2,th)
sigpmt2[i] = 2.0*rint[i]/Sig[i]*\
integrator((1.0-betaq)*nuq*sigr2q/cth2,th)
sigr2out = np.interp(r2,rint,sigr2,left=0,right=0)
sigLOS2out = np.interp(r2,rint,sigLOS2,left=0,right=0)
sigpmr2out = np.interp(r3,rint,sigpmr2,left=0,right=0)
sigpmt2out = np.interp(r3,rint,sigpmt2,left=0,right=0)
Sigout = np.interp(r1,rint,Sig,left=0,right=0)
return sigr2out,Sigout,sigLOS2out,sigpmr2out,sigpmt2out
def sigp_prop_vs(r1,r2,r3,nu,Sigfunc,M,Mcentral,beta,betaf,nupars,Mpars,\
betpars,\
Mstar_rad,Mstar_prof,Mstar,Arot,Rhalf,G,rmin,rmax):
#Calculate projected velocity dispersion profiles
#given input *functions* nu(r); M(r); beta(r); betaf(r).
#Also input is an array Mstar_prof(Mstar_rad) describing the 3D
#cumulative stellar mass profile. This should be normalised
#so that it peaks at 1.0. The total stellar mass is passed in Mstar.
#Set up theta integration array:
intpnts = np.int(150)
thmin = 0.
bit = 1.e-5
thmax = np.pi/2.-bit
th = np.linspace(thmin,thmax,intpnts)
sth = np.sin(th)
cth = np.cos(th)
cth2 = cth**2.
rint = np.logspace(np.log10(rmin),np.log10(rmax),intpnts)
sigr2 = np.zeros(len(rint))
nur = nu(rint,nupars)
betafunc = betaf(rint,betpars,Rhalf,Arot)
for i in range(len(rint)):
rq = rint[i]/cth
Mq = M(rq,Mpars)+Mcentral(rq,Mpars)
if (Mstar > 0):
Mq = Mq+Mstar*np.interp(rq,Mstar_rad,Mstar_prof)
nuq = nu(rq,nupars)
betafuncq = betaf(rq,betpars,Rhalf,Arot)
sigr2[i] = 1./nur[i]/rint[i]/betafunc[i] * \
integrator(G*Mq*nuq*betafuncq*sth,th)
Sig = Sigfunc(rint,nupars)
sigLOS2 = np.zeros(len(rint))
sigpmr2 = np.zeros(len(rint))
sigpmt2 = np.zeros(len(rint))
for i in range(len(rint)):
rq = rint[i]/cth
nuq = nu(rq,nupars)
sigr2q = np.interp(rq,rint,sigr2,left=0,right=0)
betaq = beta(rq,betpars)
sigLOS2[i] = 2.0*rint[i]/Sig[i]*\
integrator((1.0-betaq*cth2)*nuq*sigr2q/cth2,th)
sigpmr2[i] = 2.0*rint[i]/Sig[i]*\
integrator((1.0-betaq+betaq*cth2)*nuq*sigr2q/cth2,th)
sigpmt2[i] = 2.0*rint[i]/Sig[i]*\
integrator((1.0-betaq)*nuq*sigr2q/cth2,th)
sigr2out = np.interp(r2,rint,sigr2,left=0,right=0)
sigLOS2out = np.interp(r2,rint,sigLOS2,left=0,right=0)
sigpmr2out = np.interp(r3,rint,sigpmr2,left=0,right=0)
sigpmt2out = np.interp(r3,rint,sigpmt2,left=0,right=0)
Sigout = np.interp(r1,rint,Sig,left=0,right=0)
#And now the dimensional fourth order "virial shape"
#parameters:
betar = beta(rint,betpars)
Mr = M(rint,Mpars)+Mstar*np.interp(rint,Mstar_rad,Mstar_prof)
vs1 = 2.0/5.0*integrator(nur*(5.0-2.0*betar)*sigr2*\
G*Mr*rint,rint)
vs2 = 4.0/35.0*integrator(nur*(7.0-6.0*betar)*sigr2*\
G*Mr*rint**3.0,rint)
return sigr2out,Sigout,sigLOS2out,sigpmr2out,sigpmt2out,\
vs1,vs2
def beta(r,betpars):
bet0star = betpars[0]
betinfstar = betpars[1]
r0 = 10.**betpars[2]
n = betpars[3]
#Ensure stability at beta extremities:
if (bet0star > 0.98):
bet0star = 0.98
if (bet0star < -0.95):
bet0star = -0.95
if (betinfstar > 0.98):
betinfstar = 0.98
if (betinfstar < -0.95):
betinfstar = -0.95
bet0 = 2.0*bet0star / (1.0 + bet0star)
betinf = 2.0*betinfstar / (1.0 + betinfstar)
beta = bet0 + (betinf-bet0)*(1.0/(1.0 + (r0/r)**n))
return beta
def betaf(r,betpars,Rhalf,Arot):
bet0star = betpars[0]
betinfstar = betpars[1]
r0 = 10.**betpars[2]
n = betpars[3]
#Ensure stability at beta extremities:
if (bet0star > 0.98):
bet0star = 0.98
if (bet0star < -0.95):
bet0star = -0.95
if (betinfstar > 0.98):
betinfstar = 0.98
if (betinfstar < -0.95):
betinfstar = -0.95
bet0 = 2.0*bet0star / (1.0 + bet0star)
betinf = 2.0*betinfstar / (1.0 + betinfstar)
betafn = r**(2.0*betinf)*((r0/r)**n+1.0)**(2.0/n*(betinf-bet0))*\
np.exp(-2.0*Arot*r/Rhalf)
return betafn
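#Illustrative helper (not part of the original code and not used by
#gravsphere): the clamped bet0star/betinfstar values above are consistent
#with the symmetrised anisotropy betastar = beta/(2 - beta); inverting that
#relation gives the transformation used in beta() and betaf().
def betastar_to_beta(betastar):
    #Maps betastar in (-1, 1) onto the Binney anisotropy beta in (-inf, 1).
    return 2.0 * betastar / (1.0 + betastar)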
###########################################################
#For data binning:
def binthedata(R,ms,Nbin):
#Nbin is the number of particles / bin:
index = np.argsort(R)
right_bin_edge = np.zeros(len(R))
norm = np.zeros(len(R))
cnt = 0
jsum = 0
for i in range(len(R)):
if (jsum < Nbin):
norm[cnt] = norm[cnt] + ms[index[i]]
right_bin_edge[cnt] = R[index[i]]
jsum = jsum + ms[index[i]]
if (jsum >= Nbin):
jsum = 0.0
cnt = cnt + 1
right_bin_edge = right_bin_edge[:cnt]
norm = norm[:cnt]
surfden = np.zeros(cnt)
rbin = np.zeros(cnt)
for i in range(len(rbin)):
if (i == 0):
surfden[i] = norm[i] / \
(np.pi*right_bin_edge[i]**2.0)
rbin[i] = right_bin_edge[i]/2.0
else:
surfden[i] = norm[i] / \
(np.pi*right_bin_edge[i]**2.0-\
np.pi*right_bin_edge[i-1]**2.0)
rbin[i] = (right_bin_edge[i]+right_bin_edge[i-1])/2.0
surfdenerr = surfden / np.sqrt(Nbin)
#Calculate the projected half light radius &
#surface density integral:
Rhalf, Menc_tot = surf_renorm(rbin,surfden)
#And normalise the profile:
surfden = surfden / Menc_tot
surfdenerr = surfdenerr / Menc_tot
return rbin, surfden, surfdenerr, Rhalf
def surf_renorm(rbin,surfden):
#Calculate the integral of the surface density
#so that it can then be renormalised.
#Calculate also Rhalf.
ranal = np.linspace(0,10,np.int(5000))
surfden_ranal = np.interp(ranal,rbin,surfden,left=0,right=0)
Menc_tot = 2.0*np.pi*integrator(surfden_ranal*ranal,ranal)
Menc_half = 0.0
i = 3
while (Menc_half < Menc_tot/2.0):
Menc_half = 2.0*np.pi*\
integrator(surfden_ranal[:i]*ranal[:i],ranal[:i])
i = i + 1
Rhalf = ranal[i-1]
return Rhalf, Menc_tot
###########################################################
#For calculating confidence intervals:
def calcmedquartnine(array):
index = np.argsort(array,axis=0)
median = array[index[np.int(len(array)/2.)]]
sixlowi = np.int(16./100. * len(array))
sixhighi = np.int(84./100. * len(array))
ninelowi = np.int(2.5/100. * len(array))
ninehighi = np.int(97.5/100. * len(array))
nineninelowi = np.int(0.15/100. * len(array))
nineninehighi = np.int(99.85/100. * len(array))
sixhigh = array[index[sixhighi]]
sixlow = array[index[sixlowi]]
ninehigh = array[index[ninehighi]]
ninelow = array[index[ninelowi]]
nineninehigh = array[index[nineninehighi]]
nineninelow = array[index[nineninelowi]]
return median, sixlow, sixhigh, ninelow, ninehigh,\
nineninelow, nineninehigh
###########################################################
#For fitting the surface brightness:
def Sig_addpnts(x,y,yerr):
#If using neg. Plummer component, add some more
#"data points" at large & small radii bounded on
#zero and the outermost data point. This
#will disfavour models with globally
#negative tracer density.
addpnts = len(x)
xouter = np.max(x)
youter = np.min(y)
xinner = np.min(x)
yinner = np.max(y)
xadd_right = np.logspace(np.log10(xouter),\
np.log10(xouter*1000),addpnts)
yadd_right = np.zeros(addpnts) + youter/2.0
yerradd_right = yadd_right
xadd_left = np.logspace(np.log10(xinner),\
np.log10(xinner/1000),addpnts)
yadd_left = np.zeros(addpnts) + yinner
yerradd_left = yadd_left/2.0
x = np.concatenate((x,xadd_right))
y = np.concatenate((y,yadd_right))
yerr = np.concatenate((yerr,yerradd_right))
x = np.concatenate((xadd_left,x))
y = np.concatenate((yadd_left,y))
yerr = np.concatenate((yerradd_left,yerr))
return x,y,yerr
#For stellar and tracer profiles:
def multiplumden(r,pars):
Mpars = pars[0:np.int(len(pars)/2.0)]
apars = pars[np.int(len(pars)/2.0):len(pars)]
nplum = len(Mpars)
multplum = np.zeros(len(r))
for i in range(len(Mpars)):
if (multimode == 'seq'):
if (i == 0):
aparsu = apars[0]
else:
aparsu = apars[i] + apars[i-1]
else:
aparsu = apars[i]
multplum = multplum + \
3.0*Mpars[i]/(4.*np.pi*aparsu**3.)*\
(1.0+r**2./aparsu**2.)**(-5./2.)
return multplum
def multiplumsurf(r,pars):
Mpars = pars[0:np.int(len(pars)/2.0)]
apars = pars[np.int(len(pars)/2.0):len(pars)]
nplum = len(Mpars)
multplum = np.zeros(len(r))
for i in range(len(Mpars)):
if (multimode == 'seq'):
if (i == 0):
aparsu = apars[0]
else:
aparsu = apars[i] + apars[i-1]
else:
aparsu = apars[i]
multplum = multplum + \
Mpars[i]*aparsu**2.0 / \
(np.pi*(aparsu**2.0+r**2.0)**2.0)
return multplum
def multiplumdlnrhodlnr(r,pars):
Mpars = pars[0:np.int(len(pars)/2.0)]
apars = pars[np.int(len(pars)/2.0):len(pars)]
nplum = len(Mpars)
multplumden = np.zeros(len(r))
multplumdden = np.zeros(len(r))
for i in range(len(Mpars)):
if (multimode == 'seq'):
if (i == 0):
aparsu = apars[0]
else:
aparsu = apars[i] + apars[i-1]
else:
aparsu = apars[i]
multplumden = multplumden + \
3.0*Mpars[i]/(4.*np.pi*aparsu**3.)*\
(1.0+r**2./aparsu**2.)**(-5./2.)
multplumdden = multplumdden - \
15.0*Mpars[i]/(4.*np.pi*aparsu**3.)*\
r/aparsu**2.*(1.0+r**2./aparsu**2.)**(-7./2.)
return multplumdden*r/multplumden
def multiplummass(r,pars):
Mpars = pars[0:np.int(len(pars)/2.0)]
apars = pars[np.int(len(pars)/2.0):len(pars)]
nplum = len(Mpars)
multplum = np.zeros(len(r))
for i in range(len(Mpars)):
if (multimode == 'seq'):
if (i == 0):
aparsu = apars[0]
else:
aparsu = apars[i] + apars[i-1]
else:
aparsu = apars[i]
multplum = multplum + \
Mpars[i]*r**3./(r**2.+aparsu**2.)**(3./2.)
return multplum
def threeplumsurf(r,M1,M2,M3,a1,a2,a3):
return multiplumsurf(r,[M1,M2,M3,\
a1,a2,a3])
def threeplumden(r,M1,M2,M3,a1,a2,a3):
return multiplumden(r,[M1,M2,M3,\
a1,a2,a3])
def threeplummass(r,M1,M2,M3,a1,a2,a3):
return multiplummass(r,[M1,M2,M3,\
a1,a2,a3])
def Rhalf_func(M1,M2,M3,a1,a2,a3):
#Calculate projected half light radius for
#the threeplum model:
ranal = np.logspace(-3,1,np.int(500))
Mstar_surf = threeplumsurf(ranal,M1,M2,M3,a1,a2,a3)
Menc_half = 0.0
i = 3
while (Menc_half < (M1+M2+M3)/2.0):
Menc_half = 2.0*np.pi*\
integrator(Mstar_surf[:i]*ranal[:i],ranal[:i])
i = i + 1
Rhalf = ranal[i-1]
return Rhalf
###########################################################
#For fitting the velocity distribution in each bin [no errors]:
def monte(func,a,b,n):
#Function to perform fast 1D Monte-Carlo integration
#for convolution integrals:
xrand = np.random.uniform(a,b,n)
integral = func(xrand).sum()
return (b-a)/np.float(n)*integral
def velpdf_noerr(vz,theta):
vzmean = theta[0]
alp = theta[1]
bet = theta[2]
backamp = theta[3]
backmean = theta[4]
backsig = theta[5]
pdf = (1.0-backamp)*bet/(2.0*alp*gamma(1.0/bet))*\
np.exp(-(np.abs(vz-vzmean)/alp)**bet) + \
backamp/(np.sqrt(2.0*np.pi)*backsig)*\
np.exp(-0.5*(vz-backmean)**2.0/backsig**2.0)
return pdf
#For fitting the velocity distribution in each bin [fast]
#Uses an approximation to the true convolution integral.
def velpdffast(vz,vzerr,theta):
vzmean = theta[0]
bet = theta[2]
fgamma = gamma(1.0/bet)/gamma(3.0/bet)
alp = np.sqrt(theta[1]**2.0+vzerr**2.0*fgamma)
backamp = theta[3]
backmean = theta[4]
backsig = np.sqrt(theta[5]**2.0 + vzerr**2.0)
pdf = (1.0-backamp)*bet/(2.0*alp*gamma(1.0/bet))*\
np.exp(-(np.abs(vz-vzmean)/alp)**bet) + \
backamp/(np.sqrt(2.0*np.pi)*backsig)*\
np.exp(-0.5*(vz-backmean)**2.0/backsig**2.0)
return pdf
def velpdf_func(vz,vzerr,vzint,theta):
#Inner integral function for convolving
#velpdf with a Gaussian error PDF. Change
#this function to implement non-Gaussian
#errors.
vzmean = theta[0]
alp = theta[1]
bet = theta[2]
backamp = theta[3]
backmean = theta[4]
backsig = theta[5]
pdf = (1.0-backamp)*bet/(2.0*alp*gamma(1.0/bet))*\
np.exp(-(np.abs(vzint-vzmean)/alp)**bet)*\
1.0/(np.sqrt(2.0*np.pi)*vzerr)*\
np.exp(-0.5*(vz-vzint)**2.0/vzerr**2.0)+\
backamp/(np.sqrt(2.0*np.pi)*backsig)*\
np.exp(-0.5*(vzint-backmean)**2.0/backsig**2.0)*\
1.0/(np.sqrt(2.0*np.pi)*vzerr)*\
np.exp(-0.5*(vz-vzint)**2.0/vzerr**2.0)
return pdf
#For fitting the velocity distribution in each bin with
#full (expensive) convolution integral:
def velpdf(vz,vzerr,theta):
#Generalised Gaussian + Gaussian convolved with
#vzerr, assuming Gaussian errors:
vzmean = theta[0]
sig = vztwo_calc(theta)
vzlow = -sig*10+vzmean
vzhigh = sig*10+vzmean
if (type(vz) == np.ndarray):
pdf = np.zeros(len(vz))
for i in range(len(vz)):
pdf_func = lambda vzint : velpdf_func(vz[i],\
vzerr[i],vzint,theta)
pdf[i] = quad(pdf_func,vzlow,vzhigh)[0]
else:
pdf_func = lambda vzint : velpdf_func(vz,\
vzerr,vzint,theta)
pdf = quad(pdf_func,vzlow,vzhigh)[0]
return pdf
def velpdfmonte(vz,vzerr,theta):
#Generalised Gaussian + Gaussian convolved with
#vzerr, assuming Gaussian errors:
npnts = np.int(500)
vzmean = theta[0]
sig = vztwo_calc(theta)
vzlow = -sig*10+vzmean
vzhigh = sig*10+vzmean
if (type(vz) == np.ndarray):
pdf = np.zeros(len(vz))
for i in range(len(vz)):
pdf_func = lambda vzint : velpdf_func(vz[i],\
vzerr[i],vzint,theta)
pdf[i] = monte(pdf_func,vzlow,vzhigh,npnts)
else:
pdf_func = lambda vzint : velpdf_func(vz,\
vzerr,vzint,theta)
pdf = monte(pdf_func,vzlow,vzhigh,npnts)
return pdf
def vztwo_calc(theta):
#Calculate <vlos^2>^(1/2) from
#generalised Gaussian parameters:
alp = theta[1]
bet = theta[2]
return np.sqrt(alp**2.0*gamma(3.0/bet)/gamma(1.0/bet))
def vzfour_calc(theta):
#Calculate <vlos^4> from
#generalised Gaussian parameters:
alp = theta[1]
bet = theta[2]
sig = vztwo_calc(theta)
kurt = gamma(5.0/bet)*gamma(1.0/bet)/(gamma(3.0/bet))**2.0
return kurt*sig**4.0
def kurt_calc(theta):
#Calculate kurtosis from generalised
#Gaussian parameters:
alp = theta[1]
bet = theta[2]
kurt = gamma(5.0/bet)*gamma(1.0/bet)/(gamma(3.0/bet))**2.0
return kurt
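def velpdf_moment_check():
    #Sanity-check sketch (not part of the original code): for bet = 2 the
    #generalised Gaussian reduces to an ordinary Gaussian, so the kurtosis
    #from kurt_calc should be 3 and <vlos^2>^(1/2) from vztwo_calc should
    #equal alp/sqrt(2).
    theta = [0.0, 10.0, 2.0, 0.0, 0.0, 1.0]
    assert np.isclose(kurt_calc(theta), 3.0)
    assert np.isclose(vztwo_calc(theta), 10.0/np.sqrt(2.0))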
def vzfourfunc(ranal,rbin,vzfourbin):
#Interpolate and extrapolate
#vzfour(R) over and beyond the data:
vzfour = np.interp(ranal,rbin,vzfourbin)
return vzfour
#For calculating the Likelihood from the vsp array:
def vsppdf_calc(vsp):
#First bin the data:
nbins = 50
bins_plus_one = np.linspace(np.min(vsp),np.max(vsp),nbins+1)
bins = np.linspace(np.min(vsp),np.max(vsp),nbins)
vsp_pdf, bins_plus_one = np.histogram(vsp, bins=bins_plus_one)
vsp_pdf = vsp_pdf / np.max(vsp_pdf)
binsout = bins[vsp_pdf > 0]
vsp_pdfout = vsp_pdf[vsp_pdf > 0]
return binsout, vsp_pdfout
def vsp_pdf(vsp,bins,vsp_pdf):
return np.interp(vsp,bins,vsp_pdf,left=0,right=0)
|
justinreadREPO_NAMEgravspherePATH_START.@gravsphere_extracted@[email protected]@.PATH_END.py
|
{
"filename": "setup.py",
"repo_name": "dfm/emcee",
"repo_path": "emcee_extracted/emcee-main/setup.py",
"type": "Python"
}
|
#!/usr/bin/env python
# Inspired by:
# https://hynek.me/articles/sharing-your-labor-of-love-pypi-quick-and-dirty/
import codecs
import os
import re
from setuptools import find_packages, setup
# PROJECT SPECIFIC
NAME = "emcee"
PACKAGES = find_packages(where="src")
META_PATH = os.path.join("src", "emcee", "__init__.py")
CLASSIFIERS = [
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
]
INSTALL_REQUIRES = ["numpy"]
SETUP_REQUIRES = [
"setuptools>=40.6.0",
"setuptools_scm",
"wheel",
]
EXTRA_REQUIRE = {
"extras": ["h5py", "scipy"],
"tests": ["pytest", "pytest-cov", "coverage[toml]"],
}
# END PROJECT SPECIFIC
HERE = os.path.dirname(os.path.realpath(__file__))
def read(*parts):
with codecs.open(os.path.join(HERE, *parts), "rb", "utf-8") as f:
return f.read()
def find_meta(meta, meta_file=read(META_PATH)):
meta_match = re.search(
r"^__{meta}__ = ['\"]([^'\"]*)['\"]".format(meta=meta), meta_file, re.M
)
if meta_match:
return meta_match.group(1)
raise RuntimeError("Unable to find __{meta}__ string.".format(meta=meta))
if __name__ == "__main__":
setup(
name=NAME,
use_scm_version={
"write_to": os.path.join(
"src", NAME, "{0}_version.py".format(NAME)
),
"write_to_template": '__version__ = "{version}"\n',
},
author=find_meta("author"),
author_email=find_meta("email"),
maintainer=find_meta("author"),
maintainer_email=find_meta("email"),
url=find_meta("uri"),
project_urls={
"Source": "https://github.com/dfm/emcee",
},
license=find_meta("license"),
description=find_meta("description"),
long_description=read("README.rst"),
long_description_content_type="text/x-rst",
packages=PACKAGES,
package_dir={"": "src"},
include_package_data=True,
install_requires=INSTALL_REQUIRES,
setup_requires=SETUP_REQUIRES,
extras_require=EXTRA_REQUIRE,
classifiers=CLASSIFIERS,
zip_safe=False,
options={"bdist_wheel": {"universal": "1"}},
)
|
dfmREPO_NAMEemceePATH_START.@emcee_extracted@[email protected]@.PATH_END.py
|
{
"filename": "np_array_ops.py",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/python/ops/numpy_ops/np_array_ops.py",
"type": "Python"
}
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common array methods."""
# pylint: disable=g-direct-tensorflow-import
import builtins
import enum
import functools
import math
import numbers
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor as tensor_lib
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import array_ops_stack
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_assert
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import manip_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sort_ops
from tensorflow.python.ops.numpy_ops import np_arrays
from tensorflow.python.ops.numpy_ops import np_dtypes
from tensorflow.python.ops.numpy_ops import np_utils
from tensorflow.python.types import core as core_tf_types
from tensorflow.python.util import nest
from tensorflow.python.util import tf_export
newaxis = np.newaxis
tf_export.tf_export('experimental.numpy.newaxis', v1=[]).export_constant(
__name__, 'newaxis'
)
@tf_export.tf_export('experimental.numpy.empty', v1=[])
@np_utils.np_doc('empty')
def empty(shape, dtype=float): # pylint: disable=redefined-outer-name
return zeros(shape, dtype)
@tf_export.tf_export('experimental.numpy.empty_like', v1=[])
@np_utils.np_doc('empty_like')
def empty_like(a, dtype=None):
return zeros_like(a, dtype)
@tf_export.tf_export('experimental.numpy.zeros', v1=[])
@np_utils.np_doc('zeros')
def zeros(shape, dtype=float): # pylint: disable=redefined-outer-name
dtype = (
np_utils.result_type(dtype) if dtype else np_dtypes.default_float_type()
)
return array_ops.zeros(shape, dtype=dtype)
@tf_export.tf_export('experimental.numpy.zeros_like', v1=[])
@np_utils.np_doc('zeros_like')
def zeros_like(a, dtype=None): # pylint: disable=missing-docstring
dtype = np_utils.result_type_unary(a, dtype)
dtype = dtypes.as_dtype(dtype) # Work around b/149877262
return array_ops.zeros_like(a, dtype)
@tf_export.tf_export('experimental.numpy.ones', v1=[])
@np_utils.np_doc('ones')
def ones(shape, dtype=float): # pylint: disable=redefined-outer-name
if dtype:
dtype = np_utils.result_type(dtype)
return array_ops.ones(shape, dtype=dtype)
@tf_export.tf_export('experimental.numpy.ones_like', v1=[])
@np_utils.np_doc('ones_like')
def ones_like(a, dtype=None):
dtype = np_utils.result_type_unary(a, dtype)
return array_ops.ones_like(a, dtype)
@tf_export.tf_export('experimental.numpy.eye', v1=[])
@np_utils.np_doc('eye')
def eye(N, M=None, k=0, dtype=float): # pylint: disable=invalid-name,missing-docstring
if dtype:
dtype = np_utils.result_type(dtype)
if not M:
M = N
# Making sure N, M and k are `int`
N = int(N)
M = int(M)
k = int(k)
if k >= M or -k >= N:
# tf.linalg.diag will raise an error in this case
return zeros([N, M], dtype=dtype)
if k == 0:
return linalg_ops.eye(N, M, dtype=dtype)
# We need the precise length, otherwise tf.linalg.diag will raise an error
diag_len = builtins.min(N, M)
if k > 0:
if N >= M:
diag_len -= k
elif N + k > M:
diag_len = M - k
elif k <= 0:
if M >= N:
diag_len += k
elif M - k > N:
diag_len = N + k
diagonal_ = array_ops.ones([diag_len], dtype=dtype)
return array_ops.matrix_diag(diagonal=diagonal_, num_rows=N, num_cols=M, k=k)
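# Illustrative usage sketch (added for clarity; not part of the original
# TensorFlow source).
def _eye_usage_example():
  a = eye(3, 5, k=2)  # 3x5 matrix with ones on the second super-diagonal
  b = eye(4)  # ordinary 4x4 identity matrix
  return a, b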
@tf_export.tf_export('experimental.numpy.identity', v1=[])
@np_utils.np_doc('identity')
def identity(n, dtype=float):
return eye(N=n, M=n, dtype=dtype)
@tf_export.tf_export('experimental.numpy.full', v1=[])
@np_utils.np_doc('full')
def full(shape, fill_value, dtype=None): # pylint: disable=redefined-outer-name
if not isinstance(shape, np_arrays.ndarray):
shape = asarray(np_arrays.convert_to_tensor(shape, dtype_hint=np.int32))
shape = atleast_1d(shape)
fill_value = asarray(fill_value, dtype=dtype)
return array_ops.broadcast_to(fill_value, shape)
# Using doc only here since np full_like signature doesn't seem to have the
# shape argument (even though it exists in the documentation online).
@tf_export.tf_export('experimental.numpy.full_like', v1=[])
@np_utils.np_doc_only('full_like')
def full_like(a, fill_value, dtype=None, order='K', subok=True, shape=None): # pylint: disable=missing-docstring,redefined-outer-name
"""order, subok and shape arguments mustn't be changed."""
if order != 'K':
raise ValueError('Non-standard orders are not supported.')
if not subok:
raise ValueError('subok being False is not supported.')
if shape:
raise ValueError('Overriding the shape is not supported.')
a = asarray(a)
dtype = dtype or np_utils.result_type(a)
fill_value = asarray(fill_value, dtype=dtype)
return array_ops.broadcast_to(fill_value, array_ops.shape(a))
def _array_internal(val, dtype=None, copy=True, ndmin=0): # pylint: disable=redefined-outer-name
"""Main implementation of np.array()."""
result_t = val
if not isinstance(result_t, tensor_lib.Tensor):
dtype = np_utils.result_type_unary(result_t, dtype)
# We can't call `convert_to_tensor(result_t, dtype=dtype)` here because
# convert_to_tensor doesn't allow incompatible arguments such as (5.5, int)
# while np.array allows them. We need to convert-then-cast.
# EagerTensor conversion complains about "mixed types" when converting
# tensors with no dtype information. This is because it infers types based
# on one selected item in the list. So e.g. when converting [2., 2j]
# to a tensor, it will select float32 as the inferred type and not be able
    # to convert the list to a float32 tensor.
# Since we have some information about the final dtype we care about, we
# supply that information so that convert_to_tensor will do best-effort
# conversion to that dtype first.
result_t = np_arrays.convert_to_tensor(result_t, dtype_hint=dtype)
result_t = math_ops.cast(result_t, dtype=dtype)
elif dtype:
result_t = math_ops.cast(result_t, dtype)
if copy:
result_t = array_ops.identity(result_t)
max_ndmin = 32
if ndmin > max_ndmin:
raise ValueError(
f'ndmin bigger than allowable number of dimensions: {max_ndmin}.'
)
if ndmin == 0:
return result_t
ndims = array_ops.rank(result_t)
def true_fn():
old_shape = array_ops.shape(result_t)
new_shape = array_ops.concat(
[array_ops.ones(ndmin - ndims, dtypes.int32), old_shape], axis=0
)
return array_ops.reshape(result_t, new_shape)
result_t = np_utils.cond(
np_utils.greater(ndmin, ndims), true_fn, lambda: result_t
)
return result_t
# TODO(wangpeng): investigate whether we can make `copy` default to False.
# pylint: disable=g-short-docstring-punctuation,g-no-space-after-docstring-summary,g-doc-return-or-yield,g-doc-args
@tf_export.tf_export('experimental.numpy.array', v1=[])
@np_utils.np_doc_only('array')
def array(val, dtype=None, copy=True, ndmin=0): # pylint: disable=redefined-outer-name
"""Since Tensors are immutable, a copy is made only if val is placed on a
different device than the current one. Even if `copy` is False, a new Tensor
  may need to be built to satisfy `dtype` and `ndmin`. This is used only if `val`
is an ndarray or a Tensor.
""" # pylint:disable=g-docstring-missing-newline
if dtype:
dtype = np_utils.result_type(dtype)
return _array_internal(val, dtype, copy, ndmin)
# pylint: enable=g-short-docstring-punctuation,g-no-space-after-docstring-summary,g-doc-return-or-yield,g-doc-args
@tf_export.tf_export('experimental.numpy.asarray', v1=[])
@np_utils.np_doc('asarray')
def asarray(a, dtype=None):
if dtype:
dtype = np_utils.result_type(dtype)
if isinstance(a, np_arrays.ndarray) and (
not dtype or dtype == a.dtype.as_numpy_dtype
):
return a
return array(a, dtype, copy=False)
@tf_export.tf_export('experimental.numpy.asanyarray', v1=[])
@np_utils.np_doc('asanyarray')
def asanyarray(a, dtype=None):
return asarray(a, dtype)
@tf_export.tf_export('experimental.numpy.ascontiguousarray', v1=[])
@np_utils.np_doc('ascontiguousarray')
def ascontiguousarray(a, dtype=None):
return array(a, dtype, ndmin=1)
# Numerical ranges.
@tf_export.tf_export('experimental.numpy.arange', v1=[])
@np_utils.np_doc('arange')
def arange(start, stop=None, step=1, dtype=None):
"""Returns `step`-separated values in the range [start, stop).
Args:
start: Start of the interval. Included in the range.
stop: End of the interval. If not specified, `start` is treated as 0 and
`start` value is used as `stop`. If specified, it is not included in the
range if `step` is integer. When `step` is floating point, it may or may
not be included.
step: The difference between 2 consecutive values in the output range. It is
recommended to use `linspace` instead of using non-integer values for
`step`.
dtype: Optional. Type of the resulting ndarray. Could be a python type, a
NumPy type or a TensorFlow `DType`. If not provided, the largest type of
`start`, `stop`, `step` is used.
Raises:
ValueError: If step is zero.
"""
if not step:
raise ValueError('step must be non-zero.')
if dtype:
dtype = np_utils.result_type(dtype)
else:
if stop is None:
dtype = np_utils.result_type(start, step)
else:
dtype = np_utils.result_type(start, step, stop)
if step > 0 and (
(stop is not None and start > stop) or (stop is None and start < 0)
):
return array([], dtype=dtype)
if step < 0 and (
(stop is not None and start < stop) or (stop is None and start > 0)
):
return array([], dtype=dtype)
# TODO(srbs): There are some bugs when start or stop is float type and dtype
# is integer type.
return math_ops.cast(
math_ops.range(start, limit=stop, delta=step), dtype=dtype
)
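# Illustrative usage sketch (added for clarity; not part of the original
# TensorFlow source).
def _arange_usage_example():
  a = arange(5)  # [0, 1, 2, 3, 4]
  b = arange(2, 10, 3)  # [2, 5, 8]
  c = arange(5, 0, -2)  # [5, 3, 1]
  return a, b, c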
# Building matrices.
@tf_export.tf_export('experimental.numpy.diag', v1=[])
@np_utils.np_doc('diag')
def diag(v, k=0): # pylint: disable=missing-docstring
"""Raises an error if input is not 1- or 2-d."""
v = asarray(v)
v_rank = array_ops.rank(v)
v.shape.with_rank_at_most(2)
# TODO(nareshmodi): Consider a np_utils.Assert version that will fail during
# tracing time if the shape is known.
control_flow_assert.Assert(
np_utils.logical_or(math_ops.equal(v_rank, 1), math_ops.equal(v_rank, 2)),
[v_rank],
)
def _diag(v, k):
return np_utils.cond(
math_ops.equal(array_ops.size(v), 0),
lambda: array_ops.zeros([abs(k), abs(k)], dtype=v.dtype),
lambda: array_ops.matrix_diag(v, k=k),
)
def _diag_part(v, k):
v_shape = array_ops.shape(v)
v, k = np_utils.cond(
np_utils.logical_or(
np_utils.less_equal(k, -1 * np_utils.getitem(v_shape, 0)),
np_utils.greater_equal(k, np_utils.getitem(v_shape, 1)),
),
lambda: (array_ops.zeros([0, 0], dtype=v.dtype), 0),
lambda: (v, k),
)
result = array_ops.matrix_diag_part(v, k=k)
return result
result = np_utils.cond(
math_ops.equal(v_rank, 1), lambda: _diag(v, k), lambda: _diag_part(v, k)
)
return result
@tf_export.tf_export('experimental.numpy.diagonal', v1=[])
@np_utils.np_doc('diagonal')
def diagonal(a, offset=0, axis1=0, axis2=1): # pylint: disable=missing-docstring
a = asarray(a)
maybe_rank = a.shape.rank
if (
maybe_rank is not None
and offset == 0
and (axis1 == maybe_rank - 2 or axis1 == -2)
and (axis2 == maybe_rank - 1 or axis2 == -1)
):
return array_ops.matrix_diag_part(a)
a = moveaxis(a, (axis1, axis2), (-2, -1))
a_shape = array_ops.shape(a)
def _zeros(): # pylint: disable=missing-docstring
return (
array_ops.zeros(
array_ops.concat([a_shape[:-1], [0]], 0), dtype=a.dtype
),
0,
)
# All zeros since diag_part doesn't handle all possible k (aka offset).
# Written this way since cond will run shape inference on both branches,
# and diag_part shape inference will fail when offset is out of bounds.
a, offset = np_utils.cond(
np_utils.logical_or(
np_utils.less_equal(offset, -1 * np_utils.getitem(a_shape, -2)),
np_utils.greater_equal(offset, np_utils.getitem(a_shape, -1)),
),
_zeros,
lambda: (a, offset),
)
a = array_ops.matrix_diag_part(a, k=offset)
return a
@tf_export.tf_export('experimental.numpy.diagflat', v1=[])
@np_utils.np_doc('diagflat')
def diagflat(v, k=0):
v = asarray(v)
return diag(array_ops.reshape(v, [-1]), k)
def _promote_dtype(*arrays):
dtype = np_utils.result_type(*arrays)
def _fast_asarray(a):
if isinstance(a, np_arrays.ndarray) and dtype == a.dtype.as_numpy_dtype:
return a
return _array_internal(a, dtype=dtype, copy=False)
return [_fast_asarray(a) for a in arrays]
def _promote_dtype_binary(t1, t2):
dtype = np_utils._result_type_binary(t1, t2) # pylint: disable=protected-access
if not (
isinstance(t1, np_arrays.ndarray) and dtype == t1.dtype.as_numpy_dtype
):
t1 = _array_internal(t1, dtype=dtype, copy=False)
if not (
isinstance(t2, np_arrays.ndarray) and dtype == t2.dtype.as_numpy_dtype
):
t2 = _array_internal(t2, dtype=dtype, copy=False)
return t1, t2
@tf_export.tf_export('experimental.numpy.all', v1=[])
@np_utils.np_doc('all')
def all(a, axis=None, keepdims=None): # pylint: disable=redefined-builtin
a = asarray(a, dtype=bool)
return math_ops.reduce_all(input_tensor=a, axis=axis, keepdims=keepdims)
@tf_export.tf_export('experimental.numpy.any', v1=[])
@np_utils.np_doc('any')
def any(a, axis=None, keepdims=None): # pylint: disable=redefined-builtin
a = asarray(a, dtype=bool)
return math_ops.reduce_any(input_tensor=a, axis=axis, keepdims=keepdims)
@tf_export.tf_export('experimental.numpy.compress', v1=[])
@np_utils.np_doc('compress')
def compress(condition, a, axis=None): # pylint: disable=redefined-outer-name,missing-function-docstring
condition = asarray(condition, dtype=bool)
a = asarray(a)
if condition.ndim != 1:
raise ValueError('condition must be a 1-d array.')
# `np.compress` treats scalars as 1-d arrays.
if a.ndim == 0:
a = ravel(a)
if axis is None:
a = ravel(a)
axis = 0
if axis < 0:
axis += a.ndim
assert axis >= 0 and axis < a.ndim
# `tf.boolean_mask` requires the first dimensions of array and condition to
# match. `np.compress` pads condition with False when it is shorter.
condition_t = condition
a_t = a
if condition.shape[0] < a.shape[axis]:
padding = array_ops.fill([a.shape[axis] - condition.shape[0]], False)
condition_t = array_ops.concat([condition_t, padding], axis=0)
return array_ops.boolean_mask(tensor=a_t, mask=condition_t, axis=axis)
@tf_export.tf_export('experimental.numpy.copy', v1=[])
@np_utils.np_doc('copy')
def copy(a):
return array(a, copy=True)
def _maybe_promote_to_int(a):
if dtypes.as_dtype(a.dtype).is_integer:
# If a is an integer type and its precision is less than that of `int`,
# the output type will be `int`.
a_numpy_dtype = a.dtype.as_numpy_dtype
output_type = np.promote_types(a_numpy_dtype, int)
if output_type != a_numpy_dtype:
a = asarray(a, dtype=output_type)
return a
@tf_export.tf_export('experimental.numpy.cumprod', v1=[])
@np_utils.np_doc('cumprod')
def cumprod(a, axis=None, dtype=None): # pylint: disable=missing-docstring
a = asarray(a, dtype=dtype)
if dtype is None:
a = _maybe_promote_to_int(a)
# If axis is None, the input is flattened.
if axis is None:
a = ravel(a)
axis = 0
elif axis < 0:
axis += array_ops.rank(a)
return math_ops.cumprod(a, axis)
@tf_export.tf_export('experimental.numpy.cumsum', v1=[])
@np_utils.np_doc('cumsum')
def cumsum(a, axis=None, dtype=None): # pylint: disable=missing-docstring
a = asarray(a, dtype=dtype)
if dtype is None:
a = _maybe_promote_to_int(a)
# If axis is None, the input is flattened.
if axis is None:
a = ravel(a)
axis = 0
elif axis < 0:
axis += array_ops.rank(a)
return math_ops.cumsum(a, axis)
@tf_export.tf_export('experimental.numpy.imag', v1=[])
@np_utils.np_doc('imag')
def imag(val):
val = asarray(val)
# TODO(srbs): np.imag returns a scalar if `val` is a scalar, whereas we always
# return an ndarray.
return math_ops.imag(val)
_TO_INT_ = 0
_TO_FLOAT = 1
def _reduce(
tf_fn,
a,
axis=None,
dtype=None,
keepdims=None,
promote_int=_TO_INT_,
tf_bool_fn=None,
preserve_bool=False,
):
"""A general reduction function.
Args:
tf_fn: the TF reduction function.
a: the array to be reduced.
axis: (optional) the axis along which to do the reduction. If None, all
dimensions are reduced.
dtype: (optional) the dtype of the result.
keepdims: (optional) whether to keep the reduced dimension(s).
promote_int: how to promote integer and bool inputs. There are three
choices. (1) `_TO_INT_` always promotes them to np.int_ or np.uint; (2)
`_TO_FLOAT` always promotes them to a float type (determined by
dtypes.default_float_type); (3) None: don't promote.
tf_bool_fn: (optional) the TF reduction function for bool inputs. It will
only be used if `dtype` is explicitly set to `np.bool_` or if `a`'s dtype
is `np.bool_` and `preserve_bool` is True.
preserve_bool: a flag to control whether to use `tf_bool_fn` if `a`'s dtype
is `np.bool_` (some reductions such as np.sum convert bools to integers,
      while others such as np.max preserve bools).
Returns:
An ndarray.
"""
if dtype:
dtype = np_utils.result_type(dtype)
if keepdims is None:
keepdims = False
a = asarray(a, dtype=dtype)
if (
dtype == np.bool_ or preserve_bool and a.dtype == np.bool_
) and tf_bool_fn is not None:
return tf_bool_fn(input_tensor=a, axis=axis, keepdims=keepdims)
if dtype is None:
dtype = a.dtype.as_numpy_dtype
if np.issubdtype(dtype, np.integer) or dtype == np.bool_:
if promote_int == _TO_INT_:
# If a is an integer/bool type and whose bit width is less than np.int_,
# numpy up-casts it to np.int_ based on the documentation at
# https://numpy.org/doc/1.18/reference/generated/numpy.sum.html
if dtype == np.bool_:
is_signed = True
width = 8 # We can use any number here that is less than 64
else:
is_signed = np.issubdtype(dtype, np.signedinteger)
width = np.iinfo(dtype).bits
# Numpy int_ and uint are defined as 'long' and 'unsigned long', so
# should have the same bit width.
if ops.is_auto_dtype_conversion_enabled():
# We default to 32 bits when using auto dtype conversion semantics.
if width < np.iinfo(np.int32).bits:
if is_signed:
dtype = np.int32
else:
dtype = np.uint32
else:
if width < np.iinfo(np.int_).bits:
if is_signed:
dtype = np.int_
else:
dtype = np.uint
a = math_ops.cast(a, dtype)
elif promote_int == _TO_FLOAT:
# Use a default float type.
a = math_ops.cast(a, np_utils.result_type(float))
if isinstance(axis, tensor_lib.Tensor) and axis.dtype not in (
dtypes.int32,
dtypes.int64,
):
axis = math_ops.cast(axis, dtypes.int64)
return tf_fn(input_tensor=a, axis=axis, keepdims=keepdims)
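# Illustrative sketch of the promotion rules described above (added for
# clarity; not part of the original TensorFlow source).
def _reduce_promotion_example():
  bool_sum = sum(asarray([True, True, False]))  # promoted to integer: 2
  bool_max = amax(asarray([True, False]))  # preserves bool: True
  return bool_sum, bool_max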
# TODO (DarrenZhang01): Add `axis` support to the `size` API.
@tf_export.tf_export('experimental.numpy.size', v1=[])
@np_utils.np_doc('size')
def size(x, axis=None): # pylint: disable=missing-docstring
if axis is not None:
raise NotImplementedError(
'axis argument is not supported in the current `np.size` implementation'
)
if isinstance(x, (int, float, np.int32, np.int64, np.float32, np.float64)):
return 1
x = asarray(x)
if x.shape.is_fully_defined():
return np.prod(x.shape.as_list(), dtype=int)
else:
return array_ops.size_v2(x)
@tf_export.tf_export('experimental.numpy.sum', v1=[])
@np_utils.np_doc('sum')
def sum(a, axis=None, dtype=None, keepdims=None): # pylint: disable=redefined-builtin
return _reduce(
math_ops.reduce_sum,
a,
axis=axis,
dtype=dtype,
keepdims=keepdims,
tf_bool_fn=math_ops.reduce_any,
)
@tf_export.tf_export('experimental.numpy.prod', v1=[])
@np_utils.np_doc('prod')
def prod(a, axis=None, dtype=None, keepdims=None):
return _reduce(
math_ops.reduce_prod,
a,
axis=axis,
dtype=dtype,
keepdims=keepdims,
tf_bool_fn=math_ops.reduce_all,
)
@tf_export.tf_export('experimental.numpy.mean', v1=[])
@np_utils.np_doc('mean', unsupported_params=['out'])
def mean(a, axis=None, dtype=None, out=None, keepdims=None):
if out is not None:
raise ValueError('Setting out is not supported.')
return _reduce(
math_ops.reduce_mean,
a,
axis=axis,
dtype=dtype,
keepdims=keepdims,
promote_int=_TO_FLOAT,
)
@tf_export.tf_export('experimental.numpy.amax', v1=[])
@np_utils.np_doc('amax', unsupported_params=['out'])
def amax(a, axis=None, out=None, keepdims=None):
if out is not None:
raise ValueError('Setting out is not supported.')
return _reduce(
math_ops.reduce_max,
a,
axis=axis,
dtype=None,
keepdims=keepdims,
promote_int=None,
tf_bool_fn=math_ops.reduce_any,
preserve_bool=True,
)
@tf_export.tf_export('experimental.numpy.amin', v1=[])
@np_utils.np_doc('amin', unsupported_params=['out'])
def amin(a, axis=None, out=None, keepdims=None):
if out is not None:
raise ValueError('Setting out is not supported.')
return _reduce(
math_ops.reduce_min,
a,
axis=axis,
dtype=None,
keepdims=keepdims,
promote_int=None,
tf_bool_fn=math_ops.reduce_all,
preserve_bool=True,
)
@tf_export.tf_export('experimental.numpy.var', v1=[])
@np_utils.np_doc('var')
def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=None): # pylint: disable=missing-docstring
if dtype:
working_dtype = np_utils.result_type(a, dtype)
else:
working_dtype = None
if out is not None:
raise ValueError('Setting out is not supported.')
if ddof != 0:
# TF reduce_variance doesn't support ddof, so calculate it using raw ops.
def reduce_fn(input_tensor, axis, keepdims):
means = math_ops.reduce_mean(input_tensor, axis=axis, keepdims=True)
centered = input_tensor - means
if input_tensor.dtype in (dtypes.complex64, dtypes.complex128):
centered = math_ops.cast(
math_ops.real(centered * math_ops.conj(centered)),
input_tensor.dtype,
)
else:
centered = math_ops.square(centered)
squared_deviations = math_ops.reduce_sum(
centered, axis=axis, keepdims=keepdims
)
if axis is None:
n = array_ops.size(input_tensor)
else:
if axis < 0:
axis += array_ops.rank(input_tensor)
n = math_ops.reduce_prod(
array_ops.gather(array_ops.shape(input_tensor), axis)
)
n = math_ops.cast(n - ddof, input_tensor.dtype)
return math_ops.cast(math_ops.divide(squared_deviations, n), dtype)
else:
reduce_fn = math_ops.reduce_variance
result = _reduce(
reduce_fn,
a,
axis=axis,
dtype=working_dtype,
keepdims=keepdims,
promote_int=_TO_FLOAT,
)
if dtype:
result = math_ops.cast(result, dtype)
return result
@tf_export.tf_export('experimental.numpy.std', v1=[])
@np_utils.np_doc('std')
def std(a, axis=None, keepdims=None): # pylint: disable=missing-function-docstring
return _reduce(
math_ops.reduce_std,
a,
axis=axis,
dtype=None,
keepdims=keepdims,
promote_int=_TO_FLOAT,
)
@tf_export.tf_export('experimental.numpy.ravel', v1=[])
@np_utils.np_doc('ravel')
def ravel(a): # pylint: disable=missing-docstring
a = asarray(a)
return array_ops.reshape(a, [-1])
@tf_export.tf_export('experimental.numpy.real', v1=[])
@np_utils.np_doc('real')
def real(val):
val = asarray(val)
# TODO(srbs): np.real returns a scalar if val is a scalar, whereas we always
# return an ndarray.
return math_ops.real(val)
@tf_export.tf_export('experimental.numpy.repeat', v1=[])
@np_utils.np_doc('repeat')
def repeat(a, repeats, axis=None): # pylint: disable=missing-docstring
a = asarray(a)
original_shape = a._shape_as_list() # pylint: disable=protected-access
# Best effort recovery of the shape.
known_shape = original_shape is not None and None not in original_shape
if known_shape:
if not original_shape:
original_shape = (repeats,)
else:
repeats_np = np.ravel(np.array(repeats))
if repeats_np.size == 1:
repeats_np = repeats_np.item()
if axis is None:
original_shape = (repeats_np * np.prod(original_shape),)
else:
original_shape[axis] = repeats_np * original_shape[axis]
else:
if axis is None:
original_shape = (repeats_np.sum(),)
else:
original_shape[axis] = repeats_np.sum()
repeats = asarray(repeats)
result = array_ops.repeat(a, repeats, axis)
if known_shape:
result.set_shape(original_shape)
return result
@tf_export.tf_export('experimental.numpy.around', v1=[])
@np_utils.np_doc('around')
def around(a, decimals=0): # pylint: disable=missing-docstring
a = asarray(a)
dtype = a.dtype.as_numpy_dtype
factor = math.pow(10, decimals)
if np.issubdtype(dtype, np.inexact):
factor = math_ops.cast(factor, dtype)
else:
# Use float as the working dtype when a.dtype is exact (e.g. integer),
# because `decimals` can be negative.
float_dtype = np_utils.result_type(float)
a = a.astype(float_dtype)
factor = math_ops.cast(factor, float_dtype)
a = math_ops.multiply(a, factor)
a = math_ops.round(a)
a = math_ops.divide(a, factor)
return a.astype(dtype)
setattr(np_arrays.ndarray, '__round__', around)
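# Illustrative sketch of the exact-dtype path described above (added for
# clarity; not part of the original TensorFlow source): a negative `decimals`
# with integer input rounds to tens/hundreds and keeps the integer dtype.
def _around_usage_example():
  a = around(asarray(1234), decimals=-2)  # 1200, integer dtype preserved
  b = around(asarray(3.14159), decimals=2)  # ~3.14
  return a, b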
@tf_export.tf_export('experimental.numpy.reshape', v1=[])
@np_utils.np_doc('reshape')
def reshape(a, newshape, order='C'):
"""order argument can only b 'C' or 'F'."""
if order not in {'C', 'F'}:
raise ValueError('Unsupported order argument {}'.format(order))
a = asarray(a)
if isinstance(newshape, int):
newshape = [newshape]
if order == 'F':
r = array_ops.transpose(
array_ops.reshape(array_ops.transpose(a), newshape[::-1])
)
else:
r = array_ops.reshape(a, newshape)
return r
def _reshape_method_wrapper(a, *newshape, **kwargs):
order = kwargs.pop('order', 'C')
if kwargs:
raise ValueError('Unsupported arguments: {}'.format(kwargs.keys()))
if len(newshape) == 1 and not isinstance(newshape[0], int):
newshape = newshape[0]
return reshape(a, newshape, order=order)
@tf_export.tf_export('experimental.numpy.expand_dims', v1=[])
@np_utils.np_doc('expand_dims')
def expand_dims(a, axis):
a = asarray(a)
return array_ops.expand_dims(a, axis=axis)
@tf_export.tf_export('experimental.numpy.squeeze', v1=[])
@np_utils.np_doc('squeeze')
def squeeze(a, axis=None):
a = asarray(a)
return array_ops.squeeze(a, axis)
@tf_export.tf_export('experimental.numpy.flatten', v1=[])
@np_utils.np_doc('flatten', link=np_utils.NoLink())
def flatten(a, order='C'):
a = asarray(a)
if order == 'C' or order == 'A' or order == 'K':
# Row major.
return array_ops.reshape(a, [-1])
elif order == 'F':
# Column major
return array_ops.reshape(array_ops.transpose(a), [-1])
else:
raise ValueError(
'order can only be C, A, K (all row major) or F (column major).'
)
@tf_export.tf_export('experimental.numpy.transpose', v1=[])
@np_utils.np_doc('transpose')
def transpose(a, axes=None):
a = asarray(a)
if axes is not None:
axes = asarray(axes)
return array_ops.transpose(a=a, perm=axes)
@tf_export.tf_export('experimental.numpy.swapaxes', v1=[])
@np_utils.np_doc('swapaxes')
def swapaxes(a, axis1, axis2): # pylint: disable=missing-docstring
a = asarray(a)
def adjust_axes(axes, rank):
def f(x):
if isinstance(x, int):
if x < 0:
x = x + rank
else:
x = array_ops.where_v2(x < 0, np_utils.add(x, a_rank), x)
return x
return nest.map_structure(f, axes)
if (
a.shape.rank is not None
and isinstance(axis1, int)
and isinstance(axis2, int)
):
# This branch makes sure `perm` is statically known, to avoid a
# not-compile-time-constant XLA error.
a_rank = a.shape.rank
axis1, axis2 = adjust_axes((axis1, axis2), a_rank)
perm = list(range(a_rank))
perm[axis1] = axis2
perm[axis2] = axis1
else:
a_rank = array_ops.rank(a)
axis1, axis2 = adjust_axes((axis1, axis2), a_rank)
perm = math_ops.range(a_rank)
perm = array_ops.tensor_scatter_update(
perm, [[axis1], [axis2]], [axis2, axis1]
)
a = array_ops.transpose(a, perm)
return a
@tf_export.tf_export('experimental.numpy.moveaxis', v1=[])
@np_utils.np_doc('moveaxis')
def moveaxis(a, source, destination): # pylint: disable=missing-docstring
"""Raises ValueError if source, destination not in (-ndim(a), ndim(a))."""
if not source and not destination:
return a
a = asarray(a)
if isinstance(source, int):
source = (source,)
if isinstance(destination, int):
destination = (destination,)
if len(source) != len(destination):
    raise ValueError('The lengths of source and destination must be equal.')
a_rank = np_utils._maybe_static(array_ops.rank(a)) # pylint: disable=protected-access
def _correct_axis(axis, rank):
if axis < 0:
return axis + rank
return axis
source = tuple(_correct_axis(axis, a_rank) for axis in source)
destination = tuple(_correct_axis(axis, a_rank) for axis in destination)
if a.shape.rank is not None:
perm = [i for i in range(a_rank) if i not in source]
for dest, src in sorted(zip(destination, source)):
assert dest <= len(perm)
perm.insert(dest, src)
else:
r = math_ops.range(a_rank)
def _remove_indices(a, b):
"""Remove indices (`b`) from `a`."""
items = array_ops_stack.unstack(
sort_ops.sort(array_ops_stack.stack(b)), num=len(b)
)
i = 0
result = []
for item in items:
result.append(a[i:item])
i = item + 1
result.append(a[i:])
return array_ops.concat(result, 0)
minus_sources = _remove_indices(r, source)
minus_dest = _remove_indices(r, destination)
perm = array_ops.scatter_nd(
array_ops.expand_dims(minus_dest, 1), minus_sources, [a_rank]
)
perm = array_ops.tensor_scatter_update(
perm, array_ops.expand_dims(destination, 1), source
)
a = array_ops.transpose(a, perm)
return a
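# Illustrative usage sketch (added for clarity; not part of the original
# TensorFlow source).
def _moveaxis_usage_example():
  x = zeros([2, 3, 4])
  front_to_back = moveaxis(x, 0, -1)  # shape becomes (3, 4, 2)
  back_to_front = moveaxis(x, -1, 0)  # shape becomes (4, 2, 3)
  return front_to_back, back_to_front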
@tf_export.tf_export('experimental.numpy.pad', v1=[])
@np_utils.np_doc('pad')
def pad(array, pad_width, mode, **kwargs): # pylint: disable=redefined-outer-name
"""Only supports modes 'constant', 'reflect' and 'symmetric' currently."""
constant_values = kwargs.get('constant_values', 0)
if not (mode == 'constant' or mode == 'reflect' or mode == 'symmetric'):
raise ValueError('Unsupported padding mode: ' + mode)
mode = mode.upper()
array = asarray(array)
pad_width = asarray(pad_width, dtype=dtypes.int32)
return array_ops.pad(
tensor=array,
paddings=pad_width,
mode=mode,
constant_values=constant_values,
)
@tf_export.tf_export('experimental.numpy.take', v1=[])
@np_utils.np_doc('take')
def take(a, indices, axis=None, out=None, mode='clip'):
"""out argument is not supported, and default mode is clip."""
if out is not None:
raise ValueError('out argument is not supported in take.')
if mode not in {'raise', 'clip', 'wrap'}:
raise ValueError("Invalid mode '{}' for take".format(mode))
a = asarray(a)
indices = asarray(indices)
if axis is None:
a = array_ops.reshape(a, [-1])
axis = 0
axis_size = array_ops.shape(a, out_type=indices.dtype)[axis]
if mode == 'clip':
indices = clip_ops.clip_by_value(indices, 0, axis_size - 1)
elif mode == 'wrap':
indices = math_ops.floormod(indices, axis_size)
else:
raise ValueError("The 'raise' mode to take is not supported.")
return array_ops.gather(a, indices, axis=axis)
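# Illustrative sketch of the default 'clip' mode described above (added for
# clarity; not part of the original TensorFlow source).
def _take_usage_example():
  a = asarray([10, 20, 30, 40])
  clipped = take(a, [0, 2, 99])  # index 99 clips to 3 -> [10, 30, 40]
  wrapped = take(a, [5], mode='wrap')  # 5 mod 4 = 1 -> [20]
  return clipped, wrapped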
@tf_export.tf_export('experimental.numpy.where', v1=[])
@np_utils.np_doc_only('where')
def where(condition, x=None, y=None):
"""Raises ValueError if exactly one of x or y is not None."""
condition = asarray(condition, dtype=np.bool_)
if x is None and y is None:
return nonzero(condition)
elif x is not None and y is not None:
x, y = _promote_dtype(x, y)
return array_ops.where_v2(condition, x, y)
raise ValueError('Both x and y must be ndarrays, or both must be None.')
@tf_export.tf_export('experimental.numpy.select', v1=[])
@np_utils.np_doc('select')
def select(condlist, choicelist, default=0): # pylint: disable=missing-docstring
if len(condlist) != len(choicelist):
msg = 'condlist must have length equal to choicelist ({} vs {})'
raise ValueError(msg.format(len(condlist), len(choicelist)))
if not condlist:
raise ValueError('condlist must be non-empty')
choices = _promote_dtype(default, *choicelist)
choicelist = choices[1:]
output = choices[0]
# The traversal is in reverse order so we can return the first value in
# choicelist where condlist is True.
for cond, choice in zip(condlist[::-1], choicelist[::-1]):
output = where(cond, choice, output)
return output
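# Illustrative sketch of the reverse traversal described above (added for
# clarity; not part of the original TensorFlow source): where several
# conditions hold, the earliest entry of `condlist` wins.
def _select_usage_example():
  x = arange(6)
  # At x == 3 both conditions hold; the first (x < 4) takes precedence,
  # giving [0, 1, 2, 3, 40, 50].
  return select([x < 4, x > 2], [x, 10 * x], default=-1)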
@tf_export.tf_export('experimental.numpy.shape', v1=[])
@np_utils.np_doc(
'shape',
link=np_utils.Link(
'https://numpy.org/doc/1.18/reference/generated/numpy.shape.html'
),
)
def shape(a):
a = asarray(a)
return a.shape
@tf_export.tf_export('experimental.numpy.ndim', v1=[])
@np_utils.np_doc('ndim', link=np_utils.NoLink())
def ndim(a):
a = asarray(a)
return a.ndim
@tf_export.tf_export('experimental.numpy.isscalar', v1=[])
@np_utils.np_doc('isscalar')
def isscalar(num):
return ndim(num) == 0
def _boundaries_to_sizes(a, boundaries, axis):
"""Converting boundaries of splits to sizes of splits.
Args:
a: the array to be split.
boundaries: the boundaries, as in np.split.
axis: the axis along which to split.
Returns:
A list of sizes of the splits, as in tf.split.
"""
if axis >= len(a.shape):
    raise ValueError('axis %s is out of bounds for shape %s' % (axis, a.shape))
total_size = a.shape[axis]
sizes = []
sizes_sum = 0
prev = 0
for i, b in enumerate(boundaries):
size = b - prev
if size < 0:
raise ValueError(
'The %s-th boundary %s is smaller than the previous boundary %s'
% (i, b, prev)
)
size = builtins.min(size, builtins.max(0, total_size - sizes_sum))
sizes.append(size)
sizes_sum += size
prev = b
sizes.append(builtins.max(0, total_size - sizes_sum))
return sizes
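# Illustrative sketch (added for clarity; not part of the original TensorFlow
# source): split boundaries behave like the index form of np.split.
def _boundaries_to_sizes_example():
  a = zeros([8])
  return _boundaries_to_sizes(a, [2, 5], axis=0)  # [2, 3, 3]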
@tf_export.tf_export('experimental.numpy.split', v1=[])
@np_utils.np_doc('split')
def split(ary, indices_or_sections, axis=0):
ary = asarray(ary)
if not isinstance(indices_or_sections, int):
indices_or_sections = _boundaries_to_sizes(ary, indices_or_sections, axis)
return array_ops.split(ary, indices_or_sections, axis=axis)
def _split_on_axis(np_fun_name, axis): # pylint: disable=missing-function-docstring
@np_utils.np_doc(np_fun_name)
def f(ary, indices_or_sections):
# for 1-D array, hsplit becomes vsplit
new_axis = np_utils.cond(
math_ops.equal(axis, 1),
lambda: np_utils.cond( # pylint: disable=g-long-lambda
math_ops.equal(array_ops.rank(ary), 1), lambda: 0, lambda: axis
),
lambda: axis,
)
if isinstance(indices_or_sections, int):
ary_shape = ary.shape[new_axis]
if ary_shape is not None and ary_shape % indices_or_sections:
raise ValueError('array split does not result in an equal division')
return split(ary, indices_or_sections, axis=new_axis)
return f
vsplit = tf_export.tf_export('experimental.numpy.vsplit', v1=[])(
_split_on_axis('vsplit', axis=0)
)
hsplit = tf_export.tf_export('experimental.numpy.hsplit', v1=[])(
_split_on_axis('hsplit', axis=1)
)
dsplit = tf_export.tf_export('experimental.numpy.dsplit', v1=[])(
_split_on_axis('dsplit', axis=2)
)
@tf_export.tf_export('experimental.numpy.broadcast_to', v1=[])
@np_utils.np_doc('broadcast_to')
def broadcast_to(array, shape): # pylint: disable=redefined-outer-name
return full(shape, array)
@tf_export.tf_export('experimental.numpy.stack', v1=[])
@np_utils.np_doc('stack')
def stack(arrays, axis=0): # pylint: disable=missing-function-docstring
if isinstance(arrays, (np_arrays.ndarray, tensor_lib.Tensor)):
arrays = asarray(arrays)
if axis == 0:
return arrays
else:
return swapaxes(arrays, 0, axis)
arrays = _promote_dtype(*arrays) # pylint: disable=protected-access
unwrapped_arrays = [
a if isinstance(a, np_arrays.ndarray) else a for a in arrays
]
return asarray(array_ops_stack.stack(unwrapped_arrays, axis))
@tf_export.tf_export('experimental.numpy.hstack', v1=[])
@np_utils.np_doc('hstack')
def hstack(tup):
arrays = [atleast_1d(a) for a in tup]
arrays = _promote_dtype(*arrays) # pylint: disable=protected-access
unwrapped_arrays = [
a if isinstance(a, np_arrays.ndarray) else a for a in arrays
]
rank = array_ops.rank(unwrapped_arrays[0])
return np_utils.cond(
math_ops.equal(rank, 1),
lambda: array_ops.concat(unwrapped_arrays, axis=0),
lambda: array_ops.concat(unwrapped_arrays, axis=1),
)
@tf_export.tf_export('experimental.numpy.vstack', v1=[])
@np_utils.np_doc('vstack')
def vstack(tup):
arrays = [atleast_2d(a) for a in tup]
arrays = _promote_dtype(*arrays) # pylint: disable=protected-access
unwrapped_arrays = [
a if isinstance(a, np_arrays.ndarray) else a for a in arrays
]
return array_ops.concat(unwrapped_arrays, axis=0)
@tf_export.tf_export('experimental.numpy.dstack', v1=[])
@np_utils.np_doc('dstack')
def dstack(tup):
arrays = [atleast_3d(a) for a in tup]
arrays = _promote_dtype(*arrays) # pylint: disable=protected-access
unwrapped_arrays = [
a if isinstance(a, np_arrays.ndarray) else a for a in arrays
]
return array_ops.concat(unwrapped_arrays, axis=2)
def _pad_left_to(n, old_shape):
old_shape = asarray(old_shape, dtype=np.int32)
new_shape = array_ops.pad(
old_shape,
[[math_ops.maximum(n - array_ops.size(old_shape), 0), 0]],
constant_values=1,
)
return asarray(new_shape)
def _atleast_nd(n, new_shape, *arys):
"""Reshape arrays to be at least `n`-dimensional.
Args:
n: The minimal rank.
new_shape: a function that takes `n` and the old shape and returns the
desired new shape.
*arys: ndarray(s) to be reshaped.
Returns:
The reshaped array(s).
"""
def f(x):
# pylint: disable=g-long-lambda
x = asarray(x)
return asarray(
np_utils.cond(
np_utils.greater(n, array_ops.rank(x)),
lambda: reshape(x, new_shape(n, array_ops.shape(x))),
lambda: x,
)
)
arys = list(map(f, arys))
if len(arys) == 1:
return arys[0]
else:
return arys
@tf_export.tf_export('experimental.numpy.atleast_1d', v1=[])
@np_utils.np_doc('atleast_1d')
def atleast_1d(*arys):
return _atleast_nd(1, _pad_left_to, *arys)
@tf_export.tf_export('experimental.numpy.atleast_2d', v1=[])
@np_utils.np_doc('atleast_2d')
def atleast_2d(*arys):
return _atleast_nd(2, _pad_left_to, *arys)
@tf_export.tf_export('experimental.numpy.atleast_3d', v1=[])
@np_utils.np_doc('atleast_3d')
def atleast_3d(*arys): # pylint: disable=missing-docstring
def new_shape(_, old_shape):
# pylint: disable=g-long-lambda
ndim_ = array_ops.size(old_shape)
return np_utils.cond(
math_ops.equal(ndim_, 0),
lambda: constant_op.constant([1, 1, 1], dtype=dtypes.int32),
lambda: np_utils.cond(
math_ops.equal(ndim_, 1),
lambda: array_ops.pad(old_shape, [[1, 1]], constant_values=1),
lambda: array_ops.pad(old_shape, [[0, 1]], constant_values=1),
),
)
return _atleast_nd(3, new_shape, *arys)
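# Illustrative usage sketch (added for clarity; not part of the original
# TensorFlow source): ranks are only ever increased, never reduced.
def _atleast_nd_usage_example():
  a = atleast_1d(5)  # shape (1,)
  b = atleast_2d([1, 2, 3])  # shape (1, 3)
  c = atleast_3d(ones([2, 2]))  # shape (2, 2, 1)
  return a, b, c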
@tf_export.tf_export('experimental.numpy.nonzero', v1=[])
@np_utils.np_doc('nonzero')
def nonzero(a):
a = atleast_1d(a)
if a.shape.rank is None:
raise ValueError(
"The rank of `a` is unknown, so we can't decide how many "
'arrays to return.'
)
return array_ops_stack.unstack(
array_ops.where_v2(math_ops.cast(a, dtypes.bool)), a.shape.rank, axis=1
)
@tf_export.tf_export('experimental.numpy.diag_indices', v1=[])
@np_utils.np_doc('diag_indices')
def diag_indices(n, ndim=2): # pylint: disable=missing-docstring,redefined-outer-name
if n < 0:
raise ValueError(
'n argument to diag_indices must be nonnegative, got {}'.format(n)
)
if ndim < 0:
raise ValueError(
'ndim argument to diag_indices must be nonnegative, got {}'.format(ndim)
)
return (math_ops.range(n),) * ndim
@tf_export.tf_export('experimental.numpy.tri', v1=[])
@np_utils.np_doc('tri')
def tri(N, M=None, k=0, dtype=None): # pylint: disable=invalid-name,missing-docstring
M = M if M is not None else N
if dtype is not None:
dtype = np_utils.result_type(dtype)
else:
# Use a default float type.
dtype = np_utils.result_type(float)
if k < 0:
lower = -k - 1
if lower > N:
r = array_ops.zeros([N, M], dtype)
else:
# Keep as tf bool, since we create an upper triangular matrix and invert
# it.
o = array_ops.ones([N, M], dtype=dtypes.bool)
r = math_ops.cast(
math_ops.logical_not(array_ops.matrix_band_part(o, lower, -1)), dtype
)
else:
o = array_ops.ones([N, M], dtype)
if k > M:
r = o
else:
r = array_ops.matrix_band_part(o, -1, k)
return r
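# Illustrative usage sketch (added for clarity; not part of the original
# TensorFlow source).
def _tri_usage_example():
  lower = tri(3)  # ones on and below the main diagonal
  strict = tri(3, k=-1)  # strictly lower-triangular ones
  return lower, strict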
@tf_export.tf_export('experimental.numpy.tril', v1=[])
@np_utils.np_doc('tril')
def tril(m, k=0): # pylint: disable=missing-docstring
m = asarray(m)
if m.shape.ndims is None:
raise ValueError('Argument to tril should have known rank')
m_shape = m.shape.as_list()
if len(m_shape) < 2:
raise ValueError('Argument to tril must have rank at least 2')
if m_shape[-1] is None or m_shape[-2] is None:
raise ValueError(
'Currently, the last two dimensions of the input array '
'need to be known.'
)
z = constant_op.constant(0, m.dtype)
mask = tri(*m_shape[-2:], k=k, dtype=bool)
return array_ops.where_v2(
array_ops.broadcast_to(mask, array_ops.shape(m)), m, z
)
@tf_export.tf_export('experimental.numpy.triu', v1=[])
@np_utils.np_doc('triu')
def triu(m, k=0): # pylint: disable=missing-docstring
m = asarray(m)
if m.shape.ndims is None:
raise ValueError('Argument to triu should have known rank')
m_shape = m.shape.as_list()
if len(m_shape) < 2:
raise ValueError('Argument to triu must have rank at least 2')
if m_shape[-1] is None or m_shape[-2] is None:
raise ValueError(
'Currently, the last two dimensions of the input array '
'need to be known.'
)
z = constant_op.constant(0, m.dtype)
mask = tri(*m_shape[-2:], k=k - 1, dtype=bool)
return array_ops.where_v2(
array_ops.broadcast_to(mask, array_ops.shape(m)), z, m
)
@tf_export.tf_export('experimental.numpy.flip', v1=[])
@np_utils.np_doc('flip')
def flip(m, axis=None): # pylint: disable=missing-docstring
m = asarray(m)
if axis is None:
return array_ops.reverse(m, math_ops.range(array_ops.rank(m)))
axis = np_utils._canonicalize_axis(axis, array_ops.rank(m)) # pylint: disable=protected-access
return array_ops.reverse(m, [axis])
@tf_export.tf_export('experimental.numpy.flipud', v1=[])
@np_utils.np_doc('flipud')
def flipud(m): # pylint: disable=missing-docstring
return flip(m, 0)
@tf_export.tf_export('experimental.numpy.fliplr', v1=[])
@np_utils.np_doc('fliplr')
def fliplr(m): # pylint: disable=missing-docstring
return flip(m, 1)
@tf_export.tf_export('experimental.numpy.roll', v1=[])
@np_utils.np_doc('roll')
def roll(a, shift, axis=None): # pylint: disable=missing-docstring
a = asarray(a)
if axis is not None:
return manip_ops.roll(a, shift, axis)
# If axis is None, the roll happens as a 1-d tensor.
original_shape = array_ops.shape(a)
a = manip_ops.roll(array_ops.reshape(a, [-1]), shift, 0)
return array_ops.reshape(a, original_shape)
@tf_export.tf_export('experimental.numpy.rot90', v1=[])
@np_utils.np_doc('rot90')
def rot90(m, k=1, axes=(0, 1)): # pylint: disable=missing-docstring
m_rank = array_ops.rank(m)
ax1, ax2 = np_utils._canonicalize_axes(axes, m_rank) # pylint: disable=protected-access
k = k % 4
if k == 0:
return m
elif k == 2:
return flip(flip(m, ax1), ax2)
else:
perm = math_ops.range(m_rank)
perm = array_ops.tensor_scatter_update(perm, [[ax1], [ax2]], [ax2, ax1])
if k == 1:
return transpose(flip(m, ax2), perm)
else:
return flip(transpose(m, perm), ax2)
@tf_export.tf_export('experimental.numpy.vander', v1=[])
@np_utils.np_doc('vander')
def vander(x, N=None, increasing=False): # pylint: disable=missing-docstring,invalid-name
x = asarray(x)
x_shape = array_ops.shape(x)
if N is None:
N = x_shape[0]
N_temp = np_utils.get_static_value(N) # pylint: disable=invalid-name
if N_temp is not None:
N = N_temp
if N < 0:
raise ValueError('N must be nonnegative')
else:
control_flow_assert.Assert(N >= 0, [N])
rank = array_ops.rank(x)
rank_temp = np_utils.get_static_value(rank)
if rank_temp is not None:
rank = rank_temp
if rank != 1:
raise ValueError('x must be a one-dimensional array')
else:
control_flow_assert.Assert(math_ops.equal(rank, 1), [rank])
if increasing:
start = 0
limit = N
delta = 1
else:
start = N - 1
limit = -1
delta = -1
x = array_ops.expand_dims(x, -1)
return math_ops.pow(
x, math_ops.cast(math_ops.range(start, limit, delta), dtype=x.dtype)
)
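# Illustrative usage sketch (added for clarity; not part of the original
# TensorFlow source): powers decrease across columns by default, as in numpy.
def _vander_usage_example():
  return vander(asarray([1, 2, 3]))  # [[1, 1, 1], [4, 2, 1], [9, 3, 1]]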
@tf_export.tf_export('experimental.numpy.ix_', v1=[])
@np_utils.np_doc('ix_')
def ix_(*args): # pylint: disable=missing-docstring
n = len(args)
output = []
for i, a in enumerate(args):
a = asarray(a)
a_rank = array_ops.rank(a)
a_rank_temp = np_utils.get_static_value(a_rank)
if a_rank_temp is not None:
a_rank = a_rank_temp
if a_rank != 1:
raise ValueError(
'Arguments must be 1-d, got arg {} of rank {}'.format(i, a_rank)
)
else:
control_flow_assert.Assert(math_ops.equal(a_rank, 1), [a_rank])
new_shape = [1] * n
new_shape[i] = -1
dtype = a.dtype
if dtype == dtypes.bool:
output.append(array_ops.reshape(nonzero(a)[0], new_shape))
elif dtype.is_integer:
output.append(array_ops.reshape(a, new_shape))
else:
raise ValueError(
'Only integer and bool dtypes are supported, got {}'.format(dtype)
)
return output
@tf_export.tf_export('experimental.numpy.broadcast_arrays', v1=[])
@np_utils.np_doc('broadcast_arrays')
def broadcast_arrays(*args, **kwargs): # pylint: disable=missing-docstring
subok = kwargs.pop('subok', False)
if subok:
raise ValueError('subok=True is not supported.')
if kwargs:
raise ValueError('Received unsupported arguments {}'.format(kwargs.keys()))
args = [asarray(arg) for arg in args]
return np_utils.tf_broadcast(*args)
@tf_export.tf_export('experimental.numpy.sign', v1=[])
@np_utils.np_doc_only('sign')
def sign(x, out=None, where=None, **kwargs): # pylint: disable=missing-docstring,redefined-outer-name
if out:
    raise ValueError("tf.numpy doesn't support setting out.")
if where:
    raise ValueError("tf.numpy doesn't support setting where.")
if kwargs:
    raise ValueError("tf.numpy doesn't support setting {}".format(kwargs.keys()))
x = asarray(x)
# Numpy 2.x and later uses the same definition of sign.
if np.lib.NumpyVersion(np.__version__) >= '2.0.0.dev0':
return math_ops.sign(x)
dtype = x.dtype.as_numpy_dtype
if np.issubdtype(dtype, np.complexfloating):
result = math_ops.cast(math_ops.sign(math_ops.real(x)), dtype)
else:
result = math_ops.sign(x)
return result
# Note that np.take_along_axis may not be present in some supported versions of
# numpy.
@tf_export.tf_export('experimental.numpy.take_along_axis', v1=[])
@np_utils.np_doc('take_along_axis')
def take_along_axis(arr, indices, axis): # pylint: disable=missing-docstring
arr = asarray(arr)
indices = asarray(indices)
if axis is None:
return take_along_axis(arr.ravel(), indices, 0)
rank = array_ops.rank(arr)
axis = axis + rank if axis < 0 else axis
# Broadcast shapes to match, ensure that the axis of interest is not
# broadcast.
arr_shape_original = array_ops.shape(arr, out_type=indices.dtype)
indices_shape_original = array_ops.shape(indices, out_type=indices.dtype)
arr_shape = array_ops.tensor_scatter_update(arr_shape_original, [[axis]], [1])
indices_shape = array_ops.tensor_scatter_update(
indices_shape_original, [[axis]], [1]
)
broadcasted_shape = array_ops.broadcast_dynamic_shape(
arr_shape, indices_shape
)
arr_shape = array_ops.tensor_scatter_update(
broadcasted_shape, [[axis]], [arr_shape_original[axis]]
)
indices_shape = array_ops.tensor_scatter_update(
broadcasted_shape, [[axis]], [indices_shape_original[axis]]
)
arr = array_ops.broadcast_to(arr, arr_shape)
indices = array_ops.broadcast_to(indices, indices_shape)
# Save indices shape so we can restore it later.
possible_result_shape = indices.shape
# Correct indices since gather doesn't correctly handle negative indices.
indices = array_ops.where_v2(indices < 0, indices + arr_shape[axis], indices)
swapaxes_ = lambda t: swapaxes(t, axis, -1)
dont_move_axis_to_end = math_ops.equal(axis, np_utils.subtract(rank, 1))
arr = np_utils.cond(
dont_move_axis_to_end, lambda: arr, lambda: swapaxes_(arr)
)
indices = np_utils.cond(
dont_move_axis_to_end, lambda: indices, lambda: swapaxes_(indices)
)
arr_shape = array_ops.shape(arr)
arr = array_ops.reshape(arr, [-1, arr_shape[-1]])
indices_shape = array_ops.shape(indices)
indices = array_ops.reshape(indices, [-1, indices_shape[-1]])
result = array_ops.gather(arr, indices, batch_dims=1)
result = array_ops.reshape(result, indices_shape)
result = np_utils.cond(
dont_move_axis_to_end, lambda: result, lambda: swapaxes_(result)
)
result.set_shape(possible_result_shape)
return result
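# Illustrative usage sketch (added for clarity; not part of the original
# TensorFlow source): gather one entry per row using per-row indices.
def _take_along_axis_usage_example():
  arr = asarray([[10, 30, 20], [60, 40, 50]])
  idx = asarray([[1], [2]])
  return take_along_axis(arr, idx, axis=1)  # [[30], [50]]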
# pylint: disable=redefined-builtin,undefined-variable
@tf_export.tf_export('experimental.numpy.max', v1=[])
@np_utils.np_doc('max', link=np_utils.AliasOf('amax'))
def max(a, axis=None, keepdims=None):
return amax(a, axis=axis, keepdims=keepdims)
@tf_export.tf_export('experimental.numpy.min', v1=[])
@np_utils.np_doc('min', link=np_utils.AliasOf('amin'))
def min(a, axis=None, keepdims=None):
return amin(a, axis=axis, keepdims=keepdims)
@tf_export.tf_export('experimental.numpy.round', v1=[])
@np_utils.np_doc('round', link=np_utils.AliasOf('around'))
def round(a, decimals=0):
return around(a, decimals=decimals)
# pylint: enable=redefined-builtin,undefined-variable
_SLICE_ERROR = (
'only integers, slices (`:`), ellipsis (`...`), '
'numpy.newaxis (`None`) and integer or boolean arrays are valid indices'
)
def _as_index(idx, need_scalar=True):
"""Helper function to parse idx as an index.
Args:
idx: index
need_scalar: If idx needs to be a scalar value.
Returns:
A pair, (indx, bool). First one is the parsed index and can be a tensor,
or scalar integer / Dimension. Second one is True if rank is known to be 0.
Raises:
IndexError: For incorrect indices.
"""
if isinstance(idx, (numbers.Integral, tensor_shape.Dimension)):
return idx, True
data = asarray(idx)
if data.dtype == dtypes.bool:
if data.shape.ndims != 1:
# TODO(agarwal): handle higher rank boolean masks.
raise NotImplementedError('Need rank 1 for bool index %s' % idx)
data = array_ops.where_v2(data)
data = array_ops.reshape(data, [-1])
if need_scalar and data.shape.rank not in (None, 0):
raise IndexError(_SLICE_ERROR + ', got {!r}'.format(idx))
np_dtype = data.dtype.as_numpy_dtype
if not np.issubdtype(np_dtype, np.integer):
raise IndexError(_SLICE_ERROR + ', got {!r}'.format(idx))
if data.dtype not in (dtypes.int64, dtypes.int32):
# TF slicing can only handle int32/int64. So we need to cast.
promoted_dtype = np.promote_types(np.int32, np_dtype)
if promoted_dtype == np.int32:
data = math_ops.cast(data, dtypes.int32)
elif promoted_dtype == np.int64:
data = math_ops.cast(data, dtypes.int64)
else:
raise IndexError(_SLICE_ERROR + ', got {!r}'.format(idx))
return data, data.shape.rank == 0
class _UpdateMethod(enum.Enum):
UPDATE = 0
ADD = 1
MIN = 2
MAX = 3
def _slice_helper(tensor, slice_spec, update_method=None, updates=None):
"""Helper function for __getitem__ and _with_index_update_helper.
This function collects the indices in `slice_spec` into two buckets, which we
can call "idx1" and "idx2" here. idx1 is intended for `strided_slice`, idx2
`gather`. They also correspond to "basic indices" and "advanced indices" in
numpy. This function supports both reading and writing at the indices. The
reading path can be summarized as `gather(stride_slice(tensor, idx1),
idx2)`. The writing path can be summarized as `strided_slice_update(tensor,
idx1, scatter(strided_slice(tensor, idx1), idx2, updates))`. (`gather` here
means `tf.gather` or `tf.gather_nd`; `scatter` here means
`tf.tensor_scatter_update`.) The writing path is inefficient because it needs
to first read out a portion (probably much larger than `updates`) of `tensor`
using `strided_slice`, update it, and then write the portion back. An
alternative approach is to only use `scatter`, which amounts to using the
indexing mechanism of gather/scatter to implement
strided_slice/strided_slice_update. This is feasible for XLA Gather/Scatter
because they support spans (e.g. `2:5`) in indices (as begin/end pairs), but
not TF gather/scatter because they don't support spans (except those that
cover entire dimensions, i.e. `:`). If we materialize spans into individual
indices, the size of the index tensor would explode. (Note that XLA
Gather/Scatter have a similar problem for stride > 1 because they don't
support strides. Indices such as `1:2:8` will need to be materialized into
individual indices such as [1, 3, 5, 7].)
Args:
tensor: the tensor to be read from or write into.
slice_spec: the indices.
update_method: (optional) a member of `_UpdateMethod`, indicating how to
update the values (replacement, add, etc.). `None` indicates just reading.
updates: (optional) the new values to write into `tensor`. It must have the
same dtype as `tensor`.
Returns:
The result of reading (if `update_method` is `None`) or the updated `tensor`
after writing.
"""
begin, end, strides = [], [], []
new_axis_mask, shrink_axis_mask = 0, 0
begin_mask, end_mask = 0, 0
ellipsis_mask = 0
advanced_indices = []
shrink_indices = []
for index, s in enumerate(slice_spec):
if isinstance(s, slice):
if s.start is not None:
begin.append(_as_index(s.start)[0])
else:
begin.append(0)
begin_mask |= 1 << index
if s.stop is not None:
end.append(_as_index(s.stop)[0])
else:
end.append(0)
end_mask |= 1 << index
if s.step is not None:
strides.append(_as_index(s.step)[0])
else:
strides.append(1)
elif s is Ellipsis:
begin.append(0)
end.append(0)
strides.append(1)
ellipsis_mask |= 1 << index
elif s is array_ops.newaxis:
begin.append(0)
end.append(0)
strides.append(1)
new_axis_mask |= 1 << index
else:
s, is_scalar = _as_index(s, False)
if is_scalar:
begin.append(s)
end.append(s + 1)
strides.append(1)
shrink_axis_mask |= 1 << index
shrink_indices.append(index)
else:
begin.append(0)
end.append(0)
strides.append(1)
begin_mask |= 1 << index
end_mask |= 1 << index
advanced_indices.append((index, s, ellipsis_mask != 0))
  # stack possibly involves no tensors, so we must use op_scope to pick the
  # correct graph.
with ops.name_scope(
None,
'strided_slice',
[tensor] + begin + end + strides,
skip_on_eager=False,
) as name:
if begin:
packed_begin, packed_end, packed_strides = (
array_ops_stack.stack(begin),
array_ops_stack.stack(end),
array_ops_stack.stack(strides),
)
if (
packed_begin.dtype == dtypes.int64
or packed_end.dtype == dtypes.int64
or packed_strides.dtype == dtypes.int64
):
if packed_begin.dtype != dtypes.int64:
packed_begin = math_ops.cast(packed_begin, dtypes.int64)
if packed_end.dtype != dtypes.int64:
packed_end = math_ops.cast(packed_end, dtypes.int64)
if packed_strides.dtype != dtypes.int64:
packed_strides = math_ops.cast(packed_strides, dtypes.int64)
else:
var_empty = constant_op.constant([], dtype=dtypes.int32)
packed_begin = packed_end = packed_strides = var_empty
if update_method == _UpdateMethod.UPDATE and not advanced_indices:
return array_ops.tensor_strided_slice_update(
tensor,
packed_begin,
packed_end,
packed_strides,
updates,
begin_mask=begin_mask,
end_mask=end_mask,
shrink_axis_mask=shrink_axis_mask,
new_axis_mask=new_axis_mask,
ellipsis_mask=ellipsis_mask,
name=name,
)
else:
# TODO(b/164251540): Find a better way to support update that does not
# involve one read + two writes.
if updates is not None:
original_tensor = tensor
# TODO(agarwal): set_shape on tensor to set rank.
tensor = array_ops.strided_slice(
tensor,
packed_begin,
packed_end,
packed_strides,
begin_mask=begin_mask,
end_mask=end_mask,
shrink_axis_mask=shrink_axis_mask,
new_axis_mask=new_axis_mask,
ellipsis_mask=ellipsis_mask,
name=name,
)
if not advanced_indices:
if update_method is None:
return tensor
assert update_method != _UpdateMethod.UPDATE
# TF lacks TensorStridedSliceAdd and alike, so we need to do
# read+add+update.
if update_method == _UpdateMethod.ADD:
update_op = math_ops.add
elif update_method == _UpdateMethod.MIN:
update_op = math_ops.minimum
elif update_method == _UpdateMethod.MAX:
update_op = math_ops.maximum
return array_ops.tensor_strided_slice_update(
original_tensor,
packed_begin,
packed_end,
packed_strides,
update_op(tensor, updates),
begin_mask=begin_mask,
end_mask=end_mask,
shrink_axis_mask=shrink_axis_mask,
new_axis_mask=new_axis_mask,
ellipsis_mask=ellipsis_mask,
name=name + '_2',
)
advanced_indices_map = {}
for index, data, had_ellipsis in advanced_indices:
if had_ellipsis:
num_shrink = len([x for x in shrink_indices if x > index])
dim = index - len(slice_spec) + num_shrink
else:
num_shrink = len([x for x in shrink_indices if x < index])
dim = index - num_shrink
advanced_indices_map[dim] = data
dims = sorted(advanced_indices_map.keys())
dims_contiguous = True
if len(dims) > 1:
if dims[0] < 0 and dims[-1] >= 0: # not all same sign
dims_contiguous = False
else:
for i in range(len(dims) - 1):
if dims[i] + 1 != dims[i + 1]:
dims_contiguous = False
break
indices = [advanced_indices_map[x] for x in dims]
indices = _promote_dtype(*indices)
indices = np_utils.tf_broadcast(*indices)
stacked_indices = array_ops_stack.stack(indices, axis=-1)
# Skip the contiguous-dims optimization for update because there is no
# tf.*scatter* op that supports the `axis` argument.
if not dims_contiguous or updates is not None:
if range(len(dims)) != dims:
tensor = moveaxis(tensor, dims, range(len(dims)))
tensor_shape_prefix = array_ops.shape(
tensor, out_type=stacked_indices.dtype
)[: len(dims)]
stacked_indices = array_ops.where_v2(
stacked_indices < 0,
stacked_indices + tensor_shape_prefix,
stacked_indices,
)
if updates is None:
return array_ops.gather_nd(tensor, stacked_indices)
else:
      # We only need to move-axis `updates` in the contiguous case because
# only in this case the result dimensions of advanced indexing are in
# the middle of `updates`. In the non-contiguous case, those dimensions
# are always at the front.
if dims_contiguous:
# TODO(wangpeng): Support unknown rank (e.g. by partially flattening
# `updates`)
if stacked_indices.shape.rank is None:
raise NotImplementedError(
'Rank of the advanced indices must currently be known'
)
batch_size = stacked_indices.shape.rank - 1
batch_start = dims[0]
if batch_start < 0:
batch_start += len(dims) - batch_size
def range_(start, length):
return range(start, start + length)
updates = moveaxis(
updates, range_(batch_start, batch_size), range(batch_size)
)
if update_method == _UpdateMethod.UPDATE:
update_op = array_ops.tensor_scatter_update
elif update_method == _UpdateMethod.ADD:
update_op = array_ops.tensor_scatter_add
elif update_method == _UpdateMethod.MIN:
update_op = array_ops.tensor_scatter_min
elif update_method == _UpdateMethod.MAX:
update_op = array_ops.tensor_scatter_max
tensor = update_op(tensor, stacked_indices, updates)
if range(len(dims)) != dims:
tensor = moveaxis(tensor, range(len(dims)), dims)
return array_ops.tensor_strided_slice_update(
original_tensor,
packed_begin,
packed_end,
packed_strides,
tensor,
begin_mask=begin_mask,
end_mask=end_mask,
shrink_axis_mask=shrink_axis_mask,
new_axis_mask=new_axis_mask,
ellipsis_mask=ellipsis_mask,
name=name + '_2',
)
# Note that gather_nd does not support gathering from inside the array.
# To avoid shuffling data back and forth, we transform the indices and
# do a gather instead.
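# For example, for a tensor of shape (2, 3, 4) with advanced indices on
# dims = [1, 2]: dim_sizes = [3, 4] and index_scaling =
# cumprod([3, 4], reverse=True, exclusive=True) = [4, 1], so an index pair
# (i, j) maps to i * 4 + j along a single flattened axis of size 12; the
# tensor is reshaped to (2, 12) and gathered on axis 1.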
rank = np_utils._maybe_static(array_ops.rank(tensor)) # pylint: disable=protected-access
dims = [(x + rank if x < 0 else x) for x in dims]
shape_tensor = array_ops.shape(tensor)
dim_sizes = array_ops.gather(shape_tensor, dims)
if len(dims) == 1:
stacked_indices = indices[0]
stacked_indices = math_ops.cast(stacked_indices, dtypes.int32)
stacked_indices = array_ops.where_v2(
stacked_indices < 0, stacked_indices + dim_sizes, stacked_indices
)
axis = dims[0]
if len(dims) > 1:
index_scaling = math_ops.cumprod(dim_sizes, reverse=True, exclusive=True)
def _tensordot(a, b):
# TODO(b/168657656): This function should be replaced by
# tensordot(axis=1) once MatMul has int32 XLA kernel.
b = array_ops.broadcast_to(b, array_ops.shape(a))
return math_ops.reduce_sum(a * b, axis=-1)
stacked_indices = _tensordot(stacked_indices, index_scaling)
flat_shape = array_ops.concat(
[shape_tensor[:axis], [-1], shape_tensor[axis + len(dims) :]], axis=0
)
tensor = array_ops.reshape(tensor, flat_shape)
return array_ops.gather(tensor, stacked_indices, axis=axis)
def _as_spec_tuple(slice_spec):
"""Convert slice_spec to tuple."""
if isinstance(slice_spec, (list, tuple)) and not isinstance(
slice_spec, np.ndarray
):
is_index = True
for s in slice_spec:
if s is None or s is Ellipsis or isinstance(s, (list, tuple, slice)):
is_index = False
break
elif isinstance(s, (np_arrays.ndarray, np.ndarray)) and s.ndim != 0:
is_index = False
break
if not is_index:
return tuple(slice_spec)
return (slice_spec,)
def _getitem(self, slice_spec):
"""Implementation of ndarray.__getitem__."""
if (
isinstance(slice_spec, bool)
or (
isinstance(slice_spec, core_tf_types.Tensor)
and slice_spec.dtype == dtypes.bool
)
or (
isinstance(slice_spec, (np.ndarray, np_arrays.ndarray))
and slice_spec.dtype == np.bool_
)
):
return array_ops.boolean_mask(tensor=self, mask=slice_spec)
if not isinstance(slice_spec, tuple):
slice_spec = _as_spec_tuple(slice_spec)
result_t = _slice_helper(self, slice_spec)
return result_t
def _with_index_update_helper(update_method, a, slice_spec, updates):
"""Implementation of ndarray._with_index_*."""
if (
isinstance(slice_spec, bool)
or (
isinstance(slice_spec, core_tf_types.Tensor)
and slice_spec.dtype == dtypes.bool
)
or (
isinstance(slice_spec, (np.ndarray, np_arrays.ndarray))
and slice_spec.dtype == np.bool_
)
):
slice_spec = nonzero(slice_spec)
if not isinstance(slice_spec, tuple):
slice_spec = _as_spec_tuple(slice_spec)
a_dtype = a.dtype
a, updates = _promote_dtype_binary(a, updates)
result_t = _slice_helper(a, slice_spec, update_method, updates)
return result_t.astype(a_dtype)
setattr(np_arrays.ndarray, '_numpy_style_getitem', _getitem)
setattr(
np_arrays.ndarray,
'_with_index_update',
functools.partial(_with_index_update_helper, _UpdateMethod.UPDATE),
)
setattr(
np_arrays.ndarray,
'_with_index_add',
functools.partial(_with_index_update_helper, _UpdateMethod.ADD),
)
setattr(
np_arrays.ndarray,
'_with_index_min',
functools.partial(_with_index_update_helper, _UpdateMethod.MIN),
)
setattr(
np_arrays.ndarray,
'_with_index_max',
functools.partial(_with_index_update_helper, _UpdateMethod.MAX),
)
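if __name__ == '__main__':
  # Hedged usage sketch, not part of the original module: the setattr wiring
  # above is what gives tf.experimental.numpy arrays their NumPy-style
  # indexing. Exercised through the public API (assuming a TensorFlow version
  # that provides experimental_enable_numpy_behavior) it looks roughly like:
  import tensorflow.experimental.numpy as tnp
  tnp.experimental_enable_numpy_behavior()
  a = tnp.arange(12).reshape(3, 4)
  print(a[1:, ::2])    # basic strided slicing -> the _slice_helper fast path
  print(a[[0, 2], 1])  # integer-array (advanced) indexing -> the gather path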
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@python@ops@numpy_ops@[email protected]_END.py
|
{
"filename": "__init__.py",
"repo_name": "PrefectHQ/prefect",
"repo_path": "prefect_extracted/prefect-main/src/prefect/server/services/__init__.py",
"type": "Python"
}
|
import prefect.server.services.cancellation_cleanup
import prefect.server.services.flow_run_notifications
import prefect.server.services.foreman
import prefect.server.services.late_runs
import prefect.server.services.pause_expirations
import prefect.server.services.scheduler
import prefect.server.services.telemetry
|
PrefectHQREPO_NAMEprefectPATH_START.@prefect_extracted@prefect-main@src@prefect@server@services@[email protected]_END.py
|
{
"filename": "incremental_pca.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/scikit-learn/py2/sklearn/decomposition/incremental_pca.py",
"type": "Python"
}
|
"""Incremental Principal Components Analysis."""
# Author: Kyle Kastner <[email protected]>
# Giorgio Patrini
# License: BSD 3 clause
import numpy as np
from scipy import linalg
from .base import _BasePCA
from ..utils import check_array, gen_batches
from ..utils.extmath import svd_flip, _incremental_mean_and_var
class IncrementalPCA(_BasePCA):
"""Incremental principal components analysis (IPCA).
Linear dimensionality reduction using Singular Value Decomposition of
centered data, keeping only the most significant singular vectors to
project the data to a lower dimensional space.
Depending on the size of the input data, this algorithm can be much more
memory efficient than a PCA.
This algorithm has constant memory complexity, on the order
of ``batch_size``, enabling use of np.memmap files without loading the
entire file into memory.
The computational overhead of each SVD is
``O(batch_size * n_features ** 2)``, but only 2 * batch_size samples
remain in memory at a time. There will be ``n_samples / batch_size`` SVD
computations to get the principal components, versus 1 large SVD of
complexity ``O(n_samples * n_features ** 2)`` for PCA.
Read more in the :ref:`User Guide <IncrementalPCA>`.
Parameters
----------
n_components : int or None, (default=None)
Number of components to keep. If ``n_components`` is ``None``,
then ``n_components`` is set to ``min(n_samples, n_features)``.
batch_size : int or None, (default=None)
The number of samples to use for each batch. Only used when calling
``fit``. If ``batch_size`` is ``None``, then ``batch_size``
is inferred from the data and set to ``5 * n_features``, to provide a
balance between approximation accuracy and memory consumption.
copy : bool, (default=True)
If False, X will be overwritten. ``copy=False`` can be used to
save memory but is unsafe for general use.
whiten : bool, optional
When True (False by default) the ``components_`` vectors are divided
by ``n_samples`` times ``components_`` to ensure uncorrelated outputs
with unit component-wise variances.
Whitening will remove some information from the transformed signal
(the relative variance scales of the components) but can sometimes
improve the predictive accuracy of the downstream estimators by
making data respect some hard-wired assumptions.
Attributes
----------
components_ : array, shape (n_components, n_features)
Components with maximum variance.
explained_variance_ : array, shape (n_components,)
Variance explained by each of the selected components.
explained_variance_ratio_ : array, shape (n_components,)
Percentage of variance explained by each of the selected components.
If all components are stored, the sum of explained variances is equal
to 1.0
mean_ : array, shape (n_features,)
Per-feature empirical mean, aggregate over calls to ``partial_fit``.
var_ : array, shape (n_features,)
Per-feature empirical variance, aggregate over calls to
``partial_fit``.
noise_variance_ : float
The estimated noise covariance following the Probabilistic PCA model
from Tipping and Bishop 1999. See "Pattern Recognition and
Machine Learning" by C. Bishop, 12.2.1 p. 574 or
http://www.miketipping.com/papers/met-mppca.pdf.
n_components_ : int
The estimated number of components. Relevant when
``n_components=None``.
n_samples_seen_ : int
The number of samples processed by the estimator. Will be reset on
new calls to fit, but increments across ``partial_fit`` calls.
Notes
-----
Implements the incremental PCA model from:
`D. Ross, J. Lim, R. Lin, M. Yang, Incremental Learning for Robust Visual
Tracking, International Journal of Computer Vision, Volume 77, Issue 1-3,
pp. 125-141, May 2008.`
See http://www.cs.toronto.edu/~dross/ivt/RossLimLinYang_ijcv.pdf
This model is an extension of the Sequential Karhunen-Loeve Transform from:
`A. Levy and M. Lindenbaum, Sequential Karhunen-Loeve Basis Extraction and
its Application to Images, IEEE Transactions on Image Processing, Volume 9,
Number 8, pp. 1371-1374, August 2000.`
See http://www.cs.technion.ac.il/~mic/doc/skl-ip.pdf
We have specifically abstained from an optimization used by authors of both
papers, a QR decomposition used in specific situations to reduce the
algorithmic complexity of the SVD. The source for this technique is
`Matrix Computations, Third Edition, G. Golub and C. Van Loan, Chapter 5,
section 5.4.4, pp 252-253.`. This technique has been omitted because it is
advantageous only when decomposing a matrix with ``n_samples`` (rows)
>= 5/3 * ``n_features`` (columns), and hurts the readability of the
implemented algorithm. This would be a good opportunity for future
optimization, if it is deemed necessary.
References
----------
D. Ross, J. Lim, R. Lin, M. Yang. Incremental Learning for Robust Visual
Tracking, International Journal of Computer Vision, Volume 77,
Issue 1-3, pp. 125-141, May 2008.
G. Golub and C. Van Loan. Matrix Computations, Third Edition, Chapter 5,
Section 5.4.4, pp. 252-253.
See also
--------
PCA
RandomizedPCA
KernelPCA
SparsePCA
TruncatedSVD
"""
def __init__(self, n_components=None, whiten=False, copy=True,
batch_size=None):
self.n_components = n_components
self.whiten = whiten
self.copy = copy
self.batch_size = batch_size
def fit(self, X, y=None):
"""Fit the model with X, using minibatches of size batch_size.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples and
n_features is the number of features.
y: Passthrough for ``Pipeline`` compatibility.
Returns
-------
self: object
Returns the instance itself.
"""
self.components_ = None
self.n_samples_seen_ = 0
self.mean_ = .0
self.var_ = .0
self.singular_values_ = None
self.explained_variance_ = None
self.explained_variance_ratio_ = None
self.noise_variance_ = None
X = check_array(X, copy=self.copy, dtype=[np.float64, np.float32])
n_samples, n_features = X.shape
if self.batch_size is None:
self.batch_size_ = 5 * n_features
else:
self.batch_size_ = self.batch_size
for batch in gen_batches(n_samples, self.batch_size_):
self.partial_fit(X[batch], check_input=False)
return self
def partial_fit(self, X, y=None, check_input=True):
"""Incremental fit with X. All of X is processed as a single batch.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
self: object
Returns the instance itself.
"""
if check_input:
X = check_array(X, copy=self.copy, dtype=[np.float64, np.float32])
n_samples, n_features = X.shape
if not hasattr(self, 'components_'):
self.components_ = None
if self.n_components is None:
self.n_components_ = n_features
elif not 1 <= self.n_components <= n_features:
raise ValueError("n_components=%r invalid for n_features=%d, need "
"more rows than columns for IncrementalPCA "
"processing" % (self.n_components, n_features))
else:
self.n_components_ = self.n_components
if (self.components_ is not None) and (self.components_.shape[0] !=
self.n_components_):
raise ValueError("Number of input features has changed from %i "
"to %i between calls to partial_fit! Try "
"setting n_components to a fixed value." %
(self.components_.shape[0], self.n_components_))
# This is the first partial_fit
if not hasattr(self, 'n_samples_seen_'):
self.n_samples_seen_ = 0
self.mean_ = .0
self.var_ = .0
# Update stats - they are 0 if this is the first step
col_mean, col_var, n_total_samples = \
_incremental_mean_and_var(X, last_mean=self.mean_,
last_variance=self.var_,
last_sample_count=self.n_samples_seen_)
# Whitening
if self.n_samples_seen_ == 0:
# If it is the first step, simply whiten X
X -= col_mean
else:
col_batch_mean = np.mean(X, axis=0)
X -= col_batch_mean
# Build matrix of combined previous basis and new data
mean_correction = \
np.sqrt((self.n_samples_seen_ * n_samples) /
n_total_samples) * (self.mean_ - col_batch_mean)
X = np.vstack((self.singular_values_.reshape((-1, 1)) *
self.components_, X, mean_correction))
U, S, V = linalg.svd(X, full_matrices=False)
U, V = svd_flip(U, V, u_based_decision=False)
explained_variance = S ** 2 / n_total_samples
explained_variance_ratio = S ** 2 / np.sum(col_var * n_total_samples)
self.n_samples_seen_ = n_total_samples
self.components_ = V[:self.n_components_]
self.singular_values_ = S[:self.n_components_]
self.mean_ = col_mean
self.var_ = col_var
self.explained_variance_ = explained_variance[:self.n_components_]
self.explained_variance_ratio_ = \
explained_variance_ratio[:self.n_components_]
if self.n_components_ < n_features:
self.noise_variance_ = \
explained_variance[self.n_components_:].mean()
else:
self.noise_variance_ = 0.
return self
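if __name__ == '__main__':
    # Hedged usage sketch, not part of the original module: a rough smoke test
    # assuming the module is importable as part of scikit-learn (e.g. run via
    # ``python -m sklearn.decomposition.incremental_pca``).
    rng = np.random.RandomState(0)
    X_demo = rng.rand(200, 10)
    ipca = IncrementalPCA(n_components=3, batch_size=50)
    ipca.fit(X_demo)  # internally loops over gen_batches(200, 50)
    assert ipca.components_.shape == (3, 10)
    assert ipca.explained_variance_ratio_.sum() <= 1.0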
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@scikit-learn@py2@sklearn@decomposition@[email protected]_END.py
|
{
"filename": "_nticks.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/mesh3d/colorbar/_nticks.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class NticksValidator(_plotly_utils.basevalidators.IntegerValidator):
def __init__(self, plotly_name="nticks", parent_name="mesh3d.colorbar", **kwargs):
super(NticksValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
min=kwargs.pop("min", 0),
**kwargs,
)
|
[email protected][email protected]@packages@python@plotly@plotly@validators@mesh3d@colorbar@[email protected]_END.py
|
{
"filename": "test_fouriertransform.py",
"repo_name": "AOtools/aotools",
"repo_path": "aotools_extracted/aotools-main/test/test_fouriertransform.py",
"type": "Python"
}
|
from aotools import fouriertransform
import numpy
def test_ft():
data = numpy.random.random((100))
ft_data = fouriertransform.ft(data, 0.1)
assert ft_data.shape == data.shape
def test_ift():
data = numpy.random.random((100))
ift_data = fouriertransform.ift(data, 1.)
assert ift_data.shape == data.shape
def test_ft2():
data = numpy.zeros((10, 10))
ft_data = fouriertransform.ft2(data, 1.)
assert ft_data.shape == data.shape
def test_ift2():
data = numpy.zeros((10, 10))
ift_data = fouriertransform.ift2(data, 1.)
assert ift_data.shape == data.shape
def test_rft():
data = numpy.zeros((100))
data_width = len(data)
rft_data = fouriertransform.rft(data, 1.)
width, = rft_data.shape
rescaled_width = (width-1)*2
assert rescaled_width == data_width
# def test_rft2():
# data = numpy.zeros((10, 10))
# rft2_data = fouriertransform.rft2(data, 1.)
# print(rft2_data.shape)
# assert rft2_data.shape == data.shape
|
AOtoolsREPO_NAMEaotoolsPATH_START.@aotools_extracted@aotools-main@test@[email protected]_END.py
|
{
"filename": "galaxy_catalog_analysis_tutorial6.ipynb",
"repo_name": "astropy/halotools",
"repo_path": "halotools_extracted/halotools-master/docs/notebooks/galcat_analysis/basic_examples/galaxy_catalog_analysis_tutorial6.ipynb",
"type": "Jupyter Notebook"
}
|
# Example 6: Mean infall velocity into cluster BCGs
In this example we'll show how to calculate the mean infall velocity of galaxies towards the cluster BCGs.
## Generate a mock galaxy catalog
Let's start out by generating a mock galaxy catalog in an N-body
simulation in the usual way. Here we'll assume you have the z=0
rockstar halos for the multidark simulation cached, since that is the
catalog requested explicitly in the cell below.
```python
from halotools.empirical_models import PrebuiltSubhaloModelFactory
model = PrebuiltSubhaloModelFactory('smhm_binary_sfr')
from halotools.sim_manager import CachedHaloCatalog
halocat = CachedHaloCatalog(simname='multidark', redshift=0, halo_finder='rockstar')
model.populate_mock(halocat)
```
Our mock galaxies are stored in the ``galaxy_table`` of ``model.mock``
in the form of an Astropy Table.
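To get a quick feel for the catalog, we can inspect the column names and the first few rows:
```python
print(model.mock.galaxy_table.colnames)
print(model.mock.galaxy_table[0:5])
```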
## Extract the position and velocity coordinates
```python
from halotools.mock_observables import return_xyz_formatted_array, mean_radial_velocity_vs_r
```
```python
cluster_central_mask = (model.mock.galaxy_table['stellar_mass'] > 10**11.5)
cluster_centrals = model.mock.galaxy_table[cluster_central_mask]
low_mass_tracers_mask = ((model.mock.galaxy_table['stellar_mass'] > 10**10) &
(model.mock.galaxy_table['stellar_mass'] < 10**10.5))
low_mass_tracers = model.mock.galaxy_table[low_mass_tracers_mask]
```
```python
cluster_pos = return_xyz_formatted_array(cluster_centrals['x'], cluster_centrals['y'] ,cluster_centrals['z'])
cluster_vel = return_xyz_formatted_array(cluster_centrals['vx'], cluster_centrals['vy'] ,cluster_centrals['vz'])
low_mass_tracers_pos = return_xyz_formatted_array(low_mass_tracers['x'], low_mass_tracers['y'] ,low_mass_tracers['z'])
low_mass_tracers_vel = return_xyz_formatted_array(low_mass_tracers['vx'], low_mass_tracers['vy'] ,low_mass_tracers['vz'])
```
## Calculate $<V_{\rm rad}>(r)$
```python
import numpy as np

rbins = np.logspace(-0.5, 1.25, 15)
rbin_midpoints = (rbins[1:] + rbins[:-1])/2.
vr_clusters = mean_radial_velocity_vs_r(cluster_pos, cluster_vel, rbins_absolute=rbins,
sample2=low_mass_tracers_pos, velocities2=low_mass_tracers_vel,
period = model.mock.Lbox)
```
### Plot the result
```python
%matplotlib inline
import matplotlib.pyplot as plt

fig, ax = plt.subplots(1, 1)
__=ax.plot(rbin_midpoints, vr_clusters, color='k')
xscale = ax.set_xscale('log')
xlim = ax.set_xlim(xmin=0.5, xmax=20)
xlabel = ax.set_xlabel(r'$r $ $\rm{[Mpc]}$', fontsize=15)
ylabel = ax.set_ylabel(r'$\langle V_{\rm rad}\rangle$ $[{\rm km/s}]$', fontsize=15)
title = ax.set_title('Radial infall velocity into cluster BCGs', fontsize=15)
fig.savefig('cluster_bcg_infall_velocity.png',
bbox_extra_artists=[xlabel, ylabel], bbox_inches='tight')
```

```python
```
|
astropyREPO_NAMEhalotoolsPATH_START.@halotools_extracted@halotools-master@docs@notebooks@galcat_analysis@basic_examples@[email protected]_END.py
|
{
"filename": "gpu_rnn.py",
"repo_name": "google/jax",
"repo_path": "jax_extracted/jax-main/jaxlib/gpu_rnn.py",
"type": "Python"
}
|
# Copyright 2022 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import jaxlib.mlir.ir as ir
import jaxlib.mlir.dialects.stablehlo as hlo
import numpy as np
from jaxlib import xla_client
from .gpu_common_utils import GpuLibNotLinkedError
for cuda_module_name in [".cuda", "jax_cuda12_plugin"]:
try:
_rnn = importlib.import_module(f"{cuda_module_name}._rnn", package="jaxlib")
except ImportError:
_rnn = None
else:
break
if _rnn:
for _name, _value in _rnn.registrations().items():
xla_client.register_custom_call_target(_name, _value, platform='CUDA')
compute_rnn_workspace_reserve_space_sizes = _rnn.compute_rnn_workspace_reserve_space_sizes
def cudnn_rnn_lowering(ctx, input, h_0, c_0, weights, seq_lengths, *,
input_size: int, hidden_size: int, num_layers: int,
dropout: bool, bidirectional: bool,
cudnn_allow_tf32: bool):
"""CuDnn RNN."""
out_dtype = ctx.avals_out[0].dtype
if out_dtype == np.float32:
out_type = ir.F32Type.get()
elif out_dtype == np.float64:
out_type = ir.F64Type.get()
elif out_dtype == np.complex64:
out_type = ir.ComplexType.get(ir.F32Type.get())
elif out_dtype == np.complex128:
out_type = ir.ComplexType.get(ir.F64Type.get())
else:
raise ValueError(f'Unknown output type {out_dtype}')
output_type = ir.RankedTensorType.get(ctx.avals_out[0].shape, out_type)
batch_size = ctx.avals_in[0].shape[0]
max_seq_length = ctx.avals_in[0].shape[1]
# workspace_shape = ctx.avals_out[3].shape
workspace_size, _ = compute_rnn_workspace_reserve_space_sizes(
input_size, hidden_size, num_layers, batch_size, max_seq_length,
dropout, bidirectional, cudnn_allow_tf32)
workspace_shape = (workspace_size,)
workspace_type = ir.RankedTensorType.get(workspace_shape, ir.F32Type.get())
reserve_space_shape = ctx.avals_out[3].shape
reserve_space_type = ir.RankedTensorType.get(reserve_space_shape,
ir.F32Type.get())
if not _rnn:
raise GpuLibNotLinkedError()
opaque = _rnn.build_rnn_descriptor(input_size, hidden_size, num_layers,
batch_size, max_seq_length, dropout,
bidirectional, cudnn_allow_tf32,
workspace_shape[0],
reserve_space_shape[0])
i32_type = ir.IntegerType.get_signless(32)
out = hlo.CustomCallOp(
[output_type, h_0.type, c_0.type, workspace_type, reserve_space_type],
[input, h_0, c_0, weights, seq_lengths],
call_target_name=ir.StringAttr.get('cudnn_rnn'),
has_side_effect=ir.BoolAttr.get(False),
backend_config=ir.StringAttr.get(opaque),
api_version=ir.IntegerAttr.get(i32_type, 2),
called_computations=ir.ArrayAttr.get([]),
)
return out.results[:-2] + out.results[-1:] # drop workspace output
def _hlo_zeros_f32(shape):
return hlo.constant(
ir.DenseElementsAttr.get(
np.zeros(shape, dtype=np.float32), type=ir.F32Type.get()))
def cudnn_rnn_bwd_lowering(ctx, dy, dhn, dcn, x, h0, c0, w, y,
reserve_space, seq_lengths, *, input_size: int,
hidden_size: int, num_layers: int, dropout: bool,
bidirectional: bool, cudnn_allow_tf32: bool):
"""CuDnn RNN Backward pass."""
batch_size = ctx.avals_in[3].shape[0]
max_seq_length = ctx.avals_in[3].shape[1]
workspace_size, _ = compute_rnn_workspace_reserve_space_sizes(
input_size, hidden_size, num_layers, batch_size, max_seq_length,
dropout, bidirectional, cudnn_allow_tf32)
workspace_shape = (workspace_size,)
workspace_type = ir.RankedTensorType.get(workspace_shape, ir.F32Type.get())
reserve_space_shape = ctx.avals_in[8].shape
if _rnn is None:
raise RuntimeError("cuda couldn't be imported")
opaque = _rnn.build_rnn_descriptor(input_size, hidden_size, num_layers,
batch_size, max_seq_length, dropout,
bidirectional, cudnn_allow_tf32,
workspace_shape[0],
reserve_space_shape[0])
i32_type = ir.IntegerType.get_signless(32)
zeroed_dw = _hlo_zeros_f32(ctx.avals_out[3].shape)
out = hlo.CustomCallOp(
[x.type, h0.type, c0.type, w.type, workspace_type], [
dy, dhn, dcn, x, h0, c0, w, y, reserve_space, zeroed_dw,
seq_lengths
],
call_target_name=ir.StringAttr.get('cudnn_rnn_bwd'),
has_side_effect=ir.BoolAttr.get(False),
backend_config=ir.StringAttr.get(opaque),
api_version=ir.IntegerAttr.get(i32_type, 2),
called_computations=ir.ArrayAttr.get([]),
output_operand_aliases=ir.ArrayAttr.get([
hlo.OutputOperandAlias.get(
output_tuple_indices=[3],
operand_index=9,
operand_tuple_indices=[])
]))
return out.results[:-1] # drop workspace output
|
googleREPO_NAMEjaxPATH_START.@jax_extracted@jax-main@jaxlib@[email protected]_END.py
|
{
"filename": "_font.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/sankey/link/hoverlabel/_font.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class FontValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(
self, plotly_name="font", parent_name="sankey.link.hoverlabel", **kwargs
):
super(FontValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Font"),
data_docs=kwargs.pop(
"data_docs",
"""
color
colorsrc
Sets the source reference on Chart Studio Cloud
for `color`.
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans", "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on Chart Studio Cloud
for `family`.
lineposition
Sets the kind of decoration line(s) with text,
such as an "under", "over" or "through" as well
as combinations e.g. "under+over", etc.
linepositionsrc
Sets the source reference on Chart Studio Cloud
for `lineposition`.
shadow
Sets the shape and color of the shadow behind
text. "auto" places minimal shadow and applies
contrast text font color. See
https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional
options.
shadowsrc
Sets the source reference on Chart Studio Cloud
for `shadow`.
size
sizesrc
Sets the source reference on Chart Studio Cloud
for `size`.
style
Sets whether a font should be styled with a
normal or italic face from its family.
stylesrc
Sets the source reference on Chart Studio Cloud
for `style`.
textcase
Sets capitalization of text. It can be used to
make text appear in all-uppercase or all-
lowercase, or with each word capitalized.
textcasesrc
Sets the source reference on Chart Studio Cloud
for `textcase`.
variant
Sets the variant of the font.
variantsrc
Sets the source reference on Chart Studio Cloud
for `variant`.
weight
Sets the weight (or boldness) of the font.
weightsrc
Sets the source reference on Chart Studio Cloud
for `weight`.
""",
),
**kwargs,
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@sankey@link@hoverlabel@[email protected]_END.py
|
{
"filename": "JeffreysPrior.py",
"repo_name": "dokester/BayesicFitting",
"repo_path": "BayesicFitting_extracted/BayesicFitting-master/BayesicFitting/source/JeffreysPrior.py",
"type": "Python"
}
|
import math as math
import numpy as numpy
import warnings
from .Prior import Prior
from .Tools import setAttribute as setatt
__author__ = "Do Kester"
__year__ = 2024
__license__ = "GPL3"
__version__ = "3.2.1"
__url__ = "https://www.bayesicfitting.nl"
__status__ = "Perpetual Beta"
# * This file is part of the BayesicFitting package.
# *
# * BayesicFitting is free software: you can redistribute it and/or modify
# * it under the terms of the GNU Lesser General Public License as
# * published by the Free Software Foundation, either version 3 of
# * the License, or ( at your option ) any later version.
# *
# * BayesicFitting is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU Lesser General Public License for more details.
# *
# * The GPL3 license can be found at <http://www.gnu.org/licenses/>.
# *
# * A JAVA version of this code was part of the Herschel Common
# * Science System (HCSS), also under GPL3.
# *
# * 2010 - 2014 Do Kester, SRON (Java code)
# * 2016 - 2024 Do Kester.
class JeffreysPrior( Prior ):
"""
Jeffreys prior distribution, for scale-like parameters.
Jeffreys prior is an improper prior ( i.e. its integral is unbounded ).
Because of that it always needs limits, low and high, such that
0 < low < high < +Inf.
Pr( x ) = 1.0 / ( x * norm ) if low < x < high
0.0 otherwise
where norm = log( high ) - log( low )
No limits are set by default.
domain2unit: u = ( log( d ) - log( lo ) ) / ( log( hi ) - log( lo ) );
unit2domain: d = exp( u * ( log( hi ) - log( lo ) ) + log( lo ) );
Examples
--------
>>> pr = JeffreysPrior() # unbound prior
>>> pr = JeffreysPrior( limits=[0.1,1.0] ) # limited to the range [0.1,1.0]
Hidden Attributes
-----------------
_logLo : float
log( lowLimit )
_norm : float
log( highLimit / lowLimit )
Attributes from Prior
---------------------
lowLimit, highLimit, deltaP, _lowDomain, _highDomain
The default of lowLimit and _lowDomain is zero.
"""
# *********CONSTRUCTORS***************************************************
def __init__( self, limits=None, prior=None ):
"""
Default constructor.
Parameters
----------
limits : list of 2 floats
2 limits resp. low and high
prior : JeffreysPrior
prior to copy (with new limits if applicable)
"""
super( ).__init__( limits=limits, domain=[0,math.inf], prior=prior )
def copy( self ):
return JeffreysPrior( prior=self, limits=self.limits )
def getIntegral( self ) :
"""
Return the integral of JeffreysPrior from lowLimit to highLimit.
"""
return self._urng
def domain2Unit( self, dval ):
"""
Return a value in [0,1] given a value within the valid domain of
a parameter for a Jeffreys distribution.
Parameters
----------
dval : float
value within the domain of a parameter
"""
if numpy.any( dval <= 0 ) :
raise ValueError()
return numpy.log( dval )
def unit2Domain( self, uval ):
"""
Return a value within the valid domain of the parameter given a value
between [0,1] for a Jeffreys distribution.
Parameters
----------
uval : float
value within [0,1]
"""
return numpy.exp( uval )
def result( self, x ):
"""
Return the result of the distribution function at x.
Parameters
----------
x : float
value within the domain of a parameter
"""
if math.isnan( self._urng ) :
raise AttributeError( "Limits are needed for JeffreysPrior" )
try :
return 0 if self.isOutOfLimits( x ) else 1.0 / ( x * self._urng )
except ValueError :
with warnings.catch_warnings():
warnings.simplefilter( "ignore", category=RuntimeWarning )
return numpy.where( self.isOutOfLimits( x ), 0,
1.0 / ( x * self._urng ) )
# logResult has no better definition than the default: just take the math.log of result.
# No specialized method here.
def partialLog( self, p ):
"""
Return partial derivative of log( Prior ) wrt parameter.
Parameters
----------
p : float
the value
"""
try :
return math.nan if self.isOutOfLimits( p ) else -1 / p
except ValueError :
return numpy.where( self.isOutOfLimits( p ), math.nan, -1 / p )
def isBound( self ):
""" Return true if the integral over the prior is bound. """
return self.hasLowLimit( ) and self.hasHighLimit( )
def shortName( self ):
""" Return a string representation of the prior. """
return str( "JeffreysPrior" + ( " unbound." if not self.isBound( ) else "" ) )
# * End of JeffreysPrior
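# Hedged usage sketch (illustrative, not part of the original module), using
# only the methods defined above and the norm defined in the class docstring:
#
#   pr = JeffreysPrior( limits=[0.1, 1.0] )
#   pdf = pr.result( 0.5 )       # 1 / ( 0.5 * norm ) with norm = log( 1.0 / 0.1 )
#   dlp = pr.partialLog( 0.5 )   # d log( prior ) / dx = -1 / 0.5 = -2.0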
|
dokesterREPO_NAMEBayesicFittingPATH_START.@BayesicFitting_extracted@BayesicFitting-master@BayesicFitting@[email protected]@.PATH_END.py
|
{
"filename": "pointless.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/Pygments/py3/pygments/lexers/pointless.py",
"type": "Python"
}
|
"""
pygments.lexers.pointless
~~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for Pointless.
:copyright: Copyright 2006-2024 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, words
from pygments.token import Comment, Error, Keyword, Name, Number, Operator, \
Punctuation, String, Text
__all__ = ['PointlessLexer']
class PointlessLexer(RegexLexer):
"""
For Pointless source code.
"""
name = 'Pointless'
url = 'https://ptls.dev'
aliases = ['pointless']
filenames = ['*.ptls']
version_added = '2.7'
ops = words([
"+", "-", "*", "/", "**", "%", "+=", "-=", "*=",
"/=", "**=", "%=", "|>", "=", "==", "!=", "<", ">",
"<=", ">=", "=>", "$", "++",
])
keywords = words([
"if", "then", "else", "where", "with", "cond",
"case", "and", "or", "not", "in", "as", "for",
"requires", "throw", "try", "catch", "when",
"yield", "upval",
], suffix=r'\b')
tokens = {
'root': [
(r'[ \n\r]+', Text),
(r'--.*$', Comment.Single),
(r'"""', String, 'multiString'),
(r'"', String, 'string'),
(r'[\[\](){}:;,.]', Punctuation),
(ops, Operator),
(keywords, Keyword),
(r'\d+|\d*\.\d+', Number),
(r'(true|false)\b', Name.Builtin),
(r'[A-Z][a-zA-Z0-9]*\b', String.Symbol),
(r'output\b', Name.Variable.Magic),
(r'(export|import)\b', Keyword.Namespace),
(r'[a-z][a-zA-Z0-9]*\b', Name.Variable)
],
'multiString': [
(r'\\.', String.Escape),
(r'"""', String, '#pop'),
(r'"', String),
(r'[^\\"]+', String),
],
'string': [
(r'\\.', String.Escape),
(r'"', String, '#pop'),
(r'\n', Error),
(r'[^\\"]+', String),
],
}
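if __name__ == '__main__':
    # Hedged usage sketch, not part of the original module: run the lexer on a
    # tiny Pointless snippet and emit the raw token text via NullFormatter.
    from pygments import highlight
    from pygments.formatters import NullFormatter
    sample = 'output = if true then 1 else 0 -- a comment'
    print(highlight(sample, PointlessLexer(), NullFormatter()))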
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@Pygments@py3@pygments@[email protected]@.PATH_END.py
|
{
"filename": "_yref.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/funnel/marker/colorbar/_yref.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class YrefValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self, plotly_name="yref", parent_name="funnel.marker.colorbar", **kwargs
):
super(YrefValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
values=kwargs.pop("values", ["container", "paper"]),
**kwargs,
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@funnel@marker@colorbar@[email protected]_END.py
|
{
"filename": "calc_slab.py",
"repo_name": "galtay/rabacus",
"repo_path": "rabacus_extracted/rabacus-master/rabacus/solvers/calc_slab.py",
"type": "Python"
}
|
""" Solves a slab with plane parallel radiation incident from one side. """
import numpy as np
import one_zone as ozn
from rabacus.atomic import chemistry
from rabacus.atomic import cooling
from rabacus.constants import units
from rabacus.utils import utils
__all__ = ['Slab']
class Slab:
"""
Solves a slab geometry with plane parallel radiation incident from one
side. The slab is divided into `Nl` layers.
Args:
`Edges` (array): Distance to layer edges
`T` (array): Temperature in each layer
`nH` (array): hydrogen number density in each layer
`nHe` (array): helium number density in each layer
`rad_src` (:class:`~rabacus.rad_src.plane.PlaneSource`): Plane source
.. note::
The arrays `T`, `nH`, and `nHe` must all be the same size. This size
determines the number of shells, `Nl`. `Edges` determines the positions
of the layer edges and must have `Nl+1` entries.
Kwargs:
`rec_meth` (string): How to treat recombinations {``fixed``, ``thresh``}
`fixed_fcA` (float): If `rec_meth` = ``fixed``, constant caseA fraction
`thresh_P_A` (float): If `rec_meth` = ``thresh``, this is the probability
of absorption, P = 1 - exp(-tau), at which the transition to caseB rates
begins.
`thresh_P_B` (float): If `rec_meth` = ``thresh``, this is the probability
of absorption, P = 1 - exp(-tau), at which the transition to caseB ends.
`thresh_xmfp` (float): Determines the distance to probe when calculating
optical depth for the caseA to caseB transition. Specifies a multiple
of the mean free path for fully neutral gas. In equation form,
L = thresh_xmfp / (n * sigma)
`atomic_fit_name` (string): Source for atomic rate fits {``hg97``}
`find_Teq` (bool): If ``False``, use fixed input T, if ``True`` solve for
equilibrium T
`z` (float): Redshift, only need if `find_Teq` = ``True``
`verbose` (bool): Verbose output?
`tol` (float): tolerance for all convergence tests
`thin` (bool): if ``True`` only solves optically thin
Attributes:
`U` (:class:`~rabacus.constants.units.Units`)
`z_c` (array): distance from surface of slab to center of layer
`dz` (array): thickness of layer
`NH_c` (array): H column density from surface of slab to center of layer
`dNH` (array): H column density through layer
`NH1_c` (array): HI column density from surface of slab to center of
layer
`dNH1` (array): HI column density through layer
`H1i` (array): HI photo-ionization rate
`H1h` (array): HI photo-heating rate
`xH1` (array): H neutral fraction nHI / nH
`xH2` (array): H ionized fraction nHII / nH
`ne` (array): electron number density
`fcA_H2` (array): HII case A fraction
`cool` (array): cooling rate [erg / (cm^3 s)]
`heat` (array): heating rate [erg / (cm^3 s)]
`heatH1` (array): contribution to `heat` from H1 photoheating
`dtauH1_th` (array): HI optical depth at H1 ionizing threshold
through layer
`tauH1_th_lo` (array): HI optical depth below this layer
`tauH1_th_hi` (array): HI optical depth above this layer
`NH1_thru` (float): HI column density through entire slab
`Nl` (int): Number of layers
`itr` (int): Number of iterations to converge
.. note::
For many of the attributes above, there are analogous versions for
helium. We also note that the source of the atomic rates fit is stored
in the variable `atomic_fit_name`, while the source of the photoionization
cross section fits is stored in the radiation source object in the variable
`px_fit_type`.
Examples:
The following code will solve a monochromatic source incident
onto a slab with fixed recombination rates and temperatues. In this
case we use a Haardt and Madau 2012 spectrum to calculate a grey photon
energy which is then used to create a monochromatic plane source ::
import numpy as np
import rabacus as ra
# create Haardt and Madau 2012 source
#---------------------------------------------------------------
q_min = 1.0; q_max = 4.0e2; z = 3.0
hm12 = ra.rad_src.PlaneSource( q_min, q_max, 'hm12', z=z )
# get grey photon energy and create a monochromatic plane source
#---------------------------------------------------------------
q_mono = hm12.grey.E.He2 / hm12.PX.Eth_H1
mono = ra.rad_src.PlaneSource( q_mono, q_mono, 'monochromatic' )
mono.normalize_H1i( hm12.thin.H1i )
# describe slab
#---------------------------------------------------------------
Nl = 512; Yp = 0.24
nH = np.ones(Nl) * 1.5e-2 / ra.U.cm**3
nHe = nH * Yp * 0.25 / (1.0-Yp)
Tslab = np.ones(Nl) * 1.0e4 * ra.U.K
Lslab = 200.0 * ra.U.kpc
Edges = np.linspace( 0.0 * ra.U.kpc, Lslab, Nl+1 )
# solve slab
#---------------------------------------------------------------
slab = ra.solvers.Slab( Edges, Tslab, nH, nHe, mono,
rec_meth='fixed', fixed_fcA=1.0 )
"""
def __init__(self,
Edges,
T,
nH,
nHe,
rad_src,
#
rec_meth = "fixed",
fixed_fcA = 1.0,
thresh_P_A = 0.01,
thresh_P_B = 0.99,
thresh_xmfp = 3.0e1,
atomic_fit_name = "hg97",
find_Teq = False,
z = None,
verbose = False,
tol = 1.0e-10,
thin = False,
):
# attach units
#-----------------------------------------------
self.U = units.Units()
# check input
#-----------------------------------------------
assert( Edges.size == T.size + 1 )
assert( T.shape == nH.shape == nHe.shape )
assert( T.shape == (T.size,) )
assert( rec_meth == 'fixed' or rec_meth == 'thresh' )
if find_Teq and z==None:
msg = 'need to supply redshift if finding equilibrium temperature'
raise utils.InputError(msg)
if rad_src.source_type != 'plane':
msg = 'source type needs to be plane'
raise utils.InputError(msg)
# set units
#-----------------------------------------------
Edges.units = 'cm'
T.units = 'K'
nH.units = '1.0/cm^3'
nHe.units = '1.0/cm^3'
# attach input
#-----------------------------------------------
self.Edges = Edges.copy()
self.T = T.copy()
self.nH = nH.copy()
self.nHe = nHe.copy()
self.rad_src = rad_src
self.rec_meth = rec_meth
if self.rec_meth == 'fixed':
self.fixed_fcA = fixed_fcA
elif self.rec_meth == 'thresh':
assert thresh_xmfp > 0.0
assert thresh_P_B > thresh_P_A
self.thresh_xmfp = thresh_xmfp
self.thresh_P_A = thresh_P_A
self.thresh_P_B = thresh_P_B
self.thresh_dPAB = thresh_P_B - thresh_P_A
self.thresh_tau_A = -np.log( 1.0 - thresh_P_A )
self.thresh_tau_B = -np.log( 1.0 - thresh_P_B )
self.atomic_fit_name = atomic_fit_name
self.find_Teq = find_Teq
self.verbose = verbose
self.tol = tol
self.thin = thin
if find_Teq:
self.z = z
# initialize slab
#-----------------------------------------------
self.init_slab()
self.set_optically_thin()
if self.thin:
return
# solve slab (sweep until convergence)
#-----------------------------------------------------------
conv_old = np.sum( self.ne )
not_converged = True
self.itr = 0
while not_converged:
self.sweep_slab()
conv_new = np.sum( self.ne )
if np.abs( conv_new/conv_old - 1.0 ) < self.tol:
not_converged = False
conv_old = conv_new
self.itr += 1
# finalize slab
#-----------------------------------------------------------
self.finalize_slab()
def init_slab( self ):
""" Initialize slab values. """
if self.verbose:
print 'begin initialization'
# instantiate atomic rates (optically thin)
#-----------------------------------------------
if self.rec_meth == 'fixed':
fcA_H2 = self.fixed_fcA
fcA_He2 = self.fixed_fcA
fcA_He3 = self.fixed_fcA
elif self.rec_meth == 'thresh':
fcA_H2 = 1.0
fcA_He2 = 1.0
fcA_He3 = 1.0
kchem = chemistry.ChemistryRates(
self.T[0], fcA_H2, fcA_He2, fcA_He3,
H1i = self.rad_src.thin.H1i,
He1i = self.rad_src.thin.He1i,
He2i = self.rad_src.thin.He2i,
fit_name = self.atomic_fit_name
)
kcool = cooling.CoolingRates(
self.T[0], fcA_H2, fcA_He2, fcA_He3,
H1h = self.rad_src.thin.H1h,
He1h = self.rad_src.thin.He1h,
He2h = self.rad_src.thin.He2h,
fit_name = self.atomic_fit_name
)
self.kchem = kchem
self.kcool = kcool
if self.verbose:
print ' created kchem and kcool'
# setup arrays
#-----------------------------------------------
self.Nl = self.nH.size
Nl = self.Nl
self.dz = self.Edges[1:] - self.Edges[0:-1]
self.z_c = self.Edges[0:-1] + 0.5 * self.dz
self.xH1 = np.zeros(Nl)
self.dNH1 = np.zeros(Nl) / self.U.cm**2
self.dtauH1_th = np.zeros(Nl)
self.tauH1_th_lo = np.zeros(Nl) # H1 optical depth below layer
self.tauH1_th_hi = np.zeros(Nl) # H1 optical depth above layer
self.H1i = np.zeros(Nl) / self.U.s
self.H1h = np.zeros(Nl) * self.U.eV / self.U.s
self.fcA_H2 = np.zeros(Nl)
self.xHe1 = np.zeros(Nl)
self.dNHe1 = np.zeros(Nl) / self.U.cm**2
self.dtauHe1_th = np.zeros(Nl)
self.tauHe1_th_lo = np.zeros(Nl) # He1 optical depth below layer
self.tauHe1_th_hi = np.zeros(Nl) # He1 optical depth above layer
self.He1i = np.zeros(Nl) / self.U.s
self.He1h = np.zeros(Nl) * self.U.eV / self.U.s
self.fcA_He2 = np.zeros(Nl)
self.xHe2 = np.zeros(Nl)
self.dNHe2 = np.zeros(Nl) / self.U.cm**2
self.dtauHe2_th = np.zeros(Nl)
self.tauHe2_th_lo = np.zeros(Nl) # He2 optical depth below layer
self.tauHe2_th_hi = np.zeros(Nl) # He2 optical depth above layer
self.He2i = np.zeros(Nl) / self.U.s
self.He2h = np.zeros(Nl) * self.U.eV / self.U.s
self.fcA_He3 = np.zeros(Nl)
self.cool = np.zeros(Nl) * self.U.eV / (self.U.s * self.U.cm**3)
self.heat = np.zeros(Nl) * self.U.eV / (self.U.s * self.U.cm**3)
self.heatH1 = np.zeros(Nl) * self.U.eV / (self.U.s * self.U.cm**3)
self.heatHe1 = np.zeros(Nl) * self.U.eV / (self.U.s * self.U.cm**3)
self.heatHe2 = np.zeros(Nl) * self.U.eV / (self.U.s * self.U.cm**3)
self.xH2 = np.zeros(Nl)
self.xHe3 = np.zeros(Nl)
self.ne = np.zeros(Nl) / self.U.cm**3
if self.verbose:
print ' created simple arrays'
# calc NH and NHe between S and center of each layer
#----------------------------------------
self.dNH = self.dz * self.nH
self.dNHe = self.dz * self.nHe
self.NH_c = np.zeros(Nl) / self.U.cm**2
self.NHe_c = np.zeros(Nl) / self.U.cm**2
for i in xrange(Nl):
self.NH_c[i] = np.sum( self.dNH[0:i] ) + 0.5 * self.dNH[i]
self.NHe_c[i] = np.sum( self.dNHe[0:i] ) + 0.5 * self.dNHe[i]
if self.verbose:
print ' created NH_c and NHe_c'
# calc mean free path when fully neutral for each shell
# also calculate the bounds for each shell
#--------------------------------------------------------
if self.rec_meth == 'thresh':
L = self.Edges[-1]
self.mfp_H1 = 1.0 / ( self.nH * self.rad_src.th.sigma_H1 )
self.L_H1 = self.mfp_H1 * self.thresh_xmfp
if np.any( self.L_H1 > L ):
indx = np.where( self.L_H1 > L )
self.L_H1[indx] = L
self.mfp_He1 = 1.0 / ( self.nHe * self.rad_src.th.sigma_He1 )
self.L_He1 = self.mfp_He1 * self.thresh_xmfp
if np.any( self.L_He1 > L ):
indx = np.where( self.L_He1 > L )
self.L_He1[indx] = L
self.mfp_He2 = 1.0 / ( self.nHe * self.rad_src.th.sigma_He2 )
self.L_He2 = self.mfp_He2 * self.thresh_xmfp
if np.any( self.L_He2 > L ):
indx = np.where( self.L_He2 > L )
self.L_He2[indx] = L
self.ii_H1 = np.zeros(Nl, dtype=int)
self.ff_H1 = np.zeros(Nl, dtype=int)
self.ii_He1 = np.zeros(Nl, dtype=int)
self.ff_He1 = np.zeros(Nl, dtype=int)
self.ii_He2 = np.zeros(Nl, dtype=int)
self.ff_He2 = np.zeros(Nl, dtype=int)
for i in xrange(Nl):
self.ii_H1[i], self.ff_H1[i] = \
self.return_bounds( i, self.L_H1[i] )
self.ii_He1[i], self.ff_He1[i] = \
self.return_bounds( i, self.L_He1[i] )
self.ii_He2[i], self.ff_He2[i] = \
self.return_bounds( i, self.L_He2[i] )
if self.verbose:
print ' created index arrays for fcA'
if self.verbose:
print 'initialization complete'
def return_bounds( self, i, Ltarget ):
""" Given a shell number and a distance, return the indices such that
self.r[i] - self.r[ii] > Ltarget and self.r[ff] - self.r[i] >
Ltarget. """
L = 0.0 * self.U.cm
k = 1
while L < Ltarget:
ii = i-k
if ii < 0:
ii = 0
L = 1.5 * Ltarget
else:
L = L + self.dz[ii]
k += 1
L = 0.0 * self.U.cm
k = 1
while L < Ltarget:
ff = i+k
if ff >= self.Nl:
ff = self.Nl-1
L = 1.5 * Ltarget
else:
L = L + self.dz[ff]
k += 1
return ii,ff
def set_optically_thin( self ):
""" Set optically thin ionzation state """
# initialize a chemistry object
#----------------------------------------
if self.rec_meth == 'fixed':
self.fcA_H2 = np.ones(self.Nl) * self.fixed_fcA
self.fcA_He2 = np.ones(self.Nl) * self.fixed_fcA
self.fcA_He3 = np.ones(self.Nl) * self.fixed_fcA
else:
self.fcA_H2 = np.ones(self.Nl)
self.fcA_He2 = np.ones(self.Nl)
self.fcA_He3 = np.ones(self.Nl)
kchem = chemistry.ChemistryRates(
self.T, self.fcA_H2, self.fcA_He2, self.fcA_He3,
H1i = np.ones(self.Nl) * self.rad_src.thin.H1i,
He1i = np.ones(self.Nl) * self.rad_src.thin.He1i,
He2i = np.ones(self.Nl) * self.rad_src.thin.He2i,
fit_name = self.atomic_fit_name,
)
kcool = cooling.CoolingRates(
self.T, self.fcA_H2, self.fcA_He2, self.fcA_He3,
H1h = np.ones(self.Nl) * self.rad_src.thin.H1h,
He1h = np.ones(self.Nl) * self.rad_src.thin.He1h,
He2h = np.ones(self.Nl) * self.rad_src.thin.He2h,
fit_name = self.atomic_fit_name,
)
if self.find_Teq:
x = ozn.Solve_PCTE( self.nH, self.nHe, kchem, kcool, self.z,
self.tol )
self.T = x.Teq
else:
x = ozn.Solve_PCE( self.nH, self.nHe, kchem, self.tol )
self.xH1 = x.H1
self.xH2 = x.H2
self.xHe1 = x.He1
self.xHe2 = x.He2
self.xHe3 = x.He3
# summarize
#----------------------------------------
self.dNH1 = self.dNH * self.xH1
self.dNHe1 = self.dNHe * self.xHe1
self.dNHe2 = self.dNHe * self.xHe2
self.dtauH1_th = self.dNH1 * self.rad_src.th.sigma_H1
self.dtauHe1_th = self.dNHe1 * self.rad_src.th.sigma_He1
self.dtauHe2_th = self.dNHe2 * self.rad_src.th.sigma_He2
self.ne = self.xH2 * self.nH + \
( self.xHe2 + 2.0 * self.xHe3 ) * self.nHe
def set_taus( self, i ):
""" Calculate optical depth at the ionization thresholds of each
species above and below this layer. Does not include a contribution
from the layer itself.
Args:
`i` (int): layer index
"""
self.tauH1_th_lo[i] = np.sum( self.dtauH1_th[0:i] )
self.tauH1_th_hi[i] = np.sum( self.dtauH1_th[i+1:self.Nl] )
self.tauHe1_th_lo[i] = np.sum( self.dtauHe1_th[0:i] )
self.tauHe1_th_hi[i] = np.sum( self.dtauHe1_th[i+1:self.Nl] )
self.tauHe2_th_lo[i] = np.sum( self.dtauHe2_th[0:i] )
self.tauHe2_th_hi[i] = np.sum( self.dtauHe2_th[i+1:self.Nl] )
def return_fcA( self, tau_th ):
""" Given the optical depth at nu_th for any given species, returns
the case A fraction. """
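        # Worked example: with the defaults thresh_P_A = 0.01 and
        # thresh_P_B = 0.99, an optical depth tau_th = ln(2) ~ 0.693 gives
        # P = 1 - exp(-tau_th) = 0.5, so fcB = (0.5 - 0.01) / 0.98 = 0.5 and
        # the returned case A fraction is fcA = 0.5.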
if tau_th < self.thresh_tau_A:
fcA = 1.0
elif tau_th > self.thresh_tau_B:
fcA = 0.0
else:
P = 1.0 - np.exp( -tau_th )
delta = P - self.thresh_P_A
fcB = delta / self.thresh_dPAB
fcA = 1.0 - fcB
return fcA
def set_fcA( self, i ):
""" Calculate case A fraction for each species in this layer.
Args:
`i` (int): layer index
"""
if self.rec_meth == 'fixed':
self.fcA_H2[i] = self.fixed_fcA
self.fcA_He2[i] = self.fixed_fcA
self.fcA_He3[i] = self.fixed_fcA
elif self.rec_meth == 'thresh':
ii = self.ii_H1[i]; ff = self.ff_H1[i]
tau_lo = np.sum( self.dtauH1_th[ii:i] )
fcA_lo = self.return_fcA( tau_lo )
tau_hi = np.sum( self.dtauH1_th[i+1:ff+1] )
fcA_hi = self.return_fcA( tau_hi )
self.fcA_H2[i] = (fcA_lo + fcA_hi) * 0.5
ii = self.ii_He1[i]; ff = self.ff_He1[i]
tau_lo = np.sum( self.dtauHe1_th[ii:i] )
fcA_lo = self.return_fcA( tau_lo )
tau_hi = np.sum( self.dtauHe1_th[i+1:ff+1] )
fcA_hi = self.return_fcA( tau_hi )
self.fcA_He2[i] = (fcA_lo + fcA_hi) * 0.5
ii = self.ii_He2[i]; ff = self.ff_He2[i]
tau_lo = np.sum( self.dtauHe2_th[ii:i] )
fcA_lo = self.return_fcA( tau_lo )
tau_hi = np.sum( self.dtauHe2_th[i+1:ff+1] )
fcA_hi = self.return_fcA( tau_hi )
self.fcA_He3[i] = (fcA_lo + fcA_hi) * 0.5
def set_photoion_rates( self, i ):
""" set photoionization rates for this layer
Args:
`i` (int): layer index
"""
tauH1_th = np.float( self.tauH1_th_lo[i] + 0.5 * self.dtauH1_th[i] )
tauHe1_th = np.float( self.tauHe1_th_lo[i] + 0.5 * self.dtauHe1_th[i] )
tauHe2_th = np.float( self.tauHe2_th_lo[i] + 0.5 * self.dtauHe2_th[i] )
self.H1i[i] = self.rad_src.shld_H1i( tauH1_th, tauHe1_th, tauHe2_th )
self.He1i[i] = self.rad_src.shld_He1i( tauH1_th, tauHe1_th, tauHe2_th )
self.He2i[i] = self.rad_src.shld_He2i( tauH1_th, tauHe1_th, tauHe2_th )
def set_photoheat_rates( self, i ):
""" set photoheating rates for this layer
Args:
`i` (int): layer index
"""
tauH1_th = np.float( self.tauH1_th_lo[i] + 0.5 * self.dtauH1_th[i] )
tauHe1_th = np.float( self.tauHe1_th_lo[i] + 0.5 * self.dtauHe1_th[i] )
tauHe2_th = np.float( self.tauHe2_th_lo[i] + 0.5 * self.dtauHe2_th[i] )
self.H1h[i] = self.rad_src.shld_H1h( tauH1_th, tauHe1_th, tauHe2_th )
self.He1h[i] = self.rad_src.shld_He1h( tauH1_th, tauHe1_th, tauHe2_th )
self.He2h[i] = self.rad_src.shld_He2h( tauH1_th, tauHe1_th, tauHe2_th )
def sweep_slab(self):
""" Performs one sweep through slab. """
for i in xrange(self.Nl):
if self.verbose:
print 'i = ', i
# calc tauXX above and below this layer
# (constant during iterations)
#----------------------------------------
self.set_taus( i )
# calculate fcA (average over directions)
# (constant during iterations)
#----------------------------------------
self.set_fcA( i )
# iterate until we have convergence in the optical
# depth through this layer
#--------------------------------------------------
conv_old = self.dtauH1_th[i] + \
self.dtauHe1_th[i] + self.dtauHe2_th[i]
not_converged = True
itr = 0
while not_converged:
# calculate photoion / chemistry rates
#----------------------------------------
self.set_photoion_rates( i )
self.kchem.set( self.T[i],
self.fcA_H2[i],
self.fcA_He2[i],
self.fcA_He3[i],
H1i = self.H1i[i],
He1i = self.He1i[i],
He2i = self.He2i[i] )
# calculate photoheat / cooling rates
#----------------------------------------
self.set_photoheat_rates( i )
self.kcool.set( self.T[i],
self.fcA_H2[i],
self.fcA_He2[i],
self.fcA_He3[i],
H1h = self.H1h[i],
He1h = self.He1h[i],
He2h = self.He2h[i] )
# if we are finding the equilibrium temperature
# we need to call Solve_PCTE
#----------------------------------------
if self.find_Teq:
x = ozn.Solve_PCTE( np.ones(1) * self.nH[i],
np.ones(1) * self.nHe[i],
self.kchem,
self.kcool,
self.z,
self.tol )
self.T[i] = x.Teq
# otherwise we call Solve_PCE
#----------------------------------------
else:
x = ozn.Solve_PCE( np.ones(1) * self.nH[i],
np.ones(1) * self.nHe[i],
self.kchem, self.tol )
# set the ionization fractions in the object
#----------------------------------------
self.xH1[i] = x.H1
self.xH2[i] = x.H2
self.xHe1[i] = x.He1
self.xHe2[i] = x.He2
self.xHe3[i] = x.He3
# calculate heating rates in layer
#----------------------------------------
self.heatH1[i] = self.H1h[i] * self.nH[i] * self.xH1[i]
self.heatHe1[i] = self.He1h[i] * self.nHe[i] * self.xHe1[i]
self.heatHe2[i] = self.He2h[i] * self.nHe[i] * self.xHe2[i]
self.heat[i] = self.heatH1[i] + \
self.heatHe1[i] + self.heatHe2[i]
# calculate electron density in layer
#----------------------------------------
self.ne[i] = self.xH2[i] * self.nH[i] + \
( self.xHe2[i] + 2 * self.xHe3[i] ) * self.nHe[i]
# calculate tauXX through layer
#----------------------------------------
self.dNH1[i] = self.dNH[i] * self.xH1[i]
self.dNHe1[i] = self.dNHe[i] * self.xHe1[i]
self.dNHe2[i] = self.dNHe[i] * self.xHe2[i]
self.dtauH1_th[i] = self.dNH1[i] * self.rad_src.th.sigma_H1
self.dtauHe1_th[i] = self.dNHe1[i] * self.rad_src.th.sigma_He1
self.dtauHe2_th[i] = self.dNHe2[i] * self.rad_src.th.sigma_He2
# check convergence
#----------------------------------------
conv_new = self.dtauH1_th[i] + \
self.dtauHe1_th[i] + self.dtauHe2_th[i]
if np.abs( conv_new/conv_old - 1.0 ) < self.tol:
not_converged = False
conv_old = conv_new
itr += 1
def finalize_slab( self ):
""" Calculate some final values using fully solved slab. """
# calculate column densities up to each layer
#-----------------------------------------------------------
self.NH1_c = np.zeros(self.Nl) / self.U.cm**2
self.NHe1_c = np.zeros(self.Nl) / self.U.cm**2
self.NHe2_c = np.zeros(self.Nl) / self.U.cm**2
for i in xrange(self.Nl):
self.NH1_c[i] = np.sum( self.dNH1[0:i] ) + 0.5 * self.dNH1[i]
self.NHe1_c[i] = np.sum( self.dNHe1[0:i] ) + 0.5 * self.dNHe1[i]
self.NHe2_c[i] = np.sum( self.dNHe2[0:i] ) + 0.5 * self.dNHe2[i]
# calculate column densities through whole slab
#-----------------------------------------------------------
self.NH1_thru = np.sum( self.dNH * self.xH1 )
self.logNH1_thru = np.log10( self.NH1_thru.magnitude )
self.NHe1_thru = np.sum( self.dNHe * self.xHe1 )
self.logNHe1_thru = np.log10( self.NHe1_thru.magnitude )
self.NHe2_thru = np.sum( self.dNHe * self.xHe2 )
self.logNHe2_thru = np.log10( self.NHe2_thru.magnitude )
# set preferred units
#-----------------------------------------------------------
self.z_c.units = 'kpc'
# delete things we don't need
#-----------------------------------------------------------
del(self.kchem)
del(self.kcool)
# calculate effective photoionization rates
# (i.e. the photoionization rates that result in
# the same ionization fractions if caseA rates
# are used). Note, we assume that the recombination
# radiation is peaked near the ionization thresholds
# and therefore does not alter the temperature.
#------------------------------------------------
kchemA = chemistry.ChemistryRates(
self.T, fcA_H2 = 1.0, fcA_He2 = 1.0, fcA_He3 = 1.0,
H1i = self.H1i, He1i = self.He1i, He2i = self.He2i,
fit_name = self.atomic_fit_name )
self.H1i_eff = kchemA.reH2 * self.ne * self.xH2 / self.xH1 - \
kchemA.ciH1 * self.ne
self.He1i_eff = kchemA.reHe2 * self.ne * self.xHe2 / self.xHe1 - \
kchemA.ciHe1 * self.ne
self.He2i_eff = kchemA.reHe3 * self.ne * self.xHe3 / self.xHe2 - \
kchemA.ciHe2 * self.ne
|
galtayREPO_NAMErabacusPATH_START.@rabacus_extracted@rabacus-master@rabacus@solvers@[email protected]_END.py
|
{
"filename": "muLAnFig.py",
"repo_name": "muLAn-project/muLAn",
"repo_path": "muLAn_extracted/muLAn-master/muLAn/utils/muLAnFig.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
"""Create a figure from muLAn outputs of publication quality"""
# Copyright (c) 2014-2018 Clément Ranc & Arnaud Cassan
# Distributed under the terms of the MIT license
#
# This module is part of software:
# muLAn: gravitational MICROlensing Analysis code
# https://github.com/muLAn-project/muLAn
import ConfigParser as cp
from scipy.interpolate import interp1d
import glob
import numpy as np
from copy import copy
import muLAn.mulan as mulan
import muLAn.packages.general_tools as gtools
import matplotlib.pyplot as plt
class figure():
"""Create a figure from muLAn outputs
Calling muLAnFig
================
fig = muLAnFig.figure(figsize=(10,6), labelposx=0.8,
labelposy=[0.6, 0.9], labelsize=10)
Parameters
----------
figsize : tuple, optional
Figure size (x, y). Default is: figsize=(10,6).
labelposx: float, optional
Horizontal position of the labels of the observatories
names. Possible values are 0 (left) to 1 (right).
Default is: labelposx=0.8.
labelposy: sequence of float, optional
A 2-length sequence [ymin, ymax], which defines the
vertical range where the labels of the observatories names
will be displayed. Possible values are 0 (down) to 1 (up).
Default is: labelposy=[0.6, 0.9].
labelsize=10: float, optional
Font size of the labels of the observatories names.
Default is: labelsize=10
User methods
------------
addinset_caustics : see below
addinset_lightcurve : see below
save : see below
show : see below
Returns
-------
out : figure
Using show will open a matplotlib.pylab interactive plot.
Using save will store the figure in the disk. NB: do not
use show before save, the saved figure will be empty.
Examples
--------
Examples in automatic mode (called from the EVENT/ working directory):
>>> fig = muLAnFig.figure(figsize=(10,6), labelposx=0.83,
labelposy=[0.54, 0.94], labelsize=9)
>>> fig.plot(trange=[7100, 7200], lcrange=[12, 16.8],
resrange=[-0.38, 0.38])
>>> fig.addinset_caustics([0.17, 0.64, 0.2, 0.3], xrange=[-1.75, -1.55],
yrange=[-0.12, 0.13])
>>> fig.addinset_lightcurve([0.15, 0.65, 0.3, 0.3], trange=[7144, 7148],
lcrange=[12, 15.8])
>>> fig.save('Plots/Figure.pdf')
>>> fig.show()
>>> fig = muLAnFig.figure()
>>> fig.plot()
>>> fig.addinset_caustics([0.2, 0.7, 0.2, 0.2])
>>> fig.addinset_lightcurve([0.2, 0.4, 0.2, 0.2])
>>> fig.show()
Example in manual mode:
>>> fig = muLAnFig.figure(labelposx=0.83, labelposy=[0.54, 0.94])
>>> fig.plot(data=[('data1.dat', '#000000', 'Tel1'),
('data2.dat', '#FF00FF', 'Tel2')], lctraj=[('EARTH.dat', 'black')],
trange=[7100, 7200], lcrange=[12, 16.8], resrange=[-0.38, 0.38])
>>> fig.addinset_caustics([0.17, 0.64, 0.2, 0.3], caus=[('caustic.dat', 'red')])
>>> fig.save('Plots/Figure.pdf')
"""
def __init__(self, figsize=(10,6), labelposx=0.8, labelposy=[0.6, 0.9], labelsize=10):
"""Inititalize figure layout and search for muLAn output files"""
self._labelposx = labelposx
self._labelposy = labelposy
self._labelsize = labelsize
self._lccompt = 1
self._causcompt = 1
try:
print " Searching for muLAn outputs...",
self._getconfig()
print "\033[1m\033[32mfound\033[0m"
except:
self._cfgsetup = None
self._cfgobs = None
print "\033[1m\033[35mnot found\033[0m (may lead to an error in non-manual mode)"
try:
print " Searching for muLAn best-fit parameters file...",
self._getbestfitparams()
print "\033[1m\033[32mfound\033[0m"
except:
print "\033[1m\033[35mnot found\033[0m (may lead to an error in non-manual mode)"
# figure layout
plt.close('all')
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
self.fig, (self._LC, self._RES) = plt.subplots(2, sharex=True, figsize=figsize, gridspec_kw={'height_ratios':[3, 1]})
plt.subplots_adjust(left=0.1, bottom=0.1, right=0.97, top=0.97, wspace=None, hspace=0.)
def plot(self, data=None, lctraj=None, trange=None, lcrange=None, resrange=None):
"""Create main figure pannel
Parameters
----------
data: sequence of tuples, optional
A n-length sequence [..., ('data_i.dat', 'color_i', 'label_i'), ...],
where 'data_i.dat' is the ith data file, 'color_i' its color (name or
hexadecimal code), and 'label_i' the name to be displayed on the
figure labels. Default is: use muLAn outputs (data=None).
lctraj: sequence of tuples, optional
A n-length sequence [..., ('lctraj_i.dat', 'color_i'), ...], where
'lctraj_i.dat' is the ith trajectory + light curve file, and
'color_i' its color (name or hexadecimal code).
Default is: use muLAn outputs (lctraj=None).
trange: sequence of float, optional
A 2-length sequence [tmin, tmax], which defines
the range of dates to be plotted in the main plot.
Default is: automatic range (trange=None)
lcrange: sequence of float, optional
A 2-length sequence [magmin, magmax], which defines
the range of magnitudes to be plotted in the main plot.
Default is: automatic range (lcrange=None)
resrange: sequence of float, optional
A 2-length sequence [magmin, magmax], which defines
the range of magnitudes to be plotted in the residuals plot.
Default is: automatic range (resrange=None)
"""
print "\033[1m Creating main figure layout...\033[0m"
# test whether to select outputs from muLAn's .ini files
if (data == None) and (self._cfgobs != None):
data = self._getdata()
if (lctraj == None) and (self._cfgobs != None):
lctraj = self._getlctraj()
self.data = data
self.lctraj = lctraj
if trange: self._RES.set_xlim([trange[0], trange[1]])
if resrange: self._RES.set_ylim([resrange[0], resrange[1]])
if lcrange: self._LC.set_ylim([lcrange[0], lcrange[1]])
self._LC.invert_yaxis()
lwidth = 1.
fontsize = 12
plt.setp(self._LC.spines.values(), linewidth=lwidth)
plt.setp(self._RES.spines.values(), linewidth=lwidth)
self._LC.tick_params(labelsize=fontsize, width=0.8, direction='in', length=8)
self._RES.tick_params(labelsize=fontsize, width=0.8, direction='in', length=8)
self._RES.set_xlabel(r'HJD - 2,450,000', size=fontsize)
self._LC.set_ylabel(r'Magnitude', size=fontsize)
self._RES.set_ylabel(r'Residuals', size=fontsize)
# plot theoretical light curves
for lctraji, color in self.lctraj:
print " Reading theoretical light curve file:\033[3m", lctraji, "\033[0m"
hjdr, amp, magr, xt, yt = np.loadtxt(lctraji, unpack=True)
hjd, mag = self._optimizemc(hjdr, magr)
self._LC.plot(hjd, mag, color=color, linewidth=1)
# observatory names label positions
if not self._labelposx: x = 0.7
else: x = self._labelposx
if not self._labelposy: ymin, ymax = 0.5, 0.7
else: ymin, ymax = self._labelposy
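        # one evenly spaced vertical slot (in figure-fraction coordinates) per observatory label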
y = iter(np.linspace(ymin, ymax, len(self.data), endpoint=False))
# read and plot data and residuals
for datai, color, obsname in self.data:
print " Reading data file:\033[3m", datai, "\033[0m"
ID, hjd, mag, errmag, resmag, amp, erramp, resamp, bkg, seeing, xs, ys, chi, toto, tata = np.loadtxt(datai, unpack=True)
self._LC.errorbar(hjd, mag, errmag, fmt='o', color=color, markersize=4, alpha=0.4, linewidth=1)
self._RES.plot((np.min(hjd), np.max(hjd)), (0., 0.), 'k-', linewidth=0.4)
self._RES.errorbar(hjd, resmag, errmag, fmt='o', color=color, markersize=4, alpha=0.4, linewidth=1)
# display observatory names
self._LC.annotate(r'$\bullet$ ' + obsname, xy=(x, y.next()), xycoords='figure fraction', color=color, fontsize=self._labelsize, bbox={'facecolor':'red', 'alpha':0.5, 'pad':10})
def addinset_lightcurve(self, layout, trange=None, lcrange=None):
"""Add a light curve zoom pannel to the plot
Parameters
----------
layout: sequence of float
A 4-length sequence [left, bottom, width, height], which
defines the position and shape of the inset.
trange: sequence of float, optional
A 2-length sequence [tmin, tmax], which defines
the range of dates to be plotted in the inset plot.
Default is: automatic range (trange=None)
lcrange: sequence of float, optional
A 2-length sequence [magmin, magmax], which defines
the range of magnitudes to be plotted in the inset plot.
Default is: automatic range (lcrange=None)
"""
print "\033[1m Creating light curve inset " + str(self._lccompt) + "...\033[0m"
self._lccompt += 1
        # panel layout
ZLC = self.fig.add_axes(layout)
if trange: ZLC.set_xlim(trange)
if lcrange: ZLC.set_ylim(lcrange)
ZLC.invert_yaxis()
# read and plot observed data and residuals
for datai, color, obsname in self.data:
print " Reading data file:\033[3m", datai, "\033[0m"
ID, hjd, mag, errmag, resmag, amp, erramp, resamp, bkg, seeing, xs, ys, chi, toto, tata = np.loadtxt(datai, unpack=True)
ZLC.errorbar(hjd, mag, errmag, fmt='o', color=color, markersize=4, alpha=0.4, linewidth=1)
# plot theoretical light curves
for lctraji, color in self.lctraj:
print " Reading theoretical light curve file:\033[3m", lctraji, "\033[0m"
hjdr, amp, magr, xt, yt = np.loadtxt(lctraji, unpack=True)
hjd, mag = self._optimizemc(hjdr, magr)
# hjd, amp, mag, xt, yt = np.loadtxt(lctraji, unpack=True)
ZLC.plot(hjd, mag, color=color, linewidth=1)
def addinset_caustics(self, layout, caus=None, src=None, xrange=None, yrange=None):
"""Add a caustic pannel to the plot
Parameters
----------
layout: sequence of float
A 4-length sequence [left, bottom, width, height], which
defines the position and shape of the inset.
caus: sequence of tuples, optional
A n-length sequence [..., ('caus_i.dat', 'color_i'), ...], where
'caus_i.dat' is the ith caustic file, and 'color_i' its color (name or
hexadecimal code). Default is: use muLAn outputs (caus=None).
src: sequence of tuples, optional
A n-length sequence [..., (date, color), ...], where
`date` is the time (HJD) at which the source edge is plotted, and
`color` is the color of the edge of the source, that day.
Default is None (no source plotted). If src='t0', then the source is
            plotted at t0 in black. Other example: src=[(7387.0, 'green'), (7384.8, 'pink')]
xrange: sequence of float, optional
A 2-length sequence [xmin, xmax], which defines
the horizontal range for the caustic plot.
            Default is: automatic range (xrange=None)
yrange: sequence of float, optional
A 2-length sequence [ymin, ymax], which defines
the vertical range for the caustic plot.
Default is: automatic range (yrange=None)
"""
print "\033[1m Creating caustics inset "+ str(self._causcompt) + "...\033[0m"
self._causcompt += 1
        # panel layout
CAU = self.fig.add_axes(layout)
if not (xrange and yrange): CAU.set_aspect('equal')
if xrange: CAU.set_xlim(xrange)
if yrange: CAU.set_ylim(yrange)
# plot trajectories
for lctraji, color in self.lctraj:
print " Reading trajectory file:\033[3m", lctraji, "\033[0m"
hjd, amp, mag, xt, yt = np.loadtxt(lctraji, unpack=True)
CAU.plot(xt, yt, color=color, linewidth=1)
# Plot the source edge
if src != None:
if src == 't0':
time_src = [self._bf['t0']]
color_src = ['k']
else:
time_src = [time for time, color in src]
color_src = [color for time, color in src]
x_interp = interp1d(hjd, xt)
y_interp = interp1d(hjd, yt)
for i in range(len(time_src)):
print " Plotting the source at \033[3m", time_src[i], "\033[0m"
if (time_src[i] < hjd[-1]) & (time_src[i] > hjd[0]):
src_edge = (x_interp(time_src[i]) + 1j * y_interp(time_src[i]))
src_edge = src_edge + self._bf['rho'] * np.exp(1j * np.linspace(0, 2.0*np.pi, 100))
print color_src[i]
CAU.plot(src_edge.real, src_edge.imag, color=color_src[i], linewidth=1)
else:
print " Skipped: the source trajectory does not include that day (choose a day closer than t0)"
# Load caustics
fname = "{:s}CAUSTIC.dat".format(self._pathoutputs)
fcaustics = np.loadtxt(fname, unpack=False, dtype=np.float64)
n_caus = fcaustics.shape[1] / 2
# Load caustic times
if n_caus > 1:
fname = "{:s}CAUSTIC.dat".format(self._pathoutputs)
fcaustics = open(fname, 'r')
for line in fcaustics: break
fcaustics.close()
times = line.replace(" ", "").replace("x", "").replace("y", "").replace("#", "").replace("(", "").replace("\n", "").split(")")
times = np.unique(times)
try:
times[0] = 't0 = {:.6f}'.format(self._bf['t0'])
except AttributeError:
times[0] = "t0"
else:
try:
times = np.atleast_1d(['t0 = {:.6f}'.format(self._bf['t0'])])
except AttributeError:
times = np.atleast_1d(['t0'])
# Plot caustics
if caus == None:
color_caus = ['red', 'Orange', 'SeaGreen', 'LightSeaGreen', 'CornflowerBlue', 'DarkViolet']
else:
color_caus = [color for cau, color in caus]
for i in range(n_caus):
# print " Plotting caustic " + str(i + 1) + "..."
print " Plotting caustic:\033[3m", times[i], "\033[0m"
xc = fcaustics.T[2*i]
yc = fcaustics.T[2*i + 1]
CAU.scatter(xc, yc, marker='.', c=color_caus[i], s=0.1)
color_caus = np.roll(color_caus, -1)
def save(self, figname):
"""Save figure"""
print "\033[1m Saving figure: \033[3m" + figname + "...\033[0m"
plt.savefig(figname)
def show(self):
"""Show figure"""
plt.show()
def _optimizemc(self, x, y, err=0.001):
"""Optimize the sampling of the input curve"""
print " ... optimizing sampling:",
N = len(x)
ts = np.zeros(N, dtype=np.float_)
As = np.zeros(N, dtype=np.float_)
# Algorithm
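        # Greedy curve thinning: from the current anchor i, draw a chord to
        # sample i+p and keep growing p while every intermediate sample stays
        # within `err` of that chord; once a sample deviates, the index just
        # before the failing chord becomes the new anchor and is stored.  The
        # last sample is always appended, so only the kept anchors are returned.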
n = 0
i = 0
while n < N:
cond = True
As[n] = y[i]
ts[n] = x[i]
n += 1
p = 2
while p <= N-1-i: # 2≤p
if np.logical_not(cond):
break
for k in np.arange(p-1)+1: # 1≤k≤p-1
Alin = (x[i+k] - x[i]) * (y[i+p] - y[i]) / (x[i+p] - x[i]) + y[i]
cond = np.abs(Alin - y[i+k]) <= err
if np.logical_not(cond):
i = i+p-1
break
p += 1
if (p == N-i): break
ts[n-1] = x[i]
As[n-1] = y[i]
ts[n] = x[N-1]
As[n] = y[N-1]
xopt = copy(ts[0:n+1])
yopt = copy(As[0:n+1])
# verbose
print "\033[3mkeeping " + str(n + 1) + " points out of " + str(N) + "\033[0m"
return xopt, yopt
def _getdata(self):
"""Get usefull data file names from muLAn's .ini files"""
# Extract required information
data = list()
for i in xrange(len(self._observatories)):
table = [a.strip() for a in self._cfgobs.get('ObservatoriesDetails', self._observatories[i]).split(',')]
fname = "{:s}{:s}.dat".format(self._pathoutputs, self._observatories[i].upper())
data.append((fname, "#" + table[1], table[0]))
return data
def _getlctraj(self):
"""Get usefull light curve/trajetory file names from muLAn's .ini files"""
# Extract required information
traj = list()
colors = np.array(['black', 'blue', 'orange'])
for i in xrange(len(self._observatories)):
table = [a.strip() for a in self._cfgobs.get('ObservatoriesDetails', self._observatories[i]).split(',')]
traj.append(table[4])
traj = np.unique(traj)
traj_final = list()
for a in traj:
fname = "{:s}{:s}.dat".format(self._pathoutputs, a.upper())
traj_final.append((fname, colors[0]))
colors = np.roll(colors, -1)
return traj_final
def _getconfig(self):
"""Load the configuration files *.ini."""
# Path of the event
path_event = mulan.getpath_event()
# Configuration files
fname_setup = "{:s}setup.ini".format(path_event)
fname_advanced = "{:s}advancedsetup.ini".format(path_event)
fname_obs = "{:s}observatories.ini".format(path_event)
# Load configuration files
cfgsetup = cp.SafeConfigParser()
cfgsetup.read([fname_setup, fname_advanced])
cfgobs = cp.SafeConfigParser()
cfgobs.read(path_event + 'observatories.ini')
# text = "Load parameter files..."
# gtools.communicate(cfgsetup, 1, text, opts=[gtools.printoption.level0], prefix=True, newline=True)
# Add the path to the configuration
cfgsetup.set('FullPaths', 'Event', path_event)
# Check the paths
if cfgsetup.get('FullPaths', 'Code').replace(" ", "") != "":
if cfgsetup.get('FullPaths', 'Code')[-1] != '/':
cfgsetup.set('FullPaths', 'Code', cfgsetup.get('FullPaths', 'Code') + '/')
if cfgsetup.get('FullPaths', 'Event')[-1] != '/':
cfgsetup.set('FullPaths', 'Event', cfgsetup.get('FullPaths', 'Event') + '/')
if cfgsetup.get('RelativePaths', 'Data')[-1] != '/':
cfgsetup.set('RelativePaths', 'Data', cfgsetup.get('RelativePaths', 'Data') + '/')
if cfgsetup.get('RelativePaths', 'Plots')[-1] != '/':
cfgsetup.set('RelativePaths', 'Plots', cfgsetup.get('RelativePaths', 'Plots') + '/')
if cfgsetup.get('RelativePaths', 'Chains')[-1] != '/':
cfgsetup.set('RelativePaths', 'Chains', cfgsetup.get('RelativePaths', 'Chains') + '/')
if cfgsetup.get('RelativePaths', 'Outputs')[-1] != '/':
cfgsetup.set('RelativePaths', 'Outputs', cfgsetup.get('RelativePaths', 'Outputs') + '/')
if cfgsetup.get('RelativePaths', 'Archives')[-1] != '/':
cfgsetup.set('RelativePaths', 'Archives', cfgsetup.get('RelativePaths', 'Archives') + '/')
if cfgsetup.get('RelativePaths', 'ModelsHistory')[-1] != '/':
cfgsetup.set('RelativePaths', 'ModelsHistory', cfgsetup.get('RelativePaths', 'ModelsHistory') + '/')
self._cfgsetup = cfgsetup
self._cfgobs = cfgobs
# Data file names
data2find = np.array(self._cfgobs.options('ObservatoriesDetails'))
# Cross-match with data to use
data2use = np.array(self._cfgsetup.options('Observatories'))
data2use = np.array([data2find[i] for i in xrange(len(data2find)) if np.any(data2use == data2find[i])])
# Cross-match with existing files
path = self._cfgsetup.get('FullPaths', 'Event') + self._cfgsetup.get('RelativePaths', 'Outputs')
data_filenames = glob.glob(path + '*')
observatories = [a.split('/')[-1] for a in data_filenames]
data_filenames = [data_filenames[i] for i in xrange(len(data_filenames)) if
np.any(data2use == observatories[i].rpartition('.')[0].lower())]
observatories = [ob.rpartition('.')[0].lower() for ob in observatories if
np.any(data2use == ob.rpartition('.')[0].lower())]
self._observatories = observatories
self._pathoutputs = path
def _getbestfitparams(self):
"""Load the values of the best fit."""
fname = "{:s}Results.txt".format(self._pathoutputs)
file_res = open(fname, 'r')
res = ""
for line in file_res:
res = res + line
file_res.close()
res = res.split("Best-fitting parameters")[1]
res = res.split("Site")[0]
res = res.replace("\n", "").split(" ")
res = [a for a in res if a != ""]
res = [a for a in res if a != "="]
res = np.reshape(res, (len(res)/2, 2))
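        # res is now an (n_params, 2) array of [name, value] string pairs; turn it into a {name: float(value)} dictionary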
bf = dict()
[bf.update({res.T[0][i]: float(res.T[1][i])}) for i in range(res.shape[0])]
self._bf = bf
if __name__ == '__main__':
help(muLAnFig)
|
muLAn-projectREPO_NAMEmuLAnPATH_START.@muLAn_extracted@muLAn-master@muLAn@[email protected]@.PATH_END.py
|
{
"filename": "_ticktextsrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/sunburst/marker/colorbar/_ticktextsrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TicktextsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self,
plotly_name="ticktextsrc",
parent_name="sunburst.marker.colorbar",
**kwargs
):
super(TicktextsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@sunburst@marker@colorbar@[email protected]_END.py
|
{
"filename": "functional_api.py",
"repo_name": "keras-team/keras",
"repo_path": "keras_extracted/keras-master/guides/functional_api.py",
"type": "Python"
}
|
"""
Title: The Functional API
Author: [fchollet](https://twitter.com/fchollet)
Date created: 2019/03/01
Last modified: 2020/04/12
Description: Complete guide to the functional API.
Accelerator: GPU
"""
"""
## Setup
"""
import numpy as np
import keras
from keras import layers
from keras import ops
"""
## Introduction
The Keras *functional API* is a way to create models that are more flexible
than the `keras.Sequential` API. The functional API can handle models
with non-linear topology, shared layers, and even multiple inputs or outputs.
The main idea is that a deep learning model is usually
a directed acyclic graph (DAG) of layers.
So the functional API is a way to build *graphs of layers*.
Consider the following model:
<div class="k-default-codeblock">
```
(input: 784-dimensional vectors)
↧
[Dense (64 units, relu activation)]
↧
[Dense (64 units, relu activation)]
↧
[Dense (10 units, softmax activation)]
↧
(output: logits of a probability distribution over 10 classes)
```
</div>
This is a basic graph with three layers.
To build this model using the functional API, start by creating an input node:
"""
inputs = keras.Input(shape=(784,))
"""
The shape of the data is set as a 784-dimensional vector.
The batch size is always omitted since only the shape of each sample is specified.
If, for example, you have an image input with a shape of `(32, 32, 3)`,
you would use:
"""
# Just for demonstration purposes.
img_inputs = keras.Input(shape=(32, 32, 3))
"""
The `inputs` that is returned contains information about the shape and `dtype`
of the input data that you feed to your model.
Here's the shape:
"""
inputs.shape
"""
Here's the dtype:
"""
inputs.dtype
"""
You create a new node in the graph of layers by calling a layer on this `inputs`
object:
"""
dense = layers.Dense(64, activation="relu")
x = dense(inputs)
"""
The "layer call" action is like drawing an arrow from "inputs" to this layer
you created.
You're "passing" the inputs to the `dense` layer, and you get `x` as the output.
Let's add a few more layers to the graph of layers:
"""
x = layers.Dense(64, activation="relu")(x)
outputs = layers.Dense(10)(x)
"""
At this point, you can create a `Model` by specifying its inputs and outputs
in the graph of layers:
"""
model = keras.Model(inputs=inputs, outputs=outputs, name="mnist_model")
"""
Let's check out what the model summary looks like:
"""
model.summary()
"""
You can also plot the model as a graph:
"""
keras.utils.plot_model(model, "my_first_model.png")
"""
And, optionally, display the input and output shapes of each layer
in the plotted graph:
"""
keras.utils.plot_model(
model, "my_first_model_with_shape_info.png", show_shapes=True
)
"""
This figure and the code are almost identical. In the code version,
the connection arrows are replaced by the call operation.
A "graph of layers" is an intuitive mental image for a deep learning model,
and the functional API is a way to create models that closely mirrors this.
"""
"""
## Training, evaluation, and inference
Training, evaluation, and inference work exactly in the same way for models
built using the functional API as for `Sequential` models.
The `Model` class offers a built-in training loop (the `fit()` method)
and a built-in evaluation loop (the `evaluate()` method). Note
that you can easily [customize these loops](/guides/customizing_what_happens_in_fit/)
to implement training routines beyond supervised learning
(e.g. [GANs](https://keras.io/examples/generative/dcgan_overriding_train_step/)).
Here, load the MNIST image data, reshape it into vectors,
fit the model on the data (while monitoring performance on a validation split),
then evaluate the model on the test data:
"""
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
x_train = x_train.reshape(60000, 784).astype("float32") / 255
x_test = x_test.reshape(10000, 784).astype("float32") / 255
model.compile(
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
optimizer=keras.optimizers.RMSprop(),
metrics=["accuracy"],
)
history = model.fit(
x_train, y_train, batch_size=64, epochs=2, validation_split=0.2
)
test_scores = model.evaluate(x_test, y_test, verbose=2)
print("Test loss:", test_scores[0])
print("Test accuracy:", test_scores[1])
"""
For further reading, see the [training and evaluation](/guides/training_with_built_in_methods/) guide.
"""
"""
## Save and serialize
Saving the model and serialization work the same way for models built using
the functional API as they do for `Sequential` models. The standard way
to save a functional model is to call `model.save()`
to save the entire model as a single file. You can later recreate the same model
from this file, even if the code that built the model is no longer available.
This saved file includes the:
- model architecture
- model weight values (that were learned during training)
- model training config, if any (as passed to `compile()`)
- optimizer and its state, if any (to restart training where you left off)
"""
model.save("my_model.keras")
del model
# Recreate the exact same model purely from the file:
model = keras.models.load_model("my_model.keras")
"""
For details, read the model [serialization & saving](
/guides/serialization_and_saving/) guide.
"""
"""
## Use the same graph of layers to define multiple models
In the functional API, models are created by specifying their inputs
and outputs in a graph of layers. That means that a single
graph of layers can be used to generate multiple models.
In the example below, you use the same stack of layers to instantiate two models:
an `encoder` model that turns image inputs into 16-dimensional vectors,
and an end-to-end `autoencoder` model for training.
"""
encoder_input = keras.Input(shape=(28, 28, 1), name="img")
x = layers.Conv2D(16, 3, activation="relu")(encoder_input)
x = layers.Conv2D(32, 3, activation="relu")(x)
x = layers.MaxPooling2D(3)(x)
x = layers.Conv2D(32, 3, activation="relu")(x)
x = layers.Conv2D(16, 3, activation="relu")(x)
encoder_output = layers.GlobalMaxPooling2D()(x)
encoder = keras.Model(encoder_input, encoder_output, name="encoder")
encoder.summary()
x = layers.Reshape((4, 4, 1))(encoder_output)
x = layers.Conv2DTranspose(16, 3, activation="relu")(x)
x = layers.Conv2DTranspose(32, 3, activation="relu")(x)
x = layers.UpSampling2D(3)(x)
x = layers.Conv2DTranspose(16, 3, activation="relu")(x)
decoder_output = layers.Conv2DTranspose(1, 3, activation="relu")(x)
autoencoder = keras.Model(encoder_input, decoder_output, name="autoencoder")
autoencoder.summary()
"""
Here, the decoding architecture is strictly symmetrical
to the encoding architecture, so the output shape is the same as
the input shape `(28, 28, 1)`.
The reverse of a `Conv2D` layer is a `Conv2DTranspose` layer,
and the reverse of a `MaxPooling2D` layer is an `UpSampling2D` layer.
"""
"""
## All models are callable, just like layers
You can treat any model as if it were a layer by invoking it on an `Input` or
on the output of another layer. By calling a model you aren't just reusing
the architecture of the model, you're also reusing its weights.
To see this in action, here's a different take on the autoencoder example that
creates an encoder model, a decoder model, and chains them in two calls
to obtain the autoencoder model:
"""
encoder_input = keras.Input(shape=(28, 28, 1), name="original_img")
x = layers.Conv2D(16, 3, activation="relu")(encoder_input)
x = layers.Conv2D(32, 3, activation="relu")(x)
x = layers.MaxPooling2D(3)(x)
x = layers.Conv2D(32, 3, activation="relu")(x)
x = layers.Conv2D(16, 3, activation="relu")(x)
encoder_output = layers.GlobalMaxPooling2D()(x)
encoder = keras.Model(encoder_input, encoder_output, name="encoder")
encoder.summary()
decoder_input = keras.Input(shape=(16,), name="encoded_img")
x = layers.Reshape((4, 4, 1))(decoder_input)
x = layers.Conv2DTranspose(16, 3, activation="relu")(x)
x = layers.Conv2DTranspose(32, 3, activation="relu")(x)
x = layers.UpSampling2D(3)(x)
x = layers.Conv2DTranspose(16, 3, activation="relu")(x)
decoder_output = layers.Conv2DTranspose(1, 3, activation="relu")(x)
decoder = keras.Model(decoder_input, decoder_output, name="decoder")
decoder.summary()
autoencoder_input = keras.Input(shape=(28, 28, 1), name="img")
encoded_img = encoder(autoencoder_input)
decoded_img = decoder(encoded_img)
autoencoder = keras.Model(autoencoder_input, decoded_img, name="autoencoder")
autoencoder.summary()
"""
As you can see, the model can be nested: a model can contain sub-models
(since a model is just like a layer).
A common use case for model nesting is *ensembling*.
For example, here's how to ensemble a set of models into a single model
that averages their predictions:
"""
def get_model():
inputs = keras.Input(shape=(128,))
outputs = layers.Dense(1)(inputs)
return keras.Model(inputs, outputs)
model1 = get_model()
model2 = get_model()
model3 = get_model()
inputs = keras.Input(shape=(128,))
y1 = model1(inputs)
y2 = model2(inputs)
y3 = model3(inputs)
outputs = layers.average([y1, y2, y3])
ensemble_model = keras.Model(inputs=inputs, outputs=outputs)
"""
## Manipulate complex graph topologies
### Models with multiple inputs and outputs
The functional API makes it easy to manipulate multiple inputs and outputs.
This cannot be handled with the `Sequential` API.
For example, if you're building a system for ranking customer issue tickets by
priority and routing them to the correct department,
then the model will have three inputs:
- the title of the ticket (text input),
- the text body of the ticket (text input), and
- any tags added by the user (categorical input)
This model will have two outputs:
- the priority score between 0 and 1 (scalar sigmoid output), and
- the department that should handle the ticket (softmax output
over the set of departments).
You can build this model in a few lines with the functional API:
"""
num_tags = 12 # Number of unique issue tags
num_words = 10000 # Size of vocabulary obtained when preprocessing text data
num_departments = 4 # Number of departments for predictions
title_input = keras.Input(
shape=(None,), name="title"
) # Variable-length sequence of ints
body_input = keras.Input(
shape=(None,), name="body"
) # Variable-length sequence of ints
tags_input = keras.Input(
shape=(num_tags,), name="tags"
) # Binary vectors of size `num_tags`
# Embed each word in the title into a 64-dimensional vector
title_features = layers.Embedding(num_words, 64)(title_input)
# Embed each word in the text into a 64-dimensional vector
body_features = layers.Embedding(num_words, 64)(body_input)
# Reduce sequence of embedded words in the title into a single 128-dimensional vector
title_features = layers.LSTM(128)(title_features)
# Reduce sequence of embedded words in the body into a single 32-dimensional vector
body_features = layers.LSTM(32)(body_features)
# Merge all available features into a single large vector via concatenation
x = layers.concatenate([title_features, body_features, tags_input])
# Stick a logistic regression for priority prediction on top of the features
priority_pred = layers.Dense(1, name="priority")(x)
# Stick a department classifier on top of the features
department_pred = layers.Dense(num_departments, name="department")(x)
# Instantiate an end-to-end model predicting both priority and department
model = keras.Model(
inputs=[title_input, body_input, tags_input],
outputs={"priority": priority_pred, "department": department_pred},
)
"""
Now plot the model:
"""
keras.utils.plot_model(
model, "multi_input_and_output_model.png", show_shapes=True
)
"""
When compiling this model, you can assign different losses to each output.
You can even assign different weights to each loss -- to modulate
their contribution to the total training loss.
"""
model.compile(
optimizer=keras.optimizers.RMSprop(1e-3),
loss=[
keras.losses.BinaryCrossentropy(from_logits=True),
keras.losses.CategoricalCrossentropy(from_logits=True),
],
loss_weights=[1.0, 0.2],
)
"""
Since the output layers have different names, you could also specify
the losses and loss weights with the corresponding layer names:
"""
model.compile(
optimizer=keras.optimizers.RMSprop(1e-3),
loss={
"priority": keras.losses.BinaryCrossentropy(from_logits=True),
"department": keras.losses.CategoricalCrossentropy(from_logits=True),
},
loss_weights={"priority": 1.0, "department": 0.2},
)
"""
Train the model by passing lists of NumPy arrays of inputs and targets:
"""
# Dummy input data
title_data = np.random.randint(num_words, size=(1280, 10))
body_data = np.random.randint(num_words, size=(1280, 100))
tags_data = np.random.randint(2, size=(1280, num_tags)).astype("float32")
# Dummy target data
priority_targets = np.random.random(size=(1280, 1))
dept_targets = np.random.randint(2, size=(1280, num_departments))
model.fit(
{"title": title_data, "body": body_data, "tags": tags_data},
{"priority": priority_targets, "department": dept_targets},
epochs=2,
batch_size=32,
)
"""
When calling fit with a `Dataset` object, it should yield either a
tuple of lists like `([title_data, body_data, tags_data], [priority_targets, dept_targets])`
or a tuple of dictionaries like
`({'title': title_data, 'body': body_data, 'tags': tags_data}, {'priority': priority_targets, 'department': dept_targets})`.
For more detailed explanation, refer to the [training and evaluation](/guides/training_with_built_in_methods/) guide.
"""
"""
### A toy ResNet model
In addition to models with multiple inputs and outputs,
the functional API makes it easy to manipulate non-linear connectivity
topologies -- these are models with layers that are not connected sequentially,
which the `Sequential` API cannot handle.
A common use case for this is residual connections.
Let's build a toy ResNet model for CIFAR10 to demonstrate this:
"""
inputs = keras.Input(shape=(32, 32, 3), name="img")
x = layers.Conv2D(32, 3, activation="relu")(inputs)
x = layers.Conv2D(64, 3, activation="relu")(x)
block_1_output = layers.MaxPooling2D(3)(x)
x = layers.Conv2D(64, 3, activation="relu", padding="same")(block_1_output)
x = layers.Conv2D(64, 3, activation="relu", padding="same")(x)
block_2_output = layers.add([x, block_1_output])
x = layers.Conv2D(64, 3, activation="relu", padding="same")(block_2_output)
x = layers.Conv2D(64, 3, activation="relu", padding="same")(x)
block_3_output = layers.add([x, block_2_output])
x = layers.Conv2D(64, 3, activation="relu")(block_3_output)
x = layers.GlobalAveragePooling2D()(x)
x = layers.Dense(256, activation="relu")(x)
x = layers.Dropout(0.5)(x)
outputs = layers.Dense(10)(x)
model = keras.Model(inputs, outputs, name="toy_resnet")
model.summary()
"""
Plot the model:
"""
keras.utils.plot_model(model, "mini_resnet.png", show_shapes=True)
"""
Now train the model:
"""
(x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data()
x_train = x_train.astype("float32") / 255.0
x_test = x_test.astype("float32") / 255.0
y_train = keras.utils.to_categorical(y_train, 10)
y_test = keras.utils.to_categorical(y_test, 10)
model.compile(
optimizer=keras.optimizers.RMSprop(1e-3),
loss=keras.losses.CategoricalCrossentropy(from_logits=True),
metrics=["acc"],
)
# We restrict the data to the first 1000 samples so as to limit execution time
# on Colab. Try to train on the entire dataset until convergence!
model.fit(
x_train[:1000],
y_train[:1000],
batch_size=64,
epochs=1,
validation_split=0.2,
)
"""
## Shared layers
Another good use for the functional API is models that use *shared layers*.
Shared layers are layer instances that are reused multiple times in the same model --
they learn features that correspond to multiple paths in the graph-of-layers.
Shared layers are often used to encode inputs from similar spaces
(say, two different pieces of text that feature similar vocabulary).
They enable sharing of information across these different inputs,
and they make it possible to train such a model on less data.
If a given word is seen in one of the inputs,
that will benefit the processing of all inputs that pass through the shared layer.
To share a layer in the functional API, call the same layer instance multiple times.
For instance, here's an `Embedding` layer shared across two different text inputs:
"""
# Embedding for 1000 unique words mapped to 128-dimensional vectors
shared_embedding = layers.Embedding(1000, 128)
# Variable-length sequence of integers
text_input_a = keras.Input(shape=(None,), dtype="int32")
# Variable-length sequence of integers
text_input_b = keras.Input(shape=(None,), dtype="int32")
# Reuse the same layer to encode both inputs
encoded_input_a = shared_embedding(text_input_a)
encoded_input_b = shared_embedding(text_input_b)
"""
## Extract and reuse nodes in the graph of layers
Because the graph of layers you are manipulating is a static data structure,
it can be accessed and inspected. And this is how you are able to plot
functional models as images.
This also means that you can access the activations of intermediate layers
("nodes" in the graph) and reuse them elsewhere --
which is very useful for something like feature extraction.
Let's look at an example. This is a VGG19 model with weights pretrained on ImageNet:
"""
vgg19 = keras.applications.VGG19()
"""
And these are the intermediate activations of the model,
obtained by querying the graph data structure:
"""
features_list = [layer.output for layer in vgg19.layers]
"""
Use these features to create a new feature-extraction model that returns
the values of the intermediate layer activations:
"""
feat_extraction_model = keras.Model(inputs=vgg19.input, outputs=features_list)
img = np.random.random((1, 224, 224, 3)).astype("float32")
extracted_features = feat_extraction_model(img)
"""
This comes in handy for tasks like
[neural style transfer](https://keras.io/examples/generative/neural_style_transfer/),
among other things.
"""
"""
## Extend the API using custom layers
`keras` includes a wide range of built-in layers, for example:
- Convolutional layers: `Conv1D`, `Conv2D`, `Conv3D`, `Conv2DTranspose`
- Pooling layers: `MaxPooling1D`, `MaxPooling2D`, `MaxPooling3D`, `AveragePooling1D`
- RNN layers: `GRU`, `LSTM`, `ConvLSTM2D`
- `BatchNormalization`, `Dropout`, `Embedding`, etc.
But if you don't find what you need, it's easy to extend the API by creating
your own layers. All layers subclass the `Layer` class and implement:
- `call` method, that specifies the computation done by the layer.
- `build` method, that creates the weights of the layer (this is just a style
convention since you can create weights in `__init__`, as well).
To learn more about creating layers from scratch, read
[custom layers and models](/guides/making_new_layers_and_models_via_subclassing) guide.
The following is a basic implementation of `keras.layers.Dense`:
"""
class CustomDense(layers.Layer):
def __init__(self, units=32):
super().__init__()
self.units = units
def build(self, input_shape):
self.w = self.add_weight(
shape=(input_shape[-1], self.units),
initializer="random_normal",
trainable=True,
)
self.b = self.add_weight(
shape=(self.units,), initializer="random_normal", trainable=True
)
def call(self, inputs):
return ops.matmul(inputs, self.w) + self.b
inputs = keras.Input((4,))
outputs = CustomDense(10)(inputs)
model = keras.Model(inputs, outputs)
"""
For serialization support in your custom layer, define a `get_config()`
method that returns the constructor arguments of the layer instance:
"""
class CustomDense(layers.Layer):
def __init__(self, units=32):
super().__init__()
self.units = units
def build(self, input_shape):
self.w = self.add_weight(
shape=(input_shape[-1], self.units),
initializer="random_normal",
trainable=True,
)
self.b = self.add_weight(
shape=(self.units,), initializer="random_normal", trainable=True
)
def call(self, inputs):
return ops.matmul(inputs, self.w) + self.b
def get_config(self):
return {"units": self.units}
inputs = keras.Input((4,))
outputs = CustomDense(10)(inputs)
model = keras.Model(inputs, outputs)
config = model.get_config()
new_model = keras.Model.from_config(
config, custom_objects={"CustomDense": CustomDense}
)
"""
Optionally, implement the class method `from_config(cls, config)` which is used
when recreating a layer instance given its config dictionary.
The default implementation of `from_config` is:
```python
def from_config(cls, config):
return cls(**config)
```
"""
"""
## When to use the functional API
Should you use the Keras functional API to create a new model,
or just subclass the `Model` class directly? In general, the functional API
is higher-level, easier and safer, and has a number of
features that subclassed models do not support.
However, model subclassing provides greater flexibility when building models
that are not easily expressible as directed acyclic graphs of layers.
For example, you could not implement a Tree-RNN with the functional API
and would have to subclass `Model` directly.
For an in-depth look at the differences between the functional API and
model subclassing, read
[What are Symbolic and Imperative APIs in TensorFlow 2.0?](https://blog.tensorflow.org/2019/01/what-are-symbolic-and-imperative-apis.html).
### Functional API strengths:
The following properties are also true for Sequential models
(which are also data structures), but are not true for subclassed models
(which are Python bytecode, not data structures).
#### Less verbose
There is no `super().__init__(...)`, no `def call(self, ...):`, etc.
Compare:
```python
inputs = keras.Input(shape=(32,))
x = layers.Dense(64, activation='relu')(inputs)
outputs = layers.Dense(10)(x)
mlp = keras.Model(inputs, outputs)
```
With the subclassed version:
```python
class MLP(keras.Model):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.dense_1 = layers.Dense(64, activation='relu')
self.dense_2 = layers.Dense(10)
def call(self, inputs):
x = self.dense_1(inputs)
return self.dense_2(x)
# Instantiate the model.
mlp = MLP()
# Necessary to create the model's state.
# The model doesn't have a state until it's called at least once.
_ = mlp(ops.zeros((1, 32)))
```
#### Model validation while defining its connectivity graph
In the functional API, the input specification (shape and dtype) is created
in advance (using `Input`). Every time you call a layer,
the layer checks that the specification passed to it matches its assumptions,
and it will raise a helpful error message if not.
This guarantees that any model you can build with the functional API will run.
All debugging -- other than convergence-related debugging --
happens statically during the model construction and not at execution time.
This is similar to type checking in a compiler.
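For instance, here is a quick sketch of the kind of mistake this catches (the
exact error message depends on the backend, but a rank mismatch like the one
below is rejected as soon as the layer is called):
```python
inputs_2d = keras.Input(shape=(32,))
try:
    layers.Conv2D(16, 3)(inputs_2d)  # Conv2D expects 4D inputs: (batch, H, W, C)
except ValueError as err:
    print("Rejected at graph-construction time:", err)
```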
#### A functional model is plottable and inspectable
You can plot the model as a graph, and you can easily access intermediate nodes
in this graph. For example, to extract and reuse the activations of intermediate
layers (as seen in a previous example):
```python
features_list = [layer.output for layer in vgg19.layers]
feat_extraction_model = keras.Model(inputs=vgg19.input, outputs=features_list)
```
#### A functional model can be serialized or cloned
Because a functional model is a data structure rather than a piece of code,
it is safely serializable and can be saved as a single file
that allows you to recreate the exact same model
without having access to any of the original code.
See the [serialization & saving guide](/guides/serialization_and_saving/).
To serialize a subclassed model, it is necessary for the implementer
to specify a `get_config()`
and `from_config()` method at the model level.
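For example, a functional model can be cloned directly from its graph
(a minimal sketch: `clone_model` rebuilds the same architecture with freshly
initialized weights):
```python
clone = keras.models.clone_model(model)
clone.summary()
```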
### Functional API weakness:
#### It does not support dynamic architectures
The functional API treats models as DAGs of layers.
This is true for most deep learning architectures, but not all -- for example,
recursive networks or Tree RNNs do not follow this assumption and cannot
be implemented in the functional API.
"""
"""
## Mix-and-match API styles
Choosing between the functional API or Model subclassing isn't a
binary decision that restricts you into one category of models.
All models in the `keras` API can interact with each other, whether they're
`Sequential` models, functional models, or subclassed models that are written
from scratch.
You can always use a functional model or `Sequential` model
as part of a subclassed model or layer:
"""
units = 32
timesteps = 10
input_dim = 5
# Define a Functional model
inputs = keras.Input((None, units))
x = layers.GlobalAveragePooling1D()(inputs)
outputs = layers.Dense(1)(x)
model = keras.Model(inputs, outputs)
class CustomRNN(layers.Layer):
def __init__(self):
super().__init__()
self.units = units
self.projection_1 = layers.Dense(units=units, activation="tanh")
self.projection_2 = layers.Dense(units=units, activation="tanh")
# Our previously-defined Functional model
self.classifier = model
def call(self, inputs):
outputs = []
state = ops.zeros(shape=(inputs.shape[0], self.units))
for t in range(inputs.shape[1]):
x = inputs[:, t, :]
h = self.projection_1(x)
y = h + self.projection_2(state)
state = y
outputs.append(y)
features = ops.stack(outputs, axis=1)
print(features.shape)
return self.classifier(features)
rnn_model = CustomRNN()
_ = rnn_model(ops.zeros((1, timesteps, input_dim)))
"""
You can use any subclassed layer or model in the functional API
as long as it implements a `call` method that follows one of the following patterns:
- `call(self, inputs, **kwargs)` --
Where `inputs` is a tensor or a nested structure of tensors (e.g. a list of tensors),
and where `**kwargs` are non-tensor arguments (non-inputs).
- `call(self, inputs, training=None, **kwargs)` --
Where `training` is a boolean indicating whether the layer should behave
in training mode or in inference mode.
- `call(self, inputs, mask=None, **kwargs)` --
Where `mask` is a boolean mask tensor (useful for RNNs, for instance).
- `call(self, inputs, training=None, mask=None, **kwargs)` --
Of course, you can have both masking and training-specific behavior at the same time.
Additionally, if you implement the `get_config` method on your custom Layer or model,
the functional models you create will still be serializable and cloneable.
Here's a quick example of a custom RNN, written from scratch,
being used in a functional model:
"""
units = 32
timesteps = 10
input_dim = 5
batch_size = 16
class CustomRNN(layers.Layer):
def __init__(self):
super().__init__()
self.units = units
self.projection_1 = layers.Dense(units=units, activation="tanh")
self.projection_2 = layers.Dense(units=units, activation="tanh")
self.classifier = layers.Dense(1)
def call(self, inputs):
outputs = []
state = ops.zeros(shape=(inputs.shape[0], self.units))
for t in range(inputs.shape[1]):
x = inputs[:, t, :]
h = self.projection_1(x)
y = h + self.projection_2(state)
state = y
outputs.append(y)
features = ops.stack(outputs, axis=1)
return self.classifier(features)
# Note that you specify a static batch size for the inputs with the `batch_shape`
# arg, because the inner computation of `CustomRNN` requires a static batch size
# (when you create the `state` zeros tensor).
inputs = keras.Input(batch_shape=(batch_size, timesteps, input_dim))
x = layers.Conv1D(32, 3)(inputs)
outputs = CustomRNN()(x)
model = keras.Model(inputs, outputs)
rnn_model = CustomRNN()
_ = rnn_model(ops.zeros((1, 10, 5)))
|
keras-teamREPO_NAMEkerasPATH_START.@keras_extracted@keras-master@guides@[email protected]_END.py
|
{
"filename": "test_combined_visualise.py",
"repo_name": "rhayes777/PyAutoFit",
"repo_path": "PyAutoFit_extracted/PyAutoFit-main/test_autofit/analysis/test_combined_visualise.py",
"type": "Python"
}
|
import pytest
import autofit as af
class Analysis(af.Analysis):
def visualize(self, paths, instance, during_analysis):
assert isinstance(instance, af.Gaussian)
assert during_analysis is True
paths.output_path.mkdir(parents=True, exist_ok=True)
with open(f"{paths.output_path}/visualize.txt", "w+") as f:
f.write("test")
def visualize_before_fit(self, paths, model):
assert model.cls is af.Gaussian
paths.output_path.mkdir(parents=True, exist_ok=True)
with open(f"{paths.output_path}/visualize_before_fit.txt", "w+") as f:
f.write("test")
@pytest.fixture(name="analysis")
def make_analysis():
return Analysis()
@pytest.fixture(name="paths")
def make_paths():
return af.DirectoryPaths()
def test_visualize(analysis, paths):
analysis.visualize(paths, af.Gaussian(), True)
assert (paths.output_path / "visualize.txt").exists()
@pytest.fixture(name="combined")
def make_combined(analysis):
combined = analysis + analysis
combined.n_cores = 2
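    # with n_cores > 1 the combined analysis uses a worker pool, terminated below once the test is done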
yield combined
combined._analysis_pool.terminate()
@pytest.fixture(name="analyses_path")
def make_analyses_path(paths):
return paths.output_path / "analyses"
def test_combined_visualize(
combined,
paths,
analyses_path,
):
combined.visualize(
paths,
af.Gaussian(),
True,
)
assert (analyses_path / "analysis_0/visualize.txt").exists()
assert (analyses_path / "analysis_1/visualize.txt").exists()
def test_visualize_before_fit(
combined,
paths,
analyses_path,
):
combined.visualize_before_fit(
paths,
af.Model(af.Gaussian),
)
assert (analyses_path / "analysis_0/visualize_before_fit.txt").exists()
assert (analyses_path / "analysis_1/visualize_before_fit.txt").exists()
|
rhayes777REPO_NAMEPyAutoFitPATH_START.@PyAutoFit_extracted@PyAutoFit-main@test_autofit@analysis@[email protected]_END.py
|
{
"filename": "README.md",
"repo_name": "GBTSpectroscopy/gbtpipe",
"repo_path": "gbtpipe_extracted/gbtpipe-master/README.md",
"type": "Markdown"
}
|
The `gbtpipe` package reduces and maps on-the-fly (OTF) spectral-line mapping data taken at the GBT. The module consists of two main components:
* An installable version of the GBT pipeline, forked from NRAO's [gbt-pipeline](https://github.com/nrao/gbt-pipeline) package. This is adapted for use in ARGUS operations.
* An On-the-fly gridding package that builds scan data into spectral line data cubes.
**pipeline**
The pipeline component of `gbtpipe` is primarily used to calibrate VEGAS spectrometer data generated with the ARGUS W-band focal-plane array. For other cases, such as the KFPA, the gbt-pipeline works well and does not require additional changes. For ARGUS, the calibration process requires a custom implementation of the switching modes in order to use the vane calibration.
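A rough sketch of a typical workflow is shown below; the entry-point name and keyword arguments are indicative only, so check the package docstrings for the exact API:
```python
# Hypothetical example: grid a set of calibrated ARGUS OTF scans into a cube.
# The griddata entry point and its keywords are assumptions, not documented API.
import glob
import gbtpipe

scanfiles = glob.glob('rawdata/*.fits')
gbtpipe.griddata(filelist=scanfiles, outdir='cubes/')
```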
|
GBTSpectroscopyREPO_NAMEgbtpipePATH_START.@gbtpipe_extracted@[email protected]@.PATH_END.py
|
{
"filename": "SiGaps_08_VG03_f045.ipynb",
"repo_name": "Echelle/AO_bonding_paper",
"repo_path": "AO_bonding_paper_extracted/AO_bonding_paper-master/notebooks/SiGaps_08_VG03_f045.ipynb",
"type": "Jupyter Notebook"
}
|
### This IPython Notebook is for performing a fit and generating a figure of the spectrum of sample VG03, in the region with ~4.5% fill factor
The filename of the figure is **VG03_f045.pdf**.
Author: Michael Gully-Santiago, `[email protected]`
Date: January 13, 2015
```python
%pylab inline
import emcee
import triangle
import pandas as pd
import seaborn as sns
from astroML.decorators import pickle_results
```
Populating the interactive namespace from numpy and matplotlib
```python
sns.set_context("paper", font_scale=2.0, rc={"lines.linewidth": 2.5})
sns.set(style="ticks")
```
Read in the data. We want "VG03_pos2".
```python
df = pd.read_csv('../data/cln_20130218_cary5000.csv', index_col=0)
df = df[df.index > 1250.0]
```
Import all the local models, saved locally as `etalon.py`. See the paper for derivations of these equations.
```python
from etalon import *
np.random.seed(78704) #My old zip code
```
```python
# Introduce the Real data
x = df.index.values
N = len(x)
# Define T_DSP for the model
T_DSP = T_gap_Si(x, 0.0)
n1 = sellmeier_Si(x)
# Define uncertainty
yerr = 0.0002*np.ones(N)
iid_cov = np.diag(yerr ** 2)
# Select the spectrum of interest
# Normalize the spectrum by measured DSP Si wafer.
y = df.VG03_pos2/df.VG06
```
Define the likelihood.
```python
def lnlike(d, f, lna, lns):
a, s = np.exp(lna), np.exp(lns)
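    # a and s are the amplitude and length scale of a squared-exponential (RBF)
    # covariance; adding it to the iid measurement covariance lets the likelihood
    # marginalize over smooth, correlated residuals between the model and the data.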
off_diag_terms = a**2 * np.exp(-0.5 * (x[:, None] - x[None, :])**2 / s**2)
C = iid_cov + off_diag_terms
sgn, logdet = np.linalg.slogdet(C)
if sgn <= 0:
return -np.inf
r = y - T_gap_Si_withFF_fast(x, d, f, n1)/T_DSP
return -0.5 * (np.dot(r, np.linalg.solve(C, r)) + logdet)
```
Define the prior.
```python
def lnprior(d, f, lna, lns):
if not (4050 < d < 4200 and 0.0 < f < 0.5 and -12 < lna < -2 and 0 < lns < 10):
return -np.inf
return 0.0
```
Combine likelihood and prior to obtain the posterior.
```python
def lnprob(p):
lp = lnprior(*p)
if not np.isfinite(lp):
return -np.inf
return lp + lnlike(*p)
```
Set up `emcee`.
```python
@pickle_results('SiGaps_08_VG03_f045-sampler.pkl')
def hammer_time(ndim, nwalkers, d_Guess, f_Guess, a_Guess, s_Guess, nburnins, ntrials):
# Initialize the walkers
p0 = np.array([d_Guess, f_Guess, np.log(a_Guess), np.log(s_Guess)])
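    # scatter each walker in a ~1% Gaussian ball around the initial guess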
pos = [p0 + 1.0e-2*p0 * np.random.randn(ndim) for i in range(nwalkers)]
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob)
pos, lp, state = sampler.run_mcmc(pos, nburnins)
sampler.reset()
pos, lp, state = sampler.run_mcmc(pos, ntrials)
return sampler
```
Set up the initial conditions.
```python
np.random.seed(78704)
ndim, nwalkers = 4, 32
d_Guess = 4120.0
f_Guess = 0.045
a_Guess = 0.0016
s_Guess = 25.0
nburnins = 300
ntrials = 1000
```
Run the burn-in phase. Run the full MCMC. Pickle the results.
```python
sampler = hammer_time(ndim, nwalkers, d_Guess, f_Guess, a_Guess, s_Guess, nburnins, ntrials)
```
@pickle_results: computing results and saving to 'SiGaps_08_VG03_f045-sampler.pkl'
Linearize $a$ and $s$ for easy inspection of the values.
```python
chain = sampler.chain
samples_lin = copy(sampler.flatchain)
samples_lin[:, 2:] = np.exp(samples_lin[:, 2:])
```
Inspect the chain.
```python
fig, axes = plt.subplots(4, 1, figsize=(5, 6), sharex=True)
fig.subplots_adjust(left=0.1, bottom=0.1, right=0.96, top=0.98,
wspace=0.0, hspace=0.05)
[a.plot(np.arange(chain.shape[1]), chain[:, :, i].T, "k", alpha=0.5)
for i, a in enumerate(axes)]
[a.set_ylabel("${0}$".format(l)) for a, l in zip(axes, ["d", "f", "\ln a", "\ln s"])]
axes[-1].set_xlim(0, chain.shape[1])
axes[-1].set_xlabel("iteration");
```

Make a triangle corner plot.
```python
fig = triangle.corner(samples_lin,
labels=map("${0}$".format, ["d", "f", "a", "s"]),
quantiles=[0.16, 0.84])
```
Quantiles:
[(0.16, 4089.5832276737651), (0.84, 4098.2914836921846)]
Quantiles:
[(0.16, 0.045107373191580022), (0.84, 0.046419474652892354)]
Quantiles:
[(0.16, 0.0017329038120982318), (0.84, 0.0020407225903503303)]
Quantiles:
[(0.16, 11.37520415783132), (0.84, 12.382196837791241)]

```python
fig = triangle.corner(samples_lin[:,0:2],
labels=map("${0}$".format, ["d", "f"]),
quantiles=[0.16, 0.84])
plt.savefig("VG03p2_corner.pdf")
```
Quantiles:
[(0.16, 4089.5832276737651), (0.84, 4098.2914836921846)]
Quantiles:
[(0.16, 0.045107373191580022), (0.84, 0.046419474652892354)]

Calculate confidence intervals.
```python
d_mcmc, f_mcmc, a_mcmc, s_mcmc = map(lambda v: (v[1], v[2]-v[1], v[1]-v[0]),
zip(*np.percentile(samples_lin, [16, 50, 84],
axis=0)))
d_mcmc, f_mcmc, a_mcmc, s_mcmc
```
((4093.8052083810621, 4.4862753111224265, 4.221980707297007),
(0.045763037618155915, 0.00065643703473643872, 0.00065566442657589291),
(0.001874827095800039, 0.00016589549455029123, 0.00014192328370180723),
(11.856952712671731, 0.52524412511951013, 0.48174855484041146))
```python
print "{:.0f}^{{+{:.0f}}}_{{-{:.0f}}}".format(*d_mcmc)
print "{:.3f}^{{+{:.3f}}}_{{-{:.3f}}}".format(*f_mcmc)
```
4094^{+4}_{-4}
0.046^{+0.001}_{-0.001}
Overlay draws from the Gaussian Process.
```python
plt.figure(figsize=(6,3))
for d, f, a, s in samples_lin[np.random.randint(len(samples_lin), size=60)]:
off_diag_terms = a**2 * np.exp(-0.5 * (x[:, None] - x[None, :])**2 / s**2)
C = iid_cov + off_diag_terms
fit = T_gap_Si_withFF_fast(x, d, f, n1)/T_DSP
vec = np.random.multivariate_normal(fit, C)
plt.plot(x, vec,"-b", alpha=0.06)
plt.step(x, y,color="k", label='Measurement')
fit = T_gap_Si_withFF_fast(x, d_mcmc[0], f_mcmc[0], n1)/T_DSP
fit_label = 'Model with $d={:.0f}$ nm, $f={:.3f}$'.format(d_mcmc[0], f_mcmc[0])
plt.plot(x, fit, '--', color=sns.xkcd_rgb["pale red"], alpha=1.0, label=fit_label)
plt.plot([-10, -9], [-10, -9],"-b", alpha=0.85, label='Draws from GP')
plt.plot([0, 5000], [1.0, 1.0], '-.k', alpha=0.5)
plt.fill_between([1200, 1250], 2.0, 0.0, hatch='\\', alpha=0.4, color='k', label='Si absorption cutoff')
plt.xlabel('$\lambda$ (nm)');
plt.ylabel('$T_{gap}$');
plt.xlim(1200, 2501);
plt.ylim(0.9, 1.019);
plt.legend(loc='lower right')
plt.savefig("VG03_f045.pdf", bbox_inches='tight')
```

Figure for the talk
```python
sns.set_context('talk', font_scale=1.3)
```
```python
plt.figure(figsize=(10,7))
for d, f, a, s in samples_lin[np.random.randint(len(samples_lin), size=60)]:
off_diag_terms = a**2 * np.exp(-0.5 * (x[:, None] - x[None, :])**2 / s**2)
C = iid_cov + off_diag_terms
fit = T_gap_Si_withFF_fast(x, d, f, n1)/T_DSP
vec = np.random.multivariate_normal(fit, C)
plt.plot(x, vec,"-b", alpha=0.06)
plt.step(x, y,color="k", label='Measurement')
fit = T_gap_Si_withFF_fast(x, d_mcmc[0], f_mcmc[0], n1)/T_DSP
fit_label = 'Model with $d={:.0f}$ nm, $f={:.3f}$'.format(d_mcmc[0], f_mcmc[0])
plt.plot(x, fit, '--', color=sns.xkcd_rgb["pale red"], alpha=1.0, label=fit_label)
plt.plot([-10, -9], [-10, -9],"-b", alpha=0.85, label='Draws from GP')
plt.plot([0, 5000], [1.0, 1.0], '-.k', alpha=0.5)
plt.fill_between([1200, 1250], 2.0, 0.0, hatch='\\', alpha=0.4, color='k', label='Si absorption cutoff')
plt.xlabel('$\lambda$ (nm)');
plt.ylabel('$T_{gap}$');
plt.xlim(1200, 2501);
plt.ylim(0.9, 1.019);
plt.legend(loc='lower right')
plt.savefig("VG03_f045_GP.pdf", bbox_inches='tight')
```

The end.
|
EchelleREPO_NAMEAO_bonding_paperPATH_START.@AO_bonding_paper_extracted@AO_bonding_paper-master@notebooks@[email protected]_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/layout/legend/__init__.py",
"type": "Python"
}
|
import sys
from typing import TYPE_CHECKING
if sys.version_info < (3, 7) or TYPE_CHECKING:
from ._yref import YrefValidator
from ._yanchor import YanchorValidator
from ._y import YValidator
from ._xref import XrefValidator
from ._xanchor import XanchorValidator
from ._x import XValidator
from ._visible import VisibleValidator
from ._valign import ValignValidator
from ._uirevision import UirevisionValidator
from ._traceorder import TraceorderValidator
from ._tracegroupgap import TracegroupgapValidator
from ._title import TitleValidator
from ._orientation import OrientationValidator
from ._itemwidth import ItemwidthValidator
from ._itemsizing import ItemsizingValidator
from ._itemdoubleclick import ItemdoubleclickValidator
from ._itemclick import ItemclickValidator
from ._indentation import IndentationValidator
from ._grouptitlefont import GrouptitlefontValidator
from ._groupclick import GroupclickValidator
from ._font import FontValidator
from ._entrywidthmode import EntrywidthmodeValidator
from ._entrywidth import EntrywidthValidator
from ._borderwidth import BorderwidthValidator
from ._bordercolor import BordercolorValidator
from ._bgcolor import BgcolorValidator
else:
from _plotly_utils.importers import relative_import
__all__, __getattr__, __dir__ = relative_import(
__name__,
[],
[
"._yref.YrefValidator",
"._yanchor.YanchorValidator",
"._y.YValidator",
"._xref.XrefValidator",
"._xanchor.XanchorValidator",
"._x.XValidator",
"._visible.VisibleValidator",
"._valign.ValignValidator",
"._uirevision.UirevisionValidator",
"._traceorder.TraceorderValidator",
"._tracegroupgap.TracegroupgapValidator",
"._title.TitleValidator",
"._orientation.OrientationValidator",
"._itemwidth.ItemwidthValidator",
"._itemsizing.ItemsizingValidator",
"._itemdoubleclick.ItemdoubleclickValidator",
"._itemclick.ItemclickValidator",
"._indentation.IndentationValidator",
"._grouptitlefont.GrouptitlefontValidator",
"._groupclick.GroupclickValidator",
"._font.FontValidator",
"._entrywidthmode.EntrywidthmodeValidator",
"._entrywidth.EntrywidthValidator",
"._borderwidth.BorderwidthValidator",
"._bordercolor.BordercolorValidator",
"._bgcolor.BgcolorValidator",
],
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@layout@legend@[email protected]_END.py
|
{
"filename": "tensorboard_vis.py",
"repo_name": "cy-xu/cosmic-conn",
"repo_path": "cosmic-conn_extracted/cosmic-conn-main/paper_utils/tensorboard_vis.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
"""
Plot the training evaluation figure for paper
CY Xu ([email protected])
"""
import numpy as np
from numpy import genfromtxt
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
def extract_csv(path, max_epoch, scale_correction=False):
"""because Tensorboard export CSV is not continuous/complete,
we create a new array in order to plot the curves correctly
"""
array = genfromtxt(path, delimiter=",", dtype="float32")
# remove Nan in first row
array = array[1:]
# new None array
sparce_array = np.full(max_epoch+1, None)
# init loss = 1
sparce_array[0] = 1
    # fill sparse array with available data
for line in array:
epoch = int(line[1])
loss = line[2]
if scale_correction:
# 1024 model sampled 70 samples per epoch,
            # while it should be 210 to have equal updates per epoch
# as 256 models, which used full dataset (Appendix A)
epoch = epoch // 3
if epoch > max_epoch:
break
sparce_array[epoch] = loss
return sparce_array.astype('float32')
save_path = "paper_utils/training_evaluation.pdf"
# March 32 channel batch
cosmic_conn_1024 = "paper_utils/tensorboard_logs/run-2021_03_14_16_36_LCO_Cosmic-Conn_1e3continue-tag-loss_valid_cr_loss.csv"
cosmic_conn_BN = "paper_utils/tensorboard_logs/run-2021_06_04_17_45_LCO_seed0_Cosmic-CoNN_BN-tag-loss_valid_cr_loss.csv"
cosmic_conn_256 = "paper_utils/tensorboard_logs/run-2021_03_14_16_47_LCO_Cosmic-CoNN_256px-tag-loss_valid_cr_loss.csv"
deepCR_256 = "paper_utils/tensorboard_logs/run-2021_03_14_16_42_LCO_deepCR_continue-tag-loss_valid_cr_loss.csv"
max_epoch = 5000
epochs = np.linspace(0, max_epoch, max_epoch + 1)
cosmic_conn_1024 = extract_csv(cosmic_conn_1024, max_epoch, True)
cosmic_conn_256 = extract_csv(cosmic_conn_256, max_epoch)
cosmic_conn_BN = extract_csv(cosmic_conn_BN, max_epoch) # correctly scaled
deepCR_256 = extract_csv(deepCR_256, max_epoch)
# plotting
plt.rcParams.update({"font.size": 12})
f = plt.figure(figsize=(12, 4))
ax = f.add_subplot()
# fig, ax = plt.subplots()
width = 0.8
linewidth = 1.5
mask = np.isfinite(cosmic_conn_1024)
ax.plot(
epochs[mask],
cosmic_conn_1024[mask],
color="tab:orange",
label="(1024px) Cosmic-CoNN",
linewidth=linewidth,
linestyle="-",
)
mask = np.isfinite(cosmic_conn_BN)
ax.plot(
epochs[mask],
cosmic_conn_BN[mask],
color="tab:orange",
label="(1024px) Cosmic-CoNN w/ BN",
linewidth=linewidth,
linestyle="--",
)
mask = np.isfinite(cosmic_conn_256)
ax.plot(
epochs[mask],
cosmic_conn_256[mask],
color="tab:blue",
label="(256px) Cosmic-CoNN",
linewidth=linewidth,
linestyle="-",
)
mask = np.isfinite(deepCR_256)
ax.plot(
epochs[mask],
deepCR_256[mask],
color="tab:blue",
label="(256px) deepCR",
linewidth=linewidth,
linestyle="--",
)
ax.set_ylabel("1 - Dice score")
ax.set_xlabel("epochs (defined in Appendix C)")
ax.legend()
ax.set_yscale('log')
ax.set_yticks([0.1, 0.2, 0.3, 0.5, 0.8, 1.0])
ax.get_yaxis().set_major_formatter(ticker.ScalarFormatter())
plt.grid(True)
plt.xlim(0, epochs[-1])
min = 0.08
plt.ylim(min, 1)
plt.savefig(save_path, bbox_inches="tight")
|
cy-xuREPO_NAMEcosmic-connPATH_START.@cosmic-conn_extracted@cosmic-conn-main@paper_utils@[email protected]_END.py
|
{
"filename": "__init__.py",
"repo_name": "amusecode/amuse",
"repo_path": "amuse_extracted/amuse-main/src/amuse/ic/_limepy/__init__.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
__author__ = 'Mark Gieles, Alice Zocchi'
__email__ = '[email protected], [email protected]'
__version__ = '0.1.1'
from .limepy import limepy
from .sample import sample
|
amusecodeREPO_NAMEamusePATH_START.@amuse_extracted@amuse-main@src@amuse@ic@_limepy@[email protected]_END.py
|
{
"filename": "README.md",
"repo_name": "jax-ml/jax",
"repo_path": "jax_extracted/jax-main/tests/filecheck/README.md",
"type": "Markdown"
}
|
This directory contains LLVM
[FileCheck](https://llvm.org/docs/CommandGuide/FileCheck.html) tests that verify
that JAX primitives can be lowered to MLIR.
These tests are intended to be a quick and easy-to-understand way to catch
regressions from changes due to the MLIR Python bindings and from changes to the
various MLIR dialects used by JAX, without needing to run the full JAX test
suite.
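For a flavor of what such a test looks like, here is a minimal hedged sketch (the contents, the lowered dialect, and the CHECK lines are illustrative assumptions, not copied from the actual tests in this directory): the script prints lowered MLIR, and FileCheck matches the `CHECK:` directives against it.

```python
# Hypothetical FileCheck-style test: print lowered MLIR for FileCheck to scan.
import jax
import jax.numpy as jnp


def main():
    # CHECK-LABEL: module
    # CHECK: stablehlo.add
    print(jax.jit(lambda x, y: jnp.add(x, y)).lower(1.0, 2.0).as_text())


if __name__ == "__main__":
    main()
```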
|
jax-mlREPO_NAMEjaxPATH_START.@jax_extracted@jax-main@tests@[email protected]@.PATH_END.py
|
{
"filename": "human_parsing.py",
"repo_name": "itseez/opencv",
"repo_path": "opencv_extracted/opencv-master/samples/dnn/human_parsing.py",
"type": "Python"
}
|
#!/usr/bin/env python
'''
You can download the converted pb model from https://www.dropbox.com/s/qag9vzambhhkvxr/lip_jppnet_384.pb?dl=0
or convert the model yourself.
Follow these steps if you want to convert the original model yourself:
To get the original .meta pre-trained model, download https://drive.google.com/file/d/1BFVXgeln-bek8TCbRjN6utPAgRE0LJZg/view
To convert the .meta model to .pb yourself, download the original repository https://github.com/Engineering-Course/LIP_JPPNet
Then change the script evaluate_parsing_JPPNet-s2.py for human parsing as follows:
1. Remove preprocessing to create image_batch_origin:
with tf.name_scope("create_inputs"):
...
Add
image_batch_origin = tf.placeholder(tf.float32, shape=(2, None, None, 3), name='input')
2. Create input
image = cv2.imread(path/to/image)
image_rev = np.flip(image, axis=1)
input = np.stack([image, image_rev], axis=0)
3. Hardcode image_h and image_w shapes to determine output shapes.
We use default INPUT_SIZE = (384, 384) from evaluate_parsing_JPPNet-s2.py.
parsing_out1 = tf.reduce_mean(tf.stack([tf.image.resize_images(parsing_out1_100, INPUT_SIZE),
tf.image.resize_images(parsing_out1_075, INPUT_SIZE),
tf.image.resize_images(parsing_out1_125, INPUT_SIZE)]), axis=0)
Do similarly with parsing_out2, parsing_out3
4. Remove postprocessing. Last net operation:
raw_output = tf.reduce_mean(tf.stack([parsing_out1, parsing_out2, parsing_out3]), axis=0)
Change:
parsing_ = sess.run(raw_output, feed_dict={'input:0': input})
5. To save model after sess.run(...) add:
input_graph_def = tf.get_default_graph().as_graph_def()
output_node = "Mean_3"
output_graph_def = tf.graph_util.convert_variables_to_constants(sess, input_graph_def, output_node)
output_graph = "LIP_JPPNet.pb"
with tf.gfile.GFile(output_graph, "wb") as f:
        f.write(output_graph_def.SerializeToString())
'''
import argparse
import os.path
import numpy as np
import cv2 as cv
backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE, cv.dnn.DNN_BACKEND_OPENCV,
cv.dnn.DNN_BACKEND_VKCOM, cv.dnn.DNN_BACKEND_CUDA)
targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL, cv.dnn.DNN_TARGET_OPENCL_FP16, cv.dnn.DNN_TARGET_MYRIAD,
cv.dnn.DNN_TARGET_HDDL, cv.dnn.DNN_TARGET_VULKAN, cv.dnn.DNN_TARGET_CUDA, cv.dnn.DNN_TARGET_CUDA_FP16)
def preprocess(image):
"""
Create 4-dimensional blob from image and flip image
:param image: input image
"""
image_rev = np.flip(image, axis=1)
input = cv.dnn.blobFromImages([image, image_rev], mean=(104.00698793, 116.66876762, 122.67891434))
return input
def run_net(input, model_path, backend, target):
"""
Read network and infer model
:param model_path: path to JPPNet model
:param backend: computation backend
:param target: computation device
"""
net = cv.dnn.readNet(model_path)
net.setPreferableBackend(backend)
net.setPreferableTarget(target)
net.setInput(input)
out = net.forward()
return out
def postprocess(out, input_shape):
"""
Create a grayscale human segmentation
:param out: network output
:param input_shape: input image width and height
"""
# LIP classes
# 0 Background
# 1 Hat
# 2 Hair
# 3 Glove
# 4 Sunglasses
# 5 UpperClothes
# 6 Dress
# 7 Coat
# 8 Socks
# 9 Pants
# 10 Jumpsuits
# 11 Scarf
# 12 Skirt
# 13 Face
# 14 LeftArm
# 15 RightArm
# 16 LeftLeg
# 17 RightLeg
# 18 LeftShoe
# 19 RightShoe
head_output, tail_output = np.split(out, indices_or_sections=[1], axis=0)
head_output = head_output.squeeze(0)
tail_output = tail_output.squeeze(0)
head_output = np.stack([cv.resize(img, dsize=input_shape) for img in head_output[:, ...]])
tail_output = np.stack([cv.resize(img, dsize=input_shape) for img in tail_output[:, ...]])
tail_list = np.split(tail_output, indices_or_sections=list(range(1, 20)), axis=0)
tail_list = [arr.squeeze(0) for arr in tail_list]
tail_list_rev = [tail_list[i] for i in range(14)]
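    # the tail output saw the horizontally flipped input, so swap the left/right
    # paired labels (arms 14/15, legs 16/17, shoes 18/19) before flipping back
    # and averaging with the head output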
tail_list_rev.extend([tail_list[15], tail_list[14], tail_list[17], tail_list[16], tail_list[19], tail_list[18]])
tail_output_rev = np.stack(tail_list_rev, axis=0)
tail_output_rev = np.flip(tail_output_rev, axis=2)
raw_output_all = np.mean(np.stack([head_output, tail_output_rev], axis=0), axis=0, keepdims=True)
raw_output_all = np.argmax(raw_output_all, axis=1)
raw_output_all = raw_output_all.transpose(1, 2, 0)
return raw_output_all
def decode_labels(gray_image):
"""
Colorize image according to labels
:param gray_image: grayscale human segmentation result
"""
height, width, _ = gray_image.shape
colors = [(0, 0, 0), (128, 0, 0), (255, 0, 0), (0, 85, 0), (170, 0, 51), (255, 85, 0),
(0, 0, 85), (0, 119, 221), (85, 85, 0), (0, 85, 85), (85, 51, 0), (52, 86, 128),
(0, 128, 0), (0, 0, 255), (51, 170, 221), (0, 255, 255),(85, 255, 170),
(170, 255, 85), (255, 255, 0), (255, 170, 0)]
segm = np.stack([colors[idx] for idx in gray_image.flatten()])
segm = segm.reshape(height, width, 3).astype(np.uint8)
segm = cv.cvtColor(segm, cv.COLOR_BGR2RGB)
return segm
def parse_human(image, model_path, backend=cv.dnn.DNN_BACKEND_OPENCV, target=cv.dnn.DNN_TARGET_CPU):
"""
Prepare input for execution, run net and postprocess output to parse human.
:param image: input image
:param model_path: path to JPPNet model
:param backend: name of computation backend
:param target: name of computation target
"""
input = preprocess(image)
input_h, input_w = input.shape[2:]
output = run_net(input, model_path, backend, target)
grayscale_out = postprocess(output, (input_w, input_h))
segmentation = decode_labels(grayscale_out)
return segmentation
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Use this script to run human parsing using JPPNet',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--input', '-i', required=True, help='Path to input image.')
parser.add_argument('--model', '-m', default='lip_jppnet_384.pb', help='Path to pb model.')
parser.add_argument('--backend', choices=backends, default=cv.dnn.DNN_BACKEND_DEFAULT, type=int,
help="Choose one of computation backends: "
"%d: automatically (by default), "
"%d: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit), "
"%d: OpenCV implementation, "
"%d: VKCOM, "
"%d: CUDA"% backends)
parser.add_argument('--target', choices=targets, default=cv.dnn.DNN_TARGET_CPU, type=int,
help='Choose one of target computation devices: '
'%d: CPU target (by default), '
'%d: OpenCL, '
'%d: OpenCL fp16 (half-float precision), '
'%d: NCS2 VPU, '
'%d: HDDL VPU, '
'%d: Vulkan, '
'%d: CUDA, '
'%d: CUDA fp16 (half-float preprocess)' % targets)
args, _ = parser.parse_known_args()
if not os.path.isfile(args.model):
        raise OSError("Model does not exist")
image = cv.imread(args.input)
output = parse_human(image, args.model, args.backend, args.target)
winName = 'Deep learning human parsing in OpenCV'
cv.namedWindow(winName, cv.WINDOW_AUTOSIZE)
cv.imshow(winName, output)
cv.waitKey()
|
itseezREPO_NAMEopencvPATH_START.@opencv_extracted@opencv-master@samples@dnn@[email protected]_END.py
|
{
"filename": "utils.py",
"repo_name": "ArgonneCPAC/dsps",
"repo_path": "dsps_extracted/dsps-main/dsps/photometry/utils.py",
"type": "Python"
}
|
"""Convenience functions commonly encountered in photometry calculations"""
import numpy as np
def interpolate_filter_trans_curves(wave_filters, trans_filters, n=None):
"""Interpolate a collection of filter transmission curves to a common length.
Convenience function for analyses vmapping over broadband colors.
Parameters
----------
wave_filters : sequence of n_filters ndarrays
trans_filters : sequence of n_filters ndarrays
n : int, optional
Desired length of the output transmission curves.
Default is equal to the smallest length transmission curve
Returns
-------
wave_filters : ndarray of shape (n_filters, n)
trans_filters : ndarray of shape (n_filters, n)
"""
wave0 = wave_filters[0]
wave_min, wave_max = wave0.min(), wave0.max()
if n is None:
n = np.min([x.size for x in wave_filters])
for wave, trans in zip(wave_filters, trans_filters):
wave_min = min(wave_min, wave.min())
wave_max = max(wave_max, wave.max())
wave_collector = []
trans_collector = []
for wave, trans in zip(wave_filters, trans_filters):
wave_min, wave_max = wave.min(), wave.max()
new_wave = np.linspace(wave_min, wave_max, n)
new_trans = np.interp(new_wave, wave, trans)
wave_collector.append(new_wave)
trans_collector.append(new_trans)
return np.array(wave_collector), np.array(trans_collector)
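# ----------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): interpolate two
# toy filter curves of different lengths onto a common grid. The
# Gaussian-shaped transmission curves below are illustrative assumptions.
def _demo_interpolate_filter_trans_curves():
    wave_g = np.linspace(4000.0, 5500.0, 120)
    wave_r = np.linspace(5500.0, 7000.0, 90)
    trans_g = np.exp(-0.5 * ((wave_g - 4750.0) / 300.0) ** 2)
    trans_r = np.exp(-0.5 * ((wave_r - 6250.0) / 300.0) ** 2)
    waves, trans = interpolate_filter_trans_curves(
        [wave_g, wave_r], [trans_g, trans_r]
    )
    # n defaults to the shortest input curve, so both outputs have shape (2, 90)
    return waves, trans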
|
ArgonneCPACREPO_NAMEdspsPATH_START.@dsps_extracted@dsps-main@dsps@[email protected]@.PATH_END.py
|
{
"filename": "ModSMFromNp.py",
"repo_name": "saopicc/DDFacet",
"repo_path": "DDFacet_extracted/DDFacet-master/SkyModel/Sky/ModSMFromNp.py",
"type": "Python"
}
|
from __future__ import division, absolute_import, print_function
import numpy as np
def ReadFromNp(Cat0):
ns=Cat0.shape[0]
Cat=np.zeros((ns,),dtype=[('Name','|S200'),('ra',float),('dec',float),('Sref',float),('I',float),('Q',float),\
('U',float),('V',float),('RefFreq',float),('alpha',float),('ESref',float),\
('Ealpha',float),('kill',int),('Cluster',int),('Type',int),('Gmin',float),\
('Gmaj',float),('Gangle',float),("Select",int),('l',float),('m',float),("Exclude",bool)])
Cat=Cat.view(np.recarray)
Cat.RefFreq=1.
Cat.ra[0:ns]=Cat0.ra
Cat.dec[0:ns]=Cat0.dec
Cat.I[0:ns]=Cat0.I
if "Gmin" in list(Cat0.dtype.fields.keys()):
Cat.Gmin[0:ns]=Cat0.Gmin
Cat.Gmaj[0:ns]=Cat0.Gmaj
Cat.Gangle[0:ns]=Cat0.Gangle
Cat=Cat[Cat.ra!=0.]
Cat.Type[Cat.Gmaj>0.]=1
Cat.Sref=Cat.I
return Cat
|
saopiccREPO_NAMEDDFacetPATH_START.@DDFacet_extracted@DDFacet-master@SkyModel@[email protected]@.PATH_END.py
|
{
"filename": "test_dims_dimensionproxy.py",
"repo_name": "h5py/h5py",
"repo_path": "h5py_extracted/h5py-master/h5py/tests/test_dims_dimensionproxy.py",
"type": "Python"
}
|
# This file is part of h5py, a Python interface to the HDF5 library.
#
# http://www.h5py.org
#
# Copyright 2008-2013 Andrew Collette and contributors
#
# License: Standard 3-clause BSD; see "license.txt" for full license terms
# and contributor agreement.
"""
Tests the h5py.Dataset.dims.DimensionProxy class.
"""
import numpy as np
import h5py
from .common import ut, TestCase
class TestItems(TestCase):
def test_empty(self):
""" no dimension scales -> empty list """
dset = self.f.create_dataset('x', (10,))
self.assertEqual(dset.dims[0].items(), [])
|
h5pyREPO_NAMEh5pyPATH_START.@h5py_extracted@h5py-master@h5py@tests@[email protected]_END.py
|
{
"filename": "README.md",
"repo_name": "lenjonah/neutron_star_inference",
"repo_path": "neutron_star_inference_extracted/neutron_star_inference-main/README.md",
"type": "Markdown"
}
|
# Neural Simulation-Based Inference of the Neutron Star Equation of State directly from Telescope Spectra
Neutron stars provide a unique window into the unknown physics of extremely dense matter. Their internal structure is described by the relationship between pressure and energy density, $P(\varepsilon)$, commonly referred to as the **equation of state** (EoS). Many efforts have been made to constrain the EoS based on astrophysical observations of neutron stars using Bayesian inference methods. Since the likelihood for the astrophysical detector data is not analytically available, the conventional inference is carried out in two steps. We have implemented a novel approach using recently developed simulation-based inference methods to infer the parameters $[\lambda_1, \lambda_2]$ of a parametrization of the EoS directly from telescope spectra of low-mass X-ray binaries in quiescence. These spectra depend on the mass $M$ and radius $R$ of the star, as well as on additional nuisance parameters $[N_H, d, \log(T_\mathrm{eff})]$.
In an approach called **neural likelihood estimation**, normalizing flows are trained on simulated data to approximate the analytically unavailable likelihood. Because normalizing flows are differentiable by design, this approach allows the use of improved methods for sampling the posterior, such as **Hamiltonian Monte Carlo**, which scale much better to higher-dimensional parameter spaces. The NLE + HMC approach outperforms previous methods and, compared to the conventional two-step methods, scales better to the growing number of observations expected in the coming years. More details can be found in our paper (https://arxiv.org/pdf/2403.00287). This repository contains the code used in our analysis to allow future studies to build on our progress.
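As a rough illustration of the idea (the function and attribute names below are assumptions made for this sketch, not the API used in this repository), the HMC target combines a prior with the log-likelihood averaged over the ensemble of trained flows:

```python
import torch


def ensemble_log_likelihood(flows, spectrum, theta):
    # each trained flow approximates log p(spectrum | theta);
    # averaging over the ensemble reduces the approximation error
    logps = torch.stack([flow.log_prob(spectrum, context=theta) for flow in flows])
    return logps.mean(dim=0)


def log_posterior(flows, spectrum, theta, log_prior):
    # a differentiable target density, so HMC can use its gradients
    return ensemble_log_likelihood(flows, spectrum, theta) + log_prior(theta)
```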

## Organization
This repository is organized as follows:
- `requirements.txt` contains the packages beyond standard ones necessary to run the code
- `NLE_spectra_HMC_xspec.py` python script to run multiple HMC chains in parallel to sample the posterior for a given observation of telescope spectra
- `NLE_utils.py` utility functions necessary to run HMC
- `working_examples.ipynb` jupyter notebook illustrating how to train the normalizing flows based on the simulated data and how to analyze the HMC chains
- `\data` subdirectory contains pretrained normalizing flows, one example output of an HMC run and the parameters of the simulated spectra
## Execution
In order to run the code provided in this repository, follow these steps:
- install `requirements.txt`
- download the telescope spectra `spectra_noisy.npy` (which are too large for GitHub) from this link (https://tumde-my.sharepoint.com/:f:/g/personal/len_brandes_tum_de/Eghm3DM7mvZLuco4Cb76LzsBiwipLeBnIr9XZ2uRj6wi3g?e=Nnwb8E)
- optional: take a look at `working_examples.ipynb` to understand the spectral data and the training of the normalizing flows
- run `py NLE_spectra_HMC_xspec_mass.py OBS_IDX NUM_DENSITY_ESTIMATORS` to sample the posterior for a provided number of normalizing flows `NUM_DENSITY_ESTIMATORS` between 1 and 5 and an observation index `OBS_IDX` that specifies the spectra used as observations (between 0 and 148 for the test set); see the example below
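For example, sampling the posterior for test-set observation 0 with an ensemble of 3 normalizing flows could be launched like this (shown from Python purely for illustration; the script name and argument order are taken from the step above):

```python
import subprocess

# equivalent to running: py NLE_spectra_HMC_xspec_mass.py 0 3
subprocess.run(["python", "NLE_spectra_HMC_xspec_mass.py", "0", "3"], check=True)
```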
## Citation
If you use the code or the data provided in this repository, please cite:
````
@article{Brandes2024,
author = "Brandes, Len and Modi, Chirag and Ghosh, Aishik and Farrell, Delaney and Lindblom, Lee and Heinrich, Lukas and Steiner, Andrew W. and Weber, Fridolin and Whiteson, Daniel",
title = "{Neural Simulation-Based Inference of the Neutron Star Equation of State directly from Telescope Spectra}",
eprint = "2403.00287",
archivePrefix = "arXiv",
primaryClass = "astro-ph.HE",
month = "3",
year = "2024"
}
````
|
lenjonahREPO_NAMEneutron_star_inferencePATH_START.@neutron_star_inference_extracted@[email protected]@.PATH_END.py
|
{
"filename": "test_pixel_likelihood.py",
"repo_name": "cta-observatory/ctapipe",
"repo_path": "ctapipe_extracted/ctapipe-main/src/ctapipe/image/tests/test_pixel_likelihood.py",
"type": "Python"
}
|
import numpy as np
from ctapipe.image import (
chi_squared,
mean_poisson_likelihood_full,
mean_poisson_likelihood_gaussian,
neg_log_likelihood,
neg_log_likelihood_approx,
neg_log_likelihood_numeric,
)
def test_chi_squared():
image = np.array([20, 20, 20])
prediction = np.array([20, 20, 20])
bad_prediction = np.array([1, 1, 1])
ped = 1
chi = chi_squared(image, prediction, ped)
bad_chi = chi_squared(image, bad_prediction, ped)
assert np.sum(chi) < np.sum(bad_chi)
def test_mean_poisson_likelihoood_gaussian():
prediction = np.array([50, 50, 50], dtype="float")
spe = 0.5
small_mean_likelihood = mean_poisson_likelihood_gaussian(prediction, spe, 0)
large_mean_likelihood = mean_poisson_likelihood_gaussian(prediction, spe, 1)
assert np.all(small_mean_likelihood < large_mean_likelihood)
    # Test that the mean likelihood of a bunch of samples drawn from the Gaussian
# behind the approximate log likelihood is indeed the precalculated mean
rng = np.random.default_rng(123456)
ped = 1
mean_likelihood = mean_poisson_likelihood_gaussian(prediction[0], spe, ped)
distribution_width = np.sqrt(ped**2 + prediction[0] * (1 + spe**2))
normal_samples = rng.normal(
loc=prediction[0], scale=distribution_width, size=100000
)
rel_diff = (
np.mean(2 * neg_log_likelihood_approx(normal_samples, prediction[0], spe, ped))
- mean_likelihood
) / mean_likelihood
assert np.abs(rel_diff) < 5e-4
def test_mean_poisson_likelihood_full():
prediction = np.array([10.0, 10.0])
spe = np.array([0.5])
small_mean_likelihood = mean_poisson_likelihood_full(prediction, spe, [0.1])
large_mean_likelihood = mean_poisson_likelihood_full(prediction, spe, [1])
assert np.all(small_mean_likelihood < large_mean_likelihood)
def test_full_likelihood():
"""
Simple test of likelihood, test against known values for high and low
signal cases. Check that full calculation and the gaussian approx become
equal at high signal.
"""
spe = 0.5 * np.ones(3) # Single photo-electron width
pedestal = np.ones(3) # width of the pedestal distribution
image_small = np.array([0, 1, 2])
expectation_small = np.array([1, 1, 1])
full_like_small = neg_log_likelihood(image_small, expectation_small, spe, pedestal)
exp_rel_diff = (
full_like_small - np.asarray([1.37815294, 1.31084662, 1.69627197])
) / full_like_small
# Check against known values
assert np.all(np.abs(exp_rel_diff) < 3e-4)
image_large = np.array([40, 50, 60])
expectation_large = np.array([50, 50, 50])
full_like_large = neg_log_likelihood(image_large, expectation_large, spe, pedestal)
# Check against known values
exp_rel_diff = (
full_like_large - np.asarray([3.78183004, 2.99452694, 3.78183004])
) / full_like_large
assert np.all(np.abs(exp_rel_diff) < 3e-5)
gaus_like_large = neg_log_likelihood_approx(
image_large, expectation_large, spe, pedestal
)
numeric_like_large = neg_log_likelihood_numeric(
image_large, expectation_large, spe, pedestal
)
# Check that in the large signal case the full expectation is equal to the
# gaussian approximation (to 5%)
assert np.all(
np.abs((numeric_like_large - gaus_like_large) / numeric_like_large) < 0.05
)
|
cta-observatoryREPO_NAMEctapipePATH_START.@ctapipe_extracted@ctapipe-main@src@ctapipe@image@tests@[email protected]_END.py
|
{
"filename": "_showexponent.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/carpet/baxis/_showexponent.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ShowexponentValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self, plotly_name="showexponent", parent_name="carpet.baxis", **kwargs
):
super(ShowexponentValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
values=kwargs.pop("values", ["all", "first", "last", "none"]),
**kwargs
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@carpet@baxis@[email protected]_END.py
|
{
"filename": "recipe_test_unit.py",
"repo_name": "Keck-DataReductionPipelines/KPF-Pipeline",
"repo_path": "KPF-Pipeline_extracted/KPF-Pipeline-master/kpfpipe/tools/recipe_test_unit.py",
"type": "Python"
}
|
# test_recipe_unit.py
import sys, os, traceback
import tempfile
sys.path.insert(0, os.path.abspath('../KeckDRPFramework'))
from keckdrpframework.core.framework import Framework
from keckdrpframework.models.arguments import Arguments
from keckdrpframework.models.action import Action
from keckdrpframework.models.processing_context import ProcessingContext
from kpfpipe.pipelines.kpfpipeline import KPFPipeline
from kpfpipe.logger import start_logger
class KpfPipelineForTesting(KPFPipeline):
"""
Test pipeline class extending KpfPipeline
"""
def __init__(self, context: ProcessingContext):
""" constructor """
KPFPipeline.__init__(self, context)
self.event_table['test_primitive_validate_args'] = ("test_primitive_validate_args", "processing", "resume_recipe")
@staticmethod
def test_primitive_validate_args(action: Action, context: ProcessingContext):
"""
for each pair of arguments, validate that they are equal
"""
args = action.args
if len(args) % 2 != 0:
assert False, f"test_primitive_validate_args called with an odd number of arguments, {len(args)}"
arg_iter = iter(args)
while True:
try:
arg1 = next(arg_iter)
arg2 = next(arg_iter)
except StopIteration:
break
except Exception as e:
assert False, f"Unexpected exception in test_primitive_validate_args: {e}"
assert arg1 == arg2, f"values didn't match as expected, {arg1} vs {arg2}"
# This is the default framework configuration file path
framework_config = 'configs/framework.cfg'
framework_logcfg= 'configs/framework_logger.cfg'
pipe_config = "examples/default_simple.cfg"
def run_recipe(recipe: str, pipe_config: str=pipe_config, date_dir=None,
file_path='', watch=False):
"""
This is the code that runs the given recipe.
It mimics the kpf framework/pipeline startup code in cli.py, but writes
the recipe string into a temporary file before invoking the framework
with start_recipe as the initial event.
The framework is put in testing mode so that it passes exceptions
    on to this testing code. That way we can test the proper handling of
recipe errors, e.g. undefined variables.
"""
pipe = KpfPipelineForTesting
# Setup a pipeline logger
# This is to differentiate between the loggers of framework and pipeline
# and individual modules.
# The configs related to the logger is under the section [LOGGER]
# Try to initialize the framework
try:
framework = Framework(pipe, framework_config, testing=True)
# Overwrite the framework logger with this instance of logger
# using framework default logger creates some obscure problem
"""
framework.logger = start_logger('DRPFrame', framework_logcfg)
"""
framework.pipeline.start(pipe_config)
except Exception as e:
print("Failed to initialize framework, exiting ...", e)
traceback.print_exc()
# sys.exit(1)
# python code
with tempfile.NamedTemporaryFile(mode='w+') as f:
f.write(recipe)
f.seek(0)
arg = Arguments(name="start_recipe_args", recipe=f.name)
if date_dir != None:
arg.date_dir = date_dir
arg.file_path = file_path
arg.watch = watch
framework.append_event('start_recipe', arg)
framework.main_loop()
def recipe_test(recipe: str, pipe_config: str=pipe_config, **kwargs):
try:
run_recipe(recipe, pipe_config, **kwargs)
except Exception as e:
assert False, f"test_recipe: unexpected exception {e}"
|
Keck-DataReductionPipelinesREPO_NAMEKPF-PipelinePATH_START.@KPF-Pipeline_extracted@KPF-Pipeline-master@kpfpipe@tools@[email protected]_END.py
|
{
"filename": "response_chain.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/community/langchain_community/chains/openapi/response_chain.py",
"type": "Python"
}
|
"""Response parser."""
import json
import re
from typing import Any
from langchain.chains.api.openapi.prompts import RESPONSE_TEMPLATE
from langchain.chains.llm import LLMChain
from langchain_core.language_models import BaseLanguageModel
from langchain_core.output_parsers import BaseOutputParser
from langchain_core.prompts.prompt import PromptTemplate
class APIResponderOutputParser(BaseOutputParser):
"""Parse the response and error tags."""
def _load_json_block(self, serialized_block: str) -> str:
try:
response_content = json.loads(serialized_block, strict=False)
return response_content.get("response", "ERROR parsing response.")
except json.JSONDecodeError:
return "ERROR parsing response."
except:
raise
def parse(self, llm_output: str) -> str:
"""Parse the response and error tags."""
json_match = re.search(r"```json(.*?)```", llm_output, re.DOTALL)
if json_match:
return self._load_json_block(json_match.group(1).strip())
else:
raise ValueError(f"No response found in output: {llm_output}.")
@property
def _type(self) -> str:
return "api_responder"
class APIResponderChain(LLMChain):
"""Get the response parser."""
@classmethod
def is_lc_serializable(cls) -> bool:
return False
@classmethod
def from_llm(
cls, llm: BaseLanguageModel, verbose: bool = True, **kwargs: Any
) -> LLMChain:
"""Get the response parser."""
output_parser = APIResponderOutputParser()
prompt = PromptTemplate(
template=RESPONSE_TEMPLATE,
output_parser=output_parser,
input_variables=["response", "instructions"],
)
return cls(prompt=prompt, llm=llm, verbose=verbose, **kwargs)
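# ----------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): the LLM output
# string below is a made-up example of the fenced ```json block that
# APIResponderOutputParser expects.
def _demo_api_responder_output_parser() -> str:
    llm_output = 'Here is the answer:\n```json\n{"response": "It is sunny today."}\n```'
    # returns "It is sunny today."
    return APIResponderOutputParser().parse(llm_output)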
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@community@langchain_community@chains@openapi@[email protected]_END.py
|
{
"filename": "test_html_parsers.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/community/tests/unit_tests/document_loaders/parsers/test_html_parsers.py",
"type": "Python"
}
|
"""Tests for the HTML parsers."""
from pathlib import Path
import pytest
from langchain_community.document_loaders.blob_loaders import Blob
from langchain_community.document_loaders.parsers.html import BS4HTMLParser
HERE = Path(__file__).parent
EXAMPLES = HERE.parent.parent.parent / "integration_tests" / "examples"
@pytest.mark.requires("bs4", "lxml")
def test_bs_html_loader() -> None:
"""Test unstructured loader."""
file_path = EXAMPLES / "example.html"
blob = Blob.from_path(file_path)
parser = BS4HTMLParser(get_text_separator="|")
docs = list(parser.lazy_parse(blob))
assert isinstance(docs, list)
assert len(docs) == 1
metadata = docs[0].metadata
content = docs[0].page_content
assert metadata["title"] == "Chew dad's slippers"
assert metadata["source"] == str(file_path)
assert content[:2] == "\n|"
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@community@tests@unit_tests@document_loaders@parsers@[email protected]_END.py
|
{
"filename": "donutImageCheck.py",
"repo_name": "lsst-ts/ts_wep",
"repo_path": "ts_wep_extracted/ts_wep-main/python/lsst/ts/wep/donutImageCheck.py",
"type": "Python"
}
|
# This file is part of ts_wep.
#
# Developed for the LSST Telescope and Site Systems.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__all__ = ["DonutImageCheck"]
import numpy as np
from scipy.stats import entropy
class DonutImageCheck(object):
def __init__(self, numOfBins=256, entroThres=3.5, returnEntro=False):
"""Donut image check class to judge the donut image is effective or
not.
Parameters
----------
numOfBins : int, optional
Number of bins in the histogram. (the default is 256.)
entroThres : float, optional
Threshold of entropy (the default is 3.5.)
returnEntro: bool, optional
Whether to return the calculated entropy value
(the default is False).
"""
# Number of bins in the histogram
self.numOfBins = int(numOfBins)
# Threshold of entropy
self.entroThres = entroThres
# Whether to return the calculated value
self.returnEntro = returnEntro
def isEffDonut(self, donutImg):
"""Is effective donut image or not.
Parameters
----------
donutImg : numpy.ndarray
Donut image.
Returns
-------
bool
True if the donut image is effective.
"""
array1d = donutImg.flatten()
hist = np.histogram(array1d, bins=self.numOfBins)[0]
# Square the distribution to magnify the difference in entropy
imgEntropy = entropy(hist**2)
if (imgEntropy < self.entroThres) and (imgEntropy != 0):
eff = True
else:
eff = False
# Return the actual entropy value if needed
if self.returnEntro:
return eff, imgEntropy
else:
return eff
if __name__ == "__main__":
pass
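# ----------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): run the entropy
# check on a synthetic stamp. The random image below is an illustrative
# assumption, not a real donut image.
def _demoDonutImageCheck():
    rng = np.random.default_rng(0)
    stamp = rng.normal(size=(160, 160))
    checker = DonutImageCheck(returnEntro=True)
    isEffective, imgEntropy = checker.isEffDonut(stamp)
    return isEffective, imgEntropy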
|
lsst-tsREPO_NAMEts_wepPATH_START.@ts_wep_extracted@ts_wep-main@python@lsst@ts@[email protected]@.PATH_END.py
|
{
"filename": "design.py",
"repo_name": "rat-pac/rat-pac",
"repo_path": "rat-pac_extracted/rat-pac-master/python/couchdb/design.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2008-2009 Christopher Lenz
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
"""Utility code for managing design documents."""
from copy import deepcopy
from inspect import getsource
from itertools import groupby
from operator import attrgetter
from textwrap import dedent
from types import FunctionType
__all__ = ['ViewDefinition']
__docformat__ = 'restructuredtext en'
class ViewDefinition(object):
r"""Definition of a view stored in a specific design document.
An instance of this class can be used to access the results of the view,
as well as to keep the view definition in the design document up to date
with the definition in the application code.
>>> from couchdb import Server
>>> server = Server()
>>> db = server.create('python-tests')
>>> view = ViewDefinition('tests', 'all', '''function(doc) {
... emit(doc._id, null);
... }''')
>>> view.get_doc(db)
    The view is not yet stored in the database; in fact, the design doc doesn't
even exist yet. That can be fixed using the `sync` method:
>>> view.sync(db)
>>> design_doc = view.get_doc(db)
>>> design_doc #doctest: +ELLIPSIS
<Document '_design/tests'@'...' {...}>
>>> print design_doc['views']['all']['map']
function(doc) {
emit(doc._id, null);
}
If you use a Python view server, you can also use Python functions instead
of code embedded in strings:
>>> def my_map(doc):
... yield doc['somekey'], doc['somevalue']
>>> view = ViewDefinition('test2', 'somename', my_map, language='python')
>>> view.sync(db)
>>> design_doc = view.get_doc(db)
>>> design_doc #doctest: +ELLIPSIS
<Document '_design/test2'@'...' {...}>
>>> print design_doc['views']['somename']['map']
def my_map(doc):
yield doc['somekey'], doc['somevalue']
Use the static `sync_many()` method to create or update a collection of
views in the database in an atomic and efficient manner, even across
different design documents.
>>> del server['python-tests']
"""
def __init__(self, design, name, map_fun, reduce_fun=None,
language='javascript', wrapper=None, options=None,
**defaults):
"""Initialize the view definition.
Note that the code in `map_fun` and `reduce_fun` is automatically
dedented, that is, any common leading whitespace is removed from each
line.
:param design: the name of the design document
:param name: the name of the view
:param map_fun: the map function code
:param reduce_fun: the reduce function code (optional)
:param language: the name of the language used
:param wrapper: an optional callable that should be used to wrap the
result rows
:param options: view specific options (e.g. {'collation':'raw'})
"""
if design.startswith('_design/'):
design = design[8:]
self.design = design
self.name = name
if isinstance(map_fun, FunctionType):
map_fun = _strip_decorators(getsource(map_fun).rstrip())
self.map_fun = dedent(map_fun.lstrip('\n'))
if isinstance(reduce_fun, FunctionType):
reduce_fun = _strip_decorators(getsource(reduce_fun).rstrip())
if reduce_fun:
reduce_fun = dedent(reduce_fun.lstrip('\n'))
self.reduce_fun = reduce_fun
self.language = language
self.wrapper = wrapper
self.options = options
self.defaults = defaults
def __call__(self, db, **options):
"""Execute the view in the given database.
:param db: the `Database` instance
:param options: optional query string parameters
:return: the view results
:rtype: `ViewResults`
"""
merged_options = self.defaults.copy()
merged_options.update(options)
return db.view('/'.join([self.design, self.name]),
wrapper=self.wrapper, **merged_options)
def __repr__(self):
return '<%s %r>' % (type(self).__name__, '/'.join([
'_design', self.design, '_view', self.name
]))
def get_doc(self, db):
"""Retrieve and return the design document corresponding to this view
definition from the given database.
:param db: the `Database` instance
:return: a `client.Document` instance, or `None` if the design document
does not exist in the database
:rtype: `Document`
"""
return db.get('_design/%s' % self.design)
def sync(self, db):
"""Ensure that the view stored in the database matches the view defined
by this instance.
:param db: the `Database` instance
"""
type(self).sync_many(db, [self])
@staticmethod
def sync_many(db, views, remove_missing=False, callback=None):
"""Ensure that the views stored in the database that correspond to a
given list of `ViewDefinition` instances match the code defined in
those instances.
This function might update more than one design document. This is done
using the CouchDB bulk update feature to ensure atomicity of the
operation.
:param db: the `Database` instance
:param views: a sequence of `ViewDefinition` instances
:param remove_missing: whether views found in a design document that
are not found in the list of `ViewDefinition`
instances should be removed
:param callback: a callback function that is invoked when a design
document gets updated; the callback gets passed the
design document as only parameter, before that doc
has actually been saved back to the database
"""
docs = []
for design, views in groupby(views, key=attrgetter('design')):
doc_id = '_design/%s' % design
doc = db.get(doc_id, {'_id': doc_id})
orig_doc = deepcopy(doc)
languages = set()
missing = list(doc.get('views', {}).keys())
for view in views:
funcs = {'map': view.map_fun}
if view.reduce_fun:
funcs['reduce'] = view.reduce_fun
if view.options:
funcs['options'] = view.options
doc.setdefault('views', {})[view.name] = funcs
languages.add(view.language)
if view.name in missing:
missing.remove(view.name)
if remove_missing and missing:
for name in missing:
del doc['views'][name]
elif missing and 'language' in doc:
languages.add(doc['language'])
if len(languages) > 1:
                raise ValueError('Found different language views in one '
                                 'design document (%r)' % list(languages))
doc['language'] = list(languages)[0]
if doc != orig_doc:
if callback is not None:
callback(doc)
docs.append(doc)
db.update(docs)
def _strip_decorators(code):
retval = []
beginning = True
for line in code.splitlines():
if beginning and not line.isspace():
if line.lstrip().startswith('@'):
continue
beginning = False
retval.append(line)
return '\n'.join(retval)
|
rat-pacREPO_NAMErat-pacPATH_START.@rat-pac_extracted@rat-pac-master@python@[email protected]@.PATH_END.py
|