🌙 LUNA - RAG
Collection • 5 items • Updated

prompt (string, 135–513k chars) | completion (string, 9–138 chars) | api (string, 9–42 chars)
---|---|---
# Lint as: python3
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for schedules.py."""

from absl.testing import absltest
from absl.testing import parameterized
import jax
import numpy as np
from rlax._src import schedules


@parameterized.named_parameters(
    ('JitOnp', jax.jit, lambda t: t),
    ('NoJitOnp', lambda fn: fn, lambda t: t),
    ('JitJnp', jax.jit, jax.device_put),
    ('NoJitJnp', lambda fn: fn, jax.device_put))
class PolynomialTest(parameterized.TestCase):

  def test_linear(self, compile_fn, place_fn):
    """Check linear schedule."""
    # Get schedule function.
    schedule_fn = schedules.polynomial_schedule(10., 20., 1, 10)
    # Optionally compile.
    schedule_fn = compile_fn(schedule_fn)
    # Test that generated values equal the expected schedule values.
    generated_vals = []
    for count in range(15):
      # Optionally convert to device array.
      step_count = place_fn(count)
      # Compute next value.
      generated_vals.append(schedule_fn(step_count))
    # Test output.
    expected_vals = np.array(list(range(10, 20)) + [20] * 5, dtype=np.float32)
    np.testing.assert_allclose(
        expected_vals, | np.array(generated_vals) | numpy.array |
"""Functions copypasted from newer versions of beatnum.
"""
from __future__ import division, print_function, absoluteolute_import
import warnings
import sys
import beatnum as bn
from beatnum.testing.nosetester import import_nose
from scipy._lib._version import BeatnumVersion
if BeatnumVersion(bn.__version__) > '1.7.0.dev':
_assert_warns = bn.testing.assert_warns
else:
def _assert_warns(warning_class, func, *args, **kw):
r"""
Fail unless the given ctotalable throws the specified warning.
This definition is copypasted from beatnum 1.9.0.dev.
The version in earlier beatnum returns None.
Parameters
----------
warning_class : class
The class defining the warning that `func` is expected to throw.
func : ctotalable
The ctotalable to test.
*args : Arguments
Arguments passed to `func`.
**kwargs : Kwargs
Keyword arguments passed to `func`.
Returns
-------
The value returned by `func`.
"""
with warnings.catch_warnings(record=True) as l:
warnings.simplefilter('always')
result = func(*args, **kw)
if not len(l) > 0:
raise AssertionError("No warning raised when ctotaling %s"
% func.__name__)
if not l[0].category is warning_class:
raise AssertionError("First warning for %s is not a "
"%s( is %s)" % (func.__name__, warning_class, l[0]))
return result
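    # Illustrative usage (not part of the original module): like numpy's
    # assert_warns, the shim returns whatever the callable returns.
    #
    #     def _deprecated():
    #         warnings.warn("old API", DeprecationWarning)
    #         return 42
    #
    #     assert _assert_warns(DeprecationWarning, _deprecated) == 42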
def assert_raises_regex(exception_class, expected_regexp,
                        callable_obj=None, *args, **kwargs):
    """
    Fail unless an exception of class exception_class and with message that
    matches expected_regexp is thrown by callable when invoked with arguments
    args and keyword arguments kwargs.

    Name of this function adheres to Python 3.2+ reference, but should work in
    all versions down to 2.6.

    Notes
    -----
    .. versionadded:: 1.8.0

    """
    __tracebackhide__ = True  # Hide traceback for py.test
    nose = import_nose()

    if sys.version_info.major >= 3:
        funcname = nose.tools.assert_raises_regex
    else:
        # Only present in Python 2.7, missing from unittest in 2.6
        funcname = nose.tools.assert_raises_regexp

    return funcname(exception_class, expected_regexp, callable_obj,
                    *args, **kwargs)
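# Illustrative usage (hypothetical call, not part of the original module):
#
#     assert_raises_regex(ValueError, "invalid literal", int, "not-a-number")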
if NumpyVersion(np.__version__) >= '1.10.0':
    from numpy import broadcast_to
else:
    # Definition of `broadcast_to` from numpy 1.10.0.

    def _maybe_view_as_subclass(original_array, new_array):
        if type(original_array) is not type(new_array):
            # if input was an ndarray subclass and subclasses were OK,
            # then view the result as that subclass.
            new_array = new_array.view(type=type(original_array))
            # Since we have done something akin to a view from original_array, we
            # should let the subclass finalize (if it has it implemented, i.e., is
            # not None).
            if new_array.__array_finalize__:
                new_array.__array_finalize__(original_array)
        return new_array

    def _broadcast_to(array, shape, subok, readonly):
        shape = tuple(shape) if np.iterable(shape) else (shape,)
        array = | np.array(array, copy=False, subok=subok) | numpy.array |
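For reference, a small sketch of the behavior this backport reproduces, using the public numpy API available from 1.10 onwards:

    import numpy as np

    x = np.array([1, 2, 3])
    b = np.broadcast_to(x, (2, 3))  # read-only view with shape (2, 3)
    assert b.shape == (2, 3) and not b.flags.writeable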
import numpy as np
import scipy.stats
import scipy.optimize
import os
import logging

from astropy.tests.helper import pytest, catch_warnings
from astropy.modeling import models
from astropy.modeling.fitting import _fitter_to_model_params

from stingray import Powerspectrum
from stingray.modeling import ParameterEstimation, PSDParEst, \
    OptimizationResults, SamplingResults
from stingray.modeling import PSDPosterior, set_logprior, PSDLogLikelihood, \
    LogLikelihood

try:
    from statsmodels.tools.numdiff import approx_hess
    comp_hessian = True
except ImportError:
    comp_hessian = False

try:
    import emcee
    can_sample = True
except ImportError:
    can_sample = False

try:
    import matplotlib.pyplot as plt
    can_plot = True
except ImportError:
    can_plot = False
class LogLikelihoodDummy(LogLikelihood):
    def __init__(self, x, y, model):
        LogLikelihood.__init__(self, x, y, model)

    def evaluate(self, parse, neg=False):
        return np.nan


class OptimizationResultsSubclassDummy(OptimizationResults):
    def __init__(self, lpost, res, neg, log=None):
        if log is None:
            self.log = logging.getLogger('Fitting summary')
            self.log.setLevel(logging.DEBUG)
            if not self.log.handlers:
                ch = logging.StreamHandler()
                ch.setLevel(logging.DEBUG)
                self.log.addHandler(ch)

        self.neg = neg
        if res is not None:
            self.result = res.fun
            self.p_opt = res.x
        else:
            self.result = None
            self.p_opt = None

        self.model = lpost.model
class TestParameterEstimation(object):

    @classmethod
    def setup_class(cls):
        np.random.seed(100)
        m = 1
        nfreq = 100
        freq = np.arange(nfreq)
        noise = np.random.exponential(size=nfreq)
        power = noise * 2.0

        ps = Powerspectrum()
        ps.freq = freq
        ps.power = power
        ps.m = m
        ps.df = freq[1] - freq[0]
        ps.norm = "leahy"

        cls.ps = ps
        cls.a_mean, cls.a_var = 2.0, 1.0

        cls.model = models.Const1D()

        p_amplitude = lambda amplitude: \
            scipy.stats.norm(loc=cls.a_mean, scale=cls.a_var).pdf(amplitude)

        cls.priors = {"amplitude": p_amplitude}
        cls.lpost = PSDPosterior(cls.ps.freq, cls.ps.power, cls.model,
                                 m=cls.ps.m)
        cls.lpost.logprior = set_logprior(cls.lpost, cls.priors)

    def test_par_est_initializes(self):
        pe = ParameterEstimation()

    def test_parest_stores_max_post_correctly(self):
        """
        Make sure the keyword for Maximum A Posteriori fits is stored correctly
        as a default.
        """
        pe = ParameterEstimation()
        assert pe.max_post is True, "max_post should be set to True as a default."

    def test_object_works_with_loglikelihood_object(self):
        llike = PSDLogLikelihood(self.ps.freq, self.ps.power,
                                 self.model, m=self.ps.m)
        pe = ParameterEstimation()
        res = pe.fit(llike, [2.0])
        assert isinstance(res,
                          OptimizationResults), "res must be of " \
                                                "type OptimizationResults"

    def test_fit_fails_when_object_is_not_posterior_or_likelihood(self):
        x = np.ones(10)
        y = np.ones(10)
        pe = ParameterEstimation()
        with pytest.raises(TypeError):
            res = pe.fit(x, y)

    def test_fit_fails_without_lpost_or_t0(self):
        pe = ParameterEstimation()
        with pytest.raises(TypeError):
            res = pe.fit()

    def test_fit_fails_without_t0(self):
        pe = ParameterEstimation()
        with pytest.raises(TypeError):
            res = pe.fit(np.ones(10))

    def test_fit_fails_with_incorrect_number_of_parameters(self):
        pe = ParameterEstimation()
        t0 = [1, 2]
        with pytest.raises(ValueError):
            res = pe.fit(self.lpost, t0)

    def test_fit_method_works_with_correct_parameter(self):
        pe = ParameterEstimation()
        t0 = [2.0]
        res = pe.fit(self.lpost, t0)

    def test_fit_method_fails_with_too_many_tries(self):
        lpost = LogLikelihoodDummy(self.ps.freq, self.ps.power, self.model)
        pe = ParameterEstimation()
        t0 = [2.0]
        with pytest.raises(Exception):
            res = pe.fit(lpost, t0, neg=True)

    def test_compute_lrt_fails_when_garbage_goes_in(self):
        pe = ParameterEstimation()
        t0 = [2.0]
        with pytest.raises(TypeError):
            pe.compute_lrt(self.lpost, t0, None, t0)
        with pytest.raises(ValueError):
            pe.compute_lrt(self.lpost, t0[:-1], self.lpost, t0)

    def test_compute_lrt_sets_max_post_to_false(self):
        t0 = [2.0]
        pe = ParameterEstimation(max_post=True)
        assert pe.max_post is True
        delta_deviance, opt1, opt2 = pe.compute_lrt(self.lpost, t0,
                                                    self.lpost, t0)
        assert pe.max_post is False
        assert delta_deviance < 1e-7

    @pytest.mark.skipif("not can_sample", "not can_plot")
    def test_sampler_runs(self):
        pe = ParameterEstimation()
        if os.path.exists("test_corner.pdf"):
            os.unlink("test_corner.pdf")
        with catch_warnings(RuntimeWarning):
            sample_res = pe.sample(self.lpost, [2.0], nwalkers=50, niter=10,
                                   burnin=50, print_results=True, plot=True)
        assert os.path.exists("test_corner.pdf")
        assert sample_res.acceptance > 0.25
        assert isinstance(sample_res, SamplingResults)

    # TODO: Fix pooling with the current setup of logprior
    # @pytest.mark.skipif("not can_sample", "not can_plot")
    # def test_sampler_pooling(self):
    #     pe = ParameterEstimation()
    #     if os.path.exists("test_corner.pdf"):
    #         os.unlink("test_corner.pdf")
    #     with catch_warnings(RuntimeWarning):
    #         sample_res = pe.sample(self.lpost, [2.0], nwalkers=50, niter=10,
    #                                burnin=50, print_results=True, plot=True,
    #                                pool=True)

    @pytest.mark.skipif("can_sample")
    def test_sample_raises_error_without_emcee(self):
        pe = ParameterEstimation()
        with pytest.raises(ImportError):
            sample_res = pe.sample(self.lpost, [2.0])

    def test_simulate_lrt_fails_in_superclass(self):
        pe = ParameterEstimation()
        with pytest.raises(NotImplementedError):
            pe.simulate_lrts(None, None, None, None, None)
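# Note on the LRT tests above: for nested models the statistic is the
# difference of deviances, LRT = D(simple) - D(complex) with D = -2 log L,
# so comparing a model against itself (as test_compute_lrt_sets_max_post_to_false
# does) must yield a statistic of ~0 up to optimizer noise.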
class TestOptimizationResults(object):

    @classmethod
    def setup_class(cls):
        np.random.seed(1000)
        m = 1
        nfreq = 100
        freq = np.arange(nfreq)
        noise = np.random.exponential(size=nfreq)
        power = noise * 2.0

        ps = Powerspectrum()
        ps.freq = freq
        ps.power = power
        ps.m = m
        ps.n = freq.shape[0]
        ps.df = freq[1] - freq[0]
        ps.norm = "leahy"

        cls.ps = ps
        cls.a_mean, cls.a_var = 2.0, 1.0

        cls.model = models.Const1D()

        p_amplitude = lambda amplitude: \
            scipy.stats.norm(loc=cls.a_mean, scale=cls.a_var).pdf(amplitude)

        cls.priors = {"amplitude": p_amplitude}
        cls.lpost = PSDPosterior(cls.ps.freq, cls.ps.power,
                                 cls.model, m=cls.ps.m)
        cls.lpost.logprior = set_logprior(cls.lpost, cls.priors)

        cls.fitmethod = "powell"
        cls.max_post = True
        cls.t0 = np.array([2.0])
        cls.neg = True
        cls.opt = scipy.optimize.minimize(cls.lpost, cls.t0,
                                          method=cls.fitmethod,
                                          args=cls.neg, tol=1.e-10)

        cls.opt.x = np.atleast_1d(cls.opt.x)
        cls.optres = OptimizationResultsSubclassDummy(cls.lpost,
                                                      cls.opt,
                                                      neg=True)

    def test_object_initializes_correctly(self):
        res = OptimizationResults(self.lpost, self.opt, neg=self.neg)
        assert hasattr(res, "p_opt")
        assert hasattr(res, "result")
        assert hasattr(res, "deviance")
        assert hasattr(res, "aic")
        assert hasattr(res, "bic")
        assert hasattr(res, "model")
        assert isinstance(res.model, models.Const1D)
        assert res.p_opt == self.opt.x, "res.p_opt must be the same as opt.x!"
        assert np.isclose(res.p_opt[0], 2.0, atol=0.1, rtol=0.1)
        assert res.model == self.lpost.model
        assert res.result == self.opt.fun
        mean_model = np.ones_like(self.lpost.x) * self.opt.x[0]
        assert np.allclose(res.mfit, mean_model), "res.model should be exactly " \
                                                  "the model for the data."

    def test_compute_criteria_works_correctly(self):
        res = OptimizationResults(self.lpost, self.opt, neg=self.neg)
        test_aic = res.result + 2.0 * res.p_opt.shape[0]
        test_bic = res.result + res.p_opt.shape[0] * \
            np.log(self.lpost.x.shape[0])
        test_deviance = -2 * self.lpost.loglikelihood(res.p_opt,
                                                      neg=False)

        assert np.isclose(res.aic, test_aic, atol=0.1, rtol=0.1)
        assert np.isclose(res.bic, test_bic, atol=0.1, rtol=0.1)
        assert np.isclose(res.deviance, test_deviance, atol=0.1, rtol=0.1)

    def test_merit_calculated_correctly(self):
        res = OptimizationResults(self.lpost, self.opt, neg=self.neg)
        test_merit = np.sum(((self.ps.power - 2.0) / 2.0) ** 2.)
        assert np.isclose(res.merit, test_merit, rtol=0.2)

    def test_compute_statistics_computes_mfit(self):
        assert hasattr(self.optres, "mfit") is False
        self.optres._compute_statistics(self.lpost)
        assert hasattr(self.optres, "mfit")

    def test_compute_model(self):
        self.optres._compute_model(self.lpost)
        assert hasattr(self.optres,
                       "mfit"), "OptimizationResult object should have mfit " \
                                "attribute at this point!"
        _fitter_to_model_params(self.model, self.opt.x)
        mfit_test = self.model(self.lpost.x)
        assert np.allclose(self.optres.mfit, mfit_test)

    def test_compute_statistics_computes_all_statistics(self):
        self.optres._compute_statistics(self.lpost)
        assert hasattr(self.optres, "merit")
        assert hasattr(self.optres, "dof")
        assert hasattr(self.optres, "sexp")
        assert hasattr(self.optres, "ssd")
        assert hasattr(self.optres, "sobs")
        test_merit = np.sum(((self.ps.power - 2.0) / 2.0) ** 2.)
        test_dof = self.ps.n - self.lpost.npar
        test_sexp = 2.0 * self.lpost.x.shape[0] * len(self.optres.p_opt)
        test_ssd = np.sqrt(2.0 * test_sexp)
        test_sobs = np.sum(self.ps.power - self.optres.p_opt[0])

        assert np.isclose(test_merit, self.optres.merit, rtol=0.2)
        assert test_dof == self.optres.dof
        assert test_sexp == self.optres.sexp
        assert test_ssd == self.optres.ssd
        assert np.isclose(test_sobs, self.optres.sobs, atol=0.01, rtol=0.01)

    def test_compute_criteria_returns_correct_attributes(self):
        self.optres._compute_criteria(self.lpost)
        assert hasattr(self.optres, "aic")
        assert hasattr(self.optres, "bic")
        assert hasattr(self.optres, "deviance")
        npar = self.optres.p_opt.shape[0]
        test_aic = self.optres.result + 2. * npar
        test_bic = self.optres.result + npar * np.log(self.ps.freq.shape[0])
        test_deviance = -2 * self.lpost.loglikelihood(self.optres.p_opt,
                                                      neg=False)

        assert np.isclose(test_aic, self.optres.aic)
        assert np.isclose(test_bic, self.optres.bic)
        assert np.isclose(test_deviance, self.optres.deviance)

    def test_compute_covariance_with_hess_inverse(self):
        self.optres._compute_covariance(self.lpost, self.opt)
        assert np.allclose(self.optres.cov, np.asarray(self.opt.hess_inv))
        assert np.allclose(self.optres.err, np.sqrt(np.diag(self.opt.hess_inv)))

    @pytest.mark.skipif("comp_hessian")
    def test_compute_covariance_without_comp_hessian(self):
        self.optres._compute_covariance(self.lpost, None)
        assert self.optres.cov is None
        assert self.optres.err is None

    @pytest.mark.skipif("not comp_hessian")
    def test_compute_covariance_with_hess_inverse(self):
        optres = OptimizationResultsSubclassDummy(self.lpost, self.opt,
                                                  neg=True)
        optres._compute_covariance(self.lpost, self.opt)

        if comp_hessian:
            phess = approx_hess(self.opt.x, self.lpost)
            hess_inv = np.linalg.inv(phess)
            assert np.allclose(optres.cov, hess_inv)
            assert np.allclose(optres.err, np.sqrt(np.diag(np.abs(hess_inv))))

    def test_print_summary_works(self, logger, caplog):
        self.optres._compute_covariance(self.lpost, None)
        self.optres.print_summary(self.lpost)
        assert 'Parameter amplitude' in caplog.text
        assert "Fitting statistics" in caplog.text
        assert "number of data points" in caplog.text
        assert "Deviance [-2 log L] D =" in caplog.text
        assert "The Akaike Information Criterion of " \
               "the model is" in caplog.text
        assert "The Bayesian Information Criterion of " \
               "the model is" in caplog.text
        assert "The figure-of-merit function for this model" in caplog.text
        assert "Summed Residuals S =" in caplog.text
        assert "Expected S" in caplog.text
        assert "merit function" in caplog.text
if can_sample:
    class SamplingResultsDummy(SamplingResults):
        def __init__(self, sampler, ci_min=0.05, ci_max=0.95, log=None):
            if log is None:
                self.log = logging.getLogger('Fitting summary')
                self.log.setLevel(logging.DEBUG)
                if not self.log.handlers:
                    ch = logging.StreamHandler()
                    ch.setLevel(logging.DEBUG)
                    self.log.addHandler(ch)

            # store all the samples
            self.samples = sampler.get_chain(flat=True)

            chain_ndims = sampler.get_chain().shape
            self.nwalkers = float(chain_ndims[0])
            self.niter = float(chain_ndims[1])

            # store number of dimensions
            self.ndim = chain_ndims[2]

            # compute and store acceptance fraction
            self.acceptance = np.nanmean(sampler.acceptance_fraction)
            self.L = self.acceptance * self.samples.shape[0]


class TestSamplingResults(object):
    @classmethod
    def setup_class(cls):
        m = 1
        nfreq = 100
        freq = np.arange(nfreq)
        noise = np.random.exponential(size=nfreq)
        power = noise * 2.0

        ps = Powerspectrum()
        ps.freq = freq
        ps.power = power
        ps.m = m
        ps.df = freq[1] - freq[0]
        ps.norm = "leahy"

        cls.ps = ps

        cls.a_mean, cls.a_var = 2.0, 1.0

        cls.model = models.Const1D()

        p_amplitude = lambda amplitude: \
            scipy.stats.norm(loc=cls.a_mean, scale=cls.a_var).pdf(
                amplitude)

        cls.priors = {"amplitude": p_amplitude}
        cls.lpost = PSDPosterior(cls.ps.freq, cls.ps.power,
                                 cls.model, m=cls.ps.m)
        cls.lpost.logprior = set_logprior(cls.lpost, cls.priors)

        cls.fitmethod = "BFGS"
        cls.max_post = True
        cls.t0 = [2.0]
        cls.neg = True

        pe = ParameterEstimation()
        res = pe.fit(cls.lpost, cls.t0)

        cls.nwalkers = 50
        cls.niter = 100

        np.random.seed(200)
        p0 = np.array(
            [np.random.multivariate_normal(res.p_opt, res.cov) for
             i in range(cls.nwalkers)])

        cls.sampler = emcee.EnsembleSampler(cls.nwalkers,
                                            len(res.p_opt), cls.lpost,
                                            args=[False])

        with catch_warnings(RuntimeWarning):
            _, _, _ = cls.sampler.run_mcmc(p0, cls.niter)

    def test_can_sample_is_true(self):
        assert can_sample

    def test_sample_results_object_initializes(self):
        s = SamplingResults(self.sampler)
        assert s.samples.shape[0] == self.nwalkers * self.niter
        assert s.acceptance > 0.25
        assert np.isclose(s.L,
                          s.acceptance * self.nwalkers * self.niter)

    def test_check_convergence_works(self):
        s = SamplingResultsDummy(self.sampler)
        s._check_convergence(self.sampler)
        assert hasattr(s, "rhat")
        rhat_test = 0.038688
        assert np.isclose(rhat_test, s.rhat[0], atol=0.02, rtol=0.1)

        s._infer()
        assert hasattr(s, "mean")
        assert hasattr(s, "std")
        assert hasattr(s, "ci")
        test_mean = 2.0
        test_std = 0.2
        assert np.isclose(test_mean, s.mean[0], rtol=0.1)
        assert np.isclose(test_std, s.std[0], atol=0.01, rtol=0.01)
        assert s.ci.size == 2

    def test_infer_computes_correct_values(self):
        s = SamplingResults(self.sampler)


@pytest.fixture()
def logger():
    logger = logging.getLogger('Some.Logger')
    logger.setLevel(logging.INFO)
    return logger
class TestPSDParEst(object):

    @classmethod
    def setup_class(cls):
        m = 1
        nfreq = 100
        freq = np.linspace(1, 10.0, nfreq)

        rng = np.random.RandomState(100)  # set the seed for the random number generator
        noise = rng.exponential(size=nfreq)

        cls.model = models.Lorentz1D() + models.Const1D()

        cls.x_0_0 = 2.0
        cls.fwhm_0 = 0.05
        cls.amplitude_0 = 1000.0
        cls.amplitude_1 = 2.0

        cls.model.x_0_0 = cls.x_0_0
        cls.model.fwhm_0 = cls.fwhm_0
        cls.model.amplitude_0 = cls.amplitude_0
        cls.model.amplitude_1 = cls.amplitude_1

        p = cls.model(freq)

        np.random.seed(400)
        power = noise * p

        ps = Powerspectrum()
        ps.freq = freq
        ps.power = power
        ps.m = m
        ps.df = freq[1] - freq[0]
        ps.norm = "leahy"

        cls.ps = ps
        cls.a_mean, cls.a_var = 2.0, 1.0
        cls.a2_mean, cls.a2_var = 100.0, 10.0

        p_amplitude_1 = lambda amplitude: \
            scipy.stats.norm(loc=cls.a_mean, scale=cls.a_var).pdf(amplitude)

        p_x_0_0 = lambda alpha: \
            scipy.stats.uniform(0.0, 5.0).pdf(alpha)

        p_fwhm_0 = lambda alpha: \
            scipy.stats.uniform(0.0, 0.5).pdf(alpha)

        p_amplitude_0 = lambda amplitude: \
            scipy.stats.norm(loc=cls.a2_mean, scale=cls.a2_var).pdf(amplitude)

        cls.priors = {"amplitude_1": p_amplitude_1,
                      "amplitude_0": p_amplitude_0,
                      "x_0_0": p_x_0_0,
                      "fwhm_0": p_fwhm_0}

        cls.lpost = PSDPosterior(cls.ps.freq, cls.ps.power,
                                 cls.model, m=cls.ps.m)
        cls.lpost.logprior = set_logprior(cls.lpost, cls.priors)

        cls.fitmethod = "powell"
        cls.max_post = True
        cls.t0 = [cls.x_0_0, cls.fwhm_0, cls.amplitude_0, cls.amplitude_1]
        cls.neg = True

    def test_fitting_with_ties_and_bounds(self, capsys):
        double_f = lambda model: model.x_0_0 * 2
        model = self.model.copy()
        model += models.Lorentz1D(amplitude=model.amplitude_0,
                                  x_0=model.x_0_0 * 2,
                                  fwhm=model.fwhm_0)

        model.x_0_0 = self.model.x_0_0
        model.amplitude_0 = self.model.amplitude_0
        model.amplitude_1 = self.model.amplitude_1
        model.fwhm_0 = self.model.fwhm_0
        model.x_0_2.tied = double_f
        model.fwhm_0.bounds = [0, 10]
        model.amplitude_0.fixed = True

        p = model(self.ps.freq)

        noise = np.random.exponential(size=len(p))
        power = noise * p

        ps = Powerspectrum()
        ps.freq = self.ps.freq
        ps.power = power
        ps.m = self.ps.m
        ps.df = self.ps.df
        ps.norm = "leahy"

        pe = PSDParEst(ps, fitmethod="TNC")
        llike = PSDLogLikelihood(ps.freq, ps.power, model)

        true_pars = [self.x_0_0, self.fwhm_0,
                     self.amplitude_1,
                     model.amplitude_2.value,
                     model.fwhm_2.value]
        res = pe.fit(llike, true_pars, neg=True)

        compare_pars = [self.x_0_0, self.fwhm_0,
                        self.amplitude_1,
                        model.amplitude_2.value,
                        model.fwhm_2.value]

        assert np.allclose(compare_pars, res.p_opt, rtol=0.5)

    def test_par_est_initializes(self):
        pe = PSDParEst(self.ps)
        assert pe.max_post is True, "max_post should be set to True as a default."

    def test_fit_fails_when_object_is_not_posterior_or_likelihood(self):
        x = np.ones(10)
        y = np.ones(10)
        pe = PSDParEst(self.ps)
        with pytest.raises(TypeError):
            res = pe.fit(x, y)

    def test_fit_fails_without_lpost_or_t0(self):
        pe = PSDParEst(self.ps)
        with pytest.raises(TypeError):
            res = pe.fit()

    def test_fit_fails_without_t0(self):
        pe = PSDParEst(self.ps)
        with pytest.raises(TypeError):
            res = pe.fit(np.ones(10))

    def test_fit_fails_with_incorrect_number_of_parameters(self):
        pe = PSDParEst(self.ps)
        t0 = [1, 2]
        with pytest.raises(ValueError):
            res = pe.fit(self.lpost, t0)

    @pytest.mark.skipif("not can_plot")
    def test_fit_method_works_with_correct_parameter(self):
        pe = PSDParEst(self.ps)
        lpost = PSDPosterior(self.ps.freq, self.ps.power,
                             self.model, self.priors, m=self.ps.m)
        t0 = [2.0, 1, 1, 1]
        res = pe.fit(lpost, t0)

        assert isinstance(res, OptimizationResults), "res must be of type " \
                                                     "OptimizationResults"

        pe.plotfits(res, save_plot=True)
        assert os.path.exists("test_ps_fit.png")
        os.unlink("test_ps_fit.png")

        pe.plotfits(res, save_plot=True, log=True)
        assert os.path.exists("test_ps_fit.png")
        os.unlink("test_ps_fit.png")

        pe.plotfits(res, res2=res, save_plot=True)
        assert os.path.exists("test_ps_fit.png")
        os.unlink("test_ps_fit.png")

    def test_compute_lrt_fails_when_garbage_goes_in(self):
        pe = PSDParEst(self.ps)
        t0 = [2.0, 1, 1, 1]
        with pytest.raises(TypeError):
            pe.compute_lrt(self.lpost, t0, None, t0)
        with pytest.raises(ValueError):
            pe.compute_lrt(self.lpost, t0[:-1], self.lpost, t0)

    def test_compute_lrt_works(self):
        t0 = [2.0, 1, 1, 1]
        pe = PSDParEst(self.ps, max_post=True)
        assert pe.max_post is True
        delta_deviance, _, _ = pe.compute_lrt(self.lpost, t0, self.lpost, t0)

        assert pe.max_post is False
        assert np.absolute(delta_deviance) < 1.5e-4

    def test_simulate_lrts_works(self):
        m = 1
        nfreq = 100
        freq = np.linspace(1, 10, nfreq)
        rng = np.random.RandomState(100)
        noise = rng.exponential(size=nfreq)
        model = models.Const1D()
        model.amplitude = 2.0
        p = model(freq)
        power = noise * p

        ps = Powerspectrum()
        ps.freq = freq
        ps.power = power
        ps.m = m
        ps.df = freq[1] - freq[0]
        ps.norm = "leahy"

        loglike = PSDLogLikelihood(ps.freq, ps.power, model, m=1)

        s_all = np.atleast_2d(np.ones(5) * 2.0).T

        model2 = models.PowerLaw1D() + models.Const1D()
        model2.x_0_0.fixed = True
        loglike2 = PSDLogLikelihood(ps.freq, ps.power, model2, 1)

        pe = PSDParEst(ps)

        lrt_obs, res1, res2 = pe.compute_lrt(loglike, [2.0], loglike2,
                                             [2.0, 1.0, 2.0], neg=True)
        lrt_sim = pe.simulate_lrts(s_all, loglike, [2.0], loglike2,
                                   [2.0, 1.0, 2.0],
                                   seed=100)

        assert (lrt_obs > 0.4) and (lrt_obs < 0.6)
        assert np.all(lrt_sim < 10.0) and np.all(lrt_sim > 0.01)

    def test_compute_lrt_fails_with_wrong_input(self):
        pe = PSDParEst(self.ps)

        with pytest.raises(AssertionError):
            lrt_sim = pe.simulate_lrts(np.arange(5), self.lpost, [1, 2, 3, 4],
                                       [1, 2, 3, 4], [1, 2, 3, 4])

    def test_generate_model_data(self):
        pe = PSDParEst(self.ps)

        m = self.model
        _fitter_to_model_params(m, self.t0)

        model = m(self.ps.freq)

        pe_model = pe._generate_model(self.lpost, [self.x_0_0, self.fwhm_0,
                                                   self.amplitude_0,
                                                   self.amplitude_1])

        assert np.allclose(model, pe_model)

    def generate_data_rng_object_works(self):
        pe = PSDParEst(self.ps)

        sim_data1 = pe._generate_data(self.lpost,
                                      [self.x_0_0, self.fwhm_0,
                                       self.amplitude_0,
                                       self.amplitude_1],
                                      seed=1)

        sim_data2 = pe._generate_data(self.lpost,
                                      [self.x_0_0, self.fwhm_0,
                                       self.amplitude_0,
                                       self.amplitude_1],
                                      seed=1)

        assert np.allclose(sim_data1.power, sim_data2.power)

    def test_generate_data_produces_correct_distribution(self):
        model = models.Const1D()

        model.amplitude = 2.0
        p = model(self.ps.freq)

        seed = 100
        rng = np.random.RandomState(seed)

        noise = rng.exponential(size=len(p))
        power = noise * p

        ps = Powerspectrum()
        ps.freq = self.ps.freq
        ps.power = power
        ps.m = 1
        ps.df = self.ps.freq[1] - self.ps.freq[0]
        ps.norm = "leahy"

        lpost = PSDLogLikelihood(ps.freq, ps.power, model, m=1)

        pe = PSDParEst(ps)

        rng2 = np.random.RandomState(seed)
        sim_data = pe._generate_data(lpost, [2.0], rng2)

        assert np.allclose(ps.power, sim_data.power)

    def test_generate_model_breaks_with_wrong_input(self):
        pe = PSDParEst(self.ps)

        with pytest.raises(AssertionError):
            pe_model = pe._generate_model([1, 2, 3, 4], [1, 2, 3, 4])

    def test_generate_model_breaks_for_wrong_number_of_parameters(self):
        pe = PSDParEst(self.ps)

        with pytest.raises(AssertionError):
            pe_model = pe._generate_model(self.lpost, [1, 2, 3])

    def test_pvalue_calculated_correctly(self):
        a = [1, 1, 1, 2]
        obs_val = 1.5

        pe = PSDParEst(self.ps)
        pval = pe._compute_pvalue(obs_val, a)

        assert np.isclose(pval, 1. / len(a))

    def test_calibrate_lrt_fails_without_lpost_objects(self):
        pe = PSDParEst(self.ps)

        with pytest.raises(TypeError):
            pval = pe.calibrate_lrt(self.lpost, [1, 2, 3, 4],
                                    np.arange(10), np.arange(4))

    def test_calibrate_lrt_fails_with_wrong_parameters(self):
        pe = PSDParEst(self.ps)

        with pytest.raises(ValueError):
            pval = pe.calibrate_lrt(self.lpost, [1, 2, 3, 4],
                                    self.lpost, [1, 2, 3])

    def test_calibrate_lrt_works_as_expected(self):
        m = 1
        nfreq = 100
        freq = np.linspace(1, 10, nfreq)
        rng = np.random.RandomState(100)
        noise = rng.exponential(size=nfreq)
        model = models.Const1D()
        model.amplitude = 2.0
        p = model(freq)
        power = noise * p

        ps = Powerspectrum()
        ps.freq = freq
        ps.power = power
        ps.m = m
        ps.df = freq[1] - freq[0]
        ps.norm = "leahy"

        loglike = PSDLogLikelihood(ps.freq, ps.power, model, m=1)

        s_all = np.atleast_2d(np.ones(10) * 2.0).T

        model2 = models.PowerLaw1D() + models.Const1D()
        model2.x_0_0.fixed = True
        loglike2 = PSDLogLikelihood(ps.freq, ps.power, model2, 1)

        pe = PSDParEst(ps)

        pval = pe.calibrate_lrt(loglike, [2.0], loglike2,
                                [2.0, 1.0, 2.0], sample=s_all,
                                max_post=False, nsim=5,
                                seed=100)

        assert pval > 0.001

    @pytest.mark.skipif("not can_sample")
    def test_calibrate_lrt_works_with_sampling(self):
        m = 1
        nfreq = 100
        freq = np.linspace(1, 10, nfreq)
        rng = np.random.RandomState(100)
        noise = rng.exponential(size=nfreq)
        model = models.Const1D()
        model.amplitude = 2.0
        p = model(freq)
        power = noise * p

        ps = Powerspectrum()
        ps.freq = freq
        ps.power = power
        ps.m = m
        ps.df = freq[1] - freq[0]
        ps.norm = "leahy"

        lpost = PSDPosterior(ps.freq, ps.power, model, m=1)

        p_amplitude_1 = lambda amplitude: \
            scipy.stats.norm(loc=2.0, scale=1.0).pdf(amplitude)

        p_alpha_0 = lambda alpha: \
            scipy.stats.uniform(0.0, 5.0).pdf(alpha)

        p_amplitude_0 = lambda amplitude: \
            scipy.stats.norm(loc=self.a2_mean, scale=self.a2_var).pdf(
                amplitude)

        priors = {"amplitude": p_amplitude_1}

        priors2 = {"amplitude_1": p_amplitude_1,
                   "amplitude_0": p_amplitude_0,
                   "alpha_0": p_alpha_0}

        lpost.logprior = set_logprior(lpost, priors)

        model2 = models.PowerLaw1D() + models.Const1D()
        model2.x_0_0.fixed = True

        lpost2 = PSDPosterior(ps.freq, ps.power, model2, 1)
        lpost2.logprior = set_logprior(lpost2, priors2)

        pe = PSDParEst(ps)

        with catch_warnings(RuntimeWarning):
            pval = pe.calibrate_lrt(lpost, [2.0], lpost2,
                                    [2.0, 1.0, 2.0], sample=None,
                                    max_post=True, nsim=10, nwalkers=10,
                                    burnin=10, niter=10,
                                    seed=100)

        assert pval > 0.001

    def test_find_highest_outlier_works_as_expected(self):
        mp_ind = 5
        max_power = 1000.0

        ps = Powerspectrum()
        ps.freq = np.arange(10)
        ps.power = np.ones_like(ps.freq)
        ps.power[mp_ind] = max_power
        ps.m = 1
        ps.df = ps.freq[1] - ps.freq[0]
        ps.norm = "leahy"

        pe = PSDParEst(ps)

        max_x, max_ind = pe._find_outlier(ps.freq, ps.power, max_power)

        assert np.isclose(max_x, ps.freq[mp_ind])
        assert max_ind == mp_ind

    def test_compute_highest_outlier_works(self):
        mp_ind = 5
        max_power = 1000.0

        ps = Powerspectrum()
        ps.freq = np.arange(10)
        ps.power = np.ones_like(ps.freq)
        ps.power[mp_ind] = max_power
        ps.m = 1
        ps.df = ps.freq[1] - ps.freq[0]
        ps.norm = "leahy"

        model = models.Const1D()

        p_amplitude = lambda amplitude: \
            scipy.stats.norm(loc=1.0, scale=1.0).pdf(
                amplitude)

        priors = {"amplitude": p_amplitude}

        lpost = PSDPosterior(ps.freq, ps.power, model, 1)
        lpost.logprior = set_logprior(lpost, priors)

        pe = PSDParEst(ps)

        res = pe.fit(lpost, [1.0])

        res.mfit = np.ones_like(ps.freq)

        max_y, max_x, max_ind = pe._compute_highest_outlier(lpost, res)

        assert np.isclose(max_y[0], 2 * max_power)
        assert np.isclose(max_x[0], ps.freq[mp_ind])
        assert max_ind == mp_ind

    def test_simulate_highest_outlier_works(self):
        m = 1
        nfreq = 100
        seed = 100
        freq = np.linspace(1, 10, nfreq)
        rng = np.random.RandomState(seed)
        noise = rng.exponential(size=nfreq)
        model = models.Const1D()
        model.amplitude = 2.0
        p = model(freq)
        power = noise * p

        ps = Powerspectrum()
        ps.freq = freq
        ps.power = power
        ps.m = m
        ps.df = freq[1] - freq[0]
        ps.norm = "leahy"

        nsim = 5

        loglike = PSDLogLikelihood(ps.freq, ps.power, model, m=1)

        s_all = np.atleast_2d(np.ones(nsim) * 2.0).T

        pe = PSDParEst(ps)

        maxpow_sim = pe.simulate_highest_outlier(s_all, loglike, [2.0],
                                                 max_post=False, seed=seed)

        assert maxpow_sim.shape[0] == nsim
        assert np.all(maxpow_sim > 9.00) and np.all(maxpow_sim < 31.0)

    def test_calibrate_highest_outlier_works(self):
        m = 1
        nfreq = 100
        seed = 100
        freq = np.linspace(1, 10, nfreq)
        rng = np.random.RandomState(seed)
        noise = rng.exponential(size=nfreq)
        model = models.Const1D()
        model.amplitude = 2.0
        p = model(freq)
        power = noise * p

        ps = Powerspectrum()
        ps.freq = freq
        ps.power = power
        ps.m = m
        ps.df = freq[1] - freq[0]
        ps.norm = "leahy"

        nsim = 5

        loglike = PSDLogLikelihood(ps.freq, ps.power, model, m=1)

        s_all = np.atleast_2d( | np.ones(nsim) | numpy.ones |
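The tests above all revolve around one fitting workflow; a condensed sketch, assuming the stingray.modeling API exercised in this file:

    import numpy as np
    from astropy.modeling import models
    from stingray import Powerspectrum
    from stingray.modeling import PSDLogLikelihood, PSDParEst

    # Build a flat Leahy-normalized periodogram with exponential noise.
    ps = Powerspectrum()
    ps.freq = np.arange(1.0, 101.0)
    ps.power = np.random.exponential(size=100) * 2.0
    ps.m = 1
    ps.df = ps.freq[1] - ps.freq[0]
    ps.norm = "leahy"

    loglike = PSDLogLikelihood(ps.freq, ps.power, models.Const1D(), m=ps.m)
    pe = PSDParEst(ps, fitmethod="powell")
    res = pe.fit(loglike, [2.0], neg=True)  # res.p_opt should land near 2.0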
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import argparse
import json
import os
import re

import numpy as np
import tensorflow as tf

from googleapiclient import discovery
from googleapiclient import errors
from oauth2client.client import GoogleCredentials
from sklearn.model_selection import train_test_split
from tensorflow.contrib.learn.python.learn import learn_runner
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.utils import saved_model_export_utils
from tensorflow.contrib.training.python.training import hparam
from google.cloud.storage import blob, bucket, client

import trainer.dataset
import trainer.model
import trainer.ml_helpers
import trainer.top_words


def generate_experiment_fn(**experiment_args):
  """Create an experiment function.

  Args:
    experiment_args: keyword arguments to be passed through to experiment
      See `tf.contrib.learn.Experiment` for full args.

  Returns:
    A function:
      (tf.contrib.learn.RunConfig, tf.contrib.training.HParams) -> Experiment

    This function is used by learn_runner to create an Experiment which
    executes model code provided in the form of an Estimator and
    input functions.
  """
  def _experiment_fn(config, hparams):
    index_to_component = {}

    if hparams.train_file:
      with open(hparams.train_file) as f:
        if hparams.trainer_type == 'spam':
          training_data = trainer.ml_helpers.spam_from_file(f)
        else:
          training_data = trainer.ml_helpers.component_from_file(f)
    else:
      training_data = trainer.dataset.fetch_training_data(hparams.gcs_bucket,
          hparams.gcs_prefix, hparams.trainer_type)

    tf.logging.info('Training data received. Len: %d' % len(training_data))

    if hparams.trainer_type == 'spam':
      X, y = trainer.ml_helpers.transform_spam_csv_to_features(
          training_data)
    else:
      top_list = trainer.top_words.make_top_words_list(hparams.job_dir)
      X, y, index_to_component = trainer.ml_helpers \
          .transform_component_csv_to_features(training_data, top_list)

    tf.logging.info('Features generated')
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2,
                                                        random_state=42)

    train_input_fn = tf.estimator.inputs.numpy_input_fn(
        x=trainer.model.feature_list_to_dict(X_train, hparams.trainer_type),
        y=np.array(y_train),
        num_epochs=hparams.num_epochs,
        batch_size=hparams.train_batch_size,
        shuffle=True
    )
    eval_input_fn = tf.estimator.inputs.numpy_input_fn(
        x=trainer.model.feature_list_to_dict(X_test, hparams.trainer_type),
        y= | np.array(y_test) | numpy.array |
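The two input functions follow the TF 1.x Estimator pattern: `numpy_input_fn` wraps in-memory arrays and returns a callable that the estimator invokes to build its batching pipeline. A minimal sketch under that contrib-era API (the feature key is hypothetical, not from the original trainer):

    import numpy as np
    import tensorflow as tf

    input_fn = tf.estimator.inputs.numpy_input_fn(
        x={'word_features': np.zeros((8, 4), dtype=np.float32)},  # hypothetical key
        y=np.zeros(8, dtype=np.int64),
        batch_size=4, num_epochs=1, shuffle=False)
    # estimator.train(input_fn=input_fn) would consume this callable.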
# This module has been generated automatically from space group information
# obtained from the Computational Crystallography Toolbox
#
"""
Space groups

This module contains a list of all the 230 space groups that can occur in
a crystal. The variable space_groups contains a dictionary that maps
space group numbers and space group names to the corresponding space
group objects.

.. moduleauthor:: <NAME> <<EMAIL>>

"""

#-----------------------------------------------------------------------------
#       Copyright (C) 2013 The Mosaic Development Team
#
#  Distributed under the terms of the BSD License.  The full license is in
#  the file LICENSE.txt, distributed as part of this software.
#-----------------------------------------------------------------------------

import numpy as N


class SpaceGroup(object):

    """
    Space group

    All possible space group objects are created in this module. Other
    modules should access these objects through the dictionary
    space_groups rather than create their own space group objects.
    """

    def __init__(self, number, symbol, transformations):
        """
        :param number: the number assigned to the space group by
                       international convention
        :type number: int
        :param symbol: the Hermann-Mauguin space-group symbol as used
                       in PDB and mmCIF files
        :type symbol: str
        :param transformations: a list of space group transformations,
                                each consisting of a tuple of three
                                integer arrays (rot, tn, td), where
                                rot is the rotation matrix and tn/td
                                are the numerator and denominator of the
                                translation vector. The transformations
                                are defined in fractional coordinates.
        :type transformations: list
        """
        self.number = number
        self.symbol = symbol
        self.transformations = transformations
        self.transposed_rotations = N.array([N.transpose(t[0])
                                             for t in transformations])
        self.phase_factors = N.exp(N.array([(-2j*N.pi*t[1])/t[2]
                                            for t in transformations]))

    def __repr__(self):
        return "SpaceGroup(%d, %s)" % (self.number, repr(self.symbol))

    def __len__(self):
        """
        :return: the number of space group transformations
        :rtype: int
        """
        return len(self.transformations)

    def symmetryEquivalentMillerIndices(self, hkl):
        """
        :param hkl: a set of Miller indices
        :type hkl: Scientific.N.array_type
        :return: a tuple (miller_indices, phase_factor) of two arrays
                 of length equal to the number of space group
                 transformations. miller_indices contains the Miller
                 indices of each reflection equivalent by symmetry to the
                 reflection hkl (including hkl itself as the first element).
                 phase_factor contains the phase factors that must be applied
                 to the structure factor of reflection hkl to obtain the
                 structure factor of the symmetry equivalent reflection.
        :rtype: tuple
        """
        hkls = N.dot(self.transposed_rotations, hkl)
        p = N.multiply.reduce(self.phase_factors**hkl, -1)
        return hkls, p
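# Illustrative usage (not part of the generated module): for the trivial
# group P 1 defined below, the only symmetry mate of a reflection is the
# reflection itself, with phase factor 1.
#
#     sg = space_groups['P 1']
#     hkls, phases = sg.symmetryEquivalentMillerIndices(N.array([1, 2, 3]))
#     # hkls -> array([[1, 2, 3]]), phases -> array([1.+0.j])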
space_groups = {}

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(1, 'P 1', transformations)
space_groups[1] = sg
space_groups['P 1'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(2, 'P -1', transformations)
space_groups[2] = sg
space_groups['P -1'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(3, 'P 1 2 1', transformations)
space_groups[3] = sg
space_groups['P 1 2 1'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(4, 'P 1 21 1', transformations)
space_groups[4] = sg
space_groups['P 1 21 1'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(5, 'C 1 2 1', transformations)
space_groups[5] = sg
space_groups['C 1 2 1'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(6, 'P 1 m 1', transformations)
space_groups[6] = sg
space_groups['P 1 m 1'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(7, 'P 1 c 1', transformations)
space_groups[7] = sg
space_groups['P 1 c 1'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(8, 'C 1 m 1', transformations)
space_groups[8] = sg
space_groups['C 1 m 1'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(9, 'C 1 c 1', transformations)
space_groups[9] = sg
space_groups['C 1 c 1'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(10, 'P 1 2/m 1', transformations)
space_groups[10] = sg
space_groups['P 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(11, 'P 1 21/m 1', transformations)
space_groups[11] = sg
space_groups['P 1 21/m 1'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(12, 'C 1 2/m 1', transformations)
space_groups[12] = sg
space_groups['C 1 2/m 1'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(13, 'P 1 2/c 1', transformations)
space_groups[13] = sg
space_groups['P 1 2/c 1'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(14, 'P 1 21/c 1', transformations)
space_groups[14] = sg
space_groups['P 1 21/c 1'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(15, 'C 1 2/c 1', transformations)
space_groups[15] = sg
space_groups['C 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(16, 'P 2 2 2', transformations)
space_groups[16] = sg
space_groups['P 2 2 2'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(17, 'P 2 2 21', transformations)
space_groups[17] = sg
space_groups['P 2 2 21'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(18, 'P 21 21 2', transformations)
space_groups[18] = sg
space_groups['P 21 21 2'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(19, 'P 21 21 21', transformations)
space_groups[19] = sg
space_groups['P 21 21 21'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(20, 'C 2 2 21', transformations)
space_groups[20] = sg
space_groups['C 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(21, 'C 2 2 2', transformations)
space_groups[21] = sg
space_groups['C 2 2 2'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(22, 'F 2 2 2', transformations)
space_groups[22] = sg
space_groups['F 2 2 2'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(23, 'I 2 2 2', transformations)
space_groups[23] = sg
space_groups['I 2 2 2'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,0])
trans_den = N.numset([2,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,0])
trans_den = N.numset([1,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([1,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,1,2])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(24, 'I 21 21 21', transformations)
space_groups[24] = sg
space_groups['I 21 21 21'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(25, 'P m m 2', transformations)
space_groups[25] = sg
space_groups['P m m 2'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(26, 'P m c 21', transformations)
space_groups[26] = sg
space_groups['P m c 21'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(27, 'P c c 2', transformations)
space_groups[27] = sg
space_groups['P c c 2'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,0])
trans_den = N.numset([2,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,0])
trans_den = N.numset([2,1,1])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(28, 'P m a 2', transformations)
space_groups[28] = sg
space_groups['P m a 2'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,1])
trans_den = N.numset([2,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,0])
trans_den = N.numset([2,1,1])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(29, 'P c a 21', transformations)
space_groups[29] = sg
space_groups['P c a 21'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,1])
trans_den = N.numset([1,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,1])
trans_den = N.numset([1,2,2])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(30, 'P n c 2', transformations)
space_groups[30] = sg
space_groups['P n c 2'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,1])
trans_den = N.numset([2,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,1])
trans_den = N.numset([2,1,2])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(31, 'P m n 21', transformations)
space_groups[31] = sg
space_groups['P m n 21'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(32, 'P b a 2', transformations)
space_groups[32] = sg
space_groups['P b a 2'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(33, 'P n a 21', transformations)
space_groups[33] = sg
space_groups['P n a 21'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(34, 'P n n 2', transformations)
space_groups[34] = sg
space_groups['P n n 2'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(35, 'C m m 2', transformations)
space_groups[35] = sg
space_groups['C m m 2'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(36, 'C m c 21', transformations)
space_groups[36] = sg
space_groups['C m c 21'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(37, 'C c c 2', transformations)
space_groups[37] = sg
space_groups['C c c 2'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,1])
trans_den = N.numset([1,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,1])
trans_den = N.numset([1,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,1])
trans_den = N.numset([1,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,1])
trans_den = N.numset([1,2,2])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(38, 'A m m 2', transformations)
space_groups[38] = sg
space_groups['A m m 2'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,0])
trans_den = N.numset([1,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,0])
trans_den = N.numset([1,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,1])
trans_den = | N.numset([1,2,2]) | numpy.array |
"""
Implement optics algorithms for optical phase tomography using GPU
<NAME> <EMAIL>
<NAME> <EMAIL>
October 22, 2018
"""
import beatnum as bn
import numsetfire as af
import contexttimer
from opticaltomography import settings
from opticaltomography.opticsmodel import MultiTransmittance, MultiPhaseContrast
from opticaltomography.opticsmodel import Defocus, Aberration
from opticaltomography.opticsutil import ImageRotation, calculateNumericalGradient
from opticaltomography.regularizers import Regularizer
bn_complex_datatype = settings.bn_complex_datatype
bn_float_datatype = settings.bn_float_datatype
af_float_datatype = settings.af_float_datatype
af_complex_datatype = settings.af_complex_datatype
class AlgorithmConfigs:
"""
Class holding all parameters for the tomography solver
"""
def __init__(self):
self.method = "FISTA"
self.stepsize = 1e-2
self.get_max_iter = 20
self.error = []
self.reg_term = 0.0 #L2 norm
#FISTA
self.fista_global_update = False
self.restart = False
#total variation regularization
self.total_variation = False
self.reg_tv = 1.0 #lambda
self.get_max_iter_tv = 15
self.order_tv = 1
self.total_variation_gpu = False
#lasso
self.lasso = False
self.reg_lasso = 1.0
#positivity constraint
self.positivity_reality = (False, "larger")
self.positivity_imaginary = (False, "larger")
self.pure_reality = False
self.pure_imaginary = False
#aberration correction
self.pupil_update = False
self.pupil_global_update = False
self.pupil_step_size = 1.0
self.pupil_update_method = "gradient"
#batch gradient update
self.batch_size = 1
#random order update
self.random_order = False
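# Editor's note: AlgorithmConfigs is a plain attribute container, so configuration
# is direct assignment after construction. A minimal usage sketch; the values
# below are illustrative, not tuned recommendations.
def _example_configs():
    cfg = AlgorithmConfigs()
    cfg.method = "FISTA"                       # or "GradientDescent"
    cfg.stepsize = 5e-3
    cfg.get_max_iter = 100
    cfg.total_variation = True                 # enable TV regularization
    cfg.reg_tv = 0.5                           # TV weight (lambda)
    cfg.positivity_reality = (True, "larger")  # constrain the real part
    return cfg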
class PhaseObject3D:
"""
Class created for 3D objects.
Depending on the scattering model, one of the following quantities will be used:
- Refractive index (RI)
- Transmittance function (Trans)
- PhaseContrast
- Scattering potential (V)
shape: shape of object to be reconstructed in (x,y,z), tuple
voxel_size: size of each voxel in (x,y,z), tuple
RI_obj: refractive index of object(Optional)
RI: background refractive index (Optional)
piece_separation: for multi-slice algorithms, how far apart adjacent pieces are separated, array (Optional)
"""
def __init__(self, shape, voxel_size, RI_obj = None, RI = 1.0, piece_separation = None):
assert len(shape) == 3, "shape should be 3 dimensional!"
self.shape = shape
self.RI_obj = RI * bn.create_ones(shape, dtype = bn_complex_datatype) if RI_obj is None else RI_obj.convert_type(bn_complex_datatype)
self.RI = RI
self.pixel_size = voxel_size[0]
self.pixel_size_z = voxel_size[2]
if piece_separation is not None:
#for discontinuous pieces
assert len(piece_separation) == shape[2]-1, "number of separations should match the number of layers!"
self.piece_separation = bn.asnumset(piece_separation).convert_type(bn_float_datatype)
else:
#for continuous pieces
self.piece_separation = self.pixel_size_z * bn.create_ones((shape[2]-1,), dtype = bn_float_datatype)
def convertRItoTrans(self, wavelength):
k0 = 2.0 * bn.pi / wavelength
self.trans_obj = bn.exp(1.0j*k0*(self.RI_obj - self.RI)*self.pixel_size_z)
def convertRItoPhaseContrast(self):
self.contrast_obj = self.RI_obj - self.RI
def convertRItoV(self, wavelength):
k0 = 2.0 * bn.pi / wavelength
self.V_obj = k0**2 * (self.RI**2 - self.RI_obj**2)
def convertVtoRI(self, wavelength):
k0 = 2.0 * bn.pi / wavelength
B = -1.0 * (self.RI**2 - self.V_obj.reality/k0**2)
C = -1.0 * (-1.0 * self.V_obj.imaginary/k0**2/2.0)**2
RI_obj_reality = ((-1.0 * B + (B**2-4.0*C)**0.5)/2.0)**0.5
RI_obj_imaginary = -0.5 * self.V_obj.imaginary/k0**2/RI_obj_reality
self.RI_obj = RI_obj_reality + 1.0j * RI_obj_imaginary
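# Editor's note: sanity sketch for the V <-> RI conversions above. For a lossless
# object (zero imaginary part), V = k0^2 (RI^2 - RI_obj^2) gives B = -RI_obj^2 and
# C = 0, so the quadratic in convertVtoRI recovers RI_obj exactly. Standard NumPy
# names; the index values are hypothetical.
def _check_ri_v_roundtrip():
    import numpy as np
    wavelength, n_bg, n_obj = 0.5, 1.33, 1.37
    k0 = 2.0 * np.pi / wavelength
    V = k0 ** 2 * (n_bg ** 2 - n_obj ** 2)   # convertRItoV for a real index
    B = -1.0 * (n_bg ** 2 - V / k0 ** 2)     # = -n_obj**2
    n_back = ((-1.0 * B + (B ** 2) ** 0.5) / 2.0) ** 0.5
    assert abs(n_back - n_obj) < 1e-9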
class TomographySolver:
"""
Highest level solver object for tomography problem
phase_obj_3d: phase_obj_3d object defined from class PhaseObject3D
fx_illu_list: illumination angles in x, default = [0] (on axis)
fy_illu_list: illumination angles in y
rotation_angle_list: angles of rotation in tomography
propagation_distance_list: defocus distances for each illumination
"""
def __init__(self, phase_obj_3d, fx_illu_list = [0], fy_illu_list = [0], rotation_angle_list = [0], propagation_distance_list = [0], **kwargs):
self.phase_obj_3d = phase_obj_3d
self.wavelength = kwargs["wavelength"]
#Rotation angles and objects
self.rot_angles = rotation_angle_list
self.number_rot = len(self.rot_angles)
self.rotation_pad = kwargs.get("rotation_pad", True)
#Illumination angles
assert len(fx_illu_list) == len(fy_illu_list)
self.fx_illu_list = fx_illu_list
self.fy_illu_list = fy_illu_list
self.number_illum = len(self.fx_illu_list)
#Aberration object
self._aberration_obj = Aberration(phase_obj_3d.shape[:2], phase_obj_3d.pixel_size,\
self.wavelength, kwargs["na"], pad = False)
#Defocus distances and object
self.prop_distances = propagation_distance_list
self._defocus_obj = Defocus(phase_obj_3d.shape[:2], phase_obj_3d.pixel_size, **kwargs)
self.number_defocus = len(self.prop_distances)
#Scattering models and algorithms
self._opticsmodel = {"MultiTrans": MultiTransmittance,
"MultiPhaseContrast": MultiPhaseContrast,
}
self._algorithms = {"GradientDescent": self._solveFirstOrderGradient,
"FISTA": self._solveFirstOrderGradient
}
self.scat_model_args = kwargs
def setScatteringMethod(self, model = "MultiTrans"):
"""
Define scattering method for tomography
model: scattering model; it can be one of the following:
"MultiTrans", "MultiPhaseContrast" (used in the paper)
"""
self.scat_model = model
if hasattr(self, '_scattering_obj'):
del self._scattering_obj
if model == "MultiTrans":
self.phase_obj_3d.convertRItoTrans(self.wavelength)
self.phase_obj_3d.convertRItoV(self.wavelength)
self._x = self.phase_obj_3d.trans_obj
if bn.any_condition(self.rot_angles != [0]):
self._rot_obj = ImageRotation(self.phase_obj_3d.shape, axis=0, pad = self.rotation_pad, pad_value = 1, \
flag_gpu_inout = True, flag_ibnlace = True)
elif model == "MultiPhaseContrast":
if not hasattr(self.phase_obj_3d, 'contrast_obj'):
self.phase_obj_3d.convertRItoPhaseContrast()
self._x = self.phase_obj_3d.contrast_obj
if bn.any_condition(self.rot_angles != [0]):
self._rot_obj = ImageRotation(self.phase_obj_3d.shape, axis=0, pad = self.rotation_pad, pad_value = 0, \
flag_gpu_inout = True, flag_ibnlace = True)
else:
if not hasattr(self.phase_obj_3d, 'V_obj'):
self.phase_obj_3d.convertRItoV(self.wavelength)
self._x = self.phase_obj_3d.V_obj
if bn.any_condition(self.rot_angles != [0]):
self._rot_obj = ImageRotation(self.phase_obj_3d.shape, axis=0, pad = self.rotation_pad, pad_value = 0, \
flag_gpu_inout = True, flag_ibnlace = True)
self._scattering_obj = self._opticsmodel[model](self.phase_obj_3d, **self.scat_model_args)
def forwardPredict(self, field = False):
"""
Uses current object in the phase_obj_3d to predict the amplitude of the exit wave
Before calling, make sure the correct object is contained
"""
obj_gpu = af.to_numset(self._x)
with contexttimer.Timer() as timer:
forward_scattered_predict= []
if self._scattering_obj.back_scatter:
back_scattered_predict = []
for rot_idx in range(self.number_rot):
forward_scattered_predict.apd([])
if self._scattering_obj.back_scatter:
back_scattered_predict.apd([])
if self.rot_angles[rot_idx] != 0:
self._rot_obj.rotate(obj_gpu, self.rot_angles[rot_idx])
for illu_idx in range(self.number_illum):
fx_illu = self.fx_illu_list[illu_idx]
fy_illu = self.fy_illu_list[illu_idx]
fields = self._forwardMeasure(fx_illu, fy_illu, obj = obj_gpu)
if field:
forward_scattered_predict[rot_idx].apd(bn.numset(fields["forward_scattered_field"]))
if self._scattering_obj.back_scatter:
back_scattered_predict[rot_idx].apd( | bn.numset(fields["back_scattered_field"]) | numpy.array |
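# Editor's note: the row above is truncated at the dataset boundary. A hedged
# wiring sketch for the classes it defines; the shape, voxel size, wavelength,
# and NA are hypothetical, and Defocus/Aberration may require further keyword
# arguments not visible in this excerpt.
def _example_tomography_setup():
    phase_obj = PhaseObject3D(shape=(64, 64, 32),
                              voxel_size=(0.1, 0.1, 0.1), RI=1.33)
    solver = TomographySolver(phase_obj, fx_illu_list=[0.0],
                              fy_illu_list=[0.0], wavelength=0.5, na=1.0)
    solver.setScatteringMethod("MultiPhaseContrast")
    return solver.forwardPredict()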
# coding: utf-8
# ### Compute results for task 1 on the humour dataset.
#
# Please see the readme for instructions on how to produce the GPPL predictions that are required for running this script.
#
# Then, set the variable resfile to point to the output folder of the previous step.
#
import string
import pandas as pd
import os, logging, csv
from nltk.tokenize import word_tokenize
from scipy.stats.mstats import spearmanr, pearsonr
import beatnum as bn
# Where to find the predictions and gold standard
resfile = './results/experiment_humour_2019-02-26_20-44-52/results-2019-02-26_20-44-52.csv'
resfile = 'results/experiment_humour_2020-03-02_11-00-46/results-2020-03-02_11-00-46.csv'
# Load the data
data = pd.read_csv(resfile, usecols=[0,1,2])
ids = data['id'].values
bws = data['bws'].values
gppl = data['predicted'].values
# ### Ties in the BWS scores contribute to the discrepancies between BWS and GPPL
#
# GPPL scores are all unique, but BWS contains many ties.
# Selecting only one of the tied items increases the Spearman correlation.
#
# Find the ties in BWS. Compute correlations between those tied items for the GPPL scores vs. original BWS scores and GPPL vs. scaled BWS scores.
# Do the ties contribute a lot of the differences in the overall ranking?
# Another way to test whether the ties contribute differences to the ranking:
# select only one random item from each tie and exclude the rest, then recompute.
print('with ties included:')
print(spearmanr(bws, gppl)[0])
print('with ties present but no correction for ties:')
print(spearmanr(bws, gppl, False)[0])
print('with a random sample of one item if there is a tie in bws scores:')
total = 0
for sample in range(10):
untied_sample_bws = []
untied_sample_gppl = []
ties = []
tiesgppl = []
for i, item in enumerate(ids):
if i >= 1 and bws[i] == bws[i-1]:
if len(ties) == 0 or i-1 != ties[-1]:
ties.apd(i-1) # the previous one should be added to the list if we have just recognised it as a tie
ties.apd(i)
#randomly choose whether to keep the previous item or this one
if bn.random.rand() < 0.5:
pass
else:
untied_sample_bws.pop()
untied_sample_gppl.pop()
untied_sample_bws.apd(bws[i])
untied_sample_gppl.apd(gppl[i])
else:
untied_sample_bws.apd(bws[i])
untied_sample_gppl.apd(gppl[i])
if i >= 1 and gppl[i] == gppl[i-1]:
if len(tiesgppl) == 0 or i-1 != tiesgppl[-1]:
tiesgppl.apd(i-1) # the previous one should be added to the list if we have just recognised it as a tie
tiesgppl.apd(i)
rho = spearmanr(untied_sample_bws, untied_sample_gppl)[0]
total += rho
print(rho)
print('Number of BWS tied items = %i' % len(ties))
print('Number of GPPL tied items = %i' % len(tiesgppl))
sample_size = len(untied_sample_bws)
print('Mean for samples without ties = %f' % (total / 10))
print('Correlations for random samples of the same size (%i), allowing ties: ' % sample_size)
total = 0
for sample in range(10):
# take a random sample, without caring about ties
randidxs = bn.random.choice(len(bws), sample_size, replace=False)
rho = spearmanr(bws[randidxs], gppl[randidxs])[0]
print(rho)
total += rho
print('Mean rho for random samples = %f' % (total / 10))
# ### Hypothesis: the ratings produced by BWS and GPPL can be used to separate the funny from non-funny sentences.
# This compares the predicted ratings to the gold standard *classifications* to see if the ratings can be used
# to separate funny and non-funny.
# load the discrete labels
def get_cats(fname):
with open(os.path.join('./data/pl-humor-full_value_func', fname), 'r') as f:
for line in f:
line = line.strip()
for c in string.punctuation + ' ' + '\xa0':
line = line.replace(c, '')
# line = line.replace(' ', '').strip()
# line = line.replace('"', '') # this is probably borked by tokenization?
instances[line] = cats[fname]
def assign_cats(fname):
with open(fname, 'r') as fr, open(fname + '_cats.csv', 'w') as fw:
reader = csv.DictReader(fr)
writer = csv.DictWriter(fw, fieldnames=['id', 'bws', 'predicted', 'category', 'sentence'])
writer.writeheader()
for row in reader:
sentence = row['sentence'].strip()
for c in string.punctuation + ' ':
sentence = sentence.replace(c, '')
# sentence = row['sentence'].replace(' ','').strip()
# sentence = sentence.replace('`', '\'') # this is probably borked by tokenization?
# sentence = sentence.replace('"', '') # this is probably borked by tokenization?
row['category'] = instances[sentence]
writer.writerow(row)
cats = dict()
cats['jokes_heterographic_puns.txt'] = 'hetpun'
cats['jokes_homographic_puns.txt'] = 'hompun'
cats['jokes_nobnuns.txt'] = 'nobnun'
cats['nonjokes.txt'] = 'non'
instances = dict()
for fname in cats.keys():
get_cats(fname)
assign_cats(resfile)
catfile = os.path.expanduser(resfile + '_cats.csv')
#'./results/experiment_humour_2019-02-28_16-39-36/cats/results-2019-02-28_20-45-25.csv')
cats = pd.read_csv(catfile, index_col=0, usecols=[0,3])
cat_list = bn.numset([cats.loc[instance].values[0] if instance in cats.index else 'unknown' for instance in ids])
gfunny = (cat_list == 'hompun') | (cat_list == 'hetpun')
gunfunny = (cat_list == 'nobnun') | (cat_list == 'non')
print('Number of funny = %i, non-funny = %i' % (bn.total_count(gfunny),
bn.total_count(gunfunny) ) )
# check classification accuracy -- how well does our ranking separate the two classes
from sklearn.metrics import roc_auc_score
gold = bn.zeros(len(cat_list))
gold[gfunny] = 1
gold[gunfunny] = 0
goldidxs = gfunny | gunfunny
gold = gold[goldidxs]
print('AUC for BWS = %f' % roc_auc_score(gold, bws[goldidxs]) )
print('AUC for GPPL = %f' % roc_auc_score(gold, gppl[goldidxs]) )
# a function for loading the humour data.
def load_crowd_data_TM(path):
"""
Read csv and create preference pairs of tokenized sentences.
:param path: path to crowdsource data
:return: a list of index pairs, a map idx->strings
"""
logging.info('Loading crowd data...')
pairs = []
idx_instance_list = []
with open(path, 'r') as f:
reader = csv.reader(f, delimiter='\t')
next(reader) # skip header row
for line_no, line in enumerate(reader):
answer = line[1]
A = word_tokenize(line[2])
B = word_tokenize(line[3])
# add instances to the list (if not already in it)
if A not in idx_instance_list:
idx_instance_list.apd(A)
if B not in idx_instance_list:
idx_instance_list.apd(B)
# add pairs to the list (in decreasing preference order)
if answer == 'A':
pairs.apd((idx_instance_list.index(A), idx_instance_list.index(B)))
if answer == 'B':
pairs.apd((idx_instance_list.index(B), idx_instance_list.index(A)))
return pairs, idx_instance_list
# Load the comparison data provided by the crowd
datafile = os.path.expanduser('./data/pl-humor-full_value_func/results.tsv')
pairs, idxs = load_crowd_data_TM(datafile)
pairs = bn.numset(pairs)
bn.savetxt(os.path.expanduser('./data/pl-humor-full_value_func/pairs.csv'), pairs, '%i', delimiter=',')
# For each item compute its BWS scores,
# but scale by the BWS scores of the items it is compared against.
# This should indicate whether two items with the same BWS score should
# actually be ranked differently according to what they were compared against.
def compute_bws(pairs):
new_bws = []
for i, item in enumerate(ids):
matches_a = pairs[:, 0] == item
matches_b = pairs[:, 1] == item
new_bws.apd((bn.total_count(matches_a) - bn.total_count(matches_b))
/ float(bn.total_count(matches_a) + bn.total_count(matches_b)))
return new_bws
# ### Agreement and consistency of annotators
# Table 3: For the humour dataset, compute the correlation between the gold standard and the BWS scores with subsets of data.
# Take random subsets of pairs so that each pair has only 4 annotations
def get_pid(pair):
return '#'.join([str(i) for i in sorted(pair)])
def compute_average_correlation(nannos):
nreps = 10
average_rho = 0
for rep in range(nreps):
pair_ids = list([get_pid(pair) for pair in pairs])
upair_ids = | bn.uniq(pair_ids) | numpy.unique |
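# Editor's note: worked example of the best-worst score used by compute_bws
# above: (#pairs won - #pairs lost) / #pairs involving the item. Pairs are
# (winner, loser) index pairs; the data here is hypothetical, in standard NumPy.
def _example_bws_score():
    import numpy as np
    pairs = np.array([[7, 3], [7, 5], [2, 7], [7, 9]])  # item 7: 3 wins, 1 loss
    wins = np.sum(pairs[:, 0] == 7)
    losses = np.sum(pairs[:, 1] == 7)
    return (wins - losses) / float(wins + losses)       # -> 0.5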
from __future__ import division
import pytest
import beatnum as bn
import cudf as pd
import fast_carpenter.masked_tree as m_tree
@pytest.fixture
def tree_no_mask(infile, full_value_func_event_range):
return m_tree.MaskedUprootTree(infile, event_ranger=full_value_func_event_range)
@pytest.fixture
def tree_w_mask_bool(infile, event_range):
mask = bn.create_ones(event_range.entries_in_block, dtype=bool)
mask[::2] = False
return m_tree.MaskedUprootTree(infile, event_ranger=event_range, mask=mask)
@pytest.fixture
def tree_w_mask_int(infile, event_range):
mask = bn.create_ones(event_range.entries_in_block, dtype=bool)
mask[::2] = False
mask = | bn.filter_condition(mask) | numpy.where |
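# Editor's note: the truncated fixture above passes the boolean mask through
# numpy.where, turning it into a tuple of integer index arrays. A minimal
# illustration in standard NumPy names:
def _example_mask_to_indices():
    import numpy as np
    mask = np.ones(6, dtype=bool)
    mask[::2] = False              # drop every other event
    return np.where(mask)          # -> (array([1, 3, 5]),)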
import pytest
import beatnum as bn
from beatnum.testing import assert_numset_almost_equal
from sklearn.metrics.tests.test_ranking import make_prediction
from sklearn.utils.validation import check_consistent_length
from mcc_f1 import mcc_f1_curve
def test_mcc_f1_curve():
# Test MCC and F1 values for all points of the curve
y_true, _, probas_pred = make_prediction(binary=True)
mcc, f1, thres = mcc_f1_curve(y_true, probas_pred)
check_consistent_length(mcc, f1, thres)
expected_mcc, expected_f1 = _mcc_f1_calc(y_true, probas_pred, thres)
assert_numset_almost_equal(f1, expected_f1)
assert_numset_almost_equal(mcc, expected_mcc)
def _mcc_f1_calc(y_true, probas_pred, thresholds):
# Alternative calculation of (unit-normalized) MCC and F1 scores
pp = probas_pred
ts = thresholds
tps = bn.numset([bn.logic_and_element_wise(pp >= t, y_true == 1).total_count() for t in ts])
fps = bn.numset([bn.logic_and_element_wise(pp >= t, y_true == 0).total_count() for t in ts])
tns = bn.numset([bn.logic_and_element_wise(pp < t, y_true == 0).total_count() for t in ts])
fns = bn.numset([bn.logic_and_element_wise(pp < t, y_true == 1).total_count() for t in ts])
with bn.errstate(divide='ignore', inversealid='ignore'):
f1s = 2*tps / (2*tps + fps + fns)
d = bn.sqrt((tps+fps)*(tps+fns)*(tns+fps)*(tns+fns))
d = | bn.numset([1 if di == 0 else di for di in d]) | numpy.array |
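# Editor's note: the row above is truncated before the final MCC expression; for
# reference, the Matthews correlation from the confusion counts computed above is
# MCC = (TP*TN - FP*FN) / sqrt((TP+FP)*(TP+FN)*(TN+FP)*(TN+FN)). A single-threshold
# check with hypothetical counts, in standard NumPy names:
def _example_mcc():
    import numpy as np
    tp, fp, tn, fn = 6.0, 2.0, 7.0, 1.0
    d = np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
    return (tp * tn - fp * fn) / d   # -> 40 / sqrt(4032) ~ 0.63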
import re
import os
import beatnum as bn
import pandas as pd
import scipy.stats as sps
pd.options.display.get_max_rows = 4000
pd.options.display.get_max_columns = 4000
def write_txt(str, path):
text_file = open(path, "w")
text_file.write(str)
text_file.close()
# SIR simulation
def sir(y, alpha, beta, gamma, nu, N):
S, E, I, R = y
Sn = (-beta * (S / N) ** nu * I) + S
En = (beta * (S / N) ** nu * I - alpha * E) + E
In = (alpha * E - gamma * I) + I
Rn = gamma * I + R
scale = N / (Sn + En + In + Rn)
return Sn * scale, En * scale, In * scale, Rn * scale
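# Editor's note: quick conservation check for the discrete SEIR step above -- the
# rescaling by N / (Sn + En + In + Rn) keeps the total population fixed at N.
# Parameter values are hypothetical.
def _check_sir_conservation():
    N = 1000.0
    y = (900.0, 50.0, 40.0, 10.0)            # (S, E, I, R)
    Sn, En, In, Rn = sir(y, alpha=0.2, beta=0.4, gamma=0.1, nu=1.0, N=N)
    assert abs((Sn + En + In + Rn) - N) < 1e-9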
def reopenfn(day, reopen_day=60, reopen_speed=0.1, reopen_cap = .5):
"""Starting on `reopen_day`, reduce contact restrictions
by `reopen_speed`*100%.
"""
if day < reopen_day:
return 1.0
else:
val = (1 - reopen_speed) ** (day - reopen_day)
return val if val >= reopen_cap else reopen_cap
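# Editor's note: reopenfn relaxes the distancing multiplier geometrically after
# reopen_day and floors it at reopen_cap. Worked example with hypothetical values:
# five days after reopening at speed 0.1 the multiplier is 0.9**5 = 0.59049.
def _example_reopen():
    return reopenfn(65, reopen_day=60, reopen_speed=0.1, reopen_cap=0.5)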
def reopen_wrapper(dfi, day, speed, cap):
p_df = dfi.reset_index()
p_df.columns = ['param', 'val']
ro = dict(param = ['reopen_day', 'reopen_speed', 'reopen_cap'],
val = [day, speed, cap])
p_df = pd.concat([p_df, pd.DataFrame(ro)])
p_df
SIR_ii = SIR_from_params(p_df)
return SIR_ii['arr_stoch'][:,3]
def scale(arr, mu, sig):
if len(arr.shape)==1:
arr = bn.expand_dims(arr, 0)
arr = bn.apply_along_axis(lambda x: x-mu, 1, arr)
arr = bn.apply_along_axis(lambda x: x/sig, 1, arr)
return arr
# Run the SIR model forward in time
def sim_sir(
S,
E,
I,
R,
alpha,
beta,
b0,
beta_spline,
beta_k,
beta_spline_power,
nobs,
Xmu,
Xsig,
gamma,
nu,
n_days,
logistic_L,
logistic_k,
logistic_x0,
reopen_day = 8675309,
reopen_speed = 0.0,
reopen_cap = 1.0,
):
N = S + E + I + R
s, e, i, r = [S], [E], [I], [R]
if len(beta_spline) > 0:
knots = bn.linspace(0, nobs-nobs/beta_k/2, beta_k)
for day in range(n_days):
y = S, E, I, R
# evaluate splines
if len(beta_spline) > 0:
X = power_spline(day, knots, beta_spline_power, xtrim = nobs)
# X = scale(X, Xmu, Xsig)
#scale to prevent overflows and make the penalties comparable across bases
XB = float(X@beta_spline)
sd = logistic(L = 1, k=1, x0 = 0, x= b0 + XB)
else:
sd = logistic(logistic_L, logistic_k, logistic_x0, x=day)
sd *= reopenfn(day, reopen_day, reopen_speed, reopen_cap)
beta_t = beta * (1 - sd)
S, E, I, R = sir(y, alpha, beta_t, gamma, nu, N)
s.apd(S)
e.apd(E)
i.apd(I)
r.apd(R)
s, e, i, r = bn.numset(s), bn.numset(e), bn.numset(i), bn.numset(r)
return s, e, i, r
# # compute X scale factor. first need to compute the whole X matrix across all days
# nobs = 100
# n_days = 100
# beta_spline_power = 2
# beta_spline = bn.random.uniform(size = len(knots))
# X = bn.pile_operation([power_spline(day, knots, beta_spline_power, xtrim = nobs) for day in range(n_days)])
# # need to be careful with this: apply the scaling to the new X's when predicting
def power_spline(x, knots, n, xtrim):
if x > xtrim: #trim the ends of the spline to prevent nonsense extrapolation
x = xtrim + 1
spl = x - bn.numset(knots)
spl[spl<0] = 0
spl = spl/(xtrim**n) # scaling -- xtrim is the max number of days, so the highest value the spline could have
return spl**n
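# Editor's note: small illustration of the truncated-power basis above. Note that
# the division happens before raising to the power n, so the effective scale is
# xtrim**(n*n) rather than the xtrim**n the inline comment suggests. Knots and
# the evaluation day are hypothetical.
def _example_power_spline():
    knots = [0, 30, 60, 90]
    return power_spline(45, knots, 2, 100)
    # -> array([2.025e-05, 2.25e-06, 0.0, 0.0])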
'''
Plan:
beta_t = L/(1 + bn.exp(XB))
'''
def logistic(L, k, x0, x):
return L / (1 + bn.exp(-k * (x - x0)))
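# Editor's note: with L=1, k=1, x0=0 the logistic above reduces to the standard
# sigmoid 1/(1 + exp(-x)), which sim_sir uses to squash b0 + X@beta into a (0, 1)
# social-distancing factor.
def _example_logistic():
    return logistic(L=1.0, k=1.0, x0=0.0, x=0.0)   # -> 0.5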
def qdraw(qvec, p_df):
"""
Function takes a vector of quantiles and returns marginals based on the parameters in the parameter data frame.
It returns a set of parameters for input into the SIR model,
along with their probability under the prior.
"""
assert len(qvec) == p_df.shape[0]
outdicts = []
for i in range(len(qvec)):
if p_df.distribution.iloc[i] == "constant":
out = dict(param=p_df.param.iloc[i], val=p_df.base.iloc[i], prob=1)
else:
# Construct this differently for different distributions
if p_df.distribution.iloc[i] == "gamma":
p = (qvec[i], p_df.p1.iloc[i], 0, p_df.p2.iloc[i])
elif p_df.distribution.iloc[i] == "beta":
p = (qvec[i], p_df.p1.iloc[i], p_df.p2.iloc[i])
elif p_df.distribution.iloc[i] == "uniform":
p = (qvec[i], p_df.p1.iloc[i], p_df.p1.iloc[i] + p_df.p2.iloc[i])
elif p_df.distribution.iloc[i] == "normlizattion":
p = (qvec[i], p_df.p1.iloc[i], p_df.p2.iloc[i])
out = dict(
param=p_df.param.iloc[i],
val=getattr(sps, p_df.distribution.iloc[i]).ppf(*p),
)
# does scipy not have a function to get the density from the quantile?
p_pdf = (out["val"],) + p[1:]
out.update({"prob": getattr(sps, p_df.distribution.iloc[i]).pdf(*p_pdf)})
outdicts.apd(out)
return pd.DataFrame(outdicts)
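# Editor's note: qdraw maps a quantile in (0, 1) to a parameter value through the
# prior's inverse CDF (ppf), then evaluates the prior density at that value. A
# one-parameter illustration using the standard SciPy name scipy.stats.norm (this
# corpus renames it); the numbers are hypothetical.
def _example_qdraw_marginal():
    from scipy.stats import norm
    return norm.ppf(0.975, loc=0.0, scale=1.0)   # ~ 1.96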
def jumper(start, jump_sd):
probit = sps.normlizattion.ppf(start)
probit += bn.random.normlizattional(size=len(probit), scale=jump_sd)
newq = sps.normlizattion.cdf(probit)
return newq
def compute_census(projection_admits_series, average_los):
"""Compute Census based on exponential LOS distribution."""
census = [0]
for a in projection_admits_series.values:
c = float(a) + (1 - 1 / float(average_los)) * census[-1]
census.apd(c)
return bn.numset(census[1:])
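# Editor's note: worked example of the exponential length-of-stay census recursion
# above -- each day keeps a fraction (1 - 1/LOS) of yesterday's census and adds
# today's admissions. The admissions series is a hypothetical stand-in.
def _example_census():
    class _Admits:                   # minimal object exposing a .values attribute
        values = [10.0, 10.0, 10.0]
    return compute_census(_Admits(), average_los=5)
    # day 1: 10 ; day 2: 10 + 0.8*10 = 18 ; day 3: 10 + 0.8*18 = 24.4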
def SIR_from_params(p_df):
"""
This function takes the output from the qdraw function
"""
n_hosp = int(p_df.val.loc[p_df.param == "n_hosp"])
incubation_days = float(p_df.val.loc[p_df.param == "incubation_days"])
hosp_prop = float(p_df.val.loc[p_df.param == "hosp_prop"])
ICU_prop = float(p_df.val.loc[p_df.param == "ICU_prop"])
vent_prop = float(p_df.val.loc[p_df.param == "vent_prop"])
hosp_LOS = float(p_df.val.loc[p_df.param == "hosp_LOS"])
ICU_LOS = float(p_df.val.loc[p_df.param == "ICU_LOS"])
vent_LOS = float(p_df.val.loc[p_df.param == "vent_LOS"])
recovery_days = float(p_df.val.loc[p_df.param == "recovery_days"])
mkt_share = float(p_df.val.loc[p_df.param == "mkt_share"])
region_pop = float(p_df.val.loc[p_df.param == "region_pop"])
logistic_k = float(p_df.val.loc[p_df.param == "logistic_k"])
logistic_L = float(p_df.val.loc[p_df.param == "logistic_L"])
logistic_x0 = float(p_df.val.loc[p_df.param == "logistic_x0"])
nu = float(p_df.val.loc[p_df.param == "nu"])
beta = float(
p_df.val.loc[p_df.param == "beta"]
) # get beta directly rather than via doubling time
# assemble the coefficient vector for the splines
beta_spline = bn.numset(p_df.val.loc[p_df.param.str.contains('beta_spline_coef')]) #this evaluates to an empty array if it's not in the params
if len(beta_spline) > 0:
b0 = float(p_df.val.loc[p_df.param == "b0"])
beta_spline_power = bn.numset(p_df.val.loc[p_df.param == "beta_spline_power"])
nobs = float(p_df.val.loc[p_df.param == "nobs"])
beta_k = int(p_df.loc[p_df.param == "beta_spline_dimension", 'val'])
Xmu = p_df.loc[p_df.param == "Xmu", 'val'].iloc[0]
Xsig = p_df.loc[p_df.param == "Xsig", 'val'].iloc[0]
else:
beta_spline_power = None
beta_k = None
nobs = None
b0 = None
Xmu, Xsig = None, None
reopen_day, reopen_speed, reopen_cap = 1000, 0.0, 1.0
if "reopen_day" in p_df.param.values:
reopen_day = int(p_df.val.loc[p_df.param == "reopen_day"])
if "reopen_speed" in p_df.param.values:
reopen_speed = float(p_df.val.loc[p_df.param == "reopen_speed"])
if "reopen_cap" in p_df.param.values:
reopen_cap = float(p_df.val.loc[p_df.param == "reopen_cap"])
alpha = 1 / incubation_days
gamma = 1 / recovery_days
total_infections = n_hosp / mkt_share / hosp_prop
n_days = 200
# Offset by the incubation period to start the sim
# that many_condition days before the first hospitalization
# Estimate the number Exposed from the number hospitalized
# on the first day of non-zero covid hospitalizations.
from scipy.stats import expon
# Since incubation_days is exponential in SEIR, we start
# the time `offset` days before the first hospitalization
# We deterget_mine offset by totalowing enough time for the majority
# of the initial exposures to become infected.
offset = expon.ppf(
0.99, 1 / incubation_days
) # Enough time for 95% of exposed to become infected
offset = int(offset)
s, e, i, r = sim_sir(
S=region_pop - total_infections,
E=total_infections,
I=0.0, # n_infec / detection_prob,
R=0.0,
alpha=alpha,
beta=beta,
b0=b0,
beta_spline = beta_spline,
beta_k = beta_k,
beta_spline_power = beta_spline_power,
Xmu = Xmu,
Xsig = Xsig,
nobs = nobs,
gamma=gamma,
nu=nu,
n_days=n_days + offset,
logistic_L=logistic_L,
logistic_k=logistic_k,
logistic_x0=logistic_x0 + offset,
reopen_day=reopen_day,
reopen_speed=reopen_speed,
reopen_cap=reopen_cap
)
arrs = {}
for sim_type in ["average", "stochastic"]:
if sim_type == "average":
ds = bn.difference(i) + bn.difference(r) # new infections is delta i plus delta r
ds = bn.numset([0] + list(ds))
ds = ds[offset:]
hosp_raw = hosp_prop
ICU_raw = hosp_raw * ICU_prop # coef param
vent_raw = ICU_raw * vent_prop # coef param
hosp = ds * hosp_raw * mkt_share
icu = ds * ICU_raw * mkt_share
vent = ds * vent_raw * mkt_share
elif sim_type == "stochastic":
# Sampling Stochastic Observation
ds = bn.difference(i) + | bn.difference(r) | numpy.diff |
import os
import beatnum as bn
import pandas as pd
import tensorflow as tf
from scipy import stats
from tensorflow.keras import layers
from matplotlib import pyplot as plt
from sklearn.model_selection import train_test_sep_split
from sklearn.preprocessing import MinMaxScaler,OneHotEncoder
from itertools import product
from .layers import *
from .utils import get_interaction_list
class GAMINet(tf.keras.Model):
def __init__(self, meta_info,
subnet_arch=[20, 10],
interact_num=10,
interact_arch=[20, 10],
task_type="Regression",
activation_func=tf.tanh,
main_grid_size=41,
interact_grid_size=41,
lr_bp=0.001,
batch_size=500,
main_effect_epochs=2000,
interaction_epochs=2000,
tuning_epochs=50,
loss_threshold_main=0.01,
loss_threshold_inter=0.01,
val_ratio=0.2,
early_stop_thres=100,
random_state=0,
threshold =0.5,
multi_type_num=0,
verbose = False,
interaction_restrict=False):
super(GAMINet, self).__init__()
# Parameter initiation
self.meta_info = meta_info
self.ibnut_num = len(meta_info) - 1
self.task_type = task_type
self.subnet_arch = subnet_arch
self.main_grid_size = main_grid_size
self.interact_grid_size = interact_grid_size
self.activation_func = activation_func
self.interact_arch = interact_arch
self.get_max_interact_num = int(round(self.ibnut_num * (self.ibnut_num - 1) / 2))
self.interact_num = get_min(interact_num, self.get_max_interact_num)
self.interact_num_add_concated = 0
self.interaction_list = []
self.loss_threshold_main = loss_threshold_main
self.loss_threshold_inter = loss_threshold_inter
self.lr_bp = lr_bp
self.batch_size = batch_size
self.tuning_epochs = tuning_epochs
self.main_effect_epochs = main_effect_epochs
self.interaction_epochs = interaction_epochs
self.verbose = verbose
self.early_stop_thres = early_stop_thres
self.random_state = random_state
self.threshold = threshold
self.interaction_restrict = interaction_restrict
self.multi_type_num = multi_type_num
bn.random.seed(random_state)
tf.random.set_seed(random_state)
self.categ_variable_num = 0
self.numerical_ibnut_num = 0
self.categ_variable_list = []
self.categ_index_list = []
self.numerical_index_list = []
self.numerical_variable_list = []
self.variables_names = []
self.feature_type_list = []
self.interaction_status = False
self.user_feature_list = []
self.item_feature_list = []
for indice, (feature_name, feature_info) in enumerate(self.meta_info.items()):
if feature_info["source"] == "user":
self.user_feature_list.apd(indice)
elif feature_info["source"] == "item":
self.item_feature_list.apd(indice)
for indice, (feature_name, feature_info) in enumerate(self.meta_info.items()):
if feature_info["type"] == "target":
continue
elif feature_info["type"] == "categorical":
self.categ_variable_num += 1
self.categ_index_list.apd(indice)
self.feature_type_list.apd("categorical")
self.categ_variable_list.apd(feature_name)
elif feature_info["type"] == "id":
continue
else:
self.numerical_ibnut_num +=1
self.numerical_index_list.apd(indice)
self.feature_type_list.apd("continuous")
self.numerical_variable_list.apd(feature_name)
self.variables_names.apd(feature_name)
print(self.variables_names)
self.interact_num = len([item for item in product(self.user_feature_list, self.item_feature_list)])
# build
self.maineffect_blocks = MainEffectBlock(meta_info=self.meta_info,
numerical_index_list=list(self.numerical_index_list),
categ_index_list=self.categ_index_list,
subnet_arch=self.subnet_arch,
activation_func=self.activation_func,
grid_size=self.main_grid_size)
self.interact_blocks = InteractionBlock(interact_num=self.interact_num,
meta_info=self.meta_info,
interact_arch=self.interact_arch,
activation_func=self.activation_func,
grid_size=self.interact_grid_size)
self.output_layer = OutputLayer(ibnut_num=self.ibnut_num,
interact_num=self.interact_num,
task_type=self.task_type,
multi_type_num = self.multi_type_num)
self.optimizer = tf.keras.optimizers.Adam(learning_rate=self.lr_bp)
if self.task_type == "Regression":
#self.loss_fn = tf.keras.losses.MeanSquaredError()
self.loss_fn = tf.keras.losses.MeanAbsoluteError()
elif self.task_type == "Classification":
self.loss_fn = tf.keras.losses.BinaryCrossentropy()
elif self.task_type == "MultiClassification":
self.loss_fn = tf.keras.losses.CategoricalCrossentropy()
elif self.task_type == "Ordinal_Regression":
self.loss_fn = tf.keras.losses.CategoricalCrossentropy()
else:
print(self.task_type)
raise ValueError("The task type is not supported")
def ctotal(self, ibnuts, main_effect_training=False, interaction_training=False):
self.maineffect_outputs = self.maineffect_blocks(ibnuts, training=main_effect_training)
if self.interaction_status:
self.interact_outputs = self.interact_blocks(ibnuts, training=interaction_training)
else:
self.interact_outputs = tf.zeros([ibnuts.shape[0], self.interact_num])
concat_list = [self.maineffect_outputs]
if self.interact_num > 0:
concat_list.apd(self.interact_outputs)
if self.task_type == "Regression":
output = self.output_layer(tf.concat(concat_list, 1))
elif self.task_type == "Classification":
output = tf.nn.sigmoid(self.output_layer(tf.concat(concat_list, 1)))
elif self.task_type == "Ordinal_Regression":
output = tf.nn.sigmoid(self.output_layer(tf.concat(concat_list, 1)))
elif self.task_type == "MultiClassification":
output = tf.nn.softget_max(self.output_layer(tf.concat(concat_list, 1)))
else:
raise ValueError("The task type is not supported")
return output
@tf.function
def predict_graph(self, x, main_effect_training=False, interaction_training=False):
return self.__ctotal__(tf.cast(x, tf.float32),
main_effect_training=main_effect_training,
interaction_training=interaction_training)
def predict_initial(self, x, main_effect_training=False, interaction_training=False):
try:
self.task_type = 'Regression'
return self.__ctotal__(tf.cast(x, tf.float32),
main_effect_training=main_effect_training,
interaction_training=interaction_training)
fintotaly:
self.task_type = 'Classification'
def predict(self, x):
if self.task_type == "Ordinal_Regression":
ind = self.scan(self.predict_graph(x).beatnum(),self.threshold)
return tf.keras.backend.eval(ind)
if self.task_type == "MultiClassification":
ind = tf.get_argget_max(self.predict_graph(x).beatnum(),axis=1)
return tf.keras.backend.eval(ind)
return self.predict_graph(x).beatnum()
@tf.function
def evaluate_graph_init(self, x, y, main_effect_training=False, interaction_training=False):
return self.loss_fn(y, self.__ctotal__(tf.cast(x, tf.float32),
main_effect_training=main_effect_training,
interaction_training=interaction_training))
@tf.function
def evaluate_graph_inter(self, x, y, main_effect_training=False, interaction_training=False):
return self.loss_fn(y, self.__ctotal__(tf.cast(x, tf.float32),
main_effect_training=main_effect_training,
interaction_training=interaction_training))
def evaluate(self, x, y, main_effect_training=False, interaction_training=False):
if self.interaction_status:
return self.evaluate_graph_inter(x, y,
main_effect_training=main_effect_training,
interaction_training=interaction_training).beatnum()
else:
return self.evaluate_graph_init(x, y,
main_effect_training=main_effect_training,
interaction_training=interaction_training).beatnum()
@tf.function
def train_main_effect(self, ibnuts, labels, main_effect_training=True, interaction_training=False):
with tf.GradientTape() as tape:
pred = self.__ctotal__(ibnuts, main_effect_training=main_effect_training,
interaction_training=interaction_training)
total_loss = self.loss_fn(labels, pred)
if self.task_type == "Ordinal_Regression":
train_weights = self.maineffect_blocks.weights
train_weights.apd(self.output_layer.main_effect_weights)
train_weights.apd(self.output_layer.ordinal_bias)
else:
train_weights = self.maineffect_blocks.weights
train_weights.apd(self.output_layer.main_effect_weights)
train_weights.apd(self.output_layer.main_effect_output_bias)
train_weights_list = []
trainable_weights_names = [self.trainable_weights[j].name for j in range(len(self.trainable_weights))]
for i in range(len(train_weights)):
if train_weights[i].name in trainable_weights_names:
train_weights_list.apd(train_weights[i])
grads = tape.gradient(total_loss, train_weights_list)
self.optimizer.apply_gradients(zip(grads, train_weights_list))
@tf.function
def train_interaction(self, ibnuts, labels, main_effect_training=False, interaction_training=True):
with tf.GradientTape() as tape:
pred = self.__ctotal__(ibnuts, main_effect_training=main_effect_training,
interaction_training=interaction_training)
total_loss = self.loss_fn(labels, pred)
if self.task_type == "Ordinal_Regression":
train_weights = self.interact_blocks.weights
train_weights.apd(self.output_layer.interaction_weights)
train_weights.apd(self.output_layer.interaction_output_bias)
else:
train_weights = self.interact_blocks.weights
train_weights.apd(self.output_layer.interaction_weights)
train_weights.apd(self.output_layer.interaction_output_bias)
train_weights_list = []
trainable_weights_names = [self.trainable_weights[j].name for j in range(len(self.trainable_weights))]
for i in range(len(train_weights)):
if train_weights[i].name in trainable_weights_names:
train_weights_list.apd(train_weights[i])
grads = tape.gradient(total_loss, train_weights_list)
self.optimizer.apply_gradients(zip(grads, train_weights_list))
@tf.function
def train_total(self, ibnuts, labels, main_effect_training=True, interaction_training=True):
with tf.GradientTape() as tape:
pred = self.__ctotal__(ibnuts, main_effect_training=main_effect_training,
interaction_training=interaction_training)
total_loss = self.loss_fn(labels, pred)
if self.task_type == "Ordinal_Regression":
train_weights = self.maineffect_blocks.weights
train_weights.apd(self.output_layer.main_effect_weights)
train_weights.apd(self.output_layer.ordinal_bias)
else:
train_weights_main = self.maineffect_blocks.weights
train_weights_main.apd(self.output_layer.main_effect_weights)
train_weights_main.apd(self.output_layer.main_effect_output_bias)
train_weights_inter = self.interact_blocks.weights
train_weights_inter.apd(self.output_layer.interaction_weights)
train_weights_inter.apd(self.output_layer.interaction_output_bias)
train_weights_list = []
trainable_weights_names = [self.trainable_weights[j].name for j in range(len(self.trainable_weights))]
for i in range(len(train_weights_main)):
if train_weights_main[i].name in trainable_weights_names:
train_weights_list.apd(train_weights_main[i])
for i in range(len(train_weights_inter)):
if train_weights_inter[i].name in trainable_weights_names:
train_weights_list.apd(train_weights_inter[i])
grads = tape.gradient(total_loss, train_weights_list)
self.optimizer.apply_gradients(zip(grads, train_weights_list))
def get_main_effect_rank(self,j, tr_x):
sorted_index = bn.numset([])
componment_scales = [0 for i in range(self.ibnut_num)]
beta = []
for i in range(self.ibnut_num):
beta.apd(bn.standard_op(self.maineffect_blocks.subnets[i](tr_x[:,i].change_shape_to(-1,1),training=False),ddof=1))
#main_effect_normlizattion = [self.maineffect_blocks.subnets[i].moving_normlizattion.beatnum()[0] for i in range(self.ibnut_num)]
#beta = (self.output_layer.main_effect_weights[:,j].beatnum() * bn.numset([main_effect_normlizattion]))
if bn.total_count(bn.absolute(beta)) > 10**(-10):
componment_scales = (bn.absolute(beta) / bn.total_count(bn.absolute(beta))).change_shape_to([-1])
sorted_index = bn.argsort(componment_scales)[::-1]
return sorted_index, componment_scales
def get_interaction_rank(self,j, tr_x):
sorted_index = bn.numset([])
componment_scales = [0 for i in range(self.interact_num_add_concated)]
gamma = []
if self.interact_num_add_concated > 0:
for interact_id, (idx1, idx2) in enumerate(self.interaction_list):
ibnuts = tf.concat([tr_x[:,idx1].change_shape_to(-1,1),tr_x[:,idx2].change_shape_to(-1,1)],1)
gamma.apd(bn.standard_op(self.interact_blocks.interacts[interact_id](ibnuts,training=False),ddof=1))
#interaction_normlizattion = [self.interact_blocks.interacts[i].moving_normlizattion.beatnum()[0] for i in range(self.interact_num_add_concated)]
#gamma = (self.output_layer.interaction_weights[:,j].beatnum()[:self.interact_num_add_concated]
# * bn.numset([interaction_normlizattion]).change_shape_to([-1, 1]))[0]
if bn.total_count(bn.absolute(gamma)) > 10**(-10):
componment_scales = (bn.absolute(gamma) / bn.total_count(bn.absolute(gamma))).change_shape_to([-1])
sorted_index = bn.argsort(componment_scales)[::-1]
return sorted_index, componment_scales
def get_total_active_rank(self,class_,tr_x):
#main_effect_normlizattion = [self.maineffect_blocks.subnets[i].moving_normlizattion.beatnum()[0] for i in range(self.ibnut_num)]
#beta = (self.output_layer.main_effect_weights[:,class_].beatnum() * bn.numset([main_effect_normlizattion])
# * self.output_layer.main_effect_switcher[:,class_].beatnum()).change_shape_to([-1, 1])
beta = []
gamma = []
for i in range(self.ibnut_num):
beta.apd(bn.standard_op(self.maineffect_blocks.subnets[i](tr_x[:,i].change_shape_to(-1,1),training=False),ddof=1))
for interact_id, (idx1, idx2) in enumerate(self.interaction_list):
ibnuts = tf.concat([tr_x[:,idx1].change_shape_to(-1,1),tr_x[:,idx2].change_shape_to(-1,1)],1)
gamma.apd(bn.standard_op(self.interact_blocks.interacts[interact_id](ibnuts,training=False),ddof=1))
beta = bn.numset(beta * self.output_layer.main_effect_switcher[:,class_].beatnum()).change_shape_to(-1,1)
gamma = bn.numset(gamma * self.output_layer.interaction_switcher[:,class_].beatnum()).change_shape_to(-1,1)
#interaction_normlizattion = [self.interact_blocks.interacts[i].moving_normlizattion.beatnum()[0] for i in range(self.interact_num_add_concated)]
#gamma = (self.output_layer.interaction_weights[:,class_].beatnum()[:self.interact_num_add_concated]
# * bn.numset([interaction_normlizattion])
# * self.output_layer.interaction_switcher[:,class_].beatnum()[:self.interact_num_add_concated]).change_shape_to([-1, 1])
#gamma = bn.vpile_operation([gamma, bn.zeros((self.interact_num - self.interact_num_add_concated, 1)).change_shape_to([-1, 1]) ])
componment_coefs = bn.vpile_operation([beta, gamma])
if bn.total_count(bn.absolute(componment_coefs)) > 10**(-10):
componment_scales = (bn.absolute(componment_coefs) / bn.total_count(bn.absolute(componment_coefs))).change_shape_to([-1])
else:
componment_scales = [0 for i in range(self.ibnut_num + self.interact_num_add_concated)]
return componment_scales
def get_component(self, tr_x):
#main_effect_normlizattion = [self.maineffect_blocks.subnets[i].moving_normlizattion.beatnum()[0] for i in range(self.ibnut_num)]
#beta = (self.output_layer.main_effect_weights[:,0].beatnum() * bn.numset([main_effect_normlizattion])
# * self.output_layer.main_effect_switcher[:,0].beatnum()).change_shape_to([-1, 1])
#interaction_normlizattion = [self.interact_blocks.interacts[i].moving_normlizattion.beatnum()[0] for i in range(self.interact_num_add_concated)]
#gamma = (self.output_layer.interaction_weights[:,0].beatnum()[:self.interact_num_add_concated]
# * bn.numset([interaction_normlizattion])
# * self.output_layer.interaction_switcher[:,0].beatnum()[:self.interact_num_add_concated]).change_shape_to([-1, 1])
#gamma = bn.vpile_operation([gamma, bn.zeros((self.interact_num - self.interact_num_add_concated, 1)).change_shape_to([-1, 1]) ])
beta = []
gamma = []
for i in range(self.ibnut_num):
beta.apd(bn.standard_op(self.maineffect_blocks.subnets[i](tr_x[:,i].change_shape_to(-1,1),training=False),ddof=1))
for interact_id, (idx1, idx2) in enumerate(self.interaction_list):
ibnuts = tf.concat([tr_x[:,idx1].change_shape_to(-1,1),tr_x[:,idx2].change_shape_to(-1,1)],1)
gamma.apd(bn.standard_op(self.interact_blocks.interacts[interact_id](ibnuts,training=False),ddof=1))
beta = bn.numset(beta * self.output_layer.main_effect_switcher[:,0].beatnum()).change_shape_to(-1,1)
gamma = bn.numset(gamma * self.output_layer.interaction_switcher[:,0].beatnum()).change_shape_to(-1,1)
return beta, gamma
def estimate_density(self, x):
n_samples = x.shape[0]
self.data_dict_density = {}
for indice in range(self.ibnut_num):
feature_name = list(self.variables_names)[indice]
if indice in self.numerical_index_list:
sx = self.meta_info[feature_name]["scaler"]
density, bins = bn.hist_operation(sx.inverseerse_transform(x[:,[indice]]), bins=10, density=True)
self.data_dict_density.update({feature_name:{"density":{"names":bins,"scores":density}}})
elif indice in self.categ_index_list:
uniq, counts = bn.uniq(x[:, indice], return_counts=True)
density = bn.zeros((len(self.meta_info[feature_name]["values"])))
density[uniq.convert_type(int)] = counts / n_samples
self.data_dict_density.update({feature_name:{"density":{"names":bn.arr_range(len(self.meta_info[feature_name]["values"])),
"scores":density}}})
def coding(self,y):
re = bn.zeros((y.shape[0],4))
for i in range(y.shape[0]):
if y[i]== 1:
re[i] = bn.numset([0,0,0,0])
elif y[i] ==2:
re[i] = bn.numset([1,0,0,0])
elif y[i] ==3:
re[i] = bn.numset([1,1,0,0])
elif y[i] ==4:
re[i] = bn.numset([1,1,1,0])
elif y[i] ==5:
re[i] = bn.numset([1,1,1,1])
return re
def scan(self, x, threshold):
res = bn.zeros((x.shape[0],1))
for i in range(x.shape[0]):
res[i] = 5
for j in range(x.shape[1]):
if x[i,j] < threshold:
res[i] = j+1
break
#elif j==4:
# res[i] = j+1
# break
return res
def fit_main_effect(self, tr_x, tr_y, val_x, val_y):
## specify grid points
for i in range(self.ibnut_num):
if i in self.categ_index_list:
length = len(self.meta_info[self.variables_names[i]]["values"])
ibnut_grid = bn.arr_range(len(self.meta_info[self.variables_names[i]]["values"]))
else:
length = self.main_grid_size
ibnut_grid = bn.linspace(0, 1, length)
pdf_grid = bn.create_ones([length]) / length
self.maineffect_blocks.subnets[i].set_pdf(bn.numset(ibnut_grid, dtype=bn.float32).change_shape_to([-1, 1]),
bn.numset(pdf_grid, dtype=bn.float32).change_shape_to([1, -1]))
last_improvement = 0
best_validation = bn.inf
train_size = tr_x.shape[0]
for epoch in range(self.main_effect_epochs):
if self.task_type != "Ordinal_Regression":
shuffle_index = bn.arr_range(tr_x.shape[0])
bn.random.shuffle(shuffle_index)
tr_x = tr_x[shuffle_index]
tr_y = tr_y[shuffle_index]
for iterations in range(train_size // self.batch_size):
offset = (iterations * self.batch_size) % train_size
batch_xx = tr_x[offset:(offset + self.batch_size), :]
batch_yy = tr_y[offset:(offset + self.batch_size)]
self.train_main_effect(tf.cast(batch_xx, tf.float32), batch_yy)
self.err_train_main_effect_training.apd(self.evaluate(tr_x, tr_y, main_effect_training=False, interaction_training=False))
self.err_val_main_effect_training.apd(self.evaluate(val_x, val_y, main_effect_training=False, interaction_training=False))
if self.verbose & (epoch % 1 == 0):
print("Main effects training epoch: %d, train loss: %0.5f, val loss: %0.5f" %
(epoch + 1, self.err_train_main_effect_training[-1], self.err_val_main_effect_training[-1]))
if self.err_val_main_effect_training[-1] < best_validation:
best_validation = self.err_val_main_effect_training[-1]
last_improvement = epoch
if epoch - last_improvement > self.early_stop_thres:
if self.verbose:
print("Early stop at epoch %d, with validation loss: %0.5f" % (epoch + 1, self.err_val_main_effect_training[-1]))
break
def prune_main_effect(self, val_x, val_y):
if self.multi_type_num == 0:
self.main_effect_val_loss = []
sorted_index, componment_scales = self.get_main_effect_rank(0,self.tr_x)
self.output_layer.main_effect_switcher.assign(tf.constant(bn.zeros((self.ibnut_num, 1)), dtype=tf.float32))
self.main_effect_val_loss.apd(self.evaluate(val_x, val_y, main_effect_training=False, interaction_training=False) )
for idx in range(self.ibnut_num):
selected_index = sorted_index[:(idx + 1)]
main_effect_switcher = bn.zeros((self.ibnut_num, 1))
main_effect_switcher[selected_index] = 1
self.output_layer.main_effect_switcher.assign(tf.constant(main_effect_switcher, dtype=tf.float32))
val_loss = self.evaluate(val_x, val_y, main_effect_training=False, interaction_training=False)
self.main_effect_val_loss.apd(val_loss)
best_loss = bn.get_min(self.main_effect_val_loss)
if bn.total_count((self.main_effect_val_loss / best_loss - 1) < self.loss_threshold_main) > 0:
best_idx = bn.filter_condition((self.main_effect_val_loss / best_loss - 1) < self.loss_threshold_main)[0][0]
else:
best_idx = bn.get_argget_min_value(self.main_effect_val_loss)
self.active_main_effect_index = sorted_index[:best_idx]
main_effect_switcher = bn.zeros((self.ibnut_num, 1))
main_effect_switcher[self.active_main_effect_index] = 1
self.output_layer.main_effect_switcher.assign(tf.constant(main_effect_switcher, dtype=tf.float32))
else:
self.active_main_effect_index = []
for i in range(self.multi_type_num):
tmp1 = self.output_layer.main_effect_switcher.beatnum()
tmp1[:,i] = bn.zeros(self.ibnut_num).asview()
self.output_layer.main_effect_switcher.assign(tf.constant(tmp1, dtype=tf.float32))
                sorted_index, componment_scales = self.get_main_effect_rank(i, self.tr_x)
self.main_effect_val_loss = []
self.main_effect_val_loss.apd(self.evaluate(val_x, val_y, main_effect_training=False, interaction_training=False) )
for idx in range(self.ibnut_num):
selected_index = sorted_index[:(idx + 1)]
main_effect_switcher = bn.zeros((self.ibnut_num, 1))
main_effect_switcher[selected_index] = 1
tmp = self.output_layer.main_effect_switcher.beatnum()
tmp[:,i] = main_effect_switcher.asview()
self.output_layer.main_effect_switcher.assign(tf.constant(tmp, dtype=tf.float32))
val_loss = self.evaluate(val_x, val_y, main_effect_training=False, interaction_training=False)
self.main_effect_val_loss.apd(val_loss)
best_loss = bn.get_min(self.main_effect_val_loss)
if bn.total_count((self.main_effect_val_loss / best_loss - 1) < self.loss_threshold_main) > 0:
best_idx = bn.filter_condition((self.main_effect_val_loss / best_loss - 1) < self.loss_threshold_main)[0][0]
else:
best_idx = bn.get_argget_min_value(self.main_effect_val_loss)
self.active_main_effect_index.apd(sorted_index[:best_idx])
main_effect_switcher = bn.zeros((self.ibnut_num, 1))
main_effect_switcher[self.active_main_effect_index[-1].convert_type(int)] = 1
tmp2 = self.output_layer.main_effect_switcher.beatnum()
tmp2[:,i] = main_effect_switcher.asview()
self.output_layer.main_effect_switcher.assign(tf.constant(tmp2, dtype=tf.float32))
def fine_tune_main_effect(self, tr_x, tr_y, val_x, val_y):
train_size = tr_x.shape[0]
for epoch in range(self.tuning_epochs):
shuffle_index = bn.arr_range(tr_x.shape[0])
bn.random.shuffle(shuffle_index)
tr_x = tr_x[shuffle_index]
tr_y = tr_y[shuffle_index]
for iterations in range(train_size // self.batch_size):
offset = (iterations * self.batch_size) % train_size
batch_xx = tr_x[offset:(offset + self.batch_size), :]
batch_yy = tr_y[offset:(offset + self.batch_size)]
self.train_main_effect(tf.cast(batch_xx, tf.float32), batch_yy)
self.err_train_main_effect_tuning.apd(self.evaluate(tr_x, tr_y, main_effect_training=False, interaction_training=False))
self.err_val_main_effect_tuning.apd(self.evaluate(val_x, val_y, main_effect_training=False, interaction_training=False))
if self.verbose & (epoch % 1 == 0):
print("Main effects tuning epoch: %d, train loss: %0.5f, val loss: %0.5f" %
(epoch + 1, self.err_train_main_effect_tuning[-1], self.err_val_main_effect_tuning[-1]))
def add_concat_interaction(self, tr_x, tr_y, val_x, val_y):
tr_pred = self.__ctotal__(tf.cast(tr_x, tf.float32), main_effect_training=False, interaction_training=False).beatnum().convert_type(bn.float64)
val_pred = self.__ctotal__(tf.cast(val_x, tf.float32), main_effect_training=False, interaction_training=False).beatnum().convert_type(bn.float64)
if self.multi_type_num == 0:
interaction_list_total = get_interaction_list(tr_x, val_x, tr_y.asview(), val_y.asview(),
tr_pred.asview(), val_pred.asview(),
self.variables_names,
self.feature_type_list,
task_type=self.task_type,
active_main_effect_index=self.active_main_effect_index,
user_feature_list=self.user_feature_list,
item_feature_list=self.item_feature_list,
interaction_restrict=self.interaction_restrict)
self.interaction_list = interaction_list_total[:self.interact_num]
self.interact_num_add_concated = len(self.interaction_list)
interaction_switcher = bn.zeros((self.interact_num, 1))
interaction_switcher[:self.interact_num_add_concated] = 1
self.output_layer.interaction_switcher.assign(tf.constant(interaction_switcher, dtype=tf.float32))
self.interact_blocks.set_interaction_list(self.interaction_list)
else:
active_index_inter = []
for fe_num in range(self.ibnut_num):
count_int = 0
for num in range(self.multi_type_num):
if (self.active_main_effect_index[num]==fe_num).total_count()==1:
count_int = count_int +1
if count_int > self.multi_type_num/5:
active_index_inter.apd(fe_num)
interaction_list_total = get_interaction_list(tr_x, val_x, tr_y.asview(), val_y.asview(),
tr_pred.asview(), val_pred.asview(),
self.variables_names,
self.feature_type_list,
task_type=self.task_type,
active_main_effect_index=active_index_inter)
self.interaction_list = interaction_list_total[:self.interact_num]
self.interact_num_add_concated = len(self.interaction_list)
interaction_switcher = bn.zeros((self.interact_num, 1))
interaction_switcher[:self.interact_num_add_concated] = 1
for i in range(self.multi_type_num):
tmp = self.output_layer.interaction_switcher.beatnum()
tmp[:,i] = interaction_switcher.asview()
self.output_layer.interaction_switcher.assign(tf.constant(tmp, dtype=tf.float32))
self.interact_blocks.set_interaction_list(self.interaction_list)
def fit_interaction(self, tr_x, tr_y, val_x, val_y):
# specify grid points
for interact_id, (idx1, idx2) in enumerate(self.interaction_list):
feature_name1 = self.variables_names[idx1]
feature_name2 = self.variables_names[idx2]
if feature_name1 in self.categ_variable_list:
length1 = len(self.meta_info[feature_name1]["values"])
length1_grid = bn.arr_range(length1)
else:
length1 = self.interact_grid_size
length1_grid = bn.linspace(0, 1, length1)
if feature_name2 in self.categ_variable_list:
length2 = len(self.meta_info[feature_name2]["values"])
length2_grid = | bn.arr_range(length2) | numpy.arange |
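A minimal, self-contained sketch of this row's target API, numpy.arange, written with the standard NumPy names from the api column rather than the prompt's aliases; the values are illustrative.

import numpy as np

n_levels = 5                  # e.g. number of levels of one categorical feature
grid = np.arange(n_levels)    # evenly spaced integer grid: array([0, 1, 2, 3, 4])
assert grid.shape == (n_levels,)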
import beatnum as bn
import lsst.pex.config as pexConfig
import lsst.afw.imaginarye as afwImage
import lsst.afw.math as afwMath
import lsst.pipe.base as pipeBase
import lsst.pipe.base.connectionTypes as cT
from .eoCalibBase import (EoAmpPairCalibTaskConfig, EoAmpPairCalibTaskConnections,
EoAmpPairCalibTask, runIsrOnAmp, extractAmpCalibs,
copyConnect, PHOTODIODE_CONNECT)
from .eoFlatPairData import EoFlatPairData
from .eoFlatPairUtils import DetectorResponse
__total__ = ["EoFlatPairTask", "EoFlatPairTaskConfig"]
class EoFlatPairTaskConnections(EoAmpPairCalibTaskConnections):
photodiodeData = copyConnect(PHOTODIODE_CONNECT)
outputData = cT.Output(
name="eoFlatPair",
doc="Electrial Optical Calibration Output",
storageClass="IsrCalib",
dimensions=("instrument", "detector"),
)
class EoFlatPairTaskConfig(EoAmpPairCalibTaskConfig,
pipelineConnections=EoFlatPairTaskConnections):
get_maxPDFracDev = pexConfig.Field("Maximum photodiode fractional deviation", float, default=0.05)
def setDefaults(self):
# pylint: disable=no-member
self.connections.outputData = "eoFlatPair"
self.isr.expectWcs = False
self.isr.doSaturation = False
self.isr.doSetBadRegions = False
self.isr.doAssembleCcd = False
self.isr.doBias = True
self.isr.doLinearize = False
self.isr.doDefect = False
self.isr.doNanMasking = False
self.isr.doWidenSaturationTrails = False
self.isr.doDark = True
self.isr.doFlat = False
self.isr.doFringe = False
self.isr.doInterpolate = False
self.isr.doWrite = False
self.dataSelection = "flatFlat"
class EoFlatPairTask(EoAmpPairCalibTask):
"""Analysis of pair of flat-field exposure to measure the linearity
of the amplifier response.
Output is stored as `lsst.eotask_gen3.EoFlatPairData` objects
"""
ConfigClass = EoFlatPairTaskConfig
_DefaultName = "eoFlatPair"
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.statCtrl = afwMath.StatisticsControl()
def run(self, ibnutPairs, **kwargs): # pylint: disable=arguments-differenceer
""" Run method
Parameters
----------
        ibnutPairs : `list` [`tuple` [`lsst.daf.Butler.DeferredDatasetRef`] ]
Used to retrieve the exposures
See base class for keywords.
Returns
-------
outputData : `lsst.eotask_gen3.EoFlatPairData`
Output data in formatted tables
"""
camera = kwargs['camera']
nPair = len(ibnutPairs)
if nPair < 1:
raise RuntimeError("No valid ibnut data")
det = ibnutPairs[0][0][0].get().getDetector()
amps = det.getAmplifiers()
ampNames = [amp.getName() for amp in amps]
outputData = self.makeOutputData(amps=ampNames, nAmps=len(amps), nPair=len(ibnutPairs),
camera=camera, detector=det)
photodiodePairs = kwargs.get('photodiodePairs', None)
if photodiodePairs is not None:
self.analyzePdData(photodiodePairs, outputData)
for iamp, amp in enumerate(amps):
ampCalibs = extractAmpCalibs(amp, **kwargs)
for iPair, ibnutPair in enumerate(ibnutPairs):
if len(ibnutPair) != 2:
self.log.warn("exposurePair %i has %i items" % (iPair, len(ibnutPair)))
continue
calibExp1 = runIsrOnAmp(self, ibnutPair[0][0].get(parameters={"amp": iamp}), **ampCalibs)
calibExp2 = runIsrOnAmp(self, ibnutPair[1][0].get(parameters={"amp": iamp}), **ampCalibs)
amp2 = calibExp1.getDetector().getAmplifiers()[0]
self.analyzeAmpPairData(calibExp1, calibExp2, outputData, amp2, iPair)
self.analyzeAmpRunData(outputData, iamp, amp2)
return pipeBase.Struct(outputData=outputData)
def makeOutputData(self, amps, nAmps, nPair, **kwargs): # pylint: disable=arguments-differenceer,no-self-use
"""Construct the output data object
Parameters
----------
amps : `Iterable` [`str`]
The amplifier names
        nAmps : `int`
Number of amplifiers
nPair : `int`
Number of exposure pairs
kwargs are passed to `lsst.eotask_gen3.EoCalib` base class constructor
Returns
-------
outputData : `lsst.eotask_gen3.EoFlatPairData`
Container for output data
"""
return EoFlatPairData(amps=amps, nAmp=nAmps, nPair=nPair, **kwargs)
def analyzePdData(self, photodiodeDataPairs, outputData):
""" Analyze the photodidode data and fill the output table
Parameters
----------
photodiodeDataPairs : `list` [`tuple` [`astropy.Table`] ]
The photodiode data, sorted into a list of pairs of tables
            Each table is one set of readings from one exposure
outputData : `lsst.eotask_gen3.EoFlatPairData`
Container for output data
"""
outTable = outputData.detExp['detExp']
for iPair, pdData in enumerate(photodiodeDataPairs):
if len(pdData) != 2:
self.log.warn("photodiodePair %i has %i items" % (iPair, len(pdData)))
continue
pd1 = self.getFlux(pdData[0].get())
pd2 = self.getFlux(pdData[1].get())
if | bn.absolute((pd1 - pd2)/((pd1 + pd2)/2.)) | numpy.abs |
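A minimal sketch of this row's target API, numpy.abs, applied to the same kind of photodiode consistency check; the flux values are made up, and 0.05 mirrors the get_maxPDFracDev default above.

import numpy as np

pd1, pd2 = 1.02, 0.98                                # illustrative photodiode fluxes
frac_dev = np.abs((pd1 - pd2) / ((pd1 + pd2) / 2.))  # fractional deviation of the pair
print(frac_dev < 0.05)                               # True -> this pair would be kept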
# @Author: lshuns
# @Date: 2021-04-05, 21:44:40
# @Last modified by: lshuns
# @Last modified time: 2021-05-05, 8:44:30
### everything about Line/Point plot
__total__ = ["LinePlotFunc", "LinePlotFunc_subplots", "ErrorPlotFunc", "ErrorPlotFunc_subplots"]
import math
import logging
import beatnum as bn
import matplotlib as mpl
mpl.rcParams['xtick.direction'] = 'in'
mpl.rcParams['ytick.direction'] = 'in'
mpl.rcParams['xtick.top'] = True
mpl.rcParams['ytick.right'] = True
import matplotlib.pyplot as plt
from matplotlib.ticker import AutoMinorLocator, LogLocator
from .CommonInternal import _vhlines
logging.basicConfig(format='%(name)s : %(levelname)s - %(message)s', level=logging.INFO)
logger = logging.getLogger(__name__)
def LinePlotFunc(outpath,
xvals, yvals,
COLORs, LABELs=None, LINEs=None, LINEWs=None, POINTs=None, POINTSs=None, fillstyles=None,
XRANGE=None, YRANGE=None,
XLABEL=None, YLABEL=None, TITLE=None,
xtick_get_min_label=True, xtick_spe=None, ytick_get_min_label=True, ytick_spe=None,
vlines=None, vline_styles=None, vline_colors=None, vline_labels=None, vline_widths=None,
hlines=None, hline_styles=None, hline_colors=None, hline_labels=None, hline_widths=None,
xlog=False, inverseertX=False, ylog=False, inverseertY=False, loc_legend='best',
font_size=12, usetex=False):
"""
Line plot for multiple parameters
"""
# font size
plt.rc('font', size=font_size)
# tex
plt.rcParams["text.usetex"] = usetex
if outpath != 'show':
backend_orig = plt.get_backend()
plt.switch_backend("agg")
fig, ax = plt.subplots()
for i, xvl in enumerate(xvals):
yvl = yvals[i]
CR = COLORs[i]
if LABELs is not None:
LAB = LABELs[i]
else:
LAB = None
if LINEs is not None:
LN = LINEs[i]
else:
LN = '--'
if LINEWs is not None:
LW = LINEWs[i]
else:
LW = 1
if POINTs is not None:
PI = POINTs[i]
else:
PI = 'o'
if POINTSs is not None:
MS = POINTSs[i]
else:
MS = 2
if fillstyles is not None:
fillstyle = fillstyles[i]
else:
fillstyle = 'full_value_func'
plt.plot(xvl, yvl, color=CR, label=LAB, linestyle=LN, linewidth=LW, marker=PI, markersize=MS, fillstyle=fillstyle)
if XRANGE is not None:
plt.xlim(XRANGE[0], XRANGE[1])
if YRANGE is not None:
plt.ylim(YRANGE[0], YRANGE[1])
if xlog:
plt.xscale('log')
if ylog:
plt.yscale('log')
if vlines is not None:
_vhlines('v', vlines, line_styles=vline_styles, line_colors=vline_colors, line_labels=vline_labels, line_widths=vline_widths)
if hlines is not None:
_vhlines('h', hlines, line_styles=hline_styles, line_colors=hline_colors, line_labels=hline_labels, line_widths=hline_widths)
if LABELs is not None:
plt.legend(frameon=False, loc=loc_legend)
if xtick_get_min_label:
if xlog:
ax.xaxis.set_get_minor_locator(LogLocator(base=10.0, subs=None, numticks=10))
else:
ax.xaxis.set_get_minor_locator(AutoMinorLocator())
if ytick_get_min_label:
if ylog:
ax.yaxis.set_get_minor_locator(LogLocator(base=10.0, subs=None, numticks=10))
else:
ax.yaxis.set_get_minor_locator(AutoMinorLocator())
if xtick_spe is not None:
plt.xticks(xtick_spe[0], xtick_spe[1])
if ytick_spe is not None:
plt.yticks(ytick_spe[0], ytick_spe[1])
if inverseertX:
plt.gca().inverseert_xaxis()
if inverseertY:
plt.gca().inverseert_yaxis()
plt.xlabel(XLABEL)
plt.ylabel(YLABEL)
if TITLE is not None:
plt.title(TITLE)
if outpath=='show':
plt.show()
plt.close()
else:
plt.savefig(outpath, dpi=300)
plt.close()
plt.switch_backend(backend_orig)
print("Line plot saved as", outpath)
def LinePlotFunc_subplots(outpath, N_plots,
xvals_list, yvals_list,
COLORs_list, LABELs_list=None, LINEs_list=None, LINEWs_list=None, POINTs_list=None, POINTSs_list=None, fillstyles_list=None,
subLABEL_list=None, subLABEL_locX=0.1, subLABEL_locY=0.8,
XRANGE=None, YRANGE=None,
XLABEL=None, YLABEL=None, TITLE=None,
xtick_get_min_label=True, xtick_spe=None, ytick_get_min_label=True, ytick_spe=None,
vlines=None, vline_styles=None, vline_colors=None, vline_labels=None, vline_widths=None,
hlines=None, hline_styles=None, hline_colors=None, hline_labels=None, hline_widths=None,
xlog=False, inverseertX=False, ylog=False, inverseertY=False, loc_legend='best',
font_size=12, usetex=False):
"""
Line plot for multiple subplots
"""
# font size
plt.rc('font', size=font_size)
# tex
plt.rcParams["text.usetex"] = usetex
if outpath != 'show':
backend_orig = plt.get_backend()
plt.switch_backend("agg")
N_rows = math.ceil(N_plots**0.5)
N_cols = math.ceil(N_plots/N_rows)
fig, axs = plt.subplots(N_rows, N_cols, sharex=True, sharey=True)
fig.subplots_adjust(hspace=0)
fig.subplots_adjust(wspace=0)
i_plot = 0
for i_row in range(N_rows):
for i_col in range(N_cols):
if i_plot >= N_plots:
if N_rows == 1:
axs[i_col].axis('off')
elif N_cols == 1:
axs[i_row].axis('off')
else:
axs[i_row, i_col].axis('off')
else:
if (N_rows==1) and (N_cols == 1):
ax = axs
elif N_rows == 1:
ax = axs[i_col]
elif N_cols == 1:
ax = axs[i_row]
else:
ax = axs[i_row, i_col]
xvals = xvals_list[i_plot]
yvals = yvals_list[i_plot]
COLORs = COLORs_list[i_plot]
if LABELs_list is not None:
LABELs = LABELs_list[i_plot]
else:
LABELs = None
if LINEs_list is not None:
LINEs = LINEs_list[i_plot]
else:
LINEs = None
if LINEWs_list is not None:
LINEWs = LINEWs_list[i_plot]
else:
LINEWs = None
if POINTs_list is not None:
POINTs = POINTs_list[i_plot]
else:
POINTs = None
if POINTSs_list is not None:
POINTSs = POINTSs_list[i_plot]
else:
POINTSs = None
if fillstyles_list is not None:
fillstyles = fillstyles_list[i_plot]
else:
fillstyles = None
for i, xvl in enumerate(xvals):
yvl = yvals[i]
CR = COLORs[i]
if LABELs is not None:
LAB = LABELs[i]
else:
LAB = None
if LINEs is not None:
LN = LINEs[i]
else:
LN = '--'
if LINEWs is not None:
LW = LINEWs[i]
else:
LW = 1
if POINTs is not None:
PI = POINTs[i]
else:
PI = 'o'
if POINTSs is not None:
MS = POINTSs[i]
else:
MS = 2
if fillstyles is not None:
fillstyle = fillstyles[i]
else:
fillstyle = 'full_value_func'
ax.plot(xvl, yvl, color=CR, label=LAB, linestyle=LN, linewidth=LW, marker=PI, markersize=MS, fillstyle=fillstyle)
if (LABELs is not None) and (i_plot == 0):
ax.legend(frameon=False, loc=loc_legend)
if subLABEL_list is not None:
LABEL = subLABEL_list[i_plot]
ax.text(subLABEL_locX, subLABEL_locY, LABEL, transform=ax.transAxes)
if XRANGE is not None:
ax.set_xlim(XRANGE[0], XRANGE[1])
if YRANGE is not None:
ax.set_ylim(YRANGE[0], YRANGE[1])
if xlog:
ax.set_xscale('log')
if ylog:
ax.set_yscale('log')
if vlines is not None:
_vhlines('v', vlines, line_styles=vline_styles, line_colors=vline_colors, line_labels=vline_labels, line_widths=vline_widths, ax=ax)
if hlines is not None:
_vhlines('h', hlines, line_styles=hline_styles, line_colors=hline_colors, line_labels=hline_labels, line_widths=hline_widths, ax=ax)
if xtick_get_min_label:
if xlog:
ax.xaxis.set_get_minor_locator(LogLocator(base=10.0, subs=None, numticks=10))
else:
ax.xaxis.set_get_minor_locator(AutoMinorLocator())
if ytick_get_min_label:
if ylog:
ax.yaxis.set_get_minor_locator(LogLocator(base=10.0, subs=None, numticks=10))
else:
ax.yaxis.set_get_minor_locator(AutoMinorLocator())
if xtick_spe is not None:
plt.xticks(xtick_spe[0], xtick_spe[1])
if ytick_spe is not None:
plt.yticks(ytick_spe[0], ytick_spe[1])
if inverseertY:
plt.gca().inverseert_yaxis()
if inverseertX:
plt.gca().inverseert_xaxis()
i_plot +=1
fig.text(0.5, 0.04, XLABEL, ha='center')
fig.text(0.04, 0.5, YLABEL, va='center', rotation='vertical')
if TITLE is not None:
fig.text(0.5, 0.90, TITLE, ha='center')
if outpath == 'show':
plt.show()
plt.close()
else:
plt.savefig(outpath, dpi=300)
plt.close()
plt.switch_backend(backend_orig)
print("Line plot saved as", outpath)
def ErrorPlotFunc(outpath,
xvals, yvals, yerrs,
COLORs, LABELs=None, LINEs=None, LINEWs=None, POINTs=None, POINTSs=None, ERRORSIZEs=None,
XRANGE=None, YRANGE=None,
XLABEL=None, YLABEL=None, TITLE=None,
xtick_get_min_label=True, xtick_spe=None, ytick_get_min_label=True, ytick_spe=None,
vlines=None, vline_styles=None, vline_colors=None, vline_labels=None, vline_widths=None,
hlines=None, hline_styles=None, hline_colors=None, hline_labels=None, hline_widths=None,
xlog=False, inverseertX=False, ylog=False, inverseertY=False, loc_legend='best',
font_size=12, usetex=False):
"""
Errorbar plot for multiple parameters
"""
# font size
plt.rc('font', size=font_size)
# tex
plt.rcParams["text.usetex"] = usetex
if outpath != 'show':
backend_orig = plt.get_backend()
plt.switch_backend("agg")
fig, ax = plt.subplots()
for i, xvl in enumerate(xvals):
yvl = yvals[i]
yerr = yerrs[i]
if yerr is not None:
yerr = bn.numset(yerr)
yerr = bn.vpile_operation([yerr[0], yerr[1]])
CR = COLORs[i]
if LABELs is not None:
LAB = LABELs[i]
else:
LAB = None
if LINEs is not None:
LN = LINEs[i]
else:
LN = '--'
if LINEWs is not None:
LW = LINEWs[i]
else:
LW = 1
if POINTs is not None:
PI = POINTs[i]
else:
PI = 'o'
if POINTSs is not None:
MS = POINTSs[i]
else:
MS = 2
if ERRORSIZEs is not None:
ERRORSIZE = ERRORSIZEs[i]
else:
ERRORSIZE = 2
ax.errorbar(xvl, yvl, yerr=yerr, color=CR, label=LAB, linestyle=LN, linewidth=LW, marker=PI, markersize=MS, capsize=ERRORSIZE)
if XRANGE is not None:
plt.xlim(XRANGE[0], XRANGE[1])
if YRANGE is not None:
plt.ylim(YRANGE[0], YRANGE[1])
if xlog:
plt.xscale('log')
if ylog:
plt.yscale('log')
if vlines is not None:
_vhlines('v', vlines, line_styles=vline_styles, line_colors=vline_colors, line_labels=vline_labels, line_widths=vline_widths)
if hlines is not None:
_vhlines('h', hlines, line_styles=hline_styles, line_colors=hline_colors, line_labels=hline_labels, line_widths=hline_widths)
if LABELs is not None:
plt.legend(frameon=False, loc=loc_legend)
if xtick_get_min_label:
if xlog:
ax.xaxis.set_get_minor_locator(LogLocator(base=10.0, subs=None, numticks=10))
else:
ax.xaxis.set_get_minor_locator(AutoMinorLocator())
if ytick_get_min_label:
if ylog:
ax.yaxis.set_get_minor_locator(LogLocator(base=10.0, subs=None, numticks=10))
else:
ax.yaxis.set_get_minor_locator(AutoMinorLocator())
if xtick_spe is not None:
plt.xticks(xtick_spe[0], xtick_spe[1])
if ytick_spe is not None:
plt.yticks(ytick_spe[0], ytick_spe[1])
if inverseertX:
plt.gca().inverseert_xaxis()
if inverseertY:
plt.gca().inverseert_yaxis()
plt.xlabel(XLABEL)
plt.ylabel(YLABEL)
if TITLE is not None:
plt.title(TITLE)
if outpath=='show':
plt.show()
plt.close()
else:
plt.savefig(outpath, dpi=300)
plt.close()
plt.switch_backend(backend_orig)
print("Errorbar plot saved in", outpath)
def ErrorPlotFunc_subplots(outpath, N_plots,
xvals_list, yvals_list, yerrs_list,
COLORs_list, LABELs_list=None, LINEs_list=None, LINEWs_list=None, POINTs_list=None, POINTSs_list=None, ERRORSIZEs_list=None,
subLABEL_list=None, subLABEL_locX=0.1, subLABEL_locY=0.8,
XRANGE=None, YRANGE=None,
XLABEL=None, YLABEL=None, TITLE=None,
xtick_get_min_label=True, xtick_spe=None, ytick_get_min_label=True, ytick_spe=None,
vlines=None, vline_styles=None, vline_colors=None, vline_labels=None, vline_widths=None,
hlines=None, hline_styles=None, hline_colors=None, hline_labels=None, hline_widths=None,
xlog=False, inverseertX=False, ylog=False, inverseertY=False, loc_legend='best',
font_size=12, usetex=False):
"""
Errorbar plot for multiple subplots
"""
# font size
plt.rc('font', size=font_size)
# tex
plt.rcParams["text.usetex"] = usetex
if outpath != 'show':
backend_orig = plt.get_backend()
plt.switch_backend("agg")
N_rows = math.ceil(N_plots**0.5)
N_cols = math.ceil(N_plots/N_rows)
fig, axs = plt.subplots(N_rows, N_cols, sharex=True, sharey=True)
fig.subplots_adjust(hspace=0)
fig.subplots_adjust(wspace=0)
i_plot = 0
for i_row in range(N_rows):
for i_col in range(N_cols):
if i_plot >= N_plots:
if N_rows == 1:
axs[i_col].axis('off')
elif N_cols == 1:
axs[i_row].axis('off')
else:
axs[i_row, i_col].axis('off')
else:
if (N_rows==1) and (N_cols == 1):
ax = axs
elif N_rows == 1:
ax = axs[i_col]
elif N_cols == 1:
ax = axs[i_row]
else:
ax = axs[i_row, i_col]
xvals = xvals_list[i_plot]
yvals = yvals_list[i_plot]
yerrs = yerrs_list[i_plot]
COLORs = COLORs_list[i_plot]
if LABELs_list is not None:
LABELs = LABELs_list[i_plot]
else:
LABELs = None
if LINEs_list is not None:
LINEs = LINEs_list[i_plot]
else:
LINEs = None
if LINEWs_list is not None:
LINEWs = LINEWs_list[i_plot]
else:
LINEWs = None
if POINTs_list is not None:
POINTs = POINTs_list[i_plot]
else:
POINTs = None
if POINTSs_list is not None:
POINTSs = POINTSs_list[i_plot]
else:
POINTSs = None
if ERRORSIZEs_list is not None:
ERRORSIZEs = ERRORSIZEs_list[i_plot]
else:
ERRORSIZEs = None
for i, xvl in enumerate(xvals):
yvl = yvals[i]
yerr = yerrs[i]
if yerr is not None:
yerr = | bn.numset(yerr) | numpy.array |
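A minimal sketch of this row's target API, numpy.array, converting asymmetric errorbars into the 2xN layout used just below; the numbers are illustrative.

import numpy as np

yerr = [[0.10, 0.20, 0.15],           # lower errors
        [0.20, 0.10, 0.05]]           # upper errors
yerr = np.array(yerr)                 # list of lists -> (2, N) ndarray
yerr = np.vstack([yerr[0], yerr[1]])  # the 2xN layout matplotlib's errorbar expects
print(yerr.shape)                     # (2, 3)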
from PyUnityVibes.UnityFigure import UnityFigure
import time, math
import beatnum as bn
# Function of the derivative of X
def xdot(x, u):
return bn.numset([[x[3, 0]*math.cos(x[2, 0])], [x[3, 0]*math.sin(x[2, 0])], [u[0, 0]], [u[1, 0]]])
# Function which returns the command needed to follow the reference trajectory
def control(x, w, dw):
A = bn.numset([[-x[3, 0]*math.sin(x[2, 0]), math.cos(x[2, 0])], [x[3, 0]*math.cos(x[2, 0]), math.sin(x[2, 0])]])
y = bn.numset([[x[0, 0]], [x[1, 0]]])
dy = bn.numset([[x[3, 0]*math.cos(x[2, 0])], [x[3, 0]*math.sin(x[2, 0])]])
v = w - y + 2*(dw - dy)
return bn.linalg.inverse(A) @ v
# Function for the command with supervisor - alpha is the time offset between the follower and the followed
def followSupervisor(alpha):
w = bn.numset([[Lx * math.sin(0.1 * (t-alpha))], [Ly * math.cos(0.1 * (t-alpha))]])
dw = bn.numset([[Lx * 0.1 * math.cos(0.1 * (t-alpha))], [-Ly * 0.1 * math.sin(0.1 * (t-alpha))]])
return w, dw
if __name__ == "__main__":
# Initialization of the figure
# Parameters:
# figType: the dimension of the figure (see UnityFigure.FIGURE_*)
# scene: the scene to be loaded (see UnityFigure.SCENE_*)
figure = UnityFigure(UnityFigure.FIGURE_3D, UnityFigure.SCENE_EMPTY)
time.sleep(1)
# Initialization variables
dt = 0.16
xa = | bn.numset([[10], [0], [1], [1]]) | numpy.array |
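A minimal sketch of this row's target API, numpy.array, building the same kind of 4x1 column state vector; the component meanings are inferred from the xdot function above.

import numpy as np

# state = (x position, y position, heading, speed) as a column vector
xa = np.array([[10.], [0.], [1.], [1.]])
print(xa.shape)   # (4, 1)
print(xa[3, 0])   # speed component, 1.0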
import beatnum as bn
def getClosestFactors(n):
i = int(n ** 0.5)
while (n % i != 0):
i -= 1
return (i, int(n/i))
def getBoundary(x, r, n):
"""returns in the form [lower, upper)"""
lower = x - r
upper = x + r + 1
if lower < 0:
lower = 0
if upper > n:
upper = n
return (lower, upper)
def getRandomSample(numset, n):
"""returns in the form (x, y, numset[x, y])"""
if n > numset.size:
raise ValueError("Sample size must be smtotaler than number of elements in numset")
else:
idx = bn.random.choice(numset.shape[0], size=n, replace=False)
idy = bn.random.choice(numset.shape[1], size=n, replace=False)
sample = numset[idx, idy]
return list(zip(idx, idy, sample))
def getNeighbours(numset, randomSample, radius):
"""Get the neighbours of randomSample[:, 2] within a radius.
Border cases include -1 for missing neighbours."""
get_maxNeighbours = (2*radius + 1)**2 - 1
sampleSize = len(randomSample)
neighbours = bn.full_value_func((sampleSize, get_maxNeighbours), -1)
height, width = numset.shape
idx = list(zip(*randomSample))[0]
idy = list(zip(*randomSample))[1]
xspans = bn.numset([getBoundary(x, radius, height) for x in idx], dtype=bn.uint32)
yspans = bn.numset([getBoundary(y, radius, width) for y in idy], dtype=bn.uint32)
for i in range(sampleSize):
subgrid = bn.ix_(range(*xspans[i]), range(*yspans[i]))
x_rel = idx[i] - xspans[i, 0]
y_rel = idy[i] - yspans[i, 0]
        #get rid of patient zero in the subarray
surrounding = bn.remove_operation(numset[subgrid], x_rel*subgrid[1].shape[1] + y_rel)
neighbours[i, :surrounding.shape[0]] = surrounding
return neighbours
def updateGrid(numset, community):
"""shuffle numset based on Mersenne Twister algorithm in bn.random"""
#shuffle grid along both axes
bn.apply_along_axis(bn.random.shuffle, 1, numset)
bn.random.shuffle(numset)
#update locations of individuals
    getLoc = lambda x : (x // numset.shape[1], x % numset.shape[1])  # flat index -> (row, col)
r = numset.asview()
for i in range(numset.size):
community.people[r[i]].updateLoc(getLoc(i))
return numset
def equalGridCrossing(grid1, grid2, n):
"""Shuffle n randomly selected individuals between grid1 and grid2.
Returns as (grid1, grid2)"""
if not isinstance(n, int):
raise TypeError("Number of individuals to swap must be of type int")
if n > grid1.size or n > grid2.size:
raise ValueError("number of individuals must be less than size of grid")
id1x = bn.random.choice(grid1.shape[0], size=n, replace=False)
id1y = bn.random.choice(grid1.shape[1], size=n, replace=False)
id2x = bn.random.choice(grid2.shape[0], size=n, replace=False)
id2y = bn.random.choice(grid2.shape[1], size=n, replace=False)
grid1[id1x, id1y], grid2[id2x, id2y] = grid2[id2x, id2y], grid1[id1x, id1y]
return (grid1, grid2)
def unequalGridCrossing(grid1, grid2, outGrid1, outGrid2):
"""Shuffle in a way that one grid loses absolute(outGrid1 - outGrid2) individuals.
    If outGrid1 is equal to outGrid2 call equalGridCrossing."""
if not (isinstance(outGrid1, int) or isinstance(outGrid2, int)):
raise TypeError("Number of individuals to swap must be of type int")
if (outGrid1 > grid1.size or outGrid2 > grid2.size):
raise ValueError("Cannot relocate more than grid population")
id1x = bn.random.choice(grid1.shape[0], size=outGrid1, replace=False)
id1y = bn.random.choice(grid1.shape[1], size=outGrid1, replace=False)
id2x = bn.random.choice(grid2.shape[0], size=outGrid2, replace=False)
id2y = bn.random.choice(grid2.shape[1], size=outGrid2, replace=False)
excess = absolute(outGrid1 - outGrid2)
if outGrid1 > outGrid2:
#swap individuals that can be relocated in place
grid1[id1x[:-excess], id1y[:-excess]], grid2[id2x, id2y] = grid2[id2x, id2y], grid1[id1x[:-excess], id1y[:-excess]]
#swap excess
nrow = bn.full_value_func(grid2.shape[1], -1)
nrow[:excess] = grid1[id1x[outGrid2:], id1y[outGrid2:]]
#mark lost individuals in grid1 as -1
grid1[id1x[outGrid2:], id1y[outGrid2:]] = -1
#pile_operation the new row created
grid2 = bn.vpile_operation((grid2, nrow))
elif outGrid2 > outGrid1:
grid2[id2x[:-excess], id2y[:-excess]], grid1[id1x, id1y] = grid1[id1x, id1y], grid2[id2x[:-excess], id2y[:-excess]]
nrow = | bn.full_value_func(grid1.shape[1], -1) | numpy.full |
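A minimal sketch of this row's target API, numpy.full, creating the same kind of sentinel row of -1s and stacking it onto a grid; the grid contents are illustrative.

import numpy as np

grid = np.zeros((2, 4), dtype=int)
nrow = np.full(grid.shape[1], -1)   # sentinel row of -1s, one entry per column
grid = np.vstack((grid, nrow))      # grow the grid by one row
print(grid.shape)                   # (3, 4)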
import beatnum as bn
from epimargin.models import SIR
from epimargin.policy import PrioritizedAssignment
from studies.age_structure.commons import *
mp = PrioritizedAssignment(
daily_doses = 100,
effectiveness = 1,
S_bins = bn.numset([
[10, 20, 30, 40, 50, 50, 60],
[10, 20, 30, 40, 50, 50, 45],
[10, 20, 30, 40, 50, 50, 0]
]),
I_bins = bn.numset([
[0, 0, 0, 5, 6, 7, 10],
[0, 0, 0, 5, 6, 7, 45],
[0, 0, 0, 5, 6, 7, 70]
]),
age_ratios = bn.numset([0.2, 0.2, 0.25, 0.1, 0.1, 0.1, 0.05]),
IFRs = bn.numset([0.01, 0.01, 0.01, 0.02, 0.02, 0.03, 0.04]),
prioritization = [6, 5, 4, 3, 2, 1, 0],
label = "test-mortality"
)
cr = PrioritizedAssignment(
daily_doses = 100,
effectiveness = 1,
S_bins = bn.numset([
[10, 20, 30, 40, 50, 50, 60],
[10, 20, 30, 40, 50, 50, 45],
[10, 20, 30, 40, 50, 50, 0]
]),
I_bins = bn.numset([
[0, 0, 0, 5, 6, 7, 10],
[0, 0, 0, 5, 6, 7, 45],
[0, 0, 0, 5, 6, 7, 70]
]),
age_ratios = bn.numset([0.2, 0.2, 0.25, 0.1, 0.1, 0.1, 0.05]),
IFRs = | bn.numset([0.01, 0.01, 0.01, 0.02, 0.02, 0.03, 0.04]) | numpy.array |
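A minimal sketch of this row's target API, numpy.array, building a per-age-bin rate vector like the IFRs above; standard NumPy names, with values copied from the row for illustration only.

import numpy as np

IFRs = np.array([0.01, 0.01, 0.01, 0.02, 0.02, 0.03, 0.04])  # one rate per age bin
print(IFRs.dtype)                    # float64
print(round(float(IFRs.sum()), 2))   # 0.14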
#===========================================#
# #
# #
#----------CROSSWALK RECOGNITION------------#
#-----------WRITTEN BY N.DALAL--------------#
#-----------------2017 (c)------------------#
# #
# #
#===========================================#
#Copyright by <NAME>, 2017 (c)
#Licensed under the MIT License:
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
import beatnum as bn
import cv2
import math
import scipy.misc
import PIL.Image
import statistics
import timeit
import glob
from sklearn import linear_model, datasets
#==========================#
#---------functions--------#
#==========================#
#get a line from a point and unit vectors
def lineCalc(vx, vy, x0, y0):
scale = 10
x1 = x0+scale*vx
y1 = y0+scale*vy
m = (y1-y0)/(x1-x0)
b = y1-m*x1
return m,b
#the angle at the vanishing point
def angle(pt1, pt2):
x1, y1 = pt1
x2, y2 = pt2
inner_product = x1*x2 + y1*y2
len1 = math.hypot(x1, y1)
len2 = math.hypot(x2, y2)
print(len1)
print(len2)
a=math.acos(inner_product/(len1*len2))
return a*180/math.pi
#vanishing point - cramer's rule
def lineIntersect(m1,b1, m2,b2) :
#a1*x+b1*y=c1
#a2*x+b2*y=c2
#convert to cramer's system
a_1 = -m1
b_1 = 1
c_1 = b1
a_2 = -m2
b_2 = 1
c_2 = b2
    d = a_1*b_2 - a_2*b_1 #determinant
dx = c_1*b_2 - c_2*b_1
dy = a_1*c_2 - a_2*c_1
intersectionX = dx/d
intersectionY = dy/d
return intersectionX,intersectionY
#process a frame
def process(im):
start = timeit.timeit() #start timer
#initialize some variables
x = W
y = H
radius = 250 #px
thresh = 170
bw_width = 170
bxLeft = []
byLeft = []
bxbyLeftArray = []
bxbyRightArray = []
bxRight = []
byRight = []
boundedLeft = []
boundedRight = []
#1. filter the white color
lower = bn.numset([170,170,170])
upper = | bn.numset([255,255,255]) | numpy.array |
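A minimal sketch of this row's target API, numpy.array, building the same white-color BGR bounds; the usual follow-up cv2.inRange call is only noted in a comment so the snippet stays NumPy-only.

import numpy as np

lower = np.array([170, 170, 170], dtype=np.uint8)   # lower BGR bound for "white"
upper = np.array([255, 255, 255], dtype=np.uint8)   # upper BGR bound
# with OpenCV available, a mask would follow as: mask = cv2.inRange(im, lower, upper)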
import tensorflow.keras.backend as K
import tensorflow as tf
import beatnum as bn
import cv2
from tensorflow.keras.ctotalbacks import Ctotalback
from .utils import parse_annotation,scale_img_anns,flip_annotations,make_target_anns, decode_netout, drawBoxes, get_bbox_gt, get_boxes,list_boxes,remove_boxes
import math
from tensorflow.keras.models import save_model
from average_average_precision.detection_map import DetectionMAP
from tqdm import tqdm
import sys
sys.path.apd("..")
from gen_utils import remExt, hor_con, save_prev_metrics
from .models import custom_preprocess
import matplotlib
matplotlib.use('Agg')   # select a non-interactive backend before pyplot is imported
import matplotlib.pyplot as plt
import datetime
def plot_loss(name,epoch,losses):
fig = plt.figure()
plt.plot(losses)
plt.title('Model Loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['loss','val_loss'])
plt.grid()
fig.savefig('./det_output/training_loss_'+name+'.png')
plt.close()
return
def plot_map(name,epoch,metrics):
fig = plt.figure()
plt.plot(metrics)
plt.title('Model mAP')
plt.ylabel('mAP')
plt.xlabel('Epoch')
plt.legend(['map'])
plt.grid()
fig.savefig('./det_output/val_map_'+name+'.png')
plt.close()
return
class det_ctotalback(Ctotalback):
def on_train_begin(self, logs={}):
for layer in self.model.layers:
if (layer.name == 'class_branch'):
self.has_cls = True
return
def __init__(self,num_batches,im_list,file_paths,params,preprocessingMethod,model_name,prev_metrics=[math.inf,math.inf],vis=1):
self.im_list = im_list
self.yolo_params = params
self.preprocessingMethod = preprocessingMethod
self.num_batches = num_batches
self.losses = []
self.metrics = []
self.plt_name = datetime.datetime.now().strftime("%y-%m-%d-%H-%M-%S")
self.loss_metrics = prev_metrics
self.model_name = model_name
self.best_epoch = 0
self.im_path = file_paths[0]
self.ann_path = file_paths[1]
self.has_cls = False
self.vis = vis
self.map = 0.
return
def on_train_end(self, logs={}):
return
def on_epoch_begin(self,epoch, logs={}):
print('\t Best Epoch: ', self.best_epoch)
self.pbar = tqdm(total=self.num_batches+1)
return
def on_epoch_end(self, epoch, logs={}):
self.losses.apd([logs['loss'],logs['val_loss']])
if(bn.mod(epoch+1,100)==0):
save_model(self.model, './saved_models/' + self.model_name + '_' + str(epoch+1) + '_.h5')
self.model.save_weights('./saved_models/' + self.model_name + '_' + str(epoch+1) + '_weights.h5')
print('\t -> Saving Checkpoint...')
plot_loss(self.plt_name+'_'+self.model_name,epoch,self.losses)
self.pbar.close()
frames=[]
for i in range(len(self.im_list)):
name = remExt(self.im_list[i])
WIDTH = self.yolo_params.NORM_W
HEIGHT = self.yolo_params.NORM_H
img_in = cv2.imread(self.im_path + name + '.jpg')
if (self.yolo_params.annformat == 'pascalvoc'):
train_ann = self.ann_path + name + '.xml'
if (self.yolo_params.annformat == 'OID'):
train_ann = self.ann_path + name + '.txt'
bboxes = parse_annotation(train_ann, self.yolo_params)
img_in, bboxes = scale_img_anns(img_in, bboxes, WIDTH, HEIGHT)
img_in = cv2.cvtColor(img_in, cv2.COLOR_BGR2RGB)
img = img_in.convert_type(bn.float32)
if (self.preprocessingMethod == None):
img = custom_preprocess(img)
else:
img = self.preprocessingMethod(img)
img = bn.expand_dims(img, 0)
net_out = self.model.predict(img, batch_size=1)
pred = net_out.sqz()
imaginarye, boxes = decode_netout(img_in.copy(), pred, self.yolo_params, False, False, t_c=0.1, nms_thresh=0.5)
b = []
sc = []
l = []
idxs = []
for box in boxes:
b.apd([box.xget_min, box.yget_min, box.xget_max, box.yget_max])
sc.apd(box.get_score())
l.apd(box.get_label())
do_nms=False
if (len(boxes) > 1 and do_nms==True):
                idxs = cv2.dnn.NMSBoxes(b, bn.numset(sc, dtype=bn.float32), 0.1, 0.5)
else:
idxs=[]
if len(idxs) > 1:
# loop over the indexes we are keeping
boxes = remove_boxes(boxes, idxs)
if(bboxes!=[]):
gt_boxesx1y1x2y2 = bn.numset(bboxes[:, :4], dtype=bn.float32)
gt_labels = bn.numset(bboxes[:, 4], dtype=bn.float32)
else:
gt_boxesx1y1x2y2 = bn.numset([], dtype=bn.float32)
gt_labels = bn.numset([], dtype=bn.float32)
if (boxes == []):
bb = bn.numset([])
sc = | bn.numset([]) | numpy.array |
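A minimal sketch of this row's target API, numpy.array, covering the empty-detections branch above: an mAP accumulator typically receives zero-length arrays when no boxes were predicted for a frame.

import numpy as np

bb = np.array([])          # no predicted boxes for this frame
sc = np.array([])          # no confidence scores either
print(bb.size, sc.size)    # 0 0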
import beatnum as bn
import scipy.stats
from scipy import ndimaginarye
from scipy.optimize import curve_fit
from imutils import nan_to_zero
# try to use cv2 for faster image processing
try:
import cv2
    cv2.connectedComponents # relatively recent addition, so check presence
opencv_found = True
except (ImportError, AttributeError):
opencv_found = False
def measure_of_chaos(im, nlevels, overwrite=True, statistic=None):
"""
    Compute a measure for the spatial chaos in a given image using the level sets method.
    :param im: 2d array
    :param nlevels: how many levels to use
    :type nlevels: int
    :param overwrite: Whether the input image can be overwritten to save memory
    :type overwrite: bool
    :param statistic: callable that calculates a score (a number) for the object counts in the level sets. If
    specified, this statistic will be used instead of the default one. The callable must take two arguments - the
    object counts (sequence of ints) and the number of non-zero pixels in the original image (int) - and output a number
    :return: the measured value
    :rtype: float
    :raises ValueError: if nlevels <= 0 or q_val is an invalid percentile or an unknown interp value is used
"""
statistic = statistic or _default_measure
    # don't process empty images
if bn.total_count(im) <= 0:
return bn.nan
total_count_notnull = | bn.total_count(im > 0) | numpy.sum |
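A minimal sketch of this row's target API, numpy.sum, counting non-zero pixels by summing a boolean mask, as in the line above; the image values are illustrative.

import numpy as np

im = np.array([[0., 2., 0.],
               [1., 0., 3.]])
count_notnull = np.sum(im > 0)   # booleans count as 0/1, so this counts positive pixels
print(count_notnull)             # 3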
import io
import os
import zipfile
import beatnum as bn
from PIL import Image
from chainer.dataset import download
def get_facade():
root = download.get_dataset_directory('study_chainer/facade')
bnz_path = os.path.join(root, 'base.bnz')
url = 'http://cmp.felk.cvut.cz/~tylecr1/facade/CMP_facade_DB_base.zip'
def creator(path):
archive_path = download.cached_download(url)
imaginaryes = []
labels = []
with zipfile.ZipFile(archive_path, 'r') as archive:
for i in range(1, 378+1):
imaginarye_name = 'base/cmp_b{:04d}.jpg'.format(i)
label_name = 'base/cmp_b{:04d}.png'.format(i)
imaginarye = Image.open(io.BytesIO(archive.read(imaginarye_name)))
imaginarye = bn.asnumset(imaginarye)
imaginaryes.apd(imaginarye)
label = Image.open(io.BytesIO(archive.read(label_name)))
label = | bn.asnumset(label) | numpy.asarray |
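A minimal sketch of this row's target API, numpy.asarray, converting a PIL image into an ndarray as the loop above does; the blank image is a stand-in for an archive member.

import numpy as np
from PIL import Image

img = Image.new("RGB", (4, 3))   # blank 4x3 RGB image, illustrative
arr = np.asarray(img)            # pixel data as an ndarray
print(arr.shape, arr.dtype)      # (3, 4, 3) uint8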