text (string, 12 to 1.05M chars) | repo_name (string, 5 to 86 chars) | path (string, 4 to 191 chars) | language (string, 1 class) | license (string, 15 classes) | size (int32, 12 to 1.05M) | keyword (sequence, 1 to 23 items) | text_hash (string, 64 chars)
---|---|---|---|---|---|---|---|
"""
Tests on the crosswalk from JPER format to SWORD-supported DC/RIOXX XML
"""
# from octopus.modules.es.testindex import ESTestCase
from unittest import TestCase
from service.tests import fixtures
import sword2
from service import xwalk
from octopus.modules.jper import models
TERMS = "http://purl.org/dc/terms/"
DC = "http://purl.org/dc/elements/"
ALI = "http://www.niso.org/schemas/ali/1.0/"
RIOXX = "http://www.rioxx.net/schema/v2.0/rioxx/"
ATOM = "http://www.w3.org/2005/Atom"
class TestModels(TestCase):
def setUp(self):
super(TestModels, self).setUp()
def tearDown(self):
super(TestModels, self).tearDown()
def test_01_xwalk(self):
e = sword2.Entry()
n = models.OutgoingNotification(fixtures.NotificationFactory.outgoing_notification())
xwalk.to_dc_rioxx(n, e)
def _texts(ns, field):
return [el.text for el in e.entry.findall("{" + ns + "}" + field)]
def _attr(ns, field, att):
return [el.get(att) for el in e.entry.findall("{" + ns + "}" + field)]
identifiers = _texts(DC, "identifier")
assert len(identifiers) == 5
assert "http://router.jisc.ac.uk/api/v1/notification/1234567890/content/1" in identifiers
assert "http://router.jisc.ac.uk/api/v1/notification/1234567890/content/2" in identifiers
assert "http://router.jisc.ac.uk/api/v1/notification/1234567890/content/SimpleZip" in identifiers
assert "http://router.jisc.ac.uk/api/v1/notification/1234567890/content" in identifiers
assert "doi:10.pp/jit.1" in identifiers
available = _texts(TERMS, "available")
assert len(available) == 1
assert "2016-01-01T00:00:00Z" in available
titles = _texts(DC, "title")
assert len(titles) == 1
assert "Test Article" in titles
atitles = _texts(ATOM, "title")
assert len(atitles) == 1
assert "Test Article" in atitles
vers = _texts(RIOXX, "version")
assert len(vers) == 1
assert "AAM" in vers
pubs = _texts(DC, "publisher")
assert len(pubs) == 1
assert "Premier Publisher" in pubs
sources = _texts(DC, "source")
assert len(sources) == 5
assert "Journal of Important Things" in sources
assert "issn:1234-5678" in sources
assert "eissn:1234-5678" in sources
assert "pissn:9876-5432" in sources
assert "doi:10.pp/jit" in sources
asources = _texts(ATOM, "source")
assert len(asources) == 1
assert "Journal of Important Things" in asources
vor = _texts(RIOXX, "version_of_record")
assert len(vor) == 1
assert "doi:10.pp/jit.1" in vor
type = _texts(DC, "type")
assert len(type) == 1
assert "article" in type
creator = _texts(DC, "creator")
assert len(creator) == 6
assert "Richard Jones" in creator
assert "orcid:aaaa-0000-1111-bbbb" in creator
assert "email:[email protected]" in creator
assert "Mark MacGillivray" in creator
assert "orcid:dddd-2222-3333-cccc" in creator
assert "email:[email protected]" in creator
ra = _texts(RIOXX, "author")
assert len(ra) == 2
assert "Richard Jones" in ra
assert "Mark MacGillivray" in ra
ratts = _attr(RIOXX, "author", "id")
assert len(ratts) == 2
anames = [el.find("{" + ATOM + "}name").text for el in e.entry.findall("{" + ATOM + "}author")]
assert "Richard Jones" in anames
assert "Mark MacGillivray" in anames
affs = _texts(DC, "contributor")
assert len(affs) == 1
assert "Cottage Labs" in affs
acont = [el.find("{" + ATOM + "}name").text for el in e.entry.findall("{" + ATOM + "}contributor")]
assert len(acont) == 2
assert "Cottage Labs" in acont
assert "BBSRC" in acont
langs = _texts(DC, "language")
assert len(langs) == 1
assert "eng" in langs
pubd = _texts(DC, "date")
assert len(pubd) == 1
assert "2015-01-01T00:00:00Z" in pubd
rpubd = _texts(RIOXX, "publication_date")
assert len(rpubd) == 1
assert "2015-01-01T00:00:00Z" in rpubd
apubd = _texts(ATOM, "published")
assert len(apubd) == 1
assert "2015-01-01T00:00:00Z" in rpubd
accd = _texts(TERMS, "dateAccepted")
assert len(accd) == 1
assert "2014-09-01T00:00:00Z" in accd
subd = _texts(TERMS, "dateSubmitted")
assert len(subd) == 1
assert "2014-07-03T00:00:00Z" in subd
licref = _texts(ALI, "license_ref")
assert len(licref) == 1
assert "http://creativecommons.org/cc-by" in licref
startd = _attr(ALI, "license_ref", "start_date")
assert len(startd) == 1
assert "2016-01-01T00:00:00Z" in startd
rights = _texts(DC, "rights")
assert len(rights) == 1
assert "http://creativecommons.org/cc-by" in rights
arights = _texts(ATOM, "rights")
assert len(arights) == 1
assert "http://creativecommons.org/cc-by" in arights
projs = _texts(RIOXX, "project")
assert len(projs) == 1
assert "BB/34/juwef" in projs
funders = _attr(RIOXX, "project", "funder_name")
assert len(funders) == 1
assert "BBSRC" in funders
fids = _attr(RIOXX, "project", "funder_id")
assert len(fids) == 1
assert "ringold:bbsrcid" in fids
subs = _texts(DC, "subject")
assert len(subs) == 4
| JiscPER/jper-sword-out | service/tests/unit/test_xwalk.py | Python | apache-2.0 | 5,607 | ["Octopus"] | d573910612f6394d20d633d0e9bae492bff4672cf3411134775e6285f212dd61 |
#_PYTHON_INSERT_SAO_COPYRIGHT_HERE_(2007)_
#_PYTHON_INSERT_GPL_LICENSE_HERE_
import numpy
from sherpa.estmethods import *
from sherpa.utils import SherpaTestCase
# Test data arrays -- together these make a line that is best fit with a
# 1D Gaussian function.
x = numpy.array(
[ 758, 759, 760, 761, 762, 763, 764, 765, 766, 767, 768, 769,
770, 771, 772, 773, 774, 775, 776, 777, 778, 779, 780, 781,
782, 783, 784, 785, 786, 787, 788, 789, 790, 791, 792, 793,
794, 795, 796, 797, 798, 799, 800, 801, 802, 803, 804, 805,
806, 807, 808, 809, 810, 811, 812, 813, 814, 815, 816, 817,
818, 819, 820, 821, 822, 823, 824, 825, 826, 827, 828, 829,
830, 831, 832, 833, 834, 835, 836, 837, 838, 839, 840, 841,
842, 843, 844, 845, 846, 847, 848, 849, 850, 851, 852, 853,
854, 855, 856, 857, 858, 859, 860, 861, 862, 863, 864, 865,
866, 867, 868, 869, 870, 871, 872, 873, 874, 875, 876, 877,
878, 879, 880, 881])
y = numpy.array(
[ 1, 0, 1, 0, 1, 2, 0, 3, 0, 1, 0, 5,
0, 5, 3, 5, 6, 5, 11, 14, 11, 13, 12, 21,
15, 24, 20, 29, 32, 43, 47, 49, 50, 64, 60, 72,
61, 73, 83, 98, 99, 100, 94, 92, 121, 107, 126, 107,
112, 123, 114, 126, 113, 86, 111, 126, 95, 119, 93, 119,
93, 89, 75, 80, 71, 68, 59, 54, 61, 37, 21, 33,
37, 32, 31, 22, 19, 25, 14, 13, 12, 10, 7, 10,
5, 4, 8, 1, 5, 2, 1, 3, 1, 5, 0, 3,
1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 1])
# These parameter values are known to be the best-fit parameter values
# of a Gaussian to the x- and y-arrays above.
fittedpars = numpy.array([32.3536, 807.863, 117.826])
limit_parnums = numpy.array([0,1,2])
maxpars = numpy.array([200, 900, 200])
minpars = numpy.array([1, 0, 0])
hardmaxpars = numpy.array([1.0e+120, 1.0e+120, 1.0e+120])
hardminpars = numpy.array([1.0e-120, -1.0e+120, -1.0e+120])
gfactor = 4.0 * 0.6931471805599453094172321214581766
def freeze_par(pars, parmins, parmaxes, i):
return (pars, parmins, parmaxes)
def thaw_par(i):
pass
def report_progress(i, lower, upper):
pass
def get_par_name( ii ):
pass
# Here's the 1D Gaussian function to use to generate predicted
# data, which will be compared to the y-array above
def gauss_func(p):
return p[2] * numpy.exp(-1.0 * gfactor *
(x - p[1]) * (x - p[1]) / p[0] / p[0])
# Compare the y-array above to values calculated with gauss_func,
# and calculate chi-squared, using the Gehrels approximation for
# estimating errors.
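# (The Gehrels approximation used below is sigma ~ 1 + sqrt(N + 0.75) for a
# Poisson count N, which is exactly what the errors line in stat() computes.)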
def stat(p):
errors = 1.0 + numpy.sqrt(y + 0.75)
fvec = (y - gauss_func(p) ) / errors
return ((fvec * fvec).sum(),)
# Easiest "fit" function for unit tests is actually just to
# return current parameter values. In this test, that will
# have the effect of making projection act like the old
# uncertainty method, but that is OK for the unit test.
def fitter(scb, pars, parmins, parmaxs):
return (1, pars, scb(pars)[0])
class test_estmethods(SherpaTestCase):
def test_covar_failures(self):
self.assertRaises(TypeError, Covariance().compute,
None, fitter, fittedpars,
minpars, maxpars,
hardminpars, hardmaxpars,
limit_parnums, freeze_par, thaw_par, report_progress)
self.assertRaises(RuntimeError, Covariance().compute,
stat, fitter, None, minpars, maxpars,
hardminpars, hardmaxpars, limit_parnums, freeze_par,
thaw_par, report_progress, get_par_name)
self.assertRaises(RuntimeError, Covariance().compute,
stat, fitter, numpy.array([1,2]),
minpars, maxpars, hardminpars, hardmaxpars,
limit_parnums, freeze_par, thaw_par,
report_progress, get_par_name)
self.assertRaises(RuntimeError, Covariance().compute,
stat, fitter, fittedpars, numpy.array([1,2]),
maxpars, hardminpars, hardmaxpars, limit_parnums,
freeze_par, thaw_par, report_progress, get_par_name)
def test_projection_failures(self):
self.assertRaises(TypeError, Projection().compute,
stat, None, fittedpars, minpars, maxpars,
hardminpars, hardmaxpars, limit_parnums, freeze_par,
thaw_par, report_progress, get_par_name)
self.assertRaises(RuntimeError, Projection().compute,
stat, fitter, None, minpars, maxpars,
hardminpars, hardmaxpars, limit_parnums,
freeze_par, thaw_par, report_progress, get_par_name)
self.assertRaises(RuntimeError, Projection().compute,
stat, fitter, numpy.array([1,2]),
minpars, maxpars, hardminpars, hardmaxpars,
limit_parnums, freeze_par, thaw_par,
report_progress, get_par_name)
self.assertRaises(RuntimeError, Projection().compute,
stat, fitter, fittedpars, numpy.array([1,2]),
maxpars, hardminpars, hardmaxpars, limit_parnums,
freeze_par, thaw_par, report_progress, get_par_name)
def test_covar(self):
standard = numpy.array([[ 0.4935702, 0.06857833, numpy.nan],
[ 0.06857833, 0.26405554, numpy.nan],
[ numpy.nan, numpy.nan, 2.58857314]])
results = Covariance().compute(stat, None, fittedpars,
minpars, maxpars,
hardminpars, hardmaxpars,
limit_parnums, freeze_par, thaw_par,
report_progress, get_par_name)
self.assertEqualWithinTol(standard.diagonal(),
#results[2].diagonal(), 1e-4)
results[1], 1e-4)
def test_projection(self):
standard_elo = numpy.array([-0.39973743, -0.26390339, -2.08784716])
standard_ehi = numpy.array([ 0.39580942, 0.26363223, 2.08789851])
results = Projection().compute(stat, fitter, fittedpars,
minpars, maxpars,
hardminpars, hardmaxpars,
limit_parnums, freeze_par, thaw_par,
report_progress, get_par_name)
self.assertEqualWithinTol(standard_elo,results[0], 1e-4)
self.assertEqualWithinTol(standard_ehi,results[1], 1e-4)
| brefsdal/sherpa | sherpa/estmethods/tests/test_estmethods.py | Python | gpl-2.0 | 7,198 | ["Gaussian"] | d902793c9d3bc4d0688250e2ad834d7dc3ca11962d7a390260edae7240ccad5c |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division, print_function
# Import parser
import sys
import argparse
import os
# Importing manipulation packages
from astropy.io import fits
import numpy as np
import glob
from numpy.polynomial import chebyshev
from scipy import interpolate
from scipy import optimize
# Plotting
import matplotlib.pyplot as pl
import seaborn; seaborn.set_style('ticks')
import copy
from matplotlib.backends.backend_pdf import PdfPages
from util import *
from XSHcomb import XSHcomb, avg
class XSHextract(XSHcomb):
"""
Class to contain XSH spectrscopy extraction.
"""
def __init__(self, input_file, resp=None):
"""
Instantiate fitsfiles. Input list of file-names to be combined.
"""
if len(input_file) == 0:
raise ValueError("Input file list empty")
self.input_file = input_file
self.fitsfile = fits.open(self.input_file)
self.header = self.fitsfile[0].header
try:
self.flux = self.fitsfile[0].data
except:
raise ValueError("No flux-array. Aborpting")
try:
self.error = self.fitsfile[1].data
except IndexError:
print("Empty error extension. Inserting placeholder of ones")
self.error = np.ones_like(self.flux)
try:
self.bpmap = self.fitsfile[2].data
except IndexError:
print("Empty bad-pixel bap extension. Inserting placeholder of zeroes")
self.bpmap = np.zeros_like(self.flux)
self.flux = np.ma.array(self.flux, mask=self.bpmap.astype("bool"))
self.error = np.ma.array(self.error, mask=self.bpmap.astype("bool"))
self.base_name = "/".join(input_file.split("/")[:-1]) + "/" + "".join(input_file.split("/")[-1])[:-5]
if resp:
# Apply flux calibration from master response file
resp = fits.open(resp)
self.wl_response, self.response = resp[1].data.field('LAMBDA'), resp[1].data.field('RESPONSE')
f = interpolate.interp1d(10 * self.wl_response, self.response, bounds_error=False)
self.response = f(10.*((np.arange(self.header['NAXIS1']) - self.header['CRPIX1'])*self.header['CD1_1']+self.header['CRVAL1'])/(self.header['WAVECORR']))
if self.header['HIERARCH ESO SEQ ARM'] == "UVB" or self.header['HIERARCH ESO SEQ ARM'] == "VIS":
gain = self.header["HIERARCH ESO DET OUT1 GAIN"]
elif self.header['HIERARCH ESO SEQ ARM'] == "NIR":
gain = 1.0/2.12
else:
print("Missing arm keyword in header. Stopping.")
exit()
# Apply atmospheric extinction correction
atmpath = "data/esostatic/xsh_paranal_extinct_model_"+self.header['HIERARCH ESO SEQ ARM'].lower()+".fits"
ext_atm = fits.open(atmpath)
self.wl_ext_atm, self.ext_atm = ext_atm[1].data.field('LAMBDA'), ext_atm[1].data.field('EXTINCTION')
f = interpolate.interp1d(10. * self.wl_ext_atm, self.ext_atm, bounds_error=False)
self.ext_atm = f(10.*(((np.arange(self.header['NAXIS1'])) - self.header['CRPIX1'])*self.header['CDELT1']+self.header['CRVAL1']) * self.header['WAVECORR'])
self.response = (10. * self.header['CD1_1'] * self.response * (10.**(0.4*self.header['HIERARCH ESO TEL AIRM START'] * self.ext_atm))) / ( gain * self.header['EXPTIME'])
# Get slit width
if self.header['HIERARCH ESO SEQ ARM'] == "UVB":
self.slit_width = float(self.header['HIERARCH ESO INS OPTI3 NAME'].split("x")[0])
elif self.header['HIERARCH ESO SEQ ARM'] == "VIS":
self.slit_width = float(self.header['HIERARCH ESO INS OPTI4 NAME'].split("x")[0])
elif self.header['HIERARCH ESO SEQ ARM'] == "NIR":
self.slit_width = float(self.header['HIERARCH ESO INS OPTI5 NAME'].split("x")[0])
def get_trace_profile(self, lower_element_nr = 1, upper_element_nr = 1, pol_degree = [3, 2, 2], bin_elements=100, adc_corr_guess=True, p0 = None, two_comp=False):
# Get binned spectrum
bin_length = int(len(self.haxis) / bin_elements)
bin_flux, bin_error = bin_image(self.flux, self.error, self.bpmap, bin_length, weight = True)
bin_haxis = 10.*(((np.arange(self.header['NAXIS1']/bin_length)) - self.header['CRPIX1'])*self.header['CD1_1']*bin_length+self.header['CRVAL1'])
# Cutting edges of image. Especially important for nodding combinations, due to the negative signals
if self.header['HIERARCH ESO SEQ ARM'] == "UVB" or self.header['HIERARCH ESO SEQ ARM'] == "VIS":
width = int(len(self.vaxis)/10)
elif self.header['HIERARCH ESO SEQ ARM'] == "NIR":
width = int(len(self.vaxis)/4)
else:
raise ValueError("Input image does not contain header keyword 'HIERARCH ESO SEQ ARM'. Cannot cut edges.")
# Save binned image for quality control
self.fitsfile[0].data = bin_flux
self.fitsfile[1].data = bin_error
self.fitsfile[0].header["CD1_1"] = self.fitsfile[0].header["CD1_1"] * bin_length
self.fitsfile.writeto(self.base_name+"_binned.fits", clobber=True)
self.fitsfile[0].header["CD1_1"] = self.fitsfile[0].header["CD1_1"] / bin_length
# Initial parameter guess
fwhm_sigma = 2. * np.sqrt(2.*np.log(2.)) #Conversion between header seeing value and fit seeing value.
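# For a Gaussian profile FWHM = 2*sqrt(2*ln(2))*sigma (about 2.355*sigma), so the
# header seeing, which is a FWHM, is divided by this factor to get the sigma guess.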
if p0 == None:
p0 = [1e1*np.nanmean(bin_flux[bin_flux > 0]), np.median(self.vaxis), abs(self.header['HIERARCH ESO TEL AMBI FWHM START'])/fwhm_sigma, 0.5*abs(self.header['HIERARCH ESO TEL AMBI FWHM START'])/fwhm_sigma, 0, 0]
if two_comp:
p0 = [1e1*np.nanmean(bin_flux[bin_flux > 0]), np.median(self.vaxis), abs(self.header['HIERARCH ESO TEL AMBI FWHM START'])/fwhm_sigma, 0.5*abs(self.header['HIERARCH ESO TEL AMBI FWHM START'])/fwhm_sigma, 0, 0, 5e-1*np.nanmean(bin_flux[bin_flux > 0]), np.median(self.vaxis) + 2, 0.5, 0.1]
# Corrections to slit position from broken ADC, taken from DOI: 10.1086/131052
# Pressure in hPa, Temperature in Celsius
p, T = self.header['HIERARCH ESO TEL AMBI PRES END'], self.header['HIERARCH ESO TEL AMBI TEMP']
# Convert hPa to mmHg
p = p * 0.7501
# Wavelength in microns
wl_m = bin_haxis/1e4
# Refractive index in dry air (n - 1)1e6
eq_1 = 64.328 + (29498.1/(146 - wl_m**-2)) + (255.4/(41 - wl_m**-2))
# Corrections for ambient temperature and pressure
eq_2 = eq_1*((p*(1. + (1.049 - 0.0157*T)*1e-6*p)) / (720.883*(1. + 0.003661*T)))
# Correction from water vapor. Water vapor obtained from the Antoine equation, https://en.wikipedia.org/wiki/Antoine_equation
eq_3 = eq_2 - ((0.0624 - 0.000680*wl_m**-2) / (1. + 0.003661*T)) * 10**(8.07131 - (1730.63/(233.426 + T)))
# Isolate n
n = eq_3 / 1e6 + 1
# Angle relative to zenith
z = np.arccos(1/self.header['HIERARCH ESO TEL AIRM START'])
# Zero-deviation wavelength of arms, from http://www.eso.org/sci/facilities/paranal/instruments/xshooter/doc/VLT-MAN-ESO-14650-4942_v87.pdf
if self.header['HIERARCH ESO SEQ ARM'] == "UVB":
zdwl = 0.405
elif self.header['HIERARCH ESO SEQ ARM'] == "VIS":
zdwl = 0.633
elif self.header['HIERARCH ESO SEQ ARM'] == "NIR":
zdwl = 1.31
else:
raise ValueError("Input image does not contain header keyword 'HIERARCH ESO SEQ ARM'. Cannot determine ADC correction.")
zdwl_inx = find_nearest(wl_m, zdwl)
# Correction of position on slit, relative to Zero-deviation wavelength
dR = (206265*(n - n[zdwl_inx])*np.tan(z))
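# 206265 is the number of arcseconds per radian, so dR is the wavelength-dependent
# offset along the slit, in arcsec, relative to the zero-deviation wavelength.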
# Parameter containers
amp, cen, sig, gam = np.zeros_like(bin_haxis), np.zeros_like(bin_haxis), np.zeros_like(bin_haxis), np.zeros_like(bin_haxis)
eamp, ecen, esig, egam = np.zeros_like(bin_haxis), np.zeros_like(bin_haxis), np.zeros_like(bin_haxis), np.zeros_like(bin_haxis)
# Loop though along dispersion axis in the binned image and fit a Voigt
pp = PdfPages(self.base_name + "Quality_test_SPSF_fit.pdf")
x = np.arange(min(self.vaxis[width:-width]), max(self.vaxis[width:-width]), 0.01)
inp_cent = p0[1]
for ii, kk in enumerate(bin_haxis):
try:
# Edit trace position guess by analytic ADC-amount
if adc_corr_guess:
p0[1] = inp_cent + dR[ii]
elif not adc_corr_guess:
p0[1] = inp_cent
# Fit SPSF
if two_comp:
popt, pcov = optimize.curve_fit(two_voigt, self.vaxis[width:-width], bin_flux[:, ii][width:-width], p0 = p0, maxfev = 5000)
elif not two_comp:
popt, pcov = optimize.curve_fit(voigt, self.vaxis[width:-width], bin_flux[:, ii][width:-width], p0 = p0, maxfev = 5000)
pl.errorbar(self.vaxis[width:-width], bin_flux[:, ii][width:-width], yerr=bin_error[:, ii][width:-width], fmt=".k", capsize=0, elinewidth=0.5, ms=3)
if two_comp:
pl.plot(x, two_voigt(x, *popt), label="Best-fit")
elif not two_comp:
pl.plot(x, voigt(x, *popt), label="Best-fit")
guess_par = [popt[0]] + p0[1:]
guess_par[4] = popt[4]
guess_par[5] = popt[5]
if two_comp:
guess_par[-1] = popt[-1]
pl.plot(x, two_voigt(x, *guess_par), label="Fit guess parameters")
elif not two_comp:
pl.plot(x, voigt(x, *guess_par), label="Fit guess parameters")
pl.title("Profile fit in binned image, index: "+str(ii))
pl.xlabel("Slit position / [arcsec]")
pl.xlabel("Flux density")
pl.legend()
pp.savefig()
pl.clf()
except:
print("Fitting error at binned image index: "+str(ii)+". Replacing fit value with guess and set fit error to 10^10")
popt, pcov = p0, np.diag(1e10*np.ones_like(p0))
amp[ii], cen[ii], sig[ii], gam[ii] = popt[0], popt[1], popt[2], popt[3]
eamp[ii], ecen[ii], esig[ii], egam[ii] = np.sqrt(np.diag(pcov)[0]), np.sqrt(np.diag(pcov)[1]), np.sqrt(np.diag(pcov)[2]), np.sqrt(np.diag(pcov)[3])
pp.close()
# Mask elements too close to guess, indicating a bad fit.
ecen[:lower_element_nr] = 1e10
ecen[-upper_element_nr:] = 1e10
ecen[abs(cen/ecen) > abs(np.nanmean(cen/ecen)) + 5*np.nanstd(cen/ecen)] = 1e10
ecen[abs(amp - p0[0]) < p0[0]/100] = 1e10
ecen[abs(cen - p0[1]) < p0[1]/100] = 1e10
ecen[abs(sig - p0[2]) < p0[2]/100] = 1e10
ecen[abs(gam - p0[3]) < p0[3]/100] = 1e10
# Remove the 5 highest S/N pixels
ecen[np.argsort(sig/esig)[-5:]] = 1e10
ecen[np.argsort(gam/egam)[-5:]] = 1e10
# Fit polynomial for center and iteratively reject outliers
std_resid = 5
while std_resid > 0.5:
fitcen = chebyshev.chebfit(bin_haxis, cen, deg=pol_degree[0], w=1/ecen)
resid = cen - chebyshev.chebval(bin_haxis, fitcen)
avd_resid, std_resid = np.median(resid[ecen != 1e10]), np.std(resid[ecen != 1e10])
mask = (resid < avd_resid - std_resid) | (resid > avd_resid + std_resid)
ecen[mask] = 1e10
fitcenval = chebyshev.chebval(self.haxis, fitcen)
# Plotting for quality control
fig, (ax1, ax2, ax3, ax4) = pl.subplots(4,1, figsize=(14, 14), sharex=True)
ax1.errorbar(bin_haxis, cen, yerr=ecen, fmt=".k", capsize=0, elinewidth=0.5, ms=7)
ax1.plot(self.haxis, fitcenval)
vaxis_range = max(self.vaxis) - min(self.vaxis)
ax1.set_ylim((min(self.vaxis[width:-width]), max(self.vaxis[width:-width])))
ax1.set_ylabel("Profile center / [arcsec]")
ax1.set_title("Quality test: Center estimate")
# Sigma-clip outliers in S/N-space
esig[ecen == 1e10] = 1e10
esig[sig < 0.01] = 1e10
fitsig = chebyshev.chebfit(bin_haxis, sig, deg=pol_degree[1], w=1/esig**2)
fitsigval = chebyshev.chebval(self.haxis, fitsig)
# Ensure positivity
fitsigval[fitsigval < 0.1] = 0.1
# Plotting for quality control
ax2.errorbar(bin_haxis, sig, yerr=esig, fmt=".k", capsize=0, elinewidth=0.5, ms=7)
ax2.plot(self.haxis, fitsigval)
ax2.set_ylim((0, 1))
ax2.set_ylabel("Profile sigma width / [arcsec]")
ax2.set_title("Quality test: Profile Gaussian width estimate")
# Sigma-clip outliers in S/N-space
egam[ecen == 1e10] = 1e10
egam[gam < 1e-5] = 1e10
# sngam = gam/egam
# egam[sngam > 100 ] = 1e10
fitgam = chebyshev.chebfit(bin_haxis, gam, deg=pol_degree[2], w=1/egam**2)
fitgamval = chebyshev.chebval(self.haxis, fitgam)
# Ensure positivity
fitgamval[fitgamval < 0] = 0.0001
# Plotting for quality control
ax3.errorbar(bin_haxis, gam, yerr=egam, fmt=".k", capsize=0, elinewidth=0.5, ms=7)
ax3.plot(self.haxis, fitgamval)
ax3.set_ylim((-0.1, 1.0))
ax3.set_ylabel("Profile gamma width / [arcsec]")
ax3.set_title("Quality test: Profile Lorentzian width estimate")
# Amplitude replaced with ones
from scipy import interpolate, signal
eamp[ecen == 1e10] = 1e10
amp[amp < 0] = 1e-20
amp = signal.medfilt(amp, 5)
mask = ~(eamp == 1e10)
f = interpolate.interp1d(bin_haxis[mask], amp[mask], bounds_error=False, fill_value="extrapolate")
fitampval = f(self.haxis)
fitampval[fitampval <= 0] = 1e-20#np.nanmean(fitampval[fitampval > 0])
# Plotting for quality control
ax4.errorbar(bin_haxis, amp, fmt=".k", capsize=0, elinewidth=0.5, ms=5)
ax4.plot(self.haxis, fitampval)
ax4.set_ylabel("Profile amplitude / [counts/s]")
ax4.set_title("Quality test: Profile amplitude estimate")
ax4.set_xlabel(r"Wavelength / [$\mathrm{\AA}$]")
fig.subplots_adjust(hspace=0)
fig.savefig(self.base_name + "PSF_quality_control.pdf")
pl.close(fig)
# Calculating slit-losses based on fit-width
if hasattr(self, 'slitcorr'):
self.slitcorr = slit_loss(fitsigval, self.slit_width, l_sigma=fitgamval)
self.full_profile, self.trace_model = np.zeros_like(self.flux), np.zeros_like(self.flux)
for ii, kk in enumerate(self.haxis):
self.trace_model[:, ii] = voigt(self.vaxis, fitampval[ii], fitcenval[ii], fitsigval[ii], fitgamval[ii])
self.full_profile[:, ii] = self.trace_model[:, ii] / abs(np.trapz(self.trace_model[:, ii]))
def extract_spectrum(self, extraction_bounds, optimal=None, slitcorr=None, edge_mask=None, pol_degree=None, bin_elements=None, plot_ext=None, adc_corr_guess=True, p0=None, two_comp=False):
"""Optimally extracts a spectrum from sky-subtracted X-shooter image.
Function to extract spectra from X-shooter images. Either sums the flux in a central aperture or uses a profile-weighted extraction.
fitsfile : fitsfile
Input sky-subtracted image with flux, error and bad-pixel map in extensions 0, 1, 2 respectively.
extraction_bounds : tuple
Tuple containing extraction bounds for the standard extraction.
outname : str
Name of saved spectrum
Returns
-------
Wavelength, Extracted spectrum, Associated error array : np.array, np.array, np.array
Notes
-----
na
"""
if slitcorr:
self.slitcorr = slitcorr
# Applying updated wavelength solution. This also includes barycentric correction etc.
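# Explicitly: lambda_i = 10 * ((i + 1 - CRPIX1) * CDELT1 + CRVAL1) * WAVECORR * (1 + v_bary/c),
# with v_bary in km/s, c ~ 3e5 km/s, and the factor 10 converting the header scale to Angstrom.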
self.haxis = 10.*(((np.arange(self.header['NAXIS1'])) + 1 - self.header['CRPIX1'])*self.header['CDELT1']+self.header['CRVAL1']) * self.header['WAVECORR'] * (1 + self.header['HIERARCH ESO QC VRAD BARYCOR']/3e5)
self.vaxis = ((np.arange(self.header['NAXIS2'])) - self.header['CRPIX2'])*self.header['CDELT2'] + self.header['CRVAL2']
# Finding extraction radius
seeing = (extraction_bounds[1] - extraction_bounds[0])*self.header['CDELT2']
# Construct spatial PSF to be used as weight in extraction
if optimal:
print("Fitting for the full spectral extraction profile")
XSHextract.get_trace_profile(self, lower_element_nr = int(tuple(edge_mask)[0]), upper_element_nr = int(tuple(edge_mask)[1]), pol_degree=pol_degree, bin_elements=bin_elements, adc_corr_guess=adc_corr_guess, p0=p0, two_comp=two_comp)
self.fitsfile[0].data = (self.flux - self.trace_model).data
self.fitsfile[1].data = self.error.data
self.fitsfile.writeto(self.base_name + "Profile_subtracted_image.fits", clobber=True)
elif not optimal:
print("Extracting spectrum between pixel " +str(extraction_bounds[0])+ " and " +str(extraction_bounds[1]))
print("Aperture width is: " + str(seeing) + " arcsec.")
print("Basing slitloss correction factor on the assumption that the aperture is the 2 * seeing FWHM.")
# Calculating slit-loss based on specified seeing.
if hasattr(self, 'slitcorr'):
self.slitcorr = slit_loss(seeing/(2*2.35), self.slit_width)
# Defining extraction aperture
ext_aper = slice(extraction_bounds[0], extraction_bounds[1])
# Interpolate over bad pixel map
self.flux.data[self.flux.mask] = np.nan
self.error.data[self.flux.mask] = np.nanmax(self.error.data[~self.flux.mask])
self.error = self.error.data
self.bpmap = self.flux.mask.astype("int")
self.flux = inpaint_nans(self.flux.data, kernel_size=5)
# Save interpolated image for quality control
self.fitsfile[0].data = self.flux
self.fitsfile[1].data = self.error
self.fitsfile.writeto(self.base_name+"_interpolated.fits", clobber=True)
if optimal:
# Do optimal extraction
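# Profile-weighted extraction in the spirit of Horne (1986): each column is
# collapsed with weights full_profile/error^2 and normalised by
# sum(full_profile^2/error^2), so low-noise pixels near the trace centre dominate.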
denom = np.sum((self.full_profile**2. / self.error**2.), axis=0)
spectrum = np.sum(self.full_profile * self.flux / self.error**2., axis=0) / denom
errorspectrum = np.sqrt(1 / denom)
# Sum up bpvalues to find interpolated values in 2-sigma width
self.bpmap[self.full_profile/np.max(self.full_profile) < 0.05] = 0
bpmap = np.sum(self.bpmap, axis=0)
extname = "optext.dat"
# Unpack masked array
spectrum = spectrum.data
errorspectrum = errorspectrum.data
elif not optimal:
# Do normal sum
spectrum, errorspectrum = np.sum(self.flux[ext_aper, :], axis=0), np.sqrt(np.sum(self.error[ext_aper, :]**2.0, axis=0))
bpmap = np.sum(self.bpmap[ext_aper, :], axis=0)
extname = "stdext.dat"
else:
print("Optimal argument need to be boolean")
# Boost error in noisy pixels, where noisy pixels are those with pixel-to-pixel variation larger than sigma_reject times the error map
if self.header['HIERARCH ESO SEQ ARM'] == "UVB" or self.header['HIERARCH ESO SEQ ARM'] == "VIS":
sigma_reject = 100
elif self.header['HIERARCH ESO SEQ ARM'] == "NIR":
sigma_reject = 10
mask = (abs(np.diff(spectrum)) > sigma_reject * errorspectrum[1:])
errorspectrum[1:][mask] = np.nanmax(errorspectrum)
bpmap[1:][mask] = 1
extinc_corr, ebv = correct_for_dust(self.haxis, self.header["RA"], self.header["DEC"])
print("Applying the following extinction correction for queried E(B-V):"+str(ebv))
print(extinc_corr)
spectrum *= extinc_corr
errorspectrum *= extinc_corr
dt = [("wl_air", np.float64), ("wl_vac", np.float64), ("flux", np.float64), ("error", np.float64), ("bpmap", np.float64), ("extinc", np.float64)]
out_data = [self.haxis, convert_air_to_vacuum(self.haxis), spectrum, errorspectrum, bpmap, extinc_corr]
formatt = ['%10.6e', '%10.6e', '%10.6e', '%10.6e', '%10.6e', '%10.6e']
head = "air_wave vacuum_wave flux error bpmap E(B-V) = "+str(np.around(ebv, 3))
fil = self.base_name.split("/")[-1]
if hasattr(self, 'response'):
print("Applying the master response function")
spectrum *= self.response
errorspectrum *= self.response
dt.append(("response", np.float64))
out_data.append(self.response)
formatt.append('%10.6e')
head = head + " reponse"
try:
if not hasattr(self, 'response'):
self.response = np.genfromtxt("/".join(self.base_name.split("/")[:-1])+"/reduced_data/"+self.base_name.split("/")[-1][3:-6]+"/"+self.base_name.split("/")[-1][:3]+"/response_function.dat")
dt.append(("response", np.float64))
out_data.append(self.response)
formatt.append('%10.6e')
head = head + " reponse"
except:
pass
if hasattr(self, 'slitcorr'):
print("Estimated slitloss correction factor is:"+str(self.slitcorr))
if type(self.slitcorr) == np.float64:
self.slitcorr = np.ones_like(spectrum) * self.slitcorr
# spectrum *= self.slitcorr
# errorspectrum *= self.slitcorr
dt.append(("slitcorr", np.float64))
out_data.append(self.slitcorr)
formatt.append('%10.6e')
head = head + " slitloss"
try:
print("Attempting to find telluric correction ...")
tell_file = np.genfromtxt(glob.glob("/".join(self.base_name.split("/")[:-1])+"/"+ self.base_name.split("/")[-1][:3] + self.base_name.split("/")[-1][3:-6]+"*telluric*dat")[0])
trans = tell_file[:, 2]/tell_file[:, 1]
trans[np.isinf(trans)] = 1
dt.append(("telluric_correction", np.float64))
out_data.append(trans)
formatt.append('%10.6e')
head = head + " tell_corr"
except:
print("No telluric correciont was found ... Skipping.")
data = np.array(zip(*out_data), dtype=dt)
np.savetxt(self.base_name + extname, data, header=head, fmt = formatt, delimiter="\t")
if plot_ext:
fig, ax = pl.subplots()
mask = (bpmap == 0) & ~np.isnan(spectrum) & ~np.isinf(spectrum) & ~np.isnan(errorspectrum) & ~np.isinf(errorspectrum)
ax.errorbar(self.haxis[mask][::5], spectrum[mask][::5], yerr=errorspectrum[mask][::5], fmt=".k", capsize=0, elinewidth=0.5, ms=3, alpha=0.5)
ax.plot(self.haxis[mask][::5], spectrum[mask][::5], lw = 0.2, linestyle="steps-mid", alpha=0.5, rasterized=True)
ax.plot(self.haxis[mask][::5], errorspectrum[mask][::5], linestyle="steps-mid", lw=1.0, alpha=0.5, color = "grey")
ax.axhline(0, linestyle="dashed", color = "black", lw = 0.4)
m = np.average(spectrum[mask][int(len(spectrum)/10):int(-len(spectrum)/10)], weights=1/errorspectrum[mask][int(len(spectrum)/10):int(-len(spectrum)/10)])
s = np.nanstd(spectrum[mask][abs(spectrum[mask] - m) < 3 * np.nanstd(spectrum[mask]) ][int(len(spectrum)/10):int(-len(spectrum)/10)])
pl.xlim(min(self.haxis), max(self.haxis))
pl.ylim(- s, m + 5 * s)
pl.xlabel(r"Wavelength / [$\mathrm{\AA}$]")
pl.ylabel(r'Flux density [erg s$^{-1}$ cm$^{-1}$ $\AA^{-1}$]')
pl.savefig(self.base_name + "Extraction"+str(extname.split(".")[0])+".pdf")
# pl.show()
pl.clf()
return self.haxis, spectrum, errorspectrum
def run_extraction(args):
print("Running extraction on file: " + args.filepath)
print("with options:")
print("optimal = " + str(args.optimal))
print("slitcorr = " + str(args.slitcorr))
print("plot_ext = " + str(args.plot_ext))
print("adc_corr_guess = " + str(args.adc_corr_guess))
print("use_master_response = " + str(args.use_master_response))
print("")
# Look for response function at file dir
if not args.response_path and args.use_master_response:
print("--use_master_reponse is set, but no -response_path is. I will try to guess where the master reponse file is located.")
for ii, kk in enumerate(glob.glob("/".join(args.filepath.split("/")[:-1])+"/data_with_raw_calibs/M*.fits")):
try:
filetype = fits.open(kk)[0].header["CDBFILE"]
arm = fits.open(args.filepath)[0].header["HIERARCH ESO SEQ ARM"]
if "GRSF" in filetype and arm in filetype:
args.response_path = kk
except:
pass
if args.response_path:
print("Found master response at: "+str(args.response_path))
elif not args.response_path:
print("None found. Skipping flux calibration.")
if args.response_path and args.use_master_response:
# Look for response function at file dir
if os.path.isdir(args.response_path):
print("Path to response file supplied. Looking for response function.")
for ii, kk in enumerate(glob.glob(args.response_path+"/M*.fits")):
try:
filetype = fits.open(kk)[0].header["CDBFILE"]
arm = fits.open(args.filepath)[0].header["HIERARCH ESO SEQ ARM"]
if "GRSF" in filetype and arm in filetype:
args.response_path = kk
except:
pass
# args.response_path = response_file
if not os.path.isdir(args.response_path):
print("Found master response at: "+str(args.response_path))
elif os.path.isdir(args.response_path):
print("None found. Skipping flux calibration.")
args.response_path = None
# args.response_path = response_file
if not args.use_master_response:
args.response_path = None
spec = XSHextract(args.filepath, resp = args.response_path)
# Optimal extraction
wl, flux, error = spec.extract_spectrum(extraction_bounds=args.extraction_bounds, optimal=args.optimal, slitcorr=args.slitcorr, edge_mask=args.edge_mask, pol_degree=args.pol_degree, bin_elements=args.bin_elements, plot_ext=args.plot_ext, adc_corr_guess=args.adc_corr_guess, p0=args.p0, two_comp=args.two_comp)
def main(argv):
parser = argparse.ArgumentParser()
parser.add_argument('filepath', type=str, help='Path to file on which to run extraction')
parser.add_argument('-response_path', type=str, default=None, help='Response function to apply. Can either be a path to file or path to directory containing file. If directory, will look for correct file.')
parser.add_argument('-extraction_bounds', type=str, default="30, 60", help='Bounds in which to do the standard extraction. Must be indices over which to do the extraction. Example -extraction_bounds 30,60')
parser.add_argument('-edge_mask', type=str, default="1, 1", help='Tuple containing the edge masks. (10,10) means that 10 pixels are masked at each edge.')
parser.add_argument('-pol_degree', type=str, default="3,2,2", help='List of polynomial degrees. Each number specifies the degree of the polynomial used for the fit in central position, Gaussian width and Lorentzian width, respectively. Must be specified as 3,2,2 without the brackets.')
parser.add_argument('-bin_elements', type=int, default=100, help='Integer specifying the number of elements to bin down to for tracing. A higher value will allow for a more precise tracing, but is only suitable for very high S/N objects')
parser.add_argument('--use_master_response', action="store_true" , help = 'Set this optional keyword if input file is not flux-calibrated. The master response function is applied to the extracted spectrum.')
parser.add_argument('--optimal', action="store_true" , help = 'Enable optimal extraction')
parser.add_argument('--slitcorr', action="store_true" , help = 'Apply slitloss correction based on profile width')
parser.add_argument('--plot_ext', action="store_true" , help = 'Plot extracted spectrum')
parser.add_argument('--adc_corr_guess', action="store_true" , help = 'Model atmospheric differential refraction for the input guess of the SPSF position on the slit. Set this keyword in periods where the ADC on X-shooter is disabled.')
parser.add_argument('-p0', type=str, default=None, help = 'Input guess parameters for the profile fitting. Must be a list with 6 elements in the shape [Amplitude/flux density, Center/arcsec, Gaussian width/arcsec, Lorentzian width/arcsec, Constant offset, Offset slope]. If not set, reasonable values will be used. If --two_comp is set, an additional two parameters are required, the amplitude and the position on the slit of the second component.')
parser.add_argument('--two_comp', action="store_true", help = 'If set, will add an additional PSF component in the profile fit to account for multiple, potentially overlapping sources. If this is set, p0 should probably also be specified for the initial guess on the position of the additional trace. The same widths for the two profiles are assumed.')
args = parser.parse_args(argv)
if not args.filepath:
print('When using arguments, you need to supply a filepath. Stopping execution')
exit()
if args.edge_mask:
args.edge_mask = [int(x) for x in args.edge_mask.split(",")]
if args.extraction_bounds:
args.extraction_bounds = [int(x) for x in args.extraction_bounds.split(",")]
if args.pol_degree:
args.pol_degree = [int(x) for x in args.pol_degree.split(",")]
if args.p0:
args.p0 = [float(x) for x in args.p0.split(",")]
print("Manually specified profile guess = " + str(args.p0))
run_extraction(args)
if __name__ == '__main__':
# If script is run from editor or without arguments, run using this:
if len(sys.argv) == 1:
"""
Central script to extract spectra from X-shooter for the X-shooter GRB sample.
"""
data_dir = "/Users/jselsing/Work/work_rawDATA/XSGRB/"
object_name = data_dir + "GRB161023A/"
arms = ["UVB", "VIS", "NIR"] # # UVB, VIS, NIR, ["UVB", "VIS", "NIR"]
OB = "OB1"
for ii in arms:
# Construct filepath
file_path = object_name+ii+OB+"skysub.fits"
# file_path = object_name+ii+"_combined.fits"
# Load in file
files = glob.glob(file_path)
parser = argparse.ArgumentParser()
args = parser.parse_args()
args.filepath = files[0]
args.response_path = None # "/Users/jselsing/Work/work_rawDATA/XSGRB/GRB100814A/reduced_data/OB3/RESPONSE_MERGE1D_SLIT_UVB.fits", None
args.use_master_response = False # True, False
args.optimal = True # True, False
args.extraction_bounds = (40, 60 )
if ii == "NIR":
args.extraction_bounds = (33, 46)
args.slitcorr = True # True, False
args.plot_ext = True # True, False
args.adc_corr_guess = True # True, False
if ii == "UVB":
args.edge_mask = (10, 5)
elif ii == "VIS":
args.edge_mask = (5, 5)
elif ii == "NIR":
args.edge_mask = (5, 10)
args.pol_degree = [4, 4, 4]
args.bin_elements = 400
args.p0 = None # [1e-18, -2.5, 0.3, 0.1, -1e-18, 0], [1e-18, -2.5, 0.3, 0.1, -1e-18, 0, 1e-18, 2, 0.5, 0.1], None
args.two_comp = False # True, False
run_extraction(args)
else:
main(argv = sys.argv[1:])
| jselsing/XSGRB_reduction_scrips | py/XSHextract.py | Python | gpl-3.0 | 32,141 | ["Gaussian"] | 4af09ebc07a36686802e87dcf41aa5f696597f2fe63edf70425f5ba119b65483 |
import helper_functions as hf
import numpy as np
from scipy.interpolate import interp1d
from neuron import h
from Network_LFPy import Network
from Ia_LFPy import Ia
from Mn_LFPy import Mn
class Ia_network(Network):
#
def __init__(self, *args, **kwargs):
'''
class initialization
POPULATION_SIZE : int, number of cells
cellParameters : dict
populationParameters : dict
synapseParameters : dict
'''
super(Ia_network, self).__init__(*args, **kwargs)
self.cellPositions = {
'Mn' : [],
'Ia' : []
}
self.cellRotations = {
'Mn' : [],
'Ia' : []
}
sim_params = hf.get_net_params(hf.get_tempdata_address())
dummy_Ia = Ia(n_nodes = sim_params[0][0])
dummy_Mn = Mn()
self.cellMorphologies = {
'Mn' : dummy_Mn.morphology_address,
'Ia' : dummy_Ia.morphology_address
}
del dummy_Ia, dummy_Mn
#
def create_cells(self, cellindex):
"""Create and layout N cells in the network."""
cells = {}
position_factor = 1e3
sim_params = hf.get_net_params(hf.get_tempdata_address())
mn_pos_x = sim_params[10]
mn_pos_y = sim_params[11]
mn_pos_z = sim_params[12]
cell = Mn()
'''cell.set_pos(mn_pos_x[0] + cellindex * position_factor,
mn_pos_y[0] + cellindex * position_factor,
mn_pos_z[0] + cellindex * position_factor)
'''
cell.set_pos(cellindex * position_factor,
cellindex * position_factor,
cellindex * position_factor)
cells.update({"Mn" : cell})
self.cellPositions['Mn'].append([cell.somapos[0], cell.somapos[1], cell.somapos[2]])
cell = Ia(n_nodes = sim_params[0][0])
cell.set_pos(cellindex * position_factor,
cellindex * position_factor,
cellindex * position_factor)
cells.update({"Ia" : cell})
self.cellPositions['Ia'].append([cell.somapos[0], cell.somapos[1], cell.somapos[2]])
return cells
#
def connect_cells(self, cells, cellindex):
src = cells["Ia"]
tgt_syn = cells["Mn"].synlist[0]
nc = src.connect2target(src.Ia_node[0], tgt_syn)
nc.weight[0] = self._synapseParameters['weight'][0]
nc.delay = self._synapseParameters['weight'][0]
return nc
#
def cellsim(self, cellindex):
cells = self.create_cells(cellindex)
nc = self.connect_cells(cells, cellindex)
spiketimes = h.Vector() # Spike time of all cells
cell_ids = h.Vector() # Ids of spike times
nc.record(spiketimes, cell_ids, cellindex)
#perform NEURON simulation, results saved as attributes in cell
self.simulate(cells)
somav = cells['Mn'].somav
for key, value in cells.iteritems():
del value
del cells
#print("nsoma_sec = %d" % cells['Mn'].nsomasec)
#return dict with primary results from simulation
return {'somav' : somav}
def simulate(self,cells):
"""Run the simulation"""
if self.hasExtracellularVoltage:
for key, value in self.v_space.iteritems():
t_ext = np.arange(cells[key].tstopms / cells[key].timeres_python+ 1) * \
cells[key].timeres_python
original_locations = np.arange(cells[key].totnsegs)
v_interp = interp1d()
cells[key].insert_voltage(value)
cells['Mn'].simulate(rec_vmem=True)
cells['Ia'].simulate(rec_vmem=True)
| penguinscontrol/Spinal-Cord-Modeling | Python/Ia_network_LFPy.py | Python | gpl-2.0 | 3,715 | ["NEURON"] | 3645a7004f41fa2de4116d00cbaea00eb6021cbacceda80062c3633a45f1b5eb |
'''
'''
from zope.interface import implements
from Interfaces.ThirdPartyWrapper import IThirdPartyWrapper
import os
from StringIO import StringIO
# Unofficial biopython from "kellrott" fork
# branch "hmmer_bsd" (Retrieved June 22, 2012)
import biopython_temp.AlignIO as AlignIO
from biopython_temp._Hmmer import HmmScanCommandline, HmmPressCommandline, HmmAlignCommandline
class PfamHMMERQuery(object):
'''
classdocs
'''
implements(IThirdPartyWrapper)
expected_params = ['query_sequences_fasta_file', 'pfam_db_loc']
expected_executable_locations = ['hmmscan', 'hmmpress']
def setup_caller(self, executable_locations, params):
'''
Sets up self.executable_locations and self.params from the provided arguments.
'''
for p in self.expected_params:
if not p in params:
raise Exception, "PfamQuery parameter %s not provided to call."%p
for p in self.expected_executable_locations:
if not p in executable_locations:
raise Exception, "PfamQuery executable %s not provided to call."%p
self.executable_locations = executable_locations
self.params = params
def call(self):
"""
Call executable with provided params.
"""
#try:
# Call HMM press (if it hasn't happened already.)
if not os.path.isfile(self.params['pfam_db_loc']+'/Pfam-A.hmm.h3i'):
call_cmd = HmmPressCommandline(cmd=self.executable_locations['hmmpress'],
hmm=self.params['pfam_db_loc']+'/Pfam-A.hmm',
force=False)
self.stdout_data, self.stderr_data = call_cmd()
call_cmd = HmmScanCommandline(cmd=self.executable_locations['hmmscan'],
hmm=self.params['pfam_db_loc']+'/Pfam-A.hmm',
input=self.params['query_sequences_fasta_file'])
self.stdout_data, self.stderr_data = call_cmd()
# Flag success
self.status = 0
#except:
# # Flag failure
# self.status = 1
def parse_results(self):
"""
Parse the stored call results and return
parsed data structure.
Result structure is usable by ProteinInformation retriever methods.
"""
# Expect return code to be zero
if not(self.status == 0):
raise Exception, "PfamScan didn't succeed. (Return status: %i)\nStderr: %s"%(self.status, self.stderr_data)
# Store matches for processing
pfam_hmm_io = AlignIO.parse(StringIO(self.stdout_data), "hmmer3")
pfam_hmm_matches = []
for s in pfam_hmm_io:
pfam_hmm_matches.append(s)
self.parsed_results = pfam_hmm_matches | midnighteuler/PySIFTER | PySIFTER/ThirdPartyWrappers/PfamHMMERQuery.py | Python | gpl-3.0 | 2,861 | [
"Biopython"
] | 418c98674bceb9cff36f12a2e34dd9595447f69abee8c160ae6cfb7836b16779 |
"""Common logging functions."""
from os.path import basename, splitext
import sys
import logging
import tempfile
import subprocess
from . import db
LOGGER = None # Global logger so we can switch between queries & blast DBs
FORMATTER = logging.Formatter('%(asctime)s %(levelname)s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
NAME = 'atram_logger'
def setup(log_file, log_level, blast_db, query_file=''):
"""Logger setup."""
log_file = file_name(log_file, blast_db, query_file)
_setup(log_file, log_level)
def stitcher_setup(log_file, log_level):
"""Build a logger for the stitcher."""
_setup(log_file, log_level)
def _setup(log_file, log_level):
global LOGGER # pylint: disable=global-statement
if not LOGGER:
handler = logging.FileHandler(log_file)
handler.setFormatter(FORMATTER)
handler.setLevel(logging.DEBUG)
stream = logging.StreamHandler()
stream.setFormatter(FORMATTER)
stream.setLevel(logging.INFO)
LOGGER = logging.getLogger(log_file)
log_level = getattr(logging, log_level.upper())
LOGGER.setLevel(log_level)
LOGGER.addHandler(handler)
LOGGER.addHandler(stream)
info('#' * 80)
info('aTRAM version: {}'.format(db.ATRAM_VERSION))
info('Python version: {}'.format(' '.join(sys.version.split())))
info(' '.join(sys.argv[:]))
def file_name(log_file, blast_db, query_file=''):
"""
Create the log file name for each run.
Honor user's argument if given.
"""
if log_file:
return log_file
program = splitext(basename(sys.argv[0]))[0]
if query_file:
query_file = splitext(basename(query_file))[0]
return '{}.{}.{}.log'.format(blast_db, query_file, program)
return '{}.{}.log'.format(blast_db, program)
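# For example, when the running script is atram.py,
# file_name(None, 'my_db', 'queries.fasta') returns 'my_db.queries.atram.log'.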
def subcommand(cmd, temp_dir, timeout=None):
"""
Call a subprocess and log the output.
Note: stdout=PIPE is blocking and large logs cause a hang.
So we don't use it.
"""
LOGGER.debug(cmd)
with tempfile.NamedTemporaryFile(mode='w', dir=temp_dir) as log_output:
try:
subprocess.check_call(
cmd,
shell=True,
timeout=timeout,
stdout=log_output,
stderr=log_output)
except Exception as err: # pylint: disable=broad-except
error('Exception: {}'.format(err))
finally:
with open(log_output.name) as log_input:
for line in log_input:
line = line.strip()
if line:
LOGGER.debug(line)
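# Minimal usage sketch: subcommand('echo done', temp_dir) runs the command through
# the shell and streams its combined stdout/stderr into the debug log via a
# temporary file; a timeout (in seconds) may also be passed.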
def info(msg):
"""Log an info message."""
LOGGER.info(msg)
def error(msg):
"""Log an error message."""
LOGGER.error(msg)
def fatal(msg):
"""Log an error message and exit."""
error(msg)
sys.exit(1)
| AntonelliLab/seqcap_processor | bin/aTRAM-master/lib/log.py | Python | mit | 2,913 | ["BLAST"] | 25c063d5a6fa03644d1ac695ea63842218b4cb0a4b89cf5797058e3d435cf96c |
import os
import sys
PROJECT_DIR = os.path.join(os.path.dirname(__file__), "..")
sys.path.append(PROJECT_DIR)
import classarg
def train(model,
lr=0.0001,
epoch=40,
nthread=1,
batch_size=20):
"""This example comes from a real script, where we want to train a
neuron network. Instead of using argparse and reading variables from a
result object, now you can focus on your program and forget about the
command-line parsing problem!
Arguments:
model: path to save the model. It will be overwritten if it exists.
lr: learning rate. Default to 0.0001.
epoch: training epoch. Default to 40.
nthread: # of threads to generate training data. Default to 1.
batch_size: batch_size. Default to 20.
"""
print('Training with lr={}, epoch={}, nthread={}, batch_size={}'.format(
lr, epoch, nthread, batch_size))
if __name__ == '__main__':
classarg.Simple(train)
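# Illustrative only (the exact command-line syntax is classarg's and is not
# verified here): an invocation such as
#   python simple_function.py my_model --lr 0.001 --epoch 10
# would end up calling train('my_model', lr=0.001, epoch=10).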
| PatrickChen83/ClassArg | examples/simple_function.py | Python | mit | 978 | ["NEURON"] | e1b499b1e47578967508e3009cfd172fad694cb34944005a50873e1f872f04cb |
#!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
"""Testing for common.Storage class."""
####################################################################################################
# DO NOT MODIFY THIS HEADER #
# MOOSE - Multiphysics Object Oriented Simulation Environment #
# #
# (c) 2010 Battelle Energy Alliance, LLC #
# ALL RIGHTS RESERVED #
# #
# Prepared by Battelle Energy Alliance, LLC #
# Under Contract No. DE-AC07-05ID14517 #
# With the U. S. Department of Energy #
# #
# See COPYRIGHT for full restrictions #
####################################################################################################
import unittest
from MooseDocs import common
class TestStorage(unittest.TestCase):
"""
Test the Storage object.
"""
def testAdd(self):
"""
Use add an __iter__ access
"""
storage = common.Storage(int)
storage.add('one', 1)
storage.add('four', 4)
storage.add('three', 3)
self.assertEqual(list(storage), [1,4,3])
with self.assertRaises(TypeError) as cm:
storage.add('value', 1.2)
self.assertIn('Incorrect object provided, expected', str(cm.exception))
with self.assertRaises(ValueError) as cm:
storage.add('one', 11)
self.assertIn("The key 'one' already", str(cm.exception))
def testGetItem(self):
"""
Test operator[] access
"""
storage = common.Storage(int)
storage.add('one', 1)
storage.add('four', 4)
storage.add('three', 3)
# str access
self.assertEqual(storage['four'], 4)
self.assertEqual(storage['one'], 1)
self.assertEqual(storage['three'], 3)
# int access
self.assertEqual(storage[0], 1)
self.assertEqual(storage[1], 4)
self.assertEqual(storage[2], 3)
# wrong type
with self.assertRaises(TypeError) as cm:
storage[1.2]
self.assertIn("The supplied type must be 'int'", str(cm.exception))
# bad index
with self.assertRaises(IndexError):
storage[42]
# bad key
with self.assertRaises(ValueError):
storage['42']
def testContains(self):
"""
Test 'in' operator.
"""
storage = common.Storage(int)
storage.add('one', 1)
storage.add('four', 4)
storage.add('three', 3)
# str
self.assertIn('one', storage)
self.assertIn('four', storage)
self.assertIn('three', storage)
# int
self.assertIn(0, storage)
self.assertIn(1, storage)
self.assertIn(2, storage)
# not in
self.assertNotIn('five', storage)
self.assertNotIn(42, storage)
def testInsert(self):
"""
Test that the insert methods work.
"""
storage = common.Storage(int)
storage.add('one', 1)
storage.add('four', 4)
storage.add('two', 2, '>one')
self.assertEqual(storage[1], 2)
storage.add('three', 3, '<four')
self.assertEqual(storage[2], 3)
storage.add('five', 5, '_end')
self.assertEqual(storage[4], 5)
storage.add('zero', 0, '_begin')
self.assertEqual(storage[0], 0)
storage.add('negative', -1, 0)
self.assertEqual(storage[0], -1)
storage.add('answer', 42, len(storage))
self.assertEqual(storage[-1], 42)
if __name__ == '__main__':
unittest.main(verbosity=2)
| harterj/moose | python/MooseDocs/test/common/test_storage.py | Python | lgpl-2.1 | 4,567 | ["MOOSE"] | 48f9977a61946c226f916803a479493373a06ea902ff99f3d63a4f016af1e64c |
# coding: utf-8
from pyelasticsearch import bulk_chunks, ElasticSearch
import csv
import sys
csv.field_size_limit(sys.maxsize)
es = ElasticSearch(urls='http://localhost:9200/', timeout=60, max_retries=2)
def iso_convert(iso2c):
"""
Takes a two character ISO country code and returns the corresponding 3
character ISO country code.
Parameters
----------
iso2c: A two character ISO country code.
Returns
-------
iso3c: A three character ISO country code.
"""
iso_dict = {"AD":"AND", "AE":"ARE", "AF":"AFG", "AG":"ATG", "AI":"AIA",
"AL":"ALB", "AM":"ARM", "AO":"AGO", "AQ":"ATA", "AR":"ARG",
"AS":"ASM", "AT":"AUT", "AU":"AUS", "AW":"ABW", "AX":"ALA",
"AZ":"AZE", "BA":"BIH", "BB":"BRB", "BD":"BGD", "BE":"BEL",
"BF":"BFA", "BG":"BGR", "BH":"BHR", "BI":"BDI", "BJ":"BEN",
"BL":"BLM", "BM":"BMU", "BN":"BRN", "BO":"BOL", "BQ":"BES",
"BR":"BRA", "BS":"BHS", "BT":"BTN", "BV":"BVT", "BW":"BWA",
"BY":"BLR", "BZ":"BLZ", "CA":"CAN", "CC":"CCK", "CD":"COD",
"CF":"CAF", "CG":"COG", "CH":"CHE", "CI":"CIV", "CK":"COK",
"CL":"CHL", "CM":"CMR", "CN":"CHN", "CO":"COL", "CR":"CRI",
"CU":"CUB", "CV":"CPV", "CW":"CUW", "CX":"CXR", "CY":"CYP",
"CZ":"CZE", "DE":"DEU", "DJ":"DJI", "DK":"DNK", "DM":"DMA",
"DO":"DOM", "DZ":"DZA", "EC":"ECU", "EE":"EST", "EG":"EGY",
"EH":"ESH", "ER":"ERI", "ES":"ESP", "ET":"ETH", "FI":"FIN",
"FJ":"FJI", "FK":"FLK", "FM":"FSM", "FO":"FRO", "FR":"FRA",
"GA":"GAB", "GB":"GBR", "GD":"GRD", "GE":"GEO", "GF":"GUF",
"GG":"GGY", "GH":"GHA", "GI":"GIB", "GL":"GRL", "GM":"GMB",
"GN":"GIN", "GP":"GLP", "GQ":"GNQ", "GR":"GRC", "GS":"SGS",
"GT":"GTM", "GU":"GUM", "GW":"GNB", "GY":"GUY", "HK":"HKG",
"HM":"HMD", "HN":"HND", "HR":"HRV", "HT":"HTI", "HU":"HUN",
"ID":"IDN", "IE":"IRL", "IL":"ISR", "IM":"IMN", "IN":"IND",
"IO":"IOT", "IQ":"IRQ", "IR":"IRN", "IS":"ISL", "IT":"ITA",
"JE":"JEY", "JM":"JAM", "JO":"JOR", "JP":"JPN", "KE":"KEN",
"KG":"KGZ", "KH":"KHM", "KI":"KIR", "KM":"COM", "KN":"KNA",
"KP":"PRK", "KR":"KOR", "XK":"XKX", "KW":"KWT", "KY":"CYM",
"KZ":"KAZ", "LA":"LAO", "LB":"LBN", "LC":"LCA", "LI":"LIE",
"LK":"LKA", "LR":"LBR", "LS":"LSO", "LT":"LTU", "LU":"LUX",
"LV":"LVA", "LY":"LBY", "MA":"MAR", "MC":"MCO", "MD":"MDA",
"ME":"MNE", "MF":"MAF", "MG":"MDG", "MH":"MHL", "MK":"MKD",
"ML":"MLI", "MM":"MMR", "MN":"MNG", "MO":"MAC", "MP":"MNP",
"MQ":"MTQ", "MR":"MRT", "MS":"MSR", "MT":"MLT", "MU":"MUS",
"MV":"MDV", "MW":"MWI", "MX":"MEX", "MY":"MYS", "MZ":"MOZ",
"NA":"NAM", "NC":"NCL", "NE":"NER", "NF":"NFK", "NG":"NGA",
"NI":"NIC", "NL":"NLD", "NO":"NOR", "NP":"NPL", "NR":"NRU",
"NU":"NIU", "NZ":"NZL", "OM":"OMN", "PA":"PAN", "PE":"PER",
"PF":"PYF", "PG":"PNG", "PH":"PHL", "PK":"PAK", "PL":"POL",
"PM":"SPM", "PN":"PCN", "PR":"PRI", "PS":"PSE", "PT":"PRT",
"PW":"PLW", "PY":"PRY", "QA":"QAT", "RE":"REU", "RO":"ROU",
"RS":"SRB", "RU":"RUS", "RW":"RWA", "SA":"SAU", "SB":"SLB",
"SC":"SYC", "SD":"SDN", "SS":"SSD", "SE":"SWE", "SG":"SGP",
"SH":"SHN", "SI":"SVN", "SJ":"SJM", "SK":"SVK", "SL":"SLE",
"SM":"SMR", "SN":"SEN", "SO":"SOM", "SR":"SUR", "ST":"STP",
"SV":"SLV", "SX":"SXM", "SY":"SYR", "SZ":"SWZ", "TC":"TCA",
"TD":"TCD", "TF":"ATF", "TG":"TGO", "TH":"THA", "TJ":"TJK",
"TK":"TKL", "TL":"TLS", "TM":"TKM", "TN":"TUN", "TO":"TON",
"TR":"TUR", "TT":"TTO", "TV":"TUV", "TW":"TWN", "TZ":"TZA",
"UA":"UKR", "UG":"UGA", "UM":"UMI", "US":"USA", "UY":"URY",
"UZ":"UZB", "VA":"VAT", "VC":"VCT", "VE":"VEN", "VG":"VGB",
"VI":"VIR", "VN":"VNM", "VU":"VUT", "WF":"WLF", "WS":"WSM",
"YE":"YEM", "YT":"MYT", "ZA":"ZAF", "ZM":"ZMB", "ZW":"ZWE",
"CS":"SCG", "AN":"ANT"}
try:
iso3c = iso_dict[iso2c]
return iso3c
except KeyError:
print('Bad code: ' + iso2c)
iso3c = "NA"
return iso3c
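# For example, iso_convert("DE") returns "DEU"; unknown two-letter codes are
# reported and mapped to "NA".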
f = open('allCountries.txt', 'rt')
reader = csv.reader(f, delimiter='\t')
def documents(reader, es):
count = 0
for row in reader:
try:
coords = row[4] + "," + row[5]
country_code3 = iso_convert(row[8])
doc = {"geonameid" : row[0],
"name" : row[1],
"asciiname" : row[2],
"alternativenames" : row[3],
"coordinates" : coords, # 4, 5
"feature_class" : row[6],
"feature_code" : row[7],
"country_code2" : row[8],
"country_code3" : country_code3,
"cc2" : row[9],
"admin1_code" : row[10],
"admin2_code" : row[11],
"admin3_code" : row[12],
"admin4_code" : row[13],
"population" : row[14],
"elevation" : row[15],
"dem" : row[16],
"timzeone" : row[17],
"modification_date" : "2014-01-01"
}
yield es.index_op(doc, index='geonames', doc_type='geoname')
except:
count += 1
print 'Exception count:', count
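# Stream the generator through pyelasticsearch's bulk helper: bulk_chunks batches
# the index operations 500 documents at a time and each batch is posted with a
# single bulk request before the index is refreshed.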
chunk_count = 0
for chunk in bulk_chunks(documents(reader, es), docs_per_chunk=500):
es.bulk(chunk)
chunk_count += 1
print 'Chunk count:', chunk_count
es.refresh('geonames')
| johnb30/mordecai | setup/geonames_elasticsearch.py | Python | mit | 5,955 | ["BWA"] | d1d6cd1e966eb3110064f0d54904782fc9d674347360498cf5c6e710f3ab057b |
"""
Basic methods common to all matrices to be used
when creating more advanced matrices (e.g., matrices over rings,
etc.).
"""
from __future__ import print_function, division
from sympy.core.add import Add
from sympy.core.basic import Basic, Atom
from sympy.core.expr import Expr
from sympy.core.symbol import Symbol
from sympy.core.function import count_ops
from sympy.core.singleton import S
from sympy.core.sympify import sympify
from sympy.core.compatibility import is_sequence, default_sort_key, range, \
NotIterable, Iterable
from sympy.simplify import simplify as _simplify, signsimp, nsimplify
from sympy.utilities.iterables import flatten
from sympy.functions import Abs
from sympy.core.compatibility import reduce, as_int, string_types
from sympy.assumptions.refine import refine
from sympy.core.decorators import call_highest_priority
from types import FunctionType
from collections import defaultdict
class MatrixError(Exception):
pass
class ShapeError(ValueError, MatrixError):
"""Wrong matrix shape"""
pass
class NonSquareMatrixError(ShapeError):
pass
class MatrixRequired(object):
"""All subclasses of matrix objects must implement the
required matrix properties listed here."""
rows = None
cols = None
shape = None
_simplify = None
@classmethod
def _new(cls, *args, **kwargs):
"""`_new` must, at minimum, be callable as
`_new(rows, cols, mat) where mat is a flat list of the
elements of the matrix."""
raise NotImplementedError("Subclasses must implement this.")
def __eq__(self, other):
raise NotImplementedError("Subclasses must implement this.")
def __getitem__(self, key):
"""Implementations of __getitem__ should accept ints, in which
case the matrix is indexed as a flat list, tuples (i,j) in which
case the (i,j) entry is returned, slices, or mixed tuples (a,b)
where a and b are any combination of slices and integers."""
raise NotImplementedError("Subclasses must implement this.")
def __len__(self):
"""The total number of entries in the matrix."""
raise NotImplementedError("Subclasses must implement this.")
class MatrixShaping(MatrixRequired):
"""Provides basic matrix shaping and extracting of submatrices"""
def _eval_col_del(self, col):
def entry(i, j):
return self[i, j] if j < col else self[i, j + 1]
return self._new(self.rows, self.cols - 1, entry)
def _eval_col_insert(self, pos, other):
cols = self.cols
def entry(i, j):
if j < pos:
return self[i, j]
elif pos <= j < pos + other.cols:
return other[i, j - pos]
return self[i, j - other.cols]
return self._new(self.rows, self.cols + other.cols,
lambda i, j: entry(i, j))
def _eval_col_join(self, other):
rows = self.rows
def entry(i, j):
if i < rows:
return self[i, j]
return other[i - rows, j]
return classof(self, other)._new(self.rows + other.rows, self.cols,
lambda i, j: entry(i, j))
def _eval_extract(self, rowsList, colsList):
mat = list(self)
cols = self.cols
indices = (i * cols + j for i in rowsList for j in colsList)
return self._new(len(rowsList), len(colsList),
list(mat[i] for i in indices))
def _eval_get_diag_blocks(self):
sub_blocks = []
def recurse_sub_blocks(M):
i = 1
while i <= M.shape[0]:
if i == 1:
to_the_right = M[0, i:]
to_the_bottom = M[i:, 0]
else:
to_the_right = M[:i, i:]
to_the_bottom = M[i:, :i]
if any(to_the_right) or any(to_the_bottom):
i += 1
continue
else:
sub_blocks.append(M[:i, :i])
if M.shape == M[:i, :i].shape:
return
else:
recurse_sub_blocks(M[i:, i:])
return
recurse_sub_blocks(self)
return sub_blocks
def _eval_row_del(self, row):
def entry(i, j):
return self[i, j] if i < row else self[i + 1, j]
return self._new(self.rows - 1, self.cols, entry)
def _eval_row_insert(self, pos, other):
entries = list(self)
insert_pos = pos * self.cols
entries[insert_pos:insert_pos] = list(other)
return self._new(self.rows + other.rows, self.cols, entries)
def _eval_row_join(self, other):
cols = self.cols
def entry(i, j):
if j < cols:
return self[i, j]
return other[i, j - cols]
return classof(self, other)._new(self.rows, self.cols + other.cols,
lambda i, j: entry(i, j))
def _eval_tolist(self):
return [list(self[i,:]) for i in range(self.rows)]
def _eval_vec(self):
rows = self.rows
def entry(n, _):
# we want to read off the columns first
j = n // rows
i = n - j * rows
return self[i, j]
return self._new(len(self), 1, entry)
def col_del(self, col):
"""Delete the specified column."""
if col < 0:
col += self.cols
if not 0 <= col < self.cols:
raise ValueError("Column {} out of range.".format(col))
return self._eval_col_del(col)
def col_insert(self, pos, other):
"""Insert one or more columns at the given column position.
Examples
========
>>> from sympy import zeros, ones
>>> M = zeros(3)
>>> V = ones(3, 1)
>>> M.col_insert(1, V)
Matrix([
[0, 1, 0, 0],
[0, 1, 0, 0],
[0, 1, 0, 0]])
See Also
========
col
row_insert
"""
# Allows you to build a matrix even if it is a null matrix
if not self:
return type(self)(other)
if pos < 0:
pos = self.cols + pos
if pos < 0:
pos = 0
elif pos > self.cols:
pos = self.cols
if self.rows != other.rows:
raise ShapeError(
"self and other must have the same number of rows.")
return self._eval_col_insert(pos, other)
def col_join(self, other):
"""Concatenates two matrices along self's last and other's first row.
Examples
========
>>> from sympy import zeros, ones
>>> M = zeros(3)
>>> V = ones(1, 3)
>>> M.col_join(V)
Matrix([
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[1, 1, 1]])
See Also
========
col
row_join
"""
# A null matrix can always be stacked (see #10770)
if self.rows == 0 and self.cols != other.cols:
return self._new(0, other.cols, []).col_join(other)
if self.cols != other.cols:
raise ShapeError(
"`self` and `other` must have the same number of columns.")
return self._eval_col_join(other)
def col(self, j):
"""Elementary column selector.
Examples
========
>>> from sympy import eye
>>> eye(2).col(0)
Matrix([
[1],
[0]])
See Also
========
row
col_op
col_swap
col_del
col_join
col_insert
"""
return self[:, j]
def extract(self, rowsList, colsList):
"""Return a submatrix by specifying a list of rows and columns.
Negative indices can be given. All indices must be in the range
-n <= i < n where n is the number of rows or columns.
Examples
========
>>> from sympy import Matrix
>>> m = Matrix(4, 3, range(12))
>>> m
Matrix([
[0, 1, 2],
[3, 4, 5],
[6, 7, 8],
[9, 10, 11]])
>>> m.extract([0, 1, 3], [0, 1])
Matrix([
[0, 1],
[3, 4],
[9, 10]])
Rows or columns can be repeated:
>>> m.extract([0, 0, 1], [-1])
Matrix([
[2],
[2],
[5]])
Every other row can be taken by using range to provide the indices:
>>> m.extract(range(0, m.rows, 2), [-1])
Matrix([
[2],
[8]])
rowsList or colsList can also be a list of booleans, in which case
the rows or columns corresponding to the True values will be selected:
>>> m.extract([0, 1, 2, 3], [True, False, True])
Matrix([
[0, 2],
[3, 5],
[6, 8],
[9, 11]])
"""
if not is_sequence(rowsList) or not is_sequence(colsList):
raise TypeError("rowsList and colsList must be iterable")
# ensure rowsList and colsList are lists of integers
if rowsList and all(isinstance(i, bool) for i in rowsList):
rowsList = [index for index, item in enumerate(rowsList) if item]
if colsList and all(isinstance(i, bool) for i in colsList):
colsList = [index for index, item in enumerate(colsList) if item]
# ensure everything is in range
rowsList = [a2idx(k, self.rows) for k in rowsList]
colsList = [a2idx(k, self.cols) for k in colsList]
return self._eval_extract(rowsList, colsList)
def get_diag_blocks(self):
"""Obtains the square sub-matrices on the main diagonal of a square matrix.
Useful for inverting symbolic matrices or solving systems of
linear equations which may be decoupled by having a block diagonal
structure.
Examples
========
>>> from sympy import Matrix
>>> from sympy.abc import x, y, z
>>> A = Matrix([[1, 3, 0, 0], [y, z*z, 0, 0], [0, 0, x, 0], [0, 0, 0, 0]])
>>> a1, a2, a3 = A.get_diag_blocks()
>>> a1
Matrix([
[1, 3],
[y, z**2]])
>>> a2
Matrix([[x]])
>>> a3
Matrix([[0]])
"""
return self._eval_get_diag_blocks()
@classmethod
def hstack(cls, *args):
"""Return a matrix formed by joining args horizontally (i.e.
by repeated application of row_join).
Examples
========
>>> from sympy.matrices import Matrix, eye
>>> Matrix.hstack(eye(2), 2*eye(2))
Matrix([
[1, 0, 2, 0],
[0, 1, 0, 2]])
"""
if len(args) == 0:
return cls._new()
kls = type(args[0])
return reduce(kls.row_join, args)
def reshape(self, rows, cols):
"""Reshape the matrix. Total number of elements must remain the same.
Examples
========
>>> from sympy import Matrix
>>> m = Matrix(2, 3, lambda i, j: 1)
>>> m
Matrix([
[1, 1, 1],
[1, 1, 1]])
>>> m.reshape(1, 6)
Matrix([[1, 1, 1, 1, 1, 1]])
>>> m.reshape(3, 2)
Matrix([
[1, 1],
[1, 1],
[1, 1]])
"""
if self.rows * self.cols != rows * cols:
raise ValueError("Invalid reshape parameters %d %d" % (rows, cols))
return self._new(rows, cols, lambda i, j: self[i * cols + j])
def row_del(self, row):
"""Delete the specified row."""
if row < 0:
row += self.rows
if not 0 <= row < self.rows:
raise ValueError("Row {} out of range.".format(row))
return self._eval_row_del(row)
def row_insert(self, pos, other):
"""Insert one or more rows at the given row position.
Examples
========
>>> from sympy import zeros, ones
>>> M = zeros(3)
>>> V = ones(1, 3)
>>> M.row_insert(1, V)
Matrix([
[0, 0, 0],
[1, 1, 1],
[0, 0, 0],
[0, 0, 0]])
See Also
========
row
col_insert
"""
from sympy.matrices import MutableMatrix
# Allows you to build a matrix even if it is a null matrix
if not self:
return self._new(other)
if pos < 0:
pos = self.rows + pos
if pos < 0:
pos = 0
elif pos > self.rows:
pos = self.rows
if self.cols != other.cols:
raise ShapeError(
"`self` and `other` must have the same number of columns.")
return self._eval_row_insert(pos, other)
def row_join(self, other):
"""Concatenates two matrices along self's last and rhs's first column
Examples
========
>>> from sympy import zeros, ones
>>> M = zeros(3)
>>> V = ones(3, 1)
>>> M.row_join(V)
Matrix([
[0, 0, 0, 1],
[0, 0, 0, 1],
[0, 0, 0, 1]])
See Also
========
row
col_join
"""
# A null matrix can always be stacked (see #10770)
if self.cols == 0 and self.rows != other.rows:
return self._new(other.rows, 0, []).row_join(other)
if self.rows != other.rows:
raise ShapeError(
"`self` and `rhs` must have the same number of rows.")
return self._eval_row_join(other)
def row(self, i):
"""Elementary row selector.
Examples
========
>>> from sympy import eye
>>> eye(2).row(0)
Matrix([[1, 0]])
See Also
========
col
row_op
row_swap
row_del
row_join
row_insert
"""
return self[i, :]
@property
def shape(self):
"""The shape (dimensions) of the matrix as the 2-tuple (rows, cols).
Examples
========
>>> from sympy.matrices import zeros
>>> M = zeros(2, 3)
>>> M.shape
(2, 3)
>>> M.rows
2
>>> M.cols
3
"""
return (self.rows, self.cols)
def tolist(self):
"""Return the Matrix as a nested Python list.
Examples
========
>>> from sympy import Matrix, ones
>>> m = Matrix(3, 3, range(9))
>>> m
Matrix([
[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> m.tolist()
[[0, 1, 2], [3, 4, 5], [6, 7, 8]]
>>> ones(3, 0).tolist()
[[], [], []]
When there are no rows then it will not be possible to tell how
many columns were in the original matrix:
>>> ones(0, 3).tolist()
[]
"""
if not self.rows:
return []
if not self.cols:
return [[] for i in range(self.rows)]
return self._eval_tolist()
def vec(self):
"""Return the Matrix converted into a one column matrix by stacking columns
Examples
========
>>> from sympy import Matrix
>>> m=Matrix([[1, 3], [2, 4]])
>>> m
Matrix([
[1, 3],
[2, 4]])
>>> m.vec()
Matrix([
[1],
[2],
[3],
[4]])
See Also
========
vech
"""
return self._eval_vec()
@classmethod
def vstack(cls, *args):
"""Return a matrix formed by joining args vertically (i.e.
by repeated application of col_join).
Examples
========
>>> from sympy.matrices import Matrix, eye
>>> Matrix.vstack(eye(2), 2*eye(2))
Matrix([
[1, 0],
[0, 1],
[2, 0],
[0, 2]])
"""
if len(args) == 0:
return cls._new()
kls = type(args[0])
return reduce(kls.col_join, args)
class MatrixSpecial(MatrixRequired):
"""Construction of special matrices"""
@classmethod
def _eval_diag(cls, rows, cols, diag_dict):
"""diag_dict is a defaultdict containing
all the entries of the diagonal matrix."""
def entry(i, j):
return diag_dict[(i,j)]
return cls._new(rows, cols, entry)
@classmethod
def _eval_eye(cls, rows, cols):
def entry(i, j):
return S.One if i == j else S.Zero
return cls._new(rows, cols, entry)
@classmethod
def _eval_jordan_block(cls, rows, cols, eigenvalue, band='upper'):
if band == 'lower':
def entry(i, j):
if i == j:
return eigenvalue
elif j + 1 == i:
return S.One
return S.Zero
else:
def entry(i, j):
if i == j:
return eigenvalue
elif i + 1 == j:
return S.One
return S.Zero
return cls._new(rows, cols, entry)
@classmethod
def _eval_ones(cls, rows, cols):
def entry(i, j):
return S.One
return cls._new(rows, cols, entry)
@classmethod
def _eval_zeros(cls, rows, cols):
def entry(i, j):
return S.Zero
return cls._new(rows, cols, entry)
@classmethod
def diag(kls, *args, **kwargs):
"""Returns a matrix with the specified diagonal.
If matrices are passed, a block-diagonal matrix
is created.
kwargs
======
rows : rows of the resulting matrix; computed if
not given.
cols : columns of the resulting matrix; computed if
not given.
cls : class for the resulting matrix
Examples
========
>>> from sympy.matrices import Matrix
>>> Matrix.diag(1, 2, 3)
Matrix([
[1, 0, 0],
[0, 2, 0],
[0, 0, 3]])
>>> Matrix.diag([1, 2, 3])
Matrix([
[1, 0, 0],
[0, 2, 0],
[0, 0, 3]])
The diagonal elements can be matrices; diagonal filling will
continue on the diagonal from the last element of the matrix:
>>> from sympy.abc import x, y, z
>>> a = Matrix([x, y, z])
>>> b = Matrix([[1, 2], [3, 4]])
>>> c = Matrix([[5, 6]])
>>> Matrix.diag(a, 7, b, c)
Matrix([
[x, 0, 0, 0, 0, 0],
[y, 0, 0, 0, 0, 0],
[z, 0, 0, 0, 0, 0],
[0, 7, 0, 0, 0, 0],
[0, 0, 1, 2, 0, 0],
[0, 0, 3, 4, 0, 0],
[0, 0, 0, 0, 5, 6]])
A given band off the diagonal can be made by padding with a
vertical or horizontal "kerning" vector:
>>> hpad = Matrix(0, 2, [])
>>> vpad = Matrix(2, 0, [])
>>> Matrix.diag(vpad, 1, 2, 3, hpad) + Matrix.diag(hpad, 4, 5, 6, vpad)
Matrix([
[0, 0, 4, 0, 0],
[0, 0, 0, 5, 0],
[1, 0, 0, 0, 6],
[0, 2, 0, 0, 0],
[0, 0, 3, 0, 0]])
The type of the resulting matrix can be affected with the ``cls``
keyword.
>>> type(Matrix.diag(1))
<class 'sympy.matrices.dense.MutableDenseMatrix'>
>>> from sympy.matrices import ImmutableMatrix
>>> type(Matrix.diag(1, cls=ImmutableMatrix))
<class 'sympy.matrices.immutable.ImmutableDenseMatrix'>
"""
klass = kwargs.get('cls', kls)
# allow a sequence to be passed in as the only argument
if len(args) == 1 and is_sequence(args[0]) and not getattr(args[0], 'is_Matrix', False):
args = args[0]
def size(m):
"""Compute the size of the diagonal block"""
if hasattr(m, 'rows'):
return m.rows, m.cols
return 1, 1
diag_rows = sum(size(m)[0] for m in args)
diag_cols = sum(size(m)[1] for m in args)
rows = kwargs.get('rows', diag_rows)
cols = kwargs.get('cols', diag_cols)
if rows < diag_rows or cols < diag_cols:
raise ValueError("A {} x {} diagonal matrix cannot accommodate a "
"diagonal of size at least {} x {}.".format(rows, cols,
diag_rows, diag_cols))
# fill a default dict with the diagonal entries
diag_entries = defaultdict(lambda: S.Zero)
row_pos, col_pos = 0, 0
for m in args:
if hasattr(m, 'rows'):
# in this case, we're a matrix
for i in range(m.rows):
for j in range(m.cols):
diag_entries[(i + row_pos, j + col_pos)] = m[i, j]
row_pos += m.rows
col_pos += m.cols
else:
# in this case, we're a single value
diag_entries[(row_pos, col_pos)] = m
row_pos += 1
col_pos += 1
return klass._eval_diag(rows, cols, diag_entries)
@classmethod
def eye(kls, rows, cols=None, **kwargs):
"""Returns an identity matrix.
Args
====
rows : rows of the matrix
cols : cols of the matrix (if None, cols=rows)
kwargs
======
cls : class of the returned matrix
"""
if cols is None:
cols = rows
klass = kwargs.get('cls', kls)
rows, cols = as_int(rows), as_int(cols)
return klass._eval_eye(rows, cols)
@classmethod
def jordan_block(kls, *args, **kwargs):
"""Returns a Jordan block with the specified size
and eigenvalue. You may call `jordan_block` with
two args (size, eigenvalue) or with keyword arguments.
kwargs
======
size : rows and columns of the matrix
rows : rows of the matrix (if None, rows=size)
cols : cols of the matrix (if None, cols=size)
eigenvalue : value on the diagonal of the matrix
band : position of off-diagonal 1s. May be 'upper' or
'lower'. (Default: 'upper')
cls : class of the returned matrix
Examples
========
>>> from sympy import Matrix
>>> from sympy.abc import x
>>> Matrix.jordan_block(4, x)
Matrix([
[x, 1, 0, 0],
[0, x, 1, 0],
[0, 0, x, 1],
[0, 0, 0, x]])
>>> Matrix.jordan_block(4, x, band='lower')
Matrix([
[x, 0, 0, 0],
[1, x, 0, 0],
[0, 1, x, 0],
[0, 0, 1, x]])
>>> Matrix.jordan_block(size=4, eigenvalue=x)
Matrix([
[x, 1, 0, 0],
[0, x, 1, 0],
[0, 0, x, 1],
[0, 0, 0, x]])
"""
klass = kwargs.get('cls', kls)
size, eigenvalue = None, None
if len(args) == 2:
size, eigenvalue = args
elif len(args) == 1:
size = args[0]
elif len(args) != 0:
raise ValueError("'jordan_block' accepts 0, 1, or 2 arguments, not {}".format(len(args)))
rows, cols = kwargs.get('rows', None), kwargs.get('cols', None)
size = kwargs.get('size', size)
band = kwargs.get('band', 'upper')
# allow for a shortened form of `eigenvalue`
eigenvalue = kwargs.get('eigenval', eigenvalue)
eigenvalue = kwargs.get('eigenvalue', eigenvalue)
if eigenvalue is None:
raise ValueError("Must supply an eigenvalue")
if (size, rows, cols) == (None, None, None):
raise ValueError("Must supply a matrix size")
if size is not None:
rows, cols = size, size
elif rows is not None and cols is None:
cols = rows
elif cols is not None and rows is None:
rows = cols
rows, cols = as_int(rows), as_int(cols)
return klass._eval_jordan_block(rows, cols, eigenvalue, band)
@classmethod
def ones(kls, rows, cols=None, **kwargs):
"""Returns a matrix of ones.
Args
====
rows : rows of the matrix
cols : cols of the matrix (if None, cols=rows)
kwargs
======
cls : class of the returned matrix
"""
if cols is None:
cols = rows
klass = kwargs.get('cls', kls)
rows, cols = as_int(rows), as_int(cols)
return klass._eval_ones(rows, cols)
@classmethod
def zeros(kls, rows, cols=None, **kwargs):
"""Returns a matrix of zeros.
Args
====
rows : rows of the matrix
cols : cols of the matrix (if None, cols=rows)
kwargs
======
cls : class of the returned matrix
"""
if cols is None:
cols = rows
klass = kwargs.get('cls', kls)
rows, cols = as_int(rows), as_int(cols)
return klass._eval_zeros(rows, cols)
class MatrixProperties(MatrixRequired):
"""Provides basic properties of a matrix."""
def _eval_atoms(self, *types):
result = set()
for i in self:
result.update(i.atoms(*types))
return result
def _eval_free_symbols(self):
return set().union(*(i.free_symbols for i in self))
def _eval_has(self, *patterns):
return any(a.has(*patterns) for a in self)
def _eval_is_anti_symmetric(self, simpfunc):
if not all(simpfunc(self[i, j] + self[j, i]).is_zero for i in range(self.rows) for j in range(self.cols)):
return False
return True
def _eval_is_diagonal(self):
for i in range(self.rows):
for j in range(self.cols):
if i != j and self[i, j]:
return False
return True
# _eval_is_hermitian is called by some general sympy
# routines and has a different *args signature. Make
# sure the names don't clash by adding `_matrix_` to the name.
def _eval_is_matrix_hermitian(self, simpfunc):
mat = self._new(self.rows, self.cols, lambda i, j: simpfunc(self[i, j] - self[j, i].conjugate()))
return mat.is_zero
def _eval_is_Identity(self):
def dirac(i, j):
if i == j:
return 1
return 0
return all(self[i, j] == dirac(i, j) for i in range(self.rows) for j in
range(self.cols))
def _eval_is_lower_hessenberg(self):
return all(self[i, j].is_zero
for i in range(self.rows)
for j in range(i + 2, self.cols))
def _eval_is_lower(self):
return all(self[i, j].is_zero
for i in range(self.rows)
for j in range(i + 1, self.cols))
def _eval_is_symbolic(self):
return self.has(Symbol)
def _eval_is_symmetric(self, simpfunc):
mat = self._new(self.rows, self.cols, lambda i, j: simpfunc(self[i, j] - self[j, i]))
return mat.is_zero
def _eval_is_zero(self):
if any(i.is_zero == False for i in self):
return False
if any(i.is_zero == None for i in self):
return None
return True
def _eval_is_upper_hessenberg(self):
return all(self[i, j].is_zero
for i in range(2, self.rows)
for j in range(min(self.cols, (i - 1))))
def _eval_values(self):
return [i for i in self if not i.is_zero]
def atoms(self, *types):
"""Returns the atoms that form the current object.
Examples
========
>>> from sympy.abc import x, y
>>> from sympy.matrices import Matrix
>>> Matrix([[x]])
Matrix([[x]])
>>> _.atoms()
{x}
"""
types = tuple(t if isinstance(t, type) else type(t) for t in types)
if not types:
types = (Atom,)
return self._eval_atoms(*types)
@property
def free_symbols(self):
"""Returns the free symbols within the matrix.
Examples
========
>>> from sympy.abc import x
>>> from sympy.matrices import Matrix
>>> Matrix([[x], [1]]).free_symbols
{x}
"""
return self._eval_free_symbols()
def has(self, *patterns):
"""Test whether any subexpression matches any of the patterns.
Examples
========
>>> from sympy import Matrix, SparseMatrix, Float
>>> from sympy.abc import x, y
>>> A = Matrix(((1, x), (0.2, 3)))
>>> B = SparseMatrix(((1, x), (0.2, 3)))
>>> A.has(x)
True
>>> A.has(y)
False
>>> A.has(Float)
True
>>> B.has(x)
True
>>> B.has(y)
False
>>> B.has(Float)
True
"""
return self._eval_has(*patterns)
def is_anti_symmetric(self, simplify=True):
"""Check if matrix M is an antisymmetric matrix,
that is, M is a square matrix with all M[i, j] == -M[j, i].
When ``simplify=True`` (default), the sum M[i, j] + M[j, i] is
simplified before testing to see if it is zero. By default,
the SymPy simplify function is used. To use a custom function
set simplify to a function that accepts a single argument which
returns a simplified expression. To skip simplification, set
simplify to False but note that although this will be faster,
it may induce false negatives.
Examples
========
>>> from sympy import Matrix, symbols
>>> m = Matrix(2, 2, [0, 1, -1, 0])
>>> m
Matrix([
[ 0, 1],
[-1, 0]])
>>> m.is_anti_symmetric()
True
>>> x, y = symbols('x y')
>>> m = Matrix(2, 3, [0, 0, x, -y, 0, 0])
>>> m
Matrix([
[ 0, 0, x],
[-y, 0, 0]])
>>> m.is_anti_symmetric()
False
>>> from sympy.abc import x, y
>>> m = Matrix(3, 3, [0, x**2 + 2*x + 1, y,
... -(x + 1)**2 , 0, x*y,
... -y, -x*y, 0])
Simplification of matrix elements is done by default so even
though two elements which should be equal and opposite wouldn't
pass an equality test, the matrix is still reported as
anti-symmetric:
>>> m[0, 1] == -m[1, 0]
False
>>> m.is_anti_symmetric()
True
If 'simplify=False' is used for the case when a Matrix is already
simplified, this will speed things up. Here, we see that without
simplification the matrix does not appear anti-symmetric:
>>> m.is_anti_symmetric(simplify=False)
False
But if the matrix were already expanded, then it would appear
anti-symmetric and simplification in the is_anti_symmetric routine
is not needed:
>>> m = m.expand()
>>> m.is_anti_symmetric(simplify=False)
True
"""
# accept custom simplification
simpfunc = simplify
if not isinstance(simplify, FunctionType):
simpfunc = _simplify if simplify else lambda x: x
if not self.is_square:
return False
return self._eval_is_anti_symmetric(simpfunc)
def is_diagonal(self):
"""Check if matrix is diagonal,
that is, a matrix in which the entries outside the main diagonal are all zero.
Examples
========
>>> from sympy import Matrix, diag
>>> m = Matrix(2, 2, [1, 0, 0, 2])
>>> m
Matrix([
[1, 0],
[0, 2]])
>>> m.is_diagonal()
True
>>> m = Matrix(2, 2, [1, 1, 0, 2])
>>> m
Matrix([
[1, 1],
[0, 2]])
>>> m.is_diagonal()
False
>>> m = diag(1, 2, 3)
>>> m
Matrix([
[1, 0, 0],
[0, 2, 0],
[0, 0, 3]])
>>> m.is_diagonal()
True
See Also
========
is_lower
is_upper
is_diagonalizable
diagonalize
"""
return self._eval_is_diagonal()
@property
def is_hermitian(self, simplify=True):
"""Checks if the matrix is Hermitian.
In a Hermitian matrix element i,j is the complex conjugate of
element j,i.
Examples
========
>>> from sympy.matrices import Matrix
>>> from sympy import I
>>> from sympy.abc import x
>>> a = Matrix([[1, I], [-I, 1]])
>>> a
Matrix([
[ 1, I],
[-I, 1]])
>>> a.is_hermitian
True
>>> a[0, 0] = 2*I
>>> a.is_hermitian
False
>>> a[0, 0] = x
>>> a.is_hermitian
>>> a[0, 1] = a[1, 0]*I
>>> a.is_hermitian
False
"""
if not self.is_square:
return False
simpfunc = simplify
if not isinstance(simplify, FunctionType):
simpfunc = _simplify if simplify else lambda x: x
return self._eval_is_matrix_hermitian(simpfunc)
@property
def is_Identity(self):
if not self.is_square:
return False
return self._eval_is_Identity()
@property
def is_lower_hessenberg(self):
r"""Checks if the matrix is in the lower-Hessenberg form.
The lower hessenberg matrix has zero entries
above the first superdiagonal.
Examples
========
>>> from sympy.matrices import Matrix
>>> a = Matrix([[1, 2, 0, 0], [5, 2, 3, 0], [3, 4, 3, 7], [5, 6, 1, 1]])
>>> a
Matrix([
[1, 2, 0, 0],
[5, 2, 3, 0],
[3, 4, 3, 7],
[5, 6, 1, 1]])
>>> a.is_lower_hessenberg
True
See Also
========
is_upper_hessenberg
is_lower
"""
return self._eval_is_lower_hessenberg()
@property
def is_lower(self):
"""Check if matrix is a lower triangular matrix. True can be returned
even if the matrix is not square.
Examples
========
>>> from sympy import Matrix
>>> m = Matrix(2, 2, [1, 0, 0, 1])
>>> m
Matrix([
[1, 0],
[0, 1]])
>>> m.is_lower
True
>>> m = Matrix(4, 3, [0, 0, 0, 2, 0, 0, 1, 4 , 0, 6, 6, 5])
>>> m
Matrix([
[0, 0, 0],
[2, 0, 0],
[1, 4, 0],
[6, 6, 5]])
>>> m.is_lower
True
>>> from sympy.abc import x, y
>>> m = Matrix(2, 2, [x**2 + y, y**2 + x, 0, x + y])
>>> m
Matrix([
[x**2 + y, x + y**2],
[ 0, x + y]])
>>> m.is_lower
False
See Also
========
is_upper
is_diagonal
is_lower_hessenberg
"""
return self._eval_is_lower()
@property
def is_square(self):
"""Checks if a matrix is square.
A matrix is square if the number of rows equals the number of columns.
The empty matrix is square by definition, since the number of rows and
the number of columns are both zero.
Examples
========
>>> from sympy import Matrix
>>> a = Matrix([[1, 2, 3], [4, 5, 6]])
>>> b = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> c = Matrix([])
>>> a.is_square
False
>>> b.is_square
True
>>> c.is_square
True
"""
return self.rows == self.cols
def is_symbolic(self):
"""Checks if any elements contain Symbols.
Examples
========
>>> from sympy.matrices import Matrix
>>> from sympy.abc import x, y
>>> M = Matrix([[x, y], [1, 0]])
>>> M.is_symbolic()
True
"""
return self._eval_is_symbolic()
def is_symmetric(self, simplify=True):
"""Check if matrix is symmetric matrix,
that is, a square matrix which is equal to its transpose.
By default, simplifications occur before testing symmetry.
They can be skipped using 'simplify=False'; while speeding things a bit,
this may however induce false negatives.
Examples
========
>>> from sympy import Matrix
>>> m = Matrix(2, 2, [0, 1, 1, 2])
>>> m
Matrix([
[0, 1],
[1, 2]])
>>> m.is_symmetric()
True
>>> m = Matrix(2, 2, [0, 1, 2, 0])
>>> m
Matrix([
[0, 1],
[2, 0]])
>>> m.is_symmetric()
False
>>> m = Matrix(2, 3, [0, 0, 0, 0, 0, 0])
>>> m
Matrix([
[0, 0, 0],
[0, 0, 0]])
>>> m.is_symmetric()
False
>>> from sympy.abc import x, y
>>> m = Matrix(3, 3, [1, x**2 + 2*x + 1, y, (x + 1)**2 , 2, 0, y, 0, 3])
>>> m
Matrix([
[ 1, x**2 + 2*x + 1, y],
[(x + 1)**2, 2, 0],
[ y, 0, 3]])
>>> m.is_symmetric()
True
If the matrix is already simplified, you may speed up the is_symmetric()
test by using 'simplify=False'.
>>> bool(m.is_symmetric(simplify=False))
False
>>> m1 = m.expand()
>>> m1.is_symmetric(simplify=False)
True
"""
simpfunc = simplify
if not isinstance(simplify, FunctionType):
simpfunc = _simplify if simplify else lambda x: x
if not self.is_square:
return False
return self._eval_is_symmetric(simpfunc)
@property
def is_upper_hessenberg(self):
"""Checks if the matrix is in the upper-Hessenberg form.
The upper hessenberg matrix has zero entries
below the first subdiagonal.
Examples
========
>>> from sympy.matrices import Matrix
>>> a = Matrix([[1, 4, 2, 3], [3, 4, 1, 7], [0, 2, 3, 4], [0, 0, 1, 3]])
>>> a
Matrix([
[1, 4, 2, 3],
[3, 4, 1, 7],
[0, 2, 3, 4],
[0, 0, 1, 3]])
>>> a.is_upper_hessenberg
True
See Also
========
is_lower_hessenberg
is_upper
"""
return self._eval_is_upper_hessenberg()
@property
def is_upper(self):
"""Check if matrix is an upper triangular matrix. True can be returned
even if the matrix is not square.
Examples
========
>>> from sympy import Matrix
>>> m = Matrix(2, 2, [1, 0, 0, 1])
>>> m
Matrix([
[1, 0],
[0, 1]])
>>> m.is_upper
True
>>> m = Matrix(4, 3, [5, 1, 9, 0, 4 , 6, 0, 0, 5, 0, 0, 0])
>>> m
Matrix([
[5, 1, 9],
[0, 4, 6],
[0, 0, 5],
[0, 0, 0]])
>>> m.is_upper
True
>>> m = Matrix(2, 3, [4, 2, 5, 6, 1, 1])
>>> m
Matrix([
[4, 2, 5],
[6, 1, 1]])
>>> m.is_upper
False
See Also
========
is_lower
is_diagonal
is_upper_hessenberg
"""
return all(self[i, j].is_zero
for i in range(1, self.rows)
for j in range(min(i, self.cols)))
@property
def is_zero(self):
"""Checks if a matrix is a zero matrix.
A matrix is zero if every element is zero. A matrix need not be square
to be considered zero. The empty matrix is zero by the principle of
vacuous truth. For a matrix that may or may not be zero (e.g.
contains a symbol), this will be None.
Examples
========
>>> from sympy import Matrix, zeros
>>> from sympy.abc import x
>>> a = Matrix([[0, 0], [0, 0]])
>>> b = zeros(3, 4)
>>> c = Matrix([[0, 1], [0, 0]])
>>> d = Matrix([])
>>> e = Matrix([[x, 0], [0, 0]])
>>> a.is_zero
True
>>> b.is_zero
True
>>> c.is_zero
False
>>> d.is_zero
True
>>> e.is_zero
"""
return self._eval_is_zero()
def values(self):
"""Return non-zero values of self."""
return self._eval_values()
class MatrixOperations(MatrixRequired):
"""Provides basic matrix shape and elementwise
operations. Should not be instantiated directly."""
def _eval_adjoint(self):
return self.transpose().conjugate()
def _eval_applyfunc(self, f):
out = self._new(self.rows, self.cols, [f(x) for x in self])
return out
def _eval_as_real_imag(self):
from sympy.functions.elementary.complexes import re, im
return (self.applyfunc(re), self.applyfunc(im))
def _eval_conjugate(self):
return self.applyfunc(lambda x: x.conjugate())
def _eval_permute_cols(self, perm):
# apply the permutation to a list
mapping = list(perm)
def entry(i, j):
return self[i, mapping[j]]
return self._new(self.rows, self.cols, entry)
def _eval_permute_rows(self, perm):
# apply the permutation to a list
mapping = list(perm)
def entry(i, j):
return self[mapping[i], j]
return self._new(self.rows, self.cols, entry)
def _eval_trace(self):
return sum(self[i, i] for i in range(self.rows))
def _eval_transpose(self):
return self._new(self.cols, self.rows, lambda i, j: self[j, i])
def adjoint(self):
"""Conjugate transpose or Hermitian conjugation."""
return self._eval_adjoint()
def applyfunc(self, f):
"""Apply a function to each element of the matrix.
Examples
========
>>> from sympy import Matrix
>>> m = Matrix(2, 2, lambda i, j: i*2+j)
>>> m
Matrix([
[0, 1],
[2, 3]])
>>> m.applyfunc(lambda i: 2*i)
Matrix([
[0, 2],
[4, 6]])
"""
if not callable(f):
raise TypeError("`f` must be callable.")
return self._eval_applyfunc(f)
def as_real_imag(self):
"""Returns a tuple containing the (real, imaginary) parts of the matrix."""
return self._eval_as_real_imag()
def conjugate(self):
"""Return the by-element conjugation.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> from sympy import I
>>> a = SparseMatrix(((1, 2 + I), (3, 4), (I, -I)))
>>> a
Matrix([
[1, 2 + I],
[3, 4],
[I, -I]])
>>> a.C
Matrix([
[ 1, 2 - I],
[ 3, 4],
[-I, I]])
See Also
========
transpose: Matrix transposition
H: Hermite conjugation
D: Dirac conjugation
"""
return self._eval_conjugate()
def doit(self, **kwargs):
return self.applyfunc(lambda x: x.doit())
def evalf(self, prec=None, **options):
"""Apply evalf() to each element of self."""
return self.applyfunc(lambda i: i.evalf(prec, **options))
def expand(self, deep=True, modulus=None, power_base=True, power_exp=True,
mul=True, log=True, multinomial=True, basic=True, **hints):
"""Apply core.function.expand to each entry of the matrix.
Examples
========
>>> from sympy.abc import x
>>> from sympy.matrices import Matrix
>>> Matrix(1, 1, [x*(x+1)])
Matrix([[x*(x + 1)]])
>>> _.expand()
Matrix([[x**2 + x]])
"""
return self.applyfunc(lambda x: x.expand(
deep, modulus, power_base, power_exp, mul, log, multinomial, basic,
**hints))
@property
def H(self):
"""Return Hermite conjugate.
Examples
========
>>> from sympy import Matrix, I
>>> m = Matrix((0, 1 + I, 2, 3))
>>> m
Matrix([
[ 0],
[1 + I],
[ 2],
[ 3]])
>>> m.H
Matrix([[0, 1 - I, 2, 3]])
See Also
========
conjugate: By-element conjugation
D: Dirac conjugation
"""
return self.T.C
def permute(self, perm, orientation='rows', direction='forward'):
"""Permute the rows or columns of a matrix by the given list of swaps.
Parameters
==========
perm : a permutation. This may be a list of swaps (e.g., `[[1, 2], [0, 3]]`),
or any valid input to the `Permutation` constructor, including a `Permutation()`
itself. If `perm` is given explicitly as a list of indices or a `Permutation`,
`direction` has no effect.
orientation : ('rows' or 'cols') whether to permute the rows or the columns
direction : ('forward', 'backward') whether to apply the permutations from
the start of the list first, or from the back of the list first
Examples
========
>>> from sympy.matrices import eye
>>> M = eye(3)
>>> M.permute([[0, 1], [0, 2]], orientation='rows', direction='forward')
Matrix([
[0, 0, 1],
[1, 0, 0],
[0, 1, 0]])
>>> from sympy.matrices import eye
>>> M = eye(3)
>>> M.permute([[0, 1], [0, 2]], orientation='rows', direction='backward')
Matrix([
[0, 1, 0],
[0, 0, 1],
[1, 0, 0]])
"""
# allow british variants and `columns`
if direction == 'forwards':
direction = 'forward'
if direction == 'backwards':
direction = 'backward'
if orientation == 'columns':
orientation = 'cols'
if direction not in ('forward', 'backward'):
raise TypeError("direction='{}' is an invalid kwarg. "
"Try 'forward' or 'backward'".format(direction))
if orientation not in ('rows', 'cols'):
raise TypeError("orientation='{}' is an invalid kwarg. "
"Try 'rows' or 'cols'".format(orientation))
# ensure all swaps are in range
max_index = self.rows if orientation == 'rows' else self.cols
if not all(0 <= t <= max_index for t in flatten(list(perm))):
raise IndexError("`swap` indices out of range.")
# see if we are a list of pairs
try:
assert len(perm[0]) == 2
# we are a list of swaps, so `direction` matters
if direction == 'backward':
perm = reversed(perm)
# since Permutation doesn't let us have non-disjoint cycles,
# we'll construct the explicit mapping ourselves XXX Bug #12479
mapping = list(range(max_index))
for (i, j) in perm:
mapping[i], mapping[j] = mapping[j], mapping[i]
perm = mapping
except (TypeError, AssertionError, IndexError):
pass
from sympy.combinatorics import Permutation
perm = Permutation(perm, size=max_index)
if orientation == 'rows':
return self._eval_permute_rows(perm)
if orientation == 'cols':
return self._eval_permute_cols(perm)
def permute_cols(self, swaps, direction='forward'):
"""Alias for `self.permute(swaps, orientation='cols', direction=direction)`
See Also
========
permute
"""
return self.permute(swaps, orientation='cols', direction=direction)
def permute_rows(self, swaps, direction='forward'):
"""Alias for `self.permute(swaps, orientation='rows', direction=direction)`
See Also
========
permute
"""
return self.permute(swaps, orientation='rows', direction=direction)
def refine(self, assumptions=True):
"""Apply refine to each element of the matrix.
Examples
========
>>> from sympy import Symbol, Matrix, Abs, sqrt, Q
>>> x = Symbol('x')
>>> Matrix([[Abs(x)**2, sqrt(x**2)],[sqrt(x**2), Abs(x)**2]])
Matrix([
[ Abs(x)**2, sqrt(x**2)],
[sqrt(x**2), Abs(x)**2]])
>>> _.refine(Q.real(x))
Matrix([
[ x**2, Abs(x)],
[Abs(x), x**2]])
"""
return self.applyfunc(lambda x: refine(x, assumptions))
def replace(self, F, G, map=False):
"""Replaces Function F in Matrix entries with Function G.
Examples
========
>>> from sympy import symbols, Function, Matrix
>>> F, G = symbols('F, G', cls=Function)
>>> M = Matrix(2, 2, lambda i, j: F(i+j)) ; M
Matrix([
[F(0), F(1)],
[F(1), F(2)]])
>>> N = M.replace(F,G)
>>> N
Matrix([
[G(0), G(1)],
[G(1), G(2)]])
"""
return self.applyfunc(lambda x: x.replace(F, G, map))
def simplify(self, ratio=1.7, measure=count_ops):
"""Apply simplify to each element of the matrix.
Examples
========
>>> from sympy.abc import x, y
>>> from sympy import sin, cos
>>> from sympy.matrices import SparseMatrix
>>> SparseMatrix(1, 1, [x*sin(y)**2 + x*cos(y)**2])
Matrix([[x*sin(y)**2 + x*cos(y)**2]])
>>> _.simplify()
Matrix([[x]])
"""
return self.applyfunc(lambda x: x.simplify(ratio, measure))
def subs(self, *args, **kwargs): # should mirror core.basic.subs
"""Return a new matrix with subs applied to each entry.
Examples
========
>>> from sympy.abc import x, y
>>> from sympy.matrices import SparseMatrix, Matrix
>>> SparseMatrix(1, 1, [x])
Matrix([[x]])
>>> _.subs(x, y)
Matrix([[y]])
>>> Matrix(_).subs(y, x)
Matrix([[x]])
"""
return self.applyfunc(lambda x: x.subs(*args, **kwargs))
def trace(self):
"""
Returns the trace of a square matrix i.e. the sum of the
diagonal elements.
Examples
========
>>> from sympy import Matrix
>>> A = Matrix(2, 2, [1, 2, 3, 4])
>>> A.trace()
5
"""
if not self.rows == self.cols:
raise NonSquareMatrixError()
return self._eval_trace()
def transpose(self):
"""
Returns the transpose of the matrix.
Examples
========
>>> from sympy import Matrix
>>> A = Matrix(2, 2, [1, 2, 3, 4])
>>> A.transpose()
Matrix([
[1, 3],
[2, 4]])
>>> from sympy import Matrix, I
>>> m=Matrix(((1, 2+I), (3, 4)))
>>> m
Matrix([
[1, 2 + I],
[3, 4]])
>>> m.transpose()
Matrix([
[ 1, 3],
[2 + I, 4]])
>>> m.T == m.transpose()
True
See Also
========
conjugate: By-element conjugation
"""
return self._eval_transpose()
T = property(transpose, None, None, "Matrix transposition.")
C = property(conjugate, None, None, "By-element conjugation.")
n = evalf
def xreplace(self, rule): # should mirror core.basic.xreplace
"""Return a new matrix with xreplace applied to each entry.
Examples
========
>>> from sympy.abc import x, y
>>> from sympy.matrices import SparseMatrix, Matrix
>>> SparseMatrix(1, 1, [x])
Matrix([[x]])
>>> _.xreplace({x: y})
Matrix([[y]])
>>> Matrix(_).xreplace({y: x})
Matrix([[x]])
"""
return self.applyfunc(lambda x: x.xreplace(rule))
_eval_simplify = simplify
def _eval_trigsimp(self, **opts):
from sympy.simplify import trigsimp
return self.applyfunc(lambda x: trigsimp(x, **opts))
class MatrixArithmetic(MatrixRequired):
"""Provides basic matrix arithmetic operations.
Should not be instantiated directly."""
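# Higher than Expr's default _op_priority of 10.0 so that mixed
# scalar/matrix operations dispatch to the matrix class's methods
# (see call_highest_priority).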
_op_priority = 10.01
def _eval_Abs(self):
return self._new(self.rows, self.cols, lambda i, j: Abs(self[i, j]))
def _eval_add(self, other):
return self._new(self.rows, self.cols,
lambda i, j: self[i, j] + other[i, j])
def _eval_matrix_mul(self, other):
def entry(i, j):
try:
return sum(self[i,k]*other[k,j] for k in range(self.cols))
except TypeError:
# Block matrices don't work with `sum` or `Add` (ISSUE #11599)
# They don't work with `sum` because `sum` tries to add `0`
# initially, and for a matrix, that is a mix of a scalar and
# a matrix, which raises a TypeError. Fall back to a
# block-matrix-safe way to multiply if the `sum` fails.
ret = self[i, 0]*other[0, j]
for k in range(1, self.cols):
ret += self[i, k]*other[k, j]
return ret
return self._new(self.rows, other.cols, entry)
def _eval_matrix_mul_elementwise(self, other):
return self._new(self.rows, self.cols, lambda i, j: self[i,j]*other[i,j])
def _eval_matrix_rmul(self, other):
def entry(i, j):
return sum(other[i,k]*self[k,j] for k in range(other.cols))
return self._new(other.rows, self.cols, entry)
def _eval_pow_by_recursion(self, num):
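# Exponentiation by squaring: an odd exponent peels off one factor, an
# even exponent squares the result of the half power (num is assumed to
# be a positive integer here).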
if num == 1:
return self
if num % 2 == 1:
return self * self._eval_pow_by_recursion(num - 1)
ret = self._eval_pow_by_recursion(num // 2)
return ret * ret
def _eval_scalar_mul(self, other):
return self._new(self.rows, self.cols, lambda i, j: self[i,j]*other)
def _eval_scalar_rmul(self, other):
return self._new(self.rows, self.cols, lambda i, j: other*self[i,j])
def _eval_Mod(self, other):
from sympy import Mod
return self._new(self.rows, self.cols, lambda i, j: Mod(self[i, j], other))
# python arithmetic functions
def __abs__(self):
"""Returns a new matrix with entry-wise absolute values."""
return self._eval_Abs()
@call_highest_priority('__radd__')
def __add__(self, other):
"""Return self + other, raising ShapeError if shapes don't match."""
other = _matrixify(other)
# matrix-like objects can have shapes. This is
# our first sanity check.
if hasattr(other, 'shape'):
if self.shape != other.shape:
raise ShapeError("Matrix size mismatch: %s + %s" % (
self.shape, other.shape))
# honest sympy matrices defer to their class's routine
if getattr(other, 'is_Matrix', False):
# call the highest-priority class's _eval_add
a, b = self, other
if a.__class__ != classof(a, b):
b, a = a, b
return a._eval_add(b)
# Matrix-like objects can be passed to CommonMatrix routines directly.
if getattr(other, 'is_MatrixLike', False):
return MatrixArithmetic._eval_add(self, other)
raise TypeError('cannot add %s and %s' % (type(self), type(other)))
@call_highest_priority('__rdiv__')
def __div__(self, other):
return self * (S.One / other)
@call_highest_priority('__rmatmul__')
def __matmul__(self, other):
other = _matrixify(other)
if not getattr(other, 'is_Matrix', False) and not getattr(other, 'is_MatrixLike', False):
return NotImplemented
return self.__mul__(other)
@call_highest_priority('__rmul__')
def __mul__(self, other):
"""Return self*other where other is either a scalar or a matrix
of compatible dimensions.
Examples
========
>>> from sympy.matrices import Matrix
>>> A = Matrix([[1, 2, 3], [4, 5, 6]])
>>> 2*A == A*2 == Matrix([[2, 4, 6], [8, 10, 12]])
True
>>> B = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> A*B
Matrix([
[30, 36, 42],
[66, 81, 96]])
>>> B*A
Traceback (most recent call last):
...
ShapeError: Matrices size mismatch.
>>>
See Also
========
matrix_multiply_elementwise
"""
other = _matrixify(other)
# matrix-like objects can have shapes. This is
# our first sanity check.
if hasattr(other, 'shape') and len(other.shape) == 2:
if self.shape[1] != other.shape[0]:
raise ShapeError("Matrix size mismatch: %s * %s." % (
self.shape, other.shape))
# honest sympy matrices defer to their class's routine
if getattr(other, 'is_Matrix', False):
return self._eval_matrix_mul(other)
# Matrix-like objects can be passed to CommonMatrix routines directly.
if getattr(other, 'is_MatrixLike', False):
return MatrixArithmetic._eval_matrix_mul(self, other)
# if 'other' is not iterable then scalar multiplication.
if not isinstance(other, Iterable):
try:
return self._eval_scalar_mul(other)
except TypeError:
pass
return NotImplemented
def __neg__(self):
return self._eval_scalar_mul(-1)
@call_highest_priority('__rpow__')
def __pow__(self, num):
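# Integer exponents: handle 1x1 matrices, the zero power (identity) and
# negative powers (invert first), then exponentiate by recursive squaring,
# with a Jordan-block shortcut for very large powers of 2x2 matrices.
# Symbolic or float exponents always go through the Jordan form.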
if not self.rows == self.cols:
raise NonSquareMatrixError()
try:
a = self
num = sympify(num)
if num.is_Number and num % 1 == 0:
if a.rows == 1:
return a._new([[a[0]**num]])
if num == 0:
return self._new(self.rows, self.cols, lambda i, j: int(i == j))
if num < 0:
num = -num
a = a.inv()
# When certain conditions are met,
# Jordan block algorithm is faster than
# computation by recursion.
elif a.rows == 2 and num > 100000:
try:
return a._matrix_pow_by_jordan_blocks(num)
except (AttributeError, MatrixError):
pass
return a._eval_pow_by_recursion(num)
elif isinstance(num, (Expr, float)):
return a._matrix_pow_by_jordan_blocks(num)
else:
raise TypeError(
"Only SymPy expressions or integers are supported as exponent for matrices")
except AttributeError:
raise TypeError("Don't know how to raise {} to {}".format(self.__class__, num))
@call_highest_priority('__add__')
def __radd__(self, other):
return self + other
@call_highest_priority('__matmul__')
def __rmatmul__(self, other):
other = _matrixify(other)
if not getattr(other, 'is_Matrix', False) and not getattr(other, 'is_MatrixLike', False):
return NotImplemented
return self.__rmul__(other)
@call_highest_priority('__mul__')
def __rmul__(self, other):
other = _matrixify(other)
# matrix-like objects can have shapes. This is
# our first sanity check.
if hasattr(other, 'shape') and len(other.shape) == 2:
if self.shape[0] != other.shape[1]:
raise ShapeError("Matrix size mismatch.")
# honest sympy matrices defer to their class's routine
if getattr(other, 'is_Matrix', False):
return other._new(other.as_mutable() * self)
# Matrix-like objects can be passed to CommonMatrix routines directly.
if getattr(other, 'is_MatrixLike', False):
return MatrixArithmetic._eval_matrix_rmul(self, other)
# if 'other' is not iterable then scalar multiplication.
if not isinstance(other, Iterable):
try:
return self._eval_scalar_rmul(other)
except TypeError:
pass
return NotImplemented
@call_highest_priority('__sub__')
def __rsub__(self, a):
return (-self) + a
@call_highest_priority('__rsub__')
def __sub__(self, a):
return self + (-a)
@call_highest_priority('__rtruediv__')
def __truediv__(self, other):
return self.__div__(other)
def multiply_elementwise(self, other):
"""Return the Hadamard product (elementwise product) of A and B
Examples
========
>>> from sympy.matrices import Matrix
>>> A = Matrix([[0, 1, 2], [3, 4, 5]])
>>> B = Matrix([[1, 10, 100], [100, 10, 1]])
>>> A.multiply_elementwise(B)
Matrix([
[ 0, 10, 200],
[300, 40, 5]])
See Also
========
cross
dot
multiply
"""
if self.shape != other.shape:
raise ShapeError("Matrix shapes must agree {} != {}".format(self.shape, other.shape))
return self._eval_matrix_mul_elementwise(other)
class MatrixCommon(MatrixArithmetic, MatrixOperations, MatrixProperties,
MatrixSpecial, MatrixShaping):
"""All common matrix operations including basic arithmetic, shaping,
and special matrices like `zeros`, and `eye`."""
_diff_wrt = True
class _MinimalMatrix(object):
"""Class providing the minimum functionality
for a matrix-like object and implementing every method
required for a `MatrixRequired`. This class does not have everything
needed to become a full-fledged sympy object, but it will satisfy the
requirements of anything inheriting from `MatrixRequired`. If you wish
to make a specialized matrix type, make sure to implement these
methods and properties with the exception of `__init__` and `__repr__`
which are included for convenience."""
is_MatrixLike = True
_sympify = staticmethod(sympify)
_class_priority = 3
is_Matrix = True
is_MatrixExpr = False
@classmethod
def _new(cls, *args, **kwargs):
return cls(*args, **kwargs)
def __init__(self, rows, cols=None, mat=None):
if isinstance(mat, FunctionType):
# if we passed in a function, use that to populate the indices
mat = list(mat(i, j) for i in range(rows) for j in range(cols))
try:
if cols is None and mat is None:
mat = rows
rows, cols = mat.shape
except AttributeError:
pass
try:
# if we passed in a list of lists, flatten it and set the size
if cols is None and mat is None:
mat = rows
cols = len(mat[0])
rows = len(mat)
mat = [x for l in mat for x in l]
except (IndexError, TypeError):
pass
self.mat = tuple(self._sympify(x) for x in mat)
self.rows, self.cols = rows, cols
if self.rows is None or self.cols is None:
raise NotImplementedError("Cannot initialize matrix with given parameters")
def __getitem__(self, key):
def _normalize_slices(row_slice, col_slice):
"""Ensure that row_slice and col_slice don't have
`None` in their arguments. Any integers are converted
to slices of length 1"""
if not isinstance(row_slice, slice):
row_slice = slice(row_slice, row_slice + 1, None)
row_slice = slice(*row_slice.indices(self.rows))
if not isinstance(col_slice, slice):
col_slice = slice(col_slice, col_slice + 1, None)
col_slice = slice(*col_slice.indices(self.cols))
return (row_slice, col_slice)
def _coord_to_index(i, j):
"""Return the index in _mat corresponding
to the (i,j) position in the matrix. """
return i * self.cols + j
if isinstance(key, tuple):
i, j = key
if isinstance(i, slice) or isinstance(j, slice):
# if the coordinates are not slices, make them so
# and expand the slices so they don't contain `None`
i, j = _normalize_slices(i, j)
rowsList, colsList = list(range(self.rows))[i], \
list(range(self.cols))[j]
indices = (i * self.cols + j for i in rowsList for j in
colsList)
return self._new(len(rowsList), len(colsList),
list(self.mat[i] for i in indices))
# if the key is a tuple of ints, change
# it to an array index
key = _coord_to_index(i, j)
return self.mat[key]
def __eq__(self, other):
return self.shape == other.shape and list(self) == list(other)
def __len__(self):
return self.rows*self.cols
def __repr__(self):
return "_MinimalMatrix({}, {}, {})".format(self.rows, self.cols,
self.mat)
@property
def shape(self):
return (self.rows, self.cols)
class _MatrixWrapper(object):
"""Wrapper class providing the minimum functionality
for a matrix-like object: .rows, .cols, .shape, indexability,
and iterability. CommonMatrix math operations should work
on matrix-like objects. For example, wrapping a numpy
matrix in a MatrixWrapper allows it to be passed to CommonMatrix.
"""
is_MatrixLike = True
def __init__(self, mat, shape=None):
self.mat = mat
self.rows, self.cols = mat.shape if shape is None else shape
def __getattr__(self, attr):
"""Most attribute access is passed straight through
to the stored matrix"""
return getattr(self.mat, attr)
def __getitem__(self, key):
return self.mat.__getitem__(key)
def _matrixify(mat):
"""If `mat` is a Matrix or is matrix-like,
return a Matrix or MatrixWrapper object. Otherwise
`mat` is passed through without modification."""
if getattr(mat, 'is_Matrix', False):
return mat
if hasattr(mat, 'shape'):
if len(mat.shape) == 2:
return _MatrixWrapper(mat)
return mat
def a2idx(j, n=None):
"""Return integer after making positive and validating against n."""
if type(j) is not int:
try:
j = j.__index__()
except AttributeError:
raise IndexError("Invalid index a[%r]" % (j,))
if n is not None:
if j < 0:
j += n
if not (j >= 0 and j < n):
raise IndexError("Index out of range: a[%s]" % (j,))
return int(j)
def classof(A, B):
"""
Get the type of the result when combining matrices of different types.
Currently the strategy is that immutability is contagious.
Examples
========
>>> from sympy import Matrix, ImmutableMatrix
>>> from sympy.matrices.matrices import classof
>>> M = Matrix([[1, 2], [3, 4]]) # a Mutable Matrix
>>> IM = ImmutableMatrix([[1, 2], [3, 4]])
>>> classof(M, IM)
<class 'sympy.matrices.immutable.ImmutableDenseMatrix'>
"""
try:
if A._class_priority > B._class_priority:
return A.__class__
else:
return B.__class__
except Exception:
pass
try:
import numpy
if isinstance(A, numpy.ndarray):
return B.__class__
if isinstance(B, numpy.ndarray):
return A.__class__
except Exception:
pass
raise TypeError("Incompatible classes %s, %s" % (A.__class__, B.__class__))
| wxgeo/geophar | wxgeometrie/sympy/matrices/common.py | Python | gpl-2.0 | 67,513 | [
"DIRAC"
] | 0be157e2b42498b591bc2dddad699bb6623df181006f788ffc1981cab04abdee |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
urlpatterns = [
url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name="home"),
url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name="about"),
# Django Admin, use {% url 'admin:index' %}
url(settings.ADMIN_URL, include(admin.site.urls)),
# User management
url(r'^users/', include("nikolad.users.urls", namespace="users")),
url(r'^accounts/', include('allauth.urls')),
# Your stuff: custom urls includes go here
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
# This allows the error pages to be debugged during development; just visit
# these urls in a browser to see what these error pages look like.
urlpatterns += [
url(r'^400/$', default_views.bad_request, kwargs={'exception': Exception("Bad Request!")}),
url(r'^403/$', default_views.permission_denied, kwargs={'exception': Exception("Permission Denied")}),
url(r'^404/$', default_views.page_not_found, kwargs={'exception': Exception("Page not Found")}),
url(r'^500/$', default_views.server_error),
]
| nikoladang/cookicutter-django-wagtail | config/urls.py | Python | bsd-3-clause | 1,430 | [
"VisIt"
] | c331e07486b8bd3a36e1533e538d23297b93fa50b00875d271d7fad6610c898a |
#!/usr/bin/python
#
# Copyright 2015 Google Inc. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Performs OAuth2 Web Server Flow to obtain credentials."""
from __future__ import (
absolute_import, division, print_function, unicode_literals)
import argparse
import os
import sys
import hyou
from hyou import py3
import oauth2client.client
TEST_CLIENT_ID = (
'958069810280-th697if59r9scrf1qh0sg6gd9d9u0kts.'
'apps.googleusercontent.com')
TEST_CLIENT_SECRET = '5nlcvd54WycOd8h8w7HD0avT'
def create_parser():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'--client-id', type=py3.str, default=TEST_CLIENT_ID,
help='OAuth2 client ID.')
parser.add_argument(
'--client-secret', type=py3.str, default=TEST_CLIENT_SECRET,
help='OAuth2 client secret.')
parser.add_argument(
'output_json_path', type=py3.str,
help='Output JSON path.')
return parser
def main(argv):
parser = create_parser()
opts = parser.parse_args(argv[1:])
flow = oauth2client.client.OAuth2WebServerFlow(
client_id=opts.client_id,
client_secret=opts.client_secret,
scope=hyou.SCOPES)
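# The out-of-band redirect URI ('urn:ietf:wg:oauth:2.0:oob') makes Google
# display the authorization code in the browser so it can be pasted into
# the console prompt below.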
url = flow.step1_get_authorize_url('urn:ietf:wg:oauth:2.0:oob')
print()
print('Please visit this URL to get the authorization code:')
print(url)
print()
code = py3.input('Code: ').strip()
credentials = flow.step2_exchange(code)
with py3.open(opts.output_json_path, 'wb') as f:
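# Restrict the credentials file to owner read/write (0600) before any
# secret material is written to it.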
os.fchmod(f.fileno(), 0o600)
f.write(
py3.native_str_to_bytes(credentials.to_json(), encoding='utf-8'))
print()
print('Credentials successfully saved to %s' % opts.output_json_path)
print()
print('WARNING: Keep it in a safe location! With the credentials,')
print(' all your Google Drive documents can be accessed.')
if __name__ == '__main__':
sys.exit(main(sys.argv))
| google/hyou | tools/generate_oauth2_credentials.py | Python | apache-2.0 | 2,462 | [
"VisIt"
] | 490a4615e4a923f0412b93b4ab30edb9c8866c470816bdba34b1434d7b7179b4 |
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Collections of messages and their translations, called cliques. Also
collections of cliques (uber-cliques).
'''
from __future__ import print_function
import re
import six
from grit import constants
from grit import exception
from grit import lazy_re
from grit import pseudo
from grit import pseudolocales
from grit import tclib
class UberClique(object):
'''A factory (NOT a singleton factory) for making cliques. It has several
methods for working with the cliques created using the factory.
'''
def __init__(self):
# A map from message ID to list of cliques whose source messages have
# that ID. This will contain all cliques created using this factory.
# Different messages can have the same ID because they have the
# same translateable portion and placeholder names, but occur in different
# places in the resource tree.
#
# Each list of cliques is kept sorted by description, to achieve
# stable results from the BestClique method, see below.
self.cliques_ = {}
# A map of clique IDs to list of languages to indicate translations where we
# fell back to English.
self.fallback_translations_ = {}
# A map of clique IDs to list of languages to indicate missing translations.
self.missing_translations_ = {}
def _AddMissingTranslation(self, lang, clique, is_error):
tl = self.fallback_translations_
if is_error:
tl = self.missing_translations_
id = clique.GetId()
if id not in tl:
tl[id] = {}
if lang not in tl[id]:
tl[id][lang] = 1
def HasMissingTranslations(self):
return len(self.missing_translations_) > 0
def MissingTranslationsReport(self):
'''Returns a string suitable for printing to report missing
and fallback translations to the user.
'''
def ReportTranslation(clique, langs):
text = clique.GetMessage().GetPresentableContent()
# The text 'error' (usually 'Error:' but we are conservative)
# can trigger some build environments (Visual Studio, we're
# looking at you) to consider invocation of grit to have failed,
# so we make sure never to output that word.
extract = re.sub(r'(?i)error', 'REDACTED', text[0:40])[0:40]
ellipsis = ''
if len(text) > 40:
ellipsis = '...'
langs_extract = langs[0:6]
describe_langs = ','.join(langs_extract)
if len(langs) > 6:
describe_langs += " and %d more" % (len(langs) - 6)
return " %s \"%s%s\" %s" % (clique.GetId(), extract, ellipsis,
describe_langs)
lines = []
if len(self.fallback_translations_):
lines.append(
"WARNING: Fell back to English for the following translations:")
for (id, langs) in self.fallback_translations_.items():
lines.append(
ReportTranslation(self.cliques_[id][0], list(langs.keys())))
if len(self.missing_translations_):
lines.append("ERROR: The following translations are MISSING:")
for (id, langs) in self.missing_translations_.items():
lines.append(
ReportTranslation(self.cliques_[id][0], list(langs.keys())))
return '\n'.join(lines)
def MakeClique(self, message, translateable=True):
'''Create a new clique initialized with a message.
Args:
message: tclib.Message()
translateable: True | False
'''
clique = MessageClique(self, message, translateable)
# Enable others to find this clique by its message ID
if message.GetId() in self.cliques_:
presentable_text = clique.GetMessage().GetPresentableContent()
if not message.HasAssignedId():
for c in self.cliques_[message.GetId()]:
assert c.GetMessage().GetPresentableContent() == presentable_text
self.cliques_[message.GetId()].append(clique)
# We need to keep each list of cliques sorted by description, to
# achieve stable results from the BestClique method, see below.
self.cliques_[message.GetId()].sort(
key=lambda c:c.GetMessage().GetDescription())
else:
self.cliques_[message.GetId()] = [clique]
return clique
def FindCliqueAndAddTranslation(self, translation, language):
'''Adds the specified translation to the clique with the source message
it is a translation of.
Args:
translation: tclib.Translation()
language: 'en' | 'fr' ...
Return:
True if the source message was found, otherwise false.
'''
if translation.GetId() in self.cliques_:
for clique in self.cliques_[translation.GetId()]:
clique.AddTranslation(translation, language)
return True
else:
return False
def BestClique(self, id):
'''Returns the "best" clique from a list of cliques. All the cliques
must have the same ID. The "best" clique is chosen in the following
order of preference:
- The first clique that has a non-ID-based description.
    - If no such clique is found, the first clique with an ID-based description.
- Otherwise the first clique.
This method is stable in terms of always returning a clique with
an identical description (on different runs of GRIT on the same
data) because self.cliques_ is sorted by description.
'''
clique_list = self.cliques_[id]
clique_with_id = None
clique_default = None
for clique in clique_list:
if not clique_default:
clique_default = clique
description = clique.GetMessage().GetDescription()
if description and len(description) > 0:
if not description.startswith('ID:'):
# this is the preferred case so we exit right away
return clique
elif not clique_with_id:
clique_with_id = clique
if clique_with_id:
return clique_with_id
else:
return clique_default
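  # Illustrative sketch of the preference order (hypothetical messages, not
  # part of the original file):
  #
  #   factory = UberClique()
  #   factory.MakeClique(tclib.Message(text='OK', description='ID: IDS_OK'))
  #   best = factory.MakeClique(tclib.Message(text='OK',
  #                                           description='OK button label'))
  #   assert factory.BestClique(best.GetId()) is best  # non-ID description wins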
def BestCliquePerId(self):
'''Iterates over the list of all cliques and returns the best clique for
each ID. This will be the first clique with a source message that has a
non-empty description, or an arbitrary clique if none of them has a
description.
'''
for id in self.cliques_:
yield self.BestClique(id)
def BestCliqueByOriginalText(self, text, meaning):
'''Finds the "best" (as in BestClique()) clique that has original text
'text' and meaning 'meaning'. Returns None if there is no such clique.
'''
# If needed, this can be optimized by maintaining a map of
# fingerprints of original text+meaning to cliques.
for c in self.BestCliquePerId():
msg = c.GetMessage()
if msg.GetRealContent() == text and msg.GetMeaning() == meaning:
return msg
return None
def AllMessageIds(self):
'''Returns a list of all defined message IDs.
'''
return list(self.cliques_.keys())
def AllCliques(self):
'''Iterates over all cliques. Note that this can return multiple cliques
with the same ID.
'''
for cliques in self.cliques_.values():
for c in cliques:
yield c
def GenerateXtbParserCallback(self, lang, debug=False):
'''Creates a callback function as required by grit.xtb_reader.Parse().
This callback will create Translation objects for each message from
the XTB that exists in this uberclique, and add them as translations for
the relevant cliques. The callback will add translations to the language
specified by 'lang'
Args:
lang: 'fr'
debug: True | False
'''
def Callback(id, structure):
if id not in self.cliques_:
if debug:
print("Ignoring translation #%s" % id)
return
if debug:
print("Adding translation #%s" % id)
# We fetch placeholder information from the original message (the XTB file
# only contains placeholder names).
original_msg = self.BestClique(id).GetMessage()
translation = tclib.Translation(id=id)
for is_ph,text in structure:
if not is_ph:
translation.AppendText(text)
else:
found_placeholder = False
for ph in original_msg.GetPlaceholders():
if ph.GetPresentation() == text:
translation.AppendPlaceholder(tclib.Placeholder(
ph.GetPresentation(), ph.GetOriginal(), ph.GetExample()))
found_placeholder = True
break
if not found_placeholder:
raise exception.MismatchingPlaceholders(
'Translation for message ID %s had <ph name="%s"/>, no match\n'
'in original message' % (id, text))
self.FindCliqueAndAddTranslation(translation, lang)
return Callback
class CustomType(object):
'''A base class you should implement if you wish to specify a custom type
for a message clique (i.e. custom validation and optional modification of
translations).'''
def Validate(self, message):
'''Returns true if the message (a tclib.Message object) is valid,
otherwise false.
'''
raise NotImplementedError()
def ValidateAndModify(self, lang, translation):
'''Returns true if the translation (a tclib.Translation object) is valid,
otherwise false. The language is also passed in. This method may modify
the translation that is passed in, if it so wishes.
'''
raise NotImplementedError()
def ModifyTextPart(self, lang, text):
'''If you call ModifyEachTextPart, it will turn around and call this method
for each text part of the translation. You should return the modified
version of the text, or just the original text to not change anything.
'''
raise NotImplementedError()
def ModifyEachTextPart(self, lang, translation):
'''Call this to easily modify one or more of the textual parts of a
translation. It will call ModifyTextPart for each part of the
translation.
'''
contents = translation.GetContent()
for ix in range(len(contents)):
if (isinstance(contents[ix], six.string_types)):
contents[ix] = self.ModifyTextPart(lang, contents[ix])
class OneOffCustomType(CustomType):
'''A very simple custom type that performs the validation expressed by
the input expression on all languages including the source language.
The expression can access the variables 'lang', 'msg' and 'text()' where
'lang' is the language of 'msg', 'msg' is the message or translation being
validated and 'text()' returns the real contents of 'msg' (for shorthand).
'''
def __init__(self, expression):
self.expr = expression
def Validate(self, message):
return self.ValidateAndModify(MessageClique.source_language, message)
def ValidateAndModify(self, lang, msg):
def text():
return msg.GetRealContent()
return eval(self.expr, {},
{'lang' : lang,
'text' : text,
'msg' : msg,
})
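# Illustrative sketch (hypothetical expression, not part of the original file):
# reject any source message or translation whose text exceeds 80 characters.
#
#   short_enough = OneOffCustomType('len(text()) <= 80')
#   clique.SetCustomType(short_enough)   # also validates the source message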
class MessageClique(object):
'''A message along with all of its translations. Also code to bring
translations together with their original message.'''
# change this to the language code of Messages you add to cliques_.
# TODO(joi) Actually change this based on the <grit> node's source language
source_language = 'en'
# A constant translation we use when asked for a translation into the
# special language constants.CONSTANT_LANGUAGE.
CONSTANT_TRANSLATION = tclib.Translation(text='TTTTTT')
# A pattern to match messages that are empty or whitespace only.
WHITESPACE_MESSAGE = lazy_re.compile(r'^\s*$')
def __init__(self, uber_clique, message, translateable=True,
custom_type=None):
'''Create a new clique initialized with just a message.
Note that messages with a body comprised only of whitespace will implicitly
be marked non-translatable.
Args:
uber_clique: Our uber-clique (collection of cliques)
message: tclib.Message()
translateable: True | False
custom_type: instance of clique.CustomType interface
'''
# Our parent
self.uber_clique = uber_clique
# If not translateable, we only store the original message.
self.translateable = translateable
# We implicitly mark messages that have a whitespace-only body as
# non-translateable.
if MessageClique.WHITESPACE_MESSAGE.match(message.GetRealContent()):
self.translateable = False
# A mapping of language identifiers to tclib.BaseMessage and its
# subclasses (i.e. tclib.Message and tclib.Translation).
self.clique = { MessageClique.source_language : message }
    # A list of the "shortcut groups" this clique is
    # part of. Within any given shortcut group, a shortcut key (e.g. &J)
    # may appear at most once per language across all the cliques that
    # belong to the group.
self.shortcut_groups = []
# An instance of the CustomType interface, or None. If this is set, it will
# be used to validate the original message and translations thereof, and
# will also get a chance to modify translations of the message.
self.SetCustomType(custom_type)
def GetMessage(self):
'''Retrieves the tclib.Message that is the source for this clique.'''
return self.clique[MessageClique.source_language]
def GetId(self):
'''Retrieves the message ID of the messages in this clique.'''
return self.GetMessage().GetId()
def IsTranslateable(self):
return self.translateable
def AddToShortcutGroup(self, group):
self.shortcut_groups.append(group)
def SetCustomType(self, custom_type):
'''Makes this clique use custom_type for validating messages and
translations, and optionally modifying translations.
'''
self.custom_type = custom_type
if custom_type and not custom_type.Validate(self.GetMessage()):
raise exception.InvalidMessage(self.GetMessage().GetRealContent())
def MessageForLanguage(self, lang, pseudo_if_no_match=True,
fallback_to_english=False):
'''Returns the message/translation for the specified language, providing
a pseudotranslation if there is no available translation and a pseudo-
translation is requested.
The translation of any message whatsoever in the special language
'x_constant' is the message "TTTTTT".
Args:
lang: 'en'
pseudo_if_no_match: True
fallback_to_english: False
Return:
tclib.BaseMessage
'''
if not self.translateable:
return self.GetMessage()
if lang == constants.CONSTANT_LANGUAGE:
return self.CONSTANT_TRANSLATION
for msglang in self.clique:
if lang == msglang:
return self.clique[msglang]
if pseudo_if_no_match:
if lang == constants.PSEUDOLOCALE_LONG_STRINGS:
return pseudolocales.PseudoLongStringMessage(self.GetMessage())
elif lang == constants.PSEUDOLOCALE_RTL:
return pseudolocales.PseudoRTLMessage(self.GetMessage())
if fallback_to_english:
self.uber_clique._AddMissingTranslation(lang, self, is_error=False)
return self.GetMessage()
# If we're not supposed to generate pseudotranslations, we add an error
# report to a list of errors, then fail at a higher level, so that we
# get a list of all messages that are missing translations.
if not pseudo_if_no_match:
self.uber_clique._AddMissingTranslation(lang, self, is_error=True)
return pseudo.PseudoMessage(self.GetMessage())
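  # Illustrative sketch (hypothetical clique, not part of the original file):
  # with no French translation stored, the default is a pseudotranslation,
  # while fallback_to_english=True returns the English source instead:
  #
  #   msg = clique.MessageForLanguage('fr')               # pseudo message
  #   msg = clique.MessageForLanguage('fr', False, True)  # English source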
def AllMessagesThatMatch(self, lang_re, include_pseudo = True):
'''Returns a map of all messages that match 'lang', including the pseudo
translation if requested.
Args:
lang_re: re.compile(r'fr|en')
include_pseudo: True
Return:
{ 'en' : tclib.Message,
'fr' : tclib.Translation,
pseudo.PSEUDO_LANG : tclib.Translation }
'''
if not self.translateable:
return [self.GetMessage()]
matches = {}
for msglang in self.clique:
if lang_re.match(msglang):
matches[msglang] = self.clique[msglang]
if include_pseudo:
matches[pseudo.PSEUDO_LANG] = pseudo.PseudoMessage(self.GetMessage())
return matches
def AddTranslation(self, translation, language):
'''Add a translation to this clique. The translation must have the same
ID as the message that is the source for this clique.
If this clique is not translateable, the function just returns.
Args:
translation: tclib.Translation()
language: 'en'
Throws:
grit.exception.InvalidTranslation if the translation you're trying to add
doesn't have the same message ID as the source message of this clique.
'''
if not self.translateable:
return
if translation.GetId() != self.GetId():
raise exception.InvalidTranslation(
'Msg ID %s, transl ID %s' % (self.GetId(), translation.GetId()))
assert not language in self.clique
# Because two messages can differ in the original content of their
# placeholders yet share the same ID (because they are otherwise the
# same), the translation we are getting may have different original
# content for placeholders than our message, yet it is still the right
# translation for our message (because it is for the same ID). We must
# therefore fetch the original content of placeholders from our original
# English message.
#
# See grit.clique_unittest.MessageCliqueUnittest.testSemiIdenticalCliques
# for a concrete explanation of why this is necessary.
original = self.MessageForLanguage(self.source_language, False)
if len(original.GetPlaceholders()) != len(translation.GetPlaceholders()):
print("ERROR: '%s' translation of message id %s does not match" %
(language, translation.GetId()))
assert False
transl_msg = tclib.Translation(id=self.GetId(),
text=translation.GetPresentableContent(),
placeholders=original.GetPlaceholders())
if (self.custom_type and
not self.custom_type.ValidateAndModify(language, transl_msg)):
print("WARNING: %s translation failed validation: %s" %
(language, transl_msg.GetId()))
self.clique[language] = transl_msg
| scheib/chromium | tools/grit/grit/clique.py | Python | bsd-3-clause | 18,254 | [
"xTB"
] | 140ca1d3e38af3dbd84a397e6ddd71265192d6424c60c1efe8645e5be5a0d59b |
"""Bayesian Gaussian Mixture Models and
Dirichlet Process Gaussian Mixture Models"""
from __future__ import print_function
# Author: Alexandre Passos ([email protected])
# Bertrand Thirion <[email protected]>
#
# Based on mixture.py by:
# Ron Weiss <[email protected]>
# Fabian Pedregosa <[email protected]>
#
import numpy as np
from scipy.special import digamma as _digamma, gammaln as _gammaln
from scipy import linalg
from scipy.spatial.distance import cdist
from ..externals.six.moves import xrange
from ..utils import check_random_state, check_array
from ..utils.extmath import logsumexp, pinvh, squared_norm
from ..utils.validation import check_is_fitted
from .. import cluster
from .gmm import GMM
def digamma(x):
return _digamma(x + np.finfo(np.float32).eps)
def gammaln(x):
return _gammaln(x + np.finfo(np.float32).eps)
def log_normalize(v, axis=0):
"""Normalized probabilities from unnormalized log-probabilites"""
v = np.rollaxis(v, axis)
v = v.copy()
v -= v.max(axis=0)
out = logsumexp(v)
v = np.exp(v - out)
v += np.finfo(np.float32).eps
v /= np.sum(v, axis=0)
return np.swapaxes(v, 0, axis)
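# Illustrative sketch (not part of the original module; values hypothetical):
# unnormalized log-probabilities are mapped to probabilities that sum to one
# along the chosen axis, e.g.
#   log_normalize(np.log(np.array([0.2, 0.6])))  # ~ array([0.25, 0.75])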
def wishart_log_det(a, b, detB, n_features):
"""Expected value of the log of the determinant of a Wishart
The expected value of the logarithm of the determinant of a
wishart-distributed random variable with the specified parameters."""
l = np.sum(digamma(0.5 * (a - np.arange(-1, n_features - 1))))
l += n_features * np.log(2)
return l + detB
def wishart_logz(v, s, dets, n_features):
"The logarithm of the normalization constant for the wishart distribution"
z = 0.
z += 0.5 * v * n_features * np.log(2)
z += (0.25 * (n_features * (n_features - 1)) * np.log(np.pi))
z += 0.5 * v * np.log(dets)
z += np.sum(gammaln(0.5 * (v - np.arange(n_features) + 1)))
return z
def _bound_wishart(a, B, detB):
"""Returns a function of the dof, scale matrix and its determinant
    used as an upper bound in the variational approximation of the evidence"""
n_features = B.shape[0]
logprior = wishart_logz(a, B, detB, n_features)
logprior -= wishart_logz(n_features,
np.identity(n_features),
1, n_features)
logprior += 0.5 * (a - 1) * wishart_log_det(a, B, detB, n_features)
logprior += 0.5 * a * np.trace(B)
return logprior
##############################################################################
# Variational bound on the log likelihood of each class
##############################################################################
def _sym_quad_form(x, mu, A):
"""helper function to calculate symmetric quadratic form x.T * A * x"""
q = (cdist(x, mu[np.newaxis], "mahalanobis", VI=A) ** 2).reshape(-1)
return q
def _bound_state_log_lik(X, initial_bound, precs, means, covariance_type):
"""Update the bound with likelihood terms, for standard covariance types"""
n_components, n_features = means.shape
n_samples = X.shape[0]
bound = np.empty((n_samples, n_components))
bound[:] = initial_bound
if covariance_type in ['diag', 'spherical']:
for k in range(n_components):
d = X - means[k]
bound[:, k] -= 0.5 * np.sum(d * d * precs[k], axis=1)
elif covariance_type == 'tied':
for k in range(n_components):
bound[:, k] -= 0.5 * _sym_quad_form(X, means[k], precs)
elif covariance_type == 'full':
for k in range(n_components):
bound[:, k] -= 0.5 * _sym_quad_form(X, means[k], precs[k])
return bound
class DPGMM(GMM):
"""Variational Inference for the Infinite Gaussian Mixture Model.
DPGMM stands for Dirichlet Process Gaussian Mixture Model, and it
is an infinite mixture model with the Dirichlet Process as a prior
distribution on the number of clusters. In practice the
approximate inference algorithm uses a truncated distribution with
a fixed maximum number of components, but almost always the number
of components actually used depends on the data.
Stick-breaking Representation of a Gaussian mixture model
probability distribution. This class allows for easy and efficient
inference of an approximate posterior distribution over the
parameters of a Gaussian mixture model with a variable number of
components (smaller than the truncation parameter n_components).
Initialization is with normally-distributed means and identity
covariance, for proper convergence.
Read more in the :ref:`User Guide <dpgmm>`.
Parameters
----------
n_components: int, default 1
Number of mixture components.
covariance_type: string, default 'diag'
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
alpha: float, default 1
Real number representing the concentration parameter of
the dirichlet process. Intuitively, the Dirichlet Process
is as likely to start a new cluster for a point as it is
to add that point to a cluster with alpha elements. A
higher alpha means more clusters, as the expected number
of clusters is ``alpha*log(N)``.
tol : float, default 1e-3
Convergence threshold.
n_iter : int, default 10
Maximum number of iterations to perform before convergence.
params : string, default 'wmc'
Controls which parameters are updated in the training
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars.
init_params : string, default 'wmc'
Controls which parameters are updated in the initialization
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
verbose : int, default 0
Controls output verbosity.
Attributes
----------
covariance_type : string
String describing the type of covariance parameters used by
the DP-GMM. Must be one of 'spherical', 'tied', 'diag', 'full'.
n_components : int
Number of mixture components.
weights_ : array, shape (`n_components`,)
Mixing weights for each mixture component.
means_ : array, shape (`n_components`, `n_features`)
Mean parameters for each mixture component.
precs_ : array
Precision (inverse covariance) parameters for each mixture
component. The shape depends on `covariance_type`::
            (`n_components`, `n_features`)              if 'spherical',
(`n_features`, `n_features`) if 'tied',
(`n_components`, `n_features`) if 'diag',
(`n_components`, `n_features`, `n_features`) if 'full'
converged_ : bool
True when convergence was reached in fit(), False otherwise.
See Also
--------
GMM : Finite Gaussian mixture model fit with EM
VBGMM : Finite Gaussian mixture model fit with a variational
algorithm, better for situations where there might be too little
data to get a good estimate of the covariance matrix.
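    Examples
    --------
    A minimal usage sketch on synthetic data (illustrative only; assumes the
    legacy ``sklearn.mixture.DPGMM`` entry point of this module's era):
    >>> import numpy as np
    >>> from sklearn.mixture import DPGMM
    >>> X = np.vstack([np.random.randn(50, 2), 5. + np.random.randn(50, 2)])
    >>> model = DPGMM(n_components=5, covariance_type='diag', n_iter=50)
    >>> labels = model.fit(X).predict(X)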
"""
def __init__(self, n_components=1, covariance_type='diag', alpha=1.0,
random_state=None, tol=1e-3, verbose=0, min_covar=None,
n_iter=10, params='wmc', init_params='wmc'):
self.alpha = alpha
super(DPGMM, self).__init__(n_components, covariance_type,
random_state=random_state,
tol=tol, min_covar=min_covar,
n_iter=n_iter, params=params,
init_params=init_params, verbose=verbose)
def _get_precisions(self):
"""Return precisions as a full matrix."""
if self.covariance_type == 'full':
return self.precs_
elif self.covariance_type in ['diag', 'spherical']:
return [np.diag(cov) for cov in self.precs_]
elif self.covariance_type == 'tied':
return [self.precs_] * self.n_components
def _get_covars(self):
return [pinvh(c) for c in self._get_precisions()]
def _set_covars(self, covars):
raise NotImplementedError("""The variational algorithm does
not support setting the covariance parameters.""")
def score_samples(self, X):
"""Return the likelihood of the data under the model.
Compute the bound on log probability of X under the model
and return the posterior distribution (responsibilities) of
each mixture component for each element of X.
This is done by computing the parameters for the mean-field of
z for each observation.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X
responsibilities: array_like, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation
"""
check_is_fitted(self, 'gamma_')
X = check_array(X)
if X.ndim == 1:
X = X[:, np.newaxis]
z = np.zeros((X.shape[0], self.n_components))
sd = digamma(self.gamma_.T[1] + self.gamma_.T[2])
dgamma1 = digamma(self.gamma_.T[1]) - sd
dgamma2 = np.zeros(self.n_components)
dgamma2[0] = digamma(self.gamma_[0, 2]) - digamma(self.gamma_[0, 1] +
self.gamma_[0, 2])
for j in range(1, self.n_components):
dgamma2[j] = dgamma2[j - 1] + digamma(self.gamma_[j - 1, 2])
dgamma2[j] -= sd[j - 1]
dgamma = dgamma1 + dgamma2
# Free memory and developers cognitive load:
del dgamma1, dgamma2, sd
if self.covariance_type not in ['full', 'tied', 'diag', 'spherical']:
raise NotImplementedError("This ctype is not implemented: %s"
% self.covariance_type)
p = _bound_state_log_lik(X, self._initial_bound + self.bound_prec_,
self.precs_, self.means_,
self.covariance_type)
z = p + dgamma
z = log_normalize(z, axis=-1)
bound = np.sum(z * p, axis=-1)
return bound, z
def _update_concentration(self, z):
"""Update the concentration parameters for each cluster"""
sz = np.sum(z, axis=0)
self.gamma_.T[1] = 1. + sz
self.gamma_.T[2].fill(0)
for i in range(self.n_components - 2, -1, -1):
self.gamma_[i, 2] = self.gamma_[i + 1, 2] + sz[i]
self.gamma_.T[2] += self.alpha
def _update_means(self, X, z):
"""Update the variational distributions for the means"""
n_features = X.shape[1]
for k in range(self.n_components):
if self.covariance_type in ['spherical', 'diag']:
num = np.sum(z.T[k].reshape((-1, 1)) * X, axis=0)
num *= self.precs_[k]
den = 1. + self.precs_[k] * np.sum(z.T[k])
self.means_[k] = num / den
elif self.covariance_type in ['tied', 'full']:
if self.covariance_type == 'tied':
cov = self.precs_
else:
cov = self.precs_[k]
den = np.identity(n_features) + cov * np.sum(z.T[k])
num = np.sum(z.T[k].reshape((-1, 1)) * X, axis=0)
num = np.dot(cov, num)
self.means_[k] = linalg.lstsq(den, num)[0]
def _update_precisions(self, X, z):
"""Update the variational distributions for the precisions"""
n_features = X.shape[1]
if self.covariance_type == 'spherical':
self.dof_ = 0.5 * n_features * np.sum(z, axis=0)
for k in range(self.n_components):
# could be more memory efficient ?
sq_diff = np.sum((X - self.means_[k]) ** 2, axis=1)
self.scale_[k] = 1.
self.scale_[k] += 0.5 * np.sum(z.T[k] * (sq_diff + n_features))
self.bound_prec_[k] = (
0.5 * n_features * (
digamma(self.dof_[k]) - np.log(self.scale_[k])))
self.precs_ = np.tile(self.dof_ / self.scale_, [n_features, 1]).T
elif self.covariance_type == 'diag':
for k in range(self.n_components):
self.dof_[k].fill(1. + 0.5 * np.sum(z.T[k], axis=0))
sq_diff = (X - self.means_[k]) ** 2 # see comment above
self.scale_[k] = np.ones(n_features) + 0.5 * np.dot(
z.T[k], (sq_diff + 1))
self.precs_[k] = self.dof_[k] / self.scale_[k]
self.bound_prec_[k] = 0.5 * np.sum(digamma(self.dof_[k])
- np.log(self.scale_[k]))
self.bound_prec_[k] -= 0.5 * np.sum(self.precs_[k])
elif self.covariance_type == 'tied':
self.dof_ = 2 + X.shape[0] + n_features
self.scale_ = (X.shape[0] + 1) * np.identity(n_features)
for k in range(self.n_components):
diff = X - self.means_[k]
self.scale_ += np.dot(diff.T, z[:, k:k + 1] * diff)
self.scale_ = pinvh(self.scale_)
self.precs_ = self.dof_ * self.scale_
self.det_scale_ = linalg.det(self.scale_)
self.bound_prec_ = 0.5 * wishart_log_det(
self.dof_, self.scale_, self.det_scale_, n_features)
self.bound_prec_ -= 0.5 * self.dof_ * np.trace(self.scale_)
elif self.covariance_type == 'full':
for k in range(self.n_components):
sum_resp = np.sum(z.T[k])
self.dof_[k] = 2 + sum_resp + n_features
self.scale_[k] = (sum_resp + 1) * np.identity(n_features)
diff = X - self.means_[k]
self.scale_[k] += np.dot(diff.T, z[:, k:k + 1] * diff)
self.scale_[k] = pinvh(self.scale_[k])
self.precs_[k] = self.dof_[k] * self.scale_[k]
self.det_scale_[k] = linalg.det(self.scale_[k])
self.bound_prec_[k] = 0.5 * wishart_log_det(
self.dof_[k], self.scale_[k], self.det_scale_[k],
n_features)
self.bound_prec_[k] -= 0.5 * self.dof_[k] * np.trace(
self.scale_[k])
def _monitor(self, X, z, n, end=False):
"""Monitor the lower bound during iteration
Debug method to help see exactly when it is failing to converge as
expected.
Note: this is very expensive and should not be used by default."""
if self.verbose > 0:
print("Bound after updating %8s: %f" % (n, self.lower_bound(X, z)))
if end:
print("Cluster proportions:", self.gamma_.T[1])
print("covariance_type:", self.covariance_type)
def _do_mstep(self, X, z, params):
"""Maximize the variational lower bound
Update each of the parameters to maximize the lower bound."""
self._monitor(X, z, "z")
self._update_concentration(z)
self._monitor(X, z, "gamma")
if 'm' in params:
self._update_means(X, z)
self._monitor(X, z, "mu")
if 'c' in params:
self._update_precisions(X, z)
self._monitor(X, z, "a and b", end=True)
def _initialize_gamma(self):
"Initializes the concentration parameters"
self.gamma_ = self.alpha * np.ones((self.n_components, 3))
def _bound_concentration(self):
"""The variational lower bound for the concentration parameter."""
logprior = gammaln(self.alpha) * self.n_components
logprior += np.sum((self.alpha - 1) * (
digamma(self.gamma_.T[2]) - digamma(self.gamma_.T[1] +
self.gamma_.T[2])))
logprior += np.sum(- gammaln(self.gamma_.T[1] + self.gamma_.T[2]))
logprior += np.sum(gammaln(self.gamma_.T[1]) +
gammaln(self.gamma_.T[2]))
logprior -= np.sum((self.gamma_.T[1] - 1) * (
digamma(self.gamma_.T[1]) - digamma(self.gamma_.T[1] +
self.gamma_.T[2])))
logprior -= np.sum((self.gamma_.T[2] - 1) * (
digamma(self.gamma_.T[2]) - digamma(self.gamma_.T[1] +
self.gamma_.T[2])))
return logprior
def _bound_means(self):
"The variational lower bound for the mean parameters"
logprior = 0.
logprior -= 0.5 * squared_norm(self.means_)
logprior -= 0.5 * self.means_.shape[1] * self.n_components
return logprior
def _bound_precisions(self):
"""Returns the bound term related to precisions"""
logprior = 0.
if self.covariance_type == 'spherical':
logprior += np.sum(gammaln(self.dof_))
logprior -= np.sum(
(self.dof_ - 1) * digamma(np.maximum(0.5, self.dof_)))
logprior += np.sum(- np.log(self.scale_) + self.dof_
- self.precs_[:, 0])
elif self.covariance_type == 'diag':
logprior += np.sum(gammaln(self.dof_))
logprior -= np.sum(
(self.dof_ - 1) * digamma(np.maximum(0.5, self.dof_)))
logprior += np.sum(- np.log(self.scale_) + self.dof_ - self.precs_)
elif self.covariance_type == 'tied':
logprior += _bound_wishart(self.dof_, self.scale_, self.det_scale_)
elif self.covariance_type == 'full':
for k in range(self.n_components):
logprior += _bound_wishart(self.dof_[k],
self.scale_[k],
self.det_scale_[k])
return logprior
def _bound_proportions(self, z):
"""Returns the bound term related to proportions"""
dg12 = digamma(self.gamma_.T[1] + self.gamma_.T[2])
dg1 = digamma(self.gamma_.T[1]) - dg12
dg2 = digamma(self.gamma_.T[2]) - dg12
cz = np.cumsum(z[:, ::-1], axis=-1)[:, -2::-1]
logprior = np.sum(cz * dg2[:-1]) + np.sum(z * dg1)
del cz # Save memory
z_non_zeros = z[z > np.finfo(np.float32).eps]
logprior -= np.sum(z_non_zeros * np.log(z_non_zeros))
return logprior
def _logprior(self, z):
logprior = self._bound_concentration()
logprior += self._bound_means()
logprior += self._bound_precisions()
logprior += self._bound_proportions(z)
return logprior
def lower_bound(self, X, z):
"""returns a lower bound on model evidence based on X and membership"""
check_is_fitted(self, 'means_')
if self.covariance_type not in ['full', 'tied', 'diag', 'spherical']:
raise NotImplementedError("This ctype is not implemented: %s"
% self.covariance_type)
X = np.asarray(X)
if X.ndim == 1:
X = X[:, np.newaxis]
c = np.sum(z * _bound_state_log_lik(X, self._initial_bound +
self.bound_prec_, self.precs_,
self.means_, self.covariance_type))
return c + self._logprior(z)
def _set_weights(self):
for i in xrange(self.n_components):
self.weights_[i] = self.gamma_[i, 1] / (self.gamma_[i, 1]
+ self.gamma_[i, 2])
self.weights_ /= np.sum(self.weights_)
def _fit(self, X, y=None):
"""Estimate model parameters with the variational
algorithm.
For a full derivation and description of the algorithm see
doc/modules/dp-derivation.rst
or
http://scikit-learn.org/stable/modules/dp-derivation.html
        An initialization step is performed before entering the EM
        algorithm. If you want to avoid this step, set the keyword
        argument init_params to the empty string '' when creating
        the object. Likewise, if you would like just to do an
initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
responsibilities : array, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation.
"""
self.random_state_ = check_random_state(self.random_state)
# initialization step
X = check_array(X)
if X.ndim == 1:
X = X[:, np.newaxis]
n_samples, n_features = X.shape
z = np.ones((n_samples, self.n_components))
z /= self.n_components
self._initial_bound = - 0.5 * n_features * np.log(2 * np.pi)
self._initial_bound -= np.log(2 * np.pi * np.e)
if (self.init_params != '') or not hasattr(self, 'gamma_'):
self._initialize_gamma()
if 'm' in self.init_params or not hasattr(self, 'means_'):
self.means_ = cluster.KMeans(
n_clusters=self.n_components,
random_state=self.random_state_).fit(X).cluster_centers_[::-1]
if 'w' in self.init_params or not hasattr(self, 'weights_'):
self.weights_ = np.tile(1.0 / self.n_components, self.n_components)
if 'c' in self.init_params or not hasattr(self, 'precs_'):
if self.covariance_type == 'spherical':
self.dof_ = np.ones(self.n_components)
self.scale_ = np.ones(self.n_components)
self.precs_ = np.ones((self.n_components, n_features))
self.bound_prec_ = 0.5 * n_features * (
digamma(self.dof_) - np.log(self.scale_))
elif self.covariance_type == 'diag':
self.dof_ = 1 + 0.5 * n_features
self.dof_ *= np.ones((self.n_components, n_features))
self.scale_ = np.ones((self.n_components, n_features))
self.precs_ = np.ones((self.n_components, n_features))
self.bound_prec_ = 0.5 * (np.sum(digamma(self.dof_) -
np.log(self.scale_), 1))
self.bound_prec_ -= 0.5 * np.sum(self.precs_, 1)
elif self.covariance_type == 'tied':
self.dof_ = 1.
self.scale_ = np.identity(n_features)
self.precs_ = np.identity(n_features)
self.det_scale_ = 1.
self.bound_prec_ = 0.5 * wishart_log_det(
self.dof_, self.scale_, self.det_scale_, n_features)
self.bound_prec_ -= 0.5 * self.dof_ * np.trace(self.scale_)
elif self.covariance_type == 'full':
self.dof_ = (1 + self.n_components + n_samples)
self.dof_ *= np.ones(self.n_components)
self.scale_ = [2 * np.identity(n_features)
for _ in range(self.n_components)]
self.precs_ = [np.identity(n_features)
for _ in range(self.n_components)]
self.det_scale_ = np.ones(self.n_components)
self.bound_prec_ = np.zeros(self.n_components)
for k in range(self.n_components):
self.bound_prec_[k] = wishart_log_det(
self.dof_[k], self.scale_[k], self.det_scale_[k],
n_features)
self.bound_prec_[k] -= (self.dof_[k] *
np.trace(self.scale_[k]))
self.bound_prec_ *= 0.5
# EM algorithms
current_log_likelihood = None
# reset self.converged_ to False
self.converged_ = False
for i in range(self.n_iter):
prev_log_likelihood = current_log_likelihood
# Expectation step
curr_logprob, z = self.score_samples(X)
current_log_likelihood = (
curr_logprob.mean() + self._logprior(z) / n_samples)
# Check for convergence.
if prev_log_likelihood is not None:
change = abs(current_log_likelihood - prev_log_likelihood)
if change < self.tol:
self.converged_ = True
break
# Maximization step
self._do_mstep(X, z, self.params)
if self.n_iter == 0:
# Need to make sure that there is a z value to output
# Output zeros because it was just a quick initialization
z = np.zeros((X.shape[0], self.n_components))
self._set_weights()
return z
class VBGMM(DPGMM):
"""Variational Inference for the Gaussian Mixture Model
Variational inference for a Gaussian mixture model probability
distribution. This class allows for easy and efficient inference
of an approximate posterior distribution over the parameters of a
Gaussian mixture model with a fixed number of components.
Initialization is with normally-distributed means and identity
covariance, for proper convergence.
Read more in the :ref:`User Guide <vbgmm>`.
Parameters
----------
n_components: int, default 1
Number of mixture components.
covariance_type: string, default 'diag'
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
alpha: float, default 1
Real number representing the concentration parameter of
the dirichlet distribution. Intuitively, the higher the
value of alpha the more likely the variational mixture of
Gaussians model will use all components it can.
tol : float, default 1e-3
Convergence threshold.
n_iter : int, default 10
Maximum number of iterations to perform before convergence.
params : string, default 'wmc'
Controls which parameters are updated in the training
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars.
init_params : string, default 'wmc'
Controls which parameters are updated in the initialization
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
verbose : int, default 0
Controls output verbosity.
Attributes
----------
covariance_type : string
String describing the type of covariance parameters used by
the DP-GMM. Must be one of 'spherical', 'tied', 'diag', 'full'.
n_features : int
Dimensionality of the Gaussians.
n_components : int (read-only)
Number of mixture components.
weights_ : array, shape (`n_components`,)
Mixing weights for each mixture component.
means_ : array, shape (`n_components`, `n_features`)
Mean parameters for each mixture component.
precs_ : array
Precision (inverse covariance) parameters for each mixture
component. The shape depends on `covariance_type`::
            (`n_components`, `n_features`)              if 'spherical',
(`n_features`, `n_features`) if 'tied',
(`n_components`, `n_features`) if 'diag',
(`n_components`, `n_features`, `n_features`) if 'full'
converged_ : bool
True when convergence was reached in fit(), False
otherwise.
See Also
--------
GMM : Finite Gaussian mixture model fit with EM
DPGMM : Infinite Gaussian mixture model, using the dirichlet
process, fit with a variational algorithm
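    Examples
    --------
    A minimal usage sketch on synthetic data (illustrative only; assumes the
    legacy ``sklearn.mixture.VBGMM`` entry point of this module's era):
    >>> import numpy as np
    >>> from sklearn.mixture import VBGMM
    >>> X = np.vstack([np.random.randn(50, 2), 5. + np.random.randn(50, 2)])
    >>> model = VBGMM(n_components=3, alpha=1.0, n_iter=50)
    >>> weights = model.fit(X).weights_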
"""
def __init__(self, n_components=1, covariance_type='diag', alpha=1.0,
random_state=None, tol=1e-3, verbose=0,
min_covar=None, n_iter=10, params='wmc', init_params='wmc'):
super(VBGMM, self).__init__(
n_components, covariance_type, random_state=random_state,
tol=tol, verbose=verbose, min_covar=min_covar,
n_iter=n_iter, params=params, init_params=init_params)
self.alpha = float(alpha) / n_components
def score_samples(self, X):
"""Return the likelihood of the data under the model.
Compute the bound on log probability of X under the model
and return the posterior distribution (responsibilities) of
each mixture component for each element of X.
This is done by computing the parameters for the mean-field of
z for each observation.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X
responsibilities: array_like, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation
"""
check_is_fitted(self, 'gamma_')
X = check_array(X)
if X.ndim == 1:
X = X[:, np.newaxis]
dg = digamma(self.gamma_) - digamma(np.sum(self.gamma_))
if self.covariance_type not in ['full', 'tied', 'diag', 'spherical']:
raise NotImplementedError("This ctype is not implemented: %s"
% self.covariance_type)
p = _bound_state_log_lik(X, self._initial_bound + self.bound_prec_,
self.precs_, self.means_,
self.covariance_type)
z = p + dg
z = log_normalize(z, axis=-1)
bound = np.sum(z * p, axis=-1)
return bound, z
def _update_concentration(self, z):
for i in range(self.n_components):
self.gamma_[i] = self.alpha + np.sum(z.T[i])
def _initialize_gamma(self):
self.gamma_ = self.alpha * np.ones(self.n_components)
def _bound_proportions(self, z):
logprior = 0.
dg = digamma(self.gamma_)
dg -= digamma(np.sum(self.gamma_))
logprior += np.sum(dg.reshape((-1, 1)) * z.T)
z_non_zeros = z[z > np.finfo(np.float32).eps]
logprior -= np.sum(z_non_zeros * np.log(z_non_zeros))
return logprior
def _bound_concentration(self):
logprior = 0.
logprior = gammaln(np.sum(self.gamma_)) - gammaln(self.n_components
* self.alpha)
logprior -= np.sum(gammaln(self.gamma_) - gammaln(self.alpha))
sg = digamma(np.sum(self.gamma_))
logprior += np.sum((self.gamma_ - self.alpha)
* (digamma(self.gamma_) - sg))
return logprior
def _monitor(self, X, z, n, end=False):
"""Monitor the lower bound during iteration
Debug method to help see exactly when it is failing to converge as
expected.
Note: this is very expensive and should not be used by default."""
if self.verbose > 0:
print("Bound after updating %8s: %f" % (n, self.lower_bound(X, z)))
if end:
print("Cluster proportions:", self.gamma_)
print("covariance_type:", self.covariance_type)
def _set_weights(self):
self.weights_[:] = self.gamma_
self.weights_ /= np.sum(self.weights_)
| jjx02230808/project0223 | sklearn/mixture/dpgmm.py | Python | bsd-3-clause | 32,075 | [
"Gaussian"
] | c9fc56003209f231a1cc1e59da1023b379eefabef59deba579cfbd70d1832dc0 |
''' Script to check the correctness of the analysis. The analysis is done on raw data and all results are compared to a recorded analysis.
'''
import os
import shutil
import unittest
from testbeam_analysis import result_analysis
from testbeam_analysis.tools import analysis_utils, test_tools
testing_path = os.path.dirname(__file__)
class TestResultAnalysis(unittest.TestCase):
@classmethod
def setUpClass(cls):
# virtual X server for plots under headless LINUX travis testing is needed
if os.getenv('TRAVIS', False) and os.getenv('TRAVIS_OS_NAME', False) == 'linux':
from xvfbwrapper import Xvfb # virtual X server for plots under headless LINUX travis testing is needed
cls.vdisplay = Xvfb()
cls.vdisplay.start()
cls.output_folder = 'tmp_test_res_output'
test_tools.create_folder(cls.output_folder)
cls.pixel_size = [250, 50] * 4 # in um
cls.n_pixels = [80, 336] * 4
cls.z_positions = [0., 19500, 108800, 128300] # in um
@classmethod
def tearDownClass(cls): # remove created files
shutil.rmtree(cls.output_folder)
@unittest.SkipTest
def test_residuals_calculation(self):
residuals = result_analysis.calculate_residuals(input_tracks_file=analysis_utils.get_data('fixtures/result_analysis/Tracks_result.h5'),
input_alignment_file=analysis_utils.get_data('fixtures/result_analysis/Alignment_result.h5'),
output_residuals_file=os.path.join(self.output_folder, 'Residuals.h5'),
n_pixels=self.n_pixels,
pixel_size=self.pixel_size,
max_chi2=10000)
        # Only test row residuals; columns are too large (250 um) for a meaningful Gaussian residual distribution
self.assertAlmostEqual(residuals[1], 22.9135, msg='DUT 0 row residuals do not match', places=3)
self.assertAlmostEqual(residuals[3], 18.7317, msg='DUT 1 row residuals do not match', places=3)
self.assertAlmostEqual(residuals[5], 22.8645, msg='DUT 2 row residuals do not match', places=3)
self.assertAlmostEqual(residuals[7], 27.2816, msg='DUT 3 row residuals do not match', places=3)
@unittest.SkipTest
def test_efficiency_calculation(self):
efficiencies = result_analysis.calculate_efficiency(input_tracks_file=analysis_utils.get_data('fixtures/result_analysis/Tracks_result.h5'),
input_alignment_file=analysis_utils.get_data('fixtures/result_analysis/Alignment_result.h5'),
output_efficiency_file=os.path.join(self.output_folder, 'Efficiency.h5'),
bin_size=[(250, 50)]*4,
sensor_size=[(250 * 80., 336 * 50.)]*4,
pixel_size=[(250, 50)]*4,
n_pixels=[(80, 336)]*4,
minimum_track_density=2,
use_duts=None,
cut_distance=500,
max_distance=500,
#col_range=[(1250, 17500)]*4,
#row_range=[(1000, 16000)]*4,
force_prealignment=True)
self.assertAlmostEqual(efficiencies[0], 100.000, msg='DUT 0 efficiencies do not match', places=3)
self.assertAlmostEqual(efficiencies[1], 98.7013, msg='DUT 1 efficiencies do not match', places=3)
self.assertAlmostEqual(efficiencies[2], 97.4684, msg='DUT 2 efficiencies do not match', places=3)
self.assertAlmostEqual(efficiencies[3], 100.000, msg='DUT 3 efficiencies do not match', places=3)
if __name__ == '__main__':
import logging
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(name)s - [%(levelname)-8s] (%(threadName)-10s) %(message)s")
suite = unittest.TestLoader().loadTestsFromTestCase(TestResultAnalysis)
unittest.TextTestRunner(verbosity=2).run(suite)
| YannickDieter/testbeam_analysis | testbeam_analysis/testing/test_result_analysis.py | Python | mit | 4,563 | [
"Gaussian"
] | 1a6c212d65da776bd529f9ea5c3e680675bec54e0e7e1d49798385ac3183363d |
import ShtikerPage
from toontown.toonbase import ToontownGlobals
from direct.showbase import PythonUtil
from toontown.hood import ZoneUtil
from direct.gui.DirectGui import *
from panda3d.core import *
from panda3d.direct import *
from toontown.toonbase import TTLocalizer
class MapPage(ShtikerPage.ShtikerPage):
def __init__(self):
ShtikerPage.ShtikerPage.__init__(self)
def load(self):
ShtikerPage.ShtikerPage.load(self)
mapModel = loader.loadModel('phase_3.5/models/gui/toontown_map')
self.map = DirectFrame(parent=self, relief=None, image=mapModel.find('**/toontown_map'), image_scale=(1.8, 1, 1.35), scale=0.97, pos=(0, 0, 0.0775))
mapModel.removeNode()
self.allZones = []
for hood in ToontownGlobals.Hoods:
if hood not in [ToontownGlobals.GolfZone, ToontownGlobals.FunnyFarm]:
self.allZones.append(hood)
self.cloudScaleList = (((0.55, 0, 0.4), (0.35, 0, 0.25)),
(),
((0.45, 0, 0.45), (0.5, 0, 0.4)),
((0.7, 0, 0.45),),
((0.55, 0, 0.4),),
((0.6, 0, 0.4), (0.5332, 0, 0.32)),
((0.7, 0, 0.45), (0.7, 0, 0.45)),
((0.7998, 0, 0.39),),
((0.5, 0, 0.4),),
((-0.45, 0, 0.4),),
((-0.45, 0, 0.35),),
((0.5, 0, 0.35),),
((0.5, 0, 0.35),))
self.cloudPosList = (((0.575, 0.0, -0.04), (0.45, 0.0, -0.25)),
(),
((0.375, 0.0, 0.4), (0.5625, 0.0, 0.2)),
((-0.02, 0.0, 0.23),),
((-0.3, 0.0, -0.4),),
((0.25, 0.0, -0.425), (0.125, 0.0, -0.36)),
((-0.5625, 0.0, -0.07), (-0.45, 0.0, 0.2125)),
((-0.125, 0.0, 0.5),),
((0.66, 0.0, -0.4),),
((-0.68, 0.0, -0.444),),
((-0.6, 0.0, 0.45),),
((0.66, 0.0, 0.5),),
((0.4, 0.0, -0.35),))
self.labelPosList = ((0.594, 0.0, -0.075),
(0.0, 0.0, -0.1),
(0.475, 0.0, 0.25),
(0.1, 0.0, 0.15),
(-0.3, 0.0, -0.375),
(0.2, 0.0, -0.45),
(-0.55, 0.0, 0.0),
(-0.088, 0.0, 0.47),
(0.7, 0.0, -0.5),
(-0.7, 0.0, -0.5),
(-0.7, 0.0, 0.5),
(0.7, 0.0, 0.5),
(0.45, 0.0, -0.45))
self.labels = []
self.clouds = []
guiButton = loader.loadModel('phase_3/models/gui/quit_button')
buttonLoc = (0.45, 0, - 0.74)
if base.housingEnabled:
buttonLoc = (0.55, 0, -0.74)
self.safeZoneButton = DirectButton(
parent=self.map,
relief=None,
image=(guiButton.find('**/QuitBtn_UP'), guiButton.find('**/QuitBtn_DN'), guiButton.find('**/QuitBtn_RLVR')),
image_scale=(1.3, 1.1, 1.1),
pos=buttonLoc,
text=TTLocalizer.MapPageBackToPlayground,
text_scale=TTLocalizer.MPsafeZoneButton,
text_pos=(0, -0.02),
command=self.backToSafeZone)
self.goHomeButton = DirectButton(
parent=self.map,
relief=None,
image=(guiButton.find('**/QuitBtn_UP'), guiButton.find('**/QuitBtn_DN'), guiButton.find('**/QuitBtn_RLVR')),
image_scale=(0.66, 1.1, 1.1),
pos=(0.15, 0, -.74),
text=TTLocalizer.MapPageGoHome,
text_scale=TTLocalizer.MPgoHomeButton,
text_pos=(0, -0.02),
command=self.goHome)
self.goHomeButton.hide()
guiButton.removeNode()
self.hoodLabel = DirectLabel(
parent=self.map,
relief=None,
pos=(-0.43, 0, -0.726),
text='',
text_scale=TTLocalizer.MPhoodLabel,
text_pos=(0, 0),
text_wordwrap=TTLocalizer.MPhoodLabelWordwrap)
self.hoodLabel.hide()
cloudModel = loader.loadModel('phase_3.5/models/gui/cloud')
cloudImage = cloudModel.find('**/cloud')
for hood in self.allZones:
abbrev = base.cr.hoodMgr.getNameFromId(hood)
fullname = base.cr.hoodMgr.getFullnameFromId(hood)
hoodIndex = self.allZones.index(hood)
label = DirectButton(
parent=self.map,
relief=None,
pos=self.labelPosList[hoodIndex],
pad=(0.2, 0.16),
text=('', fullname, fullname),
text_bg=Vec4(1, 1, 1, 0.4),
text_scale=0.055,
text_wordwrap=8,
rolloverSound=None,
clickSound=None,
pressEffect=0,
command=self.__buttonCallback,
extraArgs=[hood],
sortOrder=1)
label.resetFrameSize()
self.labels.append(label)
hoodClouds = []
for cloudScale, cloudPos in zip(self.cloudScaleList[hoodIndex], self.cloudPosList[hoodIndex]):
cloud = DirectFrame(
parent=self.map,
relief=None,
state=DGG.DISABLED,
image=cloudImage,
scale=(cloudScale[0], cloudScale[1], cloudScale[2]),
pos=(cloudPos[0], cloudPos[1], cloudPos[2]))
cloud.hide()
hoodClouds.append(cloud)
self.clouds.append(hoodClouds)
cloudModel.removeNode()
self.resetFrameSize()
return
def unload(self):
for labelButton in self.labels:
labelButton.destroy()
del self.labels
del self.clouds
self.safeZoneButton.destroy()
self.goHomeButton.destroy()
ShtikerPage.ShtikerPage.unload(self)
def enter(self):
ShtikerPage.ShtikerPage.enter(self)
try:
zone = base.cr.playGame.getPlace().getZoneId()
except:
zone = 0
if base.localAvatar.lastHood >= ToontownGlobals.BossbotHQ:
self.safeZoneButton['text'] = TTLocalizer.MapPageBackToCogHQ
else:
self.safeZoneButton['text'] = TTLocalizer.MapPageBackToPlayground
if zone and ZoneUtil.isPlayground(zone) or self.book.safeMode:
self.safeZoneButton.hide()
else:
self.safeZoneButton.show()
if base.cr.playGame.getPlaceId() == ToontownGlobals.MyEstate and base.cr.playGame.hood.loader.atMyEstate() or self.book.safeMode:
self.goHomeButton.hide()
elif base.housingEnabled:
self.goHomeButton.show()
if base.cr.playGame.getPlaceId() == ToontownGlobals.MyEstate:
if base.cr.playGame.hood.loader.atMyEstate():
self.hoodLabel['text'] = TTLocalizer.MapPageYouAreAtHome
self.hoodLabel.show()
else:
avatar = base.cr.identifyAvatar(base.cr.playGame.hood.loader.estateOwnerId)
if avatar:
avName = avatar.getName()
self.hoodLabel['text'] = TTLocalizer.MapPageYouAreAtSomeonesHome % TTLocalizer.GetPossesive(avName, 'book')
self.hoodLabel.show()
elif zone:
hoodName = ToontownGlobals.hoodNameMap.get(ZoneUtil.getCanonicalHoodId(zone), ('',))[-1]
streetName = ToontownGlobals.StreetNames.get(ZoneUtil.getCanonicalBranchZone(zone), ('',))[-1]
if hoodName:
self.hoodLabel['text'] = TTLocalizer.MapPageYouAreHere % (hoodName, streetName)
self.hoodLabel.show()
else:
self.hoodLabel.hide()
else:
self.hoodLabel.hide()
safeZonesVisited = base.localAvatar.hoodsVisited
hoodsAvailable = base.cr.hoodMgr.getAvailableZones()
hoodVisibleList = PythonUtil.intersection(safeZonesVisited, hoodsAvailable)
hoodTeleportList = base.localAvatar.getTeleportAccess()
for hood in self.allZones:
label = self.labels[self.allZones.index(hood)]
clouds = self.clouds[self.allZones.index(hood)]
if not self.book.safeMode and hood in hoodVisibleList:
label['text_fg'] = (0, 0, 0, 1)
label.show()
for cloud in clouds:
cloud.hide()
fullname = base.cr.hoodMgr.getFullnameFromId(hood)
if hood in hoodTeleportList:
text = TTLocalizer.MapPageGoTo % fullname
label['text'] = ('', text, text)
else:
label['text'] = ('', fullname, fullname)
else:
label.hide()
for cloud in clouds:
cloud.show()
def exit(self):
ShtikerPage.ShtikerPage.exit(self)
def backToSafeZone(self):
self.doneStatus = {'mode': 'teleport',
'hood': base.localAvatar.lastHood}
messenger.send(self.doneEvent)
def goHome(self):
if config.GetBool('want-qa-regression', 0):
self.notify.info('QA-REGRESSION: VISITESTATE: Visit estate')
self.doneStatus = {'mode': 'gohome',
'hood': base.localAvatar.lastHood}
messenger.send(self.doneEvent)
def __buttonCallback(self, hood):
if hood in base.localAvatar.getTeleportAccess() and hood in base.cr.hoodMgr.getAvailableZones():
base.localAvatar.sendUpdate('checkTeleportAccess', [hood])
self.doneStatus = {'mode': 'teleport',
'hood': hood}
messenger.send(self.doneEvent)
| silly-wacky-3-town-toon/SOURCE-COD | toontown/shtiker/MapPage.py | Python | apache-2.0 | 9,414 | [
"VisIt"
] | ceae833ff41599575266aa747388e03698ef798578fe68a348b46ea274062a84 |
# -*- coding: utf-8 -*-
"""
Statistical measures of spike trains (e.g., Fano factor) and functions to estimate firing rates.
:copyright: Copyright 2014-2016 by the Elephant team, see AUTHORS.txt.
:license: Modified BSD, see LICENSE.txt for details.
"""
from __future__ import division, print_function
import numpy as np
import quantities as pq
import scipy.stats
import scipy.signal
import neo
from neo.core import SpikeTrain
import elephant.conversion as conv
import elephant.kernels as kernels
import warnings
# warnings.simplefilter('always', DeprecationWarning)
def isi(spiketrain, axis=-1):
"""
Return an array containing the inter-spike intervals of the SpikeTrain.
Accepts a Neo SpikeTrain, a Quantity array, or a plain NumPy array.
If either a SpikeTrain or Quantity array is provided, the return value will
be a quantities array, otherwise a plain NumPy array. The units of
the quantities array will be the same as spiketrain.
Parameters
----------
spiketrain : Neo SpikeTrain or Quantity array or NumPy ndarray
The spike times.
axis : int, optional
The axis along which the difference is taken.
Default is the last axis.
Returns
-------
NumPy array or quantities array.
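    Examples
    --------
    A minimal sketch (hypothetical spike times); passing a plain NumPy array
    returns a plain array of inter-spike intervals:
    >>> import numpy as np
    >>> intervals = isi(np.array([1.0, 2.5, 5.0]))  # -> array([ 1.5,  2.5])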
"""
if axis is None:
axis = -1
intervals = np.diff(spiketrain, axis=axis)
if hasattr(spiketrain, 'waveforms'):
intervals = pq.Quantity(intervals.magnitude, units=spiketrain.units)
return intervals
def mean_firing_rate(spiketrain, t_start=None, t_stop=None, axis=None):
"""
Return the firing rate of the SpikeTrain.
Accepts a Neo SpikeTrain, a Quantity array, or a plain NumPy array.
If either a SpikeTrain or Quantity array is provided, the return value will
be a quantities array, otherwise a plain NumPy array. The units of
the quantities array will be the inverse of the spiketrain.
The interval over which the firing rate is calculated can be optionally
controlled with `t_start` and `t_stop`
Parameters
----------
spiketrain : Neo SpikeTrain or Quantity array or NumPy ndarray
The spike times.
t_start : float or Quantity scalar, optional
The start time to use for the interval.
        If not specified, retrieved from the `t_start`
attribute of `spiketrain`. If that is not present, default to
`0`. Any value from `spiketrain` below this value is ignored.
t_stop : float or Quantity scalar, optional
The stop time to use for the time points.
If not specified, retrieved from the `t_stop`
attribute of `spiketrain`. If that is not present, default to
the maximum value of `spiketrain`. Any value from
`spiketrain` above this value is ignored.
axis : int, optional
The axis over which to do the calculation.
Default is `None`, do the calculation over the flattened array.
Returns
-------
float, quantities scalar, NumPy array or quantities array.
Notes
-----
If `spiketrain` is a Quantity or Neo SpikeTrain and `t_start` or `t_stop`
are not, `t_start` and `t_stop` are assumed to have the same units as
`spiketrain`.
Raises
------
TypeError
If `spiketrain` is a NumPy array and `t_start` or `t_stop`
is a quantity scalar.
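    Examples
    --------
    A minimal sketch (hypothetical spike times): four spikes in a window of
    length 4 give a rate of 1.0 per unit time of the input:
    >>> import numpy as np
    >>> rate = mean_firing_rate(np.array([0.5, 1.5, 2.5, 3.5]),
    ...                         t_start=0, t_stop=4)  # -> 1.0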
"""
if t_start is None:
t_start = getattr(spiketrain, 't_start', 0)
    found_t_stop = False
if t_stop is None:
if hasattr(spiketrain, 't_stop'):
t_stop = spiketrain.t_stop
else:
t_stop = np.max(spiketrain, axis=axis)
            found_t_stop = True
# figure out what units, if any, we are dealing with
if hasattr(spiketrain, 'units'):
units = spiketrain.units
else:
units = None
# convert everything to the same units
if hasattr(t_start, 'units'):
if units is None:
raise TypeError('t_start cannot be a Quantity if '
'spiketrain is not a quantity')
t_start = t_start.rescale(units)
elif units is not None:
t_start = pq.Quantity(t_start, units=units)
if hasattr(t_stop, 'units'):
if units is None:
raise TypeError('t_stop cannot be a Quantity if '
'spiketrain is not a quantity')
t_stop = t_stop.rescale(units)
elif units is not None:
t_stop = pq.Quantity(t_stop, units=units)
    if not axis or not found_t_stop:
return np.sum((spiketrain >= t_start) & (spiketrain <= t_stop),
axis=axis) / (t_stop - t_start)
else:
# this is needed to handle broadcasting between spiketrain and t_stop
t_stop_test = np.expand_dims(t_stop, axis)
return np.sum((spiketrain >= t_start) & (spiketrain <= t_stop_test),
axis=axis) / (t_stop - t_start)
# we make `cv` an alias for scipy.stats.variation for the convenience
# of former NeuroTools users
cv = scipy.stats.variation
def fanofactor(spiketrains):
"""
Evaluates the empirical Fano factor F of the spike counts of
a list of `neo.core.SpikeTrain` objects.
Given the vector v containing the observed spike counts (one per
spike train) in the time window [t0, t1], F is defined as:
F := var(v)/mean(v).
The Fano factor is typically computed for spike trains representing the
activity of the same neuron over different trials. The higher F, the larger
the cross-trial non-stationarity. In theory for a time-stationary Poisson
process, F=1.
Parameters
----------
spiketrains : list of neo.SpikeTrain objects, quantity arrays, numpy arrays or lists
Spike trains for which to compute the Fano factor of spike counts.
Returns
-------
fano : float or nan
The Fano factor of the spike counts of the input spike trains. If an
empty list is specified, or if all spike trains are empty, F:=nan.
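    Examples
    --------
    A minimal sketch (hypothetical spike times; plain lists work because only
    the spike count of each train is used):
    >>> ff = fanofactor([[0.1, 0.4, 0.7], [0.2, 0.5], [0.1, 0.3, 0.6, 0.9]])
    >>> # -> var([3, 2, 4]) / mean([3, 2, 4]) = 0.2222...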
"""
# Build array of spike counts (one per spike train)
spike_counts = np.array([len(t) for t in spiketrains])
# Compute FF
if all([count == 0 for count in spike_counts]):
fano = np.nan
else:
fano = spike_counts.var() / spike_counts.mean()
return fano
def lv(v):
"""
Calculate the measure of local variation LV for
a sequence of time intervals between events.
Given a vector v containing a sequence of intervals, the LV is
defined as:
    .. math::
        LV := \\frac{1}{N-1} \\sum_{i=1}^{N-1}
              \\frac{3 (isi_i - isi_{i+1})^2}{(isi_i + isi_{i+1})^2}
    where N is the number of intervals in v.
The LV is typically computed as a substitute for the classical
coefficient of variation for sequences of events which include
some (relatively slow) rate fluctuation. As with the CV, LV=1 for
a sequence of intervals generated by a Poisson process.
Parameters
----------
v : quantity array, numpy array or list
Vector of consecutive time intervals
Returns
-------
lvar : float
The LV of the inter-spike interval of the input sequence.
Raises
------
AttributeError :
If an empty list is specified, or if the sequence has less
than two entries, an AttributeError will be raised.
ValueError :
Only vector inputs are supported. If a matrix is passed to the
function a ValueError will be raised.
References
----------
..[1] Shinomoto, S., Shima, K., & Tanji, J. (2003). Differences in spiking
patterns among cortical neurons. Neural Computation, 15, 2823–2842.
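    Examples
    --------
    A minimal sketch (hypothetical intervals): a perfectly regular sequence of
    intervals yields LV = 0, while irregular sequences give larger values:
    >>> import numpy as np
    >>> lv_regular = lv(np.array([1., 1., 1., 1.]))  # -> 0.0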
"""
    # convert to array
    v = np.asarray(v)
    # raise error if input is multi-dimensional (only vectors are supported)
    if v.ndim > 1:
        raise ValueError("Only vector inputs are supported. Please provide "
                         "a one-dimensional sequence of intervals.")
    # ensure we have enough entries
    if v.size < 2:
        raise AttributeError("Input size is too small. Please provide "
                             "an input with more than 1 entry.")
    # calculate LV and return result
    return 3. * np.mean(np.power(np.diff(v) / (v[:-1] + v[1:]), 2))
# sigma2kw and kw2sigma only needed for oldfct_instantaneous_rate!
# to finally be taken out of Elephant
def sigma2kw(form):
warnings.simplefilter('always', DeprecationWarning)
warnings.warn("deprecated", DeprecationWarning, stacklevel=2)
if form.upper() == 'BOX':
coeff = 2.0 * np.sqrt(3)
elif form.upper() == 'TRI':
coeff = 2.0 * np.sqrt(6)
elif form.upper() == 'EPA':
coeff = 2.0 * np.sqrt(5)
elif form.upper() == 'GAU':
coeff = 2.0 * 2.7 # > 99% of distribution weight
elif form.upper() == 'ALP':
coeff = 5.0
elif form.upper() == 'EXP':
coeff = 5.0
return coeff
def kw2sigma(form):
warnings.simplefilter('always', DeprecationWarning)
warnings.warn("deprecated", DeprecationWarning, stacklevel=2)
return 1/sigma2kw(form)
# to finally be taken out of Elephant
def make_kernel(form, sigma, sampling_period, direction=1):
"""
Creates kernel functions for convolution.
Constructs a numeric linear convolution kernel of basic shape to be used
for data smoothing (linear low pass filtering) and firing rate estimation
from single trial or trial-averaged spike trains.
    Exponential and alpha kernels may also be used to represent postsynaptic
currents / potentials in a linear (current-based) model.
Parameters
----------
form : {'BOX', 'TRI', 'GAU', 'EPA', 'EXP', 'ALP'}
Kernel form. Currently implemented forms are BOX (boxcar),
TRI (triangle), GAU (gaussian), EPA (epanechnikov), EXP (exponential),
ALP (alpha function). EXP and ALP are asymmetric kernel forms and
assume optional parameter `direction`.
sigma : Quantity
Standard deviation of the distribution associated with kernel shape.
This parameter defines the time resolution of the kernel estimate
and makes different kernels comparable (cf. [1] for symmetric kernels).
This is used here as an alternative definition to the cut-off
frequency of the associated linear filter.
    sampling_period : Quantity
Temporal resolution of input and output.
direction : {-1, 1}
Asymmetric kernels have two possible directions.
The values are -1 or 1, default is 1. The
definition here is that for direction = 1 the
kernel represents the impulse response function
of the linear filter. Default value is 1.
Returns
-------
kernel : numpy.ndarray
Array of kernel. The length of this array is always an odd
number to represent symmetric kernels such that the center bin
        coincides with the median of the numeric array, i.e. for a
triangle, the maximum will be at the center bin with equal
number of bins to the right and to the left.
norm : float
For rate estimates. The kernel vector is normalized such that
the sum of all entries equals unity sum(kernel)=1. When
estimating rate functions from discrete spike data (0/1) the
additional parameter `norm` allows for the normalization to
rate in spikes per second.
For example:
``rate = norm * scipy.signal.lfilter(kernel, 1, spike_data)``
m_idx : int
Index of the numerically determined median (center of gravity)
of the kernel function.
Examples
--------
    To obtain the single-trial rate function of a trial one should use::
r = norm * scipy.signal.fftconvolve(sua, kernel)
    To obtain the trial-averaged rate function one should use::
r_avg = norm * scipy.signal.fftconvolve(sua, np.mean(X,1))
where `X` is an array of shape `(l,n)`, `n` is the number of trials and
`l` is the length of each trial.
See also
--------
elephant.statistics.instantaneous_rate
References
----------
.. [1] Meier R, Egert U, Aertsen A, Nawrot MP, "FIND - a unified framework
for neural data analysis"; Neural Netw. 2008 Oct; 21(8):1085-93.
.. [2] Nawrot M, Aertsen A, Rotter S, "Single-trial estimation of neuronal
firing rates - from single neuron spike trains to population activity";
J. Neurosci Meth 94: 81-92; 1999.
"""
warnings.simplefilter('always', DeprecationWarning)
warnings.warn("deprecated", DeprecationWarning, stacklevel=2)
forms_abbreviated = np.array(['BOX', 'TRI', 'GAU', 'EPA', 'EXP', 'ALP'])
forms_verbose = np.array(['boxcar', 'triangle', 'gaussian', 'epanechnikov',
'exponential', 'alpha'])
if form in forms_verbose:
form = forms_abbreviated[forms_verbose == form][0]
assert form.upper() in ('BOX', 'TRI', 'GAU', 'EPA', 'EXP', 'ALP'), \
"form must be one of either 'BOX','TRI','GAU','EPA','EXP' or 'ALP'!"
assert direction in (1, -1), "direction must be either 1 or -1"
# conversion to SI units (s)
if sigma < 0:
raise ValueError('sigma must be positive!')
SI_sigma = sigma.rescale('s').magnitude
SI_time_stamp_resolution = sampling_period.rescale('s').magnitude
norm = 1. / SI_time_stamp_resolution
if form.upper() == 'BOX':
w = 2.0 * SI_sigma * np.sqrt(3)
# always odd number of bins
width = 2 * np.floor(w / 2.0 / SI_time_stamp_resolution) + 1
height = 1. / width
        kernel = np.ones((1, int(width))) * height  # area = 1
elif form.upper() == 'TRI':
w = 2 * SI_sigma * np.sqrt(6)
halfwidth = np.floor(w / 2.0 / SI_time_stamp_resolution)
trileft = np.arange(1, halfwidth + 2)
triright = np.arange(halfwidth, 0, -1) # odd number of bins
triangle = np.append(trileft, triright)
kernel = triangle / triangle.sum() # area = 1
elif form.upper() == 'EPA':
w = 2.0 * SI_sigma * np.sqrt(5)
halfwidth = np.floor(w / 2.0 / SI_time_stamp_resolution)
base = np.arange(-halfwidth, halfwidth + 1)
parabula = base**2
epanech = parabula.max() - parabula # inverse parabula
kernel = epanech / epanech.sum() # area = 1
elif form.upper() == 'GAU':
w = 2.0 * SI_sigma * 2.7 # > 99% of distribution weight
halfwidth = np.floor(w / 2.0 / SI_time_stamp_resolution) # always odd
base = np.arange(-halfwidth, halfwidth + 1) * SI_time_stamp_resolution
g = np.exp(
-(base**2) / 2.0 / SI_sigma**2) / SI_sigma / np.sqrt(2.0 * np.pi)
kernel = g / g.sum() # area = 1
elif form.upper() == 'ALP':
w = 5.0 * SI_sigma
alpha = np.arange(
1, (
2.0 * np.floor(w / SI_time_stamp_resolution / 2.0) + 1) +
1) * SI_time_stamp_resolution
alpha = (2.0 / SI_sigma**2) * alpha * np.exp(
-alpha * np.sqrt(2) / SI_sigma)
kernel = alpha / alpha.sum() # normalization
if direction == -1:
kernel = np.flipud(kernel)
elif form.upper() == 'EXP':
w = 5.0 * SI_sigma
expo = np.arange(
1, (
2.0 * np.floor(w / SI_time_stamp_resolution / 2.0) + 1) +
1) * SI_time_stamp_resolution
expo = np.exp(-expo / SI_sigma)
kernel = expo / expo.sum()
if direction == -1:
kernel = np.flipud(kernel)
kernel = kernel.ravel()
m_idx = np.nonzero(kernel.cumsum() >= 0.5)[0].min()
return kernel, norm, m_idx
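# --- Editor's illustrative sketch (not part of the original module) ---
# Shows one call of the deprecated make_kernel(); `quantities` is assumed to be
# importable (it is used as `pq` elsewhere in this module). The kernel sums to
# roughly 1 and `norm` rescales binned spike counts to spikes per second.
def _example_make_kernel():
    import quantities as pq
    kernel, norm, m_idx = make_kernel(form='GAU', sigma=10 * pq.ms,
                                      sampling_period=1 * pq.ms)
    return kernel.sum(), norm, m_idx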
# to finally be taken out of Elephant
def oldfct_instantaneous_rate(spiketrain, sampling_period, form,
sigma='auto', t_start=None, t_stop=None,
acausal=True, trim=False):
"""
Estimate instantaneous firing rate by kernel convolution.
Parameters
-----------
spiketrain: 'neo.SpikeTrain'
Neo object that contains spike times, the unit of the time stamps
and t_start and t_stop of the spike train.
sampling_period : Quantity
time stamp resolution of the spike times. the same resolution will
be assumed for the kernel
form : {'BOX', 'TRI', 'GAU', 'EPA', 'EXP', 'ALP'}
Kernel form. Currently implemented forms are BOX (boxcar),
TRI (triangle), GAU (gaussian), EPA (epanechnikov), EXP (exponential),
ALP (alpha function). EXP and ALP are asymmetric kernel forms and
assume optional parameter `direction`.
sigma : string or Quantity
Standard deviation of the distribution associated with kernel shape.
This parameter defines the time resolution of the kernel estimate
and makes different kernels comparable (cf. [1] for symmetric kernels).
This is used here as an alternative definition to the cut-off
frequency of the associated linear filter.
Default value is 'auto'. In this case, the optimized kernel width for
the rate estimation is calculated according to [1]. Note that the
automatized calculation of the kernel width ONLY works for gaussian
kernel shapes!
t_start : Quantity (Optional)
start time of the interval used to compute the firing rate, if None
assumed equal to spiketrain.t_start
Default:None
    t_stop : Quantity
End time of the interval used to compute the firing rate (included).
If none assumed equal to spiketrain.t_stop
Default:None
acausal : bool
if True, acausal filtering is used, i.e., the gravity center of the
filter function is aligned with the spike to convolve
        Default: True
m_idx : int
index of the value in the kernel function vector that corresponds
to its gravity center. this parameter is not mandatory for
symmetrical kernels but it is required when asymmetrical kernels
are to be aligned at their gravity center with the event times if None
is assumed to be the median value of the kernel support
Default : None
trim : bool
if True, only the 'valid' region of the convolved
signal are returned, i.e., the points where there
isn't complete overlap between kernel and spike train
are discarded
NOTE: if True and an asymmetrical kernel is provided
the output will not be aligned with [t_start, t_stop]
Returns
-------
rate : neo.AnalogSignalArray
Contains the rate estimation in unit hertz (Hz).
Has a property 'rate.times' which contains the time axis of the rate
estimate. The unit of this property is the same as the resolution that
is given as an argument to the function.
Raises
------
TypeError:
If argument value for the parameter `sigma` is not a quantity object
or string 'auto'.
See also
--------
elephant.statistics.make_kernel
References
----------
..[1] H. Shimazaki, S. Shinomoto, J Comput Neurosci (2010) 29:171–182.
"""
warnings.simplefilter('always', DeprecationWarning)
warnings.warn("deprecated", DeprecationWarning, stacklevel=2)
if sigma == 'auto':
form = 'GAU'
unit = spiketrain.units
kernel_width = sskernel(spiketrain.magnitude, tin=None,
bootstrap=True)['optw']
sigma = kw2sigma(form) * kernel_width * unit
elif not isinstance(sigma, pq.Quantity):
raise TypeError('sigma must be either a quantities object or "auto".'
' Found: %s, value %s' % (type(sigma), str(sigma)))
kernel, norm, m_idx = make_kernel(form=form, sigma=sigma,
sampling_period=sampling_period)
units = pq.CompoundUnit(
"%s*s" % str(sampling_period.rescale('s').magnitude))
spiketrain = spiketrain.rescale(units)
if t_start is None:
t_start = spiketrain.t_start
else:
t_start = t_start.rescale(spiketrain.units)
if t_stop is None:
t_stop = spiketrain.t_stop
else:
t_stop = t_stop.rescale(spiketrain.units)
time_vector = np.zeros(int((t_stop - t_start)) + 1)
spikes_slice = spiketrain.time_slice(t_start, t_stop) \
if len(spiketrain) else np.array([])
for spike in spikes_slice:
index = int((spike - t_start))
time_vector[index] += 1
r = norm * scipy.signal.fftconvolve(time_vector, kernel, 'full')
if np.any(r < 0):
warnings.warn('Instantaneous firing rate approximation contains '
'negative values, possibly caused due to machine '
'precision errors')
if acausal:
if not trim:
r = r[m_idx:-(kernel.size - m_idx)]
elif trim:
r = r[2 * m_idx:-2 * (kernel.size - m_idx)]
t_start = t_start + m_idx * spiketrain.units
t_stop = t_stop - ((kernel.size) - m_idx) * spiketrain.units
else:
if not trim:
r = r[m_idx:-(kernel.size - m_idx)]
elif trim:
r = r[2 * m_idx:-2 * (kernel.size - m_idx)]
t_start = t_start + m_idx * spiketrain.units
t_stop = t_stop - ((kernel.size) - m_idx) * spiketrain.units
rate = neo.AnalogSignalArray(signal=r.reshape(r.size, 1),
sampling_period=sampling_period,
units=pq.Hz, t_start=t_start)
return rate, sigma
def instantaneous_rate(spiketrain, sampling_period, kernel='auto',
cutoff=5.0, t_start=None, t_stop=None, trim=False):
"""
Estimates instantaneous firing rate by kernel convolution.
Parameters
-----------
spiketrain : 'neo.SpikeTrain'
Neo object that contains spike times, the unit of the time stamps
and t_start and t_stop of the spike train.
sampling_period : Time Quantity
Time stamp resolution of the spike times. The same resolution will
be assumed for the kernel
kernel : string 'auto' or callable object of :class:`Kernel` from module
'kernels.py'. Currently implemented kernel forms are rectangular,
triangular, epanechnikovlike, gaussian, laplacian, exponential,
and alpha function.
Example: kernel = kernels.RectangularKernel(sigma=10*ms, invert=False)
The kernel is used for convolution with the spike train and its
standard deviation determines the time resolution of the instantaneous
rate estimation.
Default: 'auto'. In this case, the optimized kernel width for the
rate estimation is calculated according to [1] and with this width
a gaussian kernel is constructed. Automatized calculation of the
kernel width is not available for other than gaussian kernel shapes.
cutoff : float
This factor determines the cutoff of the probability distribution of
the kernel, i.e., the considered width of the kernel in terms of
multiples of the standard deviation sigma.
Default: 5.0
t_start : Time Quantity (optional)
Start time of the interval used to compute the firing rate. If None
assumed equal to spiketrain.t_start
Default: None
t_stop : Time Quantity (optional)
End time of the interval used to compute the firing rate (included).
If None assumed equal to spiketrain.t_stop
Default: None
trim : bool
if False, the output of the Fast Fourier Transformation being a longer
vector than the input vector by the size of the kernel is reduced back
to the original size of the considered time interval of the spiketrain
using the median of the kernel.
if True, only the region of the convolved signal is returned, where
there is complete overlap between kernel and spike train. This is
achieved by reducing the length of the output of the Fast Fourier
Transformation by a total of two times the size of the kernel, and
t_start and t_stop are adjusted.
Default: False
Returns
-------
rate : neo.AnalogSignalArray
Contains the rate estimation in unit hertz (Hz).
Has a property 'rate.times' which contains the time axis of the rate
estimate. The unit of this property is the same as the resolution that
is given via the argument 'sampling_period' to the function.
Raises
------
TypeError:
If `spiketrain` is not an instance of :class:`SpikeTrain` of Neo.
If `sampling_period` is not a time quantity.
If `kernel` is neither instance of :class:`Kernel` or string 'auto'.
If `cutoff` is neither float nor int.
If `t_start` and `t_stop` are neither None nor a time quantity.
If `trim` is not bool.
ValueError:
If `sampling_period` is smaller than zero.
Example
--------
    kernel = kernels.AlphaKernel(sigma=0.05*pq.s, invert=True)
    rate = instantaneous_rate(spiketrain, sampling_period=2*pq.ms, kernel=kernel)
References
----------
..[1] H. Shimazaki, S. Shinomoto, J Comput Neurosci (2010) 29:171–182.
"""
# Checks of input variables:
if not isinstance(spiketrain, SpikeTrain):
raise TypeError(
"spiketrain must be instance of :class:`SpikeTrain` of Neo!\n"
" Found: %s, value %s" % (type(spiketrain), str(spiketrain)))
if not (isinstance(sampling_period, pq.Quantity) and
sampling_period.dimensionality.simplified ==
pq.Quantity(1, "s").dimensionality):
raise TypeError(
"The sampling period must be a time quantity!\n"
" Found: %s, value %s" % (type(sampling_period), str(sampling_period)))
if sampling_period.magnitude < 0:
raise ValueError("The sampling period must be larger than zero.")
if kernel == 'auto':
kernel_width = sskernel(spiketrain.magnitude, tin=None,
bootstrap=True)['optw']
unit = spiketrain.units
sigma = 1/(2.0 * 2.7) * kernel_width * unit
# factor 2.0 connects kernel width with its half width,
# factor 2.7 connects half width of Gaussian distribution with
# 99% probability mass with its standard deviation.
kernel = kernels.GaussianKernel(sigma)
elif not isinstance(kernel, kernels.Kernel):
raise TypeError(
"kernel must be either instance of :class:`Kernel` "
"or the string 'auto'!\n"
" Found: %s, value %s" % (type(kernel), str(kernel)))
if not (isinstance(cutoff, float) or isinstance(cutoff, int)):
raise TypeError("cutoff must be float or integer!")
if not (t_start is None or (isinstance(t_start, pq.Quantity) and
t_start.dimensionality.simplified ==
pq.Quantity(1, "s").dimensionality)):
raise TypeError("t_start must be a time quantity!")
if not (t_stop is None or (isinstance(t_stop, pq.Quantity) and
t_stop.dimensionality.simplified ==
pq.Quantity(1, "s").dimensionality)):
raise TypeError("t_stop must be a time quantity!")
if not (isinstance(trim, bool)):
raise TypeError("trim must be bool!")
# main function:
units = pq.CompoundUnit("%s*s" % str(sampling_period.rescale('s').magnitude))
spiketrain = spiketrain.rescale(units)
if t_start is None:
t_start = spiketrain.t_start
else:
t_start = t_start.rescale(spiketrain.units)
if t_stop is None:
t_stop = spiketrain.t_stop
else:
t_stop = t_stop.rescale(spiketrain.units)
time_vector = np.zeros(int((t_stop - t_start)) + 1)
spikes_slice = spiketrain.time_slice(t_start, t_stop) \
if len(spiketrain) else np.array([])
for spike in spikes_slice:
index = int((spike - t_start))
time_vector[index] += 1
if cutoff < kernel.min_cutoff:
cutoff = kernel.min_cutoff
warnings.warn("The width of the kernel was adjusted to a minimally "
"allowed width.")
t_arr = np.arange(-cutoff * kernel.sigma.rescale(units).magnitude,
cutoff * kernel.sigma.rescale(units).magnitude +
sampling_period.rescale(units).magnitude,
sampling_period.rescale(units).magnitude) * units
r = scipy.signal.fftconvolve(time_vector,
kernel(t_arr).rescale(pq.Hz).magnitude, 'full')
if np.any(r < 0):
warnings.warn("Instantaneous firing rate approximation contains "
"negative values, possibly caused due to machine "
"precision errors.")
if not trim:
r = r[kernel.median_index(t_arr):-(kernel(t_arr).size -
kernel.median_index(t_arr))]
elif trim:
r = r[2 * kernel.median_index(t_arr):-2 * (kernel(t_arr).size -
kernel.median_index(t_arr))]
t_start += kernel.median_index(t_arr) * spiketrain.units
t_stop -= (kernel(t_arr).size -
kernel.median_index(t_arr)) * spiketrain.units
rate = neo.AnalogSignalArray(signal=r.reshape(r.size, 1),
sampling_period=sampling_period,
units=pq.Hz, t_start=t_start, t_stop=t_stop)
return rate
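# --- Editor's illustrative sketch (not part of the original module) ---
# Minimal end-to-end call of instantaneous_rate() with an explicit Gaussian
# kernel. It assumes `neo`, `quantities` and the `kernels` module are already
# imported at the top of this file (they are used throughout); the spike times
# are arbitrary.
def _example_instantaneous_rate():
    import numpy as np
    import quantities as pq
    import neo
    train = neo.SpikeTrain(np.array([0.12, 0.34, 0.56, 0.78]) * pq.s,
                           t_start=0.0 * pq.s, t_stop=1.0 * pq.s)
    gauss = kernels.GaussianKernel(sigma=50 * pq.ms)
    rate = instantaneous_rate(train, sampling_period=10 * pq.ms, kernel=gauss)
    return rate  # neo.AnalogSignalArray of the firing rate in Hz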
def time_histogram(spiketrains, binsize, t_start=None, t_stop=None,
output='counts', binary=False):
"""
Time Histogram of a list of :attr:`neo.SpikeTrain` objects.
Parameters
----------
spiketrains : List of neo.SpikeTrain objects
Spiketrains with a common time axis (same `t_start` and `t_stop`)
binsize : quantities.Quantity
Width of the histogram's time bins.
t_start, t_stop : Quantity (optional)
Start and stop time of the histogram. Only events in the input
`spiketrains` falling between `t_start` and `t_stop` (both included)
are considered in the histogram. If `t_start` and/or `t_stop` are not
specified, the maximum `t_start` of all :attr:spiketrains is used as
`t_start`, and the minimum `t_stop` is used as `t_stop`.
Default: t_start = t_stop = None
output : str (optional)
Normalization of the histogram. Can be one of:
        * `counts`: spike counts at each bin (as integer numbers)
* `mean`: mean spike counts per spike train
* `rate`: mean spike rate per spike train. Like 'mean', but the
counts are additionally normalized by the bin width.
binary : bool (optional)
        If **True**, all spiketrain objects are first binned to a binary
        representation (using the `BinnedSpikeTrain` class in the
        `conversion` module) and the histogram is calculated from this
        representation.
Note that the output is not binary, but a histogram of the converted,
binary representation.
Default: False
Returns
-------
time_hist : neo.AnalogSignalArray
A neo.AnalogSignalArray object containing the histogram values.
`AnalogSignal[j]` is the histogram computed between
`t_start + j * binsize` and `t_start + (j + 1) * binsize`.
See also
--------
elephant.conversion.BinnedSpikeTrain
"""
min_tstop = 0
if t_start is None:
# Find the internal range for t_start, where all spike trains are
# defined; cut all spike trains taking that time range only
max_tstart, min_tstop = conv._get_start_stop_from_input(spiketrains)
t_start = max_tstart
if not all([max_tstart == t.t_start for t in spiketrains]):
warnings.warn(
"Spiketrains have different t_start values -- "
"using maximum t_start as t_start.")
if t_stop is None:
# Find the internal range for t_stop
if min_tstop:
t_stop = min_tstop
if not all([min_tstop == t.t_stop for t in spiketrains]):
warnings.warn(
"Spiketrains have different t_stop values -- "
"using minimum t_stop as t_stop.")
else:
min_tstop = conv._get_start_stop_from_input(spiketrains)[1]
t_stop = min_tstop
if not all([min_tstop == t.t_stop for t in spiketrains]):
warnings.warn(
"Spiketrains have different t_stop values -- "
"using minimum t_stop as t_stop.")
sts_cut = [st.time_slice(t_start=t_start, t_stop=t_stop) for st in
spiketrains]
# Bin the spike trains and sum across columns
bs = conv.BinnedSpikeTrain(sts_cut, t_start=t_start, t_stop=t_stop,
binsize=binsize)
if binary:
bin_hist = bs.to_sparse_bool_array().sum(axis=0)
else:
bin_hist = bs.to_sparse_array().sum(axis=0)
# Flatten array
bin_hist = np.ravel(bin_hist)
# Renormalise the histogram
if output == 'counts':
# Raw
bin_hist = bin_hist * pq.dimensionless
elif output == 'mean':
# Divide by number of input spike trains
bin_hist = bin_hist * 1. / len(spiketrains) * pq.dimensionless
elif output == 'rate':
# Divide by number of input spike trains and bin width
bin_hist = bin_hist * 1. / len(spiketrains) / binsize
else:
raise ValueError('Parameter output is not valid.')
return neo.AnalogSignalArray(signal=bin_hist.reshape(bin_hist.size, 1),
sampling_period=binsize, units=bin_hist.units,
t_start=t_start)
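# --- Editor's illustrative sketch (not part of the original module) ---
# PSTH-style use of time_histogram(): two toy spike trains with a common time
# axis, binned at 100 ms and normalised to a rate. `neo` and `quantities` are
# assumed, as elsewhere in this module; spike times are arbitrary.
def _example_time_histogram():
    import quantities as pq
    import neo
    sts = [neo.SpikeTrain([0.1, 0.4, 0.7] * pq.s, t_start=0.0 * pq.s, t_stop=1.0 * pq.s),
           neo.SpikeTrain([0.2, 0.5] * pq.s, t_start=0.0 * pq.s, t_stop=1.0 * pq.s)]
    return time_histogram(sts, binsize=100 * pq.ms, output='rate')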
def complexity_pdf(spiketrains, binsize):
"""
Complexity Distribution [1] of a list of :attr:`neo.SpikeTrain` objects.
Probability density computed from the complexity histogram which is the
histogram of the entries of the population histogram of clipped (binary)
spike trains computed with a bin width of binsize.
It provides for each complexity (== number of active neurons per bin) the
number of occurrences. The normalization of that histogram to 1 is the
probability density.
Parameters
----------
spiketrains : List of neo.SpikeTrain objects
Spiketrains with a common time axis (same `t_start` and `t_stop`)
binsize : quantities.Quantity
Width of the histogram's time bins.
Returns
-------
time_hist : neo.AnalogSignalArray
A neo.AnalogSignalArray object containing the histogram values.
        `time_hist[j]` gives the probability of observing a complexity of exactly
        `j`, i.e. `j` of the input spike trains spiking within the same bin.
See also
--------
elephant.conversion.BinnedSpikeTrain
References
----------
[1]Gruen, S., Abeles, M., & Diesmann, M. (2008). Impact of higher-order
correlations on coincidence distributions of massively parallel data.
In Dynamic Brain-from Neural Spikes to Behaviors (pp. 96-114).
Springer Berlin Heidelberg.
"""
# Computing the population histogram with parameter binary=True to clip the
# spike trains before summing
pophist = time_histogram(spiketrains, binsize, binary=True)
# Computing the histogram of the entries of pophist (=Complexity histogram)
complexity_hist = np.histogram(
pophist.magnitude, bins=range(0, len(spiketrains) + 2))[0]
    # Normalization of the Complexity Histogram to 1 (probability distribution)
complexity_hist = complexity_hist / complexity_hist.sum()
# Convert the Complexity pdf to an neo.AnalogSignalArray
complexity_distribution = neo.AnalogSignalArray(
np.array(complexity_hist).reshape(len(complexity_hist), 1) *
pq.dimensionless, t_start=0 * pq.dimensionless,
sampling_period=1 * pq.dimensionless)
return complexity_distribution
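# --- Editor's illustrative sketch (not part of the original module) ---
# complexity_pdf() on the same kind of input as above; entry j of the returned
# signal is the probability that exactly j of the input trains spike within the
# same 100 ms bin. Spike times are arbitrary.
def _example_complexity_pdf():
    import quantities as pq
    import neo
    sts = [neo.SpikeTrain([0.1, 0.4, 0.7] * pq.s, t_start=0.0 * pq.s, t_stop=1.0 * pq.s),
           neo.SpikeTrain([0.1, 0.5] * pq.s, t_start=0.0 * pq.s, t_stop=1.0 * pq.s)]
    return complexity_pdf(sts, binsize=100 * pq.ms)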
"""Kernel Bandwidth Optimization.
Python implementation by Subhasis Ray.
Original matlab code (sskernel.m) here:
http://2000.jukuin.keio.ac.jp/shimazaki/res/kernel.html
This was translated into Python by Subhasis Ray, NCBS. Tue Jun 10
23:01:43 IST 2014
"""
def nextpow2(x):
""" Return the smallest integral power of 2 that >= x """
n = 2
while n < x:
n = 2 * n
return n
def fftkernel(x, w):
"""
y = fftkernel(x,w)
Function `fftkernel' applies the Gauss kernel smoother to an input
signal using FFT algorithm.
Input argument
x: Sample signal vector.
w: Kernel bandwidth (the standard deviation) in unit of
the sampling resolution of x.
Output argument
y: Smoothed signal.
MAY 5/23, 2012 Author Hideaki Shimazaki
    RIKEN Brain Science Institute
http://2000.jukuin.keio.ac.jp/shimazaki
Ported to Python: Subhasis Ray, NCBS. Tue Jun 10 10:42:38 IST 2014
"""
L = len(x)
Lmax = L + 3 * w
n = nextpow2(Lmax)
X = np.fft.fft(x, n)
f = np.arange(0, n, 1.0) / n
    f = np.concatenate((-f[:n // 2], f[n // 2:0:-1]))
K = np.exp(-0.5 * (w * 2 * np.pi * f)**2)
y = np.fft.ifft(X * K, n)
y = y[:L].copy()
return y
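# --- Editor's illustrative sketch (not part of the original module) ---
# fftkernel() smooths a binned signal with a Gaussian of bandwidth `w`, given in
# units of the sampling resolution, so a single unit impulse comes back as a
# Gaussian-shaped bump. Only numpy is assumed.
def _example_fftkernel():
    import numpy as np
    x = np.zeros(200)
    x[50] = 1.0
    return fftkernel(x, w=5.0).real  # bump centred near index 50, area ~ 1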
def logexp(x):
if x < 1e2:
y = np.log(1 + np.exp(x))
else:
y = x
return y
def ilogexp(x):
if x < 1e2:
y = np.log(np.exp(x) - 1)
else:
y = x
return y
def cost_function(x, N, w, dt):
"""
The cost function
Cn(w) = sum_{i,j} int k(x - x_i) k(x - x_j) dx - 2 sum_{i~=j} k(x_i - x_j)
"""
yh = np.abs(fftkernel(x, w / dt)) # density
# formula for density
C = np.sum(yh ** 2) * dt - 2 * np.sum(yh * x) * \
dt + 2 / np.sqrt(2 * np.pi) / w / N
C = C * N * N
# formula for rate
# C = dt*sum( yh.^2 - 2*yh.*y_hist + 2/sqrt(2*pi)/w*y_hist )
return C, yh
def sskernel(spiketimes, tin=None, w=None, bootstrap=False):
"""
Calculates optimal fixed kernel bandwidth.
spiketimes: sequence of spike times (sorted to be ascending).
tin: (optional) time points at which the kernel bandwidth is to be estimated.
w: (optional) vector of kernel bandwidths. If specified, optimal
bandwidth is selected from this.
bootstrap (optional): whether to calculate the 95% confidence
interval. (default False)
Returns
A dictionary containing the following key value pairs:
'y': estimated density,
't': points at which estimation was computed,
'optw': optimal kernel bandwidth,
'w': kernel bandwidths examined,
'C': cost functions of w,
'confb95': (lower bootstrap confidence level, upper bootstrap confidence level),
'yb': bootstrap samples.
Ref: Shimazaki, Hideaki, and Shigeru Shinomoto. 2010. Kernel
Bandwidth Optimization in Spike Rate Estimation. Journal of
Computational Neuroscience 29 (1-2):
171-82. doi:10.1007/s10827-009-0180-4.
"""
if tin is None:
time = np.max(spiketimes) - np.min(spiketimes)
isi = np.diff(spiketimes)
isi = isi[isi > 0].copy()
dt = np.min(isi)
tin = np.linspace(np.min(spiketimes),
np.max(spiketimes),
min(int(time / dt + 0.5), 1000)) # The 1000 seems somewhat arbitrary
t = tin
else:
time = np.max(tin) - np.min(tin)
spiketimes = spiketimes[(spiketimes >= np.min(tin)) &
(spiketimes <= np.max(tin))].copy()
isi = np.diff(spiketimes)
isi = isi[isi > 0].copy()
dt = np.min(isi)
if dt > np.min(np.diff(tin)):
t = np.linspace(np.min(tin), np.max(tin),
min(int(time / dt + 0.5), 1000))
else:
t = tin
dt = np.min(np.diff(tin))
yhist, bins = np.histogram(spiketimes, np.r_[t - dt / 2, t[-1] + dt / 2])
N = np.sum(yhist)
yhist = yhist / (N * dt) # density
optw = None
y = None
if w is not None:
C = np.zeros(len(w))
Cmin = np.inf
for k, w_ in enumerate(w):
C[k], yh = cost_function(yhist, N, w_, dt)
if C[k] < Cmin:
Cmin = C[k]
optw = w_
y = yh
else:
# Golden section search on a log-exp scale
wmin = 2 * dt
wmax = max(spiketimes) - min(spiketimes)
imax = 20 # max iterations
w = np.zeros(imax)
C = np.zeros(imax)
tolerance = 1e-5
phi = 0.5 * (np.sqrt(5) + 1) # The Golden ratio
a = ilogexp(wmin)
b = ilogexp(wmax)
c1 = (phi - 1) * a + (2 - phi) * b
c2 = (2 - phi) * a + (phi - 1) * b
f1, y1 = cost_function(yhist, N, logexp(c1), dt)
f2, y2 = cost_function(yhist, N, logexp(c2), dt)
k = 0
while (np.abs(b - a) > (tolerance * (np.abs(c1) + np.abs(c2))))\
and (k < imax):
if f1 < f2:
b = c2
c2 = c1
c1 = (phi - 1) * a + (2 - phi) * b
f2 = f1
f1, y1 = cost_function(yhist, N, logexp(c1), dt)
w[k] = logexp(c1)
C[k] = f1
optw = logexp(c1)
y = y1 / (np.sum(y1 * dt))
else:
a = c1
c1 = c2
c2 = (2 - phi) * a + (phi - 1) * b
f1 = f2
f2, y2 = cost_function(yhist, N, logexp(c2), dt)
w[k] = logexp(c2)
C[k] = f2
optw = logexp(c2)
y = y2 / np.sum(y2 * dt)
k = k + 1
# Bootstrap confidence intervals
confb95 = None
yb = None
if bootstrap:
nbs = 1000
yb = np.zeros((nbs, len(tin)))
for ii in range(nbs):
idx = np.floor(np.random.rand(N) * N).astype(int)
xb = spiketimes[idx]
            y_histb, bins = np.histogram(
                xb, np.r_[t - dt / 2, t[-1] + dt / 2])
            y_histb = y_histb / dt / N
yb_buf = fftkernel(y_histb, optw / dt).real
yb_buf = yb_buf / np.sum(yb_buf * dt)
yb[ii, :] = np.interp(tin, t, yb_buf)
ybsort = np.sort(yb, axis=0)
y95b = ybsort[np.floor(0.05 * nbs).astype(int), :]
y95u = ybsort[np.floor(0.95 * nbs).astype(int), :]
confb95 = (y95b, y95u)
ret = np.interp(tin, t, y)
return {'y': ret,
't': tin,
'optw': optw,
'w': w,
'C': C,
'confb95': confb95,
'yb': yb}
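# --- Editor's illustrative sketch (not part of the original module) ---
# Runs the bandwidth optimisation end to end on synthetic spike times drawn
# uniformly over 10 s; the returned 'optw' is the optimal fixed kernel
# bandwidth. The seed and spike count are arbitrary; bootstrap is skipped to
# keep it fast.
def _example_sskernel():
    import numpy as np
    rng = np.random.RandomState(0)
    spiketimes = np.sort(rng.uniform(0.0, 10.0, size=500))
    return sskernel(spiketimes, bootstrap=False)['optw']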
| espenhgn/elephant | elephant/statistics.py | Python | bsd-3-clause | 42,367 | [
"Gaussian",
"NEURON"
] | 6fdda6c9b64c719f4a8ae06bccbc2b9c17d6370fa003a8dcdd54506feb777989 |
"""
.. module:: Pfn
:synopsis: ProcessPool and related classes
ProcessPool
ProcessPool creates a pool of worker subprocesses to handle a queue of tasks
much like the producers/consumers paradigm. Users just need to fill the queue
with tasks to be executed and worker tasks will execute them.
To construct a ProcessPool one should first call its constructor::
pool = ProcessPool( minSize, maxSize, maxQueuedRequests )
where parameters are:
:param int minSize: at least <minSize> workers will be alive all the time
:param int maxSize: no more than <maxSize> workers will be alive all the time
:param int maxQueuedRequests: size for request waiting in a queue to be executed
In case another request is added to the full queue, the execution will
lock until another request is taken out. The ProcessPool will automatically increase and
decrease the pool of workers as needed, of course not exceeding above limits.
To add a task to the queue one should execute::
pool.createAndQueueTask( funcDef,
args = ( arg1, arg2, ... ),
kwargs = { "kwarg1" : value1, "kwarg2" : value2 },
callback = callbackDef,
exceptionCallback = exceptionCallBackDef )
or alternatively by using a ProcessTask instance::
task = ProcessTask( funcDef,
args = ( arg1, arg2, ... )
kwargs = { "kwarg1" : value1, .. },
callback = callbackDef,
exceptionCallback = exceptionCallbackDef )
pool.queueTask( task )
where parameters are:
:param funcDef: callable object definition (function, lambda, class with __call__ slot defined)
:param list args: argument list
:param dict kwargs: keyword arguments dictionary
:param callback: callback function definition
:param exceptionCallback: exception callback function definition
The callback, exceptionCallback and the parameters are all optional. Once a task has been added to the pool,
it will be executed as soon as possible. Worker subprocesses automatically return the return value of the task.
To obtain those results one has to execute::
  pool.processResults()
This method will process the existing return values of the task, even if the task does not return
anything. This method has to be called to clean the result queues. To wait until all the requests are finished
and process their result call::
  pool.processAllResults()
This function will block until all requests are finished and their result values have been processed.
It is also possible to set the ProcessPool in daemon mode, in which all results are automatically
processed as soon as they are available, just after finalization of task execution. To enable this mode one
has to call::
pool.daemonize()
Callback functions
There are two types of callbacks that can be executed for each task: an exception callback function and
a results callback function. The first one is executed when an unhandled exception has been raised during
task processing, and hence no task results are available; otherwise the results callback
is executed.
The callbacks can be attached in two places:
- directly in ProcessTask, in that case those have to be shelvable/picklable, so they should be defined as
global functions with the signature :callback( task, taskResult ): where :task: is a :ProcessTask:
  reference and :taskResult: is whatever the task callable returns for the results callback, and
  :exceptionCallback( task, exc_info ): where exc_info is an
  :S_ERROR( "Exception" ): structure extended with "Value" (the exception message) and "Exc_info" (the exception info)
- in ProcessPool, in that case there is no limitation on the function type, except the signature, which
should follow :callback( task ): or :exceptionCallback( task ):, as those callbacks definitions
are not put into the queues
The first type of callback is useful when various callable objects are put into the ProcessPool and
you want to handle them differently depending on their results, while the second type is meant for
executing the same type of callable in subprocesses, where the same type of result is expected
everywhere.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
import errno
import inspect
import multiprocessing
import os
import signal
import sys
import threading
import time
import six
from six.moves import queue as Queue
try:
from DIRAC.FrameworkSystem.Client.Logger import gLogger
except ImportError:
gLogger = None
try:
from DIRAC.Core.Utilities.LockRing import LockRing
except ImportError:
LockRing = None
try:
from DIRAC.Core.Utilities.ReturnValues import S_OK, S_ERROR
except ImportError:
def S_OK(val=""):
""" dummy S_OK """
return {'OK': True, 'Value': val}
def S_ERROR(mess):
""" dummy S_ERROR """
return {'OK': False, 'Message': mess}
sLog = gLogger.getSubLogger(__name__)
class WorkingProcess(multiprocessing.Process):
"""
.. class:: WorkingProcess
WorkingProcess is a class that represents activity that runs in a separate process.
It is running main thread (process) in daemon mode, reading tasks from :pendingQueue:, executing
them and pushing back tasks with results to the :resultsQueue:. If task has got a timeout value
defined a separate threading.Timer thread is started killing execution (and destroying worker)
after :ProcessTask.__timeOut: seconds.
Main execution could also terminate in a few different ways:
* on every failed read attempt (from empty :pendingQueue:), the idle loop counter is increased,
    the worker is terminated when the counter reaches a value of 10;
* when stopEvent is set (so ProcessPool is in draining mode),
* when parent process PID is set to 1 (init process, parent process with ProcessPool is dead).
"""
def __init__(self, pendingQueue, resultsQueue, stopEvent, keepRunning):
""" c'tor
:param self: self reference
    :param pendingQueue: queue storing ProcessTask before execution
:type pendingQueue: multiprocessing.Queue
:param resultsQueue: queue storing callbacks and exceptionCallbacks
:type resultsQueue: multiprocessing.Queue
:param stopEvent: event to stop processing
:type stopEvent: multiprocessing.Event
"""
multiprocessing.Process.__init__(self)
# daemonize
self.daemon = True
# flag to see if task is being treated
self.__working = multiprocessing.Value('i', 0)
# task counter
self.__taskCounter = multiprocessing.Value('i', 0)
# task queue
self.__pendingQueue = pendingQueue
# results queue
self.__resultsQueue = resultsQueue
# stop event
self.__stopEvent = stopEvent
# keep process running until stop event
self.__keepRunning = keepRunning
# placeholder for watchdog thread
self.__watchdogThread = None
# placeholder for process thread
self.__processThread = None
# placeholder for current task
self.task = None
# start yourself at least
self.start()
def __watchdog(self):
"""
Watchdog thread target
Terminating/killing WorkingProcess when parent process is dead
:param self: self reference
"""
while True:
# parent is dead, commit suicide
if os.getppid() == 1:
os.kill(self.pid, signal.SIGTERM)
# wait for half a minute and if worker is still alive use REAL silencer
time.sleep(30)
# now you're dead
os.kill(self.pid, signal.SIGKILL)
# wake me up in 5 seconds
time.sleep(5)
def isWorking(self):
"""
Check if process is being executed
:param self: self reference
"""
return self.__working.value == 1
def taskProcessed(self):
"""
Tell how many tasks have been processed so far
:param self: self reference
"""
return self.__taskCounter
def __processTask(self):
"""
processThread target
:param self: self reference
"""
if self.task:
self.task.process()
def run(self):
"""
Task execution
Reads and executes ProcessTask :task: out of pending queue and then pushes it
to the results queue for callback execution.
:param self: self reference
"""
# start watchdog thread
self.__watchdogThread = threading.Thread(target=self.__watchdog)
self.__watchdogThread.daemon = True
self.__watchdogThread.start()
if LockRing:
# Reset all locks
lr = LockRing()
lr._openAll()
lr._setAllEvents()
# zero processed task counter
taskCounter = 0
# zero idle loop counter
idleLoopCount = 0
# main loop
while True:
# draining, stopEvent is set, exiting
if self.__stopEvent.is_set():
return
# clear task
self.task = None
# read from queue
try:
task = self.__pendingQueue.get(block=True, timeout=10)
except Queue.Empty:
# idle loop?
idleLoopCount += 1
# 10th idle loop - exit, nothing to do
if idleLoopCount == 10 and not self.__keepRunning:
return
continue
# toggle __working flag
self.__working.value = 1
# save task
self.task = task
# reset idle loop counter
idleLoopCount = 0
# process task in a separate thread
self.__processThread = threading.Thread(target=self.__processTask)
self.__processThread.start()
timeout = False
noResults = False
# join processThread with or without timeout
if self.task.getTimeOut():
self.__processThread.join(self.task.getTimeOut() + 10)
else:
self.__processThread.join()
# processThread is still alive? stop it!
if self.__processThread.is_alive():
if six.PY2:
self.__processThread._Thread__stop() # pylint: disable=no-member
self.task.setResult(S_ERROR(errno.ETIME, "Timed out"))
timeout = True
# if the task finished with no results, something bad happened, e.g.
# undetected timeout
if not self.task.taskResults() and not self.task.taskException():
self.task.setResult(S_ERROR("Task produced no results"))
noResults = True
# check results and callbacks presence, put task to results queue
if self.task.hasCallback() or self.task.hasPoolCallback():
self.__resultsQueue.put(task)
if timeout or noResults:
# The task execution timed out, stop the process to prevent it from running
# in the background
time.sleep(1)
os.kill(self.pid, signal.SIGKILL)
return
# increase task counter
taskCounter += 1
self.__taskCounter = taskCounter
# toggle __working flag
self.__working.value = 0
class ProcessTask(object):
""" Defines task to be executed in WorkingProcess together with its callbacks.
"""
# taskID
taskID = 0
def __init__(self,
taskFunction,
args=None,
kwargs=None,
taskID=None,
callback=None,
exceptionCallback=None,
usePoolCallbacks=False,
timeOut=0):
""" c'tor
:warning: taskFunction has to be callable: it could be a function, lambda OR a class with
    __call__ operator defined. But be careful with the interpretation of args and kwargs, as they
are passed to different places in above cases:
1. for functions or lambdas args and kwargs are just treated as function parameters
    2. for callable classes (say MyTask) args and kwargs are passed to the class constructor
(MyTask.__init__) and MyTask.__call__ should be a method without parameters, i.e.
MyTask definition should be::
class MyTask:
def __init__( self, *args, **kwargs ):
...
def __call__( self ):
...
:warning: depending on :timeOut: value, taskFunction execution can be forcefully terminated
    using SIGALRM after :timeOut: seconds spent, :timeOut: equal to zero means there is no
time out at all, except those during :ProcessPool: finalization
:param self: self reference
:param mixed taskFunction: definition of callable object to be executed in this task
:param tuple args: non-keyword arguments
:param dict kwargs: keyword arguments
    :param int taskID: task id; if not set, the task id will be None
:param int timeOut: estimated time to execute taskFunction in seconds (default = 0, no timeOut at all)
:param mixed callback: result callback function
:param mixed exceptionCallback: callback function to be fired upon exception in taskFunction
"""
self.__taskFunction = taskFunction
self.__taskArgs = args or []
self.__taskKwArgs = kwargs or {}
self.__taskID = taskID
self.__resultCallback = callback
self.__exceptionCallback = exceptionCallback
self.__timeOut = 0
# set time out
self.setTimeOut(timeOut)
self.__done = False
self.__exceptionRaised = False
self.__taskException = None
self.__taskResult = None
self.__usePoolCallbacks = usePoolCallbacks
def taskResults(self):
"""
Get task results
:param self: self reference
"""
return self.__taskResult
def taskException(self):
"""
Get task exception
:param self: self reference
"""
return self.__taskException
def enablePoolCallbacks(self):
"""
(re)enable use of ProcessPool callbacks
"""
self.__usePoolCallbacks = True
def disablePoolCallbacks(self):
"""
Disable execution of ProcessPool callbacks
"""
self.__usePoolCallbacks = False
def usePoolCallbacks(self):
"""
Check if results should be processed by callbacks defined in the :ProcessPool:
:param self: self reference
"""
return self.__usePoolCallbacks
def hasPoolCallback(self):
"""
Check if asked to execute :ProcessPool: callbacks
:param self: self reference
"""
return self.__usePoolCallbacks
def setTimeOut(self, timeOut):
"""
Set time out (in seconds)
    :param self: self reference
:param int timeOut: new time out value
"""
try:
self.__timeOut = int(timeOut)
return S_OK(self.__timeOut)
except (TypeError, ValueError) as error:
return S_ERROR(str(error))
def getTimeOut(self):
"""
Get timeOut value
:param self: self reference
"""
return self.__timeOut
def hasTimeOutSet(self):
"""
Check if timeout is set
:param self: self reference
"""
return bool(self.__timeOut != 0)
def getTaskID(self):
"""
TaskID getter
:param self: self reference
"""
return self.__taskID
def hasCallback(self):
"""
Callback existence checking
:param self: self reference
:return: True if callback or exceptionCallback has been defined, False otherwise
"""
return self.__resultCallback or self.__exceptionCallback or self.__usePoolCallbacks
def exceptionRaised(self):
"""
Flag to determine exception in process
:param self: self reference
"""
return self.__exceptionRaised
def doExceptionCallback(self):
"""
Execute exceptionCallback
:param self: self reference
"""
if self.__done and self.__exceptionRaised and self.__exceptionCallback:
self.__exceptionCallback(self, self.__taskException)
def doCallback(self):
"""
Execute result callback function
:param self: self reference
"""
if self.__done and not self.__exceptionRaised and self.__resultCallback:
self.__resultCallback(self, self.__taskResult)
def setResult(self, result):
"""
Set taskResult to result
"""
self.__taskResult = result
def process(self):
"""
Execute task
:param self: self reference
"""
self.__done = True
try:
# it's a function?
if inspect.isfunction(self.__taskFunction):
self.__taskResult = self.__taskFunction(*self.__taskArgs, **self.__taskKwArgs)
# or a class?
elif inspect.isclass(self.__taskFunction):
# create new instance
taskObj = self.__taskFunction(*self.__taskArgs, **self.__taskKwArgs)
# ## check if it is callable, raise TypeError if not
if not callable(taskObj):
raise TypeError("__call__ operator not defined not in %s class" % taskObj.__class__.__name__)
# ## call it at least
self.__taskResult = taskObj()
except Exception as x:
self.__exceptionRaised = True
if gLogger:
gLogger.exception("Exception in process of pool")
if self.__exceptionCallback or self.usePoolCallbacks():
retDict = S_ERROR('Exception')
retDict['Value'] = str(x)
retDict['Exc_info'] = sys.exc_info()[1]
self.__taskException = retDict
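# --- Editor's illustrative sketch (not part of the original module) ---
# A ProcessTask bundles a callable with its callbacks. Normally a WorkingProcess
# drives it, but process()/doCallback() can also be called synchronously, which
# is convenient for testing. All names below are illustrative assumptions.
def _exampleProcessTask():
  def _double(x):
    return 2 * x
  def _onResult(task, result):
    print("task %s returned %s" % (task.getTaskID(), result))
  task = ProcessTask(_double, args=(21,), taskID=1, callback=_onResult)
  task.process()      # executed inside a WorkingProcess in normal operation
  task.doCallback()   # fires _onResult with the task result (42)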
class ProcessPool(object):
"""
.. class:: ProcessPool
ProcessPool
This class is managing multiprocessing execution of tasks (:ProcessTask: instances) in a separate
sub-processes (:WorkingProcess:).
Pool depth
  The :ProcessPool: keeps the required number of active workers all the time: worker processes are only created
  when pendingQueue is being filled with tasks, without exceeding the defined min and max limits. When pendingQueue is
  empty, idle workers clean themselves up, as each worker has a built-in self-destroy mechanism that triggers
  after 10 idle loops.
Processing and communication
The communication between :ProcessPool: instance and slaves is performed using two :multiprocessing.Queues:
* pendingQueue, used to push tasks to the workers,
* resultsQueue for revert direction;
and one :multiprocessing.Event: instance (stopEvent), which is working as a fuse to destroy idle workers
in a clean manner.
Processing of task begins with pushing it into :pendingQueue: using :ProcessPool.queueTask: or
  :ProcessPool.createAndQueueTask:. Every time a new task is queued, :ProcessPool: checks the existence of
  active and idle workers and spawns new ones when required. The task is then read and processed on the worker
side. If results are ready and callback functions are defined, task is put back to the resultsQueue and it is
ready to be picked up by ProcessPool again. To perform this last step one has to call :ProcessPool.processResults:,
or alternatively ask for daemon mode processing, when this function is called again and again in
separate background thread.
Finalisation
Finalization for task processing is done in several steps:
* if pool is working in daemon mode, background result processing thread is joined and stopped
* :pendingQueue: is emptied by :ProcessPool.processAllResults: function, all enqueued tasks are executed
* :stopEvent: is set, so all idle workers are exiting immediately
  * non-hanging workers are joined and terminated politely
  * the rest of the workers, if any, are forcefully terminated by signals: first by SIGTERM, and if that doesn't work
    by SIGKILL
  :warn: Be careful and choose the :timeout: argument to :ProcessPool.finalize: wisely. A too short time period can
  cause all workers to be killed.
"""
def __init__(self, minSize=2, maxSize=0, maxQueuedRequests=10,
strictLimits=True, poolCallback=None, poolExceptionCallback=None,
keepProcessesRunning=True):
""" c'tor
:param self: self reference
    :param int minSize: minimal number of simultaneously executed tasks
    :param int maxSize: maximal number of simultaneously executed tasks
    :param int maxQueuedRequests: size of pending tasks queue
:param bool strictLimits: flag to workers overcommitment
    :param callable poolCallback: results callback
:param callable poolExceptionCallback: exception callback
"""
# min workers
self.__minSize = max(1, minSize)
# max workers
self.__maxSize = max(self.__minSize, maxSize)
# queue size
self.__maxQueuedRequests = maxQueuedRequests
# flag to worker overcommit
self.__strictLimits = strictLimits
# pool results callback
self.__poolCallback = poolCallback
# pool exception callback
self.__poolExceptionCallback = poolExceptionCallback
# pending queue
self.__pendingQueue = multiprocessing.Queue(self.__maxQueuedRequests)
# results queue
self.__resultsQueue = multiprocessing.Queue(0)
# stop event
self.__stopEvent = multiprocessing.Event()
# keep processes running flag
self.__keepRunning = keepProcessesRunning
# lock
self.__prListLock = threading.Lock()
# workers dict
self.__workersDict = {}
# flag to trigger workers draining
self.__draining = False
# placeholder for daemon results processing
self.__daemonProcess = False
# create initial workers
self.__spawnNeededWorkingProcesses()
def stopProcessing(self, timeout=10):
"""
    Cease fire
:param self: self reference
"""
self.finalize(timeout)
def startProcessing(self):
"""
Restart processing again
:param self: self reference
"""
self.__draining = False
self.__stopEvent.clear()
self.daemonize()
def setPoolCallback(self, callback):
"""
Set ProcessPool callback function
:param self: self reference
:param callable callback: callback function
"""
if callable(callback):
self.__poolCallback = callback
def setPoolExceptionCallback(self, exceptionCallback):
"""
Set ProcessPool exception callback function
    :param self: self reference
    :param callable exceptionCallback: exception callback function
"""
if callable(exceptionCallback):
self.__poolExceptionCallback = exceptionCallback
def getMaxSize(self):
"""
MaxSize getter
:param self: self reference
"""
return self.__maxSize
def getMinSize(self):
"""
MinSize getter
:param self: self reference
"""
return self.__minSize
def getNumWorkingProcesses(self):
"""
Count processes currently being executed
:param self: self reference
"""
counter = 0
self.__prListLock.acquire()
try:
counter = len([pid for pid, worker in self.__workersDict.items() if worker.isWorking()])
finally:
self.__prListLock.release()
return counter
def getNumIdleProcesses(self):
"""
Count processes being idle
:param self: self reference
"""
counter = 0
self.__prListLock.acquire()
try:
counter = len([pid for pid, worker in self.__workersDict.items() if not worker.isWorking()])
finally:
self.__prListLock.release()
return counter
def getFreeSlots(self):
""" get number of free slots available for workers
:param self: self reference
"""
return max(0, self.__maxSize - self.getNumWorkingProcesses())
def __spawnWorkingProcess(self):
"""
Create new process
:param self: self reference
"""
self.__prListLock.acquire()
try:
worker = WorkingProcess(self.__pendingQueue, self.__resultsQueue, self.__stopEvent, self.__keepRunning)
while worker.pid is None:
time.sleep(0.1)
self.__workersDict[worker.pid] = worker
finally:
self.__prListLock.release()
def __cleanDeadProcesses(self):
"""
    Delete references of dead workingProcesses from ProcessPool.__workersDict
"""
# check wounded processes
self.__prListLock.acquire()
try:
for pid, worker in list(self.__workersDict.items()):
if not worker.is_alive():
del self.__workersDict[pid]
finally:
self.__prListLock.release()
def __spawnNeededWorkingProcesses(self):
"""
    Create N working processes (at least self.__minSize, but no more
than self.__maxSize)
:param self: self reference
"""
self.__cleanDeadProcesses()
# if we're draining do not spawn new workers
if self.__draining or self.__stopEvent.is_set():
return
while len(self.__workersDict) < self.__minSize:
if self.__draining or self.__stopEvent.is_set():
return
self.__spawnWorkingProcess()
while self.hasPendingTasks() and \
self.getNumIdleProcesses() == 0 and \
len(self.__workersDict) < self.__maxSize:
if self.__draining or self.__stopEvent.is_set():
return
self.__spawnWorkingProcess()
time.sleep(0.1)
def queueTask(self, task, blocking=True, usePoolCallbacks=False):
"""
Enqueue new task into pending queue
:param self: self reference
:param ProcessTask task: new task to execute
:param bool blocking: flag to block if necessary and new empty slot is available (default = block)
:param bool usePoolCallbacks: flag to trigger execution of pool callbacks (default = don't execute)
"""
if not isinstance(task, ProcessTask):
raise TypeError("Tasks added to the process pool must be ProcessTask instances")
if usePoolCallbacks and (self.__poolCallback or self.__poolExceptionCallback):
task.enablePoolCallbacks()
self.__prListLock.acquire()
try:
self.__pendingQueue.put(task, block=blocking)
except Queue.Full:
self.__prListLock.release()
return S_ERROR("Queue is full")
finally:
self.__prListLock.release()
self.__spawnNeededWorkingProcesses()
# throttle a bit to allow task state propagation
time.sleep(0.1)
return S_OK()
def createAndQueueTask(self,
taskFunction,
args=None,
kwargs=None,
taskID=None,
callback=None,
exceptionCallback=None,
blocking=True,
usePoolCallbacks=False,
timeOut=0):
"""
Create new processTask and enqueue it in pending task queue
:param self: self reference
:param mixed taskFunction: callable object definition (FunctionType, LambdaType, callable class)
:param tuple args: non-keyword arguments passed to taskFunction c'tor
:param dict kwargs: keyword arguments passed to taskFunction c'tor
:param int taskID: task Id
:param mixed callback: callback handler, callable object executed after task's execution
:param mixed exceptionCallback: callback handler executed if testFunction had raised an exception
:param bool blocking: flag to block queue if necessary until free slot is available
:param bool usePoolCallbacks: fire execution of pool defined callbacks after task callbacks
:param int timeOut: time you want to spend executing :taskFunction:
"""
task = ProcessTask(taskFunction, args, kwargs, taskID, callback, exceptionCallback, usePoolCallbacks, timeOut)
return self.queueTask(task, blocking)
def hasPendingTasks(self):
"""
    Check if tasks are present in the pending queue
:param self: self reference
:warning: results may be misleading if elements put into the queue are big
"""
return not self.__pendingQueue.empty()
def isFull(self):
"""
    Check if the pending queue is full
:param self: self reference
:warning: results may be misleading if elements put into the queue are big
"""
return self.__pendingQueue.full()
def isWorking(self):
"""
Check existence of working subprocesses
:param self: self reference
"""
return not self.__pendingQueue.empty() or self.getNumWorkingProcesses()
def processResults(self):
"""
Execute tasks' callbacks removing them from results queue
:param self: self reference
"""
processed = 0
log = sLog.getSubLogger('WorkingProcess')
while True:
if (
not log.debug(
"Start loop (t=0) queue size = %d, processed = %d" %
(self.__resultsQueue.qsize(),
processed)) and processed == 0 and self.__resultsQueue.qsize()):
log.debug("Process results, queue size = %d" % self.__resultsQueue.qsize())
start = time.time()
self.__cleanDeadProcesses()
log.debug("__cleanDeadProcesses", 't=%.2f' % (time.time() - start))
if not self.__pendingQueue.empty():
self.__spawnNeededWorkingProcesses()
log.debug("__spawnNeededWorkingProcesses", 't=%.2f' % (time.time() - start))
time.sleep(0.1)
if self.__resultsQueue.empty():
if self.__resultsQueue.qsize():
log.warn("Results queue is empty but has non zero size", "%d" % self.__resultsQueue.qsize())
# We only commit suicide if we reach a backlog greater than the maximum number of workers
if self.__resultsQueue.qsize() > self.__maxSize:
return -1
else:
return 0
if processed == 0:
log.debug("Process results, but queue is empty...")
break
# get task
task = self.__resultsQueue.get()
log.debug("__resultsQueue.get", 't=%.2f' % (time.time() - start))
# execute callbacks
try:
task.doExceptionCallback()
task.doCallback()
log.debug("doCallback", 't=%.2f' % (time.time() - start))
if task.usePoolCallbacks():
if self.__poolExceptionCallback and task.exceptionRaised():
self.__poolExceptionCallback(task.getTaskID(), task.taskException())
if self.__poolCallback and task.taskResults():
self.__poolCallback(task.getTaskID(), task.taskResults())
log.debug("__poolCallback", 't=%.2f' % (time.time() - start))
except Exception as error:
log.exception("Exception in callback", lException=error)
pass
processed += 1
if processed:
log.debug("Processed %d results" % processed)
else:
log.debug("No results processed")
return processed
def processAllResults(self, timeout=10):
"""
Process all enqueued tasks at once
:param self: self reference
"""
start = time.time()
while self.getNumWorkingProcesses() or not self.__pendingQueue.empty():
self.processResults()
time.sleep(1)
if time.time() - start > timeout:
break
self.processResults()
def finalize(self, timeout=60):
"""
Drain pool, shutdown processing in more or less clean way
:param self: self reference
:param timeout: seconds to wait before killing
"""
    # start draining
self.__draining = True
    # join daemon process
if self.__daemonProcess:
self.__daemonProcess.join(timeout)
# process all tasks
self.processAllResults(timeout)
# set stop event, all idle workers should be terminated
self.__stopEvent.set()
# join idle workers
start = time.time()
log = sLog.getSubLogger("finalize")
nWorkers = 9999999
while self.__workersDict:
self.__cleanDeadProcesses()
if len(self.__workersDict) != nWorkers:
nWorkers = len(self.__workersDict)
log.debug("%d workers still active, timeout = %d" % (nWorkers, timeout))
if timeout <= 0 or time.time() - start >= timeout:
break
time.sleep(0.1)
# second clean up - join and terminate workers
if self.__workersDict:
log.debug("After cleaning dead processes, %d workers still active, timeout = %d" %
(len(self.__workersDict), timeout))
for worker in self.__workersDict.values():
if worker.is_alive():
worker.terminate()
worker.join(5)
self.__cleanDeadProcesses()
# third clean up - kill'em all!!!
if self.__workersDict:
log.debug("After terminating processes, %d workers still active, timeout = %d, kill them" %
(len(self.__workersDict), timeout))
self.__filicide()
def __filicide(self):
"""
Kill all workers, kill'em all!
:param self: self reference
"""
while self.__workersDict:
pid = list(self.__workersDict).pop(0)
worker = self.__workersDict[pid]
if worker.is_alive():
os.kill(pid, signal.SIGKILL)
del self.__workersDict[pid]
def daemonize(self):
"""
    Run results processing in a separate background daemon thread, so that
    task results are picked up and their callbacks executed automatically
    as soon as they are available, until the pool is drained.
:param self: self reference
"""
if self.__daemonProcess:
return
self.__daemonProcess = threading.Thread(target=self.__backgroundProcess)
self.__daemonProcess.setDaemon(1)
self.__daemonProcess.start()
def __backgroundProcess(self):
"""
Daemon thread target
:param self: self reference
"""
while True:
if self.__draining:
return
self.processResults()
time.sleep(1)
def __del__(self):
"""
Delete slot
:param self: self reference
"""
self.finalize(timeout=10)
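# --- Editor's illustrative sketch (not part of the original module) ---
# Minimal ProcessPool round trip: queue a few tasks, let the pool-level callback
# collect results, then drain. Guarded by __main__ because the pool forks worker
# processes; all names and sizes are illustrative assumptions.
def _examplePoolCallback(taskID, taskResult):
  print("task %s -> %s" % (taskID, taskResult))

def _exampleSquare(x):
  return x * x

if __name__ == "__main__":
  examplePool = ProcessPool(minSize=1, maxSize=2, maxQueuedRequests=10,
                            poolCallback=_examplePoolCallback)
  for i in range(1, 6):
    examplePool.createAndQueueTask(_exampleSquare, args=(i,), taskID=i,
                                   usePoolCallbacks=True)
  examplePool.processAllResults(timeout=30)
  examplePool.finalize(timeout=10)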
| yujikato/DIRAC | src/DIRAC/Core/Utilities/ProcessPool.py | Python | gpl-3.0 | 32,804 | [
"DIRAC"
] | 37fdf33579f48add70ec827671089914f7fd20bf24bc5b4235cce89f4aa43285 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sqlite3
con = sqlite3.connect('todo.db') # Warning: This file is created in the current directory
con.execute("CREATE TABLE todo (id INTEGER PRIMARY KEY, task char(100) NOT NULL, status bool NOT NULL)")
con.execute("INSERT INTO todo (task,status) VALUES ('test',0)")
con.execute("INSERT INTO todo (task,status) VALUES ('Read A-byte-of-python to get a good introduction into Python',0)")
con.execute("INSERT INTO todo (task,status) VALUES ('Visit the Python website',1)")
con.execute("INSERT INTO todo (task,status) VALUES ('Test various editors for and check the syntax highlighting',1)")
con.execute("INSERT INTO todo (task,status) VALUES ('Choose your favorite WSGI-Framework',0)")
con.commit()
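# --- Editor's illustrative sketch (not part of the original tutorial script) ---
# Quick sanity check: read the open (status = 0) tasks back from the table that
# was just populated, then close the connection.
for row in con.execute("SELECT id, task FROM todo WHERE status = 0"):
    print(row)
con.close()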
| rarewin/my-studies | python/bottle-test/tutorial_todo-list/create_table.py | Python | bsd-2-clause | 754 | [
"VisIt"
] | d92828e74897e208d53005e6f970b2deaf215d1327d8a7708d66f4cbd905c691 |
###########################################################################
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
#
# This code generated (see starthinker/scripts for possible source):
# - Command: "python starthinker_ui/manage.py airflow"
#
###########################################################################
'''
--------------------------------------------------------------
Before running this Airflow module...
Install StarThinker in cloud composer ( recommended ):
From Release: pip install starthinker
From Open Source: pip install git+https://github.com/google/starthinker
Or push local code to the cloud composer plugins directory ( if pushing local code changes ):
source install/deploy.sh
4) Composer Menu
l) Install All
--------------------------------------------------------------
If any recipe task has "auth" set to "user" add user credentials:
1. Ensure a RECIPE['setup']['auth']['user'] = [User Credentials JSON]
OR
1. Visit Airflow UI > Admin > Connections.
2. Add an Entry called "starthinker_user", fill in the following fields. Last step paste JSON from authentication.
- Conn Type: Google Cloud Platform
- Project: Get from https://github.com/google/starthinker/blob/master/tutorials/cloud_project.md
- Keyfile JSON: Get from: https://github.com/google/starthinker/blob/master/tutorials/deploy_commandline.md#optional-setup-user-credentials
--------------------------------------------------------------
If any recipe task has "auth" set to "service" add service credentials:
1. Ensure a RECIPE['setup']['auth']['service'] = [Service Credentials JSON]
OR
1. Visit Airflow UI > Admin > Connections.
2. Add an Entry called "starthinker_service", fill in the following fields. Last step paste JSON from authentication.
- Conn Type: Google Cloud Platform
- Project: Get from https://github.com/google/starthinker/blob/master/tutorials/cloud_project.md
- Keyfile JSON: Get from: https://github.com/google/starthinker/blob/master/tutorials/cloud_service.md
--------------------------------------------------------------
DV360 Bulk Editor
Allows bulk editing DV360 through Sheets and BigQuery.
- Select Load Partners, then click Save + Run, then a sheet called DV Editor will be created.
- In the Partners sheet tab, fill in Filter column then select Load Advertisers, click Save + Run.
- In the Advertisers sheet tab, fill in Filter column then select Load Campaigns, click Save + Run.
- In the Campaigns sheet tab, fill in Filter column, optional.
- Then select Load Insertion Orders And Line Items, click Save + Run.
- To update values, make changes on all Edit columns.
- Select Preview, then Save + Run.
- Check the Audit and Preview tabs to verify commit.
- To commit changes, select Update, then Save + Run.
- Check the Success and Error tabs.
- Update can be run multiple times.
- Update ONLY changes fields that do not match their original value.
- Insert operates only on Edit columns, ignores original value columns.
- Careful when using drag to copy rows, values are incremented automatically.
- Modify audit logic by visiting BigQuery and changing the views.
--------------------------------------------------------------
This StarThinker DAG can be extended with any additional tasks from the following sources:
- https://google.github.io/starthinker/
- https://github.com/google/starthinker/tree/master/dags
'''
from starthinker.airflow.factory import DAG_Factory
INPUTS = {
'auth_dv':'user', # Credentials used for dv.
'auth_sheet':'user', # Credentials used for sheet.
'auth_bigquery':'service', # Credentials used for bigquery.
'recipe_name':'', # Name of Google Sheet to create.
'recipe_slug':'', # Name of Google BigQuery dataset to create.
'command':'Load Partners', # Action to take.
}
RECIPE = {
'setup':{
'day':[
],
'hour':[
]
},
'tasks':[
{
'dataset':{
'__comment__':'Ensure dataset exists.',
'auth':{'field':{'name':'auth_bigquery','kind':'authentication','order':1,'default':'service','description':'Credentials used for writing data.'}},
'dataset':{'field':{'name':'recipe_slug','prefix':'DV_Editor_','kind':'string','order':2,'default':'','description':'Name of Google BigQuery dataset to create.'}}
}
},
{
'drive':{
'__comment__':'Copy the default template to sheet with the recipe name',
'auth':{'field':{'name':'auth_sheet','kind':'authentication','order':1,'default':'user','description':'Credentials used for reading data.'}},
'copy':{
'source':'https://docs.google.com/spreadsheets/d/18G6cGo4j5SsY08H8P53R22D_Pm6m-zkE6APd3EDLf2c/',
'destination':{'field':{'name':'recipe_name','prefix':'DV Editor ','kind':'string','order':3,'default':'','description':'Name of Google Sheet to create.'}}
}
}
},
{
'dv_editor':{
'__comment':'Depending on users choice, execute a different part of the solution.',
'auth_dv':{'field':{'name':'auth_dv','kind':'authentication','order':1,'default':'user','description':'Credentials used for dv.'}},
'auth_sheets':{'field':{'name':'auth_sheet','kind':'authentication','order':2,'default':'user','description':'Credentials used for sheet.'}},
'auth_bigquery':{'field':{'name':'auth_bigquery','kind':'authentication','order':3,'default':'service','description':'Credentials used for bigquery.'}},
'sheet':{'field':{'name':'recipe_name','prefix':'DV Editor ','kind':'string','order':4,'default':'','description':'Name of Google Sheet to create.'}},
'dataset':{'field':{'name':'recipe_slug','prefix':'DV_Editor_','kind':'string','order':5,'default':'','description':'Name of Google BigQuery dataset to create.'}},
'command':{'field':{'name':'command','kind':'choice','choices':['Clear Partners','Clear Advertisers','Clear Campaigns','Clear Insertion Orders And Line Items','Clear Preview','Clear Update','Load Partners','Load Advertisers','Load Campaigns','Load Insertion Orders And Line Items','Preview','Update'],'order':6,'default':'Load Partners','description':'Action to take.'}}
}
}
]
}
dag_maker = DAG_Factory('dv360_editor', RECIPE, INPUTS)
dag = dag_maker.generate()
if __name__ == "__main__":
dag_maker.print_commandline()
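# Local check sketch (illustrative, not part of the generated recipe): list the
# Airflow tasks the factory produced. 'dag.tasks' is standard Airflow; the rest
# uses only objects defined above.
#
#   for task in dag.tasks:
#       print(task.task_id)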
| google/starthinker | dags/dv360_editor_dag.py | Python | apache-2.0 | 7,041 | [
"VisIt"
] | 2544aa17c7764d566d2fd39ed7a370f6533697034770781006e7642d183165ac |
"""Optimise the cache."""
# Copyright (C) 2009, Thomas Leonard
# See the README file for details, or visit http://0install.net.
from zeroinstall import _
import os
from logging import warn
def _already_linked(a, b):
ai = os.stat(a)
bi = os.stat(b)
return (ai.st_dev, ai.st_ino) == (bi.st_dev, bi.st_ino)
def _byte_identical(a, b):
af = file(a, 'rb')
bf = file(b, 'rb')
while True:
adata = af.read(100)
bdata = bf.read(100)
if adata != bdata:
return False
if not adata:
return True
def _link(a, b, tmpfile):
"""Keep 'a', delete 'b' and hard-link to 'a'"""
if not _byte_identical(a, b):
warn(_("Files should be identical, but they're not!\n%(file_a)s\n%(file_b)s"), {'file_a': a, 'file_b': b})
b_dir = os.path.dirname(b)
old_mode = os.lstat(b_dir).st_mode
os.chmod(b_dir, old_mode | 0200) # Need write access briefly
try:
os.link(a, tmpfile)
try:
os.rename(tmpfile, b)
except:
os.unlink(tmpfile)
raise
finally:
os.chmod(b_dir, old_mode)
def optimise(impl_dir):
"""Scan an implementation cache directory for duplicate files, and
hard-link any duplicates together to save space.
@param impl_dir: a $cache/0install.net/implementations directory
@type impl_dir: str
@return: (unique bytes, duplicated bytes, already linked, manifest size)
@rtype: (int, int, int, int)"""
first_copy = {} # TypeDigest -> Path
dup_size = uniq_size = already_linked = man_size = 0
import random
for x in range(10):
tmpfile = os.path.join(impl_dir, 'optimise-%d' % random.randint(0, 1000000))
if not os.path.exists(tmpfile):
break
else:
raise Exception(_("Can't generate unused tempfile name!"))
for impl in os.listdir(impl_dir):
if impl.startswith('.') or '=' not in impl:
warn(_("Skipping non-implementation '%s'"), impl)
continue
manifest_path = os.path.join(impl_dir, impl, '.manifest')
try:
ms = file(manifest_path, 'rb')
except OSError, ex:
warn(_("Failed to read manifest file '%(manifest_path)s': %(exception)s"), {'manifest': manifest_path, 'exception': str(ex)})
continue
alg = impl.split('=', 1)[0]
if alg == 'sha1': continue
man_size += os.path.getsize(manifest_path)
dir = ""
for line in ms:
if line[0] == 'D':
itype, path = line.split(' ', 1)
assert path.startswith('/')
dir = path[1:-1] # Strip slash and newline
continue
if line[0] == "S":
itype, digest, size, rest = line.split(' ', 3)
uniq_size += long(size)
continue
assert line[0] in "FX"
itype, digest, mtime, size, path = line.split(' ', 4)
path = path[:-1] # Strip newline
size = long(size)
key = (itype, digest, mtime, size)
loc_path = (impl, dir, path)
first_loc = first_copy.get(key, None)
if first_loc:
first_full = os.path.join(impl_dir, *first_loc)
new_full = os.path.join(impl_dir, *loc_path)
if _already_linked(first_full, new_full):
already_linked += size
else:
_link(first_full, new_full, tmpfile)
dup_size += size
else:
first_copy[key] = loc_path
uniq_size += size
return (uniq_size, dup_size, already_linked, man_size)
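# Usage sketch (illustrative only, not part of the original module). The cache
# path below is an assumption about a typical per-user location:
#
#   cache = os.path.expanduser('~/.cache/0install.net/implementations')
#   uniq, dup, linked, man = optimise(cache)
#   print "Saved %d bytes by linking; %d bytes were already linked" % (dup, linked)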
| pombredanne/zero-install | zeroinstall/zerostore/optimise.py | Python | lgpl-2.1 | 3,098 | [
"VisIt"
] | 2f4cb6b8a2c9be7b1066829d390ef1ab4764c0da6c1cb93d527ff574cab1dcc8 |
"""A .. collapse:: directive for sphinx-bootstrap-theme."""
import os.path as op
from docutils import nodes
from docutils.parsers.rst.directives import flag, class_option
from docutils.parsers.rst.roles import set_classes
from docutils.statemachine import StringList
from sphinx.locale import _
from sphinx.util.docutils import SphinxDirective
from sphinx.util.fileutil import copy_asset
this_dir = op.dirname(__file__)
__version__ = '0.1.0.dev0'
###############################################################################
# Super classes
class DivNode(nodes.Body, nodes.Element):
"""Generic DivNode class."""
def __init__(self, **options):
diff = set(options.keys()).symmetric_difference(set(self.OPTION_KEYS))
assert len(diff) == 0, (diff, self.__class__.__name__)
self.options = options
super().__init__()
def visit_node(self, node):
"""Visit the node."""
atts = {}
if node.BASECLASS:
atts['class'] = node.BASECLASS
if node.options.get('class'):
atts['class'] += \
' {}-{}'.format(node.BASECLASS, node.options['class'])
self.body.append(self.starttag(node, node.ELEMENT, **atts))
def depart_node(self, node):
"""Depart the node."""
self.body.append('</{}>'.format(node.ELEMENT))
def _assemble(node, directive):
title_text = directive.arguments[0]
directive.add_name(node)
header = node.HEADER_PRETITLE.format(**node.options).split('\n')
directive.state.nested_parse(
StringList(header), directive.content_offset, node)
textnodes, messages = directive.state.inline_text(
title_text, directive.lineno)
node += textnodes
node += messages
header = node.HEADER_POSTTITLE.format(**node.options).split('\n')
directive.state.nested_parse(
StringList(header), directive.content_offset, node)
directive.state.nested_parse(
directive.content, directive.content_offset, node)
footer = node.FOOTER.format(**node.options).split('\n')
directive.state.nested_parse(
StringList(footer), directive.content_offset, node)
###############################################################################
# .. collapse::
class CollapseNode(DivNode):
"""Class for .. collapse:: directive."""
OPTION_KEYS = ('title', 'id_', 'extra', 'class')
ELEMENT = 'div'
BASECLASS = 'panel'
HEADER_PRETITLE = """.. raw:: html
<div class="panel-heading"><h4 class="panel-title">
<a data-toggle="collapse" href="#collapse_{id_}">"""
HEADER_POSTTITLE = """.. raw:: html
</a></h4></div>
<div id="collapse_{id_}" class="panel-collapse collapse{extra}">
<div class="panel-body">"""
FOOTER = """.. raw:: html
</div></div>"""
KNOWN_CLASSES = (
'default', 'primary', 'success', 'info', 'warning', 'danger')
@staticmethod
def _check_class(class_):
if class_ not in CollapseNode.KNOWN_CLASSES:
raise ValueError(':class: option %r must be one of %s'
% (class_, CollapseNode.KNOWN_CLASSES))
return class_
class CollapseDirective(SphinxDirective):
"""Collapse directive."""
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec = {'open': flag,
'class': CollapseNode._check_class}
has_content = True
def run(self):
"""Parse."""
self.assert_has_content()
title_text = _(self.arguments[0])
extra = _(' in' if 'open' in self.options else '')
class_ = {'class': self.options.get('class', 'default')}
id_ = nodes.make_id(title_text)
node = CollapseNode(title=title_text, id_=id_, extra=extra, **class_)
_assemble(node, self)
return [node]
###############################################################################
# .. details::
class DetailsNode(DivNode):
"""Class for .. details:: directive."""
ELEMENT = 'details'
BASECLASS = ''
OPTION_KEYS = ('title', 'class')
HEADER_PRETITLE = """.. raw:: html
<summary>"""
HEADER_POSTTITLE = """.. raw:: html
</summary>"""
FOOTER = """"""
class DetailsDirective(SphinxDirective):
"""Details directive."""
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec = {'class': class_option}
has_content = True
def run(self):
"""Parse."""
set_classes(self.options)
self.assert_has_content()
title_text = _(self.arguments[0])
class_ = {'class': self.options.get('class', '')}
node = DetailsNode(title=title_text, **class_)
_assemble(node, self)
return [node]
###############################################################################
# Generic setup
def setup(app):
"""Set up for Sphinx app."""
directives = dict(
collapse=CollapseDirective,
details=DetailsDirective,
)
for key, value in directives.items():
app.add_directive(key, value)
try:
app.add_css_file('bootstrap_divs.css')
except AttributeError:
app.add_stylesheet('bootstrap_divs.css')
try:
app.add_js_file('bootstrap_divs.js')
except AttributeError:
app.add_javascript('bootstrap_divs.js')
app.connect('build-finished', copy_asset_files)
for node in (CollapseNode, DetailsNode):
app.add_node(node,
html=(node.visit_node, node.depart_node),
latex=(node.visit_node, node.depart_node),
text=(node.visit_node, node.depart_node))
return dict(version='0.1', parallel_read_safe=True,
parallel_write_safe=True)
def copy_asset_files(app, exc):
"""Copy static assets."""
asset_files = ['bootstrap_divs.css', 'bootstrap_divs.js']
if exc is None: # build succeeded
for path in asset_files:
copy_asset(op.join(this_dir, path),
op.join(app.outdir, '_static'))
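# Example usage sketch (illustrative, not part of the extension). The module
# name in conf.py is an assumption about how this package is exposed on the path:
#
#   # conf.py
#   extensions = ['sphinx_bootstrap_divs']
#
#   .. collapse:: Show the long explanation
#      :class: info
#      :open:
#
#      Body text rendered inside a Bootstrap collapsible panel.
#
#   .. details:: More details
#
#      Body text rendered inside an HTML <details> element.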
| olafhauk/mne-python | doc/sphinxext/sphinx_bootstrap_divs/__init__.py | Python | bsd-3-clause | 6,079 | [
"VisIt"
] | 6e111f80b5e11d1010c6b3de506dc5b728f09d57958051d8c56b5194dc2cdeca |
from neuron.tests import test_all
suite = test_all.suite
| neurodebian/pkg-neuron | share/lib/python/neuron/tests/__init__.py | Python | gpl-2.0 | 59 | [
"NEURON"
] | ec5b2a88aa0072f0c93da06e35b1a83e226cc8f7380611c661b323f94d69ef65 |
########################################################################
# $HeadURL $
# File: Request.py
# Author: [email protected]
# Date: 2012/07/16 13:43:45
########################################################################
"""
:mod: Request
.. module: Request
:synopsis: request implementation
.. moduleauthor:: [email protected]
request implementation
"""
# for properties
# pylint: disable=E0211,W0612,W0142,C0103
__RCSID__ = "$Id$"
# #
# @file Request.py
# @author [email protected]
# @date 2012/07/16 13:44:00
# @brief Definition of Request class.
# # imports
import datetime
# # from DIRAC
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Utilities.TypedList import TypedList
from DIRAC.RequestManagementSystem.private.Record import Record
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
from DIRAC.RequestManagementSystem.Client.Operation import Operation
########################################################################
class Request( Record ):
"""
.. class:: Request
:param int RequestID: requestID
:param str Name: request' name
:param str OwnerDN: request's owner DN
:param str OwnerGroup: request owner group
:param str Setup: DIRAC setup
:param str SourceComponent: whatever
:param int JobID: jobID
:param datetime.datetime CreationTime: UTC datetime
:param datetime.datetime SubmissionTime: UTC datetime
:param datetime.datetime LastUpdate: UTC datetime
:param str Status: request's status
:param TypedList operations: list of operations
"""
ALL_STATES = ( "Waiting", "Failed", "Done", "Scheduled", "Assigned", "Canceled" )
FINAL_STATES = ( "Done", "Failed", "Canceled" )
def __init__( self, fromDict = None ):
"""c'tor
:param self: self reference
"""
Record.__init__( self )
self.__waiting = None
now = datetime.datetime.utcnow().replace( microsecond = 0 )
self.__data__["CreationTime"] = now
self.__data__["SubmitTime"] = now
self.__data__["LastUpdate"] = now
self.__data__["Status"] = "Done"
self.__data__["JobID"] = 0
self.__data__["RequestID"] = 0
proxyInfo = getProxyInfo()
if proxyInfo["OK"]:
proxyInfo = proxyInfo["Value"]
if proxyInfo["validGroup"] and proxyInfo["validDN"]:
self.OwnerDN = proxyInfo["identity"]
self.OwnerGroup = proxyInfo["group"]
self.__dirty = []
self.__operations__ = TypedList( allowedTypes = Operation )
fromDict = fromDict if fromDict else {}
self.__dirty = fromDict.get( "__dirty", [] )
if "__dirty" in fromDict:
del fromDict["__dirty"]
for opDict in fromDict.get( "Operations", [] ):
self += Operation( opDict )
if "Operations" in fromDict:
del fromDict["Operations"]
for key, value in fromDict.items():
if key not in self.__data__:
raise AttributeError( "Unknown Request attribute '%s'" % key )
if value:
setattr( self, key, value )
self._notify()
@staticmethod
def tableDesc():
""" get table desc """
return { "Fields" :
{ "RequestID" : "INTEGER NOT NULL AUTO_INCREMENT",
"RequestName" : "VARCHAR(255) NOT NULL",
"OwnerDN" : "VARCHAR(255)",
"OwnerGroup" : "VARCHAR(32)",
"Status" : "ENUM('Waiting', 'Assigned', 'Done', 'Failed', 'Canceled', 'Scheduled') DEFAULT 'Waiting'",
"Error" : "VARCHAR(255)",
"DIRACSetup" : "VARCHAR(32)",
"SourceComponent" : "BLOB",
"JobID" : "INTEGER DEFAULT 0",
"CreationTime" : "DATETIME",
"SubmitTime" : "DATETIME",
"LastUpdate" : "DATETIME" },
"PrimaryKey" : [ "RequestID" ],
'UniqueIndexes': {'RequestName' : [ 'RequestName'] }
}
def _notify( self ):
""" simple state machine for sub request statuses """
self.__waiting = None
# # update operations statuses
rStatus = "Waiting"
opStatusList = [ ( op.Status, op ) for op in self ]
self.__waiting = None
while opStatusList:
# # Scan all status in order!
opStatus, op = opStatusList.pop( 0 )
# # Failed -> Failed
if opStatus == "Failed":
rStatus = "Failed"
break
# Scheduled -> Scheduled
if opStatus == "Scheduled":
if self.__waiting == None:
self.__waiting = op
rStatus = "Scheduled"
# # First operation Queued becomes Waiting if no Waiting/Scheduled before
elif opStatus == "Queued":
if self.__waiting == None:
self.__waiting = op
op._setWaiting( self )
rStatus = "Waiting"
# # First operation Waiting is next to execute, others are queued
elif opStatus == "Waiting":
rStatus = "Waiting"
if self.__waiting == None:
self.__waiting = op
else:
op._setQueued( self )
# # All operations Done -> Done
elif opStatus == "Done" and self.__waiting == None:
rStatus = "Done"
self.Error = ''
self.Status = rStatus
def getWaiting( self ):
""" get waiting operation if any """
# # update states
self._notify()
return S_OK( self.__waiting )
# # Operation arithmetics
def __contains__( self, operation ):
""" in operator
:param self: self reference
:param Operation subRequest: a subRequest
"""
return bool( operation in self.__operations__ )
def __iadd__( self, operation ):
""" += operator for subRequest
:param self: self reference
:param Operation operation: sub-request to add
"""
if operation not in self:
self.__operations__.append( operation )
operation._parent = self
self._notify()
return self
def insertBefore( self, newOperation, existingOperation ):
""" insert :newOperation: just before :existingOperation:
:param self: self reference
:param Operation newOperation: Operation to be inserted
:param Operation existingOperation: previous Operation sibling
"""
if existingOperation not in self:
return S_ERROR( "%s is not in" % existingOperation )
if newOperation in self:
return S_ERROR( "%s is already in" % newOperation )
self.__operations__.insert( self.__operations__.index( existingOperation ), newOperation )
newOperation._parent = self
self._notify()
return S_OK()
def insertAfter( self, newOperation, existingOperation ):
""" insert :newOperation: just after :existingOperation:
:param self: self reference
:param Operation newOperation: Operation to be inserted
:param Operation existingOperation: next Operation sibling
"""
if existingOperation not in self:
return S_ERROR( "%s is not in" % existingOperation )
if newOperation in self:
return S_ERROR( "%s is already in" % newOperation )
self.__operations__.insert( self.__operations__.index( existingOperation ) + 1, newOperation )
newOperation._parent = self
self._notify()
return S_OK()
def addOperation( self, operation ):
""" add :operation: to list of Operations
:param self: self reference
:param Operation operation: Operation to be inserted
"""
if operation in self:
return S_ERROR( "This operation is already in!!!" )
self += operation
return S_OK()
def isEmpty( self ):
""" Evaluate if the request is empty
"""
return len( self.__operations__ ) == 0
def __iter__( self ):
""" iterator for sub-request """
return self.__operations__.__iter__()
def __getitem__( self, i ):
""" [] op for sub requests """
return self.__operations__.__getitem__( i )
def __setitem__( self, i, value ):
""" self[i] = val """
self.__operations__._typeCheck( value )
if self[i].OperationID:
self.__dirty.append( self[i].OperationID )
self.__operations__.__setitem__( i, value )
value._parent = self
self._notify()
def __delitem__( self, i ):
""" del self[i]"""
if not self.RequestID:
self.__operations__.__delitem__( i )
else:
opId = self[i].OperationID
if opId:
self.__dirty.append( opId )
self.__operations__.__delitem__( i )
self._notify()
def indexOf( self, subReq ):
""" return index of subReq (execution order) """
return self.__operations__.index( subReq ) if subReq in self else -1
def __nonzero__( self ):
""" for comparisons
"""
return True
def __len__( self ):
""" nb of subRequests """
return len( self.__operations__ )
def __str__( self ):
""" str operator """
return str( self.toJSON()["Value"] )
def subStatusList( self ):
""" list of statuses for all operations """
return [ subReq.Status for subReq in self ]
# # properties
@property
def RequestID( self ):
""" request ID getter """
return self.__data__["RequestID"]
@RequestID.setter
def RequestID( self, value ):
""" requestID setter (shouldn't be RO???) """
self.__data__["RequestID"] = long( value ) if value else 0
@property
def RequestName( self ):
""" request's name getter """
return self.__data__["RequestName"]
@RequestName.setter
def RequestName( self, value ):
""" request name setter """
if type( value ) != str:
raise TypeError( "RequestName should be a string" )
self.__data__["RequestName"] = value[:128]
@property
def OwnerDN( self ):
""" request owner DN getter """
return self.__data__["OwnerDN"]
@OwnerDN.setter
def OwnerDN( self, value ):
""" request owner DN setter """
if type( value ) != str:
raise TypeError( "OwnerDN should be a string!" )
self.__data__["OwnerDN"] = value
@property
def OwnerGroup( self ):
""" request owner group getter """
return self.__data__["OwnerGroup"]
@OwnerGroup.setter
def OwnerGroup( self, value ):
""" request owner group setter """
if type( value ) != str:
raise TypeError( "OwnerGroup should be a string!" )
self.__data__["OwnerGroup"] = value
@property
def DIRACSetup( self ):
""" DIRAC setup getter """
return self.__data__["DIRACSetup"]
@DIRACSetup.setter
def DIRACSetup( self, value ):
""" DIRAC setup setter """
if type( value ) != str:
raise TypeError( "setup should be a string!" )
self.__data__["DIRACSetup"] = value
@property
def SourceComponent( self ):
""" source component getter """
return self.__data__["SourceComponent"]
@SourceComponent.setter
def SourceComponent( self, value ):
""" source component setter """
if type( value ) != str:
raise TypeError( "Setup should be a string!" )
self.__data__["SourceComponent"] = value
@property
def JobID( self ):
""" jobID getter """
return self.__data__["JobID"]
@JobID.setter
def JobID( self, value = 0 ):
""" jobID setter """
self.__data__["JobID"] = long( value ) if value else 0
@property
def CreationTime( self ):
""" creation time getter """
return self.__data__["CreationTime"]
@CreationTime.setter
def CreationTime( self, value = None ):
""" creation time setter """
if type( value ) not in ( datetime.datetime, str ) :
raise TypeError( "CreationTime should be a datetime.datetime!" )
if type( value ) == str:
value = datetime.datetime.strptime( value.split( "." )[0], '%Y-%m-%d %H:%M:%S' )
self.__data__["CreationTime"] = value
@property
def SubmitTime( self ):
""" request's submission time getter """
return self.__data__["SubmitTime"]
@SubmitTime.setter
def SubmitTime( self, value = None ):
""" submission time setter """
if type( value ) not in ( datetime.datetime, str ):
raise TypeError( "SubmitTime should be a datetime.datetime!" )
if type( value ) == str:
value = datetime.datetime.strptime( value.split( "." )[0], '%Y-%m-%d %H:%M:%S' )
self.__data__["SubmitTime"] = value
@property
def LastUpdate( self ):
""" last update getter """
return self.__data__["LastUpdate"]
@LastUpdate.setter
def LastUpdate( self, value = None ):
""" last update setter """
if type( value ) not in ( datetime.datetime, str ):
raise TypeError( "LastUpdate should be a datetime.datetime!" )
if type( value ) == str:
value = datetime.datetime.strptime( value.split( "." )[0], '%Y-%m-%d %H:%M:%S' )
self.__data__["LastUpdate"] = value
@property
def Status( self ):
""" status getter """
self._notify()
return self.__data__["Status"]
@Status.setter
def Status( self, value ):
""" status setter """
if value not in Request.ALL_STATES:
raise ValueError( "Unknown status: %s" % str( value ) )
# If the status moved to Failed or Done, update the lastUpdate time
if value in ( 'Done', 'Failed' ):
if value != self.__data__["Status"]:
self.LastUpdate = datetime.datetime.utcnow().replace( microsecond = 0 )
if value == 'Done':
self.Error = ''
self.__data__["Status"] = value
@property
def Order( self ):
""" ro execution order getter """
self._notify()
opStatuses = [ op.Status for op in self.__operations__ ]
return opStatuses.index( "Waiting" ) if "Waiting" in opStatuses else len( opStatuses )
@property
def Error( self ):
""" error getter """
return self.__data__["Error"]
@Error.setter
def Error( self, value ):
""" error setter """
if type( value ) != str:
raise TypeError( "Error has to be a string!" )
self.__data__["Error"] = self._escapeStr( value, 255 )
def toSQL( self ):
""" prepare SQL INSERT or UPDATE statement """
colVals = [ ( "`%s`" % column, "'%s'" % value if type( value ) in ( str, datetime.datetime ) else str( value ) )
for column, value in self.__data__.items()
if value and column not in ( "RequestID", ) ]
# colVals.append( ( "`LastUpdate`", "UTC_TIMESTAMP()" ) )
query = []
if self.RequestID:
query.append( "UPDATE `Request` SET " )
query.append( ", ".join( [ "%s=%s" % item for item in colVals ] ) )
query.append( " WHERE `RequestID`=%d;\n" % self.RequestID )
else:
query.append( "INSERT INTO `Request` " )
columns = "(%s)" % ",".join( [ column for column, value in colVals ] )
values = "(%s)" % ",".join( [ value for column, value in colVals ] )
query.append( columns )
query.append( " VALUES %s;" % values )
return S_OK( "".join( query ) )
def cleanUpSQL( self ):
""" delete query for dirty operations """
query = []
if self.RequestID and self.__dirty:
opIDs = ",".join( [ str( opID ) for opID in self.__dirty ] )
query.append( "DELETE FROM `Operation` WHERE `RequestID`=%s AND `OperationID` IN (%s);\n" % ( self.RequestID,
opIDs ) )
for opID in self.__dirty:
query.append( "DELETE FROM `File` WHERE `OperationID`=%s;\n" % opID )
return query
# # digest
def toJSON( self ):
""" serialize to JSON format """
digest = dict( zip( self.__data__.keys(),
[ str( val ) if val else "" for val in self.__data__.values() ] ) )
digest["RequestID"] = self.RequestID
digest["Operations"] = []
digest["__dirty"] = self.__dirty
for op in self:
opJSON = op.toJSON()
if not opJSON["OK"]:
return opJSON
digest["Operations"].append( opJSON["Value"] )
return S_OK( digest )
def getDigest( self ):
""" return digest for request """
digest = ['Name:' + self.RequestName]
for op in self:
opDigest = [ str( item ) for item in ( op.Type, op.Type, op.Status, op.Order ) ]
if op.TargetSE:
opDigest.append( op.TargetSE )
if op.Catalog:
opDigest.append( op.Catalog )
if len( op ):
opFile = op[0]
opDigest.append( opFile.LFN )
opDigest.append( ",...<%d files>" % len( op ) )
digest.append( ":".join( opDigest ) )
return S_OK( "\n".join( digest ) )
def optimize( self ):
""" Merges together the operations that can be merged. They need to have the following arguments equal:
* Type
* Arguments
* SourceSE
* TargetSE
* Catalog
It also makes sure that the maximum number of Files in an Operation is never exceeded.
CAUTION: this method is meant to be called before inserting into the DB,
so if the RequestID is not 0, we do not touch anything.
:return: S_ERROR if the Request should not be optimized (because it is already in the DB),
S_OK(True) if an optimization was carried out,
S_OK(False) if no optimization was carried out
"""
# Set to True if the request could be optimized
optimized = False
# List of attributes that must be equal for operations to be merged
attrList = ["Type", "Arguments", "SourceSE", "TargetSE", "Catalog" ]
i = 0
# If the RequestID is not the default one (0), it probably means
# the Request is already in the DB, so we don't touch anything
if self.RequestID != 0:
return S_ERROR( "Cannot optimize because Request seems to be already in the DB (RequestID %s)" % self.RequestID )
# We could do it with a single loop (the 2nd one), but by doing this,
# we can replace
# i += 1
# continue
#
# with
# break
#
# which is nicer in my opinion
while i < len( self.__operations__ ):
while ( i + 1 ) < len( self.__operations__ ):
# Some attributes need to be the same
attrMismatch = False
for attr in attrList:
if getattr( self.__operations__[i], attr ) != getattr( self.__operations__[i + 1], attr ):
attrMismatch = True
break
if attrMismatch:
break
# We do not do the merge if there are common files in the operations
fileSetA = set( list( f.LFN for f in self.__operations__[i] ) )
fileSetB = set( list( f.LFN for f in self.__operations__[i + 1] ) )
if len( fileSetA & fileSetB ):
break
# There is a maximum number of files one can add into an operation
try:
while len( self.__operations__[i + 1] ):
self.__operations__[i] += self.__operations__[i + 1][0]
del self.__operations__[i + 1][0]
optimized = True
del self.__operations__[i + 1]
except RuntimeError:
i += 1
i += 1
return S_OK( optimized )
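# Usage sketch (illustrative only, not part of this module). Operation attribute
# names follow the DIRAC RMS Operation class imported above; the SE name is a
# made-up placeholder:
#
#   request = Request()
#   request.RequestName = "example-request"
#   op = Operation()
#   op.Type = "ReplicateAndRegister"
#   op.TargetSE = "SOME-DISK-SE"
#   request.addOperation( op )
#   print request.getDigest()["Value"]
#   request.optimize()  # merge mergeable operations before insertion into the DB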
| sposs/DIRAC | RequestManagementSystem/Client/Request.py | Python | gpl-3.0 | 18,616 | [
"DIRAC"
] | 48e1c953ede561b9e1e09dbdb991ed7a874a473006e9ebe0839331be4471494d |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import unittest
import os
import json
import numpy as np
import pandas as pd
from pymatgen.io.lammps.outputs import LammpsDump, parse_lammps_dumps,\
parse_lammps_log
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..",
"test_files", "lammps")
class LammpsDumpTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
with open(os.path.join(test_dir, "dump.rdx_wc.100")) as f:
rdx_str = f.read()
cls.rdx = LammpsDump.from_string(string=rdx_str)
with open(os.path.join(test_dir, "dump.tatb")) as f:
tatb_str = f.read()
cls.tatb = LammpsDump.from_string(string=tatb_str)
def test_from_string(self):
self.assertEqual(self.rdx.timestep, 100)
self.assertEqual(self.rdx.natoms, 21)
np.testing.assert_array_equal(self.rdx.box.bounds,
np.array([(35, 48)] * 3))
np.testing.assert_array_equal(self.rdx.data.columns,
["id", "type", "xs", "ys", "zs"])
rdx_data = self.rdx.data.iloc[-1]
rdx_data_target = [19, 2, 0.42369, 0.47347, 0.555425]
np.testing.assert_array_almost_equal(rdx_data, rdx_data_target)
self.assertEqual(self.tatb.timestep, 0)
self.assertEqual(self.tatb.natoms, 384)
bounds = [[0, 13.624], [0, 17.1149153805], [0, 15.1826391451]]
np.testing.assert_array_almost_equal(self.tatb.box.bounds, bounds)
tilt = [-5.75315630927, -6.325466, 7.4257288]
np.testing.assert_array_almost_equal(self.tatb.box.tilt, tilt)
np.testing.assert_array_equal(self.tatb.data.columns,
["id", "type", "q", "x", "y", "z"])
tatb_data = self.tatb.data.iloc[-1]
tatb_data_target = [356, 3, -0.482096, 2.58647, 12.9577, 14.3143]
np.testing.assert_array_almost_equal(tatb_data, tatb_data_target)
def test_json_dict(self):
encoded = json.dumps(self.rdx.as_dict())
decoded = json.loads(encoded)
rdx = LammpsDump.from_dict(decoded)
self.assertEqual(rdx.timestep, 100)
self.assertEqual(rdx.natoms, 21)
np.testing.assert_array_equal(rdx.box.bounds,
np.array([(35, 48)] * 3))
pd.testing.assert_frame_equal(rdx.data, self.rdx.data)
class FuncTest(unittest.TestCase):
def test_parse_lammps_dumps(self):
# gzipped
rdx_10_pattern = os.path.join(test_dir, "dump.rdx.gz")
rdx_10 = list(parse_lammps_dumps(file_pattern=rdx_10_pattern))
timesteps_10 = [d.timestep for d in rdx_10]
np.testing.assert_array_equal(timesteps_10, np.arange(0, 101, 10))
self.assertTupleEqual(rdx_10[-1].data.shape, (21, 5))
# wildcard
rdx_25_pattern = os.path.join(test_dir, "dump.rdx_wc.*")
rdx_25 = list(parse_lammps_dumps(file_pattern=rdx_25_pattern))
timesteps_25 = [d.timestep for d in rdx_25]
np.testing.assert_array_equal(timesteps_25, np.arange(0, 101, 25))
self.assertTupleEqual(rdx_25[-1].data.shape, (21, 5))
def test_parse_lammps_log(self):
comb_file = "log.5Oct16.comb.Si.elastic.g++.1"
comb = parse_lammps_log(filename=os.path.join(test_dir, comb_file))
self.assertEqual(len(comb), 6)
# first comb run
comb0 = comb[0]
np.testing.assert_array_equal(["Step", "Temp", "TotEng", "PotEng",
"E_vdwl", "E_coul"], comb0.columns)
self.assertEqual(len(comb0), 6)
comb0_data = [[0, 1, -4.6295947, -4.6297237, -4.6297237, 0],
[5, 1, -4.6295965, -4.6297255, -4.6297255, 0]]
np.testing.assert_array_almost_equal(comb0.iloc[[0, -1]], comb0_data)
# final comb run
comb_1 = comb[-1]
np.testing.assert_array_equal(["Step", "Lx", "Ly", "Lz",
"Xy", "Xz", "Yz",
"c_fxy[1]", "c_fxy[2]", "c_fxy[3]",
"c_fxy[4]", "c_fxy[5]", "c_fxy[6]"],
comb_1.columns)
self.assertEqual(len(comb_1), 11)
comb_1_data = [[36, 5.1293854e-06], [46, 2192.8256]]
np.testing.assert_array_almost_equal(comb_1.iloc[[0, -1], [0, -3]],
comb_1_data)
ehex_file = "log.13Oct16.ehex.g++.8"
ehex = parse_lammps_log(filename=os.path.join(test_dir, ehex_file))
self.assertEqual(len(ehex), 3)
ehex0, ehex1, ehex2 = ehex
# ehex run #1
np.testing.assert_array_equal(["Step", "Temp", "E_pair", "E_mol",
"TotEng", "Press"], ehex0.columns)
self.assertEqual(len(ehex0), 11)
ehex0_data = [[0, 1.35, -4.1241917, 0, -2.0994448, -3.1961612],
[1000, 1.3732017, -3.7100044, 0,
-1.6504594, 0.83982701]]
np.testing.assert_array_almost_equal(ehex0.iloc[[0, -1]], ehex0_data)
# ehex run #2
np.testing.assert_array_equal(["Step", "Temp", "c_Thot", "c_Tcold"],
ehex1.columns)
self.assertEqual(len(ehex1), 11)
ehex1_data = [[1000, 1.35, 1.431295, 1.2955644],
[11000, 1.3794051, 1.692299, 1.0515688]]
np.testing.assert_array_almost_equal(ehex1.iloc[[0, -1]], ehex1_data)
# ehex run #3
np.testing.assert_array_equal(["Step", "Temp", "c_Thot", "c_Tcold",
"v_tdiff", "f_ave"], ehex2.columns)
self.assertEqual(len(ehex2), 21)
ehex2_data = [[11000, 1.3794051, 1.6903393, 1.0515688, 0, 0],
[31000, 1.3822489, 1.8220413, 1.0322271, -0.7550338,
-0.76999077]]
np.testing.assert_array_almost_equal(ehex2.iloc[[0, -1]], ehex2_data)
peptide_file = "log.5Oct16.peptide.g++.1"
peptide = parse_lammps_log(filename=os.path.join(test_dir,
peptide_file))
peptide0 = peptide[0]
np.testing.assert_array_equal(["Step", "TotEng", "KinEng", "Temp",
"PotEng", "E_bond", "E_angle",
"E_dihed", "E_impro", "E_vdwl",
"E_coul", "E_long", "Press"],
peptide0.columns)
self.assertEqual(len(peptide0), 7)
peptide0_select = peptide0.loc[[0, 6], ["Step", "TotEng", "Press"]]
peptide0_data = [[0, -5237.4580, -837.0112],
[300, -5251.3637, -471.5505]]
np.testing.assert_array_almost_equal(peptide0_select, peptide0_data)
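# Standalone usage sketch (illustrative, not part of the test suite):
#   from pymatgen.io.lammps.outputs import parse_lammps_log
#   runs = parse_lammps_log("log.lammps")   # one pandas DataFrame per thermo run
#   print(runs[0][["Step", "Temp"]].head())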
if __name__ == "__main__":
unittest.main()
| dongsenfo/pymatgen | pymatgen/io/lammps/tests/test_outputs.py | Python | mit | 6,962 | [
"LAMMPS",
"pymatgen"
] | fb2ccade2a8ba75a59d10dc5f2b829ac9f61b28828e1ab45c3db7d8f75b292c4 |
# Copyright 2008 Brian Boyer, Ryan Mark, Angela Nitzke, Joshua Pollock,
# Stuart Tiffen, Kayla Webley and the Medill School of Journalism, Northwestern
# University.
#
# This file is part of Crunchberry Pie.
#
# Crunchberry Pie is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Crunchberry Pie is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with Crunchberry Pie. If not, see <http://www.gnu.org/licenses/>.
from django.conf.urls.defaults import *
from django.contrib.auth.views import login, logout
from django.views.generic.simple import direct_to_template
from django.conf import settings
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
(r'^$', 'pie.views.index'),
(r'^accounts/', include('pie.authentication.urls')),
(r'^quips/', include('pie.quips.urls')),
(r'^letters/', include('pie.letters.urls')),
(r'^questions/', include('pie.questions.urls')),
(r'^profiles/',include('pie.profiles.urls')),
(r'^admin/(.*)', admin.site.root),
(r'^admin/doc/', include('django.contrib.admindocs.urls')),
(r'^contact/', include('contact_form.urls')),
(r'',include('pressroom.urls')),
(r'^public/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.STATIC_DOC_ROOT}),
)
| brianboyer/newsmixer | pie/urls.py | Python | gpl-3.0 | 1,775 | [
"Brian"
] | be7ee769d7ae22d0a7f1b2ae53bb9a9f140b98acb61374dc4d88f2559e72aec8 |
# Copyright (C) 2010-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest as ut
import unittest_decorators as utx
import espressomd
from espressomd.interactions import HarmonicBond
from espressomd.interactions import FeneBond
from espressomd.observables import PressureTensor
from espressomd.observables import Pressure
from tests_common import fene_force2
import numpy as np
# allowed deviation from analytical results
tol = 1.0e-13
def pressure_tensor_kinetic(vel):
'''Analytical result for convective pressure tensor'''
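# With unit masses this computes (1/V) * sum_i v_i (outer product) v_i over all particles.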
return np.einsum('ij,ik->jk', vel, vel) / system.volume()
def pressure_tensor_bonded(pos):
'''Analytical result for pressure tensor originating from bonded forces'''
tensor = np.zeros([3, 3])
for p1, p2 in zip(pos[0::2], pos[1::2]):
r = p1 - p2
f = -1.0e4 * r
tensor += np.einsum('i,j', f, r) / system.volume()
return tensor
def pressure_tensor_nonbonded(particle_pairs):
'''Analytical result for pressure tensor originating from non-bonded forces'''
tensor = np.zeros([3, 3])
for p1, p2 in particle_pairs:
if (p1.type == 0 and p2.type == 0) or (p1.type == 1 and p2.type == 2):
d = p1.pos - p2.pos
r = np.linalg.norm(d)
r_hat = d / r
f = (24.0 * 1.0 * (2.0 * 1.0**12 / r**13 - 1.0**6 / r**7)) * r_hat
tensor += np.einsum('i,j', f, d) / system.volume()
return tensor
def pressure_tensor_nonbonded_inter(particle_pairs):
tensor = np.zeros([3, 3])
for p1, p2 in particle_pairs:
if p1.type == 1 and p2.type == 2 and p1.mol_id != p2.mol_id:
r = p1.pos - p2.pos
d = np.linalg.norm(r)
r_hat = r / d
f = (24.0 * 1.0 * (2.0 * 1.0**12 / d**13 - 1.0**6 / d**7)) * r_hat
tensor += np.einsum('i,j', f, r) / system.volume()
return tensor
def pressure_tensor_nonbonded_intra(particle_pairs):
tensor = np.zeros([3, 3])
for p1, p2 in particle_pairs:
if p1.type == 0 and p2.type == 0 and p1.mol_id == p2.mol_id:
r = p1.pos - p2.pos
d = np.linalg.norm(r)
r_hat = r / d
f = (24.0 * 1.0 * (2.0 * 1.0**12 / d**13 - 1.0**6 / d**7)) * r_hat
tensor += np.einsum('i,j', f, r) / system.volume()
return tensor
system = espressomd.System(box_l=[1.0, 1.0, 1.0])
@utx.skipIfMissingFeatures(['LENNARD_JONES'])
class PressureLJ(ut.TestCase):
def tearDown(self):
system.part.clear()
def test(self):
# system parameters
system.box_l = 3 * [10.0]
skin = 0.4
time_step = 0.01
system.time_step = time_step
# thermostat and cell system
system.thermostat.set_langevin(kT=0.0, gamma=1.0, seed=41)
system.cell_system.skin = skin
system.periodicity = [1, 1, 1]
# particles and bond
system.part.add(id=0, pos=[9.9, 9.75, 9.9], type=0, mol_id=0)
system.part.add(id=1, pos=[9.9, 10.25, 9.9], type=0, mol_id=0)
system.part.add(id=2, pos=[0.1, 9.7, 0.1], type=1, mol_id=1)
system.part.add(id=3, pos=[0.1, 10.3, 0.1], type=2, mol_id=2)
harmonic = HarmonicBond(k=1e4, r_0=0)
system.bonded_inter.add(harmonic)
system.part[0].add_bond((harmonic, 1))
system.part[2].add_bond((harmonic, 3))
system.non_bonded_inter[0, 0].lennard_jones.set_params(
epsilon=1.0, sigma=1.0, cutoff=2.0, shift=0)
system.non_bonded_inter[1, 2].lennard_jones.set_params(
epsilon=1.0, sigma=1.0, cutoff=2.0, shift=0)
system.integrator.run(steps=0)
system.part[0].v = [10.0, 20.0, 30.0]
system.part[1].v = [-15, -25, -35]
system.part[2].v = [27.0, 23.0, 17.0]
system.part[3].v = [13.0, 11.0, 19.0]
pos = system.part[:].pos
vel = system.part[:].v
sim_pressure_tensor = system.analysis.pressure_tensor()
sim_pressure_tensor_kinetic = np.copy(sim_pressure_tensor['kinetic'])
sim_pressure_tensor_bonded = np.copy(sim_pressure_tensor['bonded'])
sim_pressure_tensor_bonded_harmonic = np.copy(
sim_pressure_tensor['bonded', len(system.bonded_inter) - 1])
sim_pressure_tensor_nonbonded = np.copy(
sim_pressure_tensor['non_bonded'])
sim_pressure_tensor_nonbonded_inter = np.copy(
sim_pressure_tensor['non_bonded_inter'])
sim_pressure_tensor_nonbonded_inter12 = np.copy(
sim_pressure_tensor['non_bonded_inter', 1, 2])
sim_pressure_tensor_nonbonded_intra = np.copy(
sim_pressure_tensor['non_bonded_intra'])
sim_pressure_tensor_nonbonded_intra00 = np.copy(
sim_pressure_tensor['non_bonded_intra', 0, 0])
sim_pressure_tensor_total = np.copy(sim_pressure_tensor['total'])
sim_pressure = system.analysis.pressure()
sim_pressure_kinetic = sim_pressure['kinetic']
sim_pressure_bonded = sim_pressure['bonded']
sim_pressure_bonded_harmonic = sim_pressure[
'bonded', len(system.bonded_inter) - 1]
sim_pressure_nonbonded = sim_pressure['non_bonded']
sim_pressure_nonbonded_inter = sim_pressure['non_bonded_inter']
sim_pressure_nonbonded_inter12 = sim_pressure['non_bonded_inter', 1, 2]
sim_pressure_nonbonded_intra = sim_pressure['non_bonded_intra']
sim_pressure_nonbonded_intra00 = sim_pressure['non_bonded_intra', 0, 0]
sim_pressure_total = sim_pressure['total']
anal_pressure_tensor_kinetic = pressure_tensor_kinetic(vel)
anal_pressure_tensor_bonded = pressure_tensor_bonded(pos)
anal_pressure_tensor_nonbonded = pressure_tensor_nonbonded(
system.part.pairs())
anal_pressure_tensor_nonbonded_inter = pressure_tensor_nonbonded_inter(
system.part.pairs())
anal_pressure_tensor_nonbonded_intra = pressure_tensor_nonbonded_intra(
system.part.pairs())
anal_pressure_tensor_total = anal_pressure_tensor_kinetic + \
anal_pressure_tensor_bonded + anal_pressure_tensor_nonbonded
anal_pressure_kinetic = np.einsum(
'ii', anal_pressure_tensor_kinetic) / 3.0
anal_pressure_bonded = np.einsum(
'ii', anal_pressure_tensor_bonded) / 3.0
anal_pressure_nonbonded = np.einsum(
'ii', anal_pressure_tensor_nonbonded) / 3.0
anal_pressure_nonbonded_inter = np.einsum(
'ii', anal_pressure_tensor_nonbonded_inter) / 3.0
anal_pressure_nonbonded_intra = np.einsum(
'ii', anal_pressure_tensor_nonbonded_intra) / 3.0
anal_pressure_total = anal_pressure_kinetic + \
anal_pressure_bonded + anal_pressure_nonbonded
np.testing.assert_allclose(
sim_pressure_tensor_kinetic, anal_pressure_tensor_kinetic, rtol=0, atol=tol,
err_msg='kinetic pressure tensor does not match analytical result')
np.testing.assert_allclose(
sim_pressure_tensor_bonded, anal_pressure_tensor_bonded, rtol=0, atol=tol,
err_msg='bonded pressure tensor does not match analytical result')
np.testing.assert_allclose(
sim_pressure_tensor_bonded_harmonic, anal_pressure_tensor_bonded, rtol=0, atol=tol,
err_msg='bonded pressure tensor harmonic bond does not match analytical result')
np.testing.assert_allclose(
sim_pressure_tensor_nonbonded, anal_pressure_tensor_nonbonded, rtol=0, atol=tol,
err_msg='non-bonded pressure tensor does not match analytical result')
np.testing.assert_allclose(
sim_pressure_tensor_nonbonded_inter, anal_pressure_tensor_nonbonded_inter, rtol=0, atol=tol,
err_msg='non-bonded intermolecular pressure tensor does not match analytical result')
np.testing.assert_allclose(
sim_pressure_tensor_nonbonded_inter12, anal_pressure_tensor_nonbonded_inter, rtol=0, atol=tol,
err_msg='non-bonded intermolecular pressure tensor molecules 1 and 2 does not match analytical result')
np.testing.assert_allclose(
sim_pressure_tensor_nonbonded_intra, anal_pressure_tensor_nonbonded_intra, rtol=0, atol=tol,
err_msg='non-bonded intramolecular pressure tensor does not match analytical result')
np.testing.assert_allclose(
sim_pressure_tensor_nonbonded_intra00, anal_pressure_tensor_nonbonded_intra, rtol=0, atol=tol,
err_msg='non-bonded intramolecular pressure tensor molecule 0 does not match analytical result')
np.testing.assert_allclose(
sim_pressure_tensor_total, anal_pressure_tensor_total, rtol=0, atol=tol,
err_msg='total pressure tensor does not match analytical result')
np.testing.assert_allclose(
sim_pressure_tensor_total, sim_pressure_tensor_kinetic + sim_pressure_tensor_bonded + sim_pressure_tensor_nonbonded, rtol=0, atol=tol,
err_msg='total pressure tensor is not given as the sum of all major pressure components')
self.assertAlmostEqual(
sim_pressure_kinetic, anal_pressure_kinetic, delta=tol,
msg='kinetic pressure does not match analytical result')
self.assertAlmostEqual(
sim_pressure_bonded, anal_pressure_bonded, delta=tol,
msg='bonded pressure does not match analytical result')
self.assertAlmostEqual(
sim_pressure_bonded_harmonic, anal_pressure_bonded, delta=tol,
msg='bonded pressure harmonic bond does not match analytical result')
self.assertAlmostEqual(
sim_pressure_nonbonded, anal_pressure_nonbonded, delta=tol,
msg='non-bonded pressure does not match analytical result')
self.assertAlmostEqual(
sim_pressure_nonbonded_inter, anal_pressure_nonbonded_inter, delta=tol,
msg='non-bonded intermolecular pressure does not match analytical result')
self.assertAlmostEqual(
sim_pressure_nonbonded_inter12, anal_pressure_nonbonded_inter, delta=tol,
msg='non-bonded intermolecular pressure molecule 1 and 2 does not match analytical result')
self.assertAlmostEqual(
sim_pressure_nonbonded_intra, anal_pressure_nonbonded_intra, delta=tol,
msg='non-bonded intramolecular pressure does not match analytical result')
self.assertAlmostEqual(
sim_pressure_nonbonded_intra00, anal_pressure_nonbonded_intra, delta=tol,
msg='non-bonded intramolecular pressure molecule 0 does not match analytical result')
self.assertAlmostEqual(
sim_pressure_total, anal_pressure_total, delta=tol,
msg='total pressure does not match analytical result')
self.assertAlmostEqual(
sim_pressure_total, sim_pressure_kinetic + sim_pressure_bonded + sim_pressure_nonbonded, delta=tol,
msg='total pressure is not given as the sum of all major pressure components')
# Compare pressure observables to pressure from analysis
np.testing.assert_allclose(
PressureTensor().calculate(),
sim_pressure_tensor["total"],
rtol=0, atol=1E-10)
self.assertAlmostEqual(
Pressure().calculate(),
sim_pressure["total"],
delta=tol)
@utx.skipIfMissingFeatures(['EXTERNAL_FORCES'])
class PressureFENE(ut.TestCase):
def tearDown(self):
system.part.clear()
def get_anal_pressure_tensor_fene(self, pos_1, pos_2, k, d_r_max, r_0):
tensor = np.zeros([3, 3])
vec_r = pos_1 - pos_2
f = -fene_force2(vec_r, k, d_r_max, r_0)
tensor += np.einsum('i,j', f, vec_r) / system.volume()
return tensor
def test_fene(self):
# system parameters
system.box_l = 3 * [10.0]
skin = 0.4
time_step = 0.01
system.time_step = time_step
# thermostat and cell system
system.cell_system.skin = skin
system.periodicity = [1, 1, 1]
# particles and bond
system.part.add(
id=0, pos=[9.9, 9.75, 9.9], type=0, mol_id=0, fix=[1, 1, 1])
system.part.add(
id=1, pos=[9.9, 10.25, 9.9], type=0, mol_id=0, fix=[1, 1, 1])
k = 1e4
d_r_max = 1.5
r_0 = 0.1
fene = FeneBond(k=k, d_r_max=d_r_max, r_0=r_0)
system.bonded_inter.add(fene)
system.part[0].add_bond((fene, 1))
system.integrator.run(steps=0)
sim_pressure_tensor = system.analysis.pressure_tensor()
sim_pressure_tensor_bonded = sim_pressure_tensor['bonded']
sim_pressure_tensor_fene = sim_pressure_tensor['bonded', len(
system.bonded_inter) - 1]
total_bonded_pressure_tensor = np.zeros([3, 3])
for i in range(len(system.bonded_inter)):
total_bonded_pressure_tensor += sim_pressure_tensor['bonded', i]
anal_pressure_tensor_fene = self.get_anal_pressure_tensor_fene(
system.part[0].pos, system.part[1].pos, k, d_r_max, r_0)
np.testing.assert_allclose(
sim_pressure_tensor_bonded, anal_pressure_tensor_fene, atol=tol,
err_msg='bonded pressure tensor does not match analytical result')
np.testing.assert_allclose(
sim_pressure_tensor_fene, anal_pressure_tensor_fene, atol=tol,
err_msg='bonded pressure tensor for fene does not match analytical result')
np.testing.assert_allclose(
sim_pressure_tensor_bonded, total_bonded_pressure_tensor, atol=tol,
err_msg='bonded pressure tensor do not sum up to the total value')
sim_pressure = system.analysis.pressure()
sim_pressure_fene = sim_pressure['bonded', len(
system.bonded_inter) - 1]
anal_pressure_fene = np.einsum("ii", anal_pressure_tensor_fene) / 3.0
np.testing.assert_allclose(
sim_pressure_fene, anal_pressure_fene, atol=tol,
err_msg='bonded pressure for fene does not match analytical result')
# Compare pressure observables to pressure from analysis
np.testing.assert_allclose(
PressureTensor().calculate(),
sim_pressure_tensor["total"],
rtol=0, atol=1E-10)
self.assertAlmostEqual(
Pressure().calculate(),
sim_pressure["total"],
delta=tol)
if __name__ == "__main__":
ut.main()
| KaiSzuttor/espresso | testsuite/python/pressure.py | Python | gpl-3.0 | 15,101 | [
"ESPResSo"
] | 39be44ef37664bb476566f80a2e06b8c95bb5f5f6755e8ae615a7c205b8b6553 |
## http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/364469
import compiler
class Unsafe_Source_Error(Exception):
def __init__(self,error,descr = None,node = None):
self.error = error
self.descr = descr
self.node = node
self.lineno = getattr(node,"lineno",None)
def __repr__(self):
return "Line %d. %s: %s" % (self.lineno, self.error, self.descr)
__str__ = __repr__
class SafeEval(object):
def visit(self, node,**kw):
cls = node.__class__
meth = getattr(self,'visit'+cls.__name__,self.default)
return meth(node, **kw)
def default(self, node, **kw):
for child in node.getChildNodes():
return self.visit(child, **kw)
visitExpression = default
def visitConst(self, node, **kw):
return node.value
def visitDict(self,node,**kw):
return dict([(self.visit(k),self.visit(v)) for k,v in node.items])
def visitTuple(self,node, **kw):
return tuple(self.visit(i) for i in node.nodes)
def visitList(self,node, **kw):
return [self.visit(i) for i in node.nodes]
def visitUnarySub(self, node, **kw):
return - self.visit (node.getChildNodes ()[0])
class SafeEvalWithErrors(SafeEval):
def default(self, node, **kw):
raise Unsafe_Source_Error("Unsupported source construct",
node.__class__,node)
def visitName(self,node, **kw):
raise Unsafe_Source_Error("Strings must be quoted",
node.name, node)
# Add more specific errors if desired
def safe_eval(source, fail_on_error = True):
walker = fail_on_error and SafeEvalWithErrors() or SafeEval()
try:
ast = compiler.parse(source,"eval")
except SyntaxError, err:
raise
try:
return walker.visit(ast)
except Unsafe_Source_Error, err:
raise
def test ():
print safe_eval ('{1: [2,3], "4": (-1,2)}')
if __name__ == '__main__':
test ()
| drewm1980/lilypond-an | python/safeeval.py | Python | gpl-2.0 | 2,439 | [
"VisIt"
] | 049528ac45df94a0024b774a4e524b32230edad7c9d5bf1b94fd9a7c7e676395 |
import os
import unittest
from datetime import datetime, timedelta
os.environ['MELANGE_CONFIG_MODULE'] = 'melange.config.TestingConfig'
import melange
from melange import db_session, Item, Tag, User, Log
class MelangeTestCase(unittest.TestCase):
def setUp(self):
self.app = melange.app.test_client()
melange.database.drop_db()
melange.database.init_db()
def tearDown(self):
pass
def create_simple_setup(self):
item = Item('firefly')
db_session.add(item)
db_session.commit()
tag = Tag('laptop')
db_session.add(tag)
db_session.commit()
def test_empty_items(self):
assert(len(Item.query.all())==0)
def test_empty_tags(self):
assert(len(Tag.query.all())==0)
def test_add_item(self):
self.create_simple_setup()
item = Item.query.filter(Item.name=='firefly').one()
items = Item.query.all()
assert item in items
def test_add_tag_to_item(self):
self.create_simple_setup()
item = Item.query.filter(Item.name=='firefly').one()
tag = Tag.query.filter(Tag.name=='laptop').one()
item.tags.append(tag)
db_session.add(item)
db_session.commit()
assert item in tag.items
def test_remove_tag_from_item(self):
self.test_add_tag_to_item()
item = Item.query.filter(Item.name=='firefly').one()
tag = Tag.query.filter(Tag.name=='laptop').one()
item.tags.remove(tag)
db_session.add(item)
db_session.commit()
assert item not in tag.items
def test_item_variable(self):
self.create_simple_setup()
item = Item.query.filter(Item.name=='firefly').one()
item.set_variable('hello', 'world')
item.set_variable('mylist', ['a', 'b'])
item.set_variable('mydict', {'hello': 'world'})
db_session.add(item)
db_session.commit()
vars = item.variables
assert vars['hello'] == 'world'
assert vars['mylist'] == ['a', 'b']
assert vars['mydict'] == {'hello': 'world'}
def test_remove_item_variables(self):
self.create_simple_setup()
item = Item.query.filter(Item.name=='firefly').one()
item.set_variable('hello', 'world')
db_session.add(item)
db_session.commit()
item.remove_variable('hello')
db_session.add(item)
db_session.commit()
assert 'hello' not in item.variables
def test_tag_variable(self):
self.create_simple_setup()
item = Item.query.filter(Item.name=='firefly').one()
tag = Tag.query.filter(Tag.name=='laptop').one()
tag.set_variable('hello', 'world')
tag.items.append(item)
db_session.add(tag)
db_session.commit()
assert 'hello' not in item.variables
assert 'hello' in item.get_all_variables()
def test_duplicate_tag_variable(self):
''' variable in longest tag wins '''
item = Item('firefly')
laptop = Tag('laptop')
laptop.set_variable('hello', 'laptop')
linux = Tag('linux')
linux.set_variable('hello', 'linux')
item.add_to(laptop)
item.add_to(linux)
item.save()
assert 'hello' in item.get_all_variables()
assert item.get_all_variables()['hello'] == 'laptop'
def test_avoid_duplicate_tag(self):
self.create_simple_setup()
item = Item.query.filter(Item.name=='firefly').one()
tag = Tag.query.filter(Tag.name=='laptop').one()
tag.items.append(item)
db_session.add(tag)
db_session.commit()
tag.items.append(item)
db_session.add(tag)
db_session.commit()
assert len(tag.items) == 1
def test_parent_child(self):
i1 = Item('firefly')
i2 = Item('fireflash')
i3 = Item('home')
db_session.add(i1)
db_session.add(i2)
db_session.add(i3)
db_session.commit()
i3.children.append(i1)
i3.children.append(i2)
db_session.add(i3)
db_session.commit()
assert i3.children == [i1, i2]
assert i1.parents == [i3]
def test_data_variable_in_multiple_tags(self):
item = Item('firefly')
item.set_variable('hello', 'firefly')
linux = Tag('linux')
linux.set_variable('hello', 'linux')
laptop = Tag('laptop')
laptop.set_variable('hello', 'laptop')
item.add_to(linux)
item.add_to(laptop)
item.save()
data = item.to_data()
hello = [entry['value'] for entry in data['vars'] if entry['key'] == 'hello']
assert len(hello) == 1
assert hello[0] == 'firefly'
class MelangeUserTestCase(unittest.TestCase):
def setUp(self):
melange.database.drop_db()
melange.database.init_db()
def test_user(self):
admin = User('admin')
admin.password = '123456'
db_session.add(admin)
db_session.commit()
admin = User.query.filter(User.name=='admin').one()
assert admin.authenticate('123456')
assert admin.authenticate('12345') == False
class MelangeLogTestCase(unittest.TestCase):
def setUp(self):
melange.database.drop_db()
melange.database.init_db()
def test_log(self):
log = Log('fireflash', 'test')
db_session.add(log)
db_session.commit()
log = Log.query.filter(Log.name=='fireflash').one()
assert log.message == 'test'
def test_log_order(self):
now = datetime.utcnow()
earlier = now - timedelta(1)
log2 = Log('firefly', 'test2', earlier)
log1 = Log('fireflash', 'test1', now)
db_session.add(log1)
db_session.add(log2)
db_session.commit()
logs = Log.find_all()
assert logs[0].message == 'test1'
def test_log_range(self):
now = datetime.utcnow()
earlier = now - timedelta(1)
log2 = Log('firefly', 'test2', earlier)
log1 = Log('fireflash', 'test1', now)
db_session.add(log1)
db_session.add(log2)
db_session.commit()
logs = Log.find_range(now)
assert len(logs)==1
assert logs[0].message == 'test1'
def test_log_on_item_insert(self):
item = Item('fireflash')
db_session.add(item)
db_session.commit()
logs = Log.find_all()
assert len(logs) == 1
assert logs[0].name == 'fireflash'
def test_no_log_on_creation(self):
item = Item('fireflash')
db_session.add(item)
db_session.rollback()
logs = Log.find_all()
assert len(logs) == 0
def test_log_on_variable(self):
item = Item('fireflash')
item.set_variable('hello', 'world')
db_session.add(item)
db_session.commit()
logs = Log.find_all()
assert len(logs) == 2
assert 'hello' in logs[0].message or 'hello' in logs[1].message
if __name__ == '__main__':
unittest.main()
| jhoekx/melange | tests/TestModels.py | Python | gpl-3.0 | 7,047 | [
"Firefly"
] | f7b193fc2ca81ca55725d6145f6a2f2109d027b914f4c397fcd46230718f6218 |
#!/usr/bin/env python
# GoodFET Client Library
#
# (C) 2009 Travis Goodspeed <travis at radiantmachines.com>
#
# This code is being rewritten and refactored. You've been warned!
#
# Code modified as part of BadUSB 2.0 Project
# by DK 2016 (@withdk)
import sys, time, string, cStringIO, struct, glob, os, inspect;
import sqlite3;
fmt = ("B", "<H", None, "<L")
def getClient(name="GoodFET"):
import GoodFET, GoodFETCC, GoodFETAVR, GoodFETSPI, GoodFETMSP430, GoodFETNRF, GoodFETCCSPI;
if(name=="GoodFET" or name=="monitor"): return GoodFET.GoodFET();
elif name=="cc" or name=="cc51": return GoodFETCC.GoodFETCC();
elif name=="cc2420" or name=="ccspi": return GoodFETCCSPI.GoodFETCCSPI();
elif name=="avr": return GoodFETAVR.GoodFETAVR();
elif name=="spi": return GoodFETSPI.GoodFETSPI();
elif name=="msp430": return GoodFETMSP430.GoodFETMSP430();
elif name=="nrf": return GoodFETNRF.GoodFETNRF();
print "Unsupported target: %s" % name;
sys.exit(0);
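# Usage sketch (illustrative, not part of the original library): a client script
# would typically obtain and initialize a client like this:
#   client = getClient("avr")
#   client.serInit()
#   print client.infostring()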
class SymbolTable:
"""GoodFET Symbol Table"""
db=sqlite3.connect(":memory:");
def __init__(self, *args, **kargs):
self.db.execute("create table if not exists symbols(adr,name,memory,size,comment);");
def get(self,name):
self.db.commit();
c=self.db.cursor();
try:
c.execute("select adr,memory from symbols where name=?",(name,));
for row in c:
#print "Found it.";
sys.stdout.flush();
return row[0];
#print "No dice.";
except:# sqlite3.OperationalError:
#print "SQL error.";
return eval(name);
return eval(name);
def define(self,adr,name,comment="",memory="vn",size=16):
self.db.execute("insert into symbols(adr,name,memory,size,comment)"
"values(?,?,?,?,?);", (
adr,name,memory,size,comment));
#print "Set %s=%s." % (name,adr);
class GoodFETbtser:
"""py-bluez class for emulating py-serial."""
def __init__(self,btaddr):
import bluetooth;
if btaddr==None or btaddr=="none" or btaddr=="bluetooth":
print "performing inquiry..."
nearby_devices = bluetooth.discover_devices(lookup_names = True)
print "found %d devices" % len(nearby_devices)
for addr, name in nearby_devices:
print " %s - '%s'" % (addr, name)
#TODO switch to wildcards.
if name=='FireFly-A6BD':
btaddr=addr;
if name=='RN42-A94A':
btaddr=addr;
print "Please set $GOODFET to the address of your device.";
sys.exit();
print "Identified GoodFET at %s" % btaddr;
# Manually use the portnumber.
port=1;
print "Connecting to %s on port %i." % (btaddr, port);
sock=bluetooth.BluetoothSocket(bluetooth.RFCOMM);
self.sock=sock;
sock.connect((btaddr,port));
sock.settimeout(10); #IMPORTANT Must be patient.
##This is what we'd do for a normal reset.
#str="";
#while not str.endswith("goodfet.sf.net/"):
# str=self.read(64);
# print str;
# Instead, just return and hope for the best.
return;
def write(self,msg):
"""Send traffic."""
import time;
self.sock.send(msg);
#time.sleep(0.1);
return;
def read(self,length):
"""Read traffic."""
data="";
while len(data)<length:
data=data+self.sock.recv(length-len(data));
return data;
class GoodFET:
"""GoodFET Client Library"""
besilent=0;
app=0;
verb=0;
count=0;
data="";
verbose=False
GLITCHAPP=0x71;
MONITORAPP=0x00;
symbols=SymbolTable();
def __init__(self, *args, **kargs):
self.data=[0];
def getConsole(self):
from GoodFETConsole import GoodFETConsole;
return GoodFETConsole(self);
def name2adr(self,name):
return self.symbols.get(name);
def timeout(self):
print "timeout\n";
def serInit(self, port=None, timeout=2, attemptlimit=None):
"""Open a serial port of some kind."""
import re;
if port==None:
print("Serial ports based on Linux, may need changing for other OS's");
port=inspect.stack()[1][1];
#port=port.split('/');
port=re.search('m2h.py',port)
if(port):
port="/dev/ttyUSB1";
os.environ["GOODFET"] = "/dev/ttyUSB1";
else:
port="/dev/ttyUSB0";
os.environ["GOODFET"] = "/dev/ttyUSB0";
print("Using serial port GOODFET=%s" % port)
if port=="bluetooth" or (port is not None and re.match("..:..:..:..:..:..",port)):
self.btInit(port,2,attemptlimit);
else:
self.pyserInit(port,timeout,attemptlimit);
def btInit(self, port, timeout, attemptlimit):
"""Open a bluetooth port.""";
#self.verbose=True; #For debugging BT.
self.serialport=GoodFETbtser(port);
def pyserInit(self, port, timeout, attemptlimit):
"""Open the serial port"""
# Make timeout None to wait forever, 0 for non-blocking mode.
import serial;
fixserial=False;
if os.name=='nt' and sys.version.find('64 bit')!=-1:
print "WARNING: PySerial requires a 32-bit Python build in Windows.";
if port is None and os.environ.get("GOODFET")!=None:
glob_list = glob.glob(os.environ.get("GOODFET"));
if len(glob_list) > 0:
port = glob_list[0];
else:
port = os.environ.get("GOODFET");
if port is None:
glob_list = glob.glob("/dev/tty.usbserial*");
if len(glob_list) > 0:
port = glob_list[0];
if port is None:
glob_list = glob.glob("/dev/ttyUSB*");
if len(glob_list) > 0:
port = glob_list[0];
if port is None:
glob_list = glob.glob("/dev/ttyU0");
if len(glob_list) > 0:
port = glob_list[0];
if port is None and os.name=='nt':
from scanwin32 import winScan;
scan=winScan();
for order,comport,desc,hwid in sorted(scan.comports()):
try:
if hwid.index('FTDI')==0:
port=comport;
print "Using FTDI port %s" % port
except:
#Do nothing.
a=1;
baud=115200;
if(os.environ.get("platform")=='arduino' or os.environ.get("board")=='arduino'):
baud=19200; #Slower, for now.
self.serialport = serial.Serial(
port,
#9600,
baud,
parity = serial.PARITY_NONE,
timeout=timeout
)
self.verb=0;
attempts=0;
connected=0;
while connected==0:
while self.verb!=0x7F or self.data!="http://goodfet.sf.net/":
#while self.data!="http://goodfet.sf.net/":
#print "'%s'!=\n'%s'" % (self.data,"http://goodfet.sf.net/");
if attemptlimit is not None and attempts >= attemptlimit:
return
elif attempts==2 and os.environ.get("board")!='telosb':
print "See the GoodFET FAQ about missing info flash.";
self.serialport.setTimeout(0.2);
elif attempts == 100:
print "Tried 100 times to connect and failed."
sys.stdout.write("Continuing to try forever.") # No newline
sys.stdout.flush()
self.verbose=True # Something isn't going right, give the user more info
elif attempts > 100 and attempts % 10 == 0:
sys.stdout.write('.')
sys.stdout.flush()
#self.serialport.flushInput()
#self.serialport.flushOutput()
#TelosB reset, prefer software to I2C SPST Switch.
if (os.environ.get("board")=='telosb'):
#print "TelosB Reset";
self.telosBReset();
elif (os.environ.get("board")=='z1'):
self.bslResetZ1(invokeBSL=0);
elif (os.environ.get("board")=='apimote1') or (os.environ.get("board")=='apimote'):
#Explicitly set RTS and DTR to halt board.
self.serialport.setRTS(1);
self.serialport.setDTR(1);
#RTS pin, not DTR is used for reset.
self.serialport.setRTS(0);
#print "Resetting Apimote not yet tested.";
else:
#Explicitly set RTS and DTR to halt board.
self.serialport.setRTS(1);
self.serialport.setDTR(1);
#Drop DTR, which is !RST, low to begin the app.
self.serialport.setDTR(0);
#self.serialport.write(chr(0x80));
#self.serialport.write(chr(0x80));
#self.serialport.write(chr(0x80));
#self.serialport.write(chr(0x80));
#self.serialport.flushInput()
#self.serialport.flushOutput()
#time.sleep(60);
attempts=attempts+1;
self.readcmd(); #Read the first command.
#print "Got %02x,%02x:'%s'" % (self.app,self.verb,self.data);
if self.verb!=0x7f:
#Retry again. This usually times out, but helps connect.
self.readcmd();
#print "Retry got %02x,%02x:'%s'" % (self.app,self.verb,self.data);
#Here we have a connection, but maybe not a good one.
#print "We have a connection."
connected=1;
if attempts >= 100:
print "" # Add a newline
olds=self.infostring();
clocking=self.monitorclocking();
for foo in range(1,30):
if not self.monitorecho():
if self.verbose:
print "Comm error on %i try, resyncing out of %s." % (foo,
clocking);
connected=0;
break;
if self.verbose: print "Connected after %02i attempts." % attempts;
self.mon_connected();
self.serialport.setTimeout(12);
def serClose(self):
self.serialport.close();
def telosSetSCL(self, level):
self.serialport.setRTS(not level)
def telosSetSDA(self, level):
self.serialport.setDTR(not level)
def telosI2CStart(self):
self.telosSetSDA(1)
self.telosSetSCL(1)
self.telosSetSDA(0)
def telosI2CStop(self):
self.telosSetSDA(0)
self.telosSetSCL(1)
self.telosSetSDA(1)
def telosI2CWriteBit(self, bit):
self.telosSetSCL(0)
self.telosSetSDA(bit)
time.sleep(2e-6)
self.telosSetSCL(1)
time.sleep(1e-6)
self.telosSetSCL(0)
def telosI2CWriteByte(self, byte):
self.telosI2CWriteBit( byte & 0x80 );
self.telosI2CWriteBit( byte & 0x40 );
self.telosI2CWriteBit( byte & 0x20 );
self.telosI2CWriteBit( byte & 0x10 );
self.telosI2CWriteBit( byte & 0x08 );
self.telosI2CWriteBit( byte & 0x04 );
self.telosI2CWriteBit( byte & 0x02 );
self.telosI2CWriteBit( byte & 0x01 );
self.telosI2CWriteBit( 0 ); # "acknowledge"
def telosI2CWriteCmd(self, addr, cmdbyte):
self.telosI2CStart()
self.telosI2CWriteByte( 0x90 | (addr << 1) )
self.telosI2CWriteByte( cmdbyte )
self.telosI2CStop()
def bslResetZ1(self, invokeBSL=0):
'''
Applies BSL entry sequence on RST/NMI and TEST/VPP pins
Parameters:
invokeBSL = 1: complete sequence
invokeBSL = 0: only RST/NMI pin accessed
By now only BSL mode is accessed
'''
#if DEBUG > 1: sys.stderr.write("* bslReset(invokeBSL=%s)\n" % invokeBSL)
if invokeBSL:
#sys.stderr.write("in Z1 bsl reset...\n")
time.sleep(0.1)
self.writepicROM(0xFF, 0xFF)
time.sleep(0.1)
#sys.stderr.write("z1 bsl reset done...\n")
else:
#sys.stderr.write("in Z1 reset...\n")
time.sleep(0.1)
self.writepicROM(0xFF, 0xFE)
time.sleep(0.1)
#sys.stderr.write("z1 reset done...\n")
def writepicROM(self, address, data):
''' Writes data to @address'''
for i in range(7,-1,-1):
self.picROMclock((address >> i) & 0x01)
self.picROMclock(0)
recbuf = 0
for i in range(7,-1,-1):
s = ((data >> i) & 0x01)
#print s
if i < 1:
r = not self.picROMclock(s, True)
else:
r = not self.picROMclock(s)
recbuf = (recbuf << 1) + r
self.picROMclock(0, True)
#k = 1
#while not self.serial.getCTS():
# pass
#time.sleep(0.1)
return recbuf
def readpicROM(self, address):
''' reads a byte from @address'''
for i in range(7,-1,-1):
self.picROMclock((address >> i) & 0x01)
self.picROMclock(1)
recbuf = 0
r = 0
for i in range(7,-1,-1):
r = self.picROMclock(0)
recbuf = (recbuf << 1) + r
self.picROMclock(r)
#time.sleep(0.1)
return recbuf
#This seems more reliable when slowed.
def picROMclock(self, masterout, slow = True):
#print "setting masterout to "+str(masterout)
self.serialport.setRTS(masterout)
self.serialport.setDTR(1)
#time.sleep(0.02)
self.serialport.setDTR(0)
if slow:
time.sleep(0.02)
return self.serialport.getCTS()
def picROMfastclock(self, masterout):
#print "setting masterout to "+str(masterout)
self.serialport.setRTS(masterout)
self.serialport.setDTR(1)
self.serialport.setDTR(0)
time.sleep(0.02)
return self.serialport.getCTS()
def telosBReset(self,invokeBSL=0):
# "BSL entry sequence at dedicated JTAG pins"
# rst !s0: 0 0 0 0 1 1
# tck !s1: 1 0 1 0 0 1
# s0|s1: 1 3 1 3 2 0
# "BSL entry sequence at shared JTAG pins"
# rst !s0: 0 0 0 0 1 1
# tck !s1: 0 1 0 1 1 0
# s0|s1: 3 1 3 1 0 2
if invokeBSL:
self.telosI2CWriteCmd(0,1)
self.telosI2CWriteCmd(0,3)
self.telosI2CWriteCmd(0,1)
self.telosI2CWriteCmd(0,3)
self.telosI2CWriteCmd(0,2)
self.telosI2CWriteCmd(0,0)
else:
self.telosI2CWriteCmd(0,3)
self.telosI2CWriteCmd(0,2)
# This line was not defined inside the else: block, not sure where it
# should be however
self.telosI2CWriteCmd(0,0)
time.sleep(0.250) #give MSP430's oscillator time to stabilize
self.serialport.flushInput() #clear buffers
def getbuffer(self,size=0x1c00):
writecmd(0,0xC2,[size&0xFF,(size>>16)&0xFF]);
print "Got %02x%02x buffer size." % (self.data[1],self.data[0]);
def writecmd(self, app, verb, count=0, data=[]):
"""Write a command and some data to the GoodFET."""
self.serialport.write(chr(app));
self.serialport.write(chr(verb));
#if data!=None:
# count=len(data); #Initial count ignored.
#print "TX %02x %02x %04x" % (app,verb,count);
#little endian 16-bit length
self.serialport.write(chr(count&0xFF));
self.serialport.write(chr(count>>8));
if self.verbose:
print "Tx: ( 0x%02x, 0x%02x, 0x%04x )" % ( app, verb, count )
#print "count=%02x, len(data)=%04x" % (count,len(data));
if count!=0:
if(isinstance(data,list)):
for i in range(0,count):
#print "Converting %02x at %i" % (data[i],i)
data[i]=chr(data[i]);
#print type(data);
outstr=''.join(data);
self.serialport.write(outstr);
if not self.besilent:
return self.readcmd()
else:
return []
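# Wire-format sketch (illustrative): the packet written above is one app byte,
# one verb byte, a little-endian 16-bit payload count, then the payload itself.
# For example, the monitor echo used by monitorecho() below could be sent as
#   self.writecmd(0x00, 0x81, 4, "ping")
# which puts 0x00 0x81 0x04 0x00 'p' 'i' 'n' 'g' on the serial line.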
def readcmd(self):
"""Read a reply from the GoodFET."""
while 1:#self.serialport.inWaiting(): # Loop while input data is available
try:
#print "Reading...";
self.app=ord(self.serialport.read(1));
#print "APP=%02x" % self.app;
self.verb=ord(self.serialport.read(1));
#Fixes an obscure bug in the TelosB.
if self.app==0x00:
while self.verb==0x00:
self.verb=ord(self.serialport.read(1));
#print "VERB=%02x" % self.verb;
self.count=(
ord(self.serialport.read(1))
+(ord(self.serialport.read(1))<<8)
);
if self.verbose:
print "Rx: ( 0x%02x, 0x%02x, 0x%04x )" % ( self.app, self.verb, self.count )
#Debugging string; print, but wait.
if self.app==0xFF:
if self.verb==0xFF:
print "# DEBUG %s" % self.serialport.read(self.count)
elif self.verb==0xFE:
print "# DEBUG 0x%x" % struct.unpack(fmt[self.count-1], self.serialport.read(self.count))[0]
elif self.verb==0xFD:
#Do nothing, just wait so there's no timeout.
print "# NOP.";
sys.stdout.flush();
else:
self.data=self.serialport.read(self.count);
return self.data;
except TypeError:
if self.connected:
print "Warning: waiting for serial read timed out (most likely).";
#print "This shouldn't happen after syncing. Exiting for safety.";
#sys.exit(-1)
return self.data;
#Glitching stuff.
def glitchApp(self,app):
"""Glitch into a device by its application."""
self.data=[app&0xff];
self.writecmd(self.GLITCHAPP,0x80,1,self.data);
#return ord(self.data[0]);
def glitchVerb(self,app,verb,data):
"""Glitch during a transaction."""
if data==None: data=[];
self.data=[app&0xff, verb&0xFF]+data;
self.writecmd(self.GLITCHAPP,0x81,len(self.data),self.data);
#return ord(self.data[0]);
def glitchstart(self):
"""Glitch into the AVR application."""
self.glitchVerb(self.APP,0x20,None);
def glitchstarttime(self):
"""Measure the timer of the START verb."""
return self.glitchTime(self.APP,0x20,None);
def glitchTime(self,app,verb,data):
"""Time the execution of a verb."""
if data==None: data=[];
self.data=[app&0xff, verb&0xFF]+data;
print "Timing app %02x verb %02x." % (app,verb);
self.writecmd(self.GLITCHAPP,0x82,len(self.data),self.data);
time=ord(self.data[0])+(ord(self.data[1])<<8);
print "Timed to be %i." % time;
return time;
def glitchVoltages(self,low=0x0880, high=0x0fff):
"""Set glitching voltages. (0x0fff is max.)"""
self.data=[low&0xff, (low>>8)&0xff,
high&0xff, (high>>8)&0xff];
self.writecmd(self.GLITCHAPP,0x90,4,self.data);
#return ord(self.data[0]);
def glitchRate(self,count=0x0800):
"""Set glitching count period."""
self.data=[count&0xff, (count>>8)&0xff];
self.writecmd(self.GLITCHAPP,0x91,2,
self.data);
#return ord(self.data[0]);
#Monitor stuff
def silent(self,s=0):
"""Transmissions halted when 1."""
self.besilent=s;
print "besilent is %i" % self.besilent;
self.writecmd(0,0xB0,1,[s]);
connected=0;
def mon_connected(self):
"""Announce to the monitor that the connection is good."""
self.connected=1;
self.writecmd(0,0xB1,0,[]);
def out(self,byte):
"""Write a byte to P5OUT."""
self.writecmd(0,0xA1,1,[byte]);
def dir(self,byte):
"""Write a byte to P5DIR."""
self.writecmd(0,0xA0,1,[byte]);
def call(self,adr):
"""Call to an address."""
self.writecmd(0,0x30,2,
[adr&0xFF,(adr>>8)&0xFF]);
def execute(self,code):
"""Execute supplied code."""
self.writecmd(0,0x31,2,#len(code),
code);
def MONpeek8(self,address):
"""Read a byte of memory from the monitor."""
self.data=[address&0xff,address>>8];
self.writecmd(0,0x02,2,self.data);
#self.readcmd();
return ord(self.data[0]);
def MONpeek16(self,address):
"""Read a word of memory from the monitor."""
return self.MONpeek8(address)+(self.MONpeek8(address+1)<<8);
def peek(self,address):
"""Read a word of memory from the monitor."""
return self.MONpeek8(address)+(self.MONpeek8(address+1)<<8);
def eeprompeek(self,address):
"""Read a word of memory from the monitor."""
print "EEPROM peeking not supported for the monitor.";
#return self.MONpeek8(address)+(self.MONpeek8(address+1)<<8);
def peekbysym(self,name):
"""Read a value by its symbol name."""
#TODO include memory in symbol.
reg=self.symbols.get(name);
return self.peek8(reg,"data");
def pokebysym(self,name,val):
"""Write a value by its symbol name."""
#TODO include memory in symbol.
reg=self.symbols.get(name);
return self.pokebyte(reg,val);
def pokebyte(self,address,value,memory="vn"):
"""Set a byte of memory by the monitor."""
self.data=[address&0xff,address>>8,value];
self.writecmd(0,0x03,3,self.data);
return ord(self.data[0]);
def poke16(self,address,value):
"""Set a word of memory by the monitor."""
self.MONpoke16(address,value);
def MONpoke16(self,address,value):
"""Set a word of memory by the monitor."""
self.pokebyte(address,value&0xFF);
self.pokebyte(address,(value>>8)&0xFF);
return value;
def setsecret(self,value):
"""Set a secret word for later retreival. Used by glitcher."""
#self.eeprompoke(0,value);
#self.eeprompoke(1,value);
print "Secret setting is not yet suppored for this target.";
print "Aborting.";
def getsecret(self):
"""Get a secret word. Used by glitcher."""
#self.eeprompeek(0);
print "Secret getting is not yet suppored for this target.";
print "Aborting.";
sys.exit();
def dumpmem(self,begin,end):
i=begin;
while i<end:
print "%04x %04x" % (i, self.MONpeek16(i));
i+=2;
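# Monitor usage sketch (illustrative; assumes a connected and synced GoodFET):
#   gf = GoodFET()
#   gf.serInit()
#   word = gf.MONpeek16(0x0c00)   # read a 16-bit word through the monitor
#   gf.pokebyte(0x0021, 1)        # set a byte (the LED toggled by monitortest)
#   gf.dumpmem(0x0c00, 0x0c10)    # hex-dump a short range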
def monitor_ram_pattern(self):
"""Overwrite all of RAM with 0xBEEF."""
self.writecmd(0,0x90,0,self.data);
return;
def monitor_ram_depth(self):
"""Determine how many bytes of RAM are unused by looking for 0xBEEF.."""
self.writecmd(0,0x91,0,self.data);
return ord(self.data[0])+(ord(self.data[1])<<8);
#Baud rates.
baudrates=[115200,
9600,
19200,
38400,
57600,
115200];
def setBaud(self,baud):
"""Change the baud rate. TODO fix this."""
rates=self.baudrates;
self.data=[baud];
print "Changing FET baud."
self.serialport.write(chr(0x00));
self.serialport.write(chr(0x80));
self.serialport.write(chr(1));
self.serialport.write(chr(baud));
print "Changed host baud."
self.serialport.setBaudrate(rates[baud]);
time.sleep(1);
self.serialport.flushInput()
self.serialport.flushOutput()
print "Baud is now %i." % rates[baud];
return;
def readbyte(self):
return ord(self.serialport.read(1));
def findbaud(self):
for r in self.baudrates:
print "\nTrying %i" % r;
self.serialport.setBaudrate(r);
#time.sleep(1);
self.serialport.flushInput()
self.serialport.flushOutput()
for i in range(1,10):
self.readbyte();
print "Read %02x %02x %02x %02x" % (
self.readbyte(),self.readbyte(),self.readbyte(),self.readbyte());
def monitortest(self):
"""Self-test several functions through the monitor."""
print "Performing monitor self-test.";
self.monitorclocking();
for f in range(0,3000):
a=self.MONpeek16(0x0c00);
b=self.MONpeek16(0x0c02);
if a!=0x0c04 and a!=0x0c06:
print "ERROR Fetched %04x, %04x" % (a,b);
self.pokebyte(0x0021,0); #Drop LED
if self.MONpeek8(0x0021)!=0:
print "ERROR, P1OUT not cleared.";
self.pokebyte(0x0021,1); #Light LED
if not self.monitorecho():
print "Echo test failed.";
print "Self-test complete.";
self.monitorclocking();
def monitorecho(self):
data="The quick brown fox jumped over the lazy dog.";
self.writecmd(self.MONITORAPP,0x81,len(data),data);
if self.data!=data:
if self.verbose:
print "Comm error recognized by monitorecho(), got:\n%s" % self.data;
return 0;
return 1;
def monitor_info(self):
print "GoodFET with %s MCU" % self.infostring();
print "Clocked at %s" % self.monitorclocking();
return 1;
def testleds(self):
print "Flashing LEDs"
self.writecmd(self.MONITORAPP,0xD0,0,"")
try:
print "Flashed %d LED." % ord(self.data)
except:
print "Unable to process response:", self.data
return 1
def monitor_list_apps(self, full=False):
self.monitor_info()
old_value = self.besilent
self.besilent = True # turn off automatic call to readcmd
self.writecmd(self.MONITORAPP, 0x82, 1, [int(full)]);
self.besilent = old_value
# read the build date string
self.readcmd()
print "Build Date: %s" % self.data
print "Firmware apps:"
while True:
self.readcmd()
if self.count == 0:
break
print self.data
return 1;
def monitorclocking(self):
"""Return the 16-bit clocking value."""
return "0x%04x" % self.monitorgetclock();
def monitorsetclock(self,clock):
"""Set the clocking value."""
self.MONpoke16(0x56, clock);
def monitorgetclock(self):
"""Get the clocking value."""
if(os.environ.get("platform")=='arduino' or os.environ.get("board")=='arduino'):
return 0xDEAD;
#Check for MSP430 before peeking this.
return self.MONpeek16(0x56);
# The following functions ought to be implemented in
# every client.
def infostring(self):
if(os.environ.get("platform")=='arduino' or os.environ.get("board")=='arduino'):
return "Arduino";
else:
a=self.MONpeek8(0xff0);
b=self.MONpeek8(0xff1);
return "%02x%02x" % (a,b);
def lock(self):
print "Locking Unsupported.";
def erase(self):
print "Erasure Unsupported.";
def setup(self):
return;
def start(self):
return;
def test(self):
print "Unimplemented.";
return;
def status(self):
print "Unimplemented.";
return;
def halt(self):
print "Unimplemented.";
return;
def resume(self):
print "Unimplemented.";
return;
def getpc(self):
print "Unimplemented.";
return 0xdead;
def flash(self,file):
"""Flash an intel hex file to code memory."""
print "Flash not implemented.";
def dump(self,file,start=0,stop=0xffff):
"""Dump an intel hex file from code memory."""
print "Dump not implemented.";
def peek32(self,address, memory="vn"):
"""Peek 32 bits."""
return (self.peek16(address,memory)+
(self.peek16(address+2,memory)<<16));
def peek16(self,address, memory="vn"):
"""Peek 16 bits of memory."""
return (self.peek8(address,memory)+
(self.peek8(address+1,memory)<<8));
def peek8(self,address, memory="vn"):
"""Peek a byte of memory."""
return self.MONpeek8(address); #monitor
def peekblock(self,address,length,memory="vn"):
"""Return a block of data."""
data=range(0,length);
for foo in range(0,length):
data[foo]=self.peek8(address+foo,memory);
return data;
def pokeblock(self,address,bytes,memory="vn"):
"""Poke a block of a data into memory at an address."""
for foo in bytes:
self.pokebyte(address,foo,memory);
address=address+1;
return;
def loadsymbols(self):
"""Load symbols from a file."""
return;
| withdk/badusb2-mitm-poc | GoodFET.py | Python | gpl-3.0 | 29,981 | [
"Firefly"
] | 63924bd75b509cf1a275dcbf33bc4927e3b5484f1f0f7d7d15abea27c158a30b |
import sys
import gc
from sparsehc_dm import sparsehc_dm
import random
import time
import mdtraj as md
import numpy
import json
Nk=1
if len(sys.argv)>1:
Nk=int(sys.argv[1])
traj_filename='aMD-148l-first{}k.nc'.format(Nk)
top_filename='aMD-148l-all_1.pdb'
first_frame = md.load_frame(traj_filename, 0,top=top_filename)
atoms_to_keep = [a.index for a in first_frame.topology.atoms if a.name == 'CA']
start=time.time()
traj=md.load(traj_filename,top=top_filename, atom_indices=atoms_to_keep)
m=sparsehc_dm.InMatrix()
finishedLoad=time.time()
print ("finished loading ({}k): {}".format(Nk,finishedLoad-start))
Nframes=traj.n_frames
rmsds=list()
for i in range(0,Nframes-1):
rmsds=md.rmsd(traj, traj, i)[i+1:].tolist()
sparsehc_dm.push(m,rmsds,i)
#for i in range(0,Nframes-1):
#rmsds=md.rmsd(traj, traj, i)
#for j in range(i+1,Nframes):
#m.push(i,j,float(rmsds[j]))
finishedRMSD=time.time()
print ("finished rmsd: {}".format(finishedRMSD-finishedLoad))
#Z-matrix contains the linkage history, see more at http://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.linkage.html#scipy.cluster.hierarchy.linkage
Z=sparsehc_dm.linkage(m,"complete")
finishedClust=time.time()
#One would probably like to save the Z-matrix for the future, so that there is no need to redo the clustering
open('z_sparsehc-dm_load_{}.json'.format(Nk), 'w').write(json.dumps(Z))
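#A sketch of reloading the saved Z-matrix later and cutting it into flat clusters
#with SciPy (illustrative only; the 2.0 Angstrom threshold is an arbitrary choice):
# import json, numpy
# from scipy.cluster.hierarchy import fcluster
# Z=numpy.array(json.load(open('z_sparsehc-dm_load_{}.json'.format(Nk))))
# labels=fcluster(Z, t=2.0, criterion='distance')
# print ("frame 0 is in cluster {}".format(labels[0]))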
print ("finished clustering: {}".format(finishedClust-finishedRMSD)) | Burning-Daylight/sparsehc-dm | examples/sparsehc-dm_load.py | Python | gpl-3.0 | 1,461 | [
"MDTraj"
] | b9c57da759028510d808aead77bd67b94b782a1ac7dc0b9a572f532294579afc |
import os
import sys
import subprocess
from configparser import ConfigParser
from gi.repository import Gtk as gtk
from gi.repository.GdkPixbuf import Pixbuf
from gi.repository import WebKit as webkit
from locale import getdefaultlocale
"""
# Linux Lite Control Center
# Developers - John 'ShaggyTwoDope' Jenkins, Jerry Bezencon, Brian 'DarthLukan' Tomlinson
# Dependencies - python, python-webkit
# Licence - GPL v2
# Website - http://www.linuxliteos.com
"""
app_dir = '/usr/share/litecc'
lang = getdefaultlocale()[0].split('_')[0]
def execute(command, ret=True):
"""function to exec everything, subprocess used to fork"""
if ret is True:
p = os.popen(command)
return p.readline()
else:
p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
return p.stdout
def functions(view, frame, req, data=None):
"""base functions"""
uri = req.get_uri()
lllink, path = uri.split('://', 1)
path = path.replace("%20", " ")
print(lllink)
print(uri)
if lllink == "file":
return False
elif lllink == "about":
'''about dialog, need to add LDC members whom helped'''
about = gtk.AboutDialog()
about.set_program_name("Linux Lite Control Center")
about.set_version("1.0-0010")
about.set_license('''This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA. ''')
about.set_authors(
[
"Johnathan 'ShaggyTwoDope' Jenkins\n<[email protected]>",
"Jerry Bezencon\n<[email protected]>\n",
"Brian 'DarthLukan' Tomlinson\n<[email protected]>\n"
]
)
about.set_comments("Designed for Linux Lite")
about.set_website("http://www.linuxliteos.com")
about.set_logo(Pixbuf.new_from_file("{0}/litecc.png".format(app_dir)))
about.run()
about.destroy()
elif lllink == "admin":
execute(path)
elif lllink == "exportdetails":
dialog = gtk.FileChooserDialog("Select folder to export details to.", None,
gtk.FILE_CHOOSER_ACTION_SAVE,
(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_SAVE, gtk.RESPONSE_OK))
dialog.set_default_response(gtk.RESPONSE_OK)
dialog.set_action(gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER)
response = dialog.run()
if response == gtk.RESPONSE_OK:
export_details(dialog.get_filename())
dialog.destroy()
# uses executep to pipe process fork
elif lllink == "script":
execute("{0}/scripts/{1}".format(app_dir, path))
# need to fix urls
elif lllink == "help":
execute("exo-open file:///usr/share/doc/litemanual/index.html")
elif lllink == "forum":
execute("exo-open http://www.linuxliteos.com/forums/")
elif lllink == "website":
execute("exo-open http://www.linuxliteos.com/")
elif lllink == "facebook":
execute("exo-open https://www.facebook.com/pages/Linuxlite/572323192787066")
elif lllink == "twitter":
execute("exo-open http://www.twitter.com/linuxlite/")
elif lllink == "google":
execute("exo-open https://plus.google.com/+linuxliteos/")
elif lllink == "linkedin":
execute("exo-open http://www.linkedin.com/in/jerrybezencon")
return True
def get_info(info):
"""here we gather some over all basic info"""
try:
if info == "os":
return open('/etc/llver', 'r').read().split('\\n')[0]
if info == "arc":
return os.uname()[4]
if info == "host":
return os.uname()[1]
if info == "kernel":
return "{0} {1}".format(os.uname()[0], os.uname()[2])
if info == "processor":
return execute("cat /proc/cpuinfo | grep 'model name'").split(':')[1]
if info == "mem":
mem = execute("free -m|awk '/^Mem:/{print $2}'")
if float(mem) > 1024:
return str(round(float(mem) / 1024)) + " GB"
else:
return "{0} MB".format(mem)
if info == "gfx":
return execute("lspci | grep VGA").split('controller:')[1].split('(rev')[0].split(',')[0]
if info == "audio":
return execute("lspci | grep Audio").split('device:')[1].split('(rev')[0].split(',')[0]
if info == "netstatus":
return execute(
"ping -q -w 1 -c 1 `ip r | grep default | cut -d ' ' -f 3` > /dev/null && echo Active || echo Not connected to any known network")
if info == "netip":
ip = execute("hostname -i").split()
if len(ip) > 1:
ip = ip[0]
return ip
except (OSError, TypeError, Exception) as e:
print(e)
return " "
def export_details(file):
x = open("{0}/details.txt".format(file), "w")
x.write('''
Operating System: {0}
Kernel: {1}
Processor: {2}
Architecture: {3}
RAM: {4}
Devices:
{5}
Hard disks:
Mount Points:
This file was generated by Linux Lite Control Center. '''.format(
get_info("os"), get_info("kernel"), get_info("processor"), get_info("arc"), get_info("mem"), execute("lspci")))
def get_modules(section):
"""we try and load errrors"""
try:
mod_dir = os.listdir("{0}/modules/{1}/".format(app_dir, section))
mod_dir.sort()
except Exception as details:
os.system("zenity --error --text 'Error : {0}' --title 'Module Loading Error'".format(details))
return exit()
if isinstance(mod_dir, list) and len(mod_dir) < 1:
return "<p>\"no modules found!\"</p>"
else:
parser = ConfigParser()
admin = ""
mod_dir.sort()
for i in mod_dir:
parser.read("{0}/modules/{1}/{2}".format(app_dir, section, i))
ico = parser.get('module', 'ico')
# check if the icon exists
ico = "{0}/frontend/icons/modules/{1}".format(app_dir, ico)
# check if the name has a different language
if parser.has_option('module', 'name[%s]' % lang):
name = parser.get('module', 'name[%s]' % lang)
else:
name = parser.get('module', 'name')
# check if the description has a different language
if parser.has_option('module', 'desc[%s]' % lang):
desc = parser.get('module', 'desc[%s]' % lang)
else:
desc = parser.get('module', 'desc')
command = parser.get('module', 'command')
command = command.replace("'", ''' \\' ''')
admin += '''<div class="launcher" onclick="location.href='admin://{0}'" >
<img src="{1}" onerror='this.src = "/usr/share/litecc/frontend/icons/modules/notfound.png"'/>
<h3>{2}</h3>
<span>{3}</span>
</div>'''.format(command, ico, name, desc)
return admin
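# Illustrative module definition (hypothetical example, not shipped with the app):
# each module is a small INI file under /usr/share/litecc/modules/<section>/, e.g.
#
# [module]
# name=Install Updates
# desc=Keep your system up to date
# ico=updates.png
# command=lite-updates
#
# Optional name[xx]/desc[xx] keys supply translations for the locale code xx.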
def frontend_fill():
"""build all html junk"""
filee = open("{0}/frontend/default.html".format(app_dir), "r")
page = filee.read()
for i in ['os', 'arc', 'processor', 'mem', 'gfx', 'audio', 'kernel', 'host', 'netstatus', 'netip']:
# TODO: Can't use str.format here, breaks other substitutions
page = page.replace("{%s}" % i, get_info(i))
sections = ['software', 'system', 'desktop', 'hardware', 'networking']
sections.sort()
for i in sections:
# TODO: Can't use str.format here, breaks other substitutions
page = page.replace("{%s_list}" % i, get_modules(i))
filee.close()
return page
def main():
global browser
global window
frontend = frontend_fill()
window = gtk.Window()
window.connect('destroy', gtk.main_quit)
window.set_title("Linux Lite Control Center")
window.set_icon(Pixbuf.new_from_file("{0}/litecc.png".format(app_dir)))
window.set_size_request(870, 570)
# Valtam do we need to resize window?
window.set_resizable(False)
window.set_position(gtk.WindowPosition.CENTER)
browser = webkit.WebView()
swindow = gtk.ScrolledWindow()
window.add(swindow)
swindow.add(browser)
window.show_all()
browser.connect("navigation-requested", functions)
browser.load_html_string(frontend, "file://{0}/frontend/".format(app_dir))
# no right click menu
settings = browser.get_settings()
settings.set_property('enable-default-context-menu', False)
browser.set_settings(settings)
# Engage
gtk.main()
if __name__ == '__main__':
try:
main()
except (Exception, AttributeError, FileNotFoundError) as e:
print("Exiting due to error: {0}".format(e))
sys.exit(1)
| darthlukan/litecontrolcenter | usr/share/litecc/litecenter.py | Python | gpl-2.0 | 9,309 | [
"Brian"
] | 6baf2d12cbc0ced9592d4fe73b2212ccd1cdba07b52b92b6304e38d3e66df108 |
"""
Stars module, modified to work with crystal class
Classes to generate star sets, double star sets, and vector star sets; a lot of indexing functionality.
NOTE: The naming follows that of stars; the functionality is extremely similar, and this code
was modified as little as possible to translate that functionality to *crystals* which possess
a basis. In the case of a single atom basis, this should reduce to the stars object functionality.
The big changes are:
* Replacing NNvect star (which represents the jumps) with the jumpnetwork type found in crystal
* Using the jumpnetwork_latt representation from crystal
* Representing a "point" as a solute + vacancy. In this case, it is a tuple (s,v) of unit cell
indices and a vector dx or dR (dx = Cartesian vector pointing from solute to vacancy;
dR = lattice vector pointing from unit cell of solute to unit cell of vacancy). This is equivalent
to our old representation if the tuple (s,v) = (0,0) for all sites. Due to translational invariance,
the solute always stays inside the unit cell
* Using indices into the point list rather than just making lists of the vectors themselves. This
is because the "points" now have a more complex representation (see above).
"""
__author__ = 'Dallas R. Trinkle'
import numpy as np
import collections
import copy
import itertools
from onsager import crystal
# YAML tags
PAIRSTATE_YAMLTAG = '!PairState'
class PairState(collections.namedtuple('PairState', 'i j R dx')):
"""
A class corresponding to a "pair" state; in this case, a solute-vacancy pair, but can
also be a transition state pair. The solute (or initial state) is in unit cell 0, in position
indexed i; the vacancy (or final state) is in unit cell R, in position indexed j.
The cartesian vector dx connects them. We can add and subtract, negate, and "endpoint"
subtract (useful for determining what Green function entry to use)
:param i: index of the first member of the pair (solute)
:param j: index of the second member of the pair (vacancy)
:param R: lattice vector pointing from unit cell of i to unit cell of j
:param dx: Cartesian vector pointing from first to second member of pair
"""
@classmethod
def zero(cls, n=0, dim=3):
"""Return a "zero" state"""
return cls(i=n, j=n, R=np.zeros(dim, dtype=int), dx=np.zeros(dim))
@classmethod
def fromcrys(cls, crys, chem, ij, dx):
"""Convert (i,j), dx into PairState"""
return cls(i=ij[0],
j=ij[1],
R=np.round(np.dot(crys.invlatt, dx) - crys.basis[chem][ij[1]] + crys.basis[chem][ij[0]]).astype(int),
dx=dx)
@classmethod
def fromcrys_latt(cls, crys, chem, ij, R):
"""Convert (i,j), R into PairState"""
return cls(i=ij[0],
j=ij[1],
R=R,
dx=np.dot(crys.lattice, R + crys.basis[chem][ij[1]] - crys.basis[chem][ij[0]]))
def _asdict(self):
"""Return a proper dict"""
return {'i': self.i, 'j': self.j, 'R': self.R, 'dx': self.dx}
def __sane__(self, crys, chem):
"""Determine if the dx value makes sense given everything else..."""
return np.allclose(self.dx, np.dot(crys.lattice, self.R + crys.basis[chem][self.j] - crys.basis[chem][self.i]))
def iszero(self):
"""Quicker than self == PairState.zero()"""
return self.i == self.j and np.all(self.R == 0)
def __eq__(self, other):
"""Test for equality--we don't bother checking dx"""
return isinstance(other, self.__class__) and \
(self.i == other.i and self.j == other.j and np.all(self.R == other.R))
def __ne__(self, other):
"""Inequality == not __eq__"""
return not self.__eq__(other)
def __hash__(self):
"""Hash, so that we can make sets of states"""
# return self.i ^ (self.j << 1) ^ (self.R[0] << 2) ^ (self.R[1] << 3) ^ (self.R[2] << 4)
return hash((self.i, self.j) + tuple(self.R))
def __add__(self, other):
"""Add two states: works if and only if self.j == other.i
(i,j) R + (j,k) R' = (i,k) R+R' : works for thinking about transitions...
Note: a + b != b + a, and may be that only one of those is even defined
"""
if not isinstance(other, self.__class__): return NotImplemented
if self.iszero() and self.j == -1: return other
if other.iszero() and other.i == -1: return self
if self.j != other.i:
raise ArithmeticError(
'Can only add matching endpoints: ({} {})+({} {}) not compatible'.format(self.i, self.j, other.i,
other.j))
return self.__class__(i=self.i, j=other.j, R=self.R + other.R, dx=self.dx + other.dx)
def __neg__(self):
"""Negation of state (swap members of pair)
- (i,j) R = (j,i) -R
Note: a + (-a) == (-a) + a == 0 because we define what "zero" is.
"""
return self.__class__(i=self.j, j=self.i, R=-self.R, dx=-self.dx)
def __sub__(self, other):
"""Add a negative:
a-b points from initial of a to initial of b if same final state
(i,j) R - (k,j) R' = (i,k) R-R'
Note: this means that (a-b) + b = a, but b + (a-b) is an error. (b-a) + a = b
"""
if not isinstance(other, self.__class__): return NotImplemented
return self.__add__(-other)
def __xor__(self, other):
"""Subtraction on the endpoints (sort of the "opposite" of a-b):
a^b points from final of b to final of a if same initial state
(i,j) R ^ (i,k) R' = (k,j) R-R'
Note: b + (a^b) = a but (a^b) + b is an error. a + (b^a) = b
"""
if not isinstance(other, self.__class__): return NotImplemented
# if self.iszero(): raise ArithmeticError('Cannot endpoint substract from zero')
# if other.iszero(): raise ArithmeticError('Cannot endpoint subtract zero')
if self.i != other.i:
raise ArithmeticError(
'Can only endpoint subtract matching starts: ({} {})^({} {}) not compatible'.format(self.i, self.j,
other.i, other.j))
return self.__class__(i=other.j, j=self.j, R=self.R - other.R, dx=self.dx - other.dx)
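# Worked example (illustrative; single-site basis, so i = j = 0 throughout):
# with a = (0,0) R=[1,0,0] and b = (0,0) R=[0,1,0],
#   a + b -> (0,0) R=[1,1,0]   (chain the two displacements)
#   -a    -> (0,0) R=[-1,0,0]  (swap the endpoints of the pair)
#   a - b -> (0,0) R=[1,-1,0]  (points from the start of a to the start of b)
#   a ^ b -> (0,0) R=[1,-1,0]  (points from the end of b to the end of a)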
def g(self, crys, chem, g):
"""
Apply group operation.
:param crys: crystal
:param chem: chemical index
:param g: group operation (from crys)
:return g*PairState: corresponding to group operation applied to self
"""
gRi, (c, gi) = crys.g_pos(g, np.zeros(len(self.R), dtype=int), (chem, self.i))
gRj, (c, gj) = crys.g_pos(g, self.R, (chem, self.j))
gdx = crys.g_direc(g, self.dx)
return self.__class__(i=gi, j=gj, R=gRj - gRi, dx=gdx)
def __str__(self):
"""Human readable version"""
if len(self.R) == 3:
return "{}.[0,0,0]:{}.[{},{},{}] (dx=[{},{},{}])".format(self.i, self.j,
self.R[0], self.R[1], self.R[2],
self.dx[0], self.dx[1], self.dx[2])
else:
return "{}.[0,0]:{}.[{},{}] (dx=[{},{}])".format(self.i, self.j,
self.R[0], self.R[1],
self.dx[0], self.dx[1])
@classmethod
def sortkey(cls, entry):
return np.dot(entry.dx, entry.dx)
@staticmethod
def PairState_representer(dumper, data):
"""Output a PairState"""
# asdict() returns an OrderedDictionary, so pass through dict()
# had to rewrite _asdict() for some reason...?
return dumper.represent_mapping(PAIRSTATE_YAMLTAG, data._asdict())
@staticmethod
def PairState_constructor(loader, node):
"""Construct a GroupOp from YAML"""
# ** turns the dictionary into parameters for PairState constructor
return PairState(**loader.construct_mapping(node, deep=True))
crystal.yaml.add_representer(PairState, PairState.PairState_representer)
crystal.yaml.add_constructor(PAIRSTATE_YAMLTAG, PairState.PairState_constructor)
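# Round-trip sketch (illustrative): with the representer and constructor registered
# above, a PairState can be serialized through the yaml module wrapped by crystal:
#   ps = PairState.zero()
#   text = crystal.yaml.dump(ps)   # emits a !PairState mapping
#   ps2 = crystal.yaml.load(text)  # reconstructs an equivalent state
#   assert ps == ps2
# (Newer PyYAML versions may require an explicit Loader argument to load.)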
# HDF5 conversion routines: PairState, and list-of-list structures
def PSlist2array(PSlist):
"""
Take in a list of pair states; return arrays that can be stored in HDF5 format
:param PSlist: list of pair states
:return ij: int_array[N][2] = (i,j)
:return R: int[N][3]
:return dx: float[N][3]
"""
N = len(PSlist)
ij = np.zeros((N, 2), dtype=int)
dim = len(PSlist[0].R)
R = np.zeros((N, dim), dtype=int)
dx = np.zeros((N, dim))
for n, PS in enumerate(PSlist):
ij[n, 0], ij[n, 1], R[n, :], dx[n, :] = PS.i, PS.j, PS.R, PS.dx
return ij, R, dx
def array2PSlist(ij, R, dx):
"""
Take in arrays of ij, R, dx (from HDF5), return a list of PairStates
:param ij: int_array[N][2] = (i,j)
:param R: int[N][3]
:param dx: float[N][3]
:return PSlist: list of pair states
"""
return [PairState(i=ij0[0], j=ij0[1], R=R0, dx=dx0) for ij0, R0, dx0 in zip(ij, R, dx)]
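# Round-trip sketch (illustrative): the two helpers above are inverses, so a list
# of pair states survives conversion to plain arrays (e.g. for HDF5 storage):
#   PSlist = [PairState.zero(0), PairState.zero(1)]
#   ij, R, dx = PSlist2array(PSlist)
#   assert array2PSlist(ij, R, dx) == PSlist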
def doublelist2flatlistindex(listlist):
"""
Takes a list of lists, returns a flattened list and an index array
:param listlist: list of lists of objects
:return flatlist: flat list of objects (preserving order)
:return indexarray: array indexing which original list it came from
"""
flatlist = []
indexlist = []
for ind, entries in enumerate(listlist):
flatlist += entries
indexlist += [ind for j in entries]
return flatlist, np.array(indexlist)
def flatlistindex2doublelist(flatlist, indexarray):
"""
Takes a flattened list and an index array, returns a list of lists
:param flatlist: flat list of objects (preserving order)
:param indexarray: array indexing which original list it came from
:return listlist: list of lists of objects
"""
Nlist = max(indexarray) + 1
listlist = [[] for n in range(Nlist)]
for entry, ind in zip(flatlist, indexarray):
listlist[ind].append(entry)
return listlist
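# Example (illustrative): flattening remembers which sub-list each entry came from,
# and the index array lets the nested structure be rebuilt exactly:
#   flat, idx = doublelist2flatlistindex([['a', 'b'], ['c']])
#   # flat == ['a', 'b', 'c'], idx == array([0, 0, 1])
#   assert flatlistindex2doublelist(flat, idx) == [['a', 'b'], ['c']]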
class StarSet(object):
"""
A class to construct crystal stars, and be able to efficiently index.
Takes in a jumpnetwork, which is used to construct the corresponding stars, a crystal
object with which to operate, a specification of the chemical index for the atom moving
(needs to be consistent with jumpnetwork and crys), and then the number of shells.
In this case, ``shells`` = number of successive "jumps" from a state. As an example,
in FCC, 1 shell = 1st neighbor, 2 shell = 1-4th neighbors.
"""
def __init__(self, jumpnetwork, crys, chem, Nshells=0, originstates=False, lattice=False):
"""
Initiates a star set generator for a given jumpnetwork, crystal, and specified
chemical index. Does not include "origin states" by default; these are PairStates that
iszero() is True; they are only needed if crystal has a nonzero VectorBasis.
:param jumpnetwork: list of symmetry unique jumps, as a list of list of tuples; either
``((i,j), dx)`` for jump from i to j with displacement dx, or
``((i,j), R)`` for jump from i in unit cell 0 -> j in unit cell R
:param crys: crystal where jumps take place
:param chem: chemical index of atom to consider jumps
:param Nshells: number of shells to generate
:param originstates: include origin states in generate?
:param lattice: which form does the jumpnetwork take?
"""
# jumpnetwork_index: list of lists of indices into jumplist; matches structure of jumpnetwork
# jumplist: list of jumps, as pair states (i=initial state, j=final state)
# states: list of pair states, out to Nshells
# Nstates: size of list
# stars: list of lists of indices into states; each list are states equivalent by symmetry
# Nstars: size of list
# index[Nstates]: index of star that state belongs to
# empty StarSet
if all(x is None for x in (jumpnetwork, crys, chem)): return
self.jumpnetwork_index = [] # list of list of indices into...
self.jumplist = [] # list of our jumps, as PairStates
ind = 0
for jlist in jumpnetwork:
self.jumpnetwork_index.append([])
for ij, v in jlist:
self.jumpnetwork_index[-1].append(ind)
ind += 1
if lattice:
PS = PairState.fromcrys_latt(crys, chem, ij, v)
else:
PS = PairState.fromcrys(crys, chem, ij, v)
self.jumplist.append(PS)
self.crys = crys
self.chem = chem
self.generate(Nshells, originstates=originstates)
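# Construction sketch (illustrative; assumes the crystal module's FCC helper and
# jumpnetwork() method -- adapt the lattice constant and cutoff to your system):
#   crys = crystal.Crystal.FCC(1.0)
#   chem = 0
#   jumpnetwork = crys.jumpnetwork(chem, 0.75)  # nearest-neighbor vacancy jumps
#   thermodyn = StarSet(jumpnetwork, crys, chem, Nshells=2)
#   print(thermodyn.Nstates, thermodyn.Nstars)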
def __str__(self):
"""Human readable version"""
str = "Nshells: {} Nstates: {} Nstars: {}\n".format(self.Nshells, self.Nstates, self.Nstars)
for si in range(self.Nstars):
str += "Star {} ({})\n".format(si, len(self.stars[si]))
for i in self.stars[si]:
str += " {}: {}\n".format(i, self.states[i])
return str
def generate(self, Nshells, threshold=1e-8, originstates=False):
"""
Construct the points and the stars in the set. Does not include "origin states" by default; these
are PairStates that iszero() is True; they are only needed if crystal has a nonzero VectorBasis.
:param Nshells: number of shells to generate; this is interpreted as subsequent
"sums" of jumplist (as we need the solute to be connected to the vacancy by at least one jump)
:param threshold: threshold for determining equality with symmetry
:param originstates: include origin states in generate?
"""
if Nshells == getattr(self, 'Nshells', -1): return
self.Nshells = Nshells
if Nshells > 0:
stateset = set(self.jumplist)
else:
stateset = set([])
lastshell = stateset.copy()
if originstates:
for i in range(len(self.crys.basis[self.chem])):
stateset.add(PairState.zero(i, self.crys.dim))
for i in range(Nshells - 1):
# add all NNvect to last shell produced, always excluding 0
# lastshell = [v1+v2 for v1 in lastshell for v2 in self.NNvect if not all(abs(v1 + v2) < threshold)]
nextshell = set([])
for s1 in lastshell:
for s2 in self.jumplist:
# this try/except structure lets us attempt addition and kick out if not possible
try:
s = s1 + s2
except:
continue
if not s.iszero():
nextshell.add(s)
stateset.add(s)
lastshell = nextshell
# now to sort our set of vectors (easiest by magnitude, and then reduce down:
self.states = sorted([s for s in stateset], key=PairState.sortkey)
self.Nstates = len(self.states)
if self.Nstates > 0:
x2_indices = []
x2old = np.dot(self.states[0].dx, self.states[0].dx)
for i, x2 in enumerate([np.dot(st.dx, st.dx) for st in self.states]):
if x2 > (x2old + threshold):
x2_indices.append(i)
x2old = x2
x2_indices.append(len(self.states))
# x2_indices now contains a list of indices with the same magnitudes
self.stars = []
xmin = 0
for xmax in x2_indices:
complist_stars = [] # for finding unique stars
symmstate_list = [] # list of sets corresponding to those stars...
for xi in range(xmin, xmax):
x = self.states[xi]
# is this a new rep. for a unique star?
match = False
for i, gs in enumerate(symmstate_list):
if x in gs:
# update star
complist_stars[i].append(xi)
match = True
continue
if not match:
# new symmetry point!
complist_stars.append([xi])
symmstate_list.append(set([x.g(self.crys, self.chem, g) for g in self.crys.G]))
self.stars += complist_stars
xmin = xmax
else:
self.stars = [[]]
self.Nstars = len(self.stars)
# generate index: which star is each state a member of?
self.index = np.zeros(self.Nstates, dtype=int)
self.indexdict = {}
for si, star in enumerate(self.stars):
for xi in star:
self.index[xi] = si
self.indexdict[self.states[xi]] = (xi, si)
def addhdf5(self, HDF5group):
"""
Adds an HDF5 representation of object into an HDF5group (needs to already exist).
Example: if f is an open HDF5, then StarSet.addhdf5(f.create_group('StarSet')) will
(1) create the group named 'StarSet', and then (2) put the StarSet representation in that group.
:param HDF5group: HDF5 group
"""
HDF5group.attrs['type'] = self.__class__.__name__
HDF5group.attrs['crystal'] = self.crys.__repr__()
HDF5group.attrs['chem'] = self.chem
HDF5group['Nshells'] = self.Nshells
# convert jumplist (list of PS) into arrays to store:
HDF5group['jumplist_ij'], HDF5group['jumplist_R'], HDF5group['jumplist_dx'] = \
PSlist2array(self.jumplist)
HDF5group['jumplist_Nunique'] = len(self.jumpnetwork_index)
jumplistinvmap = np.zeros(len(self.jumplist), dtype=int)
for j, jlist in enumerate(self.jumpnetwork_index):
for i in jlist: jumplistinvmap[i] = j
HDF5group['jumplist_invmap'] = jumplistinvmap
# convert states into arrays to store:
HDF5group['states_ij'], HDF5group['states_R'], HDF5group['states_dx'] = \
PSlist2array(self.states)
HDF5group['states_index'] = self.index
@classmethod
def loadhdf5(cls, crys, HDF5group):
"""
Creates a new StarSet from an HDF5 group.
:param crys: crystal object--MUST BE PASSED IN as it is not stored with the StarSet
:param HDFgroup: HDF5 group
:return StarSet: new StarSet object
"""
SSet = cls(None, None, None) # initialize
SSet.crys = crys
SSet.chem = HDF5group.attrs['chem']
SSet.Nshells = HDF5group['Nshells'].value
SSet.jumplist = array2PSlist(HDF5group['jumplist_ij'].value,
HDF5group['jumplist_R'].value,
HDF5group['jumplist_dx'].value)
SSet.jumpnetwork_index = [[] for n in range(HDF5group['jumplist_Nunique'].value)]
for i, jump in enumerate(HDF5group['jumplist_invmap'].value):
SSet.jumpnetwork_index[jump].append(i)
SSet.states = array2PSlist(HDF5group['states_ij'].value,
HDF5group['states_R'].value,
HDF5group['states_dx'].value)
SSet.Nstates = len(SSet.states)
SSet.index = HDF5group['states_index'].value
# construct the states, and the index dictionary:
SSet.Nstars = max(SSet.index) + 1
SSet.stars = [[] for n in range(SSet.Nstars)]
SSet.indexdict = {}
for xi, si in enumerate(SSet.index):
SSet.stars[si].append(xi)
SSet.indexdict[SSet.states[xi]] = (xi, si)
return SSet
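# Storage sketch (following the pattern described in addhdf5 above; assumes h5py
# and an existing StarSet named thermodyn):
#   import h5py
#   with h5py.File('starset.hdf5', 'w') as f:
#       thermodyn.addhdf5(f.create_group('StarSet'))
#   with h5py.File('starset.hdf5', 'r') as f:
#       copyset = StarSet.loadhdf5(crys, f['StarSet'])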
def copy(self, empty=False):
"""Return a copy of the StarSet; done as efficiently as possible; empty means skip the shells, etc."""
newStarSet = self.__class__(None, None, None) # a little hacky... creates an empty class
newStarSet.jumpnetwork_index = copy.deepcopy(self.jumpnetwork_index)
newStarSet.jumplist = self.jumplist.copy()
newStarSet.crys = self.crys
newStarSet.chem = self.chem
if not empty:
newStarSet.Nshells = self.Nshells
newStarSet.stars = copy.deepcopy(self.stars)
newStarSet.states = self.states.copy()
newStarSet.Nstars = self.Nstars
newStarSet.Nstates = self.Nstates
newStarSet.index = self.index.copy()
newStarSet.indexdict = self.indexdict.copy()
else:
newStarSet.generate(0)
return newStarSet
# removed combine; all it does is generate(s1.Nshells + s2.Nshells) with lots of checks...
# replaced with (more efficient?) __add__ and __iadd__.
def __add__(self, other):
"""Add two StarSets together; done by making a copy of one, and iadding"""
if not isinstance(other, self.__class__): return NotImplemented
if self.Nshells >= other.Nshells:
scopy = self.copy()
scopy.__iadd__(other)
else:
scopy = other.copy()
scopy.__iadd__(self)
return scopy
def __iadd__(self, other):
"""Add another StarSet to this one; very similar to generate()"""
threshold = 1e-8
if not isinstance(other, self.__class__): return NotImplemented
if self.chem != other.chem: raise ArithmeticError('Cannot add different chemistry index')
if other.Nshells < 1: return self
if self.Nshells < 1:
self.Nshells = other.Nshells
self.stars = copy.deepcopy(other.stars)
self.states = other.states.copy()
self.Nstars = other.Nstars
self.Nstates = other.Nstates
self.index = other.index.copy()
self.indexdict = other.indexdict.copy()
return self
self.Nshells += other.Nshells
Nold = self.Nstates
oldstateset = set(self.states)
newstateset = set([])
for s1 in self.states[:Nold]:
for s2 in other.states:
# this try/except structure lets us attempt addition and kick out if not possible
try:
s = s1 + s2
except:
continue
if not s.iszero() and not s in oldstateset: newstateset.add(s)
# now to sort our set of vectors (easiest by magnitude, and then reduce down:
self.states += sorted([s for s in newstateset], key=PairState.sortkey)
Nnew = len(self.states)
x2_indices = []
x2old = np.dot(self.states[Nold].dx, self.states[Nold].dx)
for i in range(Nold, Nnew):
x2 = np.dot(self.states[i].dx, self.states[i].dx)
if x2 > (x2old + threshold):
x2_indices.append(i)
x2old = x2
x2_indices.append(Nnew)
# x2_indices now contains a list of indices with the same magnitudes
xmin = Nold
for xmax in x2_indices:
complist_stars = [] # for finding unique stars
symmstate_list = [] # list of sets corresponding to those stars...
for xi in range(xmin, xmax):
x = self.states[xi]
# is this a new rep. for a unique star?
match = False
for i, gs in enumerate(symmstate_list):
if x in gs:
# update star
complist_stars[i].append(xi)
match = True
continue
if not match:
# new symmetry point!
complist_stars.append([xi])
symmstate_list.append(set([x.g(self.crys, self.chem, g) for g in self.crys.G]))
self.stars += complist_stars
xmin = xmax
self.Nstates = Nnew
# generate new index entries: which star is each state a member of?
self.index = np.pad(self.index, (0, Nnew - Nold), mode='constant')
Nold = self.Nstars
Nnew = len(self.stars)
for si in range(Nold, Nnew):
star = self.stars[si]
for xi in star:
self.index[xi] = si
self.indexdict[self.states[xi]] = (xi, si)
self.Nstars = Nnew
return self
def __contains__(self, PS):
"""Return true if PS is in the star"""
return PS in self.indexdict
# replaces pointindex:
def stateindex(self, PS):
"""Return the index of pair state PS; None if not found"""
try:
return self.indexdict[PS][0]
except:
return None
def starindex(self, PS):
"""Return the index for the star to which pair state PS belongs; None if not found"""
try:
return self.indexdict[PS][1]
except:
return None
def symmatch(self, PS1, PS2):
"""True if there exists a group operation that makes PS1 == PS2."""
return any(PS1 == PS2.g(self.crys, self.chem, g) for g in self.crys.G)
# replaces DoubleStarSet
def jumpnetwork_omega1(self):
"""
Generate a jumpnetwork corresponding to vacancy jumping while the solute remains fixed.
:return jumpnetwork: list of symmetry unique jumps; list of list of tuples (i,f), dx where
i,f index into states for the initial and final states, and dx = displacement of vacancy
in Cartesian coordinates. Note: if (i,f), dx is present, so if (f,i), -dx
:return jumptype: list of indices corresponding to the (original) jump type for each
symmetry unique jump; useful for constructing a LIMB approximation, and needed to
construct delta_omega
:return starpair: list of tuples of the star indices of the i and f states for each
symmetry unique jump
"""
if self.Nshells < 1: return []
jumpnetwork = []
jumptype = []
starpair = []
for jt, jumpindices in enumerate(self.jumpnetwork_index):
for jump in [self.jumplist[j] for j in jumpindices]:
for i, PSi in enumerate(self.states):
if PSi.iszero(): continue
# attempt to add...
try:
PSf = PSi + jump
except:
continue
if PSf.iszero(): continue
f = self.stateindex(PSf)
if f is None: continue # outside our StarSet
# see if we've already generated this jump (works since all of our states are distinct)
if any(any(i == i0 and f == f0 for (i0, f0), dx in jlist) for jlist in jumpnetwork): continue
dx = PSf.dx - PSi.dx
jumpnetwork.append(self.symmequivjumplist(i, f, dx))
jumptype.append(jt)
starpair.append((self.index[i], self.index[f]))
return jumpnetwork, jumptype, starpair
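# Usage sketch (illustrative): for a generated StarSet this returns the vacancy
# jumps between pair states ("omega1"), grouped into symmetry-unique lists:
#   jn1, jt1, sp1 = starset.jumpnetwork_omega1()
#   (i, f), dx = jn1[0][0]   # representative jump: i, f index into starset.states
#   # jt1[0] is the originating jump type; sp1[0] the (star_i, star_f) pair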
def jumpnetwork_omega2(self):
"""
Generate a jumpnetwork corresponding to vacancy exchanging with a solute.
:return jumpnetwork: list of symmetry unique jumps; list of list of tuples (i,f), dx where
i,f index into states for the initial and final states, and dx = displacement of vacancy
in Cartesian coordinates. Note: if (i,f), dx is present, so if (f,i), -dx
:return jumptype: list of indices corresponding to the (original) jump type for each
symmetry unique jump; useful for constructing a LIMB approximation, and needed to
construct delta_omega
:return starpair: list of tuples of the star indices of the i and f states for each
symmetry unique jump
"""
if self.Nshells < 1: return []
jumpnetwork = []
jumptype = []
starpair = []
for jt, jumpindices in enumerate(self.jumpnetwork_index):
for jump in [self.jumplist[j] for j in jumpindices]:
for i, PSi in enumerate(self.states):
if PSi.iszero(): continue
# attempt to add...
try:
PSf = PSi + jump
except:
continue
if not PSf.iszero(): continue
f = self.stateindex(-PSi) # exchange
# see if we've already generated this jump (works since all of our states are distinct)
if any(any(i == i0 and f == f0 for (i0, f0), dx in jlist) for jlist in jumpnetwork): continue
dx = -PSi.dx # the vacancy jumps into the solute position (exchange)
jumpnetwork.append(self.symmequivjumplist(i, f, dx))
jumptype.append(jt)
starpair.append((self.index[i], self.index[f]))
return jumpnetwork, jumptype, starpair
def symmequivjumplist(self, i, f, dx):
"""
Returns a list of tuples of symmetry equivalent jumps
:param i: index of initial state
:param f: index of final state
:param dx: displacement vector
:return symmjumplist: list of tuples of ((gi, gf), gdx) for every group op
"""
PSi = self.states[i]
PSf = self.states[f]
symmjumplist = [((i, f), dx)]
if i != f: symmjumplist.append(((f, i), -dx)) # i should not equal f... but in case we allow 0 as a jump
for g in self.crys.G:
gi, gf, gdx = self.stateindex(PSi.g(self.crys, self.chem, g)), \
self.stateindex(PSf.g(self.crys, self.chem, g)), \
self.crys.g_direc(g, dx)
if not any(gi == i0 and gf == f0 for (i0, f0), dx in symmjumplist):
symmjumplist.append(((gi, gf), gdx))
if gi != gf: symmjumplist.append(((gf, gi), -gdx))
return symmjumplist
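    # Note: symmequivjumplist returns the full symmetry orbit of a single (i, f, dx) jump,
    # including the reverse jumps ((f, i), -dx); duplicates produced by different group
    # operations are filtered out before being appended.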
def diffgenerate(self, S1, S2, threshold=1e-8):
"""
Construct a starSet using endpoint subtraction from starset S1 to starset S2. Will
include zero. Points from vacancy states of S1 to vacancy states of S2.
:param S1: starSet for start
:param S2: starSet for final
:param threshold: threshold for sorting magnitudes (can influence symmetry efficiency)
"""
if S1.Nshells < 1 or S2.Nshells < 1: raise ValueError('Need to initialize stars')
self.Nshells = S1.Nshells + S2.Nshells # an estimate...
stateset = set([])
# self.states = []
for s1 in S1.states:
for s2 in S2.states:
# this try/except structure lets us attempt addition and kick out if not possible
try:
s = s2 ^ s1 # points from vacancy state of s1 to vacancy state of s2
except:
continue
stateset.add(s)
        # now sort our set of vectors (easiest by magnitude), then reduce down:
self.states = sorted([s for s in stateset], key=PairState.sortkey)
self.Nstates = len(self.states)
if self.Nstates > 0:
x2_indices = []
x2old = np.dot(self.states[0].dx, self.states[0].dx)
for i, x2 in enumerate([np.dot(st.dx, st.dx) for st in self.states]):
if x2 > (x2old + threshold):
x2_indices.append(i)
x2old = x2
x2_indices.append(len(self.states))
            # x2_indices now marks the boundaries between groups of states with equal magnitude
self.stars = []
xmin = 0
for xmax in x2_indices:
complist_stars = [] # for finding unique stars
symmstate_list = [] # list of sets corresponding to those stars...
for xi in range(xmin, xmax):
x = self.states[xi]
# is this a new rep. for a unique star?
match = False
for i, gs in enumerate(symmstate_list):
if x in gs:
# update star
complist_stars[i].append(xi)
match = True
continue
if not match:
# new symmetry point!
complist_stars.append([xi])
symmstate_list.append(set([x.g(self.crys, self.chem, g) for g in self.crys.G]))
self.stars += complist_stars
xmin = xmax
else:
self.stars = [[]]
self.Nstars = len(self.stars)
# generate index: which star is each state a member of?
self.index = np.zeros(self.Nstates, dtype=int)
self.indexdict = {}
for si, star in enumerate(self.stars):
for xi in star:
self.index[xi] = si
self.indexdict[self.states[xi]] = (xi, si)
def zeroclean(x, threshold=1e-8):
"""Modify x in place, return 0 if x is below a threshold; useful for "symmetrizing" our expansions"""
for v in np.nditer(x, op_flags=['readwrite']):
if abs(v) < threshold: v[...] = 0
return x
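# Illustrative use of zeroclean (hedged example; values chosen only to show the in-place thresholding):
#   A = np.array([1.0, 1e-12, -2.0])
#   zeroclean(A)   # -> array([ 1.,  0., -2.]); A itself is modified in place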
class VectorStarSet(object):
"""
A class to construct vector star sets, and be able to efficiently index.
All based on a StarSet
"""
def __init__(self, starset=None):
"""
        Initializes a vector-star generator; works with a given star set.
:param starset: StarSet, from which we pull nearly all of the info that we need
"""
# vecpos: list of "positions" (state indices) for each vector star (list of lists)
# vecvec: list of vectors for each vector star (list of lists of vectors)
# Nvstars: number of vector stars
self.starset = None
self.Nvstars = 0
if starset is not None:
if starset.Nshells > 0:
self.generate(starset)
def generate(self, starset, threshold=1e-8):
"""
        Construct the actual vector stars
:param starset: StarSet, from which we pull nearly all of the info that we need
"""
if starset.Nshells == 0: return
if starset == self.starset: return
self.starset = starset
dim = starset.crys.dim
self.vecpos = []
self.vecvec = []
states = starset.states
for s in starset.stars:
# start by generating the parallel star-vector; always trivially present:
PS0 = states[s[0]]
if PS0.iszero():
# origin state; we can easily generate our vlist
vlist = starset.crys.vectlist(starset.crys.VectorBasis((self.starset.chem, PS0.i)))
scale = 1. / np.sqrt(len(s)) # normalization factor; vectors are already normalized
vlist = [v * scale for v in vlist]
# add the positions
for v in vlist:
self.vecpos.append(s.copy())
veclist = []
for PSi in [states[si] for si in s]:
for g in starset.crys.G:
if PS0.g(starset.crys, starset.chem, g) == PSi:
veclist.append(starset.crys.g_direc(g, v))
break
self.vecvec.append(veclist)
else:
# not an origin state
vpara = PS0.dx
scale = 1. / np.sqrt(len(s) * np.dot(vpara, vpara)) # normalization factor
self.vecpos.append(s.copy())
self.vecvec.append([states[si].dx * scale for si in s])
# next, try to generate perpendicular star-vectors, if present:
if dim == 3:
v0 = np.cross(vpara, np.array([0, 0, 1.]))
if np.dot(v0, v0) < threshold:
v0 = np.cross(vpara, np.array([1., 0, 0]))
v1 = np.cross(vpara, v0)
# normalization:
v0 /= np.sqrt(np.dot(v0, v0))
v1 /= np.sqrt(np.dot(v1, v1))
Nvect = 2
else:
# 2d is very simple...
v0 = np.array([vpara[1], -vpara[0]])
v0 /= np.sqrt(np.dot(v0, v0))
Nvect = 1
# run over the invariant group operations for state PS0
for g in self.starset.crys.G:
if Nvect == 0: continue
if PS0 != PS0.g(starset.crys, starset.chem, g): continue
gv0 = starset.crys.g_direc(g, v0)
if Nvect == 1:
# we only need to check that we still have an invariant vector
if not np.isclose(np.dot(v0, v0), 1): raise ArithmeticError('Somehow got unnormalized vector?')
if not np.allclose(gv0, v0): Nvect = 0
if Nvect == 2:
if not np.isclose(np.dot(v0, v0), 1): raise ArithmeticError('Somehow got unnormalized vector?')
if not np.isclose(np.dot(v1, v1), 1): raise ArithmeticError('Somehow got unnormalized vector?')
gv1 = starset.crys.g_direc(g, v1)
g00 = np.dot(v0, gv0)
g11 = np.dot(v1, gv1)
g01 = np.dot(v0, gv1)
g10 = np.dot(v1, gv0)
if abs((abs(g00 * g11 - g01 * g10) - 1)) > threshold or abs(g01 - g10) > threshold:
# we don't have an orthogonal matrix, or we have a rotation, so kick out
Nvect = 0
continue
if (abs(g00 - 1) > threshold) or (abs(g11 - 1) > threshold):
                        # if we don't have the identity matrix, then we have to find the one vector that survives
if abs(g00 - 1) < threshold:
Nvect = 1
continue
if abs(g11 - 1) < threshold:
v0 = v1
Nvect = 1
continue
v0 = (g01 * v0 + (1 - g00) * v1) / np.sqrt(g01 * g10 + (1 - g00) ** 2)
Nvect = 1
# so... do we have any vectors to add?
if Nvect > 0:
v0 /= np.sqrt(len(s) * np.dot(v0, v0))
vlist = [v0]
if Nvect > 1:
v1 /= np.sqrt(len(s) * np.dot(v1, v1))
vlist.append(v1)
# add the positions
for v in vlist:
self.vecpos.append(s.copy())
veclist = []
for PSi in [states[si] for si in s]:
for g in starset.crys.G:
if PS0.g(starset.crys, starset.chem, g) == PSi:
veclist.append(starset.crys.g_direc(g, v))
break
self.vecvec.append(veclist)
self.Nvstars = len(self.vecpos)
self.outer = self.generateouter()
def generateouter(self):
"""
Generate our outer products for our star-vectors.
        :return outer: array [dim, dim, Nvstars, Nvstars]
            outer[:, :, i, j] is the dim x dim tensor outer product for two vector-stars vs[i] and vs[j]
"""
# dim = len(self.vecvec[0][0])
dim = self.starset.crys.dim
outer = np.zeros((dim, dim, self.Nvstars, self.Nvstars))
for i, sR0, sv0 in zip(itertools.count(), self.vecpos, self.vecvec):
for j, sR1, sv1 in zip(itertools.count(), self.vecpos, self.vecvec):
if sR0[0] == sR1[0]:
outer[:, :, i, j] = sum([np.outer(v0, v1) for v0, v1 in zip(sv0, sv1)])
return zeroclean(outer)
def addhdf5(self, HDF5group):
"""
Adds an HDF5 representation of object into an HDF5group (needs to already exist).
        Example: if f is an open HDF5, then VectorStarSet.addhdf5(f.create_group('VectorStarSet')) will
        (1) create the group named 'VectorStarSet', and then (2) put the VectorStarSet
        representation in that group.
:param HDF5group: HDF5 group
"""
HDF5group.attrs['type'] = self.__class__.__name__
HDF5group['Nvstars'] = self.Nvstars
HDF5group['vecposlist'], HDF5group['vecposindex'] = doublelist2flatlistindex(self.vecpos)
HDF5group['vecveclist'], HDF5group['vecvecindex'] = doublelist2flatlistindex(self.vecvec)
HDF5group['outer'] = self.outer
@classmethod
def loadhdf5(cls, SSet, HDF5group):
"""
Creates a new VectorStarSet from an HDF5 group.
:param SSet: StarSet--MUST BE PASSED IN as it is not stored with the VectorStarSet
        :param HDF5group: HDF5 group
:return VectorStarSet: new VectorStarSet object
"""
VSSet = cls(None) # initialize
VSSet.starset = SSet
VSSet.Nvstars = HDF5group['Nvstars'].value
VSSet.vecpos = flatlistindex2doublelist(HDF5group['vecposlist'].value,
HDF5group['vecposindex'].value)
VSSet.vecvec = flatlistindex2doublelist(HDF5group['vecveclist'].value,
HDF5group['vecvecindex'].value)
VSSet.outer = HDF5group['outer'].value
return VSSet
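    # Illustrative save/load round trip (hedged sketch; assumes an open h5py file `f`, a StarSet `ss`
    # and an existing VectorStarSet `vss` built from it):
    #   vss.addhdf5(f.create_group('VectorStarSet'))
    #   vss2 = VectorStarSet.loadhdf5(ss, f['VectorStarSet'])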
def GFexpansion(self):
"""
Construct the GF matrix expansion in terms of the star vectors, and indexed
to GFstarset.
:return GFexpansion: array[Nsv, Nsv, NGFstars]
the GF matrix[i, j] = sum(GFexpansion[i, j, k] * GF(starGF[k]))
:return GFstarset: starSet corresponding to the GF
"""
if self.Nvstars == 0:
return None
GFstarset = self.starset.copy(empty=True)
GFstarset.diffgenerate(self.starset, self.starset)
GFexpansion = np.zeros((self.Nvstars, self.Nvstars, GFstarset.Nstars))
for i in range(self.Nvstars):
for si, vi in zip(self.vecpos[i], self.vecvec[i]):
for j in range(i, self.Nvstars):
for sj, vj in zip(self.vecpos[j], self.vecvec[j]):
try:
ds = self.starset.states[sj] ^ self.starset.states[si]
except:
continue
k = GFstarset.starindex(ds)
if k is None: raise ArithmeticError('GF star not large enough to include {}?'.format(ds))
GFexpansion[i, j, k] += np.dot(vi, vj)
# symmetrize
for i in range(self.Nvstars):
for j in range(0, i):
GFexpansion[i, j, :] = GFexpansion[j, i, :]
# cleanup on return:
return zeroclean(GFexpansion), GFstarset
def rateexpansions(self, jumpnetwork, jumptype, omega2=False):
"""
Construct the omega0 and omega1 matrix expansions in terms of the jumpnetwork;
includes the escape terms separately. The escape terms are tricky because they have
probability factors that differ from the transitions; the PS (pair stars) is useful for
finding this. We just call it the 'probfactor' below.
*Note:* this used to be separated into rate0expansion, and rate1expansion, and
partly in bias1expansion. Note also that if jumpnetwork_omega2 is passed, it also works
for that. However, in that case we have a different approach for the calculation of
rate0expansion: if there are origin states, then we need to "jump" to those; if there
is a non-empty VectorBasis we will want to account for them there.
:param jumpnetwork: jumpnetwork of symmetry unique omega1-type jumps,
corresponding to our starset. List of lists of (IS, FS), dx tuples, where IS and FS
are indices corresponding to states in our starset.
:param jumptype: specific omega0 jump type that the jump corresponds to
:param omega2: (optional) are we dealing with the omega2 list, so we need to remove
origin states? (default=False)
:return rate0expansion: array[Nsv, Nsv, Njump_omega0]
the omega0 matrix[i, j] = sum(rate0expansion[i, j, k] * omega0[k]); *IF* NVB>0
we "hijack" this and use it for [NVB, Nsv, Njump_omega0], as we're doing an omega2
calc and rate0expansion won't be used *anyway*.
:return rate0escape: array[Nsv, Njump_omega0]
the escape contributions: omega0[i,i] += sum(rate0escape[i,k]*omega0[k]*probfactor0(PS[k]))
:return rate1expansion: array[Nsv, Nsv, Njump_omega1]
the omega1 matrix[i, j] = sum(rate1expansion[i, j, k] * omega1[k])
:return rate1escape: array[Nsv, Njump_omega1]
the escape contributions: omega1[i,i] += sum(rate1escape[i,k]*omega1[k]*probfactor(PS[k]))
"""
if self.Nvstars == 0: return None
rate0expansion = np.zeros((self.Nvstars, self.Nvstars, len(self.starset.jumpnetwork_index)))
rate1expansion = np.zeros((self.Nvstars, self.Nvstars, len(jumpnetwork)))
rate0escape = np.zeros((self.Nvstars, len(self.starset.jumpnetwork_index)))
rate1escape = np.zeros((self.Nvstars, len(jumpnetwork)))
for k, jumplist, jt in zip(itertools.count(), jumpnetwork, jumptype):
for (IS, FS), dx in jumplist:
for i in range(self.Nvstars):
for Ri, vi in zip(self.vecpos[i], self.vecvec[i]):
if Ri == IS:
rate0escape[i, jt] -= np.dot(vi, vi)
rate1escape[i, k] -= np.dot(vi, vi)
# for j in range(i+1):
for j in range(self.Nvstars):
for Rj, vj in zip(self.vecpos[j], self.vecvec[j]):
if Rj == FS:
if not omega2: rate0expansion[i, j, jt] += np.dot(vi, vj)
rate1expansion[i, j, k] += np.dot(vi, vj)
if omega2:
# find the "origin state" corresponding to the solute; "remove" those rates
OSindex = self.starset.stateindex(PairState.zero(self.starset.states[IS].i,
self.starset.crys.dim))
if OSindex is not None:
for j in range(self.Nvstars):
for Rj, vj in zip(self.vecpos[j], self.vecvec[j]):
if Rj == OSindex:
rate0expansion[i, j, jt] += np.dot(vi, vj)
rate0expansion[j, i, jt] += np.dot(vi, vj)
rate0escape[j, jt] -= np.dot(vj, vj)
# cleanup on return
return zeroclean(rate0expansion), zeroclean(rate0escape), \
zeroclean(rate1expansion), zeroclean(rate1escape)
def biasexpansions(self, jumpnetwork, jumptype, omega2=False):
"""
Construct the bias1 and bias0 vector expansion in terms of the jumpnetwork.
We return the bias0 contribution so that the db = bias1 - bias0 can be determined.
This saves us from having to deal with issues with our outer shell where we only
have a fraction of the escapes, but as long as the kinetic shell is one more than
the thermodynamics (so that the interaction energy is 0, hence no change in probability),
this will work. The PS (pair stars) is useful for including the probability factor
for the endpoint of the jump; we just call it the 'probfactor' below.
*Note:* this used to be separated into bias1expansion, and bias2expansion,and
had terms that are now in rateexpansions.
Note also that if jumpnetwork_omega2 is passed, it also works for that. However,
in that case we have a different approach for the calculation of bias1expansion:
if there are origin states, they get the negative summed bias of the others.
:param jumpnetwork: jumpnetwork of symmetry unique omega1-type jumps,
corresponding to our starset. List of lists of (IS, FS), dx tuples, where IS and FS
are indices corresponding to states in our starset.
:param jumptype: specific omega0 jump type that the jump corresponds to
:param omega2: (optional) are we dealing with the omega2 list, so we need to remove
origin states? (default=False)
:return bias0expansion: array[Nsv, Njump_omega0]
the gen0 vector[i] = sum(bias0expasion[i, k] * sqrt(probfactor0[PS[k]]) * omega0[k])
:return bias1expansion: array[Nsv, Njump_omega1]
the gen1 vector[i] = sum(bias1expansion[i, k] * sqrt(probfactor[PS[k]] * omega1[k])
"""
if self.Nvstars == 0: return None
bias0expansion = np.zeros((self.Nvstars, len(self.starset.jumpnetwork_index)))
bias1expansion = np.zeros((self.Nvstars, len(jumpnetwork)))
for k, jumplist, jt in zip(itertools.count(), jumpnetwork, jumptype):
for (IS, FS), dx in jumplist:
# run through the star-vectors; just use first as representative
for i, svR, svv in zip(itertools.count(), self.vecpos, self.vecvec):
if svR[0] == IS:
geom_bias = np.dot(svv[0], dx) * len(svR)
bias1expansion[i, k] += geom_bias
bias0expansion[i, jt] += geom_bias
if omega2:
# find the "origin state" corresponding to the solute; incorporate the change in bias
OSindex = self.starset.stateindex(PairState.zero(self.starset.states[IS].i,
self.starset.crys.dim))
if OSindex is not None:
for j in range(self.Nvstars):
for Rj, vj in zip(self.vecpos[j], self.vecvec[j]):
if Rj == OSindex:
geom_bias = -np.dot(vj, dx)
bias1expansion[j, k] += geom_bias # do we need this??
bias0expansion[j, jt] += geom_bias
# cleanup on return
return zeroclean(bias0expansion), zeroclean(bias1expansion)
    # this is *almost* a static method--it only needs to know how many omega0 type jumps there are
    # in the starset. We *could* make it static and use max(jumptype), but that may not be strictly safe
def bareexpansions(self, jumpnetwork, jumptype):
"""
Construct the bare diffusivity expansion in terms of the jumpnetwork.
We return the reference (0) contribution so that the change can be determined; this
is useful for the vacancy contributions.
This saves us from having to deal with issues with our outer shell where we only
have a fraction of the escapes, but as long as the kinetic shell is one more than
the thermodynamics (so that the interaction energy is 0, hence no change in probability),
this will work. The PS (pair stars) is useful for including the probability factor
for the endpoint of the jump; we just call it the 'probfactor' below.
Note also: this *currently assumes* that the displacement vector *does not change* between
omega0 and omega(1/2).
:param jumpnetwork: jumpnetwork of symmetry unique omega1-type jumps,
corresponding to our starset. List of lists of (IS, FS), dx tuples, where IS and FS
are indices corresponding to states in our starset.
:param jumptype: specific omega0 jump type that the jump corresponds to
        :return D0expansion: array[dim, dim, Njump_omega0]
            the D0[a,b,jt] = sum(D0expansion[a,b, jt] * sqrt(probfactor0[PS[jt][0]]*probfactor0[PS[jt][1]]) * omega0[jt])
        :return D1expansion: array[dim, dim, Njump_omega1]
            the D1[a,b,k] = sum(D1expansion[a,b, k] * sqrt(probfactor[PS[k][0]]*probfactor[PS[k][1]]) * omega[k])
"""
if self.Nvstars == 0: return None
# dim = len(jumpnetwork[0][0][1])
dim = self.starset.crys.dim
D0expansion = np.zeros((dim, dim, len(self.starset.jumpnetwork_index)))
D1expansion = np.zeros((dim, dim, len(jumpnetwork)))
for k, jt, jumplist in zip(itertools.count(), jumptype, jumpnetwork):
d0 = sum(0.5 * np.outer(dx, dx) for ISFS, dx in jumplist) # we don't need initial/final state
D0expansion[:, :, jt] += d0
D1expansion[:, :, k] += d0
# cleanup on return
return zeroclean(D0expansion), zeroclean(D1expansion)
def originstateVectorBasisfolddown(self, elemtype='solute'):
"""
Construct the expansion to "fold down" from vector stars to origin states.
        :param elemtype: 'solute' or 'vacancy', depending on which site we need to reduce
:return OSindices: list of indices corresponding to origin states
:return folddown: [NOS, Nvstars] to map vector stars to origin states
        :return OS_VB: [NOS, Nsites, dim] mapping of origin state to a vector basis
"""
attr = {'solute': 'i', 'vacancy': 'j'}.get(elemtype)
if attr is None: raise ValueError('elemtype needs to be "solute" or "vacancy" not {}'.format(elemtype))
OSindices = [n for n in range(self.Nvstars) if self.starset.states[self.vecpos[n][0]].iszero()]
NOS, Nsites = len(OSindices), len(self.starset.crys.basis[self.starset.chem])
folddown = np.zeros((NOS, self.Nvstars))
# dim = len(self.vecvec[0][0])
dim = self.starset.crys.dim
OS_VB = np.zeros((NOS, Nsites, dim))
if NOS==0:
return OSindices, folddown, OS_VB
for i, ni in enumerate(OSindices):
for OS, OSv in zip(self.vecpos[ni], self.vecvec[ni]):
index = getattr(self.starset.states[OS], attr)
OS_VB[i, index, :] = OSv[:]
for j, svR, svv in zip(itertools.count(), self.vecpos, self.vecvec):
for s, v in zip(svR, svv):
if getattr(self.starset.states[s], attr) == index:
folddown[i, j] += np.dot(OSv, v)
# cleanup on return
return OSindices, zeroclean(folddown), zeroclean(OS_VB)
| DallasTrinkle/Onsager | onsager/crystalStars.py | Python | mit | 55,293 | [
"CRYSTAL"
] | bff5f17f4fecb95ff12ab63e61a53b4ac5a27f34193ebc3e4697098da4e47441 |
import os
import sys
import time
import logging
import datetime
import numpy as np
from data import *
from time import clock
from parameters import *
from collections import defaultdict
import nest  # PyNEST is used throughout this module (nest.Create, nest.Connect, ...); assumed available
spike_generators = {} # dict name_part : spikegenerator
spike_detectors = {} # dict name_part : spikedetector
multimeters = {} # dict name_part : multimeter
startsimulate = 0
endsimulate = 0
txt_result_path = "" # path for txt results
all_parts = tuple() # tuple of all parts
MaxSynapses = 4000 # max synapses
SYNAPSES = 0 # synapse number
NEURONS = 0 # neurons number
times = [] # store time simulation
logging.basicConfig(format='%(name)s.%(levelname)s: %(message)s.', level=logging.DEBUG)
logger = logging.getLogger('function')
def getAllParts():
return all_parts
def generate_neurons(NNumber):
global NEURONS, all_parts
logger.debug("* * * Start generate neurons")
##################################dopa###############
parts_with_nora = nts + lc + bnst
parts_simple = (thalamus[thalamus_Glu],
prefrontal[pfc_Glu0], prefrontal[pfc_Glu1],prefrontal[pfc_NA],
nac[nac_Ach], nac[nac_GABA0], nac[nac_GABA1], nac[nac_NA],
vta[vta_GABA0], vta[vta_GABA1], vta[vta_GABA2],
amygdala[amygdala_Glu], amygdala[amygdala_Ach], amygdala[amygdala_GABA],
snc[snc_GABA]) + \
motor + pptg + snr + gpe + gpi + stn + pgi + prh + ldt + vta
parts_with_dopa = (vta[vta_DA0], vta[vta_DA1], snc[snc_DA], nac[nac_DA],
prefrontal[pfc_DA])
parts_with_5HT = (thalamus[thalamus_5HT], prefrontal[pfc_5HT],
nac[nac_5HT], vta[vta_5HT], amygdala[amygdala_5HT]) + \
medial_cortex + neocortex + lateral_cortex + \
entorhinal_cortex + septum + lateral_tegmental_area + \
periaqueductal_gray + hippocampus + hypothalamus + \
insular_cortex + rn + striatum
all_parts = tuple(sorted(parts_simple + parts_with_dopa + parts_with_5HT + parts_with_nora))
NN_coef = float(NNumber) / sum(item[k_NN] for item in all_parts)
NEURONS = sum(item[k_NN] for item in all_parts)
logger.debug('Initialized: {0} neurons'.format(NEURONS))
# Init neuron models with our parameters
# nest.SetDefaults('iaf_psc_exp', iaf_neuronparams)
# nest.SetDefaults('iaf_psc_alpha', iaf_neuronparams)
#nest.SetDefaults('hh_cond_exp_traub', hh_neuronparams)
# Parts without dopamine and 5HT and nora
for part in parts_simple:
part[k_model] = 'hh_cond_exp_traub'
# Parts with dopamine and 5HT
for part in parts_with_dopa + parts_with_5HT:
part[k_model] = 'hh_cond_exp_traub'
# Parts with noradrenaline
for part in parts_with_nora:
        part[k_model] = 'hh_cond_exp_traub'
    # Creating neurons
for part in all_parts:
part[k_NN] = NN_minimal if int(part[k_NN] * NN_coef) < NN_minimal else int(part[k_NN] * NN_coef)
part[k_IDs] = nest.Create(part[k_model], part[k_NN])
logger.debug("{0} [{1}, {2}] {3} neurons".format(part[k_name], part[k_IDs][0], part[k_IDs][-1:][0], part[k_NN]))
def log_connection(pre, post, syn_type, weight):
global SYNAPSES
connections = pre[k_NN] * post[k_NN] if post[k_NN] < MaxSynapses else pre[k_NN] * MaxSynapses
SYNAPSES += connections
logger.debug("{0} -> {1} ({2}) w[{3}] // "
"{4}x{5}={6} synapses".format(pre[k_name], post[k_name], syn_type[:-8], weight, pre[k_NN],
MaxSynapses if post[k_NN] > MaxSynapses else post[k_NN], connections))
def connect(pre, post, syn_type=GABA, weight_coef=1):
# Set new weight value (weight_coef * basic weight)
nest.SetDefaults(synapses[syn_type][model], {'weight': weight_coef * synapses[syn_type][basic_weight]})
# Create dictionary of connection rules
conn_dict = {'rule': 'fixed_outdegree',
'outdegree': MaxSynapses if post[k_NN] > MaxSynapses else post[k_NN],
'multapses': True}
# Connect PRE IDs neurons with POST IDs neurons, add Connection and Synapse specification
nest.Connect(pre[k_IDs], post[k_IDs], conn_spec=conn_dict, syn_spec=synapses[syn_type][model])
# Show data of new connection
log_connection(pre, post, synapses[syn_type][model], nest.GetDefaults(synapses[syn_type][model])['weight'])
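# Illustrative call (hedged example; the part dictionaries and synapse constants come from parameters.py):
#   connect(nac[nac_GABA0], vta[vta_GABA0], syn_type=GABA, weight_coef=1.5)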
def connect_generator(part, startTime=1, stopTime=T, rate=250, coef_part=1):
name = part[k_name]
# Add to spikeGenerators dict a new generator
spike_generators[name] = nest.Create('poisson_generator', 1, {'rate' : float(rate),
'start': float(startTime),
'stop' : float(stopTime)})
# Create dictionary of connection rules
conn_dict = {'rule': 'fixed_outdegree',
'outdegree': int(part[k_NN] * coef_part)}
# Connect generator and part IDs with connection specification and synapse specification
nest.Connect(spike_generators[name], part[k_IDs], conn_spec=conn_dict, syn_spec=static_syn)
# Show data of new generator
logger.debug("Generator => {0}. Element #{1}".format(name, spike_generators[name][0]))
def connect_detector(part):
name = part[k_name]
# Init number of neurons which will be under detector watching
number = part[k_NN] if part[k_NN] < N_detect else N_detect
# Add to spikeDetectors a new detector
spike_detectors[name] = nest.Create('spike_detector', params=detector_param)
# Connect N first neurons ID of part with detector
nest.Connect(part[k_IDs][:number], spike_detectors[name])
# Show data of new detector
logger.debug("Detector => {0}. Tracing {1} neurons".format(name, number))
def connect_multimeter(part):
name = part[k_name]
multimeters[name] = nest.Create('multimeter', params=multimeter_param) # ToDo add count of multimeters
nest.Connect(multimeters[name], (part[k_IDs][:N_volt]))
logger.debug("Multimeter => {0}. On {1}".format(name, part[k_IDs][:N_volt]))
def f_name_gen(path, name):
    """Generates the full file name of an image."""
    return "{0}{1}{2}.png".format(path, name, "" if dopamine_flag else "")
def simulate():
global startsimulate, endsimulate, SAVE_PATH
begin = 0
SAVE_PATH = "../results/output-{0}/".format(NEURONS)
#SAVE_PATH = "../Res/4/".format(NEURONS)
if not os.path.exists(SAVE_PATH):
os.makedirs(SAVE_PATH)
#nest.PrintNetwork()
logger.debug('* * * Simulating')
startsimulate = datetime.datetime.now()
for t in np.arange(0, T, dt):
print "SIMULATING [{0}, {1}]".format(t, t + dt)
nest.Simulate(dt)
end = clock()
times.append("{0:10.1f} {1:8.1f} "
"{2:10.1f} {3:4.1f} {4}\n".format(begin, end - begin, end, t, datetime.datetime.now().time()))
begin = end
print "COMPLETED {0}%\n".format(t/dt)
endsimulate = datetime.datetime.now()
logger.debug('* * * Simulation completed successfully')
def get_log(startbuild, endbuild):
logger.info("Number of neurons : {}".format(NEURONS))
logger.info("Number of synapses : {}".format(SYNAPSES))
logger.info("Building time : {}".format(endbuild - startbuild))
logger.info("Simulation time : {}".format(endsimulate - startsimulate))
logger.info("Dopamine : {}".format('YES' if dopamine_flag else 'NO'))
def save(GUI):
global txt_result_path
if GUI:
import pylab as pl
import nest.raster_plot
import nest.voltage_trace
logger.debug("Saving IMAGES into {0}".format(SAVE_PATH))
N_events_gen = len(spike_generators)
for key in spike_detectors:
try:
nest.raster_plot.from_device(spike_detectors[key], hist=True)
pl.savefig(f_name_gen(SAVE_PATH, "spikes_" + key.lower()), dpi=dpi_n, format='png')
pl.close()
except Exception:
print("From {0} is NOTHING".format(key))
N_events_gen -= 1
for key in multimeters:
try:
nest.voltage_trace.from_device(multimeters[key])
pl.savefig(f_name_gen(SAVE_PATH, "volt_" + key.lower()), dpi=dpi_n, format='png')
pl.close()
except Exception:
print("From {0} is NOTHING".format(key))
print "Results {0}/{1}".format(N_events_gen, len(spike_detectors))
print "Results {0}/{1}".format(N_events_gen, len(spike_detectors))
txt_result_path = SAVE_PATH + 'txt/'
logger.debug("Saving TEXT into {0}".format(txt_result_path))
if not os.path.exists(txt_result_path):
os.mkdir(txt_result_path)
for key in spike_detectors:
save_spikes(spike_detectors[key], name=key)
#for key in multimeters:
# save_voltage(multimeters[key], name=key)
with open(txt_result_path + 'timeSimulation.txt', 'w') as f:
for item in times:
f.write(item)
def save_spikes(detec, name, hist=False):
title = "Raster plot from device '%i'" % detec[0]
ev = nest.GetStatus(detec, "events")[0]
ts = ev["times"]
gids = ev["senders"]
data = defaultdict(list)
if len(ts):
with open("{0}@spikes_{1}.txt".format(txt_result_path, name), 'w') as f:
f.write("Name: {0}, Title: {1}, Hist: {2}\n".format(name, title, "True" if hist else "False"))
for num in range(0, len(ev["times"])):
data[round(ts[num], 1)].append(gids[num])
for key in sorted(data.iterkeys()):
f.write("{0:>5} : {1:>4} : {2}\n".format(key, len(data[key]), sorted(data[key])))
else:
print "Spikes in {0} is NULL".format(name)
def save_voltage(detec, name):
title = "Membrane potential"
ev = nest.GetStatus(detec, "events")[0]
with open("{0}@voltage_{1}.txt".format(txt_result_path, name), 'w') as f:
f.write("Name: {0}, Title: {1}\n".format(name, title))
print int(T / multimeter_param['interval'])
for line in range(0, int(T / multimeter_param['interval'])):
for index in range(0, N_volt):
print "{0} {1} ".format(ev["times"][line], ev["V_m"][line])
#f.write("\n")
print "\n"
| research-team/NEUCOGAR | NEST/cube/integration/integration-10/func.py | Python | gpl-2.0 | 10,377 | [
"NEURON"
] | 169c5d2f7f0841625bb2899d9451cf71ecd27dc5e472b6fdebcff90df1bebdd1 |
#! /usr/bin/env python
"""
Copyright (c) 2000-2010, David Boddie
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import os, string, sys
import ADFSlib
try:
import cmdsyntax
use_getopt = 0
except ImportError:
import getopt
use_getopt = 1
__version__ = "0.42 (Sun 29th August 2010)"
default_convert_dict = {"/": "."}
def read_cmdsyntax_input(argv, syntax):
syntax_obj = cmdsyntax.Syntax(syntax)
matches, failed = syntax_obj.get_args(argv[1:], return_failed = 1)
if len(matches) != 1 and cmdsyntax.use_GUI() != None:
form = cmdsyntax.Form("ADF2INF", syntax_obj, failed[0])
matches = form.get_args()
# Take the first match, if possible.
if len(matches) > 0:
match = matches[0]
else:
match = None
return match
def read_getopt_input(argv):
opts, args = getopt.getopt(argv[1:], "ldts:c:vh")
match = {}
opt_dict = {"-l": "list", "-d": "create-directory", "-t": "file-types", "-s": "separator",
"-v": "verify", "-c": "convert", "-h": "help"}
arg_list = ["ADF file", "destination path"]
# Read the options specified.
for opt, value in opts:
if opt_dict.has_key(opt):
match[opt_dict[opt]] = value or '1'
else:
return None
# Read the remaining arguments.
if match.has_key("help"):
return None
elif match.has_key("list"):
if len(args) != 1:
# For list operations, there should be one remaining argument.
return None
elif match.has_key("verify"):
if len(args) != 1:
# For verify operations, there should be one remaining argument.
return None
elif len(args) != 2:
# For all other operations, there should be two remaining arguments.
return None
i = 0
for arg in args:
match[arg_list[i]] = arg
i = i + 1
if match == {}: match = None
return match
if __name__ == "__main__":
if use_getopt == 0:
syntax = """
\r( (-l | --list) [-t | --file-types] <ADF file> ) |
\r
\r( [-d | --create-directory]
\r [ (-t | --file-types) [(-s separator) | --separator=character] ]
\r [(-c convert) | --convert=characters]
\r [-m | --time-stamps]
\r <ADF file> <destination path> ) |
\r
\r( (-v | --verify) <ADF file> ) |
\r
\r(-h | --help)
"""
match = read_cmdsyntax_input(sys.argv, syntax)
else:
syntax = "[-l] [-d] [-t] [-s separator] [-v] [-c characters] [-m] " + \
"<ADF file> <destination path>"
match = read_getopt_input(sys.argv)
if match == {} or match is None or \
match.has_key("h") or match.has_key("help"):
print "Syntax: ADF2INF.py " + syntax
print
print 'ADF2INF version ' + __version__
print 'ADFSlib version ' + ADFSlib.__version__
print
print 'Take the files stored in the directory given and store them as files with'
print 'corresponding INF files.'
print
print 'If the -l flag is specified then the catalogue of the disc will be printed.'
print
print "The -d flag specifies that a directory should be created using the disc's"
print 'name into which the contents of the disc will be written.'
print
print "The -t flag causes the load and execution addresses of files to be"
print "interpreted as file type information for files created on RISC OS."
print "A separator used to append a suffix onto the file is optional and"
print "defaults to the standard period character; e.g. myfile.fff"
print
print "The -s flag is used to specify the character which joins the file"
print "name to the file type. This can only be specified when extracting"
print "files from a disc image."
print
print "The -v flag causes the disc image to be checked for simple defects and"
print "determines whether there are files and directories which cannot be"
print "correctly located by this tool, whether due to a corrupted disc image"
print "or a bug in the image decoding techniques used."
print
print "The -c flag allows the user to define a conversion dictionary for the"
print "characters found in ADFS filenames. The format of the string used to"
print "define this dictionary is a comma separated list of character pairs:"
print
print " <src1><dest1>[,<src2><dest2>]..."
print
print "If no conversion dictionary is specified then a default dictionary will"
print "be used. This is currently defined as"
print
print " %s" % repr(default_convert_dict)
print
print "The -m flag determines whether the files extracted from the disc"
print "image should retain their time stamps on the target system."
print
sys.exit()
# Determine whether the file is to be listed
listing = match.has_key("l") or match.has_key("list")
use_name = match.has_key("d") or match.has_key("create-directory")
filetypes = match.has_key("t") or match.has_key("file-types")
use_separator = match.has_key("s") or match.has_key("separator")
verify = match.has_key("v") or match.has_key("verify")
convert = match.has_key("c") or match.has_key("convert")
with_time_stamps = match.has_key("m") or match.has_key("time-stamps")
adf_file = match["ADF file"]
out_path = match.get("destination path", None)
separator = match.get("separator", ",")
if sys.platform == 'RISCOS':
suffix = '/'
else:
suffix = '.'
if filetypes == 0 or (filetypes != 0 and use_separator == 0):
# Use the standard suffix separator for the current platform if
# none is specified.
separator = suffix
# Try to open the ADFS disc image file.
try:
adf = open(adf_file, "rb")
except IOError:
print "Couldn't open the ADF file: %s" % adf_file
print
sys.exit()
if listing == 0 and verify == 0:
try:
# Create an ADFSdisc instance using this file.
adfsdisc = ADFSlib.ADFSdisc(adf)
except ADFSlib.ADFS_exception:
print "Unrecognised disc image: %s" % adf_file
sys.exit()
elif listing != 0:
try:
# Create an ADFSdisc instance using this file.
adfsdisc = ADFSlib.ADFSdisc(adf, verify = 1)
except ADFSlib.ADFS_exception:
print "Unrecognised disc image: %s" % adf_file
sys.exit()
else:
# Verifying
print "Verifying..."
print
try:
# Create an ADFSdisc instance using this file.
adfsdisc = ADFSlib.ADFSdisc(adf, verify = 1)
except ADFSlib.ADFS_exception:
print "Unrecognised disc image: %s" % adf_file
sys.exit()
adfsdisc.print_log(verbose = 1)
# Exit
sys.exit()
if listing != 0:
# Print catalogue
print 'Contents of', adfsdisc.disc_name,':'
print
adfsdisc.print_catalogue(adfsdisc.files, adfsdisc.root_name, filetypes)
print
adfsdisc.print_log()
# Exit
sys.exit()
# Make sure that the disc is put in a directory corresponding to the disc
# name where applicable.
if use_name != 0:
# Place the output files on this new path.
out_path = os.path.join(out_path, adfsdisc.disc_name)
# If a list of conversions was specified then create a dictionary to
# pass to the disc object's extraction method.
if match.has_key("convert"):
convert_dict = {}
pairs = string.split(match["convert"])
try:
for pair in pairs:
convert_dict[pair[0]] = pair[1]
except IndexError:
print "Insufficient characters in character conversion list."
sys.exit()
else:
# Use a default conversion dictionary.
convert_dict = default_convert_dict
# Extract the files
adfsdisc.extract_files(
out_path, adfsdisc.files, filetypes, separator, convert_dict,
with_time_stamps
)
# Exit
sys.exit()
| BackupTheBerlios/python-adfs | ADF2INF.py | Python | gpl-3.0 | 9,472 | [
"ADF"
] | a8bf5b16204ada68756de84e1348f65fd79eba7c64cc1aa12259d7d6ecf5451a |
import random
from plugin import plugin
from colorama import Fore
def delay(): # method to pause after a series of actions have been completed.
n = input("Press enter to continue")
def wiped_slate(player): # resets all hands and bets
player['hands'] = []
player['suits'] = []
player['bets'] = []
return player
def pprinthand(hand, suit, type='visible'): # returns hand as a string which may or may not be hidden.
temphand = hand[:]
for i in range(len(temphand)):
if temphand[i] == 1 or temphand[i] == 11:
temphand[i] = 'A' # 1 or 11 is value of ace.
temphand[i] = str(temphand[i]) + " of " + suit[i]
if type == 'visible':
return str(temphand)
elif type == 'partially-visible':
return '[' + str(temphand[0]) + ',hidden]'
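# Illustrative output of pprinthand (hedged example):
#   pprinthand([11, 'K'], ['spades', 'hearts'])                           -> "['A of spades', 'K of hearts']"
#   pprinthand([11, 'K'], ['spades', 'hearts'], type='partially-visible') -> '[A of spades,hidden]'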
def pprinthandlist(handlist, suitlist): # returns handlist as a string
newhandlist = []
for i in range(len(handlist)):
newhandlist.append(pprinthand(handlist[i], suitlist[i]))
return str(newhandlist)
def blackjacksum(orig_hand): # computes the sum by assuming appropriate value of Ace.
hand = orig_hand[:]
for i in range(len(hand)):
        if str(hand[i]) in 'JQK':  # convert face cards to their value, i.e. 10.
            hand[i] = 10
    if sum(hand) <= 11:  # choose the value of the Ace card (either 1 or 11) according to the sum.
for i in range(len(hand)):
if hand[i] == 1:
hand[i] = 11
orig_hand[i] = 11
break
elif sum(hand) > 21:
for i in range(len(hand)):
if hand[i] == 11:
hand[i] = 1
orig_hand[i] = 1
break
return sum(hand), orig_hand
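# Illustrative behaviour of blackjacksum (hedged example):
#   blackjacksum([1, 5])      -> (16, [11, 5])    # ace promoted to 11 while the total stays <= 21
#   blackjacksum([11, 9, 5])  -> (15, [1, 9, 5])  # ace demoted back to 1 once the hand would bust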
def move(hand, suit, cards, suits,
bet): # Here, hand is a nested list inside a list. It is a list of all hands of a player.
# Player can have multiple hands if he/she chooses to split.
sum_, hand[0] = blackjacksum(hand[0])
print("Your hand is", pprinthand(hand[0], suit[0]))
print("Your sum is", sum_)
print('---------------------------')
# checks for bust or blackjack.
if sum_ > 21:
print("You got busted!")
return hand, suit, bet
elif sum_ == 21 and len(hand) == 2:
print("Blackjack!")
return hand, suit, bet
while True:
choice = input("Press H to Hit, S to Stand, D to Double-Down, P to sPlit\n")
if choice in ['H', 'h']:
newcard = random.choice(cards)
newsuit = random.choice(suits)
print("Newcard is", str(newcard) + " of " + newsuit)
hand[0].append(newcard)
suit[0].append(newsuit)
print("Updated hand is", pprinthand(hand[0], suit[0]))
sum_, hand[0] = blackjacksum(hand[0])
hand, suit, bet = move(hand, suit, cards, suits, bet)
return hand, suit, bet
elif choice in ['S', 's']:
return hand, suit, bet
elif choice in ['D', 'd']:
newcard = random.choice(cards)
print("Newcard is", newcard)
newsuit = random.choice(suits)
hand[0].append(newcard)
suit[0].append(newsuit)
print("Updated hand is", pprinthand(hand[0], suit[0]))
sum_, hand[0] = blackjacksum(hand[0])
print("Your sum is", sum_)
if sum_ > 21:
print("You got busted!")
bet[0] = bet[0] * 2
print("Your new bet is", bet[0])
return hand, suit, bet
elif choice in ['P', 'p']:
if hand[0][0] == hand[0][1]:
if not hand[0][0] == 1:
splitHand1 = [[0, 0]]
splitHand2 = [[0, 0]]
splitSuit1 = [[0, 0]]
splitSuit2 = [[0, 0]]
newcard1 = random.choice(cards)
newsuit1 = random.choice(suits)
print("Newcard for first split is", str(newcard1) + " of " + newsuit1)
newcard2 = random.choice(cards)
newsuit2 = random.choice(suits)
print("Newcard for second split is", str(newcard2) + " of " + newsuit2)
splitHand1[0][0] = hand[0][0]
splitHand2[0][0] = hand[0][1]
splitHand1[0][1] = newcard1
splitHand2[0][1] = newcard2
splitSuit1[0][0] = suit[0][0]
splitSuit2[0][0] = suit[0][1]
splitSuit1[0][1] = newsuit1
splitSuit2[0][1] = newsuit2
print("Split hands are", pprinthand(splitHand1[0], splitSuit1[0]), ", ",
pprinthand(splitHand2[0], splitSuit2[0]))
sum1, splitHand1[0] = blackjacksum(splitHand1[0])
sum2, splitHand2[0] = blackjacksum(splitHand2[0])
print("Your sum for split 1 is", sum1)
print("Your sum for split 2 is", sum2)
bet1 = bet[:]
bet2 = bet[:]
splitHand1, splitSuit1, bet1 = move(splitHand1, splitSuit1, cards, suits, bet1)
splitHand2, splitSuit2, bet2 = move(splitHand2, splitSuit2, cards, suits, bet2)
splitHand1.extend(splitHand2) # converting both hands to a single list
splitSuit1.extend(splitSuit2)
bet1.extend(bet2) # converting both bets to a single list
return splitHand1, splitSuit1, bet1
else:
print("Sorry,you can't split aces")
hand, suit, bet = move(hand, suit, cards, suits, bet)
return hand, suit, bet
else:
print("Sorry, you can only split hands with identical cards")
hand, suit, bet = move(hand, suit, cards, suits, bet)
return hand, suit, bet
else:
print("Please try again with a valid choice.")
@plugin('blackjack')
def blackjack(jarvis, s):
jarvis.say("Welcome to the casino! Let's play blackjack!", Fore.GREEN)
player = {"hands": [], "suits": [], "bets": [], 'profit': []}
cards = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 'J', 'Q', 'K']
suits = ['spades', 'hearts', 'diamonds', 'clubs']
choice = 'y'
delay()
# Instructions
jarvis.say('How to play:', Fore.GREEN)
jarvis.say('-->The goal of blackjack is to beat the dealer\'s hand without going over 21.', Fore.CYAN)
jarvis.say('-->Face cards are worth 10. Aces are worth 1 or 11, whichever makes a better hand.', Fore.CYAN)
jarvis.say('-->Each player starts with two cards, one of the dealer\'s cards is hidden until the end.', Fore.CYAN)
jarvis.say('-->To \'Hit\' is to ask for another card. To \'Stand\' is to hold your total and end your turn.',
Fore.CYAN)
jarvis.say('-->If you go over 21 you bust, and the dealer wins regardless of the dealer\'s hand.', Fore.CYAN)
jarvis.say('-->If you are dealt 21 from the start (Ace & 10), you got a blackjack.', Fore.CYAN)
jarvis.say('-->Blackjack means you win 1.5 the amount of your bet.', Fore.CYAN)
jarvis.say('-->Dealer will hit until his/her cards total 17 or higher.', Fore.CYAN)
jarvis.say('-->Doubling is like a hit, only the bet is doubled and you only get one more card.', Fore.CYAN)
jarvis.say('-->Split can be done when you have two of the same card - the pair is split into two hands.', Fore.CYAN)
jarvis.say('-->Splitting also doubles the bet, because each new hand is worth the original bet.', Fore.CYAN)
jarvis.say('-->You cannot split two aces.', Fore.CYAN)
jarvis.say('-->You can double on a hand resulting from a split, tripling or quadrupling you bet.', Fore.CYAN)
while choice in "Yy":
jarvis.say('Shuffling the cards....', Fore.BLUE)
jarvis.say("Let's start the game!", Fore.BLUE)
# Bets
jarvis.say("How much are you betting?", Fore.BLUE)
bet = jarvis.input_number()
player['bets'].append(bet)
delay()
jarvis.say('---------------------------')
# Cards
jarvis.say("Dealing the cards............", Fore.BLUE)
jarvis.say("Your cards....", Fore.BLUE)
hand = [random.choice(cards), random.choice(cards)]
suit = [random.choice(suits), random.choice(suits)]
player["hands"].append(hand)
player["suits"].append(suit)
jarvis.say(pprinthand(hand, suit))
delay()
jarvis.say('---------------------------')
# Dealer's cards
dealerhand = [random.choice(cards), random.choice(cards)]
dealersuit = [random.choice(suits), random.choice(suits)]
jarvis.say("Dealer hand: " + pprinthand(dealerhand, dealersuit, type='partially-visible'), Fore.MAGENTA)
delay()
jarvis.say('---------------------------')
# Players' moves
jarvis.say("It's your turn, make your choice!", Fore.BLUE)
player['hands'], player['suits'], player['bets'] = move(player['hands'], player['suits'], cards, suits,
player['bets'])
jarvis.say("Your hands and respective bets for this round are:", Fore.BLUE)
jarvis.say(pprinthandlist(player['hands'], player['suits']) + " " + str(player['bets']), Fore.BLUE)
delay()
jarvis.say('---------------------------')
# Dealer's moves
jarvis.say("Dealer hand: " + pprinthand(dealerhand, dealersuit), Fore.MAGENTA)
dealersum, dealerhand = blackjacksum(dealerhand)
jarvis.say("Dealer's sum is " + str(dealersum), Fore.MAGENTA)
while dealersum < 17 or (
dealersum == 17 and 11 in dealerhand): # condition which determines if dealer hits or not.
jarvis.say("Dealer draws another card", Fore.MAGENTA)
dealerhand.append(random.choice(cards))
dealersuit.append(random.choice(suits))
jarvis.say("Newcard is " + str(dealerhand[-1]) + " of " + str(dealersuit[-1]), Fore.MAGENTA)
dealersum, dealerhand = blackjacksum(dealerhand)
jarvis.say("Dealer's sum is " + str(dealersum), Fore.MAGENTA)
jarvis.say("Dealer's hand is " + pprinthand(dealerhand, dealersuit), Fore.MAGENTA)
delay()
jarvis.say('---------------------------')
# Profit Calculation
jarvis.say("Let's see your results ", Fore.BLUE)
for j in range(len(player['hands'])):
hand = player['hands'][j]
suit = player['suits'][j]
bet = player['bets'][j]
sum_, hand = blackjacksum(hand)
dealersum, dealerhand = blackjacksum(dealerhand)
jarvis.say("For the hand- " + pprinthand(hand, suit) + ' sum is-' + str(sum_), Fore.BLUE)
if len(hand) == 2 and sum_ == 21:
jarvis.say("Blackjack!", Fore.BLUE)
profit = bet * 1.5
player['profit'].append(bet * 1.5)
elif sum_ > 21:
jarvis.say("Busted", Fore.BLUE)
profit = bet * -1
player['profit'].append(bet * -1)
elif dealersum > 21:
jarvis.say("Dealer Busted", Fore.BLUE)
profit = bet * 1
player['profit'].append(bet * 1)
elif dealersum > sum_:
jarvis.say("You lost", Fore.BLUE)
profit = bet * -1
player['profit'].append(bet * -1)
elif sum_ > dealersum:
jarvis.say("You win", Fore.BLUE)
profit = bet * 1
player['profit'].append(bet * 1)
elif sum_ == 21 and dealersum == 21 and len(dealerhand) == 2 and len(hand) > 2:
jarvis.say("You lost", Fore.BLUE)
profit = bet * -1
player['profit'].append(bet * -1)
elif sum_ == dealersum:
jarvis.say("Push", Fore.BLUE)
profit = bet * 0
player['profit'].append(bet * 0)
jarvis.say("Profit is- " + str(profit), Fore.BLUE)
        player = wiped_slate(player)
        choice = jarvis.input("Do you wish to play another round? Y/n \n", Fore.GREEN)
jarvis.say("OK then, Let's see the results", Fore.GREEN)
jarvis.say('---------------------------')
profit = sum(player['profit'])
if profit >= 0:
jarvis.say("Your total profit is " + str(profit), Fore.GREEN)
else:
jarvis.say("Your total loss is " + str(profit * -1), Fore.GREEN)
jarvis.say("Goodbye, Let's play again sometime!", Fore.GREEN)
| sukeesh/Jarvis | jarviscli/plugins/blackjack.py | Python | mit | 12,680 | [
"CASINO"
] | 482f64646282fd984fc6f0aac561259276a782f777174882a1e23b8dcfbd2c4e |
"""
A datastructure for summing over groups of symmetry equivalent reflections.
This module defines a blocked datastructure for summing over groups of
symmetry equivalent reflections, as required for scaling.
"""
from __future__ import annotations
from typing import List, Optional, Union
import numpy as np
import pandas as pd
from orderedset import OrderedSet
from scipy.sparse import csc_matrix
from cctbx import crystal, miller, sgtbx, uctbx
from dxtbx import flumpy
from scitbx import sparse
from dials.algorithms.scaling.error_model.error_model import BasicErrorModel
from dials.array_family import flex
def map_indices_to_asu(miller_indices, space_group, anomalous=False):
"""Map the indices to the asymmetric unit."""
crystal_symmetry = crystal.symmetry(space_group=space_group)
miller_set = miller.set(
crystal_symmetry=crystal_symmetry,
indices=miller_indices,
anomalous_flag=anomalous,
)
miller_set_in_asu = miller_set.map_to_asu()
return miller_set_in_asu.indices()
def get_sorted_asu_indices(asu_indices, space_group, anomalous=False):
"""Return the sorted asu indices and the permutation selection."""
crystal_symmetry = crystal.symmetry(space_group=space_group)
miller_set_in_asu = miller.set(
crystal_symmetry=crystal_symmetry, indices=asu_indices, anomalous_flag=anomalous
)
permuted = miller_set_in_asu.sort_permutation(by_value="packed_indices")
sorted_asu_miller_index = asu_indices.select(permuted)
return sorted_asu_miller_index, permuted
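# Illustrative use (hedged sketch; any valid space group and set of indices would do):
#   sg = sgtbx.space_group_info("P 2").group()
#   asu = map_indices_to_asu(flex.miller_index([(1, 0, 0), (-1, 0, 0)]), sg)
#   sorted_asu, perm = get_sorted_asu_indices(asu, sg)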
class IhTable:
"""
A class to manage access to Ih_table blocks.
The idea here is to split the data into blocks to allow parallelized
computations, but within the blocks the data are sorted by dataset.
In each block, there exists a block_selection_list which contains the indices
for each dataset from the input reflection table.
This class acts as a 'master' to setup the block structure and control access
to the underlying blocks - only metadata is kept in this class after
initialisation, the reflections etc are all contained in the blocks.
Attributes:
space_group: The space group for the dataset.
Ih_table_blocks (list): A list of IhTableBlock instances. All symmetry
equivalent reflections are recorded in the same block, to allow
splitting of the dataset for parallelized computations.
nblocks (int): The number of blocks in the Ih_table_blocks list.
blocked_selection_list (list): A list of lists. bsl[i][j] is the selection
list for block i, dataset j.
n_datasets: The number of input reflection tables used to make the Ih_table.
size: The number of reflections across all blocks
asu_index_dict (dict): A dictionary, key: asu_miller_index, value tuple
containing group_id and block_id (where group id is the group index
within its block).
"""
id_ = "IhTable"
def __init__(
self,
reflection_tables: List[flex.reflection_table],
space_group: sgtbx.space_group,
indices_lists: Optional[List[flex.size_t]] = None,
nblocks: int = 1,
free_set_percentage: float = 0,
free_set_offset: int = 0,
additional_cols: Optional[List[str]] = None,
anomalous: bool = False,
):
"""
Distribute the input data into the required structure.
The reflection data can be split into blocks, while the relevant
metadata is also generated.
A list of flex.size_t indices can be provided - this allows the
reflection table data to maintain a reference to a dataset from which
it was selected; these will be used when making the block selections.
e.g selection = flex.bool([True, False, True])
r_1 = r_master.select(selection)
indices_list = selection.iselection() = flex.size_t([0, 2])
then the block selection will contain 0 and 2 to refer back
to the location of the data in r_master.
"""
if indices_lists:
assert len(indices_lists) == len(reflection_tables)
self.anomalous = anomalous
self._asu_index_dict = {}
self._free_asu_index_dict = {}
self.space_group = space_group
self.n_work_blocks = nblocks
self.n_datasets = len(reflection_tables)
self.Ih_table_blocks = []
self.blocked_selection_list = []
self.properties_dict = {
"n_unique_in_each_block": [],
"n_reflections_in_each_block": {},
"miller_index_boundaries": [],
}
self.free_set_percentage = free_set_percentage
self._determine_required_block_structures(
reflection_tables, free_set_percentage, free_set_offset
)
self._create_empty_Ih_table_blocks()
for i, table in enumerate(reflection_tables):
if indices_lists:
self._add_dataset_to_blocks(
i, table, indices_lists[i], additional_cols=additional_cols
)
else:
self._add_dataset_to_blocks(i, table, additional_cols=additional_cols)
self.generate_block_selections()
self.free_Ih_table = None
if free_set_percentage > 0:
self.extract_free_set()
self.free_Ih_table = True
self.calc_Ih()
def update_data_in_blocks(
self, data: flex.double, dataset_id: int, column: str = "intensity"
) -> None:
"""
Update a given column across all blocks for a given dataset.
Given an array of data (of the same size as the input reflection
table) and the name of the column, use the internal data to split
this up and set in individual blocks.
"""
assert column in ["intensity", "variance", "inverse_scale_factor"]
assert dataset_id in range(self.n_datasets)
# split up data for blocks
data = flumpy.to_numpy(data)
for block in self.blocked_data_list:
data_for_block = data[block.block_selections[dataset_id]]
start = block.dataset_info[dataset_id]["start_index"]
end = block.dataset_info[dataset_id]["end_index"]
block.Ih_table.loc[
np.arange(start=start, stop=end), column
] = data_for_block
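    # Illustrative call (hedged example): push newly refined scales for dataset 0 into every block,
    # assuming `scales` is a flex.double of the same length as the input reflection table:
    #   ih_table.update_data_in_blocks(scales, 0, column="inverse_scale_factor")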
def get_block_selections_for_dataset(self, dataset: int) -> List[flex.size_t]:
"""Generate the block selection list for a given dataset."""
assert dataset in range(self.n_datasets)
if self.free_Ih_table:
return [
self.blocked_selection_list[i][dataset]
for i in range(self.n_work_blocks + 1)
]
return [
self.blocked_selection_list[i][dataset] for i in range(self.n_work_blocks)
]
@property
def size(self) -> int:
"""Sum the sizes of all work blocks to give the total number of reflections."""
if self.free_Ih_table:
return sum(block.size for block in self.Ih_table_blocks[:-1])
return sum(block.size for block in self.Ih_table_blocks)
def generate_block_selections(self) -> None:
"""Generate and set an updated blocked_selection_list."""
self.blocked_selection_list = [
block.block_selections for block in self.Ih_table_blocks
]
def update_weights(
self, error_model: Optional[BasicErrorModel] = None, dataset_id: int = None
) -> None:
"""Update the error model in the blocks."""
for block in self.Ih_table_blocks:
block.update_weights(error_model, dataset_id)
@property
def blocked_data_list(self) -> List["IhTableBlock"]:
"""Return the list of IhTableBlock instances."""
return self.Ih_table_blocks
def set_derivatives(self, derivatives: sparse.matrix, block_id: int) -> None:
"""Set the derivatives matrix for a given block."""
self.Ih_table_blocks[block_id].derivatives = derivatives
def set_inverse_scale_factors(self, new_scales: np.array, block_id: int) -> None:
"""Set the inverse scale factors for a given block."""
self.Ih_table_blocks[block_id].inverse_scale_factors = new_scales
def calc_Ih(self, block_id: int = None) -> None:
"""Calculate the latest value of Ih, for a given block or for all blocks."""
if block_id is not None:
self.Ih_table_blocks[block_id].calc_Ih()
else:
for block in self.Ih_table_blocks:
block.calc_Ih()
def _determine_required_block_structures(
self,
reflection_tables: List[flex.reflection_table],
free_set_percentage: float = 0,
free_set_offset: int = 0,
) -> None:
"""
Inspect the input to determine how to split into blocks.
Extract the asu miller indices from the reflection table and
add data to the asu_index_dict and properties dict.
"""
joint_asu_indices = flex.miller_index()
for table in reflection_tables:
if "asu_miller_index" not in table:
table["asu_miller_index"] = map_indices_to_asu(
table["miller_index"], self.space_group, self.anomalous
)
joint_asu_indices.extend(table["asu_miller_index"])
sorted_joint_asu_indices, _ = get_sorted_asu_indices(
joint_asu_indices, self.space_group, self.anomalous
)
if not sorted_joint_asu_indices:
raise ValueError("No data found in input file(s)")
asu_index_set = OrderedSet(sorted_joint_asu_indices)
n_unique_groups = len(asu_index_set)
n_free_groups = None
interval_between_free_groups = None
if free_set_percentage:
n_free_groups = int(free_set_percentage * n_unique_groups / 100.0)
n_work_groups = n_unique_groups - n_free_groups
interval_between_free_groups = int(100 / free_set_percentage)
else:
n_work_groups = n_unique_groups
self.n_work_blocks = min(self.n_work_blocks, n_work_groups)
# first remove the free set groups
if free_set_percentage:
groups_for_free_set = np.full(n_unique_groups, False, dtype=bool)
for_free = np.arange(
0 + free_set_offset, n_unique_groups, interval_between_free_groups
)
groups_for_free_set[for_free] = True
asu_index_set = np.array(list(asu_index_set))
# work_asu_index_set = asu_index_set[~groups_for_free_set]
free_asu_index_set = asu_index_set[groups_for_free_set]
else:
# work_asu_index_set = asu_index_set
free_asu_index_set = None
# also record how many unique groups go into each block
group_boundaries = [
int(i * n_unique_groups / self.n_work_blocks)
for i in range(self.n_work_blocks)
]
group_boundaries.append(n_unique_groups)
next_boundary = group_boundaries[1]
block_id = 0
group_id_in_block_i = 0
for i, index in enumerate(asu_index_set):
if i == next_boundary:
self.properties_dict["n_unique_in_each_block"].append(
group_id_in_block_i
)
self.properties_dict["miller_index_boundaries"].append(tuple(index))
block_id += 1
next_boundary = group_boundaries[block_id + 1]
group_id_in_block_i = 0
self._asu_index_dict[tuple(index)] = group_id_in_block_i
group_id_in_block_i += 1
# record the number in the last work block
self.properties_dict["n_unique_in_each_block"].append(group_id_in_block_i)
self.properties_dict["miller_index_boundaries"].append((10000, 10000, 10000))
block_id += 1
group_id_in_block_i = 0
if free_asu_index_set is not None:
for index in free_asu_index_set:
# no boundaries as all go into the final block
self._free_asu_index_dict[tuple(index)] = group_id_in_block_i
group_id_in_block_i += 1
# record the number in the free block
self.properties_dict["n_unique_in_each_block"].append(group_id_in_block_i)
self.properties_dict["miller_index_boundaries"].append((10000, 10000, 10000))
# ^ to avoid bounds checking when in last group
# need to know how many reflections will be in each block also
block_id = 0
idx_prev = 0
boundary = self.properties_dict["miller_index_boundaries"][0]
for i, index in enumerate(sorted_joint_asu_indices):
if index == boundary:
n_in_prev_group = i - idx_prev
self.properties_dict["n_reflections_in_each_block"][
block_id
] = n_in_prev_group
block_id += 1
boundary = self.properties_dict["miller_index_boundaries"][block_id]
idx_prev = i
self.properties_dict["n_reflections_in_each_block"][block_id] = (
len(sorted_joint_asu_indices) - idx_prev
)
def _create_empty_Ih_table_blocks(self) -> None:
for n in range(self.n_work_blocks):
n_refl_in_block = self.properties_dict["n_reflections_in_each_block"][n]
n_groups_in_block = self.properties_dict["n_unique_in_each_block"][n]
self.Ih_table_blocks.append(
IhTableBlock(
n_groups=n_groups_in_block,
n_refl=n_refl_in_block,
n_datasets=self.n_datasets,
)
)
def _add_dataset_to_blocks(
self,
dataset_id: int,
reflections: flex.reflection_table,
indices_array: Optional[flex.size_t] = None,
additional_cols: Optional[List[str]] = None,
) -> None:
sorted_asu_indices, perm = get_sorted_asu_indices(
reflections["asu_miller_index"], self.space_group, self.anomalous
)
hkl = reflections["asu_miller_index"]
df = pd.DataFrame()
df["intensity"] = flumpy.to_numpy(reflections["intensity"])
df["variance"] = flumpy.to_numpy(reflections["variance"])
df["inverse_scale_factor"] = flumpy.to_numpy(
reflections["inverse_scale_factor"]
)
if isinstance(additional_cols, list):
for col in additional_cols:
if col in reflections:
df[col] = flumpy.to_numpy(reflections[col])
if indices_array:
df["loc_indices"] = flumpy.to_numpy(indices_array)
else:
df["loc_indices"] = np.arange(df.shape[0], dtype=np.uint64)
df = df.iloc[flumpy.to_numpy(perm)]
hkl = hkl.select(perm)
df["dataset_id"] = np.full(df.shape[0], dataset_id, dtype=np.uint64)
# if data are sorted by asu_index, then up until boundary, should be in same
# block (still need to read group_id though)
# sort data, get group ids and block_ids
group_ids = np.zeros(sorted_asu_indices.size(), dtype=np.uint64)
boundary = self.properties_dict["miller_index_boundaries"][0]
boundary_id = 0
        boundaries_for_this_dataset = [0]  # use to slice
# make this a c++ method for speed?
prev = (0, 0, 0)
group_id = -1
for i, index in enumerate(sorted_asu_indices):
if index != prev:
while index >= boundary:
                    boundaries_for_this_dataset.append(i)
boundary_id += 1
boundary = self.properties_dict["miller_index_boundaries"][
boundary_id
]
group_id = self._asu_index_dict[tuple(index)]
prev = index
group_ids[i] = group_id
        while len(boundaries_for_this_dataset) < self.n_work_blocks + 1:
# catch case where last boundaries aren't reached
            boundaries_for_this_dataset.append(len(sorted_asu_indices))
# so now have group ids as well for individual dataset
if self.n_work_blocks == 1:
self.Ih_table_blocks[0].add_data(dataset_id, group_ids, df, hkl)
else:
            for i, val in enumerate(boundaries_for_this_dataset[:-1]):
start = val
end = boundaries_for_this_datset[i + 1]
self.Ih_table_blocks[i].add_data(
dataset_id, group_ids[start:end], df[start:end], hkl[start:end]
)
def extract_free_set(self) -> None:
"""Extract a free set from all blocks."""
assert not self.free_Ih_table
free_reflection_table = pd.DataFrame()
free_indices = np.array([], dtype=int).reshape((0,))
free_hkl = flex.miller_index([])
# for each block, remove a fraction of the groups
for j, block in enumerate(self.Ih_table_blocks):
n_groups = block.n_groups
groups_for_free_set = np.full(n_groups, False, dtype=bool)
for_free = np.array(
[
tuple(i) in self._free_asu_index_dict
for i in OrderedSet(block.asu_miller_index)
]
)
groups_for_free_set[for_free] = True
free_block = block.select_on_groups(groups_for_free_set)
free_reflection_table = pd.concat(
[free_reflection_table, free_block.Ih_table]
)
free_hkl.extend(free_block.asu_miller_index)
for sel in free_block.block_selections:
free_indices = np.concatenate([free_indices, sel])
self.Ih_table_blocks[j] = block.select_on_groups(~groups_for_free_set)
# Now need to update dataset_info dict.
removed_from_each_dataset = [
np.count_nonzero(free_block.Ih_table["dataset_id"].to_numpy() == i)
for i in range(0, block.n_datasets)
]
n_removed = 0
for i in range(0, self.Ih_table_blocks[j].n_datasets):
self.Ih_table_blocks[j].dataset_info[i]["start_index"] -= n_removed
n_removed += removed_from_each_dataset[i]
self.Ih_table_blocks[j].dataset_info[i]["end_index"] -= n_removed
self.blocked_selection_list = [
block.block_selections for block in self.Ih_table_blocks
]
# now split by dataset and use to instantiate another Ih_table
datasets = set(free_reflection_table["dataset_id"])
tables = []
indices_lists = []
n_refl = 0
for id_ in datasets:
dataset_sel = free_reflection_table["dataset_id"].to_numpy() == id_
n_refl += np.count_nonzero(dataset_sel)
tables.append(free_reflection_table[dataset_sel])
indices_lists.append(free_indices[dataset_sel])
free_block = IhTableBlock(
n_groups=len(set(free_hkl)), n_refl=n_refl, n_datasets=len(datasets)
)
group_ids = np.array(
[self._free_asu_index_dict[tuple(index)] for index in free_hkl],
dtype=np.uint64,
)
for id_, t in zip(datasets, tables):
dataset_sel = free_reflection_table["dataset_id"].to_numpy() == id_
group_id_this = group_ids[dataset_sel]
hkl_this = free_hkl.select(flumpy.from_numpy(dataset_sel))
free_block.add_data(id_, group_id_this, t, hkl_this)
self.Ih_table_blocks.append(free_block)
self.blocked_selection_list.append(free_block.block_selections)
def as_miller_array(
self, unit_cell: uctbx.unit_cell, return_free_set_data: bool = False
) -> miller.array:
"""Get a scaled miller array from the Ih_table and an experiment."""
blocked_data_list = self.blocked_data_list
joint_table = flex.reflection_table([])
if self.free_Ih_table:
if return_free_set_data:
blocked_data_list = [blocked_data_list[-1]]
else:
blocked_data_list = blocked_data_list[:-1]
if len(blocked_data_list) > 1:
for block in blocked_data_list:
joint_table.extend(block.as_reflection_table())
else:
joint_table = blocked_data_list[0].as_reflection_table()
# Filter out negative scale factors to avoid merging statistics errors.
return _reflection_table_to_iobs(joint_table, unit_cell, self.space_group)
class IhTableBlock:
"""
A datastructure for efficient summations over symmetry equivalent reflections.
This contains a reflection table, sorted by dataset, called the Ih_table,
a h_index_matrix (sparse) for efficiently calculating sums over symmetry
equivalent reflections as well as 'block_selections' which relate the order
of the data to the initial reflection tables used to initialise the (master)
IhTable.
Attributes:
Ih_table: A reflection table, containing I, g, w, var, Ih,
asu_miller_index, loc_indices and dataset_id.
block_selections: A list of flex.size_t arrays of indices, that can be
used to select and reorder data from the input reflection tables to
match the order in the Ih_table.
h_index_matrix: A sparse matrix used to sum over groups of equivalent
reflections by multiplication. Sum_h I = I * h_index_matrix. The
dimension is n_refl by n_groups; each row has a single nonzero
entry with a value of 1.
h_expand_matrix: The transpose of the h_index_matrix, used to expand an
array of values for symmetry groups into an array of size n_refl.
derivatives: A matrix of derivatives of the reflections wrt the model
parameters.
"""
def __init__(self, n_groups: int, n_refl: int, n_datasets: int = 1):
"""Create empty datastructures to which data can later be added."""
self.Ih_table = pd.DataFrame()
self.block_selections = [None] * n_datasets
self.h_index_matrix = sparse.matrix(n_refl, n_groups)
self._setup_info = {"next_row": 0, "next_dataset": 0, "setup_complete": False}
self.dataset_info = {}
self.n_datasets = n_datasets
self.h_expand_matrix = None
self.derivatives = None
self.binner = None
self._csc_rows = np.array([], dtype=np.uint64).reshape((0,))
self._csc_cols = np.array([], dtype=np.uint64).reshape((0,))
self._csc_h_index_matrix = None
self._csc_h_expand_matrix = None
self._hkl = flex.miller_index([])
def add_data(
self,
dataset_id: int,
group_ids: np.array,
reflections: pd.DataFrame,
hkl: flex.miller_index,
) -> None:
"""
        Add data for a given dataset to this block.
Add data to the Ih_table, write data to the h_index_matrix and
add the loc indices to the block_selections list.
"""
assert not self._setup_info[
"setup_complete"
], """
No further data can be added to the IhTableBlock as setup marked complete."""
assert (
self._setup_info["next_row"] + len(group_ids) <= self.h_index_matrix.n_rows
), """
Not enough space left to add this data, please check for correct block initialisation."""
assert (
dataset_id == self._setup_info["next_dataset"]
), """
Datasets must be added in correct order: expected: {}, this dataset: {}""".format(
self._setup_info["next_dataset"],
dataset_id,
)
for i, id_ in enumerate(group_ids):
rowidx = i + self._setup_info["next_row"]
self.h_index_matrix[rowidx, int(id_)] = 1.0
cols = group_ids
rows = np.arange(
start=self._setup_info["next_row"],
stop=self._setup_info["next_row"] + group_ids.size,
dtype=np.uint64,
)
self._csc_cols = np.concatenate([self._csc_cols, cols])
self._csc_rows = np.concatenate([self._csc_rows, rows])
self._hkl.extend(hkl)
self.dataset_info[dataset_id] = {"start_index": self._setup_info["next_row"]}
self._setup_info["next_row"] += len(group_ids)
self._setup_info["next_dataset"] += 1
self.dataset_info[dataset_id]["end_index"] = self._setup_info["next_row"]
self.Ih_table = pd.concat([self.Ih_table, reflections], ignore_index=True)
if "loc_indices" in reflections:
self.block_selections[dataset_id] = reflections["loc_indices"].to_numpy()
else:
self.block_selections[dataset_id] = np.arange(
reflections.shape[0], dtype=np.uint64
)
if self._setup_info["next_dataset"] == len(self.block_selections):
self._complete_setup()
def _complete_setup(self) -> None:
"""Finish the setup of the Ih_table once all data has been added."""
self.h_index_matrix.compact()
assert (
self._setup_info["next_row"] == self.h_index_matrix.n_rows
), """
Not all rows of h_index_matrix appear to be filled in IhTableBlock setup."""
self.h_expand_matrix = self.h_index_matrix.transpose()
data = np.full(self._csc_cols.size, 1.0)
self._csc_h_index_matrix = csc_matrix((data, (self._csc_rows, self._csc_cols)))
self._csc_h_expand_matrix = self._csc_h_index_matrix.transpose()
self.weights = 1.0 / self.variances
self._setup_info["setup_complete"] = True
def group_multiplicities(self, output: str = "per_group") -> np.array:
"""Return the multiplicities of the symmetry groups."""
return self.sum_in_groups(np.full(self.size, 1.0), output=output)
def select(self, sel: np.array) -> "IhTableBlock":
"""Select a subset of the data, returning a new IhTableBlock object."""
Ih_table = self.Ih_table[sel]
Ih_table.reset_index(drop=True, inplace=True)
h_idx_sel = self.h_expand_matrix.select_columns(
flumpy.from_numpy(sel).iselection()
)
reduced_h_idx = h_idx_sel.transpose()
unity = flex.double(int(Ih_table.size), 1.0)
nz_col_sel = (unity * reduced_h_idx) > 0
h_index_matrix = reduced_h_idx.select_columns(nz_col_sel.iselection())
h_expand = h_index_matrix.transpose()
csc_h_idx_sel = self._csc_h_expand_matrix[:, sel]
csc_h_index_matrix = csc_h_idx_sel.transpose()[:, flumpy.to_numpy(nz_col_sel)]
csc_h_expand_matrix = csc_h_index_matrix.transpose()
newtable = IhTableBlock(n_groups=0, n_refl=0, n_datasets=self.n_datasets)
newtable.Ih_table = Ih_table
newtable._hkl = self._hkl.select(flumpy.from_numpy(sel))
newtable.h_expand_matrix = h_expand
newtable.h_index_matrix = h_index_matrix
newtable._csc_h_index_matrix = csc_h_index_matrix
newtable._csc_h_expand_matrix = csc_h_expand_matrix
newtable.block_selections = []
offset = 0
for i in range(newtable.n_datasets):
newtable.dataset_info[i] = {"start_index": offset}
block_sel_i = self.block_selections[i]
n_in_dataset_i = len(block_sel_i)
newtable.block_selections.append(
block_sel_i[sel[offset : offset + n_in_dataset_i]]
)
offset += n_in_dataset_i
newtable.dataset_info[i]["end_index"] = offset
return newtable
def select_on_groups(self, sel: np.array) -> "IhTableBlock":
"""Select a subset of the unique groups, returning a new IhTableBlock."""
reduced_h_idx = self._csc_h_index_matrix[:, sel]
unity = np.full(reduced_h_idx.shape[1], 1.0)
nz_row_sel = (unity * reduced_h_idx.transpose()) > 0
return self.select(nz_row_sel)
def calc_Ih(self) -> None:
"""Calculate the current best estimate for Ih for each reflection group."""
scale_factors = self.inverse_scale_factors
sumgsq = self.sum_in_groups(np.square(scale_factors) * self.weights)
sumgI = self.sum_in_groups(scale_factors * self.intensities * self.weights)
Ih = sumgI / sumgsq
self.Ih_table.loc[:, "Ih_values"] = Ih @ self._csc_h_expand_matrix
def update_weights(
self,
error_model: Optional[BasicErrorModel] = None,
dataset_id: Optional[int] = None,
) -> None:
"""Update the scaling weights based on an error model."""
if error_model:
if dataset_id is not None: # note the first dataset has an id of 0
sel = self.Ih_table["dataset_id"].to_numpy() == dataset_id
sigmaprimesq = error_model.update_variances(
self.variances[sel], self.intensities[sel]
)
self.Ih_table.loc[sel, "weights"] = 1.0 / sigmaprimesq
else:
sigmaprimesq = error_model.update_variances(
self.variances, self.intensities
)
self.Ih_table.loc[:, "weights"] = 1.0 / sigmaprimesq
else:
if dataset_id is not None: # note the first dataset has an id of 0
sel = self.Ih_table["dataset_id"].to_numpy() == dataset_id
self.Ih_table.loc[sel, "weights"] = 1.0 / self.variances[sel]
else:
self.Ih_table.loc[:, "weights"] = 1.0 / self.variances
def calc_nh(self) -> np.array:
"""Calculate the number of refls in the group to which the reflection belongs.
This is a vector of length n_refl."""
return self.sum_in_groups(np.full(self.size, 1.0), output="per_refl")
def match_Ih_values_to_target(self, target_Ih_table: IhTable) -> None:
"""
Use an Ih_table as a target to set Ih values in this table.
Given an Ih table as a target, the common reflections across the tables
are determined and the Ih_values are set to those of the target. If no
matching reflection is found, then the values are removed from the table.
"""
assert target_Ih_table.n_work_blocks == 1
target_asu_Ih_dict = dict(
zip(
target_Ih_table.blocked_data_list[0].asu_miller_index,
target_Ih_table.blocked_data_list[0].Ih_values,
)
)
new_Ih_values = np.zeros(self.size, dtype=float)
location_in_unscaled_array = 0
sorted_asu_indices, permuted = get_sorted_asu_indices(
self.asu_miller_index,
target_Ih_table.space_group,
anomalous=target_Ih_table.anomalous,
)
for j, miller_idx in enumerate(OrderedSet(sorted_asu_indices)):
n_in_group = self._csc_h_index_matrix.getcol(j).count_nonzero()
if miller_idx in target_asu_Ih_dict:
i = location_in_unscaled_array
new_Ih_values[np.arange(i, i + n_in_group, dtype=np.uint64)] = np.full(
n_in_group, target_asu_Ih_dict[miller_idx]
)
location_in_unscaled_array += n_in_group
self.Ih_table.loc[flumpy.to_numpy(permuted), "Ih_values"] = new_Ih_values
sel = self.Ih_values != 0.0
new_table = self.select(sel)
# now set attributes to update object
self.Ih_table = new_table.Ih_table
self.h_index_matrix = new_table.h_index_matrix
self.h_expand_matrix = new_table.h_expand_matrix
self.block_selections = new_table.block_selections
self._csc_h_expand_matrix = new_table._csc_h_expand_matrix
self._csc_h_index_matrix = new_table._csc_h_index_matrix
@property
def inverse_scale_factors(self) -> np.array:
"""The inverse scale factors of the reflections."""
return self.Ih_table["inverse_scale_factor"].to_numpy()
@inverse_scale_factors.setter
def inverse_scale_factors(self, new_scales: np.array) -> None:
if new_scales.size != self.size:
assert 0, """attempting to set a new set of scale factors of different
length than previous assignment: was {}, attempting {}""".format(
self.inverse_scale_factors.size,
new_scales.size,
)
else:
self.Ih_table.loc[:, "inverse_scale_factor"] = new_scales
@property
def variances(self) -> np.array:
"""The variances of the reflections."""
return self.Ih_table["variance"].to_numpy()
@variances.setter
def variances(self, new_variances: np.array) -> None:
assert new_variances.size == self.size
self.Ih_table.loc[:, "variance"] = new_variances
@property
def intensities(self) -> np.array:
"""The unscaled reflection intensities."""
return self.Ih_table["intensity"].to_numpy()
@intensities.setter
def intensities(self, new_intensities):
assert new_intensities.size == self.size
self.Ih_table.loc[:, "intensity"] = new_intensities
@property
def Ih_values(self) -> np.array:
"""The bset-estimated intensities of symmetry equivalent reflections."""
return self.Ih_table["Ih_values"].to_numpy()
@property
def weights(self) -> np.array:
"""The weights that will be used in scaling."""
return self.Ih_table["weights"].to_numpy()
@weights.setter
def weights(self, new_weights):
if new_weights.size != self.size:
assert 0, """attempting to set a new set of weights of different
length than previous assignment: was {}, attempting {}""".format(
self.size,
new_weights.size,
)
self.Ih_table.loc[:, "weights"] = new_weights
@property
def size(self) -> int:
"""Return the length of the stored Ih_table (a reflection table)."""
return self.Ih_table.shape[0]
@property
def n_groups(self) -> int:
"""Return the length of the stored Ih_table (a reflection table)."""
return self._csc_h_index_matrix.shape[1]
@property
def asu_miller_index(self) -> flex.miller_index:
"""Return the miller indices in the asymmetric unit."""
return self._hkl
def setup_binner(
self,
unit_cell: uctbx.unit_cell,
space_group: sgtbx.space_group,
n_resolution_bins: int,
) -> None:
"""Create a binner for the reflections contained in the table."""
ma = _reflection_table_to_iobs(
self.as_reflection_table(), unit_cell, space_group
)
# need d star sq step
d_star_sq = ma.d_star_sq().data()
d_star_sq_min = flex.min(d_star_sq)
d_star_sq_max = flex.max(d_star_sq)
span = d_star_sq_max - d_star_sq_min
relative_tolerance = 1e-6
d_star_sq_max += span * relative_tolerance
d_star_sq_min -= span * relative_tolerance
# Avoid a zero-size step that would otherwise anger the d_star_sq_step binner.
step = max((d_star_sq_max - d_star_sq_min) / n_resolution_bins, 0.004)
self.binner = ma.setup_binner_d_star_sq_step(
auto_binning=False,
d_max=uctbx.d_star_sq_as_d(d_star_sq_max),
d_min=uctbx.d_star_sq_as_d(d_star_sq_min),
d_star_sq_step=step,
)
def sum_in_groups(
self, array: Union[csc_matrix, np.array], output: str = "per_group"
) -> np.array:
"""
Sums an array object over the symmetry equivalent groups.
The array's final dimension must equal the size of the Ih_table.
"""
if output == "per_group":
return array @ self._csc_h_index_matrix
elif output == "per_refl": # return the summed quantity per reflection
return (array @ self._csc_h_index_matrix) @ self._csc_h_expand_matrix
else:
raise ValueError(
f"""Bad value for output= parameter
(value={output}, allowed values: per_group, per_refl)"""
)
def as_reflection_table(self) -> flex.reflection_table:
"""Return the data in flex reflection table format"""
table = flex.reflection_table()
table["asu_miller_index"] = self.asu_miller_index
for k, v in self.Ih_table.iteritems():
table[k] = flumpy.from_numpy(v.to_numpy())
return table
def _reflection_table_to_iobs(
table: flex.reflection_table,
unit_cell: uctbx.unit_cell,
space_group: sgtbx.space_group,
) -> miller.array:
miller_set = miller.set(
crystal_symmetry=crystal.symmetry(
unit_cell=unit_cell,
space_group=space_group,
assert_is_compatible_unit_cell=False,
),
indices=table["asu_miller_index"],
anomalous_flag=False,
)
i_obs = miller.array(
miller_set, data=table["intensity"] / table["inverse_scale_factor"]
)
i_obs.set_observation_type_xray_intensity()
i_obs.set_sigmas(flex.sqrt(table["variance"]) / table["inverse_scale_factor"])
i_obs.set_info(miller.array_info(source="DIALS", source_type="reflection_tables"))
return i_obs
 | dials/dials | algorithms/scaling/Ih_table.py | Python | bsd-3-clause | 37339 | ["CRYSTAL"] | 7e38c929e44a3053ed92dcf043952a5e62c14f998c054d5d1cd5d28ba1cdecd2 |
# Ubuntu Tweak - Ubuntu Configuration Tool
#
# Copyright (C) 2007-2011 Tualatrix Chou <[email protected]>
#
# Ubuntu Tweak is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Ubuntu Tweak is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ubuntu Tweak; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
import os
import json
import thread
import logging
from gi.repository import Gtk, GdkPixbuf
from gi.repository import GObject
from gi.repository import Pango
from xdg.DesktopEntry import DesktopEntry
from ubuntutweak.common import consts
from ubuntutweak.common.debug import log_func
from ubuntutweak.modules import TweakModule
from ubuntutweak.gui.dialogs import ErrorDialog, InfoDialog, QuestionDialog
from ubuntutweak.gui.dialogs import ProcessDialog
from ubuntutweak.gui.gtk import post_ui, set_busy, unset_busy
from ubuntutweak.utils.parser import Parser
from ubuntutweak.network import utdata
from ubuntutweak.network.downloadmanager import DownloadDialog
from ubuntutweak.settings.gsettings import GSetting
from ubuntutweak.utils import set_label_for_stock_button, icon
from ubuntutweak.utils.package import AptWorker
from ubuntutweak.apps import CategoryView
log = logging.getLogger("AppCenter")
APPCENTER_ROOT = os.path.join(consts.CONFIG_ROOT, 'appcenter')
APP_VERSION_URL = utdata.get_version_url('/appcenter_version/')
UPDATE_SETTING = GSetting(key='com.ubuntu-tweak.tweak.appcenter-has-update', type=bool)
VERSION_SETTING = GSetting(key='com.ubuntu-tweak.tweak.appcenter-version', type=str)
def get_app_data_url():
return utdata.get_download_url('/media/utdata/appcenter-%s.tar.gz' %
VERSION_SETTING.get_value())
if not os.path.exists(APPCENTER_ROOT):
os.mkdir(APPCENTER_ROOT)
class PackageInfo:
DESKTOP_DIR = '/usr/share/app-install/desktop/'
def __init__(self, name):
self.name = name
self.pkg = AptWorker.get_cache()[name]
self.desktopentry = DesktopEntry(self.DESKTOP_DIR + name + '.desktop')
def check_installed(self):
return self.pkg.isInstalled
def get_comment(self):
return self.desktopentry.getComment()
def get_name(self):
appname = self.desktopentry.getName()
if appname == '':
return self.name.title()
return appname
def get_version(self):
try:
return self.pkg.versions[0].version
except:
return ''
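# A minimal usage sketch (the package name is purely illustrative):
#
#   >>> info = PackageInfo('gimp')
#   >>> info.check_installed()
#   >>> info.get_name(), info.get_comment(), info.get_version()
#
# Both an apt cache entry and a matching desktop file under
# /usr/share/app-install/desktop/ must exist for the lookup to succeed.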
class StatusProvider(object):
def __init__(self, name):
self._path = os.path.join(consts.CONFIG_ROOT, name)
self._is_init = False
try:
self._data = json.loads(open(self._path).read())
except:
log.debug('No Status data available, set init to True')
self._data = {'apps': {}, 'cates': {}}
self._is_init = True
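        # The persisted status file is a JSON document shaped roughly like:
        #   {"apps": {"<pkg>": {"read": true, "cate": "<category id>"}, ...},
        #    "cates": {"<category id>": <unread count>, ...}}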
def set_init(self, active):
self._is_init = active
def get_init(self):
return self._is_init
def get_data(self):
return self._data
def save(self):
file = open(self._path, 'w')
file.write(json.dumps(self._data))
file.close()
def load_objects_from_parser(self, parser):
init = self.get_init()
for key in parser.keys():
#FIXME because of source id
if init:
self._data['apps'][key] = {}
self._data['apps'][key]['read'] = True
self._data['apps'][key]['cate'] = parser.get_category(key)
else:
if key not in self._data['apps']:
self._data['apps'][key] = {}
self._data['apps'][key]['read'] = False
self._data['apps'][key]['cate'] = parser.get_category(key)
if init and parser.keys():
self.set_init(False)
self.save()
def count_unread(self, cate):
i = 0
for key in self._data['apps']:
if self._data['apps'][key]['cate'] == cate and not self._data['apps'][key]['read']:
i += 1
return i
def load_category_from_parser(self, parser):
for cate in parser.keys():
id = parser.get_id(cate)
if self._is_init:
self._data['cates'][id] = 0
else:
self._data['cates'][id] = self.count_unread(id)
self._is_init = False
self.save()
def get_cate_unread_count(self, id):
return self.count_unread(id)
def get_read_status(self, key):
try:
return self._data['apps'][key]['read']
except:
return True
def set_as_read(self, key):
try:
self._data['apps'][key]['read'] = True
except:
pass
self.save()
class AppParser(Parser):
def __init__(self):
app_data = os.path.join(APPCENTER_ROOT, 'apps.json')
Parser.__init__(self, app_data, 'package')
def get_summary(self, key):
return self.get_by_lang(key, 'summary')
def get_name(self, key):
return self.get_by_lang(key, 'name')
def get_category(self, key):
return self[key]['category']
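# The apps.json data parsed above is keyed by package name; each entry is
# expected to carry at least a 'category' field plus localised 'name' and
# 'summary' fields (resolved through Parser.get_by_lang).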
class AppCategoryView(CategoryView):
def pre_update_cate_model(self):
self.model.append(None, (-1,
'installed-apps',
_('Installed Apps')))
class AppView(Gtk.TreeView):
__gsignals__ = {
'changed': (GObject.SignalFlags.RUN_FIRST,
None,
(GObject.TYPE_INT,)),
'select': (GObject.SignalFlags.RUN_FIRST,
None,
(GObject.TYPE_BOOLEAN,))
}
(COLUMN_INSTALLED,
COLUMN_ICON,
COLUMN_PKG,
COLUMN_NAME,
COLUMN_DESC,
COLUMN_DISPLAY,
COLUMN_CATE,
COLUMN_TYPE,
) = range(8)
def __init__(self):
GObject.GObject.__init__(self)
self.to_add = []
self.to_rm = []
self.filter = None
self._status = None
model = self._create_model()
self._add_columns()
self.set_model(model)
self.set_rules_hint(True)
self.set_search_column(self.COLUMN_NAME)
self.show_all()
def _create_model(self):
model = Gtk.ListStore(
GObject.TYPE_BOOLEAN,
GdkPixbuf.Pixbuf,
GObject.TYPE_STRING,
GObject.TYPE_STRING,
GObject.TYPE_STRING,
GObject.TYPE_STRING,
GObject.TYPE_STRING,
GObject.TYPE_STRING)
return model
def sort_model(self):
model = self.get_model()
model.set_sort_column_id(self.COLUMN_NAME, Gtk.SortType.ASCENDING)
def _add_columns(self):
renderer = Gtk.CellRendererToggle()
renderer.set_property("xpad", 6)
renderer.connect('toggled', self.on_install_toggled)
column = Gtk.TreeViewColumn('', renderer, active=self.COLUMN_INSTALLED)
column.set_sort_column_id(self.COLUMN_INSTALLED)
self.append_column(column)
column = Gtk.TreeViewColumn('Applications')
column.set_sort_column_id(self.COLUMN_NAME)
column.set_spacing(5)
renderer = Gtk.CellRendererPixbuf()
column.pack_start(renderer, False)
column.set_cell_data_func(renderer, self.icon_column_view_func)
column.add_attribute(renderer, 'pixbuf', self.COLUMN_ICON)
renderer = Gtk.CellRendererText()
renderer.set_property("xpad", 6)
renderer.set_property("ypad", 6)
renderer.set_property('ellipsize', Pango.EllipsizeMode.END)
column.pack_start(renderer, True)
column.add_attribute(renderer, 'markup', self.COLUMN_DISPLAY)
self.append_column(column)
def set_as_read(self, iter, model):
package = model.get_value(iter, self.COLUMN_PKG)
if self._status and not self._status.get_read_status(package):
appname = model.get_value(iter, self.COLUMN_NAME)
desc = model.get_value(iter, self.COLUMN_DESC)
self._status.set_as_read(package)
model.set_value(iter, self.COLUMN_DISPLAY, '<b>%s</b>\n%s' % (appname, desc))
def icon_column_view_func(self, tree_column, renderer, model, iter, data=None):
pixbuf = model.get_value(iter, self.COLUMN_ICON)
if pixbuf == None:
renderer.set_property("visible", False)
else:
renderer.set_property("visible", True)
def append_update(self, status, pkgname, summary):
model = self.get_model()
icontheme = Gtk.IconTheme.get_default()
for icon_name in ['application-x-deb', 'package-x-generic', 'package']:
icon_theme = icontheme.lookup_icon(icon_name,
size=32,
flags=Gtk.IconLookupFlags.NO_SVG)
if icon_theme:
break
if icon_theme:
pixbuf = icon_theme.load_icon()
else:
pixbuf = icon.get_from_name(size=32)
iter = model.append()
model.set(iter,
self.COLUMN_INSTALLED, status,
self.COLUMN_ICON, pixbuf,
self.COLUMN_PKG, pkgname,
self.COLUMN_NAME, pkgname,
self.COLUMN_DESC, summary,
self.COLUMN_DISPLAY, '<b>%s</b>\n%s' % (pkgname, summary),
self.COLUMN_TYPE, 'update')
def set_status_active(self, active):
if active:
self._status = StatusProvider('appstatus.json')
def get_status(self):
return self._status
@log_func(log)
def update_model(self, apps=None, only_installed=False):
        '''apps is an optional list of package names to iterate over.'''
model = self.get_model()
model.clear()
app_parser = AppParser()
if self._status:
self._status.load_objects_from_parser(app_parser)
if not apps:
apps = app_parser.keys()
for pkgname in apps:
category = app_parser.get_category(pkgname)
pixbuf = self.get_app_logo(app_parser[pkgname]['logo'])
try:
package = PackageInfo(pkgname)
is_installed = package.check_installed()
if not is_installed and only_installed:
continue
appname = package.get_name()
desc = app_parser.get_summary(pkgname)
except Exception, e:
# Confirm the invalid package isn't in the count
# But in the future, Ubuntu Tweak should display the invalid package too
if self._status and not self._status.get_read_status(pkgname):
self._status.set_as_read(pkgname)
continue
if self.filter == None or self.filter == category:
iter = model.append()
if pkgname in self.to_add or pkgname in self.to_rm:
status = not is_installed
display = self.__fill_changed_display(appname, desc)
else:
status = is_installed
if self._status and not self._status.get_read_status(pkgname):
display = '<b>%s <span foreground="#ff0000">(New!!!)</span>\n%s</b>' % (appname, desc)
else:
display = '<b>%s</b>\n%s' % (appname, desc)
model.set(iter,
self.COLUMN_INSTALLED, status,
self.COLUMN_ICON, pixbuf,
self.COLUMN_PKG, pkgname,
self.COLUMN_NAME, appname,
self.COLUMN_DESC, desc,
self.COLUMN_DISPLAY, display,
self.COLUMN_CATE, str(category),
self.COLUMN_TYPE, 'app')
def __fill_changed_display(self, appname, desc):
return '<span style="italic" weight="bold"><b>%s</b>\n%s</span>' % (appname, desc)
def on_install_toggled(self, cell, path):
def do_app_changed(model, iter, appname, desc):
model.set(iter,
self.COLUMN_DISPLAY, self.__fill_changed_display(appname, desc))
def do_app_unchanged(model, iter, appname, desc):
model.set(iter,
self.COLUMN_DISPLAY,
'<b>%s</b>\n%s' % (appname, desc))
model = self.get_model()
iter = model.get_iter((int(path),))
is_installed = model.get_value(iter, self.COLUMN_INSTALLED)
pkgname = model.get_value(iter, self.COLUMN_PKG)
appname = model.get_value(iter, self.COLUMN_NAME)
desc = model.get_value(iter, self.COLUMN_DESC)
type = model.get_value(iter, self.COLUMN_TYPE)
if pkgname:
if type == 'app':
is_installed = not is_installed
if is_installed:
if pkgname in self.to_rm:
self.to_rm.remove(pkgname)
do_app_unchanged(model, iter, appname, desc)
else:
self.to_add.append(pkgname)
do_app_changed(model, iter, appname, desc)
else:
if pkgname in self.to_add:
self.to_add.remove(pkgname)
do_app_unchanged(model, iter, appname, desc)
else:
self.to_rm.append(pkgname)
do_app_changed(model, iter, appname, desc)
model.set(iter, self.COLUMN_INSTALLED, is_installed)
else:
                to_installed = not is_installed
                if to_installed:
self.to_add.append(pkgname)
else:
self.to_add.remove(pkgname)
model.set(iter, self.COLUMN_INSTALLED, to_installed)
self.emit('changed', len(self.to_add) + len(self.to_rm))
else:
model.set(iter, self.COLUMN_INSTALLED, not is_installed)
self.emit('select', not is_installed)
@log_func(log)
def set_filter(self, filter):
self.filter = filter
def get_app_logo(self, file_name):
path = os.path.join(APPCENTER_ROOT, file_name)
if not os.path.exists(path) or file_name == '':
path = os.path.join(consts.DATA_DIR, 'pixmaps/common-logo.png')
try:
pixbuf = GdkPixbuf.Pixbuf.new_from_file(path)
if pixbuf.get_width() != 32 or pixbuf.get_height() != 32:
pixbuf = pixbuf.scale_simple(32, 32, GdkPixbuf.InterpType.BILINEAR)
return pixbuf
except:
return Gtk.IconTheme.get_default().load_icon(Gtk.STOCK_MISSING_IMAGE, 32, 0)
class CheckUpdateDialog(ProcessDialog):
def __init__(self, parent, url):
self.status = None
self.done = False
self.error = None
self.user_action = False
self.url = url
super(CheckUpdateDialog, self).__init__(parent=parent)
self.set_dialog_lable(_('Checking update...'))
def run(self):
thread.start_new_thread(self.process_data, ())
GObject.timeout_add(100, self.on_timeout)
return super(CheckUpdateDialog, self).run()
def process_data(self):
import time
time.sleep(1)
try:
self.status = self.get_updatable()
except IOError:
self.error = True
else:
self.done = True
def get_updatable(self):
return utdata.check_update_function(self.url, APPCENTER_ROOT, \
UPDATE_SETTING, VERSION_SETTING, \
auto=False)
def on_timeout(self):
self.pulse()
if self.error:
self.destroy()
elif not self.done:
return True
else:
self.destroy()
class FetchingDialog(DownloadDialog):
def __init__(self, url, parent=None):
super(FetchingDialog, self).__init__(url=url,
title=_('Fetching online data...'),
parent=parent)
log.debug("Will start to download online data from: %s", url)
class AppCenter(TweakModule):
__title__ = _('Application Center')
__desc__ = _('A simple but efficient way for finding and installing popular applications.')
__icon__ = 'gnome-app-install'
__url__ = 'http://ubuntu-tweak.com/app/'
__urltitle__ = _('Visit Online Application Center')
__category__ = 'application'
__utactive__ = True
def __init__(self):
TweakModule.__init__(self, 'appcenter.ui')
set_label_for_stock_button(self.sync_button, _('_Sync'))
self.to_add = []
self.to_rm = []
self.url = APP_VERSION_URL
self.appview = AppView()
self.appview.set_status_active(True)
self.appview.update_model()
self.appview.sort_model()
self.appview.connect('changed', self.on_app_status_changed)
self.app_selection = self.appview.get_selection()
self.app_selection.connect('changed', self.on_app_selection)
self.right_sw.add(self.appview)
self.cateview = AppCategoryView(os.path.join(APPCENTER_ROOT, 'cates.json'))
self.cateview.set_status_from_view(self.appview)
self.cateview.update_cate_model()
self.cate_selection = self.cateview.get_selection()
self.cate_selection.connect('changed', self.on_category_changed)
self.left_sw.add(self.cateview)
self.update_timestamp()
self.show_all()
UPDATE_SETTING.set_value(False)
UPDATE_SETTING.connect_notify(self.on_have_update, data=None)
thread.start_new_thread(self.check_update, ())
GObject.timeout_add(60000, self.update_timestamp)
self.add_start(self.main_vbox)
self.connect('realize', self.setup_ui_tasks)
def setup_ui_tasks(self, widget):
self.cateview.expand_all()
def update_timestamp(self):
self.time_label.set_text(_('Last synced:') + ' ' + utdata.get_last_synced(APPCENTER_ROOT))
return True
@post_ui
def on_have_update(self, *args):
log.debug("on_have_update")
if UPDATE_SETTING.get_value():
dialog = QuestionDialog(_('New application data available, would you like to update?'))
response = dialog.run()
dialog.destroy()
if response == Gtk.ResponseType.YES:
dialog = FetchingDialog(get_app_data_url(), self.get_toplevel())
dialog.connect('destroy', self.on_app_data_downloaded)
dialog.run()
dialog.destroy()
def check_update(self):
try:
return utdata.check_update_function(self.url, APPCENTER_ROOT, \
UPDATE_SETTING, VERSION_SETTING, \
auto=True)
except Exception, error:
log.error(error)
def on_app_selection(self, widget, data=None):
model, iter = widget.get_selected()
if iter:
appview = widget.get_tree_view()
appview.set_as_read(iter, model)
self.cateview.update_selected_item()
@log_func(log)
def on_category_changed(self, widget, data=None):
model, iter = widget.get_selected()
cateview = widget.get_tree_view()
if iter:
path = model.get_path(iter).to_string()
only_installed = False
if path == '0':
only_installed = True
self.appview.set_filter(None)
elif path == '1':
self.appview.set_filter(None)
else:
self.appview.set_filter(model[iter][cateview.CATE_ID])
self.appview.update_model(only_installed=only_installed)
def deep_update(self):
self.package_worker.update_apt_cache(True)
self.update_app_data()
def on_apply_button_clicked(self, widget, data=None):
@log_func(log)
def on_install_finished(transaction, status, kwargs):
to_add, to_rm = kwargs['add_and_rm']
if to_rm:
worker = AptWorker(self.get_toplevel(),
finish_handler=self.on_package_work_finished,
data=kwargs)
worker.remove_packages(to_rm)
else:
self.on_package_work_finished(None, None, kwargs)
to_rm = self.appview.to_rm
to_add = self.appview.to_add
log.debug("on_apply_button_clicked: to_rm: %s, to_add: %s" % (to_rm, to_add))
if to_add or to_rm:
set_busy(self)
if to_add:
worker = AptWorker(self.get_toplevel(),
finish_handler=on_install_finished,
data={'add_and_rm': (to_add, to_rm),
'parent': self})
worker.install_packages(to_add)
else:
on_install_finished(None, None,
{'add_and_rm': (to_add, to_rm),
'parent': self})
@log_func(log)
def on_package_work_finished(self, transaction, status, kwargs):
to_add, to_rm = kwargs['add_and_rm']
parent = kwargs['parent']
AptWorker.update_apt_cache(init=True)
self.emit('call', 'ubuntutweak.modules.updatemanager', 'update_list', {})
self.appview.to_add = []
self.appview.to_rm = []
self.on_category_changed(self.cateview.get_selection())
self.apply_button.set_sensitive(False)
unset_busy(parent)
def on_sync_button_clicked(self, widget):
dialog = CheckUpdateDialog(widget.get_toplevel(), self.url)
dialog.run()
dialog.destroy()
if dialog.status == True:
dialog = QuestionDialog(_("Update available, would you like to update?"))
response = dialog.run()
dialog.destroy()
if response == Gtk.ResponseType.YES:
dialog = FetchingDialog(get_app_data_url(), self.get_toplevel())
dialog.connect('destroy', self.on_app_data_downloaded)
dialog.run()
dialog.destroy()
elif dialog.error == True:
ErrorDialog(_("Network Error, please check your network connection - or the remote server may be down.")).launch()
else:
utdata.save_synced_timestamp(APPCENTER_ROOT)
self.update_timestamp()
InfoDialog(_("No update available.")).launch()
def on_app_data_downloaded(self, widget):
log.debug("on_app_data_downloaded")
path = widget.get_downloaded_file()
tarfile = utdata.create_tarfile(path)
if tarfile.is_valid():
tarfile.extract(consts.CONFIG_ROOT)
self.update_app_data()
utdata.save_synced_timestamp(APPCENTER_ROOT)
self.update_timestamp()
else:
ErrorDialog(_('An error occurred while downloading the file.')).launch()
def update_app_data(self):
self.appview.update_model()
self.cateview.update_cate_model()
self.cateview.expand_all()
def on_app_status_changed(self, widget, i):
if i:
self.apply_button.set_sensitive(True)
else:
self.apply_button.set_sensitive(False)
 | 0x7E/ubuntu-tweak | ubuntutweak/admins/appcenter.py | Python | gpl-2.0 | 24249 | ["VisIt"] | 214b8aad5643e604d649f7e2d02ab14c50e1dc82818f73a93b741b8f6b64f8e2 |
# -*- coding: utf-8 -*-
#
# This file is part of cclib (http://cclib.github.io), a library for parsing
# and interpreting the results of computational chemistry packages.
#
# Copyright (C) 2014-2015, the cclib development team
#
# The library is free software, distributed under the terms of
# the GNU Lesser General Public version 2.1 or later. You should have
# received a copy of the license along with cclib. You can also access
# the full license online at http://www.gnu.org/copyleft/lgpl.html.
"""Parser for MOPAC output files"""
from __future__ import print_function
import re
import numpy
import itertools
from . import logfileparser
from . import utils
class MOPAC(logfileparser.Logfile):
"""A MOPAC 4 log file."""
def __init__(self, *args, **kwargs):
# Call the __init__ method of the superclass
super(MOPAC, self).__init__(logname="MOPAC", *args, **kwargs)
def __str__(self):
"""Return a string representation of the object."""
return "MOPAC log file %s" % (self.filename)
def __repr__(self):
"""Return a representation of the object."""
return 'MOPAC("%s")' % (self.filename)
def normalisesym(self, label):
"""MOPAC does not require normalizing symmetry labels."""
def before_parsing(self):
#TODO
# Defaults
charge = 0
self.set_attribute('charge', charge)
mult = 1
self.set_attribute('mult', mult)
# Keep track of whether or not we're performing an
# (un)restricted calculation.
self.unrestricted = False
self.is_rohf = False
# Keep track of 1SCF vs. gopt since gopt is default
self.onescf = False
self.geomdone = False
# Compile the dashes-and-or-spaces-only regex.
self.re_dashes_and_spaces = re.compile('^[\s-]+$')
self.star = ' * '
self.stars = ' *******************************************************************************'
self.spinstate = {'SINGLET': 1,
'DOUBLET': 2,
'TRIPLET': 3,
'QUARTET': 4,
'QUINTET': 5,
'SEXTET': 6,
'HEPTET': 7,
'OCTET': 8,
'NONET': 9}
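        # When the CALCULATION DONE block is parsed below, the second token of
        # the "SPIN STATE DEFINED AS A ..." banner line (e.g. SINGLET) is
        # looked up in this dict to give the integer multiplicity.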
def after_parsing(self):
#TODO
"""
# If parsing a fragment job, each of the geometries appended to
# `atomcoords` may be of different lengths, which will prevent
# conversion from a list to NumPy array.
# Take the length of the first geometry as correct, and remove
# all others with different lengths.
if len(self.atomcoords) > 1:
correctlen = len(self.atomcoords[0])
self.atomcoords[:] = [coords for coords in self.atomcoords
if len(coords) == correctlen]
# At the moment, there is no similar correction for other properties!
# MOPAC does not print all MO coefficients by default, but rather
# up to HOMO+5. So, fill up the missing values with NaNs. If there are
# other cases where coefficient are missing, but different ones, this
# general afterthought might not be appropriate and the fix will
# need to be done while parsing.
if hasattr(self, 'mocoeffs'):
for im in range(len(self.mocoeffs)):
_nmo, _nbasis = self.mocoeffs[im].shape
if (_nmo, _nbasis) != (self.nmo, self.nbasis):
coeffs = numpy.empty((self.nmo, self.nbasis))
coeffs[:] = numpy.nan
coeffs[0:_nmo, 0:_nbasis] = self.mocoeffs[im]
self.mocoeffs[im] = coeffs
# When parsing the 'MOLECULAR ORBITAL COEFFICIENTS' block for
# `aonames`, MOPAC doesn't print the principal quantum number
# for each shell; this needs to be added.
if hasattr(self, 'aonames') and hasattr(self, 'atombasis'):
angmom = ('', 'S', 'P', 'D', 'F', 'G', 'H', 'I')
for atom in self.atombasis:
bfcounts = dict()
for bfindex in atom:
atomname, bfname = self.aonames[bfindex].split('_')
# Keep track of how many times each shell type has
# appeared.
if bfname in bfcounts:
bfcounts[bfname] += 1
else:
# Make sure the starting number for type of
# angular momentum begins at the appropriate
# principal quantum number (1S, 2P, 3D, 4F,
# ...).
bfcounts[bfname] = angmom.index(bfname[0])
newbfname = '{}{}'.format(bfcounts[bfname], bfname)
self.aonames[bfindex] = '_'.join([atomname, newbfname])
"""
def extract(self, inputfile, line):
"""Extract information from the file object inputfile."""
if self.stars in line:
line = inputfile.next()
if ' * CALCULATION DONE:' in line:
# Calculation Information
while self.stars not in line:
if 'CHARGE ON SYSTEM =' in line:
charge = int(line.split()[5])
self.set_attribute('charge', charge)
line = inputfile.next()
if 'SPIN STATE DEFINED AS A' in line:
                        mult = self.spinstate[line.split()[1]]
self.set_attribute('mult', mult)
if '1SCF' in line:
self.onescf = True
# Input Fields
#FIXME We assume inputs are not extended past 2 line default
line = inputfile.next()
inputs = line.split()
line = inputfile.next()
inputs.extend(line.split())
inputs = [x.upper() for x in inputs]
"""
if line[0:11] == 'User input:':
self.skip_line(inputfile, 'd')
while list(set(line.strip())) != ['-']:
if '$rem' in line:
while '$end' not in line:
line = next(inputfile)
if 'print_orbitals' in line.lower():
# Stay with the default value if a number isn't
# specified.
if line.split()[-1].lower() in ('true', 'false'):
continue
else:
norbdisp_aonames = int(line.split()[-1])
self.norbdisp_alpha_aonames = norbdisp_aonames
self.norbdisp_beta_aonames = norbdisp_aonames
self.norbdisp_set = True
# Charge and multiplicity are present in the input file, which is generally
        # printed once at the beginning. However, it is also printed for fragment
        # calculations, so make sure we parse only the first occurrence.
if '$molecule' in line:
line = next(inputfile)
charge, mult = map(int, line.split())
if not hasattr(self, 'charge'):
self.set_attribute('charge', charge)
if not hasattr(self, 'mult'):
self.set_attribute('mult', mult)
line = next(inputfile)
# Parse the general basis for `gbasis`, in the style used by
# Gaussian.
if 'Basis set in general basis input format:' in line:
self.skip_lines(inputfile, ['d', '$basis'])
line = next(inputfile)
if not hasattr(self, 'gbasis'):
self.gbasis = []
# The end of the general basis block.
while '$end' not in line:
atom = []
# 1. Contains element symbol and atomic index of
# basis functions; if 0, applies to all atoms of
# same element.
assert len(line.split()) == 2
line = next(inputfile)
# The end of each atomic block.
while '****' not in line:
# 2. Contains the type of basis function {S, SP,
# P, D, F, G, H, ...}, the number of primitives,
# and the weight of the final contracted function.
bfsplitline = line.split()
assert len(bfsplitline) == 3
bftype = bfsplitline[0]
nprim = int(bfsplitline[1])
line = next(inputfile)
# 3. The primitive basis functions that compose
# the contracted basis function; there are `nprim`
# of them. The first value is the exponent, and
# the second value is the contraction
# coefficient. If `bftype == 'SP'`, the primitives
# are for both S- and P-type basis functions but
# with separate contraction coefficients,
# resulting in three columns.
if bftype == 'SP':
primitives_S = []
primitives_P = []
else:
primitives = []
# For each primitive in the contracted basis
# function...
for iprim in range(nprim):
primsplitline = line.split()
exponent = float(primsplitline[0])
if bftype == 'SP':
assert len(primsplitline) == 3
coefficient_S = float(primsplitline[1])
coefficient_P = float(primsplitline[2])
primitives_S.append((exponent, coefficient_S))
primitives_P.append((exponent, coefficient_P))
else:
assert len(primsplitline) == 2
coefficient = float(primsplitline[1])
primitives.append((exponent, coefficient))
line = next(inputfile)
if bftype == 'SP':
bf_S = ('S', primitives_S)
bf_P = ('P', primitives_P)
atom.append(bf_S)
atom.append(bf_P)
else:
bf = (bftype, primitives)
atom.append(bf)
# Move to the next contracted basis function
# as long as we don't hit the '****' atom
# delimiter.
self.gbasis.append(atom)
line = next(inputfile)
"""
# Extract the atomic numbers and coordinates of the atoms.
if not self.geomdone:
if 'NUMBER SYMBOL (ANGSTROMS) (ANGSTROMS) (ANGSTROMS)' in line:
if not hasattr(self, 'atomcoords'):
self.atomcoords = []
if self.onescf:
self.geomdone = True
line = next(inputfile)
line = next(inputfile)
atomelements = []
atomcoords = []
entry = line.split()
while entry:
atomelements.append(entry[1])
atomcoords.append(list(map(float, entry[2::2])))
line = next(inputfile)
entry = line.split()
self.atomcoords.append(atomcoords)
if not hasattr(self, 'atomnos'):
self.atomnos = []
self.atomelements = []
for atomelement in atomelements:
self.atomelements.append(atomelement)
self.atomnos.append(utils.PeriodicTable().number[atomelement])
self.natom = len(self.atomnos)
self.atommap = self.generate_atom_map()
self.formula_histogram = self.generate_formula_histogram()
"""
# Number of electrons.
# Useful for determining the number of occupied/virtual orbitals.
if 'Nuclear Repulsion Energy' in line:
if not hasattr(self, 'nalpha'):
line = next(inputfile)
nelec_re_string = 'There are(\s+[0-9]+) alpha and(\s+[0-9]+) beta electrons'
match = re.findall(nelec_re_string, line.strip())
self.nalpha = int(match[0][0].strip())
self.nbeta = int(match[0][1].strip())
self.norbdisp_alpha += self.nalpha
self.norbdisp_alpha_aonames += self.nalpha
self.norbdisp_beta += self.nbeta
self.norbdisp_beta_aonames += self.nbeta
# Number of basis functions.
# Because MOPAC's integral recursion scheme is defined using
# Cartesian basis functions, there is often a distinction between the
# two in the output. We only parse for *pure* functions.
# Examples:
# Only one type:
# There are 30 shells and 60 basis functions
# Both Cartesian and pure:
# ...
if 'basis functions' in line:
if not hasattr(self, 'nbasis'):
self.set_attribute('nbasis', int(line.split()[-3]))
        # Check whether or not we're performing an
# (un)restricted calculation.
if 'calculation will be' in line:
if ' restricted' in line:
self.unrestricted = False
if 'unrestricted' in line:
self.unrestricted = True
if hasattr(self, 'nalpha') and hasattr(self, 'nbeta'):
if self.nalpha != self.nbeta:
self.unrestricted = True
self.is_rohf = True
# Section with SCF iterations goes like this:
#
# SCF converges when DIIS error is below 1.0E-05
# ---------------------------------------
# Cycle Energy DIIS Error
# ---------------------------------------
# 1 -381.9238072190 1.39E-01
# 2 -382.2937212775 3.10E-03
# 3 -382.2939780242 3.37E-03
# ...
#
scf_success_messages = (
'Convergence criterion met',
'corrected energy'
)
scf_failure_messages = (
'SCF failed to converge',
'Convergence failure'
)
if 'SCF converges when ' in line:
if not hasattr(self, 'scftargets'):
self.scftargets = []
target = float(line.split()[-1])
self.scftargets.append([target])
# We should have the header between dashes now,
# but sometimes there are lines before the first dashes.
while not 'Cycle Energy' in line:
line = next(inputfile)
self.skip_line(inputfile, 'd')
values = []
iter_counter = 1
line = next(inputfile)
while not any(message in line for message in scf_success_messages):
# Some trickery to avoid a lot of printing that can occur
# between each SCF iteration.
entry = line.split()
if len(entry) > 0:
if entry[0] == str(iter_counter):
# MOPAC only outputs one error metric.
error = float(entry[2])
values.append([error])
iter_counter += 1
line = next(inputfile)
# We've converged, but still need the last iteration.
if any(message in line for message in scf_success_messages):
entry = line.split()
error = float(entry[2])
values.append([error])
iter_counter += 1
# This is printed in regression MOPAC4.2/dvb_sp_unconverged.out
# so use it to bail out when convergence fails.
if any(message in line for message in scf_failure_messages):
break
if not hasattr(self, 'scfvalues'):
self.scfvalues = []
self.scfvalues.append(numpy.array(values))
# Molecular orbital coefficients.
# Try parsing them from this block (which comes from
# `scf_final_print = 2``) rather than the combined
# aonames/mocoeffs/moenergies block (which comes from
# `print_orbitals = true`).
if 'Final Alpha MO Coefficients' in line:
if not hasattr(self, 'mocoeffs'):
self.mocoeffs = []
mocoeffs = numpy.empty(shape=(self.nbasis, self.norbdisp_alpha))
self.parse_matrix(inputfile, mocoeffs)
self.mocoeffs.append(mocoeffs.transpose())
if 'Final Beta MO Coefficients' in line:
mocoeffs = numpy.empty(shape=(self.nbasis, self.norbdisp_beta))
self.parse_matrix(inputfile, mocoeffs)
self.mocoeffs.append(mocoeffs.transpose())
if 'Total energy in the final basis set' in line:
if not hasattr(self, 'scfenergies'):
self.scfenergies = []
scfenergy = float(line.split()[-1])
self.scfenergies.append(utils.convertor(scfenergy, 'hartree', 'eV'))
# Geometry optimization.
if 'Maximum Tolerance Cnvgd?' in line:
line_g = list(map(float, next(inputfile).split()[1:3]))
line_d = list(map(float, next(inputfile).split()[1:3]))
line_e = next(inputfile).split()[2:4]
if not hasattr(self, 'geotargets'):
self.geotargets = [line_g[1], line_d[1], self.float(line_e[1])]
if not hasattr(self, 'geovalues'):
self.geovalues = []
try:
ediff = abs(self.float(line_e[0]))
except ValueError:
ediff = numpy.nan
geovalues = [line_g[0], line_d[0], ediff]
self.geovalues.append(geovalues)
if '** OPTIMIZATION CONVERGED **' in line:
if not hasattr(self, 'optdone'):
self.optdone = []
self.optdone.append(len(self.atomcoords))
if '** MAXIMUM OPTIMIZATION CYCLES REACHED **' in line:
if not hasattr(self, 'optdone'):
self.optdone = []
# Moller-Plesset corrections.
# There are multiple modules in MOPAC for calculating MPn energies:
# cdman, ccman, and ccman2, all with different output.
#
# MP2, RI-MP2, and local MP2 all default to cdman, which has a simple
# block of output after the regular SCF iterations.
#
# MP3 is handled by ccman2.
#
# MP4 and variants are handled by ccman.
# This is the MP2/cdman case.
if 'MP2 total energy' in line:
if not hasattr(self, 'mpenergies'):
self.mpenergies = []
mp2energy = float(line.split()[4])
mp2energy = utils.convertor(mp2energy, 'hartree', 'eV')
self.mpenergies.append([mp2energy])
# This is the MP3/ccman2 case.
if line[1:11] == 'MP2 energy' and line[12:19] != 'read as':
if not hasattr(self, 'mpenergies'):
self.mpenergies = []
mpenergies = []
mp2energy = float(line.split()[3])
mpenergies.append(mp2energy)
line = next(inputfile)
line = next(inputfile)
# Just a safe check.
if 'MP3 energy' in line:
mp3energy = float(line.split()[3])
mpenergies.append(mp3energy)
mpenergies = [utils.convertor(mpe, 'hartree', 'eV')
for mpe in mpenergies]
self.mpenergies.append(mpenergies)
# This is the MP4/ccman case.
if 'EHF' in line:
if not hasattr(self, 'mpenergies'):
self.mpenergies = []
mpenergies = []
while list(set(line.strip())) != ['-']:
if 'EMP2' in line:
mp2energy = float(line.split()[2])
mpenergies.append(mp2energy)
if 'EMP3' in line:
mp3energy = float(line.split()[2])
mpenergies.append(mp3energy)
if 'EMP4SDQ' in line:
mp4sdqenergy = float(line.split()[2])
mpenergies.append(mp4sdqenergy)
# This is really MP4SD(T)Q.
if 'EMP4 ' in line:
mp4sdtqenergy = float(line.split()[2])
mpenergies.append(mp4sdtqenergy)
line = next(inputfile)
mpenergies = [utils.convertor(mpe, 'hartree', 'eV')
for mpe in mpenergies]
self.mpenergies.append(mpenergies)
# Coupled cluster corrections.
# Hopefully we only have to deal with ccman2 here.
if 'CCD total energy' in line:
if not hasattr(self, 'ccenergies'):
self.ccenergies = []
ccdenergy = float(line.split()[-1])
ccdenergy = utils.convertor(ccdenergy, 'hartree', 'eV')
self.ccenergies.append(ccdenergy)
if 'CCSD total energy' in line:
has_triples = False
if not hasattr(self, 'ccenergies'):
self.ccenergies = []
ccsdenergy = float(line.split()[-1])
# Make sure we aren't actually doing CCSD(T).
line = next(inputfile)
line = next(inputfile)
if 'CCSD(T) total energy' in line:
has_triples = True
ccsdtenergy = float(line.split()[-1])
ccsdtenergy = utils.convertor(ccsdtenergy, 'hartree', 'eV')
self.ccenergies.append(ccsdtenergy)
if not has_triples:
ccsdenergy = utils.convertor(ccsdenergy, 'hartree', 'eV')
self.ccenergies.append(ccsdenergy)
# Electronic transitions. Works for both CIS and TDDFT.
if 'Excitation Energies' in line:
# Restricted:
# ---------------------------------------------------
# TDDFT/TDA Excitation Energies
# ---------------------------------------------------
#
# Excited state 1: excitation energy (eV) = 3.6052
# Total energy for state 1: -382.167872200685
# Multiplicity: Triplet
# Trans. Mom.: 0.0000 X 0.0000 Y 0.0000 Z
# Strength : 0.0000
# D( 33) --> V( 3) amplitude = 0.2618
# D( 34) --> V( 2) amplitude = 0.2125
# D( 35) --> V( 1) amplitude = 0.9266
#
# Unrestricted:
# Excited state 2: excitation energy (eV) = 2.3156
# Total energy for state 2: -381.980177630969
# <S**2> : 0.7674
# Trans. Mom.: -2.7680 X -0.1089 Y 0.0000 Z
# Strength : 0.4353
# S( 1) --> V( 1) amplitude = -0.3105 alpha
# D( 34) --> S( 1) amplitude = 0.9322 beta
self.skip_lines(inputfile, ['dashes', 'blank'])
line = next(inputfile)
etenergies = []
etsyms = []
etoscs = []
etsecs = []
spinmap = {'alpha': 0, 'beta': 1}
while list(set(line.strip())) != ['-']:
# Take the total energy for the state and subtract from the
# ground state energy, rather than just the EE;
# this will be more accurate.
if 'Total energy for state' in line:
energy = utils.convertor(float(line.split()[-1]), 'hartree', 'cm-1')
etenergy = energy - utils.convertor(self.scfenergies[-1], 'eV', 'cm-1')
etenergies.append(etenergy)
# if 'excitation energy' in line:
# etenergy = utils.convertor(float(line.split()[-1]), 'eV', 'cm-1')
# etenergies.append(etenergy)
if 'Multiplicity' in line:
etsym = line.split()[1]
etsyms.append(etsym)
if 'Strength' in line:
strength = float(line.split()[-1])
etoscs.append(strength)
# This is the list of transitions.
if 'amplitude' in line:
sec = []
while line.strip() != '':
if self.unrestricted:
spin = spinmap[line[42:47].strip()]
else:
spin = 0
# There is a subtle difference between TDA and RPA calcs,
# because in the latter case each transition line is
                        # preceded by the type of vector: X or Y, namely excitation
# or deexcitation (see #154 for details). For deexcitations,
# we will need to reverse the MO indices. Note also that MOPAC
# starts reindexing virtual orbitals at 1.
if line[5] == '(':
ttype = 'X'
startidx = int(line[6:9]) - 1
endidx = int(line[17:20]) - 1 + self.nalpha
contrib = float(line[34:41].strip())
else:
assert line[5] == ":"
ttype = line[4]
startidx = int(line[9:12]) - 1
endidx = int(line[20:23]) - 1 + self.nalpha
contrib = float(line[37:44].strip())
start = (startidx, spin)
end = (endidx, spin)
if ttype == 'X':
sec.append([start, end, contrib])
elif ttype == 'Y':
sec.append([end, start, contrib])
else:
raise ValueError('Unknown transition type: %s' % ttype)
line = next(inputfile)
etsecs.append(sec)
line = next(inputfile)
self.set_attribute('etenergies', etenergies)
self.set_attribute('etsyms', etsyms)
self.set_attribute('etoscs', etoscs)
self.set_attribute('etsecs', etsecs)
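# Illustrative note (added, not part of the original source): each entry of
# `etsecs` pairs (MO index, spin) tuples with a coefficient. For the restricted
# TDA example above, the D( 35) --> V( 1) line with amplitude 0.9266 would,
# assuming nalpha == 35, be stored roughly as
#   [(34, 0), (35, 0), 0.9266]
# where virtual indices are offset by nalpha and spin 0 means alpha.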
# Molecular orbital energies and symmetries.
if 'Orbital Energies (a.u.) and Symmetries' in line:
# --------------------------------------------------------------
# Orbital Energies (a.u.) and Symmetries
# --------------------------------------------------------------
#
# Alpha MOs, Restricted
# -- Occupied --
# -10.018 -10.018 -10.008 -10.008 -10.007 -10.007 -10.006 -10.005
# 1 Bu 1 Ag 2 Bu 2 Ag 3 Bu 3 Ag 4 Bu 4 Ag
# -9.992 -9.992 -0.818 -0.755 -0.721 -0.704 -0.670 -0.585
# 5 Ag 5 Bu 6 Ag 6 Bu 7 Ag 7 Bu 8 Bu 8 Ag
# -0.561 -0.532 -0.512 -0.462 -0.439 -0.410 -0.400 -0.397
# 9 Ag 9 Bu 10 Ag 11 Ag 10 Bu 11 Bu 12 Bu 12 Ag
# -0.376 -0.358 -0.349 -0.330 -0.305 -0.295 -0.281 -0.263
# 13 Bu 14 Bu 13 Ag 1 Au 15 Bu 14 Ag 15 Ag 1 Bg
# -0.216 -0.198 -0.160
# 2 Au 2 Bg 3 Bg
# -- Virtual --
# 0.050 0.091 0.116 0.181 0.280 0.319 0.330 0.365
# 3 Au 4 Au 4 Bg 5 Au 5 Bg 16 Ag 16 Bu 17 Bu
# 0.370 0.413 0.416 0.422 0.446 0.469 0.496 0.539
# 17 Ag 18 Bu 18 Ag 19 Bu 19 Ag 20 Bu 20 Ag 21 Ag
# 0.571 0.587 0.610 0.627 0.646 0.693 0.743 0.806
# 21 Bu 22 Ag 22 Bu 23 Bu 23 Ag 24 Ag 24 Bu 25 Ag
# 0.816
# 25 Bu
#
# Beta MOs, Restricted
# -- Occupied --
# -10.018 -10.018 -10.008 -10.008 -10.007 -10.007 -10.006 -10.005
# 1 Bu 1 Ag 2 Bu 2 Ag 3 Bu 3 Ag 4 Bu 4 Ag
# -9.992 -9.992 -0.818 -0.755 -0.721 -0.704 -0.670 -0.585
# 5 Ag 5 Bu 6 Ag 6 Bu 7 Ag 7 Bu 8 Bu 8 Ag
# -0.561 -0.532 -0.512 -0.462 -0.439 -0.410 -0.400 -0.397
# 9 Ag 9 Bu 10 Ag 11 Ag 10 Bu 11 Bu 12 Bu 12 Ag
# -0.376 -0.358 -0.349 -0.330 -0.305 -0.295 -0.281 -0.263
# 13 Bu 14 Bu 13 Ag 1 Au 15 Bu 14 Ag 15 Ag 1 Bg
# -0.216 -0.198 -0.160
# 2 Au 2 Bg 3 Bg
# -- Virtual --
# 0.050 0.091 0.116 0.181 0.280 0.319 0.330 0.365
# 3 Au 4 Au 4 Bg 5 Au 5 Bg 16 Ag 16 Bu 17 Bu
# 0.370 0.413 0.416 0.422 0.446 0.469 0.496 0.539
# 17 Ag 18 Bu 18 Ag 19 Bu 19 Ag 20 Bu 20 Ag 21 Ag
# 0.571 0.587 0.610 0.627 0.646 0.693 0.743 0.806
# 21 Bu 22 Ag 22 Bu 23 Bu 23 Ag 24 Ag 24 Bu 25 Ag
# 0.816
# 25 Bu
# --------------------------------------------------------------
self.skip_line(inputfile, 'dashes')
line = next(inputfile)
# Sometimes MOPAC gets a little confused...
while 'Warning : Irrep of orbital' in line:
line = next(inputfile)
line = next(inputfile)
energies_alpha = []
symbols_alpha = []
if self.unrestricted:
energies_beta = []
symbols_beta = []
line = next(inputfile)
# The end of the block is either a blank line or only dashes.
while not self.re_dashes_and_spaces.search(line):
if 'Occupied' in line or 'Virtual' in line:
# A nice trick to find where the HOMO is.
if 'Virtual' in line:
self.homos = [len(energies_alpha)-1]
line = next(inputfile)
# Parse the energies and symmetries in pairs of lines.
# energies = [utils.convertor(energy, 'hartree', 'eV')
# for energy in map(float, line.split())]
# This convoluted bit handles '*******' when present.
energies = []
energy_line = line.split()
for e in energy_line:
try:
energy = utils.convertor(self.float(e), 'hartree', 'eV')
except ValueError:
energy = numpy.nan
energies.append(energy)
energies_alpha.extend(energies)
line = next(inputfile)
symbols = line.split()[1::2]
symbols_alpha.extend(symbols)
line = next(inputfile)
line = next(inputfile)
# Only look at the second block if doing an unrestricted calculation.
# This might be a problem for ROHF/ROKS.
if self.unrestricted:
assert 'Beta MOs' in line
self.skip_line(inputfile, '-- Occupied --')
line = next(inputfile)
while not self.re_dashes_and_spaces.search(line):
if 'Occupied' in line or 'Virtual' in line:
# This will definitely exist, thanks to the above block.
if 'Virtual' in line:
if len(self.homos) == 1:
self.homos.append(len(energies_beta)-1)
line = next(inputfile)
energies = []
energy_line = line.split()
for e in energy_line:
try:
energy = utils.convertor(self.float(e), 'hartree', 'eV')
except ValueError:
energy = numpy.nan
energies.append(energy)
energies_beta.extend(energies)
line = next(inputfile)
symbols = line.split()[1::2]
symbols_beta.extend(symbols)
line = next(inputfile)
# For now, only keep the last set of MO energies, even though it is
# printed at every step of geometry optimizations and fragment jobs.
self.moenergies = [[]]
self.mosyms = [[]]
self.moenergies[0] = numpy.array(energies_alpha)
self.mosyms[0] = symbols_alpha
if self.unrestricted:
self.moenergies.append([])
self.mosyms.append([])
self.moenergies[1] = numpy.array(energies_beta)
self.mosyms[1] = symbols_beta
self.set_attribute('nmo', len(self.moenergies[0]))
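# Illustrative note (added): `homos` built above holds the 0-based index of the
# highest occupied MO for each spin channel, e.g. [34] for a restricted
# 70-electron system, or [34, 33] for the corresponding unrestricted doublet cation.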
# Molecular orbital energies, no symmetries.
if line.strip() == 'Orbital Energies (a.u.)':
# In the case of no orbital symmetries, the beta spin block is not
# present for restricted calculations.
# --------------------------------------------------------------
# Orbital Energies (a.u.)
# --------------------------------------------------------------
#
# Alpha MOs
# -- Occupied --
# ******* -38.595 -34.580 -34.579 -34.578 -19.372 -19.372 -19.364
# -19.363 -19.362 -19.362 -4.738 -3.252 -3.250 -3.250 -1.379
# -1.371 -1.369 -1.365 -1.364 -1.362 -0.859 -0.855 -0.849
# -0.846 -0.840 -0.836 -0.810 -0.759 -0.732 -0.729 -0.704
# -0.701 -0.621 -0.610 -0.595 -0.587 -0.584 -0.578 -0.411
# -0.403 -0.355 -0.354 -0.352
# -- Virtual --
# -0.201 -0.117 -0.099 -0.086 0.020 0.031 0.055 0.067
# 0.075 0.082 0.086 0.092 0.096 0.105 0.114 0.148
#
# Beta MOs
# -- Occupied --
# ******* -38.561 -34.550 -34.549 -34.549 -19.375 -19.375 -19.367
# -19.367 -19.365 -19.365 -4.605 -3.105 -3.103 -3.102 -1.385
# -1.376 -1.376 -1.371 -1.370 -1.368 -0.863 -0.858 -0.853
# -0.849 -0.843 -0.839 -0.818 -0.765 -0.738 -0.737 -0.706
# -0.702 -0.624 -0.613 -0.600 -0.591 -0.588 -0.585 -0.291
# -0.291 -0.288 -0.275
# -- Virtual --
# -0.139 -0.122 -0.103 0.003 0.014 0.049 0.049 0.059
# 0.061 0.070 0.076 0.081 0.086 0.090 0.098 0.106
# 0.138
# --------------------------------------------------------------
self.skip_lines(inputfile, ['dashes', 'blank'])
line = next(inputfile)
energies_alpha = []
if self.unrestricted:
energies_beta = []
line = next(inputfile)
# The end of the block is either a blank line or only dashes.
while not self.re_dashes_and_spaces.search(line):
if 'Occupied' in line or 'Virtual' in line:
# A nice trick to find where the HOMO is.
if 'Virtual' in line:
self.homos = [len(energies_alpha)-1]
line = next(inputfile)
energies = []
energy_line = line.split()
for e in energy_line:
try:
energy = utils.convertor(self.float(e), 'hartree', 'eV')
except ValueError:
energy = numpy.nan
energies.append(energy)
energies_alpha.extend(energies)
line = next(inputfile)
line = next(inputfile)
# Only look at the second block if doing an unrestricted calculation.
# This might be a problem for ROHF/ROKS.
if self.unrestricted:
assert 'Beta MOs' in line
self.skip_line(inputfile, '-- Occupied --')
line = next(inputfile)
while not self.re_dashes_and_spaces.search(line):
if 'Occupied' in line or 'Virtual' in line:
# This will definitely exist, thanks to the above block.
if 'Virtual' in line:
if len(self.homos) == 1:
self.homos.append(len(energies_beta)-1)
line = next(inputfile)
energies = []
energy_line = line.split()
for e in energy_line:
try:
energy = utils.convertor(self.float(e), 'hartree', 'eV')
except ValueError:
energy = numpy.nan
energies.append(energy)
energies_beta.extend(energies)
line = next(inputfile)
# For now, only keep the last set of MO energies, even though it is
# printed at every step of geometry optimizations and fragment jobs.
self.moenergies = [[]]
self.moenergies[0] = numpy.array(energies_alpha)
if self.unrestricted:
self.moenergies.append([])
self.moenergies[1] = numpy.array(energies_beta)
self.set_attribute('nmo', len(self.moenergies[0]))
# If we've asked to display more virtual orbitals than there
# are MOs present in the molecule, fix that now.
if hasattr(self, 'nmo') and hasattr(self, 'nalpha') and hasattr(self, 'nbeta'):
if self.norbdisp_alpha_aonames > self.nmo:
self.norbdisp_alpha_aonames = self.nmo
if self.norbdisp_beta_aonames > self.nmo:
self.norbdisp_beta_aonames = self.nmo
# Molecular orbital coefficients.
# This block comes from `print_orbitals = true/{int}`. Less
# precision than `scf_final_print >= 2` for `mocoeffs`, but
# important for `aonames` and `atombasis`.
if any(header in line
for header in self.alpha_mo_coefficient_headers):
if not hasattr(self, 'mocoeffs'):
self.mocoeffs = []
if not hasattr(self, 'atombasis'):
self.atombasis = []
for n in range(self.natom):
self.atombasis.append([])
if not hasattr(self, 'aonames'):
self.aonames = []
# We could also attempt to parse `moenergies` here, but
# nothing is gained by it.
mocoeffs = numpy.empty(shape=(self.nbasis, self.norbdisp_alpha_aonames))
self.parse_matrix_aonames(inputfile, mocoeffs)
# Only use these MO coefficients if we don't have them
# from `scf_final_print`.
if len(self.mocoeffs) == 0:
self.mocoeffs.append(mocoeffs.transpose())
# Go back through `aonames` to create `atombasis`.
assert len(self.aonames) == self.nbasis
for aoindex, aoname in enumerate(self.aonames):
atomindex = int(self.re_atomindex.search(aoname).groups()[0]) - 1
self.atombasis[atomindex].append(aoindex)
assert len(self.atombasis) == len(self.atomnos)
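# Illustrative note (added; the molecule and labels are hypothetical): for a
# water molecule in a minimal basis, `aonames` would look like
# ['O1_S', 'O1_S', 'O1_PX', 'O1_PY', 'O1_PZ', 'H2_S', 'H3_S'] and `atombasis`
# would group the AO indices per atom as [[0, 1, 2, 3, 4], [5], [6]].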
if 'BETA MOLECULAR ORBITAL COEFFICIENTS' in line:
mocoeffs = numpy.empty(shape=(self.nbasis, self.norbdisp_beta_aonames))
self.parse_matrix_aonames(inputfile, mocoeffs)
if len(self.mocoeffs) == 1:
self.mocoeffs.append(mocoeffs.transpose())
# Population analysis.
if 'Ground-State Mulliken Net Atomic Charges' in line:
self.parse_charge_section(inputfile, 'mulliken')
if 'Hirshfeld Atomic Charges' in line:
self.parse_charge_section(inputfile, 'hirshfeld')
if 'Ground-State ChElPG Net Atomic Charges' in line:
self.parse_charge_section(inputfile, 'chelpg')
# Multipole moments are not printed in lexicographical order,
# so we need to parse and sort them. The units seem OK, but there
# is some uncertainty about the reference point and whether it
# can be changed.
#
# Notice how the letter/coordinate labels change to coordinate ranks
# after hexadecapole moments, and need to be translated. Additionally,
# after 9-th order moments the ranks are not necessarily single digits
# and so there are spaces between them.
#
# -----------------------------------------------------------------
# Cartesian Multipole Moments
# LMN = < X^L Y^M Z^N >
# -----------------------------------------------------------------
# Charge (ESU x 10^10)
# 0.0000
# Dipole Moment (Debye)
# X 0.0000 Y 0.0000 Z 0.0000
# Tot 0.0000
# Quadrupole Moments (Debye-Ang)
# XX -50.9647 XY -0.1100 YY -50.1441
# XZ 0.0000 YZ 0.0000 ZZ -58.5742
# ...
# 5th-Order Moments (Debye-Ang^4)
# 500 0.0159 410 -0.0010 320 0.0005
# 230 0.0000 140 0.0005 050 0.0012
# ...
# -----------------------------------------------------------------
#
if "Cartesian Multipole Moments" in line:
# This line does not appear by default, but only when
# `multipole_order` > 4:
line = next(inputfile)
if 'LMN = < X^L Y^M Z^N >' in line:
line = next(inputfile)
# The reference point is always the origin, although normally the molecule
# is moved so that the center of charge is at the origin.
self.reference = [0.0, 0.0, 0.0]
self.moments = [self.reference]
# Watch out! This charge is in statcoulombs without the exponent!
# We should expect very good agreement; however, MOPAC prints
# the charge with only 5 digits, so expect 1e-4 accuracy.
charge_header = next(inputfile)
assert charge_header.split()[0] == "Charge"
charge = float(next(inputfile).strip())
charge = utils.convertor(charge, 'statcoulomb', 'e') * 1e-10
# Allow this to change until fragment jobs are properly implemented.
# assert abs(charge - self.charge) < 1e-4
# This will make sure Debyes are used (not sure if it can be changed).
line = next(inputfile)
assert line.strip() == "Dipole Moment (Debye)"
while "-----" not in line:
# The current multipole element will be gathered here.
multipole = []
line = next(inputfile)
while ("-----" not in line) and ("Moment" not in line):
cols = line.split()
# The total (norm) is printed for dipole but not other multipoles.
if cols[0] == 'Tot':
line = next(inputfile)
continue
# Find and replace any 'stars' with NaN before moving on.
for i in range(len(cols)):
if '***' in cols[i]:
cols[i] = numpy.nan
# The moments come in pairs (label followed by value) up to the 9-th order,
# although above hexadecapoles the labels are digits representing the rank
# in each coordinate. Above the 9-th order, ranks are not always single digits,
# so there are spaces between them, which means moments come in quartets.
if len(self.moments) < 5:
for i in range(len(cols)//2):
lbl = cols[2*i]
m = cols[2*i + 1]
multipole.append([lbl, m])
elif len(self.moments) < 10:
for i in range(len(cols)//2):
lbl = cols[2*i]
lbl = 'X'*int(lbl[0]) + 'Y'*int(lbl[1]) + 'Z'*int(lbl[2])
m = cols[2*i + 1]
multipole.append([lbl, m])
else:
for i in range(len(cols)//4):
lbl = 'X'*int(cols[4*i]) + 'Y'*int(cols[4*i + 1]) + 'Z'*int(cols[4*i + 2])
m = cols[4*i + 3]
multipole.append([lbl, m])
line = next(inputfile)
# Sort should use the first element when sorting lists,
# so this should simply work, and afterwards we just need
# to extract the second element in each list (the actual moment).
multipole.sort()
multipole = [m[1] for m in multipole]
self.moments.append(multipole)
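# Illustrative note (added, not in the original file): after this loop,
# `self.moments` is a list ordered by rank -- [reference point, dipole (3 values),
# quadrupole (6), octupole (10), ...] -- with each multipole sorted by its
# lexicographic label (XX, XY, XZ, YY, ...), matching the sort() call above.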
# For `method = force` or geometry optimizations,
# the gradient is printed.
if 'Gradient of SCF Energy' in line:
if not hasattr(self, 'grads'):
self.grads = []
grad = numpy.empty(shape=(3, self.natom))
self.parse_matrix(inputfile, grad)
self.grads.append(grad.T)
# For IR-related jobs, the Hessian is printed (dim: 3*natom, 3*natom).
# Note that this is *not* the mass-weighted Hessian.
if 'Hessian of the SCF Energy' in line:
if not hasattr(self, 'hessian'):
dim = 3*self.natom
self.hessian = numpy.empty(shape=(dim, dim))
self.parse_matrix(inputfile, self.hessian)
# Start of the IR/Raman frequency section.
if 'VIBRATIONAL ANALYSIS' in line:
while 'STANDARD THERMODYNAMIC QUANTITIES' not in line:
## IR, optional Raman:
# **********************************************************************
# ** **
# ** VIBRATIONAL ANALYSIS **
# ** -------------------- **
# ** **
# ** VIBRATIONAL FREQUENCIES (CM**-1) AND NORMAL MODES **
# ** FORCE CONSTANTS (mDYN/ANGSTROM) AND REDUCED MASSES (AMU) **
# ** INFRARED INTENSITIES (KM/MOL) **
##** RAMAN SCATTERING ACTIVITIES (A**4/AMU) AND DEPOLARIZATION RATIOS **
# ** **
# **********************************************************************
# Mode: 1 2 3
# Frequency: -106.88 -102.91 161.77
# Force Cnst: 0.0185 0.0178 0.0380
# Red. Mass: 2.7502 2.8542 2.4660
# IR Active: NO YES YES
# IR Intens: 0.000 0.000 0.419
# Raman Active: YES NO NO
##Raman Intens: 2.048 0.000 0.000
##Depolar: 0.750 0.000 0.000
# X Y Z X Y Z X Y Z
# C 0.000 0.000 -0.100 -0.000 0.000 -0.070 -0.000 -0.000 -0.027
# C 0.000 0.000 0.045 -0.000 0.000 -0.074 0.000 -0.000 -0.109
# C 0.000 0.000 0.148 -0.000 -0.000 -0.074 0.000 0.000 -0.121
# C 0.000 0.000 0.100 -0.000 -0.000 -0.070 0.000 0.000 -0.027
# C 0.000 0.000 -0.045 0.000 -0.000 -0.074 -0.000 -0.000 -0.109
# C 0.000 0.000 -0.148 0.000 0.000 -0.074 -0.000 -0.000 -0.121
# H -0.000 0.000 0.086 -0.000 0.000 -0.082 0.000 -0.000 -0.102
# H 0.000 0.000 0.269 -0.000 -0.000 -0.091 0.000 0.000 -0.118
# H 0.000 0.000 -0.086 0.000 -0.000 -0.082 -0.000 0.000 -0.102
# H -0.000 0.000 -0.269 0.000 0.000 -0.091 -0.000 -0.000 -0.118
# C 0.000 -0.000 0.141 -0.000 -0.000 -0.062 -0.000 0.000 0.193
# C -0.000 -0.000 -0.160 0.000 0.000 0.254 -0.000 0.000 0.043
# H 0.000 -0.000 0.378 -0.000 0.000 -0.289 0.000 0.000 0.519
# H -0.000 -0.000 -0.140 0.000 0.000 0.261 -0.000 -0.000 0.241
# H -0.000 -0.000 -0.422 0.000 0.000 0.499 -0.000 0.000 -0.285
# C 0.000 -0.000 -0.141 0.000 0.000 -0.062 -0.000 -0.000 0.193
# C -0.000 -0.000 0.160 -0.000 -0.000 0.254 0.000 0.000 0.043
# H 0.000 -0.000 -0.378 0.000 -0.000 -0.289 -0.000 0.000 0.519
# H -0.000 -0.000 0.140 -0.000 -0.000 0.261 0.000 0.000 0.241
# H -0.000 -0.000 0.422 -0.000 -0.000 0.499 0.000 0.000 -0.285
# TransDip 0.000 -0.000 -0.000 0.000 -0.000 -0.000 -0.000 0.000 0.021
# Mode: 4 5 6
# ...
# There isn't any symmetry information for normal modes present
# in MOPAC.
# if not hasattr(self, 'vibsyms'):
# self.vibsyms = []
if 'Frequency:' in line:
if not hasattr(self, 'vibfreqs'):
self.vibfreqs = []
vibfreqs = map(float, line.split()[1:])
self.vibfreqs.extend(vibfreqs)
if 'IR Intens:' in line:
if not hasattr(self, 'vibirs'):
self.vibirs = []
vibirs = map(float, line.split()[2:])
self.vibirs.extend(vibirs)
if 'Raman Intens:' in line:
if not hasattr(self, 'vibramans'):
self.vibramans = []
vibramans = map(float, line.split()[2:])
self.vibramans.extend(vibramans)
# This is the start of the displacement block.
if line.split()[0:3] == ['X', 'Y', 'Z']:
if not hasattr(self, 'vibdisps'):
self.vibdisps = []
disps = []
for k in range(self.natom):
line = next(inputfile)
numbers = list(map(float, line.split()[1:]))
N = len(numbers) // 3
if not disps:
for n in range(N):
disps.append([])
for n in range(N):
disps[n].append(numbers[3*n:(3*n)+3])
self.vibdisps.extend(disps)
line = next(inputfile)
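# Illustrative note (added): `disps` gathered above has one entry per mode in
# the current block (at most three), each entry being an natom x 3 list of
# Cartesian displacements, so `vibdisps` ends up with one natom x 3 block per
# normal mode once all blocks are parsed.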
# Anharmonic vibrational analysis.
# MOPAC includes 3 theories: VPT2, TOSH, and VCI.
# For now, just take the VPT2 results.
# if 'VIBRATIONAL ANHARMONIC ANALYSIS' in line:
# while list(set(line.strip())) != ['=']:
# if 'VPT2' in line:
# if not hasattr(self, 'vibanharms'):
# self.vibanharms = []
# self.vibanharms.append(float(line.split()[-1]))
# line = next(inputfile)
if 'STANDARD THERMODYNAMIC QUANTITIES AT' in line:
if not hasattr(self, 'temperature'):
self.temperature = float(line.split()[4])
# Not supported yet.
if not hasattr(self, 'pressure'):
self.pressure = float(line.split()[7])
self.skip_lines(inputfile, ['blank', 'Imaginary'])
line = next(inputfile)
# Not supported yet.
if 'Zero point vibrational energy' in line:
if not hasattr(self, 'zpe'):
# Convert from kcal/mol to Hartree/particle.
self.zpe = utils.convertor(float(line.split()[4]),
'kcal', 'hartree')
atommasses = []
while 'Archival summary' not in line:
if 'Has Mass' in line:
atommass = float(line.split()[6])
atommasses.append(atommass)
if 'Total Enthalpy' in line:
if not hasattr(self, 'enthalpy'):
enthalpy = float(line.split()[2])
self.enthalpy = utils.convertor(enthalpy,
'kcal', 'hartree')
if 'Total Entropy' in line:
if not hasattr(self, 'entropy'):
entropy = float(line.split()[2]) * self.temperature / 1000
# This is the *temperature dependent* entropy.
self.entropy = utils.convertor(entropy,
'kcal', 'hartree')
if not hasattr(self, 'freeenergy'):
self.freeenergy = self.enthalpy - self.entropy
line = next(inputfile)
if not hasattr(self, 'atommasses'):
self.atommasses = numpy.array(atommasses)
# TODO:
# 'enthalpy' (incorrect)
# 'entropy' (incorrect)
# 'freeenergy' (incorrect)
# 'nocoeffs'
# 'nooccnos'
# 'vibanharms'
"""
def parse_charge_section(self, inputfile, chargetype):
"""Parse the population analysis charge block."""
"""
self.skip_line(inputfile, 'blank')
line = next(inputfile)
has_spins = False
if 'Spin' in line:
if not hasattr(self, 'atomspins'):
self.atomspins = dict()
has_spins = True
spins = []
self.skip_line(inputfile, 'dashes')
if not hasattr(self, 'atomcharges'):
self.atomcharges = dict()
charges = []
line = next(inputfile)
while list(set(line.strip())) != ['-']:
elements = line.split()
charge = self.float(elements[2])
charges.append(charge)
if has_spins:
spin = self.float(elements[3])
spins.append(spin)
line = next(inputfile)
self.atomcharges[chargetype] = numpy.array(charges)
if has_spins:
self.atomspins[chargetype] = numpy.array(spins)
"""
def parse_matrix(self, inputfile, nparray):
"""MOPAC prints most matrices in a standard format; parse the matrix
into a preallocated NumPy array of the appropriate shape.
"""
"""
nrows, ncols = nparray.shape
line = next(inputfile)
assert len(line.split()) == min(self.ncolsblock, ncols)
colcounter = 0
while colcounter < ncols:
# If the line is just the column header (indices)...
if line[:5].strip() == '':
line = next(inputfile)
rowcounter = 0
while rowcounter < nrows:
row = list(map(float, line.split()[1:]))
assert len(row) == min(self.ncolsblock, (ncols - colcounter))
nparray[rowcounter][colcounter:colcounter + self.ncolsblock] = row
line = next(inputfile)
rowcounter += 1
colcounter += self.ncolsblock
"""
def parse_matrix_aonames(self, inputfile, nparray):
"""MOPAC prints most matrices in a standard format; parse the matrix
into a preallocated NumPy array of the appropriate shape.
Rather than have one routine for parsing all general matrices
and the 'MOLECULAR ORBITAL COEFFICIENTS' block, use a second
which handles `aonames`.
"""
"""
bigmom = ('d', 'f', 'g', 'h')
nrows, ncols = nparray.shape
line = next(inputfile)
assert len(line.split()) == min(self.ncolsblock, ncols)
colcounter = 0
while colcounter < ncols:
# If the line is just the column header (indices)...
if line[:5].strip() == '':
line = next(inputfile)
# Do nothing for now.
if 'eigenvalues' in line:
line = next(inputfile)
rowcounter = 0
while rowcounter < nrows:
row = line.split()
# Only take the AO names on the first time through.
if colcounter == 0:
if len(self.aonames) != self.nbasis:
# Apply the offset for rows where there is
# more than one atom of any element in the
# molecule.
offset = int(self.formula_histogram[row[1]] != 1)
if offset:
name = self.atommap.get(row[1] + str(row[2]))
else:
name = self.atommap.get(row[1] + '1')
# For l > 1, there is a space between l and
# m_l when using spherical functions.
shell = row[2 + offset]
if shell in bigmom:
shell = ''.join([shell, row[3 + offset]])
aoname = ''.join([name, '_', shell.upper()])
self.aonames.append(aoname)
row = list(map(float, row[-min(self.ncolsblock, (ncols - colcounter)):]))
nparray[rowcounter][colcounter:colcounter + self.ncolsblock] = row
line = next(inputfile)
rowcounter += 1
colcounter += self.ncolsblock
"""
def generate_atom_map(self):
"""Generate the map to go from MOPAC atom numbering:
'C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'H1', 'H2', 'H3', 'H4', 'C7', ...
to cclib atom numbering:
'C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'H7', 'H8', 'H9', 'H10', 'C11', ...
for later use.
"""
"""
# Generate the desired order.
order_proper = [element + str(num)
for element, num in zip(self.atomelements,
itertools.count(start=1))]
# We need separate counters for each element.
element_counters = {element: itertools.count(start=1)
for element in set(self.atomelements)}
# Generate the MOPAC printed order.
order_mopac = [element + str(next(element_counters[element]))
for element in self.atomelements]
# Combine the orders into a mapping.
atommap = {k: v for k, v in zip(order_mopac, order_proper)}
return atommap
"""
def generate_formula_histogram(self):
"""From the atomnos, generate a histogram that represents the
molecular formula.
"""
"""
histogram = dict()
for element in self.atomelements:
if element in histogram.keys():
histogram[element] += 1
else:
histogram[element] = 1
return histogram
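# Illustrative note (added): for benzene (C6H6) this returns {'C': 6, 'H': 6};
# collections.Counter(self.atomelements) would be an equivalent one-liner.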
"""
if __name__ == '__main__':
import sys
import doctest, mopacparser
if len(sys.argv) == 1:
doctest.testmod(mopacparser, verbose=False)
if len(sys.argv) == 2:
parser = mopacparser.MOPAC(sys.argv[1])
data = parser.parse()
if len(sys.argv) > 2:
for i in range(len(sys.argv[2:])):
if hasattr(data, sys.argv[2 + i]):
print(getattr(data, sys.argv[2 + i]))
| ben-albrecht/cclib | cclib/parser/mopacparser.py | Python | lgpl-2.1 | 61,886 | [
"Gaussian",
"MOPAC",
"cclib"
] | c6194dbc264a91c08397309033d4d1c536c8cd09c70cba35edfdf0310a85685b |
""" DIRAC Workload Management System Client class encapsulates all the
methods necessary to communicate with the Workload Management System
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from six import StringIO
import time
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.Core.Utilities import File
from DIRAC.Core.Utilities.ClassAd.ClassAdLight import ClassAd
from DIRAC.Core.Utilities.DErrno import EWMSJDL, EWMSSUBM
from DIRAC.WorkloadManagementSystem.Client.JobManagerClient import JobManagerClient
from DIRAC.WorkloadManagementSystem.Client.SandboxStoreClient import SandboxStoreClient
from DIRAC.WorkloadManagementSystem.Utilities.ParametricJob import getParameterVectorLength
__RCSID__ = "$Id$"
class WMSClient(object):
""" Class exposing the following jobs methods:
submit
kill
delete
remove
reschedule
reset
"""
def __init__(self, jobManagerClient=None, sbRPCClient=None, sbTransferClient=None,
useCertificates=False, timeout=600, delegatedDN=None, delegatedGroup=None):
""" WMS Client constructor
Here we also initialize the needed clients and connections
"""
self.useCertificates = useCertificates
self.delegatedDN = delegatedDN
self.delegatedGroup = delegatedGroup
self.timeout = timeout
self._jobManager = jobManagerClient
self.operationsHelper = Operations()
self.sandboxClient = None
if sbRPCClient and sbTransferClient:
self.sandboxClient = SandboxStoreClient(rpcClient=sbRPCClient,
transferClient=sbTransferClient,
useCertificates=useCertificates)
@property
def jobManager(self):
if not self._jobManager:
self._jobManager = JobManagerClient(
useCertificates=self.useCertificates,
delegatedDN=self.delegatedDN,
delegatedGroup=self.delegatedGroup,
timeout=self.timeout)
return self._jobManager
def __getInputSandboxEntries(self, classAdJob):
if classAdJob.lookupAttribute("InputSandbox"):
inputSandbox = classAdJob.get_expression("InputSandbox")
inputSandbox = inputSandbox.replace('","', "\n")
inputSandbox = inputSandbox.replace('{', "")
inputSandbox = inputSandbox.replace('}', "")
inputSandbox = inputSandbox.replace('"', "")
inputSandbox = inputSandbox.replace(',', "")
inputSandbox = inputSandbox.split()
else:
inputSandbox = []
return inputSandbox
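# Illustrative note (added; the file names are hypothetical): a JDL attribute
# such as InputSandbox = {"jobScript.sh","LFN:/vo/user/data.txt"} is returned
# here as the plain list ['jobScript.sh', 'LFN:/vo/user/data.txt'].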
def __uploadInputSandbox(self, classAdJob, jobDescriptionObject=None):
"""Checks the validity of the job Input Sandbox.
The function returns the list of Input Sandbox files.
The total volume of the input sandbox is evaluated
"""
inputSandbox = self.__getInputSandboxEntries(classAdJob)
realFiles = []
badFiles = []
diskFiles = []
for isFile in inputSandbox:
if not isFile.startswith(('lfn:', 'LFN:', 'SB:', '%s', '%(')):
realFiles.append(isFile)
stringIOFiles = []
stringIOFilesSize = 0
if jobDescriptionObject is not None:
if isinstance(jobDescriptionObject, StringIO):
stringIOFiles = [jobDescriptionObject]
stringIOFilesSize = len(jobDescriptionObject.getvalue())
gLogger.debug("Size of the stringIOFiles: " + str(stringIOFilesSize))
else:
return S_ERROR(EWMSJDL, "jobDescriptionObject is not a StringIO object")
# Check real files
for isFile in realFiles:
if not os.path.exists(isFile): # we are passing in real files, we expect them to be on disk
badFiles.append(isFile)
gLogger.warn("inputSandbox file/directory " + isFile + " not found. Keep looking for the others")
continue
diskFiles.append(isFile)
diskFilesSize = File.getGlobbedTotalSize(diskFiles)
gLogger.debug("Size of the diskFiles: " + str(diskFilesSize))
totalSize = diskFilesSize + stringIOFilesSize
gLogger.verbose("Total size of the inputSandbox: " + str(totalSize))
okFiles = stringIOFiles + diskFiles
if badFiles:
result = S_ERROR(EWMSJDL, 'Input Sandbox is not valid')
result['BadFile'] = badFiles
result['TotalSize'] = totalSize
return result
if okFiles:
if not self.sandboxClient:
self.sandboxClient = SandboxStoreClient(useCertificates=self.useCertificates,
delegatedDN=self.delegatedDN,
delegatedGroup=self.delegatedGroup)
result = self.sandboxClient.uploadFilesAsSandbox(okFiles)
if not result['OK']:
return result
inputSandbox.append(result['Value'])
classAdJob.insertAttributeVectorString("InputSandbox", inputSandbox)
return S_OK()
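# Illustrative note (added): on success, uploadFilesAsSandbox() returns a
# sandbox reference (typically of the form 'SB:<StorageElement>|<path>') which
# is appended to the InputSandbox list, so the JDL sent to the server points at
# the uploaded sandbox instead of the local files.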
def submitJob(self, jdl, jobDescriptionObject=None):
""" Submit one job specified by its JDL to WMS.
The JDL may actually be the description of a parametric job,
resulting in multiple DIRAC jobs submitted to the DIRAC WMS
"""
if os.path.exists(jdl):
with open(jdl, "r") as fic:
jdlString = fic.read()
else:
# If file JDL does not exist, assume that the JDL is passed as a string
jdlString = jdl
jdlString = jdlString.strip()
gLogger.debug("Submitting JDL", jdlString)
# Strip comments from the JDL string
newJdlList = []
for line in jdlString.split('\n'):
if not line.strip().startswith('#'):
newJdlList.append(line)
jdlString = '\n'.join(newJdlList)
# Check the validity of the input JDL
if jdlString.find("[") != 0:
jdlString = "[%s]" % jdlString
classAdJob = ClassAd(jdlString)
if not classAdJob.isOK():
return S_ERROR(EWMSJDL, 'Invalid job JDL')
# Check the size and the contents of the input sandbox
result = self.__uploadInputSandbox(classAdJob, jobDescriptionObject)
if not result['OK']:
return result
# Submit the job now and get the new job ID
result = getParameterVectorLength(classAdJob)
if not result['OK']:
return result
nJobs = result['Value']
result = self.jobManager.submitJob(classAdJob.asJDL())
if nJobs:
gLogger.debug('Applying transactional job submission')
# The server applies transactional bulk submission, we should confirm the jobs
if result['OK']:
jobIDList = result['Value']
if len(jobIDList) == nJobs:
# Confirm the submitted jobs
confirmed = False
for _attempt in range(3):
result = self.jobManager.confirmBulkSubmission(jobIDList)
if result['OK']:
confirmed = True
break
time.sleep(1)
if not confirmed:
# The bulk submission failed, try to delete the created jobs
resultDelete = self.jobManager.deleteJob(jobIDList)
error = "Job submission failed to confirm bulk transaction"
if not resultDelete['OK']:
error += "; removal of created jobs failed"
return S_ERROR(EWMSSUBM, error)
else:
return S_ERROR(EWMSSUBM, "The number of submitted jobs does not match job description")
if result.get('requireProxyUpload'):
gLogger.warn("Need to upload the proxy")
return result
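# Illustrative usage sketch (added; 'job.jdl' is a hypothetical file):
#   result = WMSClient().submitJob('job.jdl')
#   if result['OK']:
#       print('Submitted job(s):', result['Value'])
# For a parametric JDL the server returns a list of job IDs (see the
# bulk-submission handling above).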
def killJob(self, jobID):
""" Kill running job.
jobID can be an integer representing a single DIRAC job ID or a list of IDs
"""
return self.jobManager.killJob(jobID)
def deleteJob(self, jobID):
""" Delete job(s) (set their status to DELETED) from the WMS Job database.
jobID can be an integer representing a single DIRAC job ID or a list of IDs
"""
return self.jobManager.deleteJob(jobID)
def removeJob(self, jobID):
""" Fully remove job(s) from the WMS Job database.
jobID can be an integer representing a single DIRAC job ID or a list of IDs
"""
return self.jobManager.removeJob(jobID)
def rescheduleJob(self, jobID):
""" Reschedule job(s) in WMS Job database.
jobID can be an integer representing a single DIRAC job ID or a list of IDs
"""
return self.jobManager.rescheduleJob(jobID)
def resetJob(self, jobID):
""" Reset job(s) in WMS Job database.
jobID can be an integer representing a single DIRAC job ID or a list of IDs
"""
return self.jobManager.resetJob(jobID)
| yujikato/DIRAC | src/DIRAC/WorkloadManagementSystem/Client/WMSClient.py | Python | gpl-3.0 | 8,555 | [
"DIRAC"
] | 02b3f6db07a42e02a54044caa0a7d6845ccc2562a648205c6cadd8fbb8b56f49 |
import py
import pytest
from pkg_resources import parse_version
from xdist.looponfail import RemoteControl
from xdist.looponfail import StatRecorder
class TestStatRecorder:
def test_filechange(self, tmpdir):
tmp = tmpdir
hello = tmp.ensure("hello.py")
sd = StatRecorder([tmp])
changed = sd.check()
assert not changed
hello.write("world")
changed = sd.check()
assert changed
(hello + "c").write("hello")
changed = sd.check()
assert not changed
p = tmp.ensure("new.py")
changed = sd.check()
assert changed
p.remove()
changed = sd.check()
assert changed
tmp.join("a", "b", "c.py").ensure()
changed = sd.check()
assert changed
tmp.join("a", "c.txt").ensure()
changed = sd.check()
assert changed
changed = sd.check()
assert not changed
tmp.join("a").remove()
changed = sd.check()
assert changed
def test_dirchange(self, tmpdir):
tmp = tmpdir
tmp.ensure("dir", "hello.py")
sd = StatRecorder([tmp])
assert not sd.fil(tmp.join("dir"))
def test_filechange_deletion_race(self, tmpdir, monkeypatch):
tmp = tmpdir
sd = StatRecorder([tmp])
changed = sd.check()
assert not changed
p = tmp.ensure("new.py")
changed = sd.check()
assert changed
p.remove()
# make check()'s visit() call return our just removed
# path as if we were in a race condition
monkeypatch.setattr(tmp, "visit", lambda *args: [p])
changed = sd.check()
assert changed
def test_pycremoval(self, tmpdir):
tmp = tmpdir
hello = tmp.ensure("hello.py")
sd = StatRecorder([tmp])
changed = sd.check()
assert not changed
pycfile = hello + "c"
pycfile.ensure()
hello.write("world")
changed = sd.check()
assert changed
assert not pycfile.check()
def test_waitonchange(self, tmpdir, monkeypatch):
tmp = tmpdir
sd = StatRecorder([tmp])
ret_values = [True, False]
monkeypatch.setattr(StatRecorder, "check", lambda self: ret_values.pop())
sd.waitonchange(checkinterval=0.2)
assert not ret_values
class TestRemoteControl:
def test_nofailures(self, testdir):
item = testdir.getitem("def test_func(): pass\n")
control = RemoteControl(item.config)
control.setup()
topdir, failures = control.runsession()[:2]
assert not failures
def test_failures_somewhere(self, testdir):
item = testdir.getitem("def test_func():\n assert 0\n")
control = RemoteControl(item.config)
control.setup()
failures = control.runsession()
assert failures
control.setup()
item.fspath.write("def test_func():\n assert 1\n")
removepyc(item.fspath)
topdir, failures = control.runsession()[:2]
assert not failures
def test_failure_change(self, testdir):
modcol = testdir.getitem(
"""
def test_func():
assert 0
"""
)
control = RemoteControl(modcol.config)
control.loop_once()
assert control.failures
modcol.fspath.write(
py.code.Source(
"""
def test_func():
assert 1
def test_new():
assert 0
"""
)
)
removepyc(modcol.fspath)
control.loop_once()
assert not control.failures
control.loop_once()
assert control.failures
assert str(control.failures).find("test_new") != -1
def test_failure_subdir_no_init(self, testdir):
modcol = testdir.getitem(
"""
def test_func():
assert 0
"""
)
parent = modcol.fspath.dirpath().dirpath()
parent.chdir()
modcol.config.args = [
py.path.local(x).relto(parent) for x in modcol.config.args
]
control = RemoteControl(modcol.config)
control.loop_once()
assert control.failures
control.loop_once()
assert control.failures
class TestLooponFailing:
def test_looponfail_from_fail_to_ok(self, testdir):
modcol = testdir.getmodulecol(
"""
def test_one():
x = 0
assert x == 1
def test_two():
assert 1
"""
)
remotecontrol = RemoteControl(modcol.config)
remotecontrol.loop_once()
assert len(remotecontrol.failures) == 1
modcol.fspath.write(
py.code.Source(
"""
def test_one():
assert 1
def test_two():
assert 1
"""
)
)
removepyc(modcol.fspath)
remotecontrol.loop_once()
assert not remotecontrol.failures
def test_looponfail_from_one_to_two_tests(self, testdir):
modcol = testdir.getmodulecol(
"""
def test_one():
assert 0
"""
)
remotecontrol = RemoteControl(modcol.config)
remotecontrol.loop_once()
assert len(remotecontrol.failures) == 1
assert "test_one" in remotecontrol.failures[0]
modcol.fspath.write(
py.code.Source(
"""
def test_one():
assert 1 # passes now
def test_two():
assert 0 # new and fails
"""
)
)
removepyc(modcol.fspath)
remotecontrol.loop_once()
assert len(remotecontrol.failures) == 0
remotecontrol.loop_once()
assert len(remotecontrol.failures) == 1
assert "test_one" not in remotecontrol.failures[0]
assert "test_two" in remotecontrol.failures[0]
@pytest.mark.xfail(
parse_version(pytest.__version__) >= parse_version("3.1"),
reason="broken by pytest 3.1+",
strict=True,
)
def test_looponfail_removed_test(self, testdir):
modcol = testdir.getmodulecol(
"""
def test_one():
assert 0
def test_two():
assert 0
"""
)
remotecontrol = RemoteControl(modcol.config)
remotecontrol.loop_once()
assert len(remotecontrol.failures) == 2
modcol.fspath.write(
py.code.Source(
"""
def test_xxx(): # renamed test
assert 0
def test_two():
assert 1 # pass now
"""
)
)
removepyc(modcol.fspath)
remotecontrol.loop_once()
assert len(remotecontrol.failures) == 0
remotecontrol.loop_once()
assert len(remotecontrol.failures) == 1
def test_looponfail_multiple_errors(self, testdir, monkeypatch):
modcol = testdir.getmodulecol(
"""
def test_one():
assert 0
"""
)
remotecontrol = RemoteControl(modcol.config)
orig_runsession = remotecontrol.runsession
def runsession_dups():
# twisted.trial test cases may report multiple errors.
failures, reports, collection_failed = orig_runsession()
print(failures)
return failures * 2, reports, collection_failed
monkeypatch.setattr(remotecontrol, "runsession", runsession_dups)
remotecontrol.loop_once()
assert len(remotecontrol.failures) == 1
class TestFunctional:
def test_fail_to_ok(self, testdir):
p = testdir.makepyfile(
"""
def test_one():
x = 0
assert x == 1
"""
)
# p = testdir.mkdir("sub").join(p1.basename)
# p1.move(p)
child = testdir.spawn_pytest("-f %s --traceconfig" % p)
child.expect("def test_one")
child.expect("x == 1")
child.expect("1 failed")
child.expect("### LOOPONFAILING ####")
child.expect("waiting for changes")
p.write(
py.code.Source(
"""
def test_one():
x = 1
assert x == 1
"""
)
)
child.expect(".*1 passed.*")
child.kill(15)
def test_xfail_passes(self, testdir):
p = testdir.makepyfile(
"""
import py
@py.test.mark.xfail
def test_one():
pass
"""
)
child = testdir.spawn_pytest("-f %s" % p)
child.expect("1 xpass")
# child.expect("### LOOPONFAILING ####")
child.expect("waiting for changes")
child.kill(15)
def removepyc(path):
# XXX damn those pyc files
pyc = path + "c"
if pyc.check():
pyc.remove()
c = path.dirpath("__pycache__")
if c.check():
c.remove()
| RonnyPfannschmidt/pytest-xdist | testing/test_looponfail.py | Python | mit | 9,111 | [
"VisIt"
] | 3ec204d12650e563a3f04b155504aadcfdeec62ab354231872b32b9cdaa3dd2f |
# Checks all Psi4-relevant files for proper boilerplate GNU license.
# This is provided as is with no warranty -- probably should double-check everything
# after running. I am not responsible if you break Psi4.
#
# Do not forget to do share/plugins by hand!
import os
# File type we know how to handle
ftypes = ['cc', 'h', 'py']
c_header = """/*
* @BEGIN LICENSE
*
* Psi4: an open-source quantum chemistry software package
*
* Copyright (c) 2007-2017 The Psi4 Developers.
*
* The copyrights for code used from other parties are included in
* the corresponding files.
*
* This file is part of Psi4.
*
* Psi4 is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, version 3.
*
* Psi4 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License along
* with Psi4; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
* @END LICENSE
*/"""
py_header = c_header.replace(' */', '#')
py_header = py_header.replace('/*', '#')
py_header = py_header.replace(' *', '#')
c_header = c_header.splitlines()
py_header = py_header.splitlines()
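# Illustrative note (added): the replace() calls above turn the C-style block
# comment into a Python comment block, e.g. '/*' -> '#', ' * @BEGIN LICENSE' ->
# '# @BEGIN LICENSE' and ' */' -> '#', before both headers are split into lists
# of lines for splicing into source files.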
def check_header(infile):
f = open(infile, 'r+')
data = f.read().splitlines()
# Find the header location
max_lines = 30
try:
symbol = None
if filename.split('.')[-1] in ['py']:
start = data.index("# @BEGIN LICENSE") - 1
end = data.index("# @END LICENSE") + 1
if data[start] != '#' or data[end] != '#':
f.close()
print('Did not find "wings" of license block in file %s' % infile)
return
else:
start = data.index(" * @BEGIN LICENSE") - 1
end = data.index(" * @END LICENSE") + 1
if data[start] != '/*' or data[end] != ' */':
f.close()
print('Did not find "wings" of license block in file %s' % infile)
return
except:
print('Could not find license block in file %s' % infile)
f.close()
return
# Make sure the block actually looks like a license
license = data[start:end+1]
top = any("PSI4:" in x.upper() for x in license[:5])
bot = any("51 Franklin Street" in x for x in license[5:])
if not (top and bot):
print('Did not understand infile %s' % infile)
f.close()
return
# Replace license
if filename.split('.')[-1] in ['cc', 'h']:
data[start:end + 1] = c_header
elif filename.split('.')[-1] in ['py']:
data[start:end + 1] = py_header
else:
print('Did not understand infile end: %s' % infile)
f.close()
return
# Write it out
f.seek(0)
f.write("\n".join(data))
f.truncate()
f.close()
avoid_strings = ['qcdb', 'libJKFactory']
walk = list(os.walk('../../src/'))
walk += list(os.walk('../python'))
for root, dirnames, filenames in walk:
if any(x in root for x in avoid_strings):
continue
for filename in filenames:
if filename.split('.')[-1] not in ftypes:
continue
check_header(root + '/' + filename)
| rmcgibbo/psi4public | psi4/share/psi4/scripts/apply_license.py | Python | lgpl-3.0 | 3,497 | [
"Psi4"
] | 85aa70ca94899e5d7e110492f564dd53c771983ee88e79493b4762a8517cdff1 |
#
# Copyright (C) 2013-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Simulate two reacting monomers which are bonded via a harmonic potential.
The script aborts as soon as the stopping criterion in the Wang-Landau
algorithm is met: the Wang-Landau simulation runs until the Wang-Landau
potential has converged and then raises a Warning that it has converged,
effectively aborting the simulation.
With the setup of the Wang-Landau algorithm in this script, you sample the
density of states of a three-dimensional reacting harmonic oscillator as
a function of the two collective variables 1) degree of association and
2) potential energy.
The recorded Wang-Landau potential (which is updated during the simulation)
is written to the file :file:`WL_potential_out.dat`.
In this simulation setup the Wang-Landau potential is the density of states.
You can view the converged Wang-Landau potential e.g. via plotting with
gnuplot: ``splot "WL_potential_out.dat"``. As expected the three-dimensional
harmonic oscillator has a density of states which goes like
:math:`\\sqrt{E_{\\text{pot}}}`.
For a scientific description and different ways to use the algorithm please
consult https://pubs.acs.org/doi/full/10.1021/acs.jctc.6b00791
"""
import numpy as np
import espressomd
from espressomd import reaction_ensemble
from espressomd.interactions import HarmonicBond
# System parameters
#############################################################
box_l = 6 * np.sqrt(2)
# Integration parameters
#############################################################
system = espressomd.System(box_l=[box_l] * 3)
np.random.seed(seed=42)
system.time_step = 0.02
system.cell_system.skin = 0.4
#############################################################
# Setup System #
#############################################################
# Particle setup
#############################################################
# type 0 = HA
# type 1 = A-
# type 2 = H+
N0 = 1 # number of titratable units
K_diss = 0.0088
system.part.add(pos=[0, 0, 0] * system.box_l, type=3)
system.part.add(pos=[1.0, 1.0, 1.0] * system.box_l / 2.0, type=1)
system.part.add(pos=np.random.random() * system.box_l, type=2)
system.part.add(pos=np.random.random() * system.box_l, type=2)
# create a harmonic bond between the two reacting particles => the
# potential energy is quadratic in the elongation of the bond and
# therefore the density of states is known as the one of the harmonic
# oscillator
h = HarmonicBond(r_0=0, k=1)
system.bonded_inter[0] = h
system.part[0].add_bond((h, 1))
RE = reaction_ensemble.WangLandauReactionEnsemble(
temperature=1, exclusion_radius=0, seed=77)
RE.add_reaction(gamma=K_diss, reactant_types=[0], reactant_coefficients=[1],
product_types=[1, 2], product_coefficients=[1, 1],
default_charges={0: 0, 1: -1, 2: +1})
print(RE.get_status())
system.setup_type_map([0, 1, 2, 3])
# initialize wang_landau
# generate preliminary_energy_run_results here, this should be done in a
# separate simulation without energy reweighting using the update energy
# functions
np.savetxt("energy_boundaries.dat", np.c_[[0, 1], [0, 0], [9, 9]],
header="nbar E_min E_max")
RE.add_collective_variable_degree_of_association(
associated_type=0, min=0, max=1, corresponding_acid_types=[0, 1])
RE.add_collective_variable_potential_energy(
filename="energy_boundaries.dat", delta=0.05)
RE.set_wang_landau_parameters(
final_wang_landau_parameter=1e-3,
do_not_sample_reaction_partition_function=True,
full_path_to_output_filename="WL_potential_out.dat")
i = 0
while True:
RE.reaction()
RE.displacement_mc_move_for_particles_of_type(3)
| fweik/espresso | samples/wang_landau_reaction_ensemble.py | Python | gpl-3.0 | 4,376 | [
"ESPResSo"
] | 4802e9866178ddbb956e72f82072cd86d7b5abf4050bfc186ef593593749da2b |
#!/usr/bin/python
# ---------------------------------------------------------------------
# ___ ___ _ ____
# / _ \/ _ \(_) __/__ __ __
# / , _/ ___/ /\ \/ _ \/ // /
# /_/|_/_/ /_/___/ .__/\_, /
# /_/ /___/
#
# bh1750.py
# Read data from a BH1750 digital light sensor.
#
# Author : Matt Hawkins
# Date : 26/06/2018
#
# For more information please visit :
# https://www.raspberrypi-spy.co.uk/?s=bh1750
#
# ---------------------------------------------------------------------
try:
import smbus
except ImportError:
smbus = None
import time
from threading import Lock
bh_lock = Lock()
class BH1750:
DEVICE = 0x23 # Default device I2C address
POWER_DOWN = 0x00 # No active state
POWER_ON = 0x01 # Power on
RESET = 0x07 # Reset data register value
# Start measurement at 4lx resolution. Time typically 16ms.
CONTINUOUS_LOW_RES_MODE = 0x13
# Start measurement at 1lx resolution. Time typically 120ms
CONTINUOUS_HIGH_RES_MODE_1 = 0x10
# Start measurement at 0.5lx resolution. Time typically 120ms
CONTINUOUS_HIGH_RES_MODE_2 = 0x11
# Start measurement at 1lx resolution. Time typically 120ms
# Device is automatically set to Power Down after measurement.
ONE_TIME_HIGH_RES_MODE_1 = 0x20
# Start measurement at 0.5lx resolution. Time typically 120ms
# Device is automatically set to Power Down after measurement.
ONE_TIME_HIGH_RES_MODE_2 = 0x21
# Start measurement at 1lx resolution. Time typically 120ms
# Device is automatically set to Power Down after measurement.
ONE_TIME_LOW_RES_MODE = 0x23
if smbus:
# bus = smbus.SMBus(0) # Rev 1 Pi uses 0
bus = smbus.SMBus(1) # Rev 2 Pi uses 1
else:
bus = None
def __init__(self, **kwargs):
pass
def convertToNumber(self, data):
# Simple function to convert 2 bytes of data
# into a decimal light level in lux.
if data:
result = (data[1] + (256 * data[0])) / 1.2
else:
result = 0
return result
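# Illustrative note (added; the byte values are made up): for a reading of
# data = [0x01, 0x90] the result is (0x90 + 256 * 0x01) / 1.2
# = (144 + 256) / 1.2 = 333.3 lx.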
def readLight(self, addr=DEVICE):
# Read data from I2C interface
bh_lock.acquire()
if self.bus:
data = self.bus.read_i2c_block_data(addr, BH1750.ONE_TIME_HIGH_RES_MODE_1)
else:
data = None
bh_lock.release()
return self.convertToNumber(data)
def main():
bh = BH1750()
while True:
lightLevel = bh.readLight()
print("Light Level : " + format(lightLevel, '.2f') + " lx")
time.sleep(0.5)
if __name__ == "__main__":
main()
| ThomasHangstoerfer/pyHomeCtrl | bh1750.py | Python | apache-2.0 | 2,676 | [
"VisIt"
] | 2de8db3fcfb92def9d2415ef5f1612c1846ac311b895db4653098280d68a55f0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------------------------------------------------
# INFO:
# -----------------------------------------------------------------------------------------------------------------------
"""
Author: Evan Hubinger
License: Apache 2.0
Description: Installer for the Coconut Jupyter kernel.
"""
# -----------------------------------------------------------------------------------------------------------------------
# IMPORTS:
# -----------------------------------------------------------------------------------------------------------------------
from __future__ import print_function, absolute_import, unicode_literals, division
from coconut.root import * # NOQA
import sys
import os
import shutil
import json
import traceback
import time
import ast
from zlib import crc32
from warnings import warn
from types import MethodType
from coconut.constants import (
fixpath,
default_encoding,
icoconut_custom_kernel_dir,
icoconut_custom_kernel_install_loc,
icoconut_custom_kernel_file_loc,
WINDOWS,
reserved_prefix,
)
# -----------------------------------------------------------------------------------------------------------------------
# UTILITIES:
# -----------------------------------------------------------------------------------------------------------------------
def printerr(*args, **kwargs):
"""Prints to standard error."""
print(*args, file=sys.stderr, **kwargs)
def univ_open(filename, opentype="r+", encoding=None, **kwargs):
"""Open a file using default_encoding."""
if encoding is None:
encoding = default_encoding
if "b" not in opentype:
kwargs["encoding"] = encoding
# we use io.open from coconut.root here
return open(filename, opentype, **kwargs)
def checksum(data):
"""Compute a checksum of the given data.
Used for computing __coconut_hash__."""
return crc32(data) & 0xffffffff # necessary for cross-compatibility
def get_clock_time():
"""Get a time to use for performance metrics."""
if PY2:
return time.clock()
else:
return time.process_time()
class override(object):
"""Implementation of Coconut's @override for use within Coconut."""
__slots__ = ("func",)
# from _coconut_base_hashable
def __reduce_ex__(self, _):
return self.__reduce__()
def __eq__(self, other):
return self.__class__ is other.__class__ and self.__reduce__() == other.__reduce__()
def __hash__(self):
return hash(self.__reduce__())
# from override
def __init__(self, func):
self.func = func
def __get__(self, obj, objtype=None):
if obj is None:
return self.func
if PY2:
return MethodType(self.func, obj, objtype)
else:
return MethodType(self.func, obj)
def __set_name__(self, obj, name):
if not hasattr(super(obj, obj), name):
raise RuntimeError(obj.__name__ + "." + name + " marked with @override but not overriding anything")
def __reduce__(self):
return (self.__class__, (self.func,))
# -----------------------------------------------------------------------------------------------------------------------
# VERSIONING:
# -----------------------------------------------------------------------------------------------------------------------
def ver_tuple_to_str(req_ver):
"""Converts a requirement version tuple into a version string."""
return ".".join(str(x) for x in req_ver)
def ver_str_to_tuple(ver_str):
"""Convert a version string into a version tuple."""
out = []
for x in ver_str.split("."):
try:
x = int(x)
except ValueError:
pass
out.append(x)
return tuple(out)
def get_next_version(req_ver, point_to_increment=-1):
"""Get the next version after the given version."""
return req_ver[:point_to_increment] + (req_ver[point_to_increment] + 1,)
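# Illustrative note (added): ver_str_to_tuple("2.0.3") == (2, 0, 3), and
# non-numeric parts are kept as strings, e.g. ver_str_to_tuple("3.0a1") ==
# (3, '0a1'); get_next_version((2, 0, 3)) == (2, 0, 4), while
# get_next_version((2, 0, 3), point_to_increment=1) == (2, 1).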
# -----------------------------------------------------------------------------------------------------------------------
# JUPYTER KERNEL INSTALL:
# -----------------------------------------------------------------------------------------------------------------------
def get_kernel_data_files(argv):
"""Given sys.argv, write the custom kernel file and return data_files."""
if any(arg.startswith("bdist") for arg in argv):
executable = "python"
elif any(arg.startswith("install") for arg in argv):
executable = sys.executable
else:
return []
install_custom_kernel(executable)
return [
(
icoconut_custom_kernel_install_loc,
[icoconut_custom_kernel_file_loc],
),
]
def install_custom_kernel(executable=None, logger=None):
"""Force install the custom kernel."""
kernel_source = os.path.join(icoconut_custom_kernel_dir, "kernel.json")
kernel_dest = fixpath(os.path.join(sys.exec_prefix, icoconut_custom_kernel_install_loc))
try:
make_custom_kernel(executable)
if not os.path.exists(kernel_dest):
os.makedirs(kernel_dest)
shutil.copy(kernel_source, kernel_dest)
except OSError:
existing_kernel = os.path.join(kernel_dest, "kernel.json")
if os.path.exists(existing_kernel):
if logger is not None:
logger.log_exc()
errmsg = "Failed to update Coconut Jupyter kernel installation; the 'coconut' kernel might not work properly as a result"
else:
if logger is None:
traceback.print_exc()
else:
logger.print_exc()
errmsg = "Coconut Jupyter kernel installation failed due to above error"
if WINDOWS:
errmsg += " (try again from a shell that is run as administrator)"
else:
errmsg += " (try again with 'sudo')"
errmsg += "."
if logger is None:
warn(errmsg)
else:
logger.warn(errmsg)
return None
else:
return kernel_dest
def make_custom_kernel(executable=None):
"""Write custom kernel file and return its directory."""
if executable is None:
executable = sys.executable
kernel_dict = {
"argv": [executable, "-m", "coconut.icoconut", "-f", "{connection_file}"],
"display_name": "Coconut",
"language": "coconut",
}
if os.path.exists(icoconut_custom_kernel_dir):
shutil.rmtree(icoconut_custom_kernel_dir)
os.mkdir(icoconut_custom_kernel_dir)
with univ_open(os.path.join(icoconut_custom_kernel_dir, "kernel.json"), "wb") as kernel_file:
raw_json = json.dumps(kernel_dict, indent=1)
kernel_file.write(raw_json.encode(encoding=default_encoding))
return icoconut_custom_kernel_dir
# -----------------------------------------------------------------------------------------------------------------------
# PYTEST:
# -----------------------------------------------------------------------------------------------------------------------
class FixPytestNames(ast.NodeTransformer):
"""Renames invalid names added by pytest assert rewriting."""
def fix_name(self, name):
"""Make the given pytest name a valid but non-colliding identifier."""
return name.replace("@", reserved_prefix + "_pytest_")
def visit_Name(self, node):
"""Special method to visit ast.Names."""
node.id = self.fix_name(node.id)
return node
def visit_alias(self, node):
"""Special method to visit ast.aliases."""
node.asname = self.fix_name(node.asname)
return node
def pytest_rewrite_asserts(code, module_name=reserved_prefix + "_pytest_module"):
"""Uses pytest to rewrite the assert statements in the given code."""
from _pytest.assertion.rewrite import rewrite_asserts # hidden since it's not always available
module_name = module_name.encode("utf-8")
tree = ast.parse(code)
rewrite_asserts(tree, module_name)
fixed_tree = ast.fix_missing_locations(FixPytestNames().visit(tree))
return ast.unparse(fixed_tree)
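# Note (added): ast.unparse() only exists on Python 3.9+, so this helper assumes
# a reasonably recent interpreter; rewrite_asserts() is pytest's own assertion
# rewriter and is imported lazily above because it is a private API.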
| evhub/coconut | coconut/util.py | Python | apache-2.0 | 8,214 | [
"VisIt"
] | a818fb1c0010e74b14abb9dfcbea036a831a7bd5511e7d59a66d83993157d7f9 |
#!/usr/bin/env python
import sys
from glob import glob
from distutils import log
from distutils.cmd import Command
import numpy as np
# monkey-patch numpy distutils to use Cython instead of Pyrex
from build_helpers import package_check, INFO_VARS
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration(None, parent_package, top_path)
config.set_options(ignore_setup_xxx_py=True,
assume_default_configuration=True,
delegate_options_to_subpackages=True,
quiet=True)
config.add_subpackage('cviewer')
return config
################################################################################
# Dependency check
################################################################################
def _mayavi_version(pkg_name):
from mayavi import version
return version.version
def _traits_version(pkg_name):
from traits import version
return version.__version__
# Check for core dependencies
package_check('numpy', INFO_VARS['numpy_min_version'])
################################################################################
################################################################################
# For some commands, use setuptools
if len(set(('develop', 'bdist_egg', 'bdist_rpm', 'bdist', 'bdist_dumb',
'bdist_wininst', 'install_egg_info', 'egg_info', 'easy_install',
)).intersection(sys.argv)) > 0:
from setup_egg import extra_setuptools_args
package_check('networkx', INFO_VARS['networkx_min_version'])
package_check('mayavi', INFO_VARS['mayavi_min_version'],version_getter=_mayavi_version)
package_check('traits', INFO_VARS['traits_min_version'],version_getter=_traits_version)
# extra_setuptools_args can be defined from the line above, but it can
# also be defined here because setup.py has been exec'ed from
# setup_egg.py.
if not 'extra_setuptools_args' in globals():
extra_setuptools_args = dict()
def main(**extra_args):
from numpy.distutils.core import setup
setup(
name = 'Connectome Viewer',
version = INFO_VARS['version'],
author = "Stephan Gerhard",
author_email = "[email protected]",
classifiers = [c.strip() for c in """\
Development Status :: 5 - Production/Stable
Intended Audience :: Developers
Intended Audience :: Science/Research
Operating System :: OS Independent
Operating System :: POSIX
Operating System :: POSIX :: Linux
Operating System :: Unix
Programming Language :: Python
Topic :: Scientific/Engineering
Topic :: Software Development
""".splitlines() if len(c.split()) > 0],
description = "Multi-Modal MR Connectomics Framework for Analysis and Visualization",
license = "Modified BSD License",
long_description = INFO_VARS['long_description'],
maintainer = 'Stephan Gerhard',
maintainer_email = '[email protected]',
platforms = ["Linux", "Unix"],
url = 'http://www.connectomeviewer.org/',
scripts = glob('scripts/*'),
configuration = configuration,
**extra_args
)
if __name__ == "__main__":
main()
| LTS5/connectomeviewer | setup.py | Python | bsd-3-clause | 3,375 | ["Mayavi"] | 8e30d157e41386cae95b734e428e2630bf3a369229fc28f351e45a176935559a |
# -*- coding: utf-8 -*-
#!/usr/bin/env python
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2007 Johan Gonqvist <[email protected]>
# Copyright (C) 2007-2009 Gary Burton <[email protected]>
# Copyright (C) 2007-2009 Stephane Charette <[email protected]>
# Copyright (C) 2008-2009 Brian G. Matherly
# Copyright (C) 2008 Jason M. Simanek <[email protected]>
# Copyright (C) 2008-2011 Rob G. Healey <[email protected]>
# Copyright (C) 2010 Doug Blank <[email protected]>
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2010-2017 Serge Noiraud
# Copyright (C) 2011 Tim G L Lyons
# Copyright (C) 2013 Benny Malengier
# Copyright (C) 2016 Allen Crider
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Narrative Web Page generator.
Class:
RepositoryPages - Repository index page and individual Repository pages
"""
#------------------------------------------------
# python modules
#------------------------------------------------
from collections import defaultdict
from decimal import getcontext
import logging
#------------------------------------------------
# Gramps module
#------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
from gramps.gen.lib import Repository
from gramps.plugins.lib.libhtml import Html
#------------------------------------------------
# specific narrative web import
#------------------------------------------------
from gramps.plugins.webreport.basepage import BasePage
from gramps.plugins.webreport.common import (FULLCLEAR, html_escape)
_ = glocale.translation.sgettext
LOG = logging.getLogger(".NarrativeWeb")
getcontext().prec = 8
#################################################
#
# creates the Repository List Page and Repository Pages
#
#################################################
class RepositoryPages(BasePage):
"""
This class is responsible for displaying information about the 'Repository'
    database objects. It displays this information under the 'Repositories'
    tab. It is told by the 'add_instances' call which 'Repository's to display,
    and remembers the list of repositories. A single call to 'display_pages'
    displays both the Repository List (Index) page and all the individual
    Repository pages.
The base class 'BasePage' is initialised once for each page that is
displayed.
"""
def __init__(self, report):
"""
@param: report -- The instance of the main report class for this report
"""
BasePage.__init__(self, report, title="")
self.repos_dict = defaultdict(set)
def display_pages(self, title):
"""
Generate and output the pages under the Repository tab, namely the
repository index and the individual repository pages.
@param: title -- Is the title of the web page
"""
LOG.debug("obj_dict[Person]")
for item in self.report.obj_dict[Repository].items():
LOG.debug(" %s", str(item))
# set progress bar pass for Repositories
message = _('Creating repository pages')
with self.r_user.progress(_("Narrated Web Site Report"), message,
len(self.report.obj_dict[Repository]) + 1
) as step:
# Sort the repositories
repos_dict = {}
for repo_handle in self.report.obj_dict[Repository]:
repository = self.r_db.get_repository_from_handle(repo_handle)
key = repository.get_name() + str(repository.get_gramps_id())
repos_dict[key] = (repository, repo_handle)
keys = sorted(repos_dict, key=self.rlocale.sort_key)
# RepositoryListPage Class
self.repositorylistpage(self.report, title, repos_dict, keys)
idx = 1
for index, key in enumerate(keys):
(repo, handle) = repos_dict[key]
step()
idx += 1
self.repositorypage(self.report, title, repo, handle)
def repositorylistpage(self, report, title, repos_dict, keys):
"""
Create Index for repositories
@param: report -- The instance of the main report class
for this report
@param: title -- Is the title of the web page
@param: repos_dict -- The dictionary for all repositories
@param: keys -- The keys used to access repositories
"""
BasePage.__init__(self, report, title)
#inc_repos = self.report.options["inc_repository"]
output_file, sio = self.report.create_file("repositories")
repolistpage, head, body = self.write_header(_("Repositories"))
ldatec = 0
# begin RepositoryList division
with Html("div", class_="content",
id="RepositoryList") as repositorylist:
body += repositorylist
msg = self._("This page contains an index of "
"all the repositories in the "
"database, sorted by their title. "
"Clicking on a repositories’s title "
"will take you to that repositories’s page.")
repositorylist += Html("p", msg, id="description")
# begin repositories table and table head
with Html("table", class_="infolist primobjlist repolist") as table:
repositorylist += table
thead = Html("thead")
table += thead
trow = Html("tr") + (
Html("th", " ", class_="ColumnRowLabel", inline=True),
Html("th", self._("Type"), class_="ColumnType",
inline=True),
Html("th", self._("Repository |Name"), class_="ColumnName",
inline=True)
)
thead += trow
# begin table body
tbody = Html("tbody")
table += tbody
for index, key in enumerate(keys):
(repo, handle) = repos_dict[key]
trow = Html("tr")
tbody += trow
# index number
trow += Html("td", index + 1, class_="ColumnRowLabel",
inline=True)
# repository type
rtype = self._(repo.type.xml_str())
trow += Html("td", rtype, class_="ColumnType", inline=True)
# repository name and hyperlink
if repo.get_name():
trow += Html("td",
self.repository_link(handle,
repo.get_name(),
repo.get_gramps_id(),
self.uplink),
class_="ColumnName")
ldatec = repo.get_change_time()
else:
trow += Html("td", "[ untitled ]", class_="ColumnName")
# add clearline for proper styling
# add footer section
footer = self.write_footer(ldatec)
body += (FULLCLEAR, footer)
# send page out for processing
# and close the file
self.xhtml_writer(repolistpage, output_file, sio, ldatec)
def repositorypage(self, report, title, repo, handle):
"""
Create one page for one repository.
@param: report -- The instance of the main report class for this report
@param: title -- Is the title of the web page
@param: repo -- the repository to use
@param: handle -- the handle to use
"""
gid = repo.get_gramps_id()
BasePage.__init__(self, report, title, gid)
ldatec = repo.get_change_time()
output_file, sio = self.report.create_file(handle, 'repo')
self.uplink = True
repositorypage, head, body = self.write_header(_('Repositories'))
# begin RepositoryDetail division and page title
with Html("div", class_="content",
id="RepositoryDetail") as repositorydetail:
body += repositorydetail
# repository name
repositorydetail += Html("h3", html_escape(repo.name),
inline=True)
# begin repository table
with Html("table", class_="infolist repolist") as table:
repositorydetail += table
tbody = Html("tbody")
table += tbody
if not self.noid and gid:
trow = Html("tr") + (
Html("td", self._("Gramps ID"),
class_="ColumnAttribute",
inline=True),
Html("td", gid, class_="ColumnValue", inline=True)
)
tbody += trow
trow = Html("tr") + (
Html("td", self._("Type"), class_="ColumnAttribute",
inline=True),
Html("td", self._(repo.get_type().xml_str()),
class_="ColumnValue",
inline=True)
)
tbody += trow
# repository: address(es)...
# repository addresses do NOT have Sources
repo_address = self.display_addr_list(repo.get_address_list(),
False)
if repo_address is not None:
repositorydetail += repo_address
# repository: urllist
urllist = self.display_url_list(repo.get_url_list())
if urllist is not None:
repositorydetail += urllist
            # repository: notelist
notelist = self.display_note_list(repo.get_note_list())
if notelist is not None:
repositorydetail += notelist
# display Repository Referenced Sources...
ref_list = self.display_bkref_list(Repository, repo.get_handle())
if ref_list is not None:
repositorydetail += ref_list
# add clearline for proper styling
# add footer section
footer = self.write_footer(ldatec)
body += (FULLCLEAR, footer)
# send page out for processing
# and close the file
self.xhtml_writer(repositorypage, output_file, sio, ldatec)
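# Illustrative sketch (not part of the original plugin): display_pages() above keys
# each repository by name plus Gramps ID and sorts the keys with
# self.rlocale.sort_key, which is assumed to behave like a locale-aware collation
# key.  The hypothetical stand-in below shows the same pattern using the standard
# library's locale.strxfrm in place of the Gramps locale object.
def _example_sort_repositories(repositories):
    """repositories -- iterable of (name, gramps_id) tuples; hypothetical input."""
    import locale
    repos_dict = {name + str(gramps_id): (name, gramps_id)
                  for name, gramps_id in repositories}
    return [repos_dict[key] for key in sorted(repos_dict, key=locale.strxfrm)]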
| prculley/gramps | gramps/plugins/webreport/repository.py | Python | gpl-2.0 | 11,391 | ["Brian"] | b49cd6f3d43480ad180ab02f4993881fc5b20fe1824bc479d902072d23bcbd15 |
from kfhlog.models import State
def load_fixtures(dbsession):
# Canada
dbsession.add(State(dxcc=1, code='NS', name='Nova Scotia'))
dbsession.add(State(dxcc=1, code='QC', name='Québec'))
dbsession.add(State(dxcc=1, code='ON', name='Ontario'))
dbsession.add(State(dxcc=1, code='MB', name='Manitoba'))
dbsession.add(State(dxcc=1, code='SK', name='Saskatchewan'))
dbsession.add(State(dxcc=1, code='AB', name='Alberta'))
dbsession.add(State(dxcc=1, code='BC', name='British Columbia'))
dbsession.add(State(dxcc=1, code='NT', name='Northwest Territories'))
dbsession.add(State(dxcc=1, code='NB', name='New Brunswick'))
dbsession.add(State(dxcc=1, code='NL', name='Newfoundland and Labrador'))
dbsession.add(State(dxcc=1, code='YT', name='Yukon'))
dbsession.add(State(dxcc=1, code='PE', name='Prince Edward Island'))
dbsession.add(State(dxcc=1, code='NU', name='Nunavut'))
# Aland Islands
dbsession.add(State(dxcc=5, code='001', name='Brändö'))
dbsession.add(State(dxcc=5, code='002', name='Eckerö'))
dbsession.add(State(dxcc=5, code='003', name='Finström'))
dbsession.add(State(dxcc=5, code='004', name='Föglö'))
dbsession.add(State(dxcc=5, code='005', name='Geta'))
dbsession.add(State(dxcc=5, code='006', name='Hammarland'))
dbsession.add(State(dxcc=5, code='007', name='Jomala'))
dbsession.add(State(dxcc=5, code='008', name='Kumlinge'))
dbsession.add(State(dxcc=5, code='009', name='Kökar'))
dbsession.add(State(dxcc=5, code='010', name='Lemland'))
dbsession.add(State(dxcc=5, code='011', name='Lumparland'))
dbsession.add(State(dxcc=5, code='012', name='Maarianhamina'))
dbsession.add(State(dxcc=5, code='013', name='Saltvik'))
dbsession.add(State(dxcc=5, code='014', name='Sottunga'))
dbsession.add(State(dxcc=5, code='015', name='Sund'))
dbsession.add(State(dxcc=5, code='016', name='Vårdö'))
# Alaska
dbsession.add(State(dxcc=6, code='AK', name='Alaska'))
# Asiatic Russia
dbsession.add(State(dxcc=15, code='UO', name='Ust’-Ordynsky Autonomous Okrug'))
dbsession.add(State(dxcc=15, code='AB', name='Aginsky Buryatsky Autonomous Okrug'))
dbsession.add(State(dxcc=15, code='CB', name='Chelyabinsk (Chelyabinskaya oblast)'))
dbsession.add(State(dxcc=15, code='SV', name='Sverdlovskaya oblast'))
dbsession.add(State(dxcc=15, code='PM', name='Perm\' (Permskaya oblast)'))
dbsession.add(State(dxcc=15, code='KP', name='Komi-Permyatsky Autonomous Okrug'))
dbsession.add(State(dxcc=15, code='TO', name='Tomsk (Tomskaya oblast)'))
dbsession.add(State(dxcc=15, code='HM', name='Khanty-Mansyisky Autonomous Okrug'))
dbsession.add(State(dxcc=15, code='YN', name='Yamalo-Nenetsky Autonomous Okrug'))
dbsession.add(State(dxcc=15, code='TN', name='Tyumen’(Tyumenskaya oblast)'))
dbsession.add(State(dxcc=15, code='OM', name='Omsk (Omskaya oblast)'))
dbsession.add(State(dxcc=15, code='NS', name='Novosibirsk (Novosibirskaya oblast)'))
dbsession.add(State(dxcc=15, code='KN', name='Kurgan (Kurganskaya oblast)'))
dbsession.add(State(dxcc=15, code='OB', name='Orenburg (Orenburgskaya oblast)'))
dbsession.add(State(dxcc=15, code='KE', name='Kemerovo (Kemerovskaya oblast)'))
dbsession.add(State(dxcc=15, code='BA', name='Republic of Bashkortostan'))
dbsession.add(State(dxcc=15, code='KO', name='Republic of Komi'))
dbsession.add(State(dxcc=15, code='AL', name='Altaysky Kraj'))
dbsession.add(State(dxcc=15, code='GA', name='Republic Gorny Altay'))
dbsession.add(State(dxcc=15, code='KK', name='Krasnoyarsk (Krasnoyarsk Kraj)'))
dbsession.add(State(dxcc=15, code='TM', name='Taymyr Autonomous Okrug'))
dbsession.add(State(dxcc=15, code='HK', name='Khabarovsk (Khabarovsky Kraj)'))
dbsession.add(State(dxcc=15, code='EA', name='Yevreyskaya Autonomous Oblast'))
dbsession.add(State(dxcc=15, code='SL', name='Sakhalin (Sakhalinskaya oblast)'))
dbsession.add(State(dxcc=15, code='EV', name='Evenkiysky Autonomous Okrug'))
dbsession.add(State(dxcc=15, code='MG', name='Magadan (Magadanskaya oblast)'))
dbsession.add(State(dxcc=15, code='AM', name='Amurskaya oblast'))
dbsession.add(State(dxcc=15, code='CK', name='Chukotka Autonomous Okrug'))
dbsession.add(State(dxcc=15, code='PK', name='Primorsky Kraj'))
dbsession.add(State(dxcc=15, code='BU', name='Republic of Buryatia'))
dbsession.add(State(dxcc=15, code='YA', name='Sakha (Yakut) Republic'))
dbsession.add(State(dxcc=15, code='IR', name='Irkutsk (Irkutskaya oblast)'))
dbsession.add(State(dxcc=15, code='CT', name='Zabaykalsky Kraj'))
dbsession.add(State(dxcc=15, code='HA', name='Republic of Khakassia'))
dbsession.add(State(dxcc=15, code='KY', name='Koryaksky Autonomous Okrug'))
dbsession.add(State(dxcc=15, code='KT', name='Kamchatka (Kamchatskaya oblast)'))
dbsession.add(State(dxcc=15, code='TU', name='Republic of Tuva'))
    # Balearic Islands
dbsession.add(State(dxcc=21, code='IB', name='Baleares'))
# Belarus
dbsession.add(State(dxcc=27, code='MI', name='Minsk (Minskaya voblasts’)'))
dbsession.add(State(dxcc=27, code='BR', name='Brest (Brestskaya voblasts’)'))
dbsession.add(State(dxcc=27, code='HR', name='Grodno (Hrodzenskaya voblasts’)'))
dbsession.add(State(dxcc=27, code='VI', name='Vitebsk (Vitsyebskaya voblasts’)'))
dbsession.add(State(dxcc=27, code='MA', name='Mogilev (Mahilyowskaya voblasts’)'))
dbsession.add(State(dxcc=27, code='HO', name='Gomel (Homyel’ skaya voblasts’)'))
dbsession.add(State(dxcc=27, code='HM', name='Horad Minsk'))
# Canary Islands
dbsession.add(State(dxcc=29, code='GC', name='Las Palmas'))
dbsession.add(State(dxcc=29, code='TF', name='Tenerife'))
    # Ceuta & Melilla
dbsession.add(State(dxcc=32, code='CE', name='Ceuta'))
dbsession.add(State(dxcc=32, code='ML', name='Melilla'))
# Mexico
dbsession.add(State(dxcc=50, code='COL', name='Colima'))
dbsession.add(State(dxcc=50, code='DF', name='Distrito Federal'))
dbsession.add(State(dxcc=50, code='EMX', name='Estado de México'))
dbsession.add(State(dxcc=50, code='GTO', name='Guanajuato'))
dbsession.add(State(dxcc=50, code='HGO', name='Hidalgo'))
dbsession.add(State(dxcc=50, code='JAL', name='Jalisco'))
dbsession.add(State(dxcc=50, code='MIC', name='Michoacán de Ocampo'))
dbsession.add(State(dxcc=50, code='MOR', name='Morelos'))
dbsession.add(State(dxcc=50, code='NAY', name='Nayarit'))
dbsession.add(State(dxcc=50, code='PUE', name='Puebla'))
dbsession.add(State(dxcc=50, code='QRO', name='Querétaro de Arteaga'))
dbsession.add(State(dxcc=50, code='TLX', name='Tlaxcala'))
dbsession.add(State(dxcc=50, code='VER', name='Veracruz-Llave'))
dbsession.add(State(dxcc=50, code='AGS', name='Aguascalientes'))
dbsession.add(State(dxcc=50, code='BC', name='Baja California'))
dbsession.add(State(dxcc=50, code='BCS', name='Baja California Sur'))
dbsession.add(State(dxcc=50, code='CHH', name='Chihuahua'))
dbsession.add(State(dxcc=50, code='COA', name='Coahuila de Zaragoza'))
dbsession.add(State(dxcc=50, code='DGO', name='Durango'))
dbsession.add(State(dxcc=50, code='NL', name='Nuevo Leon'))
dbsession.add(State(dxcc=50, code='SLP', name='San Luis Potosí'))
dbsession.add(State(dxcc=50, code='SIN', name='Sinaloa'))
dbsession.add(State(dxcc=50, code='SON', name='Sonora'))
dbsession.add(State(dxcc=50, code='TMS', name='Tamaulipas'))
dbsession.add(State(dxcc=50, code='ZAC', name='Zacatecas'))
dbsession.add(State(dxcc=50, code='CAM', name='Campeche'))
dbsession.add(State(dxcc=50, code='CHS', name='Chiapas'))
dbsession.add(State(dxcc=50, code='GRO', name='Guerrero'))
dbsession.add(State(dxcc=50, code='OAX', name='Oaxaca'))
dbsession.add(State(dxcc=50, code='QTR', name='Quintana Roo'))
dbsession.add(State(dxcc=50, code='TAB', name='Tabasco'))
dbsession.add(State(dxcc=50, code='YUC', name='Yucatán'))
# European Russia
dbsession.add(State(dxcc=54, code='SP', name='City of St. Petersburg'))
dbsession.add(State(dxcc=54, code='LO', name='Leningradskaya oblast'))
dbsession.add(State(dxcc=54, code='KL', name='Republic of Karelia'))
dbsession.add(State(dxcc=54, code='AR', name='Arkhangelsk (Arkhangelskaya oblast)'))
dbsession.add(State(dxcc=54, code='NO', name='Nenetsky Autonomous Okrug'))
dbsession.add(State(dxcc=54, code='VO', name='Vologda (Vologodskaya oblast)'))
dbsession.add(State(dxcc=54, code='NV', name='Novgorodskaya oblast'))
dbsession.add(State(dxcc=54, code='PS', name='Pskov (Pskovskaya oblast)'))
dbsession.add(State(dxcc=54, code='MU', name='Murmansk (Murmanskaya oblast)'))
dbsession.add(State(dxcc=54, code='MA', name='City of Moscow'))
dbsession.add(State(dxcc=54, code='MO', name='Moscowskaya oblast'))
dbsession.add(State(dxcc=54, code='OR', name='Oryel (Orlovskaya oblast)'))
dbsession.add(State(dxcc=54, code='LP', name='Lipetsk (Lipetskaya oblast)'))
dbsession.add(State(dxcc=54, code='TV', name='Tver’ (Tverskayaoblast)'))
dbsession.add(State(dxcc=54, code='SM', name='Smolensk (Smolenskaya oblast)'))
dbsession.add(State(dxcc=54, code='YR', name='Yaroslavl (Yaroslavskaya oblast)'))
dbsession.add(State(dxcc=54, code='KS', name='Kostroma (Kostromskaya oblast)'))
dbsession.add(State(dxcc=54, code='TL', name='Tula (Tul’ skaya oblast)'))
dbsession.add(State(dxcc=54, code='VR', name='Voronezh (Voronezhskaya oblast)'))
dbsession.add(State(dxcc=54, code='TB', name='Tambov (Tambovskaya oblast)'))
dbsession.add(State(dxcc=54, code='RA', name='Ryazan’ (Ryazanskayaoblast)'))
dbsession.add(State(dxcc=54, code='NN', name='Nizhni Novgorod (Nizhegorodskaya oblast)'))
dbsession.add(State(dxcc=54, code='IV', name='Ivanovo (Ivanovskaya oblast)'))
dbsession.add(State(dxcc=54, code='VL', name='Vladimir (Vladimirskaya oblast)'))
dbsession.add(State(dxcc=54, code='KU', name='Kursk (Kurskaya oblast)'))
dbsession.add(State(dxcc=54, code='KG', name='Kaluga (Kaluzhskaya oblast)'))
dbsession.add(State(dxcc=54, code='BR', name='Bryansk (Bryanskaya oblast)'))
dbsession.add(State(dxcc=54, code='BO', name='Belgorod (Belgorodskaya oblast)'))
dbsession.add(State(dxcc=54, code='VG', name='Volgograd (Volgogradskaya oblast)'))
dbsession.add(State(dxcc=54, code='SA', name='Saratov (Saratovskaya oblast)'))
dbsession.add(State(dxcc=54, code='PE', name='Penza (Penzenskaya oblast)'))
dbsession.add(State(dxcc=54, code='SR', name='Samara (Samarskaya oblast)'))
dbsession.add(State(dxcc=54, code='UL', name='Ulyanovsk (Ulyanovskaya oblast)'))
dbsession.add(State(dxcc=54, code='KI', name='Kirov (Kirovskaya oblast)'))
dbsession.add(State(dxcc=54, code='TA', name='Republic of Tataria'))
dbsession.add(State(dxcc=54, code='MR', name='Republic of Marij-El'))
dbsession.add(State(dxcc=54, code='MD', name='Republic of Mordovia'))
dbsession.add(State(dxcc=54, code='UD', name='Republic of Udmurtia'))
dbsession.add(State(dxcc=54, code='CU', name='Republic of Chuvashia'))
dbsession.add(State(dxcc=54, code='KR', name='Krasnodar (Krasnodarsky Kraj)'))
dbsession.add(State(dxcc=54, code='KC', name='Republic of Karachaevo-Cherkessia'))
dbsession.add(State(dxcc=54, code='ST', name='Stavropol’ (Stavropolsky Kraj)'))
dbsession.add(State(dxcc=54, code='KM', name='Republic of Kalmykia'))
dbsession.add(State(dxcc=54, code='SO', name='Republic of Northern Ossetia'))
dbsession.add(State(dxcc=54, code='RO', name='Rostov-on-Don (Rostovskaya oblast)'))
dbsession.add(State(dxcc=54, code='CN', name='Republic Chechnya'))
dbsession.add(State(dxcc=54, code='IN', name='Republic of Ingushetia'))
dbsession.add(State(dxcc=54, code='AO', name='Astrakhan’ (Astrakhanskaya oblast)'))
dbsession.add(State(dxcc=54, code='DA', name='Republic of Daghestan'))
dbsession.add(State(dxcc=54, code='KB', name='Republic of Kabardino-Balkaria'))
dbsession.add(State(dxcc=54, code='AD', name='Republic of Adygeya'))
# Franz Josef Land
dbsession.add(State(dxcc=61, code='AR', name='Arkhangelsk (Arkhangelskaya oblast)'))
# Argentina
dbsession.add(State(dxcc=100, code='C', name='Capital federal (Buenos Aires City)'))
dbsession.add(State(dxcc=100, code='B', name='Buenos Aires Province'))
dbsession.add(State(dxcc=100, code='S', name='Santa Fe'))
dbsession.add(State(dxcc=100, code='H', name='Chaco'))
dbsession.add(State(dxcc=100, code='P', name='Formosa'))
dbsession.add(State(dxcc=100, code='X', name='Cordoba'))
dbsession.add(State(dxcc=100, code='N', name='Misiones'))
dbsession.add(State(dxcc=100, code='E', name='Entre Rios'))
dbsession.add(State(dxcc=100, code='T', name='Tucumán'))
dbsession.add(State(dxcc=100, code='W', name='Corrientes'))
dbsession.add(State(dxcc=100, code='M', name='Mendoza'))
dbsession.add(State(dxcc=100, code='G', name='Santiago del Estero'))
dbsession.add(State(dxcc=100, code='A', name='Salta'))
dbsession.add(State(dxcc=100, code='J', name='San Juan'))
dbsession.add(State(dxcc=100, code='D', name='San Luis'))
dbsession.add(State(dxcc=100, code='K', name='Catamarca'))
dbsession.add(State(dxcc=100, code='F', name='La Rioja'))
dbsession.add(State(dxcc=100, code='Y', name='Jujuy'))
dbsession.add(State(dxcc=100, code='L', name='La Pampa'))
dbsession.add(State(dxcc=100, code='R', name='Rió Negro'))
dbsession.add(State(dxcc=100, code='U', name='Chubut'))
dbsession.add(State(dxcc=100, code='Z', name='Santa Cruz'))
dbsession.add(State(dxcc=100, code='V', name='Tierra del Fuego'))
dbsession.add(State(dxcc=100, code='Q', name='Neuquén'))
# Brazil
dbsession.add(State(dxcc=108, code='ES', name='Espírito Santo'))
dbsession.add(State(dxcc=108, code='GO', name='Goiás'))
dbsession.add(State(dxcc=108, code='SC', name='Santa Catarina'))
dbsession.add(State(dxcc=108, code='SE', name='Sergipe'))
dbsession.add(State(dxcc=108, code='AL', name='Alagoas'))
dbsession.add(State(dxcc=108, code='AM', name='Amazonas'))
dbsession.add(State(dxcc=108, code='TO', name='Tocantins'))
dbsession.add(State(dxcc=108, code='AP', name='Amapã'))
dbsession.add(State(dxcc=108, code='PB', name='Paraíba'))
dbsession.add(State(dxcc=108, code='MA', name='Maranhao'))
dbsession.add(State(dxcc=108, code='RN', name='Rio Grande do Norte'))
dbsession.add(State(dxcc=108, code='PI', name='Piaui'))
    dbsession.add(State(dxcc=108, code='DF', name='Distrito Federal (Brasília)'))
dbsession.add(State(dxcc=108, code='CE', name='Ceará'))
dbsession.add(State(dxcc=108, code='AC', name='Acre'))
dbsession.add(State(dxcc=108, code='MS', name='Mato Grosso do Sul'))
dbsession.add(State(dxcc=108, code='RR', name='Roraima'))
dbsession.add(State(dxcc=108, code='RO', name='Rondônia'))
dbsession.add(State(dxcc=108, code='RJ', name='Rio de Janeiro'))
dbsession.add(State(dxcc=108, code='SP', name='Sao Paulo'))
dbsession.add(State(dxcc=108, code='RS', name='Rio Grande do Sul'))
dbsession.add(State(dxcc=108, code='MG', name='Minas Gerais'))
dbsession.add(State(dxcc=108, code='PR', name='Paranã'))
dbsession.add(State(dxcc=108, code='BA', name='Bahia'))
dbsession.add(State(dxcc=108, code='PE', name='Pernambuco'))
dbsession.add(State(dxcc=108, code='PA', name='Parã'))
dbsession.add(State(dxcc=108, code='MT', name='Mato Grosso'))
# Hawaii
dbsession.add(State(dxcc=110, code='HI', name='Hawaii'))
# Chile
dbsession.add(State(dxcc=112, code='II', name='Antofagasta'))
dbsession.add(State(dxcc=112, code='III', name='Atacama'))
dbsession.add(State(dxcc=112, code='I', name='Tarapacá'))
dbsession.add(State(dxcc=112, code='IV', name='Coquimbo'))
dbsession.add(State(dxcc=112, code='V', name='Valparaíso'))
dbsession.add(State(dxcc=112, code='RM', name='Region Metropolitana de Santiago'))
dbsession.add(State(dxcc=112, code='VI', name='Libertador General Bernardo O\'Higgins'))
dbsession.add(State(dxcc=112, code='VII', name='Maule'))
dbsession.add(State(dxcc=112, code='VIII', name='Bío-Bío'))
dbsession.add(State(dxcc=112, code='IX', name='La Araucanía'))
dbsession.add(State(dxcc=112, code='X', name='Los Lagos'))
dbsession.add(State(dxcc=112, code='XI', name='Aisén del General Carlos Ibáñez del Campo'))
dbsession.add(State(dxcc=112, code='XII', name='Magallanes'))
# Kaliningrad
    dbsession.add(State(dxcc=126, code='KA', name='Kaliningrad (Kaliningradskaya oblast)'))
# Paraguay
dbsession.add(State(dxcc=132, code='16', name='Alto Paraguay'))
dbsession.add(State(dxcc=132, code='19', name='Boquerón'))
dbsession.add(State(dxcc=132, code='15', name='Presidente Hayes'))
dbsession.add(State(dxcc=132, code='13', name='Amambay'))
dbsession.add(State(dxcc=132, code='01', name='Concepción'))
dbsession.add(State(dxcc=132, code='14', name='Canindeyú'))
dbsession.add(State(dxcc=132, code='02', name='San Pedro'))
dbsession.add(State(dxcc=132, code='ASU', name='Asunción'))
dbsession.add(State(dxcc=132, code='11', name='Central'))
dbsession.add(State(dxcc=132, code='03', name='Cordillera'))
dbsession.add(State(dxcc=132, code='09', name='Paraguarí'))
    dbsession.add(State(dxcc=132, code='06', name='Caazapá'))
    dbsession.add(State(dxcc=132, code='05', name='Caaguazú'))
    dbsession.add(State(dxcc=132, code='04', name='Guairá'))
    dbsession.add(State(dxcc=132, code='08', name='Misiones'))
dbsession.add(State(dxcc=132, code='12', name='Ñeembucu'))
dbsession.add(State(dxcc=132, code='10', name='Alto Paraná'))
dbsession.add(State(dxcc=132, code='07', name='Itapua'))
# Republic of Korea
dbsession.add(State(dxcc=137, code='A', name='Seoul (Seoul Teugbyeolsi)'))
dbsession.add(State(dxcc=137, code='N', name='Inchon (Incheon Gwang\'yeogsi)'))
dbsession.add(State(dxcc=137, code='D', name='Kangwon-do (Gang \'weondo)'))
dbsession.add(State(dxcc=137, code='C', name='Kyunggi-do (Gyeonggido)'))
dbsession.add(State(dxcc=137, code='E', name='Choongchungbuk-do (Chungcheongbugdo)'))
dbsession.add(State(dxcc=137, code='F', name='Choongchungnam-do (Chungcheongnamdo)'))
dbsession.add(State(dxcc=137, code='R', name='Taejon (Daejeon Gwang\'yeogsi)'))
dbsession.add(State(dxcc=137, code='M', name='Cheju-do (Jejudo)'))
dbsession.add(State(dxcc=137, code='G', name='Chollabuk-do (Jeonrabugdo)'))
dbsession.add(State(dxcc=137, code='H', name='Chollanam-do (Jeonranamdo)'))
dbsession.add(State(dxcc=137, code='Q', name='Kwangju (Gwangju Gwang\'yeogsi)'))
dbsession.add(State(dxcc=137, code='K', name='Kyungsangbuk-do (Gyeongsangbugdo)'))
dbsession.add(State(dxcc=137, code='L', name='Kyungsangnam-do (Gyeongsangnamdo)'))
dbsession.add(State(dxcc=137, code='B', name='Pusan (Busan Gwang\'yeogsi)'))
dbsession.add(State(dxcc=137, code='P', name='Taegu (Daegu Gwang\'yeogsi)'))
dbsession.add(State(dxcc=137, code='S', name='Ulsan (Ulsan Gwanq\'yeogsi)'))
# Kure Island
dbsession.add(State(dxcc=138, code='KI', name='Kure Island'))
# Uruguay
dbsession.add(State(dxcc=144, code='MO', name='Montevideo'))
dbsession.add(State(dxcc=144, code='CA', name='Canelones'))
dbsession.add(State(dxcc=144, code='SJ', name='San José'))
dbsession.add(State(dxcc=144, code='CO', name='Colonia'))
dbsession.add(State(dxcc=144, code='SO', name='Soriano'))
dbsession.add(State(dxcc=144, code='RN', name='Rio Negro'))
dbsession.add(State(dxcc=144, code='PA', name='Paysandu'))
dbsession.add(State(dxcc=144, code='SA', name='Salto'))
    dbsession.add(State(dxcc=144, code='AR', name='Artigas'))
dbsession.add(State(dxcc=144, code='FD', name='Florida'))
dbsession.add(State(dxcc=144, code='FS', name='Flores'))
dbsession.add(State(dxcc=144, code='DU', name='Durazno'))
dbsession.add(State(dxcc=144, code='TA', name='Tacuarembo'))
dbsession.add(State(dxcc=144, code='RV', name='Rivera'))
dbsession.add(State(dxcc=144, code='MA', name='Maldonado'))
dbsession.add(State(dxcc=144, code='LA', name='Lavalleja'))
dbsession.add(State(dxcc=144, code='RO', name='Rocha'))
dbsession.add(State(dxcc=144, code='TT', name='Treinta y Tres'))
dbsession.add(State(dxcc=144, code='CL', name='Cerro Largo'))
# Lord Howe Islands
dbsession.add(State(dxcc=147, code='LH', name='Lord Howe Islands'))
# Venezuela
dbsession.add(State(dxcc=148, code='AM', name='Amazonas'))
dbsession.add(State(dxcc=148, code='AN', name='Anzoátegui'))
dbsession.add(State(dxcc=148, code='AP', name='Apure'))
dbsession.add(State(dxcc=148, code='AR', name='Aragua'))
dbsession.add(State(dxcc=148, code='BA', name='Barinas'))
dbsession.add(State(dxcc=148, code='BO', name='Bolívar'))
dbsession.add(State(dxcc=148, code='CA', name='Carabobo'))
dbsession.add(State(dxcc=148, code='CO', name='Cojedes'))
dbsession.add(State(dxcc=148, code='DA', name='Delta Amacuro'))
dbsession.add(State(dxcc=148, code='DC', name='Distrito Capital'))
dbsession.add(State(dxcc=148, code='FA', name='Falcón'))
dbsession.add(State(dxcc=148, code='GU', name='Guárico'))
dbsession.add(State(dxcc=148, code='LA', name='Lara'))
dbsession.add(State(dxcc=148, code='ME', name='Mérida'))
dbsession.add(State(dxcc=148, code='MI', name='Miranda'))
dbsession.add(State(dxcc=148, code='MO', name='Monagas'))
dbsession.add(State(dxcc=148, code='NE', name='Nueva Esparta'))
dbsession.add(State(dxcc=148, code='PO', name='Portuguesa'))
dbsession.add(State(dxcc=148, code='SU', name='Sucre'))
dbsession.add(State(dxcc=148, code='TA', name='Táchira'))
dbsession.add(State(dxcc=148, code='TR', name='Trujillo'))
dbsession.add(State(dxcc=148, code='VA', name='Vargas'))
dbsession.add(State(dxcc=148, code='YA', name='Yaracuy'))
dbsession.add(State(dxcc=148, code='ZU', name='Zulia'))
# Azores
dbsession.add(State(dxcc=149, code='AC', name='Açores'))
# Australia
dbsession.add(State(dxcc=150, code='ACT', name='Australian Capital Territory'))
dbsession.add(State(dxcc=150, code='NSW', name='New South Wales'))
dbsession.add(State(dxcc=150, code='VIC', name='Victoria'))
dbsession.add(State(dxcc=150, code='QLD', name='Queensland'))
dbsession.add(State(dxcc=150, code='SA', name='South Australia'))
dbsession.add(State(dxcc=150, code='WA', name='Western Australia'))
dbsession.add(State(dxcc=150, code='TAS', name='Tasmania'))
dbsession.add(State(dxcc=150, code='NT', name='Northern Territory'))
# Malyj Vysotskij
dbsession.add(State(dxcc=151, code='LO', name='Leningradskaya Oblast'))
# Macquarie Islands
dbsession.add(State(dxcc=153, code='MA', name='Macquarie Islands'))
# Papua New Guinea
dbsession.add(State(dxcc=163, code='NCD', name='National Capital District (Port Moresby)'))
dbsession.add(State(dxcc=163, code='CPM', name='Central'))
dbsession.add(State(dxcc=163, code='CPK', name='Chimbu'))
dbsession.add(State(dxcc=163, code='EHG', name='Eastern Highlands'))
dbsession.add(State(dxcc=163, code='EBR', name='East New Britain'))
dbsession.add(State(dxcc=163, code='ESW', name='East Sepik'))
dbsession.add(State(dxcc=163, code='EPW', name='Enga'))
dbsession.add(State(dxcc=163, code='GPK', name='Gulf'))
dbsession.add(State(dxcc=163, code='MPM', name='Madang'))
dbsession.add(State(dxcc=163, code='MRL', name='Manus'))
dbsession.add(State(dxcc=163, code='MBA', name='Milne Bay'))
dbsession.add(State(dxcc=163, code='MPL', name='Morobe'))
dbsession.add(State(dxcc=163, code='NIK', name='New Ireland'))
dbsession.add(State(dxcc=163, code='NPP', name='Northern'))
dbsession.add(State(dxcc=163, code='NSA', name='North Solomons'))
dbsession.add(State(dxcc=163, code='SAN', name='Santaun'))
dbsession.add(State(dxcc=163, code='SHM', name='Southern Highlands'))
dbsession.add(State(dxcc=163, code='WPD', name='Western'))
dbsession.add(State(dxcc=163, code='WHM', name='Western Highlands'))
dbsession.add(State(dxcc=163, code='WBR', name='West New Britain'))
# New Zealand
dbsession.add(State(dxcc=170, code='AUK', name='Auckland'))
dbsession.add(State(dxcc=170, code='BOP', name='Bay of Plenty'))
dbsession.add(State(dxcc=170, code='NTL', name='Northland'))
dbsession.add(State(dxcc=170, code='WKO', name='Waikato'))
dbsession.add(State(dxcc=170, code='GIS', name='Gisborne'))
dbsession.add(State(dxcc=170, code='HKB', name='Hawkes Bay'))
dbsession.add(State(dxcc=170, code='MWT', name='Manawatu-Wanganui'))
dbsession.add(State(dxcc=170, code='TKI', name='Taranaki'))
dbsession.add(State(dxcc=170, code='WGN', name='Wellington'))
dbsession.add(State(dxcc=170, code='CAN', name='Canterbury'))
dbsession.add(State(dxcc=170, code='MBH', name='Marlborough'))
dbsession.add(State(dxcc=170, code='NSN', name='Nelson'))
dbsession.add(State(dxcc=170, code='TAS', name='Tasman'))
dbsession.add(State(dxcc=170, code='WTC', name='West Coast'))
dbsession.add(State(dxcc=170, code='OTA', name='Otago'))
dbsession.add(State(dxcc=170, code='STL', name='Southland'))
# Minami Torishima
dbsession.add(State(dxcc=177, code='MT', name='Minami Torishima'))
# Ogasawara
dbsession.add(State(dxcc=192, code='O', name='Ogasawara'))
# Austria
dbsession.add(State(dxcc=206, code='WC', name='Wien'))
dbsession.add(State(dxcc=206, code='HA', name='Hallein'))
dbsession.add(State(dxcc=206, code='JO', name='St. Johann'))
dbsession.add(State(dxcc=206, code='SC', name='Salzburg'))
dbsession.add(State(dxcc=206, code='SL', name='Salzburg-Land'))
dbsession.add(State(dxcc=206, code='TA', name='Tamsweg'))
dbsession.add(State(dxcc=206, code='ZE', name='Zell Am See'))
dbsession.add(State(dxcc=206, code='AM', name='Amstetten'))
dbsession.add(State(dxcc=206, code='BL', name='Bruck/Leitha'))
dbsession.add(State(dxcc=206, code='BN', name='Baden'))
dbsession.add(State(dxcc=206, code='GD', name='Gmünd'))
dbsession.add(State(dxcc=206, code='GF', name='Gänserndorf'))
dbsession.add(State(dxcc=206, code='HL', name='Hollabrunn'))
dbsession.add(State(dxcc=206, code='HO', name='Horn'))
dbsession.add(State(dxcc=206, code='KO', name='Korneuburg'))
dbsession.add(State(dxcc=206, code='KR', name='Krems-Region'))
dbsession.add(State(dxcc=206, code='KS', name='Krems'))
dbsession.add(State(dxcc=206, code='LF', name='Lilienfeld'))
dbsession.add(State(dxcc=206, code='MD', name='Mödling'))
dbsession.add(State(dxcc=206, code='ME', name='Melk'))
dbsession.add(State(dxcc=206, code='MI', name='Mistelbach'))
dbsession.add(State(dxcc=206, code='NK', name='Neunkirchen'))
dbsession.add(State(dxcc=206, code='PC', name='St. Pölten'))
dbsession.add(State(dxcc=206, code='PL', name='St. Pölten-Land'))
dbsession.add(State(dxcc=206, code='SB', name='Scheibbs'))
dbsession.add(State(dxcc=206, code='SW', name='Schwechat'))
dbsession.add(State(dxcc=206, code='TU', name='Tulln'))
dbsession.add(State(dxcc=206, code='WB', name='Wr.Neustadt-Bezirk'))
dbsession.add(State(dxcc=206, code='WN', name='Wr.Neustadt'))
dbsession.add(State(dxcc=206, code='WT', name='Waidhofen/Thaya'))
dbsession.add(State(dxcc=206, code='WU', name='Wien-Umgebung'))
dbsession.add(State(dxcc=206, code='WY', name='Waidhofen/Ybbs'))
dbsession.add(State(dxcc=206, code='ZT', name='Zwettl'))
dbsession.add(State(dxcc=206, code='EC', name='Eisenstadt'))
dbsession.add(State(dxcc=206, code='EU', name='Eisenstadt-Umgebung'))
dbsession.add(State(dxcc=206, code='GS', name='Güssing'))
dbsession.add(State(dxcc=206, code='JE', name='Jennersdorf'))
dbsession.add(State(dxcc=206, code='MA', name='Mattersburg'))
dbsession.add(State(dxcc=206, code='ND', name='Neusiedl/See'))
dbsession.add(State(dxcc=206, code='OP', name='Oberpullendorf'))
dbsession.add(State(dxcc=206, code='OW', name='Oberwart'))
dbsession.add(State(dxcc=206, code='BR', name='Braunau/Inn'))
dbsession.add(State(dxcc=206, code='EF', name='Eferding'))
dbsession.add(State(dxcc=206, code='FR', name='Freistadt'))
dbsession.add(State(dxcc=206, code='GM', name='Gmunden'))
dbsession.add(State(dxcc=206, code='GR', name='Grieskirchen'))
dbsession.add(State(dxcc=206, code='KI', name='Kirchdorf'))
dbsession.add(State(dxcc=206, code='LC', name='Linz'))
dbsession.add(State(dxcc=206, code='LL', name='Linz-Land'))
dbsession.add(State(dxcc=206, code='PE', name='Perg'))
dbsession.add(State(dxcc=206, code='RI', name='Ried/Innkreis'))
dbsession.add(State(dxcc=206, code='RO', name='Rohrbach'))
dbsession.add(State(dxcc=206, code='SD', name='Schärding'))
dbsession.add(State(dxcc=206, code='SE', name='Steyr-Land'))
dbsession.add(State(dxcc=206, code='SR', name='Steyr'))
dbsession.add(State(dxcc=206, code='UU', name='Urfahr'))
dbsession.add(State(dxcc=206, code='VB', name='Vöcklabruck'))
dbsession.add(State(dxcc=206, code='WE', name='Wels'))
dbsession.add(State(dxcc=206, code='WL', name='Wels-Land'))
dbsession.add(State(dxcc=206, code='BA', name='Bad Aussee'))
dbsession.add(State(dxcc=206, code='BM', name='Bruck-Mürzzuschlag'))
dbsession.add(State(dxcc=206, code='DL', name='Deutschlandsberg'))
dbsession.add(State(dxcc=206, code='FB', name='Feldbach'))
dbsession.add(State(dxcc=206, code='FF', name='Fürstenfeld'))
dbsession.add(State(dxcc=206, code='GB', name='Gröbming'))
dbsession.add(State(dxcc=206, code='GC', name='Graz'))
dbsession.add(State(dxcc=206, code='GU', name='Graz-Umgebung'))
dbsession.add(State(dxcc=206, code='HB', name='Hartberg'))
dbsession.add(State(dxcc=206, code='HF', name='Hartberg-Fürstenfeld'))
dbsession.add(State(dxcc=206, code='JU', name='Judenburg'))
dbsession.add(State(dxcc=206, code='KF', name='Knittelfeld'))
dbsession.add(State(dxcc=206, code='LB', name='Leibnitz'))
dbsession.add(State(dxcc=206, code='LE', name='Leoben'))
dbsession.add(State(dxcc=206, code='LI', name='Liezen'))
dbsession.add(State(dxcc=206, code='LN', name='Leoben-Land'))
dbsession.add(State(dxcc=206, code='MT', name='Murtal'))
dbsession.add(State(dxcc=206, code='MU', name='Murau'))
dbsession.add(State(dxcc=206, code='MZ', name='Mürzzuschlag'))
dbsession.add(State(dxcc=206, code='RA', name='Radkersburg'))
dbsession.add(State(dxcc=206, code='SO', name='Südoststeiermark'))
dbsession.add(State(dxcc=206, code='VO', name='Voitsberg'))
dbsession.add(State(dxcc=206, code='WZ', name='Weiz'))
dbsession.add(State(dxcc=206, code='IC', name='Innsbruck'))
dbsession.add(State(dxcc=206, code='IL', name='Innsbruck-Land'))
dbsession.add(State(dxcc=206, code='IM', name='Imst'))
dbsession.add(State(dxcc=206, code='KB', name='Kitzbühel'))
dbsession.add(State(dxcc=206, code='KU', name='Kufstein'))
dbsession.add(State(dxcc=206, code='LA', name='Landeck'))
dbsession.add(State(dxcc=206, code='LZ', name='Lienz'))
dbsession.add(State(dxcc=206, code='RE', name='Reutte'))
dbsession.add(State(dxcc=206, code='SZ', name='Schwaz'))
dbsession.add(State(dxcc=206, code='FE', name='Feldkirchen'))
dbsession.add(State(dxcc=206, code='HE', name='Hermagor'))
dbsession.add(State(dxcc=206, code='KC', name='Klagenfurt'))
dbsession.add(State(dxcc=206, code='KL', name='Klagenfurt-Land'))
dbsession.add(State(dxcc=206, code='SP', name='Spittal/Drau'))
dbsession.add(State(dxcc=206, code='SV', name='St.Veit/Glan'))
dbsession.add(State(dxcc=206, code='VI', name='Villach'))
dbsession.add(State(dxcc=206, code='VK', name='Völkermarkt'))
dbsession.add(State(dxcc=206, code='VL', name='Villach-Land'))
dbsession.add(State(dxcc=206, code='WO', name='Wolfsberg'))
dbsession.add(State(dxcc=206, code='BC', name='Bregenz'))
dbsession.add(State(dxcc=206, code='BZ', name='Bludenz'))
dbsession.add(State(dxcc=206, code='DO', name='Dornbirn'))
dbsession.add(State(dxcc=206, code='FK', name='Feldkirch'))
# Belgium
dbsession.add(State(dxcc=209, code='AN', name='Antwerpen'))
dbsession.add(State(dxcc=209, code='BR', name='Brussels'))
dbsession.add(State(dxcc=209, code='BW', name='Brabant Wallon'))
dbsession.add(State(dxcc=209, code='HT', name='Hainaut'))
dbsession.add(State(dxcc=209, code='LB', name='Limburg'))
dbsession.add(State(dxcc=209, code='LG', name='Liêge'))
dbsession.add(State(dxcc=209, code='NM', name='Namur'))
dbsession.add(State(dxcc=209, code='LU', name='Luxembourg'))
dbsession.add(State(dxcc=209, code='OV', name='Oost-Vlaanderen'))
dbsession.add(State(dxcc=209, code='VB', name='Vlaams Brabant'))
dbsession.add(State(dxcc=209, code='WV', name='West-Vlaanderen'))
# Bulgaria
dbsession.add(State(dxcc=212, code='BU', name='Burgas'))
dbsession.add(State(dxcc=212, code='SL', name='Sliven'))
dbsession.add(State(dxcc=212, code='YA', name='Yambol (Jambol)'))
dbsession.add(State(dxcc=212, code='SO', name='Sofija Grad'))
dbsession.add(State(dxcc=212, code='HA', name='Haskovo'))
dbsession.add(State(dxcc=212, code='KA', name='Kărdžali'))
dbsession.add(State(dxcc=212, code='SZ', name='Stara Zagora'))
dbsession.add(State(dxcc=212, code='PA', name='Pazardžik'))
dbsession.add(State(dxcc=212, code='PD', name='Plovdiv'))
dbsession.add(State(dxcc=212, code='SM', name='Smoljan'))
dbsession.add(State(dxcc=212, code='BL', name='Blagoevgrad'))
dbsession.add(State(dxcc=212, code='KD', name='Kjustendil'))
dbsession.add(State(dxcc=212, code='PK', name='Pernik'))
dbsession.add(State(dxcc=212, code='SF', name='Sofija (Sofia)'))
dbsession.add(State(dxcc=212, code='GA', name='Gabrovo'))
dbsession.add(State(dxcc=212, code='LV', name='Loveč (Lovech)'))
dbsession.add(State(dxcc=212, code='PL', name='Pleven'))
dbsession.add(State(dxcc=212, code='VT', name='Veliko Tărnovo'))
dbsession.add(State(dxcc=212, code='MN', name='Montana'))
dbsession.add(State(dxcc=212, code='VD', name='Vidin'))
dbsession.add(State(dxcc=212, code='VR', name='Vraca'))
dbsession.add(State(dxcc=212, code='RZ', name='Razgrad'))
dbsession.add(State(dxcc=212, code='RS', name='Ruse'))
dbsession.add(State(dxcc=212, code='SS', name='Silistra'))
dbsession.add(State(dxcc=212, code='TA', name='Tărgovište'))
dbsession.add(State(dxcc=212, code='DO', name='Dobrič'))
dbsession.add(State(dxcc=212, code='SN', name='Šumen'))
dbsession.add(State(dxcc=212, code='VN', name='Varna'))
# Corsica
dbsession.add(State(dxcc=214, code='2A', name='Corse-du-Sud'))
dbsession.add(State(dxcc=214, code='2B', name='Haute-Corse'))
# Denmark
dbsession.add(State(dxcc=221, code='015', name='Koebenhavns amt'))
dbsession.add(State(dxcc=221, code='020', name='Frederiksborg amt'))
dbsession.add(State(dxcc=221, code='025', name='Roskilde amt'))
dbsession.add(State(dxcc=221, code='030', name='Vestsjaellands amt'))
dbsession.add(State(dxcc=221, code='035', name='Storstrøm amt (Storstroems)'))
dbsession.add(State(dxcc=221, code='040', name='Bornholms amt'))
dbsession.add(State(dxcc=221, code='042', name='Fyns amt'))
dbsession.add(State(dxcc=221, code='050', name='Sínderjylland amt (Sydjyllands)'))
dbsession.add(State(dxcc=221, code='055', name='Ribe amt'))
dbsession.add(State(dxcc=221, code='060', name='Vejle amt'))
dbsession.add(State(dxcc=221, code='065', name='Ringkøbing amt (Ringkoebing)'))
dbsession.add(State(dxcc=221, code='070', name='Århus amt (Aarhus)'))
dbsession.add(State(dxcc=221, code='076', name='Viborg amt'))
dbsession.add(State(dxcc=221, code='080', name='Nordjyllands amt'))
dbsession.add(State(dxcc=221, code='101', name='Copenhagen City'))
dbsession.add(State(dxcc=221, code='147', name='Frederiksberg'))
# Finland
dbsession.add(State(dxcc=224, code='100', name='Somero'))
dbsession.add(State(dxcc=224, code='102', name='Alastaro'))
dbsession.add(State(dxcc=224, code='103', name='Askainen'))
dbsession.add(State(dxcc=224, code='104', name='Aura'))
dbsession.add(State(dxcc=224, code='105', name='Dragsfjärd'))
dbsession.add(State(dxcc=224, code='106', name='Eura'))
dbsession.add(State(dxcc=224, code='107', name='Eurajoki'))
dbsession.add(State(dxcc=224, code='108', name='Halikko'))
dbsession.add(State(dxcc=224, code='109', name='Harjavalta'))
dbsession.add(State(dxcc=224, code='110', name='Honkajoki'))
dbsession.add(State(dxcc=224, code='111', name='Houtskari'))
dbsession.add(State(dxcc=224, code='112', name='Huittinen'))
dbsession.add(State(dxcc=224, code='115', name='Iniö'))
dbsession.add(State(dxcc=224, code='116', name='Jämijärvi'))
dbsession.add(State(dxcc=224, code='117', name='Kaarina'))
dbsession.add(State(dxcc=224, code='119', name='Kankaanpää'))
dbsession.add(State(dxcc=224, code='120', name='Karinainen'))
dbsession.add(State(dxcc=224, code='122', name='Karvia'))
dbsession.add(State(dxcc=224, code='123', name='Äetsä'))
dbsession.add(State(dxcc=224, code='124', name='Kemiö'))
dbsession.add(State(dxcc=224, code='126', name='Kiikala'))
dbsession.add(State(dxcc=224, code='128', name='Kiikoinen'))
dbsession.add(State(dxcc=224, code='129', name='Kisko'))
dbsession.add(State(dxcc=224, code='130', name='Kiukainen'))
dbsession.add(State(dxcc=224, code='131', name='Kodisjoki'))
dbsession.add(State(dxcc=224, code='132', name='Kokemäki'))
dbsession.add(State(dxcc=224, code='133', name='Korppoo'))
dbsession.add(State(dxcc=224, code='134', name='Koski tl'))
dbsession.add(State(dxcc=224, code='135', name='Kullaa'))
dbsession.add(State(dxcc=224, code='136', name='Kustavi'))
dbsession.add(State(dxcc=224, code='137', name='Kuusjoki'))
dbsession.add(State(dxcc=224, code='138', name='Köyliö'))
dbsession.add(State(dxcc=224, code='139', name='Laitila'))
dbsession.add(State(dxcc=224, code='140', name='Lappi'))
dbsession.add(State(dxcc=224, code='141', name='Lavia'))
dbsession.add(State(dxcc=224, code='142', name='Lemu'))
dbsession.add(State(dxcc=224, code='143', name='Lieto'))
dbsession.add(State(dxcc=224, code='144', name='Loimaa'))
dbsession.add(State(dxcc=224, code='145', name='Loimaan kunta'))
dbsession.add(State(dxcc=224, code='147', name='Luvia'))
dbsession.add(State(dxcc=224, code='148', name='Marttila'))
dbsession.add(State(dxcc=224, code='149', name='Masku'))
dbsession.add(State(dxcc=224, code='150', name='Mellilä'))
dbsession.add(State(dxcc=224, code='151', name='Merikarvia'))
dbsession.add(State(dxcc=224, code='152', name='Merimasku'))
dbsession.add(State(dxcc=224, code='154', name='Mietoinen'))
dbsession.add(State(dxcc=224, code='156', name='Muurla'))
dbsession.add(State(dxcc=224, code='157', name='Mynämäki'))
dbsession.add(State(dxcc=224, code='158', name='Naantali'))
dbsession.add(State(dxcc=224, code='159', name='Nakkila'))
dbsession.add(State(dxcc=224, code='160', name='Nauvo'))
dbsession.add(State(dxcc=224, code='161', name='Noormarkku'))
dbsession.add(State(dxcc=224, code='162', name='Nousiainen'))
dbsession.add(State(dxcc=224, code='163', name='Oripää'))
dbsession.add(State(dxcc=224, code='164', name='Paimio'))
dbsession.add(State(dxcc=224, code='165', name='Parainen'))
dbsession.add(State(dxcc=224, code='167', name='Perniö'))
dbsession.add(State(dxcc=224, code='168', name='Pertteli'))
dbsession.add(State(dxcc=224, code='169', name='Piikkiö'))
dbsession.add(State(dxcc=224, code='170', name='Pomarkku'))
dbsession.add(State(dxcc=224, code='171', name='Pori'))
dbsession.add(State(dxcc=224, code='172', name='Punkalaidun'))
dbsession.add(State(dxcc=224, code='173', name='Pyhäranta'))
dbsession.add(State(dxcc=224, code='174', name='Pöytyä'))
dbsession.add(State(dxcc=224, code='175', name='Raisio'))
dbsession.add(State(dxcc=224, code='176', name='Rauma'))
dbsession.add(State(dxcc=224, code='178', name='Rusko'))
dbsession.add(State(dxcc=224, code='179', name='Rymättylä'))
dbsession.add(State(dxcc=224, code='180', name='Salo'))
dbsession.add(State(dxcc=224, code='181', name='Sauvo'))
dbsession.add(State(dxcc=224, code='182', name='Siikainen'))
dbsession.add(State(dxcc=224, code='183', name='Suodenniemi'))
dbsession.add(State(dxcc=224, code='184', name='Suomusjärvi'))
dbsession.add(State(dxcc=224, code='185', name='Säkylä'))
dbsession.add(State(dxcc=224, code='186', name='Särkisalo'))
dbsession.add(State(dxcc=224, code='187', name='Taivassalo'))
dbsession.add(State(dxcc=224, code='188', name='Tarvasjoki'))
dbsession.add(State(dxcc=224, code='189', name='Turku'))
dbsession.add(State(dxcc=224, code='190', name='Ulvila'))
dbsession.add(State(dxcc=224, code='191', name='Uusikaupunki'))
dbsession.add(State(dxcc=224, code='192', name='Vahto'))
dbsession.add(State(dxcc=224, code='193', name='Vammala'))
dbsession.add(State(dxcc=224, code='194', name='Vampula'))
dbsession.add(State(dxcc=224, code='195', name='Vehmaa'))
dbsession.add(State(dxcc=224, code='196', name='Velkua'))
dbsession.add(State(dxcc=224, code='198', name='Västanfjärd'))
dbsession.add(State(dxcc=224, code='199', name='Yläne'))
dbsession.add(State(dxcc=224, code='201', name='Artjärvi'))
dbsession.add(State(dxcc=224, code='202', name='Askola'))
dbsession.add(State(dxcc=224, code='204', name='Espoo'))
dbsession.add(State(dxcc=224, code='205', name='Hanko'))
dbsession.add(State(dxcc=224, code='206', name='Helsinki'))
dbsession.add(State(dxcc=224, code='207', name='Hyvinkää'))
dbsession.add(State(dxcc=224, code='208', name='Inkoo'))
dbsession.add(State(dxcc=224, code='209', name='Järvenpää'))
dbsession.add(State(dxcc=224, code='210', name='Karjaa'))
dbsession.add(State(dxcc=224, code='211', name='Karjalohja'))
dbsession.add(State(dxcc=224, code='212', name='Karkkila'))
dbsession.add(State(dxcc=224, code='213', name='Kauniainen'))
dbsession.add(State(dxcc=224, code='214', name='Kerava'))
dbsession.add(State(dxcc=224, code='215', name='Kirkkonummi'))
dbsession.add(State(dxcc=224, code='216', name='Lapinjärvi'))
dbsession.add(State(dxcc=224, code='217', name='Liljendal'))
dbsession.add(State(dxcc=224, code='218', name='Lohjan kaupunki'))
dbsession.add(State(dxcc=224, code='220', name='Loviisa'))
dbsession.add(State(dxcc=224, code='221', name='Myrskylä'))
dbsession.add(State(dxcc=224, code='222', name='Mäntsälä'))
dbsession.add(State(dxcc=224, code='223', name='Nummi-Pusula'))
dbsession.add(State(dxcc=224, code='224', name='Nurmijärvi'))
dbsession.add(State(dxcc=224, code='225', name='Orimattila'))
dbsession.add(State(dxcc=224, code='226', name='Pernaja'))
dbsession.add(State(dxcc=224, code='227', name='Pohja'))
dbsession.add(State(dxcc=224, code='228', name='Pornainen'))
dbsession.add(State(dxcc=224, code='229', name='Porvoo'))
dbsession.add(State(dxcc=224, code='231', name='Pukkila'))
dbsession.add(State(dxcc=224, code='233', name='Ruotsinpyhtää'))
dbsession.add(State(dxcc=224, code='234', name='Sammatti'))
dbsession.add(State(dxcc=224, code='235', name='Sipoo'))
dbsession.add(State(dxcc=224, code='236', name='Siuntio'))
dbsession.add(State(dxcc=224, code='238', name='Tammisaari'))
dbsession.add(State(dxcc=224, code='241', name='Tuusula'))
dbsession.add(State(dxcc=224, code='242', name='Vantaa'))
dbsession.add(State(dxcc=224, code='243', name='Vihti'))
dbsession.add(State(dxcc=224, code='301', name='Asikkala'))
dbsession.add(State(dxcc=224, code='303', name='Forssa'))
dbsession.add(State(dxcc=224, code='304', name='Hattula'))
dbsession.add(State(dxcc=224, code='305', name='Hauho'))
dbsession.add(State(dxcc=224, code='306', name='Hausjärvi'))
dbsession.add(State(dxcc=224, code='307', name='Hollola'))
dbsession.add(State(dxcc=224, code='308', name='Humppila'))
dbsession.add(State(dxcc=224, code='309', name='Hämeenlinna'))
dbsession.add(State(dxcc=224, code='310', name='Janakkala'))
dbsession.add(State(dxcc=224, code='311', name='Jokioinen'))
dbsession.add(State(dxcc=224, code='312', name='Juupajoki'))
dbsession.add(State(dxcc=224, code='313', name='Kalvola'))
dbsession.add(State(dxcc=224, code='314', name='Kangasala'))
dbsession.add(State(dxcc=224, code='315', name='Hämeenkoski'))
dbsession.add(State(dxcc=224, code='316', name='Kuhmalahti'))
dbsession.add(State(dxcc=224, code='318', name='Kuru'))
dbsession.add(State(dxcc=224, code='319', name='Kylmäkoski'))
dbsession.add(State(dxcc=224, code='320', name='Kärkölä'))
dbsession.add(State(dxcc=224, code='321', name='Lahti'))
dbsession.add(State(dxcc=224, code='322', name='Lammi'))
dbsession.add(State(dxcc=224, code='323', name='Lempäälä'))
dbsession.add(State(dxcc=224, code='324', name='Loppi'))
dbsession.add(State(dxcc=224, code='325', name='Luopioinen'))
dbsession.add(State(dxcc=224, code='326', name='Längelmäki'))
dbsession.add(State(dxcc=224, code='327', name='Mänttä'))
dbsession.add(State(dxcc=224, code='328', name='Nastola'))
dbsession.add(State(dxcc=224, code='329', name='Nokia'))
dbsession.add(State(dxcc=224, code='330', name='Orivesi'))
dbsession.add(State(dxcc=224, code='331', name='Padasjoki'))
dbsession.add(State(dxcc=224, code='332', name='Pirkkala'))
dbsession.add(State(dxcc=224, code='333', name='Pälkäne'))
dbsession.add(State(dxcc=224, code='334', name='Renko'))
dbsession.add(State(dxcc=224, code='335', name='Riihimäki'))
dbsession.add(State(dxcc=224, code='336', name='Ruovesi'))
dbsession.add(State(dxcc=224, code='337', name='Sahalahti'))
dbsession.add(State(dxcc=224, code='340', name='Tammela'))
dbsession.add(State(dxcc=224, code='341', name='Tampere'))
dbsession.add(State(dxcc=224, code='342', name='Toijala'))
dbsession.add(State(dxcc=224, code='344', name='Tuulos'))
dbsession.add(State(dxcc=224, code='345', name='Urjala'))
dbsession.add(State(dxcc=224, code='346', name='Valkeakoski'))
dbsession.add(State(dxcc=224, code='347', name='Vesilahti'))
dbsession.add(State(dxcc=224, code='348', name='Viiala'))
dbsession.add(State(dxcc=224, code='349', name='Vilppula'))
dbsession.add(State(dxcc=224, code='350', name='Virrat'))
dbsession.add(State(dxcc=224, code='351', name='Ylöjärvi'))
dbsession.add(State(dxcc=224, code='352', name='Ypäjä'))
dbsession.add(State(dxcc=224, code='353', name='Hämeenkyrö'))
dbsession.add(State(dxcc=224, code='354', name='Ikaalinen'))
dbsession.add(State(dxcc=224, code='355', name='Kihniö'))
dbsession.add(State(dxcc=224, code='356', name='Mouhijärvi'))
dbsession.add(State(dxcc=224, code='357', name='Parkano'))
dbsession.add(State(dxcc=224, code='358', name='Viljakkala'))
dbsession.add(State(dxcc=224, code='402', name='Enonkoski'))
dbsession.add(State(dxcc=224, code='403', name='Hartola'))
dbsession.add(State(dxcc=224, code='404', name='Haukivuori'))
dbsession.add(State(dxcc=224, code='405', name='Heinola'))
dbsession.add(State(dxcc=224, code='407', name='Heinävesi'))
dbsession.add(State(dxcc=224, code='408', name='Hirvensalmi'))
dbsession.add(State(dxcc=224, code='409', name='Joroinen'))
dbsession.add(State(dxcc=224, code='410', name='Juva'))
dbsession.add(State(dxcc=224, code='411', name='Jäppilä'))
dbsession.add(State(dxcc=224, code='412', name='Kangaslampi'))
dbsession.add(State(dxcc=224, code='413', name='Kangasniemi'))
dbsession.add(State(dxcc=224, code='414', name='Kerimäki'))
dbsession.add(State(dxcc=224, code='415', name='Mikkeli'))
dbsession.add(State(dxcc=224, code='417', name='Mäntyharju'))
dbsession.add(State(dxcc=224, code='418', name='Pertunmaa'))
dbsession.add(State(dxcc=224, code='419', name='Pieksämäki'))
dbsession.add(State(dxcc=224, code='420', name='Pieksänmaa'))
dbsession.add(State(dxcc=224, code='421', name='Punkaharju'))
dbsession.add(State(dxcc=224, code='422', name='Puumala'))
dbsession.add(State(dxcc=224, code='423', name='Rantasalmi'))
dbsession.add(State(dxcc=224, code='424', name='Ristiina'))
dbsession.add(State(dxcc=224, code='425', name='Savonlinna'))
dbsession.add(State(dxcc=224, code='426', name='Savonranta'))
dbsession.add(State(dxcc=224, code='427', name='Sulkava'))
dbsession.add(State(dxcc=224, code='428', name='Sysmä'))
dbsession.add(State(dxcc=224, code='502', name='Elimäki'))
dbsession.add(State(dxcc=224, code='503', name='Hamina'))
dbsession.add(State(dxcc=224, code='504', name='Iitti'))
dbsession.add(State(dxcc=224, code='505', name='Imatra'))
dbsession.add(State(dxcc=224, code='506', name='Jaala'))
dbsession.add(State(dxcc=224, code='507', name='Joutseno'))
dbsession.add(State(dxcc=224, code='509', name='Kotka'))
dbsession.add(State(dxcc=224, code='510', name='Kouvola'))
dbsession.add(State(dxcc=224, code='511', name='Kuusankoski'))
dbsession.add(State(dxcc=224, code='513', name='Lappeenranta'))
dbsession.add(State(dxcc=224, code='514', name='Lemi'))
dbsession.add(State(dxcc=224, code='515', name='Luumäki'))
dbsession.add(State(dxcc=224, code='516', name='Miehikkälä'))
dbsession.add(State(dxcc=224, code='518', name='Parikkala'))
dbsession.add(State(dxcc=224, code='519', name='Pyhtää'))
dbsession.add(State(dxcc=224, code='520', name='Rautjärvi'))
dbsession.add(State(dxcc=224, code='521', name='Ruokolahti'))
dbsession.add(State(dxcc=224, code='522', name='Saari'))
dbsession.add(State(dxcc=224, code='523', name='Savitaipale'))
dbsession.add(State(dxcc=224, code='525', name='Suomenniemi'))
dbsession.add(State(dxcc=224, code='526', name='Taipalsaari'))
dbsession.add(State(dxcc=224, code='527', name='Uukuniemi'))
dbsession.add(State(dxcc=224, code='528', name='Valkeala'))
dbsession.add(State(dxcc=224, code='530', name='Virolahti'))
dbsession.add(State(dxcc=224, code='531', name='Ylämaa'))
dbsession.add(State(dxcc=224, code='532', name='Anjalankoski'))
dbsession.add(State(dxcc=224, code='601', name='Alahärmä'))
dbsession.add(State(dxcc=224, code='602', name='Alajärvi'))
dbsession.add(State(dxcc=224, code='603', name='Alavus'))
dbsession.add(State(dxcc=224, code='604', name='Evijärvi'))
dbsession.add(State(dxcc=224, code='605', name='Halsua'))
dbsession.add(State(dxcc=224, code='606', name='Hankasalmi'))
dbsession.add(State(dxcc=224, code='607', name='Himanka'))
dbsession.add(State(dxcc=224, code='608', name='Ilmajoki'))
dbsession.add(State(dxcc=224, code='609', name='Isojoki'))
dbsession.add(State(dxcc=224, code='610', name='Isokyrö'))
dbsession.add(State(dxcc=224, code='611', name='Jalasjärvi'))
dbsession.add(State(dxcc=224, code='612', name='Joutsa'))
dbsession.add(State(dxcc=224, code='613', name='Jurva'))
dbsession.add(State(dxcc=224, code='614', name='Jyväskylä'))
dbsession.add(State(dxcc=224, code='615', name='Jyväskylän mlk'))
dbsession.add(State(dxcc=224, code='616', name='Jämsä'))
dbsession.add(State(dxcc=224, code='617', name='Jämsänkoski'))
dbsession.add(State(dxcc=224, code='619', name='Kannonkoski'))
dbsession.add(State(dxcc=224, code='620', name='Kannus'))
dbsession.add(State(dxcc=224, code='621', name='Karijoki'))
dbsession.add(State(dxcc=224, code='622', name='Karstula'))
dbsession.add(State(dxcc=224, code='623', name='Kaskinen'))
dbsession.add(State(dxcc=224, code='624', name='Kauhajoki'))
dbsession.add(State(dxcc=224, code='625', name='Kauhava'))
dbsession.add(State(dxcc=224, code='626', name='Kaustinen'))
dbsession.add(State(dxcc=224, code='627', name='Keuruu'))
dbsession.add(State(dxcc=224, code='628', name='Kinnula'))
dbsession.add(State(dxcc=224, code='629', name='Kivijärvi'))
dbsession.add(State(dxcc=224, code='630', name='Kokkola'))
dbsession.add(State(dxcc=224, code='632', name='Konnevesi'))
dbsession.add(State(dxcc=224, code='633', name='Korpilahti'))
dbsession.add(State(dxcc=224, code='634', name='Korsnäs'))
dbsession.add(State(dxcc=224, code='635', name='Kortesjärvi'))
dbsession.add(State(dxcc=224, code='636', name='Kristiinankaupunki'))
dbsession.add(State(dxcc=224, code='637', name='Kruunupyy'))
dbsession.add(State(dxcc=224, code='638', name='Kuhmoinen'))
dbsession.add(State(dxcc=224, code='639', name='Kuortane'))
dbsession.add(State(dxcc=224, code='640', name='Kurikka'))
dbsession.add(State(dxcc=224, code='641', name='Kyyjärvi'))
dbsession.add(State(dxcc=224, code='642', name='Kälviä'))
dbsession.add(State(dxcc=224, code='643', name='Laihia'))
dbsession.add(State(dxcc=224, code='644', name='Lappajärvi'))
dbsession.add(State(dxcc=224, code='645', name='Lapua'))
dbsession.add(State(dxcc=224, code='646', name='Laukaa'))
dbsession.add(State(dxcc=224, code='647', name='Lehtimäki'))
dbsession.add(State(dxcc=224, code='648', name='Leivonmäki'))
dbsession.add(State(dxcc=224, code='649', name='Lestijärvi'))
dbsession.add(State(dxcc=224, code='650', name='Lohtaja'))
dbsession.add(State(dxcc=224, code='651', name='Luhanka'))
dbsession.add(State(dxcc=224, code='652', name='Luoto'))
dbsession.add(State(dxcc=224, code='653', name='Maalahti'))
dbsession.add(State(dxcc=224, code='654', name='Maksamaa'))
dbsession.add(State(dxcc=224, code='655', name='Multia'))
dbsession.add(State(dxcc=224, code='656', name='Mustasaari'))
dbsession.add(State(dxcc=224, code='657', name='Muurame'))
dbsession.add(State(dxcc=224, code='658', name='Nurmo'))
dbsession.add(State(dxcc=224, code='659', name='Närpiö'))
dbsession.add(State(dxcc=224, code='660', name='Oravainen'))
dbsession.add(State(dxcc=224, code='661', name='Perho'))
dbsession.add(State(dxcc=224, code='662', name='Peräseinäjoki'))
dbsession.add(State(dxcc=224, code='663', name='Petäjävesi'))
dbsession.add(State(dxcc=224, code='664', name='Pietarsaari'))
dbsession.add(State(dxcc=224, code='665', name='Pedersöre'))
dbsession.add(State(dxcc=224, code='666', name='Pihtipudas'))
dbsession.add(State(dxcc=224, code='668', name='Pylkönmäki'))
dbsession.add(State(dxcc=224, code='669', name='Saarijärvi'))
dbsession.add(State(dxcc=224, code='670', name='Seinäjoki'))
dbsession.add(State(dxcc=224, code='671', name='Soini'))
dbsession.add(State(dxcc=224, code='672', name='Sumiainen'))
dbsession.add(State(dxcc=224, code='673', name='Suolahti'))
dbsession.add(State(dxcc=224, code='675', name='Teuva'))
dbsession.add(State(dxcc=224, code='676', name='Toholampi'))
dbsession.add(State(dxcc=224, code='677', name='Toivakka'))
dbsession.add(State(dxcc=224, code='678', name='Töysä'))
dbsession.add(State(dxcc=224, code='679', name='Ullava'))
dbsession.add(State(dxcc=224, code='680', name='Uurainen'))
dbsession.add(State(dxcc=224, code='681', name='Uusikaarlepyy'))
dbsession.add(State(dxcc=224, code='682', name='Vaasa'))
dbsession.add(State(dxcc=224, code='683', name='Veteli'))
dbsession.add(State(dxcc=224, code='684', name='Viitasaari'))
dbsession.add(State(dxcc=224, code='685', name='Vimpeli'))
dbsession.add(State(dxcc=224, code='686', name='Vähäkyrö'))
dbsession.add(State(dxcc=224, code='687', name='Vöyri'))
dbsession.add(State(dxcc=224, code='688', name='Ylihärmä'))
dbsession.add(State(dxcc=224, code='689', name='Ylistaro'))
dbsession.add(State(dxcc=224, code='690', name='Ähtäri'))
dbsession.add(State(dxcc=224, code='692', name='Äänekoski'))
dbsession.add(State(dxcc=224, code='701', name='Eno'))
dbsession.add(State(dxcc=224, code='702', name='Iisalmi'))
dbsession.add(State(dxcc=224, code='703', name='Ilomantsi'))
dbsession.add(State(dxcc=224, code='704', name='Joensuu'))
dbsession.add(State(dxcc=224, code='705', name='Juankoski'))
dbsession.add(State(dxcc=224, code='706', name='Juuka'))
dbsession.add(State(dxcc=224, code='707', name='Kaavi'))
dbsession.add(State(dxcc=224, code='708', name='Karttula'))
dbsession.add(State(dxcc=224, code='709', name='Keitele'))
dbsession.add(State(dxcc=224, code='710', name='Kesälahti'))
dbsession.add(State(dxcc=224, code='711', name='Kiihtelysvaara'))
dbsession.add(State(dxcc=224, code='712', name='Kitee'))
dbsession.add(State(dxcc=224, code='713', name='Kiuruvesi'))
dbsession.add(State(dxcc=224, code='714', name='Kontiolahti'))
dbsession.add(State(dxcc=224, code='715', name='Kuopio'))
dbsession.add(State(dxcc=224, code='716', name='Lapinlahti'))
dbsession.add(State(dxcc=224, code='717', name='Leppävirta'))
dbsession.add(State(dxcc=224, code='718', name='Lieksa'))
dbsession.add(State(dxcc=224, code='719', name='Liperi'))
dbsession.add(State(dxcc=224, code='720', name='Maaninka'))
dbsession.add(State(dxcc=224, code='721', name='Nilsiä'))
dbsession.add(State(dxcc=224, code='722', name='Nurmes'))
dbsession.add(State(dxcc=224, code='723', name='Outokumpu'))
dbsession.add(State(dxcc=224, code='724', name='Pielavesi'))
dbsession.add(State(dxcc=224, code='725', name='Polvijärvi'))
dbsession.add(State(dxcc=224, code='726', name='Pyhäselkä'))
dbsession.add(State(dxcc=224, code='727', name='Rautalampi'))
dbsession.add(State(dxcc=224, code='728', name='Rautavaara'))
dbsession.add(State(dxcc=224, code='729', name='Rääkkylä'))
dbsession.add(State(dxcc=224, code='730', name='Siilinjärvi'))
dbsession.add(State(dxcc=224, code='731', name='Sonkajärvi'))
dbsession.add(State(dxcc=224, code='732', name='Suonenjoki'))
dbsession.add(State(dxcc=224, code='733', name='Tervo'))
dbsession.add(State(dxcc=224, code='734', name='Tohmajärvi'))
dbsession.add(State(dxcc=224, code='735', name='Tuupovaara'))
dbsession.add(State(dxcc=224, code='736', name='Tuusniemi'))
dbsession.add(State(dxcc=224, code='737', name='Valtimo'))
dbsession.add(State(dxcc=224, code='738', name='Varkaus'))
dbsession.add(State(dxcc=224, code='739', name='Varpaisjärvi'))
dbsession.add(State(dxcc=224, code='740', name='Vehmersalmi'))
dbsession.add(State(dxcc=224, code='741', name='Vesanto'))
dbsession.add(State(dxcc=224, code='742', name='Vieremä'))
dbsession.add(State(dxcc=224, code='743', name='Värtsilä'))
dbsession.add(State(dxcc=224, code='801', name='Alavieska'))
dbsession.add(State(dxcc=224, code='802', name='Haapajärvi'))
dbsession.add(State(dxcc=224, code='803', name='Haapavesi'))
dbsession.add(State(dxcc=224, code='804', name='Hailuoto'))
dbsession.add(State(dxcc=224, code='805', name='Haukipudas'))
dbsession.add(State(dxcc=224, code='806', name='Hyrynsalmi'))
dbsession.add(State(dxcc=224, code='807', name='Ii'))
dbsession.add(State(dxcc=224, code='808', name='Kajaani'))
dbsession.add(State(dxcc=224, code='810', name='Kalajoki'))
dbsession.add(State(dxcc=224, code='811', name='Kempele'))
dbsession.add(State(dxcc=224, code='812', name='Kestilä'))
dbsession.add(State(dxcc=224, code='813', name='Kiiminki'))
dbsession.add(State(dxcc=224, code='814', name='Kuhmo'))
dbsession.add(State(dxcc=224, code='815', name='Kuivaniemi'))
dbsession.add(State(dxcc=224, code='816', name='Kuusamo'))
dbsession.add(State(dxcc=224, code='817', name='Kärsämäki'))
dbsession.add(State(dxcc=224, code='818', name='Liminka'))
dbsession.add(State(dxcc=224, code='819', name='Lumijoki'))
dbsession.add(State(dxcc=224, code='820', name='Merijärvi'))
dbsession.add(State(dxcc=224, code='821', name='Muhos'))
dbsession.add(State(dxcc=224, code='822', name='Nivala'))
dbsession.add(State(dxcc=224, code='823', name='Oulainen'))
dbsession.add(State(dxcc=224, code='824', name='Oulu'))
dbsession.add(State(dxcc=224, code='825', name='Oulunsalo'))
dbsession.add(State(dxcc=224, code='826', name='Paltamo'))
dbsession.add(State(dxcc=224, code='827', name='Pattijoki'))
dbsession.add(State(dxcc=224, code='828', name='Piippola'))
dbsession.add(State(dxcc=224, code='829', name='Pudasjärvi'))
dbsession.add(State(dxcc=224, code='830', name='Pulkkila'))
dbsession.add(State(dxcc=224, code='831', name='Puolanka'))
dbsession.add(State(dxcc=224, code='832', name='Pyhäjoki'))
dbsession.add(State(dxcc=224, code='833', name='Pyhäjärvi'))
dbsession.add(State(dxcc=224, code='834', name='Pyhäntä'))
dbsession.add(State(dxcc=224, code='835', name='Raahe'))
dbsession.add(State(dxcc=224, code='836', name='Rantsila'))
dbsession.add(State(dxcc=224, code='837', name='Reisjärvi'))
dbsession.add(State(dxcc=224, code='838', name='Ristijärvi'))
dbsession.add(State(dxcc=224, code='839', name='Ruukki'))
dbsession.add(State(dxcc=224, code='840', name='Sievi'))
dbsession.add(State(dxcc=224, code='841', name='Siikajoki'))
dbsession.add(State(dxcc=224, code='842', name='Sotkamo'))
dbsession.add(State(dxcc=224, code='843', name='Suomussalmi'))
dbsession.add(State(dxcc=224, code='844', name='Taivalkoski'))
dbsession.add(State(dxcc=224, code='846', name='Tyrnävä'))
dbsession.add(State(dxcc=224, code='847', name='Utajärvi'))
dbsession.add(State(dxcc=224, code='848', name='Vaala'))
dbsession.add(State(dxcc=224, code='849', name='Vihanti'))
dbsession.add(State(dxcc=224, code='850', name='Vuolijoki'))
dbsession.add(State(dxcc=224, code='851', name='Yli-Ii'))
dbsession.add(State(dxcc=224, code='852', name='Ylikiiminki'))
dbsession.add(State(dxcc=224, code='853', name='Ylivieska'))
dbsession.add(State(dxcc=224, code='901', name='Enontekiö'))
dbsession.add(State(dxcc=224, code='902', name='Inari'))
dbsession.add(State(dxcc=224, code='903', name='Kemi'))
dbsession.add(State(dxcc=224, code='904', name='Keminmaa'))
dbsession.add(State(dxcc=224, code='905', name='Kemijärvi'))
dbsession.add(State(dxcc=224, code='907', name='Kittilä'))
dbsession.add(State(dxcc=224, code='908', name='Kolari'))
dbsession.add(State(dxcc=224, code='909', name='Muonio'))
dbsession.add(State(dxcc=224, code='910', name='Pelkosenniemi'))
dbsession.add(State(dxcc=224, code='911', name='Pello'))
dbsession.add(State(dxcc=224, code='912', name='Posio'))
dbsession.add(State(dxcc=224, code='913', name='Ranua'))
dbsession.add(State(dxcc=224, code='914', name='Rovaniemi'))
dbsession.add(State(dxcc=224, code='915', name='Rovaniemen mlk'))
dbsession.add(State(dxcc=224, code='916', name='Salla'))
dbsession.add(State(dxcc=224, code='917', name='Savukoski'))
dbsession.add(State(dxcc=224, code='918', name='Simo'))
dbsession.add(State(dxcc=224, code='919', name='Sodankylä'))
dbsession.add(State(dxcc=224, code='920', name='Tervola'))
dbsession.add(State(dxcc=224, code='921', name='Tornio'))
dbsession.add(State(dxcc=224, code='922', name='Utsjoki'))
dbsession.add(State(dxcc=224, code='923', name='Ylitornio'))
# Sardinia
dbsession.add(State(dxcc=225, code='CA', name='Cagliari'))
dbsession.add(State(dxcc=225, code='CI', name='Carbonia-Iglesias'))
dbsession.add(State(dxcc=225, code='MD', name='Medio Campidano (import-only)'))
dbsession.add(State(dxcc=225, code='NU', name='Nuoro'))
dbsession.add(State(dxcc=225, code='OG', name='Ogliastra'))
dbsession.add(State(dxcc=225, code='OR', name='Oristano'))
dbsession.add(State(dxcc=225, code='OT', name='Olbia-Tempio'))
dbsession.add(State(dxcc=225, code='SS', name='Sassari'))
    dbsession.add(State(dxcc=225, code='VS', name='Medio Campidano'))
    # France
dbsession.add(State(dxcc=227, code='01', name='Ain'))
dbsession.add(State(dxcc=227, code='02', name='Aisne'))
dbsession.add(State(dxcc=227, code='03', name='Allier'))
dbsession.add(State(dxcc=227, code='04', name='Alpes-de-Haute-Provence'))
dbsession.add(State(dxcc=227, code='05', name='Hautes-Alpes'))
dbsession.add(State(dxcc=227, code='06', name='Alpes-Maritimes'))
dbsession.add(State(dxcc=227, code='07', name='Ardèche'))
dbsession.add(State(dxcc=227, code='08', name='Ardennes'))
dbsession.add(State(dxcc=227, code='09', name='Ariège'))
dbsession.add(State(dxcc=227, code='10', name='Aube'))
dbsession.add(State(dxcc=227, code='11', name='Aude'))
dbsession.add(State(dxcc=227, code='12', name='Aveyron'))
    dbsession.add(State(dxcc=227, code='13', name='Bouches-du-Rhône'))
dbsession.add(State(dxcc=227, code='14', name='Calvados'))
dbsession.add(State(dxcc=227, code='15', name='Cantal'))
dbsession.add(State(dxcc=227, code='16', name='Charente'))
dbsession.add(State(dxcc=227, code='17', name='Charente-Maritime'))
dbsession.add(State(dxcc=227, code='18', name='Cher'))
dbsession.add(State(dxcc=227, code='19', name='Corrèze'))
    dbsession.add(State(dxcc=227, code='20', name='Côte-d\'Or'))
    dbsession.add(State(dxcc=227, code='21', name='Côtes-d\'Armor'))
dbsession.add(State(dxcc=227, code='22', name='Creuse'))
dbsession.add(State(dxcc=227, code='23', name='Dordogne'))
dbsession.add(State(dxcc=227, code='24', name='Doubs'))
dbsession.add(State(dxcc=227, code='25', name='Drôme'))
dbsession.add(State(dxcc=227, code='26', name='Eure'))
dbsession.add(State(dxcc=227, code='27', name='Eure-et-Loir'))
dbsession.add(State(dxcc=227, code='28', name='Finistère'))
dbsession.add(State(dxcc=227, code='29', name='Gard'))
dbsession.add(State(dxcc=227, code='30', name='Haute-Garonne'))
    dbsession.add(State(dxcc=227, code='31', name='Gers'))
dbsession.add(State(dxcc=227, code='32', name='Gironde'))
dbsession.add(State(dxcc=227, code='33', name='Hérault'))
dbsession.add(State(dxcc=227, code='34', name='Ille-et-Vilaine'))
dbsession.add(State(dxcc=227, code='35', name='Indre'))
dbsession.add(State(dxcc=227, code='36', name='Indre-et-Loire'))
dbsession.add(State(dxcc=227, code='37', name='Isère'))
dbsession.add(State(dxcc=227, code='38', name='Jura'))
dbsession.add(State(dxcc=227, code='39', name='Landes'))
dbsession.add(State(dxcc=227, code='40', name='Loir-et-Cher'))
dbsession.add(State(dxcc=227, code='41', name='Loire'))
dbsession.add(State(dxcc=227, code='42', name='Haute-Loire'))
dbsession.add(State(dxcc=227, code='43', name='Loire-Atlantique'))
dbsession.add(State(dxcc=227, code='44', name='Loiret'))
dbsession.add(State(dxcc=227, code='45', name='Lot'))
dbsession.add(State(dxcc=227, code='46', name='Lot-et-Garonne'))
dbsession.add(State(dxcc=227, code='47', name='Lozère'))
dbsession.add(State(dxcc=227, code='48', name='Maine-et-Loire'))
dbsession.add(State(dxcc=227, code='49', name='Manche'))
dbsession.add(State(dxcc=227, code='50', name='Marne'))
dbsession.add(State(dxcc=227, code='51', name='Haute-Marne'))
dbsession.add(State(dxcc=227, code='52', name='Mayenne'))
dbsession.add(State(dxcc=227, code='53', name='Meurthe-et-Moselle'))
dbsession.add(State(dxcc=227, code='54', name='Meuse'))
dbsession.add(State(dxcc=227, code='55', name='Morbihan'))
dbsession.add(State(dxcc=227, code='56', name='Moselle'))
    dbsession.add(State(dxcc=227, code='57', name='Nièvre'))
dbsession.add(State(dxcc=227, code='58', name='Nord'))
dbsession.add(State(dxcc=227, code='59', name='Oise'))
dbsession.add(State(dxcc=227, code='60', name='Orne'))
dbsession.add(State(dxcc=227, code='61', name='Pas-de-Calais'))
dbsession.add(State(dxcc=227, code='62', name='Puy-de-Dôme'))
dbsession.add(State(dxcc=227, code='63', name='Pyrénées-Atlantiques'))
    dbsession.add(State(dxcc=227, code='64', name='Hautes-Pyrénées'))
dbsession.add(State(dxcc=227, code='65', name='Pyrénées-Orientales'))
dbsession.add(State(dxcc=227, code='66', name='Bas-Rhin'))
dbsession.add(State(dxcc=227, code='67', name='Haut-Rhin'))
dbsession.add(State(dxcc=227, code='68', name='Rhône'))
dbsession.add(State(dxcc=227, code='69', name='Haute-Saône'))
dbsession.add(State(dxcc=227, code='70', name='Saône-et-Loire'))
dbsession.add(State(dxcc=227, code='71', name='Sarthe'))
dbsession.add(State(dxcc=227, code='72', name='Savoie'))
dbsession.add(State(dxcc=227, code='73', name='Haute-Savoie'))
dbsession.add(State(dxcc=227, code='74', name='Paris'))
dbsession.add(State(dxcc=227, code='75', name='Seine-Maritime'))
dbsession.add(State(dxcc=227, code='76', name='Seine-et-Marne'))
dbsession.add(State(dxcc=227, code='77', name='Yvelines'))
dbsession.add(State(dxcc=227, code='78', name='Deux-Sèvres'))
dbsession.add(State(dxcc=227, code='79', name='Somme'))
dbsession.add(State(dxcc=227, code='80', name='Tarn'))
dbsession.add(State(dxcc=227, code='81', name='Tarn-et-Garonne'))
dbsession.add(State(dxcc=227, code='82', name='Var'))
dbsession.add(State(dxcc=227, code='83', name='Vaucluse'))
dbsession.add(State(dxcc=227, code='84', name='Vendée'))
dbsession.add(State(dxcc=227, code='85', name='Vienne'))
dbsession.add(State(dxcc=227, code='86', name='Haute-Vienne'))
dbsession.add(State(dxcc=227, code='87', name='Vosges'))
dbsession.add(State(dxcc=227, code='88', name='Yonne'))
dbsession.add(State(dxcc=227, code='89', name='Territoire de Belfort'))
dbsession.add(State(dxcc=227, code='90', name='Essonne'))
    dbsession.add(State(dxcc=227, code='91', name='Hauts-de-Seine'))
dbsession.add(State(dxcc=227, code='92', name='Seine-Saint-Denis'))
dbsession.add(State(dxcc=227, code='93', name='Val-de-Marne'))
dbsession.add(State(dxcc=227, code='94', name='Val-d\'Oise'))
# Fed. Rep. Of Germany
dbsession.add(State(dxcc=230, code='BB', name='Brandenburg'))
dbsession.add(State(dxcc=230, code='BE', name='Berlin'))
dbsession.add(State(dxcc=230, code='BW', name='Baden-Württemberg'))
dbsession.add(State(dxcc=230, code='BY', name='Freistaat Bayern'))
dbsession.add(State(dxcc=230, code='HB', name='Freie Hansestadt Bremen'))
dbsession.add(State(dxcc=230, code='HE', name='Hessen'))
dbsession.add(State(dxcc=230, code='HH', name='Freie und Hansestadt Hamburg'))
dbsession.add(State(dxcc=230, code='MV', name='Mecklenburg-Vorpommern'))
dbsession.add(State(dxcc=230, code='NI', name='Niedersachsen'))
dbsession.add(State(dxcc=230, code='NW', name='Nordrhein-Westfalen'))
dbsession.add(State(dxcc=230, code='RP', name='Rheinland-Pfalz'))
dbsession.add(State(dxcc=230, code='SL', name='Saarland'))
dbsession.add(State(dxcc=230, code='SH', name='Schleswig-Holstein'))
dbsession.add(State(dxcc=230, code='SN', name='Freistaat Sachsen'))
dbsession.add(State(dxcc=230, code='ST', name='Sachsen-Anhalt'))
dbsession.add(State(dxcc=230, code='TH', name='Freistaat Thüringen'))
    # Hungary
    dbsession.add(State(dxcc=239, code='GY', name='Győr (Győr-Moson-Sopron)'))
dbsession.add(State(dxcc=239, code='VA', name='Vas'))
dbsession.add(State(dxcc=239, code='ZA', name='Zala'))
dbsession.add(State(dxcc=239, code='KO', name='Komárom (Komárom-Esztergom)'))
dbsession.add(State(dxcc=239, code='VE', name='Veszprém'))
dbsession.add(State(dxcc=239, code='BA', name='Baranya'))
dbsession.add(State(dxcc=239, code='SO', name='Somogy'))
dbsession.add(State(dxcc=239, code='TO', name='Tolna'))
dbsession.add(State(dxcc=239, code='FE', name='Fejér'))
dbsession.add(State(dxcc=239, code='BP', name='Budapest'))
dbsession.add(State(dxcc=239, code='HE', name='Heves'))
dbsession.add(State(dxcc=239, code='NG', name='Nógrád'))
dbsession.add(State(dxcc=239, code='PE', name='Pest'))
dbsession.add(State(dxcc=239, code='SZ', name='Szolnok (Jász-Nagykun-Szolnok)'))
dbsession.add(State(dxcc=239, code='BE', name='Békés'))
dbsession.add(State(dxcc=239, code='BN', name='Bács-Kiskun'))
dbsession.add(State(dxcc=239, code='CS', name='Csongrád'))
dbsession.add(State(dxcc=239, code='BO', name='Borsod (Borsod-Abaúj-Zemplén)'))
dbsession.add(State(dxcc=239, code='HB', name='Hajdú-Bihar'))
dbsession.add(State(dxcc=239, code='SA', name='Szabolcs (Szabolcs-Szatmár-Bereg)'))
# Ireland
dbsession.add(State(dxcc=245, code='CW', name='Carlow (Ceatharlach)'))
dbsession.add(State(dxcc=245, code='CN', name='Cavan (An Cabhán)'))
dbsession.add(State(dxcc=245, code='CE', name='Clare (An Clár)'))
dbsession.add(State(dxcc=245, code='C', name='Cork (Corcaigh)'))
dbsession.add(State(dxcc=245, code='DL', name='Donegal (Dún na nGall)'))
dbsession.add(State(dxcc=245, code='D', name='Dublin (Baile Áth Cliath)'))
dbsession.add(State(dxcc=245, code='G', name='Galway (Gaillimh)'))
dbsession.add(State(dxcc=245, code='KY', name='Kerry (Ciarraí)'))
dbsession.add(State(dxcc=245, code='KE', name='Kildare (Cill Dara)'))
dbsession.add(State(dxcc=245, code='KK', name='Kilkenny (Cill Chainnigh)'))
dbsession.add(State(dxcc=245, code='LS', name='Laois (Laois)'))
dbsession.add(State(dxcc=245, code='LM', name='Leitrim (Liatroim)'))
dbsession.add(State(dxcc=245, code='LK', name='Limerick (Luimneach)'))
dbsession.add(State(dxcc=245, code='LD', name='Longford (An Longfort)'))
dbsession.add(State(dxcc=245, code='LH', name='Louth (Lú)'))
dbsession.add(State(dxcc=245, code='MO', name='Mayo (Maigh Eo)'))
dbsession.add(State(dxcc=245, code='MH', name='Meath (An Mhí)'))
dbsession.add(State(dxcc=245, code='MN', name='Monaghan (Muineachán)'))
dbsession.add(State(dxcc=245, code='OY', name='Offaly (Uíbh Fhailí)'))
dbsession.add(State(dxcc=245, code='RN', name='Roscommon (Ros Comáin)'))
dbsession.add(State(dxcc=245, code='SO', name='Sligo (Sligeach)'))
dbsession.add(State(dxcc=245, code='TA', name='Tipperary (Tiobraid Árann)'))
dbsession.add(State(dxcc=245, code='WD', name='Waterford (Port Láirge)'))
dbsession.add(State(dxcc=245, code='WH', name='Westmeath (An Iarmhí)'))
dbsession.add(State(dxcc=245, code='WX', name='Wexford (Loch Garman)'))
dbsession.add(State(dxcc=245, code='WW', name='Wicklow (Cill Mhantáin)'))
# Italy
dbsession.add(State(dxcc=248, code='GE', name='Genova'))
dbsession.add(State(dxcc=248, code='IM', name='Imperia'))
dbsession.add(State(dxcc=248, code='SP', name='La Spezia'))
dbsession.add(State(dxcc=248, code='SV', name='Savona'))
dbsession.add(State(dxcc=248, code='AL', name='Alessandria'))
dbsession.add(State(dxcc=248, code='AT', name='Asti'))
dbsession.add(State(dxcc=248, code='BI', name='Biella'))
dbsession.add(State(dxcc=248, code='CN', name='Cuneo'))
dbsession.add(State(dxcc=248, code='NO', name='Novara'))
dbsession.add(State(dxcc=248, code='TO', name='Torino'))
dbsession.add(State(dxcc=248, code='VB', name='Verbano Cusio Ossola'))
dbsession.add(State(dxcc=248, code='VC', name='Vercelli'))
dbsession.add(State(dxcc=248, code='AO', name='Aosta'))
dbsession.add(State(dxcc=248, code='BG', name='Bergamo'))
dbsession.add(State(dxcc=248, code='BS', name='Brescia'))
dbsession.add(State(dxcc=248, code='CO', name='Como'))
dbsession.add(State(dxcc=248, code='CR', name='Cremona'))
dbsession.add(State(dxcc=248, code='LC', name='Lecco'))
dbsession.add(State(dxcc=248, code='LO', name='Lodi'))
dbsession.add(State(dxcc=248, code='MB', name='Monza e Brianza'))
dbsession.add(State(dxcc=248, code='MN', name='Mantova'))
dbsession.add(State(dxcc=248, code='MI', name='Milano'))
dbsession.add(State(dxcc=248, code='PV', name='Pavia'))
dbsession.add(State(dxcc=248, code='SO', name='Sondrio'))
dbsession.add(State(dxcc=248, code='VA', name='Varese'))
dbsession.add(State(dxcc=248, code='BL', name='Belluno'))
dbsession.add(State(dxcc=248, code='PD', name='Padova'))
dbsession.add(State(dxcc=248, code='RO', name='Rovigo'))
dbsession.add(State(dxcc=248, code='TV', name='Treviso'))
dbsession.add(State(dxcc=248, code='VE', name='Venezia'))
dbsession.add(State(dxcc=248, code='VR', name='Verona'))
dbsession.add(State(dxcc=248, code='VI', name='Vicenza'))
dbsession.add(State(dxcc=248, code='BZ', name='Bolzano'))
dbsession.add(State(dxcc=248, code='TN', name='Trento'))
dbsession.add(State(dxcc=248, code='GO', name='Gorizia'))
dbsession.add(State(dxcc=248, code='PN', name='Pordenone'))
dbsession.add(State(dxcc=248, code='TS', name='Trieste'))
dbsession.add(State(dxcc=248, code='UD', name='Udine'))
dbsession.add(State(dxcc=248, code='BO', name='Bologna'))
dbsession.add(State(dxcc=248, code='FE', name='Ferrara'))
dbsession.add(State(dxcc=248, code='FC', name='Forlì Cesena'))
dbsession.add(State(dxcc=248, code='MO', name='Modena'))
dbsession.add(State(dxcc=248, code='PR', name='Parma'))
dbsession.add(State(dxcc=248, code='PC', name='Piacenza'))
dbsession.add(State(dxcc=248, code='RA', name='Ravenna'))
dbsession.add(State(dxcc=248, code='RE', name='Reggio Emilia'))
dbsession.add(State(dxcc=248, code='RN', name='Rimini'))
dbsession.add(State(dxcc=248, code='AR', name='Arezzo'))
dbsession.add(State(dxcc=248, code='FI', name='Firenze'))
dbsession.add(State(dxcc=248, code='GR', name='Grosseto'))
dbsession.add(State(dxcc=248, code='LI', name='Livorno'))
dbsession.add(State(dxcc=248, code='LU', name='Lucca'))
dbsession.add(State(dxcc=248, code='MS', name='Massa Carrara'))
dbsession.add(State(dxcc=248, code='PT', name='Pistoia'))
dbsession.add(State(dxcc=248, code='PI', name='Pisa'))
dbsession.add(State(dxcc=248, code='PO', name='Prato'))
dbsession.add(State(dxcc=248, code='SI', name='Siena'))
dbsession.add(State(dxcc=248, code='CH', name='Chieti'))
dbsession.add(State(dxcc=248, code='AQ', name='L\'Aquila'))
dbsession.add(State(dxcc=248, code='PE', name='Pescara'))
dbsession.add(State(dxcc=248, code='TE', name='Teramo'))
dbsession.add(State(dxcc=248, code='AN', name='Ancona'))
dbsession.add(State(dxcc=248, code='AP', name='Ascoli Piceno'))
dbsession.add(State(dxcc=248, code='FM', name='Fermo'))
dbsession.add(State(dxcc=248, code='MC', name='Macerata'))
dbsession.add(State(dxcc=248, code='PU', name='Pesaro e Urbino'))
dbsession.add(State(dxcc=248, code='MT', name='Matera'))
dbsession.add(State(dxcc=248, code='PZ', name='Potenza'))
dbsession.add(State(dxcc=248, code='BA', name='Bari'))
dbsession.add(State(dxcc=248, code='BT', name='Barletta-Andria-Trani'))
dbsession.add(State(dxcc=248, code='BR', name='Brindisi'))
dbsession.add(State(dxcc=248, code='FG', name='Foggia'))
dbsession.add(State(dxcc=248, code='LE', name='Lecce'))
dbsession.add(State(dxcc=248, code='TA', name='Taranto'))
dbsession.add(State(dxcc=248, code='CZ', name='Catanzaro'))
dbsession.add(State(dxcc=248, code='CS', name='Cosenza'))
dbsession.add(State(dxcc=248, code='KR', name='Crotone'))
dbsession.add(State(dxcc=248, code='RC', name='Reggio Calabria'))
dbsession.add(State(dxcc=248, code='VV', name='Vibo Valentia'))
dbsession.add(State(dxcc=248, code='AV', name='Avellino'))
dbsession.add(State(dxcc=248, code='BN', name='Benevento'))
dbsession.add(State(dxcc=248, code='CE', name='Caserta'))
dbsession.add(State(dxcc=248, code='NA', name='Napoli'))
dbsession.add(State(dxcc=248, code='SA', name='Salerno'))
dbsession.add(State(dxcc=248, code='IS', name='Isernia'))
dbsession.add(State(dxcc=248, code='CB', name='Campobasso'))
dbsession.add(State(dxcc=248, code='FR', name='Frosinone'))
dbsession.add(State(dxcc=248, code='LT', name='Latina'))
dbsession.add(State(dxcc=248, code='RI', name='Rieti'))
dbsession.add(State(dxcc=248, code='RM', name='Roma'))
dbsession.add(State(dxcc=248, code='VT', name='Viterbo'))
dbsession.add(State(dxcc=248, code='PG', name='Perugia'))
dbsession.add(State(dxcc=248, code='TR', name='Terni'))
dbsession.add(State(dxcc=248, code='AG', name='Agrigento'))
dbsession.add(State(dxcc=248, code='CL', name='Caltanissetta'))
dbsession.add(State(dxcc=248, code='CT', name='Catania'))
dbsession.add(State(dxcc=248, code='EN', name='Enna'))
dbsession.add(State(dxcc=248, code='ME', name='Messina'))
dbsession.add(State(dxcc=248, code='PA', name='Palermo'))
dbsession.add(State(dxcc=248, code='RG', name='Ragusa'))
dbsession.add(State(dxcc=248, code='SR', name='Siracusa'))
dbsession.add(State(dxcc=248, code='TP', name='Trapani'))
# Madeira Islands
dbsession.add(State(dxcc=256, code='MD', name='Madeira'))
# Netherlands
dbsession.add(State(dxcc=263, code='DR', name='Drenthe'))
dbsession.add(State(dxcc=263, code='FR', name='Friesland'))
dbsession.add(State(dxcc=263, code='GR', name='Groningen'))
dbsession.add(State(dxcc=263, code='NB', name='Noord-Brabant'))
dbsession.add(State(dxcc=263, code='OV', name='Overijssel'))
dbsession.add(State(dxcc=263, code='ZH', name='Zuid-Holland'))
dbsession.add(State(dxcc=263, code='FL', name='Flevoland'))
dbsession.add(State(dxcc=263, code='GD', name='Gelderland'))
dbsession.add(State(dxcc=263, code='LB', name='Limburg'))
dbsession.add(State(dxcc=263, code='NH', name='Noord-Holland'))
dbsession.add(State(dxcc=263, code='UT', name='Utrecht'))
dbsession.add(State(dxcc=263, code='ZL', name='Zeeland'))
# Poland
dbsession.add(State(dxcc=269, code='Z', name='Zachodnio-Pomorskie'))
dbsession.add(State(dxcc=269, code='F', name='Pomorskie'))
dbsession.add(State(dxcc=269, code='P', name='Kujawsko-Pomorskie'))
dbsession.add(State(dxcc=269, code='B', name='Lubuskie'))
dbsession.add(State(dxcc=269, code='W', name='Wielkopolskie'))
dbsession.add(State(dxcc=269, code='J', name='Warmińsko-Mazurskie'))
dbsession.add(State(dxcc=269, code='O', name='Podlaskie'))
dbsession.add(State(dxcc=269, code='R', name='Mazowieckie'))
dbsession.add(State(dxcc=269, code='D', name='Dolnośląskie'))
dbsession.add(State(dxcc=269, code='U', name='Opolskie'))
dbsession.add(State(dxcc=269, code='C', name='Łódzkie'))
dbsession.add(State(dxcc=269, code='S', name='Świętokrzyskie'))
dbsession.add(State(dxcc=269, code='K', name='Podkarpackie'))
dbsession.add(State(dxcc=269, code='L', name='Lubelskie'))
dbsession.add(State(dxcc=269, code='G', name='Śląskie'))
dbsession.add(State(dxcc=269, code='M', name='Małopolskie'))
# Portugal
dbsession.add(State(dxcc=272, code='AV', name='Aveiro'))
dbsession.add(State(dxcc=272, code='BJ', name='Beja'))
dbsession.add(State(dxcc=272, code='BR', name='Braga'))
dbsession.add(State(dxcc=272, code='BG', name='Bragança'))
dbsession.add(State(dxcc=272, code='CB', name='Castelo Branco'))
dbsession.add(State(dxcc=272, code='CO', name='Coimbra'))
dbsession.add(State(dxcc=272, code='EV', name='Evora'))
dbsession.add(State(dxcc=272, code='FR', name='Faro'))
dbsession.add(State(dxcc=272, code='GD', name='Guarda'))
dbsession.add(State(dxcc=272, code='LR', name='Leiria'))
dbsession.add(State(dxcc=272, code='LX', name='Lisboa'))
dbsession.add(State(dxcc=272, code='PG', name='Portalegre'))
dbsession.add(State(dxcc=272, code='PT', name='Porto'))
dbsession.add(State(dxcc=272, code='SR', name='Santarem'))
dbsession.add(State(dxcc=272, code='ST', name='Setubal'))
dbsession.add(State(dxcc=272, code='VC', name='Viana do Castelo'))
dbsession.add(State(dxcc=272, code='VR', name='Vila Real'))
dbsession.add(State(dxcc=272, code='VS', name='Viseu'))
# Romania
dbsession.add(State(dxcc=275, code='AR', name='Arad'))
    dbsession.add(State(dxcc=275, code='CS', name='Caraş-Severin (Caras-Severin)'))
dbsession.add(State(dxcc=275, code='HD', name='Hunedoara'))
dbsession.add(State(dxcc=275, code='TM', name='Timiş (Timis)'))
    dbsession.add(State(dxcc=275, code='BU', name='Bucureşti (Bucuresti)'))
dbsession.add(State(dxcc=275, code='IF', name='Ilfov'))
dbsession.add(State(dxcc=275, code='BR', name='Brăila (Braila)'))
    dbsession.add(State(dxcc=275, code='CT', name='Constanţa (Constanta)'))
dbsession.add(State(dxcc=275, code='GL', name='Galati'))
dbsession.add(State(dxcc=275, code='TL', name='Tulcea'))
dbsession.add(State(dxcc=275, code='VN', name='Vrancea'))
dbsession.add(State(dxcc=275, code='AB', name='Alba'))
dbsession.add(State(dxcc=275, code='BH', name='Bihor'))
dbsession.add(State(dxcc=275, code='BN', name='Bistrita-Nasaud'))
dbsession.add(State(dxcc=275, code='CJ', name='Cluj'))
dbsession.add(State(dxcc=275, code='MM', name='Maramureş (Maramures)'))
dbsession.add(State(dxcc=275, code='SJ', name='Sălaj (Salaj)'))
dbsession.add(State(dxcc=275, code='SM', name='Satu Mare'))
    dbsession.add(State(dxcc=275, code='BV', name='Braşov (Brasov)'))
dbsession.add(State(dxcc=275, code='CV', name='Covasna'))
dbsession.add(State(dxcc=275, code='HR', name='Harghita'))
dbsession.add(State(dxcc=275, code='MS', name='Mureş (Mures)'))
dbsession.add(State(dxcc=275, code='SB', name='Sibiu'))
    dbsession.add(State(dxcc=275, code='AG', name='Argeş (Arges)'))
dbsession.add(State(dxcc=275, code='DJ', name='Dolj'))
dbsession.add(State(dxcc=275, code='GJ', name='Gorj'))
dbsession.add(State(dxcc=275, code='MH', name='Mehedinţi (Mehedinti)'))
dbsession.add(State(dxcc=275, code='OT', name='Olt'))
dbsession.add(State(dxcc=275, code='VL', name='Vâlcea'))
dbsession.add(State(dxcc=275, code='BC', name='Bacau'))
    dbsession.add(State(dxcc=275, code='BT', name='Botoşani (Botosani)'))
dbsession.add(State(dxcc=275, code='IS', name='Iaşi (Iasi)'))
dbsession.add(State(dxcc=275, code='NT', name='Neamţ (Neamt)'))
dbsession.add(State(dxcc=275, code='SV', name='Suceava'))
dbsession.add(State(dxcc=275, code='VS', name='Vaslui'))
dbsession.add(State(dxcc=275, code='BZ', name='Buzău (Buzau)'))
dbsession.add(State(dxcc=275, code='CL', name='Călăraşi (Calarasi)'))
dbsession.add(State(dxcc=275, code='DB', name='Dâmboviţa (Dambovita)'))
    dbsession.add(State(dxcc=275, code='GR', name='Giurgiu'))
dbsession.add(State(dxcc=275, code='IL', name='Ialomita'))
dbsession.add(State(dxcc=275, code='PH', name='Prahova'))
dbsession.add(State(dxcc=275, code='TR', name='Teleorman'))
# Spain
dbsession.add(State(dxcc=281, code='AV', name='Avila'))
dbsession.add(State(dxcc=281, code='BU', name='Burgos'))
dbsession.add(State(dxcc=281, code='C', name='A Coruña'))
dbsession.add(State(dxcc=281, code='LE', name='Leon'))
dbsession.add(State(dxcc=281, code='LO', name='La Rioja'))
dbsession.add(State(dxcc=281, code='LU', name='Lugo'))
dbsession.add(State(dxcc=281, code='O', name='Asturias'))
dbsession.add(State(dxcc=281, code='OU', name='Ourense'))
dbsession.add(State(dxcc=281, code='P', name='Palencia'))
dbsession.add(State(dxcc=281, code='PO', name='Pontevedra'))
dbsession.add(State(dxcc=281, code='S', name='Cantabria'))
dbsession.add(State(dxcc=281, code='SA', name='Salamanca'))
dbsession.add(State(dxcc=281, code='SG', name='Segovia'))
dbsession.add(State(dxcc=281, code='SO', name='Soria'))
dbsession.add(State(dxcc=281, code='VA', name='Valladolid'))
dbsession.add(State(dxcc=281, code='ZA', name='Zamora'))
dbsession.add(State(dxcc=281, code='BI', name='Vizcaya'))
dbsession.add(State(dxcc=281, code='HU', name='Huesca'))
dbsession.add(State(dxcc=281, code='NA', name='Navarra'))
dbsession.add(State(dxcc=281, code='SS', name='Guipuzcoa'))
dbsession.add(State(dxcc=281, code='TE', name='Teruel'))
dbsession.add(State(dxcc=281, code='VI', name='Alava'))
dbsession.add(State(dxcc=281, code='Z', name='Zaragoza'))
dbsession.add(State(dxcc=281, code='B', name='Barcelona'))
dbsession.add(State(dxcc=281, code='GI', name='Girona'))
dbsession.add(State(dxcc=281, code='L', name='Lleida'))
dbsession.add(State(dxcc=281, code='T', name='Tarragona'))
dbsession.add(State(dxcc=281, code='BA', name='Badajoz'))
dbsession.add(State(dxcc=281, code='CC', name='Caceres'))
dbsession.add(State(dxcc=281, code='CR', name='Ciudad Real'))
dbsession.add(State(dxcc=281, code='CU', name='Cuenca'))
dbsession.add(State(dxcc=281, code='GU', name='Guadalajara'))
dbsession.add(State(dxcc=281, code='M', name='Madrid'))
dbsession.add(State(dxcc=281, code='TO', name='Toledo'))
dbsession.add(State(dxcc=281, code='A', name='Alicante'))
dbsession.add(State(dxcc=281, code='AB', name='Albacete'))
dbsession.add(State(dxcc=281, code='CS', name='Castellon'))
dbsession.add(State(dxcc=281, code='MU', name='Murcia'))
dbsession.add(State(dxcc=281, code='V', name='Valencia'))
dbsession.add(State(dxcc=281, code='AL', name='Almeria'))
dbsession.add(State(dxcc=281, code='CA', name='Cadiz'))
dbsession.add(State(dxcc=281, code='CO', name='Cordoba'))
dbsession.add(State(dxcc=281, code='GR', name='Granada'))
dbsession.add(State(dxcc=281, code='H', name='Huelva'))
dbsession.add(State(dxcc=281, code='J', name='Jaen'))
dbsession.add(State(dxcc=281, code='MA', name='Malaga'))
dbsession.add(State(dxcc=281, code='SE', name='Sevilla'))
# Sweden
dbsession.add(State(dxcc=284, code='AB', name='Stockholm län'))
dbsession.add(State(dxcc=284, code='I', name='Gotlands län'))
dbsession.add(State(dxcc=284, code='BD', name='Norrbottens län'))
dbsession.add(State(dxcc=284, code='AC', name='Västerbottens län'))
dbsession.add(State(dxcc=284, code='X', name='Gävleborgs län'))
dbsession.add(State(dxcc=284, code='Z', name='Jämtlands län'))
dbsession.add(State(dxcc=284, code='Y', name='Västernorrlands län'))
dbsession.add(State(dxcc=284, code='W', name='Dalarna län'))
dbsession.add(State(dxcc=284, code='S', name='Värmlands län'))
dbsession.add(State(dxcc=284, code='O', name='Västra Götalands län'))
dbsession.add(State(dxcc=284, code='T', name='Örebro län'))
dbsession.add(State(dxcc=284, code='E', name='Östergötlands län'))
dbsession.add(State(dxcc=284, code='D', name='Södermanlands län'))
dbsession.add(State(dxcc=284, code='C', name='Uppsala län'))
dbsession.add(State(dxcc=284, code='U', name='Västmanlands län'))
dbsession.add(State(dxcc=284, code='N', name='Hallands län'))
dbsession.add(State(dxcc=284, code='K', name='Blekinge län'))
dbsession.add(State(dxcc=284, code='F', name='Jönköpings län'))
dbsession.add(State(dxcc=284, code='H', name='Kalmar län'))
dbsession.add(State(dxcc=284, code='G', name='Kronobergs län'))
dbsession.add(State(dxcc=284, code='L', name='Skåne län'))
# Switzerland
dbsession.add(State(dxcc=287, code='AG', name='Aargau'))
dbsession.add(State(dxcc=287, code='AR', name='Appenzell Ausserrhoden'))
dbsession.add(State(dxcc=287, code='AI', name='Appenzell Innerrhoden'))
dbsession.add(State(dxcc=287, code='BL', name='Basel Landschaft'))
dbsession.add(State(dxcc=287, code='BS', name='Basel Stadt'))
dbsession.add(State(dxcc=287, code='BE', name='Bern'))
dbsession.add(State(dxcc=287, code='FR', name='Freiburg / Fribourg'))
dbsession.add(State(dxcc=287, code='GE', name='Genf / Genève'))
dbsession.add(State(dxcc=287, code='GL', name='Glarus'))
    dbsession.add(State(dxcc=287, code='GR', name='Graubünden / Grisons'))
dbsession.add(State(dxcc=287, code='JU', name='Jura'))
dbsession.add(State(dxcc=287, code='LU', name='Luzern'))
dbsession.add(State(dxcc=287, code='NE', name='Neuenburg / Neuchâtel'))
dbsession.add(State(dxcc=287, code='NW', name='Nidwalden'))
dbsession.add(State(dxcc=287, code='OW', name='Obwalden'))
dbsession.add(State(dxcc=287, code='SH', name='Schaffhausen'))
dbsession.add(State(dxcc=287, code='SZ', name='Schwyz'))
dbsession.add(State(dxcc=287, code='SO', name='Solothurn'))
dbsession.add(State(dxcc=287, code='SG', name='St. Gallen'))
dbsession.add(State(dxcc=287, code='TI', name='Tessin / Ticino'))
dbsession.add(State(dxcc=287, code='TG', name='Thurgau'))
dbsession.add(State(dxcc=287, code='UR', name='Uri'))
dbsession.add(State(dxcc=287, code='VD', name='Waadt / Vaud'))
dbsession.add(State(dxcc=287, code='VS', name='Wallis / Valais'))
    dbsession.add(State(dxcc=287, code='ZH', name='Zürich'))
dbsession.add(State(dxcc=287, code='ZG', name='Zug'))
# Ukraine
dbsession.add(State(dxcc=288, code='SU', name='Sums\'ka Oblast\''))
dbsession.add(State(dxcc=288, code='TE', name='Ternopil\'s\'ka Oblast\''))
dbsession.add(State(dxcc=288, code='CH', name='Cherkas\'ka Oblast\''))
dbsession.add(State(dxcc=288, code='ZA', name='Zakarpats\'ka Oblast\''))
dbsession.add(State(dxcc=288, code='DN', name='Dnipropetrovs\'ka Oblast\''))
dbsession.add(State(dxcc=288, code='OD', name='Odes\'ka Oblast\''))
    dbsession.add(State(dxcc=288, code='HE', name='Khersons\'ka Oblast\''))
dbsession.add(State(dxcc=288, code='PO', name='Poltavs\'ka Oblast\''))
dbsession.add(State(dxcc=288, code='DO', name='Donets\'ka Oblast\''))
dbsession.add(State(dxcc=288, code='RI', name='Rivnens\'ka Oblast\''))
dbsession.add(State(dxcc=288, code='HA', name='Kharkivs\'ka Oblast\''))
dbsession.add(State(dxcc=288, code='LU', name='Luhans\'ka Oblast\''))
dbsession.add(State(dxcc=288, code='VI', name='Vinnyts\'ka Oblast\''))
    dbsession.add(State(dxcc=288, code='VO', name='Volyns\'ka Oblast\''))
dbsession.add(State(dxcc=288, code='ZP', name='Zaporiz\'ka Oblast\''))
dbsession.add(State(dxcc=288, code='CR', name='Chernihivs\'ka Oblast\''))
dbsession.add(State(dxcc=288, code='IF', name='Ivano-Frankivs\'ka Oblast\''))
dbsession.add(State(dxcc=288, code='HM', name='Khmel\'nyts\'ka Oblast\''))
dbsession.add(State(dxcc=288, code='KV', name='Kyïv'))
dbsession.add(State(dxcc=288, code='KO', name='Kyivs\'ka Oblast\''))
dbsession.add(State(dxcc=288, code='KI', name='Kirovohrads\'ka Oblast\''))
dbsession.add(State(dxcc=288, code='LV', name='L\'vivs\'ka Oblast\''))
dbsession.add(State(dxcc=288, code='ZH', name='Zhytomyrs\'ka Oblast\''))
dbsession.add(State(dxcc=288, code='CN', name='Chernivets\'ka Oblast\''))
dbsession.add(State(dxcc=288, code='NI', name='Mykolaivs\'ka Oblast\''))
dbsession.add(State(dxcc=288, code='KR', name='Respublika Krym'))
dbsession.add(State(dxcc=288, code='SL', name='Sevastopol\''))
# United States
dbsession.add(State(dxcc=291, code='CT', name='Connecticut'))
dbsession.add(State(dxcc=291, code='ME', name='Maine'))
dbsession.add(State(dxcc=291, code='MA', name='Massachusetts'))
dbsession.add(State(dxcc=291, code='NH', name='New Hampshire'))
dbsession.add(State(dxcc=291, code='RI', name='Rhode Island'))
dbsession.add(State(dxcc=291, code='VT', name='Vermont'))
dbsession.add(State(dxcc=291, code='NJ', name='New Jersey'))
dbsession.add(State(dxcc=291, code='NY', name='New York'))
dbsession.add(State(dxcc=291, code='DE', name='Delaware'))
dbsession.add(State(dxcc=291, code='DC', name='District of Columbia'))
dbsession.add(State(dxcc=291, code='MD', name='Maryland'))
dbsession.add(State(dxcc=291, code='PA', name='Pennsylvania'))
dbsession.add(State(dxcc=291, code='AL', name='Alabama'))
dbsession.add(State(dxcc=291, code='FL', name='Florida'))
dbsession.add(State(dxcc=291, code='GA', name='Georgia'))
dbsession.add(State(dxcc=291, code='KY', name='Kentucky'))
dbsession.add(State(dxcc=291, code='NC', name='North Carolina'))
dbsession.add(State(dxcc=291, code='SC', name='South Carolina'))
dbsession.add(State(dxcc=291, code='TN', name='Tennessee'))
dbsession.add(State(dxcc=291, code='VA', name='Virginia'))
dbsession.add(State(dxcc=291, code='AR', name='Arkansas'))
dbsession.add(State(dxcc=291, code='LA', name='Louisiana'))
dbsession.add(State(dxcc=291, code='MS', name='Mississippi'))
dbsession.add(State(dxcc=291, code='NM', name='New Mexico'))
dbsession.add(State(dxcc=291, code='OK', name='Oklahoma'))
dbsession.add(State(dxcc=291, code='TX', name='Texas'))
dbsession.add(State(dxcc=291, code='CA', name='California'))
dbsession.add(State(dxcc=291, code='AZ', name='Arizona'))
dbsession.add(State(dxcc=291, code='ID', name='Idaho'))
dbsession.add(State(dxcc=291, code='MT', name='Montana'))
dbsession.add(State(dxcc=291, code='NV', name='Nevada'))
dbsession.add(State(dxcc=291, code='OR', name='Oregon'))
dbsession.add(State(dxcc=291, code='UT', name='Utah'))
dbsession.add(State(dxcc=291, code='WA', name='Washington'))
dbsession.add(State(dxcc=291, code='WY', name='Wyoming'))
dbsession.add(State(dxcc=291, code='MI', name='Michigan'))
dbsession.add(State(dxcc=291, code='OH', name='Ohio'))
dbsession.add(State(dxcc=291, code='WV', name='West Virginia'))
dbsession.add(State(dxcc=291, code='IL', name='Illinois'))
dbsession.add(State(dxcc=291, code='IN', name='Indiana'))
dbsession.add(State(dxcc=291, code='WI', name='Wisconsin'))
dbsession.add(State(dxcc=291, code='CO', name='Colorado'))
dbsession.add(State(dxcc=291, code='IA', name='Iowa'))
dbsession.add(State(dxcc=291, code='KS', name='Kansas'))
dbsession.add(State(dxcc=291, code='MN', name='Minnesota'))
dbsession.add(State(dxcc=291, code='MO', name='Missouri'))
dbsession.add(State(dxcc=291, code='NE', name='Nebraska'))
dbsession.add(State(dxcc=291, code='ND', name='North Dakota'))
dbsession.add(State(dxcc=291, code='SD', name='South Dakota'))
# Japan
dbsession.add(State(dxcc=339, code='12', name='Chiba (Kanto)'))
dbsession.add(State(dxcc=339, code='16', name='Gunma (Kanto)'))
dbsession.add(State(dxcc=339, code='14', name='Ibaraki (Kanto)'))
dbsession.add(State(dxcc=339, code='11', name='Kanagawa (Kanto)'))
dbsession.add(State(dxcc=339, code='13', name='Saitama (Kanto)'))
dbsession.add(State(dxcc=339, code='15', name='Tochigi (Kanto)'))
dbsession.add(State(dxcc=339, code='10', name='Tokyo (Kanto)'))
dbsession.add(State(dxcc=339, code='17', name='Yamanashi (Kanto)'))
dbsession.add(State(dxcc=339, code='20', name='Aichi (Tokai)'))
dbsession.add(State(dxcc=339, code='19', name='Gifu (Tokai)'))
dbsession.add(State(dxcc=339, code='21', name='Mie (Tokai)'))
dbsession.add(State(dxcc=339, code='18', name='Shizuoka (Tokai)'))
dbsession.add(State(dxcc=339, code='27', name='Hyogo (Kansai)'))
dbsession.add(State(dxcc=339, code='22', name='Kyoto (Kansai)'))
dbsession.add(State(dxcc=339, code='24', name='Nara (Kansai)'))
dbsession.add(State(dxcc=339, code='25', name='Osaka (Kansai)'))
dbsession.add(State(dxcc=339, code='23', name='Shiga (Kansai)'))
dbsession.add(State(dxcc=339, code='26', name='Wakayama (Kansai)'))
dbsession.add(State(dxcc=339, code='35', name='Hiroshima (Chugoku)'))
dbsession.add(State(dxcc=339, code='31', name='Okayama (Chugoku)'))
dbsession.add(State(dxcc=339, code='32', name='Shimane (Chugoku)'))
dbsession.add(State(dxcc=339, code='34', name='Tottori (Chugoku)'))
dbsession.add(State(dxcc=339, code='33', name='Yamaguchi (Chugoku)'))
dbsession.add(State(dxcc=339, code='38', name='Ehime (Shikoku)'))
dbsession.add(State(dxcc=339, code='36', name='Kagawa (Shikoku)'))
dbsession.add(State(dxcc=339, code='39', name='Kochi (Shikoku)'))
dbsession.add(State(dxcc=339, code='37', name='Tokushima (Shikoku)'))
dbsession.add(State(dxcc=339, code='40', name='Fukuoka (Kyushu)'))
dbsession.add(State(dxcc=339, code='46', name='Kagoshima (Kyushu)'))
dbsession.add(State(dxcc=339, code='43', name='Kumamoto (Kyushu)'))
dbsession.add(State(dxcc=339, code='45', name='Miyazaki (Kyushu)'))
dbsession.add(State(dxcc=339, code='42', name='Nagasaki (Kyushu)'))
dbsession.add(State(dxcc=339, code='44', name='Oita (Kyushu)'))
dbsession.add(State(dxcc=339, code='47', name='Okinawa (Kyushu)'))
dbsession.add(State(dxcc=339, code='41', name='Saga (Kyushu)'))
dbsession.add(State(dxcc=339, code='04', name='Akita (Tohoku)'))
dbsession.add(State(dxcc=339, code='02', name='Aomori (Tohoku)'))
dbsession.add(State(dxcc=339, code='07', name='Fukushima (Tohoku)'))
dbsession.add(State(dxcc=339, code='03', name='Iwate (Tohoku)'))
dbsession.add(State(dxcc=339, code='06', name='Miyagi (Tohoku)'))
dbsession.add(State(dxcc=339, code='05', name='Yamagata (Tohoku)'))
dbsession.add(State(dxcc=339, code='01', name='Hokkaido (Hokkaido)'))
dbsession.add(State(dxcc=339, code='29', name='Fukui (Hokuriku)'))
dbsession.add(State(dxcc=339, code='30', name='Ishikawa (Hokuriku)'))
dbsession.add(State(dxcc=339, code='28', name='Toyama (Hokuriku)'))
    dbsession.add(State(dxcc=339, code='09', name='Nagano (Shin\'etsu)'))
    dbsession.add(State(dxcc=339, code='08', name='Niigata (Shin\'etsu)'))
# Philippines
dbsession.add(State(dxcc=375, code='AUR', name='Aurora (Southern Tagalog)'))
dbsession.add(State(dxcc=375, code='BTG', name='Batangas (Southern Tagalog)'))
dbsession.add(State(dxcc=375, code='CAV', name='Cavite (Southern Tagalog)'))
dbsession.add(State(dxcc=375, code='LAG', name='Laguna (Southern Tagalog)'))
dbsession.add(State(dxcc=375, code='MAD', name='Marinduque (Southern Tagalog)'))
dbsession.add(State(dxcc=375, code='MDC', name='Mindoro Occidental (Southern Tagalog)'))
dbsession.add(State(dxcc=375, code='MDR', name='Mindoro Oriental (Southern Tagalog)'))
dbsession.add(State(dxcc=375, code='PLW', name='Palawan (Southern Tagalog)'))
dbsession.add(State(dxcc=375, code='QUE', name='Quezon (Southern Tagalog)'))
dbsession.add(State(dxcc=375, code='RIZ', name='Rizal (Southern Tagalog)'))
dbsession.add(State(dxcc=375, code='ROM', name='Romblon (Southern Tagalog)'))
dbsession.add(State(dxcc=375, code='ILN', name='Ilocos Norte (Ilocos)'))
dbsession.add(State(dxcc=375, code='ILS', name='Ilocos Sur (Ilocos)'))
dbsession.add(State(dxcc=375, code='LUN', name='La Union (Ilocos)'))
dbsession.add(State(dxcc=375, code='PAN', name='Pangasinan (Ilocos)'))
dbsession.add(State(dxcc=375, code='BTN', name='Batanes (Cagayan Valley)'))
dbsession.add(State(dxcc=375, code='CAG', name='Cagayan (Cagayan Valley)'))
dbsession.add(State(dxcc=375, code='ISA', name='Isabela (Cagayan Valley)'))
dbsession.add(State(dxcc=375, code='NUV', name='Nueva Vizcaya (Cagayan Valley)'))
dbsession.add(State(dxcc=375, code='QUI', name='Quirino (Cagayan Valley)'))
dbsession.add(State(dxcc=375, code='ABR', name='Abra (Cordillera Administrative Region)'))
dbsession.add(State(dxcc=375, code='APA', name='Apayao (Cordillera Administrative Region)'))
dbsession.add(State(dxcc=375, code='BEN', name='Benguet (Cordillera Administrative Region)'))
dbsession.add(State(dxcc=375, code='IFU', name='Ifugao (Cordillera Administrative Region)'))
    dbsession.add(State(dxcc=375, code='KAL', name='Kalinga-Apayao (Cordillera Administrative Region)'))
dbsession.add(State(dxcc=375, code='MOU', name='Mountain Province (Cordillera Administrative Region)'))
    dbsession.add(State(dxcc=375, code='BAN', name='Bataan (Central Luzon)'))
dbsession.add(State(dxcc=375, code='BUL', name='Bulacan (Central Luzon)'))
dbsession.add(State(dxcc=375, code='NUE', name='Nueva Ecija (Central Luzon)'))
dbsession.add(State(dxcc=375, code='PAM', name='Pampanga (Central Luzon)'))
dbsession.add(State(dxcc=375, code='TAR', name='Tarlac (Central Luzon)'))
dbsession.add(State(dxcc=375, code='ZMB', name='Zambales (Central Luzon)'))
dbsession.add(State(dxcc=375, code='ALB', name='Albay (Bicol)'))
dbsession.add(State(dxcc=375, code='CAN', name='Camarines Norte (Bicol)'))
dbsession.add(State(dxcc=375, code='CAS', name='Camarines Sur (Bicol)'))
dbsession.add(State(dxcc=375, code='CAT', name='Catanduanes (Bicol)'))
dbsession.add(State(dxcc=375, code='MAS', name='Masbate (Bicol)'))
dbsession.add(State(dxcc=375, code='SOR', name='Sorsogon (Bicol)'))
dbsession.add(State(dxcc=375, code='BIL', name='Biliran (Eastern Visayas)'))
dbsession.add(State(dxcc=375, code='EAS', name='Eastern Samar (Eastern Visayas)'))
dbsession.add(State(dxcc=375, code='LEY', name='Leyte (Eastern Visayas)'))
dbsession.add(State(dxcc=375, code='NSA', name='Northern Samar (Eastern Visayas)'))
dbsession.add(State(dxcc=375, code='SLE', name='Southern Leyte (Eastern Visayas)'))
dbsession.add(State(dxcc=375, code='WSA', name='Western Samar (Eastern Visayas)'))
dbsession.add(State(dxcc=375, code='AKL', name='Aklan (Western Visayas)'))
dbsession.add(State(dxcc=375, code='ANT', name='Antique (Western Visayas)'))
dbsession.add(State(dxcc=375, code='CAP', name='Capiz (Western Visayas)'))
dbsession.add(State(dxcc=375, code='GUI', name='Guimaras (Western Visayas)'))
dbsession.add(State(dxcc=375, code='ILI', name='Iloilo (Western Visayas)'))
    dbsession.add(State(dxcc=375, code='NEC', name='Negros Occidental (Western Visayas)'))
dbsession.add(State(dxcc=375, code='BOH', name='Bohol (Central Visayas)'))
dbsession.add(State(dxcc=375, code='CEB', name='Cebu (Central Visayas)'))
dbsession.add(State(dxcc=375, code='NER', name='Negros Oriental (Central Visayas)'))
dbsession.add(State(dxcc=375, code='SIG', name='Siquijor (Central Visayas)'))
dbsession.add(State(dxcc=375, code='ZAN', name='Zamboanga del Norte (Western Mindanao)'))
dbsession.add(State(dxcc=375, code='ZAS', name='Zamboanga del Sur (Western Mindanao)'))
dbsession.add(State(dxcc=375, code='ZSI', name='Zamboanga Sibugay (Western Mindanao)'))
dbsession.add(State(dxcc=375, code='NCO', name='North Cotabato (Central Mindanao)'))
dbsession.add(State(dxcc=375, code='SUK', name='Sultan Kudarat (Central Mindanao)'))
dbsession.add(State(dxcc=375, code='SAR', name='Sarangani (Central Mindanao)'))
dbsession.add(State(dxcc=375, code='SCO', name='South Cotabato (Central Mindanao)'))
dbsession.add(State(dxcc=375, code='BAS', name='Basilan (Autonomous Region in Muslim Mindanao)'))
dbsession.add(State(dxcc=375, code='LAS', name='Lanao del Sur (Autonomous Region in Muslim Mindanao)'))
dbsession.add(State(dxcc=375, code='MAG', name='Maguindanao (Autonomous Region in Muslim Mindanao)'))
dbsession.add(State(dxcc=375, code='SLU', name='Sulu (Autonomous Region in Muslim Mindanao)'))
dbsession.add(State(dxcc=375, code='TAW', name='Tawi-Tawi (Autonomous Region in Muslim Mindanao)'))
dbsession.add(State(dxcc=375, code='LAN', name='Lanao del Norte (Northern Mindanao)'))
dbsession.add(State(dxcc=375, code='BUK', name='Bukidnon (Northern Mindanao)'))
dbsession.add(State(dxcc=375, code='CAM', name='Camiguin (Northern Mindanao)'))
dbsession.add(State(dxcc=375, code='MSC', name='Misamis Occidental (Northern Mindanao)'))
dbsession.add(State(dxcc=375, code='MSR', name='Misamis Oriental (Northern Mindanao)'))
dbsession.add(State(dxcc=375, code='COM', name='Compostela Valley (Southern Mindanao)'))
dbsession.add(State(dxcc=375, code='DAV', name='Davao del Norte (Southern Mindanao)'))
dbsession.add(State(dxcc=375, code='DAS', name='Davao del Sur (Southern Mindanao)'))
dbsession.add(State(dxcc=375, code='DAO', name='Davao Oriental (Southern Mindanao)'))
dbsession.add(State(dxcc=375, code='AGN', name='Agusan del Norte (CARAGA)'))
dbsession.add(State(dxcc=375, code='AGS', name='Agusan del Sur (CARAGA)'))
dbsession.add(State(dxcc=375, code='SUN', name='Surigao del Norte (CARAGA)'))
dbsession.add(State(dxcc=375, code='SUR', name='Surigao del Sur (CARAGA)'))
# Croatia
dbsession.add(State(dxcc=497, code='01', name='Zagrebačka županija'))
dbsession.add(State(dxcc=497, code='02', name='Krapinsko-Zagorska županija'))
dbsession.add(State(dxcc=497, code='03', name='Sisačko-Moslavačka županija'))
dbsession.add(State(dxcc=497, code='04', name='Karlovačka županija'))
dbsession.add(State(dxcc=497, code='05', name='Varaždinska županija'))
dbsession.add(State(dxcc=497, code='06', name='Koprivničko-Križevačka županija'))
dbsession.add(State(dxcc=497, code='07', name='Bjelovarsko-Bilogorska županija'))
dbsession.add(State(dxcc=497, code='08', name='Primorsko-Goranska županija'))
dbsession.add(State(dxcc=497, code='09', name='Ličko-Senjska županija'))
dbsession.add(State(dxcc=497, code='10', name='Virovitičko-Podravska županija'))
dbsession.add(State(dxcc=497, code='11', name='Požeško-Slavonska županija'))
dbsession.add(State(dxcc=497, code='12', name='Brodsko-Posavska županija'))
dbsession.add(State(dxcc=497, code='13', name='Zadarska županija'))
dbsession.add(State(dxcc=497, code='14', name='Osječko-Baranjska županija'))
dbsession.add(State(dxcc=497, code='15', name='Šibensko-Kninska županija'))
dbsession.add(State(dxcc=497, code='16', name='Vukovarsko-Srijemska županija'))
dbsession.add(State(dxcc=497, code='17', name='Splitsko-Dalmatinska županija'))
dbsession.add(State(dxcc=497, code='18', name='Istarska županija'))
dbsession.add(State(dxcc=497, code='19', name='Dubrovačko-Neretvanska županija'))
dbsession.add(State(dxcc=497, code='20', name='Međimurska županija'))
dbsession.add(State(dxcc=497, code='21', name='Grad Zagreb'))
# Czech Republic
dbsession.add(State(dxcc=503, code='APA', name='Praha 1'))
dbsession.add(State(dxcc=503, code='APB', name='Praha 2'))
dbsession.add(State(dxcc=503, code='APC', name='Praha 3'))
dbsession.add(State(dxcc=503, code='APD', name='Praha 4'))
dbsession.add(State(dxcc=503, code='APE', name='Praha 5'))
dbsession.add(State(dxcc=503, code='APF', name='Praha 6'))
dbsession.add(State(dxcc=503, code='APG', name='Praha 7'))
dbsession.add(State(dxcc=503, code='APH', name='Praha 8'))
dbsession.add(State(dxcc=503, code='API', name='Praha 9'))
dbsession.add(State(dxcc=503, code='APJ', name='Praha 10'))
dbsession.add(State(dxcc=503, code='BBN', name='Benesov'))
dbsession.add(State(dxcc=503, code='BBE', name='Beroun'))
dbsession.add(State(dxcc=503, code='BKD', name='Kladno'))
dbsession.add(State(dxcc=503, code='BKO', name='Kolin'))
dbsession.add(State(dxcc=503, code='BKH', name='Kutna Hora'))
dbsession.add(State(dxcc=503, code='BME', name='Melnik'))
dbsession.add(State(dxcc=503, code='BMB', name='Mlada Boleslav'))
dbsession.add(State(dxcc=503, code='BNY', name='Nymburk'))
dbsession.add(State(dxcc=503, code='BPZ', name='Praha zapad'))
dbsession.add(State(dxcc=503, code='BPV', name='Praha vychod'))
dbsession.add(State(dxcc=503, code='BPB', name='Pribram'))
dbsession.add(State(dxcc=503, code='BRA', name='Rakovnik'))
dbsession.add(State(dxcc=503, code='CBU', name='Ceske Budejovice'))
dbsession.add(State(dxcc=503, code='CCK', name='Cesky Krumlov'))
dbsession.add(State(dxcc=503, code='CJH', name='Jindrichuv Hradec'))
dbsession.add(State(dxcc=503, code='CPE', name='Pelhrimov'))
dbsession.add(State(dxcc=503, code='CPI', name='Pisek'))
dbsession.add(State(dxcc=503, code='CPR', name='Prachatice'))
dbsession.add(State(dxcc=503, code='CST', name='Strakonice'))
dbsession.add(State(dxcc=503, code='CTA', name='Tabor'))
dbsession.add(State(dxcc=503, code='DDO', name='Domazlice'))
dbsession.add(State(dxcc=503, code='DCH', name='Cheb'))
dbsession.add(State(dxcc=503, code='DKV', name='Karlovy Vary'))
dbsession.add(State(dxcc=503, code='DKL', name='Klatovy'))
dbsession.add(State(dxcc=503, code='DPM', name='Plzen mesto'))
dbsession.add(State(dxcc=503, code='DPJ', name='Plzen jih'))
dbsession.add(State(dxcc=503, code='DPS', name='Plzen sever'))
dbsession.add(State(dxcc=503, code='DRO', name='Rokycany'))
dbsession.add(State(dxcc=503, code='DSO', name='Sokolov'))
dbsession.add(State(dxcc=503, code='DTA', name='Tachov'))
dbsession.add(State(dxcc=503, code='ECL', name='Ceska Lipa'))
dbsession.add(State(dxcc=503, code='EDE', name='Decin'))
dbsession.add(State(dxcc=503, code='ECH', name='Chomutov'))
dbsession.add(State(dxcc=503, code='EJA', name='Jablonec n. Nisou'))
dbsession.add(State(dxcc=503, code='ELI', name='Liberec'))
dbsession.add(State(dxcc=503, code='ELT', name='Litomerice'))
dbsession.add(State(dxcc=503, code='ELO', name='Louny'))
dbsession.add(State(dxcc=503, code='EMO', name='Most'))
dbsession.add(State(dxcc=503, code='ETE', name='Teplice'))
dbsession.add(State(dxcc=503, code='EUL', name='Usti nad Labem'))
dbsession.add(State(dxcc=503, code='FHB', name='Havlickuv Brod'))
dbsession.add(State(dxcc=503, code='FHK', name='Hradec Kralove'))
dbsession.add(State(dxcc=503, code='FCR', name='Chrudim'))
dbsession.add(State(dxcc=503, code='FJI', name='Jicin'))
dbsession.add(State(dxcc=503, code='FNA', name='Nachod'))
dbsession.add(State(dxcc=503, code='FPA', name='Pardubice'))
dbsession.add(State(dxcc=503, code='FRK', name='Rychnov n. Kneznou'))
dbsession.add(State(dxcc=503, code='FSE', name='Semily'))
dbsession.add(State(dxcc=503, code='FSV', name='Svitavy'))
dbsession.add(State(dxcc=503, code='FTR', name='Trutnov'))
dbsession.add(State(dxcc=503, code='FUO', name='Usti nad Orlici'))
dbsession.add(State(dxcc=503, code='GBL', name='Blansko'))
dbsession.add(State(dxcc=503, code='GBM', name='Brno mesto'))
dbsession.add(State(dxcc=503, code='GBV', name='Brno venkov'))
dbsession.add(State(dxcc=503, code='GBR', name='Breclav'))
dbsession.add(State(dxcc=503, code='GHO', name='Hodonin'))
dbsession.add(State(dxcc=503, code='GJI', name='Jihlava'))
dbsession.add(State(dxcc=503, code='GKR', name='Kromeriz'))
dbsession.add(State(dxcc=503, code='GPR', name='Prostejov'))
dbsession.add(State(dxcc=503, code='GTR', name='Trebic'))
dbsession.add(State(dxcc=503, code='GUH', name='Uherske Hradiste'))
dbsession.add(State(dxcc=503, code='GVY', name='Vyskov'))
dbsession.add(State(dxcc=503, code='GZL', name='Zlin'))
dbsession.add(State(dxcc=503, code='GZN', name='Znojmo'))
dbsession.add(State(dxcc=503, code='GZS', name='Zdar nad Sazavou'))
dbsession.add(State(dxcc=503, code='HBR', name='Bruntal'))
dbsession.add(State(dxcc=503, code='HFM', name='Frydek-Mistek'))
dbsession.add(State(dxcc=503, code='HJE', name='Jesenik'))
dbsession.add(State(dxcc=503, code='HKA', name='Karvina'))
dbsession.add(State(dxcc=503, code='HNJ', name='Novy Jicin'))
dbsession.add(State(dxcc=503, code='HOL', name='Olomouc'))
dbsession.add(State(dxcc=503, code='HOP', name='Opava'))
dbsession.add(State(dxcc=503, code='HOS', name='Ostrava'))
dbsession.add(State(dxcc=503, code='HPR', name='Prerov'))
dbsession.add(State(dxcc=503, code='HSU', name='Sumperk'))
dbsession.add(State(dxcc=503, code='HVS', name='Vsetin'))
# Slovak Republic
dbsession.add(State(dxcc=504, code='BAA', name='Bratislava 1'))
dbsession.add(State(dxcc=504, code='BAB', name='Bratislava 2'))
dbsession.add(State(dxcc=504, code='BAC', name='Bratislava 3'))
dbsession.add(State(dxcc=504, code='BAD', name='Bratislava 4'))
dbsession.add(State(dxcc=504, code='BAE', name='Bratislava 5'))
dbsession.add(State(dxcc=504, code='MAL', name='Malacky'))
dbsession.add(State(dxcc=504, code='PEZ', name='Pezinok'))
dbsession.add(State(dxcc=504, code='SEN', name='Senec'))
dbsession.add(State(dxcc=504, code='DST', name='Dunajska Streda'))
dbsession.add(State(dxcc=504, code='GAL', name='Galanta'))
dbsession.add(State(dxcc=504, code='HLO', name='Hlohovec'))
dbsession.add(State(dxcc=504, code='PIE', name='Piestany'))
dbsession.add(State(dxcc=504, code='SEA', name='Senica'))
dbsession.add(State(dxcc=504, code='SKA', name='Skalica'))
dbsession.add(State(dxcc=504, code='TRN', name='Trnava'))
dbsession.add(State(dxcc=504, code='BAN', name='Banovce n. Bebr.'))
dbsession.add(State(dxcc=504, code='ILA', name='Ilava'))
dbsession.add(State(dxcc=504, code='MYJ', name='Myjava'))
dbsession.add(State(dxcc=504, code='NMV', name='Nove Mesto n. Vah'))
dbsession.add(State(dxcc=504, code='PAR', name='Partizanske'))
dbsession.add(State(dxcc=504, code='PBY', name='Povazska Bystrica'))
dbsession.add(State(dxcc=504, code='PRI', name='Prievidza'))
dbsession.add(State(dxcc=504, code='PUC', name='Puchov'))
dbsession.add(State(dxcc=504, code='TNC', name='Trencin'))
dbsession.add(State(dxcc=504, code='KOM', name='Komarno'))
dbsession.add(State(dxcc=504, code='LVC', name='Levice'))
dbsession.add(State(dxcc=504, code='NIT', name='Nitra'))
dbsession.add(State(dxcc=504, code='NZA', name='Nove Zamky'))
dbsession.add(State(dxcc=504, code='SAL', name='Sala'))
dbsession.add(State(dxcc=504, code='TOP', name='Topolcany'))
dbsession.add(State(dxcc=504, code='ZMO', name='Zlate Moravce'))
dbsession.add(State(dxcc=504, code='BYT', name='Bytca'))
dbsession.add(State(dxcc=504, code='CAD', name='Cadca'))
dbsession.add(State(dxcc=504, code='DKU', name='Dolny Kubin'))
dbsession.add(State(dxcc=504, code='KNM', name='Kysucke N. Mesto'))
dbsession.add(State(dxcc=504, code='LMI', name='Liptovsky Mikulas'))
dbsession.add(State(dxcc=504, code='MAR', name='Martin'))
dbsession.add(State(dxcc=504, code='NAM', name='Namestovo'))
dbsession.add(State(dxcc=504, code='RUZ', name='Ruzomberok'))
dbsession.add(State(dxcc=504, code='TTE', name='Turcianske Teplice'))
dbsession.add(State(dxcc=504, code='TVR', name='Tvrdosin'))
dbsession.add(State(dxcc=504, code='ZIL', name='Zilina'))
dbsession.add(State(dxcc=504, code='BBY', name='Banska Bystrica'))
dbsession.add(State(dxcc=504, code='BST', name='Banska Stiavnica'))
dbsession.add(State(dxcc=504, code='BRE', name='Brezno'))
dbsession.add(State(dxcc=504, code='DET', name='Detva'))
dbsession.add(State(dxcc=504, code='KRU', name='Krupina'))
dbsession.add(State(dxcc=504, code='LUC', name='Lucenec'))
dbsession.add(State(dxcc=504, code='POL', name='Poltar'))
dbsession.add(State(dxcc=504, code='REV', name='Revuca'))
dbsession.add(State(dxcc=504, code='RSO', name='Rimavska Sobota'))
dbsession.add(State(dxcc=504, code='VKR', name='Velky Krtis'))
dbsession.add(State(dxcc=504, code='ZAR', name='Zarnovica'))
dbsession.add(State(dxcc=504, code='ZIH', name='Ziar nad Hronom'))
dbsession.add(State(dxcc=504, code='ZVO', name='Zvolen'))
dbsession.add(State(dxcc=504, code='GEL', name='Gelnica'))
dbsession.add(State(dxcc=504, code='KEA', name='Kosice 1'))
dbsession.add(State(dxcc=504, code='KEB', name='Kosice 2'))
dbsession.add(State(dxcc=504, code='KEC', name='Kosice 3'))
dbsession.add(State(dxcc=504, code='KED', name='Kosice 4'))
dbsession.add(State(dxcc=504, code='KEO', name='Kosice-okolie'))
dbsession.add(State(dxcc=504, code='MIC', name='Michalovce'))
dbsession.add(State(dxcc=504, code='ROZ', name='Roznava'))
dbsession.add(State(dxcc=504, code='SOB', name='Sobrance'))
dbsession.add(State(dxcc=504, code='SNV', name='Spisska Nova Ves'))
dbsession.add(State(dxcc=504, code='TRE', name='Trebisov'))
dbsession.add(State(dxcc=504, code='BAR', name='Bardejov'))
dbsession.add(State(dxcc=504, code='HUM', name='Humenne'))
dbsession.add(State(dxcc=504, code='KEZ', name='Kezmarok'))
dbsession.add(State(dxcc=504, code='LEV', name='Levoca'))
dbsession.add(State(dxcc=504, code='MED', name='Medzilaborce'))
dbsession.add(State(dxcc=504, code='POP', name='Poprad'))
dbsession.add(State(dxcc=504, code='PRE', name='Presov'))
dbsession.add(State(dxcc=504, code='SAB', name='Sabinov'))
dbsession.add(State(dxcc=504, code='SNI', name='Snina'))
dbsession.add(State(dxcc=504, code='SLU', name='Stara Lubovna'))
dbsession.add(State(dxcc=504, code='STR', name='Stropkov'))
dbsession.add(State(dxcc=504, code='SVI', name='Svidnik'))
dbsession.add(State(dxcc=504, code='VRT', name='Vranov nad Toplou'))
| sq8kfh/kfhlog | kfhlog/models/fixtures/state.py | Python | agpl-3.0 | 121,217 | [
"EPW"
] | 5f4ad8164db55d3aa3e59e8263e0440c9373fe85ee8255216b27fccc159137a1 |
# -*- coding: utf-8 -*-
"""
Wordlist taken from the New General Service List v1.01.
Changes include:
1. Converted from Excel to Python list;
2. Only headwords were retained for clarity;
3. Supplemental words, such as months and numbers, were added;
4. Words of fewer than 4 characters were removed.
For more information, see: http://www.newgeneralservicelist.org/
Note that the list itself is covered by the Creative Commons Attribution 3.0
Unported License.
"""
WORD_LIST = ['July', 'June', 'able', 'ally', 'also', 'area', 'army', 'aunt',
'away', 'baby', 'back', 'ball', 'band', 'bank', 'base', 'bath',
'bear', 'beat', 'beer', 'bell', 'belt', 'bend', 'bias', 'bike',
'bill', 'bind', 'bird', 'bite', 'blow', 'blue', 'boat', 'body',
'bomb', 'bond', 'bone', 'book', 'boom', 'boot', 'bore', 'boss',
'both', 'bowl', 'burn', 'bury', 'busy', 'cake', 'call', 'calm',
'camp', 'card', 'care', 'case', 'cash', 'cast', 'cell', 'cent',
'chat', 'chip', 'cite', 'city', 'club', 'coal', 'coat', 'code',
'coin', 'cold', 'come', 'cook', 'cool', 'cope', 'copy', 'core',
'cost', 'crew', 'crop', 'damn', 'dare', 'dark', 'data', 'date',
'dead', 'deal', 'dear', 'debt', 'deep', 'deny', 'desk', 'diet',
'dish', 'disk', 'door', 'down', 'drag', 'draw', 'drop', 'drug',
'dust', 'duty', 'each', 'earn', 'ease', 'east', 'easy', 'edge',
'edit', 'else', 'even', 'ever', 'evil', 'exam', 'face', 'fact',
'fade', 'fail', 'fair', 'fall', 'farm', 'fast', 'fear', 'feed',
'feel', 'file', 'fill', 'film', 'find', 'fine', 'fire', 'firm',
'fish', 'five', 'flag', 'flat', 'flow', 'fold', 'folk', 'food',
'fool', 'foot', 'form', 'four', 'free', 'from', 'fuel', 'full',
'fund', 'gain', 'game', 'gate', 'gaze', 'gear', 'gene', 'gift',
'girl', 'give', 'glad', 'goal', 'gold', 'golf', 'good', 'grab',
'gray', 'grin', 'grow', 'hair', 'half', 'hall', 'hand', 'hang',
'hard', 'harm', 'hate', 'have', 'head', 'hear', 'heat', 'hell',
'help', 'here', 'hero', 'hide', 'high', 'hill', 'hint', 'hire',
'hold', 'hole', 'home', 'hook', 'hope', 'host', 'hour', 'huge',
'hunt', 'hurt', 'idea', 'inch', 'into', 'iron', 'item', 'jail',
'join', 'joke', 'jump', 'jury', 'just', 'keen', 'keep', 'kick',
'kill', 'kind', 'king', 'kiss', 'knee', 'know', 'lack', 'lady',
'lake', 'land', 'last', 'late', 'lazy', 'lead', 'lean', 'leap',
'left', 'lend', 'less', 'life', 'lift', 'like', 'line', 'link',
'list', 'live', 'load', 'loan', 'lock', 'long', 'look', 'lose',
'loss', 'loud', 'love', 'luck', 'mail', 'main', 'make', 'male',
'many', 'mark', 'mass', 'mate', 'meal', 'mean', 'meat', 'meet',
'menu', 'mere', 'mess', 'mile', 'milk', 'mind', 'mine', 'miss',
'mode', 'mood', 'moon', 'more', 'most', 'move', 'much', 'must',
'name', 'near', 'neck', 'need', 'news', 'next', 'nice', 'nine',
'none', 'nose', 'note', 'noun', 'okay', 'once', 'only', 'onto',
'open', 'over', 'pace', 'pack', 'page', 'pain', 'pair', 'pale',
'park', 'part', 'pass', 'past', 'path', 'peak', 'peer', 'pick',
'pile', 'pink', 'pipe', 'plan', 'play', 'plot', 'plus', 'poem',
'poet', 'poll', 'pool', 'poor', 'port', 'pose', 'post', 'pour',
'pray', 'pull', 'pump', 'pure', 'push', 'race', 'rail', 'rain',
'rank', 'rare', 'rate', 'read', 'real', 'rear', 'rely', 'rent',
'rest', 'rice', 'rich', 'ride', 'ring', 'rise', 'risk', 'road',
'rock', 'role', 'roll', 'roof', 'room', 'root', 'ruin', 'rule',
'rush', 'safe', 'sail', 'sake', 'sale', 'salt', 'same', 'sand',
'save', 'scan', 'seal', 'seat', 'seed', 'seek', 'seem', 'self',
'sell', 'send', 'ship', 'shoe', 'shop', 'shot', 'show', 'shut',
'sick', 'side', 'sigh', 'sign', 'sing', 'sink', 'site', 'size',
'skin', 'slip', 'slow', 'snap', 'snow', 'soft', 'soil', 'some',
'song', 'soon', 'sort', 'soul', 'spin', 'spot', 'star', 'stay',
'stem', 'step', 'stir', 'stop', 'such', 'suit', 'sure', 'swim',
'tail', 'take', 'tale', 'talk', 'tall', 'tank', 'tape', 'task',
'taxi', 'team', 'tear', 'tell', 'tend', 'tent', 'term', 'test',
'text', 'than', 'that', 'then', 'they', 'thin', 'this', 'thus',
'till', 'time', 'tiny', 'tire', 'tone', 'tool', 'tour', 'town',
'trap', 'tree', 'trip', 'true', 'tube', 'tune', 'turn', 'twin',
'type', 'ugly', 'unit', 'upon', 'urge', 'user', 'vary', 'vast',
'verb', 'very', 'vice', 'view', 'vote', 'wage', 'wait', 'wake',
'walk', 'wall', 'want', 'warm', 'warn', 'wash', 'wave', 'weak',
'wear', 'week', 'well', 'west', 'what', 'when', 'wide', 'wife',
'wild', 'will', 'wind', 'wine', 'wing', 'wipe', 'wire', 'wise',
'wish', 'with', 'wood', 'word', 'work', 'wrap', 'yard', 'yeah',
'year', 'zone', 'April', 'March', 'about', 'above', 'abuse',
'actor', 'adapt', 'admit', 'adopt', 'adult', 'after', 'again',
'agent', 'agree', 'ahead', 'alarm', 'album', 'alive', 'allow',
'alone', 'along', 'alter', 'amaze', 'among', 'anger', 'angle',
'angry', 'apart', 'apply', 'argue', 'arise', 'aside', 'asset',
'avoid', 'award', 'aware', 'awful', 'badly', 'basic', 'basis',
'beach', 'begin', 'below', 'birth', 'black', 'blame', 'bless',
'blind', 'block', 'blood', 'board', 'boost', 'brain', 'brand',
'bread', 'break', 'breed', 'brief', 'bring', 'broad', 'brown',
'brush', 'build', 'bunch', 'burst', 'buyer', 'cable', 'carry',
'catch', 'cause', 'chain', 'chair', 'charm', 'chart', 'chase',
'cheap', 'check', 'cheek', 'chest', 'chief', 'child', 'civil',
'claim', 'class', 'clean', 'clear', 'climb', 'clock', 'close',
'cloud', 'coach', 'coast', 'color', 'cough', 'could', 'count',
'court', 'cover', 'crack', 'craft', 'crash', 'crazy', 'cream',
'crime', 'cross', 'crowd', 'curve', 'cycle', 'daily', 'dance',
'death', 'delay', 'depth', 'dirty', 'doubt', 'dozen', 'draft',
'drama', 'dream', 'dress', 'drink', 'drive', 'early', 'earth',
'eight', 'elect', 'email', 'empty', 'enemy', 'enjoy', 'enter',
'entry', 'equal', 'error', 'essay', 'event', 'every', 'exact',
'exist', 'extra', 'faith', 'false', 'fancy', 'fault', 'favor',
'fence', 'field', 'fifty', 'fight', 'final', 'first', 'flash',
'float', 'flood', 'floor', 'focus', 'force', 'forth', 'forty',
'found', 'frame', 'fresh', 'front', 'fruit', 'fully', 'funny',
'giant', 'glass', 'grade', 'grain', 'grand', 'grant', 'grass',
'great', 'green', 'greet', 'group', 'guard', 'guess', 'guest',
'guide', 'habit', 'happy', 'heart', 'heavy', 'hello', 'hence',
'honor', 'horse', 'hotel', 'house', 'human', 'humor', 'hurry',
'ideal', 'image', 'imply', 'index', 'inner', 'input', 'issue',
'joint', 'judge', 'knife', 'knock', 'label', 'labor', 'large',
'laugh', 'layer', 'learn', 'least', 'leave', 'legal', 'level',
'light', 'limit', 'local', 'logic', 'loose', 'lover', 'lucky',
'lunch', 'magic', 'major', 'maker', 'march', 'marry', 'match',
'maybe', 'mayor', 'metal', 'meter', 'might', 'minor', 'model',
'money', 'month', 'moral', 'motor', 'mount', 'mouse', 'mouth',
'movie', 'music', 'nerve', 'never', 'newly', 'night', 'noise',
'north', 'novel', 'nurse', 'occur', 'ocean', 'offer', 'often',
'opera', 'order', 'other', 'ought', 'owner', 'paint', 'panel',
'panic', 'paper', 'party', 'pause', 'peace', 'phase', 'phone',
'photo', 'piano', 'piece', 'pilot', 'pitch', 'place', 'plain',
'plane', 'plant', 'plate', 'point', 'pound', 'power', 'press',
'price', 'pride', 'prime', 'print', 'prior', 'prize', 'proof',
'proud', 'prove', 'pupil', 'quick', 'quiet', 'quite', 'quote',
'radio', 'raise', 'range', 'rapid', 'ratio', 'reach', 'react',
'ready', 'refer', 'relax', 'reply', 'right', 'rival', 'river',
'rough', 'round', 'route', 'royal', 'rural', 'scale', 'scare',
'scene', 'scope', 'score', 'sense', 'serve', 'seven', 'shade',
'shake', 'shall', 'shape', 'share', 'sharp', 'sheep', 'sheet',
'shelf', 'shell', 'shift', 'shine', 'shirt', 'shock', 'shoot',
'shore', 'short', 'shout', 'sight', 'silly', 'since', 'sixty',
'skill', 'skirt', 'slave', 'sleep', 'slice', 'slide', 'slope',
'small', 'smart', 'smell', 'smile', 'smoke', 'solid', 'solve',
'sorry', 'sound', 'south', 'space', 'spare', 'speak', 'speed',
'spell', 'spend', 'split', 'sport', 'staff', 'stage', 'stain',
'stair', 'stake', 'stamp', 'stand', 'stare', 'start', 'state',
'steal', 'steel', 'stick', 'still', 'stock', 'stone', 'store',
'storm', 'story', 'strip', 'study', 'stuff', 'style', 'sugar',
'swear', 'sweep', 'sweet', 'swing', 'table', 'taste', 'teach',
'thank', 'theme', 'there', 'thick', 'thing', 'think', 'three',
'throw', 'tight', 'title', 'today', 'tooth', 'topic', 'total',
'touch', 'tough', 'tower', 'trace', 'track', 'trade', 'trail',
'train', 'treat', 'trend', 'trial', 'trick', 'troop', 'truck',
'truly', 'trust', 'truth', 'twice', 'twist', 'uncle', 'under',
'union', 'unite', 'until', 'upper', 'upset', 'urban', 'usual',
'value', 'video', 'virus', 'visit', 'vital', 'voice', 'voter',
'waste', 'watch', 'water', 'weigh', 'weird', 'wheel', 'where',
'which', 'while', 'white', 'whole', 'woman', 'world', 'worry',
'worth', 'would', 'wound', 'write', 'wrong', 'yield', 'young',
'youth', 'August', 'Friday', 'Monday', 'Sunday', 'abroad',
'accept', 'access', 'accord', 'accuse', 'across', 'action',
'active', 'actual', 'adjust', 'admire', 'advice', 'advise',
'affair', 'affect', 'afford', 'afraid', 'agency', 'agenda',
'allege', 'almost', 'always', 'amount', 'animal', 'annual',
'answer', 'anyone', 'anyway', 'appeal', 'appear', 'around',
'arrest', 'arrive', 'artist', 'assess', 'assign', 'assist',
'assume', 'assure', 'attach', 'attack', 'attend', 'author',
'autumn', 'barely', 'battle', 'beauty', 'become', 'before',
'behave', 'behind', 'belief', 'belong', 'beside', 'beyond',
'bloody', 'border', 'borrow', 'bother', 'bottle', 'bottom',
'branch', 'breast', 'breath', 'bridge', 'bright', 'budget',
'burden', 'button', 'camera', 'cancel', 'cancer', 'carbon',
'career', 'carpet', 'castle', 'center', 'chance', 'change',
'charge', 'cheese', 'choice', 'choose', 'church', 'circle',
'clause', 'coffee', 'column', 'comedy', 'commit', 'common',
'corner', 'county', 'couple', 'course', 'cousin', 'create',
'credit', 'crisis', 'critic', 'custom', 'damage', 'danger',
'dealer', 'debate', 'decade', 'decide', 'deeply', 'defeat',
'defend', 'define', 'degree', 'demand', 'depend', 'derive',
'desert', 'design', 'desire', 'detail', 'detect', 'device',
'devote', 'dialog', 'differ', 'dinner', 'direct', 'divide',
'doctor', 'dollar', 'double', 'driver', 'during', 'easily',
'editor', 'effect', 'effort', 'eighty', 'either', 'eleven',
'emerge', 'empire', 'employ', 'enable', 'energy', 'engage',
'engine', 'enough', 'ensure', 'entire', 'escape', 'estate',
'ethnic', 'evolve', 'exceed', 'except', 'excess', 'excite',
'excuse', 'expand', 'expect', 'expert', 'export', 'expose',
'extend', 'extent', 'factor', 'fairly', 'family', 'famous',
'farmer', 'father', 'fellow', 'female', 'figure', 'filter',
'finger', 'finish', 'firmly', 'flight', 'flower', 'follow',
'forest', 'forget', 'formal', 'format', 'former', 'freeze',
'friend', 'future', 'garden', 'gather', 'gender', 'gentle',
'gently', 'glance', 'global', 'golden', 'govern', 'ground',
'growth', 'guilty', 'guitar', 'handle', 'happen', 'harbor',
'hardly', 'health', 'height', 'highly', 'holder', 'honest',
'hunger', 'ignore', 'import', 'impose', 'income', 'indeed',
'infant', 'inform', 'injure', 'injury', 'inside', 'insist',
'insure', 'intend', 'invent', 'invest', 'invite', 'island',
'itself', 'jacket', 'latter', 'launch', 'lawyer', 'leader',
'league', 'length', 'lesson', 'letter', 'likely', 'liquid',
'listen', 'little', 'locate', 'lovely', 'luxury', 'mainly',
'manage', 'manner', 'margin', 'market', 'master', 'matter',
'mature', 'medium', 'member', 'memory', 'mental', 'merely',
'method', 'middle', 'minute', 'mirror', 'mobile', 'modern',
'modify', 'module', 'moment', 'mostly', 'mother', 'motion',
'murder', 'muscle', 'museum', 'mutual', 'myself', 'narrow',
'nation', 'native', 'nature', 'nearby', 'nearly', 'ninety',
'nobody', 'normal', 'notice', 'notion', 'number', 'object',
'occupy', 'office', 'online', 'oppose', 'option', 'orange',
'origin', 'output', 'parent', 'partly', 'people', 'period',
'permit', 'person', 'phrase', 'planet', 'player', 'please',
'plenty', 'pocket', 'poetry', 'police', 'policy', 'potato',
'praise', 'prefer', 'pretty', 'prison', 'profit', 'prompt',
'proper', 'public', 'pursue', 'racial', 'random', 'rarely',
'rather', 'reader', 'really', 'reason', 'recall', 'recent',
'reckon', 'record', 'reduce', 'reform', 'refuse', 'regard',
'region', 'regret', 'reject', 'relate', 'relief', 'remain',
'remark', 'remind', 'remote', 'remove', 'repair', 'repeat',
'report', 'rescue', 'resign', 'resist', 'resort', 'result',
'retail', 'retain', 'retire', 'return', 'reveal', 'review',
'revise', 'reward', 'safety', 'salary', 'sample', 'scheme',
'school', 'scream', 'screen', 'search', 'season', 'second',
'secret', 'sector', 'secure', 'select', 'senior', 'series',
'server', 'settle', 'severe', 'sexual', 'shadow', 'should',
'shower', 'signal', 'silent', 'silver', 'simple', 'simply',
'singer', 'single', 'sister', 'slight', 'slowly', 'smooth',
'social', 'source', 'speech', 'spirit', 'spread', 'spring',
'square', 'stable', 'status', 'steady', 'strain', 'stream',
'street', 'stress', 'strict', 'strike', 'string', 'stroke',
'strong', 'studio', 'stupid', 'submit', 'sudden', 'suffer',
'summer', 'supply', 'surely', 'survey', 'switch', 'symbol',
'system', 'tackle', 'talent', 'target', 'tender', 'tennis',
'theory', 'thirst', 'thirty', 'though', 'threat', 'throat',
'ticket', 'tissue', 'tongue', 'toward', 'travel', 'twelve',
'twenty', 'unable', 'unique', 'unless', 'unlike', 'update',
'useful', 'valley', 'versus', 'vessel', 'victim', 'vision',
'visual', 'volume', 'wander', 'wealth', 'weapon', 'weekly',
'weight', 'whilst', 'widely', 'window', 'winner', 'winter',
'within', 'wonder', 'wooden', 'worker', 'writer', 'yellow',
'January', 'October', 'Tuesday', 'abandon', 'ability', 'absence',
'account', 'achieve', 'acquire', 'address', 'advance', 'adviser',
'against', 'airline', 'alcohol', 'already', 'alright', 'analyst',
'analyze', 'ancient', 'another', 'anxiety', 'anxious', 'anybody',
'anymore', 'appoint', 'approve', 'arrange', 'arrival', 'article',
'ashamed', 'athlete', 'attempt', 'attract', 'average', 'balance',
'barrier', 'because', 'bedroom', 'believe', 'beneath', 'benefit',
'besides', 'between', 'billion', 'breathe', 'briefly', 'brother',
'capable', 'capital', 'capture', 'careful', 'catalog', 'central',
'century', 'certain', 'chamber', 'channel', 'chapter', 'charity',
'chicken', 'citizen', 'classic', 'clearly', 'climate', 'closely',
'clothes', 'cluster', 'collect', 'college', 'combine', 'comfort',
'command', 'comment', 'company', 'compare', 'compete', 'complex',
'compose', 'compute', 'concept', 'concern', 'concert', 'conduct',
'confirm', 'confuse', 'connect', 'consist', 'consult', 'consume',
'contact', 'contain', 'content', 'contest', 'context', 'control',
'convert', 'correct', 'council', 'counsel', 'counter', 'country',
'crucial', 'culture', 'curious', 'current', 'curtain', 'declare',
'decline', 'defense', 'deficit', 'delight', 'deliver', 'density',
'deposit', 'depress', 'deserve', 'despite', 'destroy', 'develop',
'digital', 'discuss', 'disease', 'dismiss', 'display', 'dispute',
'distant', 'disturb', 'divorce', 'eastern', 'economy', 'edition',
'educate', 'elderly', 'element', 'embrace', 'emotion', 'enhance',
'entitle', 'episode', 'equally', 'evening', 'exactly', 'examine',
'example', 'exclude', 'exhaust', 'exhibit', 'expense', 'explain',
'explore', 'express', 'extract', 'extreme', 'factory', 'failure',
'fashion', 'feature', 'federal', 'fiction', 'fifteen', 'finally',
'finance', 'firstly', 'foreign', 'forever', 'formula', 'fortune',
'forward', 'freedom', 'fulfill', 'further', 'gallery', 'general',
'genetic', 'genuine', 'gesture', 'grammar', 'greatly', 'healthy',
'heavily', 'helpful', 'herself', 'himself', 'history', 'holiday',
'however', 'hundred', 'husband', 'illegal', 'illness', 'imagine',
'impress', 'improve', 'include', 'initial', 'inquiry', 'insight',
'inspire', 'install', 'instead', 'intense', 'involve', 'isolate',
'journal', 'journey', 'justice', 'justify', 'kitchen', 'largely',
'leather', 'lecture', 'liberal', 'library', 'license', 'machine',
'manager', 'massive', 'maximum', 'measure', 'medical', 'mention',
'message', 'million', 'minimum', 'mission', 'mistake', 'mixture',
'monitor', 'monthly', 'morning', 'musical', 'mystery', 'natural',
'neglect', 'neither', 'nervous', 'network', 'nothing', 'nowhere',
'nuclear', 'observe', 'obvious', 'offense', 'officer', 'operate',
'opinion', 'organic', 'outcome', 'outline', 'outside', 'overall',
'package', 'partner', 'passage', 'passion', 'patient', 'pattern',
'payment', 'penalty', 'pension', 'percent', 'perfect', 'perform',
'perhaps', 'picture', 'plastic', 'popular', 'portion', 'possess',
'poverty', 'precise', 'predict', 'premise', 'prepare', 'present',
'pretend', 'prevent', 'primary', 'printer', 'private', 'problem',
'proceed', 'process', 'produce', 'product', 'profile', 'program',
'project', 'promise', 'promote', 'propose', 'protect', 'protein',
'protest', 'provide', 'purpose', 'qualify', 'quality', 'quarter',
'quickly', 'quietly', 'radical', 'rapidly', 'reality', 'realize',
'receive', 'recover', 'recruit', 'reflect', 'refugee', 'regular',
'release', 'replace', 'request', 'require', 'reserve', 'resolve',
'respect', 'respond', 'restore', 'revenue', 'reverse', 'roughly',
'routine', 'satisfy', 'scholar', 'science', 'section', 'segment',
'serious', 'servant', 'service', 'session', 'seventy', 'several',
'shelter', 'silence', 'similar', 'situate', 'sixteen', 'society',
'soldier', 'somehow', 'someone', 'speaker', 'special', 'species',
'specify', 'sponsor', 'station', 'stomach', 'storage', 'strange',
'stretch', 'student', 'subject', 'succeed', 'success', 'suggest',
'summary', 'support', 'suppose', 'surface', 'surgery', 'survive',
'suspect', 'suspend', 'sustain', 'symptom', 'teacher', 'tension',
'theater', 'therapy', 'through', 'tonight', 'totally', 'tourism',
'tourist', 'traffic', 'trigger', 'trouble', 'typical', 'unclear',
'undergo', 'uniform', 'unknown', 'unusual', 'usually', 'variety',
'various', 'vehicle', 'venture', 'version', 'veteran', 'victory',
'village', 'violent', 'visible', 'visitor', 'wealthy', 'weather',
'weekend', 'welcome', 'welfare', 'western', 'whereas', 'whether',
'whisper', 'without', 'witness', 'December', 'February',
'November', 'Saturday', 'Thursday', 'abortion', 'absolute',
'abstract', 'academic', 'accident', 'accurate', 'activity',
'actually', 'addition', 'adequate', 'advocate', 'aircraft',
'although', 'analysis', 'announce', 'anything', 'anywhere',
'apparent', 'approach', 'approval', 'argument', 'attitude',
'audience', 'behavior', 'boundary', 'business', 'campaign',
'capacity', 'category', 'ceremony', 'chairman', 'champion',
'chemical', 'civilian', 'clinical', 'clothing', 'collapse',
'complain', 'complete', 'compound', 'comprise', 'computer',
'conclude', 'concrete', 'conflict', 'consider', 'constant',
'consumer', 'continue', 'contract', 'contrast', 'convince',
'coverage', 'creation', 'creative', 'creature', 'criminal',
'critical', 'cultural', 'currency', 'customer', 'darkness',
'database', 'daughter', 'decision', 'decrease', 'dedicate',
'delivery', 'describe', 'designer', 'directly', 'director',
'disagree', 'disaster', 'discount', 'discover', 'disorder',
'distance', 'distinct', 'district', 'division', 'document',
'domestic', 'dominate', 'dramatic', 'economic', 'eighteen',
'election', 'electric', 'emphasis', 'employee', 'employer',
'engineer', 'enormous', 'entirely', 'entrance', 'envelope',
'equation', 'estimate', 'evaluate', 'everyday', 'everyone',
'evidence', 'exchange', 'exercise', 'exposure', 'external',
'facility', 'familiar', 'favorite', 'festival', 'flexible',
'football', 'forecast', 'fourteen', 'fragment', 'frequent',
'friendly', 'frighten', 'function', 'generate', 'governor',
'graduate', 'grateful', 'hesitate', 'historic', 'horrible',
'hospital', 'identify', 'identity', 'incident', 'increase',
'indicate', 'industry', 'innocent', 'instance', 'interest',
'interior', 'internal', 'investor', 'judgment', 'language',
'laughter', 'listener', 'literary', 'location', 'magazine',
'maintain', 'majority', 'marriage', 'material', 'medicine',
'military', 'minister', 'minority', 'moderate', 'moreover',
'mortgage', 'motivate', 'mountain', 'movement', 'multiple',
'musician', 'national', 'negative', 'neighbor', 'nineteen',
'normally', 'northern', 'nowadays', 'numerous', 'occasion',
'official', 'operator', 'opponent', 'opposite', 'ordinary',
'organize', 'original', 'overcome', 'overseas', 'parallel',
'perceive', 'personal', 'persuade', 'physical', 'platform',
'pleasant', 'pleasure', 'politics', 'portrait', 'position',
'positive', 'possible', 'possibly', 'powerful', 'practice',
'pregnant', 'presence', 'preserve', 'pressure', 'previous',
'priority', 'prisoner', 'probably', 'producer', 'progress',
'properly', 'property', 'proposal', 'prospect', 'province',
'purchase', 'quantity', 'question', 'reaction', 'recently',
'recovery', 'regional', 'register', 'regulate', 'relation',
'relative', 'relevant', 'reliable', 'religion', 'remember',
'reporter', 'research', 'resident', 'resource', 'response',
'restrict', 'romantic', 'sanction', 'schedule', 'secondly',
'security', 'sentence', 'separate', 'sequence', 'shoulder',
'slightly', 'software', 'solution', 'somebody', 'somewhat',
'southern', 'specific', 'standard', 'straight', 'stranger',
'strategy', 'strength', 'strongly', 'struggle', 'suddenly',
'suitable', 'supplier', 'surprise', 'surround', 'survival',
'teenager', 'tendency', 'terrible', 'thirteen', 'thousand',
'threaten', 'together', 'tomorrow', 'transfer', 'trillion',
'underlie', 'universe', 'unlikely', 'valuable', 'variable',
'violence', 'weakness', 'whatever', 'whenever', 'wherever',
'withdraw', 'yourself', 'September', 'Wednesday', 'accompany',
'advantage', 'adventure', 'advertise', 'afternoon', 'agreement',
'alongside', 'amendment', 'apartment', 'apologize', 'assistant',
'associate', 'attention', 'attribute', 'available', 'awareness',
'basically', 'beautiful', 'breakfast', 'brilliant', 'broadcast',
'calculate', 'carefully', 'celebrate', 'certainly', 'challenge',
'character', 'childhood', 'chocolate', 'cigarette', 'classical',
'colleague', 'committee', 'community', 'complaint', 'component',
'condition', 'confident', 'confusion', 'construct', 'corporate',
'criterion', 'criticism', 'criticize', 'currently', 'dangerous',
'democracy', 'dependent', 'determine', 'different', 'difficult',
'dimension', 'direction', 'disappear', 'discovery', 'diversity',
'education', 'effective', 'efficient', 'eliminate', 'elsewhere',
'embarrass', 'emergency', 'emotional', 'emphasize', 'encounter',
'encourage', 'entertain', 'equipment', 'essential', 'establish',
'everybody', 'evolution', 'excellent', 'exception', 'executive',
'existence', 'expansion', 'expensive', 'extension', 'extensive',
'extremely', 'fantastic', 'fascinate', 'financial', 'formation',
'fortunate', 'framework', 'frequency', 'furniture', 'generally',
'gentleman', 'gradually', 'guarantee', 'guideline', 'happiness',
'highlight', 'historian', 'hopefully', 'household', 'immediate',
'immigrant', 'implement', 'important', 'incentive', 'infection',
'inflation', 'influence', 'initially', 'insurance', 'integrate',
'intention', 'interpret', 'interview', 'introduce', 'knowledge',
'landscape', 'liability', 'literally', 'meanwhile', 'mechanism',
'narrative', 'naturally', 'necessary', 'negotiate', 'newspaper',
'objective', 'obviously', 'operation', 'otherwise', 'ourselves',
'ownership', 'paragraph', 'passenger', 'perfectly', 'permanent',
'personnel', 'political', 'pollution', 'potential', 'practical',
'precisely', 'pregnancy', 'president', 'primarily', 'principal',
'principle', 'privilege', 'procedure', 'professor', 'promotion',
'provision', 'publisher', 'recognize', 'recommend', 'reduction',
'reference', 'regularly', 'religious', 'represent', 'scientist',
'secondary', 'secretary', 'selection', 'sensitive', 'seriously',
'seventeen', 'similarly', 'situation', 'something', 'sometimes',
'somewhere', 'stability', 'statement', 'statistic', 'stimulate',
'structure', 'substance', 'summarize', 'supporter', 'technical',
'technique', 'telephone', 'temporary', 'territory', 'terrorist',
'therefore', 'tradition', 'transform', 'translate', 'transport',
'treatment', 'typically', 'undertake', 'universal', 'variation',
'vegetable', 'virtually', 'voluntary', 'volunteer', 'wonderful',
'yesterday', 'absolutely', 'acceptable', 'accomplish',
'additional', 'adjustment', 'aggressive', 'altogether',
'anticipate', 'apparently', 'appearance', 'appreciate',
'assessment', 'assistance', 'assumption', 'atmosphere',
'attachment', 'attendance', 'attraction', 'attractive',
'background', 'biological', 'capability', 'collection',
'commercial', 'commission', 'commitment', 'comparison',
'competitor', 'completely', 'complexity', 'complicate',
'compromise', 'conclusion', 'confidence', 'connection',
'consistent', 'constantly', 'constitute', 'constraint',
'consultant', 'continuous', 'contribute', 'convention',
'correspond', 'definitely', 'definition', 'democratic',
'department', 'depression', 'difference', 'difficulty',
'disappoint', 'discipline', 'discussion', 'distribute',
'efficiency', 'electronic', 'employment', 'enterprise',
'equivalent', 'especially', 'evaluation', 'eventually',
'everything', 'everywhere', 'excitement', 'exhibition',
'experience', 'experiment', 'expression', 'faithfully',
'foundation', 'frequently', 'friendship', 'functional',
'generation', 'government', 'historical', 'hypothesis',
'illustrate', 'importance', 'impossible', 'impression',
'impressive', 'indication', 'individual', 'industrial',
'initiative', 'innovation', 'instrument', 'investment',
'invitation', 'journalist', 'laboratory', 'leadership',
'limitation', 'literature', 'management', 'membership',
'motivation', 'obligation', 'opposition', 'originally',
'particular', 'percentage', 'perception', 'permission',
'personally', 'phenomenon', 'philosophy', 'photograph',
'politician', 'population', 'possession', 'preference',
'presumably', 'previously', 'production', 'profession',
'proportion', 'protection', 'reasonable', 'reasonably',
'reflection', 'regardless', 'regulation', 'relatively',
'remarkable', 'reputation', 'researcher', 'resistance',
'resolution', 'restaurant', 'retirement', 'revolution',
'scientific', 'settlement', 'specialist', 'specialize',
'strengthen', 'structural', 'subsequent', 'substitute',
'successful', 'sufficient', 'suggestion', 'supplement',
'technology', 'television', 'themselves', 'throughout',
'tournament', 'transition', 'ultimately', 'understand',
'university', 'achievement', 'acknowledge', 'acquisition',
'alternative', 'application', 'appointment', 'appropriate',
'arrangement', 'association', 'celebration', 'combination',
'comfortable', 'communicate', 'competition', 'competitive',
'composition', 'concentrate', 'consequence', 'cooperation',
'corporation', 'demonstrate', 'description', 'destruction',
'development', 'differently', 'distinction', 'distinguish',
'educational', 'effectively', 'electricity', 'environment',
'examination', 'expectation', 'expenditure', 'explanation',
'fundamental', 'furthermore', 'grandmother', 'imagination',
'immediately', 'implication', 'improvement', 'incorporate',
'independent', 'information', 'institution', 'instruction',
'interaction', 'investigate', 'involvement', 'legislation',
'maintenance', 'manufacture', 'mathematics', 'measurement',
'necessarily', 'negotiation', 'observation', 'opportunity',
'participant', 'participate', 'partnership', 'performance',
'personality', 'perspective', 'possibility', 'potentially',
'preparation', 'probability', 'publication', 'recognition',
'requirement', 'responsible', 'restriction', 'shareholder',
'significant', 'substantial', 'temperature', 'theoretical',
'traditional', 'uncertainty', 'agricultural', 'announcement',
'architecture', 'championship', 'characterize', 'circumstance',
'compensation', 'consequently', 'conservative', 'considerable',
'construction', 'contemporary', 'contribution', 'conventional',
'conversation', 'dramatically', 'experimental', 'illustration',
'increasingly', 'independence', 'intellectual', 'intelligence',
'intervention', 'introduction', 'manufacturer', 'neighborhood',
'nevertheless', 'occasionally', 'organization', 'particularly',
'presentation', 'presidential', 'professional', 'registration',
'relationship', 'respectively', 'satisfaction', 'significance',
'specifically', 'subsequently', 'successfully', 'surprisingly',
'unemployment', 'accommodation', 'advertisement', 'approximately',
'automatically', 'communication', 'comprehensive', 'concentration',
'consideration', 'controversial', 'demonstration', 'determination',
'entertainment', 'environmental', 'establishment', 'extraordinary',
'institutional', 'international', 'investigation', 'participation',
'psychological', 'qualification', 'significantly', 'unfortunately',
'administration', 'characteristic', 'implementation',
'interpretation', 'recommendation', 'representation',
'representative', 'responsibility', 'transportation']
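# Minimal usage sketch (not part of the original module): draw a few random
# entries from WORD_LIST with the standard library. The separator and word
# count below are illustrative choices, not an API of this package.
if __name__ == '__main__':
    import random
    print('-'.join(random.choice(WORD_LIST) for _ in range(4)))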
| petrilli/generalwords | generalwords/generalwords.py | Python | bsd-3-clause | 34,720 | [
"VisIt"
] | de75afc7b955a9ef3d72281be1528fb72faa4dfbeace0f96a62ec6f737cf06ed |
__source__ = 'https://leetcode.com/problems/number-of-distinct-islands/'
# Time: O(R * C)
# Space: O(R * C)
#
# Description: Leetcode # 694. Number of Distinct Islands
#
# Given a non-empty 2D array grid of 0's and 1's, an island is a group of 1's (representing land)
# connected 4-directionally (horizontal or vertical.)
# You may assume all four edges of the grid are surrounded by water.
#
# Count the number of distinct islands. An island is considered to be the same
# as another if and only if one island can be translated (and not rotated or reflected) to equal the other.
#
# Example 1:
# 11000
# 11000
# 00011
# 00011
# Given the above grid map, return 1.
# Example 2:
# 11011
# 10000
# 00001
# 11011
# Given the above grid map, return 3.
#
# Notice that:
# 11
# 1
# and
# 1
# 11
# are considered different island shapes, because we do not consider reflection / rotation.
# Note: The length of each dimension in the given grid does not exceed 50.
#
# Companies
# Amazon
# Related Topics
# Hash Table Depth-first Search
# Similar Questions
# Number of Islands
#
import unittest
import collections
# 76ms 98.09%
class Solution(object):
def numDistinctIslands(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
if not grid or not grid[0]:
return 0
row = len(grid)
col = len(grid[0])
visited = [[0 for _ in range(col)] for _ in range(row)]
def dfs(x, y, c_str):
visited[x][y] = 1
if x - 1 >= 0 and grid[x-1][y] == 1 and visited[x-1][y] == 0:
c_str += 'u'
c_str = dfs(x-1, y, c_str)
c_str += 'o'
if y + 1 < col and grid[x][y+1] == 1 and visited[x][y+1] == 0:
c_str += 'r'
c_str = dfs(x, y+1, c_str)
c_str += 'o'
if x + 1 < row and grid[x+1][y] == 1 and visited[x+1][y] == 0:
c_str += 'd'
c_str = dfs(x+1, y, c_str)
c_str += 'o'
if y - 1 >= 0 and grid[x][y-1] == 1 and visited[x][y-1] == 0:
c_str += 'l'
c_str = dfs(x, y-1, c_str)
c_str += 'o'
return c_str
ans = 0
str_set = set()
for i in range(row):
for j in range(col):
if grid[i][j] == 1 and visited[i][j] == 0:
shape_str = dfs(i, j, '')
if shape_str not in str_set:
ans += 1
str_set.add(shape_str)
return ans
# 68ms 99.28%
class Solution2(object):
def numDistinctIslands(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
m, n = len(grid), len(grid[0])
def dfs(i, j, posr, posc):
coord.append((posr, posc))
grid[i][j] = 0
if i < m - 1 and grid[i + 1][j]:
dfs(i + 1, j, posr + 1, posc)
if i > 0 and grid[i - 1][j]:
dfs(i - 1, j, posr - 1, posc)
if j < n - 1 and grid[i][j + 1]:
dfs(i, j + 1, posr, posc + 1)
if j > 0 and grid[i][j - 1]:
dfs(i, j - 1, posr, posc - 1)
d = collections.Counter()
for i in xrange(m):
for j in xrange(n):
if grid[i][j]:
coord = []
dfs(i, j, 0, 0)
                    d[tuple(coord)] += 1
return len(d)
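# Quick sanity check (illustrative, not part of the original submission):
# Example 2 from the problem statement should yield three distinct shapes.
def _demo():
    grid = [[1, 1, 0, 1, 1],
            [1, 0, 0, 0, 0],
            [0, 0, 0, 0, 1],
            [1, 1, 0, 1, 1]]
    assert Solution().numDistinctIslands(grid) == 3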
class TestMethods(unittest.TestCase):
def test_Local(self):
self.assertEqual(1, 1)
if __name__ == '__main__':
unittest.main()
Java = '''
# Thought: https://leetcode.com/problems/number-of-distinct-islands/solution/
Approach #1: Hash By Local Coordinates [Accepted]
Complexity Analysis
Time Complexity: O(R*C), where R is the number of rows in the given grid, and C is the number of columns.
We visit every square once.
Space complexity: O(R*C), the space used by seen to keep track of visited squares, and shapes.
# 32ms 53.77%
class Solution {
int[][] grid;
boolean[][] seen;
Set<Integer> shape;
public void explore(int r, int c, int r0, int c0) {
if (0 <= r && r < grid.length && 0 <= c && c < grid[0].length &&
grid[r][c] == 1 && !seen[r][c]) {
seen[r][c] = true;
shape.add((r - r0) * 2 * grid[0].length + (c - c0));
explore(r+1, c, r0, c0);
explore(r-1, c, r0, c0);
explore(r, c+1, r0, c0);
explore(r, c-1, r0, c0);
}
}
public int numDistinctIslands(int[][] grid) {
this.grid = grid;
seen = new boolean[grid.length][grid[0].length];
        Set<Set<Integer>> shapes = new HashSet<>();
for (int r = 0; r < grid.length; r++) {
for (int c = 0; c < grid[0].length; c++) {
shape = new HashSet<Integer>();
explore(r, c, r, c);
if (!shape.isEmpty()) {
shapes.add(shape);
}
}
}
return shapes.size();
}
}
# Approach #2: Hash By Path Signature [Accepted]
# Complexity Analysis
# Time and Space Complexity: O(R * C). The analysis is the same as in Approach #1.
# When we start a depth-first search on the top-left square of some island,
# the path taken by our depth-first search will be the same if and only if the shape is the same.
# 50ms 39.12%
class Solution {
int[][] grid;
boolean[][] seen;
ArrayList<Integer> shape;
public int numDistinctIslands(int[][] grid) {
this.grid = grid;
seen = new boolean[grid.length][grid[0].length];
        Set<ArrayList<Integer>> shapes = new HashSet<>();
for (int r = 0; r < grid.length; r++) {
for (int c = 0; c < grid[0].length; c++) {
shape = new ArrayList<Integer>();
explore(r, c, 0);
if (!shape.isEmpty()) shapes.add(shape);
}
}
return shapes.size();
}
public void explore(int r, int c, int di) {
if (0 <= r && r < grid.length && 0 <= c && c < grid[0].length && grid[r][c] == 1 && !seen[r][c]) {
seen[r][c] = true;
shape.add(di);
explore(r+1, c, 1);
explore(r-1, c, 2);
explore(r, c+1, 3);
explore(r, c-1, 4);
shape.add(0);
}
}
}
# Rolling hash
# 19ms 99.84%
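# Note (added, not from the original post): dfs() folds each visited cell's
# offset from the island's starting cell (stx, sty) into a single integer via
# hash = hash * 31 + encoded offset, so translated copies of a shape hash to
# the same value. Unlike the set-based approaches above, two different shapes
# could in principle collide on the same hash value.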
class Solution {
public int numDistinctIslands(int[][] grid) {
HashSet<Integer> set = new HashSet<>();
for (int i = 0; i < grid.length; i++) {
for (int j = 0; j < grid[0].length; j++) {
if (grid[i][j] == 1) {
int hash = dfs(grid, i, j, i, j, 17);
if (!set.contains(hash)) set.add(hash);
}
}
}
return set.size();
}
private int dfs(int[][] grid, int stx, int sty, int x, int y, int hash){
        if (x < 0 || x >= grid.length || y < 0 || y >= grid[0].length || grid[x][y] != 1) return hash;
grid[x][y] = 2;
hash = hash * 31 + (x - stx) * grid.length + (y - sty);
for (int i = -1; i < 2; i += 2) {
hash = dfs(grid, stx, sty, x + i, y, hash);
hash = dfs(grid, stx, sty, x, y + i, hash);
}
return hash;
}
}
'''
| JulyKikuAkita/PythonPrac | cs15211/NumberOfDistinctIslands.py | Python | apache-2.0 | 7,360 | [
"VisIt"
] | f8f8466f264159250d7523d25a6309836457754b62738c09e67a91191566524e |
#!/usr/bin/env python
"""
Command line interface to the Statistical Downscaling Model (SDM) package.
"""
import os
import sys
from ConfigParser import ConfigParser
import argparse
from sdm import __version__
from sdm.cod import CoD
from sdm.extractor import GriddedExtractor
def read_config(config_file):
if not config_file:
if 'USERPROFILE' in os.environ: # Windows
config_file = os.path.join(os.environ['USERPROFILE'], '.sdm.cfg')
else:
config_file = os.path.join(os.environ['HOME'], '.sdm.cfg')
config = ConfigParser()
config.optionxform = str # preserve case
config.read(config_file)
return config
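# For reference, main() below reads three keys from a [dxt] section of the
# configuration file. A hypothetical ~/.sdm.cfg could therefore look like this
# (directory paths are placeholders, not shipped defaults):
#
#   [dxt]
#   cod_base_dir = /data/sdm/cod
#   mask_base_dir = /data/sdm/masks
#   gridded_base_dir = /data/sdm/gridded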
def main(args):
ap = argparse.ArgumentParser(prog=os.path.basename(__file__),
formatter_class=argparse.RawDescriptionHelpFormatter,
description='',
epilog=__doc__)
ap.add_argument('-c', '--config-file',
required=False,
help='the configuration file, default to "$HOME/.sdm.cfg"')
ap.add_argument('-V', '--verbose',
action='store_true',
default=False,
help='be more chatty')
ap.add_argument('-v', '--version',
action='version',
version='%s: v%s' % (ap.prog, __version__))
subparsers = ap.add_subparsers(dest='sub_command',
title='List of sub-commands',
metavar='sub-command',
help='"%s sub-command -h" for more help' % ap.prog)
cod_getpath_parser = subparsers.add_parser('cod-getpath',
help='get the full path to a CoD file')
cod_getpath_parser.add_argument('-m', '--model',
required=True,
help='model name')
cod_getpath_parser.add_argument('-c', '--scenario',
required=False,
help='scenario name, e.g. historical, rcp45, rcp85')
cod_getpath_parser.add_argument('-r', '--region-type',
required=True,
help='pre-defined region type name, e.g. sea, sec, tas ...')
cod_getpath_parser.add_argument('-s', '--season',
required=True,
help='season number, e.g. 1 (DJF), 2 (MAM), 3 (JJA), or 4 (SON)')
cod_getpath_parser.add_argument('-p', '--predictand',
required=True,
help='predictand name, e.g. rain, tmax, tmin')
dxt_gridded_parser = subparsers.add_parser('dxt-gridded',
help='extract gridded data using the given cod file')
dxt_gridded_parser.add_argument('cod_file_path',
help='full path to the CoD file')
dxt_gridded_parser.add_argument('output_file',
help='output netCDF file name')
dxt_gridded_parser.add_argument('-R', '--region',
required=False,
help='the region where the data are to be extracted')
dxt_gridded2_parser = subparsers.add_parser('dxt-gridded2',
help='extract gridded data with the given parameters')
dxt_gridded2_parser.add_argument('output_file',
help='output netCDF file name')
dxt_gridded2_parser.add_argument('-m', '--model',
required=True,
help='model name')
dxt_gridded2_parser.add_argument('-c', '--scenario',
required=False,
help='scenario name, e.g. historical, rcp45, rcp85')
dxt_gridded2_parser.add_argument('-r', '--region-type',
required=True,
help='pre-defined region type name, e.g. sea, sec, tas ...')
dxt_gridded2_parser.add_argument('-s', '--season',
required=True,
help='season number, e.g. 1 (DJF), 2 (MAM), 3 (JJA), or 4 (SON)')
dxt_gridded2_parser.add_argument('-p', '--predictand',
required=True,
help='predictand name, e.g. rain, tmax, tmin')
dxt_gridded2_parser.add_argument('-R', '--region',
required=False,
help='the region where the data are to be extracted (default to region-type)')
ns = ap.parse_args(args)
config = read_config(ns.config_file)
if ns.sub_command == 'cod-getpath':
print CoD(config.get('dxt', 'cod_base_dir'), verbose=ns.verbose).get_cod_file_path(
ns.model, ns.scenario, ns.region_type, ns.season, ns.predictand)
elif ns.sub_command in ('dxt-gridded', 'dxt-gridded2'):
gridded_extractor = GriddedExtractor(cod_base_dir=config.get('dxt', 'cod_base_dir'),
mask_base_dir=config.get('dxt', 'mask_base_dir'),
gridded_base_dir=config.get('dxt', 'gridded_base_dir'),
verbose=ns.verbose)
if ns.sub_command == 'dxt-gridded':
model, scenario, region_type, season, predictand = CoD.get_components_from_path(ns.cod_file_path)
else:
model, scenario, region_type, season, predictand = \
ns.model, ns.scenario, ns.region_type, ns.season, ns.predictand
data, dates, lat, lon = gridded_extractor.extract(model, scenario, region_type, season, predictand,
ns.region)
GriddedExtractor.save_netcdf(ns.output_file, data, dates, lat, lon,
model, scenario, region_type, season, predictand)
if __name__ == '__main__':
main(sys.argv[1:])
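# Example invocations (model, scenario, region and season values are purely
# illustrative; see the per-option help strings above):
#   python sdmrun.py cod-getpath -m ACCESS1-0 -c rcp45 -r tas -s 1 -p rain
#   python sdmrun.py dxt-gridded2 out.nc -m ACCESS1-0 -c rcp45 -r tas -s 1 -p rain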
| paolap/cwsl-ctools | sdm/sdmrun.py | Python | apache-2.0 | 6,273 | [
"NetCDF"
] | cf2d2cf248d307cf6dd651b2f933f4bd9dc8a19c70d7f1b01869a6020669cbae |
from __future__ import division, print_function
import os, sys, warnings
def fn_name(): return sys._getframe(1).f_code.co_name
if sys.version_info[0] >= 3:
warnings.warn(
"The gtk* backends have not been tested with Python 3.x",
ImportWarning)
try:
import gobject
import gtk; gdk = gtk.gdk
import pango
except ImportError:
raise ImportError("Gtk* backend requires pygtk to be installed.")
pygtk_version_required = (2,4,0)
if gtk.pygtk_version < pygtk_version_required:
raise ImportError ("PyGTK %d.%d.%d is installed\n"
"PyGTK %d.%d.%d or later is required"
% (gtk.pygtk_version + pygtk_version_required))
del pygtk_version_required
_new_tooltip_api = (gtk.pygtk_version[1] >= 12)
import matplotlib
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import RendererBase, GraphicsContextBase, \
FigureManagerBase, FigureCanvasBase, NavigationToolbar2, cursors, TimerBase
from matplotlib.backend_bases import ShowBase
from matplotlib.backends.backend_gdk import RendererGDK, FigureCanvasGDK
from matplotlib.cbook import is_string_like, is_writable_file_like
from matplotlib.colors import colorConverter
from matplotlib.figure import Figure
from matplotlib.widgets import SubplotTool
from matplotlib import lines
from matplotlib import markers
from matplotlib import cbook
from matplotlib import verbose
from matplotlib import rcParams
backend_version = "%d.%d.%d" % gtk.pygtk_version
_debug = False
#_debug = True
# the true dots per inch on the screen; should be display dependent
# see http://groups.google.com/groups?q=screen+dpi+x11&hl=en&lr=&ie=UTF-8&oe=UTF-8&safe=off&selm=7077.26e81ad5%40swift.cs.tcd.ie&rnum=5 for some info about screen dpi
PIXELS_PER_INCH = 96
# Hide the benign warning that it can't stat a file that doesn't exist.
warnings.filterwarnings('ignore', '.*Unable to retrieve the file info for.*', gtk.Warning)
cursord = {
cursors.MOVE : gdk.Cursor(gdk.FLEUR),
cursors.HAND : gdk.Cursor(gdk.HAND2),
cursors.POINTER : gdk.Cursor(gdk.LEFT_PTR),
cursors.SELECT_REGION : gdk.Cursor(gdk.TCROSS),
}
# ref gtk+/gtk/gtkwidget.h
def GTK_WIDGET_DRAWABLE(w):
flags = w.flags();
return flags & gtk.VISIBLE != 0 and flags & gtk.MAPPED != 0
def draw_if_interactive():
"""
Is called after every pylab drawing command
"""
if matplotlib.is_interactive():
figManager = Gcf.get_active()
if figManager is not None:
figManager.canvas.draw_idle()
class Show(ShowBase):
def mainloop(self):
if gtk.main_level() == 0:
gtk.main()
show = Show()
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, thisFig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
canvas = FigureCanvasGTK(figure)
manager = FigureManagerGTK(canvas, num)
return manager
class TimerGTK(TimerBase):
'''
Subclass of :class:`backend_bases.TimerBase` that uses GTK for timer events.
Attributes:
* interval: The time between timer events in milliseconds. Default
is 1000 ms.
* single_shot: Boolean flag indicating whether this timer should
operate as single shot (run once and then stop). Defaults to False.
* callbacks: Stores list of (func, args) tuples that will be called
upon timer events. This list can be manipulated directly, or the
functions add_callback and remove_callback can be used.
'''
def _timer_start(self):
# Need to stop it, otherwise we potentially leak a timer id that will
# never be stopped.
self._timer_stop()
self._timer = gobject.timeout_add(self._interval, self._on_timer)
def _timer_stop(self):
if self._timer is not None:
gobject.source_remove(self._timer)
self._timer = None
def _timer_set_interval(self):
# Only stop and restart it if the timer has already been started
if self._timer is not None:
self._timer_stop()
self._timer_start()
def _on_timer(self):
TimerBase._on_timer(self)
# Gtk timeout_add() requires that the callback returns True if it
# is to be called again.
if len(self.callbacks) > 0 and not self._single:
return True
else:
self._timer = None
return False
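# Illustrative note (not part of the original module): user code normally does
# not instantiate TimerGTK directly; it asks the canvas for a timer, e.g.
#
#   timer = fig.canvas.new_timer(interval=500)   # fire every 500 ms
#   timer.add_callback(update)                   # update() is assumed user code
#   timer.start()
#
# where `fig` is assumed to be a Figure displayed with this GTK backend.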
class FigureCanvasGTK (gtk.DrawingArea, FigureCanvasBase):
keyvald = {65507 : 'control',
65505 : 'shift',
65513 : 'alt',
65508 : 'control',
65506 : 'shift',
65514 : 'alt',
65361 : 'left',
65362 : 'up',
65363 : 'right',
65364 : 'down',
65307 : 'escape',
65470 : 'f1',
65471 : 'f2',
65472 : 'f3',
65473 : 'f4',
65474 : 'f5',
65475 : 'f6',
65476 : 'f7',
65477 : 'f8',
65478 : 'f9',
65479 : 'f10',
65480 : 'f11',
65481 : 'f12',
65300 : 'scroll_lock',
65299 : 'break',
65288 : 'backspace',
65293 : 'enter',
65379 : 'insert',
65535 : 'delete',
65360 : 'home',
65367 : 'end',
65365 : 'pageup',
65366 : 'pagedown',
65438 : '0',
65436 : '1',
65433 : '2',
65435 : '3',
65430 : '4',
65437 : '5',
65432 : '6',
65429 : '7',
65431 : '8',
65434 : '9',
65451 : '+',
65453 : '-',
65450 : '*',
65455 : '/',
65439 : 'dec',
65421 : 'enter',
65511 : 'super',
65512 : 'super',
65406 : 'alt',
65289 : 'tab',
}
# Setting this as a static constant prevents
    # the resulting expression from leaking
event_mask = (gdk.BUTTON_PRESS_MASK |
gdk.BUTTON_RELEASE_MASK |
gdk.EXPOSURE_MASK |
gdk.KEY_PRESS_MASK |
gdk.KEY_RELEASE_MASK |
gdk.ENTER_NOTIFY_MASK |
gdk.LEAVE_NOTIFY_MASK |
gdk.POINTER_MOTION_MASK |
gdk.POINTER_MOTION_HINT_MASK)
def __init__(self, figure):
if _debug: print('FigureCanvasGTK.%s' % fn_name())
FigureCanvasBase.__init__(self, figure)
gtk.DrawingArea.__init__(self)
self._idle_draw_id = 0
self._need_redraw = True
self._pixmap_width = -1
self._pixmap_height = -1
self._lastCursor = None
self.connect('scroll_event', self.scroll_event)
self.connect('button_press_event', self.button_press_event)
self.connect('button_release_event', self.button_release_event)
self.connect('configure_event', self.configure_event)
self.connect('expose_event', self.expose_event)
self.connect('key_press_event', self.key_press_event)
self.connect('key_release_event', self.key_release_event)
self.connect('motion_notify_event', self.motion_notify_event)
self.connect('leave_notify_event', self.leave_notify_event)
self.connect('enter_notify_event', self.enter_notify_event)
self.set_events(self.__class__.event_mask)
self.set_double_buffered(False)
self.set_flags(gtk.CAN_FOCUS)
self._renderer_init()
self._idle_event_id = gobject.idle_add(self.idle_event)
self.last_downclick = {}
def destroy(self):
#gtk.DrawingArea.destroy(self)
self.close_event()
gobject.source_remove(self._idle_event_id)
if self._idle_draw_id != 0:
gobject.source_remove(self._idle_draw_id)
def scroll_event(self, widget, event):
if _debug: print('FigureCanvasGTK.%s' % fn_name())
x = event.x
# flipy so y=0 is bottom of canvas
y = self.allocation.height - event.y
if event.direction==gdk.SCROLL_UP:
step = 1
else:
step = -1
FigureCanvasBase.scroll_event(self, x, y, step, guiEvent=event)
return False # finish event propagation?
def button_press_event(self, widget, event):
if _debug: print('FigureCanvasGTK.%s' % fn_name())
x = event.x
# flipy so y=0 is bottom of canvas
y = self.allocation.height - event.y
dblclick = (event.type == gdk._2BUTTON_PRESS)
if not dblclick:
# GTK is the only backend that generates a DOWN-UP-DOWN-DBLCLICK-UP event
# sequence for a double click. All other backends have a DOWN-UP-DBLCLICK-UP
# sequence. In order to provide consistency to matplotlib users, we will
# eat the extra DOWN event in the case that we detect it is part of a double
# click.
# first, get the double click time in milliseconds.
current_time = event.get_time()
last_time = self.last_downclick.get(event.button,0)
dblclick_time = gtk.settings_get_for_screen(gdk.screen_get_default()).get_property('gtk-double-click-time')
delta_time = current_time-last_time
if delta_time < dblclick_time:
del self.last_downclick[event.button] # we do not want to eat more than one event.
return False # eat.
self.last_downclick[event.button] = current_time
FigureCanvasBase.button_press_event(self, x, y, event.button, dblclick=dblclick, guiEvent=event)
return False # finish event propagation?
def button_release_event(self, widget, event):
if _debug: print('FigureCanvasGTK.%s' % fn_name())
x = event.x
# flipy so y=0 is bottom of canvas
y = self.allocation.height - event.y
FigureCanvasBase.button_release_event(self, x, y, event.button, guiEvent=event)
return False # finish event propagation?
def key_press_event(self, widget, event):
if _debug: print('FigureCanvasGTK.%s' % fn_name())
key = self._get_key(event)
if _debug: print("hit", key)
FigureCanvasBase.key_press_event(self, key, guiEvent=event)
return False # finish event propagation?
def key_release_event(self, widget, event):
if _debug: print('FigureCanvasGTK.%s' % fn_name())
key = self._get_key(event)
if _debug: print("release", key)
FigureCanvasBase.key_release_event(self, key, guiEvent=event)
return False # finish event propagation?
def motion_notify_event(self, widget, event):
if _debug: print('FigureCanvasGTK.%s' % fn_name())
if event.is_hint:
x, y, state = event.window.get_pointer()
else:
x, y, state = event.x, event.y, event.state
# flipy so y=0 is bottom of canvas
y = self.allocation.height - y
FigureCanvasBase.motion_notify_event(self, x, y, guiEvent=event)
return False # finish event propagation?
def leave_notify_event(self, widget, event):
FigureCanvasBase.leave_notify_event(self, event)
def enter_notify_event(self, widget, event):
x, y, state = event.window.get_pointer()
FigureCanvasBase.enter_notify_event(self, event, xy=(x, y))
def _get_key(self, event):
if event.keyval in self.keyvald:
key = self.keyvald[event.keyval]
elif event.keyval < 256:
key = chr(event.keyval)
else:
key = None
for key_mask, prefix in (
[gdk.MOD4_MASK, 'super'],
[gdk.MOD1_MASK, 'alt'],
[gdk.CONTROL_MASK, 'ctrl'], ):
if event.state & key_mask:
key = '{0}+{1}'.format(prefix, key)
return key
def configure_event(self, widget, event):
if _debug: print('FigureCanvasGTK.%s' % fn_name())
if widget.window is None:
return
w, h = event.width, event.height
if w < 3 or h < 3:
return # empty fig
# resize the figure (in inches)
dpi = self.figure.dpi
self.figure.set_size_inches (w/dpi, h/dpi)
self._need_redraw = True
return False # finish event propagation?
def draw(self):
# Note: FigureCanvasBase.draw() is inconveniently named as it clashes
# with the deprecated gtk.Widget.draw()
self._need_redraw = True
if GTK_WIDGET_DRAWABLE(self):
self.queue_draw()
        # do a synchronous draw (it's less efficient than an async draw,
# but is required if/when animation is used)
self.window.process_updates (False)
def draw_idle(self):
def idle_draw(*args):
self.draw()
self._idle_draw_id = 0
return False
if self._idle_draw_id == 0:
self._idle_draw_id = gobject.idle_add(idle_draw)
def _renderer_init(self):
"""Override by GTK backends to select a different renderer
Renderer should provide the methods:
set_pixmap ()
set_width_height ()
that are used by
_render_figure() / _pixmap_prepare()
"""
self._renderer = RendererGDK (self, self.figure.dpi)
def _pixmap_prepare(self, width, height):
"""
Make sure _._pixmap is at least width, height,
create new pixmap if necessary
"""
if _debug: print('FigureCanvasGTK.%s' % fn_name())
create_pixmap = False
if width > self._pixmap_width:
# increase the pixmap in 10%+ (rather than 1 pixel) steps
self._pixmap_width = max (int (self._pixmap_width * 1.1),
width)
create_pixmap = True
if height > self._pixmap_height:
self._pixmap_height = max (int (self._pixmap_height * 1.1),
height)
create_pixmap = True
if create_pixmap:
self._pixmap = gdk.Pixmap (self.window, self._pixmap_width,
self._pixmap_height)
self._renderer.set_pixmap (self._pixmap)
def _render_figure(self, pixmap, width, height):
"""used by GTK and GTKcairo. GTKAgg overrides
"""
self._renderer.set_width_height (width, height)
self.figure.draw (self._renderer)
def expose_event(self, widget, event):
"""Expose_event for all GTK backends. Should not be overridden.
"""
if _debug: print('FigureCanvasGTK.%s' % fn_name())
if GTK_WIDGET_DRAWABLE(self):
if self._need_redraw:
x, y, w, h = self.allocation
self._pixmap_prepare (w, h)
self._render_figure(self._pixmap, w, h)
self._need_redraw = False
x, y, w, h = event.area
self.window.draw_drawable (self.style.fg_gc[self.state],
self._pixmap, x, y, x, y, w, h)
return False # finish event propagation?
filetypes = FigureCanvasBase.filetypes.copy()
filetypes['jpg'] = 'JPEG'
filetypes['jpeg'] = 'JPEG'
filetypes['png'] = 'Portable Network Graphics'
def print_jpeg(self, filename, *args, **kwargs):
return self._print_image(filename, 'jpeg')
print_jpg = print_jpeg
def print_png(self, filename, *args, **kwargs):
return self._print_image(filename, 'png')
def _print_image(self, filename, format, *args, **kwargs):
if self.flags() & gtk.REALIZED == 0:
            # realize() is needed for self.window (used to create the pixmap)
            # and has a side effect of altering figure width/height
            # (via configure-event?)
gtk.DrawingArea.realize(self)
width, height = self.get_width_height()
pixmap = gdk.Pixmap (self.window, width, height)
self._renderer.set_pixmap (pixmap)
self._render_figure(pixmap, width, height)
# jpg colors don't match the display very well, png colors match
# better
pixbuf = gdk.Pixbuf(gdk.COLORSPACE_RGB, 0, 8, width, height)
pixbuf.get_from_drawable(pixmap, pixmap.get_colormap(),
0, 0, 0, 0, width, height)
# set the default quality, if we are writing a JPEG.
# http://www.pygtk.org/docs/pygtk/class-gdkpixbuf.html#method-gdkpixbuf--save
options = cbook.restrict_dict(kwargs, ['quality'])
if format in ['jpg','jpeg']:
if 'quality' not in options:
options['quality'] = rcParams['savefig.jpeg_quality']
options['quality'] = str(options['quality'])
if is_string_like(filename):
try:
pixbuf.save(filename, format, options=options)
except gobject.GError as exc:
error_msg_gtk('Save figure failure:\n%s' % (exc,), parent=self)
elif is_writable_file_like(filename):
if hasattr(pixbuf, 'save_to_callback'):
def save_callback(buf, data=None):
data.write(buf)
try:
pixbuf.save_to_callback(save_callback, format, user_data=filename, options=options)
except gobject.GError as exc:
error_msg_gtk('Save figure failure:\n%s' % (exc,), parent=self)
else:
raise ValueError("Saving to a Python file-like object is only supported by PyGTK >= 2.8")
else:
raise ValueError("filename must be a path or a file-like object")
def new_timer(self, *args, **kwargs):
"""
Creates a new backend-specific subclass of :class:`backend_bases.Timer`.
This is useful for getting periodic events through the backend's native
event loop. Implemented only for backends with GUIs.
optional arguments:
*interval*
Timer interval in milliseconds
*callbacks*
Sequence of (func, args, kwargs) where func(*args, **kwargs) will
be executed by the timer every *interval*.
"""
return TimerGTK(*args, **kwargs)
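    # Hedged usage sketch (not part of the original source): `new_timer` is
    # typically used to drive animations through GTK's main loop. Assuming a
    # realized canvas object `canvas`, something like the following would
    # redraw the figure every 100 ms:
    #
    #     timer = canvas.new_timer(interval=100)
    #     timer.add_callback(canvas.draw_idle)
    #     timer.start()
    #
    # `add_callback`/`start` come from the backend_bases.TimerBase API that
    # TimerGTK inherits; the variable names here are illustrative only.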
def flush_events(self):
gtk.gdk.threads_enter()
while gtk.events_pending():
gtk.main_iteration(True)
gtk.gdk.flush()
gtk.gdk.threads_leave()
def start_event_loop(self,timeout):
FigureCanvasBase.start_event_loop_default(self,timeout)
start_event_loop.__doc__=FigureCanvasBase.start_event_loop_default.__doc__
def stop_event_loop(self):
FigureCanvasBase.stop_event_loop_default(self)
stop_event_loop.__doc__=FigureCanvasBase.stop_event_loop_default.__doc__
class FigureManagerGTK(FigureManagerBase):
"""
Public attributes
canvas : The FigureCanvas instance
num : The Figure number
toolbar : The gtk.Toolbar (gtk only)
vbox : The gtk.VBox containing the canvas and toolbar (gtk only)
window : The gtk.Window (gtk only)
"""
def __init__(self, canvas, num):
if _debug: print('FigureManagerGTK.%s' % fn_name())
FigureManagerBase.__init__(self, canvas, num)
self.window = gtk.Window()
self.set_window_title("Figure %d" % num)
if (window_icon):
try:
self.window.set_icon_from_file(window_icon)
except:
# some versions of gtk throw a glib.GError but not
# all, so I am not sure how to catch it. I am unhappy
                # doing a blanket catch here, but am not sure what a
                # better way is - JDH
verbose.report('Could not load matplotlib icon: %s' % sys.exc_info()[1])
self.vbox = gtk.VBox()
self.window.add(self.vbox)
self.vbox.show()
self.canvas.show()
self.vbox.pack_start(self.canvas, True, True)
self.toolbar = self._get_toolbar(canvas)
# calculate size for window
w = int (self.canvas.figure.bbox.width)
h = int (self.canvas.figure.bbox.height)
if self.toolbar is not None:
self.toolbar.show()
self.vbox.pack_end(self.toolbar, False, False)
tb_w, tb_h = self.toolbar.size_request()
h += tb_h
self.window.set_default_size (w, h)
def destroy(*args):
Gcf.destroy(num)
self.window.connect("destroy", destroy)
self.window.connect("delete_event", destroy)
if matplotlib.is_interactive():
self.window.show()
def notify_axes_change(fig):
'this will be called whenever the current axes is changed'
if self.toolbar is not None: self.toolbar.update()
self.canvas.figure.add_axobserver(notify_axes_change)
self.canvas.grab_focus()
def destroy(self, *args):
if _debug: print('FigureManagerGTK.%s' % fn_name())
if hasattr(self, 'toolbar') and self.toolbar is not None:
self.toolbar.destroy()
if hasattr(self, 'vbox'):
self.vbox.destroy()
if hasattr(self, 'window'):
self.window.destroy()
if hasattr(self, 'canvas'):
self.canvas.destroy()
self.__dict__.clear() #Is this needed? Other backends don't have it.
if Gcf.get_num_fig_managers()==0 and \
not matplotlib.is_interactive() and \
gtk.main_level() >= 1:
gtk.main_quit()
def show(self):
# show the figure window
self.window.show()
def full_screen_toggle(self):
self._full_screen_flag = not self._full_screen_flag
if self._full_screen_flag:
self.window.fullscreen()
else:
self.window.unfullscreen()
_full_screen_flag = False
def _get_toolbar(self, canvas):
# must be inited after the window, drawingArea and figure
# attrs are set
if rcParams['toolbar'] == 'classic':
toolbar = NavigationToolbar (canvas, self.window)
elif rcParams['toolbar'] == 'toolbar2':
toolbar = NavigationToolbar2GTK (canvas, self.window)
else:
toolbar = None
return toolbar
def get_window_title(self):
return self.window.get_title()
def set_window_title(self, title):
self.window.set_title(title)
def resize(self, width, height):
'set the canvas size in pixels'
#_, _, cw, ch = self.canvas.allocation
#_, _, ww, wh = self.window.allocation
#self.window.resize (width-cw+ww, height-ch+wh)
self.window.resize(width, height)
class NavigationToolbar2GTK(NavigationToolbar2, gtk.Toolbar):
def __init__(self, canvas, window):
self.win = window
gtk.Toolbar.__init__(self)
NavigationToolbar2.__init__(self, canvas)
def set_message(self, s):
self.message.set_label(s)
def set_cursor(self, cursor):
self.canvas.window.set_cursor(cursord[cursor])
def release(self, event):
try: del self._pixmapBack
except AttributeError: pass
def dynamic_update(self):
# legacy method; new method is canvas.draw_idle
self.canvas.draw_idle()
def draw_rubberband(self, event, x0, y0, x1, y1):
'adapted from http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/189744'
drawable = self.canvas.window
if drawable is None:
return
gc = drawable.new_gc()
height = self.canvas.figure.bbox.height
y1 = height - y1
y0 = height - y0
w = abs(x1 - x0)
h = abs(y1 - y0)
        rect = [int(val) for val in (min(x0, x1), min(y0, y1), w, h)]
try:
lastrect, pixmapBack = self._pixmapBack
except AttributeError:
#snap image back
if event.inaxes is None:
return
ax = event.inaxes
l,b,w,h = [int(val) for val in ax.bbox.bounds]
b = int(height)-(b+h)
axrect = l,b,w,h
self._pixmapBack = axrect, gtk.gdk.Pixmap(drawable, w, h)
self._pixmapBack[1].draw_drawable(gc, drawable, l, b, 0, 0, w, h)
else:
drawable.draw_drawable(gc, pixmapBack, 0, 0, *lastrect)
drawable.draw_rectangle(gc, False, *rect)
def _init_toolbar(self):
self.set_style(gtk.TOOLBAR_ICONS)
self._init_toolbar2_4()
def _init_toolbar2_4(self):
basedir = os.path.join(rcParams['datapath'],'images')
if not _new_tooltip_api:
self.tooltips = gtk.Tooltips()
for text, tooltip_text, image_file, callback in self.toolitems:
if text is None:
self.insert( gtk.SeparatorToolItem(), -1 )
continue
fname = os.path.join(basedir, image_file + '.png')
image = gtk.Image()
image.set_from_file(fname)
tbutton = gtk.ToolButton(image, text)
self.insert(tbutton, -1)
tbutton.connect('clicked', getattr(self, callback))
if _new_tooltip_api:
tbutton.set_tooltip_text(tooltip_text)
else:
tbutton.set_tooltip(self.tooltips, tooltip_text, 'Private')
toolitem = gtk.SeparatorToolItem()
self.insert(toolitem, -1)
# set_draw() not making separator invisible,
# bug #143692 fixed Jun 06 2004, will be in GTK+ 2.6
toolitem.set_draw(False)
toolitem.set_expand(True)
toolitem = gtk.ToolItem()
self.insert(toolitem, -1)
self.message = gtk.Label()
toolitem.add(self.message)
self.show_all()
def get_filechooser(self):
fc = FileChooserDialog(
title='Save the figure',
parent=self.win,
path=os.path.expanduser(rcParams.get('savefig.directory', '')),
filetypes=self.canvas.get_supported_filetypes(),
default_filetype=self.canvas.get_default_filetype())
fc.set_current_name(self.canvas.get_default_filename())
return fc
def save_figure(self, *args):
chooser = self.get_filechooser()
fname, format = chooser.get_filename_from_user()
chooser.destroy()
if fname:
startpath = os.path.expanduser(rcParams.get('savefig.directory', ''))
if startpath == '':
# explicitly missing key or empty str signals to use cwd
rcParams['savefig.directory'] = startpath
else:
# save dir for next time
rcParams['savefig.directory'] = os.path.dirname(unicode(fname))
try:
self.canvas.print_figure(fname, format=format)
except Exception as e:
error_msg_gtk(str(e), parent=self)
def configure_subplots(self, button):
toolfig = Figure(figsize=(6,3))
canvas = self._get_canvas(toolfig)
toolfig.subplots_adjust(top=0.9)
tool = SubplotTool(self.canvas.figure, toolfig)
w = int (toolfig.bbox.width)
h = int (toolfig.bbox.height)
window = gtk.Window()
if (window_icon):
try: window.set_icon_from_file(window_icon)
except:
# we presumably already logged a message on the
# failure of the main plot, don't keep reporting
pass
window.set_title("Subplot Configuration Tool")
window.set_default_size(w, h)
vbox = gtk.VBox()
window.add(vbox)
vbox.show()
canvas.show()
vbox.pack_start(canvas, True, True)
window.show()
def _get_canvas(self, fig):
return FigureCanvasGTK(fig)
class NavigationToolbar(gtk.Toolbar):
"""
Public attributes
canvas - the FigureCanvas (gtk.DrawingArea)
win - the gtk.Window
"""
# list of toolitems to add to the toolbar, format is:
# text, tooltip_text, image, callback(str), callback_arg, scroll(bool)
toolitems = (
('Left', 'Pan left with click or wheel mouse (bidirectional)',
gtk.STOCK_GO_BACK, 'panx', -1, True),
('Right', 'Pan right with click or wheel mouse (bidirectional)',
gtk.STOCK_GO_FORWARD, 'panx', 1, True),
('Zoom In X',
'Zoom In X (shrink the x axis limits) with click or wheel'
' mouse (bidirectional)',
gtk.STOCK_ZOOM_IN, 'zoomx', 1, True),
('Zoom Out X',
'Zoom Out X (expand the x axis limits) with click or wheel'
' mouse (bidirectional)',
gtk.STOCK_ZOOM_OUT, 'zoomx', -1, True),
(None, None, None, None, None, None,),
('Up', 'Pan up with click or wheel mouse (bidirectional)',
gtk.STOCK_GO_UP, 'pany', 1, True),
('Down', 'Pan down with click or wheel mouse (bidirectional)',
gtk.STOCK_GO_DOWN, 'pany', -1, True),
('Zoom In Y',
'Zoom in Y (shrink the y axis limits) with click or wheel'
' mouse (bidirectional)',
gtk.STOCK_ZOOM_IN, 'zoomy', 1, True),
('Zoom Out Y',
'Zoom Out Y (expand the y axis limits) with click or wheel'
' mouse (bidirectional)',
gtk.STOCK_ZOOM_OUT, 'zoomy', -1, True),
(None, None, None, None, None, None,),
('Save', 'Save the figure',
gtk.STOCK_SAVE, 'save_figure', None, False),
)
def __init__(self, canvas, window):
"""
figManager is the FigureManagerGTK instance that contains the
toolbar, with attributes figure, window and drawingArea
"""
gtk.Toolbar.__init__(self)
self.canvas = canvas
# Note: gtk.Toolbar already has a 'window' attribute
self.win = window
self.set_style(gtk.TOOLBAR_ICONS)
self._create_toolitems_2_4()
self.update = self._update_2_4
self.fileselect = FileChooserDialog(
title='Save the figure',
parent=self.win,
filetypes=self.canvas.get_supported_filetypes(),
default_filetype=self.canvas.get_default_filetype())
self.show_all()
self.update()
def _create_toolitems_2_4(self):
# use the GTK+ 2.4 GtkToolbar API
iconSize = gtk.ICON_SIZE_SMALL_TOOLBAR
if not _new_tooltip_api:
self.tooltips = gtk.Tooltips()
for text, tooltip_text, image_num, callback, callback_arg, scroll \
in self.toolitems:
if text is None:
self.insert( gtk.SeparatorToolItem(), -1 )
continue
image = gtk.Image()
image.set_from_stock(image_num, iconSize)
tbutton = gtk.ToolButton(image, text)
self.insert(tbutton, -1)
if callback_arg:
tbutton.connect('clicked', getattr(self, callback),
callback_arg)
else:
tbutton.connect('clicked', getattr(self, callback))
if scroll:
tbutton.connect('scroll_event', getattr(self, callback))
if _new_tooltip_api:
tbutton.set_tooltip_text(tooltip_text)
else:
tbutton.set_tooltip(self.tooltips, tooltip_text, 'Private')
# Axes toolitem, is empty at start, update() adds a menu if >=2 axes
self.axes_toolitem = gtk.ToolItem()
self.insert(self.axes_toolitem, 0)
if _new_tooltip_api:
self.axes_toolitem.set_tooltip_text(
                'Select axes that the controls affect')
else:
self.axes_toolitem.set_tooltip (
self.tooltips,
                tip_text='Select axes that the controls affect',
tip_private = 'Private')
align = gtk.Alignment (xalign=0.5, yalign=0.5, xscale=0.0, yscale=0.0)
self.axes_toolitem.add(align)
self.menubutton = gtk.Button ("Axes")
align.add (self.menubutton)
def position_menu (menu):
"""Function for positioning a popup menu.
Place menu below the menu button, but ensure it does not go off
the bottom of the screen.
            The default is to pop up the menu at the current mouse position
"""
x0, y0 = self.window.get_origin()
x1, y1, m = self.window.get_pointer()
x2, y2 = self.menubutton.get_pointer()
sc_h = self.get_screen().get_height() # requires GTK+ 2.2 +
w, h = menu.size_request()
x = x0 + x1 - x2
y = y0 + y1 - y2 + self.menubutton.allocation.height
y = min(y, sc_h - h)
return x, y, True
def button_clicked (button, data=None):
self.axismenu.popup (None, None, position_menu, 0,
gtk.get_current_event_time())
self.menubutton.connect ("clicked", button_clicked)
def _update_2_4(self):
# for GTK+ 2.4+
# called by __init__() and FigureManagerGTK
self._axes = self.canvas.figure.axes
if len(self._axes) >= 2:
self.axismenu = self._make_axis_menu()
self.menubutton.show_all()
else:
self.menubutton.hide()
self.set_active(range(len(self._axes)))
def _make_axis_menu(self):
# called by self._update*()
def toggled(item, data=None):
if item == self.itemAll:
for item in items: item.set_active(True)
elif item == self.itemInvert:
for item in items:
item.set_active(not item.get_active())
ind = [i for i,item in enumerate(items) if item.get_active()]
self.set_active(ind)
menu = gtk.Menu()
self.itemAll = gtk.MenuItem("All")
menu.append(self.itemAll)
self.itemAll.connect("activate", toggled)
self.itemInvert = gtk.MenuItem("Invert")
menu.append(self.itemInvert)
self.itemInvert.connect("activate", toggled)
items = []
for i in range(len(self._axes)):
item = gtk.CheckMenuItem("Axis %d" % (i+1))
menu.append(item)
item.connect("toggled", toggled)
item.set_active(True)
items.append(item)
menu.show_all()
return menu
def set_active(self, ind):
self._ind = ind
self._active = [ self._axes[i] for i in self._ind ]
def panx(self, button, direction):
'panx in direction'
for a in self._active:
a.xaxis.pan(direction)
self.canvas.draw()
return True
def pany(self, button, direction):
'pany in direction'
for a in self._active:
a.yaxis.pan(direction)
self.canvas.draw()
return True
def zoomx(self, button, direction):
'zoomx in direction'
for a in self._active:
a.xaxis.zoom(direction)
self.canvas.draw()
return True
def zoomy(self, button, direction):
'zoomy in direction'
for a in self._active:
a.yaxis.zoom(direction)
self.canvas.draw()
return True
def get_filechooser(self):
return FileChooserDialog(
title='Save the figure',
parent=self.win,
filetypes=self.canvas.get_supported_filetypes(),
default_filetype=self.canvas.get_default_filetype())
def save_figure(self, *args):
fname, format = self.get_filechooser().get_filename_from_user()
if fname:
try:
self.canvas.print_figure(fname, format=format)
except Exception as e:
error_msg_gtk(str(e), parent=self)
class FileChooserDialog(gtk.FileChooserDialog):
"""GTK+ 2.4 file selector which presents the user with a menu
of supported image formats
"""
def __init__ (self,
title = 'Save file',
parent = None,
action = gtk.FILE_CHOOSER_ACTION_SAVE,
buttons = (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_SAVE, gtk.RESPONSE_OK),
path = None,
filetypes = [],
default_filetype = None
):
super(FileChooserDialog, self).__init__ (title, parent, action,
buttons)
super(FileChooserDialog, self).set_do_overwrite_confirmation(True)
self.set_default_response (gtk.RESPONSE_OK)
if not path: path = os.getcwd() + os.sep
# create an extra widget to list supported image formats
self.set_current_folder (path)
self.set_current_name ('image.' + default_filetype)
hbox = gtk.HBox (spacing=10)
hbox.pack_start (gtk.Label ("File Format:"), expand=False)
liststore = gtk.ListStore(gobject.TYPE_STRING)
cbox = gtk.ComboBox(liststore)
cell = gtk.CellRendererText()
cbox.pack_start(cell, True)
cbox.add_attribute(cell, 'text', 0)
hbox.pack_start (cbox)
self.filetypes = filetypes
self.sorted_filetypes = filetypes.items()
self.sorted_filetypes.sort()
default = 0
for i, (ext, name) in enumerate(self.sorted_filetypes):
cbox.append_text ("%s (*.%s)" % (name, ext))
if ext == default_filetype:
default = i
cbox.set_active(default)
self.ext = default_filetype
def cb_cbox_changed (cbox, data=None):
"""File extension changed"""
head, filename = os.path.split(self.get_filename())
root, ext = os.path.splitext(filename)
ext = ext[1:]
new_ext = self.sorted_filetypes[cbox.get_active()][0]
self.ext = new_ext
if ext in self.filetypes:
filename = root + '.' + new_ext
elif ext == '':
filename = filename.rstrip('.') + '.' + new_ext
self.set_current_name (filename)
cbox.connect ("changed", cb_cbox_changed)
hbox.show_all()
self.set_extra_widget(hbox)
def get_filename_from_user (self):
while True:
filename = None
if self.run() != int(gtk.RESPONSE_OK):
break
filename = self.get_filename()
break
return filename, self.ext
class DialogLineprops:
"""
A GUI dialog for controlling lineprops
"""
signals = (
'on_combobox_lineprops_changed',
'on_combobox_linestyle_changed',
'on_combobox_marker_changed',
'on_colorbutton_linestyle_color_set',
'on_colorbutton_markerface_color_set',
'on_dialog_lineprops_okbutton_clicked',
'on_dialog_lineprops_cancelbutton_clicked',
)
linestyles = [ls for ls in lines.Line2D.lineStyles if ls.strip()]
linestyled = dict([ (s,i) for i,s in enumerate(linestyles)])
markers = [m for m in markers.MarkerStyle.markers if cbook.is_string_like(m)]
markerd = dict([(s,i) for i,s in enumerate(markers)])
def __init__(self, lines):
import gtk.glade
datadir = matplotlib.get_data_path()
gladefile = os.path.join(datadir, 'lineprops.glade')
if not os.path.exists(gladefile):
raise IOError('Could not find gladefile lineprops.glade in %s'%datadir)
self._inited = False
        self._updateson = True  # when False, updates are suppressed while widgets are set manually
self.wtree = gtk.glade.XML(gladefile, 'dialog_lineprops')
self.wtree.signal_autoconnect(dict([(s, getattr(self, s)) for s in self.signals]))
self.dlg = self.wtree.get_widget('dialog_lineprops')
self.lines = lines
cbox = self.wtree.get_widget('combobox_lineprops')
cbox.set_active(0)
self.cbox_lineprops = cbox
cbox = self.wtree.get_widget('combobox_linestyles')
for ls in self.linestyles:
cbox.append_text(ls)
cbox.set_active(0)
self.cbox_linestyles = cbox
cbox = self.wtree.get_widget('combobox_markers')
for m in self.markers:
cbox.append_text(m)
cbox.set_active(0)
self.cbox_markers = cbox
self._lastcnt = 0
self._inited = True
def show(self):
'populate the combo box'
self._updateson = False
# flush the old
cbox = self.cbox_lineprops
for i in range(self._lastcnt-1,-1,-1):
cbox.remove_text(i)
# add the new
for line in self.lines:
cbox.append_text(line.get_label())
cbox.set_active(0)
self._updateson = True
self._lastcnt = len(self.lines)
self.dlg.show()
def get_active_line(self):
'get the active line'
ind = self.cbox_lineprops.get_active()
line = self.lines[ind]
return line
def get_active_linestyle(self):
        'get the active linestyle'
ind = self.cbox_linestyles.get_active()
ls = self.linestyles[ind]
return ls
def get_active_marker(self):
        'get the active marker'
ind = self.cbox_markers.get_active()
m = self.markers[ind]
return m
def _update(self):
'update the active line props from the widgets'
if not self._inited or not self._updateson: return
line = self.get_active_line()
ls = self.get_active_linestyle()
marker = self.get_active_marker()
line.set_linestyle(ls)
line.set_marker(marker)
button = self.wtree.get_widget('colorbutton_linestyle')
color = button.get_color()
r, g, b = [val/65535. for val in (color.red, color.green, color.blue)]
line.set_color((r,g,b))
button = self.wtree.get_widget('colorbutton_markerface')
color = button.get_color()
r, g, b = [val/65535. for val in (color.red, color.green, color.blue)]
line.set_markerfacecolor((r,g,b))
line.figure.canvas.draw()
def on_combobox_lineprops_changed(self, item):
'update the widgets from the active line'
if not self._inited: return
self._updateson = False
line = self.get_active_line()
ls = line.get_linestyle()
if ls is None: ls = 'None'
self.cbox_linestyles.set_active(self.linestyled[ls])
marker = line.get_marker()
if marker is None: marker = 'None'
self.cbox_markers.set_active(self.markerd[marker])
r,g,b = colorConverter.to_rgb(line.get_color())
color = gtk.gdk.Color(*[int(val*65535) for val in (r,g,b)])
button = self.wtree.get_widget('colorbutton_linestyle')
button.set_color(color)
r,g,b = colorConverter.to_rgb(line.get_markerfacecolor())
color = gtk.gdk.Color(*[int(val*65535) for val in (r,g,b)])
button = self.wtree.get_widget('colorbutton_markerface')
button.set_color(color)
self._updateson = True
def on_combobox_linestyle_changed(self, item):
self._update()
def on_combobox_marker_changed(self, item):
self._update()
def on_colorbutton_linestyle_color_set(self, button):
self._update()
def on_colorbutton_markerface_color_set(self, button):
        'called when the markerface colorbutton is clicked'
self._update()
def on_dialog_lineprops_okbutton_clicked(self, button):
self._update()
self.dlg.hide()
def on_dialog_lineprops_cancelbutton_clicked(self, button):
self.dlg.hide()
# set icon used when windows are minimized
# Unfortunately, the SVG renderer (rsvg) leaks memory under earlier
# versions of pygtk, so we have to use a PNG file instead.
try:
if gtk.pygtk_version < (2, 8, 0) or sys.platform == 'win32':
icon_filename = 'matplotlib.png'
else:
icon_filename = 'matplotlib.svg'
window_icon = os.path.join(rcParams['datapath'], 'images', icon_filename)
except:
window_icon = None
verbose.report('Could not load matplotlib icon: %s' % sys.exc_info()[1])
def error_msg_gtk(msg, parent=None):
if parent is not None: # find the toplevel gtk.Window
parent = parent.get_toplevel()
if parent.flags() & gtk.TOPLEVEL == 0:
parent = None
if not is_string_like(msg):
msg = ','.join(map(str,msg))
dialog = gtk.MessageDialog(
parent = parent,
type = gtk.MESSAGE_ERROR,
buttons = gtk.BUTTONS_OK,
message_format = msg)
dialog.run()
dialog.destroy()
FigureManager = FigureManagerGTK
| cactusbin/nyt | matplotlib/lib/matplotlib/backends/backend_gtk.py | Python | unlicense | 45,322 | [
"FLEUR"
] | 44968a9a39bc78c95de0c165bb785f00ea3761da85e07f85f1c36804b2279968 |
#!/usr/bin/env python
'''
:func:`scf.addons.remove_linear_dep_` discards the small eigenvalues of overlap
matrix. This reduces the number of MOs from 50 to 49. The problem size of
the following CCSD method is 49.
'''
from pyscf import gto, scf, cc
mol = gto.Mole()
mol.atom = [('H', 0, 0, .5*i) for i in range(20)]
mol.basis = 'ccpvdz'
mol.verbose = 4
mol.build()
mf = scf.RHF(mol).run()
mycc = cc.CCSD(mf).run()
mf = scf.addons.remove_linear_dep_(mf).run()
mycc = cc.CCSD(mf).run()
| gkc1000/pyscf | examples/cc/31-remove_linear_dep.py | Python | apache-2.0 | 489 | [
"PySCF"
] | 587ff0699494cc87f4fdaf2d4f3051af8de3d457c68d1856b93cc0e3ec4d3b0d |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""AST conversion templates.
Adapted from Tangent.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ast
import textwrap
import gast
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import ast_util
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import qual_names
class ReplaceTransformer(gast.NodeTransformer):
"""Replace AST nodes."""
def __init__(self, replacements):
"""Create a new ReplaceTransformer.
Args:
replacements: A mapping from placeholder names to (lists of) AST nodes
that these placeholders will be replaced by.
"""
self.replacements = replacements
self.in_replacements = False
self.preserved_annos = {
anno.Basic.ORIGIN,
anno.Basic.SKIP_PROCESSING,
anno.Static.ORIG_DEFINITIONS,
}
def _prepare_replacement(self, replaced, key):
"""Prepares a replacement AST that's safe to swap in for a node.
Args:
replaced: ast.AST, the node being replaced
key: Hashable, the key of the replacement AST
Returns:
ast.AST, the replacement AST
"""
repl = self.replacements[key]
new_nodes = ast_util.copy_clean(repl, preserve_annos=self.preserved_annos)
if isinstance(new_nodes, gast.AST):
new_nodes = [new_nodes]
return new_nodes
def visit_Expr(self, node):
# When replacing a placeholder with an entire statement, the replacement
# must stand on its own and not be wrapped in an Expr.
new_value = self.visit(node.value)
if new_value is node.value:
return node
return new_value
def visit_keyword(self, node):
if node.arg not in self.replacements:
return self.generic_visit(node)
repl = self._prepare_replacement(node, node.arg)
if isinstance(repl, gast.keyword):
return repl
elif (repl and isinstance(repl, (list, tuple)) and
all(isinstance(r, gast.keyword) for r in repl)):
return repl
# TODO(mdan): We may allow replacing with a string as well.
# For example, if one wanted to replace foo with bar in foo=baz, then
# we could allow changing just node arg, so that we end up with bar=baz.
raise ValueError(
'a keyword argument may only be replaced by another keyword or a '
'non-empty list of keywords. Found: %s' % repl)
def visit_FunctionDef(self, node):
node = self.generic_visit(node)
if node.name not in self.replacements:
return node
repl = self.replacements[node.name]
if not isinstance(repl, (gast.Name, ast.Name)):
raise ValueError(
'a function name can only be replaced by a Name node. Found: %s' %
repl)
node.name = repl.id
return node
def _check_has_context(self, node):
if not node.ctx:
raise ValueError('node %s is missing ctx value' % node)
# TODO(mdan): Rewrite _check and _set using a separate transformer.
def _check_inner_children_have_context(self, node):
if isinstance(node, gast.Attribute):
self._check_inner_children_have_context(node.value)
self._check_has_context(node)
elif isinstance(node, (gast.Tuple, gast.List)):
for e in node.elts:
self._check_inner_children_have_context(e)
self._check_has_context(node)
elif isinstance(node, gast.Dict):
for e in node.keys:
self._check_inner_children_have_context(e)
for e in node.values:
self._check_inner_children_have_context(e)
elif isinstance(node, gast.Index):
self._check_inner_children_have_context(node.value)
elif isinstance(node, gast.Subscript):
self._check_inner_children_have_context(node.value)
self._check_inner_children_have_context(node.slice)
elif isinstance(node, gast.Slice):
self._check_inner_children_have_context(node.lower)
if node.upper:
self._check_inner_children_have_context(node.upper)
if node.step:
self._check_inner_children_have_context(node.step)
elif isinstance(node, gast.BinOp):
self._check_inner_children_have_context(node.left)
self._check_inner_children_have_context(node.right)
elif isinstance(node, gast.UnaryOp):
self._check_inner_children_have_context(node.operand)
elif isinstance(node, gast.Name):
self._check_has_context(node)
elif isinstance(node, (gast.Str, gast.Num)):
pass
else:
raise ValueError('unexpected node type "%s"' % node)
def _set_inner_child_context(self, node, ctx):
if isinstance(node, gast.Attribute):
self._set_inner_child_context(node.value, gast.Load())
node.ctx = ctx
elif isinstance(node, (gast.Tuple, gast.List)):
for e in node.elts:
self._set_inner_child_context(e, ctx)
node.ctx = ctx
elif isinstance(node, gast.Name):
node.ctx = ctx
elif isinstance(node, gast.Call):
self._set_inner_child_context(node.func, ctx)
# We may be able to override these to Load(), but for now it's simpler
# to just assert that they're set.
for a in node.args:
self._check_inner_children_have_context(a)
for k in node.keywords:
self._check_inner_children_have_context(k.value)
elif isinstance(node, gast.Dict):
# We may be able to override these to Load(), but for now it's simpler
# to just assert that they're set.
for e in node.keys:
self._check_inner_children_have_context(e)
for e in node.values:
self._check_inner_children_have_context(e)
elif isinstance(node, gast.Subscript):
self._set_inner_child_context(node.value, ctx)
self._check_inner_children_have_context(node.slice)
elif isinstance(node, gast.BinOp):
self._check_inner_children_have_context(node.left)
self._check_inner_children_have_context(node.right)
elif isinstance(node, gast.UnaryOp):
self._check_inner_children_have_context(node.operand)
elif isinstance(node, (gast.Str, gast.Num)):
pass
else:
raise ValueError('unexpected node type "%s"' % node)
def visit_Attribute(self, node):
node = self.generic_visit(node)
if node.attr not in self.replacements:
return node
repl = self.replacements[node.attr]
if not isinstance(repl, gast.Name):
raise ValueError(
'An attribute can only be replaced by a Name node. Found: %s' % repl)
node.attr = repl.id
return node
def visit_Name(self, node):
if node.id not in self.replacements:
return node
new_nodes = self._prepare_replacement(node, node.id)
# Preserve the target context.
for n in new_nodes:
if isinstance(n, (gast.Tuple, gast.List)):
for e in n.elts:
self._set_inner_child_context(e, node.ctx)
if isinstance(n, gast.Attribute):
# For attributes, the inner Name node receives the context, while the
# outer ones have it set to Load.
self._set_inner_child_context(n, node.ctx)
else:
n.ctx = node.ctx
if len(new_nodes) == 1:
new_nodes, = new_nodes
return new_nodes
def _convert_to_ast(n):
"""Converts from a known data type to AST."""
if isinstance(n, str):
# Note: the node will receive the ctx value from the template, see
# ReplaceTransformer.visit_Name.
return gast.Name(id=n, ctx=None, annotation=None)
if isinstance(n, qual_names.QN):
return n.ast()
if isinstance(n, list):
return [_convert_to_ast(e) for e in n]
if isinstance(n, tuple):
return tuple(_convert_to_ast(e) for e in n)
return n
def replace(template, **replacements):
"""Replaces placeholders in a Python template.
  AST Name and Tuple nodes always receive the context that is inferred from
  the template. However, when replacing more complex nodes (that can
  potentially contain Name children), the caller is responsible for setting
  the appropriate context.
Args:
    template: A string representing Python code. Any symbol name that appears
      in the template code can be used as a placeholder.
**replacements: A mapping from placeholder names to (lists of) AST nodes
that these placeholders will be replaced by. String values are also
supported as a shorthand for AST Name nodes with the respective ID.
Returns:
An AST node or list of AST nodes with the replacements made. If the
template was a function, a list will be returned. If the template was a
node, the same node will be returned. If the template was a string, an
AST node will be returned (a `Module` node in the case of a multi-line
string, an `Expr` node otherwise).
Raises:
ValueError: if the arguments are incorrect.
"""
if not isinstance(template, str):
raise ValueError('Expected string template, got %s' % type(template))
tree = parser.parse_str(textwrap.dedent(template))
for k in replacements:
replacements[k] = _convert_to_ast(replacements[k])
results = ReplaceTransformer(replacements).visit(tree).body
if isinstance(results, list):
return [qual_names.resolve(r) for r in results]
return qual_names.resolve(results)
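# Hedged usage sketch (illustration only, not part of the original module):
# every symbol name in the template string is a potential placeholder, and
# string replacement values are shorthand for gast.Name nodes, so a call such
# as
#
#     nodes = replace('target = source + 1', target='x', source='y')
#
# would return a one-element list whose node is the AST of `x = y + 1`, with
# the Store/Load contexts taken from the template. The names `target`,
# `source`, `x` and `y` are arbitrary examples, not identifiers used
# elsewhere in this module.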
def replace_as_expression(template, **replacements):
"""Variant of replace that generates expressions, instead of code blocks."""
replacement = replace(template, **replacements)
if len(replacement) != 1:
raise ValueError(
'single expression expected; for more general templates use replace')
node = replacement[0]
node = qual_names.resolve(node)
if isinstance(node, gast.Expr):
return node.value
elif isinstance(node, gast.Name):
return node
raise ValueError(
'the template is expected to generate an expression or a name node;'
' instead found %s' % node)
| kobejean/tensorflow | tensorflow/python/autograph/pyct/templates.py | Python | apache-2.0 | 10,477 | [
"VisIt"
] | c90af2face5adfe9effa59b473d63682d4c2e0aa60cbb16b5c3cf513e87190e1 |
"""
Read data from ECMWF MACC Reanalysis.
"""
import threading
import pandas as pd
try:
import netCDF4
except ImportError:
class netCDF4:
@staticmethod
def Dataset(*a, **kw):
raise ImportError(
'Reading ECMWF data requires netCDF4 to be installed.')
try:
from ecmwfapi import ECMWFDataServer
except ImportError:
def ECMWFDataServer(*a, **kw):
raise ImportError(
'To download data from ECMWF requires the API client.\nSee https:/'
'/confluence.ecmwf.int/display/WEBAPI/Access+ECMWF+Public+Datasets'
)
#: map of ECMWF MACC parameter keynames and codes used in API
PARAMS = {
"tcwv": "137.128",
"aod550": "207.210",
'aod469': '213.210',
'aod670': '214.210',
'aod865': '215.210',
"aod1240": "216.210",
}
def _ecmwf(server, startdate, enddate, params, targetname):
# see http://apps.ecmwf.int/datasets/data/macc-reanalysis/levtype=sfc/
server.retrieve({
"class": "mc",
"dataset": "macc",
"date": "%s/to/%s" % (startdate, enddate),
"expver": "rean",
"grid": "0.75/0.75",
"levtype": "sfc",
"param": params,
"step": "3/6/9/12/15/18/21/24",
"stream": "oper",
"format": "netcdf",
"time": "00:00:00",
"type": "fc",
"target": targetname,
})
def get_ecmwf_macc(filename, params, start, end, lookup_params=True,
server=None, target=_ecmwf):
"""
Download data from ECMWF MACC Reanalysis API.
Parameters
----------
filename : str
full path of file where to save data, ``.nc`` appended if not given
params : str or sequence of str
keynames of parameter[s] to download
start : datetime.datetime or datetime.date
UTC date
end : datetime.datetime or datetime.date
UTC date
lookup_params : bool, default True
optional flag, if ``False``, then codes are already formatted
server : ecmwfapi.api.ECMWFDataServer
optionally provide a server object, default is ``None``
target : callable
optional function that calls ``server.retrieve`` to pass to thread
Returns
-------
t : thread
a thread object, use it to check status by calling `t.is_alive()`
Notes
-----
To download data from ECMWF requires the API client and a registration
key. Please read the documentation in `Access ECMWF Public Datasets
<https://confluence.ecmwf.int/display/WEBAPI/Access+ECMWF+Public+Datasets>`_.
Follow the instructions in step 4 and save the ECMWF registration key
as `$HOME/.ecmwfapirc` or set `ECMWF_API_KEY` as the path to the key.
This function returns a daemon thread that runs in the background. Exiting
Python will kill this thread, however this thread will not block the main
thread or other threads. This thread will terminate when the file is
downloaded or if the thread raises an unhandled exception. You may submit
multiple requests simultaneously to break up large downloads. You can also
check the status and retrieve downloads online at
http://apps.ecmwf.int/webmars/joblist/. This is useful if you kill the
thread. Downloads expire after 24 hours.
.. warning:: Your request may be queued online for an hour or more before
it begins to download
Precipitable water :math:`P_{wat}` is equivalent to the total column of
water vapor (TCWV), but the units given by ECMWF MACC Reanalysis are kg/m^2
at STP (1-atm, 25-C). Divide by ten to convert to centimeters of
precipitable water:
.. math::
P_{wat} \\left( \\text{cm} \\right) \
= TCWV \\left( \\frac{\\text{kg}}{\\text{m}^2} \\right) \
\\frac{100 \\frac{\\text{cm}}{\\text{m}}} \
{1000 \\frac{\\text{kg}}{\\text{m}^3}}
The keynames available for the ``params`` argument are given by
:const:`pvlib.iotools.ecmwf_macc.PARAMS` which maps the keys to codes used
in the API. The following keynames are available:
======= =========================================
keyname description
======= =========================================
tcwv total column water vapor in kg/m^2 at STP
aod550 aerosol optical depth measured at 550-nm
aod469 aerosol optical depth measured at 469-nm
aod670 aerosol optical depth measured at 670-nm
aod865 aerosol optical depth measured at 865-nm
aod1240 aerosol optical depth measured at 1240-nm
======= =========================================
If ``lookup_params`` is ``False`` then ``params`` must contain the codes
preformatted according to the ECMWF MACC Reanalysis API. This is useful if
you want to retrieve codes that are not mapped in
:const:`pvlib.iotools.ecmwf_macc.PARAMS`.
Specify a custom ``target`` function to modify how the ECMWF API function
``server.retrieve`` is called. The ``target`` function must have the
following signature in which the parameter definitions are similar to
:func:`pvlib.iotools.get_ecmwf_macc`. ::
target(server, startdate, enddate, params, filename) -> None
Examples
--------
Retrieve the AOD measured at 550-nm and the total column of water vapor for
November 1, 2012.
>>> from datetime import date
>>> from pvlib.iotools import get_ecmwf_macc
>>> filename = 'aod_tcwv_20121101.nc' # .nc extension added if missing
>>> params = ('aod550', 'tcwv')
>>> start = end = date(2012, 11, 1)
>>> t = get_ecmwf_macc(filename, params, start, end)
>>> t.is_alive()
True
"""
if not filename.endswith('nc'):
filename += '.nc'
if lookup_params:
try:
params = '/'.join(PARAMS.get(p) for p in params)
except TypeError:
params = PARAMS.get(params)
startdate = start.strftime('%Y-%m-%d')
enddate = end.strftime('%Y-%m-%d')
if not server:
server = ECMWFDataServer()
t = threading.Thread(target=target, daemon=True,
args=(server, startdate, enddate, params, filename))
t.start()
return t
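# Hedged sketch (illustration only, not part of the original module): any
# callable with the documented signature can be passed as ``target``, e.g. a
# dry-run replacement that logs the request instead of submitting it:
#
#     def dry_run_target(server, startdate, enddate, params, filename):
#         print('would request %s for %s..%s into %s'
#               % (params, startdate, enddate, filename))
#
#     # t = get_ecmwf_macc('test.nc', 'aod550', start, end,
#     #                    server=object(), target=dry_run_target)
#
# ``dry_run_target`` is a hypothetical name used only for this sketch; a
# dummy ``server`` object is passed so no real ECMWF connection is made.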
class ECMWF_MACC(object):
"""container for ECMWF MACC reanalysis data"""
TCWV = 'tcwv' # total column water vapor in kg/m^2 at (1-atm,25-degC)
def __init__(self, filename):
self.data = netCDF4.Dataset(filename)
# data variables and dimensions
variables = set(self.data.variables.keys())
dimensions = set(self.data.dimensions.keys())
self.keys = tuple(variables - dimensions)
# size of lat/lon dimensions
self.lat_size = self.data.dimensions['latitude'].size
self.lon_size = self.data.dimensions['longitude'].size
# spatial resolution in degrees
self.delta_lat = -180.0 / (self.lat_size - 1) # from north to south
self.delta_lon = 360.0 / self.lon_size # from west to east
# time resolution in hours
self.time_size = self.data.dimensions['time'].size
self.start_time = self.data['time'][0]
self.end_time = self.data['time'][-1]
self.time_range = self.end_time - self.start_time
self.delta_time = self.time_range / (self.time_size - 1)
def get_nearest_indices(self, latitude, longitude):
"""
Get nearest indices to (latitude, longitude).
        Parameters
----------
latitude : float
Latitude in degrees
longitude : float
Longitude in degrees
Returns
-------
idx_lat : int
index of nearest latitude
idx_lon : int
index of nearest longitude
"""
# index of nearest latitude
idx_lat = int(round((latitude - 90.0) / self.delta_lat))
# avoid out of bounds latitudes
if idx_lat < 0:
idx_lat = 0 # if latitude == 90, north pole
elif idx_lat > self.lat_size:
idx_lat = self.lat_size # if latitude == -90, south pole
# adjust longitude from -180/180 to 0/360
longitude = longitude % 360.0
# index of nearest longitude
idx_lon = int(round(longitude / self.delta_lon)) % self.lon_size
return idx_lat, idx_lon
def interp_data(self, latitude, longitude, utc_time, param):
"""
Interpolate ``param`` values to ``utc_time`` using indices nearest to
(``latitude, longitude``).
        Parameters
----------
latitude : float
Latitude in degrees
longitude : float
Longitude in degrees
utc_time : datetime.datetime or datetime.date
Naive or UTC date or datetime to interpolate
param : str
Name of the parameter to interpolate from the data
Returns
-------
Interpolated ``param`` value at (``utc_time, latitude, longitude``)
Examples
--------
Use this to get a single value of a parameter in the data at a specific
time and set of (latitude, longitude) coordinates.
>>> from datetime import datetime
>>> from pvlib.iotools import ecmwf_macc
>>> data = ecmwf_macc.ECMWF_MACC('aod_tcwv_20121101.nc')
>>> dt = datetime(2012, 11, 1, 11, 33, 1)
>>> data.interp_data(38.2, -122.1, dt, 'aod550')
"""
nctime = self.data['time'] # time
ilat, ilon = self.get_nearest_indices(latitude, longitude)
# time index before
before = netCDF4.date2index(utc_time, nctime, select='before')
fbefore = self.data[param][before, ilat, ilon]
fafter = self.data[param][before + 1, ilat, ilon]
dt_num = netCDF4.date2num(utc_time, nctime.units)
time_ratio = (dt_num - nctime[before]) / self.delta_time
return fbefore + (fafter - fbefore) * time_ratio
def read_ecmwf_macc(filename, latitude, longitude, utc_time_range=None):
"""
Read data from ECMWF MACC reanalysis netCDF4 file.
Parameters
----------
filename : string
full path to netCDF4 data file.
latitude : float
latitude in degrees
longitude : float
longitude in degrees
utc_time_range : sequence of datetime.datetime
pair of start and end naive or UTC date-times
Returns
-------
data : pandas.DataFrame
dataframe for specified range of UTC date-times
"""
ecmwf_macc = ECMWF_MACC(filename)
try:
ilat, ilon = ecmwf_macc.get_nearest_indices(latitude, longitude)
nctime = ecmwf_macc.data['time']
if utc_time_range:
start_idx = netCDF4.date2index(
utc_time_range[0], nctime, select='before')
end_idx = netCDF4.date2index(
utc_time_range[-1], nctime, select='after')
time_slice = slice(start_idx, end_idx + 1)
else:
time_slice = slice(0, ecmwf_macc.time_size)
times = netCDF4.num2date(nctime[time_slice], nctime.units)
df = {k: ecmwf_macc.data[k][time_slice, ilat, ilon]
for k in ecmwf_macc.keys}
if ECMWF_MACC.TCWV in df:
# convert total column water vapor in kg/m^2 at (1-atm, 25-degC) to
# precipitable water in cm
df['precipitable_water'] = df[ECMWF_MACC.TCWV] / 10.0
finally:
ecmwf_macc.data.close()
return pd.DataFrame(df, index=times.astype('datetime64[s]'))
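# Hedged usage sketch (not part of the original module): assuming the file
# from the get_ecmwf_macc docstring example has been downloaded, the time
# series for a site could be read as
#
#     from datetime import datetime
#     # df = read_ecmwf_macc('aod_tcwv_20121101.nc', 38.2, -122.1,
#     #                      (datetime(2012, 11, 1, 0), datetime(2012, 11, 1, 21)))
#     # df[['aod550', 'precipitable_water']]
#
# where the coordinates and times are illustrative values only.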
| mikofski/pvlib-python | pvlib/iotools/ecmwf_macc.py | Python | bsd-3-clause | 11,415 | [
"NetCDF"
] | a8b59e641c4cad915de56d0e48b3c2d6b3c9e1466d3cd4d3d8b811a33bec9d7d |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015 - 2019 Adam.Dybbroe
# Author(s):
# Adam.Dybbroe <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Posttroll-runner for the mesan composite generator. Listens to incoming
satellite data products (lvl2 cloud products) and generates a mesan composite
valid for the closest (whole) hour.
"""
import os
import socket
import argparse
from logging import handlers
import logging.config
import sys
from six.moves.urllib.parse import urlparse
import posttroll.subscriber
from posttroll.publisher import Publish
from posttroll.message import Message
from multiprocessing import Pool, Manager
import threading
try:
# python 3
from queue import Empty
except ImportError:
# python 2
from Queue import Empty
from datetime import timedelta, datetime
from mesan_compositer.utils import check_uri
from mesan_compositer.utils import get_local_ips
from mesan_compositer.composite_tools import get_analysis_time
from mesan_compositer import make_ct_composite as mcc
from mesan_compositer import make_ctth_composite
from mesan_compositer.prt_nwcsaf_cloudamount import derive_sobs as derive_sobs_clamount
from mesan_compositer.prt_nwcsaf_cloudheight import derive_sobs as derive_sobs_clheight
from mesan_compositer import get_config
LOG = logging.getLogger(__name__)
DEFAULT_AREA = "mesanX"
DEFAULT_SUPEROBS_WINDOW_SIZE_NPIX = 32
#: Default time format
_DEFAULT_TIME_FORMAT = '%Y-%m-%d %H:%M:%S'
#: Default log format
_DEFAULT_LOG_FORMAT = '[%(levelname)s: %(asctime)s : %(name)s] %(message)s'
SENSOR = {'NOAA-19': 'avhrr/3',
'NOAA-18': 'avhrr/3',
'NOAA-15': 'avhrr/3',
'Metop-A': 'avhrr/3',
'Metop-B': 'avhrr/3',
'Metop-C': 'avhrr/3',
'EOS-Terra': 'modis',
'EOS-Aqua': 'modis',
'Suomi-NPP': 'viirs',
'NOAA-20': 'viirs'}
GEO_SATS = ['Meteosat-10', 'Meteosat-9', 'Meteosat-8', 'Meteosat-11', ]
MSG_NAME = {'Meteosat-10': 'MSG3', 'Meteosat-9': 'MSG2',
'Meteosat-8': 'MSG1', 'Meteosat-11': 'MSG4'}
PRODUCT_NAMES = ['CMA', 'CT', 'CTTH', 'PC', 'CPP']
def get_arguments():
"""
    Get command line arguments.
    Return
      File path of the logging.ini file
      File path of the application configuration file
"""
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--config_file',
type=str,
dest='config_file',
required=True,
help="The file containing configuration parameters e.g. mesan_sat_config.yaml")
parser.add_argument("-l", "--logging",
help="The path to the log-configuration file (e.g. './logging.ini')",
dest="logging_conf_file",
type=str,
required=False)
args = parser.parse_args()
if 'template' in args.config_file:
print("Template file given as master config, aborting!")
sys.exit()
return args.logging_conf_file, args.config_file
class MesanCompRunError(Exception):
pass
def reset_job_registry(objdict, key):
"""Remove job key from registry"""
LOG.debug("Release/reset job-key " + str(key) + " from job registry")
if key in objdict:
objdict.pop(key)
else:
LOG.warning("Nothing to reset/release - " +
"Register didn't contain any entry matching: " +
str(key))
return
class FilePublisher(threading.Thread):
"""A publisher for the MESAN composite result files. Picks up the return value
from the ctype_composite_worker when ready, and publishes the files via posttroll
"""
def __init__(self, queue):
threading.Thread.__init__(self)
self.loop = True
self.queue = queue
self.jobs = {}
def stop(self):
"""Stops the file publisher"""
self.loop = False
self.queue.put(None)
def run(self):
with Publish('mesan_composite_runner', 0, ['netCDF/3', ]) as publisher:
while self.loop:
retv = self.queue.get()
                if retv is not None:
LOG.info("Publish the files...")
publisher.send(retv)
class FileListener(threading.Thread):
"""A file listener class, to listen for incoming messages with a
relevant file for further processing"""
def __init__(self, queue):
threading.Thread.__init__(self)
self.loop = True
self.queue = queue
def stop(self):
"""Stops the file listener"""
self.loop = False
self.queue.put(None)
def run(self):
with posttroll.subscriber.Subscribe('', ['CF/2',
'2/nwcsaf-msg/0deg/ctth-plax-corrected',
'2/nwcsaf-msg/0deg/ct-plax-corrected'], True) as subscr:
for msg in subscr.recv(timeout=90):
if not self.loop:
break
# Check if it is a relevant message:
if self.check_message(msg):
LOG.debug("Put the message on the queue...")
#LOG.debug("Message = %s", str(msg))
self.queue.put(msg)
def check_message(self, msg):
if not msg:
return False
urlobj = urlparse(msg.data['uri'])
url_ip = socket.gethostbyname(urlobj.netloc)
if urlobj.netloc and (url_ip not in get_local_ips()):
LOG.warning("Server %s not the current one: %s", str(urlobj.netloc), socket.gethostname())
return False
if ('platform_name' not in msg.data or
'orbit_number' not in msg.data or
'start_time' not in msg.data):
LOG.info(
"Message is lacking crucial fields, probably an MSG scene...")
if ('platform_name' not in msg.data or
'nominal_time' not in msg.data or
'pge' not in msg.data):
LOG.warning("Message is lacking crucial fields...")
return False
if msg.data['platform_name'] not in (GEO_SATS + POLAR_SATELLITES):
LOG.info(str(msg.data['platform_name']) + ": " +
"Not a MSG or a NOAA/Metop/S-NPP/Terra/Aqua scene. Continue...")
return False
LOG.debug("Ok: message = %s", str(msg))
return True
def create_message(resultfile, scene):
"""Create the posttroll message"""
to_send = {}
to_send['uri'] = ('ssh://%s/%s' % (SERVERNAME, resultfile))
to_send['uid'] = resultfile
to_send['sensor'] = scene.get('instrument')
if not to_send['sensor']:
to_send['sensor'] = scene.get('sensor')
to_send['platform_name'] = scene['platform_name']
to_send['orbit_number'] = scene.get('orbit_number')
to_send['type'] = 'netCDF'
to_send['format'] = 'MESAN'
to_send['data_processing_level'] = '3'
to_send['start_time'], to_send['end_time'] = scene[
'starttime'], scene['endtime']
pub_message = Message('/' + to_send['format'] + '/' + to_send['data_processing_level'] +
'/polar/direct_readout/',
"file", to_send).encode()
return pub_message
def ready2run(msg, files4comp, job_register, sceneid, product='CT'):
"""Check whether we can start a composite generation on scene"""
LOG.debug("Ready to run?")
LOG.info("Got message: " + str(msg))
if msg.type == 'file':
uri = (msg.data['uri'])
else:
LOG.debug(
"Ignoring this type of message data: type = " + str(msg.type))
return False
try:
file4mesan = check_uri(uri)
except IOError:
LOG.info('Requested file not present on this host!')
return False
platform_name = msg.data['platform_name']
sensors = msg.data['sensor']
if not isinstance(sensors, (list, tuple, set)):
sensors = [sensors]
if 'start_time' not in msg.data and 'nominal_time' not in msg.data:
LOG.warning("No start time in message!")
return False
if platform_name not in POLAR_SATELLITES and platform_name not in GEO_SATS:
LOG.info("Platform not supported: " + str(platform_name))
return False
if platform_name in POLAR_SATELLITES and SENSOR.get(platform_name, 'avhrr/3') not in sensors:
LOG.debug("Scene not applicable. platform and instrument: " +
str(msg.data['platform_name']) + " " +
str(msg.data['sensor']))
return False
elif platform_name in GEO_SATS and 'seviri' not in sensors:
LOG.debug("Scene not applicable. platform and instrument: " +
str(msg.data['platform_name']) + " " +
str(msg.data['sensor']))
return False
if 'uid' not in msg.data:
if 'uri' not in msg.data:
raise IOError("No uri or url in message!")
# Get uid from uri:
uri = urlparse(msg.data['uri'])
uid = os.path.basename(uri.path)
else:
uid = msg.data['uid']
prefixes = ['S_NWC_' + product + '_',
'SAFNWC_' + MSG_NAME.get(str(msg.data['platform_name']), 'MSG4') +
'_' + product + '_']
file_ok = False
for prfx in prefixes:
LOG.debug("File prefix to check for: %s", prfx)
if uid.startswith(prfx):
LOG.debug("File uid ok: %s", str(uid))
file_ok = True
break
if not file_ok:
LOG.debug("File uid not ok: %s", str(uid))
LOG.debug("File is not applicable. " +
"Product requested: " + str(product))
return False
LOG.debug("Scene identifier = " + str(sceneid))
LOG.debug("Job register = " + str(job_register))
if sceneid in job_register and job_register[sceneid]:
LOG.debug("Processing of scene " + str(sceneid) +
" have already been launched...")
return False
if sceneid not in files4comp:
files4comp[sceneid] = []
files4comp[sceneid].append(file4mesan)
LOG.info("Files ready for Mesan composite: " +
str(files4comp[sceneid]))
job_register[sceneid] = datetime.utcnow()
return True
def ctype_composite_worker(scene, job_id, publish_q, config_options):
"""Spawn/Start a Mesan composite generation on a new thread if available"""
try:
LOG.debug("Ctype: Start compositer...")
# Get the time of analysis from start and end times:
time_of_analysis = get_analysis_time(
scene['starttime'], scene['endtime'])
twindow = int(config_options.get('absolute_time_threshold_minutes', '30'))
delta_t = timedelta(minutes=twindow)
LOG.debug("Time window = " + str(twindow))
mesan_area_id = config_options.get('mesan_area_id', None)
if not mesan_area_id:
LOG.warning("No area id specified in config file. Using default = " +
str(DEFAULT_AREA))
mesan_area_id = DEFAULT_AREA
LOG.info(
"Make ctype composite for area id = " + str(mesan_area_id))
npix = int(config_options.get('number_of_pixels', DEFAULT_SUPEROBS_WINDOW_SIZE_NPIX))
ipar = str(config_options.get('cloud_amount_ipar'))
if not ipar:
raise IOError("No ipar value in config file!")
ctcomp = mcc.ctCompositer(time_of_analysis, delta_t, mesan_area_id, config_options)
ctcomp.get_catalogue()
if not ctcomp.make_composite():
LOG.error("Failed creating ctype composite...")
else:
ctcomp.write()
ctcomp.make_quicklooks()
# Make Super observations:
LOG.info("Make Cloud Type super observations")
values = {"area": mesan_area_id, }
bname = time_of_analysis.strftime(
config_options['cloudamount_filename']) % values
path = config_options['composite_output_dir']
filename = os.path.join(path, bname + '.dat')
derive_sobs_clamount(ctcomp.composite, ipar, npix, filename)
result_file = ctcomp.filename
pubmsg = create_message(result_file, scene)
LOG.info("Sending: " + str(pubmsg))
publish_q.put(pubmsg)
if isinstance(job_id, datetime):
dt_ = datetime.utcnow() - job_id
LOG.info("Ctype composite scene " + str(job_id) +
" finished. It took: " + str(dt_))
else:
LOG.warning(
"Job entry is not a datetime instance: " + str(job_id))
except:
LOG.exception('Failed in ctype_composite_worker...')
raise
def ctth_composite_worker(scene, job_id, publish_q, config_options):
"""Spawn/Start a Mesan cloud height composite generation on a new thread if
available"""
try:
LOG.debug("CTTH compositer: Start...")
# Get the time of analysis from start and end times:
time_of_analysis = get_analysis_time(
scene['starttime'], scene['endtime'])
twindow = int(config_options.get('absolute_time_threshold_minutes', '30'))
delta_t = timedelta(minutes=twindow)
LOG.debug("Time window = " + str(twindow))
mesan_area_id = config_options.get('mesan_area_id', None)
if not mesan_area_id:
LOG.warning("No area id specified in config file. Using default = " +
str(DEFAULT_AREA))
mesan_area_id = DEFAULT_AREA
LOG.info("Make cloud height composite for area id = " + str(mesan_area_id))
npix = int(config_options.get('number_of_pixels', DEFAULT_SUPEROBS_WINDOW_SIZE_NPIX))
ipar = config_options.get('cloud_amount_ipar')
if not ipar:
raise IOError("No ipar value in config file!")
ctth_comp = make_ctth_composite.ctthComposite(time_of_analysis, delta_t, mesan_area_id, config_options)
ctth_comp.get_catalogue()
if not ctth_comp.make_composite():
LOG.error("Failed creating ctth composite...")
else:
ctth_comp.write()
ctth_comp.make_quicklooks()
# Make Super observations:
values = {"area": mesan_area_id, }
bname = time_of_analysis.strftime(OPTIONS['cloudheight_filename']) % values
path = config_options['composite_output_dir']
filename = os.path.join(path, bname + '.dat')
LOG.info("Make Cloud Height super observations. Output file = %s", str(filename))
derive_sobs_clheight(ctth_comp.composite, npix, filename)
result_file = ctth_comp.filename
pubmsg = create_message(result_file, scene)
LOG.info("Sending: " + str(pubmsg))
publish_q.put(pubmsg)
if isinstance(job_id, datetime):
dt_ = datetime.utcnow() - job_id
LOG.info("Cloud Height composite scene " + str(job_id) +
" finished. It took: " + str(dt_))
else:
LOG.warning(
"Job entry is not a datetime instance: " + str(job_id))
except:
LOG.exception('Failed in ctth_composite_worker...')
raise
def mesan_live_runner(config_options):
"""Listens and triggers processing"""
LOG.info("*** Start the runner for the Mesan composite generator:")
LOG.debug("os.environ = " + str(os.environ))
npix = int(config_options.get('number_of_pixels', DEFAULT_SUPEROBS_WINDOW_SIZE_NPIX))
LOG.debug("Number of pixels = " + str(npix))
pool = Pool(processes=6, maxtasksperchild=1)
manager = Manager()
listener_q = manager.Queue()
publisher_q = manager.Queue()
pub_thread = FilePublisher(publisher_q)
pub_thread.start()
listen_thread = FileListener(listener_q)
listen_thread.start()
composite_files = {}
jobs_dict = {}
while True:
try:
msg = listener_q.get()
except Empty:
LOG.debug("Empty listener queue...")
continue
LOG.debug(
"Number of threads currently alive: " + str(threading.active_count()))
if 'start_time' in msg.data:
start_time = msg.data['start_time']
elif 'nominal_time' in msg.data:
start_time = msg.data['nominal_time']
else:
LOG.warning("Neither start_time nor nominal_time in message!")
start_time = None
if 'end_time' in msg.data:
end_time = msg.data['end_time']
else:
LOG.warning("No end_time in message!")
end_time = None
sensor = str(msg.data['sensor'])
platform_name = msg.data['platform_name']
if platform_name not in GEO_SATS:
orbit_number = int(msg.data['orbit_number'])
LOG.info("Polar satellite: " + str(platform_name))
else:
orbit_number = '00000'
LOG.info("Geostationary satellite: " + str(platform_name))
keyname = (str(platform_name) + '_' +
str(orbit_number) + '_' +
str(start_time.strftime('%Y%m%d%H%M')))
product = 'UNKNOWN'
if 'pge' in msg.data:
product = msg.data['pge']
elif 'uid' in msg.data:
uid = msg.data['uid']
for pge in PRODUCT_NAMES:
match = '_' + pge + '_'
if match in uid:
product = pge
break
keyname = str(product) + '_' + keyname
status = ready2run(msg, composite_files,
jobs_dict, keyname, product)
if status:
# Start composite generation:
urlobj = urlparse(msg.data['uri'])
path, fname = os.path.split(urlobj.path)
LOG.debug("path " + str(path) + " filename = " + str(fname))
scene = {'platform_name': platform_name,
'orbit_number': orbit_number,
'starttime': start_time, 'endtime': end_time,
'sensor': sensor,
'filename': urlobj.path,
'product': product}
if keyname not in jobs_dict:
LOG.warning("Scene-run seems unregistered! Forget it...")
continue
if product == 'CT':
LOG.debug("Product is CT")
pool.apply_async(ctype_composite_worker,
(scene,
jobs_dict[
keyname],
publisher_q,
config_options))
elif product == 'CTTH':
LOG.debug("Product is CTTH")
pool.apply_async(ctth_composite_worker,
(scene,
jobs_dict[
keyname],
publisher_q,
config_options))
else:
LOG.warning("Product %s not supported!", str(product))
# Block any future run on this scene for x minutes from now
# x = 5
thread_job_registry = threading.Timer(
5 * 60.0, reset_job_registry, args=(jobs_dict, keyname))
thread_job_registry.start()
pool.close()
pool.join()
pub_thread.stop()
listen_thread.stop()
if __name__ == "__main__":
(logfile, config_filename) = get_arguments()
if logfile:
logging.config.fileConfig(logfile, disable_existing_loggers=False)
handler = logging.StreamHandler(sys.stderr)
formatter = logging.Formatter(fmt=_DEFAULT_LOG_FORMAT,
datefmt=_DEFAULT_TIME_FORMAT)
handler.setFormatter(formatter)
handler.setLevel(logging.DEBUG)
logging.getLogger('').addHandler(handler)
logging.getLogger('').setLevel(logging.DEBUG)
logging.getLogger('posttroll').setLevel(logging.INFO)
LOG = logging.getLogger('mesan_composite_runner')
log_handlers = logging.getLogger('').handlers
for log_handle in log_handlers:
if type(log_handle) is handlers.SMTPHandler:
LOG.debug("Mail notifications to: %s", str(log_handle.toaddrs))
OPTIONS = get_config(config_filename)
POLSATS_STR = OPTIONS.get('polar_satellites')
POLAR_SATELLITES = POLSATS_STR.split()
servername = None
servername = socket.gethostname()
SERVERNAME = OPTIONS.get('servername', servername)
mesan_live_runner(OPTIONS)
| adybbroe/mesan_compositer | mesan_compositer/mesan_composite_runner.py | Python | gpl-3.0 | 21,434 | [
"NetCDF"
] | b8d1a52819e19f630e733f33ff10f0631e046069264e9734fef107e26e91cd9b |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016, Shogun Toolbox Foundation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Written (W) 2016 Ariane Paola Gomes
#
from setuptools import find_packages, setup as shogun_setup
import distutils.command.build
import setuptools.command.install
import distutils.dir_util
import distutils.spawn
import glob
import re
import os
import sys
setup_py_location = os.path.abspath(os.path.dirname(__file__))
shogun_build_directory = os.path.join(setup_py_location, 'build')
shogun_generated_install = os.path.join(shogun_build_directory, 'install')
shogun_versionstring_h = os.path.abspath('src/shogun/lib/versionstring.h')
shogun_python_packages_location = None
shogun_completed_cmake = False
shogun_completed_make = False
shogun_completed_make_install = False
show_debug_information = True
with open(os.path.join(setup_py_location, 'README.md')) as f:
readme = f.read()
try:
import pypandoc
readme = pypandoc.convert(readme, to='rst', format='md')
except:
print("Conversion of long_description from markdown to reStructuredText failed, skipping...")
def shogun_bootstrap():
global shogun_completed_cmake
global shogun_completed_make
global shogun_completed_make_install
global shogun_python_packages_location
print("Bootstrapping Shogun")
if shogun_cmake():
shogun_completed_cmake = True
else:
print('Error running CMake')
if shogun_make():
shogun_completed_make = True
else:
print('Error running make')
if shogun_make_install():
shogun_completed_make_install = True
else:
print('Error running make install')
if shogun_preconditions_met():
print("Setting Shogun Python packages location")
shogun_python_packages_location = glob.glob(os.path.join(shogun_generated_install, 'lib/*/dist-packages'))[0]
print("Bootstrapping Shogun successfully completed!")
shogun_debug_information()
else:
print("Shogun bootstrapping failed!")
print("Please make sure that you have cmake and make installed.")
sys.exit()
def shogun_preconditions_met():
print("Verifying Shogun preconditions")
if show_debug_information:
print("Shogun build environment completed tasks: cmake: [%s] - make: [%s] - make install: [%s]" % (shogun_completed_cmake, shogun_completed_make, shogun_completed_make_install))
return shogun_completed_cmake and shogun_completed_make and shogun_completed_make_install
def shogun_preconditions():
if not shogun_preconditions_met():
shogun_bootstrap()
return shogun_preconditions_met()
def shogun_debug_information():
if show_debug_information:
print("Shogun generated installation location %s" % shogun_generated_install)
print("Shogun Python package location: %s" % shogun_python_packages_location)
print("Shogun version string location: %s" % shogun_versionstring_h)
def parse_shogun_version(version_header):
    shogun_version_pattern = re.compile(r'#define MAINVERSION \"([0-9]\.[0-9]\.[0-9])\"')
with open(version_header, 'r') as f:
content = f.read()
matches = re.findall(shogun_version_pattern, content)
if len(matches):
return matches[0]
else:
return 'undefined'
def shogun_cmake(arguments=None):
print("Running CMake")
if arguments is None:
arguments='-DPythonModular=ON -DENABLE_TESTING=OFF -DCMAKE_INSTALL_PREFIX=install'
if distutils.spawn.find_executable('cmake') is not None:
print('CMake arguments: %s ' % arguments)
print('Creating build directory: %s' % shogun_build_directory)
distutils.dir_util.mkpath(shogun_build_directory)
os.chdir(shogun_build_directory)
try:
distutils.spawn.spawn(['cmake'] + arguments.split() + ['..'])
except distutils.spawn.DistutilsExecError:
print('CMake error.')
return False
finally:
os.chdir(os.path.abspath(setup_py_location))
return True
else:
print('CMake is required to build shogun!')
return False
def shogun_make(arguments=None):
print("Running make")
if arguments is None:
arguments='all'
if distutils.spawn.find_executable('make') is not None:
print('make arguments: %s ' % arguments)
os.chdir(shogun_build_directory)
try:
distutils.spawn.spawn(['make'] + arguments.split())
except distutils.spawn.DistutilsExecError:
print('make error.')
return False
finally:
os.chdir(os.path.abspath(setup_py_location))
return True
else:
print('make is required to build shogun!')
return False
def shogun_make_install():
return shogun_make(arguments='install')
def get_shogun_version():
print("Retrieving Shogun version")
if shogun_preconditions():
shogun_version = parse_shogun_version(shogun_versionstring_h)
if show_debug_information:
print('The Shogun version is %s' % shogun_version)
return shogun_version
def python_package_path(package_path):
print("Generating destination Python package path")
directories = package_path.split(os.sep)
destination_path = os.path.join(directories[-2], directories[-1])
if show_debug_information:
print("Shogun destination Python package path: %s" % destination_path)
return destination_path
def shogun_packages():
if not shogun_preconditions_met():
shogun_bootstrap()
return find_packages(where=shogun_python_packages_location)
def shogun_package_directories():
package_directories = dict()
if not shogun_preconditions_met():
shogun_bootstrap()
package_directories[''] = shogun_python_packages_location
return package_directories
def shogun_data_files():
data_files = list()
libshogun_files = glob.glob(os.path.join(shogun_generated_install, 'lib/libshogun*'))
modshogun_so_destination = os.path.join('lib', python_package_path(shogun_python_packages_location))
modshogun_so_file = glob.glob(os.path.join(shogun_python_packages_location, '_modshogun.so'))[0]
# appending data files
data_files.append(('lib', libshogun_files))
data_files.append((modshogun_so_destination, [modshogun_so_file]))
if show_debug_information:
print('Shogun Python package data files:')
for data_file_content in data_files:
print('|->[%s]' % data_file_content[0])
for data_file in data_file_content[1]:
print(' |--> %s' % os.path.basename(data_file))
return data_files
# https://docs.python.org/2/distutils/apiref.html#creating-a-new-distutils-command
class ShogunBuild(distutils.command.build.build):
user_options = distutils.command.build.build.user_options + [('cmake=', None, 'Specify CMake arguments.')]
build_base = ''
build_lib = ''
build_scripts = ''
plat_name = ''
def initialize_options(self):
self.cmake = None
def finalize_options(self):
pass
def run(self):
print('Running Package build')
        if not shogun_preconditions_met():
shogun_cmake(self.cmake)
shogun_make()
shogun_make_install()
# Command.sub_commands
class ShogunInstall(setuptools.command.install.install):
def run(self):
print('Running Package install')
if shogun_preconditions():
self.do_egg_install()
shogun_setup(
name = "shogun-ml",
version = get_shogun_version(),
description = 'The Shogun Machine Learning Toolbox',
long_description=readme,
url = 'http://www.shogun-toolbox.org/',
author = 'Shogun Team',
author_email = '[email protected]',
license = 'The GNU General Public License v3.0',
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers = [
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Information Technology',
'Intended Audience :: Science/Research',
'Topic :: Software Development :: Libraries',
# Python 2 and Python 3 support
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3'
],
keywords = [
'Machine Learning',
'Gaussian Processes',
'Neural Networks',
'Deep Learning'
],
zip_safe = False,
# Shogun bootstrapping build and install
cmdclass = {'build': ShogunBuild, 'install': ShogunInstall},
# Shogun package content
packages = shogun_packages(),
package_dir = shogun_package_directories(),
py_modules =['modshogun'],
data_files = shogun_data_files(),
# Shogun dependencies
install_requires = ['numpy']
)
| sanuj/shogun | setup.py | Python | gpl-3.0 | 10,586 | [
"Gaussian"
] | ad90b8b9fbe76b546c602f045604bac73593d616812d438f4b9807145dc82a56 |
# Copyright 2019 the GPflow authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Tuple
import numpy as np
import pytest
from numpy.testing import assert_allclose
import gpflow
from gpflow.base import TensorType
from gpflow.config import default_jitter
from gpflow.kernels import Kernel
from gpflow.likelihoods import Likelihood
from gpflow.mean_functions import Constant
from gpflow.models import GPModel, maximum_log_likelihood_objective, training_loss_closure
rng = np.random.RandomState(0)
class Datum:
X = rng.rand(20, 1) * 10
Y = np.sin(X) + 0.9 * np.cos(X * 1.6) + rng.randn(*X.shape) * 0.8
Y = np.tile(Y, 2) # two identical columns
Xtest = rng.rand(10, 1) * 10
data = (X, Y)
class DatumVGP:
N, Ns, DX, DY = 100, 10, 2, 2
np.random.seed(1)
X = np.random.randn(N, DX)
Xs = np.random.randn(Ns, DX)
Y = np.random.randn(N, DY)
q_mu = np.random.randn(N, DY)
q_sqrt = np.random.randn(DY, N, N)
q_alpha = np.random.randn(N, DX)
q_lambda = np.random.randn(N, DX) ** 2
data = (X, Y)
def _create_full_gp_model() -> gpflow.models.GPR:
"""
GP Regression
"""
full_gp_model = gpflow.models.GPR(
(Datum.X, Datum.Y),
kernel=gpflow.kernels.SquaredExponential(),
mean_function=gpflow.mean_functions.Constant(),
)
return full_gp_model
def _create_approximate_models() -> Tuple[
gpflow.models.VGP,
gpflow.models.SVGP,
gpflow.models.SVGP,
gpflow.models.SGPR,
gpflow.models.GPRFITC,
]:
"""
1) Variational GP (with the likelihood set to Gaussian)
2) Sparse variational GP (likelihood is Gaussian, inducing points
at the data)
3) Sparse variational GP (as above, but with the whitening rotation
of the inducing variables)
4) Sparse variational GP Regression (as above, but there the inducing
variables are 'collapsed' out, as in Titsias 2009)
5) FITC Sparse GP Regression
"""
model_1 = gpflow.models.VGP(
(Datum.X, Datum.Y),
kernel=gpflow.kernels.SquaredExponential(),
likelihood=gpflow.likelihoods.Gaussian(),
mean_function=gpflow.mean_functions.Constant(),
)
model_2 = gpflow.models.SVGP(
kernel=gpflow.kernels.SquaredExponential(),
likelihood=gpflow.likelihoods.Gaussian(),
inducing_variable=Datum.X.copy(),
q_diag=False,
whiten=False,
mean_function=gpflow.mean_functions.Constant(),
num_latent_gps=Datum.Y.shape[1],
)
gpflow.set_trainable(model_2.inducing_variable, False)
model_3 = gpflow.models.SVGP(
kernel=gpflow.kernels.SquaredExponential(),
likelihood=gpflow.likelihoods.Gaussian(),
inducing_variable=Datum.X.copy(),
q_diag=False,
whiten=True,
mean_function=gpflow.mean_functions.Constant(),
num_latent_gps=Datum.Y.shape[1],
)
gpflow.set_trainable(model_3.inducing_variable, False)
model_4 = gpflow.models.SGPR(
(Datum.X, Datum.Y),
kernel=gpflow.kernels.SquaredExponential(),
inducing_variable=Datum.X.copy(),
mean_function=Constant(),
)
gpflow.set_trainable(model_4.inducing_variable, False)
model_5 = gpflow.models.GPRFITC(
(Datum.X, Datum.Y),
kernel=gpflow.kernels.SquaredExponential(),
inducing_variable=Datum.X.copy(),
mean_function=Constant(),
)
gpflow.set_trainable(model_5.inducing_variable, False)
return model_1, model_2, model_3, model_4, model_5
def _create_vgp_model(
kernel: Kernel,
likelihood: Likelihood,
q_mu: Optional[TensorType] = None,
q_sqrt: Optional[TensorType] = None,
) -> gpflow.models.VGP:
model_vgp = gpflow.models.VGP((DatumVGP.X, DatumVGP.Y), kernel, likelihood)
if q_mu is not None and q_sqrt is not None:
model_vgp.q_mu.assign(q_mu)
model_vgp.q_sqrt.assign(q_sqrt)
return model_vgp
def _create_vgpao_model(
kernel: Kernel,
likelihood: Likelihood,
q_alpha: Optional[TensorType],
q_lambda: Optional[TensorType],
) -> gpflow.models.VGPOpperArchambeau:
model_vgpoa = gpflow.models.VGPOpperArchambeau(
(DatumVGP.X, DatumVGP.Y), kernel, likelihood, num_latent_gps=DatumVGP.DY
)
model_vgpoa.q_alpha.assign(q_alpha)
model_vgpoa.q_lambda.assign(q_lambda)
return model_vgpoa
def _create_svgp_model(
kernel: Kernel,
likelihood: Likelihood,
q_mu: Optional[TensorType],
q_sqrt: Optional[TensorType],
whiten: bool,
) -> gpflow.models.SVGP:
model_svgp = gpflow.models.SVGP(
kernel,
likelihood,
DatumVGP.X.copy(),
whiten=whiten,
q_diag=False,
num_latent_gps=DatumVGP.DY,
)
model_svgp.q_mu.assign(q_mu)
model_svgp.q_sqrt.assign(q_sqrt)
return model_svgp
@pytest.mark.parametrize("approximate_model", _create_approximate_models())
def test_equivalence(approximate_model: GPModel) -> None:
"""
With a Gaussian likelihood, and inducing points (where appropriate)
positioned at the data, many of the gpflow methods are equivalent (perhaps
subject to some optimization).
"""
def optimize(model: GPModel) -> None:
opt = gpflow.optimizers.Scipy()
loss = training_loss_closure(model, Datum.data)
opt.minimize(loss, model.trainable_variables, options=dict(maxiter=3000))
if isinstance(model, gpflow.models.SVGP) and not model.whiten:
# The (S)VGP model in non-whitened representation has significantly
# worse optimization behaviour. To get the tests to pass, we need
# to optimize much harder: we set ftol=gtol=0.0 to enforce
# continued optimization.
opt.minimize(
loss, model.trainable_variables, options=dict(maxiter=7000, ftol=0.0, gtol=0.0)
)
gpr_model = _create_full_gp_model()
optimize(gpr_model)
optimize(approximate_model)
gpr_likelihood = gpr_model.log_marginal_likelihood()
approximate_likelihood = maximum_log_likelihood_objective(approximate_model, Datum.data)
assert_allclose(approximate_likelihood, gpr_likelihood, rtol=1e-6)
gpr_kernel_ls = gpr_model.kernel.lengthscales.numpy()
gpr_kernel_var = gpr_model.kernel.variance.numpy()
approximate_kernel_ls = approximate_model.kernel.lengthscales.numpy()
approximate_kernel_var = approximate_model.kernel.variance.numpy()
assert_allclose(gpr_kernel_ls, approximate_kernel_ls, 1e-4)
assert_allclose(gpr_kernel_var, approximate_kernel_var, 1e-3)
gpr_mu, gpr_var = gpr_model.predict_y(Datum.Xtest)
approximate_mu, approximate_var = approximate_model.predict_y(Datum.Xtest)
assert_allclose(gpr_mu, approximate_mu, 1e-3)
assert_allclose(gpr_var, approximate_var, 1e-4)
def test_equivalence_vgp_and_svgp() -> None:
kernel = gpflow.kernels.Matern52()
likelihood = gpflow.likelihoods.StudentT()
svgp_model = _create_svgp_model(kernel, likelihood, DatumVGP.q_mu, DatumVGP.q_sqrt, whiten=True)
vgp_model = _create_vgp_model(kernel, likelihood, DatumVGP.q_mu, DatumVGP.q_sqrt)
likelihood_svgp = svgp_model.elbo(DatumVGP.data)
likelihood_vgp = vgp_model.elbo()
assert_allclose(likelihood_svgp, likelihood_vgp, rtol=1e-2)
svgp_mu, svgp_var = svgp_model.predict_f(DatumVGP.Xs)
vgp_mu, vgp_var = vgp_model.predict_f(DatumVGP.Xs)
assert_allclose(svgp_mu, vgp_mu)
assert_allclose(svgp_var, vgp_var)
def test_equivalence_vgp_and_opper_archambeau() -> None:
kernel = gpflow.kernels.Matern52()
likelihood = gpflow.likelihoods.StudentT()
vgp_oa_model = _create_vgpao_model(kernel, likelihood, DatumVGP.q_alpha, DatumVGP.q_lambda)
K = kernel(DatumVGP.X) + np.eye(DatumVGP.N) * default_jitter()
L = np.linalg.cholesky(K)
L_inv = np.linalg.inv(L)
K_inv = np.linalg.inv(K)
mean = K @ DatumVGP.q_alpha
prec_dnn = K_inv[None, :, :] + np.array([np.diag(l ** 2) for l in DatumVGP.q_lambda.T])
var_dnn = np.linalg.inv(prec_dnn)
svgp_model_unwhitened = _create_svgp_model(
kernel, likelihood, mean, np.linalg.cholesky(var_dnn), whiten=False
)
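    # Whiten the posterior: q_mu = L^{-1} m and q_sqrt = chol(L^{-1} S L^{-T}),
    # computed per latent dimension.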
mean_white_nd = L_inv.dot(mean)
var_white_dnn = np.einsum("nN,dNM,mM->dnm", L_inv, var_dnn, L_inv)
q_sqrt_nnd = np.linalg.cholesky(var_white_dnn)
vgp_model = _create_vgp_model(kernel, likelihood, mean_white_nd, q_sqrt_nnd)
likelihood_vgp = vgp_model.elbo()
likelihood_vgp_oa = vgp_oa_model.elbo()
likelihood_svgp_unwhitened = svgp_model_unwhitened.elbo(DatumVGP.data)
assert_allclose(likelihood_vgp, likelihood_vgp_oa, rtol=1e-2)
assert_allclose(likelihood_vgp, likelihood_svgp_unwhitened, rtol=1e-2)
vgp_oa_mu, vgp_oa_var = vgp_oa_model.predict_f(DatumVGP.Xs)
svgp_unwhitened_mu, svgp_unwhitened_var = svgp_model_unwhitened.predict_f(DatumVGP.Xs)
vgp_mu, vgp_var = vgp_model.predict_f(DatumVGP.Xs)
assert_allclose(vgp_oa_mu, vgp_mu)
assert_allclose(vgp_oa_var, vgp_var, rtol=1e-4) # jitter?
assert_allclose(svgp_unwhitened_mu, vgp_mu)
assert_allclose(svgp_unwhitened_var, vgp_var, rtol=1e-4)
class DatumUpper:
rng = np.random.default_rng(123)
X = rng.random((100, 1))
Y = np.sin(1.5 * 2 * np.pi * X) + rng.standard_normal(X.shape) * 0.1 + 5.3
assert Y.mean() > 5.0, "offset ensures a regression test against the bug fixed by PR #1560"
data = (X, Y)
def test_upper_bound_few_inducing_points() -> None:
"""
Test for upper bound for regression marginal likelihood
"""
model_vfe = gpflow.models.SGPR(
(DatumUpper.X, DatumUpper.Y),
gpflow.kernels.SquaredExponential(),
inducing_variable=DatumUpper.X[:10, :].copy(),
mean_function=Constant(),
)
opt = gpflow.optimizers.Scipy()
opt.minimize(
model_vfe.training_loss,
variables=model_vfe.trainable_variables,
options=dict(maxiter=500),
)
full_gp = gpflow.models.GPR(
(DatumUpper.X, DatumUpper.Y),
kernel=gpflow.kernels.SquaredExponential(),
mean_function=Constant(),
)
full_gp.kernel.lengthscales.assign(model_vfe.kernel.lengthscales)
full_gp.kernel.variance.assign(model_vfe.kernel.variance)
full_gp.likelihood.variance.assign(model_vfe.likelihood.variance)
full_gp.mean_function.c.assign(model_vfe.mean_function.c)
lml_upper = model_vfe.upper_bound()
lml_vfe = model_vfe.elbo()
lml_full_gp = full_gp.log_marginal_likelihood()
assert lml_vfe < lml_full_gp
assert lml_full_gp < lml_upper
| GPflow/GPflow | tests/integration/test_method_equivalence.py | Python | apache-2.0 | 11,062 | [
"Gaussian"
] | 559c217ce9d53225389b44a46e253c36d211786f5b12adf6ae191f69007ec762 |
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2012 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <[email protected]>
##
import mock
from stoqdrivers.printers.virtual.Simple import Simple
from stoqlib.database.runtime import get_current_station
from stoqlib.domain.test.domaintest import DomainTest
from ecf.couponprinter import CouponPrinter
from ecf.ecfdomain import ECFPrinter
class ECFTest(DomainTest):
def setUp(self):
super(ECFTest, self).setUp()
new_store = mock.Mock()
new_store.return_value = self.store
get_supported_printers = mock.Mock()
get_supported_printers.return_value = {u'virtual': [Simple]}
fake_method = lambda *a, **k: None
self._mocks = []
# Fiscal/Coupon printer methods usually creates and
# commits their transactions.
self._mocks.append(mock.patch('stoqlib.api.StoqAPI.new_store',
new=new_store))
self._mocks.append(mock.patch('ecf.couponprinter.new_store',
new=new_store))
self._mocks.append(mock.patch.object(self.store, 'commit',
new=fake_method))
self._mocks.append(mock.patch.object(self.store, 'close',
new=fake_method))
self._mocks.append(mock.patch('stoqdrivers.printers.base.get_supported_printers',
new=get_supported_printers))
for mocked in self._mocks:
mocked.start()
self.ecf_printer = self.create_ecf_printer()
self.printer = self.create_coupon_printer(self.ecf_printer)
def tearDown(self):
for mocked in self._mocks:
mocked.stop()
super(ECFTest, self).tearDown()
def create_ecf_printer(self):
printer = ECFPrinter(
store=self.store,
station=get_current_station(self.store),
brand=u'virtual',
model=u'Simple',
device_name=u'',
device_serial=u'',
baudrate=9600,
is_active=True,
)
# This might load state from disk that says that
# the printer is closed, we don't care about that,
# so override whatever state was loaded from disk so that
# the tests can pass.
printer.till_closed = False
printer.create_fiscal_printer_constants()
return printer
def create_coupon_printer(self, printer=None):
return CouponPrinter(printer or self.create_ecf_printer())
| tiagocardosos/stoq | plugins/ecf/test/ecftest.py | Python | gpl-2.0 | 3,344 | [
"VisIt"
] | 802660c0079327404111cd5ab7521b9c774354a224abc8bc337244ba3e84bc65 |
# coding: utf-8
from __future__ import unicode_literals
from six.moves.urllib.parse import urljoin
class Action(object):
""" Base class for all actions """
def run(self, context):
raise NotImplementedError()
class Visit(Action):
""" Visit an URL in browser """
def __init__(self, url=None):
self.url = url
def get_url(self, server_url=None):
"""
Return target URL. May be predefined in subclass with dynamic behaviour.
:param server_url: Root server URL with scheme and domain
:return: URL to visit
"""
if not server_url:
return self.url
return urljoin(server_url, self.url)
def run(self, context):
server_url = context.get('server_url')
url = self.get_url(server_url)
context.browser.visit(url)
class FillForm(Action):
"""
Fill data in form and (optionally) submit form.
"""
def __init__(self, data=None, submit=None):
if data is None:
data = {}
self.data = data
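        # submit=True falls back to the default submit-button selector;
        # a custom CSS selector may be passed instead.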
if submit is True:
submit = '[type="submit"]'
self.submit = submit
def run(self, context):
context.browser.fill_form(self.data)
if self.submit:
context.browser.find_by_css(self.submit).first.click()
| tricoder42/python-ariadne | ariadne/actions.py | Python | mit | 1,317 | [
"VisIt"
] | 364c03cbe878949d6e748f26b7e2ba215b0d1f97dc86287c53b2bb886447530e |
from CTFd.models import db, WrongKeys, Pages, Config, Tracking, Users, Containers, ip2long, long2ip
from six.moves.urllib.parse import urlparse, urljoin
import six
from werkzeug.utils import secure_filename
from functools import wraps
from flask import current_app as app, g, request, redirect, url_for, session, render_template, abort
from flask_caching import Cache
from itsdangerous import Signer, BadSignature
from socket import inet_aton, inet_ntoa, socket
from struct import unpack, pack, error
from sqlalchemy.engine.url import make_url
from sqlalchemy import create_engine
from email.mime.text import MIMEText
import time
import datetime
import hashlib
import shutil
import requests
import logging
import logging.handlers
import os
import sys
import re
import time
import smtplib
import tempfile
import subprocess
import urllib
import json
cache = Cache()
class StreamToLogger(object):
"""
Fake file-like stream object that redirects writes to a logger instance.
"""
def __init__(self, logger, log_level=logging.INFO):
self.logger = logger
self.log_level = log_level
self.linebuf = ''
def write(self, buf):
for line in buf.rstrip().splitlines():
self.logger.log(self.log_level, line.rstrip())
logging.basicConfig(
level=logging.DEBUG,
format='%(asctime)s:%(levelname)s:%(name)s:%(message)s',
filename="out.log",
filemode='a'
)
def init_logs(app):
logger_keys = logging.getLogger('keys')
logger_logins = logging.getLogger('logins')
logger_regs = logging.getLogger('regs')
logger_keys.setLevel(logging.INFO)
logger_logins.setLevel(logging.INFO)
logger_regs.setLevel(logging.INFO)
try:
parent = os.path.dirname(__file__)
except:
parent = os.path.dirname(os.path.realpath(sys.argv[0]))
log_dir = os.path.join(parent, 'logs')
if not os.path.exists(log_dir):
os.makedirs(log_dir)
logs = [
os.path.join(parent, 'logs', 'keys.log'),
os.path.join(parent, 'logs', 'logins.log'),
os.path.join(parent, 'logs', 'registers.log')
]
for log in logs:
if not os.path.exists(log):
open(log, 'a').close()
key_log = logging.handlers.RotatingFileHandler(os.path.join(parent, 'logs', 'keys.log'), maxBytes=10000)
login_log = logging.handlers.RotatingFileHandler(os.path.join(parent, 'logs', 'logins.log'), maxBytes=10000)
register_log = logging.handlers.RotatingFileHandler(os.path.join(parent, 'logs', 'registers.log'), maxBytes=10000)
logger_keys.addHandler(key_log)
logger_logins.addHandler(login_log)
logger_regs.addHandler(register_log)
logger_keys.propagate = 0
logger_logins.propagate = 0
logger_regs.propagate = 0
def init_errors(app):
@app.errorhandler(404)
def page_not_found(error):
return render_template('errors/404.html'), 404
@app.errorhandler(403)
def forbidden(error):
return render_template('errors/403.html'), 403
@app.errorhandler(500)
def general_error(error):
return render_template('errors/500.html'), 500
@app.errorhandler(502)
def gateway_error(error):
return render_template('errors/502.html'), 502
def init_utils(app):
app.jinja_env.filters['unix_time'] = unix_time
app.jinja_env.filters['unix_time_millis'] = unix_time_millis
app.jinja_env.filters['long2ip'] = long2ip
app.jinja_env.globals.update(pages=pages)
app.jinja_env.globals.update(can_register=can_register)
app.jinja_env.globals.update(can_send_mail=can_send_mail)
app.jinja_env.globals.update(ctf_name=ctf_name)
app.jinja_env.globals.update(ctf_theme=ctf_theme)
app.jinja_env.globals.update(can_create_container=can_create_container)
@app.context_processor
def inject_user():
if session:
return dict(session)
return dict()
@app.before_request
def needs_setup():
if request.path == '/setup' or request.path.startswith('/static'):
return
if not is_setup():
return redirect(url_for('views.setup'))
@app.before_request
def tracker():
if authed():
track = Tracking.query.filter_by(ip=ip2long(get_ip()), user=session['id']).first()
if not track:
visit = Tracking(ip=get_ip(), user=session['id'])
db.session.add(visit)
db.session.commit()
else:
track.date = datetime.datetime.utcnow()
db.session.commit()
db.session.close()
@app.before_request
def csrf():
if not session.get('nonce'):
session['nonce'] = sha512(os.urandom(10))
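        # Reject any POST whose form nonce does not match the one stored in the session.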
if request.method == "POST":
if session['nonce'] != request.form.get('nonce'):
abort(403)
@cache.memoize()
def ctf_name():
name = get_config('ctf_name')
return name if name else 'CTFd'
@cache.memoize()
def ctf_theme():
theme = get_config('ctf_theme')
return theme if theme else ''
def pages():
pages = Pages.query.filter(Pages.route!="index").all()
return pages
def authed():
return bool(session.get('id', False))
def is_verified():
if get_config('verify_emails'):
team = Users.query.filter_by(id=session.get('id')).first()
if team:
return team.verified
else:
return False
else:
return True
@cache.memoize()
def is_setup():
setup = Config.query.filter_by(key='setup').first()
if setup:
return setup.value
else:
return False
def is_admin():
if authed():
return session['admin']
else:
return False
@cache.memoize()
def can_register():
return not bool(get_config('prevent_registration'))
def admins_only(f):
@wraps(f)
def decorated_function(*args, **kwargs):
if session.get('admin'):
return f(*args, **kwargs)
else:
return redirect(url_for('auth.login'))
return decorated_function
@cache.memoize()
def view_after_ctf():
return bool(get_config('view_after_ctf'))
def ctftime():
""" Checks whether it's CTF time or not. """
start = get_config("start")
end = get_config("end")
if start:
start = int(start)
else:
start = 0
if end:
end = int(end)
else:
end = 0
if start and end:
if start < time.time() < end:
# Within the two time bounds
return True
if start < time.time() and end == 0:
# CTF starts on a date but never ends
return True
if start == 0 and time.time() < end:
# CTF started but ends at a date
return True
if start == 0 and end == 0:
# CTF has no time requirements
return True
return False
def ctf_started():
return time.time() > int(get_config("start") or 0)
def ctf_ended():
if int(get_config("end") or 0):
return time.time() > int(get_config("end") or 0)
return False
def user_can_view_challenges():
config = bool(get_config('view_challenges_unregistered'))
verify_emails = bool(get_config('verify_emails'))
if config:
return (authed() and is_on_team()) or config
else:
return (authed() and is_on_team())
def is_on_team():
user = Users.query.filter_by(id=session.get('id')).first()
if user:
return bool(user.teamid)
return False
def unix_time(dt):
return int((dt - datetime.datetime(1970, 1, 1)).total_seconds())
def unix_time_millis(dt):
return unix_time(dt) * 1000
def get_ip():
""" Returns the IP address of the currently in scope request. The approach is to define a list of trusted proxies
(in this case the local network), and only trust the most recently defined untrusted IP address.
Taken from http://stackoverflow.com/a/22936947/4285524 but the generator there makes no sense.
    The trusted_proxies regexes are taken from Ruby on Rails.
This has issues if the clients are also on the local network so you can remove proxies from config.py.
CTFd does not use IP address for anything besides cursory tracking of teams and it is ill-advised to do much
more than that if you do not know what you're doing.
"""
trusted_proxies = app.config['TRUSTED_PROXIES']
combined = "(" + ")|(".join(trusted_proxies) + ")"
route = request.access_route + [request.remote_addr]
for addr in reversed(route):
if not re.match(combined, addr): # IP is not trusted but we trust the proxies
remote_addr = addr
break
else:
remote_addr = request.remote_addr
return remote_addr
def get_kpm(userid): # keys per minute
one_min_ago = datetime.datetime.utcnow() + datetime.timedelta(minutes=-1)
return len(db.session.query(WrongKeys).filter(WrongKeys.userid == userid, WrongKeys.date >= one_min_ago).all())
def get_themes():
dir = os.path.join(app.root_path, app.template_folder)
return [name for name in os.listdir(dir)
if os.path.isdir(os.path.join(dir, name))]
@cache.memoize()
def get_config(key):
config = Config.query.filter_by(key=key).first()
if config and config.value:
value = config.value
if value and value.isdigit():
return int(value)
elif value and isinstance(value, six.string_types):
if value.lower() == 'true':
return True
elif value.lower() == 'false':
return False
else:
return value
else:
set_config(key, None)
return None
def set_config(key, value):
config = Config.query.filter_by(key=key).first()
if config:
config.value = value
else:
config = Config(key, value)
db.session.add(config)
db.session.commit()
return config
@cache.memoize()
def can_send_mail():
return mailgun() or mailserver()
@cache.memoize()
def mailgun():
if (get_config('use_mailgun')):
return True
return False
@cache.memoize()
def mailserver():
if (get_config('use_mailserver')):
return True
return False
def get_smtp(host, port, username=None, password=None, TLS=None, SSL=None):
smtp = smtplib.SMTP(host, port)
smtp.ehlo()
if TLS:
smtp.starttls()
smtp.ehlo()
smtp.login(username, password)
return smtp
def sendmail(addr, mailtext):
if mailgun():
ctf_name = get_config('ctf_name')
mg_api_key = get_config('mg_api_key') or app.config.get('MAILGUN_API_KEY')
mg_base_url = get_config('mg_base_url') or app.config.get('MAILGUN_BASE_URL')
mailfrom_addr = get_config('mailfrom_addr') or app.config.get('MAILFROM_ADDR')
r = requests.post(
mg_base_url + '/messages',
auth=("api", mg_api_key),
data={"from": "{} Admin <{}>".format(ctf_name, mailfrom_addr),
"to": [addr],
"subject": "Message from {0}".format(ctf_name),
"text": text})
if r.status_code == 200:
return True
else:
return False
elif mailserver():
data = {
'host': get_config('mail_server'),
'port': int(get_config('mail_port'))
}
if get_config('mail_username'):
data['username'] = get_config('mail_username')
if get_config('mail_password'):
data['password'] = get_config('mail_password')
if get_config('mail_tls'):
data['TLS'] = get_config('mail_tls')
if get_config('mail_ssl'):
data['SSL'] = get_config('mail_ssl')
smtp = get_smtp(**data)
msg = MIMEText(mailtext)
msg['Subject'] = "Message from {0}".format(get_config('ctf_name'))
msg['From'] = get_config('mailfrom_addr')
msg['To'] = addr
smtp.sendmail(msg['From'], [msg['To']], msg.as_string())
smtp.quit()
return True
else:
return False
def verify_email(addr):
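    # Sign the address with the application secret so the confirmation token cannot be forged.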
s = Signer(app.config['SECRET_KEY'])
token = s.sign(addr)
text = """Please click the following link to confirm your email address for {}: {}""".format(
get_config('ctf_name'),
url_for('auth.confirm_user', _external=True) + '/' + urllib.quote_plus(token.encode('base64'))
)
sendmail(addr, text)
def rmdir(dir):
shutil.rmtree(dir, ignore_errors=True)
def is_safe_url(target):
ref_url = urlparse(request.host_url)
test_url = urlparse(urljoin(request.host_url, target))
return test_url.scheme in ('http', 'https') and ref_url.netloc == test_url.netloc
def validate_url(url):
return urlparse(url).scheme.startswith('http')
def sha512(string):
return hashlib.sha512(string).hexdigest()
@cache.memoize()
def can_create_container():
try:
output = subprocess.check_output(['docker', 'version'])
return True
except (subprocess.CalledProcessError, OSError):
return False
def is_port_free(port):
s = socket()
result = s.connect_ex(('127.0.0.1', port))
if result == 0:
s.close()
return False
return True
def create_image(name, buildfile, files):
if not can_create_container():
return False
folder = tempfile.mkdtemp(prefix='ctfd')
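    # Build in a throw-away directory: the Dockerfile content plus any
    # uploaded files become the docker build context.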
tmpfile = tempfile.NamedTemporaryFile(dir=folder, delete=False)
tmpfile.write(buildfile)
tmpfile.close()
for f in files:
if f.filename.strip():
filename = os.path.basename(f.filename)
f.save(os.path.join(folder, filename))
# repository name component must match "[a-z0-9](?:-*[a-z0-9])*(?:[._][a-z0-9](?:-*[a-z0-9])*)*"
# docker build -f tmpfile.name -t name
try:
cmd = ['docker', 'build', '-f', tmpfile.name, '-t', name, folder]
print cmd
subprocess.call(cmd)
container = Containers(name, buildfile)
db.session.add(container)
db.session.commit()
db.session.close()
rmdir(folder)
return True
except subprocess.CalledProcessError:
return False
def delete_image(name):
try:
subprocess.call(['docker', 'rm', name])
subprocess.call(['docker', 'rmi', name])
return True
except subprocess.CalledProcessError:
return False
def run_image(name):
try:
info = json.loads(subprocess.check_output(['docker', 'inspect', '--type=image', name]))
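        # An image may not declare any exposed ports, in which case
        # 'ExposedPorts' is missing from the inspect output.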
try:
ports_asked = info[0]['Config']['ExposedPorts'].keys()
ports_asked = [int(re.sub('[A-Za-z/]+', '', port)) for port in ports_asked]
except KeyError:
ports_asked = []
cmd = ['docker', 'run', '-d']
for port in ports_asked:
if is_port_free(port):
cmd.append('-p')
cmd.append('{}:{}'.format(port, port))
            else:
                # The host port is already taken: publish only the container
                # port and let Docker pick a free host port.
                cmd.append('-p')
                cmd.append('{}'.format(port))
cmd += ['--name', name, name]
print cmd
subprocess.call(cmd)
return True
except subprocess.CalledProcessError:
return False
def container_start(name):
try:
cmd = ['docker', 'start', name]
subprocess.call(cmd)
return True
except subprocess.CalledProcessError:
return False
def container_stop(name):
try:
cmd = ['docker', 'stop', name]
subprocess.call(cmd)
return True
except subprocess.CalledProcessError:
return False
def container_status(name):
try:
data = json.loads(subprocess.check_output(['docker', 'inspect', '--type=container', name]))
status = data[0]["State"]["Status"]
return status
except subprocess.CalledProcessError:
return 'missing'
def container_ports(name, verbose=False):
try:
info = json.loads(subprocess.check_output(['docker', 'inspect', '--type=container', name]))
if verbose:
ports = info[0]["NetworkSettings"]["Ports"]
if not ports:
return []
final = []
for port in ports.keys():
final.append("".join([ports[port][0]["HostPort"], '->', port]))
return final
else:
ports = info[0]['Config']['ExposedPorts'].keys()
if not ports:
return []
            ports = [int(re.sub('[A-Za-z/]+', '', port)) for port in ports]
return ports
except subprocess.CalledProcessError:
return []
| RITC3/RC3_CTFD | CTFd/utils.py | Python | apache-2.0 | 16,511 | [
"VisIt"
] | 266013318c8f49def494d72724569c64a8b5c449ea0f6770f76d10bd0b635cae |
# -*- coding: utf-8 -*-
# Copyright 2010 Dirk Holtwick, holtwick.it
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import logging
import re
import xml.dom.minidom
from xml.dom import Node
import html5lib
from html5lib import treebuilders
from reportlab.platypus.doctemplate import FrameBreak, NextPageTemplate
from reportlab.platypus.flowables import KeepInFrame, PageBreak
from xhtml2pdf.default import BOOL, BOX, COLOR, FILE, FONT, INT, MUST, POS, SIZE, STRING, TAGS
# TODO: Why do we need to import these Tags here? They aren't uses in this file or any other file,
# but if we don't import them, Travis & AppVeyor fail. Very strange (fbernhart)
from xhtml2pdf.tables import (TableData,
pisaTagTABLE,
pisaTagTD,
pisaTagTR,
pisaTagTH)
from xhtml2pdf.tags import (pisaTagIMG,
pisaTagPDFLANGUAGE,
pisaTagPDFNEXTPAGE,
pisaTag,
pisaTagA,
pisaTagBODY,
pisaTagBR,
pisaTagDIV,
pisaTagFONT,
pisaTagH1,
pisaTagH2,
pisaTagH3,
pisaTagH4,
pisaTagH5,
pisaTagH6,
pisaTagHR,
pisaTagLI,
pisaTagMETA,
pisaTagOL,
pisaTagP,
pisaTagPDFBARCODE,
pisaTagPDFFONT,
pisaTagPDFFRAME,
pisaTagPDFNEXTFRAME,
pisaTagPDFNEXTTEMPLATE,
pisaTagPDFPAGECOUNT,
pisaTagPDFPAGENUMBER,
pisaTagPDFSPACER,
pisaTagPDFTEMPLATE,
pisaTagPDFTOC,
pisaTagSTYLE,
pisaTagSUB,
pisaTagSUP,
pisaTagTITLE,
pisaTagUL,
# pisaTagINPUT,
# pisaTagTEXTAREA,
# pisaTagSELECT,
# pisaTagOPTION
)
from xhtml2pdf.util import getAlign, getBool, getBox, getColor, getPos, getSize, pisaTempFile, toList, transform_attrs
from xhtml2pdf.w3c import cssDOMElementInterface
from xhtml2pdf.xhtml2pdf_reportlab import PmlLeftPageBreak, PmlRightPageBreak
CSSAttrCache = {}
log = logging.getLogger("xhtml2pdf")
rxhttpstrip = re.compile("https?://[^/]+(.*)", re.M | re.I)
class AttrContainer(dict):
def __getattr__(self, name):
try:
return dict.__getattr__(self, name)
except:
return self[name]
def pisaGetAttributes(c, tag, attributes):
attrs = {}
if attributes:
for k, v in attributes.items():
try:
# XXX no Unicode! Reportlab fails with template names
attrs[str(k)] = str(v)
except:
attrs[k] = v
nattrs = {}
if tag in TAGS:
block, adef = TAGS[tag]
adef["id"] = STRING
for k, v in adef.items():
nattrs[k] = None
# print k, v
# defaults, wenn vorhanden
if type(v) == tuple:
if v[1] == MUST:
if k not in attrs:
log.warning(
c.warning("Attribute '%s' must be set!", k))
nattrs[k] = None
continue
nv = attrs.get(k, v[1])
dfl = v[1]
v = v[0]
else:
nv = attrs.get(k, None)
dfl = None
if nv is not None:
if type(v) == list:
nv = nv.strip().lower()
if nv not in v:
#~ raise PML_EXCEPTION, "attribute '%s' of wrong value, allowed is one of: %s" % (k, repr(v))
log.warning(
c.warning("Attribute '%s' of wrong value, allowed is one of: %s", k, repr(v)))
nv = dfl
elif v == BOOL:
nv = nv.strip().lower()
nv = nv in ("1", "y", "yes", "true", str(k))
elif v == SIZE:
try:
nv = getSize(nv)
except:
log.warning(
c.warning("Attribute '%s' expects a size value", k))
elif v == BOX:
nv = getBox(nv, c.pageSize)
elif v == POS:
nv = getPos(nv, c.pageSize)
elif v == INT:
nv = int(nv)
elif v == COLOR:
nv = getColor(nv)
elif v == FILE:
nv = c.getFile(nv)
elif v == FONT:
nv = c.getFontName(nv)
nattrs[k] = nv
return AttrContainer(nattrs)
attrNames = '''
color
font-family
font-size
font-weight
font-style
text-decoration
line-height
letter-spacing
background-color
display
margin-left
margin-right
margin-top
margin-bottom
padding-left
padding-right
padding-top
padding-bottom
border-top-color
border-top-style
border-top-width
border-bottom-color
border-bottom-style
border-bottom-width
border-left-color
border-left-style
border-left-width
border-right-color
border-right-style
border-right-width
text-align
vertical-align
width
height
zoom
page-break-after
page-break-before
list-style-type
list-style-image
white-space
text-indent
-pdf-page-break
-pdf-frame-break
-pdf-next-page
-pdf-keep-with-next
-pdf-outline
-pdf-outline-level
-pdf-outline-open
-pdf-line-spacing
-pdf-keep-in-frame-mode
-pdf-word-wrap
'''.strip().split()
def getCSSAttr(self, cssCascade, attrName, default=NotImplemented):
if attrName in self.cssAttrs:
return self.cssAttrs[attrName]
try:
result = cssCascade.findStyleFor(self.cssElement, attrName, default)
except LookupError:
result = None
# XXX Workaround for inline styles
try:
style = self.cssStyle
except:
style = self.cssStyle = cssCascade.parser.parseInline(
self.cssElement.getStyleAttr() or '')[0]
if attrName in style:
result = style[attrName]
if result == 'inherit':
if hasattr(self.parentNode, 'getCSSAttr'):
result = self.parentNode.getCSSAttr(cssCascade, attrName, default)
elif default is not NotImplemented:
return default
raise LookupError(
"Could not find inherited CSS attribute value for '%s'" % (attrName,))
if result is not None:
self.cssAttrs[attrName] = result
return result
# TODO: Monkeypatching standard lib should go away.
xml.dom.minidom.Element.getCSSAttr = getCSSAttr
# Create an aliasing system. Many sources use non-standard tags, because browsers allow
# them to. This allows us to map a nonstandard name to the standard one.
nonStandardAttrNames = {
'bgcolor': 'background-color',
}
def mapNonStandardAttrs(c, n, attrList):
for attr in nonStandardAttrNames:
if attr in attrList and nonStandardAttrNames[attr] not in c:
c[nonStandardAttrNames[attr]] = attrList[attr]
return c
def getCSSAttrCacheKey(node):
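    # The computed CSS of a node depends on its parent, its tag name and its
    # class/id/style attributes, so those make up the cache key.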
_cl = _id = _st = ''
for k, v in node.attributes.items():
if k == 'class':
_cl = v
elif k == 'id':
_id = v
elif k == 'style':
_st = v
return "%s#%s#%s#%s#%s" % (id(node.parentNode), node.tagName.lower(), _cl, _id, _st)
def CSSCollect(node, c):
#node.cssAttrs = {}
# return node.cssAttrs
if c.css:
_key = getCSSAttrCacheKey(node)
if hasattr(node.parentNode, "tagName"):
if node.parentNode.tagName.lower() != "html":
CachedCSSAttr = CSSAttrCache.get(_key, None)
if CachedCSSAttr is not None:
node.cssAttrs = CachedCSSAttr
return CachedCSSAttr
node.cssElement = cssDOMElementInterface.CSSDOMElementInterface(node)
node.cssAttrs = {}
# node.cssElement.onCSSParserVisit(c.cssCascade.parser)
cssAttrMap = {}
for cssAttrName in attrNames:
try:
cssAttrMap[cssAttrName] = node.getCSSAttr(
c.cssCascade, cssAttrName)
# except LookupError:
# pass
except Exception: # TODO: Kill this catch-all!
log.debug("CSS error '%s'", cssAttrName, exc_info=1)
CSSAttrCache[_key] = node.cssAttrs
return node.cssAttrs
def lower(sequence):
if isinstance(sequence, str):
return sequence.lower()
else:
return sequence[0].lower()
def CSS2Frag(c, kw, isBlock):
# COLORS
if "color" in c.cssAttr:
c.frag.textColor = getColor(c.cssAttr["color"])
if "background-color" in c.cssAttr:
c.frag.backColor = getColor(c.cssAttr["background-color"])
# FONT SIZE, STYLE, WEIGHT
if "font-family" in c.cssAttr:
c.frag.fontName = c.getFontName(c.cssAttr["font-family"])
if "font-size" in c.cssAttr:
# XXX inherit
c.frag.fontSize = max(
getSize("".join(c.cssAttr["font-size"]), c.frag.fontSize, c.baseFontSize), 1.0)
if "line-height" in c.cssAttr:
leading = "".join(c.cssAttr["line-height"])
c.frag.leading = getSize(leading, c.frag.fontSize)
c.frag.leadingSource = leading
else:
c.frag.leading = getSize(c.frag.leadingSource, c.frag.fontSize)
if "letter-spacing" in c.cssAttr:
c.frag.letterSpacing = c.cssAttr["letter-spacing"]
if "-pdf-line-spacing" in c.cssAttr:
c.frag.leadingSpace = getSize("".join(c.cssAttr["-pdf-line-spacing"]))
# print "line-spacing", c.cssAttr["-pdf-line-spacing"], c.frag.leading
if "font-weight" in c.cssAttr:
value = lower(c.cssAttr["font-weight"])
if value in ("bold", "bolder", "500", "600", "700", "800", "900"):
c.frag.bold = 1
else:
c.frag.bold = 0
for value in toList(c.cssAttr.get("text-decoration", "")):
if "underline" in value:
c.frag.underline = 1
if "line-through" in value:
c.frag.strike = 1
if "none" in value:
c.frag.underline = 0
c.frag.strike = 0
if "font-style" in c.cssAttr:
value = lower(c.cssAttr["font-style"])
if value in ("italic", "oblique"):
c.frag.italic = 1
else:
c.frag.italic = 0
if "white-space" in c.cssAttr:
# normal | pre | nowrap
c.frag.whiteSpace = str(c.cssAttr["white-space"]).lower()
# ALIGN & VALIGN
if "text-align" in c.cssAttr:
c.frag.alignment = getAlign(c.cssAttr["text-align"])
if "vertical-align" in c.cssAttr:
c.frag.vAlign = c.cssAttr["vertical-align"]
# HEIGHT & WIDTH
if "height" in c.cssAttr:
try:
# XXX Relative is not correct!
c.frag.height = "".join(toList(c.cssAttr["height"]))
except TypeError:
# sequence item 0: expected string, tuple found
c.frag.height = "".join(toList(c.cssAttr["height"][0]))
if c.frag.height in ("auto",):
c.frag.height = None
if "width" in c.cssAttr:
try:
# XXX Relative is not correct!
c.frag.width = "".join(toList(c.cssAttr["width"]))
except TypeError:
c.frag.width = "".join(toList(c.cssAttr["width"][0]))
if c.frag.width in ("auto",):
c.frag.width = None
# ZOOM
if "zoom" in c.cssAttr:
# XXX Relative is not correct!
zoom = "".join(toList(c.cssAttr["zoom"]))
if zoom.endswith("%"):
zoom = float(zoom[: - 1]) / 100.0
c.frag.zoom = float(zoom)
# MARGINS & LIST INDENT, STYLE
if isBlock:
transform_attrs(c.frag,
(("spaceBefore", "margin-top"),
("spaceAfter", "margin-bottom"),
("firstLineIndent", "text-indent"),
),
c.cssAttr,
getSize,
extras=c.frag.fontSize
)
if "margin-left" in c.cssAttr:
c.frag.bulletIndent = kw["margin-left"] # For lists
kw["margin-left"] += getSize(c.cssAttr["margin-left"],
c.frag.fontSize)
c.frag.leftIndent = kw["margin-left"]
if "margin-right" in c.cssAttr:
kw["margin-right"] += getSize(
c.cssAttr["margin-right"], c.frag.fontSize)
c.frag.rightIndent = kw["margin-right"]
if "list-style-type" in c.cssAttr:
c.frag.listStyleType = str(c.cssAttr["list-style-type"]).lower()
if "list-style-image" in c.cssAttr:
c.frag.listStyleImage = c.getFile(c.cssAttr["list-style-image"])
# PADDINGS
if isBlock:
transform_attrs(c.frag,
(("paddingTop", "padding-top"),
("paddingBottom", "padding-bottom"),
("paddingLeft", "padding-left"),
("paddingRight", "padding-right"),
),
c.cssAttr,
getSize,
extras=c.frag.fontSize
)
# BORDERS
if isBlock:
transform_attrs(c.frag,
(("borderTopWidth", "border-top-width"),
("borderBottomWidth", "border-bottom-width"),
("borderLeftWidth", "border-left-width"),
("borderRightWidth", "border-right-width"),
),
c.cssAttr,
getSize,
extras=c.frag.fontSize
)
transform_attrs(c.frag,
(
("borderTopStyle", "border-top-style"),
("borderBottomStyle", "border-bottom-style"),
("borderLeftStyle", "border-left-style"),
("borderRightStyle", "border-right-style")
),
c.cssAttr,
lambda x: x
)
transform_attrs(c.frag,
(
("borderTopColor", "border-top-color"),
("borderBottomColor", "border-bottom-color"),
("borderLeftColor", "border-left-color"),
("borderRightColor", "border-right-color")
),
c.cssAttr,
getColor
)
def pisaPreLoop(node, context, collect=False):
"""
Collect all CSS definitions
"""
data = u""
if node.nodeType == Node.TEXT_NODE and collect:
data = node.data
elif node.nodeType == Node.ELEMENT_NODE:
name = node.tagName.lower()
if name in ("style", "link"):
attr = pisaGetAttributes(context, name, node.attributes)
media = [x.strip()
for x in attr.media.lower().split(",") if x.strip()]
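            # Only collect stylesheets of type text/css whose media applies to print/pdf (or all).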
if attr.get("type", "").lower() in ("", "text/css") and \
(not media or "all" in media or "print" in media or "pdf" in media):
if name == "style":
for node in node.childNodes:
data += pisaPreLoop(node, context, collect=True)
context.addCSS(data)
return u""
if name == "link" and attr.href and attr.rel.lower() == "stylesheet":
# print "CSS LINK", attr
context.addCSS('\n@import "%s" %s;' %
(attr.href, ",".join(media)))
for node in node.childNodes:
result = pisaPreLoop(node, context, collect=collect)
if collect:
data += result
return data
def pisaLoop(node, context, path=None, **kw):
if path is None:
path = []
# Initialize KW
if not kw:
kw = {
"margin-top": 0,
"margin-bottom": 0,
"margin-left": 0,
"margin-right": 0,
}
else:
kw = copy.copy(kw)
# indent = len(path) * " " # only used for debug print statements
# TEXT
if node.nodeType == Node.TEXT_NODE:
# print indent, "#", repr(node.data) #, context.frag
context.addFrag(node.data)
# context.text.append(node.value)
# ELEMENT
elif node.nodeType == Node.ELEMENT_NODE:
node.tagName = node.tagName.replace(":", "").lower()
if node.tagName in ("style", "script"):
return
path = copy.copy(path) + [node.tagName]
# Prepare attributes
attr = pisaGetAttributes(context, node.tagName, node.attributes)
# log.debug(indent + "<%s %s>" % (node.tagName, attr) +
# repr(node.attributes.items())) #, path
# Calculate styles
context.cssAttr = CSSCollect(node, context)
context.cssAttr = mapNonStandardAttrs(context.cssAttr, node, attr)
context.node = node
# Block?
PAGE_BREAK = 1
PAGE_BREAK_RIGHT = 2
PAGE_BREAK_LEFT = 3
pageBreakAfter = False
frameBreakAfter = False
display = lower(context.cssAttr.get("display", "inline"))
# print indent, node.tagName, display,
# context.cssAttr.get("background-color", None), attr
isBlock = (display == "block")
if isBlock:
context.addPara()
# Page break by CSS
if "-pdf-next-page" in context.cssAttr:
context.addStory(
NextPageTemplate(str(context.cssAttr["-pdf-next-page"])))
if "-pdf-page-break" in context.cssAttr:
if str(context.cssAttr["-pdf-page-break"]).lower() == "before":
context.addStory(PageBreak())
if "-pdf-frame-break" in context.cssAttr:
if str(context.cssAttr["-pdf-frame-break"]).lower() == "before":
context.addStory(FrameBreak())
if str(context.cssAttr["-pdf-frame-break"]).lower() == "after":
frameBreakAfter = True
if "page-break-before" in context.cssAttr:
if str(context.cssAttr["page-break-before"]).lower() == "always":
context.addStory(PageBreak())
if str(context.cssAttr["page-break-before"]).lower() == "right":
context.addStory(PageBreak())
context.addStory(PmlRightPageBreak())
if str(context.cssAttr["page-break-before"]).lower() == "left":
context.addStory(PageBreak())
context.addStory(PmlLeftPageBreak())
if "page-break-after" in context.cssAttr:
if str(context.cssAttr["page-break-after"]).lower() == "always":
pageBreakAfter = PAGE_BREAK
if str(context.cssAttr["page-break-after"]).lower() == "right":
pageBreakAfter = PAGE_BREAK_RIGHT
if str(context.cssAttr["page-break-after"]).lower() == "left":
pageBreakAfter = PAGE_BREAK_LEFT
if display == "none":
# print "none!"
return
# Translate CSS to frags
# Save previous frag styles
context.pushFrag()
# Map styles to Reportlab fragment properties
CSS2Frag(context, kw, isBlock)
# EXTRAS
transform_attrs(context.frag,
(
("keepWithNext", "-pdf-keep-with-next"),
("outline", "-pdf-outline"),
#("borderLeftColor", "-pdf-outline-open"),
),
context.cssAttr,
getBool
)
if "-pdf-outline-level" in context.cssAttr:
context.frag.outlineLevel = int(
context.cssAttr["-pdf-outline-level"])
if "-pdf-word-wrap" in context.cssAttr:
context.frag.wordWrap = context.cssAttr["-pdf-word-wrap"]
# handle keep-in-frame
keepInFrameMode = None
keepInFrameMaxWidth = 0
keepInFrameMaxHeight = 0
if "-pdf-keep-in-frame-mode" in context.cssAttr:
value = str(
context.cssAttr["-pdf-keep-in-frame-mode"]).strip().lower()
if value in ("shrink", "error", "overflow", "truncate"):
keepInFrameMode = value
else:
keepInFrameMode = "shrink"
# Added because we need a default value.
if "-pdf-keep-in-frame-max-width" in context.cssAttr:
keepInFrameMaxWidth = getSize(
"".join(context.cssAttr["-pdf-keep-in-frame-max-width"]))
if "-pdf-keep-in-frame-max-height" in context.cssAttr:
keepInFrameMaxHeight = getSize(
"".join(context.cssAttr["-pdf-keep-in-frame-max-height"]))
# ignore nested keep-in-frames, tables have their own KIF handling
keepInFrame = keepInFrameMode is not None and context.keepInFrameIndex is None
if keepInFrame:
            # keep track of current story index, so we can wrap everything
# added after this point in a KeepInFrame
context.keepInFrameIndex = len(context.story)
# BEGIN tag
klass = globals().get("pisaTag%s" %
node.tagName.replace(":", "").upper(), None)
obj = None
# Static block
elementId = attr.get("id", None)
staticFrame = context.frameStatic.get(elementId, None)
if staticFrame:
context.frag.insideStaticFrame += 1
oldStory = context.swapStory()
# Tag specific operations
if klass is not None:
obj = klass(node, attr)
obj.start(context)
# Visit child nodes
context.fragBlock = fragBlock = copy.copy(context.frag)
for nnode in node.childNodes:
pisaLoop(nnode, context, path, **kw)
context.fragBlock = fragBlock
# END tag
if obj:
obj.end(context)
# Block?
if isBlock:
context.addPara()
# XXX Buggy!
# Page break by CSS
if pageBreakAfter:
context.addStory(PageBreak())
if pageBreakAfter == PAGE_BREAK_RIGHT:
context.addStory(PmlRightPageBreak())
if pageBreakAfter == PAGE_BREAK_LEFT:
context.addStory(PmlLeftPageBreak())
if frameBreakAfter:
context.addStory(FrameBreak())
if keepInFrame:
# get all content added after start of -pdf-keep-in-frame and wrap
# it in a KeepInFrame
substory = context.story[context.keepInFrameIndex:]
context.story = context.story[:context.keepInFrameIndex]
context.story.append(
KeepInFrame(
content=substory,
maxWidth=keepInFrameMaxWidth,
maxHeight=keepInFrameMaxHeight,
mode=keepInFrameMode))
# mode wasn't being used; it is necessary for tables or images at
# end of page.
context.keepInFrameIndex = None
# Static block, END
if staticFrame:
context.addPara()
for frame in staticFrame:
frame.pisaStaticStory = context.story
context.swapStory(oldStory)
context.frag.insideStaticFrame -= 1
# context.debug(1, indent, "</%s>" % (node.tagName))
# Reset frag style
context.pullFrag()
# Unknown or not handled
else:
# context.debug(1, indent, "???", node, node.nodeType, repr(node))
# Loop over children
for node in node.childNodes:
pisaLoop(node, context, path, **kw)
def pisaParser(src, context, default_css="", xhtml=False, encoding=None, xml_output=None):
"""
- Parse HTML and get miniDOM
- Extract CSS informations, add default CSS, parse CSS
- Handle the document DOM itself and build reportlab story
- Return Context object
"""
global CSSAttrCache
CSSAttrCache = {}
if xhtml:
# TODO: XHTMLParser doesn't seem to exist...
parser = html5lib.XHTMLParser(tree=treebuilders.getTreeBuilder("dom"))
else:
parser = html5lib.HTMLParser(tree=treebuilders.getTreeBuilder("dom"))
parser_kwargs = {}
if isinstance(src, str):
# If an encoding was provided, do not change it.
if not encoding:
encoding = "utf-8"
src = src.encode(encoding)
src = pisaTempFile(src, capacity=context.capacity)
# To pass the encoding used to convert the text_type src to binary_type
# on to html5lib's parser to ensure proper decoding
parser_kwargs['transport_encoding'] = encoding
# # Test for the restrictions of html5lib
# if encoding:
# # Workaround for html5lib<0.11.1
# if hasattr(inputstream, "isValidEncoding"):
# if encoding.strip().lower() == "utf8":
# encoding = "utf-8"
# if not inputstream.isValidEncoding(encoding):
# log.error("%r is not a valid encoding e.g. 'utf8' is not valid but 'utf-8' is!", encoding)
# else:
# if inputstream.codecName(encoding) is None:
# log.error("%r is not a valid encoding", encoding)
document = parser.parse(
src, **parser_kwargs
) # encoding=encoding)
if xml_output:
if encoding:
xml_output.write(document.toprettyxml(encoding=encoding))
else:
xml_output.write(document.toprettyxml(encoding="utf8"))
if default_css:
context.addDefaultCSS(default_css)
pisaPreLoop(document, context)
# try:
context.parseCSS()
# except:
# context.cssText = DEFAULT_CSS
# context.parseCSS()
# context.debug(9, pprint.pformat(context.css))
pisaLoop(document, context)
return context
# Shortcuts
HTML2PDF = pisaParser
def XHTML2PDF(*a, **kw):
kw["xhtml"] = True
return HTML2PDF(*a, **kw)
XML2PDF = XHTML2PDF
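# --- Usage sketch (illustrative, not part of the original module) ---
# pisaParser() is normally driven by xhtml2pdf's pisaDocument(), which builds
# the context object and then renders context.story into a PDF. A direct call
# might look roughly like the lines below; the pisaContext constructor
# arguments shown are an assumption for illustration only.
#
#   from xhtml2pdf.context import pisaContext
#   ctx = pisaContext(".")                      # assumed constructor signature
#   ctx = pisaParser("<p>Hello world</p>", ctx, default_css="")
#   # ctx.story now holds the ReportLab flowables built by pisaLoop()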
| xhtml2pdf/xhtml2pdf | xhtml2pdf/parser.py | Python | apache-2.0 | 27,875 | [
"VisIt"
] | 6d23f9c64e6e3b8317bd116a45161c26af4060ff86318147b30feadec7d847dd |
#
# Copyright (C) 2010-2018 The ESPResSo project
# Copyright (C) 2002,2003,2004,2005,2006,2007,2008,2009,2010
# Max-Planck-Institute for Polymer Research, Theory Group
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import espressomd
from espressomd import assert_features, electrostatics, electrostatic_extensions
from espressomd.shapes import Wall
from espressomd import visualization_opengl
import numpy
from threading import Thread
from time import sleep
assert_features(["ELECTROSTATICS", "MASS", "LENNARD_JONES"])
system = espressomd.System(box_l=[1.0, 1.0, 1.0])
system.seed = system.cell_system.get_state()['n_nodes'] * [1234]
numpy.random.seed(system.seed)
print("\n--->Setup system")
# System parameters
n_part = 1000
n_ionpairs = n_part / 2
density = 1.1138
time_step = 0.001823
temp = 1198.3
gamma = 50
#l_bjerrum = 0.885^2 * e^2/(4*pi*epsilon_0*k_B*T)
l_bjerrum = 130878.0 / temp
Vz = 0 # potential difference between the electrodes
Ez = 364.5 # conversion from potential to electrical field
# Particle parameters
types = {"Cl": 0, "Na": 1, "Electrode": 2}
numbers = {"Cl": n_ionpairs, "Na": n_ionpairs}
charges = {"Cl": -1.0, "Na": 1.0}
lj_sigmas = {"Cl": 3.85, "Na": 2.52, "Electrode": 3.37}
lj_epsilons = {"Cl": 192.45, "Na": 17.44, "Electrode": 24.72}
lj_cuts = {"Cl": 3.0 * lj_sigmas["Cl"],
"Na": 3.0 * lj_sigmas["Na"],
"Electrode": 3.0 * lj_sigmas["Electrode"]}
masses = {"Cl": 35.453, "Na": 22.99, "Electrode": 12.01}
# Setup System
box_l = (n_ionpairs * sum(masses.values()) / density)**(1. / 3.)
box_z = box_l + 2.0 * (lj_sigmas["Electrode"])
box_volume = box_l * box_l * box_z
elc_gap = box_z * 0.15
system.box_l = [box_l, box_l, box_z + elc_gap]
system.periodicity = [True, True, True]
system.time_step = time_step
system.cell_system.skin = 0.3
system.thermostat.set_langevin(kT=temp, gamma=gamma, seed=42)
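# Note added for clarity: the ELC correction set up further below requires an
# empty gap region along z, which is why the simulation box is enlarged by
# elc_gap while the wall constraints keep all ions inside [0, box_z].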
# Visualizer
visualizer = visualization_opengl.openGLLive(
system,
camera_position=[-3 * box_l, box_l * 0.5, box_l * 0.5],
camera_right=[0, 0, 1],
drag_force=5 * 298,
background_color=[1, 1, 1],
light_pos=[30, 30, 30],
ext_force_arrows_type_scale=[0.0001],
ext_force_arrows=False)
# Walls
system.constraints.add(shape=Wall(
dist=0, normal=[0, 0, 1]), particle_type=types["Electrode"])
system.constraints.add(shape=Wall(
dist=-box_z, normal=[0, 0, -1]), particle_type=types["Electrode"])
# Place particles
for i in range(int(n_ionpairs)):
p = numpy.random.random(3) * box_l
p[2] += lj_sigmas["Electrode"]
system.part.add(id=len(system.part), type=types["Cl"],
pos=p, q=charges["Cl"], mass=masses["Cl"])
for i in range(int(n_ionpairs)):
p = numpy.random.random(3) * box_l
p[2] += lj_sigmas["Electrode"]
system.part.add(id=len(system.part), type=types["Na"],
pos=p, q=charges["Na"], mass=masses["Na"])
# Lennard-Jones interactions parameters
def combination_rule_epsilon(rule, eps1, eps2):
if rule == "Lorentz":
return (eps1 * eps2)**0.5
else:
        raise ValueError("No combination rule defined")
def combination_rule_sigma(rule, sig1, sig2):
if rule == "Berthelot":
return (sig1 + sig2) * 0.5
else:
        raise ValueError("No combination rule defined")
for s in [["Cl", "Na"], ["Cl", "Cl"], ["Na", "Na"], ["Na", "Electrode"], ["Cl", "Electrode"]]:
lj_sig = combination_rule_sigma(
"Berthelot", lj_sigmas[s[0]], lj_sigmas[s[1]])
lj_cut = combination_rule_sigma("Berthelot", lj_cuts[s[0]], lj_cuts[s[1]])
lj_eps = combination_rule_epsilon(
"Lorentz", lj_epsilons[s[0]], lj_epsilons[s[1]])
system.non_bonded_inter[types[s[0]], types[s[1]]].lennard_jones.set_params(
epsilon=lj_eps, sigma=lj_sig, cutoff=lj_cut, shift="auto")
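# Worked example (added for illustration): for the Na-Cl pair the rules above
# give sigma = (3.85 + 2.52) / 2 = 3.185 and epsilon = sqrt(192.45 * 17.44)
# ~ 57.9, i.e. the arithmetic mean of the sigmas and the geometric mean of the
# epsilons (Lorentz-Berthelot mixing).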
system.minimize_energy.init(
f_max=10, gamma=10, max_steps=2000, max_displacement=0.1)
system.minimize_energy.minimize()
print("\n--->Tuning Electrostatics")
p3m = electrostatics.P3M(prefactor=l_bjerrum, accuracy=1e-2)
system.actors.add(p3m)
elc = electrostatic_extensions.ELC(gap_size=elc_gap, maxPWerror=1e-3)
system.actors.add(elc)
def increaseElectricField():
    global Vz
    Vz += 3
    for p in system.part:
        p.ext_force = [0, 0, p.q * Vz * Ez]
    print('Potential difference: {:.0f} V'.format(Vz))
def decreaseElectricField():
    global Vz
    Vz -= 3
    for p in system.part:
        p.ext_force = [0, 0, p.q * Vz * Ez]
    print('Potential difference: {:.0f} V'.format(Vz))
# Register buttons
visualizer.keyboardManager.register_button(visualization_opengl.KeyboardButtonEvent(
'u', visualization_opengl.KeyboardFireEvent.Hold, increaseElectricField))
visualizer.keyboardManager.register_button(visualization_opengl.KeyboardButtonEvent(
'j', visualization_opengl.KeyboardFireEvent.Hold, decreaseElectricField))
def main():
print("\n--->Integration")
system.time = 0.0
while True:
system.integrator.run(1)
visualizer.update()
# Start simulation in separate thread
t = Thread(target=main)
t.daemon = True
t.start()
# Start blocking visualizer
visualizer.start()
| mkuron/espresso | doc/tutorials/02-charged_system/scripts/nacl_units_confined_vis.py | Python | gpl-3.0 | 5,739 | [
"ESPResSo"
] | 63c17f425eadc661dd9fd0296680c7f076b14e3bdd0e1d24d9d6bca69775b085 |
#!/opt/local/bin/python
"""
Calculate the total energy as a function of lattice constant,
by altering the lattice constant in POSCAR file.
And if possible, calculate equilibrium lattice size and
bulk modulus, too.
Usage:
energy_vs_size.py [options]
Options:
-h, --help Show this help message and exit.
-n NITER Number of points to be calculated. [default: 10]
-p Show a graph on the screen.
-s STRAIN STRAIN (%) applied to the lattice constant. [default: 10.0]
-x Not isotropic deformation, *x* direction is changed.
-y Not isotropic deformation, *y* direction is changed.
-z Not isotropic deformation, *z* direction is changed.
--LS Perform least square fitting to obtain lattice constant
and bulk modulus.
--cmd=CMD VASP execution command. [default: \"vasp > out.vasp\"]
"""
from __future__ import print_function
import sys,os,copy
import subprocess
from docopt import docopt
import numpy as np
from scipy.optimize import leastsq
_no_pyplot=False
try:
import matplotlib.pyplot as plt
except:
_no_pyplot=True
def read_POSCAR(fname='POSCAR'):
f=open(fname,'r')
#...1st line: comment
cmmt= f.readline()
#...read 1st line and get current lattice size
al= float(f.readline().split()[0])
hmat= np.zeros((3,3))
hmat[0]= [ float(x) for x in f.readline().split() ]
hmat[1]= [ float(x) for x in f.readline().split() ]
hmat[2]= [ float(x) for x in f.readline().split() ]
buffer= f.readline().split()
if buffer[0].isdigit():
natm= 0
for b in buffer:
natm += int(b)
else:
natm= 0
for b in f.readline().split():
natm += int(b)
f.close()
return (al,hmat,natm)
def get_vol(al,hmat):
a1= hmat[0:3,0] *al
a2= hmat[0:3,1] *al
a3= hmat[0:3,2] *al
return np.dot(a1,np.cross(a2,a3))
def replace_1st_line(x,fname='POSCAR'):
f=open(fname,'r')
ini= f.readlines()
f.close()
g=open(fname,'w')
for l in range(len(ini)):
if l == 1:
g.write(' {0:10.4f}\n'.format(x))
else:
g.write(ini[l])
g.close()
def replace_hmat(hmat,fname='POSCAR'):
f=open(fname,'r')
ini= f.readlines()
f.close()
g=open(fname,'w')
for l in range(len(ini)):
if 2 <= l <= 4:
g.write(' {0:12.7f} {1:12.7f} {2:12.7f}\n'.format(hmat[l-2,0],hmat[l-2,1],hmat[l-2,2]))
else:
g.write(ini[l])
g.close()
def residuals(p,y,x):
b,bp,v0,ev0= p
err= y -( b*x/(bp*(bp-1.0)) *(bp*(1.0-v0/x) +(v0/x)**bp -1.0) +ev0 )
return err
def peval(x,p):
b,bp,v0,ev0= p
return b*x/(bp*(bp-1.0)) *(bp*(1.0-v0/x) +(v0/x)**bp -1.0) +ev0
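# The fit model above is the Murnaghan equation of state (comment added for
# clarity):
#   E(V) = E0 + B*V/(B'*(B'-1)) * ( B'*(1 - V0/V) + (V0/V)**B' - 1 )
# with p = (B, B', V0, E0): B the bulk modulus, B' its pressure derivative,
# V0 the equilibrium volume and E0 the minimum energy. With E in eV and V in
# Angstrom^3, B is obtained in eV/Angstrom^3; the factor 1.602e+2 used below
# converts it to GPa (1 eV/Angstrom^3 ~ 160.2 GPa).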
if __name__ == '__main__':
args= docopt(__doc__)
niter= int(args['-n'])
show_graph= args['-p']
strain= float(args['-s'])
perform_ls= args['--LS']
cmd= args['--cmd']
mvx= args['-x']
mvy= args['-y']
mvz= args['-z']
if show_graph and _no_pyplot:
print("matplotlib.pyplot is not available in this sysytem.")
print("Run this script without -p option.")
sys.exit()
strain= strain/100
al_orig,hmat_orig,natm= read_POSCAR()
hmat= copy.copy(hmat_orig)
hmat_min= copy.copy(hmat_orig)
hmat_max= copy.copy(hmat_orig)
dhmat= np.zeros((3,3),dtype=float)
if not mvx and not mvy and not mvz:
al_min= al_orig*(1.0-strain)
al_max= al_orig*(1.0+strain)
dl= (al_max-al_min)/niter
else:
if mvx:
hmat_min[0]= hmat_orig[0]*(1.0-strain)
hmat_max[0]= hmat_orig[0]*(1.0+strain)
if mvy:
hmat_min[1]= hmat_orig[1]*(1.0-strain)
hmat_max[1]= hmat_orig[1]*(1.0+strain)
if mvz:
hmat_min[2]= hmat_orig[2]*(1.0-strain)
hmat_max[2]= hmat_orig[2]*(1.0+strain)
dhmat= (hmat_max -hmat_min)/niter
logfile= open('log.energy_vs_size','w')
outfile1= open('out.energy_vs_size','w')
for iter in range(niter+1):
dname= "energy-{0:05d}".format(iter)
if not mvx and not mvy and not mvz:
al= al_min +dl*iter
hmat= hmat_orig
replace_1st_line(al)
else:
al= al_orig
hmat= hmat_min +dhmat*iter
replace_hmat(hmat)
#os.system('vasp > out.vasp')
os.system(cmd)
erg= float(subprocess.getoutput("tail -n1 OSZICAR | awk '{print $5}'"))
os.system("mkdir -p "+dname)
os.system("cp INCAR OSZICAR OUTCAR vasprun.xml {0}/".format(dname))
vol= get_vol(al,hmat)
print(' {0:10.4f} {1:10.4f} {2:15.7f}'.format(al,vol,erg))
outfile1.write(' {0:10.4f} {1:10.4f} {2:15.7f}\n'.format(al,vol,erg))
logfile.write(' {0:10.4f} {1:10.4f} {2:15.7f}\n'.format(al,vol,erg))
outfile1.close()
if not mvx and not mvy and not mvz:
replace_1st_line(al_orig)
else:
replace_hmat(hmat_orig)
        print(' energy_vs_size finished; least square fitting is skipped for anisotropic (-x/-y/-z) deformation.')
sys.exit()
if not perform_ls:
print(' energy_vs_size finished without performing least square fitting...')
sys.exit()
#...prepare for Murnaghan fitting
f= open('out.energy_vs_size','r')
lines= f.readlines()
xarr= np.zeros((len(lines)))
yarr= np.zeros((len(lines)))
for l in range(len(lines)):
dat= lines[l].split()
xarr[l]= float(dat[1])
yarr[l]= float(dat[2])
f.close()
#...set initial values
b= 1.0
bp= 2.0
ev0= min(yarr)
    v0= xarr[len(xarr)//2]
p0= np.array([b,bp,v0,ev0])
#...least square fitting
plsq= leastsq(residuals,p0,args=(yarr,xarr))
#...output results
print(' plsq=',plsq[0])
print('{0:=^72}'.format(' RESULTS '))
logfile.write('{0:=^72}\n'.format(' RESULTS '))
a1= hmat_orig[0:3,0]
a2= hmat_orig[0:3,1]
a3= hmat_orig[0:3,2]
uvol= np.dot(a1,np.cross(a2,a3))
lc= (plsq[0][2]/uvol)**(1.0/3)
print(' Lattice constant = {0:10.4f} Ang.'.format(lc))
print(' Cohesive energy = {0:10.3f} eV'.format(plsq[0][3]/natm))
print(' Bulk modulus = {0:10.2f} GPa'.format(plsq[0][0]*1.602e+2))
logfile.write(' Lattice constant = {0:10.4f} Ang.\n'.format(lc))
logfile.write(' Cohesive energy = {0:10.3f} eV\n'.format(plsq[0][3]/natm))
logfile.write(' Bulk modulus = {0:10.2f} GPa\n'.format(plsq[0][0]*1.602e+2))
if show_graph:
plt.plot(xarr,peval(xarr,plsq[0]),xarr,yarr,'o')
plt.title('Data fitted with Murnaghan eq.')
plt.legend(['fitted','data'])
plt.xlabel('Volume (Ang.^3)')
plt.ylabel('Energy (eV)')
plt.savefig('graph.energy_vs_size.eps',dpi=150)
plt.show()
print('{0:=^72}'.format(' OUTPUT '))
print(' * out.energy_vs_size')
print(' * log.energy_vs_size')
print(' * graph.energy_vs_size.eps')
| ryokbys/nap | nappy/vasp/energy_vs_size.py | Python | mit | 6,895 | [
"VASP"
] | 01d2a6715dd9228b79fd5e7012f61bb2663e367e659a40c5ea127a58ed1ace9e |
#!/usr/bin/env python
'''
setup board.h for chibios
'''
import argparse, sys, fnmatch, os, dma_resolver, shlex, pickle
parser = argparse.ArgumentParser("chibios_pins.py")
parser.add_argument(
'-D', '--outdir', type=str, default=None, help='Output directory')
parser.add_argument(
'hwdef', type=str, default=None, help='hardware definition file')
args = parser.parse_args()
# output variables for each pin
vtypes = ['MODER', 'OTYPER', 'OSPEEDR', 'PUPDR', 'ODR', 'AFRL', 'AFRH']
# number of pins in each port
pincount = {
'A': 16,
'B': 16,
'C': 16,
'D': 16,
'E': 16,
'F': 16,
'G': 16,
'H': 2,
'I': 0,
'J': 0,
'K': 0
}
ports = pincount.keys()
portmap = {}
# dictionary of all config lines, indexed by first word
config = {}
# list of all pins in config file order
allpins = []
# list of configs by type
bytype = {}
# list of configs by label
bylabel = {}
# list of SPI devices
spidev = []
# SPI bus list
spi_list = []
# all config lines in order
alllines = []
# allow for extra env vars
env_vars = {}
mcu_type = None
def is_int(str):
'''check if a string is an integer'''
try:
int(str)
except Exception:
return False
return True
def error(str):
'''show an error and exit'''
print("Error: " + str)
sys.exit(1)
def get_alt_function(mcu, pin, function):
'''return alternative function number for a pin'''
import importlib
try:
lib = importlib.import_module(mcu)
alt_map = lib.AltFunction_map
except ImportError:
error("Unable to find module for MCU %s" % mcu)
if function and function.endswith("_RTS") and (
function.startswith('USART') or function.startswith('UART')):
# we do software RTS
return None
af_labels = ['USART', 'UART', 'SPI', 'I2C', 'SDIO', 'OTG', 'JT', 'TIM', 'CAN']
for l in af_labels:
if function.startswith(l):
s = pin + ":" + function
if not s in alt_map:
error("Unknown pin function %s for MCU %s" % (s, mcu))
return alt_map[s]
return None
def have_type_prefix(ptype):
'''return True if we have a peripheral starting with the given peripheral type'''
for t in bytype.keys():
if t.startswith(ptype):
return True
return False
def get_ADC1_chan(mcu, pin):
'''return ADC1 channel for an analog pin'''
import importlib
try:
lib = importlib.import_module(mcu)
ADC1_map = lib.ADC1_map
except ImportError:
error("Unable to find ADC1_Map for MCU %s" % mcu)
if not pin in ADC1_map:
error("Unable to find ADC1 channel for pin %s" % pin)
return ADC1_map[pin]
class generic_pin(object):
'''class to hold pin definition'''
def __init__(self, port, pin, label, type, extra):
self.portpin = "P%s%u" % (port, pin)
self.port = port
self.pin = pin
self.label = label
self.type = type
self.extra = extra
self.af = None
def has_extra(self, v):
'''return true if we have the given extra token'''
return v in self.extra
def extra_prefix(self, prefix):
'''find an extra token starting with the given prefix'''
for e in self.extra:
if e.startswith(prefix):
return e
return None
def extra_value(self, name, type=None, default=None):
'''find an extra value of given type'''
v = self.extra_prefix(name)
if v is None:
return default
if v[len(name)] != '(' or v[-1] != ')':
error("Badly formed value for %s: %s\n" % (name, v))
ret = v[len(name) + 1:-1]
if type is not None:
try:
ret = type(ret)
except Exception:
error("Badly formed value for %s: %s\n" % (name, ret))
return ret
def is_RTS(self):
'''return true if this is a RTS pin'''
if self.label and self.label.endswith("_RTS") and (
self.type.startswith('USART') or self.type.startswith('UART')):
return True
return False
def is_CS(self):
'''return true if this is a CS pin'''
return self.has_extra("CS") or self.type == "CS"
def get_MODER(self):
'''return one of ALTERNATE, OUTPUT, ANALOG, INPUT'''
if self.af is not None:
v = "ALTERNATE"
elif self.type == 'OUTPUT':
v = "OUTPUT"
elif self.type.startswith('ADC'):
v = "ANALOG"
elif self.is_CS():
v = "OUTPUT"
elif self.is_RTS():
v = "OUTPUT"
else:
v = "INPUT"
return "PIN_MODE_%s(%uU)" % (v, self.pin)
def get_OTYPER(self):
'''return one of PUSHPULL, OPENDRAIN'''
v = 'PUSHPULL'
if self.type.startswith('I2C'):
# default I2C to OPENDRAIN
v = 'OPENDRAIN'
values = ['PUSHPULL', 'OPENDRAIN']
for e in self.extra:
if e in values:
v = e
return "PIN_OTYPE_%s(%uU)" % (v, self.pin)
def get_OSPEEDR(self):
'''return one of SPEED_VERYLOW, SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH'''
# on STM32F4 these speeds correspond to 2MHz, 25MHz, 50MHz and 100MHz
values = ['SPEED_VERYLOW', 'SPEED_LOW', 'SPEED_MEDIUM', 'SPEED_HIGH']
v = 'SPEED_MEDIUM'
for e in self.extra:
if e in values:
v = e
return "PIN_O%s(%uU)" % (v, self.pin)
def get_PUPDR(self):
'''return one of FLOATING, PULLUP, PULLDOWN'''
values = ['FLOATING', 'PULLUP', 'PULLDOWN']
v = 'FLOATING'
if self.is_CS():
v = "PULLUP"
if (self.type.startswith('USART') or
self.type.startswith('UART')) and (
(self.label.endswith('_TX') or
self.label.endswith('_RX'))):
# default RX/TX lines to pullup, to prevent spurious bytes
# on disconnected ports
v = "PULLUP"
for e in self.extra:
if e in values:
v = e
return "PIN_PUPDR_%s(%uU)" % (v, self.pin)
def get_ODR(self):
'''return one of LOW, HIGH'''
values = ['LOW', 'HIGH']
v = 'HIGH'
for e in self.extra:
if e in values:
v = e
return "PIN_ODR_%s(%uU)" % (v, self.pin)
def get_AFIO(self):
'''return AFIO'''
af = self.af
if af is None:
af = 0
return "PIN_AFIO_AF(%uU, %uU)" % (self.pin, af)
def get_AFRL(self):
'''return AFIO low 8'''
if self.pin >= 8:
return None
return self.get_AFIO()
def get_AFRH(self):
'''return AFIO high 8'''
if self.pin < 8:
return None
return self.get_AFIO()
def __str__(self):
str = ''
if self.af is not None:
str += " AF%u" % self.af
if self.type.startswith('ADC1'):
str += " ADC1_IN%u" % get_ADC1_chan(mcu_type, self.portpin)
if self.extra_value('PWM', type=int):
str += " PWM%u" % self.extra_value('PWM', type=int)
return "P%s%u %s %s%s" % (self.port, self.pin, self.label, self.type,
str)
# setup default as input pins
for port in ports:
portmap[port] = []
for pin in range(pincount[port]):
portmap[port].append(generic_pin(port, pin, None, 'INPUT', []))
def get_config(name, column=0, required=True, default=None, type=None):
'''get a value from config dictionary'''
if not name in config:
if required and default is None:
error("missing required value %s in hwdef.dat" % name)
return default
if len(config[name]) < column + 1:
error("missing required value %s in hwdef.dat (column %u)" % (name,
column))
ret = config[name][column]
if type is not None:
try:
ret = type(ret)
except Exception:
error("Badly formed config value %s (got %s)" % (name, ret))
return ret
def enable_can(f):
'''setup for a CAN enabled board'''
f.write('#define HAL_WITH_UAVCAN 1\n')
env_vars['HAL_WITH_UAVCAN'] = '1'
def write_mcu_config(f):
'''write MCU config defines'''
f.write('// MCU type (ChibiOS define)\n')
f.write('#define %s_MCUCONF\n' % get_config('MCU'))
f.write('#define %s\n\n' % get_config('MCU', 1))
f.write('// crystal frequency\n')
f.write('#define STM32_HSECLK %sU\n\n' % get_config('OSCILLATOR_HZ'))
f.write('// UART used for stdout (printf)\n')
if get_config('STDOUT_SERIAL', required=False):
f.write('#define HAL_STDOUT_SERIAL %s\n\n' % get_config('STDOUT_SERIAL'))
f.write('// baudrate used for stdout (printf)\n')
f.write('#define HAL_STDOUT_BAUDRATE %u\n\n' % get_config('STDOUT_BAUDRATE', type=int))
if 'SDIO' in bytype:
f.write('// SDIO available, enable POSIX filesystem support\n')
f.write('#define USE_POSIX\n\n')
f.write('#define HAL_USE_SDC TRUE\n')
env_vars['CHIBIOS_FATFS_FLAG'] = 'USE_FATFS=yes'
else:
f.write('#define HAL_USE_SDC FALSE\n')
env_vars['CHIBIOS_FATFS_FLAG'] = 'USE_FATFS=no'
if 'OTG1' in bytype:
f.write('#define STM32_USB_USE_OTG1 TRUE\n')
f.write('#define HAL_USE_USB TRUE\n')
f.write('#define HAL_USE_SERIAL_USB TRUE\n')
if 'OTG2' in bytype:
f.write('#define STM32_USB_USE_OTG2 TRUE\n')
if have_type_prefix('CAN'):
enable_can(f)
# write any custom STM32 defines
for d in alllines:
if d.startswith('STM32_'):
f.write('#define %s\n' % d)
if d.startswith('define '):
f.write('#define %s\n' % d[7:])
flash_size = get_config('FLASH_SIZE_KB', type=int)
f.write('#define BOARD_FLASH_SIZE %u\n' % flash_size)
f.write('#define CRT1_AREAS_NUMBER 1\n')
if mcu_type in ['STM32F427xx', 'STM32F405xx']:
def_ccm_size = 64
else:
def_ccm_size = None
ccm_size = get_config(
'CCM_RAM_SIZE_KB', default=def_ccm_size, required=False, type=int)
if ccm_size is not None:
f.write('#define CCM_RAM_SIZE %u\n' % ccm_size)
f.write('\n')
def write_ldscript(fname):
'''write ldscript.ld for this board'''
flash_size = get_config('FLASH_SIZE_KB', type=int)
# space to reserve for bootloader and storage at start of flash
flash_reserve_start = get_config(
'FLASH_RESERVE_START_KB', default=16, type=int)
# space to reserve for storage at end of flash
flash_reserve_end = get_config('FLASH_RESERVE_END_KB', default=0, type=int)
# ram size
ram_size = get_config('RAM_SIZE_KB', default=192, type=int)
flash_base = 0x08000000 + flash_reserve_start * 1024
flash_length = flash_size - (flash_reserve_start + flash_reserve_end)
print("Generating ldscript.ld")
f = open(fname, 'w')
f.write('''/* generated ldscript.ld */
MEMORY
{
flash : org = 0x%08x, len = %uK
ram0 : org = 0x20000000, len = %uk
}
INCLUDE ../../libraries/AP_HAL_ChibiOS/hwdef/common/common.ld
''' % (flash_base, flash_length, ram_size))
def write_USB_config(f):
'''write USB config defines'''
if not have_type_prefix('OTG'):
return;
f.write('// USB configuration\n')
f.write('#define HAL_USB_VENDOR_ID %s\n' % get_config('USB_VENDOR', default=0x0483)) # default to ST
f.write('#define HAL_USB_PRODUCT_ID %s\n' % get_config('USB_PRODUCT', default=0x5740))
f.write('#define HAL_USB_STRING_MANUFACTURER "%s"\n' % get_config("USB_STRING_MANUFACTURER", default="ArduPilot"))
f.write('#define HAL_USB_STRING_PRODUCT "%s"\n' % get_config("USB_STRING_PRODUCT", default="%BOARD%"))
f.write('#define HAL_USB_STRING_SERIAL "%s"\n' % get_config("USB_STRING_SERIAL", default="%SERIAL%"))
f.write('\n\n')
def write_SPI_table(f):
'''write SPI device table'''
f.write('\n// SPI device table\n')
devlist = []
for dev in spidev:
if len(dev) != 7:
print("Badly formed SPIDEV line %s" % dev)
name = '"' + dev[0] + '"'
bus = dev[1]
devid = dev[2]
cs = dev[3]
mode = dev[4]
lowspeed = dev[5]
highspeed = dev[6]
if not bus.startswith('SPI') or not bus in spi_list:
error("Bad SPI bus in SPIDEV line %s" % dev)
if not devid.startswith('DEVID') or not is_int(devid[5:]):
error("Bad DEVID in SPIDEV line %s" % dev)
if not cs in bylabel or not bylabel[cs].is_CS():
error("Bad CS pin in SPIDEV line %s" % dev)
if not mode in ['MODE0', 'MODE1', 'MODE2', 'MODE3']:
error("Bad MODE in SPIDEV line %s" % dev)
if not lowspeed.endswith('*MHZ') and not lowspeed.endswith('*KHZ'):
error("Bad lowspeed value %s in SPIDEV line %s" % (lowspeed, dev))
if not highspeed.endswith('*MHZ') and not highspeed.endswith('*KHZ'):
error("Bad highspeed value %s in SPIDEV line %s" % (highspeed,
dev))
cs_pin = bylabel[cs]
pal_line = 'PAL_LINE(GPIO%s,%uU)' % (cs_pin.port, cs_pin.pin)
devidx = len(devlist)
f.write(
'#define HAL_SPI_DEVICE%-2u SPIDesc(%-17s, %2u, %2u, %-19s, SPIDEV_%s, %7s, %7s)\n'
% (devidx, name, spi_list.index(bus), int(devid[5:]), pal_line,
mode, lowspeed, highspeed))
devlist.append('HAL_SPI_DEVICE%u' % devidx)
f.write('#define HAL_SPI_DEVICE_LIST %s\n\n' % ','.join(devlist))
def write_SPI_config(f):
'''write SPI config defines'''
global spi_list
for t in bytype.keys():
if t.startswith('SPI'):
spi_list.append(t)
spi_list = sorted(spi_list)
if len(spi_list) == 0:
f.write('#define HAL_USE_SPI FALSE\n')
return
devlist = []
for dev in spi_list:
n = int(dev[3:])
devlist.append('HAL_SPI%u_CONFIG' % n)
f.write(
'#define HAL_SPI%u_CONFIG { &SPID%u, %u, STM32_SPI_SPI%u_TX_DMA_STREAM, STM32_SPI_SPI%u_RX_DMA_STREAM }\n'
% (n, n, n, n, n))
f.write('#define HAL_SPI_BUS_LIST %s\n\n' % ','.join(devlist))
write_SPI_table(f)
def write_UART_config(f):
'''write UART config defines'''
get_config('UART_ORDER')
uart_list = config['UART_ORDER']
f.write('\n// UART configuration\n')
# write out driver declarations for HAL_ChibOS_Class.cpp
devnames = "ABCDEFGH"
sdev = 0
for dev in uart_list:
idx = uart_list.index(dev)
if dev == 'EMPTY':
f.write('#define HAL_UART%s_DRIVER Empty::UARTDriver uart%sDriver\n' %
(devnames[idx], devnames[idx]))
else:
f.write(
'#define HAL_UART%s_DRIVER ChibiOS::UARTDriver uart%sDriver(%u)\n'
% (devnames[idx], devnames[idx], sdev))
sdev += 1
for idx in range(len(uart_list), 6):
f.write('#define HAL_UART%s_DRIVER Empty::UARTDriver uart%sDriver\n' %
(devnames[idx], devnames[idx]))
if 'IOMCU_UART' in config:
f.write('#define HAL_WITH_IO_MCU 1\n')
idx = len(uart_list)
f.write('#define HAL_UART_IOMCU_IDX %u\n' % idx)
f.write(
'#define HAL_UART_IO_DRIVER ChibiOS::UARTDriver uart_io(HAL_UART_IOMCU_IDX)\n'
)
uart_list.append(config['IOMCU_UART'][0])
else:
f.write('#define HAL_WITH_IO_MCU 0\n')
f.write('\n')
need_uart_driver = False
devlist = []
for dev in uart_list:
if dev.startswith('UART'):
n = int(dev[4:])
elif dev.startswith('USART'):
n = int(dev[5:])
elif dev.startswith('OTG'):
n = int(dev[3:])
elif dev.startswith('EMPTY'):
continue
else:
error("Invalid element %s in UART_ORDER" % dev)
devlist.append('HAL_%s_CONFIG' % dev)
if dev + "_RTS" in bylabel:
p = bylabel[dev + '_RTS']
rts_line = 'PAL_LINE(GPIO%s,%uU)' % (p.port, p.pin)
else:
rts_line = "0"
if dev.startswith('OTG'):
f.write(
'#define HAL_%s_CONFIG {(BaseSequentialStream*) &SDU1, true, false, 0, 0, false, 0, 0}\n'
% dev)
else:
need_uart_driver = True
f.write(
"#define HAL_%s_CONFIG { (BaseSequentialStream*) &SD%u, false, "
% (dev, n))
f.write("STM32_%s_RX_DMA_CONFIG, STM32_%s_TX_DMA_CONFIG, %s}\n" %
(dev, dev, rts_line))
f.write('#define HAL_UART_DEVICE_LIST %s\n\n' % ','.join(devlist))
if not need_uart_driver:
f.write('#define HAL_USE_SERIAL FALSE\n')
def write_I2C_config(f):
'''write I2C config defines'''
if not have_type_prefix('I2C'):
print("No I2C peripherals")
f.write('#define HAL_USE_I2C FALSE\n')
return
if not 'I2C_ORDER' in config:
error("Missing I2C_ORDER config")
i2c_list = config['I2C_ORDER']
f.write('// I2C configuration\n')
if len(i2c_list) == 0:
error("I2C_ORDER invalid")
devlist = []
for dev in i2c_list:
if not dev.startswith('I2C') or dev[3] not in "1234":
error("Bad I2C_ORDER element %s" % dev)
if dev + "_SCL" in bylabel:
p = bylabel[dev + "_SCL"]
f.write(
'#define HAL_%s_SCL_AF %d\n' % (dev, p.af)
)
n = int(dev[3:])
devlist.append('HAL_I2C%u_CONFIG' % n)
f.write(
'#define HAL_I2C%u_CONFIG { &I2CD%u, STM32_I2C_I2C%u_RX_DMA_STREAM, STM32_I2C_I2C%u_TX_DMA_STREAM }\n'
% (n, n, n, n))
f.write('#define HAL_I2C_DEVICE_LIST %s\n\n' % ','.join(devlist))
def write_PWM_config(f):
'''write PWM config defines'''
rc_in = None
alarm = None
pwm_out = []
pwm_timers = []
for l in bylabel.keys():
p = bylabel[l]
if p.type.startswith('TIM'):
if p.has_extra('RCIN'):
rc_in = p
elif p.has_extra('ALARM'):
alarm = p
else:
if p.extra_value('PWM', type=int) is not None:
pwm_out.append(p)
if p.type not in pwm_timers:
pwm_timers.append(p.type)
if not pwm_out:
print("No PWM output defined")
f.write('#define HAL_USE_PWM FALSE\n')
if rc_in is not None:
a = rc_in.label.split('_')
chan_str = a[1][2:]
timer_str = a[0][3:]
if chan_str[-1] == 'N':
# it is an inverted channel
f.write('#define HAL_RCIN_IS_INVERTED\n')
chan_str = chan_str[:-1]
if not is_int(chan_str) or not is_int(timer_str):
error("Bad timer channel %s" % rc_in.label)
if int(chan_str) not in [1, 2]:
error(
"Bad channel number, only channel 1 and 2 supported for RCIN")
n = int(a[0][3:])
dma_chan_str = rc_in.extra_prefix('DMA_CH')[6:]
dma_chan = int(dma_chan_str)
f.write('// RC input config\n')
f.write('#define HAL_USE_ICU TRUE\n')
f.write('#define STM32_ICU_USE_TIM%u TRUE\n' % n)
f.write('#define RCIN_ICU_TIMER ICUD%u\n' % n)
f.write(
'#define RCIN_ICU_CHANNEL ICU_CHANNEL_%u\n' % int(chan_str))
f.write('#define STM32_RCIN_DMA_CHANNEL %u' % dma_chan)
f.write('\n')
if alarm is not None:
a = alarm.label.split('_')
chan_str = a[1][2:]
timer_str = a[0][3:]
if not is_int(chan_str) or not is_int(timer_str):
error("Bad timer channel %s" % alarm.label)
n = int(timer_str)
f.write('\n')
f.write('// Alarm PWM output config\n')
f.write('#define STM32_PWM_USE_TIM%u TRUE\n' % n)
f.write('#define STM32_TIM%u_SUPPRESS_ISR\n' % n)
chan_mode = [
'PWM_OUTPUT_DISABLED', 'PWM_OUTPUT_DISABLED',
'PWM_OUTPUT_DISABLED', 'PWM_OUTPUT_DISABLED'
]
chan = int(chan_str)
if chan not in [1, 2, 3, 4]:
error("Bad channel number %u for ALARM PWM %s" % (chan, p))
chan_mode[chan - 1] = 'PWM_OUTPUT_ACTIVE_HIGH'
pwm_clock = 1000000
period = 1000
f.write('''#define HAL_PWM_ALARM \\
{ /* pwmGroup */ \\
%u, /* Timer channel */ \\
{ /* PWMConfig */ \\
%u, /* PWM clock frequency. */ \\
%u, /* Initial PWM period 20ms. */ \\
NULL, /* no callback */ \\
{ /* Channel Config */ \\
{%s, NULL}, \\
{%s, NULL}, \\
{%s, NULL}, \\
{%s, NULL} \\
}, \\
0, 0 \\
}, \\
&PWMD%u /* PWMDriver* */ \\
}\n''' %
(chan-1, pwm_clock, period, chan_mode[0],
chan_mode[1], chan_mode[2], chan_mode[3], n))
else:
f.write('\n')
f.write('// No Alarm output pin defined\n')
f.write('#undef HAL_PWM_ALARM\n')
f.write('\n')
f.write('// PWM timer config\n')
for t in sorted(pwm_timers):
n = int(t[3])
f.write('#define STM32_PWM_USE_TIM%u TRUE\n' % n)
f.write('#define STM32_TIM%u_SUPPRESS_ISR\n' % n)
f.write('\n')
f.write('// PWM output config\n')
groups = []
for t in sorted(pwm_timers):
group = len(groups) + 1
n = int(t[3])
chan_list = [255, 255, 255, 255]
chan_mode = [
'PWM_OUTPUT_DISABLED', 'PWM_OUTPUT_DISABLED',
'PWM_OUTPUT_DISABLED', 'PWM_OUTPUT_DISABLED'
]
for p in pwm_out:
if p.type != t:
continue
chan_str = p.label[7]
if not is_int(chan_str):
error("Bad channel for PWM %s" % p)
chan = int(chan_str)
if chan not in [1, 2, 3, 4]:
error("Bad channel number %u for PWM %s" % (chan, p))
pwm = p.extra_value('PWM', type=int)
chan_list[chan - 1] = pwm - 1
chan_mode[chan - 1] = 'PWM_OUTPUT_ACTIVE_HIGH'
groups.append('HAL_PWM_GROUP%u' % group)
if n in [1, 8]:
# only the advanced timers do 8MHz clocks
advanced_timer = 'true'
else:
advanced_timer = 'false'
pwm_clock = 1000000
period = 20000 * pwm_clock / 1000000
f.write('''#define HAL_PWM_GROUP%u { %s, \\
{%u, %u, %u, %u}, \\
/* Group Initial Config */ \\
{ \\
%u, /* PWM clock frequency. */ \\
%u, /* Initial PWM period 20ms. */ \\
NULL, /* no callback */ \\
{ \\
/* Channel Config */ \\
{%s, NULL}, \\
{%s, NULL}, \\
{%s, NULL}, \\
{%s, NULL} \\
}, 0, 0}, &PWMD%u}\n''' %
(group, advanced_timer, chan_list[0], chan_list[1],
chan_list[2], chan_list[3], pwm_clock, period, chan_mode[0],
chan_mode[1], chan_mode[2], chan_mode[3], n))
f.write('#define HAL_PWM_GROUPS %s\n\n' % ','.join(groups))
def write_ADC_config(f):
'''write ADC config defines'''
f.write('// ADC config\n')
adc_chans = []
for l in bylabel:
p = bylabel[l]
if not p.type.startswith('ADC'):
continue
chan = get_ADC1_chan(mcu_type, p.portpin)
scale = p.extra_value('SCALE', default=None)
if p.label == 'VDD_5V_SENS':
f.write('#define ANALOG_VCC_5V_PIN %u\n' % chan)
adc_chans.append((chan, scale, p.label, p.portpin))
adc_chans = sorted(adc_chans)
vdd = get_config('STM32_VDD')
if vdd[-1] == 'U':
vdd = vdd[:-1]
vdd = float(vdd) * 0.01
f.write('#define HAL_ANALOG_PINS { \\\n')
for (chan, scale, label, portpin) in adc_chans:
scale_str = '%.2f/4096' % vdd
if scale is not None and scale != '1':
scale_str = scale + '*' + scale_str
f.write('{ %2u, %12s }, /* %s %s */ \\\n' % (chan, scale_str, portpin,
label))
f.write('}\n\n')
def write_GPIO_config(f):
'''write GPIO config defines'''
f.write('// GPIO config\n')
gpios = []
for l in bylabel:
p = bylabel[l]
gpio = p.extra_value('GPIO', type=int)
if gpio is None:
continue
# see if it is also a PWM pin
pwm = p.extra_value('PWM', type=int, default=0)
port = p.port
pin = p.pin
gpios.append((gpio, pwm, port, pin, p))
gpios = sorted(gpios)
f.write('#define HAL_GPIO_PINS { \\\n')
for (gpio, pwm, port, pin, p) in gpios:
f.write('{ %3u, true, %2u, PAL_LINE(GPIO%s, %2uU) }, /* %s */ \\\n' %
(gpio, pwm, port, pin, p))
# and write #defines for use by config code
f.write('}\n\n')
f.write('// full pin define list\n')
for l in sorted(bylabel.keys()):
p = bylabel[l]
label = p.label
label = label.replace('-', '_')
f.write('#define HAL_GPIO_PIN_%-20s PAL_LINE(GPIO%s,%uU)\n' %
(label, p.port, p.pin))
f.write('\n')
def write_prototype_file():
'''write the prototype file for apj generation'''
pf = open(os.path.join(outdir, "apj.prototype"), "w")
pf.write('''{
"board_id": %s,
"magic": "PX4FWv1",
"description": "Firmware for the %s board",
"image": "",
"build_time": 0,
"summary": "PX4FMUv3",
"version": "0.1",
"image_size": 0,
"git_identity": "",
"board_revision": 0
}
''' % (get_config('APJ_BOARD_ID'),
get_config('APJ_BOARD_TYPE', default=mcu_type)))
def write_peripheral_enable(f):
'''write peripheral enable lines'''
f.write('// peripherals enabled\n')
for type in sorted(bytype.keys()):
if type.startswith('USART') or type.startswith('UART'):
f.write('#define STM32_SERIAL_USE_%-6s TRUE\n' % type)
if type.startswith('SPI'):
f.write('#define STM32_SPI_USE_%s TRUE\n' % type)
if type.startswith('OTG'):
f.write('#define STM32_USB_USE_%s TRUE\n' % type)
if type.startswith('I2C'):
f.write('#define STM32_I2C_USE_%s TRUE\n' % type)
def get_dma_exclude(periph_list):
'''return list of DMA devices to exclude from DMA'''
dma_exclude = []
for periph in periph_list:
if periph not in bylabel:
continue
p = bylabel[periph]
if p.has_extra('NODMA'):
dma_exclude.append(periph)
return dma_exclude
def write_hwdef_header(outfilename):
'''write hwdef header file'''
print("Writing hwdef setup in %s" % outfilename)
f = open(outfilename, 'w')
f.write('''/*
generated hardware definitions from hwdef.dat - DO NOT EDIT
*/
#pragma once
''')
write_mcu_config(f)
write_USB_config(f)
write_I2C_config(f)
write_SPI_config(f)
write_PWM_config(f)
write_ADC_config(f)
write_GPIO_config(f)
write_peripheral_enable(f)
write_prototype_file()
dma_resolver.write_dma_header(f, periph_list, mcu_type,
dma_exclude=get_dma_exclude(periph_list),
dma_priority=get_config('DMA_PRIORITY',default=''),
dma_noshare=get_config('DMA_NOSHARE',default=''))
write_UART_config(f)
f.write('''
/*
* I/O ports initial setup, this configuration is established soon after reset
* in the initialization code.
* Please refer to the STM32 Reference Manual for details.
*/
#define PIN_MODE_INPUT(n) (0U << ((n) * 2U))
#define PIN_MODE_OUTPUT(n) (1U << ((n) * 2U))
#define PIN_MODE_ALTERNATE(n) (2U << ((n) * 2U))
#define PIN_MODE_ANALOG(n) (3U << ((n) * 2U))
#define PIN_ODR_LOW(n) (0U << (n))
#define PIN_ODR_HIGH(n) (1U << (n))
#define PIN_OTYPE_PUSHPULL(n) (0U << (n))
#define PIN_OTYPE_OPENDRAIN(n) (1U << (n))
#define PIN_OSPEED_VERYLOW(n) (0U << ((n) * 2U))
#define PIN_OSPEED_LOW(n) (1U << ((n) * 2U))
#define PIN_OSPEED_MEDIUM(n) (2U << ((n) * 2U))
#define PIN_OSPEED_HIGH(n) (3U << ((n) * 2U))
#define PIN_PUPDR_FLOATING(n) (0U << ((n) * 2U))
#define PIN_PUPDR_PULLUP(n) (1U << ((n) * 2U))
#define PIN_PUPDR_PULLDOWN(n) (2U << ((n) * 2U))
#define PIN_AFIO_AF(n, v) ((v) << (((n) % 8U) * 4U))
''')
for port in sorted(ports):
f.write("/* PORT%s:\n" % port)
for pin in range(pincount[port]):
p = portmap[port][pin]
if p.label is not None:
f.write(" %s\n" % p)
f.write("*/\n\n")
if pincount[port] == 0:
# handle blank ports
for vtype in vtypes:
f.write("#define VAL_GPIO%s_%-7s 0x0\n" % (port,
vtype))
f.write("\n\n\n")
continue
for vtype in vtypes:
f.write("#define VAL_GPIO%s_%-7s (" % (p.port, vtype))
first = True
for pin in range(pincount[port]):
p = portmap[port][pin]
modefunc = getattr(p, "get_" + vtype)
v = modefunc()
if v is None:
continue
if not first:
f.write(" | \\\n ")
f.write(v)
first = False
if first:
# there were no pin definitions, use 0
f.write("0")
f.write(")\n\n")
def build_peripheral_list():
'''build a list of peripherals for DMA resolver to work on'''
peripherals = []
done = set()
prefixes = ['SPI', 'USART', 'UART', 'I2C']
for p in allpins:
type = p.type
if type in done:
continue
for prefix in prefixes:
if type.startswith(prefix):
peripherals.append(type + "_TX")
peripherals.append(type + "_RX")
if type.startswith('ADC'):
peripherals.append(type)
if type.startswith('SDIO'):
peripherals.append(type)
if type.startswith('TIM') and p.has_extra('RCIN'):
label = p.label
if label[-1] == 'N':
label = label[:-1]
peripherals.append(label)
done.add(type)
return peripherals
def process_line(line):
'''process one line of pin definition file'''
global allpins
a = shlex.split(line)
# keep all config lines for later use
alllines.append(line)
if a[0].startswith('P') and a[0][1] in ports and a[0] in config:
print("WARNING: Pin %s redefined" % a[0])
config[a[0]] = a[1:]
if a[0] == 'MCU':
global mcu_type
mcu_type = a[2]
if a[0].startswith('P') and a[0][1] in ports:
# it is a port/pin definition
try:
port = a[0][1]
pin = int(a[0][2:])
label = a[1]
type = a[2]
extra = a[3:]
except Exception:
error("Bad pin line: %s" % a)
return
p = generic_pin(port, pin, label, type, extra)
portmap[port][pin] = p
allpins.append(p)
if not type in bytype:
bytype[type] = []
bytype[type].append(p)
bylabel[label] = p
af = get_alt_function(mcu_type, a[0], label)
if af is not None:
p.af = af
if a[0] == 'SPIDEV':
spidev.append(a[1:])
if a[0] == 'undef':
print("Removing %s" % a[1])
config.pop(a[1], '')
bytype.pop(a[1],'')
bylabel.pop(a[1],'')
        #also remove all occurrences of defines in previous lines if any
for line in alllines[:]:
if line.startswith('define') and a[1] in line:
alllines.remove(line)
newpins = []
for pin in allpins:
if pin.type == a[1]:
continue
if pin.label == a[1]:
continue
if pin.portpin == a[1]:
continue
newpins.append(pin)
allpins = newpins
if a[0] == 'env':
print("Adding environment %s" % ' '.join(a[1:]))
if len(a[1:]) < 2:
error("Bad env line for %s" % a[0])
env_vars[a[1]] = ' '.join(a[2:])
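# Example hwdef.dat fragment (illustrative only; real board definitions live in
# libraries/AP_HAL_ChibiOS/hwdef/<board>/hwdef.dat and may differ):
#
#   MCU STM32F4xx STM32F427xx
#   FLASH_SIZE_KB 2048
#   PA9  USART1_TX USART1
#   PA10 USART1_RX USART1
#   SPIDEV ms5611 SPI1 DEVID3 BARO_CS MODE3 20*MHZ 20*MHZ
#
# Each pin line has the form "P<port><pin> <label> <type> [extras...]", which is
# exactly what process_line() above splits apart.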
def process_file(filename):
'''process a hwdef.dat file'''
try:
f = open(filename, "r")
except Exception:
error("Unable to open file %s" % filename)
for line in f.readlines():
line = line.strip()
if len(line) == 0 or line[0] == '#':
continue
a = shlex.split(line)
if a[0] == "include" and len(a) > 1:
include_file = a[1]
if include_file[0] != '/':
dir = os.path.dirname(filename)
include_file = os.path.normpath(
os.path.join(dir, include_file))
print("Including %s" % include_file)
process_file(include_file)
else:
process_line(line)
# process input file
process_file(args.hwdef)
outdir = args.outdir
if outdir is None:
outdir = '/tmp'
if not "MCU" in config:
error("Missing MCU type in config")
mcu_type = get_config('MCU', 1)
print("Setup for MCU %s" % mcu_type)
# build a list for peripherals for DMA resolver
periph_list = build_peripheral_list()
# write out hwdef.h
write_hwdef_header(os.path.join(outdir, "hwdef.h"))
# write out ldscript.ld
write_ldscript(os.path.join(outdir, "ldscript.ld"))
# write out env.py
pickle.dump(env_vars, open(os.path.join(outdir, "env.py"), "w"))
| UrusTeam/URUS | libraries/AP_HAL_ChibiOS/hwdef/scripts/chibios_hwdef.py | Python | gpl-3.0 | 34,026 | [
"CRYSTAL"
] | c193017a99d4219f209065a26d9545d7babe5a111474bdb9f9ba75d60d727056 |
import numpy as np
import uncertainties.unumpy as unp
from uncertainties import ufloat
import math
import latex
from pandas import Series, DataFrame
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
from pint import UnitRegistry
import scipy.constants as const
u = UnitRegistry()
Q_ = u.Quantity
h = Q_(const.h, 'joule*second')
e_0 = Q_(const.elementary_charge, 'coulomb')
m_0 = Q_(const.m_e, 'kilogram')
# Dimensions of the samples
d_zink = Q_(1.85 - 1.70, 'millimeter')
d_kupfer = Q_(18e-6, 'meter').to('millimeter')
l_zink = Q_(4.3, 'centimeter')
l_kupfer = Q_(2.805, 'centimeter')
b_kupfer = Q_(2.53, 'centimeter')
b_zink = Q_(2.55, 'centimeter')
def E_fermi(n):
return ( h**2 /(2 * m_0) * ( ( (3 * n) / (8 * np.pi) ) **2 ) **(1/3) ).to('eV')
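# Comment added for clarity: E_fermi implements the free-electron Fermi energy
#   E_F = h**2 / (2 * m_0) * (3 * n / (8 * pi))**(2/3)
# where n is the charge-carrier density per unit volume.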
# Linear function for fits
def F_1(x, m, b):
return m * x + b
I_eich_steigend, B_eich_steigend = np.genfromtxt('flussdichte_steigend.txt', unpack=True)
I_eich_fallend, B_eich_fallend = np.genfromtxt('flussdichte_fallend.txt', unpack=True)
params_1, covariance_1 = curve_fit(F_1, I_eich_steigend, B_eich_steigend, sigma=0.1)
errors_B = np.sqrt(np.diag(covariance_1))
params_B = unp.uarray(params_1, errors_B)
print('Gerade B(I) Paramter: ', params_B)
I_lim = np.linspace(-0.2, 5.2, 100)
plt.plot(I_eich_steigend, B_eich_steigend, 'bx', label='Messwerte steigender Strom')
plt.plot(I_eich_fallend, B_eich_fallend, 'rx', label='Messwerte fallender Strom')
plt.plot(I_lim, F_1(I_lim, *params_1), '-b', label='Lineare Regression')
plt.xlim(I_eich_steigend[0], I_eich_steigend[-1])
plt.xlabel('$I$ in $A$')
plt.ylabel('$B$ in $mT$')
plt.ylim(-1500, 500)
plt.xlim(I_lim[0], I_lim[-1])
plt.grid()
plt.legend(loc='best')
plt.savefig('hysterese.pdf')
latex.Latexdocument('hysterese_tab.tex').tabular([I_eich_steigend, B_eich_steigend, B_eich_fallend[::-1] ],
'{$I_q$ in $\si{\\ampere}$} & {$B_{wachsend}$ in $\si{\milli \\tesla}$} & {$B_{fallend}$ in $\si{\milli \\tesla}$}', [1, 1, 1] ,
caption = 'Messung des magnetischen Feldes bei fallendem und steigendem Strom', label = 'tab: hysterese')
def B(I):
return Q_(params_1[0] * I + params_1[1], 'millitesla')
# Determination of the resistance
I_zink_raw, U_zink_raw = np.genfromtxt('uri_zink.txt', unpack=True)
I_zink = Q_(I_zink_raw, 'ampere')
U_zink = Q_(U_zink_raw, 'millivolt')
params_zink_R, cov_zink_R = curve_fit(F_1, I_zink.magnitude, U_zink.magnitude, sigma=0.1)
plt.clf()
I_lim = np.linspace(-0.2, 8.2, 100)
plt.plot(I_zink.magnitude, U_zink.magnitude, 'rx', label='Messwerte')
plt.plot(I_lim, F_1(I_lim, *params_zink_R), '-b', label='Lineare Regression')
plt.xlim(I_zink.magnitude[0], I_zink.magnitude[-1])
plt.ylim(-1, 80)
plt.xlabel('$I$ in $A$')
plt.ylabel('$U$ in $mV$')
plt.xlim(I_lim[0], I_lim[-1])
plt.grid()
plt.legend(loc='best')
plt.savefig('uri_zink.pdf')
R_zink_errors = np.sqrt(np.diag(cov_zink_R))
R_zink = Q_(ufloat(params_zink_R[0], R_zink_errors[0]), 'millivolt/ampere').to('milliohm')
print('R_zink = ', R_zink)
latex.Latexdocument('uri_zink_tab.tex').tabular([I_zink_raw, U_zink_raw],
'{$I$ in $\si{\\ampere}$} & {$U$ in $\si{\\volt}$} ', [1, 1],
caption = 'Zinkprobe: Messung der Spannung in Abhängigkeit vom Strom ', label = 'tab: uri_zink')
I_kupfer_raw, U_kupfer_raw = np.genfromtxt('uri_kupfer.txt', unpack=True)
I_kupfer = Q_(I_kupfer_raw, 'ampere')
U_kupfer = Q_(U_kupfer_raw, 'millivolt')
params_kupfer_R, cov_kupfer_R = curve_fit(F_1, I_kupfer.magnitude, U_kupfer.magnitude, sigma=0.1)
plt.clf()
I_lim = np.linspace(-0.2, 10.2, 100)
plt.plot(I_kupfer.magnitude, U_kupfer.magnitude, 'rx', label='Messwerte')
plt.plot(I_lim, F_1(I_lim, *params_kupfer_R), '-b', label='Lineare Regression')
plt.xlim(I_kupfer.magnitude[0], I_kupfer.magnitude[-1])
plt.xlabel('$I$ in $A$')
plt.ylabel('$U$ in $mV$')
plt.xlim(I_lim[0], I_lim[-1])
plt.grid()
plt.legend(loc='best')
plt.savefig('uri_kupfer.pdf')
R_kupfer_errors = np.sqrt(np.diag(cov_kupfer_R))
R_kupfer = Q_(ufloat(params_kupfer_R[0], R_kupfer_errors[0]), 'millivolt/ampere').to('milliohm')
print('R_kupfer = ', R_kupfer)
latex.Latexdocument('uri_kupfer_tab.tex').tabular([I_kupfer_raw, U_kupfer_raw],
'{$I$ in $\si{\\ampere}$} & {$U$ in $\si{\\volt}$} ', [1, 1],
caption = 'Kupferprobe: Messung der Spannung in Abhängigkeit vom Strom ', label = 'tab: uri_kupfer')
# Hall voltage of copper
# constant B field at I_q = 3 A
B_konst_kupfer = B(3)
U_ges_min_kupfer_konstB, U_ges_plu_kupfer_konstB = np.genfromtxt('u_h_konstB_kupfer.txt', unpack=True)
U_h_kupfer_konstB = Q_(0.5 * (U_ges_plu_kupfer_konstB - U_ges_min_kupfer_konstB), 'millivolt')
I = np.linspace(0, 10 , 11)
latex.Latexdocument('u_h_kupfer_konstB_tab.tex').tabular([I, U_ges_min_kupfer_konstB, U_ges_plu_kupfer_konstB, U_h_kupfer_konstB.magnitude],
'{$I$ in $\si{\\ampere}$} & {$U_{ges-}$ in $\si{\milli \\volt}$} & {$U_{ges+}$ in $\si{\milli \\volt}$} & {$U_{H}$ in $\si{\milli \\volt}$}', [1, 3, 3, 3],
caption = 'Hallspannung Kupfer bei konstantem Magnetfeld', label = 'tab: hall_kupfer_konstB')
params_kupfer_U_h_1, cov_kupfer_U_h_1 = curve_fit(F_1, I, U_h_kupfer_konstB.magnitude, sigma=0.1)
plt.clf()
I_lim = np.linspace(-0.2, 10.2, 100)
plt.plot(I, U_h_kupfer_konstB, 'rx', label='Messwerte')
plt.plot(I_lim, F_1(I_lim, *params_kupfer_U_h_1), '-b', label='Lineare Regression')
plt.xlim(I_lim[0], I_lim[-1])
plt.xlabel('$I$ in $A$')
plt.ylabel('$U_H$ in $mV$')
plt.grid()
plt.legend(loc='best')
plt.savefig('u_h_kupfer_konstB.pdf')
Steigung_U_h_kupfer_konstB_errors = np.sqrt(np.diag(cov_kupfer_U_h_1))
print(params_kupfer_U_h_1[0])
Steigung_U_h_kupfer_konstB = Q_(ufloat(params_kupfer_U_h_1[0], Steigung_U_h_kupfer_konstB_errors[0]), 'millivolt/ampere')
print('Steigung der Hall Spannung, Kupfer, konst BFeld: ', Steigung_U_h_kupfer_konstB.to('millivolt/ampere'))
n_kupfer_konstB = (- 1/(Steigung_U_h_kupfer_konstB * e_0 * d_kupfer) * B_konst_kupfer).to('1/meter**3')
print('n_kupfer_konstB', n_kupfer_konstB)
print('Fermienergie Kupfer: ', E_fermi(n_kupfer_konstB))
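# Comment added for clarity: at constant field B the Hall voltage follows
# U_H = B * I / (n * e_0 * d) (sign set by the charge-carrier type), so the
# fitted slope dU_H/dI directly gives the carrier density
# n = B / (e_0 * d * slope), which is what is evaluated above for copper.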
B_konst_zink = B(3)
U_ges_min_zink_konstB, U_ges_plu_zink_konstB = np.genfromtxt('u_h_konstB_zink.txt', unpack=True)
U_h_zink_konstB = Q_(0.5 * (U_ges_plu_zink_konstB - U_ges_min_zink_konstB), 'millivolt')
I = np.linspace(0, 10 , 11)
with open('u_h_zink_konstB_tab.tex', 'w') as f:
f.write('\\begin{table} \n \\centering \n \\caption{Hallspannung Zink bei konstantem Magnetfeld} \n \\label{tab: hall_zink_konstB} \n\\begin{tabular}{')
f.write(4 *'S ')
f.write('} \n \\toprule \n')
f.write(' {$I$ in $\si{\\ampere}$} & {$U_{ges-}$ in $\si{\milli \\volt}$} & {$U_{ges+}$ in $\si{\milli \\volt}$} & {$U_{H}$ in $\si{\milli \\volt}$} \\\ \n')
f.write('\\midrule \n ')
for i in range (0,len(I)):
f.write('{:.1f} & {:.3f} & {:.3f} & {:.3f} \\\ \n'.format(I[i], U_ges_min_zink_konstB[i], U_ges_plu_zink_konstB[i], U_h_zink_konstB[i].magnitude))
f.write('\\bottomrule \n \\end{tabular} \n \\end{table}')
params_zink_U_h_1, cov_zink_U_h_1 = curve_fit(F_1, I, U_h_zink_konstB.magnitude, sigma=0.1)
plt.clf()
I_lim = np.linspace(-0.2, 10.2, 100)
plt.plot(I, U_h_zink_konstB, 'rx', label='Messwerte')
plt.plot(I_lim, F_1(I_lim, *params_zink_U_h_1), '-b', label='Lineare Regression')
plt.xlim(I_lim[0], I_lim[-1])
plt.xlabel('$I$ in $A$')
plt.ylabel('$U_H$ in $mV$')
plt.grid()
plt.legend(loc='best')
plt.savefig('u_h_zink_konstB.pdf')
Steigung_U_h_zink_konstB_errors = np.sqrt(np.diag(cov_zink_U_h_1))
Steigung_U_h_zink_konstB = Q_(ufloat(params_zink_U_h_1[0], Steigung_U_h_zink_konstB_errors[0]), 'millivolt/ampere')
print('Steigung der Hall Spannung, Zink, konst BFeld: ', Steigung_U_h_zink_konstB.to('millivolt/ampere'))
n_zink_konstB = (1/(Steigung_U_h_zink_konstB * e_0 * d_zink) * B_konst_zink).to('1/meter**3')
print('n_zink_konstB', n_zink_konstB)
print('Fermienergie Zink: ', E_fermi(n_zink_konstB))
# Calculation of U_H at variable B field
konstI = Q_(10, 'ampere')
I_konstI, U_ges_plu_zink_konstI, U_ges_min_zink_konstI = np.genfromtxt('u_h_konstI_zink.txt', unpack=True)
U_h_zink_konstI = Q_(0.5 * (U_ges_plu_zink_konstI - U_ges_min_zink_konstI), 'millivolt')
with open('u_h_zink_konstI_tab.tex', 'w') as f:
f.write('\\begin{table} \n \\centering \n\\caption{Hallspannung Zink bei konstantem Querstrom} \n \\label{tab: hall_zink_konstI} \n \\begin{tabular}{')
f.write(5 *'S ')
f.write('} \n \\toprule \n')
f.write(' {$I$ in $\si{\\ampere}$} & {$B$ in $\si{\milli\\tesla}$} & {$U_{ges-}$ in $\si{\milli \\volt}$} & {$U_{ges+}$ in $\si{\milli \\volt}$} & {$U_{H}$ in $\si{\milli \\volt}$} \\\ \n')
f.write('\\midrule \n ')
for i in range (0,len(I_konstI)):
f.write('{:.1f} & {:.1f} & {:.3f} & {:.3f} & {:.3f} \\\ \n'.format(I_konstI[i],B(I_konstI[i]).magnitude ,U_ges_min_zink_konstI[i], U_ges_plu_zink_konstI[i], U_h_zink_konstI[i].magnitude))
f.write('\\bottomrule \n \\end{tabular} \n \\end{table}')
B_konstI_zink = B(I_konstI).magnitude
params_zink_U_h_2, cov_zink_U_h_2 = curve_fit(F_1, B_konstI_zink[:-1], U_h_zink_konstI.magnitude[:-1], sigma=0.1)
plt.clf()
B_lim = np.linspace(B_konstI_zink[0]+10, B_konstI_zink[-1]-10, 100)
plt.plot(B_konstI_zink, U_h_zink_konstI.magnitude, 'rx', label='Messwerte')
plt.plot(B_lim, F_1(B_lim, *params_zink_U_h_2), '-b', label='Lineare Regression')
plt.xlabel('$B$ in $mT$')
plt.ylabel('$U_H$ in $mV$')
plt.xlim(B_konstI_zink[-1]-10, B_konstI_zink[0]+10)
plt.grid()
plt.legend(loc ='best')
plt.savefig('u_h_zink_konstI.pdf')
Steigung_U_h_zink_konstI_errors = np.sqrt(np.diag(cov_zink_U_h_2))
Steigung_U_h_zink_konstI = Q_(ufloat(params_zink_U_h_2[0], Steigung_U_h_zink_konstI_errors[0]), 'volt/tesla')
print('Steigung der Hall Spannung, Zink, konst Strom: ', Steigung_U_h_zink_konstI.to('volt/tesla'))
n_zink_konstI = (1/(Steigung_U_h_zink_konstI * e_0 * d_zink) * konstI).to('1/meter**3')
print('n_zink_konstI', n_zink_konstI)
konstI = Q_(10, 'ampere')
I_konstI, U_ges_plu_kupfer_konstI, U_ges_min_kupfer_konstI = np.genfromtxt('u_h_konstI_kupfer.txt', unpack=True)
U_h_kupfer_konstI = Q_(0.5 * (U_ges_plu_kupfer_konstI - U_ges_min_kupfer_konstI), 'millivolt')
with open('u_h_kupfer_konstI_tab.tex', 'w') as f:
f.write('\\begin{table} \n \\centering \n\\caption{Hallspannung Kupfer bei konstantem Querstrom} \n \\label{tab: hall_kupfer_konstI} \n \\begin{tabular}{')
f.write(5 *'S ')
f.write('} \n \\toprule \n')
f.write(' {$I$ in $\si{\\ampere}$} & {$B$ in $\si{\milli\\tesla}$} & {$U_{ges-}$ in $\si{\milli \\volt}$} & {$U_{ges+}$ in $\si{\milli \\volt}$} & {$U_{H}$ in $\si{\milli \\volt}$} \\\ \n')
f.write('\\midrule \n ')
for i in range (0, len(I_konstI)):
f.write('{:.1f} & {:.1f} & {:.3f} & {:.3f} & {:.3f} \\\ \n'.format(I_konstI[i] ,B(I_konstI[i]).magnitude ,U_ges_min_kupfer_konstI[i], U_ges_plu_kupfer_konstI[i], U_h_kupfer_konstI[i].magnitude))
f.write('\\bottomrule \n \\end{tabular} \n \\end{table}')
B_konstI_kupfer = B(I_konstI).magnitude
params_kupfer_U_h_2, cov_kupfer_U_h_2 = curve_fit(F_1, B_konstI_kupfer[1:], U_h_kupfer_konstI.magnitude[1:], sigma=0.1)
plt.clf()
B_lim = np.linspace(B_konstI_kupfer[0]+10, B_konstI_kupfer[-1]-10, 100)
plt.plot(B_konstI_kupfer, U_h_kupfer_konstI.magnitude, 'rx', label='Messwerte')
plt.plot(B_lim, F_1(B_lim, *params_kupfer_U_h_2), '-b', label='Lineare Regression')
plt.xlabel('$B$ in $mT$')
plt.ylabel('$U_H$ in $mV$')
plt.xlim(B_konstI_kupfer[-1]-10, B_konstI_kupfer[0]+10)
plt.grid()
plt.legend(loc ='best')
plt.savefig('u_h_kupfer_konstI.pdf')
Steigung_U_h_kupfer_konstI_errors = np.sqrt(np.diag(cov_kupfer_U_h_2))
Steigung_U_h_kupfer_konstI = Q_(ufloat(params_kupfer_U_h_2[0], Steigung_U_h_kupfer_konstI_errors[0]), 'volt/tesla')
print('Steigung der Hall Spannung, Kupfer, konst Strom: ', Steigung_U_h_kupfer_konstI.to('volt/tesla'))
n_kupfer_konstI = (- 1/(Steigung_U_h_kupfer_konstI * e_0 * d_kupfer) * konstI).to('1/meter**3')
print('n_kupfer_konstI', n_kupfer_konstI)
# Calculation of further quantities
rho_kupfer = Q_(8.96, 'gram/(cm)^3').to('kilogram/m^3')
rho_zink = Q_(7.14, 'gram/(cm)^3').to('kilogram/m^3')
molmass_kupfer = Q_(63.5, 'gram/mol').to('kilogram/mol')
molmass_zink = Q_(65.4, 'gram/mol').to('kilogram/mol')
molvol_kupfer = molmass_kupfer/rho_kupfer
molvol_zink = molmass_zink/rho_zink
vol = Q_(1, 'meter^3')
print(molvol_zink, molvol_kupfer)
n_cube_kupfer = vol/molvol_kupfer
n_cube_zink = vol/molvol_zink
print(n_cube_zink, n_cube_kupfer)
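# Charge carriers per atom: z = n * V_mol / N_A (carrier density times molar volume,
# divided by Avogadro's number), evaluated for both measurement methods below.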
z_kupfer_konstI = (n_kupfer_konstI*(molvol_kupfer / Q_(const.Avogadro, '1/mole')))
print('z_kupfer_konstI: ', z_kupfer_konstI)
z_kupfer_konstB = (n_kupfer_konstB*(molvol_kupfer / Q_(const.Avogadro, '1/mole')))
print('z_kupfer_konstB: ', z_kupfer_konstB)
z_zink_konstI = (n_zink_konstI*(molvol_zink / Q_(const.Avogadro, '1/mole')))
print('z_zink_konstI: ', z_zink_konstI)
z_zink_konstB = (n_zink_konstB*(molvol_zink / Q_(const.Avogadro, '1/mole')))
print('z_zink_konstB: ', z_zink_konstB)
# Specific resistivity / conductivity
R_spez_kupfer = (R_kupfer * b_kupfer * d_kupfer) / l_kupfer
print('spezifischer Widerstand Kupfer: ', R_spez_kupfer.to('ohm * millimeter^2 / meter'))
R_spez_zink = (R_zink * b_zink * d_zink) / l_zink
print('spezifischer Widerstand Zink: ',R_spez_zink.to('ohm * millimeter^2 / meter'))
tau_kupfer1 = (2 * m_0) / (n_kupfer_konstB * R_spez_kupfer * e_0**2)
print(tau_kupfer1.to('second'))
tau_kupfer2 = (2 * m_0) / (n_kupfer_konstI * R_spez_kupfer * e_0**2)
print(tau_kupfer2.to('second'))
tau_zink1 = (2 * m_0) / (n_zink_konstB * R_spez_zink * e_0**2)
print(tau_zink1.to('second'))
tau_zink2 = (2 * m_0) / (n_zink_konstI * R_spez_zink * e_0**2)
print(tau_zink2.to('second'))
j = Q_(1, 'ampere/(millimeter)^2')
v_d_kupfer1 = j / (n_kupfer_konstB * e_0)
print('v_d_kupfer1: ', v_d_kupfer1.to('millimeter/second'))
v_d_kupfer2 = j / (n_kupfer_konstI * e_0)
print('v_d_kupfer2: ', v_d_kupfer2.to('millimeter/second'))
v_d_zink1 = j / (n_zink_konstB * e_0)
print('v_d_zink1: ', v_d_zink1.to('millimeter/second'))
v_d_zink2 = j / (n_zink_konstI * e_0)
print('v_d_zink2: ', v_d_zink2.to('millimeter/second'))
print('E_fermi_kupfer1:', E_fermi(n_kupfer_konstB) )
print('E_fermi_kupfer2:', E_fermi(n_kupfer_konstI) )
print('E_fermi_zink1:', E_fermi(n_zink_konstB) )
print('E_fermi_zink2:', E_fermi(n_zink_konstI) )
print('vt1_kupfer: ', ((2 * E_fermi(n_kupfer_konstB) / m_0)**0.5).to('meter/second'))
print('vt2_kupfer: ', ((2 * E_fermi(n_kupfer_konstI) / m_0)**0.5).to('meter/second'))
print('vt1_zink: ', ((2 * E_fermi(n_zink_konstB) / m_0)**0.5 ).to('meter/second'))
print('vt2_zink: ', ((2 * E_fermi(n_zink_konstI) / m_0)**0.5 ).to('meter/second'))
print('l1_kupfer: ', (tau_kupfer1 * (2 * E_fermi(n_kupfer_konstB) / m_0)**0.5).to('micrometer'))
print('l2_kupfer: ', (tau_kupfer2 * (2 * E_fermi(n_kupfer_konstI) / m_0)**0.5).to('micrometer'))
print('l1_zink: ', (tau_zink1 * (2 * E_fermi(n_zink_konstB) / m_0)**0.5 ).to('micrometer'))
print('l2_zink: ', (tau_zink2 * (2 * E_fermi(n_zink_konstI) / m_0)**0.5 ).to('micrometer'))
print('mu_kupfer1:', (0.5 * tau_kupfer1* e_0/m_0).to('meter^2 / (volt * second)') )
print('mu_kupfer2:', (0.5 * tau_kupfer2* e_0/m_0).to('meter^2 / (volt * second)') )
print('mu_zink1:', (0.5 * tau_zink1* e_0/m_0).to('meter^2 / (volt * second)') )
print('mu_zink2:', (0.5 * tau_zink2* e_0/m_0).to('meter^2 / (volt * second)') )
R_spez_kupfer_lit = Q_(0.017e-06, ' ohm * meter').to('ohm * millimeter^2 / meter')
print ('Literaturwert spezifischer Widerstand Kupfer', R_spez_kupfer_lit)
print('Prozentuale Abweichung: ', R_spez_kupfer/R_spez_kupfer_lit - 1)
R_spez_zink_lit = Q_(0.059e-06, ' ohm * meter').to('ohm * millimeter^2 / meter')
print ('Literaturwert spezifischer Widerstand Zink', R_spez_zink_lit)
print('Prozentuale Abweichung: ', R_spez_zink/R_spez_zink_lit - 1)
| stefangri/s_s_productions | PHY341/V311_Halleffekt/Messdaten/auswertung.py | Python | mit | 15,668 | [
"Avogadro"
] | ac1a2a32f613c9e05334aa4ef36068a1eec20f58f1b33187b27f31ebd8f82b22 |
"""Utilities for registering objects.
This module contains utility functions to register Python objects as
valid COM Servers. The RegisterServer function provides all information
necessary to allow the COM framework to respond to a request for a COM object,
construct the necessary Python object, and dispatch COM events.
"""
import sys
import win32api
import win32con
import pythoncom
import winerror
import os
CATID_PythonCOMServer = "{B3EF80D0-68E2-11D0-A689-00C04FD658FF}"
def _set_subkeys(keyName, valueDict, base=win32con.HKEY_CLASSES_ROOT):
hkey = win32api.RegCreateKey(base, keyName)
try:
for key, value in valueDict.items():
win32api.RegSetValueEx(hkey, key, None, win32con.REG_SZ, value)
finally:
win32api.RegCloseKey(hkey)
def _set_string(path, value, base=win32con.HKEY_CLASSES_ROOT):
"Set a string value in the registry."
win32api.RegSetValue(base,
path,
win32con.REG_SZ,
value)
def _get_string(path, base=win32con.HKEY_CLASSES_ROOT):
"Get a string value from the registry."
try:
return win32api.RegQueryValue(base, path)
except win32api.error:
return None
def _remove_key(path, base=win32con.HKEY_CLASSES_ROOT):
"Remove a string from the registry."
try:
win32api.RegDeleteKey(base, path)
except win32api.error, (code, fn, msg):
if code != winerror.ERROR_FILE_NOT_FOUND:
raise win32api.error, (code, fn, msg)
def recurse_delete_key(path, base=win32con.HKEY_CLASSES_ROOT):
"""Recursively delete registry keys.
This is needed since you can't blast a key when subkeys exist.
"""
try:
h = win32api.RegOpenKey(base, path)
except win32api.error, (code, fn, msg):
if code != winerror.ERROR_FILE_NOT_FOUND:
raise win32api.error, (code, fn, msg)
else:
# parent key found and opened successfully. do some work, making sure
# to always close the thing (error or no).
try:
# remove all of the subkeys
while 1:
try:
subkeyname = win32api.RegEnumKey(h, 0)
except win32api.error, (code, fn, msg):
if code != winerror.ERROR_NO_MORE_ITEMS:
raise win32api.error, (code, fn, msg)
break
recurse_delete_key(path + '\\' + subkeyname, base)
# remove the parent key
_remove_key(path, base)
finally:
win32api.RegCloseKey(h)
def _cat_registrar():
return pythoncom.CoCreateInstance(
pythoncom.CLSID_StdComponentCategoriesMgr,
None,
pythoncom.CLSCTX_INPROC_SERVER,
pythoncom.IID_ICatRegister
)
def _find_localserver_exe(mustfind):
if not sys.platform.startswith("win32"):
return sys.executable
if pythoncom.__file__.find("_d") < 0:
exeBaseName = "pythonw.exe"
else:
exeBaseName = "pythonw_d.exe"
# First see if in the same directory as this .EXE
exeName = os.path.join( os.path.split(sys.executable)[0], exeBaseName )
if not os.path.exists(exeName):
# See if in our sys.prefix directory
exeName = os.path.join( sys.prefix, exeBaseName )
if not os.path.exists(exeName):
# See if in our sys.prefix/pcbuild directory (for developers)
exeName = os.path.join( sys.prefix, "PCbuild", exeBaseName )
if not os.path.exists(exeName):
# See if the registry has some info.
try:
key = "SOFTWARE\\Python\\PythonCore\\%s\\InstallPath" % sys.winver
path = win32api.RegQueryValue( win32con.HKEY_LOCAL_MACHINE, key )
exeName = os.path.join( path, exeBaseName )
except (AttributeError,win32api.error):
pass
if not os.path.exists(exeName):
if mustfind:
raise RuntimeError, "Can not locate the program '%s'" % exeBaseName
return None
return exeName
def _find_localserver_module():
import win32com.server
path = win32com.server.__path__[0]
baseName = "localserver"
pyfile = os.path.join(path, baseName + ".py")
try:
os.stat(pyfile)
except os.error:
# See if we have a compiled extension
if __debug__:
ext = ".pyc"
else:
ext = ".pyo"
pyfile = os.path.join(path, baseName + ext)
try:
os.stat(pyfile)
except os.error:
raise RuntimeError, "Can not locate the Python module 'win32com.server.%s'" % baseName
return pyfile
def RegisterServer(clsid,
pythonInstString=None,
desc=None,
progID=None, verProgID=None,
defIcon=None,
threadingModel="both",
policy=None,
catids=[], other={},
addPyComCat=None,
dispatcher = None,
clsctx = None,
addnPath = None,
):
"""Registers a Python object as a COM Server. This enters almost all necessary
information in the system registry, allowing COM to use the object.
clsid -- The (unique) CLSID of the server.
pythonInstString -- A string holding the instance name that will be created
whenever COM requests a new object.
desc -- The description of the COM object.
progID -- The user name of this object (eg, Word.Document)
verProgID -- The user name of this version's implementation (eg Word.6.Document)
defIcon -- The default icon for the object.
threadingModel -- The threading model this object supports.
policy -- The policy to use when creating this object.
catids -- A list of category ID's this object belongs in.
other -- A dictionary of extra items to be registered.
addPyComCat -- A flag indicating if the object should be added to the list
of Python servers installed on the machine. If None (the default)
then it will be registered when running from python source, but
not registered if running in a frozen environment.
dispatcher -- The dispatcher to use when creating this object.
clsctx -- One of the CLSCTX_* constants.
addnPath -- An additional path the COM framework will add to sys.path
before attempting to create the object.
"""
### backwards-compat check
### Certain policies do not require a "class name", just the policy itself.
if not pythonInstString and not policy:
raise TypeError, 'You must specify either the Python Class or Python Policy which implement the COM object.'
keyNameRoot = "CLSID\\%s" % str(clsid)
_set_string(keyNameRoot, desc)
# Also register as an "Application" so DCOM etc all see us.
_set_string("AppID\\%s" % clsid, progID)
# Depending on contexts requested, register the specified server type.
# Set default clsctx.
if not clsctx:
clsctx = pythoncom.CLSCTX_INPROC_SERVER | pythoncom.CLSCTX_LOCAL_SERVER
# And if we are frozen, ignore the ones that don't make sense in this
# context.
if pythoncom.frozen:
assert sys.frozen, "pythoncom is frozen, but sys.frozen is not set - don't know the context!"
if sys.frozen == "dll":
clsctx = clsctx & pythoncom.CLSCTX_INPROC_SERVER
else:
clsctx = clsctx & pythoncom.CLSCTX_LOCAL_SERVER
# Now setup based on the clsctx left over.
if clsctx & pythoncom.CLSCTX_INPROC_SERVER:
# get the module to use for registration.
# nod to Gordon's installer - if sys.frozen and sys.frozendllhandle
# exist, then we are being registered via a DLL - use this DLL as the
# file name.
if pythoncom.frozen:
if hasattr(sys, "frozendllhandle"):
dllName = win32api.GetModuleFileName(sys.frozendllhandle)
else:
raise RuntimeError, "We appear to have a frozen DLL, but I don't know the DLL to use"
else:
# Normal case - running from .py file, so register pythoncom's DLL.
dllName = os.path.basename(pythoncom.__file__)
_set_subkeys(keyNameRoot + "\\InprocServer32",
{ None : dllName,
"ThreadingModel" : threadingModel,
})
else: # Remove any old InProcServer32 registrations
_remove_key(keyNameRoot + "\\InprocServer32")
if clsctx & pythoncom.CLSCTX_LOCAL_SERVER:
if pythoncom.frozen:
# If we are frozen, we write "{exe} /Automate", just
# like "normal" .EXEs do
exeName = win32api.GetShortPathName(sys.executable)
command = '%s /Automate' % (exeName,)
else:
# Running from .py sources - we need to write
# 'python.exe win32com\server\localserver.py {clsid}"
exeName = _find_localserver_exe(1)
exeName = win32api.GetShortPathName(exeName)
pyfile = _find_localserver_module()
command = '%s "%s" %s' % (exeName, pyfile, str(clsid))
_set_string(keyNameRoot + '\\LocalServer32', command)
else: # Remove any old LocalServer32 registrations
_remove_key(keyNameRoot + "\\LocalServer32")
if pythonInstString:
_set_string(keyNameRoot + '\\PythonCOM', pythonInstString)
else:
_remove_key(keyNameRoot + '\\PythonCOM')
if policy:
_set_string(keyNameRoot + '\\PythonCOMPolicy', policy)
else:
_remove_key(keyNameRoot + '\\PythonCOMPolicy')
if dispatcher:
_set_string(keyNameRoot + '\\PythonCOMDispatcher', dispatcher)
else:
_remove_key(keyNameRoot + '\\PythonCOMDispatcher')
if defIcon:
_set_string(keyNameRoot + '\\DefaultIcon', defIcon)
else:
_remove_key(keyNameRoot + '\\DefaultIcon')
if addnPath:
_set_string(keyNameRoot + "\\PythonCOMPath", addnPath)
else:
_remove_key(keyNameRoot + "\\PythonCOMPath")
if addPyComCat is None:
addPyComCat = pythoncom.frozen == 0
if addPyComCat:
catids = catids + [ CATID_PythonCOMServer ]
# Set up the implemented categories
if catids:
regCat = _cat_registrar()
regCat.RegisterClassImplCategories(clsid, catids)
# set up any other reg values they might have
if other:
for key, value in other.items():
_set_string(keyNameRoot + '\\' + key, value)
if progID:
# set the progID as the most specific that was given to us
if verProgID:
_set_string(keyNameRoot + '\\ProgID', verProgID)
else:
_set_string(keyNameRoot + '\\ProgID', progID)
# Set up the root entries - version independent.
if desc:
_set_string(progID, desc)
_set_string(progID + '\\CLSID', str(clsid))
# Set up the root entries - version dependent.
if verProgID:
# point from independent to the current version
_set_string(progID + '\\CurVer', verProgID)
# point to the version-independent one
_set_string(keyNameRoot + '\\VersionIndependentProgID', progID)
# set up the versioned progID
if desc:
_set_string(verProgID, desc)
_set_string(verProgID + '\\CLSID', str(clsid))
def GetUnregisterServerKeys(clsid, progID=None, verProgID=None, customKeys = None):
"""Given a server, return a list of of ("key", root), which are keys recursively
and uncondtionally deleted at unregister or uninstall time.
"""
# remove the main CLSID registration
ret = [("CLSID\\%s" % str(clsid), win32con.HKEY_CLASSES_ROOT)]
# remove the versioned ProgID registration
if verProgID:
ret.append((verProgID, win32con.HKEY_CLASSES_ROOT))
# blow away the independent ProgID. we can't leave it since we just
# torched the class.
### could potentially check the CLSID... ?
if progID:
ret.append((progID, win32con.HKEY_CLASSES_ROOT))
# The DCOM config tool may write settings to the AppID key for our CLSID
ret.append( ("AppID\\%s" % str(clsid), win32con.HKEY_CLASSES_ROOT) )
# Any custom keys?
if customKeys:
ret = ret + customKeys
return ret
def UnregisterServer(clsid, progID=None, verProgID=None, customKeys = None):
"""Unregisters a Python COM server."""
for args in GetUnregisterServerKeys(clsid, progID, verProgID, customKeys ):
recurse_delete_key(*args)
### it might be nice at some point to "roll back" the independent ProgID
### to an earlier version if one exists, and just blowing away the
### specified version of the ProgID (and its corresponding CLSID)
### another time, though...
### NOTE: ATL simply blows away the above three keys without the
### potential checks that I describe. Assuming that defines the
### "standard" then we have no additional changes necessary.
def GetRegisteredServerOption(clsid, optionName):
"""Given a CLSID for a server and option name, return the option value
"""
keyNameRoot = "CLSID\\%s\\%s" % (str(clsid), str(optionName))
return _get_string(keyNameRoot)
def _get(ob, attr, default=None):
try:
return getattr(ob, attr)
except AttributeError:
pass
# look up the base classes
try:
bases = ob.__bases__
except AttributeError:
# ob is not a class - no probs.
return default
for base in bases:
val = _get(base, attr, None)
if val is not None:
return val
return default
def RegisterClasses(*classes, **flags):
quiet = flags.has_key('quiet') and flags['quiet']
debugging = flags.has_key('debug') and flags['debug']
for cls in classes:
clsid = cls._reg_clsid_
progID = _get(cls, '_reg_progid_')
desc = _get(cls, '_reg_desc_', progID)
spec = _get(cls, '_reg_class_spec_')
verProgID = _get(cls, '_reg_verprogid_')
defIcon = _get(cls, '_reg_icon_')
threadingModel = _get(cls, '_reg_threading_', 'both')
catids = _get(cls, '_reg_catids_', [])
options = _get(cls, '_reg_options_', {})
policySpec = _get(cls, '_reg_policy_spec_')
clsctx = _get(cls, '_reg_clsctx_')
tlb_filename = _get(cls, '_reg_typelib_filename_')
# default to being a COM category only when not frozen.
addPyComCat = not _get(cls, '_reg_disable_pycomcat_', pythoncom.frozen!=0)
addnPath = None
if debugging:
# If the class has a debugging dispatcher specified, use it, otherwise
# use our default dispatcher.
dispatcherSpec = _get(cls, '_reg_debug_dispatcher_spec_')
if dispatcherSpec is None:
dispatcherSpec = "win32com.server.dispatcher.DefaultDebugDispatcher"
# And remember the debugging flag as servers may wish to use it at runtime.
debuggingDesc = "(for debugging)"
options['Debugging'] = "1"
else:
dispatcherSpec = _get(cls, '_reg_dispatcher_spec_')
debuggingDesc = ""
options['Debugging'] = "0"
if spec is None:
moduleName = cls.__module__
if moduleName == '__main__':
# Use argv[0] to determine the module name.
try:
# Use the win32api to find the case-sensitive name
moduleName = os.path.splitext(win32api.FindFiles(sys.argv[0])[0][8])[0]
except (IndexError, win32api.error):
# Can't find the script file - the user must explicitly set the _reg_... attribute.
raise TypeError, "Can't locate the script hosting the COM object - please set _reg_class_spec_ in your object"
spec = moduleName + "." + cls.__name__
# Frozen apps don't need their directory on sys.path
if not pythoncom.frozen:
scriptDir = os.path.split(sys.argv[0])[0]
if not scriptDir: scriptDir = "."
addnPath = win32api.GetFullPathName(scriptDir)
RegisterServer(clsid, spec, desc, progID, verProgID, defIcon,
threadingModel, policySpec, catids, options,
addPyComCat, dispatcherSpec, clsctx, addnPath)
if not quiet:
print 'Registered:', progID or spec, debuggingDesc
# Register the typelibrary
if tlb_filename:
tlb_filename = os.path.abspath(tlb_filename)
typelib = pythoncom.LoadTypeLib(tlb_filename)
pythoncom.RegisterTypeLib(typelib, tlb_filename)
if not quiet:
print 'Registered type library:', tlb_filename
extra = flags.get('finalize_register')
if extra:
extra()
def UnregisterClasses(*classes, **flags):
quiet = flags.has_key('quiet') and flags['quiet']
for cls in classes:
clsid = cls._reg_clsid_
progID = _get(cls, '_reg_progid_')
verProgID = _get(cls, '_reg_verprogid_')
customKeys = _get(cls, '_reg_remove_keys_')
unregister_typelib = _get(cls, '_reg_typelib_filename_') is not None
UnregisterServer(clsid, progID, verProgID, customKeys)
if not quiet:
print 'Unregistered:', progID or str(clsid)
if unregister_typelib:
tlb_guid = _get(cls, "_typelib_guid_")
if tlb_guid is None:
# I guess I could load the typelib, but they need the GUID anyway.
print "Have typelib filename, but no GUID - can't unregister"
else:
major, minor = _get(cls, "_typelib_version_", (1,0))
lcid = _get(cls, "_typelib_lcid_", 0)
try:
pythoncom.UnRegisterTypeLib(tlb_guid, major, minor, lcid)
if not quiet:
print 'Unregistered type library'
except pythoncom.com_error:
pass
extra = flags.get('finalize_unregister')
if extra:
extra()
#
# Unregister info is for installers or external uninstallers.
# The WISE installer, for example, first registers the COM server,
# then queries for the Unregister info, appending it to its
# install log. Uninstalling the package will then uninstall the server.
def UnregisterInfoClasses(*classes, **flags):
ret = []
for cls in classes:
clsid = cls._reg_clsid_
progID = _get(cls, '_reg_progid_')
verProgID = _get(cls, '_reg_verprogid_')
customKeys = _get(cls, '_reg_remove_keys_')
ret = ret + GetUnregisterServerKeys(clsid, progID, verProgID, customKeys)
return ret
def UseCommandLine(*classes, **flags):
unregisterInfo = '--unregister_info' in sys.argv
unregister = '--unregister' in sys.argv
flags['quiet'] = flags.get('quiet',0) or '--quiet' in sys.argv
flags['debug'] = flags.get('debug',0) or '--debug' in sys.argv
flags['unattended'] = flags.get('unattended',0) or '--unattended' in sys.argv
if unregisterInfo:
return UnregisterInfoClasses(*classes, **flags)
try:
if unregister:
UnregisterClasses(*classes, **flags)
else:
RegisterClasses(*classes, **flags)
except win32api.error, exc:
# If we are on xp+ and have "access denied", retry using
# ShellExecuteEx with 'runas' verb to force elevation (vista) and/or
# admin login dialog (vista/xp)
if flags['unattended'] or exc[0] != winerror.ERROR_ACCESS_DENIED \
or sys.getwindowsversion()[0] < 5:
raise
from win32com.shell.shell import ShellExecuteEx
from win32com.shell import shellcon
import win32process, win32event
import winxpgui # we've already checked we are running XP above
if not flags['quiet']:
print "Requesting elevation and retrying..."
new_params = " ".join(['"' + a + '"' for a in sys.argv])
# specifying the parent means the dialog is centered over our window,
# which is a good usability clue.
# hwnd is unlikely on the command-line, but flags may come from elsewhere
hwnd = flags.get('hwnd', None)
if hwnd is None:
try:
hwnd = winxpgui.GetConsoleWindow()
except winxpgui.error:
hwnd = 0
rc = ShellExecuteEx(hwnd=hwnd,
fMask=shellcon.SEE_MASK_NOCLOSEPROCESS,
lpVerb="runas",
lpFile=win32api.GetShortPathName(sys.executable),
lpParameters=new_params,
lpDirectory=os.getcwd(),
nShow=win32con.SW_SHOW)
# Output is lost to the new console which opens, so the
# best we can do is get the exit code of the process.
hproc = rc['hProcess']
win32event.WaitForSingleObject(hproc, win32event.INFINITE)
exit_code = win32process.GetExitCodeProcess(hproc)
if exit_code:
# Even if quiet you get to see this error.
print "Error: registration failed (exit code %s)." % exit_code
print "Please re-execute this command from an elevated command-prompt"
print "to see details about the error."
else:
if not flags['quiet']:
print "Elevated process succeeded."
def RegisterPyComCategory():
""" Register the Python COM Server component category.
"""
regCat = _cat_registrar()
regCat.RegisterCategories( [ (CATID_PythonCOMServer,
0x0409,
"Python COM Server") ] )
if not pythoncom.frozen:
try:
win32api.RegQueryValue(win32con.HKEY_CLASSES_ROOT,
'Component Categories\\%s' % CATID_PythonCOMServer)
except win32api.error:
try:
RegisterPyComCategory()
except pythoncom.error: # Error with the COM category manager - oh well.
pass
| leighpauls/k2cro4 | third_party/python_26/Lib/site-packages/win32com/server/register.py | Python | bsd-3-clause | 20,518 | [
"BLAST"
] | 55ce482ee3ce52083024e82268196baf92f305b85e4558606ec97cad390a578a |
# -*- coding: utf-8 -*-
import dxr.plugins
import re
import cgi
import urllib
""" Regular expression for matching urls
Credits to: http://stackoverflow.com/a/1547940
"""
pat = "\[(https?://[A-Za-z0-9\-\._~:\/\?#[\]@!\$&'()*\+,;=%]+\.[A-Za-z0-9\-\._~:\/\?#[\]@!\$&'()*\+,;=%]+)\]"
pat += "|\((https?://[A-Za-z0-9\-\._~:\/\?#[\]@!\$&'()*\+,;=%]+\.[A-Za-z0-9\-\._~:\/\?#[\]@!\$&'()*\+,;=%]+)\)"
pat += "|(https?://[A-Za-z0-9\-\._~:\/\?#[\]@!\$&'()*\+,;=%]+\.[A-Za-z0-9\-\._~:\/\?#[\]@!\$&'()*\+,;=%]+)"
urlFinder = re.compile(pat)
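# The three alternatives above capture, respectively:
#   group(1): a URL wrapped in square brackets, e.g. "[http://example.com/docs]"
#   group(2): a URL wrapped in parentheses,     e.g. "(http://example.com/docs)"
#   group(3): a bare URL,                       e.g. "http://example.com/docs"
# (the example.com URLs are illustrative only)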
def load(tree, conn):
# Nothing to do here
pass
class UrlHtmlifier:
def __init__(self, text):
self.text = text
def refs(self):
for m in urlFinder.finditer(self.text):
try:
if m.group(1):
url = m.group(1).decode('utf-8')
start, end = m.start(1), m.end(1)
elif m.group(2):
url = m.group(2).decode('utf-8')
start, end = m.start(2), m.end(2)
else:
url = m.group(3).decode('utf-8')
start, end = m.start(3), m.end(3)
except UnicodeDecodeError:
pass
else:
yield start, end, [{
'text': "Follow link",
'title': "Visit %s" % url,
'href': url,
'icon': 'external_link'
}]
def regions(self):
return []
def annotations(self):
return []
def links(self):
return []
def htmlify(path, text):
return UrlHtmlifier(text)
__all__ = dxr.plugins.htmlifier_exports()
| jonasfj/dxr | dxr/plugins/urllink/htmlifier.py | Python | mit | 1,700 | [
"VisIt"
] | 4a6b0ef17af551b9cd22fa21f6f79c7f3ab6aad5d60a0e48d383b66d6a58a58f |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import unittest
from pymatgen.analysis.magnetism.jahnteller import *
from pymatgen.io.cif import CifParser
from pymatgen.util.testing import PymatgenTest
class JahnTellerTest(unittest.TestCase):
def setUp(self):
self.jt = JahnTellerAnalyzer()
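# The expectations asserted below follow the usual octahedral Jahn-Teller pattern:
# "strong" when the e_g orbitals are unevenly occupied (e.g. high-spin d4, low-spin d7, d9),
# "weak" when only the t2g set is unevenly occupied, and "none" for symmetric
# configurations (d3, high-spin d5, d8, d10).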
def test_jahn_teller_species_analysis(self):
# 1 d-shell electron
m = self.jt.get_magnitude_of_effect_from_species("Ti3+", "", "oct")
self.assertEqual(m, "weak")
# 2 d-shell electrons
m = self.jt.get_magnitude_of_effect_from_species("Ti2+", "", "oct")
self.assertEqual(m, "weak")
m = self.jt.get_magnitude_of_effect_from_species("V3+", "", "oct")
self.assertEqual(m, "weak")
# 3
m = self.jt.get_magnitude_of_effect_from_species("V2+", "", "oct")
self.assertEqual(m, "none")
m = self.jt.get_magnitude_of_effect_from_species("Cr3+", "", "oct")
self.assertEqual(m, "none")
# 4
m = self.jt.get_magnitude_of_effect_from_species("Cr2+", "high", "oct")
self.assertEqual(m, "strong")
m = self.jt.get_magnitude_of_effect_from_species("Cr2+", "low", "oct")
self.assertEqual(m, "weak")
m = self.jt.get_magnitude_of_effect_from_species("Mn3+", "high", "oct")
self.assertEqual(m, "strong")
m = self.jt.get_magnitude_of_effect_from_species("Mn3+", "low", "oct")
self.assertEqual(m, "weak")
# 5
m = self.jt.get_magnitude_of_effect_from_species("Mn2+", "high", "oct")
self.assertEqual(m, "none")
m = self.jt.get_magnitude_of_effect_from_species("Mn2+", "low", "oct")
self.assertEqual(m, "weak")
m = self.jt.get_magnitude_of_effect_from_species("Fe3+", "high", "oct")
self.assertEqual(m, "none")
m = self.jt.get_magnitude_of_effect_from_species("Fe3+", "low", "oct")
self.assertEqual(m, "weak")
# 6
m = self.jt.get_magnitude_of_effect_from_species("Fe2+", "high", "oct")
self.assertEqual(m, "weak")
m = self.jt.get_magnitude_of_effect_from_species("Fe2+", "low", "oct")
self.assertEqual(m, "none")
m = self.jt.get_magnitude_of_effect_from_species("Co3+", "high", "oct")
self.assertEqual(m, "weak")
m = self.jt.get_magnitude_of_effect_from_species("Co3+", "low", "oct")
self.assertEqual(m, "none")
# 7
m = self.jt.get_magnitude_of_effect_from_species("Co2+", "high", "oct")
self.assertEqual(m, "weak")
m = self.jt.get_magnitude_of_effect_from_species("Co2+", "low", "oct")
self.assertEqual(m, "strong")
# 8
m = self.jt.get_magnitude_of_effect_from_species("Ni2+", "", "oct")
self.assertEqual(m, "none")
# 9
m = self.jt.get_magnitude_of_effect_from_species("Cu2+", "", "oct")
self.assertEqual(m, "strong")
# 10
m = self.jt.get_magnitude_of_effect_from_species("Cu+", "", "oct")
self.assertEqual(m, "none")
m = self.jt.get_magnitude_of_effect_from_species("Zn2+", "", "oct")
self.assertEqual(m, "none")
def test_jahn_teller_structure_analysis(self):
parser = CifParser(os.path.join(PymatgenTest.TEST_FILES_DIR, "LiFePO4.cif"))
LiFePO4 = parser.get_structures()[0]
parser = CifParser(os.path.join(PymatgenTest.TEST_FILES_DIR, "Fe3O4.cif"))
Fe3O4 = parser.get_structures()[0]
self.assertTrue(self.jt.is_jahn_teller_active(LiFePO4))
self.assertTrue(self.jt.is_jahn_teller_active(Fe3O4))
LiFePO4_analysis = {
"active": True,
"strength": "weak",
"sites": [
{
"ligand": "O2-",
"ligand_bond_length_spread": 0.2111,
"ligand_bond_lengths": set([2.2951, 2.2215, 2.2383, 2.1382, 2.084, 2.0863]),
"strength": "weak",
"motif": "oct",
"motif_order_parameter": 0.1441,
"site_indices": [4, 5, 6, 7],
"species": "Fe2+",
"spin_state": "unknown",
}
],
}
jt_predicted = self.jt.get_analysis(LiFePO4)
# order does not matter
jt_predicted["sites"][0]["ligand_bond_lengths"] = set(jt_predicted["sites"][0]["ligand_bond_lengths"])
self.assertDictEqual(LiFePO4_analysis, jt_predicted)
def test_mu_so(self):
SpeciesCo = Species(symbol="Co", oxidation_state=4)
self.assertAlmostEqual(np.sqrt(3), JahnTellerAnalyzer.mu_so(SpeciesCo, "oct", "low"))
self.assertAlmostEqual(np.sqrt(35), JahnTellerAnalyzer.mu_so(SpeciesCo, "oct", "high"))
SpeciesNa = Species(symbol="Na", oxidation_state=1)
self.assertEqual(None, JahnTellerAnalyzer.mu_so(SpeciesNa, "oct", "high"))
if __name__ == "__main__":
unittest.main()
| gmatteo/pymatgen | pymatgen/analysis/magnetism/tests/test_jahnteller.py | Python | mit | 5,003 | [
"pymatgen"
] | 8aa44a8671ad185787b45318964ca6ae2a84016d50bb230b1c14fe2ee8b7985a |
#
# A file that opens the neuroConstruct project LarkumEtAl2009 and runs multiple simulations, stimulating each terminal apical branch with a varying number of synapses.
#
# Author: Matteo Farinella
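# Outline of the script (descriptive summary): the project is loaded and generated once;
# then, for each number of stimulated branches, random terminal apical branches are
# selected and each trial is run four times (a control run plus three levels of
# background excitation); finally the recorded voltage traces are pulled back from the
# simulation directories and copied into the recordings folder.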
from sys import *
from java.io import File
from java.lang import System
from java.util import ArrayList
from ucl.physiol.neuroconstruct.project import ProjectManager
from ucl.physiol.neuroconstruct.neuron import NeuronFileManager
from ucl.physiol.neuroconstruct.utils import NumberGenerator
from ucl.physiol.neuroconstruct.nmodleditor.processes import ProcessManager
from ucl.physiol.neuroconstruct.simulation import SimulationData
from ucl.physiol.neuroconstruct.gui import SimulationRerunFrame
from ucl.physiol.neuroconstruct.gui.plotter import PlotManager
from ucl.physiol.neuroconstruct.gui.plotter import PlotCanvas
from ucl.physiol.neuroconstruct.dataset import DataSet
from math import *
import time
import shutil
import random
import os
import subprocess
# Load the original project
projName = "LarkumEtAl2009"
projFile = File("/home/matteo/neuroConstruct/models/"+projName+"/"+projName+".ncx")
print "Loading project from file: " + projFile.getAbsolutePath()+", exists: "+ str(projFile.exists())
pm = ProjectManager()
myProject = pm.loadProject(projFile)
simConfig = myProject.simConfigInfo.getSimConfig("Default Simulation Configuration")#
randomseed = random.randint(1000,5000)
pm.doGenerate(simConfig.getName(), randomseed)
while pm.isGenerating():
print "Waiting for the project to be generated..."
time.sleep(2)
numGenerated = myProject.generatedCellPositions.getNumberInAllCellGroups()
simsRunning = []
def updateSimsRunning():
simsFinished = []
for sim in simsRunning:
timeFile = File(myProject.getProjectMainDirectory(), "simulations/"+sim+"/time.dat")
#print "Checking file: "+timeFile.getAbsolutePath() +", exists: "+ str(timeFile.exists())
if (timeFile.exists()):
simsFinished.append(sim)
if(len(simsFinished)>0):
for sim in simsFinished:
simsRunning.remove(sim)
if numGenerated > 0:
print "Generating NEURON scripts..."
myProject.neuronFileManager.setQuitAfterRun(1) # Remove this line to leave the NEURON sim windows open after finishing
myProject.neuronSettings.setCopySimFiles(1) # 1 copies hoc/mod files to PySim_0 etc. and will allow multiple sims to run at once
myProject.neuronSettings.setGraphicsMode(False) # Run NEURON without GUI
# Note same network structure will be used for each!
# Change this number to the number of processors you wish to use on your local machine
maxNumSimultaneousSims = 100
#multiple simulation settings:
prefix = "" #string that will be added to the name of the simulations to identify the simulation set
trials = 1
Nbranches = 1
Configuration = ["NMDAspike input"]
apical_branch = ["apical17","apical18","apical21","apical23","apical24","apical25","apical27","apical28","apical31","apical34","apical35","apical37","apical38","apical44","apical46","apical52","apical53","apical54","apical56","apical57","apical61","apical62","apical65","apical67","apical68","apical69","apical72","apical73"]
apical_stim = ["NMDAs_17","NMDAs_18","NMDAs_21","NMDAs_23","NMDAs_24","NMDAs_25","NMDAs_27","NMDAs_28","NMDAs_31","NMDAs_34","NMDAs_35","NMDAs_37","NMDAs_38","NMDAs_44","NMDAs_46","NMDAs_52","NMDAs_53","NMDAs_54","NMDAs_56","NMDAs_57","NMDAs_61","NMDAs_62","NMDAs_65","NMDAs_67","NMDAs_68","NMDAs_69","NMDAs_72","NMDAs_73"]
apical_ID =[4460,4571,4793,4961,4994,5225,5477,5526,5990,6221,6274,6523,6542,6972,7462,8026,8044,8088,8324,8468,8685,8800,8966,9137,9160,9186,9592,9639]
apical_lenght = [98,69,78,26,34,166,161,49,143,55,87,25,38,73,194,19,22,26,25,129,138,95,42,89,21,62,26,18]
apical_plot = ["pyrCML_apical17_V","pyrCML_apical18_V","pyrCML_apical21_V","pyrCML_apical23_V","pyrCML_apical24_V","pyrCML_apical25_V","pyrCML_apical27_V","pyrCML_apical28_V","pyrCML_apical31_V","pyrCML_apical34_V","pyrCML_apical35_V","pyrCML_apical37_V","pyrCML_apical38_V","pyrCML_apical44_V","pyrCML_apical46_V","pyrCML_apical52_V","pyrCML_apical53_V","pyrCML_apical54_V","pyrCML_apical56_V","pyrCML_apical57_V","pyrCML_apical61_V","pyrCML_apical62_V","pyrCML_apical65_V","pyrCML_apical67_V","pyrCML_apical68_V","pyrCML_apical69_V","pyrCML_apical72_V","pyrCML_apical73_V"]
print "Going to run " +str(int(trials*Nbranches)) + " simulations"
refStored = []
simGroups = ArrayList()
simInputs = ArrayList()
simPlots = ArrayList()
stringConfig = Configuration[0]
print "nConstruct using SIMULATION CONFIGURATION: " +stringConfig
simConfig = myProject.simConfigInfo.getSimConfig(stringConfig)
for y in range(0, Nbranches):
j=y+1
selectedBranches = []
prefix = "b"+str(j) #number of branches stimulated
print
print "-----------------------------------------------------------------------"
print str(trials)+" trials, stimulating " +str(int(j))+" branches"
print "reference name: " + prefix +"..."
print "-----------------------------------------------------------------------"
print
for i in range(0, trials):
print ""
selectedBranches = []
#empty vectors
simGroups = ArrayList()
simInputs = ArrayList()
simPlots = ArrayList()
######## Selecting j random different apical branches, to Input and Plot ###############
for r in range(0,j):
randomApicalBranch = random.randint(0,int(len(apical_branch))-1)
while randomApicalBranch in selectedBranches:
randomApicalBranch = random.randint(0,int(len(apical_branch))-1)
selectedBranches.append(randomApicalBranch)
print "selected branch "+apical_branch[randomApicalBranch]
simInputs.add(apical_stim[randomApicalBranch])
simPlots.add(apical_plot[randomApicalBranch])
simGroups.add("pyrCML_group")
simPlots.add("pyrCML_soma_V")
simPlots.add("pyrCML_CaZone_V")
simConfig.setCellGroups(simGroups)
simConfig.setInputs(simInputs)
simConfig.setPlots(simPlots)
###################################### control #########################################
simRef = prefix+"control"+str(i)
print "Simref: "+simRef
myProject.simulationParameters.setReference(simRef)
refStored.append(simRef)
##### RUN BLOCK control #####
randomseed = random.randint(1000,5000)
pm.doGenerate(simConfig.getName(), randomseed)
while pm.isGenerating():
print "Waiting for the project to be generated..."
time.sleep(2)
myProject.neuronFileManager.setSuggestedRemoteRunTime(10)
myProject.neuronFileManager.generateTheNeuronFiles(simConfig, None, NeuronFileManager.RUN_HOC, randomseed)
print "Generated NEURON files for: "+simRef
compileProcess = ProcessManager(myProject.neuronFileManager.getMainHocFile())
compileSuccess = compileProcess.compileFileWithNeuron(0,0)
print "Compiled NEURON files for: "+simRef
if compileSuccess:
pm.doRunNeuron(simConfig)
print "Set running simulation: "+simRef
time.sleep(2) # Wait for sim to be kicked off
#####################'''
######## Running the same configuration + background exc 1500 ###############
simInputs.add("backgroundExc")
simConfig.setInputs(simInputs)
print "group generated: "+simConfig.getCellGroups().toString()
print "going to stimulate: "+simConfig.getInputs().toString()
print "going to record: "+simConfig.getPlots().toString()
##########################################################################################
simRef = prefix+"exc1500_"+str(i)
print "Simref: "+simRef
myProject.simulationParameters.setReference(simRef)
refStored.append(simRef)
##### RUN BLOCK background exc #####
randomseed = random.randint(1000,5000)
pm.doGenerate(simConfig.getName(), randomseed)
while pm.isGenerating():
print "Waiting for the project to be generated..."
time.sleep(2)
myProject.neuronFileManager.setSuggestedRemoteRunTime(10)
myProject.neuronFileManager.generateTheNeuronFiles(simConfig, None, NeuronFileManager.RUN_HOC, randomseed)
print "Generated NEURON files for: "+simRef
compileProcess = ProcessManager(myProject.neuronFileManager.getMainHocFile())
compileSuccess = compileProcess.compileFileWithNeuron(0,0)
print "Compiled NEURON files for: "+simRef
if compileSuccess:
pm.doRunNeuron(simConfig)
print "Set running simulation: "+simRef
time.sleep(2) # Wait for sim to be kicked off
simInputs.remove("backgroundExc")
#####################'''
######## Running the same configuration + background exc 1200 ###############
simInputs.add("backgroundExc1200")
simConfig.setInputs(simInputs)
print "group generated: "+simConfig.getCellGroups().toString()
print "going to stimulate: "+simConfig.getInputs().toString()
print "going to record: "+simConfig.getPlots().toString()
##########################################################################################
simRef = prefix+"exc1200_"+str(i)
print "Simref: "+simRef
myProject.simulationParameters.setReference(simRef)
refStored.append(simRef)
##### RUN BLOCK background exc #####
randomseed = random.randint(1000,5000)
pm.doGenerate(simConfig.getName(), randomseed)
while pm.isGenerating():
print "Waiting for the project to be generated..."
time.sleep(2)
myProject.neuronFileManager.setSuggestedRemoteRunTime(10)
myProject.neuronFileManager.generateTheNeuronFiles(simConfig, None, NeuronFileManager.RUN_HOC, randomseed)
print "Generated NEURON files for: "+simRef
compileProcess = ProcessManager(myProject.neuronFileManager.getMainHocFile())
compileSuccess = compileProcess.compileFileWithNeuron(0,0)
print "Compiled NEURON files for: "+simRef
if compileSuccess:
pm.doRunNeuron(simConfig)
print "Set running simulation: "+simRef
time.sleep(2) # Wait for sim to be kicked off
simInputs.remove("backgroundExc1200")
#####################'''
######## Running the same configuration + background exc 900 ###############
simInputs.add("backgroundExc900")
simConfig.setInputs(simInputs)
print "group generated: "+simConfig.getCellGroups().toString()
print "going to stimulate: "+simConfig.getInputs().toString()
print "going to record: "+simConfig.getPlots().toString()
##########################################################################################
simRef = prefix+"exc900_"+str(i)
print "Simref: "+simRef
myProject.simulationParameters.setReference(simRef)
refStored.append(simRef)
##### RUN BLOCK background exc #####
randomseed = random.randint(1000,5000)
pm.doGenerate(simConfig.getName(), randomseed)
while pm.isGenerating():
print "Waiting for the project to be generated..."
time.sleep(2)
myProject.neuronFileManager.setSuggestedRemoteRunTime(10)
myProject.neuronFileManager.generateTheNeuronFiles(simConfig, None, NeuronFileManager.RUN_HOC, randomseed)
print "Generated NEURON files for: "+simRef
compileProcess = ProcessManager(myProject.neuronFileManager.getMainHocFile())
compileSuccess = compileProcess.compileFileWithNeuron(0,0)
print "Compiled NEURON files for: "+simRef
if compileSuccess:
pm.doRunNeuron(simConfig)
print "Set running simulation: "+simRef
time.sleep(2) # Wait for sim to be kicked off
simInputs.remove("backgroundExc900")
#####################'''
### end for i (trials)
### end for j (noise)
######## Extracting simulations results ###############
time.sleep(60)
y=-1
for sim in refStored:
y=y+1
pullSimFilename = "pullsim.sh"
path = "/home/matteo/neuroConstruct/models/"+projName
print "\n------ Checking directory: " + path +"/simulations"+"/"+sim
pullsimFile = path+"/simulations/"+sim+"/"+pullSimFilename
if os.path.isfile(pullsimFile):
print pullSimFilename+" exists and will be executed..."
process = subprocess.Popen("cd "+path+"/simulations/"+sim+"/"+";./"+pullSimFilename, shell=True, stdout=subprocess.PIPE)
stdout_value = process.communicate()[0]
process.wait()
else:
print "Simulation not finished"
if os.path.isfile(path+"/simulations/"+sim+"/pyrCML_group_0.dat"):
print "Simulation results recovered from remote cluster."
simDir = File(path+"/simulations/"+sim)
newFileSoma = path+"/recordings/"+sim+".soma"
shutil.copyfile(path+"/simulations/"+sim+"/pyrCML_group_0.dat" , newFileSoma)
for ID in apical_ID:
if os.path.isfile(path+"/simulations/"+sim+"/pyrCML_group_0."+str(ID)+".dat"):
newFileApical = path+"/recordings/"+sim+"_ID"+str(ID)+".apical"
shutil.copyfile(path+"/simulations/"+sim+"/pyrCML_group_0."+str(ID)+".dat" , newFileApical)
print "Simulation was successful. "
print "Results saved."
print
else:
print "Simulation failed!"
### ''' | pgleeson/TestArea | models/LarkumEtAl2009/pythonScripts/PAP_multibranches_exc.py | Python | gpl-2.0 | 13,721 | [
"NEURON"
] | 2c41b01b202de8a8d3c79d8261d1096007d7803991db216686de12ea08ccedde |
"""
Signal handling functions for use with external commerce service.
"""
import json
import logging
from urlparse import urljoin
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.dispatch import receiver
from django.utils.translation import ugettext as _
from edx_rest_api_client.exceptions import HttpClientError
import requests
from microsite_configuration import microsite
from request_cache.middleware import RequestCache
from student.models import UNENROLL_DONE
from openedx.core.djangoapps.commerce.utils import ecommerce_api_client, is_commerce_service_configured
from openedx.core.djangoapps.theming.helpers import get_value
log = logging.getLogger(__name__)
@receiver(UNENROLL_DONE)
def handle_unenroll_done(sender, course_enrollment=None, skip_refund=False,
**kwargs): # pylint: disable=unused-argument
"""
Signal receiver for unenrollments, used to automatically initiate refunds
when applicable.
N.B. this signal is also consumed by lms.djangoapps.shoppingcart.
"""
if not is_commerce_service_configured() or skip_refund:
return
if course_enrollment and course_enrollment.refundable():
try:
request_user = get_request_user() or course_enrollment.user
if isinstance(request_user, AnonymousUser):
# Assume the request was initiated via server-to-server
# api call (presumably Otto). In this case we cannot
# construct a client to call Otto back anyway, because
# the client does not work anonymously, and furthermore,
# there's certainly no need to inform Otto about this request.
return
refund_seat(course_enrollment, request_user)
except: # pylint: disable=bare-except
# don't assume the signal was fired with `send_robust`.
# avoid blowing up other signal handlers by gracefully
# trapping the Exception and logging an error.
log.exception(
"Unexpected exception while attempting to initiate refund for user [%s], course [%s]",
course_enrollment.user.id,
course_enrollment.course_id,
)
def get_request_user():
"""
Helper to get the authenticated user from the current HTTP request (if
applicable).
If the requester of an unenrollment is not the same person as the student
being unenrolled, we authenticate to the commerce service as the requester.
"""
request = RequestCache.get_current_request()
return getattr(request, 'user', None)
def refund_seat(course_enrollment, request_user):
"""
Attempt to initiate a refund for any orders associated with the seat being
unenrolled, using the commerce service.
Arguments:
course_enrollment (CourseEnrollment): a student enrollment
request_user: the user as whom to authenticate to the commerce service
when attempting to initiate the refund.
Returns:
A list of the external service's IDs for any refunds that were initiated
(may be empty).
Raises:
exceptions.SlumberBaseException: for any unhandled HTTP error during
communication with the commerce service.
exceptions.Timeout: if the attempt to reach the commerce service timed
out.
"""
course_key_str = unicode(course_enrollment.course_id)
unenrolled_user = course_enrollment.user
try:
refund_ids = ecommerce_api_client(request_user or unenrolled_user).refunds.post(
{'course_id': course_key_str, 'username': unenrolled_user.username}
)
except HttpClientError, exc:
if exc.response.status_code == 403 and request_user != unenrolled_user:
# this is a known limitation; commerce service does not presently
# support the case of a non-superuser initiating a refund on
# behalf of another user.
log.warning("User [%s] was not authorized to initiate a refund for user [%s] "
"upon unenrollment from course [%s]", request_user.id, unenrolled_user.id, course_key_str)
return []
else:
# no other error is anticipated, so re-raise the Exception
raise exc
if refund_ids:
# at least one refundable order was found.
log.info(
"Refund successfully opened for user [%s], course [%s]: %r",
unenrolled_user.id,
course_key_str,
refund_ids,
)
# XCOM-371: this is a temporary measure to suppress refund-related email
# notifications to students and support@) for free enrollments. This
# condition should be removed when the CourseEnrollment.refundable() logic
# is updated to be more correct, or when we implement better handling (and
# notifications) in Otto for handling reversal of $0 transactions.
if course_enrollment.mode != 'verified':
# 'verified' is the only enrollment mode that should presently
# result in opening a refund request.
log.info(
"Skipping refund email notification for non-verified mode for user [%s], course [%s], mode: [%s]",
course_enrollment.user.id,
course_enrollment.course_id,
course_enrollment.mode,
)
else:
try:
send_refund_notification(course_enrollment, refund_ids)
except: # pylint: disable=bare-except
# don't break, just log a warning
log.warning("Could not send email notification for refund.", exc_info=True)
else:
# no refundable orders were found.
log.debug("No refund opened for user [%s], course [%s]", unenrolled_user.id, course_key_str)
return refund_ids
def create_zendesk_ticket(requester_name, requester_email, subject, body, tags=None):
""" Create a Zendesk ticket via API. """
if not (settings.ZENDESK_URL and settings.ZENDESK_USER and settings.ZENDESK_API_KEY):
log.debug('Zendesk is not configured. Cannot create a ticket.')
return
# Copy the tags to avoid modifying the original list.
tags = list(tags or [])
tags.append('LMS')
# Remove duplicates
tags = list(set(tags))
data = {
'ticket': {
'requester': {
'name': requester_name,
'email': requester_email
},
'subject': subject,
'comment': {'body': body},
'tags': tags
}
}
# Encode the data to create a JSON payload
payload = json.dumps(data)
# Set the request parameters
url = urljoin(settings.ZENDESK_URL, '/api/v2/tickets.json')
user = '{}/token'.format(settings.ZENDESK_USER)
pwd = settings.ZENDESK_API_KEY
headers = {'content-type': 'application/json'}
try:
response = requests.post(url, data=payload, auth=(user, pwd), headers=headers)
# Check for HTTP codes other than 201 (Created)
if response.status_code != 201:
log.error(u'Failed to create ticket. Status: [%d], Body: [%s]', response.status_code, response.content)
else:
log.debug('Successfully created ticket.')
except Exception: # pylint: disable=broad-except
log.exception('Failed to create ticket.')
return
def generate_refund_notification_body(student, refund_ids): # pylint: disable=invalid-name
""" Returns a refund notification message body. """
msg = _(
"A refund request has been initiated for {username} ({email}). "
"To process this request, please visit the link(s) below."
).format(username=student.username, email=student.email)
ecommerce_url_root = get_value('ECOMMERCE_PUBLIC_URL_ROOT', settings.ECOMMERCE_PUBLIC_URL_ROOT)
refund_urls = [urljoin(ecommerce_url_root, '/dashboard/refunds/{}/'.format(refund_id))
for refund_id in refund_ids]
return '{msg}\n\n{urls}'.format(msg=msg, urls='\n'.join(refund_urls))
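# For illustration (hypothetical values): with username "jdoe", email "jdoe@example.com",
# refund id 123 and an ecommerce root of "https://ecommerce.example.com", the body built
# above would read:
#
#   A refund request has been initiated for jdoe (jdoe@example.com). To process this
#   request, please visit the link(s) below.
#
#   https://ecommerce.example.com/dashboard/refunds/123/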
def send_refund_notification(course_enrollment, refund_ids):
""" Notify the support team of the refund request. """
tags = ['auto_refund']
if microsite.is_request_in_microsite():
# this is not presently supported with the external service.
raise NotImplementedError("Unable to send refund processing emails to microsite teams.")
student = course_enrollment.user
subject = _("[Refund] User-Requested Refund")
body = generate_refund_notification_body(student, refund_ids)
requester_name = student.profile.name or student.username
create_zendesk_ticket(requester_name, student.email, subject, body, tags)
| ampax/edx-platform | lms/djangoapps/commerce/signals.py | Python | agpl-3.0 | 8,827 | [
"VisIt"
] | 9d8b3141496388c68e76ed600ecabc7dee8173c8e6da1ce515e5bea5edb74dbe |
# encoding: utf-8
"""
Job and task components for writing .xml files that the Windows HPC Server
2008 can use to start jobs.
Authors:
* Brian Granger
* MinRK
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import os
import re
import uuid
from xml.etree import ElementTree as ET
from IPython.config.configurable import Configurable
from IPython.utils.traitlets import (
Unicode, Integer, List, Instance,
Enum, Bool
)
#-----------------------------------------------------------------------------
# Job and Task classes
#-----------------------------------------------------------------------------
def as_str(value):
if isinstance(value, str):
return value
elif isinstance(value, bool):
if value:
return 'true'
else:
return 'false'
elif isinstance(value, (int, float)):
return repr(value)
else:
return value
def indent(elem, level=0):
i = "\n" + level*" "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
indent(elem, level+1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
def find_username():
domain = os.environ.get('USERDOMAIN')
username = os.environ.get('USERNAME','')
if domain is None:
return username
else:
return '%s\\%s' % (domain, username)
class WinHPCJob(Configurable):
job_id = Unicode('')
job_name = Unicode('MyJob', config=True)
min_cores = Integer(1, config=True)
max_cores = Integer(1, config=True)
min_sockets = Integer(1, config=True)
max_sockets = Integer(1, config=True)
min_nodes = Integer(1, config=True)
max_nodes = Integer(1, config=True)
unit_type = Unicode("Core", config=True)
auto_calculate_min = Bool(True, config=True)
auto_calculate_max = Bool(True, config=True)
run_until_canceled = Bool(False, config=True)
is_exclusive = Bool(False, config=True)
username = Unicode(find_username(), config=True)
job_type = Unicode('Batch', config=True)
priority = Enum(('Lowest','BelowNormal','Normal','AboveNormal','Highest'),
default_value='Highest', config=True)
requested_nodes = Unicode('', config=True)
project = Unicode('IPython', config=True)
xmlns = Unicode('http://schemas.microsoft.com/HPCS2008/scheduler/')
version = Unicode("2.000")
tasks = List([])
@property
def owner(self):
return self.username
def _write_attr(self, root, attr, key):
s = as_str(getattr(self, attr, ''))
if s:
root.set(key, s)
def as_element(self):
# We have to add _A_-type prefixes to get the attribute order that
# the MSFT XML parser expects.
root = ET.Element('Job')
self._write_attr(root, 'version', '_A_Version')
self._write_attr(root, 'job_name', '_B_Name')
self._write_attr(root, 'unit_type', '_C_UnitType')
self._write_attr(root, 'min_cores', '_D_MinCores')
self._write_attr(root, 'max_cores', '_E_MaxCores')
self._write_attr(root, 'min_sockets', '_F_MinSockets')
self._write_attr(root, 'max_sockets', '_G_MaxSockets')
self._write_attr(root, 'min_nodes', '_H_MinNodes')
self._write_attr(root, 'max_nodes', '_I_MaxNodes')
self._write_attr(root, 'run_until_canceled', '_J_RunUntilCanceled')
self._write_attr(root, 'is_exclusive', '_K_IsExclusive')
self._write_attr(root, 'username', '_L_UserName')
self._write_attr(root, 'job_type', '_M_JobType')
self._write_attr(root, 'priority', '_N_Priority')
self._write_attr(root, 'requested_nodes', '_O_RequestedNodes')
self._write_attr(root, 'auto_calculate_max', '_P_AutoCalculateMax')
self._write_attr(root, 'auto_calculate_min', '_Q_AutoCalculateMin')
self._write_attr(root, 'project', '_R_Project')
self._write_attr(root, 'owner', '_S_Owner')
self._write_attr(root, 'xmlns', '_T_xmlns')
dependencies = ET.SubElement(root, "Dependencies")
etasks = ET.SubElement(root, "Tasks")
for t in self.tasks:
etasks.append(t.as_element())
return root
def tostring(self):
"""Return the string representation of the job description XML."""
root = self.as_element()
indent(root)
txt = ET.tostring(root, encoding="utf-8")
# Now remove the tokens used to order the attributes.
txt = re.sub(r'_[A-Z]_','',txt)
txt = '<?xml version="1.0" encoding="utf-8"?>\n' + txt
return txt
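# For example, an element serialized as <Job _A_Version="2.000" _B_Name="MyJob" ...>
# becomes <Job Version="2.000" Name="MyJob" ...> once the ordering tokens are stripped
# by the re.sub call above.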
def write(self, filename):
"""Write the XML job description to a file."""
txt = self.tostring()
with open(filename, 'w') as f:
f.write(txt)
def add_task(self, task):
"""Add a task to the job.
Parameters
----------
task : :class:`WinHPCTask`
The task object to add.
"""
self.tasks.append(task)
class WinHPCTask(Configurable):
task_id = Unicode('')
task_name = Unicode('')
version = Unicode("2.000")
min_cores = Integer(1, config=True)
max_cores = Integer(1, config=True)
min_sockets = Integer(1, config=True)
max_sockets = Integer(1, config=True)
min_nodes = Integer(1, config=True)
max_nodes = Integer(1, config=True)
unit_type = Unicode("Core", config=True)
command_line = Unicode('', config=True)
work_directory = Unicode('', config=True)
is_rerunnaable = Bool(True, config=True)
std_out_file_path = Unicode('', config=True)
std_err_file_path = Unicode('', config=True)
is_parametric = Bool(False, config=True)
environment_variables = Instance(dict, args=(), config=True)
def _write_attr(self, root, attr, key):
s = as_str(getattr(self, attr, ''))
if s:
root.set(key, s)
def as_element(self):
root = ET.Element('Task')
self._write_attr(root, 'version', '_A_Version')
self._write_attr(root, 'task_name', '_B_Name')
self._write_attr(root, 'min_cores', '_C_MinCores')
self._write_attr(root, 'max_cores', '_D_MaxCores')
self._write_attr(root, 'min_sockets', '_E_MinSockets')
self._write_attr(root, 'max_sockets', '_F_MaxSockets')
self._write_attr(root, 'min_nodes', '_G_MinNodes')
self._write_attr(root, 'max_nodes', '_H_MaxNodes')
self._write_attr(root, 'command_line', '_I_CommandLine')
self._write_attr(root, 'work_directory', '_J_WorkDirectory')
self._write_attr(root, 'is_rerunnaable', '_K_IsRerunnable')
self._write_attr(root, 'std_out_file_path', '_L_StdOutFilePath')
self._write_attr(root, 'std_err_file_path', '_M_StdErrFilePath')
self._write_attr(root, 'is_parametric', '_N_IsParametric')
self._write_attr(root, 'unit_type', '_O_UnitType')
root.append(self.get_env_vars())
return root
def get_env_vars(self):
env_vars = ET.Element('EnvironmentVariables')
for k, v in self.environment_variables.iteritems():
variable = ET.SubElement(env_vars, "Variable")
name = ET.SubElement(variable, "Name")
name.text = k
value = ET.SubElement(variable, "Value")
value.text = v
return env_vars
# By declaring these, we can configure the controller and engine separately!
class IPControllerJob(WinHPCJob):
job_name = Unicode('IPController', config=False)
is_exclusive = Bool(False, config=True)
username = Unicode(find_username(), config=True)
priority = Enum(('Lowest','BelowNormal','Normal','AboveNormal','Highest'),
default_value='Highest', config=True)
requested_nodes = Unicode('', config=True)
project = Unicode('IPython', config=True)
class IPEngineSetJob(WinHPCJob):
job_name = Unicode('IPEngineSet', config=False)
is_exclusive = Bool(False, config=True)
username = Unicode(find_username(), config=True)
priority = Enum(('Lowest','BelowNormal','Normal','AboveNormal','Highest'),
default_value='Highest', config=True)
requested_nodes = Unicode('', config=True)
project = Unicode('IPython', config=True)
class IPControllerTask(WinHPCTask):
task_name = Unicode('IPController', config=True)
controller_cmd = List(['ipcontroller.exe'], config=True)
controller_args = List(['--log-to-file', '--log-level=40'], config=True)
# I don't want these to be configurable
std_out_file_path = Unicode('', config=False)
std_err_file_path = Unicode('', config=False)
min_cores = Integer(1, config=False)
max_cores = Integer(1, config=False)
min_sockets = Integer(1, config=False)
max_sockets = Integer(1, config=False)
min_nodes = Integer(1, config=False)
max_nodes = Integer(1, config=False)
unit_type = Unicode("Core", config=False)
work_directory = Unicode('', config=False)
def __init__(self, config=None):
super(IPControllerTask, self).__init__(config=config)
the_uuid = uuid.uuid1()
self.std_out_file_path = os.path.join('log','ipcontroller-%s.out' % the_uuid)
self.std_err_file_path = os.path.join('log','ipcontroller-%s.err' % the_uuid)
@property
def command_line(self):
return ' '.join(self.controller_cmd + self.controller_args)
class IPEngineTask(WinHPCTask):
task_name = Unicode('IPEngine', config=True)
engine_cmd = List(['ipengine.exe'], config=True)
engine_args = List(['--log-to-file', '--log-level=40'], config=True)
# I don't want these to be configurable
std_out_file_path = Unicode('', config=False)
std_err_file_path = Unicode('', config=False)
min_cores = Integer(1, config=False)
max_cores = Integer(1, config=False)
min_sockets = Integer(1, config=False)
max_sockets = Integer(1, config=False)
min_nodes = Integer(1, config=False)
max_nodes = Integer(1, config=False)
unit_type = Unicode("Core", config=False)
work_directory = Unicode('', config=False)
def __init__(self, config=None):
super(IPEngineTask,self).__init__(config=config)
the_uuid = uuid.uuid1()
self.std_out_file_path = os.path.join('log','ipengine-%s.out' % the_uuid)
self.std_err_file_path = os.path.join('log','ipengine-%s.err' % the_uuid)
@property
def command_line(self):
return ' '.join(self.engine_cmd + self.engine_args)
# j = WinHPCJob(None)
# j.job_name = 'IPCluster'
# j.username = 'GNET\\bgranger'
# j.requested_nodes = 'GREEN'
#
# t = WinHPCTask(None)
# t.task_name = 'Controller'
# t.command_line = r"\\blue\domainusers$\bgranger\Python\Python25\Scripts\ipcontroller.exe --log-to-file -p default --log-level 10"
# t.work_directory = r"\\blue\domainusers$\bgranger\.ipython\cluster_default"
# t.std_out_file_path = 'controller-out.txt'
# t.std_err_file_path = 'controller-err.txt'
# t.environment_variables['PYTHONPATH'] = r"\\blue\domainusers$\bgranger\Python\Python25\Lib\site-packages"
# j.add_task(t)
| noslenfa/tdjangorest | uw/lib/python2.7/site-packages/IPython/parallel/apps/winhpcjob.py | Python | apache-2.0 | 11,701 | [
"Brian"
] | 89e52c45fb6381664c7332c92320b3809261750e9aa8c592a03a75508ee61702 |
"""
Models from Godley & Lavoie text.
[G&L 2012] "Monetary Economics: An Integrated Approach to Credit, Money, Income, Production
and Wealth; Second Edition", by Wynne Godley and Marc Lavoie, Palgrave Macmillan, 2012.
ISBN 978-0-230-30184-9
Copyright 2016 Brian Romanchuk
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sfc_models.models as models
class GL_book_model(object):
"""
Base class for example models from [G&L 2012] for single-country models.
Generates the sectors, either in a new model object, or an object that is passed in.
The user supplies a country code.
"""
def __init__(self, country_code, model=None, use_book_exogenous=True):
"""
Constructor for an example model. Builds a single country, using a code that is passed in.
If the user supplies an existing Model object, uses that. This allows us to embed in a multi-country model.
:param country_code: str
:param model: sfc_models.models.Model
:param use_book_exogenous: bool
"""
if model is None:
model = models.Model()
self.Model = model
self.Country = models.Country(model, country_code, country_code)
self.UseBookExogenous = use_book_exogenous
def build_model(self): # pragma: no cover This is a virtual base class
"""
Does the work of building the sectors within a country. Returns the Model object.
:return: sfc_models.models.Model
"""
return self.Model
def expected_output(self): # pragma: no cover -- Virtual base class.
"""
Returns a list of expected output. Used to validate the framework output.
Uses the default exogenous series.
Format:
A list of tuples, that consist of the variable name, and (limited) time series of output.
For example:
[
('GOOD_SUP_GOOD', [0., 10., 12.]),
('HH_AfterTax', [0., 15., 18., 22.]),
]
In this case, the variable 'GOOD_SUP_GOOD' is expected to be [0., 10., 12.] for the first 3 periods
and
        'HH_AfterTax' is expected to be [0., 15., 18., 22.] over the first 4 periods.
In other words, target outputs do not have to be the same length.
:return: list
"""
return []
| brianr747/SFC_models | sfc_models/gl_book/__init__.py | Python | apache-2.0 | 2,795 | [
"Brian"
] | a76f3ac7d9aa5a0dbf6fcf0db27bec8b955b5a8dd7ecef72eebb46c60fdabf32 |
#
# Adrian Soto
# 22-12-2014
# Stony Brook University
#
################################################
# Plot band structure and DOS from the
# output of the bands.x program in the
# Quantum Espresso package.
#
# Features:
# 1) Allows for scissor correction (band shift)
# 2)
#
################################################
import math
import matplotlib.pyplot as plt
from matplotlib import rcParams
#rcParams['font.family'] = 'serif'
#rcParams['font.serif'] = ['Times']
#rcParams['text.usetex'] = True
#rcParams['font.size'] = 14
class band:
def __init__(self, numkpoints, bandenergies):
self.nks = numkpoints
if (len(bandenergies) != numkpoints):
print "ERROR: list of band energies has wrong length. Setting band to 0."
self.nrg = [0] * numkpoints
else:
self.nrg = bandenergies
def printband(self):
print self.nrg
def shift(self, delta):
self.nrg = map(lambda x : x+delta, self.nrg) # watch for scope here.
return
class kpoints:
def __init__(self):
self.klist = []
def w0gauss(x):
# As in flib/w0gauss.f90 in the QE package
pi = 3.141592653589793
w0 = 1.0/math.sqrt(pi)*math.exp(-(x-1.0/math.sqrt(2.0))**2)*(2.0-math.sqrt(2.0)*x)
return w0
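# Reference-only helper (not called anywhere in this script): w0gauss() approximates a
# normalized delta function (it matches the "cold smearing" form of the QE w0gauss routine),
# so its integral over x should be close to 1. The check below uses a simple rectangle-rule
# integration with purely illustrative limits.
def _check_w0gauss_normalization(dx=0.01, xmin=-10.0, xmax=10.0):
    nsteps = int((xmax - xmin)/dx)
    total = 0.0
    for i in range(0, nsteps):
        total = total + w0gauss(xmin + i*dx)*dx
    return total   # expected to be ~1.0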
def ReadBandStructure(bandsfile):
#
# This function reads the band structure as written
# to output of the bands.x program. It returns the bs
# as a flat list with all energies and another list with
# the k-point coordinates.
#
f = open(bandsfile, 'r')
# First line contains nbnd and nks. Read.
currentline = f.readline()
nks = int(currentline[22:26])
nbnd = int(currentline[12:16])
# Following lines contain the k-point coordinates
# and the band energies.
# Calculate number of lines containing band structure:
# nks k-point lines
# At each k-point there are (1+nbnd/10) energy values.
nlpkp = 1+nbnd/10 # Number of Lines Per K-Point
nlines = nks + nks * nlpkp
bsaux = []
xk = []
for ik in range (0, nks):
currentline = f.readline()
#kpoint = currentline[12:40]
kpoint = [float(x) for x in currentline.split()]
xk.append(kpoint)
auxenerg = []
for ibnd in range(0, nlpkp):
currentline = f.readline()
# append current line to auxiliary list
            auxenerg.append( [float(x) for x in currentline.split()] )
# flatten list of lists containing energies for a given kpoint
# (each sublist corresponds to one line in the bands.dat file)
energ = [item for sublist in auxenerg for item in sublist]
# Sort ascendingly band energies for current k-point (to
# prevent artificial level crossings if QE bands.x output
# does not sort them correctly) and append to band structure
bsaux.append(sorted(energ))
f.close()
# Flatten bs list
bsflat = [item for sublist in bsaux for item in sublist]
return nks, nbnd, xk, bsflat
def SortByBands(nks, nbnd, bsflat):
# Rearrarange bs from k-points to bands
bs = []
for ibnd in range (0, nbnd):
currentband=[]
for ik in range (0, nks):
#currentband.append(bsflat[ik*nbnd+ibnd])
bs.append(bsflat[ik*nbnd+ibnd])
#bs.append( currentband )
return bs
def FindHLGap(nks, hvb, lcb):
#
# Find HOMO and LUMO energies and energy gap
#
# hvb = highest valence band
# lcb = lowest conduction band
#
# Ehvb = highest valence energy or HOMO energy
# Elcb = lowest conduction energy or LUMO energy
#
gap = lcb[0] - hvb[0]
for ik1 in range (0, nks):
auxcond = lcb[ik1]
for ik2 in range (0, nks):
auxval = hvb[ik2]
currentgap = auxcond-auxval
if (currentgap < 0.0):
print "ERROR: negative gap"
elif (currentgap < gap):
gap = currentgap
Ehvb = max(hvb)
Elcb = min(lcb)
return Ehvb, Elcb, gap
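# Worked example (hypothetical numbers): with hvb = [0.0, 0.5] and lcb = [1.2, 1.0],
# FindHLGap returns Ehvb = 0.5 (HOMO), Elcb = 1.0 (LUMO) and gap = 0.5. Because ik1 and ik2
# run over k-points independently, the gap is the fundamental (possibly indirect) gap,
# i.e. min(lcb) - max(hvb).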
def Scissor(newgap, bands, nks, nbnd, shifttype):
#
# shifttype == 0 : shift valence bands by -0.5*delta and
# conduction bands by 0.5*delta
# shifttype == 1 : as in 0 but placing the highest valence
# energy at 0.0
# shifttype == 2 : as in 0 but placing the gap center at 0.0
#
EHOMO, ELUMO, oldgap = FindHLGap(nks, bands[nval-1].nrg , bands[nval].nrg)
delta=(newgap-oldgap)/2.0
# Apply scissor to band structure
for ibnd in range (0, nbnd):
if (ibnd < nval):
bands[ibnd].shift(-1.0*delta)
else:
bands[ibnd].shift(delta)
if (shifttype==0):
print "Scissor correction to band energies has been applied."
return
elif (shifttype==1):
EHOMO, ELUMO, gap = FindHLGap(nks, bands[nval-1].nrg , bands[nval].nrg)
delta = -1.0*EHOMO
#print "delta=", delta
for ibnd in range (0, nbnd):
bands[ibnd].shift(delta)
print "Scissor correction to band energies has been applied."
print "Highest valence energy has been set to 0.0 eV"
return
elif (shifttype==2):
EHOMO, ELUMO, gap = FindHLGap(nks, bands[nval-1].nrg , bands[nval].nrg)
delta = -0.5*(EHOMO+ELUMO)
for ibnd in range (0, nbnd):
bands[ibnd].shift(delta)
print "Scissor correction to band energies has been applied."
print "Gap center has been set to 0.0 eV"
return
else:
print "ERROR: shifttype has an non-valid value. Default value shifttype==0."
print "Scissor correction to band energies has been applied."
return
def CreateDOS(nks, nbnd, deltaE, bnd):
    # ATTENTION: bnd must be a list of objects of the class band
print "Creating DOS"
Emin = min(bnd[0].nrg)
Emax = max(bnd[nbnd-1].nrg)
ndos = int((Emax - Emin)/deltaE) + 1 # int always rounds to lower integer
dosE = []
dosG = []
intg=0.0
# Create DOS
for idos in range (0, ndos):
E = Emin + idos * deltaE
dosg = 0.0
for ik in range(0, nks):
for ibnd in range (0, nbnd):
dosg = dosg + w0gauss ( (E - bnd[ibnd].nrg[ik] ) / deltaE ) # * wk(ik)
dosg = dosg/deltaE
intg = intg + dosg*deltaE # integrated DOS
dosE.append(E)
dosG.append(dosg)
# Normalize DOS
dosGnorm=dosG
for idos in range (0, ndos):
dosGnorm[idos]=dosGnorm[idos]/intg
return dosE, dosGnorm
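# Usage note: since dosGnorm is divided by the integrated DOS intg, the returned curve
# integrates to roughly 1 over the sampled energy window, i.e.
# sum(g*deltaE for g in dosGnorm) ~= 1.0. A typical call (mirroring the usage further below):
#   dosE, dosG = CreateDOS(nks, nbnd, 0.1, bands)
#   PlotDOS(dosE, dosG, "DOS.pdf")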
def PlotBandStructure(nbnd, nval, bnd, plotfile, Ef, sympoints, nks_btw_sympoints ):
#
    # ATTENTION: bnd must be a list of objects of the class band
#
# nval: number of valence bands
# Ef: Fermi Energy. If false then it won't print horizontal line
# sympoints: list containing labels of symmetry points
# nks_btw_sympoints: number of k-points between symmetry points
#
# NOTE: this function assumes that the number of points
# between symmetry points is constant
#
plt.clf() #clear figure
print "Plotting band structure to", plotfile
col = 'k'
for ibnd in range (0, nbnd):
#if (ibnd < nval):
# col='b'
#else:
# col='r'
plt.plot(bnd[ibnd].nrg, markersize=2, linestyle='-', color=col) #marker = 'o')
y_min = min(bnd[0].nrg)
    y_max = max(bnd[nbnd-1].nrg)
plt.xlabel("Brillouin zone path")
plt.ylabel("band energies (eV)")
numsympoints = len(sympoints)
kpath=[]
xticks = range(0, numsympoints*nks_btw_sympoints + 1, nks_btw_sympoints)
for i in range(0, numsympoints):
kpath.append(sympoints[i])
if (i < numsympoints-1):
for j in range (0, nks_btw_sympoints-1):
kpath.append('')
# plt.axvline(x=xticks, ymin=0, ymax=1, hold=None, **kwargs)
# Ticks and vertical lines across BS plot
plt.xticks(xticks, sympoints)
for i in range(0,numsympoints):
        plt.axvline(x=xticks[i], color='k', linewidth=0.25)
    if (Ef is not False):
plt.axhline(Ef, color="black", linestyle="--")
plt.xlim( 0, len(bnd[0].nrg)-1 )
plt.savefig(plotfile)
return
def PlotDOS(dosE, dosG, plotname):
# ATTENTION: dosG and dosE must be lists of reals
plt.clf() #clear figure
plt.plot(dosG, dosE)
plt.xlabel("Density Of States")
plt.ylabel("band energies (eV)")
plt.gca().set_xlim(left=0)
plt.savefig(plotname)
return
def PlottwoDOS(dosE1, dosG1, dosE2, dosG2, plotname):
# ATTENTION: dosG and dosE must be lists of reals
plt.clf() #clear figure
plt.plot(dosG1, dosE1, color='r', label='Si')
plt.plot(dosG2, dosE2, color='b', label='Ge')
plt.xlabel("Density Of States")
plt.ylabel("band energies (eV)")
plt.legend(loc=1)
plt.gca().set_xlim(left=0)
plt.savefig(plotname)
return
def PlotMultipleDOS(dosE, dosG, plotname):
# ATTENTION: dosG and dosE must be lists of lists of reals
Ndos=len(dosE[:])
for i in range(0, Ndos):
plt.plot(dosG[i], dosE[i])
plt.xlabel("Density Of States")
plt.ylabel("band energies (eV)")
plt.savefig(plotname)
return
#def WriteBandStructure():
# print (" %10.6f%10.6f%10.6f" % (kpoint[0], kpoint[1], kpoint[2]) )
############################################################################################
############################################################################################
############################################################################################
############################################################################################
############################ PROGRAM STARTS HERE ###################################
############################################################################################
############################################################################################
############################################################################################
############################################################################################
# Datafiles containing band structure
filename=["bands.si.301.dat", "bands.ge.301.dat"]
sympoints=['$\Gamma$', '$X$', '$W$', '$L$', '$\Gamma$', '$K$', '$X$']
nks_btw_sympoints=50 # To be set by user
nks0 = 0
nbnd0=0
xk0=[]
bsflt0=[]
bs0=[]
nks1 = 0
nbnd1=0
xk1=[]
bsflt1=[]
bs1=[]
nks0, nbnd0, xk0, bsflt0 = ReadBandStructure(filename[0]) # Si
nks1, nbnd1, xk1, bsflt1 = ReadBandStructure(filename[1]) # Ge
########################
# For scissor correction
nval = 4
ncond0 = nbnd0 - nval
ncond1 = nbnd1 - nval
exptgap0 = 1.1 # eV # Si
exptgap1 = 0.74 # Ge
#########################
# For DOS plot
deltaE = 0.1 #eV
# For Si -- 0
if(nbnd0 == 0):
print "%% ERROR READING BANDS. EXIT %%"
else:
bs0 = SortByBands(nks0, nbnd0, bsflt0)
print "nks=", nks0
print "nbnd=", nbnd0
# Create band objects
bands=[]
for ibnd in range (0, nbnd0):
ledge = ibnd*nks0
redge = ledge+nks0
currentband = bs0[ledge:redge]
bands.append( band(nks0, currentband) )
print "band ", ibnd+1, " created"
    Scissor(exptgap0, bands, nks0, nbnd0, 1) # shifttype argument is 1, so the Ef argument of PlotBandStructure below is set to 0.0
print "Scissor correction with gap set to", exptgap0
# Generate DOS to be plotted later
print "ASC-- checkpoint: creating DOS"
dosE0, dosG0 = CreateDOS(nks0, nbnd0, deltaE, bands)
# Plot BS
PlotBandStructure(nbnd0, nval, bands, "Si_BS.pdf", 0.0, sympoints, nks_btw_sympoints)
# For Ge -- 1
if(nbnd1 == 0):
print "%% ERROR READING BANDS. EXIT %%"
else:
bs1 = SortByBands(nks1, nbnd1, bsflt1)
print "nks=", nks1
print "nbnd=", nbnd1
# Create band objects
bands=[]
    for ibnd in range (0, nbnd1):
        ledge = ibnd*nks1
        redge = ledge+nks1
currentband = bs1[ledge:redge]
bands.append( band(nks1, currentband) )
print "band ", ibnd+1, " created"
    Scissor(exptgap1, bands, nks1, nbnd1, 1) # shifttype argument is 1, so the Ef argument of PlotBandStructure below is set to 0.0
print "Scissor correction with gap set to", exptgap1
# Generate DOS to be plotted later
dosE1, dosG1 = CreateDOS(nks1, nbnd1, deltaE, bands)
# Plot BS
PlotBandStructure(nbnd1, nval, bands, "Ge_BS.pdf", 0.0, sympoints, nks_btw_sympoints)
PlottwoDOS(dosE0, dosG0, dosE1, dosG1, "DOS.pdf")
| adrian-soto/QEdark_repo | tools/bandsndos/SiandGe.py | Python | gpl-2.0 | 12,825 | [
"Quantum ESPRESSO"
] | 9b7075029d743404fc0281f561c267125a08fed0b2467bf351c05e46c19cbc53 |
#!/usr/bin/env python
import string, sys
from distutils.core import setup
myVersion = "$Revision: 1.1 $";
# We require Python 2.0
pyversion = string.split( string.split( sys.version )[0], "." )
if map( int, pyversion ) < [2, 0, 0]:
sys.stderr.write( "Sorry, this library requires at least Python 2.0\n" )
sys.exit(1);
# Call the distutils setup function to install ourselves
setup ( name = "netdevicelib",
version = myVersion.split()[-2],
description = "Python Networking Device library",
author = "Brian Landers",
author_email = "[email protected]",
url = "http://netdevicelib.sourceforge.net",
package_dir = { '': 'src' },
packages = [ 'netdevicelib' ]
)
| hdecreis/netdevicelib | setup.py | Python | lgpl-2.1 | 770 | [
"Brian"
] | bcdadec4ee891d8495f41a5d873aba37dda6c26b2ebe78e69fcbc39cd199ae09 |
"""
The Grid module contains several utilities for grid operations
"""
__RCSID__ = "$Id$"
import os
import types
import re
from DIRAC.Core.Utilities.Os import sourceEnv
from DIRAC.FrameworkSystem.Client.ProxyManagerClient import gProxyManager
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
from DIRAC.ConfigurationSystem.Client.Helpers import Local
from DIRAC.Core.Utilities.ReturnValues import S_OK, S_ERROR
from DIRAC.Core.Utilities.Subprocess import systemCall, shellCall
def executeGridCommand( proxy, cmd, gridEnvScript = None ):
"""
Execute cmd tuple after sourcing GridEnv
"""
currentEnv = dict( os.environ )
if not gridEnvScript:
# if not passed as argument, use default from CS Helpers
gridEnvScript = Local.gridEnv()
if gridEnvScript:
command = gridEnvScript.split()
ret = sourceEnv( 10, command )
if not ret['OK']:
return S_ERROR( 'Failed sourcing GridEnv: %s' % ret['Message'] )
gridEnv = ret['outputEnv']
#
# Preserve some current settings if they are there
#
if currentEnv.has_key( 'X509_VOMS_DIR' ):
gridEnv['X509_VOMS_DIR'] = currentEnv['X509_VOMS_DIR']
if currentEnv.has_key( 'X509_CERT_DIR' ):
gridEnv['X509_CERT_DIR'] = currentEnv['X509_CERT_DIR']
else:
gridEnv = currentEnv
if not proxy:
res = getProxyInfo()
if not res['OK']:
return res
gridEnv['X509_USER_PROXY' ] = res['Value']['path']
elif type( proxy ) in types.StringTypes:
if os.path.exists( proxy ):
gridEnv[ 'X509_USER_PROXY' ] = proxy
else:
return S_ERROR( 'Can not treat proxy passed as a string' )
else:
ret = gProxyManager.dumpProxyToFile( proxy )
if not ret['OK']:
return ret
gridEnv[ 'X509_USER_PROXY' ] = ret['Value']
result = systemCall( 120, cmd, env = gridEnv )
return result
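# Illustrative usage sketch (the command shown is only an example): the command is passed as a
# list/tuple of arguments, and proxy may be None (use the proxy found in the environment), a
# path to a proxy file, or a proxy object handled by gProxyManager, e.g.
#
#   result = executeGridCommand( None, [ 'voms-proxy-info', '-all' ] )
#   if result['OK']:
#     exitCode, stdOut, stdErr = result['Value']
#
# The return value follows the usual S_OK/S_ERROR convention, with 'Value' holding the
# (exit code, stdout, stderr) tuple produced by systemCall().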
def ldapsearchBDII( filt = None, attr = None, host = None, base = None ):
""" Python wrapper for ldapserch at bdii.
:param filt: Filter used to search ldap, default = '', means select all
:param attr: Attributes returned by ldapsearch, default = '*', means return all
:param host: Host used for ldapsearch, default = 'lcg-bdii.cern.ch:2170', can be changed by $LCG_GFAL_INFOSYS
:return: standard DIRAC answer with Value equals to list of ldapsearch responses
Each element of list is dictionary with keys:
'dn': Distinguished name of ldapsearch response
'objectClass': List of classes in response
'attr': Dictionary of attributes
"""
if filt == None:
filt = ''
if attr == None:
attr = ''
if host == None:
host = 'lcg-bdii.cern.ch:2170'
if base == None:
base = 'Mds-Vo-name=local,o=grid'
if type( attr ) == types.ListType:
attr = ' '.join( attr )
cmd = 'ldapsearch -x -LLL -h %s -b %s "%s" %s' % ( host, base, filt, attr )
result = shellCall( 0, cmd )
response = []
if not result['OK']:
return result
status = result['Value'][0]
stdout = result['Value'][1]
stderr = result['Value'][2]
if not status == 0:
return S_ERROR( stderr )
lines = []
for line in stdout.split( "\n" ):
if line.find( " " ) == 0:
lines[-1] += line.strip()
else:
lines.append( line.strip() )
record = None
for line in lines:
if line.find( 'dn:' ) == 0:
record = {'dn':line.replace( 'dn:', '' ).strip(),
'objectClass':[],
'attr':{'dn':line.replace( 'dn:', '' ).strip()}}
response.append( record )
continue
if record:
if line.find( 'objectClass:' ) == 0:
record['objectClass'].append( line.replace( 'objectClass:', '' ).strip() )
continue
if line.find( 'Glue' ) == 0:
index = line.find( ':' )
if index > 0:
attr = line[:index]
value = line[index + 1:].strip()
if record['attr'].has_key( attr ):
if type( record['attr'][attr] ) == type( [] ):
record['attr'][attr].append( value )
else:
record['attr'][attr] = [record['attr'][attr], value]
else:
record['attr'][attr] = value
return S_OK( response )
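# Illustrative shape of a single response entry (the values are made up):
#
#   {'dn': 'GlueCEUniqueID=ce01.example.org:8443/cream-pbs-long,Mds-Vo-name=SITE,Mds-Vo-name=local,o=grid',
#    'objectClass': ['GlueCETop', 'GlueCE'],
#    'attr': {'dn': 'GlueCEUniqueID=ce01.example.org:8443/cream-pbs-long,Mds-Vo-name=SITE,Mds-Vo-name=local,o=grid',
#             'GlueCEUniqueID': 'ce01.example.org:8443/cream-pbs-long',
#             'GlueCEStateStatus': 'Production'}}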
def ldapSite( site, attr = None, host = None ):
""" Site information from bdii.
  :param site: Site as it is defined in GOCDB, or part of it with globbing, for example: UKI-*
:return: standard DIRAC answer with Value equals to list of sites.
Each site is dictionary which contains attributes of site.
For example result['Value'][0]['GlueSiteLocation']
"""
filt = '(GlueSiteUniqueID=%s)' % site
result = ldapsearchBDII( filt, attr, host )
if not result['OK']:
return result
sites = []
for value in result['Value']:
sites.append( value['attr'] )
return S_OK( sites )
def ldapCluster( ce, attr = None, host = None ):
""" CE (really SubCluster in definition of bdii) information from bdii.
It contains by the way host information for ce.
  :param ce: ce or part of it with globbing, for example, "ce0?.tier2.hep.manchester*"
:return: standard DIRAC answer with Value equals to list of clusters.
Each cluster is dictionary which contains attributes of ce.
For example result['Value'][0]['GlueHostBenchmarkSI00']
"""
filt = '(GlueClusterUniqueID=%s)' % ce
result = ldapsearchBDII( filt, attr, host )
if not result['OK']:
return result
clusters = []
for value in result['Value']:
clusters.append( value['attr'] )
return S_OK( clusters )
def ldapCE( ce, attr = None, host = None ):
""" CE (really SubCluster in definition of bdii) information from bdii.
It contains by the way host information for ce.
:param ce: ce or part of it with globing, for example, "ce0?.tier2.hep.manchester*"
:return: standard DIRAC answer with Value equals to list of clusters.
Each cluster is dictionary which contains attributes of ce.
For example result['Value'][0]['GlueHostBenchmarkSI00']
"""
filt = '(GlueSubClusterUniqueID=%s)' % ce
result = ldapsearchBDII( filt, attr, host )
if not result['OK']:
return result
ces = []
for value in result['Value']:
ces.append( value['attr'] )
return S_OK( ces )
def ldapCEState( ce, vo, attr = None, host = None ):
""" CEState information from bdii. Only CE with CEAccessControlBaseRule=VO:lhcb are selected.
:param ce: ce or part of it with globing, for example, "ce0?.tier2.hep.manchester*"
:return: standard DIRAC answer with Value equals to list of ceStates.
Each ceState is dictionary which contains attributes of ce.
For example result['Value'][0]['GlueCEStateStatus']
"""
voFilters = '(GlueCEAccessControlBaseRule=VOMS:/%s/*)' % vo
voFilters += '(GlueCEAccessControlBaseRule=VOMS:/%s)' % vo
voFilters += '(GlueCEAccessControlBaseRule=VO:%s)' % vo
filt = '(&(GlueCEUniqueID=%s*)(|%s))' % ( ce, voFilters )
result = ldapsearchBDII( filt, attr, host )
if not result['OK']:
return result
states = []
for value in result['Value']:
states.append( value['attr'] )
return S_OK( states )
def ldapCEVOView( ce, vo, attr = None, host = None ):
""" CEVOView information from bdii. Only CE with CEAccessControlBaseRule=VO:lhcb are selected.
:param ce: ce or part of it with globing, for example, "ce0?.tier2.hep.manchester*"
:return: standard DIRAC answer with Value equals to list of ceVOViews.
Each ceVOView is dictionary which contains attributes of ce.
For example result['Value'][0]['GlueCEStateRunningJobs']
"""
voFilters = '(GlueCEAccessControlBaseRule=VOMS:/%s/*)' % vo
voFilters += '(GlueCEAccessControlBaseRule=VOMS:/%s)' % vo
voFilters += '(GlueCEAccessControlBaseRule=VO:%s)' % vo
filt = '(&(GlueCEUniqueID=%s*)(|%s))' % ( ce, voFilters )
result = ldapsearchBDII( filt, attr, host )
if not result['OK']:
return result
ces = result['Value']
filt = '(&(objectClass=GlueVOView)(|%s))' % ( voFilters )
views = []
for ce in ces:
dn = ce['dn']
result = ldapsearchBDII( filt, attr, host, base = dn )
if result['OK']:
views.append( result['Value'][0]['attr'] )
return S_OK( views )
def ldapSE( site, vo, attr = None, host = None ):
""" SE/SA information from bdii.
  :param site: site with globbing, for example, "ce0?.tier2.hep.manchester*" or just "*"
  :param vo: VO name with globbing, "*" if all VOs
:return: standard DIRAC answer with Value equals to list of SE/SA merged items.
Each SE is dictionary which contains attributes of SE and corresponding SA.
For example result['Value'][0]['GlueSESizeFree']
"""
voFilters = '(GlueSAAccessControlBaseRule=VOMS:/%s/*)' % vo
voFilters += '(GlueSAAccessControlBaseRule=VOMS:/%s)' % vo
voFilters += '(GlueSAAccessControlBaseRule=VO:%s)' % vo
filt = '(&(objectClass=GlueSA)(|%s))' % voFilters
result = ldapsearchBDII( filt, attr, host )
if not result['OK']:
return result
sas = result['Value']
saDict = {}
seIDFilter = ''
for sa in sas:
chunk = sa['attr'].get('GlueChunkKey','')
if chunk:
seID = sa['attr']['GlueChunkKey'].replace('GlueSEUniqueID=','')
saDict[seID] = sa['attr']
seIDFilter += '(GlueSEUniqueID=%s)' % seID
if vo == "*":
filt = '(&(objectClass=GlueSE)(GlueForeignKey=GlueSiteUniqueID=%s))' % site
else:
filt = '(&(objectClass=GlueSE)(|%s)(GlueForeignKey=GlueSiteUniqueID=%s))' % ( seIDFilter, site )
result = ldapsearchBDII( filt, attr, host )
if not result['OK']:
return result
ses = result['Value']
seDict = {}
for se in ses:
seID = se['attr']['GlueSEUniqueID']
seDict[seID] = se['attr']
siteName = se['attr']['GlueForeignKey'].replace('GlueSiteUniqueID=','')
seDict[seID]['GlueSiteUniqueID'] = siteName
if seID in saDict:
seDict[seID].update( saDict[seID] )
seList = seDict.values()
return S_OK( seList )
def ldapSEAccessProtocol( se, attr = None, host = None ):
""" SE access protocol information from bdii
  :param se: se or part of it with globbing, for example, "ce0?.tier2.hep.manchester*"
:return: standard DIRAC answer with Value equals to list of access protocols.
"""
filt = '(&(objectClass=GlueSEAccessProtocol)(GlueChunkKey=GlueSEUniqueID=%s))' % se
result = ldapsearchBDII( filt, attr, host )
if not result['OK']:
return result
protocols = []
for value in result['Value']:
protocols.append( value['attr'] )
return S_OK( protocols )
def ldapService( serviceID = '*', serviceType = '*', vo = '*', attr = None, host = None):
""" Service BDII info for a given VO
:param service: service type, e.g. SRM
:return: standard DIRAC answer with Value equals to list of services
"""
voFilters = '(GlueServiceAccessControlBaseRule=VOMS:/%s/*)' % vo
voFilters += '(GlueServiceAccessControlBaseRule=VOMS:/%s)' % vo
voFilters += '(GlueServiceAccessControlBaseRule=VO:%s)' % vo
filt = '(&(GlueServiceType=%s)(GlueServiceUniqueID=%s)(|%s))' % ( serviceType, serviceID, voFilters )
result = ldapsearchBDII( filt, attr, host )
if not result['OK']:
return result
services = []
for value in result['Value']:
services.append( value['attr'] )
return S_OK( services )
def ldapSEVOInfo( vo, seID, attr = ["GlueVOInfoPath","GlueVOInfoAccessControlBaseRule"], host = None ):
""" VOInfo for a given SE
"""
filt = '(GlueChunkKey=GlueSEUniqueID=%s)' % seID
filt += '(GlueVOInfoAccessControlBaseRule=VO:%s*)' % vo
filt += '(objectClass=GlueVOInfo)'
filt = '(&%s)' % filt
result = ldapsearchBDII( filt, attr, host )
if not result['OK']:
return result
voInfo = []
for value in result['Value']:
voInfo.append( value['attr'] )
return S_OK( voInfo )
def getBdiiCEInfo( vo, host = None ):
""" Get information for all the CEs/queues for a given VO
:param vo: BDII VO name
:return result structure: result['Value'][siteID]['CEs'][ceID]['Queues'][queueName]. For
each siteID, ceID, queueName all the BDII/Glue parameters are retrieved
"""
result = ldapCEState( '', vo, host = host )
if not result['OK']:
return result
siteDict = {}
ceDict = {}
queueDict = {}
for queue in result['Value']:
clusterID = queue.get('GlueForeignKey','').replace('GlueClusterUniqueID=','')
ceID = queue.get('GlueCEUniqueID','').split(':')[0]
queueDict[queue['GlueCEUniqueID']] = queue
queueDict[queue['GlueCEUniqueID']]['CE'] = ceID
if not ceID in ceDict:
result = ldapCluster( clusterID, host = host )
if not result['OK']:
continue
if not result['Value']:
continue
ce = result['Value'][0]
ceDict[ceID] = ce
fKey = ce['GlueForeignKey']
siteID = ''
for key in fKey:
if key.startswith('GlueSiteUniqueID'):
siteID = key.replace('GlueSiteUniqueID=','')
ceDict[ceID]['Site'] = siteID
result = ldapCE( clusterID, host = host )
ce = {}
if result['OK'] and result['Value']:
ce = result['Value'][0]
ceDict[ceID].update( ce )
if not siteID in siteDict:
site = {}
result = ldapSite( siteID, host = host )
if result['OK'] and result['Value']:
site = result['Value'][0]
siteDict[siteID] = site
for ceID in ceDict:
siteID = ceDict[ceID]['Site']
if siteID in siteDict:
siteDict[siteID].setdefault('CEs',{})
siteDict[siteID]['CEs'][ceID] = ceDict[ceID]
for queueID in queueDict:
ceID = queueDict[queueID]['CE']
siteID = ceDict[ceID]['Site']
siteDict[siteID]['CEs'][ceID].setdefault('Queues',{})
queueName = re.split( ':\d+/', queueDict[queueID]['GlueCEUniqueID'] )[1]
siteDict[siteID]['CEs'][ceID]['Queues'][queueName] = queueDict[queueID]
return S_OK( siteDict )
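# Illustrative traversal of the getBdiiCEInfo() result (the VO and the printed attribute are
# only examples):
#
#   result = getBdiiCEInfo( 'biomed' )
#   if result['OK']:
#     for siteID, site in result['Value'].items():
#       for ceID, ce in site.get( 'CEs', {} ).items():
#         for queueName, queue in ce.get( 'Queues', {} ).items():
#           print siteID, ceID, queueName, queue.get( 'GlueCEStateStatus' )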
def getBdiiSEInfo( vo, host = None ):
""" Get information for all the SEs for a given VO
:param vo: BDII VO name
:return result structure: result['Value'][siteID]['SEs'][seID]. For
            each siteID, seID all the BDII/Glue SE/SA parameters are retrieved
"""
result = ldapSE( '*', vo, host = host )
if not result['OK']:
return result
ses = result['Value']
pathDict = {}
result = ldapSEVOInfo( vo, '*' )
if result['OK']:
for entry in result['Value']:
voPath = entry['GlueVOInfoPath']
seID = ''
for en in entry['dn'].split(','):
if en.startswith( 'GlueSEUniqueID=' ):
seID = en.replace( 'GlueSEUniqueID=', '' )
break
if seID:
pathDict[seID] = voPath
siteDict = {}
for se in ses:
siteName = se['GlueSiteUniqueID']
siteDict.setdefault( siteName, { "SEs": {} } )
seID = se['GlueSEUniqueID']
siteDict[siteName]["SEs"][seID] = se
result = ldapSEAccessProtocol( seID, host = host )
siteDict[siteName]["SEs"][seID]['AccessProtocols'] = {}
if result['OK']:
for entry in result['Value']:
apType = entry['GlueSEAccessProtocolType']
if apType in siteDict[siteName]["SEs"][seID]['AccessProtocols']:
count = 0
for p in siteDict[siteName]["SEs"][seID]['AccessProtocols']:
if p.startswith( apType+'.' ):
count += 1
apType = '%s.%d' % ( apType, count + 1 )
siteDict[siteName]["SEs"][seID]['AccessProtocols'][apType] = entry
else:
siteDict[siteName]["SEs"][seID]['AccessProtocols'][apType] = entry
else:
continue
if seID in pathDict:
siteDict[siteName]["SEs"][seID]['VOPath'] = pathDict[seID]
return S_OK( siteDict )
| vmendez/DIRAC | Core/Utilities/Grid.py | Python | gpl-3.0 | 15,561 | [
"DIRAC"
] | eaaa352f68db21f46a769589760b4b43f04dcfe3acea3ad229c7adb9fefd0fd9 |
import os
from ase.io import write
from gpaw import GPAW
calc = GPAW('anti.gpw')
atoms = calc.get_atoms()
up = calc.get_pseudo_density(0)
down = calc.get_pseudo_density(1)
zeta = (up - down) / (up + down)
write('magnetization.cube', atoms, data=zeta)
os.system('vmd -e isosurfaces.vmd magnetization.cube')
| qsnake/gpaw | doc/exercises/dos/viewmag.py | Python | gpl-3.0 | 311 | [
"ASE",
"GPAW",
"VMD"
] | 80f84e2dfe784f6b40eda19a604402d753496358594379242ca128e76173722a |
from __future__ import division
import pytest
import os
from bayesian.gaussian import MeansVector, CovarianceMatrix
from bayesian.gaussian_bayesian_network import *
from bayesian.examples.gaussian_bayesian_networks.river import (
f_a, f_b, f_c, f_d)
def pytest_funcarg__river_graph(request):
g = build_graph(f_a, f_b, f_c, f_d)
return g
class TestGBN():
def test_get_joint_parameters(self, river_graph):
mu, sigma = river_graph.get_joint_parameters()
assert mu == MeansVector(
[[3],
[4],
[9],
[14]],
names=['a', 'b', 'c', 'd'])
assert sigma == CovarianceMatrix(
[[4, 4, 8, 12],
[4, 5, 8, 13],
[8, 8, 20, 28],
[12, 13, 28, 42]],
names=['a', 'b', 'c', 'd'])
def test_query(self, river_graph):
result = river_graph.query(a=7)
mu = result['joint']['mu']
sigma = result['joint']['sigma']
assert mu == MeansVector([
[8],
[17],
[26]], names=['b', 'c', 'd'])
assert sigma == CovarianceMatrix(
[[1, 0, 1],
[0, 4, 4],
[1, 4, 6]],
names=['b', 'c', 'd'])
result = river_graph.query(a=7, c=17)
mu = result['joint']['mu']
sigma = result['joint']['sigma']
assert mu == MeansVector([
[8],
[26]], names=['b', 'd'])
assert sigma == CovarianceMatrix(
[[1, 1],
[1, 2]],
names=['b', 'd'])
result = river_graph.query(a=7, c=17, b=8)
mu = result['joint']['mu']
sigma = result['joint']['sigma']
assert mu == MeansVector([
[26]], names=['d'])
assert sigma == CovarianceMatrix(
[[1]],
names=['d'])
def test_assignment_of_joint_parameters(self, river_graph):
assert river_graph.nodes['b'].func.joint_mu == MeansVector([
[3],
[4]], names=['a', 'b'])
assert river_graph.nodes['b'].func.covariance_matrix == CovarianceMatrix([
[4, 4],
[4, 5]], names=['a', 'b'])
def test_gaussian_pdf(self, river_graph):
assert round(river_graph.nodes['a'].func(3), 4) == 0.1995
assert round(river_graph.nodes['a'].func(10), 4) == 0.0002
def test_multivariate_gaussian_pdf(self, river_graph):
assert round(river_graph.nodes['d'].func(3, 1, 3), 4) == 0.0005
| kamijawa/ogc_server | bayesian/test/test_gaussian_bayesian_network.py | Python | mit | 2,497 | [
"Gaussian"
] | ac8174ad9ea582b0b2a7aabe0b38a1fa8823b66217d895d89de926658f5e67fa |
import os
import functools
import pytest
import lammps
# Redefine Lammps command-line args so no annoying logs or stdout
Lammps = functools.partial(lammps.Lammps, args=[
'-log', 'none',
'-screen', 'none'
])
# Lammps command line arguments
def test_lammps_init_default_units():
lmp = Lammps()
assert lmp.units == 'lj'
def test_lammps_init_set_get_units():
lmp = Lammps(units='metal')
assert lmp.units == 'metal'
def test_lammps_init_invalid_units():
with pytest.raises(ValueError):
Lammps(units='invalid_units')
# MPI_Comm comm (don't want to do mpi tests)
def test_lammps_init_invalid_comm():
with pytest.raises(TypeError):
Lammps(comm='invalid_comm')
# Style
def test_lammps_init_default_style():
lmp = Lammps()
assert lmp.system.style == 'atomic'
def test_lammps_init_set_get_style():
lmp = Lammps(style='full')
assert lmp.system.style == 'full'
def test_lammps_init_invalid_style():
with pytest.raises(ValueError):
Lammps(style='invalid_style')
# version
def test_lammps_version():
lmp = Lammps()
assert isinstance(lmp.__version__, str)
# command
def test_lammps_command():
lmp = Lammps()
lmp.command('timestep 2.0')
assert lmp.dt == 2.0
# file
def test_lammps_file(tmpdir):
tmpfile = tmpdir.join("test_file.in")
tmpfile.write("timestep 1.0\n")
lmp = Lammps()
lmp.file(str(tmpfile))
assert lmp.dt == 1.0
def test_lammps_file_twice(tmpdir):
tmpfile1 = tmpdir.join("test_file1.in")
tmpfile1.write("timestep 1.0\n")
tmpfile2 = tmpdir.join("test_file2.in")
tmpfile2.write("timestep 2.0\n")
lmp = Lammps()
lmp.file(str(tmpfile1))
assert lmp.dt == 1.0
lmp.file(str(tmpfile2))
assert lmp.dt == 2.0
# Run
def test_lammps_run():
    # This test has a dependency on the
    # LAMMPS melt example
# dt tested
# time step tested
# time tested
# This is hardly a unit test... (a better way?)
lmp = Lammps()
lmp.file(os.path.join(lammps.__path__[0], 'data', 'melt.in'))
assert lmp.dt == 0.005
assert lmp.time_step == 100
assert lmp.time == lmp.time_step * lmp.dt
# time_step
def test_lammps_default_time_step():
lmp = Lammps()
assert lmp.time_step == 0
def test_lammps_set_get_time_step():
lmp = Lammps()
lmp.time_step = 100
assert lmp.time_step == 100
# dt
def test_lammps_default_dt():
lmp = Lammps()
assert lmp.dt == 0.005
def test_lammps_set_get_dt():
lmp = Lammps()
lmp.dt = 13.0
assert lmp.dt == 13.0
# time
def test_lammps_default_time():
lmp = Lammps()
assert lmp.time == 0.0
# reset
def test_lammps_reset():
lmp = Lammps()
lmp.dt = 13.0
lmp.reset()
assert lmp.dt == 0.005
if __name__ == '__main__':
pytest.main()
| costrouc/lammps-python | lammps/test/test_lammps.py | Python | gpl-3.0 | 2,816 | [
"LAMMPS"
] | 9ee6f20027796ae2f39ea26fd7b08a45c8cc7975bcea595becd6d149ac9dc03c |
'''
Groups.tests.views
'''
from MyPidge.Groups.models import Group, Service, GroupCategory, GroupTag, MembershipInvitation
from MyPidge.Users.models import User, Membership, MembershipClaim
from MyPidge.Events.models import Event, EventTime, EventTag
from django.test import TestCase, Client
from django.core import mail
from django.core.urlresolvers import reverse
class BasicViewTests(TestCase):
def setUp(self):
# Reqd so that PREPEND_WWW in settings doesn't cause all view tests to fail
self.host = "www.testhost"
self.client = Client(HTTP_HOST = self.host)
self.service = Service.objects.create(name = "Servicey")
self.category = GroupCategory.objects.create(name = "NewCat", small_name = "NC", all_can_create = True)
self.tagone = GroupTag.objects.create(name = "TagEinz", description = "Here's the first.")
self.tagtwo = GroupTag.objects.create(name = "SPAM", description = "The tanned variety.")
self.group = Group.objects.create(official_name = "My Python", category = self.category)
self.group.tags.add(self.tagone)
self.fergus = User.objects.create(firstname="Fergus", lastname="Ferrier", email="[email protected]", password = "blah")
self.fergus.UpdatePassword("password")
self.fergus.GiveDefaults()
self.fergusmember = Membership.objects.create(person = self.fergus, group = self.group, assoctype="MEM")
self.eventtag = EventTag.objects.create(name = "Fun Things")
self.event = Event.objects.create(group = self.group, title = "Dull Old Event", description = "The best thing to have happened recently.")
self.eventtime = EventTime.objects.create(event = self.event, start = "2007-10-30 16:00:00", finish = "2007-10-30 18:00:00")
self.eventfuture = Event.objects.create(group = self.group, title = "Exciting Upcoming Event", description = "The best thing to have happened recently.")
self.eventfuture.tags.add(self.eventtag)
self.eventfuturetime = EventTime.objects.create(event = self.eventfuture, start = "2009-12-30 16:00:00", finish = "2009-12-30 18:00:00")
# Overview page
def test_overview(self):
''' Not logged in > 200, message about signing in '''
response = self.client.get(reverse('MyPidge.Groups.views.Overview'))
self.assertContains(response, "You must log in to associate yourself with groups.", count=1, status_code=200)
def test_overview_logged(self):
''' Logged in > 200, context with groups '''
response = self.client.post(reverse('MyPidge.Users.views.Login', args=[""]), {'email': self.fergus.email, 'password': 'password'})
response = self.client.get(reverse('MyPidge.Groups.views.Overview'))
self.assertContains(response, "My Python", count=1, status_code=200)
self.assertContains(response,
"Associate yourself with other groups by using the two options on the right.",
count=1,
status_code=200)
# All Groups
def test_groups(self):
''' get > 200 '''
response = self.client.get(reverse('MyPidge.Groups.views.All'))
self.assertContains(response, "Corpus Christi College", count=1, status_code=200)
def test_groups_category(self):
''' get with category > 200 '''
response = self.client.get(reverse('MyPidge.Groups.views.All'), {'category': self.tagone.id})
self.assertNotContains(response, "Corpus Christi College", status_code=200)
self.assertContains(response, "My Python", status_code=200)
def test_groups_query(self):
''' get with query > 200 + Relevant results '''
response = self.client.get(reverse('MyPidge.Groups.views.All'), {'q': "college"})
self.assertContains(response, "Corpus Christi College", count=1, status_code=200)
self.assertNotContains(response, "My Python", status_code=200)
# View Group
def test_group(self):
''' Get existing group > 200 '''
response = self.client.get(reverse('MyPidge.Groups.views.ViewGroup', args=[self.group.id]))
self.assertContains(response, "My Python", status_code=200)
def test_group_false(self):
''' GID not exist > 404 '''
response = self.client.get(reverse('MyPidge.Groups.views.ViewGroup', args=[995756463]))
self.assertEquals(response.status_code, 404)
def test_group_showpast_off(self):
''' View a group without requesting past events '''
response = self.client.get(reverse('MyPidge.Groups.views.ViewGroup', args=[self.group.id]))
self.assertNotContains(response, self.event.title)
self.assertContains(response, self.eventfuture.title)
response = self.client.get(reverse('MyPidge.Groups.views.ViewGroup', args=[self.group.id]), {'showpast': '0'})
self.assertNotContains(response, self.event.title)
self.assertContains(response, self.eventfuture.title)
def test_group_showpast(self):
''' View a group, and show the past events '''
response = self.client.get(reverse('MyPidge.Groups.views.ViewGroup', args=[self.group.id]), {'showpast': '1'})
self.assertContains(response, self.event.title)
self.assertContains(response, self.eventfuture.title)
# TODO Check hidepast functionality
# TODO Show events that have no times
# TODO Show events for the group in order
# TODO Toggling of member / interested state
# TODO Controller shows link to management interface
# View Invitation
def test_view_invite_bad(self):
''' Invitation Key does not exist '''
response = self.client.get(reverse('MyPidge.Groups.views.ViewInvitation', args=["invitebad"]))
self.assertContains(response, "This invitation appears to have been cancelled.", count=1, status_code=200)
def test_view_invite(self):
''' Group controller Invitation Key exists '''
self.inviteone = MembershipInvitation.objects.create(group = self.group, email = "[email protected]", emailsent = True, linkkey = "yeah")
response = self.client.get(reverse('MyPidge.Groups.views.ViewInvitation', args=["yeah"]))
self.assertContains(response, "You have been invited to become a controller of the <b>My Python</b> group", count=1, status_code=200)
# Reject Invitation
def test_reject_invite(self):
''' Reject group control invitation. Linkkey exists. '''
self.inviteone = MembershipInvitation.objects.create(group = self.group, email = "[email protected]", emailsent = True, linkkey = "yeah")
response = self.client.get(reverse('MyPidge.Groups.views.RejectInvitation', args=["yeah"]))
self.assertContains(response, "You have successfully rejected this invitation.", count=1, status_code=200)
self.assertEquals(MembershipInvitation.objects.filter(linkkey = "yeah").count(), 0)
def test_reject_invite_bad(self):
''' Reject group control invitation. Linkkey does not exist. '''
response = self.client.get(reverse('MyPidge.Groups.views.RejectInvitation', args=["yeah"]))
self.assertContains(response, "This invitation appears to have been cancelled.", count=1, status_code=200)
class UserViewTests(TestCase):
def setUp(self):
# Reqd so that PREPEND_WWW in settings doesn't cause all view tests to fail
self.host = "www.testhost"
self.client = Client(HTTP_HOST = self.host)
self.service = Service.objects.create(name = "Servicey")
self.category = GroupCategory.objects.create(name = "NewCat", small_name = "NC", all_can_create = True)
self.tagone = GroupTag.objects.create(name = "TagEinz", description = "Here's the first.")
self.tagtwo = GroupTag.objects.create(name = "SPAM", description = "The tanned variety.")
self.group = Group.objects.create(official_name = "My Python", category = self.category)
self.group.tags.add(self.tagone)
self.fergus = User.objects.create(firstname="Fergus", lastname="Ferrier", email="[email protected]", password = "blah")
self.fergus.UpdatePassword("password")
self.fergus.GiveDefaults()
self.fergusmember = Membership.objects.create(person = self.fergus, group = self.group, assoctype="MEM")
self.client.post(reverse('MyPidge.Users.views.Login', args=[""]), {'email': self.fergus.email, 'password': "password"})
# Registration
def test_register(self):
''' Page renders '''
response = self.client.get(reverse('MyPidge.Groups.views.RegisterGroup'))
self.assertEquals(response.status_code, 200)
def test_register_duplicate(self):
''' Duplicate official_name > message on page '''
response = self.client.post(reverse('MyPidge.Groups.views.RegisterGroup'), {'official_name': 'My Python', 'category': self.category.id, 'tags': (self.tagone.id,)})
self.assertContains(response, "Group with this Official name already exists.", count=1, status_code=200)
def test_register_okay(self):
''' Good submission > 302 to /Group/Register '''
response = self.client.post(reverse('MyPidge.Groups.views.RegisterGroup'), {'official_name': 'ThePidge', 'category': self.category.id, 'tags': (self.tagone.id,)})
newgroup = Group.objects.get(official_name = 'ThePidge')
self.assertRedirects(response, reverse('MyPidge.Groups.views.RegisterGroupSuccess', args=[newgroup.id]), host = self.host)
def test_register_invalid(self):
''' Invalid form renders okay '''
response = self.client.post(reverse('MyPidge.Groups.views.RegisterGroup'), {'official_name': 'Myuuuu Python'})
self.assertEquals(response.status_code, 200)
# Registration Group Success Page
def test_register_success(self):
''' Renders okay '''
response = self.client.get(reverse('MyPidge.Groups.views.RegisterGroupSuccess', args=[2]))
self.assertEquals(response.status_code, 200)
# Accept Invitation
def test_accept_invite(self):
''' Accepting a group control invitation. '''
self.inviteone = MembershipInvitation.objects.create(group = self.group, email = "[email protected]", emailsent = True, linkkey = "yeah")
response = self.client.get(reverse('MyPidge.Groups.views.AcceptInvitation', args=["yeah"]))
self.assertContains(response, "You have successfully accepted the invitation to control the My Python group.", count=1, status_code=200)
self.assertEquals(MembershipInvitation.objects.filter(linkkey = "yeah").count(), 0)
self.assertEquals(Membership.objects.filter(person = self.fergus, group = self.group, assoctype="CTL").count(), 1)
def test_accept_invite_duplicate(self):
''' Accepting a group control invitation but already a controller. '''
self.inviteone = MembershipInvitation.objects.create(group = self.group, email = "[email protected]", emailsent = True, linkkey = "yeah")
self.memship = Membership.objects.create(person = self.fergus, group = self.group, assoctype = "CTL")
response = self.client.get(reverse('MyPidge.Groups.views.AcceptInvitation', args=["yeah"]))
self.assertContains(response, "You are already a controller of the My Python group.", count=1, status_code=200)
self.assertEquals(MembershipInvitation.objects.filter(linkkey = "yeah").count(), 1)
self.assertEquals(Membership.objects.filter(person = self.fergus, group = self.group, assoctype="CTL").count(), 1)
def test_accept_invite_bad(self):
''' Accepting a group control invitation that does not exist. '''
response = self.client.get(reverse('MyPidge.Groups.views.AcceptInvitation', args=["noexist"]))
self.assertContains(response, "This invitation appears to have been cancelled.", count=1, status_code=200)
self.assertEquals(Membership.objects.filter(person = self.fergus, group = self.group, assoctype="CTL").count(), 0)
# Claim Control
def test_claim_control(self):
''' View group claim control page. '''
response = self.client.get(reverse('MyPidge.Groups.views.ClaimControl', args=[self.group.id]))
self.assertContains(response, "You must provide a brief justification as to why you should be a controller of this group.", count=1, status_code=200)
def test_claim_control_bad(self):
''' View non-existant group's claim control page. '''
response = self.client.get(reverse('MyPidge.Groups.views.ClaimControl', args=[4444444]))
self.assertEquals(response.status_code, 404)
def test_claim_control_already(self):
''' Claim control when already a controller of a group. '''
assoc = Membership.objects.create(person = self.fergus, group = self.group, assoctype = "CTL")
response = self.client.get(reverse('MyPidge.Groups.views.ClaimControl', args=[self.group.id]))
self.assertRedirects(response, reverse('MyPidge.Groups.views.Manage', args=[self.group.id]), host = self.host)
def test_claim_control_send(self):
''' Submit group control claim. '''
response = self.client.post(reverse('MyPidge.Groups.views.ClaimControl', args=[self.group.id]), {'reason': 'I am like the president, innit.'})
self.assertContains(response, "Your claim has been sent.", count=1, status_code=200)
self.assertEquals(MembershipClaim.objects.filter(user = self.fergus, group = self.group).count(), 1)
self.assertEquals(len(mail.outbox), 1)
def test_claim_control_again(self):
''' Submit group control claim when user has done so before. '''
response = self.client.post(reverse('MyPidge.Groups.views.ClaimControl', args=[self.group.id]), {'reason': 'I am like the committee member, innit.'})
response = self.client.post(reverse('MyPidge.Groups.views.ClaimControl', args=[self.group.id]), {'reason': 'I am like the president, innit.'})
self.assertContains(response, "Your claim has been sent.", count=1, status_code=200)
self.assertEquals(MembershipClaim.objects.filter(user = self.fergus, group = self.group).count(), 1)
self.assertEquals(len(mail.outbox), 1)
| fergusrossferrier/mypidge.com | MyPidge/Groups/tests/views.py | Python | agpl-3.0 | 14,355 | [
"exciting"
] | 151eeccbe8b3db79ef8632d52ae006332fd3b3f673ae7594d915dfcb15eb9bb4 |
# #
# Copyright 2009-2020 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
# #
"""
Easyconfig module that contains the EasyConfig class.
:author: Stijn De Weirdt (Ghent University)
:author: Dries Verdegem (Ghent University)
:author: Kenneth Hoste (Ghent University)
:author: Pieter De Baets (Ghent University)
:author: Jens Timmerman (Ghent University)
:author: Toon Willems (Ghent University)
:author: Ward Poelmans (Ghent University)
:author: Alan O'Cais (Juelich Supercomputing Centre)
:author: Bart Oldeman (McGill University, Calcul Quebec, Compute Canada)
:author: Maxime Boissonneault (Universite Laval, Calcul Quebec, Compute Canada)
:author: Victor Holanda (CSCS, ETH Zurich)
"""
import copy
import difflib
import functools
import os
import re
from distutils.version import LooseVersion
from contextlib import contextmanager
import easybuild.tools.filetools as filetools
from easybuild.base import fancylogger
from easybuild.framework.easyconfig import MANDATORY
from easybuild.framework.easyconfig.constants import EXTERNAL_MODULE_MARKER
from easybuild.framework.easyconfig.default import DEFAULT_CONFIG
from easybuild.framework.easyconfig.format.convert import Dependency
from easybuild.framework.easyconfig.format.format import DEPENDENCY_PARAMETERS
from easybuild.framework.easyconfig.format.one import EB_FORMAT_EXTENSION, retrieve_blocks_in_spec
from easybuild.framework.easyconfig.format.yeb import YEB_FORMAT_EXTENSION, is_yeb_format
from easybuild.framework.easyconfig.licenses import EASYCONFIG_LICENSES_DICT
from easybuild.framework.easyconfig.parser import DEPRECATED_PARAMETERS, REPLACED_PARAMETERS
from easybuild.framework.easyconfig.parser import EasyConfigParser, fetch_parameters_from_easyconfig
from easybuild.framework.easyconfig.templates import TEMPLATE_CONSTANTS, template_constant_dict
from easybuild.tools.build_log import EasyBuildError, print_warning, print_msg
from easybuild.tools.config import GENERIC_EASYBLOCK_PKG, LOCAL_VAR_NAMING_CHECK_ERROR, LOCAL_VAR_NAMING_CHECK_LOG
from easybuild.tools.config import LOCAL_VAR_NAMING_CHECK_WARN
from easybuild.tools.config import Singleton, build_option, get_module_naming_scheme
from easybuild.tools.filetools import convert_name, copy_file, create_index, decode_class_name, encode_class_name
from easybuild.tools.filetools import find_backup_name_candidate, find_easyconfigs, load_index
from easybuild.tools.filetools import read_file, write_file
from easybuild.tools.hooks import PARSE, load_hooks, run_hook
from easybuild.tools.module_naming_scheme.mns import DEVEL_MODULE_SUFFIX
from easybuild.tools.module_naming_scheme.utilities import avail_module_naming_schemes, det_full_ec_version
from easybuild.tools.module_naming_scheme.utilities import det_hidden_modname, is_valid_module_name
from easybuild.tools.modules import modules_tool
from easybuild.tools.py2vs3 import OrderedDict, create_base_metaclass, string_type
from easybuild.tools.systemtools import check_os_dependency, pick_dep_version
from easybuild.tools.toolchain.toolchain import SYSTEM_TOOLCHAIN_NAME, is_system_toolchain
from easybuild.tools.toolchain.toolchain import TOOLCHAIN_CAPABILITIES, TOOLCHAIN_CAPABILITY_CUDA
from easybuild.tools.toolchain.utilities import get_toolchain, search_toolchain
from easybuild.tools.utilities import flatten, get_class_for, nub, quote_py_str, remove_unwanted_chars
from easybuild.tools.version import VERSION
from easybuild.toolchains.compiler.cuda import Cuda
_log = fancylogger.getLogger('easyconfig.easyconfig', fname=False)
# add license here to make it really MANDATORY (remove comment in default)
MANDATORY_PARAMS = ['name', 'version', 'homepage', 'description', 'toolchain']
# set of configure/build/install options that can be provided as lists for an iterated build
ITERATE_OPTIONS = ['builddependencies',
'preconfigopts', 'configopts', 'prebuildopts', 'buildopts', 'preinstallopts', 'installopts']
# name of easyconfigs archive subdirectory
EASYCONFIGS_ARCHIVE_DIR = '__archive__'
# prefix for names of local variables in easyconfig files
LOCAL_VAR_PREFIX = 'local_'
try:
import autopep8
HAVE_AUTOPEP8 = True
except ImportError as err:
_log.warning("Failed to import autopep8, dumping easyconfigs with reformatting enabled will not work: %s", err)
HAVE_AUTOPEP8 = False
_easyconfig_files_cache = {}
_easyconfigs_cache = {}
_path_indexes = {}
def handle_deprecated_or_replaced_easyconfig_parameters(ec_method):
"""Decorator to handle deprecated/replaced easyconfig parameters."""
def new_ec_method(self, key, *args, **kwargs):
"""Check whether any replace easyconfig parameters are still used"""
# map deprecated parameters to their replacements, issue deprecation warning(/error)
if key in DEPRECATED_PARAMETERS:
depr_key = key
key, ver = DEPRECATED_PARAMETERS[depr_key]
_log.deprecated("Easyconfig parameter '%s' is deprecated, use '%s' instead." % (depr_key, key), ver)
if key in REPLACED_PARAMETERS:
_log.nosupport("Easyconfig parameter '%s' is replaced by '%s'" % (key, REPLACED_PARAMETERS[key]), '2.0')
return ec_method(self, key, *args, **kwargs)
return new_ec_method
def is_local_var_name(name):
"""
Determine whether provided variable name can be considered as the name of a local variable:
One of the following suffices to be considered a name of a local variable:
* name starts with 'local_' or '_'
* name consists of a single letter
* name is __builtins__ (which is always defined)
"""
res = False
if name.startswith(LOCAL_VAR_PREFIX) or name.startswith('_'):
res = True
# __builtins__ is always defined as a 'local' variables
# single-letter local variable names are allowed (mainly for use in list comprehensions)
# in Python 2, variables defined in list comprehensions leak to the outside (no longer the case in Python 3)
elif name in ['__builtins__']:
res = True
# single letters are acceptable names for local variables
elif re.match('^[a-zA-Z]$', name):
res = True
return res
def triage_easyconfig_params(variables, ec):
"""
Triage supplied variables into known easyconfig parameters and other variables.
Unknown easyconfig parameters that have a single-letter name, or of which the name starts with 'local_'
are considered to be local variables.
:param variables: dictionary with names/values of variables that should be triaged
:param ec: dictionary with set of known easyconfig parameters
:return: 2-tuple with dict of names/values for known easyconfig parameters + unknown (non-local) variables
"""
# first make sure that none of the known easyconfig parameters have a name that makes it look like a local variable
wrong_params = []
for key in ec:
if is_local_var_name(key):
wrong_params.append(key)
if wrong_params:
raise EasyBuildError("Found %d easyconfig parameters that are considered local variables: %s",
len(wrong_params), ', '.join(sorted(wrong_params)))
ec_params, unknown_keys = {}, []
for key in variables:
# validations are skipped, just set in the config
if key in ec:
ec_params[key] = variables[key]
_log.debug("setting config option %s: value %s (type: %s)", key, ec_params[key], type(ec_params[key]))
elif key in REPLACED_PARAMETERS:
_log.nosupport("Easyconfig parameter '%s' is replaced by '%s'" % (key, REPLACED_PARAMETERS[key]), '2.0')
# anything else is considered to be a local variable in the easyconfig file;
# to catch mistakes (using unknown easyconfig parameters),
# and to protect against using a local variable name that may later become a known easyconfig parameter,
# we require that non-single letter names of local variables start with 'local_'
elif is_local_var_name(key):
_log.debug("Ignoring local variable '%s' (value: %s)", key, variables[key])
else:
unknown_keys.append(key)
return ec_params, unknown_keys
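# Minimal sketch of the triage behaviour (variable names/values are made up for illustration):
#   ec_params, unknown = triage_easyconfig_params({'name': 'zlib', 'local_cflags': '-O2', 'madeupopt': 1}, ec)
#   # 'name' lands in ec_params (assuming it is a known parameter in ec), 'local_cflags' is
#   # silently ignored as a local variable, and 'madeupopt' ends up in the unknown list.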
def toolchain_hierarchy_cache(func):
"""Function decorator to cache (and retrieve cached) toolchain hierarchy queries."""
cache = {}
@functools.wraps(func)
def cache_aware_func(toolchain, incl_capabilities=False):
"""Look up toolchain hierarchy in cache first, determine and cache it if not available yet."""
cache_key = (toolchain['name'], toolchain['version'], incl_capabilities)
# fetch from cache if available, cache it if it's not
if cache_key in cache:
_log.debug("Using cache to return hierarchy for toolchain %s: %s", str(toolchain), cache[cache_key])
return cache[cache_key]
else:
toolchain_hierarchy = func(toolchain, incl_capabilities)
cache[cache_key] = toolchain_hierarchy
return cache[cache_key]
# Expose clear method of cache to wrapped function
cache_aware_func.clear = cache.clear
return cache_aware_func
def det_subtoolchain_version(current_tc, subtoolchain_name, optional_toolchains, cands, incl_capabilities=False):
"""
Returns unique version for subtoolchain, in tc dict.
If there is no unique version:
* use '' for system, if system is not skipped.
* return None for skipped subtoolchains, that is,
optional toolchains or system toolchain without add_system_to_minimal_toolchains.
* in all other cases, raises an exception.
"""
uniq_subtc_versions = set([subtc['version'] for subtc in cands if subtc['name'] == subtoolchain_name])
# init with "skipped"
subtoolchain_version = None
# system toolchain: bottom of the hierarchy
if is_system_toolchain(subtoolchain_name):
add_system_to_minimal_toolchains = build_option('add_system_to_minimal_toolchains')
if not add_system_to_minimal_toolchains and build_option('add_dummy_to_minimal_toolchains'):
depr_msg = "Use --add-system-to-minimal-toolchains instead of --add-dummy-to-minimal-toolchains"
_log.deprecated(depr_msg, '5.0')
add_system_to_minimal_toolchains = True
if add_system_to_minimal_toolchains and not incl_capabilities:
subtoolchain_version = ''
elif len(uniq_subtc_versions) == 1:
subtoolchain_version = list(uniq_subtc_versions)[0]
elif len(uniq_subtc_versions) == 0:
if subtoolchain_name not in optional_toolchains:
# raise error if the subtoolchain considered now is not optional
raise EasyBuildError("No version found for subtoolchain %s in dependencies of %s",
subtoolchain_name, current_tc['name'])
else:
raise EasyBuildError("Multiple versions of %s found in dependencies of toolchain %s: %s",
subtoolchain_name, current_tc['name'], ', '.join(sorted(uniq_subtc_versions)))
return subtoolchain_version
@toolchain_hierarchy_cache
def get_toolchain_hierarchy(parent_toolchain, incl_capabilities=False):
r"""
Determine list of subtoolchains for specified parent toolchain.
Result starts with the most minimal subtoolchains first, ends with specified toolchain.
The system toolchain is considered the most minimal subtoolchain only if the add_system_to_minimal_toolchains
build option is enabled.
The most complex hierarchy we have now is goolfc which works as follows:
                     goolfc
                    /      \
               gompic      golfc(*)
                    \      /    \        (*) optional toolchains, not compulsory for backwards compatibility
                    gcccuda    golf(*)
                         \     /
                          GCC
                         /   |
                GCCcore(*)   |
                         \   |
(system: only considered if --add-system-to-minimal-toolchains configuration option is enabled)
:param parent_toolchain: dictionary with name/version of parent toolchain
:param incl_capabilities: also register toolchain capabilities in result
"""
# obtain list of all possible subtoolchains
_, all_tc_classes = search_toolchain('')
subtoolchains = dict((tc_class.NAME, getattr(tc_class, 'SUBTOOLCHAIN', None)) for tc_class in all_tc_classes)
optional_toolchains = set(tc_class.NAME for tc_class in all_tc_classes if getattr(tc_class, 'OPTIONAL', False))
composite_toolchains = set(tc_class.NAME for tc_class in all_tc_classes if len(tc_class.__bases__) > 1)
# the parent toolchain is at the top of the hierarchy,
# we need a copy so that adding capabilities (below) doesn't affect the original object
toolchain_hierarchy = [copy.copy(parent_toolchain)]
# use a queue to handle a breadth-first-search of the hierarchy,
# which is required to take into account the potential for multiple subtoolchains
bfs_queue = [parent_toolchain]
visited = set()
while bfs_queue:
current_tc = bfs_queue.pop()
current_tc_name, current_tc_version = current_tc['name'], current_tc['version']
subtoolchain_names = subtoolchains[current_tc_name]
# if current toolchain has no subtoolchains, consider next toolchain in queue
if subtoolchain_names is None:
continue
# make sure we always have a list of subtoolchains, even if there's only one
if not isinstance(subtoolchain_names, list):
subtoolchain_names = [subtoolchain_names]
# grab the easyconfig of the current toolchain and search the dependencies for a version of the subtoolchain
path = robot_find_easyconfig(current_tc_name, current_tc_version)
if path is None:
raise EasyBuildError("Could not find easyconfig for %s toolchain version %s",
current_tc_name, current_tc_version)
# parse the easyconfig
parsed_ec = process_easyconfig(path, validate=False)[0]
# search for version of the subtoolchain in dependencies
# considers deps + toolchains of deps + deps of deps + toolchains of deps of deps
# consider both version and versionsuffix for dependencies
cands = []
for dep in parsed_ec['ec'].dependencies():
# skip dependencies that are marked as external modules
if dep['external_module']:
continue
# include dep and toolchain of dep as candidates
cands.extend([
{'name': dep['name'], 'version': dep['version'] + dep['versionsuffix']},
dep['toolchain'],
])
# find easyconfig file for this dep and parse it
ecfile = robot_find_easyconfig(dep['name'], det_full_ec_version(dep))
if ecfile is None:
raise EasyBuildError("Could not find easyconfig for dependency %s with version %s",
dep['name'], det_full_ec_version(dep))
easyconfig = process_easyconfig(ecfile, validate=False)[0]['ec']
# include deps and toolchains of deps of this dep, but skip dependencies marked as external modules
for depdep in easyconfig.dependencies():
if depdep['external_module']:
continue
cands.append({'name': depdep['name'], 'version': depdep['version'] + depdep['versionsuffix']})
cands.append(depdep['toolchain'])
for dep in subtoolchain_names:
# try to find subtoolchains with the same version as the parent
# only do this for composite toolchains, not single-compiler toolchains, whose
# versions match those of the component instead of being e.g. "2018a".
if dep in composite_toolchains:
ecfile = robot_find_easyconfig(dep, current_tc_version)
if ecfile is not None:
cands.append({'name': dep, 'version': current_tc_version})
# only retain candidates that match subtoolchain names
cands = [c for c in cands if c['name'] in subtoolchain_names]
for subtoolchain_name in subtoolchain_names:
subtoolchain_version = det_subtoolchain_version(current_tc, subtoolchain_name, optional_toolchains, cands,
incl_capabilities=incl_capabilities)
# add to hierarchy and move to next
if subtoolchain_version is not None and subtoolchain_name not in visited:
tc = {'name': subtoolchain_name, 'version': subtoolchain_version}
toolchain_hierarchy.insert(0, tc)
bfs_queue.insert(0, tc)
visited.add(subtoolchain_name)
# also add toolchain capabilities
if incl_capabilities:
for toolchain in toolchain_hierarchy:
toolchain_class, _ = search_toolchain(toolchain['name'])
tc = toolchain_class(version=toolchain['version'])
for capability in TOOLCHAIN_CAPABILITIES:
# cuda is the special case which doesn't have a family attribute
if capability == TOOLCHAIN_CAPABILITY_CUDA:
# use None rather than False, useful to have it consistent with the rest
toolchain[capability] = isinstance(tc, Cuda) or None
elif hasattr(tc, capability):
toolchain[capability] = getattr(tc, capability)()
_log.info("Found toolchain hierarchy for toolchain %s: %s", parent_toolchain, toolchain_hierarchy)
return toolchain_hierarchy
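# Illustrative shape of the result (toolchain name/version are only an example):
#   get_toolchain_hierarchy({'name': 'foss', 'version': '2018a'})
#   # -> [{'name': 'GCC', 'version': ...}, ..., {'name': 'foss', 'version': '2018a'}]
#   # i.e. ordered from the most minimal subtoolchain up to the specified parent toolchain.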
@contextmanager
def disable_templating(ec):
"""Temporarily disable templating on the given EasyConfig
Usage:
with disable_templating(ec):
# Do what you want without templating
# Templating set to previous value
"""
old_enable_templating = ec.enable_templating
ec.enable_templating = False
try:
yield old_enable_templating
finally:
ec.enable_templating = old_enable_templating
class EasyConfig(object):
"""
Class which handles loading, reading, validation of easyconfigs
"""
def __init__(self, path, extra_options=None, build_specs=None, validate=True, hidden=None, rawtxt=None,
auto_convert_value_types=True, local_var_naming_check=None):
"""
initialize an easyconfig.
:param path: path to easyconfig file to be parsed (ignored if rawtxt is specified)
:param extra_options: dictionary with extra variables that can be set for this specific instance
:param build_specs: dictionary of build specifications (see EasyConfig class, default: {})
:param validate: indicates whether validation should be performed (note: combined with 'validate' build option)
:param hidden: indicate whether corresponding module file should be installed hidden ('.'-prefixed)
:param rawtxt: raw contents of easyconfig file
:param auto_convert_value_types: indicates whether types of easyconfig values should be automatically converted
in case they are wrong
:param local_var_naming_check: mode to use when checking if local variables use the recommended naming scheme
"""
self.template_values = None
self.enable_templating = True # a boolean to control templating
self.log = fancylogger.getLogger(self.__class__.__name__, fname=False)
if path is not None and not os.path.isfile(path):
raise EasyBuildError("EasyConfig __init__ expected a valid path")
# read easyconfig file contents (or use provided rawtxt), so it can be passed down to avoid multiple re-reads
self.path = None
if rawtxt is None:
self.path = path
self.rawtxt = read_file(path)
self.log.debug("Raw contents from supplied easyconfig file %s: %s", path, self.rawtxt)
else:
self.rawtxt = rawtxt
self.log.debug("Supplied raw easyconfig contents: %s" % self.rawtxt)
# constructing easyconfig parser object includes a "raw" parse,
# which serves as a check to see whether supplied easyconfig file is an actual easyconfig...
self.log.info("Performing quick parse to check for valid easyconfig file...")
self.parser = EasyConfigParser(filename=self.path, rawcontent=self.rawtxt,
auto_convert_value_types=auto_convert_value_types)
self.modules_tool = modules_tool()
# use legacy module classes as default
self.valid_module_classes = build_option('valid_module_classes')
if self.valid_module_classes is not None:
self.log.info("Obtained list of valid module classes: %s" % self.valid_module_classes)
self._config = copy.deepcopy(DEFAULT_CONFIG)
# obtain name and easyblock specifications from raw easyconfig contents
self.software_name, self.easyblock = fetch_parameters_from_easyconfig(self.rawtxt, ['name', 'easyblock'])
# determine set of extra easyconfig parameters
if extra_options is None:
easyblock_class = get_easyblock_class(self.easyblock, name=self.software_name)
self.extra_options = easyblock_class.extra_options()
else:
self.extra_options = extra_options
if not isinstance(self.extra_options, dict):
tup = (type(self.extra_options), self.extra_options)
self.log.nosupport("extra_options return value should be of type 'dict', found '%s': %s" % tup, '2.0')
self.mandatory = MANDATORY_PARAMS[:]
# deep copy to make sure self.extra_options remains unchanged
self.extend_params(copy.deepcopy(self.extra_options))
# set valid stops
self.valid_stops = build_option('valid_stops')
self.log.debug("List of valid stops obtained: %s" % self.valid_stops)
# store toolchain
self._toolchain = None
self.validations = {
'moduleclass': self.valid_module_classes,
'stop': self.valid_stops,
}
self.external_modules_metadata = build_option('external_modules_metadata')
# list of all options to iterate over
self.iterate_options = []
self.iterating = False
# parse easyconfig file
self.build_specs = build_specs
self.parse()
self.local_var_naming(local_var_naming_check)
# check whether this easyconfig file is deprecated, and act accordingly if so
self.check_deprecated(self.path)
# perform validations
self.validation = build_option('validate') and validate
if self.validation:
self.validate(check_osdeps=build_option('check_osdeps'))
# filter hidden dependencies from list of dependencies
self.filter_hidden_deps()
self._all_dependencies = None
# keep track of whether the generated module file should be hidden
if hidden is None:
hidden = self['hidden'] or build_option('hidden')
self.hidden = hidden
# set installdir/module info
mns = ActiveMNS()
self.full_mod_name = mns.det_full_module_name(self)
self.short_mod_name = mns.det_short_module_name(self)
self.mod_subdir = mns.det_module_subdir(self)
self.set_default_module = False
self.software_license = None
def filename(self):
"""Determine correct filename for this easyconfig file."""
if is_yeb_format(self.path, self.rawtxt):
ext = YEB_FORMAT_EXTENSION
else:
ext = EB_FORMAT_EXTENSION
return '%s-%s%s' % (self.name, det_full_ec_version(self), ext)
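# Example of the resulting filename (hedged; the exact string depends on det_full_ec_version):
# for name 'zlib', version '1.2.11' and a GCC/6.4.0 toolchain this would typically be
# 'zlib-1.2.11-GCC-6.4.0.eb', or end in '.yeb' for easyconfigs in YAML format.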
def extend_params(self, extra, overwrite=True):
"""Extend list of known parameters via provided list of extra easyconfig parameters."""
self.log.debug("Extending list of known easyconfig parameters with: %s", ' '.join(extra.keys()))
if overwrite:
self._config.update(extra)
else:
for key in extra:
if key not in self._config:
self._config[key] = extra[key]
self.log.debug("Added new easyconfig parameter: %s", key)
else:
self.log.debug("Easyconfig parameter %s already known, not overwriting", key)
# extend mandatory keys
for key, value in extra.items():
if value[2] == MANDATORY:
self.mandatory.append(key)
self.log.debug("Updated list of mandatory easyconfig parameters: %s", self.mandatory)
def copy(self, validate=None):
"""
Return a copy of this EasyConfig instance.
"""
if validate is None:
validate = self.validation
# create a new EasyConfig instance
ec = EasyConfig(self.path, validate=validate, hidden=self.hidden, rawtxt=self.rawtxt)
# take a copy of the actual config dictionary (which already contains the extra options)
ec._config = copy.deepcopy(self._config)
# since rawtxt is defined, self.path may not get inherited, make sure it does
if self.path:
ec.path = self.path
return ec
def update(self, key, value, allow_duplicate=True):
"""
Update an easyconfig parameter with the specified value (i.e. append to it).
Note: For dictionary easyconfig parameters, 'allow_duplicate' is ignored (since it's meaningless).
"""
if isinstance(value, string_type):
inval = [value]
elif isinstance(value, (list, dict, tuple)):
inval = value
else:
msg = "Can't update configuration value for %s, because the attempted"
msg += " update value, '%s', is not a string, list, tuple or dictionary."
raise EasyBuildError(msg, key, value)
# For easyconfig parameters that are dictionaries, input value must also be a dictionary
if isinstance(self[key], dict) and not isinstance(value, dict):
msg = "Can't update configuration value for %s, because the attempted"
msg += "update value (%s), is not a dictionary (type: %s)."
raise EasyBuildError(msg, key, value, type(value))
# Grab current parameter value so we can modify it
param_value = copy.deepcopy(self[key])
if isinstance(param_value, string_type):
for item in inval:
# re.search: only add value to string if it's not there yet (surrounded by whitespace)
if allow_duplicate or (not re.search(r'(^|\s+)%s(\s+|$)' % re.escape(item), param_value)):
param_value = param_value + ' %s ' % item
elif isinstance(param_value, (list, tuple)):
# make sure we have a list value so we can just append to it
param_value = list(param_value)
for item in inval:
if allow_duplicate or item not in param_value:
param_value.append(item)
# cast back to tuple if original value was a tuple
if isinstance(self[key], tuple):
param_value = tuple(param_value)
elif isinstance(param_value, dict):
param_value.update(inval)
else:
msg = "Can't update configuration value for %s, because it's not a string, list, tuple or dictionary."
raise EasyBuildError(msg, key)
# Overwrite easyconfig parameter value with updated value, preserving type
self[key] = param_value
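# Minimal usage sketch (parameter values are illustrative):
#   ec.update('configopts', '--enable-shared')  # appended to the existing string value
#   ec.update('patches', ['extra_fix.patch'])   # appended to the existing list value
#   # with allow_duplicate=False, values that are already present are not added again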
def set_keys(self, params):
"""
Set keys in this EasyConfig instance based on supplied easyconfig parameter values.
If any unknown easyconfig parameters are encountered here, an error is raised.
:param params: a dict value with names/values of easyconfig parameters to set
"""
# disable templating when setting easyconfig parameters
# required to avoid problems with values that need more parsing to be done (e.g. dependencies)
with disable_templating(self):
for key in sorted(params.keys()):
# validations are skipped, just set in the config
if key in self._config.keys():
self[key] = params[key]
self.log.info("setting easyconfig parameter %s: value %s (type: %s)",
key, self[key], type(self[key]))
else:
raise EasyBuildError("Unknown easyconfig parameter: %s (value '%s')", key, params[key])
def parse(self):
"""
Parse the file and set options
mandatory requirements are checked here
"""
if self.build_specs is None:
arg_specs = {}
elif isinstance(self.build_specs, dict):
# build a new dictionary with only the expected keys, to pass as named arguments to get_config_dict()
arg_specs = self.build_specs
else:
raise EasyBuildError("Specifications should be specified using a dictionary, got %s",
type(self.build_specs))
self.log.debug("Obtained specs dict %s" % arg_specs)
self.log.info("Parsing easyconfig file %s with rawcontent: %s", self.path, self.rawtxt)
self.parser.set_specifications(arg_specs)
local_vars = self.parser.get_config_dict()
self.log.debug("Parsed easyconfig as a dictionary: %s" % local_vars)
# make sure all mandatory parameters are defined
# this includes both generic mandatory parameters and software-specific parameters defined via extra_options
missing_mandatory_keys = [key for key in self.mandatory if key not in local_vars]
if missing_mandatory_keys:
raise EasyBuildError("mandatory parameters not provided in %s: %s", self.path, missing_mandatory_keys)
# provide suggestions for typos
possible_typos = [(key, difflib.get_close_matches(key.lower(), self._config.keys(), 1, 0.85))
for key in local_vars if key not in self]
typos = [(key, guesses[0]) for (key, guesses) in possible_typos if len(guesses) == 1]
if typos:
raise EasyBuildError("You may have some typos in your easyconfig file: %s",
', '.join(["%s -> %s" % typo for typo in typos]))
# set keys in current EasyConfig instance based on dict obtained by parsing easyconfig file
known_ec_params, self.unknown_keys = triage_easyconfig_params(local_vars, self._config)
self.set_keys(known_ec_params)
# templating is disabled when parse_hook is called to allow for easy updating of mutable easyconfig parameters
# (see also comment in resolve_template)
with disable_templating(self):
# if any lists of dependency versions are specified over which we should iterate,
# deal with them now, before calling parse hook, parsing of dependencies & iterative easyconfig parameters
self.handle_multi_deps()
parse_hook_msg = None
if self.path:
parse_hook_msg = "Running %s hook for %s..." % (PARSE, os.path.basename(self.path))
# trigger parse hook
hooks = load_hooks(build_option('hooks'))
run_hook(PARSE, hooks, args=[self], msg=parse_hook_msg)
# parse dependency specifications
# it's important that templating is still disabled at this stage!
self.log.info("Parsing dependency specifications...")
self['dependencies'] = [self._parse_dependency(dep) for dep in self['dependencies']]
self['hiddendependencies'] = [
self._parse_dependency(dep, hidden=True) for dep in self['hiddendependencies']
]
# need to take into account that builddependencies may need to be iterated over,
# i.e. when the value is a list of lists of tuples
builddeps = self['builddependencies']
if builddeps and all(isinstance(x, (list, tuple)) for b in builddeps for x in b):
self.iterate_options.append('builddependencies')
builddeps = [[self._parse_dependency(dep, build_only=True) for dep in x] for x in builddeps]
else:
builddeps = [self._parse_dependency(dep, build_only=True) for dep in builddeps]
self['builddependencies'] = builddeps
# keep track of parsed multi deps, they'll come in handy during sanity check & module steps...
self.multi_deps = self.get_parsed_multi_deps()
# update templating dictionary
self.generate_template_values()
# finalize dependencies w.r.t. minimal toolchains & module names
self._finalize_dependencies()
# indicate that this is a parsed easyconfig
self._config['parsed'] = [True, "This is a parsed easyconfig", "HIDDEN"]
def local_var_naming(self, local_var_naming_check):
"""Deal with local variables that do not follow the recommended naming scheme (if any)."""
if local_var_naming_check is None:
local_var_naming_check = build_option('local_var_naming_check')
if self.unknown_keys:
cnt = len(self.unknown_keys)
if self.path:
in_fn = "in %s" % os.path.basename(self.path)
else:
in_fn = ''
unknown_keys_msg = ', '.join(sorted(self.unknown_keys))
msg = "Use of %d unknown easyconfig parameters detected %s: %s\n" % (cnt, in_fn, unknown_keys_msg)
msg += "If these are just local variables please rename them to start with '%s', " % LOCAL_VAR_PREFIX
msg += "or try using --fix-deprecated-easyconfigs to do this automatically.\nFor more information, see "
msg += "https://easybuild.readthedocs.io/en/latest/Easyconfig-files-local-variables.html ."
# always log a warning if local variables that don't follow the recommended naming scheme are found
self.log.warning(msg)
if local_var_naming_check == LOCAL_VAR_NAMING_CHECK_ERROR:
raise EasyBuildError(msg)
elif local_var_naming_check == LOCAL_VAR_NAMING_CHECK_WARN:
print_warning(msg, silent=build_option('silent'))
elif local_var_naming_check != LOCAL_VAR_NAMING_CHECK_LOG:
raise EasyBuildError("Unknown mode for checking local variable names: %s", local_var_naming_check)
def check_deprecated(self, path):
"""Check whether this easyconfig file is deprecated."""
depr_msgs = []
deprecated = self['deprecated']
if deprecated:
if isinstance(deprecated, string_type):
depr_msgs.append("easyconfig file '%s' is marked as deprecated:\n%s\n" % (path, deprecated))
else:
raise EasyBuildError("Wrong type for value of 'deprecated' easyconfig parameter: %s", type(deprecated))
if self.toolchain.is_deprecated():
depr_msgs.append("toolchain '%(name)s/%(version)s' is marked as deprecated" % self['toolchain'])
if depr_msgs:
depr_msg = ', '.join(depr_msgs)
depr_maj_ver = int(str(VERSION).split('.')[0]) + 1
depr_ver = '%s.0' % depr_maj_ver
more_info_depr_ec = " (see also http://easybuild.readthedocs.org/en/latest/Deprecated-easyconfigs.html)"
self.log.deprecated(depr_msg, depr_ver, more_info=more_info_depr_ec, silent=build_option('silent'))
def validate(self, check_osdeps=True):
"""
Validate this easyconfig
- ensure certain easyconfig parameters are set to a known value (see self.validations)
- check OS dependencies
- check license
"""
self.log.info("Validating easyconfig")
for attr in self.validations:
self._validate(attr, self.validations[attr])
if check_osdeps:
self.log.info("Checking OS dependencies")
self.validate_os_deps()
else:
self.log.info("Not checking OS dependencies")
self.log.info("Checking skipsteps")
if not isinstance(self._config['skipsteps'][0], (list, tuple,)):
raise EasyBuildError('Invalid type for skipsteps. Allowed are list or tuple, got %s (%s)',
type(self._config['skipsteps'][0]), self._config['skipsteps'][0])
self.log.info("Checking build option lists")
self.validate_iterate_opts_lists()
self.log.info("Checking licenses")
self.validate_license()
def validate_license(self):
"""Validate the license"""
lic = self['software_license']
if lic is None:
# when mandatory, remove this possibility
if 'software_license' in self.mandatory:
raise EasyBuildError("Software license is mandatory, but 'software_license' is undefined")
elif lic in EASYCONFIG_LICENSES_DICT:
# create License instance
self.software_license = EASYCONFIG_LICENSES_DICT[lic]()
else:
known_licenses = ', '.join(sorted(EASYCONFIG_LICENSES_DICT.keys()))
raise EasyBuildError("Invalid license %s (known licenses: %s)", lic, known_licenses)
# TODO, when GROUP_SOURCE and/or GROUP_BINARY is True
# check the owner of source / binary (must match 'group' parameter from easyconfig)
return True
def validate_os_deps(self):
"""
validate presence of OS dependencies
osdependencies should be a single list
"""
not_found = []
for dep in self['osdependencies']:
# make sure we have a tuple
if isinstance(dep, string_type):
dep = (dep,)
elif not isinstance(dep, tuple):
raise EasyBuildError("Non-tuple value type for OS dependency specification: %s (type %s)",
dep, type(dep))
if not any([check_os_dependency(cand_dep) for cand_dep in dep]):
not_found.append(dep)
if not_found:
raise EasyBuildError("One or more OS dependencies were not found: %s", not_found)
else:
self.log.info("OS dependencies ok: %s" % self['osdependencies'])
return True
def validate_iterate_opts_lists(self):
"""
Configure/build/install options specified as lists should have same length.
"""
# configure/build/install options may be lists, in case of an iterated build
# when lists are used, they should be all of same length
# lists of length 1 are treated as if they were strings in EasyBlock
opt_counts = []
for opt in ITERATE_OPTIONS:
# only when builddependencies is a list of lists are we iterating over them
if opt == 'builddependencies' and not all(isinstance(e, list) for e in self.get_ref(opt)):
continue
opt_value = self.get(opt, None, resolve=False)
# anticipate changes in available easyconfig parameters (e.g. makeopts -> buildopts?)
if opt_value is None:
raise EasyBuildError("%s not available in self.cfg (anymore)?!", opt)
# keep track of list, supply first element as first option to handle
if isinstance(opt_value, (list, tuple)):
opt_counts.append((opt, len(opt_value)))
# make sure that options that specify lists have the same length
list_opt_lengths = [length for (opt, length) in opt_counts if length > 1]
if len(nub(list_opt_lengths)) > 1:
raise EasyBuildError("Build option lists for iterated build should have same length: %s", opt_counts)
return True
def start_iterating(self):
"""Start iterative mode."""
for opt in ITERATE_OPTIONS:
# builddependencies is already handled, see __init__
if opt == 'builddependencies':
continue
# list of values indicates that this is a value to iterate over
if isinstance(self[opt], (list, tuple)):
self.iterate_options.append(opt)
# keep track of when we're iterating (used by builddependencies())
self.iterating = True
def stop_iterating(self):
"""Stop iterative mode."""
self.iterating = False
def filter_hidden_deps(self):
"""
Replace dependencies by hidden dependencies in list of (build) dependencies, where appropriate.
"""
faulty_deps = []
# obtain reference to original lists, so their elements can be changed in place
deps = dict([(key, self.get_ref(key)) for key in ['dependencies', 'builddependencies', 'hiddendependencies']])
if 'builddependencies' in self.iterate_options:
deplists = copy.deepcopy(deps['builddependencies'])
else:
deplists = [deps['builddependencies']]
deplists.append(deps['dependencies'])
for hidden_idx, hidden_dep in enumerate(deps['hiddendependencies']):
hidden_mod_name = ActiveMNS().det_full_module_name(hidden_dep)
visible_mod_name = ActiveMNS().det_full_module_name(hidden_dep, force_visible=True)
# replace (build) dependencies with their equivalent hidden (build) dependency (if any)
replaced = False
for deplist in deplists:
for idx, dep in enumerate(deplist):
dep_mod_name = dep['full_mod_name']
if dep_mod_name in [visible_mod_name, hidden_mod_name]:
# track whether this hidden dep is listed as a build dep
hidden_dep = deps['hiddendependencies'][hidden_idx]
hidden_dep['build_only'] = dep['build_only']
# actual replacement
deplist[idx] = hidden_dep
replaced = True
if dep_mod_name == visible_mod_name:
msg = "Replaced (build)dependency matching hidden dependency %s"
else:
msg = "Hidden (build)dependency %s is already marked to be installed as a hidden module"
self.log.debug(msg, hidden_dep)
if not replaced:
# hidden dependencies must also be included in list of dependencies;
# this is done to try and make easyconfigs portable w.r.t. site-specific policies with minimal effort,
# i.e. by simply removing the 'hiddendependencies' specification
self.log.warning("Hidden dependency %s not in list of (build)dependencies", visible_mod_name)
faulty_deps.append(visible_mod_name)
if faulty_deps:
dep_mod_names = [dep['full_mod_name'] for dep in self['dependencies'] + self['builddependencies']]
raise EasyBuildError("Hidden deps with visible module names %s not in list of (build)dependencies: %s",
faulty_deps, dep_mod_names)
def parse_version_range(self, version_spec):
"""Parse provided version specification as a version range."""
res = {}
range_sep = ':' # version range separator (e.g. ]1.0:2.0])
if range_sep in version_spec:
# remove range characters ('[' and ']') to obtain lower/upper version limits
version_limits = re.sub(r'[\[\]]', '', version_spec).split(range_sep)
if len(version_limits) == 2:
res['lower'], res['upper'] = version_limits
if res['lower'] and res['upper'] and LooseVersion(res['lower']) > LooseVersion(res['upper']):
raise EasyBuildError("Incorrect version range, found lower limit > higher limit: %s", version_spec)
else:
raise EasyBuildError("Incorrect version range, expected lower/upper limit: %s", version_spec)
res['excl_lower'] = version_spec[0] == ']'
res['excl_upper'] = version_spec[-1] == '['
else: # strict version spec (not a range)
res['lower'] = res['upper'] = version_spec
res['excl_lower'] = res['excl_upper'] = False
return res
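# Worked examples, following directly from the code above:
#   parse_version_range(']1.0:2.0]') -> {'lower': '1.0', 'upper': '2.0',
#                                        'excl_lower': True, 'excl_upper': False}
#   parse_version_range('1.2.3')     -> strict spec: lower == upper == '1.2.3', no exclusions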
def parse_filter_deps(self):
"""Parse specifications for which dependencies should be filtered."""
res = {}
separator = '='
for filter_dep_spec in build_option('filter_deps') or []:
if separator in filter_dep_spec:
dep_specs = filter_dep_spec.split(separator)
if len(dep_specs) == 2:
dep_name, dep_version_spec = dep_specs
else:
raise EasyBuildError("Incorrect specification for dependency to filter: %s", filter_dep_spec)
res[dep_name] = self.parse_version_range(dep_version_spec)
else:
res[filter_dep_spec] = {'always_filter': True}
return res
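# Illustrative sketch, assuming --filter-deps=zlib,ncurses=]5.0:6.0] was specified:
#   parse_filter_deps() -> {'zlib': {'always_filter': True},
#                           'ncurses': {'lower': '5.0', 'upper': '6.0',
#                                       'excl_lower': True, 'excl_upper': False}}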
def dep_is_filtered(self, dep, filter_deps_specs):
"""Returns True if a dependency is filtered according to the filter_deps_specs"""
filter_dep = False
if dep['name'] in filter_deps_specs:
filter_spec = filter_deps_specs[dep['name']]
if filter_spec.get('always_filter', False):
filter_dep = True
else:
version = LooseVersion(dep['version'])
lower = LooseVersion(filter_spec['lower']) if filter_spec['lower'] else None
upper = LooseVersion(filter_spec['upper']) if filter_spec['upper'] else None
# assume dep is filtered before checking version range
filter_dep = True
# if version is lower than lower limit: no filtering
if lower:
if version < lower or (filter_spec['excl_lower'] and version == lower):
filter_dep = False
# if version is higher than upper limit: no filtering
if upper:
if version > upper or (filter_spec['excl_upper'] and version == upper):
filter_dep = False
return filter_dep
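# Example (using the hypothetical 'ncurses' filter spec sketched above): a dependency with
# version '5.9' falls inside the ]5.0:6.0] range and is filtered out, while version '4.9'
# is below the (exclusive) lower bound and is retained.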
def filter_deps(self, deps):
"""Filter dependencies according to 'filter-deps' configuration setting."""
retained_deps = []
filter_deps_specs = self.parse_filter_deps()
for dep in deps:
# figure out whether this dependency should be filtered
if self.dep_is_filtered(dep, filter_deps_specs):
self.log.info("filtered out dependency %s", dep)
else:
retained_deps.append(dep)
return retained_deps
def dependencies(self, build_only=False):
"""
Returns an array of parsed dependencies (after filtering, if requested)
dependency = {'name': '', 'version': '', 'system': (False|True), 'versionsuffix': '', 'toolchain': ''}
Iterable builddependencies are flattened when not iterating.
:param build_only: only return build dependencies, discard others
"""
deps = self.builddependencies()
if not build_only:
# use += rather than .extend to get a new list rather than updating list of build deps in place...
deps += self['dependencies']
# if filter-deps option is provided we "clean" the list of dependencies for
# each processed easyconfig to remove the unwanted dependencies
self.log.debug("Dependencies BEFORE filtering: %s", deps)
retained_deps = self.filter_deps(deps)
self.log.debug("Dependencies AFTER filtering: %s", retained_deps)
return retained_deps
def builddependencies(self):
"""
Return a flat list of the parsed build dependencies
When builddependencies are iterable they are flattened lists with
duplicates removed outside of the iterating process, because the callers
want simple lists.
"""
builddeps = self['builddependencies']
if 'builddependencies' in self.iterate_options and not self.iterating:
# flatten and remove duplicates (can't use 'nub', since dict values are not hashable)
all_builddeps = flatten(builddeps)
builddeps = []
for dep in all_builddeps:
if dep not in builddeps:
builddeps.append(dep)
return builddeps
@property
def name(self):
"""
returns name
"""
return self['name']
@property
def version(self):
"""
returns version
"""
return self['version']
@property
def toolchain(self):
"""
returns the Toolchain used
"""
if self._toolchain is None:
# provide list of (direct) toolchain dependencies (name & version), if easyconfig can be found for toolchain
tcdeps = None
tcname, tcversion = self['toolchain']['name'], self['toolchain']['version']
if not is_system_toolchain(tcname):
tc_ecfile = robot_find_easyconfig(tcname, tcversion)
if tc_ecfile is None:
self.log.debug("No easyconfig found for toolchain %s version %s, can't determine dependencies",
tcname, tcversion)
else:
self.log.debug("Found easyconfig for toolchain %s version %s: %s", tcname, tcversion, tc_ecfile)
tc_ec = process_easyconfig(tc_ecfile)[0]
tcdeps = tc_ec['ec'].dependencies()
self.log.debug("Toolchain dependencies based on easyconfig: %s", tcdeps)
self._toolchain = get_toolchain(self['toolchain'], self['toolchainopts'],
mns=ActiveMNS(), tcdeps=tcdeps, modtool=self.modules_tool)
tc_dict = self._toolchain.as_dict()
self.log.debug("Initialized toolchain: %s (opts: %s)" % (tc_dict, self['toolchainopts']))
return self._toolchain
@property
def all_dependencies(self):
"""Return list of all dependencies, incl. hidden/build deps & toolchain, but excluding filtered deps."""
if self._all_dependencies is None:
self.log.debug("Composing list of all dependencies (incl. toolchain)")
self._all_dependencies = copy.deepcopy(self.dependencies())
if not is_system_toolchain(self['toolchain']['name']):
self._all_dependencies.append(self.toolchain.as_dict())
return self._all_dependencies
def dump(self, fp, always_overwrite=True, backup=False, explicit_toolchains=False):
"""
Dump this easyconfig to file, with the given filename.
:param always_overwrite: overwrite existing file at specified location without use of --force
:param backup: create backup of existing file before overwriting it
"""
# templated values should be dumped unresolved
with disable_templating(self):
# build dict of default values
default_values = dict([(key, DEFAULT_CONFIG[key][0]) for key in DEFAULT_CONFIG])
default_values.update(dict([(key, self.extra_options[key][0]) for key in self.extra_options]))
self.generate_template_values()
templ_const = dict([(quote_py_str(const[1]), const[0]) for const in TEMPLATE_CONSTANTS])
# create reverse map of templates, to inject template values where possible
# longer template values are considered first, shorter template keys get preference over longer ones
sorted_keys = sorted(self.template_values, key=lambda k: (len(self.template_values[k]), -len(k)),
reverse=True)
templ_val = OrderedDict([])
for key in sorted_keys:
# shortest template 'key' is retained in case of duplicates
# ('namelower' is preferred over 'github_account')
# only template values longer than 2 characters are retained
if self.template_values[key] not in templ_val and len(self.template_values[key]) > 2:
templ_val[self.template_values[key]] = key
toolchain_hierarchy = None
if not explicit_toolchains:
try:
toolchain_hierarchy = get_toolchain_hierarchy(self['toolchain'])
except EasyBuildError as err:
# don't fail hard just because we can't get the hierarchy
self.log.warning('Could not generate toolchain hierarchy for %s to use in easyconfig dump method, '
'error:\n%s', self['toolchain'], str(err))
try:
ectxt = self.parser.dump(self, default_values, templ_const, templ_val,
toolchain_hierarchy=toolchain_hierarchy)
except NotImplementedError as err:
raise NotImplementedError(err)
self.log.debug("Dumped easyconfig: %s", ectxt)
if build_option('dump_autopep8'):
autopep8_opts = {
'aggressive': 1, # enable non-whitespace changes, but don't be too aggressive
'max_line_length': 120,
}
self.log.info("Reformatting dumped easyconfig using autopep8 (options: %s)", autopep8_opts)
ectxt = autopep8.fix_code(ectxt, options=autopep8_opts)
self.log.debug("Dumped easyconfig after autopep8 reformatting: %s", ectxt)
if not ectxt.endswith('\n'):
ectxt += '\n'
write_file(fp, ectxt, always_overwrite=always_overwrite, backup=backup, verbose=backup)
def _validate(self, attr, values): # private method
"""
validation helper method: 'attr' is the attribute to check, 'values' are the allowed values.
If the value of the attribute is not in the 'values' list, an error is reported.
"""
if values is None:
values = []
if self[attr] and self[attr] not in values:
raise EasyBuildError("%s provided '%s' is not valid: %s", attr, self[attr], values)
def probe_external_module_metadata(self, mod_name, existing_metadata=None):
"""
Helper function for handle_external_module_metadata.
Tries to determine metadata for an external module when there is no entry in the metadata file,
by looking at the variables defined by the module file.
This is mainly intended for modules provided in the Cray Programming Environment,
but it could also be useful in other contexts.
The following pairs of variables are considered (in order, first hit wins),
where 'XXX' is the software name in capitals:
1. $CRAY_XXX_PREFIX and $CRAY_XXX_VERSION
2. $CRAY_XXX_PREFIX_DIR and $CRAY_XXX_VERSION
3. $CRAY_XXX_DIR and $CRAY_XXX_VERSION
4. $CRAY_XXX_ROOT and $CRAY_XXX_VERSION
5. $XXX_PREFIX and $XXX_VERSION
6. $XXX_DIR and $XXX_VERSION
7. $XXX_ROOT and $XXX_VERSION
8. $XXX_HOME and $XXX_VERSION
If none of the pairs is found, then an empty dictionary is returned.
:param mod_name: name of the external module
:param existing_metadata: already available metadata for this external module (if any)
"""
res = {}
if existing_metadata is None:
existing_metadata = {}
soft_name = existing_metadata.get('name')
if soft_name:
# software name is a list of names in metadata, just grab first one
soft_name = soft_name[0]
else:
# if the software name is not known yet, use the first part of the module name as software name,
# but strip off the leading 'cray-' part first (examples: cray-netcdf/4.6.1.3, cray-fftw/3.3.8.2)
soft_name = mod_name.split('/')[0]
cray_prefix = 'cray-'
if soft_name.startswith(cray_prefix):
soft_name = soft_name[len(cray_prefix):]
# determine software name to use in names of environment variables (upper case, '-' becomes '_')
soft_name_in_mod_name = convert_name(soft_name.replace('-', '_'), upper=True)
var_name_pairs = [
('CRAY_%s_PREFIX', 'CRAY_%s_VERSION'),
('CRAY_%s_PREFIX_DIR', 'CRAY_%s_VERSION'),
('CRAY_%s_DIR', 'CRAY_%s_VERSION'),
('CRAY_%s_ROOT', 'CRAY_%s_VERSION'),
('%s_PREFIX', '%s_VERSION'),
('%s_DIR', '%s_VERSION'),
('%s_ROOT', '%s_VERSION'),
('%s_HOME', '%s_VERSION'),
]
for prefix_var_name, version_var_name in var_name_pairs:
prefix_var_name = prefix_var_name % soft_name_in_mod_name
version_var_name = version_var_name % soft_name_in_mod_name
prefix = self.modules_tool.get_setenv_value_from_modulefile(mod_name, prefix_var_name)
version = self.modules_tool.get_setenv_value_from_modulefile(mod_name, version_var_name)
# we only have a hit when values for *both* variables are found
if prefix and version:
if 'name' not in existing_metadata:
res['name'] = [soft_name]
# if a version is already set in the available metadata, we retain it
if 'version' not in existing_metadata:
res['version'] = [version]
self.log.info('setting external module %s version to be %s', mod_name, version)
# if a prefix is already set in the available metadata, we retain it
if 'prefix' not in existing_metadata:
res['prefix'] = prefix
self.log.info('setting external module %s prefix to be %s', mod_name, prefix_var_name)
break
return res
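# Illustrative probe (module name is only an example): for external module 'cray-netcdf/4.6.1.3'
# the derived software name is 'netcdf', so $CRAY_NETCDF_PREFIX/$CRAY_NETCDF_VERSION,
# $CRAY_NETCDF_PREFIX_DIR/$CRAY_NETCDF_VERSION, ..., $NETCDF_HOME/$NETCDF_VERSION are checked
# in order, and the first pair for which both variables are set in the module file wins.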
def handle_external_module_metadata(self, mod_name):
"""
Helper function for _parse_dependency; collects metadata for external module dependencies.
:param mod_name: name of external module to collect metadata for
"""
partial_mod_name = mod_name.split('/')[0]
# check whether existing metadata for external modules already has metadata for this module;
# first using full module name (as it is provided), for example 'cray-netcdf/4.6.1.3',
# then with partial module name, for example 'cray-netcdf'
metadata = self.external_modules_metadata.get(mod_name, {})
self.log.info("Available metadata for external module %s: %s", mod_name, metadata)
partial_mod_name_metadata = self.external_modules_metadata.get(partial_mod_name, {})
self.log.info("Available metadata for external module using partial module name %s: %s",
partial_mod_name, partial_mod_name_metadata)
for key in partial_mod_name_metadata:
if key not in metadata:
metadata[key] = partial_mod_name_metadata[key]
self.log.info("Combined available metadata for external module %s: %s", mod_name, metadata)
# if not all metadata is available (name/version/prefix), probe external module to collect more metadata;
# first with full module name, and then with partial module name if first probe didn't return anything;
# note: result of probe_external_module_metadata only contains metadata for keys that were not set yet
if not all(key in metadata for key in ['name', 'prefix', 'version']):
self.log.info("Not all metadata found yet for external module %s, probing module...", mod_name)
probed_metadata = self.probe_external_module_metadata(mod_name, existing_metadata=metadata)
if probed_metadata:
self.log.info("Extra metadata found by probing external module %s: %s", mod_name, probed_metadata)
metadata.update(probed_metadata)
else:
self.log.info("No extra metadata found by probing %s, trying with partial module name...", mod_name)
probed_metadata = self.probe_external_module_metadata(partial_mod_name, existing_metadata=metadata)
self.log.info("Extra metadata for external module %s found by probing partial module name %s: %s",
mod_name, partial_mod_name, probed_metadata)
metadata.update(probed_metadata)
self.log.info("Obtained metadata after module probing: %s", metadata)
return {'external_module_metadata': metadata}
def handle_multi_deps(self):
"""
Handle lists of dependency versions of which we should iterate specified in 'multi_deps' easyconfig parameter.
This is basically just syntactic sugar to prevent having to specify a list of lists in 'builddependencies'.
"""
multi_deps = self['multi_deps']
if multi_deps:
# first, make sure all lists have same length, otherwise we're dealing with invalid input...
multi_dep_cnts = nub([len(dep_vers) for dep_vers in multi_deps.values()])
if len(multi_dep_cnts) == 1:
multi_dep_cnt = multi_dep_cnts[0]
else:
raise EasyBuildError("Not all the dependencies listed in multi_deps have the same number of versions!")
self.log.info("Found %d lists of %d dependency versions to iterate over", len(multi_deps), multi_dep_cnt)
# make sure that build dependencies is not a list of lists to iterate over already...
if self['builddependencies'] and all(isinstance(bd, list) for bd in self['builddependencies']):
raise EasyBuildError("Can't combine multi_deps with builddependencies specified as list of lists")
# now make builddependencies a list of lists to iterate over
builddeps = self['builddependencies']
self['builddependencies'] = []
keys = sorted(multi_deps.keys())
for idx in range(multi_dep_cnt):
self['builddependencies'].append([(key, multi_deps[key][idx]) for key in keys] + builddeps)
self.log.info("Original list of build dependencies: %s", builddeps)
self.log.info("List of lists of build dependencies to iterate over: %s", self['builddependencies'])
def get_parsed_multi_deps(self):
"""Get list of lists of parsed dependencies that correspond with entries in multi_deps easyconfig parameter."""
multi_deps = []
if self['multi_deps']:
builddeps = self['builddependencies']
# all multi_deps entries should be listed in builddependencies (if not, something is very wrong)
if isinstance(builddeps, list) and all(isinstance(x, list) for x in builddeps):
for iter_id in range(len(builddeps)):
# only build dependencies that correspond to multi_deps entries should be loaded as extra modules
# (other build dependencies should not be required to make sanity check pass for this iteration)
iter_deps = []
for key in self['multi_deps']:
hits = [d for d in builddeps[iter_id] if d['name'] == key]
if len(hits) == 1:
iter_deps.append(hits[0])
else:
raise EasyBuildError("Failed to isolate %s dep during iter #%d: %s", key, iter_id, hits)
multi_deps.append(iter_deps)
else:
error_msg = "builddependencies should be a list of lists when calling get_parsed_multi_deps(): %s"
raise EasyBuildError(error_msg, builddeps)
return multi_deps
# private method
def _parse_dependency(self, dep, hidden=False, build_only=False):
"""
parses the dependency into a usable dict with a common format
dep can be a dict, a tuple or a list.
if it is a tuple or a list the attributes are expected to be in the following order:
('name', 'version', 'versionsuffix', 'toolchain')
of these attributes, 'name' and 'version' are mandatory
output dict contains these attributes:
['name', 'version', 'versionsuffix', 'system', 'toolchain', 'short_mod_name', 'full_mod_name', 'hidden',
'external_module']
:param hidden: indicate whether corresponding module file should be installed hidden ('.'-prefixed)
:param build_only: indicate whether this is a build-only dependency
"""
# convert tuple to string otherwise python might complain about the formatting
self.log.debug("Parsing %s as a dependency" % str(dep))
attr = ['name', 'version', 'versionsuffix', 'toolchain']
dependency = {
# full/short module names
'full_mod_name': None,
'short_mod_name': None,
# software name, version, versionsuffix
'name': None,
'version': None,
'versionsuffix': '',
# toolchain with which this dependency is installed
'toolchain': None,
'toolchain_inherited': False,
# boolean indicating whether we're dealing with a system toolchain for this dependency
SYSTEM_TOOLCHAIN_NAME: False,
# boolean indicating whether the module for this dependency is (to be) installed hidden
'hidden': hidden,
# boolean indicating whether this is a build-only dependency
'build_only': build_only,
# boolean indicating whether this dependency should be resolved via an external module
'external_module': False,
# metadata in case this is an external module;
# provides information on what this module represents (software name/version, install prefix, ...)
'external_module_metadata': {},
}
if isinstance(dep, dict):
dependency.update(dep)
# make sure 'system' key is handled appropriately
if SYSTEM_TOOLCHAIN_NAME in dep and 'toolchain' not in dep:
dependency['toolchain'] = dep[SYSTEM_TOOLCHAIN_NAME]
if dep.get('external_module', False):
dependency.update(self.handle_external_module_metadata(dep['full_mod_name']))
elif isinstance(dep, Dependency):
dependency['name'] = dep.name()
dependency['version'] = dep.version()
versionsuffix = dep.versionsuffix()
if versionsuffix is not None:
dependency['versionsuffix'] = versionsuffix
toolchain = dep.toolchain()
if toolchain is not None:
dependency['toolchain'] = toolchain
elif isinstance(dep, (list, tuple)):
if dep and dep[-1] == EXTERNAL_MODULE_MARKER:
if len(dep) == 2:
dependency['external_module'] = True
dependency['short_mod_name'] = dep[0]
dependency['full_mod_name'] = dep[0]
dependency.update(self.handle_external_module_metadata(dep[0]))
else:
raise EasyBuildError("Incorrect external dependency specification: %s", dep)
else:
# non-external dependency: tuple (or list) that specifies name/version(/versionsuffix(/toolchain))
dependency.update(dict(zip(attr, dep)))
else:
raise EasyBuildError("Dependency %s of unsupported type: %s", dep, type(dep))
# Find the version to use on this system
dependency['version'] = pick_dep_version(dependency['version'])
if dependency['external_module']:
# check whether the external module is hidden
if dependency['full_mod_name'].split('/')[-1].startswith('.'):
dependency['hidden'] = True
self.log.debug("Returning parsed external dependency: %s", dependency)
return dependency
# check whether this dependency should be hidden according to --hide-deps
if build_option('hide_deps'):
dependency['hidden'] |= dependency['name'] in build_option('hide_deps')
# dependency inherits toolchain, unless it's specified to have a custom toolchain
tc = copy.deepcopy(self['toolchain'])
tc_spec = dependency['toolchain']
if tc_spec is None:
self.log.debug("Inheriting parent toolchain %s for dep %s (until deps are finalised)", tc, dependency)
dependency['toolchain_inherited'] = True
# (true) boolean value simply indicates that a system toolchain is used
elif isinstance(tc_spec, bool) and tc_spec:
tc = {'name': SYSTEM_TOOLCHAIN_NAME, 'version': ''}
# two-element list/tuple value indicates custom toolchain specification
elif isinstance(tc_spec, (list, tuple,)):
if len(tc_spec) == 2:
tc = {'name': tc_spec[0], 'version': tc_spec[1]}
else:
raise EasyBuildError("List/tuple value for toolchain should have two elements (%s)", str(tc_spec))
elif isinstance(tc_spec, dict):
if 'name' in tc_spec and 'version' in tc_spec:
tc = copy.deepcopy(tc_spec)
else:
raise EasyBuildError("Found toolchain spec as dict with wrong keys (no name/version): %s", tc_spec)
else:
raise EasyBuildError("Unsupported type for toolchain spec encountered: %s (%s)", tc_spec, type(tc_spec))
self.log.debug("Derived toolchain to use for dependency %s, based on toolchain spec %s: %s", dep, tc_spec, tc)
dependency['toolchain'] = tc
# validations
if dependency['name'] is None:
raise EasyBuildError("Dependency specified without name: %s", dependency)
if dependency['version'] is None:
raise EasyBuildError("Dependency specified without version: %s", dependency)
return dependency
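# Illustrative input/output sketch (fields trimmed): parsing the tuple ('zlib', '1.2.11') yields
# a dict with name='zlib', version='1.2.11', versionsuffix='', the parent toolchain inherited
# (toolchain_inherited=True, finalised later in _finalize_dependencies), and hidden/build_only/
# external_module all False unless overridden via the corresponding arguments or build options.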
def _finalize_dependencies(self):
"""Finalize dependency parameters, after initial parsing."""
filter_deps_specs = self.parse_filter_deps()
for key in DEPENDENCY_PARAMETERS:
# loop over a *copy* of dependency dicts (with resolved templates);
deps = self[key]
# to update the original dep dict, we need to get a reference with templating disabled...
deps_ref = self.get_ref(key)
# take into account that this *dependencies parameter may be iterated over
if key in self.iterate_options:
deps = flatten(deps)
deps_ref = flatten(deps_ref)
for idx, dep in enumerate(deps):
# reference to original dep dict, this is the one we should be updating
orig_dep = deps_ref[idx]
if self.dep_is_filtered(orig_dep, filter_deps_specs):
self.log.debug("Skipping filtered dependency %s when finalising dependencies", orig_dep['name'])
continue
# handle dependencies with inherited (non-system) toolchain
# this *must* be done after parsing all dependencies, to avoid problems with templates like %(pyver)s
if dep['toolchain_inherited'] and not is_system_toolchain(dep['toolchain']['name']):
tc = None
dep_str = '%s %s%s' % (dep['name'], dep['version'], dep['versionsuffix'])
self.log.debug("Figuring out toolchain to use for dep %s...", dep)
if build_option('minimal_toolchains'):
# determine 'smallest' subtoolchain for which a matching easyconfig file is available
self.log.debug("Looking for minimal toolchain for dependency %s (parent toolchain: %s)...",
dep_str, dep['toolchain'])
tc = robot_find_subtoolchain_for_dep(dep, self.modules_tool)
if tc is None:
raise EasyBuildError("Failed to determine minimal toolchain for dep %s", dep_str)
else:
# try to determine subtoolchain for dep;
# this is done considering both available modules and easyconfigs (in that order)
tc = robot_find_subtoolchain_for_dep(dep, self.modules_tool, parent_first=True)
self.log.debug("Using subtoolchain %s for dep %s", tc, dep_str)
if tc is None:
self.log.debug("Inheriting toolchain %s from parent for dep %s", dep['toolchain'], dep_str)
else:
# put derived toolchain in place
self.log.debug("Figured out toolchain to use for dep %s: %s", dep_str, tc)
dep['toolchain'] = orig_dep['toolchain'] = tc
dep['toolchain_inherited'] = orig_dep['toolchain_inherited'] = False
if not dep['external_module']:
# make sure 'system' is set correctly
orig_dep[SYSTEM_TOOLCHAIN_NAME] = is_system_toolchain(dep['toolchain']['name'])
# set module names
orig_dep['short_mod_name'] = ActiveMNS().det_short_module_name(dep)
orig_dep['full_mod_name'] = ActiveMNS().det_full_module_name(dep)
def generate_template_values(self):
"""Try to generate all template values."""
self._generate_template_values()
# recursive call, until there are no more changes to template values;
# important since template values may include other templates
cont = True
while cont:
cont = False
for key in self.template_values:
try:
curr_val = self.template_values[key]
new_val = str(curr_val) % self.template_values
if new_val != curr_val:
cont = True
self.template_values[key] = new_val
except KeyError:
# KeyError's may occur when not all templates are defined yet, but these are safe to ignore
pass
def _generate_template_values(self, ignore=None):
"""Actual code to generate the template values"""
# step 0. self.template_values can/should be updated from outside easyconfig
# (eg the run_step code in EasyBlock)
# step 1-3 work with easyconfig.templates constants
# disable templating while creating the dict with template values, to avoid looping back here via __getitem__
with disable_templating(self):
if self.template_values is None:
# if no template values are set yet, initiate with a minimal set of template values;
# this is important for easyconfigs that use %(version_minor)s to define 'toolchain',
# which is a pretty weird use case, but fine...
self.template_values = template_constant_dict(self, ignore=ignore)
# grab toolchain instance with templating support enabled,
# which is important in case the Toolchain instance was not created yet
toolchain = self.toolchain
# get updated set of template values, now with toolchain instance
# (which is used to define the %(mpi_cmd_prefix)s template)
with disable_templating(self):
template_values = template_constant_dict(self, ignore=ignore, toolchain=toolchain)
# update the template_values dict
self.template_values.update(template_values)
# cleanup None values
for key in list(self.template_values):
if self.template_values[key] is None:
del self.template_values[key]
@handle_deprecated_or_replaced_easyconfig_parameters
def __contains__(self, key):
"""Check whether easyconfig parameter is defined"""
return key in self._config
@handle_deprecated_or_replaced_easyconfig_parameters
def __getitem__(self, key):
"""Return value of specified easyconfig parameter (without help text, etc.)"""
value = None
if key in self._config:
value = self._config[key][0]
else:
raise EasyBuildError("Use of unknown easyconfig parameter '%s' when getting parameter value", key)
if self.enable_templating:
if self.template_values is None or len(self.template_values) == 0:
self.generate_template_values()
value = resolve_template(value, self.template_values)
return value
def is_mandatory_param(self, key):
"""Check whether specified easyconfig parameter is mandatory."""
return key in self.mandatory
def get_ref(self, key):
"""
Obtain reference to original/untemplated value of specified easyconfig parameter
rather than a copied value with templated values.
"""
# see also comments in resolve_template
# temporarily disable templating
with disable_templating(self):
ref = self[key]
return ref
@handle_deprecated_or_replaced_easyconfig_parameters
def __setitem__(self, key, value):
"""Set value of specified easyconfig parameter (help text & co is left untouched)"""
if key in self._config:
self._config[key][0] = value
else:
raise EasyBuildError("Use of unknown easyconfig parameter '%s' when setting parameter value to '%s'",
key, value)
@handle_deprecated_or_replaced_easyconfig_parameters
def get(self, key, default=None, resolve=True):
"""
Gets the value of a key in the config, with 'default' as fallback.
:param resolve: if False, disables templating via calling get_ref, else resolves template values
"""
if key in self:
return self[key] if resolve else self.get_ref(key)
else:
return default
# *both* __eq__ and __ne__ must be implemented for == and != comparisons to work correctly
# see also https://docs.python.org/2/reference/datamodel.html#object.__eq__
def __eq__(self, ec):
"""Is this EasyConfig instance equivalent to the provided one?"""
return self.asdict() == ec.asdict()
def __ne__(self, ec):
"""Is this EasyConfig instance equivalent to the provided one?"""
return self.asdict() != ec.asdict()
def __hash__(self):
"""Return hash value for a hashable representation of this EasyConfig instance."""
def make_hashable(val):
"""Make a hashable value of the given value."""
if isinstance(val, list):
val = tuple([make_hashable(x) for x in val])
elif isinstance(val, dict):
val = tuple([(key, make_hashable(val)) for (key, val) in sorted(val.items())])
return val
lst = []
for (key, val) in sorted(self.asdict().items()):
lst.append((key, make_hashable(val)))
# a list is not hashable, but a tuple is
return hash(tuple(lst))
def asdict(self):
"""
Return dict representation of this EasyConfig instance.
"""
res = {}
for key, tup in self._config.items():
value = tup[0]
if self.enable_templating:
if not self.template_values:
self.generate_template_values()
value = resolve_template(value, self.template_values)
res[key] = value
return res
def det_installversion(version, toolchain_name, toolchain_version, prefix, suffix):
"""Deprecated 'det_installversion' function, to determine exact install version, based on supplied parameters."""
old_fn = 'framework.easyconfig.easyconfig.det_installversion'
_log.nosupport('Use det_full_ec_version from easybuild.tools.module_generator instead of %s' % old_fn, '2.0')
def get_easyblock_class(easyblock, name=None, error_on_failed_import=True, error_on_missing_easyblock=None, **kwargs):
"""
Get class for a particular easyblock (or use default)
"""
if 'default_fallback' in kwargs:
msg = "Named argument 'default_fallback' for get_easyblock_class is deprecated, "
msg += "use 'error_on_missing_easyblock' instead"
_log.deprecated(msg, '4.0')
if error_on_missing_easyblock is None:
error_on_missing_easyblock = kwargs['default_fallback']
elif error_on_missing_easyblock is None:
error_on_missing_easyblock = True
cls = None
try:
if easyblock:
# something was specified, lets parse it
es = easyblock.split('.')
class_name = es.pop(-1)
# figure out if full path was specified or not
if es:
modulepath = '.'.join(es)
_log.info("Assuming that full easyblock module path was specified (class: %s, modulepath: %s)",
class_name, modulepath)
cls = get_class_for(modulepath, class_name)
else:
modulepath = get_module_path(easyblock)
cls = get_class_for(modulepath, class_name)
_log.info("Derived full easyblock module path for %s: %s" % (class_name, modulepath))
else:
# if no easyblock specified, try to find if one exists
if name is None:
name = "UNKNOWN"
# The following is a generic way to calculate unique class names for any funny software title
class_name = encode_class_name(name)
# modulepath will be the namespace + encoded modulename (from the classname)
modulepath = get_module_path(class_name, generic=False)
modulepath_imported = False
try:
__import__(modulepath, globals(), locals(), [''])
modulepath_imported = True
except ImportError as err:
_log.debug("Failed to import module '%s': %s" % (modulepath, err))
# check if determining module path based on software name would have resulted in a different module path
if modulepath_imported:
_log.debug("Module path '%s' found" % modulepath)
else:
_log.debug("No module path '%s' found" % modulepath)
modulepath_bis = get_module_path(name, generic=False, decode=False)
_log.debug("Module path determined based on software name: %s" % modulepath_bis)
if modulepath_bis != modulepath:
_log.nosupport("Determining module path based on software name", '2.0')
# try and find easyblock
try:
_log.debug("getting class for %s.%s" % (modulepath, class_name))
cls = get_class_for(modulepath, class_name)
_log.info("Successfully obtained %s class instance from %s" % (class_name, modulepath))
except ImportError as err:
# when an ImportError occurs, make sure that it's caused by not finding the easyblock module,
# and not because of a broken import statement in the easyblock module
modname = modulepath.replace('easybuild.easyblocks.', '')
error_re = re.compile(r"No module named '?.*/?%s'?" % modname)
_log.debug("error regexp for ImportError on '%s' easyblock: %s", modname, error_re.pattern)
if error_re.match(str(err)):
if error_on_missing_easyblock:
raise EasyBuildError("No software-specific easyblock '%s' found for %s", class_name, name)
elif error_on_failed_import:
raise EasyBuildError("Failed to import %s easyblock: %s", class_name, err)
else:
_log.debug("Failed to import easyblock for %s, but ignoring it: %s" % (class_name, err))
if cls is not None:
_log.info("Successfully obtained class '%s' for easyblock '%s' (software name '%s')",
cls.__name__, easyblock, name)
else:
_log.debug("No class found for easyblock '%s' (software name '%s')", easyblock, name)
return cls
except EasyBuildError as err:
# simply reraise rather than wrapping it into another error
raise err
except Exception as err:
raise EasyBuildError("Failed to obtain class for %s easyblock (not available?): %s", easyblock, err)
def is_generic_easyblock(easyblock):
"""Return whether specified easyblock name is a generic easyblock or not."""
_log.deprecated("is_generic_easyblock function was moved to easybuild.tools.filetools", '5.0')
return filetools.is_generic_easyblock(easyblock)
def get_module_path(name, generic=None, decode=True):
"""
Determine the module path for a given easyblock or software name,
based on the encoded class name.
:param generic: whether or not the easyblock is generic (if None: auto-derive from specified class name)
:param decode: whether or not to decode the provided class name
"""
if name is None:
return None
if generic is None:
generic = filetools.is_generic_easyblock(name)
# example: 'EB_VSC_minus_tools' should result in 'vsc_tools'
if decode:
name = decode_class_name(name)
module_name = remove_unwanted_chars(name.replace('-', '_')).lower()
modpath = ['easybuild', 'easyblocks']
if generic:
modpath.append(GENERIC_EASYBLOCK_PKG)
return '.'.join(modpath + [module_name])
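# Illustrative examples of the mapping performed by get_module_path (assuming
# the usual 'generic' easyblocks package name; these calls are not part of the
# original file):
#   get_module_path('EB_GCC')         # -> 'easybuild.easyblocks.gcc'
#   get_module_path('ConfigureMake')  # -> 'easybuild.easyblocks.generic.configuremake'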
def resolve_template(value, tmpl_dict):
"""Given a value, try to susbstitute the templated strings with actual values.
- value: some python object (supported are string, tuple/list, dict or some mix thereof)
- tmpl_dict: template dictionary
"""
if isinstance(value, string_type):
# simple escaping, making all '%foo', '%%foo', '%%%foo' post-templates values available,
# but ignore a string like '%(name)s'
# keep the templating behaviour of strings like '%(name)s',
# and make sure that constructs like %%(name)s are preserved
# higher-order escaping in the original text is considered for advanced users only,
# and a big no-no otherwise; it indicates that you want some new functionality
# in easyconfigs, so just open an issue for it.
# detailed behaviour:
# if an odd number of % characters prefixes the (name)s,
# we assume that templating is desired and the behaviour is as follows
# '%(name)s' -> '%(name)s', and after templating with {'name':'x'} -> 'x'
# '%%%(name)s' -> '%%%(name)s', and after templating with {'name':'x'} -> '%x'
# if an even number of % characters prefixes the (name)s,
# we assume that no templating is desired and the behaviour is as follows
# '%%(name)s' -> '%%(name)s', and after templating with {'name':'x'} -> '%(name)s'
# '%%%%(name)s' -> '%%%%(name)s', and after templating with {'name':'x'} -> '%%(name)s'
# examples:
# '10%' -> '10%%'
# '%s' -> '%%s'
# '%%' -> '%%%%'
# '%(name)s' -> '%(name)s'
# '%%(name)s' -> '%%(name)s'
if '%' in value:
value = re.sub(re.compile(r'(%)(?!%*\(\w+\)s)'), r'\1\1', value)
try:
value = value % tmpl_dict
except KeyError:
_log.warning("Unable to resolve template value %s with dict %s", value, tmpl_dict)
else:
# this block deals with references to objects and returns other references
# for reading this is ok, but for self['x'] = {}
# self['x']['y'] = z does not work
# self['x'] is a get, will return a reference to a templated version of self._config['x']
# and the ['y] = z part will be against this new reference
# you will need to do
# self.enable_templating = False
# self['x']['y'] = z
# self.enable_templating = True
# or (direct but evil)
# self._config['x']['y'] = z
# it can not be intercepted with __setitem__ because the set is done at a deeper level
if isinstance(value, list):
value = [resolve_template(val, tmpl_dict) for val in value]
elif isinstance(value, tuple):
value = tuple(resolve_template(list(value), tmpl_dict))
elif isinstance(value, dict):
value = dict((resolve_template(k, tmpl_dict), resolve_template(v, tmpl_dict)) for k, v in value.items())
return value
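# Illustrative sketch (not part of the original file) of the escaping rules
# implemented above, using a hypothetical template dictionary:
#   tmpl = {'name': 'GCC', 'version': '4.8.2'}
#   resolve_template('%(name)s-%(version)s', tmpl)   # -> 'GCC-4.8.2'
#   resolve_template('%%(name)s', tmpl)              # -> '%(name)s' (templating suppressed)
#   resolve_template('echo "done 100%"', tmpl)       # bare '%' is escaped to '%%' first,
#                                                    # so the literal text survives
#                                                    # interpolation unchanged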
def process_easyconfig(path, build_specs=None, validate=True, parse_only=False, hidden=None):
"""
Process easyconfig, returning some information for each block
:param path: path to easyconfig file
:param build_specs: dictionary specifying build specifications (e.g. version, toolchain, ...)
:param validate: whether or not to perform validation
:param parse_only: only parse easyconfig superficially (faster, but results in partial info)
:param hidden: indicate whether corresponding module file should be installed hidden ('.'-prefixed)
"""
blocks = retrieve_blocks_in_spec(path, build_option('only_blocks'))
if hidden is None:
hidden = build_option('hidden')
# only cache when no build specifications are involved (since those can't be part of a dict key)
cache_key = None
if build_specs is None:
cache_key = (path, validate, hidden, parse_only)
if cache_key in _easyconfigs_cache:
return [e.copy() for e in _easyconfigs_cache[cache_key]]
easyconfigs = []
for spec in blocks:
# process for dependencies and real installversionname
_log.debug("Processing easyconfig %s" % spec)
# create easyconfig
try:
ec = EasyConfig(spec, build_specs=build_specs, validate=validate, hidden=hidden)
except EasyBuildError as err:
raise EasyBuildError("Failed to process easyconfig %s: %s", spec, err.msg)
name = ec['name']
easyconfig = {
'ec': ec,
}
easyconfigs.append(easyconfig)
if not parse_only:
# also determine list of dependencies, module name (unless only parsed easyconfigs are requested)
easyconfig.update({
'spec': ec.path,
'short_mod_name': ec.short_mod_name,
'full_mod_name': ec.full_mod_name,
'dependencies': [],
'builddependencies': [],
'hiddendependencies': [],
'hidden': ec.hidden,
})
if len(blocks) > 1:
easyconfig['original_spec'] = path
# add build dependencies
for dep in ec['builddependencies']:
_log.debug("Adding build dependency %s for app %s." % (dep, name))
easyconfig['builddependencies'].append(dep)
# add dependencies (including build & hidden dependencies)
for dep in ec.dependencies():
_log.debug("Adding dependency %s for app %s." % (dep, name))
easyconfig['dependencies'].append(dep)
# add toolchain as dependency too
if not is_system_toolchain(ec['toolchain']['name']):
tc = ec.toolchain.as_dict()
_log.debug("Adding toolchain %s as dependency for app %s." % (tc, name))
easyconfig['dependencies'].append(tc)
if cache_key is not None:
_easyconfigs_cache[cache_key] = [e.copy() for e in easyconfigs]
return easyconfigs
def letter_dir_for(name):
"""
Determine 'letter' directory for specified software name.
This is usually just the 1st letter of the software name (in lowercase),
except for funky software names, e.g. ones starting with a digit.
"""
# wildcard name should result in wildcard letter
if name == '*':
letter = '*'
else:
letter = name.lower()[0]
# outside of a-z range, use '0'
if letter < 'a' or letter > 'z':
letter = '0'
return letter
def create_paths(path, name, version):
"""
Returns all the paths where easyconfig could be located
<path> is the basepath
<name> should be a string
<version> can be a '*' if you use glob patterns, or an install version otherwise
"""
cand_paths = [
(name, version), # e.g. <path>/GCC/4.8.2.eb
(name, '%s-%s' % (name, version)), # e.g. <path>/GCC/GCC-4.8.2.eb
(letter_dir_for(name), name, '%s-%s' % (name, version)), # e.g. <path>/g/GCC/GCC-4.8.2.eb
('%s-%s' % (name, version),), # e.g. <path>/GCC-4.8.2.eb
]
return ['%s.eb' % os.path.join(path, *cand_path) for cand_path in cand_paths]
def robot_find_easyconfig(name, version):
"""
Find an easyconfig for module in path, returns (absolute) path to easyconfig file (or None, if none is found).
"""
key = (name, version)
if key in _easyconfig_files_cache:
_log.debug("Obtained easyconfig path from cache for %s: %s" % (key, _easyconfig_files_cache[key]))
return _easyconfig_files_cache[key]
paths = build_option('robot_path')
if paths is None:
paths = []
elif not isinstance(paths, (list, tuple)):
paths = [paths]
# if we should also consider archived easyconfigs, duplicate paths list with archived equivalents
if build_option('consider_archived_easyconfigs'):
paths = paths + [os.path.join(p, EASYCONFIGS_ARCHIVE_DIR) for p in paths]
res = None
for path in paths:
if build_option('ignore_index'):
_log.info("Ignoring index for %s...", path)
path_index = []
elif path in _path_indexes:
path_index = _path_indexes[path]
_log.info("Found loaded index for %s", path)
elif os.path.exists(path):
path_index = load_index(path)
if path_index is None:
_log.info("No index found for %s, so creating it...", path)
path_index = create_index(path)
else:
_log.info("Loaded index for %s", path)
_path_indexes[path] = path_index
else:
path_index = []
easyconfigs_paths = create_paths(path, name, version)
for easyconfig_path in easyconfigs_paths:
_log.debug("Checking easyconfig path %s" % easyconfig_path)
if easyconfig_path in path_index or os.path.isfile(easyconfig_path):
_log.debug("Found easyconfig file for name %s, version %s at %s" % (name, version, easyconfig_path))
_easyconfig_files_cache[key] = os.path.abspath(easyconfig_path)
res = _easyconfig_files_cache[key]
break
if res:
break
return res
def verify_easyconfig_filename(path, specs, parsed_ec=None):
"""
Check whether parsed easyconfig at specified path matches expected specs;
this basically verifies whether the easyconfig filename corresponds to its contents
:param path: path to easyconfig file
:param specs: expected specs (dict with easyconfig parameter values)
:param parsed_ec: (list of) EasyConfig instance(s) corresponding to easyconfig file
"""
if isinstance(parsed_ec, EasyConfig):
ecs = [{'ec': parsed_ec}]
elif isinstance(parsed_ec, (list, tuple)):
ecs = parsed_ec
elif parsed_ec is None:
ecs = process_easyconfig(path)
else:
raise EasyBuildError("Unexpected value type for parsed_ec: %s (%s)", type(parsed_ec), parsed_ec)
fullver = det_full_ec_version(specs)
expected_filename = '%s-%s.eb' % (specs['name'], fullver)
if os.path.basename(path) != expected_filename:
# only retain relevant specs to produce a more useful error message
specstr = ''
for key in ['name', 'version', 'versionsuffix']:
specstr += "%s: %s; " % (key, quote_py_str(specs.get(key)))
toolchain = specs.get('toolchain')
if toolchain:
tcname, tcver = quote_py_str(toolchain.get('name')), quote_py_str(toolchain.get('version'))
specstr += "toolchain name, version: %s, %s" % (tcname, tcver)
else:
specstr += "toolchain: None"
raise EasyBuildError("Easyconfig filename '%s' does not match with expected filename '%s' (specs: %s)",
os.path.basename(path), expected_filename, specstr)
for ec in ecs:
found_fullver = det_full_ec_version(ec['ec'])
if ec['ec']['name'] != specs['name'] or found_fullver != fullver:
subspec = dict((key, specs[key]) for key in ['name', 'toolchain', 'version', 'versionsuffix'])
error_msg = "Contents of %s does not match with filename" % path
error_msg += "; expected filename based on contents: %s-%s.eb" % (ec['ec']['name'], found_fullver)
error_msg += "; expected (relevant) parameters based on filename %s: %s" % (os.path.basename(path), subspec)
raise EasyBuildError(error_msg)
_log.info("Contents of %s verified against easyconfig filename, matches %s", path, specs)
def robot_find_subtoolchain_for_dep(dep, modtool, parent_tc=None, parent_first=False):
"""
Find the subtoolchain to use for a dependency
:param dep: dependency target dict (long and short module names may not exist yet)
:param parent_tc: toolchain from which to derive the toolchain hierarchy to search (default: use dep's toolchain)
:param parent_first: reverse order in which subtoolchains are considered: parent toolchain, then subtoolchains
:return: minimal toolchain for which an easyconfig exists for this dependency (and matches build_options)
"""
if parent_tc is None:
parent_tc = dep['toolchain']
retain_all_deps = build_option('retain_all_deps')
use_existing_modules = build_option('use_existing_modules') and not retain_all_deps
if parent_first or use_existing_modules:
avail_modules = modtool.available()
else:
avail_modules = []
newdep = copy.deepcopy(dep)
# try to determine toolchain hierarchy
# this may fail if not all easyconfig files that define this toolchain are available,
# but that's not always fatal: it's mostly irrelevant under --review-pr for example
try:
toolchain_hierarchy = get_toolchain_hierarchy(parent_tc)
except EasyBuildError as err:
warning_msg = "Failed to determine toolchain hierarchy for %(name)s/%(version)s when determining " % parent_tc
warning_msg += "subtoolchain for dependency '%s': %s" % (dep['name'], err)
_log.warning(warning_msg)
print_warning(warning_msg, silent=build_option('silent'))
toolchain_hierarchy = []
# start with subtoolchains first, i.e. first (system or) compiler-only toolchain, etc.,
# unless parent toolchain should be considered first
if parent_first:
toolchain_hierarchy = toolchain_hierarchy[::-1]
cand_subtcs = []
for tc in toolchain_hierarchy:
# try to determine module name using this particular subtoolchain;
# this may fail if no easyconfig is available in robot search path
# and the module naming scheme requires an easyconfig file
newdep['toolchain'] = tc
mod_name = ActiveMNS().det_full_module_name(newdep, require_result=False)
# if the module name can be determined, subtoolchain is an actual candidate
if mod_name:
# check whether module already exists or not (but only if that info will actually be used)
mod_exists = None
if parent_first or use_existing_modules:
mod_exists = mod_name in avail_modules
# fallback to checking with modtool.exist is required,
# for hidden modules and external modules where module name may be partial
if not mod_exists:
maybe_partial = dep.get('external_module', True)
mod_exists = modtool.exist([mod_name], skip_avail=True, maybe_partial=maybe_partial)[0]
# add the subtoolchain to list of candidates
cand_subtcs.append({'toolchain': tc, 'mod_exists': mod_exists})
_log.debug("List of possible subtoolchains for %s: %s", dep, cand_subtcs)
cand_subtcs_with_mod = [tc for tc in cand_subtcs if tc.get('mod_exists', False)]
# scenario I:
# - regardless of whether minimal toolchains mode is enabled or not
# - try to pick subtoolchain based on available easyconfigs (first hit wins)
minimal_toolchain = None
for cand_subtc in cand_subtcs:
newdep['toolchain'] = cand_subtc['toolchain']
ec_file = robot_find_easyconfig(newdep['name'], det_full_ec_version(newdep))
if ec_file:
minimal_toolchain = cand_subtc['toolchain']
break
if cand_subtcs_with_mod:
if parent_first:
# scenario II:
# - parent toolchain first (minimal toolchains mode *not* enabled)
# - module for dependency is already available for one of the subtoolchains
# - only used as fallback in case subtoolchain could not be determined via easyconfigs (scenario I)
# If so, we retain the subtoolchain closest to the parent (so top of the list of candidates)
if minimal_toolchain is None or use_existing_modules:
minimal_toolchain = cand_subtcs_with_mod[0]['toolchain']
elif use_existing_modules:
# scenario III:
# - minimal toolchains mode + --use-existing-modules
# - reconsider subtoolchain based on already available modules for dependency
# - this may overrule subtoolchain picked in scenario II
# take the last element, i.e. the maximum toolchain where a module exists already
# (allows for potentially better optimisation)
minimal_toolchain = cand_subtcs_with_mod[-1]['toolchain']
if minimal_toolchain is None:
_log.info("Irresolvable dependency found (even with minimal toolchains): %s", dep)
_log.info("Minimally resolving dependency %s using toolchain %s", dep, minimal_toolchain)
return minimal_toolchain
def det_location_for(path, target_dir, soft_name, target_file):
"""
Determine path to easyconfigs directory for specified software name, using specified target file name.
:param path: path of file to copy
:param target_dir: (parent) target directory, should contain easybuild/easyconfigs subdirectory
:param soft_name: software name (to determine location to copy to)
:param target_file: target file name
:return: full path to the right location
"""
subdir = os.path.join('easybuild', 'easyconfigs')
if os.path.exists(os.path.join(target_dir, subdir)):
target_path = os.path.join('easybuild', 'easyconfigs', letter_dir_for(soft_name), soft_name, target_file)
_log.debug("Target path for %s: %s", path, target_path)
target_path = os.path.join(target_dir, target_path)
else:
raise EasyBuildError("Subdirectory %s not found in %s", subdir, target_dir)
return target_path
def clean_up_easyconfigs(paths):
"""
Clean up easyconfigs (in place) by filtering out comments/buildstats included by EasyBuild in archived easyconfigs
(cfr. FileRepository.add_easyconfig in easybuild.tools.repository.filerepo)
:param paths: list of paths to easyconfigs to clean up
"""
regexs = [
re.compile(r"^# Built with EasyBuild.*\n", re.M),
re.compile(r"^# Build statistics.*\n", re.M),
# consume buildstats as a whole, i.e. all lines until closing '}]'
re.compile(r"\n*buildstats\s*=(.|\n)*\n}\]\s*\n?", re.M),
]
for path in paths:
ectxt = read_file(path)
for regex in regexs:
ectxt = regex.sub('', ectxt)
write_file(path, ectxt, forced=True)
def det_file_info(paths, target_dir):
"""
Determine useful information on easyconfig files relative to a target directory,
before any actual operation (e.g. copying) is performed
:param paths: list of paths to easyconfig files
:param target_dir: target directory
:return: dict with useful information on easyconfig files (corresponding EasyConfig instances, paths, status)
relative to a target directory
"""
file_info = {
'ecs': [],
'paths': [],
'paths_in_repo': [],
'new': [],
'new_folder': [],
'new_file_in_existing_folder': [],
}
for path in paths:
ecs = process_easyconfig(path, validate=False)
if len(ecs) == 1:
file_info['paths'].append(path)
file_info['ecs'].append(ecs[0]['ec'])
soft_name = file_info['ecs'][-1].name
ec_filename = file_info['ecs'][-1].filename()
target_path = det_location_for(path, target_dir, soft_name, ec_filename)
new_file = not os.path.exists(target_path)
new_folder = not os.path.exists(os.path.dirname(target_path))
file_info['new'].append(new_file)
file_info['new_folder'].append(new_folder)
file_info['new_file_in_existing_folder'].append(new_file and not new_folder)
file_info['paths_in_repo'].append(target_path)
else:
raise EasyBuildError("Multiple EasyConfig instances obtained from easyconfig file %s", path)
return file_info
def copy_easyconfigs(paths, target_dir):
"""
Copy easyconfig files to specified directory, in the 'right' location and using the filename expected by robot.
:param paths: list of paths to copy to git working dir
:param target_dir: target directory
:return: dict with useful information on copied easyconfig files (corresponding EasyConfig instances, paths, status)
"""
file_info = det_file_info(paths, target_dir)
for path, target_path in zip(file_info['paths'], file_info['paths_in_repo']):
copy_file(path, target_path, force_in_dry_run=True)
if build_option('cleanup_easyconfigs'):
clean_up_easyconfigs(file_info['paths_in_repo'])
return file_info
def copy_patch_files(patch_specs, target_dir):
"""
Copy patch files to specified directory, in the 'right' location according to the software name they relate to.
:param patch_specs: list of tuples with patch file location and name of software they are for
:param target_dir: target directory
"""
patched_files = {
'paths_in_repo': [],
}
for patch_path, soft_name in patch_specs:
target_path = det_location_for(patch_path, target_dir, soft_name, os.path.basename(patch_path))
copy_file(patch_path, target_path, force_in_dry_run=True)
patched_files['paths_in_repo'].append(target_path)
return patched_files
def fix_deprecated_easyconfigs(paths):
"""Fix use of deprecated functionality in easyconfigs at specified locations."""
dummy_tc_regex = re.compile(r'^toolchain\s*=\s*{.*name.*dummy.*}', re.M)
easyconfig_paths = []
for path in paths:
easyconfig_paths.extend(find_easyconfigs(path))
cnt, idx, fixed_cnt = len(easyconfig_paths), 0, 0
for path in easyconfig_paths:
ectxt = read_file(path)
idx += 1
print_msg("* [%d/%d] fixing %s... ", idx, cnt, path, prefix=False, newline=False)
fixed = False
# fix use of 'dummy' toolchain, use SYSTEM constant instead
if dummy_tc_regex.search(ectxt):
ectxt = dummy_tc_regex.sub("toolchain = SYSTEM", ectxt)
fixed = True
# fix use of local variables with a name other than a single letter or 'local_*'
ec = EasyConfig(path, local_var_naming_check=LOCAL_VAR_NAMING_CHECK_LOG)
for key in ec.unknown_keys:
regexp = re.compile(r'\b(%s)\b' % key)
ectxt = regexp.sub(LOCAL_VAR_PREFIX + key, ectxt)
fixed = True
if fixed:
fixed_cnt += 1
backup_path = find_backup_name_candidate(path + '.orig')
copy_file(path, backup_path)
write_file(path, ectxt)
print_msg('FIXED!', prefix=False)
print_msg(" (changes made in place, original copied to %s)", backup_path, prefix=False)
else:
print_msg("(no changes made)", prefix=False)
print_msg("\nAll done! Fixed %d easyconfigs (out of %d found).\n", fixed_cnt, cnt, prefix=False)
# singleton metaclass: only one instance is created
BaseActiveMNS = create_base_metaclass('BaseActiveMNS', Singleton, object)
class ActiveMNS(BaseActiveMNS):
"""Wrapper class for active module naming scheme."""
def __init__(self, *args, **kwargs):
"""Initialize logger."""
self.log = fancylogger.getLogger(self.__class__.__name__, fname=False)
# determine active module naming scheme
avail_mnss = avail_module_naming_schemes()
self.log.debug("List of available module naming schemes: %s" % avail_mnss.keys())
sel_mns = get_module_naming_scheme()
if sel_mns in avail_mnss:
self.mns = avail_mnss[sel_mns]()
else:
raise EasyBuildError("Selected module naming scheme %s could not be found in %s",
sel_mns, avail_mnss.keys())
def requires_full_easyconfig(self, keys):
"""Check whether specified list of easyconfig parameters is sufficient for active module naming scheme."""
return self.mns.requires_toolchain_details() or not self.mns.is_sufficient(keys)
def check_ec_type(self, ec, raise_error=True):
"""
Obtain a full parsed easyconfig file to pass to naming scheme methods if provided keys are insufficient.
:param ec: available easyconfig parameter specifications (EasyConfig instance or dict value)
:param raise_error: boolean indicating whether or not an error should be raised
if a full easyconfig is required but not found
"""
if not isinstance(ec, EasyConfig) and self.requires_full_easyconfig(ec.keys()):
self.log.debug("A parsed easyconfig is required by the module naming scheme, so finding one for %s" % ec)
# fetch/parse easyconfig file if deemed necessary
eb_file = robot_find_easyconfig(ec['name'], det_full_ec_version(ec))
if eb_file is not None:
parsed_ec = process_easyconfig(eb_file, parse_only=True, hidden=ec['hidden'])
if len(parsed_ec) > 1:
self.log.warning("More than one parsed easyconfig obtained from %s, only retaining first" % eb_file)
self.log.debug("Full list of parsed easyconfigs: %s" % parsed_ec)
ec = parsed_ec[0]['ec']
elif raise_error:
raise EasyBuildError("Failed to find easyconfig file '%s-%s.eb' when determining module name for: %s",
ec['name'], det_full_ec_version(ec), ec)
else:
self.log.info("No easyconfig found as required by module naming scheme, but not considered fatal")
ec = None
return ec
def _det_module_name_with(self, mns_method, ec, force_visible=False, require_result=True):
"""
Determine module name using specified module naming scheme method, based on supplied easyconfig.
Returns a string representing the module name, e.g. 'GCC/4.6.3', 'Python/2.7.5-ictce-4.1.13',
with the following requirements:
- module name is specified as a relative path
- string representing module name has length > 0
- module name only contains printable characters (string.printable, except carriage-control chars)
"""
mod_name = None
ec = self.check_ec_type(ec, raise_error=require_result)
if ec:
# replace software name with desired replacement (if specified)
orig_name = None
if ec.get('modaltsoftname', None):
orig_name = ec['name']
ec['name'] = ec['modaltsoftname']
self.log.info("Replaced software name '%s' with '%s' when determining module name",
orig_name, ec['name'])
else:
self.log.debug("No alternative software name specified to determine module name with")
mod_name = mns_method(ec)
# restore original software name if it was tampered with
if orig_name is not None:
ec['name'] = orig_name
if not is_valid_module_name(mod_name):
raise EasyBuildError("%s is not a valid module name", str(mod_name))
# check whether module name should be hidden or not
# ec may be either a dict or an EasyConfig instance, 'force_visible' argument overrules
if (ec.get('hidden', False) or getattr(ec, 'hidden', False)) and not force_visible:
mod_name = det_hidden_modname(mod_name)
elif require_result:
raise EasyBuildError("Failed to determine module name for %s using %s", ec, mns_method)
return mod_name
def det_full_module_name(self, ec, force_visible=False, require_result=True):
"""Determine full module name by selected module naming scheme, based on supplied easyconfig."""
self.log.debug("Determining full module name for %s (force_visible: %s)" % (ec, force_visible))
if ec.get('external_module', False):
# external modules have the module name readily available, and may lack the info required by the MNS
mod_name = ec['full_mod_name']
self.log.debug("Full module name for external module: %s", mod_name)
else:
mod_name = self._det_module_name_with(self.mns.det_full_module_name, ec, force_visible=force_visible,
require_result=require_result)
self.log.debug("Obtained valid full module name %s", mod_name)
return mod_name
def det_install_subdir(self, ec):
"""Determine name of software installation subdirectory."""
self.log.debug("Determining software installation subdir for %s", ec)
if build_option('fixed_installdir_naming_scheme'):
subdir = os.path.join(ec['name'], det_full_ec_version(ec))
self.log.debug("Using fixed naming software installation subdir: %s", subdir)
else:
subdir = self.mns.det_install_subdir(self.check_ec_type(ec))
self.log.debug("Obtained subdir %s", subdir)
return subdir
def det_devel_module_filename(self, ec, force_visible=False):
"""Determine devel module filename."""
modname = self.det_full_module_name(ec, force_visible=force_visible)
return modname.replace(os.path.sep, '-') + DEVEL_MODULE_SUFFIX
def det_short_module_name(self, ec, force_visible=False):
"""Determine short module name according to module naming scheme."""
self.log.debug("Determining short module name for %s (force_visible: %s)" % (ec, force_visible))
mod_name = self._det_module_name_with(self.mns.det_short_module_name, ec, force_visible=force_visible)
self.log.debug("Obtained valid short module name %s" % mod_name)
# sanity check: obtained module name should pass the 'is_short_modname_for' check
if 'modaltsoftname' in ec and not self.is_short_modname_for(mod_name, ec['modaltsoftname'] or ec['name']):
raise EasyBuildError("is_short_modname_for('%s', '%s') for active module naming scheme returns False",
mod_name, ec['name'])
return mod_name
def det_module_subdir(self, ec):
"""Determine module subdirectory according to module naming scheme."""
self.log.debug("Determining module subdir for %s" % ec)
mod_subdir = self.mns.det_module_subdir(self.check_ec_type(ec))
self.log.debug("Obtained subdir %s" % mod_subdir)
return mod_subdir
def det_module_symlink_paths(self, ec):
"""
Determine list of paths in which symlinks to module files must be created.
"""
return self.mns.det_module_symlink_paths(ec)
def det_modpath_extensions(self, ec):
"""Determine modulepath extensions according to module naming scheme."""
self.log.debug("Determining modulepath extensions for %s" % ec)
modpath_extensions = self.mns.det_modpath_extensions(self.check_ec_type(ec))
self.log.debug("Obtained modulepath extensions: %s" % modpath_extensions)
return modpath_extensions
def det_user_modpath_extensions(self, ec):
"""Determine user-specific modulepath extensions according to module naming scheme."""
self.log.debug("Determining user modulepath extensions for %s", ec)
modpath_extensions = self.mns.det_user_modpath_extensions(self.check_ec_type(ec))
self.log.debug("Obtained user modulepath extensions: %s", modpath_extensions)
return modpath_extensions
def det_init_modulepaths(self, ec):
"""Determine initial modulepaths according to module naming scheme."""
self.log.debug("Determining initial module paths for %s" % ec)
init_modpaths = self.mns.det_init_modulepaths(self.check_ec_type(ec))
self.log.debug("Obtained initial module paths: %s" % init_modpaths)
return init_modpaths
def expand_toolchain_load(self, ec=None):
"""
Determine whether load statements for a toolchain should be expanded to load statements for its dependencies.
This is useful when toolchains are not exposed to users.
"""
return self.mns.expand_toolchain_load(ec=ec)
def is_short_modname_for(self, short_modname, name):
"""
Determine whether the specified (short) module name is a module for software with the specified name.
"""
return self.mns.is_short_modname_for(short_modname, name)
| pescobar/easybuild-framework | easybuild/framework/easyconfig/easyconfig.py | Python | gpl-2.0 | 119,828 | [
"NetCDF"
] | 187b7da73b4aa108bb9854f339050054c2e8aeddb69c1454801732b3186c2d68 |
# This file is part of Androguard.
#
# Copyright (c) 2012 Geoffroy Gueguen <[email protected]>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import collections
from collections import defaultdict
from androguard.decompiler.dad.basic_blocks import (build_node_from_block,
StatementBlock, CondBlock)
from androguard.decompiler.dad.util import get_type
from androguard.decompiler.dad.instruction import Variable
logger = logging.getLogger('dad.graph')
class Graph(object):
def __init__(self):
self.entry = None
self.exit = None
self.nodes = list()
self.rpo = collections.deque()
self.edges = defaultdict(list)
self.catch_edges = defaultdict(list)
self.reverse_edges = defaultdict(list)
self.reverse_catch_edges = defaultdict(list)
self.loc_to_ins = None
self.loc_to_node = None
# edge lists are stored as plain lists (defaultdict(list)), so use list defaults
# here to keep concatenation working
def sucs(self, node):
return self.edges.get(node, [])
def all_sucs(self, node):
return self.edges.get(node, []) + self.catch_edges.get(node, [])
def preds(self, node):
return [n for n in self.reverse_edges.get(node, []) if not n.in_catch]
def all_preds(self, node):
return (self.reverse_edges.get(node, []) +
self.reverse_catch_edges.get(node, []))
def add_node(self, node):
self.nodes.append(node)
def add_edge(self, e1, e2):
lsucs = self.edges[e1]
if e2 not in lsucs:
lsucs.append(e2)
lpreds = self.reverse_edges[e2]
if e1 not in lpreds:
lpreds.append(e1)
def add_catch_edge(self, e1, e2):
lsucs = self.catch_edges[e1]
if e2 not in lsucs:
lsucs.append(e2)
lpreds = self.reverse_catch_edges[e2]
if e1 not in lpreds:
lpreds.append(e1)
def remove_node(self, node):
preds = self.reverse_edges.get(node, collections.deque())
for pred in preds:
self.edges[pred].remove(node)
succs = self.edges.get(node, collections.deque())
for suc in succs:
self.reverse_edges[suc].remove(node)
exc_preds = self.reverse_catch_edges.pop(node, collections.deque())
for pred in exc_preds:
self.catch_edges[pred].remove(node)
exc_succs = self.catch_edges.pop(node, collections.deque())
for suc in exc_succs:
self.reverse_catch_edges[suc].remove(node)
self.nodes.remove(node)
if node in self.rpo:
self.rpo.remove(node)
del node
def number_ins(self):
self.loc_to_ins = {}
self.loc_to_node = {}
num = 0
for node in self.rpo:
start_node = num
num = node.number_ins(num)
end_node = num - 1
self.loc_to_ins.update(node.get_loc_with_ins())
self.loc_to_node[start_node, end_node] = node
def get_ins_from_loc(self, loc):
return self.loc_to_ins.get(loc)
def get_node_from_loc(self, loc):
for (start, end), node in self.loc_to_node.iteritems():
if start <= loc <= end:
return node
def remove_ins(self, loc):
ins = self.get_ins_from_loc(loc)
self.get_node_from_loc(loc).remove_ins(loc, ins)
self.loc_to_ins.pop(loc)
def compute_rpo(self):
'''
Number the nodes in reverse post order.
An RPO traversal visits as many predecessors of a node as possible
before visiting the node itself.
'''
nb = len(self.nodes) + 1
for node in self.post_order():
node.num = nb - node.po
self.rpo = sorted(self.nodes, key=lambda n: n.num)
def post_order(self):
'''
Return the nodes of the graph in post-order, i.e. we visit all the
children of a node before visiting the node itself.
'''
def _visit(n, cnt):
visited.add(n)
for suc in self.all_sucs(n):
if not suc in visited:
for cnt, s in _visit(suc, cnt):
yield cnt, s
n.po = cnt
yield cnt + 1, n
visited = set()
for _, node in _visit(self.entry, 1):
yield node
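# Worked example (hypothetical diamond CFG, not part of the original file):
# with edges entry->a, entry->b, a->exit, b->exit, one valid post-order is
# [exit, a, b, entry]; compute_rpo() then numbers nodes so that the reverse
# post-order becomes [entry, b, a, exit], i.e. every node is visited after
# all of its predecessors (ignoring back edges).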
def draw(self, name, dname, draw_branches=True):
from pydot import Dot, Edge
g = Dot()
g.set_node_defaults(color='lightgray',
style='filled',
shape='box',
fontname='Courier',
fontsize='10')
for node in sorted(self.nodes, key=lambda x: x.num):
if draw_branches and node.type.is_cond:
g.add_edge(Edge(str(node), str(node.true), color='green'))
g.add_edge(Edge(str(node), str(node.false), color='red'))
else:
for suc in self.sucs(node):
g.add_edge(Edge(str(node), str(suc), color='blue'))
for except_node in self.catch_edges.get(node, collections.deque()):
g.add_edge(Edge(str(node),
str(except_node),
color='black',
style='dashed'))
g.write_png('%s/%s.png' % (dname, name))
def immediate_dominators(self):
return dom_lt(self)
def __len__(self):
return len(self.nodes)
def __repr__(self):
return str(self.nodes)
def __iter__(self):
for node in self.nodes:
yield node
def split_if_nodes(graph):
'''
Split IfNodes into two nodes: the first node is the header node, the
second one is only composed of the jump condition.
'''
node_map = {n: n for n in graph}
to_update = set()
for node in graph.nodes[:]:
if node.type.is_cond:
if len(node.get_ins()) > 1:
pre_ins = node.get_ins()[:-1]
last_ins = node.get_ins()[-1]
pre_node = StatementBlock('%s-pre' % node.name, pre_ins)
cond_node = CondBlock('%s-cond' % node.name, [last_ins])
node_map[node] = pre_node
node_map[pre_node] = pre_node
node_map[cond_node] = cond_node
pre_node.copy_from(node)
cond_node.copy_from(node)
for var in node.var_to_declare:
pre_node.add_variable_declaration(var)
pre_node.type.is_stmt = True
cond_node.true = node.true
cond_node.false = node.false
for pred in graph.all_preds(node):
pred_node = node_map[pred]
# Verify that the link is not an exception link
if node not in graph.sucs(pred):
graph.add_catch_edge(pred_node, pre_node)
continue
if pred is node:
pred_node = cond_node
if pred.type.is_cond: # and not (pred is node):
if pred.true is node:
pred_node.true = pre_node
if pred.false is node:
pred_node.false = pre_node
graph.add_edge(pred_node, pre_node)
for suc in graph.sucs(node):
graph.add_edge(cond_node, node_map[suc])
# We link all the exceptions to the pre node instead of the
# condition node, which should not trigger any of them.
for suc in graph.catch_edges.get(node, collections.deque()):
graph.add_catch_edge(pre_node, node_map[suc])
if node is graph.entry:
graph.entry = pre_node
graph.add_node(pre_node)
graph.add_node(cond_node)
graph.add_edge(pre_node, cond_node)
pre_node.update_attribute_with(node_map)
cond_node.update_attribute_with(node_map)
graph.remove_node(node)
else:
to_update.add(node)
for node in to_update:
node.update_attribute_with(node_map)
def simplify(graph):
'''
Simplify the CFG by merging/deleting statement nodes when possible:
If statement B follows statement A and if B has no other predecessor
besides A, then we can merge A and B into a new statement node.
We also remove nodes which do nothing except redirecting the control
flow (nodes which only contains a goto).
'''
redo = True
while redo:
redo = False
node_map = {}
to_update = set()
for node in graph.nodes[:]:
if node.type.is_stmt and node in graph:
sucs = graph.all_sucs(node)
if len(sucs) != 1:
continue
suc = sucs[0]
if len(node.get_ins()) == 0:
if any(pred.type.is_switch
for pred in graph.all_preds(node)):
continue
if node is suc:
continue
node_map[node] = suc
for pred in graph.all_preds(node):
pred.update_attribute_with(node_map)
if node not in graph.sucs(pred):
graph.add_catch_edge(pred, suc)
continue
graph.add_edge(pred, suc)
redo = True
if node is graph.entry:
graph.entry = suc
graph.remove_node(node)
elif (suc.type.is_stmt and len(graph.all_preds(suc)) == 1 and
not (suc in graph.catch_edges) and not (
(node is suc) or (suc is graph.entry))):
ins_to_merge = suc.get_ins()
node.add_ins(ins_to_merge)
for var in suc.var_to_declare:
node.add_variable_declaration(var)
new_suc = graph.sucs(suc)[0]
if new_suc:
graph.add_edge(node, new_suc)
for exception_suc in graph.catch_edges.get(suc, collections.deque()):
graph.add_catch_edge(node, exception_suc)
redo = True
graph.remove_node(suc)
else:
to_update.add(node)
for node in to_update:
node.update_attribute_with(node_map)
def dom_lt(graph):
'''Dominator algorithm from Lengauer-Tarjan'''
def _dfs(v, n):
semi[v] = n = n + 1
vertex[n] = label[v] = v
ancestor[v] = 0
for w in graph.all_sucs(v):
if not semi[w]:
parent[w] = v
n = _dfs(w, n)
pred[w].add(v)
return n
def _compress(v):
u = ancestor[v]
if ancestor[u]:
_compress(u)
if semi[label[u]] < semi[label[v]]:
label[v] = label[u]
ancestor[v] = ancestor[u]
def _eval(v):
if ancestor[v]:
_compress(v)
return label[v]
return v
def _link(v, w):
ancestor[w] = v
parent, ancestor, vertex = {}, {}, {}
label, dom = {}, {}
pred, bucket = defaultdict(set), defaultdict(set)
# Step 1:
semi = {v: 0 for v in graph.nodes}
n = _dfs(graph.entry, 0)
for i in xrange(n, 1, -1):
w = vertex[i]
# Step 2:
for v in pred[w]:
u = _eval(v)
y = semi[w] = min(semi[w], semi[u])
bucket[vertex[y]].add(w)
pw = parent[w]
_link(pw, w)
# Step 3:
bpw = bucket[pw]
while bpw:
v = bpw.pop()
u = _eval(v)
dom[v] = u if semi[u] < semi[v] else pw
# Step 4:
for i in range(2, n + 1):
w = vertex[i]
dw = dom[w]
if dw != vertex[semi[w]]:
dom[w] = dom[dw]
dom[graph.entry] = None
return dom
def bfs(start):
to_visit = [start]
visited = set([start])
while to_visit:
node = to_visit.pop(0)
yield node
if node.exception_analysis:
for _, _, exception in node.exception_analysis.exceptions:
if exception not in visited:
to_visit.append(exception)
visited.add(exception)
for _, _, child in node.childs:
if child not in visited:
to_visit.append(child)
visited.add(child)
class GenInvokeRetName(object):
def __init__(self):
self.num = 0
self.ret = None
def new(self):
self.num += 1
self.ret = Variable('tmp%d' % self.num)
return self.ret
def set_to(self, ret):
self.ret = ret
def last(self):
return self.ret
def make_node(graph, block, block_to_node, vmap, gen_ret):
node = block_to_node.get(block)
if node is None:
node = build_node_from_block(block, vmap, gen_ret)
block_to_node[block] = node
if block.exception_analysis:
for _type, _, exception_target in block.exception_analysis.exceptions:
exception_node = block_to_node.get(exception_target)
if exception_node is None:
exception_node = build_node_from_block(exception_target, vmap,
gen_ret, _type)
exception_node.set_catch_type(_type)
exception_node.in_catch = True
block_to_node[exception_target] = exception_node
graph.add_catch_edge(node, exception_node)
for _, _, child_block in block.childs:
child_node = block_to_node.get(child_block)
if child_node is None:
child_node = build_node_from_block(child_block, vmap, gen_ret)
block_to_node[child_block] = child_node
graph.add_edge(node, child_node)
if node.type.is_switch:
node.add_case(child_node)
if node.type.is_cond:
if_target = ((block.end / 2) -
(block.last_length / 2) + node.off_last_ins)
child_addr = child_block.start / 2
if if_target == child_addr:
node.true = child_node
else:
node.false = child_node
# Check that both branches of the if point to something.
# It may happen that both branches point to the same node; in this case
# the false branch will be None. So we set it to the right node.
# TODO: In this situation, we should transform the condition node into
# a statement node
if node.type.is_cond and node.false is None:
node.false = node.true
return node
def construct(start_block, vmap, exceptions):
bfs_blocks = bfs(start_block)
graph = Graph()
gen_ret = GenInvokeRetName()
# Construction of a mapping of basic blocks into Nodes
block_to_node = {}
exceptions_start_block = collections.deque()
for exception in exceptions:
for _, _, block in exception.exceptions:
exceptions_start_block.append(block)
for block in bfs_blocks:
node = make_node(graph, block, block_to_node, vmap, gen_ret)
graph.add_node(node)
graph.entry = block_to_node[start_block]
del block_to_node, bfs_blocks
graph.compute_rpo()
graph.number_ins()
for node in graph.rpo:
preds = [pred for pred in graph.all_preds(node) if pred.num < node.num]
if preds and all(pred.in_catch for pred in preds):
node.in_catch = True
# Create a list of Node which are 'return' node
# There should be one and only one node of this type
# If this is not the case, try to continue anyway by setting the exit node
# to the one which has the greatest RPO number (not necessarily the case)
lexit_nodes = [node for node in graph if node.type.is_return]
if len(lexit_nodes) > 1:
# Not sure that this case is possible...
logger.error('Multiple exit nodes found !')
graph.exit = graph.rpo[-1]
elif len(lexit_nodes) < 1:
# A method can have no return if it has throw statement(s) or if its
# body is a while(1) without break/return.
logger.debug('No exit node found !')
else:
graph.exit = lexit_nodes[0]
return graph
| yang-guangliang/android_guard | androguard/decompiler/dad/graph.py | Python | apache-2.0 | 17,133 | [
"VisIt"
] | 0c05360024a380d1e1288f41ad69fe0103726c708e57ac62bfa19a24b96f5dc4 |
import numpy as np
from mayavi import mlab
from BDQuaternions import Conventions, EulerAngles
from BDSpace.Coordinates import Cartesian
import BDSpaceVis as Visual
# Create cartesian coordinate system
convention = Conventions().get_convention('Bunge')
# if you don't pass arguments the basis coincide with 'Absolute' (mayavi) coordinate system
CS_1 = Cartesian(origin=np.array([0, 0, 0]), euler_angles_convention=convention)
CS_2 = Cartesian(origin=np.array([3, 0, 0]), euler_angles_convention=convention)
CS_3 = Cartesian(origin=np.array([6, 0, 0]), euler_angles_convention=convention)
CS_4 = Cartesian(origin=np.array([0, 3, 0]), euler_angles_convention=convention)
CS_5 = Cartesian(origin=np.array([3, 3, 0]), euler_angles_convention=convention)
CS_6 = Cartesian(origin=np.array([6, 3, 0]), euler_angles_convention=convention)
step = 1.0 # in degrees
# to visualise the coordinate system basis the module Visual is used
fig = mlab.figure('CS demo', bgcolor=(0.5, 0.5, 0.5)) # Create the mayavi figure
cs_box_1, arrows_1, labels_1 = Visual.draw_coordinate_system_box(fig, CS_1)
cs_box_2, arrows_2, labels_2 = Visual.draw_coordinate_system_box(fig, CS_2)
cs_box_3, arrows_3, labels_3 = Visual.draw_coordinate_system_box(fig, CS_3)
cs_box_4, arrows_4, labels_4 = Visual.draw_coordinate_system_box(fig, CS_4)
cs_box_5, arrows_5, labels_5 = Visual.draw_coordinate_system_box(fig, CS_5)
cs_box_6, arrows_6, labels_6 = Visual.draw_coordinate_system_box(fig, CS_6)
direction = 1
@mlab.show
@mlab.animate(delay=10)
def anim():
direction = 1
while True:
CS_1.rotate_axis_angle(np.array([0, 1, 0], dtype=np.double), np.deg2rad(step)) # this is inplace transform
CS_2.rotate_axis_angle(np.array([1, 0, 0], dtype=np.double), np.deg2rad(step)) # this is inplace transform
CS_3.rotate_axis_angle(np.array([0, 0, 1], dtype=np.double), np.deg2rad(step)) # this is inplace transform
CS_4.euler_angles = EulerAngles(CS_4.euler_angles.euler_angles + np.array([0, 0, np.deg2rad(step)]), convention)
CS_5.euler_angles = EulerAngles(CS_5.euler_angles.euler_angles + direction * np.array([0, np.deg2rad(step), 0]),
convention)
CS_6.euler_angles = EulerAngles(CS_6.euler_angles.euler_angles + np.array([np.deg2rad(step), 0, 0]), convention)
if direction == 1 and abs(np.pi - CS_5.euler_angles.euler_angles[1]) < np.deg2rad(step):
direction *= -1
elif direction == -1 and abs(CS_5.euler_angles.euler_angles[1]) < np.deg2rad(step):
direction *= -1
Visual.update_coordinate_system_box(CS_1, cs_box_1, arrows_1, labels_1)
Visual.update_coordinate_system_box(CS_2, cs_box_2, arrows_2, labels_2)
Visual.update_coordinate_system_box(CS_3, cs_box_3, arrows_3, labels_3)
Visual.update_coordinate_system_box(CS_4, cs_box_4, arrows_4, labels_4)
Visual.update_coordinate_system_box(CS_5, cs_box_5, arrows_5, labels_5)
Visual.update_coordinate_system_box(CS_6, cs_box_6, arrows_6, labels_6)
yield
anim()
| bond-anton/Space_visualization | demo/02_euler_angles_animation.py | Python | apache-2.0 | 3,075 | [
"Mayavi"
] | ac546ab9ba175f299297cc7f0db5d70cba729aebd0f6bcc89d2c0e74abb6074c |
# _*_ coding: utf-8 -*-
import os
import sys
import json
import traceback
import cv2
import caffe
import numpy as np
import selectivesearch as ss
"""
AlpacaDB/selectivesearch
Parameters
----------
im_orig : ndarray
Input image
scale : int
Free parameter. Higher means larger clusters in felzenszwalb segmentation.
sigma : float
Width of Gaussian kernel for felzenszwalb segmentation.
min_size : int
Minimum component size for felzenszwalb segmentation.
Returns
-------
img : ndarray
image with region label
region label is stored in the 4th value of each pixel [r,g,b,(region)]
regions : array of dict
[
{
'rect': (left, top, right, bottom), # though the sample code unpacks this as (x, y, w, h)...
'labels': [...]
},
...
]
"""
class ObjectDetection:
def __init__(self, result_dir="/media/EXT/selective_search"):
self._result_dir = result_dir
def detect(self, img_path, overwrite=False, img_size=224, scale=300, sigma=0.7, min_size=10): # parameters chosen empirically
filename = os.path.splitext(os.path.basename(img_path))[0]
dir_no = os.path.basename(os.path.dirname(img_path))
save_dir = os.path.join(self._result_dir, dir_no)
if not os.path.exists(save_dir):
os.mkdir(save_dir)
save_path = os.path.join(save_dir, filename) # no file extension
try:
img = caffe.io.load_image(img_path, color=True) # caffe loads images as floats in [0, 1]; it works, but maybe uint8 would be better?
#img = cv2.imread(img_path)
#img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
if not overwrite and os.path.exists(save_path + ".json"):
img_lbl = np.load(save_path + ".npy")
with open(save_path + ".json", "r") as fin:
data = json.load(fin)
regions = data["regions"]
img = caffe.io.resize(img, (data["resize"][1], data["resize"][0], 3)) # resize to fixed size
#img = cv2.resize(img, (data["resize"][1], data["resize"][0])) # resize to fixed size
return img, img_lbl, regions
else:
img_h, img_w, _ = img.shape
if img_h >= img_size and img_w >= img_size: # resize large images (speeds things up and avoids per-image parameter tuning)
img = caffe.io.resize(img, (img_size, img_size, 3)) # resize to fixed size
#img = cv2.resize(img, (img_size, img_size)) # resize to fixed size
img_h, img_w, _ = img.shape
resize = [img_w, img_h]
img_lbl, regions = ss.selective_search(img, scale=scale, sigma=sigma, min_size=min_size) # empirically chosen parameters
np.save(save_path + ".npy", img_lbl)
data = {"resize":resize, "regions":regions}
with open(save_path + ".json", "w") as fout:
json.dump(data, fout, indent=2)
return img, img_lbl, regions
except KeyboardInterrupt:
print traceback.format_exc()
sys.exit()
except:
raise
| AtsushiHashimoto/fujino_mthesis | tools/module/ObjectDetection.py | Python | bsd-2-clause | 3,363 | [
"Gaussian"
] | 0098c53a4da22dc019528ed843b0d7bc51b208da81d495019f41cb4a9205ccbd |
'''Translate operations on globally scoped symbols into
operations on the symbol_cells mapping
'''
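# Conceptual sketch (illustrative, not part of the module): the transformer
# below rewrites access to globally scoped symbols so that, roughly,
#     x          ->  symbol_cells_map[<symbol 'x'>]         (read)
#     x = v      ->  symbol_cells_map[<symbol 'x'>] = v     (write)
#     del x      ->  del symbol_cells_map[<symbol 'x'>]     (delete)
# where symbol_cells_map is bound once at top level via get_symbol_cells_map().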
from __future__ import absolute_import
from __future__ import with_statement
from ..runtime.symbol import get_symbol_cells_map, gensym
from . import ir as I
from . import bind
from .walk import IRWalker, propigate_location
from .translate import state as translation_state
class GlobalSymbolTransformer(IRWalker):
def __init__(self, symbol_map_sym, top_scope):
IRWalker.__init__(self)
self.symbol_map_sym = symbol_map_sym
self.current_scope = top_scope
@staticmethod
def is_global(binding):
return bind.get_binding_use_type(binding) == bind.BND_GLOBAL
@staticmethod
def replace(old, new, skips=[]):
propigate_location(old, new, skips)
I.replace_child(old, new)
def visit_function(self, func):
for child in func.defaults:
self.visit(child)
old_scope = self.current_scope
self.current_scope = func.scope
self.visit(func.body)
self.current_scope = old_scope
def make_read_map(self):
return I.make_read_binding(self.current_scope.use_symbol(self.symbol_map_sym))
def visit_read_binding(self, rb):
if not self.is_global(rb.binding):
return
self.replace(rb, I.make_getitem(self.make_read_map(),
I.make_constant(rb.binding.symbol)))
def make_set(self, binding, value_ir):
return I.make_setitem(self.make_read_map(),
I.make_constant(binding.symbol),
value_ir)
def visit_write_binding(self, wb):
value = wb.value
if self.is_global(wb.binding):
del value.continuation
self.replace(wb, self.make_set(wb.binding, value),
skips=[value])
self.visit(value)
def visit_delete_binding(self, db):
if not self.is_global(db.binding):
return
self.replace(db, I.make_delitem(self.make_read_map(),
I.make_constant(db.binding.symbol)))
def visit_foriter(self, fi):
itr = fi.iter
if self.is_global(fi.binding):
old_binding = fi.binding
del fi.binding
sym = gensym('foriter-tmp')
self.current_scope.register_local(sym)
del itr.continuation
self.replace(fi, I.make_progn([
I.make_foriter(tag=fi.tag,
binding=self.current_scope.use_symbol(sym),
iter=itr),
self.make_set(old_binding, I.make_read_binding(self.current_scope.use_symbol(sym)))
]),
skips=[itr])
del fi.tag
self.visit(itr)
def visit_unpack_seq(self, us):
new_bindings = []
copies = []
for binding in us.places:
if not self.is_global(binding):
new_bindings.append(binding)
else:
gs = gensym('unpack-tmp')
new_bindings.append(self.current_scope.register_and_use_local(gs))
copies.append([gs, binding])
seq = us.seq
if copies:
del seq.continuation
del us.places
self.replace(us, I.make_progn([
I.make_unpack_seq(seq, new_bindings)
] + [self.make_set(binding, I.make_read_binding(self.current_scope.use_symbol(gs)))
for gs,binding in copies]),
skips=[seq])
self.visit(seq)
def transform_global_symbol_use(top):
assert isinstance(top, I.toplevel)
top_scope = top.scope
assert not top_scope.parent
symbol_map_sym = gensym('symbol-cells-map')
symbol_map_binding = top_scope.register_local(symbol_map_sym)
GlobalSymbolTransformer(symbol_map_sym, top_scope).visit(top.expression)
if not len(symbol_map_binding.uses):
top_scope.unregister_binding(symbol_map_binding)
return top
expression = top.expression
del expression.continuation
when = None
if isinstance(expression, I.evalwhen):
when = expression.when
expression = expression.expression
del expression.continuation
new_ir = I.make_progn([I.make_write_binding(
top_scope.use_symbol(symbol_map_sym),
I.make_call(callee=I.make_constant(get_symbol_cells_map),
args=[], kwd_names=[], kwd_values=[],
star_args=None, star_kwds=None)),
expression])
if when is not None:
new_ir = I.make_evalwhen(when=when, expression=new_ir)
new_top = I.make_toplevel(new_ir, top_scope)
propigate_location(top, new_top, [expression])
return new_top
| matthagy/Jamenson | jamenson/compiler/Attic/global_trans.py | Python | apache-2.0 | 4,911 | [
"VisIt"
] | e07ab367914a3a50ac87d04d322fc0f19bf4e314ff8e9fa35c3cb219257f8567 |
#!/usr/bin/env python
"""Convert BAM files to BigWig file format in a specified region.
Original version copyright Brad Chapman with revisions from Peter Cock
and ideas from Lance Parsons
Usage:
bam_to_bigwig.py <BAM file> [--outfile=<output file name>] [--split]
The --split argument is passed to bedtools genomecov
The script requires:
pysam (http://code.google.com/p/pysam/)
bedtools genomecov (http://code.google.com/p/bedtools/)
bedGraphToBigWig from UCSC (http://hgdownload.cse.ucsc.edu/admin/exe/)
"""
import os
import sys
import subprocess
import tempfile
from optparse import OptionParser
from contextlib import contextmanager, closing
import pysam
def main(bam_file, outfile=None, split=False):
config = {"program": {"ucsc_bedGraphToBigWig": ["bedGraphToBigWig"],
"bedtools_genomeCoverageBed":
["bedtools", "genomecov"]}}
if outfile is None:
outfile = "%s.bigwig" % os.path.splitext(bam_file)[0]
if os.path.abspath(bam_file) == os.path.abspath(outfile):
sys.stderr.write("Bad arguments, "
"input and output files are the same.\n")
sys.exit(1)
if os.path.exists(outfile) and os.path.getsize(outfile) > 0:
sys.stderr.write("Warning, output file already exists.\n")
sizes = get_sizes(bam_file, config)
print "Have %i references" % len(sizes)
if not sizes:
sys.stderr.write("Problem reading BAM header.\n")
sys.exit(1)
# Use a temp file to avoid any possibility of not having write permission
temp_handle = tempfile.NamedTemporaryFile(delete=False)
temp_file = temp_handle.name
with closing(temp_handle):
print "Calculating coverage..."
convert_to_graph(bam_file, split, config, temp_handle)
try:
print("Converting %i MB graph file to bigwig..." %
(os.path.getsize(temp_file) // (1024 * 1024)))
# Can't pipe this as stdin due to converter design,
# https://lists.soe.ucsc.edu/pipermail/genome/2011-March/025455.html
convert_to_bigwig(temp_file, sizes, config, outfile)
finally:
if os.path.isfile(temp_file):
os.remove(temp_file)
print "Done"
@contextmanager
def indexed_bam(bam_file, config):
if not os.path.exists(bam_file + ".bai"):
pysam.index(bam_file)
sam_reader = pysam.Samfile(bam_file, "rb")
yield sam_reader
sam_reader.close()
def get_sizes(bam_file, config):
with indexed_bam(bam_file, config) as work_bam:
sizes = zip(work_bam.references, work_bam.lengths)
return sizes
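# Editor's note (illustrative, not from the original script): get_sizes returns
# (reference name, length) pairs taken from the BAM header; a hypothetical human
# BAM might yield something like
#   [('chr1', 248956422), ('chr2', 242193529), ...]
# and these pairs are later written out as the chromosome sizes file consumed by
# bedGraphToBigWig.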
def convert_to_graph(bam_file, split, config, out_handle):
cl = config["program"]["bedtools_genomeCoverageBed"] + \
["-ibam", bam_file, "-bg"]
if split:
cl.append("-split")
new_env = os.environ.copy()
new_env['LC_COLLATE'] = 'C'
p1 = subprocess.Popen(cl, stdout=subprocess.PIPE)
p2 = subprocess.Popen(["sort", "-k1,1", "-k2,2n"],
env=new_env,
stdin=p1.stdout,
stdout=out_handle)
p1.stdout.close()
p2.communicate()
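# Editor's note (illustrative sketch with placeholder file names): the subprocess
# pipeline above is roughly equivalent to this shell command, assuming bedtools
# and sort are on the PATH:
#   bedtools genomecov -ibam input.bam -bg [-split] | LC_COLLATE=C sort -k1,1 -k2,2n > coverage.bedgraph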
def convert_to_bigwig(bedgraph_file, chr_sizes, config, bw_file):
# This will be fine under Galaxy, but could use temp folder?
size_file = "%s-sizes.txt" % (os.path.splitext(bw_file)[0])
with open(size_file, "w") as out_handle:
for chrom, size in chr_sizes:
out_handle.write("%s\t%s\n" % (chrom, size))
try:
cl = config["program"]["ucsc_bedGraphToBigWig"] + \
[bedgraph_file, size_file, bw_file]
subprocess.check_call(cl)
finally:
os.remove(size_file)
return bw_file
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("-o", "--outfile", dest="outfile")
parser.add_option("-s", "--split", action="store_true", dest="split")
(options, args) = parser.parse_args()
if len(args) not in [1, 2]:
print "Incorrect arguments"
print __doc__
sys.exit()
kwargs = dict(
outfile=options.outfile,
split=options.split)
main(*args, **kwargs)
| bgruening/galaxy_tools | tools/bam_to_bigwig/bam_to_bigwig.py | Python | bsd-2-clause | 4,155 | [
"Galaxy",
"pysam"
] | 37c6dbe89578a8b3a5486380b6aec0c94a13a77675fe55e390cca6ed8a769541 |
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
from numpy.testing import assert_equal, assert_almost_equal
import MDAnalysis as mda
from MDAnalysisTests.topology.base import ParserBase
from MDAnalysisTests.datafiles import FHIAIMS
class TestFHIAIMS(ParserBase):
parser = mda.topology.FHIAIMSParser.FHIAIMSParser
expected_attrs = ['names', 'elements']
guessed_attrs = ['masses', 'types']
expected_n_residues = 1
expected_n_segments = 1
expected_n_atoms = 6
ref_filename = FHIAIMS
def test_names(self, top):
assert_equal(top.names.values,
['O', 'H', 'H', 'O', 'H', 'H'])
def test_types(self, top):
assert_equal(top.types.values,
['O', 'H', 'H', 'O', 'H', 'H'])
def test_elements(self, top):
assert_equal(top.elements.values,
['O', 'H', 'H', 'O', 'H', 'H'])
def test_masses(self, top):
assert_almost_equal(top.masses.values,
[15.999, 1.008, 1.008, 15.999,
1.008, 1.008])
| MDAnalysis/mdanalysis | testsuite/MDAnalysisTests/topology/test_fhiaims.py | Python | gpl-2.0 | 2,101 | [
"MDAnalysis"
] | 8e49b566f4cbc627cd0c34d645687d267436ce83ba0b225318a92d0192f3f387 |
"""Core visualization operations."""
# Authors: Alexandre Gramfort <[email protected]>
# Eric Larson <[email protected]>
# Oleh Kozynets <[email protected]>
# Guillaume Favelier <[email protected]>
#
# License: Simplified BSD
from abc import ABCMeta, abstractclassmethod
class _BaseRenderer(metaclass=ABCMeta):
@abstractclassmethod
def __init__(self, fig=None, size=(600, 600), bgcolor=(0., 0., 0.),
name=None, show=False, shape=(1, 1)):
"""Set up the scene."""
pass
@abstractclassmethod
def subplot(self, x, y):
"""Set the active subplot."""
pass
@abstractclassmethod
def scene(self):
"""Return scene handle."""
pass
@abstractclassmethod
def set_interaction(self, interaction):
"""Set interaction mode."""
pass
@abstractclassmethod
def mesh(self, x, y, z, triangles, color, opacity=1.0, shading=False,
backface_culling=False, scalars=None, colormap=None,
vmin=None, vmax=None, interpolate_before_map=True,
representation='surface', line_width=1., normals=None, **kwargs):
"""Add a mesh in the scene.
Parameters
----------
x: array, shape (n_vertices,)
The array containing the X component of the vertices.
y: array, shape (n_vertices,)
The array containing the Y component of the vertices.
z: array, shape (n_vertices,)
The array containing the Z component of the vertices.
triangles: array, shape (n_polygons, 3)
The array containing the indices of the polygons.
color: tuple | str
The color of the mesh as a tuple (red, green, blue) of float
values between 0 and 1 or a valid color name (i.e. 'white'
or 'w').
opacity: float
The opacity of the mesh.
shading: bool
If True, enable the mesh shading.
backface_culling: bool
If True, enable backface culling on the mesh.
scalars: ndarray, shape (n_vertices,)
The scalar values associated with the vertices.
vmin: float | None
vmin is used to scale the colormap.
If None, the min of the data will be used
vmax: float | None
vmax is used to scale the colormap.
If None, the max of the data will be used
colormap:
The colormap to use.
interpolate_before_map:
Enabling makes for a smoother scalars display. Default is True.
When False, OpenGL will interpolate the mapped colors which can
result in showing colors that are not present in the color map.
representation: str
The representation of the mesh: either 'surface' or 'wireframe'.
line_width: int
The width of the lines when representation='wireframe'.
normals: array, shape (n_vertices, 3)
The array containing the normal of each vertex.
kwargs: args
The arguments to pass to triangular_mesh
Returns
-------
surface:
Handle of the mesh in the scene.
"""
pass
@abstractclassmethod
def contour(self, surface, scalars, contours, width=1.0, opacity=1.0,
vmin=None, vmax=None, colormap=None,
normalized_colormap=False, kind='line', color=None):
"""Add a contour in the scene.
Parameters
----------
surface: surface object
The mesh to use as support for contour.
scalars: ndarray, shape (n_vertices,)
The scalar values associated with the vertices.
contours: int | list
Specifying a list of values will only give the requested contours.
width: float
The width of the lines or radius of the tubes.
opacity: float
The opacity of the contour.
vmin: float | None
vmin is used to scale the colormap.
If None, the min of the data will be used
vmax: float | None
vmax is used to scale the colormap.
If None, the max of the data will be used
colormap:
The colormap to use.
normalized_colormap: bool
Specify if the values of the colormap are between 0 and 1.
kind: 'line' | 'tube'
The type of the primitives to use to display the contours.
color:
The color of the mesh as a tuple (red, green, blue) of float
values between 0 and 1 or a valid color name (i.e. 'white'
or 'w').
"""
pass
@abstractclassmethod
def surface(self, surface, color=None, opacity=1.0,
vmin=None, vmax=None, colormap=None, scalars=None,
backface_culling=False):
"""Add a surface in the scene.
Parameters
----------
surface: surface object
The information describing the surface.
color: tuple | str
The color of the surface as a tuple (red, green, blue) of float
values between 0 and 1 or a valid color name (i.e. 'white'
or 'w').
opacity: float
The opacity of the surface.
vmin: float | None
vmin is used to scale the colormap.
If None, the min of the data will be used
vmax: float | None
vmax is used to scale the colormap.
If None, the max of the data will be used
colormap:
The colormap to use.
scalars: ndarray, shape (n_vertices,)
The scalar values associated with the vertices.
backface_culling: bool
If True, enable backface culling on the surface.
"""
pass
@abstractclassmethod
def sphere(self, center, color, scale, opacity=1.0,
resolution=8, backface_culling=False,
radius=None):
"""Add sphere in the scene.
Parameters
----------
center: ndarray, shape(n_center, 3)
The list of centers to use for the sphere(s).
color: tuple | str
The color of the sphere as a tuple (red, green, blue) of float
values between 0 and 1 or a valid color name (i.e. 'white'
or 'w').
scale: float
The scaling applied to the spheres. The given value specifies
the maximum size in drawing units.
opacity: float
The opacity of the sphere(s).
resolution: int
The resolution of the sphere created. This is the number
of divisions along theta and phi.
backface_culling: bool
If True, enable backface culling on the sphere(s).
radius: float | None
Replace the glyph scaling by a fixed radius value for each
sphere (not supported by mayavi).
"""
pass
@abstractclassmethod
def tube(self, origin, destination, radius=0.001, color='white',
scalars=None, vmin=None, vmax=None, colormap='RdBu',
normalized_colormap=False, reverse_lut=False):
"""Add tube in the scene.
Parameters
----------
origin: array, shape(n_lines, 3)
The coordinates of the first end of the tube(s).
destination: array, shape(n_lines, 3)
The coordinates of the other end of the tube(s).
radius: float
The radius of the tube(s).
color: tuple | str
The color of the tube as a tuple (red, green, blue) of float
values between 0 and 1 or a valid color name (i.e. 'white'
or 'w').
scalars: array, shape (n_quivers,) | None
The optional scalar data to use.
vmin: float | None
vmin is used to scale the colormap.
If None, the min of the data will be used
vmax: float | None
vmax is used to scale the colormap.
If None, the max of the data will be used
colormap:
The colormap to use.
opacity: float
The opacity of the tube(s).
backface_culling: bool
If True, enable backface culling on the tube(s).
reverse_lut: bool
If True, reverse the lookup table.
Returns
-------
surface:
Handle of the tube in the scene.
"""
pass
@abstractclassmethod
def quiver3d(self, x, y, z, u, v, w, color, scale, mode, resolution=8,
glyph_height=None, glyph_center=None, glyph_resolution=None,
opacity=1.0, scale_mode='none', scalars=None,
backface_culling=False, colormap=None, vmin=None, vmax=None,
line_width=2., name=None):
"""Add quiver3d in the scene.
Parameters
----------
x: array, shape (n_quivers,)
The X component of the position of the quiver.
y: array, shape (n_quivers,)
The Y component of the position of the quiver.
z: array, shape (n_quivers,)
The Z component of the position of the quiver.
u: array, shape (n_quivers,)
The last X component of the quiver.
v: array, shape (n_quivers,)
The last Y component of the quiver.
w: array, shape (n_quivers,)
The last Z component of the quiver.
color: tuple | str
The color of the quiver as a tuple (red, green, blue) of float
values between 0 and 1 or a valid color name (i.e. 'white'
or 'w').
scale: float
The scaling applied to the glyphs. The size of the glyph
is by default calculated from the inter-glyph spacing.
The given value specifies the maximum glyph size in drawing units.
mode: 'arrow', 'cone' or 'cylinder'
The type of the quiver.
resolution: int
The resolution of the glyph created. Depending on the type of
glyph, it represents the number of divisions in its geometric
representation.
glyph_height: float
The height of the glyph used with the quiver.
glyph_center: tuple
The center of the glyph used with the quiver: (x, y, z).
glyph_resolution: float
The resolution of the glyph used with the quiver.
opacity: float
The opacity of the quiver.
scale_mode: 'vector', 'scalar' or 'none'
The scaling mode for the glyph.
scalars: array, shape (n_quivers,) | None
The optional scalar data to use.
backface_culling: bool
If True, enable backface culling on the quiver.
colormap:
The colormap to use.
vmin: float | None
vmin is used to scale the colormap.
If None, the min of the data will be used
vmax: float | None
vmax is used to scale the colormap.
If None, the max of the data will be used
line_width: float
The width of the 2d arrows.
"""
pass
@abstractclassmethod
def text2d(self, x_window, y_window, text, size=14, color='white'):
"""Add 2d text in the scene.
Parameters
----------
x: float
The X component to use as position of the text in the
window coordinates system (window_width, window_height).
y: float
The Y component to use as position of the text in the
window coordinates system (window_width, window_height).
text: str
The content of the text.
size: int
The size of the font.
color: tuple | str
The color of the text as a tuple (red, green, blue) of float
values between 0 and 1 or a valid color name (i.e. 'white'
or 'w').
"""
pass
@abstractclassmethod
def text3d(self, x, y, z, text, width, color='white'):
"""Add 3d text in the scene.
Parameters
----------
x: float
The X component to use as position of the text.
y: float
The Y component to use as position of the text.
z: float
The Z component to use as position of the text.
text: str
The content of the text.
width: float
The width of the text.
color: tuple | str
The color of the text as a tuple (red, green, blue) of float
values between 0 and 1 or a valid color name (i.e. 'white'
or 'w').
"""
pass
@abstractclassmethod
def scalarbar(self, source, color="white", title=None, n_labels=4,
bgcolor=None):
"""Add a scalar bar in the scene.
Parameters
----------
source:
The object of the scene used for the colormap.
color:
The color of the label text.
title: str | None
The title of the scalar bar.
n_labels: int | None
The number of labels to display on the scalar bar.
bgcolor:
The color of the background when there is transparency.
"""
pass
@abstractclassmethod
def show(self):
"""Render the scene."""
pass
@abstractclassmethod
def close(self):
"""Close the scene."""
pass
@abstractclassmethod
def set_camera(self, azimuth=None, elevation=None, distance=None,
focalpoint=None):
"""Configure the camera of the scene.
Parameters
----------
azimuth: float
The azimuthal angle of the camera.
elevation: float
The zenith angle of the camera.
distance: float
The distance to the focal point.
focalpoint: tuple
The focal point of the camera: (x, y, z).
"""
pass
@abstractclassmethod
def reset_camera(self):
"""Reset the camera properties."""
pass
@abstractclassmethod
def screenshot(self, mode='rgb', filename=None):
"""Take a screenshot of the scene.
Parameters
----------
mode: str
Either 'rgb' or 'rgba' for values to return.
Default is 'rgb'.
filename: str | None
If not None, save the figure to the disk.
"""
pass
@abstractclassmethod
def project(self, xyz, ch_names):
"""Convert 3d points to a 2d perspective.
Parameters
----------
xyz: array, shape(n_points, 3)
The points to project.
ch_names: array, shape(_n_points,)
Names of the channels.
"""
pass
@abstractclassmethod
def enable_depth_peeling(self):
"""Enable depth peeling."""
pass
@abstractclassmethod
def remove_mesh(self, mesh_data):
"""Remove the given mesh from the scene.
Parameters
----------
mesh_data : tuple | Surface
The mesh to remove.
"""
pass
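# Editor's sketch (hypothetical class, not part of MNE itself): a concrete
# backend is expected to subclass _BaseRenderer and implement every abstract
# method above; the outline below only illustrates the shape of such a backend.
#
#   class _DummyRenderer(_BaseRenderer):
#       def __init__(self, fig=None, size=(600, 600), bgcolor=(0., 0., 0.),
#                    name=None, show=False, shape=(1, 1)):
#           self._size = size
#       def scene(self):
#           return None
#       # ... remaining abstract methods implemented analogously ...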
| cjayb/mne-python | mne/viz/backends/base_renderer.py | Python | bsd-3-clause | 15,252 | [
"Mayavi"
] | b5dae8acc3e6a13d1395fa92dd277f2f79fcc333141e8a23a72c8d6e91360baa |
# -*- coding: utf-8 -*-
__version__ = '0.15.0.dev0'
PROJECT_NAME = "pulsar"
PROJECT_OWNER = PROJECT_USERAME = "galaxyproject"
PROJECT_AUTHOR = 'Galaxy Project and Community'
PROJECT_EMAIL = '[email protected]'
PROJECT_URL = "https://github.com/%s/%s" % (PROJECT_OWNER, PROJECT_NAME)
RAW_CONTENT_URL = "https://raw.github.com/%s/%s/master/" % (
PROJECT_USERAME, PROJECT_NAME
)
| natefoo/pulsar | pulsar/__init__.py | Python | apache-2.0 | 385 | [
"Galaxy"
] | 2d071c9e23c20d86232b5d69b32d8fd7c2671eba4635a01239a6012668b704b7 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts Assert statements to their corresponding TF calls."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gast
from tensorflow.contrib.py2tf.pyct import templates
from tensorflow.contrib.py2tf.pyct import transformer
class AssertsTransformer(transformer.Base):
"""Transforms Assert statements into tf.Assert calls so they can be handled as functions."""
# pylint:disable=invalid-name
def visit_Assert(self, node):
self.generic_visit(node)
# Note: The lone tf.Assert call will be wrapped with control_dependencies
# by side_effect_guards.
template = """
tf.Assert(test, [msg])
"""
if node.msg is None:
return templates.replace(
template, test=node.test, msg=gast.Str('Assertion error'))
elif isinstance(node.msg, gast.Str):
return templates.replace(template, test=node.test, msg=node.msg)
else:
raise NotImplementedError('Can only convert string messages for now.')
# pylint:enable=invalid-name
def transform(node, context):
return AssertsTransformer(context).visit(node)
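# Editor's note (illustrative, not part of the original module): applied to user
# code, the transformer rewrites a plain assert into the templated call shown
# above, conceptually:
#
#   assert x > 0, 'x must be positive'
#   # becomes (before control_dependencies wrapping by side_effect_guards):
#   tf.Assert(x > 0, ['x must be positive'])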
| zasdfgbnm/tensorflow | tensorflow/contrib/py2tf/converters/asserts.py | Python | apache-2.0 | 1,805 | [
"VisIt"
] | 3cb685afda1ebb456281d5002339ce26bcdef3b07698cff8eb2108034df7374e |
# -*- coding: utf-8 -*-
# HORTON: Helpful Open-source Research TOol for N-fermion systems.
# Copyright (C) 2011-2015 The HORTON Development Team
#
# This file is part of HORTON.
#
# HORTON is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# HORTON is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
#--
'''VASP POSCAR, CHGCAR and POTCAR file formats'''
import numpy as np
from horton.units import angstrom, electronvolt
from horton.periodic import periodic
from horton.cext import Cell
from horton.grid.cext import UniformGrid
__all__ = ['load_chgcar', 'load_locpot', 'load_poscar', 'dump_poscar']
def _unravel_counter(counter, shape):
result = []
for i in xrange(0, len(shape)):
result.append(counter % shape[i])
counter /= shape[i]
return result
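# Editor's note (worked example, not from the original source): with a flat
# counter of 5 and a shape of [2, 2, 2] the helper walks the axes in order,
# taking counter % 2 and integer-dividing each time, so it returns [1, 0, 1]:
#   _unravel_counter(5, [2, 2, 2]) -> [1, 0, 1]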
def _load_vasp_header(f, nskip):
'''Load the cell and atoms from a VASP file
**Arguments:**
f
An open file object
nskip
The number of lines to skip after the line with elements
**Returns:** ``title``, ``cell``, ``numbers``, ``coordinates``
'''
# read the title
title = f.next().strip()
f.next()
# read cell parameters in angstrom. each row is one cell vector
rvecs = []
for i in xrange(3):
rvecs.append([float(w) for w in f.next().split()])
rvecs = np.array(rvecs)*angstrom
# Convert to cell object
cell = Cell(rvecs)
vasp_numbers = [periodic[w].number for w in f.next().split()]
vasp_counts = [int(w) for w in f.next().split()]
numbers = []
for n, c in zip(vasp_numbers, vasp_counts):
numbers.extend([n]*c)
numbers = np.array(numbers)
# skip some lines
for i in xrange(nskip):
f.next()
assert f.next().startswith('Direct')
# read the fractional coordinates and convert to Cartesian
coordinates = []
for line in f:
if len(line.strip()) == 0:
break
coordinates.append([float(w) for w in line.split()[:3]])
coordinates = np.dot(np.array(coordinates), rvecs)
return title, cell, numbers, coordinates
def _load_vasp_grid(filename):
'''Load a grid data file from VASP 5
**Arguments:**
filename
The VASP filename
**Returns:** a dictionary containing: ``title``, ``coordinates``,
``numbers``, ``cell``, ``grid``, ``cube_data``.
'''
with open(filename) as f:
# Load header
title, cell, numbers, coordinates = _load_vasp_header(f, 0)
# read the shape of the data
shape = np.array([int(w) for w in f.next().split()])
# read data
cube_data = np.zeros(shape, float)
counter = 0
for line in f:
if counter >= cube_data.size:
break
for w in line.split():
i0, i1, i2 = _unravel_counter(counter, shape)
# Fill in the data with transposed indexes. In horton, X is
# the slowest index while Z is the fastest.
cube_data[i0, i1, i2] = float(w)
counter += 1
assert counter == cube_data.size
return {
'title': title,
'coordinates': coordinates,
'numbers': numbers,
'cell': cell,
'grid': UniformGrid(np.zeros(3), cell.rvecs/shape.reshape(-1,1), shape, np.ones(3, int)),
'cube_data': cube_data,
}
def load_chgcar(filename):
'''Reads a vasp 5 chgcar file.
**Arguments:**
filename
The VASP filename
**Returns:** a dictionary containing: ``title``, ``coordinates``,
``numbers``, ``cell``, ``grid``, ``cube_data``.
'''
result = _load_vasp_grid(filename)
# renormalize electron density
result['cube_data'] /= result['cell'].volume
return result
def load_locpot(filename):
'''Reads a vasp 5 locpot file.
**Arguments:**
filename
The VASP filename
**Returns:** a dictionary containing: ``title``, ``coordinates``,
``numbers``, ``cell``, ``grid``, ``cube_data``.
'''
result = _load_vasp_grid(filename)
# convert locpot to atomic units
result['cube_data'] *= electronvolt
return result
def load_poscar(filename):
'''Reads a vasp 5 poscar file.
**Arguments:**
filename
The VASP filename
**Returns:** a dictionary containing: ``title``, ``coordinates``,
``numbers``, ``cell``.
'''
with open(filename) as f:
# Load header
title, cell, numbers, coordinates = _load_vasp_header(f, 1)
return {
'title': title,
'coordinates': coordinates,
'numbers': numbers,
'cell': cell,
}
def dump_poscar(filename, data):
'''Write a file in VASP's POSCAR format
**Arguments:**
filename
The name of the file to be written. This is usually POSCAR.
data
An IOData instance. Must contain ``coordinates``, ``numbers``,
``cell``. May contain ``title``.
'''
with open(filename, 'w') as f:
print >> f, getattr(data, 'title', 'Created with HORTON')
print >> f, ' 1.00000000000000'
# Write cell vectors, each row is one vector in angstrom:
rvecs = data.cell.rvecs
for rvec in rvecs:
print >> f, ' % 21.16f % 21.16f % 21.16f' % tuple(rvec/angstrom)
# Construct list of elements to make sure the coordinates get written
# in this order. Heaviest elements are put first.
unumbers = sorted(np.unique(data.numbers))[::-1]
print >> f, ' '.join('%5s' % periodic[unumber].symbol for unumber in unumbers)
print >> f, ' '.join('%5i' % (data.numbers == unumber).sum() for unumber in unumbers)
print >> f, 'Selective dynamics'
print >> f, 'Direct'
# Write the coordinates
for unumber in unumbers:
indexes = (data.numbers == unumber).nonzero()[0]
for index in indexes:
row = data.cell.to_frac(data.coordinates[index])
print >> f, ' % 21.16f % 21.16f % 21.16f F F F' % tuple(row)
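# Editor's sketch (hypothetical file names, not part of HORTON): typical usage of
# the loaders/dumper defined above might look like
#
#   data = load_chgcar('CHGCAR')      # electron density in atomic units
#   grid = data['grid']               # UniformGrid instance
#   cube = data['cube_data']          # ndarray with the density values
#
#   mol = load_poscar('POSCAR')
#   # dump_poscar expects an object exposing coordinates, numbers and cell as
#   # attributes (e.g. an IOData instance), so a plain dict returned by
#   # load_poscar would first have to be wrapped accordingly.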
| eustislab/horton | horton/io/vasp.py | Python | gpl-3.0 | 6,647 | [
"VASP"
] | ef0d400490208bea2ac518d969606d82a501ae1beeb33b478c1bc16b113f7eeb |
"""
This module module is used to generate the CAs and CRLs (revoked certificates)
Example::
from DIRAC.Core.Security import Utilities
retVal = Utilities.generateRevokedCertsFile()
if retVal['OK']:
cl = Elasticsearch( self.__url,
timeout = self.__timeout,
use_ssl = True,
verify_certs = True,
ca_certs = retVal['Value'] )
or::
retVal = Utilities.generateCAFile('/WebApp/HTTPS/Cert')
if retVal['OK']:
sslops = dict( certfile = CertificateMgmt.getCert(/WebApp/HTTPS/Cert),
keyfile = CertificateMgmt.getCert(/WebApp/HTTPS/Key),
cert_reqs = ssl.CERT_OPTIONAL,
ca_certs = retVal['Value'],
ssl_version = ssl.PROTOCOL_TLSv1 )
srv = tornado.httpserver.HTTPServer( self.__app, ssl_options = sslops, xheaders = True )
Note: if you want to make sure that the CA is up to date, it is better to use the BundleDeliveryClient.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
from DIRAC.Core.Security import X509Chain, X509CRL
from DIRAC.Core.Security import Locations
from DIRAC import gLogger, S_OK, S_ERROR
def generateCAFile(location=None):
"""
Generate/find a single CA file with all the PEMs
:param str location: we can specify a specific CS location
where the directory containing the CAs and CRLs is configured
:return: path of the cas.pem file containing all the certificates (found or created)
"""
caDir = Locations.getCAsLocation()
if not caDir:
return S_ERROR('No CAs dir found')
# look in what's normally /etc/grid-security/certificates
if os.path.isfile(os.path.join(os.path.dirname(caDir), "cas.pem")):
return S_OK(os.path.join(os.path.dirname(caDir), "cas.pem"))
# look in what's normally /opt/dirac/etc/grid-security
diracCADirPEM = os.path.join(
os.path.dirname(Locations.getHostCertificateAndKeyLocation(location)[0]),
"cas.pem")
if os.path.isfile(diracCADirPEM):
return S_OK(diracCADirPEM)
# Now we create it in tmpdir
fn = tempfile.mkstemp(prefix="cas.", suffix=".pem")[1]
try:
with open(fn, "w") as fd:
for caFile in os.listdir(caDir):
caFile = os.path.join(caDir, caFile)
chain = X509Chain.X509Chain()
result = chain.loadChainFromFile(caFile)
if not result['OK']:
continue
expired = chain.hasExpired()
if not expired['OK'] or expired['Value']:
continue
fd.write(chain.dumpAllToString()['Value'])
gLogger.info("CAs used from: %s" % str(fn))
return S_OK(fn)
except IOError as err:
gLogger.warn(err)
return S_ERROR("Could not find/generate CAs")
def generateRevokedCertsFile(location=None):
"""
Generate a single CA file with all the PEMs
:param str location: we can specify a specific CS location
where the directory containing the CAs and CRLs is configured
:return: path of the crls.pem file containing all the CRLs (found or created)
"""
caDir = Locations.getCAsLocation()
if not caDir:
return S_ERROR('No CAs dir found')
# look in what's normally /etc/grid-security/certificates
if os.path.isfile(os.path.join(os.path.dirname(caDir), "crls.pem")):
return S_OK(os.path.join(os.path.dirname(caDir), "crls.pem"))
# look in what's normally /opt/dirac/etc/grid-security
diracCADirPEM = os.path.join(
os.path.dirname(Locations.getHostCertificateAndKeyLocation(location)[0]),
"crls.pem")
if os.path.isfile(diracCADirPEM):
return S_OK(diracCADirPEM)
# Now we create it in tmpdir
fn = tempfile.mkstemp(prefix="crls", suffix=".pem")[1]
try:
with open(fn, "w") as fd:
for caFile in os.listdir(caDir):
caFile = os.path.join(caDir, caFile)
result = X509CRL.X509CRL.instanceFromFile(caFile)
if not result['OK']:
continue
chain = result['Value']
fd.write(chain.dumpAllToString()['Value'])
return S_OK(fn)
except IOError as err:
gLogger.warn(err)
return S_ERROR("Could not find/generate CRLs")
| yujikato/DIRAC | src/DIRAC/Core/Security/Utilities.py | Python | gpl-3.0 | 4,229 | [
"DIRAC"
] | 7d215b340f298c18048081877e0c14bfcf59ba2ebab536e75bf97df34222d9ca |
#IMPORTANT: if you want to load this script directly into ParaView, copy it to OSS_Platform/ and load it from there
#### import the simple module from the paraview
from paraview.simple import *
#### disable automatic camera reset on 'Show'
paraview.simple._DisableFirstRenderCameraReset()
# create a new 'CSV Reader'
#get this list using arr_srt = [str(r) for r in list(hf.keys())]
list_of_connections=['Direct_upsampled_GPi_Str', 'Direct_upsampled_SN_Str', 'HDP', 'HDP_STN_GPi_upsampled_30', 'HDP_STN_SN_10', 'Indirect_upsampled_STN_GPe', 'Indirect_upsampled_STN_SN_10']
#prepare a color map
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
import os
def make_a_screenshot(path_to_insert):
# create a new 'PVD Reader'
home_dir=os.path.expanduser("~")
viridis = cm.get_cmap('viridis', len(list_of_connections))
index_population=0
for connection in list_of_connections:
connection_csv = CSVReader(FileName=[home_dir+path_to_insert+'/Neuron_model_arrays/'+connection+'.csv'])
#gPi_mask_GPe_mask_5_Indirect_Branch_MC_Str_GPe_GPicsv = CSVReader(FileName=['/home/konstantin/Documents/brukerMRI-master/Classified_by_Sosoho/Segment/GPi_mask_GPe_mask_5_Indirect_Branch_MC_Str_GPe_GPi.csv'])
# Properties modified on gPi_mask_GPe_mask_5_Indirect_Branch_MC_Str_GPe_GPicsv
connection_csv.HaveHeaders = 0
connection_csv.FieldDelimiterCharacters = ' '
# get active view
renderView1 = GetActiveViewOrCreate('RenderView')
viewLayout1 = GetLayout()
# Create a new 'SpreadSheet View'
spreadSheetView1 = CreateView('SpreadSheetView')
spreadSheetView1.BlockSize = 1024L
spreadSheetView1.ColumnToSort = ''
viewLayout1.AssignView(2, spreadSheetView1)
# show data in view
connection_csvDisplay = Show(connection_csv, spreadSheetView1)
# trace defaults for the display properties.
connection_csvDisplay.FieldAssociation = 'Row Data'
# create a new 'Table To Points'
#tableToPoints1 = TableToPoints(Input=neuron_model_results_STN_mask_GPe_mask_20_Excitatory_stn2gpe_asscsv)
## rename source object
#RenameSource('Nmodels', tableToPoints1)
globals()[connection+'_table']= TableToPoints(Input=connection_csv) #very bad way, but it is just a standalone script
globals()[connection+'_table'].XColumn = 'Field 0'
globals()[connection+'_table'].YColumn = 'Field 1'
globals()[connection+'_table'].ZColumn = 'Field 2'
# tableToPoints1 = TableToPoints(Input=connection_csv)
# tableToPoints1.XColumn = 'Field 0'
# tableToPoints1.YColumn = 'Field 0'
# tableToPoints1.ZColumn = 'Field 0'
#
# # Properties modified on tableToPoints1
# tableToPoints1.YColumn = 'Field 1'
# tableToPoints1.ZColumn = 'Field 2'
# show data in view
#tableToPoints1Display = Show(tableToPoints1, spreadSheetView1)
globals()[connection+'_tableDisplay']= Show(globals()[connection+'_table'], spreadSheetView1)
RenameSource(connection,globals()[connection+'_table'])
# hide data in view
Hide(connection_csv, spreadSheetView1)
# set active view
SetActiveView(renderView1)
# set active source
#SetActiveSource(tableToPoints1)
SetActiveSource(globals()[connection+'_table'])
## show data in view
#tableToPoints1Display_1 = Show(tableToPoints1, renderView1)
## trace defaults for the display properties.
#tableToPoints1Display_1.ColorArrayName = [None, '']
#tableToPoints1Display_1.GlyphType = 'Arrow'
globals()[connection+'_tableDisplay_1'] = Show(globals()[connection+'_table'], renderView1)
# trace defaults for the display properties.
globals()[connection+'_tableDisplay_1'].ColorArrayName = [None, '']
globals()[connection+'_tableDisplay_1'].GlyphType = 'Arrow'
# reset view to fit data
renderView1.ResetCamera()
# set active view
SetActiveView(spreadSheetView1)
# destroy spreadSheetView1
Delete(spreadSheetView1)
del spreadSheetView1
# close an empty frame
viewLayout1.Collapse(2)
# set active view
SetActiveView(renderView1)
## change solid color
##tableToPoints1Display_1.DiffuseColor = [0.6196078431372549, 1.0, 0.12549019607843137]
#if "Indirect" in connection:
# globals()[connection+'_tableDisplay_1'].DiffuseColor = [0.6196078431372549, 1.0, 0.12549019607843137]
#elif "Direct" in connection:
# globals()[connection+'_tableDisplay_1'].DiffuseColor = [1.0, 0.0, 0.0]
#elif "HDP" in connection:
# globals()[connection+'_tableDisplay_1'].DiffuseColor = [0.12156862745098039, 0.5843137254901961, 0.8941176470588236]
#else:
# globals()[connection+'_tableDisplay_1'].DiffuseColor = [1.0,1.0,1.0]
globals()[connection+'_tableDisplay_1'].DiffuseColor = list(viridis(index_population))[:3]
index_population=index_population+1
#
## create a new 'CSV Reader'
#gPi_mask_Str_mask_5_Direct_Branch_MC_Str_GPicsv = CSVReader(FileName=['/home/konstantin/Documents/brukerMRI-master/Classified_by_Sosoho/Segment/GPi_mask_Str_mask_5_Direct_Branch_MC_Str_GPi.csv'])
#
## Properties modified on gPi_mask_Str_mask_5_Direct_Branch_MC_Str_GPicsv
#gPi_mask_Str_mask_5_Direct_Branch_MC_Str_GPicsv.HaveHeaders = 0
#gPi_mask_Str_mask_5_Direct_Branch_MC_Str_GPicsv.FieldDelimiterCharacters = ' '
#
## Create a new 'SpreadSheet View'
#spreadSheetView1 = CreateView('SpreadSheetView')
#spreadSheetView1.BlockSize = 1024L
## uncomment following to set a specific view size
## spreadSheetView1.ViewSize = [400, 400]
#
## place view in the layout
#viewLayout1.AssignView(2, spreadSheetView1)
#
## show data in view
#gPi_mask_Str_mask_5_Direct_Branch_MC_Str_GPicsvDisplay = Show(gPi_mask_Str_mask_5_Direct_Branch_MC_Str_GPicsv, spreadSheetView1)
## trace defaults for the display properties.
#gPi_mask_Str_mask_5_Direct_Branch_MC_Str_GPicsvDisplay.FieldAssociation = 'Row Data'
#
## create a new 'Table To Points'
#tableToPoints2 = TableToPoints(Input=gPi_mask_Str_mask_5_Direct_Branch_MC_Str_GPicsv)
#tableToPoints2.XColumn = 'Field 0'
#tableToPoints2.YColumn = 'Field 0'
#tableToPoints2.ZColumn = 'Field 0'
#
## Properties modified on tableToPoints2
#tableToPoints2.YColumn = 'Field 1'
#tableToPoints2.ZColumn = 'Field 2'
#
## show data in view
#tableToPoints2Display = Show(tableToPoints2, spreadSheetView1)
#
## hide data in view
#Hide(gPi_mask_Str_mask_5_Direct_Branch_MC_Str_GPicsv, spreadSheetView1)
#
## set active view
#SetActiveView(renderView1)
#
## set active source
#SetActiveSource(tableToPoints2)
#
## show data in view
#tableToPoints2Display_1 = Show(tableToPoints2, renderView1)
## trace defaults for the display properties.
#tableToPoints2Display_1.ColorArrayName = [None, '']
#tableToPoints2Display_1.GlyphType = 'Arrow'
#
## set active view
#SetActiveView(spreadSheetView1)
#
## destroy spreadSheetView1
#Delete(spreadSheetView1)
#del spreadSheetView1
#
## close an empty frame
#viewLayout1.Collapse(2)
#
## set active view
#SetActiveView(renderView1)
#
## change solid color
#tableToPoints2Display_1.DiffuseColor = [1.0, 0.0, 0.0]
#
## create a new 'CSV Reader'
#sTN_mask_MC_mask_35_HDP_Branch_STNcsv = CSVReader(FileName=['/home/konstantin/Documents/brukerMRI-master/Classified_by_Sosoho/Segment/STN_mask_MC_mask_35_HDP_Branch_STN.csv'])
#
## Properties modified on sTN_mask_MC_mask_35_HDP_Branch_STNcsv
#sTN_mask_MC_mask_35_HDP_Branch_STNcsv.HaveHeaders = 0
#sTN_mask_MC_mask_35_HDP_Branch_STNcsv.FieldDelimiterCharacters = ' '
#
## Create a new 'SpreadSheet View'
#spreadSheetView1 = CreateView('SpreadSheetView')
#spreadSheetView1.BlockSize = 1024L
## uncomment following to set a specific view size
## spreadSheetView1.ViewSize = [400, 400]
#
## place view in the layout
#viewLayout1.AssignView(2, spreadSheetView1)
#
## show data in view
#sTN_mask_MC_mask_35_HDP_Branch_STNcsvDisplay = Show(sTN_mask_MC_mask_35_HDP_Branch_STNcsv, spreadSheetView1)
## trace defaults for the display properties.
#sTN_mask_MC_mask_35_HDP_Branch_STNcsvDisplay.FieldAssociation = 'Row Data'
#
## create a new 'Table To Points'
#tableToPoints3 = TableToPoints(Input=sTN_mask_MC_mask_35_HDP_Branch_STNcsv)
#tableToPoints3.XColumn = 'Field 0'
#tableToPoints3.YColumn = 'Field 0'
#tableToPoints3.ZColumn = 'Field 0'
#
## Properties modified on tableToPoints3
#tableToPoints3.YColumn = 'Field 1'
#tableToPoints3.ZColumn = 'Field 2'
#
## show data in view
#tableToPoints3Display = Show(tableToPoints3, spreadSheetView1)
#
## hide data in view
#Hide(sTN_mask_MC_mask_35_HDP_Branch_STNcsv, spreadSheetView1)
#
## destroy spreadSheetView1
#Delete(spreadSheetView1)
#del spreadSheetView1
#
## close an empty frame
#viewLayout1.Collapse(2)
#
## set active view
#SetActiveView(renderView1)
#
## set active source
#SetActiveSource(tableToPoints3)
#
## show data in view
#tableToPoints3Display = Show(tableToPoints3, renderView1)
## trace defaults for the display properties.
#tableToPoints3Display.ColorArrayName = [None, '']
#tableToPoints3Display.GlyphType = 'Arrow'
#
## change solid color
#tableToPoints3Display.DiffuseColor = [0.12156862745098039, 0.5843137254901961, 0.8941176470588236]
#### saving camera placements for all active views
# current camera placement for renderView1
renderView1.CameraPosition = [-5.943765067238889, 30.063310553609174, 19.481178764885886]
renderView1.CameraFocalPoint = [20.037838396112274, -8.840344561377364, -4.671394748401762]
renderView1.CameraViewUp = [-0.4484871730366652, 0.23572557572149833, -0.8621442504432473]
renderView1.CameraParallelScale = 24.899825806468712
renderView1.ViewSize = [1600, 1000]
renderView1.ResetCamera()
SaveScreenshot(home_dir+path_to_insert+'/Images/Axon_connections.png', magnification=1, quality=100, view=renderView1)
#### uncomment the following to render all views
# RenderAllViews()
# alternatively, if you want to write images, you can use SaveScreenshot(...).
if __name__ == '__main__':
make_a_screenshot(*sys.argv[1:])
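# Editor's note (hypothetical invocation, not from the original script): the
# script expects the sub-path under the home directory that contains the
# Neuron_model_arrays/ and Images/ folders, e.g. something like
#   pvpython Paraview_connections_processed.py /Documents/OSS_platform_results
# where the argument is only an assumed example path.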
| andreashorn/lead_dbs | ext_libs/OSS-DBS/OSS_platform/Visualization_files/Paraview_connections_processed.py | Python | gpl-3.0 | 10,179 | [
"ParaView"
] | 908b4e1158eb20e01a74d2d03afdf4b0ee43a6b181e08a86cd1f8ec103ab0308 |
"""
This module is adapted from ANUGA
https://anuga.anu.edu.au/
"""
#FIXME: Ensure that all attributes of a georef are treated everywhere
#and unit test
import types, sys
import copy
import numpy as num
DEFAULT_ZONE = -1
TITLE = '#geo reference' + "\n" # this title is referred to in the test format
DEFAULT_PROJECTION = 'UTM'
DEFAULT_DATUM = 'wgs84'
DEFAULT_UNITS = 'm'
DEFAULT_FALSE_EASTING = 500000
DEFAULT_FALSE_NORTHING = 10000000 # Default for southern hemisphere
##
# @brief A class for ...
class Geo_reference:
"""
Attributes of the Geo_reference class:
.zone The UTM zone (default is -1)
.false_easting ??
.false_northing ??
.datum The Datum used (default is wgs84)
.projection The projection used (default is 'UTM')
.units The units of measure used (default metres)
.xllcorner The X coord of origin (default is 0.0 wrt UTM grid)
.yllcorner The y coord of origin (default is 0.0 wrt UTM grid)
.is_absolute ??
"""
##
# @brief Instantiate an instance of class Geo_reference.
# @param zone The UTM zone.
# @param xllcorner X coord of origin of georef.
# @param yllcorner Y coord of origin of georef.
# @param datum ??
# @param projection The projection used (default UTM).
# @param units Units used in measuring distance (default m).
# @param false_easting ??
# @param false_northing ??
# @param NetCDFObject NetCDF file *handle* to write to.
# @param ASCIIFile ASCII text file *handle* to write to.
# @param read_title Title of the georeference text.
def __init__(self,
zone=DEFAULT_ZONE,
xllcorner=0.0,
yllcorner=0.0,
datum=DEFAULT_DATUM,
projection=DEFAULT_PROJECTION,
units=DEFAULT_UNITS,
false_easting=DEFAULT_FALSE_EASTING,
false_northing=DEFAULT_FALSE_NORTHING,
NetCDFObject=None,
ASCIIFile=None,
read_title=None):
"""
input:
NetCDFObject - a handle to the netCDF file to be written to
ASCIIFile - a handle to the text file
read_title - the title of the georeference text, if it was read in.
If the function that calls this has already read the title line,
it can't unread it, so this info has to be passed.
If you know of a way to unread this info, then tell us.
Note, the text file only saves a sub set of the info the
points file does. Currently the info not written in text
must be the default info, since ANUGA assumes it isn't
changing.
"""
if zone is None:
zone = DEFAULT_ZONE
self.false_easting = int(false_easting)
self.false_northing = int(false_northing)
self.datum = datum
self.projection = projection
self.zone = int(zone)
self.units = units
self.xllcorner = float(xllcorner)
self.yllcorner = float(yllcorner)
if NetCDFObject is not None:
self.read_NetCDF(NetCDFObject)
if ASCIIFile is not None:
self.read_ASCII(ASCIIFile, read_title=read_title)
# Set flag for absolute points (used by get_absolute)
self.absolute = num.allclose([self.xllcorner, self.yllcorner], 0)
def get_xllcorner(self):
return self.xllcorner
##
# @brief Get the Y coordinate of the origin of this georef.
def get_yllcorner(self):
return self.yllcorner
##
# @brief Get the zone of this georef.
def get_zone(self):
return self.zone
##
# @brief Write <something> to an open NetCDF file.
# @param outfile Handle to open NetCDF file.
def write_NetCDF(self, outfile):
outfile.xllcorner = self.xllcorner
outfile.yllcorner = self.yllcorner
outfile.zone = self.zone
outfile.false_easting = self.false_easting
outfile.false_northing = self.false_northing
outfile.datum = self.datum
outfile.projection = self.projection
outfile.units = self.units
##
# @brief Read data from an open NetCDF file.
# @param infile Handle to open NetCDF file.
def read_NetCDF(self, infile):
self.xllcorner = float(infile.xllcorner[0])
self.yllcorner = float(infile.yllcorner[0])
self.zone = int(infile.zone[0])
try:
self.false_easting = int(infile.false_easting[0])
self.false_northing = int(infile.false_northing[0])
self.datum = infile.datum
self.projection = infile.projection
self.units = infile.units
except:
pass
if self.false_easting != DEFAULT_FALSE_EASTING:
print ("WARNING: False easting of %f specified." % self.false_easting)
print ("Default false easting is %f." % DEFAULT_FALSE_EASTING)
print ("ANUGA does not correct for differences in False Eastings.")
if self.false_northing != DEFAULT_FALSE_NORTHING:
print ("WARNING: False northing of %f specified."
% self.false_northing)
print ("Default false northing is %f." % DEFAULT_FALSE_NORTHING)
print ("ANUGA does not correct for differences in False Northings.")
if self.datum.upper() != DEFAULT_DATUM.upper():
print ("WARNING: Datum of %s specified." % self.datum)
print ("Default Datum is %s." % DEFAULT_DATUM)
print ("ANUGA does not correct for differences in datums.")
if self.projection.upper() != DEFAULT_PROJECTION.upper():
print ("WARNING: Projection of %s specified." % self.projection)
print ("Default Projection is %s." % DEFAULT_PROJECTION)
print ("ANUGA does not correct for differences in Projection.")
if self.units.upper() != DEFAULT_UNITS.upper():
print ("WARNING: Units of %s specified." % self.units)
print ("Default units is %s." % DEFAULT_UNITS)
print ("ANUGA does not correct for differences in units.")
################################################################################
# ASCII files with geo-refs are currently not used
################################################################################
##
# @brief Write georef data to an open text file.
# @param fd Handle to open text file.
def write_ASCII(self, fd):
fd.write(TITLE)
fd.write(str(self.zone) + "\n")
fd.write(str(self.xllcorner) + "\n")
fd.write(str(self.yllcorner) + "\n")
##
# @brief Read georef data from an open text file.
# @param fd Handle to open text file.
def read_ASCII(self, fd, read_title=None):
try:
if read_title == None:
read_title = fd.readline() # remove the title line
if read_title[0:2].upper() != TITLE[0:2].upper():
msg = ('File error. Expecting line: %s. Got this line: %s'
% (TITLE, read_title))
raise TitleError, msg
self.zone = int(fd.readline())
self.xllcorner = float(fd.readline())
self.yllcorner = float(fd.readline())
except SyntaxError:
msg = 'File error. Got syntax error while parsing geo reference'
raise ParsingError, msg
# Fix some assertion failures
if isinstance(self.zone, num.ndarray) and self.zone.shape == ():
self.zone = self.zone[0]
if (isinstance(self.xllcorner, num.ndarray) and
self.xllcorner.shape == ()):
self.xllcorner = self.xllcorner[0]
if (isinstance(self.yllcorner, num.ndarray) and
self.yllcorner.shape == ()):
self.yllcorner = self.yllcorner[0]
assert (type(self.xllcorner) == types.FloatType)
assert (type(self.yllcorner) == types.FloatType)
assert (type(self.zone) == types.IntType)
################################################################################
##
# @brief Change points to be absolute wrt new georef 'points_geo_ref'.
# @param points The points to change.
# @param points_geo_ref The new georef to make points absolute wrt.
# @return The changed points.
# @note If 'points' is a list then a changed list is returned.
def change_points_geo_ref(self, points, points_geo_ref=None):
"""Change the geo reference of a list or numeric array of points to
be this reference.(The reference used for this object)
If the points do not have a geo ref, assume 'absolute' values
"""
import copy
# remember if we got a list
is_list = isinstance(points, list)
points = ensure_numeric(points, num.float)
# sanity checks
if len(points.shape) == 1:
#One point has been passed
msg = 'Single point must have two elements'
assert len(points) == 2, msg
points = num.reshape(points, (1,2))
msg = 'Points array must be two dimensional.\n'
msg += 'I got %d dimensions' %len(points.shape)
assert len(points.shape) == 2, msg
msg = 'Input must be an N x 2 array or list of (x,y) values. '
msg += 'I got an %d x %d array' %points.shape
assert points.shape[1] == 2, msg
# FIXME (Ole): Could also check if zone, xllcorner, yllcorner
# are identical in the two geo refs.
if points_geo_ref is not self:
# If georeferences are different
points = copy.copy(points) # Don't destroy input
if not points_geo_ref is None:
# Convert points to absolute coordinates
points[:,0] += points_geo_ref.xllcorner
points[:,1] += points_geo_ref.yllcorner
# Make points relative to primary geo reference
points[:,0] -= self.xllcorner
points[:,1] -= self.yllcorner
if is_list:
points = points.tolist()
return points
def is_absolute(self):
"""Return True if xllcorner==yllcorner==0 indicating that points
in question are absolute.
"""
# FIXME(Ole): It is unfortunate that decision about whether points
# are absolute or not lies with the georeference object. Ross pointed this out.
# Moreover, this little function is responsible for a large fraction of the time
# used in data fitting (somewhere around 40 - 50%).
# This was due to the repeated calls to allclose.
# With the flag method fitting is much faster (18 Mar 2009).
# FIXME(Ole): HACK to be able to reuse data already cached (18 Mar 2009).
# Remove at some point
if not hasattr(self, 'absolute'):
self.absolute = num.allclose([self.xllcorner, self.yllcorner], 0)
# Return absolute flag
return self.absolute
def get_absolute(self, points):
"""Given a set of points geo referenced to this instance,
return the points as absolute values.
"""
# remember if we got a list
is_list = isinstance(points, list)
points = ensure_numeric(points, num.float)
if len(points.shape) == 1:
# One point has been passed
msg = 'Single point must have two elements'
if not len(points) == 2:
raise ShapeError, msg
msg = 'Input must be an N x 2 array or list of (x,y) values. '
msg += 'I got an %d x %d array' %points.shape
if not points.shape[1] == 2:
raise ShapeError, msg
# Add geo ref to points
if not self.is_absolute():
points = copy.copy(points) # Don't destroy input
points[:,0] += self.xllcorner
points[:,1] += self.yllcorner
if is_list:
points = points.tolist()
return points
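# Editor's note (worked example, values are made up; it assumes ensure_numeric
# and the ANUGA error classes used above are importable in this environment):
# with a non-trivial offset the method simply adds the corner coordinates, e.g.
#   ref = Geo_reference(zone=56, xllcorner=100.0, yllcorner=200.0)
#   ref.get_absolute([[1.0, 2.0], [3.0, 4.0]])  # -> [[101.0, 202.0], [103.0, 204.0]]
# whereas with the default (0, 0) corner the points are returned unchanged.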
##
# @brief Convert points to relative measurement.
# @param points Points to convert to relative measurements.
# @return A set of points relative to the geo_reference instance.
def get_relative(self, points):
"""Given a set of points in absolute UTM coordinates,
make them relative to this geo_reference instance,
return the points as relative values.
This is the inverse of get_absolute.
"""
# remember if we got a list
is_list = isinstance(points, list)
points = ensure_numeric(points, num.float)
if len(points.shape) == 1:
#One point has been passed
msg = 'Single point must have two elements'
if not len(points) == 2:
raise ShapeError, msg
if not points.shape[1] == 2:
msg = ('Input must be an N x 2 array or list of (x,y) values. '
'I got an %d x %d array' % points.shape)
raise ShapeError, msg
# Subtract geo ref from points
if not self.is_absolute():
points = copy.copy(points) # Don't destroy input
points[:,0] -= self.xllcorner
points[:,1] -= self.yllcorner
if is_list:
points = points.tolist()
return points
##
# @brief ??
# @param other ??
def reconcile_zones(self, other):
if other is None:
other = Geo_reference()
if (self.zone == other.zone or
self.zone == DEFAULT_ZONE and
other.zone == DEFAULT_ZONE):
pass
elif self.zone == DEFAULT_ZONE:
self.zone = other.zone
elif other.zone == DEFAULT_ZONE:
other.zone = self.zone
else:
msg = ('Geospatial data must be in the same '
'ZONE to allow reconciliation. I got zone %d and %d'
% (self.zone, other.zone))
raise ANUGAError, msg
#def easting_northing2geo_reffed_point(self, x, y):
# return [x-self.xllcorner, y - self.xllcorner]
#def easting_northing2geo_reffed_points(self, x, y):
# return [x-self.xllcorner, y - self.xllcorner]
##
# @brief Get origin of this geo_reference.
# @return (zone, xllcorner, yllcorner).
def get_origin(self):
return (self.zone, self.xllcorner, self.yllcorner)
##
# @brief Get a string representation of this geo_reference instance.
def __repr__(self):
return ('(zone=%i easting=%f, northing=%f)'
% (self.zone, self.xllcorner, self.yllcorner))
##
# @brief Compare two geo_reference instances.
# @param self This geo_reference instance.
# @param other Another geo_reference instance to compare against.
# @return 0 if instances have the same attributes, else 1.
# @note Attributes are: zone, xllcorner, yllcorner.
def __cmp__(self, other):
# FIXME (DSG) add a tolerence
if other is None:
return 1
cmp = 0
if not (self.xllcorner == other.xllcorner):
cmp = 1
if not (self.yllcorner == other.yllcorner):
cmp = 1
if not (self.zone == other.zone):
cmp = 1
return cmp
##
# @brief Write a geo_reference to a NetCDF file (usually SWW).
# @param origin A georef instance or parameters to create a georef instance.
# @param outfile Path to file to write.
# @return A normalized geo_reference.
def write_NetCDF_georeference(origin, outfile):
"""Write georeference info to a netcdf file, usually sww.
The origin can be a georef instance or parameters for a geo_ref instance
outfile is the name of the file to be written to.
"""
geo_ref = ensure_geo_reference(origin)
geo_ref.write_NetCDF(outfile)
return geo_ref
##
# @brief Convert an object to a georeference instance.
# @param origin A georef instance or (zone, xllcorner, yllcorner)
# @return A georef object, or None if 'origin' was None.
def ensure_geo_reference(origin):
"""
Given a list/tuple of zone, xllcorner and yllcorner of a geo-ref object,
return a geo ref object.
If the origin is None, return None, so calling this function doesn't
effect code logic
"""
if isinstance(origin, Geo_reference):
geo_ref = origin
elif origin is None:
geo_ref = None
else:
geo_ref = apply(Geo_reference, origin)
return geo_ref
#-----------------------------------------------------------------------
if __name__ == "__main__":
pass
| mazafrav/JdeRobot | src/drivers/MAVLinkServer/modules/lib/ANUGA/geo_reference.py | Python | gpl-3.0 | 16,869 | [
"NetCDF"
] | d6e8d22904bc66d192ae956b500dae4e137b150144ee82cf6759d0b5df89489e |
#!/usr/bin/env python3
import re
import os
import sys
import json
import argparse
import upload_bot
import download_game
import compare_bots
from json.decoder import JSONDecodeError
"""client.py: Client for interacting with the Halite II servers."""
__author__ = "Two Sigma"
__copyright__ = "Copyright 2017, Two Sigma"
__credits__ = ["David M. Li", "Jaques Clapauch"]
__date__ = "August 1, 2017"
__email__ = "[email protected]"
__license__ = "MIT"
__status__ = "Production"
__version__ = "1.5"
URI_HALITE_API = 'http://api.halite.io/v1/api'
URI_API_CREATE_BOT = URI_HALITE_API + "/user/{}/bot"
URI_API_EXISTING_BOT = URI_HALITE_API + "/user/{}/bot/{}"
URI_HALITE_WEB_PAGE = 'http://halite.io'
URI_WEB_API_KEY = "{}/user/settings".format(URI_HALITE_WEB_PAGE)
SUCCESS = 200
FIRST_BOT_ID = 0
BOT_FILE_KEY = 'botFile'
API_KEY_HEADER = 'X-API-KEY'
AUTH_MODE = 'auth'
GYM_MODE = 'gym'
REPLAY_MODE = 'replay'
BOT_MODE = 'bot'
MODES = str({AUTH_MODE, GYM_MODE, REPLAY_MODE, BOT_MODE})
REPLAY_MODE_DATE = 'date'
REPLAY_MODE_USER = 'user'
class Config:
_key_example = "<username>:<key>"
_key_delimiter = ':'
_user_position = 0
_user_key = 'user'
_api_key_key = 'api_key'
def __init__(self, auth=None):
self._config_folder = self._get_config_folder_path()
self._auth_file = self._get_auth_file_path()
if not os.path.exists(self._config_folder):
os.makedirs(self._config_folder)
if auth:
auth = self._parse_api_key(auth)
self._write_auth(auth)
else:
auth = self._get_auth_json()
self.api_key = auth[self._api_key_key]
self.user_id = auth[self._user_key]
@staticmethod
def _get_config_folder_path():
"""
Returns system specific folder for config
:return: %LOCALAPPDATA%/Halite if windows ~/.config/hlt otherwise
"""
return "{}/Halite".format(os.getenv('LOCALAPPDATA')) if sys.platform == 'win32' \
else "{}/.config/hlt".format(os.path.expanduser("~"))
@staticmethod
def _get_auth_file_path():
"""
:return: Auth file location where configs will be written to in JSON format
"""
return "{}/auth".format(Config._get_config_folder_path())
@staticmethod
def auth_exists():
"""
Whether the auth file has been created already
:return: True if exists, False otherwise
"""
return os.path.isfile(Config._get_auth_file_path())
def _write_auth(self, data):
"""
Writes to auth file the desired data. Expected that the input be in JSON format.
:param data: Data to be written to the auth file
:return: Nothing
"""
config_file = open(self._auth_file, 'w')
config_file.writelines(json.dumps(data))
def _get_auth_json(self):
"""
Returns the auth JSON object as acquired from the auth file. If none, throws exception asking the user
to first authenticate.
:return: The JSON object with the auth information
"""
if not self.auth_exists():
raise ValueError("CLI not authenticated. Please run `client.py --auth` first.")
with open(self._auth_file) as file:
config_contents = file.read()
try:
return json.loads(config_contents)
except (TypeError, JSONDecodeError):
raise ValueError("Secret formatting has been mangled. Try re-authenticating (`client.py --auth`).")
@staticmethod
def _parse_api_key(api_key):
"""
Determines if the API key supplied is valid via regex. Returns the parsed contents in a dict (user and key)
:param api_key: The string containing the API key
:return: A dict containing the parse contents of the api key (user and key)
"""
config_result = {}
key_regex = re.compile("\d+:[0-9a-fA-F]{32}")
if not api_key or not re.match(key_regex, api_key):
raise ValueError("Malformed API Key. Expected {}".format(Config._key_example))
config_result[Config._api_key_key] = api_key
config_result[Config._user_key] = api_key.split(Config._key_delimiter)[Config._user_position]
return config_result
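# Editor's note (illustrative, the key below is fabricated): a well-formed key
# has the form "<user id>:<32 hex chars>", so
#   Config._parse_api_key('1234:0123456789abcdef0123456789abcdef')
# returns {'api_key': '1234:0123456789abcdef0123456789abcdef', 'user': '1234'},
# while anything not matching the regex raises ValueError.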
def __str__(self):
return "* id:\t\t{}{}* api_key:\t{}".format(self.user_id, os.linesep, self.api_key)
def __repr__(self):
return self.__str__()
def _parse_arguments():
"""
Simple argparser
:return: parsed arguments if any. Prints help otherwise
"""
parser = argparse.ArgumentParser(description="Halite 2.0 CLI")
# .Modes
subparser = parser.add_subparsers(dest='mode', metavar=MODES)
# .Modes.Auth
auth_parser = subparser.add_parser(AUTH_MODE, help='Authorize client to make requests on your behalf')
auth_parser.add_argument('-m', '--metadata', action='store_true', help="Print auth metadata")
# .Modes.Bot
bot_parser = subparser.add_parser('bot', help='Actions associated with a bot')
bot_parser.add_argument('-b', '--bot-path', dest='bot_path', action='store', type=str, required=True,
help="The path wherein your bot zip lives.")
# .Modes.Gym
bot_parser = subparser.add_parser('gym', help='Train your Bot(s)!')
bot_parser.add_argument('-r', '--run-command', dest='run_commands', action='append', type=str, required=True,
help="The command to run a specific bot. You may pass either 2 or 4 of these arguments")
bot_parser.add_argument('-b', '--binary', dest='halite_binary', action='store', type=str, required=True,
help="The halite executable/binary path, used to run the games")
bot_parser.add_argument('-W', '--width', dest='map_width', action='store', type=int, default=240,
help="The map width the simulations will run in")
bot_parser.add_argument('-H', '--height', dest='map_height', action='store', type=int, default=160,
help="The map height the simulations will run in")
bot_parser.add_argument('-i', '--iterations', dest='iterations', action='store', type=int, default=100,
help="Number of games to be run")
# .Modes.Replay
replay_parser = subparser.add_parser('replay', help='Actions associated with replay files')
# .Modes.Replay.Modes
replay_subparser = replay_parser.add_subparsers(dest='replay_mode', metavar='{date, user}')
# .Modes.Replay.Modes.User
replay_user_parser = replay_subparser.add_parser(REPLAY_MODE_USER, help='Retrieve replays based on a specified user')
replay_user_parser.add_argument('-i', '--id', action='store', dest='user_id',
help="Fetch recent replay files apposite a user. "
"Enter a user id to fetch that specific"
"user's files; leave blank to fetch yours")
replay_user_parser.add_argument('-l', '--limit', action='store', dest='limit', type=int, default=250,
help='Number of replays to fetch')
replay_user_parser.add_argument('-d', '--destination', dest='destination', action='store', type=str, required=True,
help="In which folder to store all resulting replay files.")
# .Modes.Replay.Modes.Date
replay_regex_parser = replay_subparser.add_parser(REPLAY_MODE_DATE, help='Retrieve replays based on regex')
replay_regex_parser.add_argument('-t', '--date', action='store', type=str, dest='date', required=True,
help="Fetch replay files matching the specified date. To fetch a day's files user"
"the YYYYMMDD format.")
replay_regex_parser.add_argument('-a', '--all', action='store_true', default=False,
help="Whether to retrieve all files. Omit for only Gold and higher.")
replay_regex_parser.add_argument('-d', '--destination', dest='destination', action='store', type=str, required=True,
help="In which folder to store all resulting replay files.")
if len(sys.argv) < 2:
parser.print_help()
return parser.parse_args()
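# Example invocations (editorial sketch derived from the sub-parsers above,
# assuming the script is invoked as client.py):
#
#   python client.py auth
#   python client.py bot -b my_bot.zip
#   python client.py gym -r "python3 MyBot.py" -r "python3 OldBot.py" -b ./halite -i 100
#   python client.py replay date -t 20171023 -d ./replays
#   python client.py replay user -l 50 -d ./replays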
def authorize():
"""
Create the config for the user. This will ask the user to visit a webpage and paste the api key encountered.
:return: Nothing
"""
api_key = input("Please go to {} to obtain an api_key, and paste here: ".format(URI_WEB_API_KEY))
Config(api_key)
print("Successfully set up user account")
def main():
"""
Main function gets the args input and determines which method to call to handle. Handles exceptions from
malformed input.
:return: Nothing
"""
try:
args = _parse_arguments()
if args.mode == AUTH_MODE:
if not (args.metadata and Config.auth_exists()):
authorize()
if args.metadata:
print(Config())
elif args.mode == BOT_MODE:
upload_bot.upload(args.bot_path)
elif args.mode == REPLAY_MODE:
download_game.download(args.replay_mode, args.destination,
getattr(args, 'date', None), getattr(args, 'all', None),
Config().user_id if Config.auth_exists() else None, getattr(args, 'user_id', None),
getattr(args, 'limit', None))
elif args.mode == GYM_MODE:
compare_bots.play_games(args.halite_binary,
args.map_width, args.map_height,
args.run_commands, args.iterations)
except (IndexError, TypeError, ValueError, IOError, FileNotFoundError) as err:
sys.stderr.write(str(err) + os.linesep)
exit(-1)
if __name__ == "__main__":
main()
| AlexandreOuellet/halite-bot | client.py | Python | mit | 9,947 | [
"VisIt"
] | 14df7ebbc9f5e2777688cb4bc4adde708fa76c2f704e224b355b388cdbe2f37b |
from .. import Tractography
from .. import (
tractography_from_trackvis_file, tractography_to_trackvis_file,
tractography_from_files, tractography_to_file
)
try:
VTK = True
from ..vtkInterface import (
tractography_from_vtk_files, tractography_to_vtk_file,
)
except ImportError:
VTK = False
from nose.tools import with_setup
import copy
from itertools import chain
from numpy import all, eye, ones, allclose
from numpy.random import randint, randn
from numpy.testing import assert_array_equal
dimensions = None
tracts = None
tracts_data = None
tractography = None
max_tract_length = 50
n_tracts = 50
def equal_tracts(a, b):
for t1, t2 in zip(a, b):
if not (len(t1) == len(t2) and allclose(t1, t2)):
return False
return True
def equal_tracts_data(a, b):
if set(a.keys()) != set(b.keys()):
return False
for k in a.keys():
v1 = a[k]
v2 = b[k]
if isinstance(v1, str) and isinstance(v2, str) and v1 == v2:
continue
elif not isinstance(v1, str) and not isinstance(v2, str):
for t1, t2 in zip(a[k], b[k]):
if not (len(t1) == len(t2) and allclose(t1, t2)):
return False
else:
return False
return True
def equal_tractography(a, b):
return (
equal_tracts(a.tracts(), b.tracts()) and
equal_tracts_data(a.tracts_data(), b.tracts_data())
)
def setup(*args, **kwargs):
global dimensions
global tracts
global tracts_data
global tractography
if 'test_active_data' in kwargs:
test_active_data = kwargs['test_active_data']
else:
test_active_data = False
dimensions = [(randint(5, max_tract_length), 3) for _ in range(n_tracts)]
tracts = [randn(*d) for d in dimensions]
tracts_data = {
'a%d' % i: [
randn(d[0], k)
for d in dimensions
]
for i, k in zip(range(4), randint(1, 3, 9))
}
if test_active_data:
mask = 0
for k, v in tracts_data.items():
if mask & (1 + 2 + 4):
break
if v[0].shape[1] == 1 and mask & 1 == 0:
tracts_data['ActiveScalars'] = k
mask |= 1
if v[0].shape[1] == 3 and mask & 2 == 0:
tracts_data['ActiveVectors'] = k
mask |= 2
if v[0].shape[1] == 9 and mask & 4 == 0:
tracts_data['ActiveTensors'] = k
mask |= 4
tractography = Tractography(tracts, tracts_data)
@with_setup(setup)
def test_creation():
assert(equal_tracts(tractography.tracts(), tracts))
assert(equal_tracts_data(tractography.tracts_data(), tracts_data))
assert(not tractography.are_tracts_subsampled())
assert(not tractography.are_tracts_filtered())
@with_setup(setup)
def test_subsample_tracts():
tractography.subsample_tracts(5)
assert(all(tract.shape[0] <= 5 for tract in tractography.tracts()))
assert(
all(
all(values.shape[0] <= 5 for values in value)
for value in tractography.tracts_data().values()
)
)
assert(not equal_tracts(tractography.tracts(), tractography.original_tracts()))
assert(equal_tracts(tracts, tractography.original_tracts()))
assert(not equal_tracts_data(tractography.tracts_data(), tractography.original_tracts_data()))
assert(equal_tracts_data(tracts_data, tractography.original_tracts_data()))
assert(tractography.are_tracts_subsampled())
tractography.unsubsample_tracts()
assert(equal_tracts(tractography.tracts(), tracts))
assert(equal_tracts_data(tractography.tracts_data(), tracts_data))
assert(not tractography.are_tracts_subsampled())
assert(not tractography.are_tracts_filtered())
@with_setup(setup)
def test_append():
old_tracts = copy.deepcopy(tractography.tracts())
new_data = {}
for k, v in tracts_data.items():
new_data[k] = v + v
tractography.append(tracts, tracts_data)
assert(equal_tracts(tractography.tracts(), chain(old_tracts, old_tracts)))
assert(equal_tracts_data(tractography.tracts_data(), new_data))
if VTK:
@with_setup(setup)
def test_saveload_vtk():
import tempfile
import os
fname = tempfile.mkstemp('.vtk')[1]
tractography_to_vtk_file(fname, tractography)
new_tractography = tractography_from_vtk_files(fname)
assert(equal_tracts(tractography.tracts(), new_tractography.tracts()))
assert(equal_tracts_data(
tractography.tracts_data(),
new_tractography.tracts_data())
)
os.remove(fname)
@with_setup(setup)
def test_saveload_vtp():
import tempfile
import os
fname = tempfile.mkstemp('.vtp')[1]
tractography_to_vtk_file(fname, tractography)
new_tractography = tractography_from_vtk_files(fname)
assert(equal_tracts(tractography.tracts(), new_tractography.tracts()))
assert(equal_tracts_data(tractography.tracts_data(), new_tractography.tracts_data()))
os.remove(fname)
@with_setup(setup)
def test_saveload_trk():
import tempfile
import os
fname = tempfile.mkstemp('.trk')[1]
tract_data_new = {
k: v
for k, v in tractography.tracts_data().items()
if (v[0].ndim == 1) or (v[0].ndim == 2 and v[0].shape[1] == 1)
}
tractography_ = Tractography(tractography.tracts(), tract_data_new)
tractography_to_trackvis_file(
fname, tractography_,
affine=eye(4), image_dimensions=ones(3)
)
new_tractography = tractography_from_trackvis_file(fname)
assert(equal_tracts(tractography_.tracts(), new_tractography.tracts()))
assert(equal_tracts_data(tractography_.tracts_data(), new_tractography.tracts_data()))
assert_array_equal(eye(4), new_tractography.affine)
assert_array_equal(ones(3), new_tractography.image_dims)
os.remove(fname)
@with_setup(setup)
def test_saveload():
import tempfile
import os
extensions = ('.trk',)
if VTK:
extensions += ('.vtk', '.vtp')
for ext in extensions:
fname = tempfile.mkstemp(ext)[1]
kwargs = {}
if ext == '.trk':
kwargs['affine'] = eye(4)
kwargs['image_dimensions'] = ones(3)
tract_data_new = {
k: v
for k, v in tractography.tracts_data().items()
if (v[0].ndim == 1) or (v[0].ndim == 2 and v[0].shape[1] == 1)
}
tractography_ = Tractography(tractography.tracts(), tract_data_new)
else:
tractography_ = tractography
tractography_to_file(
fname, tractography_,
**kwargs
)
new_tractography = tractography_from_files(fname)
assert(equal_tracts(tractography_.tracts(), new_tractography.tracts()))
assert(equal_tracts_data(
tractography_.tracts_data(),
new_tractography.tracts_data()
))
os.remove(fname)
| demianw/tract_querier | tract_querier/tractography/tests/test_tractography.py | Python | bsd-3-clause | 7,092 | [
"VTK"
] | af389d7a6a44c0ad8e1e773136c40b3c7651c094afdc3329daaf22964f582159 |
from uuid import uuid4
from Firefly import logging, scheduler
from Firefly.components.virtual_devices import AUTHOR
from Firefly.const import (COMMAND_UPDATE, DEVICE_TYPE_THERMOSTAT, LEVEL)
from Firefly.helpers.action import Command
from Firefly.helpers.device.device import Device
from Firefly.helpers.metadata.metadata import action_button_group, action_button_object, action_level, action_text
# TODO(zpriddy): Add more delayed setters to help with rate limits.
TITLE = 'Nest Thermostat'
DEVICE_TYPE = DEVICE_TYPE_THERMOSTAT
AUTHOR = AUTHOR
COMMANDS = [COMMAND_UPDATE, LEVEL, 'temperature', 'mode', 'away', 'home']
REQUESTS = ['temperature', 'humidity', 'mode', 'away', 'target', 'last_refresh']
INITIAL_VALUES = {
'_temperature': -1,
'_humidity': -1,
'_target': -1,
'_mode': 'unknown',
'_away': 'unknown',
'_last_refresh': -1,
}
MODE_LIST = ['off', 'eco', 'cool', 'heat', 'heat-cool']
def Setup(firefly, package, **kwargs):
logging.message('Entering %s setup' % TITLE)
thermostat = Thermostat(firefly, package, **kwargs)
firefly.install_component(thermostat)
refresh_command = Command('service_firebase', 'nest', 'refresh')
firefly.send_command(refresh_command)
return thermostat.id
class Thermostat(Device):
""" Nest Thermostat device.
"""
def __init__(self, firefly, package, **kwargs):
if kwargs.get('initial_values'):
INITIAL_VALUES.update(kwargs.get('initial_values'))
kwargs['initial_values'] = INITIAL_VALUES
super().__init__(firefly, package, TITLE, AUTHOR, COMMANDS, REQUESTS, DEVICE_TYPE, **kwargs)
self.__dict__.update(kwargs['initial_values'])
self.thermostat = kwargs.get('thermostat')
self.add_command(COMMAND_UPDATE, self.update_thermostat)
self.add_command('temperature', self.set_temperature)
self.add_command('mode', self.set_mode)
self.add_command('away', self.set_away)
self.add_command('home', self.set_home)
self.add_request('temperature', self.get_temperature)
self.add_request('target', self.get_target)
self.add_request('humidity', self.get_humidity)
self.add_request('mode', self.get_mode)
self.add_request('away', self.get_away)
self.add_request('last_refresh', self.get_last_refresh)
# self.add_action('temperature', metaSlider(min=50, max=90, request_param='target', set_command='temperature', command_param='temperature', title='Target Temperature'))
self.add_action('current_temperature', action_text(title='Current Temperature', context='Current temperature', request='temperature', primary=True))
# eco_button = metaButtonObject('Eco', 'mode', 'mode', 'eco')
# heat_button = metaButtonObject('Heat', 'mode', 'mode', 'heat')
# cool_button = metaButtonObject('Cool', 'mode', 'mode', 'cool')
# off_button = metaButtonObject('Off', 'mode', 'mode', 'off')
# TODO: Enable range when supported
# range_button = metaButtonObject('Range ', 'mode', 'mode', 'off')
# buttons = [eco_button, cool_button, heat_button, off_button]
# self.add_action('mode_buttons', metaButtons(title='AC Modes', buttons=buttons, request_val='mode', context='Change AC Mode'))
# Buttons for Home/Away
# home_button = metaButtonObject('Home', 'away', 'away', 'home')
# away_button = metaButtonObject('Away', 'away', 'away', 'away')
# self.add_action('home_away_buttons', metaButtons(title='Home Mode (nest)', buttons=[home_button, away_button], request_val='away', context='Set Nest to Home/Away'))
# New Buttons
eco_button = action_button_object('Eco', 'mode', 'mode', 'eco', 'eco')
heat_button = action_button_object('Heat', 'mode', 'mode', 'heat', 'heat')
cool_button = action_button_object('Cool', 'mode', 'mode', 'cool', 'cool')
off_button = action_button_object('Off', 'mode', 'mode', 'off', 'off')
self.add_action('mode_buttons', action_button_group(title='Set Mode', request='mode', buttons=[cool_button, heat_button, eco_button, off_button]))
home_button = action_button_object('Home', 'away', 'away', 'home', 'home')
away_button = action_button_object('Away', 'away', 'away', 'away', 'away')
self.add_action('home_away_buttons', action_button_group(title='Set Home/Away', request='away', buttons=[home_button, away_button]))
self.add_action('set_temperature', action_level(title='Set Temperature', command='temperature', command_prop='temperature', request='target', context='Set target temperature'))
self._alexa_export = False
self.timer_id = str(uuid4())
def get_temperature(self, **kwargs):
return self.temperature
def get_target(self, **kwargs):
return self.target
def get_humidity(self, **kwargs):
return self.humidity
def get_mode(self, **kwargs):
return self.mode
def get_away(self, **kwargs):
return self.away
def get_last_refresh(self, **kwargs):
return self._last_refresh
def set_away(self, **kwargs):
'''Set the thermostat to 'away' by default; if 'away' is passed in kwargs, use that value ('away' or 'home').
'''
away = kwargs.get('away')
if away is None:
away = 'away'
if away not in ['away', 'home']:
return
self.away = away
def set_home(self, **kwargs):
self.away = 'home'
def set_temperature(self, **kwargs):
t = kwargs.get('temperature')
if t is None:
return
try:
t = int(t)
except (TypeError, ValueError):
return
self.temperature = t
def set_mode(self, **kwargs):
m = kwargs.get('mode')
if m is None:
logging.error('no mode provided')
return
m = m.lower()
if m not in MODE_LIST:
logging.error('Invalid Mode')
return
self.mode = m
def update_thermostat(self, **kwargs):
thermostat = kwargs.get('thermostat')
logging.info('[NEST] updating thermostat: %s' % str(thermostat))
self._last_refresh = self.firefly.location.now.timestamp()
if thermostat is not None:
self.thermostat = thermostat
@property
def temperature(self):
if self.thermostat:
self._temperature = self.thermostat.temperature
return self._temperature
@temperature.setter
def temperature(self, value):
if self.thermostat:
self._temperature = value
scheduler.runInS(5, self.set_temperature_delayed, job_id=self.timer_id, temperature=value)
else:
logging.error('thermostat not set yet')
def set_temperature_delayed(self, temperature=None):
"""Set the mode after a 5 second delay. This helps with rate limiting.
Args:
mode: mode to be set to.
"""
if temperature is not None:
try:
self.thermostat.temperature = temperature
except Exception as e:
logging.error('Error setting thermostat temperature: %s' % e)
@property
def humidity(self):
if self.thermostat:
self._humidity = self.thermostat.humidity
return self._humidity
@property
def mode(self):
if self.thermostat:
self._mode = self.thermostat.mode
return self._mode
@mode.setter
def mode(self, value):
if self.thermostat:
self._mode = value
scheduler.runInS(5, self.set_mode_delayed, job_id=self.timer_id, mode=value)
else:
logging.error('thermostat not set yet')
def set_mode_delayed(self, mode=None):
"""Set the mode after a 5 second delay. This helps with rate limiting.
Args:
mode: mode to be set to.
"""
if mode is not None:
try:
self.thermostat.mode = mode
except Exception as e:
logging.error('Error setting thermostat mode: %s' % e)
@property
def away(self):
if self.thermostat:
self._away = self.thermostat.structure.away
return self._away
@away.setter
def away(self, value):
if self.thermostat:
self.thermostat.structure.away = value
self._away = value
else:
logging.error('thermostat not set yet')
@property
def target(self):
if self.thermostat:
self._target = self.thermostat.target
return self._target
| Firefly-Automation/Firefly | Firefly/components/nest/thermostat.py | Python | apache-2.0 | 7,969 | [
"Firefly"
] | 73cf85cd38f9bd9de57ecb238e578d562262d4a014ba0c365b526003368486e2 |
"""
The cutoffs module gives a few different options for smoothly sending the GP
kernel to zero near the boundary of the cutoff sphere.
"""
from math import cos, sin, pi
from numba import njit
@njit
def hard_cutoff(r_cut: float, ri: float, ci: float):
"""A hard cutoff that assigns a value of 1 to all interatomic distances.
Args:
r_cut (float): Cutoff value (in angstrom).
ri (float): Interatomic distance.
ci (float): Cartesian coordinate divided by the distance.
Returns:
(float, float): Cutoff value and its derivative.
"""
return 1, 0
@njit
def quadratic_cutoff_bound(r_cut: float, ri: float, ci: float):
"""A quadratic cutoff that goes to zero smoothly at the cutoff boundary.
Args:
r_cut (float): Cutoff value (in angstrom).
ri (float): Interatomic distance.
ci (float): Cartesian coordinate divided by the distance.
Returns:
(float, float): Cutoff value and its derivative.
"""
if r_cut > ri:
rdiff = r_cut - ri
fi = rdiff * rdiff
fdi = 2 * rdiff * ci
else:
fi = 0
fdi = 0
return fi, fdi
@njit
def quadratic_cutoff(r_cut: float, ri: float, ci: float):
"""A quadratic cutoff that goes to zero smoothly at the cutoff boundary.
Args:
r_cut (float): Cutoff value (in angstrom).
ri (float): Interatomic distance.
ci (float): Cartesian coordinate divided by the distance.
Returns:
(float, float): Cutoff value and its derivative.
"""
rdiff = r_cut - ri
fi = rdiff * rdiff
fdi = 2 * rdiff * ci
return fi, fdi
@njit
def cubic_cutoff(r_cut: float, ri: float, ci: float):
"""A cubic cutoff that goes to zero smoothly at the cutoff boundary.
Args:
r_cut (float): Cutoff value (in angstrom).
ri (float): Interatomic distance.
ci (float): Cartesian coordinate divided by the distance.
Returns:
(float, float): Cutoff value and its derivative.
"""
rdiff = r_cut - ri
fi = rdiff * rdiff * rdiff
fdi = 3 * rdiff * rdiff * ci
return fi, fdi
@njit
def cosine_cutoff(r_cut: float, ri: float, ci: float, d: float = 1):
"""A cosine cutoff that returns 1 up to r_cut - d, and assigns a cosine
envelope to values of r between r_cut - d and r_cut. Based on Eq. 24 of
Albert P. Bartók and Gábor Csányi. "Gaussian approximation potentials: A
brief tutorial introduction." International Journal of Quantum Chemistry
115.16 (2015): 1051-1057.
Args:
r_cut (float): Cutoff value (in angstrom).
ri (float): Interatomic distance.
ci (float): Cartesian coordinate divided by the distance.
Returns:
(float, float): Cutoff value and its derivative.
"""
if ri > r_cut - d:
fi = (1 / 2) * (cos(pi * (ri - r_cut + d) / d) + 1)
fdi = (pi / (2 * d)) * sin(pi * (r_cut - ri) / d) * ci
else:
fi = 1
fdi = 0
return fi, fdi
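# Editorial example (not part of the flare source): a quick numerical check of
# two of the cutoffs above, using an assumed cutoff of 5 angstrom.
if __name__ == "__main__":
    # quadratic_cutoff: rdiff = 5 - 4 = 1, so fi = 1.0 and fdi = 2 * 1 * 1.0 = 2.0
    print(quadratic_cutoff(5.0, 4.0, 1.0))  # (1.0, 2.0)
    # cosine_cutoff: ri = 4.5 is inside the envelope (ri > r_cut - d), giving
    # fi = (cos(pi/2) + 1) / 2 = 0.5 and fdi = (pi/2) * sin(pi/2) * 1.0 ~ 1.571
    print(cosine_cutoff(5.0, 4.5, 1.0))  # (0.5, 1.5707...)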
| mir-group/flare | flare/kernels/cutoffs.py | Python | mit | 3,007 | [
"Gaussian"
] | f389da8e383fff900230b24b957ea1f303b7dae7a3d4ce3b2111c48ac96c08df |
import sys
import sourceM
import argparse
Usage = """
Welcome to MotionMeerkat!
Automated capture of motion frames from a video file.
For help, see the wiki: https://github.com/bw4sz/OpenCV_HummingbirdsMotion/wiki
Default values for parameters are in parenthesis. To select default hit enter.
Affirmative answers to questions are y, negative answers n
Please do not use quotes for any responses.
"""
def arguments(self):
#If there were system argument
self.parser = argparse.ArgumentParser()
#Read in system arguments if they exist
if len(sys.argv)< 2:
print Usage
else:
self.parser.add_argument("--runtype", help="Batch or single file",default='file')
self.parser.add_argument("--batchpool", help="run directory of videos",type=str)
self.parser.add_argument("--inDEST", help="path of single video",type=str,default='/home/bw4sz/bisque/modules/MotionMeerkat/MotionMeerkat/PlotwatcherTest.tlv')
self.parser.add_argument("--fileD", help="output directory",default="")
self.parser.add_argument("--adapt", help="Adaptive background averaging",action='store_true',default=False)
self.parser.add_argument("--accAvg", help="Fixed background averaging rate",default=0.35,type=float)
self.parser.add_argument("--frameHIT", help="expected percentage of motion frames",default=0.1,type=float)
self.parser.add_argument("--floorvalue", help="minimum background averaging",default=0.01,type=float)
self.parser.add_argument("--threshT", help="Threshold of movement",default=30,type=int)
self.parser.add_argument("--minSIZE", help="Minimum size of contour",default=0.1,type=float)
self.parser.add_argument("--burnin", help="Delay time",default=0,type=int)
self.parser.add_argument("--scan", help="Scan one of every X frames for motion",default=0,type=int)
self.parser.add_argument("--frameSET", help="Set frame_rate?",action='store_true',default=False)
self.parser.add_argument("--plotwatcher", help="Camera was a plotwatcher?",action="store_true",default=False)
self.parser.add_argument("--frame_rate", help="frames per second",default=0)
self.parser.add_argument("--moghistory", help="Length of history for MOG background detector",default=1000,type=int)
self.parser.add_argument("--subMethod", help="Accumulated Averaging [Acc] or Mixture of Gaussian [MOG] background method",default='Acc',type=str)
self.parser.add_argument("--mogvariance", help="Variance in MOG to select background",default=16,type=int)
self.parser.add_argument("--set_ROI", help="Set region of interest?",action='store_true',default=False)
self.parser.add_argument("--ROI_include", help="include or exclude?",default="exclude")
self.parser.add_argument("--set_areacounter", help="Set region to count area",action="store_true",default=False)
self.parser.add_argument("--makeVID", help="Output images as 'frames','video','both', 'none' ?",default='frames',type=str)
self.args = self.parser.parse_args(namespace=self)
if not self.runtype=="pictures":
self.pictures=False
self.segment = False
print "\n"
print "\n"
if(len(sys.argv)< 2):
#Batch or single file
self.runtype=raw_input("'batch' run, single video 'file' or folder of ordered 'pictures'? (file):\n")
if not self.runtype: self.runtype="file"
if(self.runtype=="file"):
self.inDEST=sourceM.ask_file()
self.pictures=False
if(self.runtype=="batch"):
self.batchpool=raw_input("Enter folder containing videos:\n")
self.pictures=False
if(self.runtype=="pictures"):
self.inDEST=raw_input("Enter folder containing pictures\n Please note that filenames need to be chronological order \n")
self.pictures=True
#Destination of file
self.fileD=raw_input("File Destination Folder (C:\MotionMeerkat):\n")
if not self.fileD: self.fileD = str("C:\MotionMeerkat")
#Sensitivity to movement
self.accAvg=sourceM.ask_acc()
if not self.accAvg: self.accAvg=0.35
#thresholding, a way of differentiating the background from movement, higher values (0-255) disregard more motion, lower values make the model more sensitive to motion
self.threshT=raw_input("Threshold for movement tolerance\nranging from 0 [all] to 255 [no movement] (30):\n")
if not self.threshT: self.threshT = 30
else: self.threshT=float(self.threshT)
#minimum size of contour object
self.minSIZE=raw_input("Minimum motion contour size (0.2):\n")
if not self.minSIZE: self.minSIZE = 0.2
else: self.minSIZE=float(self.minSIZE)
self.advanced= 'y'==raw_input("Set advanced options? (n) :\n")
if self.advanced:
#Set background subtractor
self.subMethod=raw_input("Accumulated Averaging [Acc] or Mixture of Gaussian [MOG] background method? (Acc) :\n")
if not self.subMethod: self.subMethod="Acc"
if self.subMethod=="Acc":
#Should accAVG be adapted every 10minutes based on an estimated hitrate
self.adapt= 'y'==raw_input("Adapt the motion sensitivity based on hitrate? (n) :\n")
if self.adapt:
self.accAvg=sourceM.ask_acc()
if not self.accAvg: self.accAvg = 0.35
#Hitrate, the expected % of frames with motion per 10 minutes - this is a helpful adaptive setting that helps tune the model; it will be multiplied by the frame_rate
self.frameHIT=raw_input("Expected percentage of frames with motion (decimal 0.01):\n")
if not self.frameHIT: self.frameHIT = 0.01
else: self.frameHIT=float(self.frameHIT)
#Floor value: if adapt = TRUE, the minimum accAVG allowed. If this is unset and the video is particularly still, the algorithm paradoxically spits out a lot of frames, because it's trying to find the accAVG that matches the frameHIT rate below. We avoid this by placing a floor value on accAVG
self.floorvalue=raw_input("Minimum allowed sensitivity (0.05):\n")
if not self.floorvalue: self.floorvalue = 0.05
else: self.floorvalue=float(self.floorvalue)
#Still need to set moghistory to pass to argument, even if it isn't used.
self.moghistory = 500
self.mogvariance = 16
if self.subMethod=="MOG":
self.moghistory=raw_input("History of Frames for Gaussian (500):\n")
if not self.moghistory: self.moghistory = 500
self.mogvariance=raw_input("Variance in background threshold (16):\n")
if not self.mogvariance: self.mogvariance = 16
self.adapt=False
#Skip initial frames of video, in case of camera setup and shake.
self.burnin= raw_input("Burn in, skip initial minutes of video (0):\n")
if not self.burnin: self.burnin = 0
else: self.burnin=float(self.burnin)
#Decrease frame rate, downsample
self.scan= raw_input("Scan one of every X frames (0):\n")
if not self.scan: self.scan = 0
else: self.scan=int(self.scan)
#Manually set framerate?
self.frameSET= "y" == raw_input("Set frame rate in fps? (n):\n")
#Set frame rate.
if self.frameSET:
self.frame_rate = raw_input("frames per second:\n")
else: self.frame_rate=0
#There are specific conditions for the plotwatcher because its reported frame_rate is off; turn this into a boolean
self.plotwatcher='y'==raw_input("Does this video come from a plotwatcher camera? (n) :\n")
if not self.plotwatcher: self.plotwatcher = False
self.segment='y'==raw_input("Segment image using grabcut? (n) :\n")
if not self.segment: self.segment = False
#set ROI
self.set_ROI= "y" == raw_input("Exclude a portion of the image? (n) :\n")
if self.set_ROI:
self.ROI_include=raw_input("Subregion of interest to 'include' or 'exclude'?:\n")
else: self.ROI_include='exclude'
#Create area counter by highlighting a section of frame
self.set_areacounter='y'==raw_input("Highlight region for area count? (n) \n")
if not self.set_areacounter: self.set_areacounter=False
#make video by stringing the jpgs back into an avi
self.makeVID=raw_input("Write output as 'video', 'frames','both','none'? (frames):\n")
if not self.makeVID:self.makeVID="frames"
else:
self.floorvalue=0
self.frameHIT=0
self.adapt=False
self.makeVID="frames"
self.scan = 0
self.burnin = 0
self.ROI_include='exclude'
self.frameSET=False
self.plotwatcher=False
self.frame_rate=0
self.set_ROI=False
self.set_areacounter=False
self.subMethod="Acc"
self.moghistory = 500
self.mogvariance = 16
self.pictures = False
self.segment = False
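# Editorial example (not from the original file): a non-interactive run can be
# driven entirely by the argparse flags defined above; the entry-point script
# name here is an assumption.
#
#   python MotionMeerkat.py --runtype file --inDEST PlotwatcherTest.tlv \
#       --fileD ./output --accAvg 0.35 --threshT 30 --minSIZE 0.1 --makeVID frames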
| bw4sz/MotionMeerkat_Bisque | MotionMeerkat/arguments.py | Python | gpl-3.0 | 14,901 | [
"Gaussian"
] | e33fe71598471f47a383462eb3eeba7df3a8142f83728c8c1e2f21fe904a45a7 |
"""Forest of trees-based ensemble methods
Those methods include random forests and extremely randomized trees.
The module structure is the following:
- The ``BaseForest`` base class implements a common ``fit`` method for all
the estimators in the module. The ``fit`` method of the base ``Forest``
class calls the ``fit`` method of each sub-estimator on random samples
(with replacement, a.k.a. bootstrap) of the training set.
The init of the sub-estimator is further delegated to the
``BaseEnsemble`` constructor.
- The ``ForestClassifier`` and ``ForestRegressor`` base classes further
implement the prediction logic by computing an average of the predicted
outcomes of the sub-estimators.
- The ``RandomForestClassifier`` and ``RandomForestRegressor`` derived
classes provide the user with concrete implementations of
the forest ensemble method using classical, deterministic
``DecisionTreeClassifier`` and ``DecisionTreeRegressor`` as
sub-estimator implementations.
- The ``ExtraTreesClassifier`` and ``ExtraTreesRegressor`` derived
classes provide the user with concrete implementations of the
forest ensemble method using the extremely randomized trees
``ExtraTreeClassifier`` and ``ExtraTreeRegressor`` as
sub-estimator implementations.
Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <[email protected]>
# Brian Holt <[email protected]>
# Joly Arnaud <[email protected]>
# Fares Hedayati <[email protected]>
#
# License: BSD 3 clause
from warnings import catch_warnings, simplefilter, warn
import threading
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from scipy.sparse import hstack as sparse_hstack
from joblib import Parallel, delayed
from ..base import ClassifierMixin, RegressorMixin, MultiOutputMixin
from ..metrics import r2_score
from ..preprocessing import OneHotEncoder
from ..tree import (DecisionTreeClassifier, DecisionTreeRegressor,
ExtraTreeClassifier, ExtraTreeRegressor)
from ..tree._tree import DTYPE, DOUBLE
from ..utils import check_random_state, check_array, compute_sample_weight
from ..exceptions import DataConversionWarning
from .base import BaseEnsemble, _partition_estimators
from ..utils.fixes import parallel_helper, _joblib_parallel_args
from ..utils.multiclass import check_classification_targets
from ..utils.validation import check_is_fitted
__all__ = ["RandomForestClassifier",
"RandomForestRegressor",
"ExtraTreesClassifier",
"ExtraTreesRegressor",
"RandomTreesEmbedding"]
MAX_INT = np.iinfo(np.int32).max
def _generate_sample_indices(random_state, n_samples):
"""Private function used to _parallel_build_trees function."""
random_instance = check_random_state(random_state)
sample_indices = random_instance.randint(0, n_samples, n_samples)
return sample_indices
def _generate_unsampled_indices(random_state, n_samples):
"""Private function used to forest._set_oob_score function."""
sample_indices = _generate_sample_indices(random_state, n_samples)
sample_counts = np.bincount(sample_indices, minlength=n_samples)
unsampled_mask = sample_counts == 0
indices_range = np.arange(n_samples)
unsampled_indices = indices_range[unsampled_mask]
return unsampled_indices
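# Editorial sketch (not part of scikit-learn): for a fixed random_state the two
# helpers above are complementary -- the "unsampled" (out-of-bag) indices are
# exactly the samples the bootstrap draw never picked:
#
#   import numpy as np
#   sampled = _generate_sample_indices(0, 10)
#   oob = _generate_unsampled_indices(0, 10)
#   assert set(oob) == set(range(10)) - set(sampled)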
def _parallel_build_trees(tree, forest, X, y, sample_weight, tree_idx, n_trees,
verbose=0, class_weight=None):
"""Private function used to fit a single tree in parallel."""
if verbose > 1:
print("building tree %d of %d" % (tree_idx + 1, n_trees))
if forest.bootstrap:
n_samples = X.shape[0]
if sample_weight is None:
curr_sample_weight = np.ones((n_samples,), dtype=np.float64)
else:
curr_sample_weight = sample_weight.copy()
indices = _generate_sample_indices(tree.random_state, n_samples)
sample_counts = np.bincount(indices, minlength=n_samples)
curr_sample_weight *= sample_counts
if class_weight == 'subsample':
with catch_warnings():
simplefilter('ignore', DeprecationWarning)
curr_sample_weight *= compute_sample_weight('auto', y, indices)
elif class_weight == 'balanced_subsample':
curr_sample_weight *= compute_sample_weight('balanced', y, indices)
tree.fit(X, y, sample_weight=curr_sample_weight, check_input=False)
else:
tree.fit(X, y, sample_weight=sample_weight, check_input=False)
return tree
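# Editorial note (not in the original source): when bootstrap=True the resample is
# emulated with per-sample weights instead of copying rows of X -- each sample's
# weight is multiplied by the number of times the bootstrap drew it:
#
#   import numpy as np
#   indices = _generate_sample_indices(0, 5)                  # 5 bootstrap draws
#   weights = np.ones(5) * np.bincount(indices, minlength=5)  # per-sample multiplicities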
class BaseForest(BaseEnsemble, MultiOutputMixin, metaclass=ABCMeta):
"""Base class for forests of trees.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=100,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=None,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super().__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params)
self.bootstrap = bootstrap
self.oob_score = oob_score
self.n_jobs = n_jobs
self.random_state = random_state
self.verbose = verbose
self.warm_start = warm_start
self.class_weight = class_weight
def apply(self, X):
"""Apply trees in the forest to X, return leaf indices.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
X_leaves : array_like, shape = [n_samples, n_estimators]
For each datapoint x in X and for each tree in the forest,
return the index of the leaf x ends up in.
"""
X = self._validate_X_predict(X)
results = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
**_joblib_parallel_args(prefer="threads"))(
delayed(parallel_helper)(tree, 'apply', X, check_input=False)
for tree in self.estimators_)
return np.array(results).T
def decision_path(self, X):
"""Return the decision path in the forest
.. versionadded:: 0.18
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
indicator : sparse csr array, shape = [n_samples, n_nodes]
Return a node indicator matrix where non zero elements
indicates that the samples goes through the nodes.
n_nodes_ptr : array of size (n_estimators + 1, )
The columns from indicator[n_nodes_ptr[i]:n_nodes_ptr[i+1]]
gives the indicator value for the i-th estimator.
"""
X = self._validate_X_predict(X)
indicators = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
**_joblib_parallel_args(prefer='threads'))(
delayed(parallel_helper)(tree, 'decision_path', X,
check_input=False)
for tree in self.estimators_)
n_nodes = [0]
n_nodes.extend([i.shape[1] for i in indicators])
n_nodes_ptr = np.array(n_nodes).cumsum()
return sparse_hstack(indicators).tocsr(), n_nodes_ptr
def fit(self, X, y, sample_weight=None):
"""Build a forest of trees from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The training input samples. Internally, its dtype will be converted
to ``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
Returns
-------
self : object
"""
# Validate or convert input data
X = check_array(X, accept_sparse="csc", dtype=DTYPE)
y = check_array(y, accept_sparse='csc', ensure_2d=False, dtype=None)
if sample_weight is not None:
sample_weight = check_array(sample_weight, ensure_2d=False)
if issparse(X):
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
# Remap output
self.n_features_ = X.shape[1]
y = np.atleast_1d(y)
if y.ndim == 2 and y.shape[1] == 1:
warn("A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples,), for example using ravel().",
DataConversionWarning, stacklevel=2)
if y.ndim == 1:
# reshape is necessary to preserve the data contiguity;
# using [:, np.newaxis] instead would not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
y, expanded_class_weight = self._validate_y_class_weight(y)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Check parameters
self._validate_estimator()
if not self.bootstrap and self.oob_score:
raise ValueError("Out of bag estimation only available"
" if bootstrap=True")
random_state = check_random_state(self.random_state)
if not self.warm_start or not hasattr(self, "estimators_"):
# Free allocated memory, if any
self.estimators_ = []
n_more_estimators = self.n_estimators - len(self.estimators_)
if n_more_estimators < 0:
raise ValueError('n_estimators=%d must be larger or equal to '
'len(estimators_)=%d when warm_start==True'
% (self.n_estimators, len(self.estimators_)))
elif n_more_estimators == 0:
warn("Warm-start fitting without increasing n_estimators does not "
"fit new trees.")
else:
if self.warm_start and len(self.estimators_) > 0:
# We draw from the random state to get the random state we
# would have got if we hadn't used a warm_start.
random_state.randint(MAX_INT, size=len(self.estimators_))
trees = [self._make_estimator(append=False,
random_state=random_state)
for i in range(n_more_estimators)]
# Parallel loop: we prefer the threading backend as the Cython code
# for fitting the trees is internally releasing the Python GIL
# making threading more efficient than multiprocessing in
# that case. However, for joblib 0.12+ we respect any
# parallel_backend contexts set at a higher level,
# since correctness does not rely on using threads.
trees = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
**_joblib_parallel_args(prefer='threads'))(
delayed(_parallel_build_trees)(
t, self, X, y, sample_weight, i, len(trees),
verbose=self.verbose, class_weight=self.class_weight)
for i, t in enumerate(trees))
# Collect newly grown trees
self.estimators_.extend(trees)
if self.oob_score:
self._set_oob_score(X, y)
# Decapsulate classes_ attributes
if hasattr(self, "classes_") and self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
@abstractmethod
def _set_oob_score(self, X, y):
"""Calculate out of bag predictions and score."""
def _validate_y_class_weight(self, y):
# Default implementation
return y, None
def _validate_X_predict(self, X):
"""Validate X whenever one tries to predict, apply, predict_proba"""
check_is_fitted(self, 'estimators_')
return self.estimators_[0]._validate_X_predict(X, check_input=True)
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
The values of this array sum to 1, unless all trees are single node
trees consisting of only the root node, in which case it will be an
array of zeros.
"""
check_is_fitted(self, 'estimators_')
all_importances = Parallel(n_jobs=self.n_jobs,
**_joblib_parallel_args(prefer='threads'))(
delayed(getattr)(tree, 'feature_importances_')
for tree in self.estimators_ if tree.tree_.node_count > 1)
if not all_importances:
return np.zeros(self.n_features_, dtype=np.float64)
all_importances = np.mean(all_importances,
axis=0, dtype=np.float64)
return all_importances / np.sum(all_importances)
def _accumulate_prediction(predict, X, out, lock):
"""This is a utility function for joblib's Parallel.
It can't go locally in ForestClassifier or ForestRegressor, because joblib
complains that it cannot pickle it when placed there.
"""
prediction = predict(X, check_input=False)
with lock:
if len(out) == 1:
out[0] += prediction
else:
for i in range(len(out)):
out[i] += prediction[i]
class ForestClassifier(BaseForest, ClassifierMixin, metaclass=ABCMeta):
"""Base class for forest of trees-based classifiers.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=100,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=None,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super().__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
def _set_oob_score(self, X, y):
"""Compute out-of-bag score"""
X = check_array(X, dtype=DTYPE, accept_sparse='csr')
n_classes_ = self.n_classes_
n_samples = y.shape[0]
oob_decision_function = []
oob_score = 0.0
predictions = [np.zeros((n_samples, n_classes_[k]))
for k in range(self.n_outputs_)]
for estimator in self.estimators_:
unsampled_indices = _generate_unsampled_indices(
estimator.random_state, n_samples)
p_estimator = estimator.predict_proba(X[unsampled_indices, :],
check_input=False)
if self.n_outputs_ == 1:
p_estimator = [p_estimator]
for k in range(self.n_outputs_):
predictions[k][unsampled_indices, :] += p_estimator[k]
for k in range(self.n_outputs_):
if (predictions[k].sum(axis=1) == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates.")
decision = (predictions[k] /
predictions[k].sum(axis=1)[:, np.newaxis])
oob_decision_function.append(decision)
oob_score += np.mean(y[:, k] ==
np.argmax(predictions[k], axis=1), axis=0)
if self.n_outputs_ == 1:
self.oob_decision_function_ = oob_decision_function[0]
else:
self.oob_decision_function_ = oob_decision_function
self.oob_score_ = oob_score / self.n_outputs_
def _validate_y_class_weight(self, y):
check_classification_targets(y)
y = np.copy(y)
expanded_class_weight = None
if self.class_weight is not None:
y_original = np.copy(y)
self.classes_ = []
self.n_classes_ = []
y_store_unique_indices = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_store_unique_indices[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_store_unique_indices
if self.class_weight is not None:
valid_presets = ('balanced', 'balanced_subsample')
if isinstance(self.class_weight, str):
if self.class_weight not in valid_presets:
raise ValueError('Valid presets for class_weight include '
'"balanced" and "balanced_subsample". Given "%s".'
% self.class_weight)
if self.warm_start:
warn('class_weight presets "balanced" or "balanced_subsample" are '
'not recommended for warm_start if the fitted data '
'differs from the full dataset. In order to use '
'"balanced" weights, use compute_class_weight("balanced", '
'classes, y). In place of y you can use a large '
'enough sample of the full training set target to '
'properly estimate the class frequency '
'distributions. Pass the resulting weights as the '
'class_weight parameter.')
if (self.class_weight != 'balanced_subsample' or
not self.bootstrap):
if self.class_weight == "balanced_subsample":
class_weight = "balanced"
else:
class_weight = self.class_weight
expanded_class_weight = compute_sample_weight(class_weight,
y_original)
return y, expanded_class_weight
def predict(self, X):
"""Predict class for X.
The predicted class of an input sample is a vote by the trees in
the forest, weighted by their probability estimates. That is,
the predicted class is the one with highest mean probability
estimate across the trees.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
n_samples = proba[0].shape[0]
# all dtypes should be the same, so just take the first
class_type = self.classes_[0].dtype
predictions = np.empty((n_samples, self.n_outputs_),
dtype=class_type)
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(np.argmax(proba[k],
axis=1),
axis=0)
return predictions
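# Editorial sketch (not part of scikit-learn): the "vote" described in the
# docstring is simply an argmax over the tree-averaged probabilities, e.g.
#
#   import numpy as np
#   classes = np.array(['a', 'b'])
#   proba = np.array([[0.2, 0.8], [0.7, 0.3]])       # averaged over the forest
#   classes.take(np.argmax(proba, axis=1), axis=0)   # -> array(['b', 'a'], ...)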
def predict_proba(self, X):
"""Predict class probabilities for X.
The predicted class probabilities of an input sample are computed as
the mean predicted class probabilities of the trees in the forest. The
class probability of a single tree is the fraction of samples of the same
class in a leaf.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
check_is_fitted(self, 'estimators_')
# Check data
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
# avoid storing the output of every estimator by summing them here
all_proba = [np.zeros((X.shape[0], j), dtype=np.float64)
for j in np.atleast_1d(self.n_classes_)]
lock = threading.Lock()
Parallel(n_jobs=n_jobs, verbose=self.verbose,
**_joblib_parallel_args(require="sharedmem"))(
delayed(_accumulate_prediction)(e.predict_proba, X, all_proba,
lock)
for e in self.estimators_)
for proba in all_proba:
proba /= len(self.estimators_)
if len(all_proba) == 1:
return all_proba[0]
else:
return all_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
The predicted class log-probabilities of an input sample is computed as
the log of the mean predicted class probabilities of the trees in the
forest.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
class ForestRegressor(BaseForest, RegressorMixin, metaclass=ABCMeta):
"""Base class for forest of trees-based regressors.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=100,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=None,
random_state=None,
verbose=0,
warm_start=False):
super().__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
def predict(self, X):
"""Predict regression target for X.
The predicted regression target of an input sample is computed as the
mean predicted regression targets of the trees in the forest.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted values.
"""
check_is_fitted(self, 'estimators_')
# Check data
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
# avoid storing the output of every estimator by summing them here
if self.n_outputs_ > 1:
y_hat = np.zeros((X.shape[0], self.n_outputs_), dtype=np.float64)
else:
y_hat = np.zeros((X.shape[0]), dtype=np.float64)
# Parallel loop
lock = threading.Lock()
Parallel(n_jobs=n_jobs, verbose=self.verbose,
**_joblib_parallel_args(require="sharedmem"))(
delayed(_accumulate_prediction)(e.predict, X, [y_hat], lock)
for e in self.estimators_)
y_hat /= len(self.estimators_)
return y_hat
def _set_oob_score(self, X, y):
"""Compute out-of-bag scores"""
X = check_array(X, dtype=DTYPE, accept_sparse='csr')
n_samples = y.shape[0]
predictions = np.zeros((n_samples, self.n_outputs_))
n_predictions = np.zeros((n_samples, self.n_outputs_))
for estimator in self.estimators_:
unsampled_indices = _generate_unsampled_indices(
estimator.random_state, n_samples)
p_estimator = estimator.predict(
X[unsampled_indices, :], check_input=False)
if self.n_outputs_ == 1:
p_estimator = p_estimator[:, np.newaxis]
predictions[unsampled_indices, :] += p_estimator
n_predictions[unsampled_indices, :] += 1
if (n_predictions == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates.")
n_predictions[n_predictions == 0] = 1
predictions /= n_predictions
self.oob_prediction_ = predictions
if self.n_outputs_ == 1:
self.oob_prediction_ = \
self.oob_prediction_.reshape((n_samples, ))
self.oob_score_ = 0.0
for k in range(self.n_outputs_):
self.oob_score_ += r2_score(y[:, k],
predictions[:, k])
self.oob_score_ /= self.n_outputs_
class RandomForestClassifier(ForestClassifier):
"""A random forest classifier.
A random forest is a meta estimator that fits a number of decision tree
classifiers on various sub-samples of the dataset and uses averaging to
improve the predictive accuracy and control over-fitting.
The sub-sample size is always the same as the original
input sample size but the samples are drawn with replacement if
`bootstrap=True` (default).
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=100)
The number of trees in the forest.
.. versionchanged:: 0.22
The default value of ``n_estimators`` changed from 10 to 100
in 0.22.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a fraction and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for fractions.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node.
A split point at any depth will only be considered if it leaves at
least ``min_samples_leaf`` training samples in each of the left and
right branches. This may have the effect of smoothing the model,
especially in regression.
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a fraction and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for fractions.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a fraction and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)` (same as "auto").
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires
effectively inspecting more than ``max_features`` features.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_decrease : float, optional (default=0.)
A node will be split if this split induces a decrease of the impurity
greater than or equal to this value.
The weighted impurity decrease equation is the following::
N_t / N * (impurity - N_t_R / N_t * right_impurity
- N_t_L / N_t * left_impurity)
where ``N`` is the total number of samples, ``N_t`` is the number of
samples at the current node, ``N_t_L`` is the number of samples in the
left child, and ``N_t_R`` is the number of samples in the right child.
``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
if ``sample_weight`` is passed.
.. versionadded:: 0.19
min_impurity_split : float, (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. deprecated:: 0.19
``min_impurity_split`` has been deprecated in favor of
``min_impurity_decrease`` in 0.19. The default value of
``min_impurity_split`` will change from 1e-7 to 0 in 0.23 and it
will be removed in 0.25. Use ``min_impurity_decrease`` instead.
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees. If False, the
whole dataset is used to build each tree.
oob_score : bool (default=False)
Whether to use out-of-bag samples to estimate
the generalization accuracy.
n_jobs : int or None, optional (default=None)
The number of jobs to run in parallel for both `fit` and `predict`.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity when fitting and predicting.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest. See :term:`the Glossary <warm_start>`.
class_weight : dict, list of dicts, "balanced", "balanced_subsample" or \
None, optional (default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
Note that for multioutput (including multilabel) weights should be
defined for each class of every column in its own dict. For example,
for four-class multilabel classification weights should be
[{0: 1, 1: 1}, {0: 1, 1: 5}, {0: 1, 1: 1}, {0: 1, 1: 1}] instead of
[{1:1}, {2:5}, {3:1}, {4:1}].
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
The "balanced_subsample" mode is the same as "balanced" except that
weights are computed based on the bootstrap sample for every tree
grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
Attributes
----------
base_estimator_ : DecisionTreeClassifier
The child estimator template used to create the collection of fitted
sub-estimators.
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes] or a list of such arrays
The class labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
Examples
--------
>>> from sklearn.ensemble import RandomForestClassifier
>>> from sklearn.datasets import make_classification
>>> X, y = make_classification(n_samples=1000, n_features=4,
... n_informative=2, n_redundant=0,
... random_state=0, shuffle=False)
>>> clf = RandomForestClassifier(max_depth=2, random_state=0)
>>> clf.fit(X, y)
RandomForestClassifier(max_depth=2, random_state=0)
>>> print(clf.feature_importances_)
[0.14205973 0.76664038 0.0282433 0.06305659]
>>> print(clf.predict([[0, 0, 0, 0]]))
[1]
Notes
-----
The default values for the parameters controlling the size of the trees
(e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and
unpruned trees which can potentially be very large on some data sets. To
reduce memory consumption, the complexity and size of the trees should be
controlled by setting those parameter values.
The features are always randomly permuted at each split. Therefore,
the best found split may vary, even with the same training data,
``max_features=n_features`` and ``bootstrap=False``, if the improvement
of the criterion is identical for several splits enumerated during the
search of the best split. To obtain a deterministic behaviour during
fitting, ``random_state`` has to be fixed.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeClassifier, ExtraTreesClassifier
"""
def __init__(self,
n_estimators=100,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_decrease=0.,
min_impurity_split=None,
bootstrap=True,
oob_score=False,
n_jobs=None,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super().__init__(
base_estimator=DecisionTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"min_impurity_decrease", "min_impurity_split",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_decrease = min_impurity_decrease
self.min_impurity_split = min_impurity_split
class RandomForestRegressor(ForestRegressor):
"""A random forest regressor.
A random forest is a meta estimator that fits a number of classifying
decision trees on various sub-samples of the dataset and uses averaging
to improve the predictive accuracy and control over-fitting.
The sub-sample size is always the same as the original
input sample size but the samples are drawn with replacement if
`bootstrap=True` (default).
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=100)
The number of trees in the forest.
.. versionchanged:: 0.22
The default value of ``n_estimators`` changed from 10 to 100
in 0.22.
criterion : string, optional (default="mse")
The function to measure the quality of a split. Supported criteria
are "mse" for the mean squared error, which is equal to variance
reduction as feature selection criterion, and "mae" for the mean
absolute error.
.. versionadded:: 0.18
Mean Absolute Error (MAE) criterion.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a fraction and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for fractions.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node.
A split point at any depth will only be considered if it leaves at
least ``min_samples_leaf`` training samples in each of the left and
right branches. This may have the effect of smoothing the model,
especially in regression.
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a fraction and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for fractions.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a fraction and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires
effectively inspecting more than ``max_features`` features.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_decrease : float, optional (default=0.)
A node will be split if this split induces a decrease of the impurity
greater than or equal to this value.
The weighted impurity decrease equation is the following::
N_t / N * (impurity - N_t_R / N_t * right_impurity
- N_t_L / N_t * left_impurity)
where ``N`` is the total number of samples, ``N_t`` is the number of
samples at the current node, ``N_t_L`` is the number of samples in the
left child, and ``N_t_R`` is the number of samples in the right child.
``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
if ``sample_weight`` is passed.
.. versionadded:: 0.19
min_impurity_split : float, (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. deprecated:: 0.19
``min_impurity_split`` has been deprecated in favor of
``min_impurity_decrease`` in 0.19. The default value of
``min_impurity_split`` will change from 1e-7 to 0 in 0.23 and it
will be removed in 0.25. Use ``min_impurity_decrease`` instead.
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees. If False, the
whole dataset is used to build each tree.
oob_score : bool, optional (default=False)
Whether to use out-of-bag samples to estimate
the R^2 on unseen data.
n_jobs : int or None, optional (default=None)
The number of jobs to run in parallel for both `fit` and `predict`.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity when fitting and predicting.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest. See :term:`the Glossary <warm_start>`.
Attributes
----------
base_estimator_ : DecisionTreeRegressor
The child estimator template used to create the collection of fitted
sub-estimators.
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
Examples
--------
>>> from sklearn.ensemble import RandomForestRegressor
>>> from sklearn.datasets import make_regression
>>> X, y = make_regression(n_features=4, n_informative=2,
... random_state=0, shuffle=False)
>>> regr = RandomForestRegressor(max_depth=2, random_state=0)
>>> regr.fit(X, y)
RandomForestRegressor(max_depth=2, random_state=0)
>>> print(regr.feature_importances_)
[0.18146984 0.81473937 0.00145312 0.00233767]
>>> print(regr.predict([[0, 0, 0, 0]]))
[-8.32987858]
Notes
-----
The default values for the parameters controlling the size of the trees
(e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and
unpruned trees which can potentially be very large on some data sets. To
reduce memory consumption, the complexity and size of the trees should be
controlled by setting those parameter values.
The features are always randomly permuted at each split. Therefore,
the best found split may vary, even with the same training data,
``max_features=n_features`` and ``bootstrap=False``, if the improvement
of the criterion is identical for several splits enumerated during the
search of the best split. To obtain a deterministic behaviour during
fitting, ``random_state`` has to be fixed.
The default value ``max_features="auto"`` uses ``n_features``
rather than ``n_features / 3``. The latter was originally suggested in
[1], whereas the former was more recently justified empirically in [2].
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
.. [2] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized
trees", Machine Learning, 63(1), 3-42, 2006.
See also
--------
DecisionTreeRegressor, ExtraTreesRegressor
"""
def __init__(self,
n_estimators=100,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_decrease=0.,
min_impurity_split=None,
bootstrap=True,
oob_score=False,
n_jobs=None,
random_state=None,
verbose=0,
warm_start=False):
super().__init__(
base_estimator=DecisionTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"min_impurity_decrease", "min_impurity_split",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_decrease = min_impurity_decrease
self.min_impurity_split = min_impurity_split
class ExtraTreesClassifier(ForestClassifier):
"""An extra-trees classifier.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
of the dataset and uses averaging to improve the predictive accuracy
and control over-fitting.
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=100)
The number of trees in the forest.
.. versionchanged:: 0.22
The default value of ``n_estimators`` changed from 10 to 100
in 0.22.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a fraction and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for fractions.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node.
A split point at any depth will only be considered if it leaves at
least ``min_samples_leaf`` training samples in each of the left and
right branches. This may have the effect of smoothing the model,
especially in regression.
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a fraction and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for fractions.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a fraction and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires
effectively inspecting more than ``max_features`` features.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_decrease : float, optional (default=0.)
A node will be split if this split induces a decrease of the impurity
greater than or equal to this value.
The weighted impurity decrease equation is the following::
N_t / N * (impurity - N_t_R / N_t * right_impurity
- N_t_L / N_t * left_impurity)
where ``N`` is the total number of samples, ``N_t`` is the number of
samples at the current node, ``N_t_L`` is the number of samples in the
left child, and ``N_t_R`` is the number of samples in the right child.
``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
if ``sample_weight`` is passed.
.. versionadded:: 0.19
min_impurity_split : float, (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. deprecated:: 0.19
``min_impurity_split`` has been deprecated in favor of
``min_impurity_decrease`` in 0.19. The default value of
``min_impurity_split`` will change from 1e-7 to 0 in 0.23 and it
will be removed in 0.25. Use ``min_impurity_decrease`` instead.
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees. If False, the
whole dataset is used to build each tree.
oob_score : bool, optional (default=False)
Whether to use out-of-bag samples to estimate
the generalization accuracy.
n_jobs : int or None, optional (default=None)
The number of jobs to run in parallel for both `fit` and `predict`.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity when fitting and predicting.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest. See :term:`the Glossary <warm_start>`.
class_weight : dict, list of dicts, "balanced", "balanced_subsample" or \
None, optional (default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
Note that for multioutput (including multilabel) weights should be
defined for each class of every column in its own dict. For example,
for four-class multilabel classification weights should be
[{0: 1, 1: 1}, {0: 1, 1: 5}, {0: 1, 1: 1}, {0: 1, 1: 1}] instead of
[{1:1}, {2:5}, {3:1}, {4:1}].
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
The "balanced_subsample" mode is the same as "balanced" except that weights are
computed based on the bootstrap sample for every tree grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
Attributes
----------
base_estimator_ : ExtraTreeClassifier
The child estimator template used to create the collection of fitted
sub-estimators.
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes] or a list of such arrays
The class labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
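Examples
--------
A minimal usage sketch showing construction, fitting and basic attribute
access (illustrative only; fitted values depend on the data and on
``random_state``):
>>> from sklearn.ensemble import ExtraTreesClassifier
>>> from sklearn.datasets import make_classification
>>> X, y = make_classification(n_features=4, random_state=0)
>>> clf = ExtraTreesClassifier(n_estimators=100, random_state=0).fit(X, y)
>>> clf.n_features_
4
>>> clf.n_classes_
2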
Notes
-----
The default values for the parameters controlling the size of the trees
(e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and
unpruned trees which can potentially be very large on some data sets. To
reduce memory consumption, the complexity and size of the trees should be
controlled by setting those parameter values.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized
trees", Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeClassifier : Base classifier for this ensemble.
RandomForestClassifier : Ensemble Classifier based on trees with optimal
splits.
"""
def __init__(self,
n_estimators=100,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_decrease=0.,
min_impurity_split=None,
bootstrap=False,
oob_score=False,
n_jobs=None,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super().__init__(
base_estimator=ExtraTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"min_impurity_decrease", "min_impurity_split",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_decrease = min_impurity_decrease
self.min_impurity_split = min_impurity_split
class ExtraTreesRegressor(ForestRegressor):
"""An extra-trees regressor.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
of the dataset and uses averaging to improve the predictive accuracy
and control over-fitting.
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=100)
The number of trees in the forest.
.. versionchanged:: 0.22
The default value of ``n_estimators`` changed from 10 to 100
in 0.22.
criterion : string, optional (default="mse")
The function to measure the quality of a split. Supported criteria
are "mse" for the mean squared error, which is equal to variance
reduction as feature selection criterion, and "mae" for the mean
absolute error.
.. versionadded:: 0.18
Mean Absolute Error (MAE) criterion.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a fraction and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for fractions.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node.
A split point at any depth will only be considered if it leaves at
least ``min_samples_leaf`` training samples in each of the left and
right branches. This may have the effect of smoothing the model,
especially in regression.
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a fraction and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for fractions.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a fraction and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires
effectively inspecting more than ``max_features`` features.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_decrease : float, optional (default=0.)
A node will be split if this split induces a decrease of the impurity
greater than or equal to this value.
The weighted impurity decrease equation is the following::
N_t / N * (impurity - N_t_R / N_t * right_impurity
- N_t_L / N_t * left_impurity)
where ``N`` is the total number of samples, ``N_t`` is the number of
samples at the current node, ``N_t_L`` is the number of samples in the
left child, and ``N_t_R`` is the number of samples in the right child.
``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
if ``sample_weight`` is passed.
.. versionadded:: 0.19
min_impurity_split : float, (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. deprecated:: 0.19
``min_impurity_split`` has been deprecated in favor of
``min_impurity_decrease`` in 0.19. The default value of
``min_impurity_split`` will change from 1e-7 to 0 in 0.23 and it
will be removed in 0.25. Use ``min_impurity_decrease`` instead.
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees. If False, the
whole dataset is used to build each tree.
oob_score : bool, optional (default=False)
Whether to use out-of-bag samples to estimate the R^2 on unseen data.
n_jobs : int or None, optional (default=None)
The number of jobs to run in parallel for both `fit` and `predict`.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity when fitting and predicting.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest. See :term:`the Glossary <warm_start>`.
Attributes
----------
base_estimator_ : ExtraTreeRegressor
The child estimator template used to create the collection of fitted
sub-estimators.
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features.
n_outputs_ : int
The number of outputs.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
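Examples
--------
A minimal usage sketch showing construction, fitting and basic attribute
access (illustrative only; fitted values depend on the data and on
``random_state``):
>>> from sklearn.ensemble import ExtraTreesRegressor
>>> from sklearn.datasets import make_regression
>>> X, y = make_regression(n_features=4, n_informative=2, random_state=0)
>>> reg = ExtraTreesRegressor(n_estimators=100, random_state=0).fit(X, y)
>>> reg.n_features_
4
>>> reg.predict(X[:2]).shape
(2,)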
Notes
-----
The default values for the parameters controlling the size of the trees
(e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and
unpruned trees which can potentially be very large on some data sets. To
reduce memory consumption, the complexity and size of the trees should be
controlled by setting those parameter values.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeRegressor : Base estimator for this ensemble.
RandomForestRegressor : Ensemble regressor using trees with optimal splits.
"""
def __init__(self,
n_estimators=100,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_decrease=0.,
min_impurity_split=None,
bootstrap=False,
oob_score=False,
n_jobs=None,
random_state=None,
verbose=0,
warm_start=False):
super().__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"min_impurity_decrease", "min_impurity_split",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_decrease = min_impurity_decrease
self.min_impurity_split = min_impurity_split
class RandomTreesEmbedding(BaseForest):
"""An ensemble of totally random trees.
An unsupervised transformation of a dataset to a high-dimensional
sparse representation. A datapoint is coded according to which leaf of
each tree it is sorted into. Using a one-hot encoding of the leaves,
this leads to a binary coding with as many ones as there are trees in
the forest.
The dimensionality of the resulting representation is
``n_out <= n_estimators * max_leaf_nodes``. If ``max_leaf_nodes == None``,
the number of leaf nodes is at most ``n_estimators * 2 ** max_depth``.
Read more in the :ref:`User Guide <random_trees_embedding>`.
Parameters
----------
n_estimators : integer, optional (default=100)
Number of trees in the forest.
.. versionchanged:: 0.22
The default value of ``n_estimators`` changed from 10 to 100
in 0.22.
max_depth : integer, optional (default=5)
The maximum depth of each tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a fraction and
`ceil(min_samples_split * n_samples)` is the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for fractions.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node.
A split point at any depth will only be considered if it leaves at
least ``min_samples_leaf`` training samples in each of the left and
right branches. This may have the effect of smoothing the model,
especially in regression.
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a fraction and
`ceil(min_samples_leaf * n_samples)` is the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for fractions.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_decrease : float, optional (default=0.)
A node will be split if this split induces a decrease of the impurity
greater than or equal to this value.
The weighted impurity decrease equation is the following::
N_t / N * (impurity - N_t_R / N_t * right_impurity
- N_t_L / N_t * left_impurity)
where ``N`` is the total number of samples, ``N_t`` is the number of
samples at the current node, ``N_t_L`` is the number of samples in the
left child, and ``N_t_R`` is the number of samples in the right child.
``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
if ``sample_weight`` is passed.
.. versionadded:: 0.19
min_impurity_split : float, (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. deprecated:: 0.19
``min_impurity_split`` has been deprecated in favor of
``min_impurity_decrease`` in 0.19. The default value of
``min_impurity_split`` will change from 1e-7 to 0 in 0.23 and it
will be removed in 0.25. Use ``min_impurity_decrease`` instead.
sparse_output : bool, optional (default=True)
Whether to return a sparse CSR matrix (the default) or a dense array
compatible with dense pipeline operators.
n_jobs : int or None, optional (default=None)
The number of jobs to run in parallel for both `fit` and `predict`.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity when fitting and predicting.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest. See :term:`the Glossary <warm_start>`.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
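one_hot_encoder_ : OneHotEncoder instance
One-hot encoder used to build the embedding; it is fitted in
``fit_transform`` on the leaf indices returned by ``apply``.
Examples
--------
A minimal usage sketch (illustrative only; the exact encoding depends on
the fitted trees, so only the output shape is shown):
>>> from sklearn.ensemble import RandomTreesEmbedding
>>> X = [[0, 0], [1, 0], [0, 1], [1, 1]]
>>> embedder = RandomTreesEmbedding(n_estimators=10, random_state=0).fit(X)
>>> X_sparse = embedder.transform(X)
>>> X_sparse.shape[0]
4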
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
.. [2] Moosmann, F. and Triggs, B. and Jurie, F. "Fast discriminative
visual codebooks using randomized clustering forests"
NIPS 2007
"""
criterion = 'mse'
max_features = 1
def __init__(self,
n_estimators=100,
max_depth=5,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_leaf_nodes=None,
min_impurity_decrease=0.,
min_impurity_split=None,
sparse_output=True,
n_jobs=None,
random_state=None,
verbose=0,
warm_start=False):
super().__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"min_impurity_decrease", "min_impurity_split",
"random_state"),
bootstrap=False,
oob_score=False,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_decrease = min_impurity_decrease
self.min_impurity_split = min_impurity_split
self.sparse_output = sparse_output
def _set_oob_score(self, X, y):
raise NotImplementedError("OOB score not supported by tree embedding")
def fit(self, X, y=None, sample_weight=None):
"""Fit estimator.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
The input samples. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csc_matrix`` for maximum efficiency.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
Returns
-------
self : object
"""
self.fit_transform(X, y, sample_weight=sample_weight)
return self
def fit_transform(self, X, y=None, sample_weight=None):
"""Fit estimator and transform dataset.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Input data used to build forests. Use ``dtype=np.float32`` for
maximum efficiency.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
Returns
-------
X_transformed : sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
X = check_array(X, accept_sparse=['csc'])
if issparse(X):
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
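# Fit the forest on uniformly random targets: only the resulting leaf
# partition of X matters for the embedding, not the regression values.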
rnd = check_random_state(self.random_state)
y = rnd.uniform(size=X.shape[0])
super().fit(X, y, sample_weight=sample_weight)
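# ``apply`` returns, for each sample, the index of the leaf it reaches in
# every tree; one-hot encoding those indices yields the binary embedding.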
self.one_hot_encoder_ = OneHotEncoder(sparse=self.sparse_output)
return self.one_hot_encoder_.fit_transform(self.apply(X))
def transform(self, X):
"""Transform dataset.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Input data to be transformed. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csr_matrix`` for maximum efficiency.
Returns
-------
X_transformed : sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
check_is_fitted(self, 'one_hot_encoder_')
return self.one_hot_encoder_.transform(self.apply(X))
| chrsrds/scikit-learn | sklearn/ensemble/forest.py | Python | bsd-3-clause | 82,923 | [
"Brian"
] | c307c84ea37cd9034ca630280ea1abb1362a85bdacab094e561c3bc9e13cb534 |