import random
import pytest
from cctbx import sgtbx
from dials.algorithms.symmetry.cosym._generate_test_data import generate_intensities
from dials.array_family import flex
from dxtbx.model import Beam, Crystal, Experiment, Scan
from dxtbx.model.experiment_list import ExperimentList
from dxtbx.serialize import load
from xia2.Modules.Scaler.DialsScaler import decide_correct_lattice_using_refiner
flex.set_random_seed(42)
random.seed(42)
@pytest.fixture
def helper_directory(ccp4, tmpdir):
"""Initialise a DialsScalerHelper"""
# import kept here as the import depends on CCP4 being present
from xia2.Modules.Scaler.DialsScaler import DialsScalerHelper
helper = DialsScalerHelper()
helper.set_pname_xname("AUTOMATIC", "DEFAULT")
helper.set_working_directory(tmpdir.strpath)
return (helper, tmpdir)
def generated_exp(n=1, space_group="P 2", assign_ids=False, id_=None):
"""Generate an experiment list with two experiments."""
experiments = ExperimentList()
exp_dict = {
"__id__": "crystal",
"real_space_a": [15.0, 0.0, 0.0],
"real_space_b": [0.0, 10.0, 0.0],
"real_space_c": [0.0, 0.0, 20.0],
"space_group_hall_symbol": space_group,
}
crystal = Crystal.from_dict(exp_dict)
scan = Scan(image_range=[0, 90], oscillation=[0.0, 1.0])
beam = Beam(s0=(0.0, 0.0, 1.01))
if assign_ids:
experiments.append(
Experiment(identifier="0", beam=beam, scan=scan, crystal=crystal)
)
elif id_:
experiments.append(
Experiment(identifier=str(id_), beam=beam, scan=scan, crystal=crystal)
)
else:
experiments.append(Experiment(beam=beam, scan=scan, crystal=crystal))
if n > 1:
for i in range(1, n):
if assign_ids:
experiments.append(
Experiment(identifier=str(i), beam=beam, scan=scan, crystal=crystal)
)
else:
experiments.append(Experiment(beam=beam, scan=scan, crystal=crystal))
return experiments
def generate_reflections_in_sg(space_group, id_=0, assign_id=False):
"""Generate reflections with intensities consistent with space group"""
sgi = sgtbx.space_group_info(symbol=space_group)
cs = sgi.any_compatible_crystal_symmetry(volume=3000)
cs = cs.best_cell()
cs = cs.minimum_cell()
intensities = (
generate_intensities(cs, d_min=2.0)
.generate_bijvoet_mates()
.set_observation_type_xray_intensity()
)
intensities = intensities.expand_to_p1()
# needed to give vaguely sensible E_cc_true values
reflections = flex.reflection_table()
reflections["intensity.sum.value"] = intensities.data()
reflections["intensity.sum.variance"] = flex.pow2(intensities.sigmas())
reflections["miller_index"] = intensities.indices()
reflections["d"] = intensities.d_spacings().data()
reflections["id"] = flex.int(reflections.size(), id_)
if assign_id:
reflections.experiment_identifiers()[id_] = str(id_)
reflections.set_flags(
flex.bool(reflections.size(), True), reflections.flags.integrated
)
return reflections
def generate_test_refl(id_=0, assign_id=False):
"""Generate a small reflection table"""
reflections = flex.reflection_table()
reflections["intensity.sum.value"] = flex.double([1.0, 1.0, 2.0, 2.0, 3.0, 3.0])
reflections["variance.sum.variance"] = flex.double([1.0, 1.0, 1.0, 1.0, 1.0, 1.0])
reflections["miller_index"] = flex.miller_index(
[(1, 0, 0), (0, 0, 1), (2, 0, 0), (0, 0, 2), (0, 1, 0), (0, -1, 0)]
)
reflections["id"] = flex.int(6, id_)
if assign_id:
reflections.experiment_identifiers()[id_] = str(id_)
return reflections
symmetry_test_data = [
(
"P 2 ",
"P 2 ",
["mP", "aP", "oP"],
["P 1 2 1", "P 1"],
["P 1 2 1", "P 1 2 1", "P 2 2 2"],
),
(
"P 1 ",
"P 2 ",
["aP", "mP", "oP"],
["P 1", "P 1 2 1"],
["P 1 2 1", "P 1 2 1", "P 2 2 2"],
),
]
@pytest.mark.parametrize(
"""reflection_spacegroup, experiments_spacegroup,
expected_lattices, required_spacegroup_order, other_spacegroups""",
symmetry_test_data,
)
def test_dials_symmetry_decide_pointgroup(
reflection_spacegroup,
experiments_spacegroup,
expected_lattices,
required_spacegroup_order,
other_spacegroups,
helper_directory,
):
"""Test for the dials_symmetry_decide_pointgroup helper function"""
helper, tmpdir = helper_directory
refl_path = (tmpdir / "test.refl").strpath
exp_path = (tmpdir / "test.expt").strpath
generated_exp(space_group=experiments_spacegroup).as_file(exp_path)
generate_reflections_in_sg(reflection_spacegroup).as_file(refl_path)
symmetry_analyser = helper.dials_symmetry_decide_pointgroup([exp_path], [refl_path])
# Note : instabilities have been observed in the order of the end of the
# spacegroup list - this is likely due to the use of unseeded random number
# generation in dials.symmetry symmetry element scoring, but this only seems
# to affect the order of groups with a score near zero. Hence only assert the
# order of the spacegroups that must be in order, near the start of the list.
assert symmetry_analyser.get_possible_lattices() == expected_lattices
spacegroups = symmetry_analyser.get_likely_spacegroups()
assert spacegroups[: len(required_spacegroup_order)] == required_spacegroup_order
assert set(spacegroups[len(required_spacegroup_order) :]) == set(other_spacegroups)
def test_assign_identifiers(helper_directory):
"""Test the call to the assign identifiers wrapper"""
helper, tmpdir = helper_directory
experiments = []
reflections = []
for i in range(0, 3):
refl_path = tmpdir.join("test_%s.refl" % i).strpath
exp_path = tmpdir.join("test_%s.expt" % i).strpath
generate_test_refl().as_file(refl_path)
generated_exp().as_file(exp_path)
experiments.append(exp_path)
reflections.append(refl_path)
assigner = helper.assign_dataset_identifiers(experiments, reflections)
expts = load.experiment_list(assigner.get_output_experiments_filename())
assert len(set(expts.identifiers())) == 3
refls = flex.reflection_table.from_file(assigner.get_output_reflections_filename())
assert refls.experiment_identifiers()[0] == expts[0].identifier
assert refls.experiment_identifiers()[1] == expts[1].identifier
assert refls.experiment_identifiers()[2] == expts[2].identifier
class simple_sweep_info:
"""Simple sweep info class for testing"""
def __init__(self):
self.reflections = ""
self.experiments = ""
def get_integrater(self):
return self
def get_integrated_experiments(self):
return self.experiments
def get_integrated_reflections(self):
return self.reflections
def set_reflections(self, refl):
self.reflections = refl
def get_reflections(self):
return self.reflections
def set_experiments(self, exp):
self.experiments = exp
def get_experiments(self):
return self.experiments
class simple_sweep_handler:
"""Simple sweep handler class for testing"""
def __init__(self, number_of_experiments):
self.number_of_experiments = number_of_experiments
self.sis = [simple_sweep_info() for _ in range(number_of_experiments)]
def get_epochs(self):
"""Return a list of 0...n-1"""
return list(range(self.number_of_experiments))
def get_sweep_information(self, epoch):
"""Return the simple sweep info class for a given epoch"""
return self.sis[epoch]
@pytest.mark.parametrize("number_of_experiments", [2, 10])
def test_split_experiments(number_of_experiments, helper_directory):
"""Test the call to split experiments: should split the dataset on experiment
id, giving single datasets with unique ids from 0..n-1"""
helper, tmpdir = helper_directory
sweephandler = simple_sweep_handler(number_of_experiments)
exp_path = tmpdir.join("test.expt").strpath
refl_path = tmpdir.join("test.refl").strpath
generated_exp(number_of_experiments, assign_ids=True).as_file(exp_path)
reflections = flex.reflection_table()
for i in range(number_of_experiments):
reflections.extend(generate_test_refl(id_=i, assign_id=True))
reflections.as_file(refl_path)
# Now call split_experiments and inspect handler to check result
sweephandler = helper.split_experiments(exp_path, refl_path, sweephandler)
check_data_in_sweep_handler(sweephandler)
def check_data_in_sweep_handler(sweephandler):
"""Check that data in sweep handler has ids set correctly"""
for i, epoch in enumerate(sweephandler.get_epochs()):
si = sweephandler.get_sweep_information(epoch)
r = flex.reflection_table.from_file(si.get_reflections())
assert list(set(r["id"])) == [0]
assert list(r.experiment_identifiers().keys()) == [0]
identifiers = r.experiment_identifiers().values()
assert len(identifiers) == 1
experiment = load.experiment_list(si.get_experiments())
assert len(experiment) == 1
assert experiment[0].identifier == identifiers[0]
def test_assign_and_return_datasets(helper_directory):
"""Test the combined method of assigning ids and setting in the sweep handler"""
n = 3
helper, tmpdir = helper_directory
sweephandler = simple_sweep_handler(n)
for i in range(0, n):
si = sweephandler.get_sweep_information(i)
refl_path = tmpdir.join("test_%s.refl" % i).strpath
exp_path = tmpdir.join("test_%s.expt" % i).strpath
generate_test_refl().as_file(refl_path)
generated_exp().as_file(exp_path)
si.set_experiments(exp_path)
si.set_reflections(refl_path)
sweephandler = helper.assign_and_return_datasets(sweephandler)
check_data_in_sweep_handler(sweephandler)
class simple_refiner:
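"""Simple refiner class for testing; tracks an ordered list of candidate lattices"""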
LATTICE_POSSIBLE = "LATTICE_POSSIBLE"
LATTICE_IMPOSSIBLE = "LATTICE_IMPOSSIBLE"
LATTICE_CORRECT = "LATTICE_CORRECT"
def __init__(self, refiner_lattices):
self.refiner_lattices = (
refiner_lattices # first one should be 'best' one used in refinement
)
self.indexer_done = True
self._refiner_reset = False
def get(self):
return self.refiner_lattices
def set_refiner_asserted_lattice(self, lattice):
"""Replicate asserted_lattice methods of refiner and indexer"""
# calls indexer, if not in list of lattices - returns LATTICE_IMPOSSIBLE
if lattice not in self.refiner_lattices:
return self.LATTICE_IMPOSSIBLE
if lattice == self.refiner_lattices[0]:
"""if (PhilIndex.params.xia2.settings.integrate_p1 and
asserted_lattice != self.get_indexer_lattice() and
asserted_lattice != 'aP'):
if PhilIndex.params.xia2.settings.reintegrate_correct_lattice:
self.set_indexer_done(False)
return self.LATTICE_POSSIBLE"""
return self.LATTICE_CORRECT
# else, - calls eliminate, set indexer done false
while self.get()[0] != lattice:
del self.refiner_lattices[0] # i.e. eliminate
# if (not integrate_p1) or reintegrate_correct_lattice
self.indexer_done = False
self.refiner_reset()
return self.LATTICE_POSSIBLE
def get_refiner_lattice(self):
"""Return first lattice"""
return self.refiner_lattices[0]
def refiner_reset(self):
"""Set refiner reset as True"""
self._refiner_reset = True
def get_refiner_reset(self):
"""Get refiner reset status"""
return self._refiner_reset
# ntr ("need to return") is requested when symmetry finds a lattice lower than the refiner's:
# the reindex op in the integrater is reset and need_to_return = True, which marks scaler
# prepare as not done. rerun is requested when symmetry finds a lattice higher than the
# refiner's (or none possible): the symmetry jiffy then sets the correct lattice and reruns with it.
# test_data = (refiner lattice, possible lattices, (correct, rerun, ntr))
test_data = [
(["mP", "aP", "oP"], ["mP"], ("mP", False, False)), # symmetry same as from refiner
(
["mP", "aP", "oP"],
["aP"],
("aP", False, True),
), # symmetry is lower than from refiner
(
["mP", "aP", "oP"],
["tP", "mP"],
("mP", True, False),
), # symmetry finds higher than refiner
(["mP", "aP", "oP"], ["tP", "aP"], ("aP", True, True)),
] # symmetry finds higher than refiner,
# but next best is lower than refiner
@pytest.mark.parametrize(
"refiner_lattices, possible_lattices, expected_output", test_data
)
def test_decide_correct_lattice_using_refiner(
ccp4, refiner_lattices, possible_lattices, expected_output
):
refiner = simple_refiner(refiner_lattices)
result = decide_correct_lattice_using_refiner(possible_lattices, refiner)
assert result == expected_output
# refiner lattices, (pg, ntr, pt, refiner_reset, reindex_init)
test_lattices = [
(["mP", "aP", "oP"], ("P 1 2 1", False, False, False, False)),
# symmetry finds consistent lattice, all good
(["tP", "mP", "aP", "oP"], ("P 1 2 1", True, False, True, False)),
# symmetry finds lower than refiner lattice, so need to return to rerefine
(["aP"], ("P 1", False, False, False, True)),
] # symmetry finds higher than refiner - can occur
# if pseudosymmetry, so just drop to lower symmetry of lattice and don't need to rerefine
# as already done in this space group.
@pytest.mark.parametrize("refiner_lattices, expected_output", test_lattices)
def test_dials_symmetry_indexer_jiffy(
refiner_lattices, expected_output, helper_directory
):
"""Test the jiffy"""
helper, tmpdir = helper_directory
n = 1
multisweep = False
# Create list of experiments, reflections and refiners
experiments = []
reflections = []
refiners = []
for i in range(0, n):
refl_path = tmpdir.join("test_%s.refl" % i).strpath
exp_path = tmpdir.join("test_%s.expt" % i).strpath
generate_reflections_in_sg("P 2", id_=i, assign_id=True).as_file(refl_path)
generated_exp(space_group="P 2", id_=i).as_file(exp_path)
experiments.append(exp_path)
reflections.append(refl_path)
refiners.append(simple_refiner(refiner_lattices))
result = helper.dials_symmetry_indexer_jiffy(
experiments, reflections, refiners, multisweep=multisweep
)
pg, reind_op, ntr, pt, reind_refl, reind_exp, reind_init = result
refiner_reset = refiners[0].get_refiner_reset()
assert (pg, ntr, pt, refiner_reset, reind_init) == expected_output
if expected_output[3]:
for refiner in refiners[1:]:
assert refiner.get_refiner_reset()
# Lint as: python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the market data."""
import tensorflow.compat.v2 as tf
import tf_quant_finance as tff
from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import
core = tff.experimental.pricing_platform.framework.core
market_data = tff.experimental.pricing_platform.framework.market_data
interpolation_method = tff.experimental.pricing_platform.framework.core.interpolation_method
@test_util.run_all_in_graph_and_eager_modes
class MarketDataTest(tf.test.TestCase):
def setUp(self):
valuation_date = [(2020, 6, 24)]
fixing_dates = [(2020, 2, 24), (2020, 3, 12), (2020, 4, 14), (2020, 5, 21)]
fixing_rates = [0.01, 0.02, 0.03, 0.025]
dates = [[2021, 2, 8], [2022, 2, 8], [2023, 2, 8], [2025, 2, 8],
[2027, 2, 8], [2030, 2, 8], [2050, 2, 8]]
discounts = [0.97197441, 0.94022746, 0.91074031, 0.85495089, 0.8013675,
0.72494879, 0.37602059]
vol_dates = [
[2021, 2, 8], [2022, 2, 8], [2023, 2, 8], [2025, 2, 8], [2027, 2, 8]]
strikes = [[1500, 1550, 1510],
[1500, 1550, 1510],
[1500, 1550, 1510],
[1500, 1550, 1510],
[1500, 1550, 1510]]
volatilities = [[0.1, 0.12, 0.13],
[0.15, 0.2, 0.15],
[0.1, 0.2, 0.1],
[0.1, 0.2, 0.1],
[0.1, 0.1, 0.3]]
risk_free_dates = [
[2021, 2, 8], [2022, 2, 8], [2023, 2, 8], [2025, 2, 8], [2050, 2, 8]]
risk_free_discounts = [
0.97197441, 0.94022746, 0.91074031, 0.85495089, 0.37602059]
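# Market data dictionary: USD rate curves (risk-free, OIS and LIBOR 3M with past fixings)
# plus a GOOG equity volatility surface, in the layout consumed by MarketDataDict below.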
self._market_data_dict = {
"rates": {
"USD": {
"risk_free_curve": {
"dates": risk_free_dates, "discounts": risk_free_discounts
},
"OIS": {
"dates": dates, "discounts": discounts
},
"LIBOR_3M": {
"dates": dates,
"discounts": discounts,
"fixing_dates": fixing_dates,
"fixing_rates": fixing_rates,
"fixing_daycount": "ACTUAL_365",
"config": {
"interpolation_method": interpolation_method.
InterpolationMethod.LINEAR
}
},
},
},
"equities": {
"USD": {
"GOOG": {
"spot": 1500,
"volatility_surface": {
"dates": vol_dates,
"strikes": strikes,
"implied_volatilities": volatilities
}
}
}
},
"reference_date": valuation_date,
}
self._libor_discounts = discounts
self._risk_free_discounts = risk_free_discounts
super(MarketDataTest, self).setUp()
def test_discount_curve(self):
market = market_data.MarketDataDict(
self._market_data_dict)
# Get the risk free discount curve
risk_free_curve_type = core.curve_types.RiskFreeCurve(currency="USD")
risk_free_curve = market.yield_curve(risk_free_curve_type)
# Get LIBOR 3M discount
libor_3m = core.rate_indices.RateIndex(type="LIBOR_3M")
rate_index_curve_type = core.curve_types.RateIndexCurve(
currency="USD", index=libor_3m)
libor_3m_curve = market.yield_curve(rate_index_curve_type)
with self.subTest("RiskFree"):
discount_factor_nodes = risk_free_curve.discount_factor_nodes
self.assertAllClose(discount_factor_nodes, self._risk_free_discounts)
with self.subTest("LIBOR_3M"):
discount_factor_nodes = libor_3m_curve.discount_factor_nodes
self.assertAllClose(discount_factor_nodes, self._libor_discounts)
def test_volatility(self):
market = market_data.MarketDataDict(
self._market_data_dict)
# Get volatility surface
vol_surface = market.volatility_surface(currency=["USD", "USD"],
asset=["GOOG", "GOOG"])
expiry = tff.datetime.dates_from_year_month_day(
year=[[2023], [2030]], month=[[5], [10]], day=[[10], [15]])
vols = vol_surface.volatility(expiry_dates=expiry, strike=[[1510], [1520]])
self.assertAllClose(
self.evaluate(vols), [[0.108], [0.31]], atol=1e-6)
def test_fixings(self):
market = market_data.MarketDataDict(
self._market_data_dict)
index_curve_3m = core.curve_types.RateIndexCurve(
"USD", core.rate_indices.RateIndex(type="LIBOR_3M"))
index_curve_ois = core.curve_types.RateIndexCurve(
"USD", core.rate_indices.RateIndex(type="OIS"))
dates = [(2020, 5, 24), (2020, 3, 24)]
with self.subTest("LIBOR_3M"):
fixings, fixings_daycount = market.fixings(dates, index_curve_3m)
self.assertAllClose(
self.evaluate(fixings), [0.025, 0.03], atol=1e-6)
self.assertEqual(fixings_daycount.value, "ACTUAL_365")
with self.subTest("OIS"):
fixings, _ = market.fixings(dates, index_curve_ois)
self.assertAllClose(
self.evaluate(fixings), [0.0, 0.0], atol=1e-6)
if __name__ == "__main__":
tf.test.main()
#!/usr/bin/env python
from tkFileDialog import *
from Tkinter import *
from tkSimpleDialog import Dialog
import tkMessageBox
from plotAscii import *
from imageUtil import *
from view2d import *
from mdaAscii import *
import Pmw
import os, string
import AppShell
global Scan
global SH # SHARED
class setupPrinter(Dialog):
"Dialog for setting up printer "
def body(self,master):
self.title("Set Printer Dialog")
Label(master, text='Enter Printer Name:').grid(row=1, sticky=W)
self.label = StringVar()
self.label = Entry(master, width = 26 )
self.label.grid(row=1,column=1)
self.label.insert(0,SH['printer'])
return self.label
def apply(self):
SH['printer'] = self.label.get()
writeSH(SH)
class commandSyntax(Dialog):
"Dialog for sending a system command or any executable client"
def body(self,master):
self.title("Command Dialog")
self.commandsyntax = Pmw.EntryField(master, labelpos='w',
label_text='Enter Command:', value='',
command=self.valuechanged)
self.commandsyntax.pack(fill='x')
self.commandsyntax.component('entry').focus_set()
def valuechanged(self):
os.system(self.commandsyntax.get()+ ' &')
def apply(self):
self.destroy()
class pickDIdialog(Dialog):
"Dialog for selecting a text line which contains DI names to be used in multiline plot. If blank comment line picked, sequence number is used."
def body(self,master):
file = Scan['txtfile']
data = readArray(file)
nc = len(data[0])
self.nc = nc
fo = open(file,'r')
lines = fo.read()
fo.close()
lines = string.split(lines,'\n')
self.title("Pick Line where DI Names Resides")
box = Pmw.ScrolledListBox(master,
items=(lines),
labelpos=NW,label_font=SH['font'],
label_text='Extract column legends from the text window\nSelect the text line which contains\nlegends to be extracted for multi-line plot',
selectioncommand=self.selectionCommand,
dblclickcommand=self.selectionCommand,
usehullsize=1,hull_width=700,hull_height=400)
box.pack()
self.box = box
def selectionCommand(self):
box = self.box
sels = box.getcurselection()
sels = string.split(sels[0])
no = len(sels)
dc = no - self.nc
if dc >= 0:
sels = sels[dc:no]
ix = SH['ix']
sel = sels[ix+1:no]
else:
sel = range(self.nc)
V = []
for i in range(85):
V.append('')
for i in range(len(sel)):
V[i] = sel[i]
fo = open('pvs','w')
fo.write(str(V))
fo.close()
Scan['nc'] = len(V)
namedialog = GetLegends(self)
def apply(self):
self.destroy()
class GetXYVdialog(Dialog):
"Dialog to set column or line # of X, Y, DATA array located in the opend ascii 2D image file (generated by scanSee/catcher/yviewer)"
def body(self,master):
try:
font=SH['font'] #'Verdana 10 bold'
self.title("Extract X,Y,DATA array from scanSee ASCII file")
self.ix = [IntVar(),IntVar(),IntVar(),IntVar()]
Label(master,text='X and Data column #:',font=font).grid(row=0,column=0,sticky=W)
Label(master,text='X Vector Column #').grid(row=1,column=1,sticky=W)
Label(master,text='Data Start Column #').grid(row=2,column=1,sticky=W)
Label(master,text='Y Vector Defined in:',font=font).grid(row=3,column=0,sticky=W)
Label(master,text='Y Vector Line #').grid(row=4,column=1,sticky=W)
Label(master,text='Y Start Column #').grid(row=5,column=1,sticky=W)
Entry(master,width=4,textvariable=self.ix[0]).grid(row=1,column=2,sticky=W)
Entry(master,width=4,textvariable=self.ix[1]).grid(row=2,column=2,sticky=W)
Entry(master,width=4,textvariable=self.ix[2]).grid(row=4,column=2,sticky=W)
Entry(master,width=4,textvariable=self.ix[3]).grid(row=5,column=2,sticky=W)
self.ix[0].set(0)
self.ix[1].set(2)
self.ix[2].set(3)
self.ix[3].set(2)
except AttributeError:
return self.ix[0]
def get(self):
return [self.ix[0].get(),self.ix[1].get(),self.ix[2].get(),self.ix[3].get()]
def apply(self):
ix = self.get()
Scan['rowcol'] = ix
file = Scan['txtfile']
if file != '':
data = readArray(file)
nc = len(data)
nr = len(data[0])
data = rowreverse(data)
x = data[ix[0]]
data = data[ix[1]:nr]
data = array(data)
fo = open(file,'r')
lines = fo.read()
fo.close()
lines = string.split(lines,'\n')
if ix[2] >= 0:
py = lines[ix[2]]
py = string.split(py)
y = py[ix[3]:len(py)]
for i in range(len(y)):
y[i] = string.atof(y[i])
else:
y = range(len(data))
Scan['X'] = x
Scan['Y'] = y
file = Scan['txtfile']
if Scan['updown']:
plot2dUpdown(data,x,y,title=file)
else:
plot2d(data,x,y,title=file)
class defineXYdialog(Dialog):
"Dialog for entering Xmin,Xmax,Ymin,Ymax ranges"
def body(self,master):
try:
file = Scan['txtfile']
data = readArray(file)
data = rowreverse(data)
data = array(data)
nc = data.shape[1]
nr = data.shape[0]
Scan['im'] = data
font=SH['font'] #'Verdana 10 bold'
self.title("Set X, Y Ranges for Image Plot")
self.ix = [StringVar(),StringVar(),StringVar(),StringVar()]
Label(master,text='Enter X Plot Range',font=font).grid(row=0,column=0,sticky=W)
Label(master,text='Xmin').grid(row=1,column=1,sticky=W)
Label(master,text='Xmax').grid(row=2,column=1,sticky=W)
Label(master,text='Enter Y Plot Range',font=font).grid(row=3,column=0,sticky=W)
Label(master,text='Ymin').grid(row=4,column=1,sticky=W)
Label(master,text='Ymax').grid(row=5,column=1,sticky=W)
Entry(master,width=14,textvariable=self.ix[0]).grid(row=1,column=2,sticky=W)
Entry(master,width=14,textvariable=self.ix[1]).grid(row=2,column=2,sticky=W)
Entry(master,width=14,textvariable=self.ix[2]).grid(row=4,column=2,sticky=W)
Entry(master,width=14,textvariable=self.ix[3]).grid(row=5,column=2,sticky=W)
self.ix[0].set(1.)
self.ix[1].set(float(nc))
self.ix[2].set(1.)
self.ix[3].set(float(nr))
except AttributeError:
return self.ix[0]
def get(self):
return [self.ix[0].get(),self.ix[1].get(),self.ix[2].get(),self.ix[3].get()]
def apply(self):
ix = self.get()
ix = [string.atof(ix[0]),string.atof(ix[1]),string.atof(ix[2]),
string.atof(ix[3])]
data = Scan['im']
nr = data.shape[0]
nc = data.shape[1]
x = []
dx = (ix[1]-ix[0])/(nc-1)
for i in range(nc):
x.append(ix[0]+dx*i)
y = []
dy = (ix[3]-ix[2])/(nr-1)
for i in range(nr):
y.append(ix[2]+dy*i)
if Scan['updown']:
plot2dUpdown(data,x,y,title=Scan['txtfile'])
else:
plot2d(data,x,y,title=Scan['txtfile'])
class GetXYdialog(Dialog):
"Dialog for define X,Y vector line and column #"
def body(self,master):
try:
font=SH['font'] #'Verdana 10 bold'
self.title("Get X, Y Vectors from ASCII file")
self.ix = [IntVar(),IntVar(),IntVar(),IntVar()]
Label(master,text='X Vector Defined in:',font=font).grid(row=0,column=0,sticky=W)
Label(master,text='Line #').grid(row=1,column=1,sticky=W)
Label(master,text='Start Column #').grid(row=2,column=1,sticky=W)
Label(master,text='Y Vector Defined in:',font=font).grid(row=3,column=0,sticky=W)
Label(master,text='Line #').grid(row=4,column=1,sticky=W)
Label(master,text='Start Column #').grid(row=5,column=1,sticky=W)
Entry(master,width=4,textvariable=self.ix[0]).grid(row=1,column=2,sticky=W)
Entry(master,width=4,textvariable=self.ix[1]).grid(row=2,column=2,sticky=W)
Entry(master,width=4,textvariable=self.ix[2]).grid(row=4,column=2,sticky=W)
Entry(master,width=4,textvariable=self.ix[3]).grid(row=5,column=2,sticky=W)
# cl = Scan['rowcol']
cl = [3,2,4,2]
self.ix[0].set(cl[0])
self.ix[1].set(cl[1])
self.ix[2].set(cl[2])
self.ix[3].set(cl[3])
except AttributeError:
return self.ix[0]
def get(self):
return [self.ix[0].get(),self.ix[1].get(),self.ix[2].get(),self.ix[3].get()]
def apply(self):
ix = self.get()
Scan['rowcol'] = ix
file = Scan['txtfile']
if file != '':
fo = open(file,'r')
lines = fo.read()
fo.close()
lines = string.split(lines,'\n')
px = lines[ix[0]]
px = string.split(px)
x = px[ix[1]:len(px)]
for i in range(len(x)):
x[i] = string.atof(x[i])
py = lines[ix[2]]
py = string.split(py)
y = py[ix[3]:len(py)]
for i in range(len(y)):
y[i] = string.atof(y[i])
Scan['X'] = x
Scan['Y'] = y
file = Scan['txtfile']
data = readArray(file)
data = rowreverse(data)
data = array(data)
if Scan['updown']:
plot2dUpdown(data,x,y,title=file)
else:
plot2d(data,x,y,title=file)
class GetXdialog(Dialog):
"Dialog for defining X column # in text file"
def body(self,master):
font=SH['font'] #'Verdana 10 bold'
self.title("1D Multi-Line Plot")
self.ix = IntVar()
Label(master,text='Define valid X column # from text file:',font=font).pack(anchor=NW)
Label(master,text=Scan['txtfile'],font=font).pack(anchor=NW)
Label(master,text='-1 - No X column defined ').pack(anchor=NW)
Label(master,text=' 0 - X defined at First column').pack(anchor=NW)
Label(master,text=' 1 - X defined at Second column').pack(anchor=NW)
Label(master,text='Enter X Column Index #:',font=font).pack(side=LEFT)
self.ix = Entry(master, width = 4)
self.ix.pack(side=LEFT)
v = self.get()
self.ix.insert(0,v)
return self.ix
def get(self):
# fo.close()
SH = readSH()
ix = SH['ix']
return ix
def apply(self):
ix = self.ix.get()
SH['ix'] = string.atoi(ix)
writeSH(SH)
os.system('plotAscii.py '+Scan['txtfile']+' '+str(ix) +' &')
class pick2Ddetector(Dialog):
"Dialog to pick any detector from the MDA 2D detector list and plot the selected 2D detector image"
def body(self,master):
self.title("Select 2D Detector")
box = Pmw.ScrolledListBox(master,
items=('1','2','3','4'),
labelpos=NW,label_text='Pick Detector',
selectioncommand=self.selectionCommand,
dblclickcommand=self.selectionCommand,
usehullsize=1,hull_width=200,hull_height=200)
box.pack()
self.box = box
def selectionCommand(self):
box = self.box
sels = box.getcurselection()
sels = string.split(sels[0])
sel = string.atoi(sels[0])
Scan['2d'] = sel
d = Scan['data']
pick2d(d,sel,updown=Scan['updown'])
def apply(self):
self.destroy()
class pviewer(AppShell.AppShell):
usecommandarea=1
balloonhelp=1
appversion = '1.0'
appname = 'pviewer'
copyright = 'Copyright ANL-APS-AOD-BCDA. All Rights Reserved'
contactname = 'Ben-chin K Cha'
contactphone = '(630) 252-8653'
contactemail = '[email protected]'
frameWidth = 800
frameHeight = 500
def unimplemented(self):
pass
def messageMDA(self):
box = Pmw.Dialog(self.interior(),
defaultbutton='OK',title='Info')
w = Label(box.interior(),
text='You need to use File->Open MDA...\n to load in an MDA file first',
padx=10,pady=10).pack()
box.activate()
def messageAscii(self):
box = Pmw.Dialog(self.interior(),
defaultbutton='OK',title='Info')
w = Label(box.interior(),
text='You need to use File->Open Ascii...\n to load in an ASCII file first',
padx=10,pady=10).pack()
box.activate()
def savepvs(self):
file = 'pvs'
V = self.apply()
fd = open(file,'w')
fd.write(str(V))
fd.close()
def createButtons(self):
self.buttonAdd('Exit',
helpMessage='Exit pviewer',
statusMessage='Exit pviewer',
command=self.closeup)
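# startup: restore the last-used MDA and ASCII directory paths from pviewer.config,
# falling back to the current directory when no config file exists.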
def startup(self):
if os.path.isfile('pviewer.config'):
lines = readST('pviewer.config')
self.mdapath = lines[0]
self.txtpath = lines[1]
print 'self.mdapath=', self.mdapath
print 'self.txtpath=', self.txtpath
else:
self.mdapath = os.curdir
self.txtpath = os.curdir
def closeup(self):
fo = open('pviewer.config','w')
st = [ self.mdapath,self.txtpath]
# print str(st)
fo.write(str(st))
fo.close()
self.quit()
# def addmenuBar(self):
# self.menuBar.addmenu('Setup','Fields for plot legend')
def addMoremenuBar(self):
self.menuBar.addmenuitem('File', 'command', 'Quit this application',
label='Quit',
command=self.closeup)
self.menuBar.addmenuitem('File', 'command', '', label='--------------')
self.menuBar.addmenuitem('File', 'command',
'Setup Printer ...',
label='Printer...',
command=self.printerDialog)
self.menuBar.addmenuitem('File', 'command', '', label='--------------')
self.menuBar.addmenuitem('File', 'command',
'File Selection dialog for Ascii File ...',
label='Open Ascii ...',
command=self.openAscii)
self.menuBar.addmenuitem('File', 'command', '', label='--------------')
self.menuBar.addmenuitem('File', 'command',
'File Selection dialog for MDA File ...',
label='Open MDA ...',
command=self.openMDA)
self.menuBar.addmenuitem('Help', 'command',
'Online help about this application ...',
label='pviewer_help.txt ...',
command=self.openHelpText)
self.menuBar.addmenuitem('Setup','command',
'Pick and load Color Table for 2D image plot ',
label='Color Table...',
command=self.setCTdialog)
self.menuBar.addmenuitem('Setup','command',
'Modify legend field names used in multiline plot',
label='Name Legends...',
command=self.legenddialog)
self.toggleUpdownVar=IntVar()
self.toggleUpdownVar.set(1)
self.menuBar.addmenuitem('Setup','checkbutton',
'Toggle plot2d updown mode',
label='Image Upside Down',
variable=self.toggleUpdownVar,
command=self.updownImage)
self.menuBar.addmenu('MDAView','Various MDAView features')
self.menuBar.addmenuitem('MDAView','command',
'Access 1D Array and pass to multiline plotter...',
label='Multi-line 1D Plot...',
command=self.mda1DRptPlot)
self.menuBar.addmenuitem('MDAView', 'command', '',
label='--------------')
self.menuBar.addmenuitem('MDAView','command',
'Access panimage window',
label='PanImages...',
command=self.getpanimage)
self.menuBar.addmenuitem('MDAView','command',
'Display 2D image for the select detector',
label='Pick Di Image...',
command=self.get2Ddetector)
self.menuBar.addmenu('MDAReports','Various Report features')
self.menuBar.addmenuitem('MDAReports','command',
'Generate MDA 1D/2D reports',
label='MDA 1D/2D Reports...',
command=self.mdaReport)
self.menuBar.addmenuitem('MDAReports','command',
'Generate sequential MDA 1D report from 2D array',
label='MDA 2D->1D Report...',
command=self.mda2D1DRpt)
self.menuBar.addmenuitem('MDAReports', 'command', '',
label='--------------')
self.menuBar.addmenuitem('MDAReports','command',
'Generate MDA report for current MDA directory',
label='Generate All MDA Report...',
command=self.mdaAllRpt)
self.menuBar.addmenuitem('MDAReports', 'command', '',
label='--------------')
self.menuBar.addmenuitem('MDAReports','command',
'Generate MDA 2D report in IGOR format',
label='MDA to IGOR Report...',
command=self.mdaIGORRpt)
self.menuBar.addmenuitem('MDAReports', 'command', '',
label='--------------')
self.menuBar.addmenuitem('MDAReports','command',
'Show ASCII Report Files',
label='View ASCII Report...',
command=self.showAscii)
self.menuBar.addmenuitem('MDAReports', 'command', '',
label='--------------')
self.menuBar.addmenuitem('MDAReports','command',
'Clear All Files in ASCII directory',
label='Remove All Reports...',
command=self.removeAscii)
self.menuBar.addmenu('AsciiView','Various AsciiView features')
self.menuBar.addmenuitem('AsciiView', 'command', '',
label='--------------')
self.menuBar.addmenuitem('AsciiView','command',
'Enter the zero based X column # in ASCII file',
label='Multi-line Plotter...',
command=self.XcolDialog)
self.menuBar.addmenuitem('AsciiView','command',
'Pick line of DI legend name from the ascii file',
label='Extract & Modify Legend...',
command=self.DIlinedialog)
self.menuBar.addmenuitem('AsciiView', 'command', '',
label='--------------')
self.menuBar.addmenuitem('AsciiView', 'command',
'Pass ascii text data to image plot ...',
label='TV Image ...',
command=self.imageAscii)
self.menuBar.addmenu('Ascii2Image','Plot2D Ascii Image features')
self.menuBar.addmenuitem('Ascii2Image', 'command',
'No X,Y vector defined in ascii file',
label='Plot2d...',
command=self.plot2ddialog)
self.menuBar.addmenuitem('Ascii2Image', 'command',
'User set X,Y ranges dialog',
label='X,Y Range for image...',
command=self.XYrangeDialog)
self.menuBar.addmenuitem('Ascii2Image', 'command', '',
label='--------------')
self.menuBar.addmenuitem('Ascii2Image', 'command',
'Extract the X,Y line vectors from mdaAscii generated file',
label='X,Y Line vector from mdaAscii file...',
command=self.XYrowcolDialog)
self.menuBar.addmenuitem('Ascii2Image', 'command', '',
label='--------------')
self.menuBar.addmenuitem('Ascii2Image', 'command',
'Extract X,Y,Data from scanSee/catcher/yviewer generated file',
label='X column, Y line, DATA column from ascii file...',
command=self.XYVDialog)
self.menuBar.addmenu('ScanTools','Various scan programs')
self.menuBar.addmenuitem('ScanTools','command',
'Run plot.py python program',
label='Python plot.py ...',
command=self.runPlot)
self.menuBar.addmenuitem('ScanTools', 'command', '',
label='--------------')
self.menuBar.addmenuitem('ScanTools','command',
'Run idlvm sscan (scanSee) program',
label='idlvm sscan ...',
command=self.runSscan)
self.menuBar.addmenuitem('ScanTools','command',
'Run idlvm catcher (catcher) program',
label='idlvm catcher ...',
command=self.runCatcher)
self.menuBar.addmenuitem('ScanTools','command',
'Run idlvm mca (MCA) program',
label='idlvm mca ...',
command=self.runMCA)
self.menuBar.addmenu('Tools','Various system tools')
self.menuBar.addmenuitem('Tools','command',
'Run start_epics program',
label='start_epics ...',
command=self.runMedm)
self.menuBar.addmenuitem('Tools', 'command', '',
label='--------------')
self.menuBar.addmenuitem('Tools', 'command',
'Enter any valid command syntax ...',
label='Command Dialog...',
command=self.commandDialog)
def runPlot(self):
os.system('plot.py & ')
def runSscan(self):
os.system('idlvm sscan & ')
def runCatcher(self):
os.system('idlvm catcher & ')
def runMCA(self):
os.system('idlvm mca & ')
def runMedm(self):
h = os.getenv('HOME')
os.system(h +'/start_epics & ')
def commandDialog(self):
cmd = commandSyntax(self.interior())
def printerDialog(self):
setupPrinter(self.interior())
def removeAscii(self):
from Dialog import *
# dir = os.getcwd() +os.sep+'ASCII'+os.sep+'*.txt'
dir = self.txtpath+os.sep+'*.txt'
dir = 'rm -fr '+dir
pa = {'title': 'Remove ASCII files',
'text': dir + '\n\n'
'All ascii text files will be removed\n'
'from the sub-directory ASCII.\n'
'Is it OK to remove all files ?\n ',
'bitmap': DIALOG_ICON,
'default': 1,
'strings': ('OK','Cancel')}
dialog = Dialog(self.interior(),pa)
ans = dialog.num
if ans == 0:
print dir
os.system(dir)
def showAscii(self):
fname = tkFileDialog.askopenfilename(initialdir=self.txtpath,initialfile="*txt*")
if fname == (): return
xdisplayfile(fname)
def mdaIGORRpt(self):
if Scan['open']:
d = self.MDA
if d[0]['rank'] < 2:
return
fname = self.mdafile
ofname = mdaAscii_IGOR(d)
if self.textWid != None:
self.textWid.destroy()
self.textWid = None
st = Pmw.ScrolledText(self.interior(),borderframe=1,labelpos=N,
label_text=ofname,usehullsize=1,
hull_width=800,hull_height=400,
text_padx=10,text_pady=10,
text_wrap='none')
fo = open(ofname,'r')
st_text = fo.read()
fo.close()
st.settext(st_text)
st.pack(fill=BOTH, expand=1, padx=5, pady=5)
self.textWid = st
self.textfile = ofname
SH['ix'] = -1
writeSH(SH)
(self.txtpath,fn) = os.path.split(ofname)
else:
self.messageMDA()
def mdaAllRpt(self):
if self.textWid != None:
self.textWid.destroy()
self.textWid = None
st = Pmw.ScrolledText(self.interior(),borderframe=1,labelpos=N,
label_text='MDA file from: '+self.mdapath,usehullsize=1,
hull_width=800,hull_height=400,
text_padx=10,text_pady=10,
text_wrap='none')
st.pack()
st.settext('Reports saved in: '+os.getcwd()+os.sep+'ASCII')
self.textWid=st
mdaAscii_all(self.mdapath)
def mda2D1DRpt(self):
# d = readMDA.readMDA(fname, 1, 0, 0)
if Scan['open']:
d = self.MDA
if d[0]['rank'] < 2: return
if d[2].nd == 0: return
fname = self.mdafile
ofname = mdaAscii_2D1D(d)
if self.textWid != None:
self.textWid.destroy()
self.textWid = None
st = Pmw.ScrolledText(self.interior(),borderframe=1,labelpos=N,
label_text=ofname,usehullsize=1,
hull_width=800,hull_height=400,
text_padx=10,text_pady=10,
text_wrap='none')
fo = open(ofname,'r')
st_text = fo.read()
fo.close()
st.settext(st_text)
st.pack(fill=BOTH, expand=1, padx=5, pady=5)
self.textWid = st
self.textfile = ofname
Scan['txtfile'] = ofname
SH['ix'] = 0
(self.txtpath,fn) = os.path.split(ofname)
def mda2DRpt(self):
# d = readMDA.readMDA(fname, 1, 0, 0)
if Scan['open']:
d = self.MDA
fname = self.mdafile
if d[1].nd > 0 :
ofname = mdaAscii_1D(d)
if d[0]['rank'] < 2: return
if d[2].nd == 0 : return
ofname = mdaAscii_2D(d)
py = d[1].p[0].data
px = d[2].p[0].data
px = px[0]
Scan['X'] = px
Scan['Y'] = py
Scan['txtfile'] = ofname
if self.textWid != None:
self.textWid.destroy()
self.textWid = None
st = Pmw.ScrolledText(self.interior(),borderframe=1,labelpos=N,
label_text=ofname,usehullsize=1,
hull_width=800,hull_height=400,
text_padx=10,text_pady=10,
text_wrap='none')
fo = open(ofname,'r')
st_text = fo.read()
fo.close()
st.settext(st_text)
st.pack(fill=BOTH, expand=1, padx=5, pady=5)
self.textWid = st
self.textfile = ofname
SH['ix'] = -1
writeSH(SH)
(self.txtpath,fn) = os.path.split(ofname)
else:
self.messageMDA()
def mda1DRptPlot(self):
self.mda1DRpt()
self.plotAscii()
def mdaReport(self):
d = self.MDA
if d[0]['rank'] == 1:
self.mda1DRpt()
if d[0]['rank'] >= 2:
self.mda2DRpt()
def mda1DRpt(self):
# d = readMDA.readMDA(fname, 1, 0, 0)
if Scan['open']:
d = self.MDA
fname = self.mdafile
ofname = mdaAscii_1D(d)
if self.textWid != None:
self.textWid.destroy()
self.textWid = None
st = Pmw.ScrolledText(self.interior(),borderframe=1,labelpos=N,
label_text=ofname,usehullsize=1,
hull_width=800,hull_height=400,
text_padx=10,text_pady=10,
text_wrap='none')
fo = open(ofname,'r')
st_text = fo.read()
fo.close()
st.settext(st_text)
st.pack(fill=BOTH, expand=1, padx=5, pady=5)
self.textWid = st
self.textfile = ofname
Scan['txtfile'] = ofname
SH['ix'] = 0
(self.txtpath,fn) = os.path.split(ofname)
else:
self.messageMDA()
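# colorbar: draw a 256-step color ramp strip on the canvas; executeCT rebuilds it
# after a new color table has been selected.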
def colorbar(self):
W = 256
clrbar =[]
for j in range(10):
clrbar.append(range(W))
clrbar = array(clrbar)
imagebar = PNGImage(self.canvas,clrbar,(2,2))
imagebar.pack(side='top')
self.imagebar = imagebar
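# executeCT: write the selected color table to pal.dat and redraw the colorbar strip.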
def executeCT(self):
sels = self.textWid.getcurselection()
sels = string.split(sels[0])
CT_id = string.atoi(sels[0])
ps = str(CT[CT_id])
fo = open('pal.dat','wb')
fo.write(ps)
fo.close()
self.imagebar.destroy()
self.colorbar()
def setCTdialog(self):
if self.textWid != None:
self.textWid.destroy()
self.textWid = None
CT = readCT()
CT_id=39
frame = self.interior()
self.canvas = Canvas(frame,width=300,height=50)
self.canvas.pack()
self.colorbar()
dname=('0 B-W LINEAR','1 BLUE/WHITE','2 GRN-RED-BLU-WHT',
'3 RED TEMPERATURE','4 BLUE/GREEN/RED/YELLOW','5 STD GAMMA-II',
'6 PRISM','7 RED-PURPLE','8 GREEN/WHITE LINEAR',
'9 GRN/WHT EXPONENTIAL','10 GREEN-PINK','11 BLUE-RED',
'12 16-LEVEL','13 RAINBOW','14 STEPS',
'15 STERN SPECIAL','16 Haze','17 Blue-Pastel-Red',
'18 Pastels','19 Hue Sat Lightness1','20 Hue Sat Lightness2',
'21 Hue Sat Value 1','22 Hue Sat Value 2','23 Purple-Red + Stripes',
'24 Beach','25 Mac Style','26 Eos A',
'27 Eos B','28 Hardcandy','29 Nature',
'30 Ocean','31 Peppermint','32 Plasma',
'33 Blue-Red','34 Rainbow',
'35 Blue Waves','36 Volcano','37 Waves',
'38 Rainbow18','39 Rainbow + white','40 Rainbow + black')
box = Pmw.ScrolledListBox(frame,
labelpos=N,label_text='Color Table #',
items=dname,
listbox_height=5,vscrollmode='static',
selectioncommand= self.executeCT,
dblclickcommand= self.executeCT,
usehullsize=1, hull_width=400, hull_height=200)
# box.pack(fill=BOTH,expand=1,padx=10,pady=10)
box.pack()
self.textWid = box
def selectionCommand(self):
box = self.textWid
sels = box.getcurselection()
sels = string.split(sels[0])
sel = string.atoi(sels[0])
Scan['2d'] = sel
d = self.MDA
pick2d(d,sel,updown=Scan['updown'])
def get2Ddetector(self):
if self.mdafile == '':
self.messageMDA()
return
if self.textWid != None:
self.textWid.destroy()
self.textWid = None
root = self.interior()
d = self.MDA
nd = d[2].nd
dname =[]
for i in range(nd):
lst = str(i) + ' '+d[2].d[i].fieldName +' ' + d[2].d[i].name +' '+ d[2].d[i].desc +' '+d[2].d[i].unit
dname.append(lst)
box = Pmw.ScrolledListBox(root,
labelpos=N,label_text='2D Image Seq #',
items=(dname[0:nd]),
listbox_height=5,vscrollmode='static',
selectioncommand= self.selectionCommand,
dblclickcommand= self.selectionCommand,
usehullsize=1, hull_width=500, hull_height=200)
# box.pack(fill=BOTH,expand=1,padx=10,pady=10)
box.pack()
self.textWid = box
def getpanimage(self):
file = self.mdafile
if file != '':
d = self.MDA
pal = readPalette()
if d[0]['rank'] > 1:
det2D(d[2].d[0:d[2].nd],scale=(1,1),columns=5,file=file,pal=pal)
else:
self.messageMDA()
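# headerMDA: append the scan name, NPTS/CURR_PT counts and the detector descriptions
# of dimension J to st_text.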
def headerMDA(self,d,J,st_text):
try:
if d[J].nd > 0:
st_text = st_text+d[J].scan_name+'\n'
st_text = st_text+'NPTS: '+str(d[J].npts)+'\n'
st_text = st_text+'CURR_PT: '+str(d[J].curr_pt)+'\n'
st_text = st_text + '**'+str(J)+'D detectors**\n'
for i in range(d[J].nd):
st_text=st_text+d[J].d[i].fieldName+' : '+d[J].d[i].name+', '+d[J].d[i].desc+', '+d[J].d[i].unit+'\n'
except IndexError:
pass
return st_text
def openMDA(self):
fname = askopenfilename( initialdir=self.mdapath,
filetypes=[("MDA File", '.mda'),
("All Files","*")])
if fname =='':
return
self.mdafile = fname
(self.mdapath, fn) = os.path.split(fname)
d = readMDA(fname)
self.MDA = d
Scan['data'] = d
Scan['open'] = 1
st_text = 'Please use ViewMDA menu to access MDA 1D/2D data array\n\n'
try:
if d[1].nd > 0:
st_text = self.headerMDA(d,1,st_text)
if d[1].nd > 0:
V=[]
for i in range(85):
V.append('')
for i in range(d[1].nd):
V[i] = d[1].d[i].fieldName
file='pvs'
fd = open(file,'w')
fd.write(str(V))
fd.close()
except IndexError:
pass
try:
if d[2].nd > 0:
st_text = self.headerMDA(d,2,st_text)
except IndexError:
pass
try:
if d[3].nd > 0:
st_text = self.headerMDA(d,3,st_text)
except IndexError:
pass
if self.textWid != None:
self.textWid.destroy()
self.textWid = None
st = Pmw.ScrolledText(self.interior(),borderframe=1,labelpos=N,
label_text=fname,usehullsize=1,
hull_width=800,hull_height=400,
text_padx=10,text_pady=10,
text_wrap='none')
st.settext(st_text)
st.pack(fill=BOTH, expand=1, padx=1, pady=1)
self.textWid = st
def openHelpText(self):
if self.textWid != None:
self.textWid.destroy()
self.textWid = None
fname = os.environ['PYTHONSTARTUP']+os.sep+'pviewer_help.txt'
st = Pmw.ScrolledText(self.interior(),borderframe=1,labelpos=N,
label_text=fname,usehullsize=1,
hull_width=800,hull_height=400,
text_padx=10,text_pady=10,
text_wrap='none')
st.importfile(fname)
st.pack(fill=BOTH, expand=1, padx=1, pady=1)
self.textWid = st
def openAscii(self):
fname = askopenfilename(initialdir=self.txtpath,
filetypes=[("ASCII Data", '.txt'),
("Image Files","*im*"),
("Data Files",".dat"),
("All Files","*")])
if fname == '':
return
(self.txtpath,fn) = os.path.split(fname)
Scan['txtfile'] = fname
self.textfile = fname
if self.textWid != None:
self.textWid.destroy()
self.textWid = None
st = Pmw.ScrolledText(self.interior(),borderframe=1,labelpos=N,
label_text=fname,usehullsize=1,
hull_width=800,hull_height=400,
text_padx=10,text_pady=10,
text_wrap='none')
st.importfile(fname)
st.pack(fill=BOTH, expand=1, padx=1, pady=1)
self.textWid = st
def imageAscii(self):
if self.textfile != '':
file = self.textfile
data = readArray(file)
data = rowreverse(data)
TV(data)
else:
self.messageAscii()
def plot2ddialog(self):
if self.textfile != '':
file = self.textfile
data = readArray(file)
data = rowreverse(data)
nr = len(data)
nc = len(data[0])
x = range(nc)
y = range(nr)
data = array(data)
if Scan['updown']:
plot2dUpdown(data,x,y,title=file)
else:
plot2d(data,x,y,title=file)
else:
self.messageAscii()
def plotAscii(self):
if self.textfile == '':
self.messageAscii()
return
try:
os.system('plotAscii.py '+self.textfile+' &')
except AttributeError:
pass
def XYrowcolDialog(self):
file = Scan['txtfile']
if file == '':
self.messageAscii()
return
ix = GetXYdialog(self.interior())
def XYVDialog(self):
file = Scan['txtfile']
if file == '':
self.messageAscii()
return
ix = GetXYVdialog(self.interior())
def XYrangeDialog(self):
file = Scan['txtfile']
if file == '':
self.messageAscii()
return
ix = defineXYdialog(self.interior())
def XcolDialog(self):
if self.textfile == '':
self.messageAscii()
else:
Scan['txtfile'] = self.textfile
ix=GetXdialog(self.interior())
def legenddialog(self):
# dialog=GetLegends(self.interior())
GetLegends(self.interior())
def DIlinedialog(self):
file = Scan['txtfile']
if file == '': return
dialog=pickDIdialog(self.interior())
def updownImage(self):
Scan['updown'] = self.toggleUpdownVar.get()
def pick2Ddialog(self):
if Scan['open']:
dialog=pick2Ddetector(self.interior())
def createInterface(self):
AppShell.AppShell.createInterface(self)
self.addMoremenuBar()
# self.createButtons()
self.textWid = None
self.mdafile = ''
self.textfile = ''
self.startup()
if __name__ == '__main__':
SH = {'ix': 0, 'printer': '', 'font': 'Verdana 10 bold', }
if os.path.isfile('SH'):
SH = readSH()
else:
writeSH(SH)
Scan = { 'open': 0,
'2d': 0,
'updown': 1,
'1d': 0,
'nc': 0,
'CT': 39,
'rowcol': [3,2,4,2],
'txtfile': '',
'pvs1': None,
'pvs2': None,
'pvs3': None,
'X': None,
'Y': None,
'im': None,
'data': None }
CT = readCT()
pt = pviewer()
pt.run()
from pylibelf import *
from pylibelf.types import *
from pylibelf.iterators import *
from pylibelf.constants import *
from pylibelf.util import *
from pylibelf.util.syms import *
from pylibelf.macros import *
from bisect import bisect_left
import pylibelf.util
import pylibelf
import types
import os
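# Small interval helpers: _inrange(x, a, b) tests a <= x < b; _overlap(a, b, c, d)
# tests whether the closed ranges [a, b] and [c, d] intersect.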
def _inrange(x, a,b):
return x>=a and x < b
def _overlap(a, b, c, d):
return a <= d and c <= b
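# Bunch: minimal attribute/item container used by BaseElfNode.extract to hold snapshot data.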
class Bunch:
def __setitem__(self, k, v): self.__dict__[k] = v
def __getitem__(self, k): return self.__dict__[k]
class BaseElfNode(object):
@staticmethod
def extract(obj):
return BaseElfNode._extract(obj, {})
@staticmethod
def _extract(obj, m):
""" Given a BaseElfNode object extract a static snapshot of the current
object and its children that does not refer to the parent or any pylibelf
objects
"""
if isinstance(obj, BaseElfNode):
if obj in m:
return m[obj]
res = Bunch()
m[obj] = res
for attr in dir(obj):
if (isinstance(obj, ElfSym) and attr == 'contents' and not obj.defined):
v = None
elif (isinstance(obj, ElfScn) and (attr == 'info_scn' or attr == 'link_scn' or attr == 'index')):
try:
v = getattr(obj, attr)
except ElfError: # This section doesn't have a info_scn or a link_scn
v = None
else:
v = getattr(obj, attr)
if hasattr(v, "__call__"):
# This is a function - ignore
continue
try:
res[attr] = BaseElfNode._extract(v, m)
except AttributeError: pass
return res
elif type(obj) == list:
return map(lambda x: BaseElfNode._extract(x, m), obj)
elif type(obj) == tuple:
return tuple(map(lambda x: BaseElfNode._extract(x, m), obj))
elif type(obj) == dict:
return dict([(BaseElfNode._extract(k, m), BaseElfNode._extract(v, m)) for (k, v) in obj.items()])
elif type(obj) in [int, str, long, bool, types.NoneType]:
return obj
else:
print type(obj), obj
return None
def __init__(self, elf, pt, obj, typ = None, addFields = []):
assert(pt == None or isinstance(pt, BaseElfNode))
self._elf = elf
self._pt = pt
self._obj = obj
self._ptr = cast(self._obj, c_void_p).value
self._typ = typ
# All object's memoization cache points to the root elf file's memoization cache
if (isinstance(self, Elf)):
self._cache = {}
else:
while (not isinstance(pt, Elf)): pt = pt._pt
self._cache = pt._cache
self._fields = []
if self._typ != None:
self._fields += map(lambda x: x[0], self._typ._fields_)
self._fields += addFields
def _select(self, name): return select(self._elf, name)
def __getattr__(self, name):
cache = self._cache
key = (self._ptr, name)
if (key in cache):
return cache[key]
res = self._getattr_impl(name)
if (isinstance(res, types.GeneratorType)):
cache[key] = list(res)
else:
cache[key] = res
return res
def _getattr_impl(self, name):
try:
if (self._obj != None):
inner = self._obj.contents
else:
return 0
except AttributeError:
raise Exception("Can't access %s in %s - not a pointer" % \
(name, str(self._obj)))
return getattr(inner, name)
def _getelf(self):
p = self
while not isinstance(p, Elf):
p = p._pt
return p
def _class(self):
return pylibelf.util._class(self._elf)
def __dir__(self):
return self._fields
class ElfEhdr(BaseElfNode):
def __init__(self, elf, pt, obj):
BaseElfNode.__init__(self, elf, pt, obj,
Elf64_Ehdr if is64(elf) else Elf32_Ehdr, [])
class ElfShdr(BaseElfNode):
def __init__(self, elf, pt, obj):
BaseElfNode.__init__(self, elf, pt, obj,
Elf64_Shdr if is64(elf) else Elf32_Shdr, ['name'])
def _getattr_impl(self, name):
if (name == "name"):
return elf_strptr(self._elf, self._pt._pt.ehdr.e_shstrndx, self._obj.contents.sh_name)
else:
return BaseElfNode._getattr_impl(self, name)
class ElfSym(BaseElfNode):
def __init__(self, elf, pt, obj):
BaseElfNode.__init__(self, elf, pt, obj,
Elf64_Sym if is64(elf) else Elf32_Sym, ['name', 'section', 'defined', \
'contents', 'type', 'binding', 'targetScn', 'index'])
def _getattr_impl(self, name):
if (name == "name"):
return elf_strptr(self._elf, self._pt.shdr.sh_link, self._obj.contents.st_name)
elif (name == "section"):
return self._pt
elif (name == "defined"):
return self.st_shndx != SHN_UNDEF
elif (name == "type"):
if is64(self._elf):
return ELF64_ST_TYPE(self.st_info)
else:
return ELF32_ST_TYPE(self.st_info)
elif (name == "binding"):
if is64(self._elf):
return ELF64_ST_BIND(self.st_info)
else:
return ELF32_ST_BIND(self.st_info)
elif (name == "targetScn"):
return self._pt._pt.section(self.st_shndx)
elif (name == "contents"):
targetSec = self._pt._pt.section(self.st_shndx)
relas = []
for relaScn in targetSec.relaScns:
# [self.st_value ...
start = bisect_left(relaScn.relas, self.st_value)
# ... self.st_value + self.st_size)
end = bisect_left(relaScn.relas, self.st_value + self.st_size)
relas.extend(relaScn.relas[start:end])
# Testing only
#for r in relas:
# assert(r.r_offset >= self.st_value and r.r_offset < self.st_value + self.st_size)
#TODO: rels
rels = []
mem = targetSec.memInRange(self.st_value, self.st_size)
return (mem, rels, relas)
elif (name == "index"):
size = sizeof(self._typ)
ptr = cast(self._obj, c_voidp).value
ind = None
for d in self.section.data():
if d.d_buf <= ptr and d.d_buf + d.d_size > ptr:
assert (ptr - d.d_buf) % size == 0, "Misaligned symbol pointer %d in section %s" % \
(ptr, self.section.shdr.name)
ind = (ptr - d.d_buf) / size
assert ind != None, "Symbol not found in section!"
return ind
else:
return BaseElfNode._getattr_impl(self, name)
class ElfRela(BaseElfNode):
def __init__(self, elf, pt, obj):
BaseElfNode.__init__(self, elf, pt, obj, \
Elf64_Rela if is64(elf) else Elf32_Rela, ['sym'])
def _getattr_impl(self, name):
if (name == "sym"):
elfO = self._getelf()
scn = elfO.section(self._pt.shdr.sh_link)
symInd = ELF64_R_SYM(self.r_info) if is64(self._elf) else \
ELF32_R_SYM(self.r_info)
return ElfSym(self._elf, scn, scn.sym(symInd)._obj)
else:
return BaseElfNode._getattr_impl(self, name)
def __cmp__(self, other):
if type(other) == long or type(other) == int:
if self.r_offset < other:
return -1
elif self.r_offset == other:
return 0
else:
return 1
raise Exception("NYI")
class ElfRel(BaseElfNode):
def __init__(self, elf, pt, obj):
BaseElfNode.__init__(self, elf, pt, obj, \
Elf64_Rel if is64(elf) else Elf32_Rel, ['sym'])
def _getattr_impl(self, name):
if (name == "sym"):
elfO = self._getelf()
scn = elfO.section(self._pt.shdr.sh_link)
symInd = ELF64_R_SYM(self.r_info) if is64(self._elf) else \
ELF32_R_SYM(self.r_info)
return ElfSym(self._elf, scn, scn.sym(symInd)._obj)
else:
return BaseElfNode._getattr_impl(self, name)
class ElfData(BaseElfNode):
def __init__(self, elf, pt, obj):
BaseElfNode.__init__(self, elf, pt, obj, Elf_Data, [])
class ElfArhdr(BaseElfNode):
def __init__(self, elf, pt, obj):
BaseElfNode.__init__(self, elf, pt, obj, Elf_Arhdr, [])
class ElfScn(BaseElfNode):
def __init__(self, elf, pt, obj):
BaseElfNode.__init__(self, elf, pt, obj, Elf_Scn,\
['index', 'shdr', 'link_scn', 'info_scn', 'syms', 'relas', 'relaScns', 'sym', 'data', 'memInRange',
'relasInRange', 'strAtAddr'])
def _getattr_impl(self, name):
if (name == "index"):
return elf_ndxscn(self._obj)
elif (name == "shdr"):
return ElfShdr(self._elf, self, select(self._elf, 'getshdr')(self._obj))
elif (name == "link_scn" and self.shdr.sh_link != SHN_UNDEF):
return ElfScn(self._elf, self._pt, elf_getscn(self._elf, \
self.shdr.sh_link))
elif (name == "info_scn" and (self.shdr.sh_type == SHT_REL or \
self.shdr.sh_type == SHT_RELA)):
return ElfScn(self._elf, self._pt, elf_getscn(self._elf, \
self.shdr.sh_info))
elif (name == "syms" and self.shdr.sh_type in [SHT_SYMTAB, SHT_DYNSYM]):
symT = Elf32_Sym if (is32(self._elf)) else Elf64_Sym
return reduce(lambda a,c: a+c, \
map(lambda d: map(lambda sym: ElfSym(self._elf, self, pointer(sym)), \
list(arr_iter(d, symT))), list(data(self._obj))))
elif (name == "relas" and self.shdr.sh_type == SHT_RELA):
relaT = Elf32_Rela if (is32(self._elf)) else Elf64_Rela
return reduce(lambda a,c: a+c, \
map(lambda d: map(lambda rela: ElfRela(self._elf, self, pointer(rela)),\
list(arr_iter(d, relaT))), list(data(self._obj))))
elif (name == "relaScns"):
return [s for s in self._pt.sections if s.shdr.sh_info == self.index\
and s.shdr.sh_type == SHT_RELA]
elif (name == "name"):
return self.shdr.name
else:
return BaseElfNode._getattr_impl(self, name)
def sym(self, ind):
shtype = self.shdr.sh_type
if shtype not in [SHT_SYMTAB, SHT_DYNSYM]:
raise Exception("Section %s does not contain symbols" % (self.shdr.name,))
return self.syms[ind]
def data(self):
d = None
while True:
d = elf_getdata(self._obj, d)
if not bool(d): break
yield ElfData(self._elf, self, d)
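# memInRange: return the raw bytes of this section lying in [start, start + size),
# walking the section's Elf_Data buffers (start is a virtual address based on sh_addr).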
def memInRange(self, start, size):
r = ''
off = 0
base = self.shdr.sh_addr
end = start + size
for d in self.data():
if start >= end: break
off = base + d.d_off
if start >= off and start < off + d.d_size:
c = cast(d.d_buf, POINTER(c_char))
l = min(off + d.d_size, end) - start
r += c[start- off : start - off + l]
start += l
return r
def relasInRange(self, start, size):
relas = []
for relaScn in self.relaScns:
# [start ...
lo = bisect_left(relaScn.relas, start)
# ... start + size)
hi = bisect_left(relaScn.relas, start + size)
relas.extend(relaScn.relas[lo:hi])
return relas
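# strAtAddr: read the NUL-terminated string starting at virtual address ptr
# from this section's data buffers.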
def strAtAddr(self, ptr):
r = ''
off = 0
base = self.shdr.sh_addr
start = ptr - base
for d in self.data():
off = d.d_off
c = cast(d.d_buf, POINTER(c_char))
while (start >= off and start < off + d.d_size):
if c[start] == '\x00':
break
r += c[start]
start += 1
return r
class Elf(BaseElfNode):
def __init__(self, elf, pt=None, claz = None):
if type(elf) == str:
self.fd = os.open(elf, os.O_RDONLY)
elf = elf_begin(self.fd, ELF_C_READ, None)
elif isinstance(elf, ElfP):
self.fd = None
else:
raise Exception("Invalid input to Elf.__init__(): %s" % (str(elf), ))
if claz != None:
self._class = claz
else:
self._class = pylibelf.util._class(elf)
BaseElfNode.__init__(self, elf, pt, elf, pylibelf.types.Elf, \
['ehdr', 'shstrndx', 'arhdr', 'sections', 'section', 'syms', 'findSym'])
self._symsMap = dict([
(sym.name, sym) for sym in self.syms()
])
self._secMap = dict([
(elf_ndxscn(s._obj), s) for s in self.sections
])
nullScn = ElfScn(self._elf, self, None)
self._secMap[0] = nullScn
def finalize(self):
elf_end(self._elf)
if self.fd != None:
os.close(self.fd)
def _getattr_impl(self, name):
if (name == "ehdr"):
return ElfEhdr(self._elf, self, self._select("getehdr")(self._elf))
elif (name == "shstrndx"):
return self.ehdr.e_shstrndx
elif (name == "arhdr"):
arhdr = elf_getarhdr(self._elf)
if (bool(arhdr)):
return ElfArhdr(self._elf, self, arhdr)
else:
raise AttributeError("Elf file doesn't have an arhdr")
elif (name == "sections"):
return [ ElfScn(self._elf, self, pointer(s)) for s in
sections(self._elf) ]
elif (name == "relasMap"):
return dict([(s.index, s.relas) \
for s in self.sections if s.shdr.sh_type == SHT_RELA])
else:
return BaseElfNode._getattr_impl(self, name)
def section(self, ind):
return self._secMap[ind]
def syms(self):
for scn in self.sections:
if scn.shdr.sh_type != SHT_SYMTAB and scn.shdr.sh_type != SHT_DYNSYM:
continue
for sym in syms(self._elf, scn._obj.contents):
yield ElfSym(self._elf, scn, pointer(sym[1]))
def findSym(self, name):
try:
return self._symsMap[name]
    except KeyError:
return None
def deref(self, addr, size):
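    # Resolve a virtual address range to (raw bytes, [], relocations) using the
    # single section that contains it; returns None if no section overlaps.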
r = None
for s in self.sections:
# TODO(dbounov): Hack, due to .tbss overlapping other sections. Figure out correct way to deal with this.
if s.shdr.name == ".tbss":
continue
if _overlap(addr, addr+size - 1, s.shdr.sh_addr, s.shdr.sh_addr + s.shdr.sh_size - 1):
assert r == None # Currently support address ranges in a single section only
r = (s.memInRange(addr, size), [], s.relasInRange(addr, size) )
return r
class Ar:
def __init__(self, fname, claz):
self._fname = fname
self._class = claz
def elfs(self):
self.fd = os.open(self._fname, os.O_RDONLY)
ar = elf_begin(self.fd, ELF_C_READ, None)
while True:
e = elf_begin(self.fd, ELF_C_READ, ar)
if (not bool(e)): break
yield Elf(e, None, self._class)
elf_end(ar)
os.close(self.fd)
__all__ = [ 'BaseElfNode', 'ElfEhdr', 'ElfShdr', 'ElfSym', 'ElfRela', \
'ElfData', 'ElfArhdr', 'ElfScn', 'Elf', 'Ar' ]
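# A minimal usage sketch (not part of the library; assumes an ELF binary at
# ./a.out and that attribute access on ElfSym forwards struct members such as
# st_value/st_size):
#
#   e = Elf("./a.out")
#   text = [s for s in e.sections if s.name == ".text"][0]
#   main_sym = e.findSym("main")
#   data = e.deref(main_sym.st_value, main_sym.st_size) if main_sym else None
#   e.finalize()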
|
# -*- coding: utf-8 -*-
"""
Created on Sat Sep 23 19:05:35 2017
@author: SuperKogito
"""
# Define imports
import tkinter as tk
class ExitPage(tk.Frame):
""" Exit page class """
def __init__(self, parent, controller):
tk.Frame.__init__(self, parent)
self.controller = controller
self.configure(background='black')
# Define main frame
self.main_frame = tk.Frame(self, background='black')
self.main_frame.pack(expand=1)
self.main_frame.pack()
# Define upper frame
upper_frame = tk.Frame(self.main_frame, width=300, height=50,
background='black')
upper_frame.grid(column=0, row=0)
# Define label
        exit_string = '\n\nAre you sure that you want to exit Cryptos?\n'
exit_label = tk.Label(upper_frame, text=exit_string,
background='black', foreground="white")
exit_label.pack(side="top", fill="x", pady=10)
# Define middle frame
middle_frame = tk.Frame(self.main_frame, background='black',
width=300, height=50)
middle_frame.grid(column=0, row=1)
# Define cancel button
cancel_button = tk.Button(middle_frame, text="Cancel",
command=lambda: controller.show_frame("PageOne"))
cancel_button.pack(side=tk.RIGHT)
# Define yes button
yes_button = tk.Button(middle_frame, text="Yes",
command=lambda: controller.quit_func())
yes_button.pack(side=tk.RIGHT, padx=5, pady=5)
# Configure the buttons
cancel_button.configure(background='black', foreground='white',
activebackground='#0080ff',
activeforeground='white')
yes_button.configure(background='black', foreground='white',
activebackground='#0080ff',
activeforeground='white')
# Define lower frame
lower_frame = tk.Frame(self.main_frame, background='black',
width=300, height=50)
lower_frame.grid(column=0, row=2)
# Define label
dev_text = (
"\nDeveloped by: SuperKogito\n"
"Gihthub repository: "
"https://github.com/SuperKogito/Cryptos"
)
        self.developer_text = tk.Label(lower_frame,
                                       text=dev_text,
                                       background='black',
                                       foreground='white')
        self.developer_text.pack(side="bottom")
|
#!/usr/bin/python
import lldb
import fblldbbase as fb
def lldbcommands():
return [
PrintDebugInformation(),
PrintUnifications(),
PrintTypes(),
PrintMachineTypes(),
PrintRewriteCalls(),
RecorderDump()
]
class PrintDebugInformation(fb.FBCommand):
def name(self):
return 'xl'
def description(self):
return "Pretty-print an XL compiler entity"
def args(self):
return [
fb.FBCommandArgument(arg='object',
type='XL compiler type',
help='Value to print.')
]
def run(self, arguments, options):
lldb.debugger.HandleCommand('p xldebug(%s)' % arguments[0])
class PrintUnifications(fb.FBCommand):
def name(self):
return 'xlu'
def description(self):
return "Show XL type unifications"
def run(self, arguments, options):
lldb.debugger.HandleCommand('p XL::Types::DumpUnifications()')
class PrintTypes(fb.FBCommand):
def name(self):
return 'xlt'
def description(self):
return "Show XL types"
def run(self, arguments, options):
lldb.debugger.HandleCommand('p XL::Types::DumpTypes()')
class PrintMachineTypes(fb.FBCommand):
def name(self):
return 'xlm'
def description(self):
return "Show XL machine types"
def run(self, arguments, options):
lldb.debugger.HandleCommand('p XL::Types::DumpMachineTypes()')
class PrintRewriteCalls(fb.FBCommand):
def name(self):
return 'xlr'
def description(self):
return "Show XL rewrite calls"
def run(self, arguments, options):
lldb.debugger.HandleCommand('p XL::Types::DumpRewriteCalls()')
class RecorderDump(fb.FBCommand):
def name(self):
return 'rdump'
def description(self):
return "Dump the flight recorder"
def run(self, arguments, options):
lldb.debugger.HandleCommand('p recorder_dump()')
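# A minimal usage sketch (assumes this file lives in a directory scanned by a
# chisel-style fblldb loader; the loader path and the argument name below are
# hypothetical):
#
#   (lldb) command script import /path/to/fblldb.py
#   (lldb) xl tree_pointer        # pretty-print an XL compiler entity
#   (lldb) rdump                  # dump the flight recorder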
|
from django.core.urlresolvers import reverse
from django.shortcuts import get_object_or_404, render_to_response
from django.utils.functional import memoize
from django.views.decorators.csrf import csrf_exempt
from django.views.generic import TemplateView, ListView as BaseListView
from bootstrap.views import (ListView,
CreateView,
UpdateView,
DeleteView)
from .models import (Post,
Comment,
User)
from .forms import CommentForm
class IndexView(BaseListView):
"""
List published posts
"""
template_name = 'index.html'
queryset = Post.objects.filter(state='published')
class SingleView(TemplateView):
"""
Display a single post
"""
template_name = 'single.html'
def get_context_data(self, slug):
context = super(SingleView, self).get_context_data()
context['object'] = get_object_or_404(Post, slug=slug)
return context
@csrf_exempt
def preview(request, template_name='preview.html'):
"""
Post preview
TODO: Transform into a TemplateView class
"""
text = request.POST.get('text', '')
data = {'text': text}
return render_to_response(template_name, data)
class PostCreateView(CreateView):
"""
Post creation view.
Set the current user as post author
    If the post gets updated, the first author remains
"""
def get_form(self, form_class):
form = super(PostCreateView, self).get_form(form_class)
form.instance.user = self.request.user
return form
def get_template_names(self):
return ('post_create.html',)
class PostUpdateView(UpdateView):
"""
Post update view
"""
def get_template_names(self):
return ('post_update.html',)
class PostListView(ListView):
"""
List posts
"""
def get_template_names(self):
return ('post_list.html',)
class CommentMixin(object):
"""
Common comment forms methods
"""
def _get_post(self):
"""
Get comment post.
This method uses memoization for caching
"""
return self.get_object().content_object
get_post = memoize(_get_post, {}, 1)
def get_success_url(self):
post_pk = self.get_post().pk
return reverse('blogages_core:comment_list', args=(post_pk,))
class CommentUpdateView(CommentMixin, UpdateView):
"""
Comment update
"""
form_class = CommentForm
class CommentDeleteView(CommentMixin, DeleteView):
"""
Comment removing
"""
model = Comment
class PostCommentMixin(CommentMixin):
"""
Common PostComment methods
"""
def _get_post(self):
"""
Get comment post.
This method uses memoization for caching
"""
post_pk = self.kwargs.get('post_pk', 0)
return get_object_or_404(Post, pk=post_pk)
get_post = memoize(_get_post, {}, 1)
class CommentCreateView(PostCommentMixin, CreateView):
"""
    Comment creation view; in the future it may also be used
    for replying to other comments.
"""
form_class = CommentForm
def get_form(self, form_class):
return self.form_class(self.get_post(), **self.get_form_kwargs())
class CommentListView(PostCommentMixin, ListView):
"""
Comment listing
"""
template_name = 'comment_list.html'
model = Comment
def get_queryset(self):
"""
Filter comments from specific post
"""
post_pk = self.get_post().pk
queryset = super(CommentListView, self).get_queryset()
return queryset.filter(object_pk=post_pk)
def _get_create_url(self):
kwargs = {'post_pk': self.get_post().pk}
return reverse('blogages_core:comment_form', kwargs=kwargs)
class UserListView(ListView):
"""
User listing
"""
model = User
def get_queryset(self):
queryset = super(UserListView, self).get_queryset()
# Exclude anonymous user
queryset = queryset.exclude(pk=-1)
return queryset
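# A minimal URLconf sketch for wiring these views (hypothetical patterns; only
# the 'blogages_core:comment_list' and 'blogages_core:comment_form' names are
# taken from the reverse() calls above):
#
#   from django.conf.urls import patterns, url
#
#   urlpatterns = patterns(
#       '',
#       url(r'^$', IndexView.as_view(), name='index'),
#       url(r'^post/(?P<slug>[-\w]+)/$', SingleView.as_view(), name='single'),
#       url(r'^post/(?P<post_pk>\d+)/comments/$',
#           CommentListView.as_view(), name='comment_list'),
#       url(r'^post/(?P<post_pk>\d+)/comments/new/$',
#           CommentCreateView.as_view(), name='comment_form'),
#   )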
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
The worker communicates with the scheduler and does two things:
1. Sends all tasks that have to be run
2. Gets tasks from the scheduler that should be run
When running in local mode, the worker talks directly to a :py:class:`~luigi.scheduler.CentralPlannerScheduler` instance.
When you run a central server, the worker will talk to the scheduler using a :py:class:`~luigi.rpc.RemoteScheduler` instance.
Everything in this module is private to luigi and may change in incompatible
ways between versions. The exception is the exception types and the
:py:class:`worker` config class.
"""
import collections
import getpass
import logging
import multiprocessing # Note: this seems to have some stability issues: https://github.com/spotify/luigi/pull/438
import os
import signal
try:
import Queue
except ImportError:
import queue as Queue
import random
import socket
import threading
import time
import traceback
import types
from luigi import six
from luigi import notifications
from luigi.event import Event
from luigi.task_register import load_task
from luigi.scheduler import DISABLED, DONE, FAILED, PENDING, CentralPlannerScheduler
from luigi.target import Target
from luigi.task import Task, flatten, getpaths, Config
from luigi.task_register import TaskClassException
from luigi.task_status import RUNNING
from luigi.parameter import FloatParameter, IntParameter, BoolParameter
try:
import simplejson as json
except ImportError:
import json
logger = logging.getLogger('luigi-interface')
# Prevent fork() from being called during a C-level getaddrinfo() which uses a process-global mutex,
# that may not be unlocked in child process, resulting in the process being locked indefinitely.
fork_lock = threading.Lock()
# Why we assert on _WAIT_INTERVAL_EPS:
# multiprocessing.Queue.get() is undefined for timeout=0 it seems:
# https://docs.python.org/3.4/library/multiprocessing.html#multiprocessing.Queue.get.
# I also tried with really low epsilon, but then ran into the same issue where
# the test case "test_external_dependency_worker_is_patient" got stuck. So I
# unscientifically just set the final value to a floating point number that
# "worked for me".
_WAIT_INTERVAL_EPS = 0.00001
class TaskException(Exception):
pass
class TaskProcess(multiprocessing.Process):
""" Wrap all task execution in this class.
Mainly for convenience since this is run in a separate process. """
def __init__(self, task, worker_id, result_queue, random_seed=False, worker_timeout=0,
tracking_url_callback=None):
super(TaskProcess, self).__init__()
self.task = task
self.worker_id = worker_id
self.result_queue = result_queue
self.random_seed = random_seed
self.tracking_url_callback = tracking_url_callback
if task.worker_timeout is not None:
worker_timeout = task.worker_timeout
self.timeout_time = time.time() + worker_timeout if worker_timeout else None
def _run_get_new_deps(self):
run_again = False
try:
task_gen = self.task.run(tracking_url_callback=self.tracking_url_callback)
except TypeError as ex:
if 'unexpected keyword argument' not in getattr(ex, 'message', ex.args[0]):
raise
run_again = True
if run_again:
task_gen = self.task.run()
if not isinstance(task_gen, types.GeneratorType):
return None
next_send = None
while True:
try:
if next_send is None:
requires = six.next(task_gen)
else:
requires = task_gen.send(next_send)
except StopIteration:
return None
new_req = flatten(requires)
new_deps = [(t.task_module, t.task_family, t.to_str_params())
for t in new_req]
if all(t.complete() for t in new_req):
next_send = getpaths(requires)
else:
return new_deps
def run(self):
logger.info('[pid %s] Worker %s running %s', os.getpid(), self.worker_id, self.task)
if self.random_seed:
# Need to have different random seeds if running in separate processes
random.seed((os.getpid(), time.time()))
status = FAILED
expl = ''
missing = []
new_deps = []
try:
# Verify that all the tasks are fulfilled!
missing = [dep.task_id for dep in self.task.deps() if not dep.complete()]
if missing:
deps = 'dependency' if len(missing) == 1 else 'dependencies'
raise RuntimeError('Unfulfilled %s at run time: %s' % (deps, ', '.join(missing)))
self.task.trigger_event(Event.START, self.task)
t0 = time.time()
status = None
if self.task.run == NotImplemented:
# External task
# TODO(erikbern): We should check for task completeness after non-external tasks too!
# This will resolve #814 and make things a lot more consistent
status = DONE if self.task.complete() else FAILED
else:
new_deps = self._run_get_new_deps()
status = DONE if not new_deps else PENDING
if new_deps:
logger.info(
'[pid %s] Worker %s new requirements %s',
os.getpid(), self.worker_id, self.task)
elif status == DONE:
self.task.trigger_event(
Event.PROCESSING_TIME, self.task, time.time() - t0)
expl = self.task.on_success()
logger.info('[pid %s] Worker %s done %s', os.getpid(),
self.worker_id, self.task)
self.task.trigger_event(Event.SUCCESS, self.task)
except KeyboardInterrupt:
raise
except BaseException as ex:
status = FAILED
logger.exception("[pid %s] Worker %s failed %s", os.getpid(), self.worker_id, self.task)
self.task.trigger_event(Event.FAILURE, self.task, ex)
raw_error_message = self.task.on_failure(ex)
expl = raw_error_message
finally:
self.result_queue.put(
(self.task.task_id, status, expl, missing, new_deps))
def _recursive_terminate(self):
import psutil
try:
parent = psutil.Process(self.pid)
children = parent.children(recursive=True)
# terminate parent. Give it a chance to clean up
super(TaskProcess, self).terminate()
parent.wait()
# terminate children
for child in children:
try:
child.terminate()
except psutil.NoSuchProcess:
continue
except psutil.NoSuchProcess:
return
def terminate(self):
"""Terminate this process and its subprocesses."""
# default terminate() doesn't cleanup child processes, it orphans them.
try:
return self._recursive_terminate()
except ImportError:
return super(TaskProcess, self).terminate()
class SingleProcessPool(object):
"""
Dummy process pool for using a single processor.
Imitates the api of multiprocessing.Pool using single-processor equivalents.
"""
def apply_async(self, function, args):
return function(*args)
def close(self):
pass
def join(self):
pass
class DequeQueue(collections.deque):
"""
deque wrapper implementing the Queue interface.
"""
def put(self, obj, block=None, timeout=None):
return self.append(obj)
def get(self, block=None, timeout=None):
return self.pop()
class AsyncCompletionException(Exception):
"""
Exception indicating that something went wrong with checking complete.
"""
def __init__(self, trace):
self.trace = trace
class TracebackWrapper(object):
"""
Class to wrap tracebacks so we can know they're not just strings.
"""
def __init__(self, trace):
self.trace = trace
def check_complete(task, out_queue):
"""
Checks if task is complete, puts the result to out_queue.
"""
logger.debug("Checking if %s is complete", task)
try:
is_complete = task.complete()
except Exception:
is_complete = TracebackWrapper(traceback.format_exc())
out_queue.put((task, is_complete))
class worker(Config):
ping_interval = FloatParameter(default=1.0,
config_path=dict(section='core', name='worker-ping-interval'))
keep_alive = BoolParameter(default=False,
config_path=dict(section='core', name='worker-keep-alive'))
count_uniques = BoolParameter(default=False,
config_path=dict(section='core', name='worker-count-uniques'),
description='worker-count-uniques means that we will keep a '
'worker alive only if it has a unique pending task, as '
'well as having keep-alive true')
wait_interval = FloatParameter(default=1.0,
config_path=dict(section='core', name='worker-wait-interval'))
wait_jitter = FloatParameter(default=5.0)
max_reschedules = IntParameter(default=1,
config_path=dict(section='core', name='worker-max-reschedules'))
timeout = IntParameter(default=0,
config_path=dict(section='core', name='worker-timeout'))
task_limit = IntParameter(default=None,
config_path=dict(section='core', name='worker-task-limit'))
retry_external_tasks = BoolParameter(default=False,
config_path=dict(section='core', name='retry-external-tasks'),
description='If true, incomplete external tasks will be '
'retested for completion while Luigi is running.')
class KeepAliveThread(threading.Thread):
"""
Periodically tell the scheduler that the worker still lives.
"""
def __init__(self, scheduler, worker_id, ping_interval):
super(KeepAliveThread, self).__init__()
self._should_stop = threading.Event()
self._scheduler = scheduler
self._worker_id = worker_id
self._ping_interval = ping_interval
def stop(self):
self._should_stop.set()
def run(self):
while True:
self._should_stop.wait(self._ping_interval)
if self._should_stop.is_set():
logger.info("Worker %s was stopped. Shutting down Keep-Alive thread" % self._worker_id)
break
with fork_lock:
try:
self._scheduler.ping(worker=self._worker_id)
except: # httplib.BadStatusLine:
logger.warning('Failed pinging scheduler')
class Worker(object):
"""
Worker object communicates with a scheduler.
Simple class that talks to a scheduler and:
* tells the scheduler what it has to do + its dependencies
* asks for stuff to do (pulls it in a loop and runs it)
"""
def __init__(self, scheduler=None, worker_id=None, worker_processes=1, assistant=False, **kwargs):
if scheduler is None:
scheduler = CentralPlannerScheduler()
self.worker_processes = int(worker_processes)
self._worker_info = self._generate_worker_info()
if not worker_id:
worker_id = 'Worker(%s)' % ', '.join(['%s=%s' % (k, v) for k, v in self._worker_info])
self._config = worker(**kwargs)
assert self._config.wait_interval >= _WAIT_INTERVAL_EPS, "[worker] wait_interval must be positive"
assert self._config.wait_jitter >= 0.0, "[worker] wait_jitter must be equal or greater than zero"
self._id = worker_id
self._scheduler = scheduler
self._assistant = assistant
self._stop_requesting_work = False
self.host = socket.gethostname()
self._scheduled_tasks = {}
self._suspended_tasks = {}
self._first_task = None
self.add_succeeded = True
self.run_succeeded = True
self.unfulfilled_counts = collections.defaultdict(int)
try:
signal.signal(signal.SIGUSR1, self.handle_interrupt)
except AttributeError:
pass
# Keep info about what tasks are running (could be in other processes)
if worker_processes == 1:
self._task_result_queue = DequeQueue()
else:
self._task_result_queue = multiprocessing.Queue()
self._running_tasks = {}
# Stuff for execution_summary
self._add_task_history = []
self._get_work_response_history = []
def _add_task(self, *args, **kwargs):
"""
Call ``self._scheduler.add_task``, but store the values too so we can
implement :py:func:`luigi.execution_summary.summary`.
"""
task_id = kwargs['task_id']
status = kwargs['status']
runnable = kwargs['runnable']
task = self._scheduled_tasks.get(task_id)
if task:
msg = (task, status, runnable)
self._add_task_history.append(msg)
self._scheduler.add_task(*args, **kwargs)
logger.info('Informed scheduler that task %s has status %s', task_id, status)
def __enter__(self):
"""
Start the KeepAliveThread.
"""
self._keep_alive_thread = KeepAliveThread(self._scheduler, self._id, self._config.ping_interval)
self._keep_alive_thread.daemon = True
self._keep_alive_thread.start()
return self
def __exit__(self, type, value, traceback):
"""
Stop the KeepAliveThread and kill still running tasks.
"""
self._keep_alive_thread.stop()
self._keep_alive_thread.join()
for task in self._running_tasks.values():
if task.is_alive():
task.terminate()
return False # Don't suppress exception
def _generate_worker_info(self):
# Generate as much info as possible about the worker
# Some of these calls might not be available on all OS's
args = [('salt', '%09d' % random.randrange(0, 999999999)),
('workers', self.worker_processes)]
try:
args += [('host', socket.gethostname())]
except BaseException:
pass
try:
args += [('username', getpass.getuser())]
except BaseException:
pass
try:
args += [('pid', os.getpid())]
except BaseException:
pass
try:
sudo_user = os.getenv("SUDO_USER")
if sudo_user:
args.append(('sudo_user', sudo_user))
except BaseException:
pass
return args
def _validate_task(self, task):
if not isinstance(task, Task):
raise TaskException('Can not schedule non-task %s' % task)
if not task.initialized():
# we can't get the repr of it since it's not initialized...
raise TaskException('Task of class %s not initialized. Did you override __init__ and forget to call super(...).__init__?' % task.__class__.__name__)
def _log_complete_error(self, task, tb):
log_msg = "Will not schedule {task} or any dependencies due to error in complete() method:\n{tb}".format(task=task, tb=tb)
logger.warning(log_msg)
def _log_dependency_error(self, task, tb):
log_msg = "Will not schedule {task} or any dependencies due to error in deps() method:\n{tb}".format(task=task, tb=tb)
logger.warning(log_msg)
def _log_unexpected_error(self, task):
logger.exception("Luigi unexpected framework error while scheduling %s", task) # needs to be called from within except clause
def _email_complete_error(self, task, formatted_traceback):
self._email_error(task, formatted_traceback,
subject="Luigi: {task} failed scheduling. Host: {host}",
headline="Will not schedule task or any dependencies due to error in complete() method",
)
def _email_dependency_error(self, task, formatted_traceback):
self._email_error(task, formatted_traceback,
subject="Luigi: {task} failed scheduling. Host: {host}",
headline="Will not schedule task or any dependencies due to error in deps() method",
)
def _email_unexpected_error(self, task, formatted_traceback):
self._email_error(task, formatted_traceback,
subject="Luigi: Framework error while scheduling {task}. Host: {host}",
headline="Luigi framework error",
)
def _email_task_failure(self, task, formatted_traceback):
self._email_error(task, formatted_traceback,
subject="Luigi: {task} FAILED. Host: {host}",
headline="A task failed when running. Most likely run() raised an exception.",
)
def _email_error(self, task, formatted_traceback, subject, headline):
formatted_subject = subject.format(task=task, host=self.host)
message = notifications.format_task_error(headline, task, formatted_traceback)
notifications.send_error_email(formatted_subject, message, task.owner_email)
def add(self, task, multiprocess=False):
"""
Add a Task for the worker to check and possibly schedule and run.
Returns True if task and its dependencies were successfully scheduled or completed before.
"""
if self._first_task is None and hasattr(task, 'task_id'):
self._first_task = task.task_id
self.add_succeeded = True
if multiprocess:
queue = multiprocessing.Manager().Queue()
pool = multiprocessing.Pool()
else:
queue = DequeQueue()
pool = SingleProcessPool()
self._validate_task(task)
pool.apply_async(check_complete, [task, queue])
# we track queue size ourselves because len(queue) won't work for multiprocessing
queue_size = 1
try:
seen = set([task.task_id])
while queue_size:
current = queue.get()
queue_size -= 1
item, is_complete = current
for next in self._add(item, is_complete):
if next.task_id not in seen:
self._validate_task(next)
seen.add(next.task_id)
pool.apply_async(check_complete, [next, queue])
queue_size += 1
except (KeyboardInterrupt, TaskException):
raise
except Exception as ex:
self.add_succeeded = False
formatted_traceback = traceback.format_exc()
self._log_unexpected_error(task)
task.trigger_event(Event.BROKEN_TASK, task, ex)
self._email_unexpected_error(task, formatted_traceback)
finally:
pool.close()
pool.join()
return self.add_succeeded
def _add(self, task, is_complete):
if self._config.task_limit is not None and len(self._scheduled_tasks) >= self._config.task_limit:
logger.warning('Will not schedule %s or any dependencies due to exceeded task-limit of %d', task, self._config.task_limit)
return
formatted_traceback = None
try:
self._check_complete_value(is_complete)
except KeyboardInterrupt:
raise
except AsyncCompletionException as ex:
formatted_traceback = ex.trace
except BaseException:
formatted_traceback = traceback.format_exc()
if formatted_traceback is not None:
self.add_succeeded = False
self._log_complete_error(task, formatted_traceback)
task.trigger_event(Event.DEPENDENCY_MISSING, task)
self._email_complete_error(task, formatted_traceback)
# abort, i.e. don't schedule any subtasks of a task with
# failing complete()-method since we don't know if the task
# is complete and subtasks might not be desirable to run if
            # they have already run before
return
if is_complete:
deps = None
status = DONE
runnable = False
task.trigger_event(Event.DEPENDENCY_PRESENT, task)
elif task.run == NotImplemented:
deps = None
status = PENDING
runnable = worker().retry_external_tasks
task.trigger_event(Event.DEPENDENCY_MISSING, task)
logger.warning('Data for %s does not exist (yet?). The task is an '
                           'external data dependency, so it can not be run from'
' this luigi process.', task)
else:
try:
deps = task.deps()
except Exception as ex:
formatted_traceback = traceback.format_exc()
self.add_succeeded = False
self._log_dependency_error(task, formatted_traceback)
task.trigger_event(Event.BROKEN_TASK, task, ex)
self._email_dependency_error(task, formatted_traceback)
return
status = PENDING
runnable = True
if task.disabled:
status = DISABLED
if deps:
for d in deps:
self._validate_dependency(d)
task.trigger_event(Event.DEPENDENCY_DISCOVERED, task, d)
yield d # return additional tasks to add
deps = [d.task_id for d in deps]
self._scheduled_tasks[task.task_id] = task
self._add_task(worker=self._id, task_id=task.task_id, status=status,
deps=deps, runnable=runnable, priority=task.priority,
resources=task.process_resources(),
params=task.to_str_params(),
family=task.task_family,
module=task.task_module)
def _validate_dependency(self, dependency):
if isinstance(dependency, Target):
raise Exception('requires() can not return Target objects. Wrap it in an ExternalTask class')
elif not isinstance(dependency, Task):
raise Exception('requires() must return Task objects')
def _check_complete_value(self, is_complete):
if is_complete not in (True, False):
if isinstance(is_complete, TracebackWrapper):
raise AsyncCompletionException(is_complete.trace)
raise Exception("Return value of Task.complete() must be boolean (was %r)" % is_complete)
def _add_worker(self):
self._worker_info.append(('first_task', self._first_task))
self._scheduler.add_worker(self._id, self._worker_info)
def _log_remote_tasks(self, running_tasks, n_pending_tasks, n_unique_pending):
logger.debug("Done")
logger.debug("There are no more tasks to run at this time")
if running_tasks:
for r in running_tasks:
logger.debug('%s is currently run by worker %s', r['task_id'], r['worker'])
elif n_pending_tasks:
logger.debug("There are %s pending tasks possibly being run by other workers", n_pending_tasks)
if n_unique_pending:
logger.debug("There are %i pending tasks unique to this worker", n_unique_pending)
def _get_work(self):
if self._stop_requesting_work:
return None, 0, 0, 0
logger.debug("Asking scheduler for work...")
r = self._scheduler.get_work(
worker=self._id,
host=self.host,
assistant=self._assistant,
current_tasks=list(self._running_tasks.keys()),
)
n_pending_tasks = r['n_pending_tasks']
task_id = r['task_id']
running_tasks = r['running_tasks']
n_unique_pending = r['n_unique_pending']
self._get_work_response_history.append(dict(
task_id=task_id,
running_tasks=running_tasks,
))
if task_id is not None and task_id not in self._scheduled_tasks:
logger.info('Did not schedule %s, will load it dynamically', task_id)
try:
# TODO: we should obtain the module name from the server!
self._scheduled_tasks[task_id] = \
load_task(module=r.get('task_module'),
task_name=r['task_family'],
params_str=r['task_params'])
except TaskClassException as ex:
msg = 'Cannot find task for %s' % task_id
logger.exception(msg)
subject = 'Luigi: %s' % msg
error_message = notifications.wrap_traceback(ex)
notifications.send_error_email(subject, error_message)
self._add_task(worker=self._id, task_id=task_id, status=FAILED, runnable=False,
assistant=self._assistant)
task_id = None
self.run_succeeded = False
return task_id, running_tasks, n_pending_tasks, n_unique_pending
def _run_task(self, task_id):
task = self._scheduled_tasks[task_id]
p = self._create_task_process(task)
self._running_tasks[task_id] = p
if self.worker_processes > 1:
with fork_lock:
p.start()
else:
# Run in the same process
p.run()
def _create_task_process(self, task):
def update_tracking_url(tracking_url):
self._scheduler.add_task(
task_id=task.task_id,
worker=self._id,
status=RUNNING,
tracking_url=tracking_url,
)
return TaskProcess(
task, self._id, self._task_result_queue,
random_seed=bool(self.worker_processes > 1),
worker_timeout=self._config.timeout,
tracking_url_callback=update_tracking_url,
)
def _purge_children(self):
"""
Find dead children and put a response on the result queue.
:return:
"""
for task_id, p in six.iteritems(self._running_tasks):
if not p.is_alive() and p.exitcode:
error_msg = 'Task %s died unexpectedly with exit code %s' % (task_id, p.exitcode)
elif p.timeout_time is not None and time.time() > float(p.timeout_time) and p.is_alive():
p.terminate()
error_msg = 'Task %s timed out and was terminated.' % task_id
else:
continue
logger.info(error_msg)
self._task_result_queue.put((task_id, FAILED, error_msg, [], []))
def _handle_next_task(self):
"""
We have to catch three ways a task can be "done":
1. normal execution: the task runs/fails and puts a result back on the queue,
2. new dependencies: the task yielded new deps that were not complete and
will be rescheduled and dependencies added,
3. child process dies: we need to catch this separately.
"""
while True:
self._purge_children() # Deal with subprocess failures
try:
task_id, status, expl, missing, new_requirements = (
self._task_result_queue.get(
timeout=self._config.wait_interval))
except Queue.Empty:
return
task = self._scheduled_tasks[task_id]
if not task or task_id not in self._running_tasks:
continue
# Not a running task. Probably already removed.
# Maybe it yielded something?
if status == FAILED and expl:
# If no expl, it is because of a retry-external-task failure.
self._email_task_failure(task, expl)
new_deps = []
if new_requirements:
new_req = [load_task(module, name, params)
for module, name, params in new_requirements]
for t in new_req:
self.add(t)
new_deps = [t.task_id for t in new_req]
self._add_task(worker=self._id,
task_id=task_id,
status=status,
expl=json.dumps(expl),
resources=task.process_resources(),
runnable=None,
params=task.to_str_params(),
family=task.task_family,
module=task.task_module,
new_deps=new_deps,
assistant=self._assistant)
self._running_tasks.pop(task_id)
# re-add task to reschedule missing dependencies
if missing:
reschedule = True
# keep out of infinite loops by not rescheduling too many times
for task_id in missing:
self.unfulfilled_counts[task_id] += 1
if (self.unfulfilled_counts[task_id] >
self._config.max_reschedules):
reschedule = False
if reschedule:
self.add(task)
self.run_succeeded &= (status == DONE) or (len(new_deps) > 0)
return
def _sleeper(self):
# TODO is exponential backoff necessary?
while True:
jitter = self._config.wait_jitter
wait_interval = self._config.wait_interval + random.uniform(0, jitter)
logger.debug('Sleeping for %f seconds', wait_interval)
time.sleep(wait_interval)
yield
def _keep_alive(self, n_pending_tasks, n_unique_pending):
"""
        Returns true if a worker should stay alive given the pending task counts.
If worker-keep-alive is not set, this will always return false.
For an assistant, it will always return the value of worker-keep-alive.
Otherwise, it will return true for nonzero n_pending_tasks.
If worker-count-uniques is true, it will also
require that one of the tasks is unique to this worker.
"""
if not self._config.keep_alive:
return False
elif self._assistant:
return True
else:
return n_pending_tasks and (n_unique_pending or not self._config.count_uniques)
def handle_interrupt(self, signum, _):
"""
Stops the assistant from asking for more work on SIGUSR1
"""
if signum == signal.SIGUSR1:
self._config.keep_alive = False
self._stop_requesting_work = True
def run(self):
"""
Returns True if all scheduled tasks were executed successfully.
"""
logger.info('Running Worker with %d processes', self.worker_processes)
sleeper = self._sleeper()
self.run_succeeded = True
self._add_worker()
while True:
while len(self._running_tasks) >= self.worker_processes:
logger.debug('%d running tasks, waiting for next task to finish', len(self._running_tasks))
self._handle_next_task()
task_id, running_tasks, n_pending_tasks, n_unique_pending = self._get_work()
if task_id is None:
if not self._stop_requesting_work:
self._log_remote_tasks(running_tasks, n_pending_tasks, n_unique_pending)
if len(self._running_tasks) == 0:
if self._keep_alive(n_pending_tasks, n_unique_pending):
six.next(sleeper)
continue
else:
break
else:
self._handle_next_task()
continue
# task_id is not None:
logger.debug("Pending tasks: %s", n_pending_tasks)
self._run_task(task_id)
while len(self._running_tasks):
logger.debug('Shut down Worker, %d more tasks to go', len(self._running_tasks))
self._handle_next_task()
return self.run_succeeded
|
from oslo.config import cfg
from SoftLayer import API_PUBLIC_ENDPOINT
FILE_OPTIONS = {
None: [
        cfg.ListOpt('enabled_services', default=['identity',
                                                 'compute',
                                                 'image',
                                                 'block_storage',
                                                 'network',
                                                 'baremetal']),
cfg.StrOpt('log_level', default='INFO',
help='Log level to report. '
'Options: DEBUG, INFO, WARNING, ERROR, CRITICAL'),
cfg.StrOpt('secret_key',
default='SET ME',
help='Secret key used to encrypt tokens'),
cfg.ListOpt('request_hooks', default=[]),
cfg.ListOpt('response_hooks', default=[]),
cfg.StrOpt('default_domain', default='jumpgate.com')
],
'softlayer': [
cfg.StrOpt('endpoint', default=API_PUBLIC_ENDPOINT),
cfg.StrOpt('proxy', default=None),
cfg.StrOpt('catalog_template_file', default='identity.templates'),
],
'identity': [
cfg.StrOpt('driver', default='jumpgate.identity.drivers.sl'),
cfg.StrOpt('mount', default=None),
cfg.StrOpt('auth_driver', default='jumpgate.identity.'
'drivers.sl.tokens.SLAuthDriver'),
cfg.StrOpt('token_driver', default='jumpgate.identity.drivers.core.'
'JumpgateTokenDriver'),
cfg.StrOpt('token_id_driver', default='jumpgate.identity.drivers.core.'
'AESTokenIdDriver')
],
'compute': [
cfg.StrOpt('driver', default='jumpgate.compute.drivers.sl'),
cfg.StrOpt('mount', default='/compute'),
cfg.StrOpt('default_injected_file_content_bytes', default=10240),
cfg.StrOpt('default_injected_file_path_bytes', default=255),
cfg.StrOpt('default_cores', default=200),
cfg.StrOpt('default_floating_ips', default=100),
cfg.StrOpt('default_injected_files', default=5),
cfg.StrOpt('default_instances', default=10),
cfg.StrOpt('default_key_pairs', default=100),
cfg.StrOpt('default_metadata_items', default=128),
cfg.StrOpt('default_ram', default=512000),
cfg.StrOpt('default_security_group_rules', default=20),
cfg.StrOpt('default_security_groups', default=10),
],
'image': [
cfg.StrOpt('driver', default='jumpgate.image.drivers.sl'),
cfg.StrOpt('mount', default='/image'),
],
'block_storage': [
cfg.StrOpt('driver', default='jumpgate.block_storage.drivers.sl'),
cfg.StrOpt('mount', default='/block_store'),
],
'network': [
cfg.StrOpt('driver', default='jumpgate.network.drivers.sl'),
cfg.StrOpt('mount', default='/network'),
],
'baremetal': [
cfg.StrOpt('driver', default='jumpgate.baremetal.drivers.sl'),
cfg.StrOpt('mount', default='/baremetal'),
]}
CONF = cfg.CONF
def configure(conf=None):
if not conf:
conf = CONF
for section in FILE_OPTIONS:
conf.register_opts(FILE_OPTIONS[section], group=section)
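# A minimal usage sketch (assumes oslo.config is installed; the config-file
# path is hypothetical):
#
#   configure()
#   CONF(args=[], default_config_files=['/etc/jumpgate/jumpgate.conf'])
#   print(CONF.compute.mount)        # -> '/compute' unless overridden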
|
#!/usr/bin/python3
""" service_main.py:
"""
# Import Required Libraries (Standard, Third Party, Local) ********************
import asyncio
import datetime
import logging
if __name__ == "__main__":
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from rpihome_v3.occupancy_service.msg_processing import create_heartbeat_msg
from rpihome_v3.occupancy_service.msg_processing import process_heartbeat_msg
from rpihome_v3.schedule_service.msg_processing import process_get_device_scheduled_state_msg
# Authorship Info *************************************************************
__author__ = "Christopher Maue"
__copyright__ = "Copyright 2017, The RPi-Home Project"
__credits__ = ["Christopher Maue"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Christopher Maue"
__email__ = "[email protected]"
__status__ = "Development"
# Internal Service Work Task **************************************************
class MainTask(object):
def __init__(self, log, **kwargs):
# Configure logger
self.log = log or logging.getLogger(__name__)
# Define instance variables
self.ref_num = None
self.msg_in_queue = None
self.msg_out_queue = None
self.schedule = []
self.service_addresses = []
self.message_types = []
self.last_check_hb = datetime.datetime.now()
self.out_msg = str()
self.out_msg_list = []
self.next_msg = str()
self.next_msg_split = []
self.msg_source_addr = str()
self.msg_type = str()
self.destinations = []
# Map input variables
if kwargs is not None:
for key, value in kwargs.items():
if key == "ref":
self.ref_num = value
self.log.debug('Ref number generator set during __init__ '
'to: %s', self.ref_num)
if key == "schedule":
self.schedule = value
self.log.debug('Schedule set during __init__ '
'to: %s', self.schedule)
if key == "msg_in_queue":
self.msg_in_queue = value
self.log.debug('Message in queue set during __init__ '
'to: %s', self.msg_in_queue)
if key == "msg_out_queue":
self.msg_out_queue = value
self.log.debug('Message out queue set during __init__ '
'to: %s', self.msg_out_queue)
if key == "service_addresses":
self.service_addresses = value
self.log.debug('Service address list set during __init__ '
'to: %s', self.service_addresses)
if key == "message_types":
self.message_types = value
self.log.debug('Message type list set during __init__ '
'to: %s', self.message_types)
@asyncio.coroutine
def run(self):
""" task to handle the work the service is intended to do """
self.log.info('Starting schedule service main task')
while True:
# Initialize result list
self.out_msg_list = []
# INCOMING MESSAGE HANDLING
if self.msg_in_queue.qsize() > 0:
self.log.debug('Getting Incoming message from queue')
self.next_msg = self.msg_in_queue.get_nowait()
self.log.debug('Message pulled from queue: [%s]', self.next_msg)
# Determine message type
self.next_msg_split = self.next_msg.split(',')
if len(self.next_msg_split) >= 6:
self.log.debug('Extracting source address and message type')
self.msg_source_addr = self.next_msg_split[1]
self.msg_type = self.next_msg_split[5]
self.log.debug('Source Address: %s', self.msg_source_addr)
self.log.debug('Message Type: %s', self.msg_type)
# Service Check (heartbeat)
if self.msg_type == self.message_types['heartbeat']:
self.log.debug('Message is a heartbeat')
self.out_msg_list = process_heartbeat_msg(
self.log,
self.ref_num,
self.next_msg,
self.message_types)
# Device scheduled command checks
if self.msg_type == self.message_types['get_device_scheduled_state']:
self.log.debug('Message is a get device scheduled state message')
self.out_msg_list = process_get_device_scheduled_state_msg(
self.log,
self.ref_num,
self.schedule,
self.next_msg,
self.message_types)
            # Queue up response messages in the outgoing msg queue
if len(self.out_msg_list) > 0:
self.log.debug('Queueing response message(s)')
for self.out_msg in self.out_msg_list:
self.msg_out_queue.put_nowait(self.out_msg)
self.log.debug('Message [%s] successfully queued', self.out_msg)
# PERIODIC TASKS
# Periodically send heartbeats to other services
if datetime.datetime.now() >= (self.last_check_hb + datetime.timedelta(seconds=120)):
self.destinations = [
(self.service_addresses['automation_addr'],
self.service_addresses['automation_port'])
]
self.out_msg_list = create_heartbeat_msg(
self.log,
self.ref_num,
self.destinations,
self.service_addresses['schedule_addr'],
self.service_addresses['schedule_port'],
self.message_types)
            # Queue up response messages in the outgoing msg queue
if len(self.out_msg_list) > 0:
self.log.debug('Queueing response message(s)')
for self.out_msg in self.out_msg_list:
self.msg_out_queue.put_nowait(self.out_msg)
self.log.debug('Response message [%s] successfully queued',
self.out_msg)
# Update last-check
self.last_check_hb = datetime.datetime.now()
# Yield to other tasks for a while
yield from asyncio.sleep(0.25)
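# A minimal usage sketch (not part of this module; the queue objects, address
# map, and message-type map are hypothetical and must be supplied by the
# surrounding service; run() loops until the task is cancelled):
#
#   loop = asyncio.get_event_loop()
#   main = MainTask(logging.getLogger(__name__), ref=ref_num_gen, schedule=[],
#                   msg_in_queue=asyncio.Queue(), msg_out_queue=asyncio.Queue(),
#                   service_addresses=addresses, message_types=msg_types)
#   loop.run_until_complete(main.run())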
|
import json
import time
import unittest
from datetime import datetime, timedelta
from mock import Mock, patch
from six.moves.urllib.parse import parse_qs, urlparse
from xero.api import Xero
from xero.auth import (
OAuth2Credentials,
PartnerCredentials,
PrivateCredentials,
PublicCredentials,
)
from xero.constants import XERO_OAUTH2_AUTHORIZE_URL
from xero.exceptions import (
XeroAccessDenied,
XeroException,
XeroNotVerified,
XeroTenantIdNotSet,
XeroUnauthorized,
)
class PublicCredentialsTest(unittest.TestCase):
@patch("requests.post")
def test_initial_constructor(self, r_post):
"Initial construction causes a requst to get a request token"
r_post.return_value = Mock(
status_code=200, text="oauth_token=token&oauth_token_secret=token_secret"
)
credentials = PublicCredentials(
consumer_key="key", consumer_secret="secret", scope="payroll.endpoint"
)
        # An HTTP request was made
self.assertTrue(r_post.called)
state = credentials.state
# Expiry times should be calculated
self.assertIsNotNone(state.pop("oauth_authorization_expires_at"))
self.assertIsNotNone(state.pop("oauth_expires_at"))
self.assertEqual(
state,
{
"consumer_key": "key",
"consumer_secret": "secret",
"oauth_token": "token",
"oauth_token_secret": "token_secret",
"verified": False,
"scope": "payroll.endpoint",
},
)
@patch("requests.post")
def test_bad_credentials(self, r_post):
"Initial construction with bad credentials raises an exception"
r_post.return_value = Mock(
status_code=401,
text="oauth_problem=consumer_key_unknown&oauth_problem_advice=Consumer%20key%20was%20not%20recognised",
)
with self.assertRaises(XeroUnauthorized):
PublicCredentials(consumer_key="unknown", consumer_secret="unknown")
@patch("requests.post")
def test_unvalidated_constructor(self, r_post):
"Credentials with an unverified request token can be constructed"
credentials = PublicCredentials(
consumer_key="key",
consumer_secret="secret",
oauth_token="token",
oauth_token_secret="token_secret",
)
self.assertEqual(
credentials.state,
{
"consumer_key": "key",
"consumer_secret": "secret",
"oauth_token": "token",
"oauth_token_secret": "token_secret",
"verified": False,
},
)
# No HTTP requests were made
self.assertFalse(r_post.called)
@patch("requests.post")
def test_validated_constructor(self, r_post):
"A validated set of credentials can be reconstructed"
credentials = PublicCredentials(
consumer_key="key",
consumer_secret="secret",
oauth_token="validated_token",
oauth_token_secret="validated_token_secret",
verified=True,
)
self.assertEqual(
credentials.state,
{
"consumer_key": "key",
"consumer_secret": "secret",
"oauth_token": "validated_token",
"oauth_token_secret": "validated_token_secret",
"verified": True,
},
)
try:
credentials.oauth
except XeroNotVerified:
self.fail("Credentials should have been verified")
# No HTTP requests were made
self.assertFalse(r_post.called)
@patch("requests.post")
def test_url(self, r_post):
"The request token URL can be obtained"
r_post.return_value = Mock(
status_code=200, text="oauth_token=token&oauth_token_secret=token_secret"
)
credentials = PublicCredentials(consumer_key="key", consumer_secret="secret")
self.assertEqual(
credentials.url, "https://api.xero.com/oauth/Authorize?oauth_token=token"
)
@patch("requests.post")
def test_url_with_scope(self, r_post):
"The request token URL includes the scope parameter"
r_post.return_value = Mock(
status_code=200, text="oauth_token=token&oauth_token_secret=token_secret"
)
credentials = PublicCredentials(
consumer_key="key", consumer_secret="secret", scope="payroll.endpoint"
)
self.assertIn("scope=payroll.endpoint", credentials.url)
@patch("requests.post")
def test_configurable_url(self, r_post):
"Test configurable API url"
r_post.return_value = Mock(
status_code=200, text="oauth_token=token&oauth_token_secret=token_secret"
)
url = "https//api-tls.xero.com"
credentials = PublicCredentials(
consumer_key="key", consumer_secret="secret", api_url=url
)
self.assertEqual(
credentials.url, "{url}/oauth/Authorize?oauth_token=token".format(url=url)
)
@patch("requests.post")
def test_verify(self, r_post):
"Unverfied credentials can be verified"
r_post.return_value = Mock(
status_code=200,
text="oauth_token=verified_token&oauth_token_secret=verified_token_secret",
)
credentials = PublicCredentials(
consumer_key="key",
consumer_secret="secret",
oauth_token="token",
oauth_token_secret="token_secret",
)
credentials.verify("verifier")
        # An HTTP request was made
self.assertTrue(r_post.called)
state = credentials.state
# Expiry times should be calculated
self.assertIsNotNone(state.pop("oauth_authorization_expires_at"))
self.assertIsNotNone(state.pop("oauth_expires_at"))
self.assertEqual(
state,
{
"consumer_key": "key",
"consumer_secret": "secret",
"oauth_token": "verified_token",
"oauth_token_secret": "verified_token_secret",
"verified": True,
},
)
try:
credentials.oauth
except XeroNotVerified:
self.fail("Credentials should have been verified")
@patch("requests.post")
def test_verify_failure(self, r_post):
"If verification credentials are bad, an error is raised"
r_post.return_value = Mock(
status_code=401,
text="oauth_problem=bad_verifier&oauth_problem_advice=The consumer was denied access to this resource.",
)
credentials = PublicCredentials(
consumer_key="key",
consumer_secret="secret",
oauth_token="token",
oauth_token_secret="token_secret",
)
with self.assertRaises(XeroUnauthorized):
credentials.verify("badverifier")
with self.assertRaises(XeroNotVerified):
credentials.oauth
def test_expired(self):
"Expired credentials are correctly detected"
now = datetime(2014, 1, 1, 12, 0, 0)
soon = now + timedelta(minutes=30)
credentials = PublicCredentials(
consumer_key="key",
consumer_secret="secret",
oauth_token="token",
oauth_token_secret="token_secret",
)
# At this point, oauth_expires_at isn't set
with self.assertRaises(XeroException):
credentials.expired(now)
# Not yet expired
credentials.oauth_expires_at = soon
self.assertFalse(credentials.expired(now=now))
# Expired
self.assertTrue(credentials.expired(now=soon))
class PartnerCredentialsTest(unittest.TestCase):
@patch("requests.post")
def test_initial_constructor(self, r_post):
"Initial construction causes a request to get a request token"
r_post.return_value = Mock(
status_code=200, text="oauth_token=token&oauth_token_secret=token_secret"
)
credentials = PartnerCredentials(
consumer_key="key",
consumer_secret="secret",
rsa_key="abc",
scope="payroll.endpoint",
)
        # An HTTP request was made
self.assertTrue(r_post.called)
state = credentials.state
# Expiry times should be calculated
self.assertIsNotNone(state.pop("oauth_authorization_expires_at"))
self.assertIsNotNone(state.pop("oauth_expires_at"))
self.assertEqual(
state,
{
"consumer_key": "key",
"consumer_secret": "secret",
"oauth_token": "token",
"oauth_token_secret": "token_secret",
"verified": False,
"scope": "payroll.endpoint",
},
)
@patch("requests.post")
def test_refresh(self, r_post):
"Refresh function gets a new token"
r_post.return_value = Mock(
status_code=200,
text="oauth_token=token2&oauth_token_secret=token_secret2&oauth_session_handle=session",
)
credentials = PartnerCredentials(
consumer_key="key",
consumer_secret="secret",
rsa_key="key",
oauth_token="token",
oauth_token_secret="token_secret",
verified=True,
)
credentials.refresh()
# Expiry times should be calculated
state = credentials.state
self.assertIsNotNone(state.pop("oauth_authorization_expires_at"))
self.assertIsNotNone(state.pop("oauth_expires_at"))
self.assertEqual(
state,
{
"consumer_key": "key",
"consumer_secret": "secret",
"oauth_token": "token2",
"oauth_token_secret": "token_secret2",
"oauth_session_handle": "session",
"verified": True,
},
)
@patch("requests.post")
def test_configurable_url(self, r_post):
"Test configurable API url"
r_post.return_value = Mock(
status_code=200,
text="oauth_token=token&oauth_token_secret=token_secret&oauth_session_handle=session",
)
url = "https//api-tls.xero.com"
credentials = PartnerCredentials(
consumer_key="key",
consumer_secret="secret",
rsa_key="key",
oauth_token="token",
oauth_token_secret="token_secret",
verified=True,
api_url=url,
)
credentials.refresh()
self.assertEqual(
credentials.url, "{url}/oauth/Authorize?oauth_token=token".format(url=url)
)
class PrivateCredentialsTest(unittest.TestCase):
def test_default_url(self):
"Test default API url"
credentials = PrivateCredentials(consumer_key="key", rsa_key="rsa_key")
self.assertEqual(credentials.base_url, "https://api.xero.com")
def test_configurable_url(self):
"Test configurable API url"
url = "https//api-tls.xero.com"
credentials = PrivateCredentials(
consumer_key="key", rsa_key="rsa_key", api_url=url
)
self.assertEqual(credentials.base_url, url)
class OAuth2CredentialsTest(unittest.TestCase):
callback_uri = "https://myapp.example.com/xero/auth/callback/"
def setUp(self):
super(OAuth2CredentialsTest, self).setUp()
# Create an expired token to be used by tests
self.expired_token = {
"access_token": "1234567890",
"expires_in": 1800,
"token_type": "Bearer",
"refresh_token": "0987654321",
# 'expires_at': datetime.utcnow().timestamp()}
"expires_at": time.time(),
}
def test_authorisation_url_and_random_state(self):
credentials = OAuth2Credentials(
"client_id", "client_secret", callback_uri=self.callback_uri
)
url = credentials.generate_url()
self.assertTrue(url.startswith(XERO_OAUTH2_AUTHORIZE_URL))
qs = parse_qs(urlparse(url).query)
# Test that the credentials object can be dumped by state
cred_state = credentials.state
# Then test that the relevant attributes are in the querystring
self.assertEqual(qs["client_id"][0], cred_state["client_id"])
self.assertEqual(qs["redirect_uri"][0], cred_state["callback_uri"])
self.assertEqual(qs["response_type"][0], "code")
self.assertEqual(qs["scope"][0], " ".join(cred_state["scope"]))
self.assertEqual(qs["state"][0], cred_state["auth_state"])
def test_authorisation_url_using_initial_state(self):
credentials = OAuth2Credentials(
"client_id",
"client_secret",
callback_uri=self.callback_uri,
auth_state="test_state",
)
url = urlparse(credentials.generate_url())
self.assertEqual(credentials.auth_state, "test_state")
qs = parse_qs(url.query)
self.assertEqual(qs["state"][0], "test_state")
@patch("requests_oauthlib.OAuth2Session.request")
def test_verification_using_bad_auth_uri(self, r_request):
credentials = OAuth2Credentials(
"client_id", "client_secret", auth_state="test_state"
)
bad_auth_uri = "{}?error=access_denied&state={}".format(
self.callback_uri, credentials.auth_state
)
with self.assertRaises(XeroAccessDenied):
credentials.verify(bad_auth_uri)
with self.assertRaises(XeroAccessDenied):
OAuth2Credentials(
"client_id",
"client_secret",
auth_state="test_state",
auth_secret=bad_auth_uri,
)
self.assertFalse(r_request.called)
@patch("requests_oauthlib.OAuth2Session.request")
def test_verification_success(self, r_request):
credentials = OAuth2Credentials(
"client_id", "client_secret", auth_state="test_state"
)
auth_uri = "{}?code=0123456789&scope={}&state={}".format(
self.callback_uri, "%20".join(credentials.scope), credentials.auth_state
)
r_request.return_value = Mock(
status_code=200,
request=Mock(headers={}, body=""),
headers={},
text='{"access_token":"1234567890","expires_in":1800,'
'"token_type":"Bearer","refresh_token":"0987654321"}',
)
credentials.verify(auth_uri)
self.assertTrue(r_request.called)
self.assertTrue(credentials.token)
self.assertTrue(credentials.oauth)
self.assertFalse(credentials.expired())
# Finally test the state
self.assertEqual(
credentials.state,
{
"client_id": credentials.client_id,
"client_secret": credentials.client_secret,
"auth_state": credentials.auth_state,
"scope": credentials.scope,
"user_agent": credentials.user_agent,
"token": credentials.token,
},
)
@patch("requests_oauthlib.OAuth2Session.request")
def test_verification_failure(self, r_request):
credentials = OAuth2Credentials(
"client_id", "client_secret", auth_state="test_state"
)
auth_uri = "{}?code=0123456789&scope={}&state={}".format(
self.callback_uri, "%20".join(credentials.scope), credentials.auth_state
)
r_request.return_value = Mock(
status_code=400,
request=Mock(headers={}, body=""),
headers={},
text='{"error":"invalid_grant"}',
)
with self.assertRaises(XeroAccessDenied):
credentials.verify(auth_uri)
with self.assertRaises(XeroAccessDenied):
OAuth2Credentials(
"client_id",
"client_secret",
auth_state="test_state",
auth_secret=auth_uri,
)
@patch("requests_oauthlib.OAuth2Session.post")
def test_token_refresh(self, r_post):
credentials = OAuth2Credentials(
"client_id", "client_secret", token=self.expired_token
)
self.assertTrue(credentials.oauth)
self.assertTrue(credentials.expired())
r_post.return_value = Mock(
status_code=200,
headers={},
text='{"access_token":"5555555555","expires_in":1800,'
'"token_type":"Bearer","refresh_token":"44444444444"}',
)
credentials.refresh()
self.assertTrue(r_post.called)
self.assertFalse(credentials.expired())
# Test that the headers were set correctly
auth = r_post.call_args[1]["auth"]
self.assertEqual(auth.username, "client_id")
self.assertEqual(auth.password, "client_secret")
@patch("requests.get")
def test_get_tenants(self, r_get):
credentials = OAuth2Credentials(
"client_id", "client_secret", token=self.expired_token
)
content = '[{"id":"1","tenantId":"12345","tenantType":"ORGANISATION"}]'
def json_fct():
return json.loads(content)
r_get.return_value = Mock(status_code=200, json=json_fct)
tenants = credentials.get_tenants()
self.assertTrue(r_get.called)
self.assertEqual(
tenants, [{"id": "1", "tenantId": "12345", "tenantType": "ORGANISATION"}]
)
tenants = credentials.get_tenants(auth_event_id="b71db552-68ff-4d80-a824-7544e5ccad28")
self.assertEqual(r_get.mock_calls[-1].args[0].split('?authEventId=')[1], "b71db552-68ff-4d80-a824-7544e5ccad28")
@patch("xero.auth.OAuth2Credentials.get_tenants")
def test_set_default_tenant(self, get_tenants):
get_tenants.return_value = [
{"id": "1", "tenantId": "12345", "tenantType": "ORGANISATION"}
]
credentials = OAuth2Credentials(
"client_id", "client_secret", token=self.expired_token
)
credentials.set_default_tenant()
self.assertEqual(credentials.tenant_id, "12345")
@patch("requests.get")
def test_tenant_is_used_in_xero_request(self, r_get):
credentials = OAuth2Credentials(
"client_id", "client_secret", token=self.expired_token, tenant_id="12345"
)
xero = Xero(credentials)
# Just return any old response
r_get.return_value = None
try:
xero.contacts.all()
except: # NOQA: E722
pass
self.assertEqual(r_get.call_args[1]["headers"]["Xero-tenant-id"], "12345")
def test_tenant_id_not_set_raises_error(self):
credentials = OAuth2Credentials(
"client_id", "client_secret", token=self.expired_token
)
xero = Xero(credentials)
with self.assertRaises(XeroTenantIdNotSet):
xero.contacts.all()
@patch.object(OAuth2Credentials, "get_tenants", Mock(return_value=[]))
def test_set_default_tenant_raises_exception(self):
credentials = OAuth2Credentials(
"client_id", "client_secret", token=self.expired_token
)
with self.assertRaises(XeroException):
credentials.set_default_tenant()
|
# Amara, universalsubtitles.org
#
# Copyright (C) 2013 Participatory Culture Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see
# http://www.gnu.org/licenses/agpl-3.0.html.
import logging
from datetime import datetime
from django.conf import settings
from django.db.models import Sum, Q
from django.utils import translation
from django.utils.translation import ugettext as _
from subtitles import models as new_models
from teams.models import Task, Workflow, Team, BillingRecord
from teams.moderation_const import APPROVED, UNMODERATED, WAITING_MODERATION
from teams.permissions import (
can_create_and_edit_subtitles, can_create_and_edit_translations,
can_publish_edits_immediately, can_review, can_approve, can_add_version,
)
from teams.signals import (
api_language_new, api_language_edited, api_video_edited
)
from utils import send_templated_email
from utils.forms import flatten_errorlists
from utils.subtitles import create_new_subtitles
from utils.translation import get_user_languages_from_request
from videos import models
from videos.models import record_workflow_origin, Subtitle
from videos.tasks import (
video_changed_tasks, subtitles_complete_changed
)
from widget import video_cache
from widget.base_rpc import BaseRpc
from widget.forms import FinishReviewForm, FinishApproveForm
from widget.models import SubtitlingSession
from functools import partial
from subtitles import pipeline
from subtitles.models import ORIGIN_LEGACY_EDITOR
from babelsubs.storage import SubtitleSet, diff
yt_logger = logging.getLogger("youtube-ei-error")
ALL_LANGUAGES = settings.ALL_LANGUAGES
LANGUAGES_MAP = dict(ALL_LANGUAGES)
def get_general_settings(request):
general_settings = {
'writelock_expiration' : new_models.WRITELOCK_EXPIRATION,
'embed_version': '',
'languages': ALL_LANGUAGES,
'metadata_languages': settings.METADATA_LANGUAGES
}
if request.user.is_authenticated():
general_settings['username'] = request.user.username
return general_settings
def add_general_settings(request, dict):
dict.update(get_general_settings(request))
class Rpc(BaseRpc):
# Logging
def log_session(self, request, log):
send_templated_email(
settings.WIDGET_LOG_EMAIL,
'Subtitle save failure',
'widget/session_log_email.txt',
            { 'log_pk': log.pk },
fail_silently=False)
return { 'response': 'ok' }
def log_youtube_ei_failure(self, request, page_url):
user_agent = request.META.get('HTTP_USER_AGENT', '(Unknown)')
yt_logger.error(
"Youtube ExternalInterface load failure",
extra={
'request': request,
'data': {
'user_agent': user_agent,
'page_url': page_url }
})
return { 'response': 'ok' }
# Widget
def _check_visibility_policy_for_widget(self, request, video_id):
"""Return an error if the user cannot see the widget, None otherwise."""
visibility_policy = video_cache.get_visibility_policies(video_id)
if not visibility_policy.get("is_public", True):
team = Team.objects.get(id=visibility_policy['team_id'])
if not team.is_member(request.user):
return {"error_msg": _("Video embedding disabled by owner")}
def _get_video_urls_for_widget(self, video_url, video_id):
"""Return the video URLs, 'cleaned' video id, and error."""
try:
video_urls = video_cache.get_video_urls(video_id)
except models.Video.DoesNotExist:
video_cache.invalidate_video_id(video_url)
try:
video_id = video_cache.get_video_id(video_url)
except Exception as e:
return None, None, {"error_msg": unicode(e)}
video_urls = video_cache.get_video_urls(video_id)
return video_urls, video_id, None
def _find_remote_autoplay_language(self, request):
language = None
if not request.user.is_authenticated() or request.user.preferred_language == '':
language = translation.get_language_from_request(request)
else:
language = request.user.preferred_language
return language if language != '' else None
def _get_subtitles_for_widget(self, request, base_state, video_id, is_remote):
# keeping both forms valid as backwards compatibility layer
lang_code = base_state and base_state.get("language_code", base_state.get("language", None))
if base_state is not None and lang_code is not None:
lang_pk = base_state.get('language_pk', None)
if lang_pk is None:
lang_pk = video_cache.pk_for_default_language(video_id, lang_code)
return self._autoplay_subtitles(request.user, video_id, lang_pk,
base_state.get('revision', None))
else:
if is_remote:
autoplay_language = self._find_remote_autoplay_language(request)
language_pk = video_cache.pk_for_default_language(video_id, autoplay_language)
if autoplay_language is not None:
return self._autoplay_subtitles(request.user, video_id,
language_pk, None)
def show_widget(self, request, video_url, is_remote, base_state=None, additional_video_urls=None):
try:
video_id = video_cache.get_video_id(video_url)
except Exception as e:
# for example, private youtube video or private widgets
return {"error_msg": unicode(e)}
if video_id is None:
return None
error = self._check_visibility_policy_for_widget(request, video_id)
if error:
return error
video_urls, video_id, error = self._get_video_urls_for_widget(video_url, video_id)
if error:
return error
resp = {
'video_id' : video_id,
'subtitles': None,
'video_urls': video_urls,
'is_moderated': video_cache.get_is_moderated(video_id),
'filename': video_cache.get_download_filename(video_id),
}
if additional_video_urls is not None:
for url in additional_video_urls:
video_cache.associate_extra_url(url, video_id)
add_general_settings(request, resp)
if request.user.is_authenticated():
resp['username'] = request.user.username
resp['drop_down_contents'] = video_cache.get_video_languages(video_id)
resp['my_languages'] = get_user_languages_from_request(request)
resp['subtitles'] = self._get_subtitles_for_widget(request, base_state,
video_id, is_remote)
return resp
def track_subtitle_play(self, request, video_id):
# NOTE: we used to use this method to track when subtitles were
# played from amara or other sites, however it wasn't very useful
        # since most other sites didn't use it. So when we switched to the new
# statistics system we just removed the functionality.
return { 'response': 'ok' }
# Start Dialog (aka "Subtitle Into" Dialog)
def _get_blocked_languages(self, team_video, user):
# This is yet another terrible hack for the tasks system. I'm sorry.
#
# Normally the in-progress languages will be marked as disabled in the
# language_summary call, but that doesn't happen for languages that
# don't have SubtitleLanguage objects yet, i.e. ones that have a task
# but haven't been started yet.
#
# This function returns a list of languages that should be disabled ON
# TOP OF the already-disabled ones.
#
# Here's a kitten to cheer you up:
#
# ,_
# (\(\ \\
# /.. \ ||
# \Y_, '----.//
# ) /
# | \_/ ;
# \\ |\`\ |
# jgs ((_/(_(_/
if team_video:
tasks = team_video.task_set.incomplete()
if user.is_authenticated():
tasks = tasks.exclude(assignee=user)
return list(tasks.values_list('language', flat=True))
else:
return []
def fetch_start_dialog_contents(self, request, video_id):
my_languages = get_user_languages_from_request(request)
my_languages.extend([l[:l.find('-')] for l in my_languages if l.find('-') > -1])
video = models.Video.objects.get(video_id=video_id)
team_video = video.get_team_video()
languages = (new_models.SubtitleLanguage.objects.having_public_versions()
.filter(video=video))
video_languages = [language_summary(l, team_video, request.user) for l
in languages]
original_language = video.primary_audio_language_code
tv = video.get_team_video()
writable_langs = list(tv.team.get_writable_langs()) if tv else []
blocked_langs = self._get_blocked_languages(team_video, request.user)
return {
'my_languages': my_languages,
'video_languages': video_languages,
'original_language': original_language,
'limit_languages': writable_langs,
'is_moderated': video.is_moderated,
'blocked_languages': blocked_langs
}
# Fetch Video ID and Settings
def fetch_video_id_and_settings(self, request, video_id):
is_original_language_subtitled = self._subtitle_count(video_id) > 0
general_settings = {}
add_general_settings(request, general_settings)
return {
'video_id': video_id,
'is_original_language_subtitled': is_original_language_subtitled,
'general_settings': general_settings
}
def get_timing_mode(self, language, user):
"""
        Decide whether forking is allowed. Criteria:
         - hard-coded TED teams can never fork
         - non-team videos can always fork
         - for team videos, the user must have permission to subtitle
           (not only translate)
"""
team_video = language.video.get_team_video()
_TED_TEAMS = ['ted', 'ted-transcribe']
if team_video and team_video.team.slug.lower() in _TED_TEAMS:
return 'off'
elif team_video and not can_create_and_edit_subtitles(user, team_video, language):
return 'off'
else:
return 'on'
# Start Editing
def _check_team_video_locking(self, user, video_id, language_code):
"""Check whether the a team prevents the user from editing the subs.
Returns a dict appropriate for sending back if the user should be
prevented from editing them, or None if the user can safely edit.
"""
video = models.Video.objects.get(video_id=video_id)
check_result = can_add_version(user, video, language_code)
if check_result:
return None
else:
return {
"can_edit": False,
"locked_by": check_result.locked_by,
"message": check_result.message
}
def _get_version_to_edit(self, language, session):
"""Return a version (and other info) that should be edited.
When subtitles are going to be created or edited for a given language,
we need to have a "base" version to work with. This function returns
this base version along with its number and a flag specifying whether it
is an edit (as opposed to a brand new set of subtitles).
"""
version_for_subs = language.get_tip(public=False)
if not version_for_subs:
version_for_subs = None
version_number = 0
else:
version_number = version_for_subs.version_number + 1
return version_for_subs, version_number
def start_editing(self, request, video_id, language_code,
subtitle_language_pk=None, base_language_code=None,
original_language_code=None, mode=None):
"""Called by subtitling widget when subtitling or translation is to commence on a video.
Does a lot of things, some of which should probably be split out into
other functions.
"""
# TODO: remove whenever blank SubtitleLanguages become illegal.
# Find the subtitle language we'll be editing (if available).
language, locked = self._get_language_for_editing(
request, video_id, language_code, subtitle_language_pk, base_language_code)
if locked:
return locked
version = language.get_tip(public=False)
# Ensure that the user is not blocked from editing this video by team
# permissions.
locked = self._check_team_video_locking(
request.user, video_id, language_code)
if locked:
return locked
# just lock the video *after* we verify if team moderation happened
language.writelock(request.user)
language.save()
# Create the subtitling session and subtitle version for these edits.
        # we determine that it's a translation if:
        # - the front end specifically said to translate from (base_language_code)
        # - the language has another source in its lineage and it is not marked as forked
translated_from_code = None
translated_from = None
if base_language_code:
translated_from_code = base_language_code
elif language.is_forked == False:
translated_from_code = language.get_translation_source_language_code()
if translated_from_code:
translated_from = language.video.subtitle_language(translated_from_code)
session = self._make_subtitling_session(request, language, translated_from_code, video_id)
version_for_subs, version_number = self._get_version_to_edit(language, session)
args = {'session': session}
if version_for_subs:
args['version'] = version_for_subs
session.parent = version_for_subs
session.save()
else:
args['language'] = language
subtitles = self._subtitles_dict(**args)
# this is basically how it worked before. don't ask.
subtitles['forked'] = base_language_code is None
return_dict = { "can_edit": True,
"session_pk": session.pk,
"timing_mode": self.get_timing_mode(language, request.user),
"subtitles": subtitles }
# If this is a translation, include the subtitles it's based on in the response.
if translated_from:
version = translated_from.get_tip(public=True)
if not version:
return { "can_edit": False, "locked_by": "", "message": "You cannot translate from a version that is incomplete" }
original_subtitles = self._subtitles_dict(version=version)
return_dict['original_subtitles'] = original_subtitles
# If we know the original language code for this video, make sure it's
# saved and there's a SubtitleLanguage for it in the database.
#
# Remember: the "original language" is the language of the video, NOT
# the language these subs are a translation of (if any).
if original_language_code:
self._save_original_language(video_id, original_language_code)
# Writelock this language for this video before we successfully return.
video_cache.writelock_add_lang(video_id, language.language_code)
return return_dict
# Resume Editing
def resume_editing(self, request, session_pk):
try:
session = SubtitlingSession.objects.get(pk=session_pk)
except SubtitlingSession.DoesNotExist:
return {'response': 'cannot_resume'}
language = session.language
error = self._check_team_video_locking(request.user, session.video.video_id, language.language_code)
if error:
return {'response': 'cannot_resume'}
if language.can_writelock(request.user) and \
session.parent_version == language.version():
language.writelock(request.user)
version_for_subs, version_number = self._get_version_to_edit(language, session)
args = {'session': session}
if version_for_subs is None:
args['language'] = language
else:
args['version'] = version_for_subs
subtitles = self._subtitles_dict(**args)
return_dict = { "response": "ok",
"can_edit" : True,
"session_pk" : session.pk,
"timing_mode": self.get_timing_mode(session.language, request.user),
"subtitles" : subtitles }
if session.base_language:
return_dict['original_subtitles'] = \
self._subtitles_dict(version=session.base_language.get_tip())
return return_dict
else:
return { 'response': 'cannot_resume' }
# Locking
def release_lock(self, request, session_pk):
language = SubtitlingSession.objects.get(pk=session_pk).language
if language.can_writelock(request.user):
language.release_writelock()
language.save()
video_cache.writelocked_langs_clear(language.video.video_id)
return { "response": "ok" }
def regain_lock(self, request, session_pk):
language = SubtitlingSession.objects.get(pk=session_pk).language
if not language.can_writelock(request.user):
return { 'response': 'unlockable' }
else:
language.writelock(request.user)
video_cache.writelock_add_lang(
language.video.video_id, language.language_code)
return { 'response': 'ok' }
# Permissions
def can_user_edit_video(self, request, video_id):
"""Return a dictionary of information about what the user can do with this video.
The response will contain can_subtitle and can_translate attributes.
"""
video = models.Video.objects.get(video_id=video_id)
team_video = video.get_team_video()
if not team_video:
can_subtitle = True
can_translate = True
else:
can_subtitle = can_create_and_edit_subtitles(request.user, team_video)
can_translate = can_create_and_edit_translations(request.user, team_video)
return { 'response': 'ok',
'can_subtitle': can_subtitle,
'can_translate': can_translate, }
# Finishing and Saving
def _get_user_message_for_save(self, user, language, is_complete):
"""Return the message that should be sent to the user regarding this save.
This may be a message saying that the save was successful, or an error message.
        The message displayed to the user has a complex set of requirements / outcomes:
1) Subs will go live in a moment. Works for unmoderated subs and for D and H
D. Transcript, post-publish edit by moderator with the power to approve. Will go live immediately.
H. Translation, post-publish edit by moderator with the power to approve. Will go live immediately.
2) Subs must be completed before being submitted to moderators. Works for A and E
A. Transcript, incomplete (checkbox not ticked). Must be completed before being submitted to moderators.
E. Translation, incomplete (some lines missing). Must be completed before being submitted to moderators.
3) Subs will be submitted for review/approval. Works for B, C, F, and G
B. Transcript, complete (checkbox ticked). Will be submitted to moderators promptly for approval or rejection.
C. Transcript, post-publish edit by contributor. Will be submitted to moderators promptly for approval or rejection.
F. Translation, complete (all the lines filled). Will be submitted to moderators promptly for approval or rejection.
G. Translation, post-publish edit by contributor. Will be submitted to moderators promptly for approval or rejection.
TODO: Localize this?
"""
message_will_be_live_soon = "Your changes have been saved. It may take a moment for your subtitles to appear."
        message_will_be_submitted = ("This video is moderated by %s. "
                                     "Your changes will be reviewed by the "
                                     "team's moderators.")
message_incomplete = ("These subtitles are incomplete. "
"They will not be submitted for publishing "
"until they've been completed.")
under_moderation = language.video.is_moderated
_user_can_publish = True
team_video = language.video.get_team_video()
if under_moderation and team_video:
# videos are only supposed to have one team video
_user_can_publish = can_publish_edits_immediately(team_video, user, language.language_code)
        # cases 2 and 3: moderated and the user cannot publish edits directly
if under_moderation and not _user_can_publish:
if is_complete:
# case 3
                return message_will_be_submitted % team_video.team.name
else:
# case 2
return message_incomplete
else:
return message_will_be_live_soon
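    # Summary of the branches above (descriptive comments only):
    #
    #   moderated | user can publish | is_complete | message returned
    #   ----------+------------------+-------------+--------------------------------
    #   no        | (any)            | (any)       | message_will_be_live_soon
    #   yes       | yes              | (any)       | message_will_be_live_soon
    #   yes       | no               | yes         | message_will_be_submitted % team
    #   yes       | no               | no          | message_incomplete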
def _save_tasks_for_save(self, request, save_for_later, language,
new_version, is_complete, task_id, task_type,
task_notes, task_approved):
"""Handle any outstanding tasks for this save. May return an error.
save_for_later is the most important argument here. It determines
whether any tasks will actually be completed.
"""
team_video = language.video.get_team_video()
if not save_for_later:
# If we've just saved a completed subtitle language, we may need to
# complete a subtitle or translation task.
if is_complete:
if team_video:
tasks = team_video.task_set.incomplete().filter(
type__in=(Task.TYPE_IDS['Subtitle'],
Task.TYPE_IDS['Translate']),
language=language.language_code
)
for task in tasks:
task.complete()
# If the user is specifically performing a review/approve task we should
# handle it.
if task_id:
if task_type == 'review':
handle = self._save_review
elif task_type == 'approve':
handle = self._save_approve
error = handle(request, save_for_later, task_id, task_notes,
task_approved, new_version=new_version)
if error:
return error
def _get_new_version_for_save(self, subtitles, language, session, user, new_title, new_description, new_metadata, save_for_later=None):
"""Return a new subtitle version for this save, or None if not needed."""
new_version = None
previous_version = language.get_tip(public=False)
if previous_version:
title_changed = (new_title is not None
and new_title != previous_version.title)
desc_changed = (new_description is not None
and new_description != previous_version.description)
metadata_changed = (new_metadata is not None
and new_metadata != previous_version.get_metadata())
else:
title_changed = new_title is not None
desc_changed = new_description is not None
metadata_changed = new_metadata is not None
subtitle_set = None
subs_length = 0
if isinstance(subtitles, basestring):
subtitle_set = SubtitleSet(language.language_code, subtitles)
elif isinstance(subtitles, SubtitleSet):
subtitle_set = subtitles
if subtitle_set:
subs_length = len(subtitle_set)
        # subtitles have changed if only one of the versions is empty
# or if the versions themselves differ
if not previous_version and not subtitle_set:
subtitles_changed = False
elif not previous_version or not subtitle_set:
subtitles_changed = True
else:
subtitles_changed = diff(previous_version.get_subtitles(), subtitle_set)['changed']
should_create_new_version = (
subtitles_changed or title_changed or desc_changed or
metadata_changed)
if should_create_new_version:
new_version, should_create_task = self._create_version(
session.language, user,
new_title=new_title,
new_description=new_description,
new_metadata=new_metadata,
subtitles=subtitles,
session=session)
incomplete = not new_version.is_synced() or save_for_later
# Record the origin of this set of subtitles.
#
# We need to record it *before* creating review/approve tasks (if
# any) because that means these subs were from a post-publish edit
# or something similar. If we record the origin after creating the
# review task it'll be marked as originating from review, which
# isn't right because these subs had to come from something else.
#
# :(
record_workflow_origin(new_version, new_version.video.get_team_video())
if (not incomplete) and should_create_task:
self._create_review_or_approve_task(new_version)
return new_version
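    # A sketch of the decision implemented above (descriptive comments only):
    #
    #   changed = subtitles differ from the previous tip,
    #             or the title / description / metadata differ
    #   if changed:
    #       version = self._create_version(...)    # visibility decided by _moderate_language()
    #       record_workflow_origin(version, team_video)
    #       if version.is_synced() and not save_for_later and should_create_task:
    #           self._create_review_or_approve_task(version)
    #       return version
    #   else:
    #       return None                            # nothing worth saving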
def _update_language_attributes_for_save(self, language, completed, session, forked):
"""Update the attributes of the language as necessary and save it.
Will also send the appropriate API notification if needed.
"""
must_trigger_api_language_edited = False
if completed is not None:
if language.subtitles_complete != completed:
must_trigger_api_language_edited = True
language.subtitles_complete = completed
# this means all 'original languages' will be marked as forks
# but this is cool for now because all those languages should
# be shown on the transcribe dialog. if there's a base language,
# that means we should always show the translate dialog.
if forked or session.base_language is None:
language.is_forked = True
language.save()
if forked:
pipeline._fork_dependents(language)
if must_trigger_api_language_edited:
language.video.save()
api_language_edited.send(language)
def save_finished(self, request, user, session, subtitles, new_title=None,
completed=None, forked=False, new_description=None,
new_metadata=None, task_id=None, task_notes=None,
task_approved=None, task_type=None,
save_for_later=None):
# TODO: lock all this in a transaction please!
language = session.language
new_version = self._get_new_version_for_save(
subtitles, language, session, user, new_title,
new_description, new_metadata, save_for_later)
language.release_writelock()
# do this here, before _update_language_a... changes it ;)
        complete_changed = bool(completed) != language.subtitles_complete
self._update_language_attributes_for_save(language, completed, session, forked)
if new_version:
video_changed_tasks.delay(language.video.id, new_version.id)
else:
video_changed_tasks.delay(language.video.id)
api_video_edited.send(language.video)
if completed and complete_changed:
# not a new version, but if this just got marked as complete
# we want to push this to the third parties:
subtitles_complete_changed(language.pk)
user_message = self._get_user_message_for_save(user, language, language.subtitles_complete)
error = self._save_tasks_for_save(
request, save_for_later, language, new_version, language.subtitles_complete,
task_id, task_type, task_notes, task_approved)
if error:
return error
return { 'response': 'ok', 'user_message': user_message }
def finished_subtitles(self, request, session_pk, subtitles=None,
new_title=None, completed=None, forked=False,
throw_exception=False, new_description=None,
new_metadata=None,
task_id=None, task_notes=None, task_approved=None,
task_type=None, save_for_later=None):
"""Called when a user has finished a set of subtitles and they should be saved.
TODO: Rename this to something verby, like "finish_subtitles".
"""
session = SubtitlingSession.objects.get(pk=session_pk)
if not request.user.is_authenticated():
return { 'response': 'not_logged_in' }
if not session.language.can_writelock(request.user):
return { "response" : "unlockable" }
if not session.matches_request(request):
return { "response" : "does not match request" }
if throw_exception:
raise Exception('purposeful exception for testing')
return self.save_finished(
request, request.user, session, subtitles, new_title, completed,
forked, new_description, new_metadata, task_id, task_notes,
task_approved, task_type, save_for_later)
def _create_review_or_approve_task(self, subtitle_version):
team_video = subtitle_version.video.get_team_video()
lang = subtitle_version.subtitle_language.language_code
workflow = Workflow.get_for_team_video(team_video)
if workflow.review_allowed:
type = Task.TYPE_IDS['Review']
can_do = partial(can_review, allow_own=True)
elif workflow.approve_allowed:
type = Task.TYPE_IDS['Approve']
can_do = can_approve
else:
return None
# TODO: Dedupe this and Task._find_previous_assignee
# Find the assignee.
#
# For now, we'll assign the review/approval task to whomever did
# it last time (if it was indeed done), but only if they're
# still eligible to perform it now.
last_task = team_video.task_set.complete().filter(
language=lang, type=type
).order_by('-completed')[:1]
assignee = None
if last_task:
candidate = last_task[0].assignee
if candidate and can_do(team_video, candidate, lang):
assignee = candidate
task = Task(team=team_video.team, team_video=team_video,
assignee=assignee, language=lang, type=type)
task.set_expiration()
task.new_subtitle_version = subtitle_version
if task.get_type_display() in ['Review', 'Approve']:
task.new_review_base_version = subtitle_version
task.save()
def _moderate_language(self, language, user):
"""Return the right visibility for a version based on the given session.
Also may possibly return a Task object that needs to be saved once the
subtitle_version is ready.
Also perform any ancillary tasks that are appropriate, assuming the
version actually gets created later.
Also :(
"""
team_video = language.video.get_team_video()
if not team_video:
return 'public', False
team = team_video.team
workflow = team.get_workflow()
# If there are any open team tasks for this video/language, it needs to
# be kept under moderation.
tasks = team_video.task_set.incomplete().filter(
Q(language=language.language_code)
| Q(type=Task.TYPE_IDS['Subtitle'])
)
if tasks:
for task in tasks:
if task.type == Task.TYPE_IDS['Subtitle']:
if not task.language:
task.language = language.language_code
task.save()
return ('public', False) if not team.workflow_enabled else ('private', False)
if not workflow.requires_tasks:
return 'public', False
elif language.old_has_version:
# If there are already active subtitles for this language, we're
# dealing with an edit.
if can_publish_edits_immediately(team_video, user, language.language_code):
# The user may have the rights to immediately publish edits to
# subtitles. If that's the case we mark them as approved and
# don't need a task.
return 'public', False
else:
# Otherwise it's an edit that needs to be reviewed/approved.
return 'private', True
else:
# Otherwise we're dealing with a new set of subtitles for this
# language.
return 'private', True
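    # Outcomes of _moderate_language above, as (visibility, needs_review_or_approve_task):
    #
    #   no team video ....................................... ('public',  False)
    #   open tasks exist, team workflow disabled ............ ('public',  False)
    #   open tasks exist, team workflow enabled ............. ('private', False)
    #   workflow does not require tasks ..................... ('public',  False)
    #   edit of published subs, user may publish edits ...... ('public',  False)
    #   edit of published subs, user may not publish edits .. ('private', True)
    #   brand new subtitles for this language ............... ('private', True)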
def _create_version(self, language, user=None, new_title=None, new_description=None, new_metadata=None, subtitles=None, session=None):
latest_version = language.get_tip(public=False)
visibility, should_create_task = self._moderate_language(language, user)
kwargs = dict(visibility=visibility)
# it's a title/description update
# we can do this better btw
# TODO: improve-me plz
if (new_title or new_description) and not subtitles and latest_version:
subtitles = latest_version.get_subtitles()
if user is not None:
kwargs['author'] = user
if new_title is not None:
kwargs['title'] = new_title
elif latest_version:
kwargs['title'] = latest_version.title
else:
kwargs['title'] = language.video.title
if new_description is not None:
kwargs['description'] = new_description
elif latest_version:
kwargs['description'] = latest_version.description
else:
kwargs['description'] = language.video.description
kwargs['metadata'] = new_metadata
if subtitles is None:
subtitles = []
kwargs['video'] = language.video
kwargs['language_code'] = language.language_code
kwargs['subtitles'] = subtitles
kwargs['origin'] = ORIGIN_LEGACY_EDITOR
if session and session.base_language:
base_language_code = session.base_language.language_code
base_subtitle_language = language.video.subtitle_language(base_language_code)
if base_language_code:
kwargs['parents'] = [base_subtitle_language.get_tip(full=True)]
version = pipeline.add_subtitles(**kwargs)
return version, should_create_task
def fetch_subtitles(self, request, video_id, language_pk):
cache = video_cache.get_subtitles_dict(
video_id, language_pk, None,
lambda version: self._subtitles_dict(version=version))
return cache
def get_widget_info(self, request):
return {
'all_videos': models.Video.objects.count(),
'videos_with_captions': models.Video.objects.exclude(subtitlelanguage=None).count(),
'translations_count': models.SubtitleLanguage.objects.filter(is_original=False).count()
}
def _make_subtitling_session(self, request, language, base_language_code, video_id, version=None):
try:
base_language = new_models.SubtitleLanguage.objects.get(video__video_id=video_id,
language_code=base_language_code)
except new_models.SubtitleLanguage.DoesNotExist:
base_language = None
session = SubtitlingSession(
language=language,
base_language=base_language,
parent_version=version)
if request.user.is_authenticated():
session.user = request.user
session.save()
return session
# Review
def fetch_review_data(self, request, task_id):
task = Task.objects.get(pk=task_id)
return {'response': 'ok', 'body': task.body}
def _save_review(self, request, save_for_later, task_id=None, body=None,
approved=None, new_version=None):
"""
If the task performer has edited this version, then we need to
set the task's version to the new one that he has edited.
"""
data = {'task': task_id, 'body': body, 'approved': approved}
form = FinishReviewForm(request, data)
if form.is_valid():
task = form.cleaned_data['task']
task.body = form.cleaned_data['body']
task.approved = form.cleaned_data['approved']
# If there is a new version, update the task's version.
if new_version:
task.new_subtitle_version = new_version
task.save()
if not save_for_later:
if task.approved in Task.APPROVED_FINISHED_IDS:
task.complete()
task.new_subtitle_version.subtitle_language.release_writelock()
task.new_subtitle_version.subtitle_language.followers.add(request.user)
video_changed_tasks.delay(task.team_video.video_id)
else:
return {'error_msg': _(u'\n'.join(flatten_errorlists(form.errors)))}
# Approval
def fetch_approve_data(self, request, task_id):
task = Task.objects.get(pk=task_id)
return {'response': 'ok', 'body': task.body}
def _save_approve(self, request, save_for_later, task_id=None, body=None,
approved=None, new_version=None):
"""
If the task performer has edited this version, then we need to
set the task's version to the new one that he has edited.
"""
data = {'task': task_id, 'body': body, 'approved': approved}
form = FinishApproveForm(request, data)
if form.is_valid():
task = form.cleaned_data['task']
task.body = form.cleaned_data['body']
task.approved = form.cleaned_data['approved']
# If there is a new version, update the task's version.
if new_version:
task.new_subtitle_version = new_version
task.save()
if not save_for_later:
if task.approved in Task.APPROVED_FINISHED_IDS:
task.complete()
task.new_subtitle_version.subtitle_language.release_writelock()
video_changed_tasks.delay(task.team_video.video_id)
else:
return {'error_msg': _(u'\n'.join(flatten_errorlists(form.errors)))}
def _find_base_language(self, base_language):
if base_language:
video = base_language.video
if base_language.is_original or base_language.is_forked:
return base_language
else:
if base_language.standard_language:
return base_language.standard_language
else:
return video.subtitle_language()
else:
return None
def _needs_new_sub_language(self, language, base_language):
if language.standard_language and not base_language:
# forking existing
return False
elif language.is_forked and base_language:
return True
else:
return language.standard_language != base_language
def _get_language_for_editing(self, request, video_id, language_code,
subtitle_language_pk=None, base_language_code=None):
"""Return the subtitle language to edit or a lock response."""
video = models.Video.objects.get(video_id=video_id)
editable = False
created = False
if subtitle_language_pk is not None:
language = new_models.SubtitleLanguage.objects.get(pk=subtitle_language_pk)
else:
# we can tell which language it is from the language code
candidates = video.newsubtitlelanguage_set.filter(language_code=language_code)
if not candidates.exists():
# no languages with the language code, we must create one
language = new_models.SubtitleLanguage(
video=video, language_code=language_code,
created=datetime.now())
language.is_forked = not base_language_code and video.newsubtitlelanguage_set.exists()
language.save()
created = True
else:
                for candidate in candidates:
                    if base_language_code == candidate.get_translation_source_language_code():
                        # base language matches, break me
                        language = candidate
                        break
                else:
                    # no candidate matched the base language, fall back to the first one
                    language = candidates[0]
editable = language.can_writelock(request.user)
if editable:
if created:
api_language_new.send(language)
return language, None
else:
return None, { "can_edit": False,
"locked_by": unicode(language.writelock_owner) }
def _save_original_language(self, video_id, language_code):
video = models.Video.objects.get(video_id=video_id)
if not video.primary_audio_language_code:
video.primary_audio_language_code = language_code
video.save()
def _autoplay_subtitles(self, user, video_id, language_pk, version_number):
cache = video_cache.get_subtitles_dict(video_id, language_pk,
version_number,
lambda version: self._subtitles_dict(version=version))
if cache and cache.get("language", None) is not None:
cache['language_code'] = cache['language'].language
cache['language_pk'] = cache['language'].pk
return cache
def _subtitles_dict(self, version=None, language=None, session=None):
if not language and not version:
raise ValueError("You need to specify either language or version")
latest_version = language.get_tip() if language else None
is_latest = False
if not version and not latest_version:
version_number = 0
language_code = language.language_code
subtitles = create_new_subtitles(language_code).to_xml()
is_latest = True
metadata = language.get_metadata()
for key in language.video.get_metadata():
if key not in metadata:
metadata[key] = ''
else:
version = version or latest_version
version_number = version.version_number
subtitles = version.get_subtitles().to_xml()
language = version.subtitle_language
language_code = language.language_code
metadata = version.get_metadata()
for key in version.video.get_metadata():
if key not in metadata:
metadata[key] = ''
if latest_version is None or version_number >= latest_version.version_number:
is_latest = True
if session:
translated_from = session.base_language
else:
translated_from = language.get_translation_source_language()
return self._make_subtitles_dict(
subtitles,
language,
language.pk,
language.is_primary_audio_language(),
None if translated_from is not None else language.subtitles_complete,
version_number,
is_latest,
translated_from,
language.get_title(public=False),
language.get_description(public=False),
language.is_rtl(),
language.video.is_moderated,
metadata,
)
def language_summary(language, team_video=-1, user=None):
"""Return a dictionary of info about the given SubtitleLanguage.
The team video can be given to avoid an extra database lookup.
"""
if team_video == -1:
team_video = language.video.get_team_video()
translation_source = language.get_translation_source_language()
is_translation = bool(translation_source)
summary = {
'pk': language.pk,
'language': language.language_code,
'dependent': is_translation,
'subtitle_count': language.get_subtitle_count(),
'in_progress': language.is_writelocked,
'disabled_from': False }
if team_video:
tasks = team_video.task_set.incomplete().filter(language=language.language_code)
if tasks:
task = tasks[0]
summary['disabled_to'] = user and user != task.assignee
latest_version = language.get_tip()
if latest_version and language.is_complete_and_synced() and 'disabled_to' not in summary:
# Languages with existing subtitles cannot be selected as a "to"
# language in the "add new translation" dialog. If you want to work on
# that language, select it and hit "Improve these Subtitles" instead.
summary['disabled_to'] = True
elif not latest_version or not latest_version.has_subtitles:
# Languages with *no* existing subtitles cannot be selected as a "from"
# language in the "add new translation" dialog. There's nothing to work
# from!
summary['disabled_from'] = True
if is_translation:
summary['standard_pk'] = translation_source.pk
summary['translated_from'] = translation_source.language_code
summary['is_complete'] = language.subtitles_complete
summary['is_public'] = True if language.get_public_tip() else False
return summary
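# A hypothetical return value, to illustrate the shape of the summary dict
# ('standard_pk'/'translated_from' only appear for translations, and
# 'disabled_to' only when blocked by a task or by complete existing subtitles):
#
#   {'pk': 12, 'language': 'fr', 'dependent': True, 'subtitle_count': 42,
#    'in_progress': False, 'disabled_from': False, 'disabled_to': True,
#    'standard_pk': 3, 'translated_from': 'en',
#    'is_complete': True, 'is_public': True}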
|
from unittest import TestCase
from urllib.parse import urlparse
import warnings
from scrapy.http import Response, Request
from scrapy.spiders import Spider
from scrapy.spidermiddlewares.offsite import OffsiteMiddleware, URLWarning
from scrapy.utils.test import get_crawler
class TestOffsiteMiddleware(TestCase):
def setUp(self):
crawler = get_crawler(Spider)
self.spider = crawler._create_spider(**self._get_spiderargs())
self.mw = OffsiteMiddleware.from_crawler(crawler)
self.mw.spider_opened(self.spider)
def _get_spiderargs(self):
return dict(name='foo', allowed_domains=['scrapytest.org', 'scrapy.org', 'scrapy.test.org'])
def test_process_spider_output(self):
res = Response('http://scrapytest.org')
onsite_reqs = [Request('http://scrapytest.org/1'),
Request('http://scrapy.org/1'),
Request('http://sub.scrapy.org/1'),
Request('http://offsite.tld/letmepass', dont_filter=True),
Request('http://scrapy.test.org/')]
offsite_reqs = [Request('http://scrapy2.org'),
Request('http://offsite.tld/'),
Request('http://offsite.tld/scrapytest.org'),
Request('http://offsite.tld/rogue.scrapytest.org'),
Request('http://rogue.scrapytest.org.haha.com'),
Request('http://roguescrapytest.org'),
Request('http://test.org/'),
Request('http://notscrapy.test.org/')]
reqs = onsite_reqs + offsite_reqs
out = list(self.mw.process_spider_output(res, reqs, self.spider))
self.assertEqual(out, onsite_reqs)
class TestOffsiteMiddleware2(TestOffsiteMiddleware):
def _get_spiderargs(self):
return dict(name='foo', allowed_domains=None)
def test_process_spider_output(self):
res = Response('http://scrapytest.org')
reqs = [Request('http://a.com/b.html'), Request('http://b.com/1')]
out = list(self.mw.process_spider_output(res, reqs, self.spider))
self.assertEqual(out, reqs)
class TestOffsiteMiddleware3(TestOffsiteMiddleware2):
    def _get_spiderargs(self):
        return dict(name='foo')
class TestOffsiteMiddleware4(TestOffsiteMiddleware3):
    def _get_spiderargs(self):
bad_hostname = urlparse('http:////scrapytest.org').hostname
return dict(name='foo', allowed_domains=['scrapytest.org', None, bad_hostname])
def test_process_spider_output(self):
res = Response('http://scrapytest.org')
reqs = [Request('http://scrapytest.org/1')]
out = list(self.mw.process_spider_output(res, reqs, self.spider))
self.assertEqual(out, reqs)
class TestOffsiteMiddleware5(TestOffsiteMiddleware4):
def test_get_host_regex(self):
self.spider.allowed_domains = ['http://scrapytest.org', 'scrapy.org', 'scrapy.test.org']
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
self.mw.get_host_regex(self.spider)
assert issubclass(w[-1].category, URLWarning)
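# Behaviour exercised by the tests above, summarised for reference (comments only):
# OffsiteMiddleware builds a host regex from spider.allowed_domains and drops
# requests whose host is neither an allowed domain nor one of its subdomains
# (e.g. 'sub.scrapy.org' passes for 'scrapy.org', while 'roguescrapytest.org'
# and 'rogue.scrapytest.org.haha.com' do not). dont_filter=True bypasses the
# check, an empty or None allowed_domains disables filtering entirely, and
# URL-shaped entries such as 'http://scrapytest.org' make get_host_regex()
# emit a URLWarning.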
|
from rest_framework import generics, permissions
from django.contrib.auth.models import User
# from T.tings.models.models_users import TUserProfile
from T.tings.serializers.serializers_users import TUserSerializer
class TUserPermission(permissions.BasePermission):
"""
Custom permission to only allow owners of an object to edit it.
"""
def has_object_permission(self, request, view, obj):
# anybody can submit GET, HEAD or OPTIONS requests...
if request.method in permissions.SAFE_METHODS:
return True
# only the admin or collection owners can submit PUT, POST, or DELETE requests...
user = request.user
return user.is_superuser or user == obj
class TUserList(generics.ListCreateAPIView):
queryset = User.objects.all()
serializer_class = TUserSerializer
permission_classes = (permissions.IsAuthenticatedOrReadOnly, TUserPermission,)
class TUserDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = User.objects.all()
serializer_class = TUserSerializer
permission_classes = (permissions.IsAuthenticatedOrReadOnly, TUserPermission,)
|
# Copyright 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.core.urlresolvers import reverse
from django.template import defaultfilters as filters
from django.utils.translation import pgettext_lazy
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from horizon import exceptions
from horizon import tables
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.networks \
import tables as project_tables
from openstack_dashboard import policy
LOG = logging.getLogger(__name__)
class DeleteNetwork(policy.PolicyTargetMixin, tables.DeleteAction):
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Delete Network",
u"Delete Networks",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Deleted Network",
u"Deleted Networks",
count
)
policy_rules = (("network", "delete_network"),)
def delete(self, request, obj_id):
try:
api.neutron.network_delete(request, obj_id)
except Exception as e:
LOG.info('Failed to delete network %(id)s: %(exc)s',
{'id': obj_id, 'exc': e})
msg = _('Failed to delete network %s') % obj_id
redirect = reverse('horizon:admin:networks:index')
exceptions.handle(request, msg, redirect=redirect)
class CreateNetwork(tables.LinkAction):
name = "create"
verbose_name = _("Create Network")
url = "horizon:admin:networks:create"
classes = ("ajax-modal",)
icon = "plus"
policy_rules = (("network", "create_network"),)
class EditNetwork(policy.PolicyTargetMixin, tables.LinkAction):
name = "update"
verbose_name = _("Edit Network")
url = "horizon:admin:networks:update"
classes = ("ajax-modal",)
icon = "pencil"
policy_rules = (("network", "update_network"),)
DISPLAY_CHOICES = (
("up", pgettext_lazy("Admin state of a Network", u"UP")),
("down", pgettext_lazy("Admin state of a Network", u"DOWN")),
)
class AdminNetworksFilterAction(project_tables.ProjectNetworksFilterAction):
name = "filter_admin_networks"
filter_choices = (('project', _("Project ="), True),) +\
project_tables.ProjectNetworksFilterAction.filter_choices
class NetworksTable(tables.DataTable):
tenant = tables.Column("tenant_name", verbose_name=_("Project"))
name = tables.WrappingColumn("name_or_id", verbose_name=_("Network Name"),
link='horizon:admin:networks:detail')
subnets = tables.Column(project_tables.get_subnets,
verbose_name=_("Subnets Associated"),)
num_agents = tables.Column("num_agents",
verbose_name=_("DHCP Agents"))
shared = tables.Column("shared", verbose_name=_("Shared"),
filters=(filters.yesno, filters.capfirst))
external = tables.Column("router:external",
verbose_name=_("External"),
filters=(filters.yesno, filters.capfirst))
status = tables.Column(
"status", verbose_name=_("Status"),
display_choices=project_tables.STATUS_DISPLAY_CHOICES)
admin_state = tables.Column("admin_state",
verbose_name=_("Admin State"),
display_choices=DISPLAY_CHOICES)
def get_object_display(self, network):
return network.name_or_id
class Meta(object):
name = "networks"
verbose_name = _("Networks")
table_actions = (CreateNetwork, DeleteNetwork,
AdminNetworksFilterAction)
row_actions = (EditNetwork, DeleteNetwork)
def __init__(self, request, data=None, needs_form_wrapper=None, **kwargs):
super(NetworksTable, self).__init__(
request, data=data,
needs_form_wrapper=needs_form_wrapper,
**kwargs)
try:
if not api.neutron.is_extension_supported(request,
'dhcp_agent_scheduler'):
del self.columns['num_agents']
except Exception:
msg = _("Unable to check if DHCP agent scheduler "
"extension is supported")
exceptions.handle(self.request, msg)
del self.columns['num_agents']
|
"""
High-level libvirt test utility functions.
This module is meant to reduce code size by performing common test procedures.
Generally, code here should look like test code.
More specifically:
- Functions in this module should raise exceptions if things go wrong
- Functions in this module typically use functions and classes from
lower-level modules (e.g. utils_misc, qemu_vm, aexpect).
- Functions in this module should not be used by lower-level modules.
- Functions in this module should be used in the right context.
For example, a function should not be used where it may display
misleading or inaccurate info or debug messages.
:copyright: 2014 Red Hat Inc.
"""
from __future__ import division
import re
import os
import ast
import logging
import shutil
import time
import sys
import aexpect
import platform
import random
import string
from aexpect import remote
from avocado.core import exceptions
from avocado.utils import path as utils_path
from avocado.utils import process
from avocado.utils import stacktrace
from avocado.utils import linux_modules
from avocado.utils import distro
from avocado.utils.astring import to_text
import six
from virttest import virsh
from virttest import xml_utils
from virttest import iscsi
from virttest import nfs
from virttest import utils_misc
from virttest import utils_selinux
from virttest import libvirt_storage
from virttest import utils_net
from virttest import gluster
from virttest import test_setup
from virttest import data_dir
from virttest import utils_libvirtd
from virttest import utils_config
from virttest import utils_split_daemons
from virttest import remote as remote_old
from virttest.staging import lv_utils
from virttest.utils_libvirtd import service_libvirtd_control
from virttest.libvirt_xml import vm_xml
from virttest.libvirt_xml import network_xml
from virttest.libvirt_xml import xcepts
from virttest.libvirt_xml import NetworkXML
from virttest.libvirt_xml import IPXML
from virttest.libvirt_xml import pool_xml
from virttest.libvirt_xml import nwfilter_xml
from virttest.libvirt_xml import vol_xml
from virttest.libvirt_xml import secret_xml
from virttest.libvirt_xml import CapabilityXML
from virttest.libvirt_xml import base
from virttest.libvirt_xml.devices import disk
from virttest.libvirt_xml.devices import hostdev
from virttest.libvirt_xml.devices import controller
from virttest.libvirt_xml.devices import redirdev
from virttest.libvirt_xml.devices import seclabel
from virttest.libvirt_xml.devices import channel
from virttest.libvirt_xml.devices import interface
from virttest.libvirt_xml.devices import panic
from virttest.libvirt_xml.devices import tpm
from virttest.libvirt_xml.devices import vsock
from virttest.libvirt_xml.devices import rng
ping = utils_net.ping
class LibvirtNetwork(object):
"""
Class to create a temporary network for testing.
"""
def create_network_xml_with_dhcp(self):
"""
        Create a network XML object with a DHCP range.
"""
net_xml = NetworkXML()
net_xml.name = self.kwargs.get('net_name')
ip_version = self.kwargs.get('ip_version')
dhcp_start = self.kwargs.get('dhcp_start')
dhcp_end = self.kwargs.get('dhcp_end')
address = self.kwargs.get('address')
ip = None
if ip_version == 'ipv6':
ip = IPXML(address=address, ipv6=ip_version)
ip.prefix = '64'
else:
ip = IPXML(address=address)
ip.family = ip_version
ran = network_xml.RangeXML()
ran.attrs = {'start': dhcp_start, 'end': dhcp_end}
ip.dhcp_ranges = ran
net_xml.ip = ip
net_xml.bridge = {'name': self.kwargs.get('br_name'), 'stp': 'on', 'delay': '0'}
return net_xml
def create_vnet_xml(self):
"""
Create XML for a virtual network.
"""
address = self.kwargs.get('address')
if not address:
            raise exceptions.TestError("'address' must be set to create a vnet network")
net_xml = NetworkXML()
net_xml.name = self.name
ip = IPXML(address=address)
dhcp_start = self.kwargs.get('dhcp_start')
dhcp_end = self.kwargs.get('dhcp_end')
if all([dhcp_start, dhcp_end]):
ran = network_xml.RangeXML()
ran.attrs = {'start': dhcp_start, 'end': dhcp_end}
ip.dhcp_ranges = ran
net_xml.ip = ip
return address, net_xml
def create_macvtap_xml(self):
"""
Create XML for a macvtap network.
"""
iface = self.kwargs.get('iface')
if not iface:
            raise exceptions.TestError("'iface' must be set to create a macvtap network")
net_xml = NetworkXML()
net_xml.name = self.kwargs.get('net_name')
net_xml.forward = {'mode': 'bridge', 'dev': iface}
ip_version = self.kwargs.get('ip_version')
ip = utils_net.get_ip_address_by_interface(iface, ip_ver=ip_version)
return ip, net_xml
def create_bridge_xml(self):
"""
Create XML for a bridged network.
"""
iface = self.kwargs.get('iface')
if not iface:
            raise exceptions.TestError("'iface' must be set to create a bridge network")
net_xml = NetworkXML()
net_xml.name = self.name
net_xml.forward = {'mode': 'bridge'}
net_xml.bridge = {'name': iface}
ip = utils_net.get_ip_address_by_interface(iface)
return ip, net_xml
def create_nat_xml(self):
"""
Create XML for a nat network.
"""
address = self.kwargs.get('address')
if not address:
raise exceptions.TestError("'address' is required to create nat network xml.")
net_xml = self.create_network_xml_with_dhcp()
net_xml.forward = {'mode': 'nat'}
return address, net_xml
def create_route_xml(self):
"""
Create XML for a route network.
"""
address = self.kwargs.get('address')
if not address:
raise exceptions.TestError("'address' is required to create route network xml.")
net_xml = self.create_network_xml_with_dhcp()
net_xml.forward = {'mode': 'route'}
return address, net_xml
def __init__(self, net_type, **kwargs):
self.kwargs = kwargs
net_name = kwargs.get('net_name')
if net_name is None:
self.name = 'avocado-vt-%s' % net_type
else:
self.name = net_name
self.persistent = kwargs.get('persistent', False)
if net_type == 'vnet':
self.ip, net_xml = self.create_vnet_xml()
elif net_type == 'macvtap':
self.ip, net_xml = self.create_macvtap_xml()
elif net_type == 'bridge':
self.ip, net_xml = self.create_bridge_xml()
elif net_type == 'nat':
self.ip, net_xml = self.create_nat_xml()
elif net_type == 'route':
self.ip, net_xml = self.create_route_xml()
else:
raise exceptions.TestError(
'Unknown libvirt network type %s' % net_type)
if self.persistent:
net_xml.define()
net_xml.start()
else:
net_xml.create()
def cleanup(self):
"""
Clear up network.
"""
virsh.net_destroy(self.name)
if self.persistent:
virsh.net_undefine(self.name)
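    # A commented usage sketch (all parameter values are hypothetical):
    #
    #   net = LibvirtNetwork('nat', net_name='test-nat', br_name='virbr-test',
    #                        address='192.168.100.1', ip_version='ipv4',
    #                        dhcp_start='192.168.100.2', dhcp_end='192.168.100.254',
    #                        persistent=True)
    #   try:
    #       ...  # exercise the network via net.name / net.ip
    #   finally:
    #       net.cleanup()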
def create_macvtap_vmxml(iface, params):
"""
Method to create Macvtap interface xml for VM
:param iface: macvtap interface name
:param params: Test dict params for macvtap config
:return: macvtap xml object
"""
mode = params.get('macvtap_mode', 'passthrough')
model = params.get('macvtap_model', 'virtio')
macvtap_type = params.get('macvtap_type', 'direct')
macvtap = interface.Interface(macvtap_type)
macvtap.mac_address = utils_net.generate_mac_address_simple()
macvtap.model = model
macvtap.source = {'dev': iface, 'mode': mode}
return macvtap
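# A commented usage sketch (interface name, VM name and params are hypothetical,
# and attaching the device assumes the usual vm_xml.VMXML helpers):
#
#   params = {'macvtap_mode': 'bridge', 'macvtap_model': 'virtio'}
#   macvtap_dev = create_macvtap_vmxml('eno1', params)
#   vmxml = vm_xml.VMXML.new_from_dumpxml('avocado-vt-vm1')
#   vmxml.add_device(macvtap_dev)
#   vmxml.sync()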
def get_machine_types(arch, virt_type, virsh_instance=base.virsh, ignore_status=True):
"""
Method to get all supported machine types
:param arch: architecture of the machine
:param virt_type: virtualization type hvm or pv
:param virsh_instance: virsh instance object
:param ignore_status: False to raise Error, True to ignore
:return: list of machine types supported
"""
machine_types = []
try:
capability = CapabilityXML(virsh_instance=virsh_instance)
machine_types = capability.guest_capabilities[virt_type][arch]['machine']
return machine_types
except KeyError as detail:
if ignore_status:
return machine_types
else:
if detail.args[0] == virt_type:
raise KeyError("No libvirt support for %s virtualization, "
"does system hardware + software support it?"
% virt_type)
elif detail.args[0] == arch:
raise KeyError("No libvirt support for %s virtualization of "
"%s, does system hardware + software support "
"it?" % (virt_type, arch))
raise exceptions.TestError(detail)
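# A commented usage sketch:
#
#   machines = get_machine_types('x86_64', 'hvm')
#   if 'q35' not in machines:
#       raise exceptions.TestSkipError('q35 machine type is not supported here')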
def clean_up_snapshots(vm_name, snapshot_list=[], domxml=None):
"""
    Clean up (delete) snapshots of a domain, including their disk files
    :param vm_name: Name of domain
    :param snapshot_list: The list of snapshot names you want to remove
    :param domxml: Domain XML object, used to locate the original disk images
"""
if not snapshot_list:
# Get all snapshot names from virsh snapshot-list
snapshot_list = virsh.snapshot_list(vm_name)
# Get snapshot disk path
for snap_name in snapshot_list:
# Delete useless disk snapshot file if exists
result = virsh.snapshot_dumpxml(vm_name, snap_name)
snap_xml = result.stdout_text.strip()
xtf_xml = xml_utils.XMLTreeFile(snap_xml)
disks_path = xtf_xml.findall('disks/disk/source')
for disk in disks_path:
os.system('rm -f %s' % disk.get('file'))
# Delete snapshots of vm
virsh.snapshot_delete(vm_name, snap_name)
    # External disk snapshots can't be deleted by the virsh command;
    # they need to be deleted with qemu-img
snapshot_list = virsh.snapshot_list(vm_name)
if snapshot_list:
# Delete snapshot metadata first
for snap_name in snapshot_list:
virsh.snapshot_delete(vm_name, snap_name, "--metadata")
        # Delete all snapshots with qemu-img.
        # The domain XML should be provided as a parameter; we can't get
        # the image name from the dumpxml command because it would return a
        # snapshot image name
if domxml:
disks_path = domxml.xmltreefile.findall('devices/disk/source')
for disk in disks_path:
img_name = disk.get('file')
snaps = utils_misc.get_image_snapshot(img_name)
cmd = "qemu-img snapshot %s" % img_name
for snap in snaps:
process.run("%s -d %s" % (cmd, snap))
else:
# Get snapshot disk path from domain xml because
# there is no snapshot info with the name
dom_xml = vm_xml.VMXML.new_from_dumpxml(vm_name).xmltreefile
disk_path = dom_xml.find('devices/disk/source').get('file')
for name in snapshot_list:
snap_disk_path = disk_path.split(".")[0] + "." + name
os.system('rm -f %s' % snap_disk_path)
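# A commented usage sketch (VM name is hypothetical; the backup XML lets the
# qemu-img fallback find the original image paths):
#
#   vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml('avocado-vt-vm1')
#   ...  # create snapshots and run the test
#   clean_up_snapshots('avocado-vt-vm1', domxml=vmxml_backup)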
def get_all_cells():
"""
Use virsh freecell --all to get all cells on host
::
# virsh freecell --all
0: 124200 KiB
1: 1059868 KiB
--------------------
Total: 1184068 KiB
That would return a dict like:
::
cell_dict = {"0":"124200 KiB", "1":"1059868 KiB", "Total":"1184068 KiB"}
:return: cell_dict
"""
fc_result = virsh.freecell(options="--all", ignore_status=True)
if fc_result.exit_status:
stderr = fc_result.stderr_text.strip()
if fc_result.stderr.count(b"NUMA not supported"):
raise exceptions.TestSkipError(stderr)
else:
raise exceptions.TestFail(stderr)
output = fc_result.stdout_text.strip()
cell_list = output.splitlines()
# remove "------------" line
del cell_list[-2]
cell_dict = {}
for cell_line in cell_list:
cell_info = cell_line.split(":")
cell_num = cell_info[0].strip()
cell_mem = cell_info[-1].strip()
cell_dict[cell_num] = cell_mem
return cell_dict
def check_blockjob(vm_name, target, check_point="none", value="0"):
"""
    Run the blockjob command to check block job progress, bandwidth, etc.
    :param vm_name: Domain name
    :param target: Domain disk target dev
    :param check_point: Job progress, bandwidth or none (no job)
    :param value: Value of progress, bandwidth (with unit) or 0 (no job)
:return: Boolean value, true for pass, false for fail
"""
if check_point not in ["progress", "bandwidth", "none"]:
logging.error("Check point must be: progress, bandwidth or none")
return False
try:
cmd_result = virsh.blockjob(
vm_name, target, "--info", debug=True, ignore_status=True)
output = cmd_result.stdout_text.strip()
err = cmd_result.stderr_text.strip()
status = cmd_result.exit_status
except Exception as e:
logging.error("Error occurred: %s", e)
return False
if status:
logging.error("Run blockjob command fail")
return False
# libvirt print block job progress to stderr
if check_point == 'none':
if len(err):
logging.error("Expect no job but find block job:\n%s", err)
return False
return True
if check_point == "progress":
progress = value + " %"
if re.search(progress, err):
return True
return False
    # Since 1.3.3-1, libvirt supports bytes and scaled integers for bandwidth,
    # and the output of blockjob may look like:
# # virsh blockjob avocado-vt-vm1 vda --info
# Block Copy: [100 %] Bandwidth limit: 9223372036853727232 bytes/s (8.000 EiB/s)
#
# So we need specific the bandwidth unit when calling this function
# and universalize the unit before comparing
if check_point == "bandwidth":
try:
bandwidth, unit = re.findall(r'(\d+) (\w+)/s', output)[0]
# unit could be 'bytes' or 'Mib'
if unit == 'bytes':
unit = 'B'
else:
unit = 'M'
u_value = utils_misc.normalize_data_size(value, unit)
if float(u_value) == float(bandwidth):
logging.debug("Bandwidth is equal to %s", bandwidth)
return True
logging.error("Bandwidth is not equal to %s", bandwidth)
return False
except Exception as e:
logging.error("Fail to get bandwidth: %s", e)
return False
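# A commented usage sketch (VM and target names are hypothetical):
#
#   # wait until the block copy job on vda reports 100 % progress
#   finished = utils_misc.wait_for(
#       lambda: check_blockjob(vm_name, 'vda', 'progress', '100'), timeout=300)
#   if not finished:
#       raise exceptions.TestFail('block copy did not reach 100 % in time')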
def setup_or_cleanup_nfs(is_setup, mount_dir="nfs-mount", is_mount=False,
export_options="rw,no_root_squash",
mount_options="rw",
export_dir="nfs-export",
restore_selinux="",
rm_export_dir=True,
set_selinux_permissive=False):
"""
    Set SELinux to "permissive" and set up the nfs service on localhost, or
    clean up the nfs service on localhost and restore SELinux.
    Note: the SELinux status must be backed up and restored after use.
Example:
# Setup NFS.
res = setup_or_cleanup_nfs(is_setup=True)
# Backup SELinux status.
selinux_bak = res["selinux_status_bak"]
# Do something.
...
    # Cleanup NFS and restore SELinux.
res = setup_or_cleanup_nfs(is_setup=False, restore_selinux=selinux_bak)
    :param is_setup: Boolean value, true for setup, false for cleanup
    :param mount_dir: NFS mount dir. This can be an absolute path on the
                      host or a path relative to the libvirt tmp dir.
                      Default to "nfs-mount".
    :param is_mount: Boolean value, whether the target NFS should be mounted.
    :param export_options: Options for the exported nfs dir.
                           Default to "rw,no_root_squash".
    :param mount_options: Options for mounting the nfs dir. Default to "rw".
    :param export_dir: NFS export dir. This can be an absolute path on the
                       host or a path relative to the libvirt tmp dir.
                       Default to "nfs-export".
    :param restore_selinux: SELinux status string to restore on cleanup
                            (returned by a previous setup call).
    :param rm_export_dir: Boolean, True for forcibly removing the nfs export
                          dir, False for keeping the nfs export dir
    :param set_selinux_permissive: Boolean, True to set selinux to permissive
                                   mode, False to leave it unchanged.
:return: A dict contains export and mount result parameters:
export_dir: Absolute directory of exported local NFS file system.
mount_dir: Absolute directory NFS file system mounted on.
selinux_status_bak: SELinux status before set
"""
result = {}
ubuntu = distro.detect().name == 'Ubuntu'
tmpdir = data_dir.get_tmp_dir()
if not os.path.isabs(export_dir):
export_dir = os.path.join(tmpdir, export_dir)
if not os.path.isabs(mount_dir):
mount_dir = os.path.join(tmpdir, mount_dir)
result["export_dir"] = export_dir
result["mount_dir"] = mount_dir
result["selinux_status_bak"] = None
if not ubuntu:
result["selinux_status_bak"] = utils_selinux.get_status()
nfs_params = {"nfs_mount_dir": mount_dir, "nfs_mount_options": mount_options,
"nfs_mount_src": export_dir, "setup_local_nfs": "yes",
"export_options": export_options}
_nfs = nfs.Nfs(nfs_params)
if is_setup:
if not ubuntu and utils_selinux.is_enforcing():
if set_selinux_permissive:
utils_selinux.set_status("permissive")
logging.debug("selinux set to permissive mode, "
"this is not recommended, potential access "
"control error could be missed.")
else:
logging.debug("selinux is in enforcing mode, libvirt needs "
"\"setsebool virt_use_nfs on\" to get "
"nfs access right.")
_nfs.setup()
nfs_mount_info = process.run('nfsstat -m', shell=True).stdout_text.strip().split(",")
for i in nfs_mount_info:
            if 'vers' in i:
                # take only the major protocol version digit,
                # e.g. '4' from 'vers=4.2'
                source_protocol_ver = i[5]
result["source_protocol_ver"] = source_protocol_ver
break
if not is_mount:
_nfs.umount()
del result["mount_dir"]
else:
if not ubuntu and restore_selinux:
utils_selinux.set_status(restore_selinux)
_nfs.unexportfs_in_clean = True
_nfs.rm_mount_dir = True
_nfs.rm_export_dir = rm_export_dir
_nfs.cleanup()
return result
def setup_or_cleanup_iscsi(is_setup, is_login=True,
emulated_image="emulated-iscsi", image_size="1G",
chap_user="", chap_passwd="", restart_tgtd="no",
portal_ip="127.0.0.1"):
"""
    Set up (and optionally log into) an iscsi target, or clean up the iscsi
    service on localhost.
    :param is_setup: Boolean value, true for setup, false for cleanup
    :param is_login: Boolean value, true for login, false for not login
    :param emulated_image: name of the emulated image backing the iscsi device
    :param image_size: emulated image's size
    :param chap_user: CHAP authentication username
    :param chap_passwd: CHAP authentication password
    :param restart_tgtd: whether to restart the tgtd service ("yes" or "no")
    :param portal_ip: iscsi portal IP address
    :return: iscsi device name when setting up with login (None if the device
             cannot be found), a tuple of (iscsi target, luns) when setting up
             without login, or "" on cleanup
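    Example (illustrative; creates and logs into a 1G emulated target, then
    cleans it up again):
    ::
        device = setup_or_cleanup_iscsi(is_setup=True)  # e.g. "/dev/sdb"
        # ... use the block device ...
        setup_or_cleanup_iscsi(is_setup=False)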
"""
tmpdir = data_dir.get_tmp_dir()
emulated_path = os.path.join(tmpdir, emulated_image)
emulated_target = ("iqn.%s.com.virttest:%s.target" %
(time.strftime("%Y-%m"), emulated_image))
iscsi_params = {"emulated_image": emulated_path, "target": emulated_target,
"image_size": image_size, "iscsi_thread_id": "virt",
"chap_user": chap_user, "chap_passwd": chap_passwd,
"restart_tgtd": restart_tgtd, "portal_ip": portal_ip}
_iscsi = iscsi.Iscsi.create_iSCSI(iscsi_params)
if is_setup:
if is_login:
_iscsi.login()
# The device doesn't necessarily appear instantaneously, so give
# about 5 seconds for it to appear before giving up
iscsi_device = utils_misc.wait_for(_iscsi.get_device_name, 5, 0, 1,
"Searching iscsi device name.")
if iscsi_device:
logging.debug("iscsi device: %s", iscsi_device)
return iscsi_device
if not iscsi_device:
logging.error("Not find iscsi device.")
# Cleanup and return "" - caller needs to handle that
# _iscsi.export_target() will have set the emulated_id and
# export_flag already on success...
_iscsi.cleanup()
process.run("rm -f %s" % emulated_path)
else:
_iscsi.export_target()
return (emulated_target, _iscsi.luns)
else:
_iscsi.export_flag = True
_iscsi.emulated_id = _iscsi.get_target_id()
_iscsi.cleanup()
process.run("rm -f %s" % emulated_path)
return ""
def define_pool(pool_name, pool_type, pool_target, cleanup_flag, **kwargs):
"""
    To define a pool of a given type (supported types: 'dir', 'netfs',
    'logical', 'iscsi', 'gluster', 'disk' and 'fs').
    :param pool_name: Name of the pool
    :param pool_type: Type of the pool
    :param pool_target: Target for underlying storage
    :param cleanup_flag: A list containing 4 booleans and 1 string, standing
                         for need_cleanup_nfs, need_cleanup_iscsi,
                         need_cleanup_logical, selinux_bak and
                         need_cleanup_gluster
    :param kwargs: key words for special pool define, e.g. glusterfs pool
                   source path and source name, etc
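    Example (illustrative; the pool target path is a placeholder):
    ::
        cleanup_flag = [False, False, False, "", False]
        define_pool("dir_pool", "dir", "/var/lib/libvirt/images/dir_pool",
                    cleanup_flag)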
"""
extra = ""
vg_name = pool_name
cleanup_nfs = False
cleanup_iscsi = False
cleanup_logical = False
selinux_bak = ""
cleanup_gluster = False
if not os.path.exists(pool_target) and pool_type != "gluster":
os.mkdir(pool_target)
if pool_type == "dir":
pass
elif pool_type == "netfs":
# Set up NFS server without mount
res = setup_or_cleanup_nfs(True, pool_target, False)
nfs_path = res["export_dir"]
selinux_bak = res["selinux_status_bak"]
cleanup_nfs = True
extra = "--source-host %s --source-path %s" % ('127.0.0.1',
nfs_path)
elif pool_type == "logical":
# Create vg by using iscsi device
lv_utils.vg_create(vg_name, setup_or_cleanup_iscsi(True))
cleanup_iscsi = True
cleanup_logical = True
extra = "--source-name %s" % vg_name
elif pool_type == "iscsi":
# Set up iscsi target without login
iscsi_target, _ = setup_or_cleanup_iscsi(True, False)
cleanup_iscsi = True
extra = "--source-host %s --source-dev %s" % ('127.0.0.1',
iscsi_target)
elif pool_type == "disk":
# Set up iscsi target and login
device_name = setup_or_cleanup_iscsi(True)
cleanup_iscsi = True
# Create a partition to make sure disk pool can start
mk_label(device_name)
mk_part(device_name)
extra = "--source-dev %s" % device_name
elif pool_type == "fs":
# Set up iscsi target and login
device_name = setup_or_cleanup_iscsi(True)
cleanup_iscsi = True
# Format disk to make sure fs pool can start
source_format = kwargs.get('source_format', 'ext4')
mkfs(device_name, source_format)
extra = "--source-dev %s --source-format %s" % (device_name, source_format)
elif pool_type == "gluster":
gluster_source_path = kwargs.get('gluster_source_path')
gluster_source_name = kwargs.get('gluster_source_name')
gluster_file_name = kwargs.get('gluster_file_name')
gluster_file_type = kwargs.get('gluster_file_type')
gluster_file_size = kwargs.get('gluster_file_size')
gluster_vol_number = kwargs.get('gluster_vol_number')
# Prepare gluster service and create volume
hostip = gluster.setup_or_cleanup_gluster(True, gluster_source_name,
pool_name=pool_name, **kwargs)
logging.debug("hostip is %s", hostip)
# create image in gluster volume
file_path = "gluster://%s/%s" % (hostip, gluster_source_name)
for i in range(gluster_vol_number):
file_name = "%s_%d" % (gluster_file_name, i)
process.run("qemu-img create -f %s %s/%s %s" %
(gluster_file_type, file_path, file_name,
gluster_file_size))
cleanup_gluster = True
extra = "--source-host %s --source-path %s --source-name %s" % \
(hostip, gluster_source_path, gluster_source_name)
elif pool_type in ["scsi", "mpath", "rbd", "sheepdog"]:
raise exceptions.TestSkipError(
"Pool type '%s' has not yet been supported in the test." %
pool_type)
else:
raise exceptions.TestFail("Invalid pool type: '%s'." % pool_type)
# Mark the clean up flags
cleanup_flag[0] = cleanup_nfs
cleanup_flag[1] = cleanup_iscsi
cleanup_flag[2] = cleanup_logical
cleanup_flag[3] = selinux_bak
cleanup_flag[4] = cleanup_gluster
    result = None
    try:
        result = virsh.pool_define_as(pool_name, pool_type, pool_target, extra,
                                      ignore_status=True)
    except process.CmdError:
        logging.error("Define '%s' type pool fail.", pool_type)
    return result
def verify_virsh_console(session, user, passwd, timeout=10, debug=False):
"""
    Log in through the given console session and run a command to verify
    that the console works. Return True on success, False on failure.
"""
log = ""
console_cmd = "cat /proc/cpuinfo"
try:
while True:
match, text = session.read_until_last_line_matches(
[r"[E|e]scape character is", r"login:",
r"[P|p]assword:", session.prompt],
timeout, internal_timeout=1)
if match == 0:
if debug:
logging.debug("Got '^]', sending '\\n'")
session.sendline()
elif match == 1:
if debug:
logging.debug("Got 'login:', sending '%s'", user)
session.sendline(user)
elif match == 2:
if debug:
logging.debug("Got 'Password:', sending '%s'", passwd)
session.sendline(passwd)
elif match == 3:
if debug:
logging.debug("Got Shell prompt -- logged in")
break
status, output = session.cmd_status_output(console_cmd)
logging.info("output of command:\n%s", output)
session.close()
except (aexpect.ShellError,
aexpect.ExpectError) as detail:
log = session.get_output()
logging.error("Verify virsh console failed:\n%s\n%s", detail, log)
session.close()
return False
if not re.search("processor", output):
logging.error("Verify virsh console failed: Result does not match.")
return False
return True
def pci_info_from_address(address_dict, radix=10, type="label"):
"""
Generate a pci label or id from a dict of address.
:param address_dict: A dict contains domain, bus, slot and function.
:param radix: The radix of your data in address_dict.
:param type: label or id
Example:
::
address_dict = {'domain': '0x0000', 'bus': '0x08', 'slot': '0x10', 'function': '0x0'}
radix = 16
return = pci_0000_08_10_0 or 0000:08:10.0
"""
try:
domain = int(address_dict['domain'], radix)
bus = int(address_dict['bus'], radix)
slot = int(address_dict['slot'], radix)
function = int(address_dict['function'], radix)
except (TypeError, KeyError) as detail:
raise exceptions.TestError(detail)
if type == "label":
result = ("pci_%04x_%02x_%02x_%01x" % (domain, bus, slot, function))
elif type == "id":
result = ("%04x:%02x:%02x.%01x" % (domain, bus, slot, function))
else:
# TODO: for other type
result = None
return result
def mk_label(disk, label="msdos", session=None):
"""
Set label for disk.
"""
mklabel_cmd = "parted -s %s mklabel %s" % (disk, label)
if session:
session.cmd(mklabel_cmd)
else:
process.run(mklabel_cmd)
def mk_part(disk, size="100M", fs_type='ext4', session=None):
"""
Create a partition for disk
"""
# TODO: This is just a temporary function to create partition for
# testing usage, should be replaced by a more robust one.
    support_label = ['unknown', 'gpt', 'msdos']
disk_label = 'msdos'
part_type = 'primary'
part_start = '0'
run_cmd = process.system_output
if session:
run_cmd = session.get_command_output
print_cmd = "parted -s %s print" % disk
output = to_text(run_cmd(print_cmd))
current_label = re.search(r'Partition Table: (\w+)', output).group(1)
    if current_label not in support_label:
        logging.error('Cannot create a partition on a disk with %s label',
                      current_label)
return
disk_size = re.search(r"Disk %s: (\w+)" % disk, output).group(1)
pat = r'(?P<num>\d+)\s+(?P<start>\S+)\s+(?P<end>\S+)\s+(?P<size>\S+)\s+'
current_parts = [m.groupdict() for m in re.finditer(pat, output)]
mkpart_cmd = "parted -s -a optimal %s" % disk
if current_label == 'unknown':
mkpart_cmd += " mklabel %s" % disk_label
if len(current_parts) > 0:
part_start = current_parts[-1]['end']
part_end = (float(utils_misc.normalize_data_size(part_start,
factor='1000')) +
float(utils_misc.normalize_data_size(size, factor='1000')))
# Deal with msdos disk
if current_label == 'msdos':
if len(current_parts) == 3:
extended_cmd = " mkpart extended %s %s" % (part_start, disk_size)
to_text(run_cmd(mkpart_cmd + extended_cmd))
if len(current_parts) > 2:
part_type = 'logical'
mkpart_cmd += ' mkpart %s %s %s %s' % (part_type, fs_type, part_start,
part_end)
to_text(run_cmd(mkpart_cmd))
def mkfs(partition, fs_type, options="", session=None):
"""
Force to make a file system on the partition
"""
force_option = ''
if fs_type in ['ext2', 'ext3', 'ext4', 'ntfs']:
force_option = '-F'
elif fs_type in ['fat', 'vfat', 'msdos']:
force_option = '-I'
elif fs_type in ['xfs', 'btrfs']:
force_option = '-f'
mkfs_cmd = "mkfs.%s %s %s %s" % (fs_type, force_option, partition, options)
if session:
session.cmd(mkfs_cmd)
else:
process.run(mkfs_cmd)
def yum_install(pkg_list, session=None):
"""
Try to install packages on system
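    Example (illustrative; package names are placeholders):
    ::
        yum_install(["qemu-kvm", "libvirt-client"])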
"""
if not isinstance(pkg_list, list):
raise exceptions.TestError("Parameter error.")
yum_cmd = "rpm -q {0} || yum -y install {0}"
for pkg in pkg_list:
if session:
status = session.cmd_status(yum_cmd.format(pkg))
else:
status = process.run(yum_cmd.format(pkg),
shell=True).exit_status
if status:
raise exceptions.TestFail("Failed to install package: %s"
% pkg)
def check_actived_pool(pool_name):
"""
    Check if pool_name exists in the active pool list
"""
sp = libvirt_storage.StoragePool()
if not sp.pool_exists(pool_name):
raise exceptions.TestFail("Can't find pool %s" % pool_name)
if not sp.is_pool_active(pool_name):
raise exceptions.TestFail("Pool %s is not active." % pool_name)
logging.debug("Find active pool %s", pool_name)
return True
def check_vm_state(vm_name, state='paused', reason=None, uri=None):
"""
    Check whether the state of the vm is as expected.
:param vm_name: VM name
:param state: expected state of the VM
:param reason: expected reason of vm state
:param uri: connect uri
:return: True if state of VM is as expected, False otherwise
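    Example (illustrative; assumes a defined domain named 'avocado-vt-vm1'):
    ::
        check_vm_state("avocado-vt-vm1", state="paused", reason="user")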
"""
if not virsh.domain_exists(vm_name, uri=uri):
return False
if reason:
result = virsh.domstate(vm_name, extra="--reason", uri=uri)
expected_result = "%s (%s)" % (state.lower(), reason.lower())
else:
result = virsh.domstate(vm_name, uri=uri)
expected_result = state.lower()
vm_state = result.stdout_text.strip()
return vm_state.lower() == expected_result
class PoolVolumeTest(object):
"""Test class for storage pool or volume"""
def __init__(self, test, params):
self.tmpdir = data_dir.get_tmp_dir()
self.params = params
self.selinux_bak = ""
def cleanup_pool(self, pool_name, pool_type, pool_target, emulated_image,
**kwargs):
"""
Delete vols, destroy the created pool and restore the env
"""
sp = libvirt_storage.StoragePool()
source_format = kwargs.get('source_format')
source_name = kwargs.get('source_name')
device_name = kwargs.get('device_name', "/DEV/EXAMPLE")
try:
if sp.pool_exists(pool_name):
pv = libvirt_storage.PoolVolume(pool_name)
if pool_type in ["dir", "netfs", "logical", "disk"]:
if sp.is_pool_active(pool_name):
vols = pv.list_volumes()
for vol in vols:
# Ignore failed deletion here for deleting pool
pv.delete_volume(repr(vol))
if not sp.delete_pool(pool_name):
raise exceptions.TestFail(
"Delete pool %s failed" % pool_name)
finally:
if pool_type == "netfs" and source_format != 'glusterfs':
nfs_server_dir = self.params.get("nfs_server_dir", "nfs-server")
nfs_path = os.path.join(self.tmpdir, nfs_server_dir)
setup_or_cleanup_nfs(is_setup=False, export_dir=nfs_path,
restore_selinux=self.selinux_bak)
if os.path.exists(nfs_path):
shutil.rmtree(nfs_path)
if pool_type == "logical":
cmd = "pvs |grep vg_logical|awk '{print $1}'"
pv = process.run(cmd, shell=True).stdout_text
# Cleanup logical volume anyway
process.run("vgremove -f vg_logical", ignore_status=True)
process.run("pvremove %s" % pv, ignore_status=True)
# These types used iscsi device
# If we did not provide block device
if (pool_type in ["logical", "fs", "disk"] and
device_name.count("EXAMPLE")):
setup_or_cleanup_iscsi(is_setup=False,
emulated_image=emulated_image)
# Used iscsi device anyway
if pool_type in ["iscsi", "iscsi-direct", "scsi"]:
setup_or_cleanup_iscsi(is_setup=False,
emulated_image=emulated_image)
if pool_type == "scsi":
scsi_xml_file = self.params.get("scsi_xml_file", "")
if os.path.exists(scsi_xml_file):
os.remove(scsi_xml_file)
if pool_type in ["dir", "fs", "netfs"]:
pool_target = os.path.join(self.tmpdir, pool_target)
if os.path.exists(pool_target):
shutil.rmtree(pool_target)
if pool_type == "gluster" or source_format == 'glusterfs':
gluster.setup_or_cleanup_gluster(False, source_name,
pool_name=pool_name, **kwargs)
def pre_pool(self, pool_name, pool_type, pool_target, emulated_image,
**kwargs):
"""
        Prepare (define or create) the specific type of pool.
        :param pool_name: created pool name
        :param pool_type: dir, disk, logical, fs, netfs, etc.
:param pool_target: target of storage pool
:param emulated_image: use an image file to simulate a scsi disk
it could be used for disk, logical pool, etc
:param kwargs: key words for specific pool
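        Example (illustrative; 'test' and 'params' come from the avocado test,
        and the pool target is a placeholder):
        ::
            pvt = PoolVolumeTest(test, params)
            pvt.pre_pool("test_pool", "dir", "pool_target", "emulated-iscsi")
            # ... run the test ...
            pvt.cleanup_pool("test_pool", "dir", "pool_target", "emulated-iscsi")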
"""
extra = ""
image_size = kwargs.get('image_size', "100M")
source_format = kwargs.get('source_format')
source_name = kwargs.get('source_name', None)
persistent = kwargs.get('persistent', False)
device_name = kwargs.get('device_name', "/DEV/EXAMPLE")
adapter_type = kwargs.get('pool_adapter_type', 'scsi_host')
pool_wwnn = kwargs.get('pool_wwnn', None)
pool_wwpn = kwargs.get('pool_wwpn', None)
source_protocol_ver = kwargs.get('source_protocol_ver', "no")
        # If the tester does not provide a block device, create one
if (device_name.count("EXAMPLE") and
pool_type in ["disk", "fs", "logical"]):
device_name = setup_or_cleanup_iscsi(is_setup=True,
emulated_image=emulated_image,
image_size=image_size)
if pool_type == "dir":
if not os.path.isdir(pool_target):
pool_target = os.path.join(self.tmpdir, pool_target)
if not os.path.exists(pool_target):
os.mkdir(pool_target)
elif pool_type == "disk":
extra = " --source-dev %s" % device_name
            # msdos is libvirt's default pool source format, but libvirt
            # uses the notation 'dos' here
if not source_format:
source_format = 'dos'
extra += " --source-format %s" % source_format
disk_label = source_format
if disk_label == 'dos':
disk_label = 'msdos'
mk_label(device_name, disk_label)
            # A disk pool does not allow creating volumes via virsh, so the
            # 'pre_disk_vol' parameter is used to create partition(s) with
            # 'parted'. The parameter is a list of partition sizes, and the
            # maximum number of partitions depends on the disk label.
            # If pre_disk_vol is None, the disk pool will have no volume.
pre_disk_vol = kwargs.get('pre_disk_vol', None)
if isinstance(pre_disk_vol, list) and len(pre_disk_vol):
for vol in pre_disk_vol:
mk_part(device_name, vol)
elif pool_type == "fs":
pool_target = os.path.join(self.tmpdir, pool_target)
if not os.path.exists(pool_target):
os.mkdir(pool_target)
if not source_format:
source_format = 'ext4'
mkfs(device_name, source_format)
extra = " --source-dev %s --source-format %s" % (device_name,
source_format)
elif pool_type == "logical":
logical_device = device_name
vg_name = "vg_%s" % pool_type
lv_utils.vg_create(vg_name, logical_device)
extra = "--source-name %s" % vg_name
            # Create a small volume for verification.
            # The VG path will not exist if the VG contains no volumes (bug?)
lv_utils.lv_create(vg_name, 'default_lv', '1M')
elif pool_type == "netfs":
export_options = kwargs.get('export_options',
"rw,async,no_root_squash")
pool_target = os.path.join(self.tmpdir, pool_target)
if not os.path.exists(pool_target):
os.mkdir(pool_target)
if source_format == 'glusterfs':
hostip = gluster.setup_or_cleanup_gluster(True, source_name,
pool_name=pool_name,
**kwargs)
logging.debug("hostip is %s", hostip)
extra = "--source-host %s --source-path %s" % (hostip,
source_name)
extra += " --source-format %s" % source_format
process.system("setsebool virt_use_fusefs on")
else:
nfs_server_dir = self.params.get(
"nfs_server_dir", "nfs-server")
nfs_path = os.path.join(self.tmpdir, nfs_server_dir)
if not os.path.exists(nfs_path):
os.mkdir(nfs_path)
res = setup_or_cleanup_nfs(is_setup=True,
export_options=export_options,
export_dir=nfs_path)
self.selinux_bak = res["selinux_status_bak"]
source_host = self.params.get("source_host", "localhost")
extra = "--source-host %s --source-path %s" % (source_host,
nfs_path)
if source_protocol_ver == "yes":
extra += " --source-protocol-ver %s" % res["source_protocol_ver"]
elif pool_type in ["iscsi", "iscsi-direct"]:
ip_protocal = kwargs.get('ip_protocal', "ipv4")
iscsi_chap_user = kwargs.get('iscsi_chap_user', None)
iscsi_chap_password = kwargs.get('iscsi_chap_password', None)
iscsi_secret_usage = kwargs.get('iscsi_secret_usage', None)
iscsi_initiator = kwargs.get('iscsi_initiator', None)
if ip_protocal == "ipv6":
ip_addr = "::1"
else:
ip_addr = "127.0.0.1"
if iscsi_chap_user and iscsi_chap_password and iscsi_secret_usage:
logging.debug("setup %s pool with chap authentication", pool_type)
extra = (" --auth-type chap --auth-username %s "
"--secret-usage %s" %
(iscsi_chap_user, iscsi_secret_usage))
else:
logging.debug("setup %s pool without authentication", pool_type)
setup_or_cleanup_iscsi(is_setup=True,
emulated_image=emulated_image,
image_size=image_size,
chap_user=iscsi_chap_user,
chap_passwd=iscsi_chap_password,
portal_ip=ip_addr)
iscsi_sessions = iscsi.iscsi_get_sessions()
iscsi_target = None
for iscsi_node in iscsi_sessions:
if iscsi_node[1].count(emulated_image):
iscsi_target = iscsi_node[1]
break
iscsi.iscsi_logout(iscsi_target)
extra += " --source-host %s --source-dev %s" % (ip_addr,
iscsi_target)
if pool_type == "iscsi-direct":
extra += " --source-initiator %s" % iscsi_initiator
elif pool_type == "scsi":
scsi_xml_file = self.params.get("scsi_xml_file", "")
if not os.path.exists(scsi_xml_file):
logical_device = setup_or_cleanup_iscsi(
is_setup=True,
emulated_image=emulated_image,
image_size=image_size)
cmd = ("iscsiadm -m session -P 3 |grep -B3 %s| grep Host|awk "
"'{print $3}'" % logical_device.split('/')[2])
scsi_host = process.run(cmd, shell=True).stdout_text.strip()
scsi_pool_xml = pool_xml.PoolXML()
scsi_pool_xml.name = pool_name
scsi_pool_xml.pool_type = "scsi"
scsi_pool_xml.target_path = pool_target
scsi_pool_source_xml = pool_xml.SourceXML()
scsi_pool_source_xml.adp_type = adapter_type
scsi_pool_source_xml.adp_name = "host" + scsi_host
if pool_wwpn:
scsi_pool_source_xml.adp_wwpn = pool_wwpn
if pool_wwnn:
scsi_pool_source_xml.adp_wwnn = pool_wwnn
scsi_pool_xml.set_source(scsi_pool_source_xml)
logging.debug("SCSI pool XML %s:\n%s", scsi_pool_xml.xml,
str(scsi_pool_xml))
scsi_xml_file = scsi_pool_xml.xml
self.params['scsi_xml_file'] = scsi_xml_file
elif pool_type == "gluster":
source_path = kwargs.get('source_path')
logging.info("source path is %s" % source_path)
hostip = gluster.setup_or_cleanup_gluster(True, source_name,
pool_name=pool_name,
**kwargs)
logging.debug("Gluster host ip address: %s", hostip)
extra = "--source-host %s --source-path %s --source-name %s" % \
(hostip, source_path, source_name)
elif pool_type == "mpath":
mpath_xml_file = self.params.get("mpath_xml_file", "")
if not os.path.exists(mpath_xml_file):
mpath_pool_xml = pool_xml.PoolXML()
mpath_pool_xml.name = pool_name
mpath_pool_xml.pool_type = "mpath"
mpath_pool_xml.target_path = pool_target
logging.debug("mpath pool XML %s:\n%s",
mpath_pool_xml.xml, str(mpath_pool_xml))
mpath_xml_file = mpath_pool_xml.xml
self.params['mpath_xml_file'] = mpath_xml_file
func = virsh.pool_create_as
if pool_type == "scsi" or pool_type == "mpath":
func = virsh.pool_create
if persistent:
func = virsh.pool_define_as
if pool_type == "scsi" or pool_type == "mpath":
func = virsh.pool_define
# Create/define pool
if pool_type == "scsi":
result = func(scsi_xml_file, debug=True)
elif pool_type == "mpath":
result = func(mpath_xml_file, debug=True)
elif pool_type == "iscsi-direct":
result = func(pool_name, pool_type, "", extra, debug=True)
else:
result = func(pool_name, pool_type, pool_target, extra, debug=True)
        # Here, virsh.pool_create_as returns a boolean value while the
        # other three functions return a CmdResult object
if isinstance(result, bool):
re_v = result
else:
re_v = result.exit_status == 0
if not re_v:
self.cleanup_pool(pool_name, pool_type, pool_target,
emulated_image, **kwargs)
raise exceptions.TestFail("Prepare pool failed")
xml_str = virsh.pool_dumpxml(pool_name)
logging.debug("New prepared pool XML: %s", xml_str)
logging.info("Refreshing pool")
virsh.pool_refresh(pool_name)
def pre_vol(self, vol_name, vol_format, capacity, allocation, pool_name):
"""
        Prepare the specific type of volume in the pool
"""
pv = libvirt_storage.PoolVolume(pool_name)
if not pv.create_volume(vol_name, capacity, allocation, vol_format):
raise exceptions.TestFail("Prepare volume failed.")
if not pv.volume_exists(vol_name):
raise exceptions.TestFail("Can't find volume: %s" % vol_name)
def pre_vol_by_xml(self, pool_name, **vol_params):
"""
Prepare volume by xml file
"""
volxml = vol_xml.VolXML()
v_xml = volxml.new_vol(**vol_params)
v_xml.xmltreefile.write()
ret = virsh.vol_create(pool_name, v_xml.xml, ignore_status=True)
check_exit_status(ret, False)
def check_status_output(status, output='',
expected_fails=None,
skip_if=None,
any_error=False,
expected_match=None):
"""
Proxy function to call check_result for commands run in vm session.
:param status: Exit status (used as CmdResult.exit_status)
:param output: Stdout and/or stderr
:param expected_fails: a string or list of regex of expected stderr patterns.
The check will pass if any of these patterns matches.
:param skip_if: a string or list of regex of expected stderr patterns. The
check will raise a TestSkipError if any of these patterns matches.
    :param any_error: Whether to expect any error message. Setting it to True
                      overrides expected_fails.
:param expected_match: a string or list of regex of expected stdout patterns.
The check will pass if any of these patterns matches.
"""
result = process.CmdResult(stderr=output,
stdout=output,
exit_status=status)
check_result(result, expected_fails, skip_if, any_error, expected_match)
def check_result(result,
expected_fails=[],
skip_if=[],
any_error=False,
expected_match=[]):
"""
Check the result of a command and check command error message against
expectation.
:param result: Command result instance.
:param expected_fails: a string or list of regex of expected stderr patterns.
The check will pass if any of these patterns matches.
:param skip_if: a string or list of regex of expected stderr patterns. The
check will raise a TestSkipError if any of these patterns matches.
    :param any_error: Whether to expect any error message. Setting it to True
                      overrides expected_fails.
:param expected_match: a string or list of regex of expected stdout patterns.
The check will pass if any of these patterns matches.
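    Example (illustrative; the expected error pattern is a guess and may vary
    between libvirt versions):
    ::
        res = virsh.start("no_such_domain", ignore_status=True)
        check_result(res, expected_fails=["failed to get domain"])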
"""
stderr = result.stderr_text
stdout = result.stdout_text
all_msg = '\n'.join([stdout, stderr])
logging.debug("Command result: %s", all_msg)
try:
unicode
except NameError:
unicode = str
if skip_if:
if isinstance(skip_if, (str, unicode)):
skip_if = [skip_if]
for patt in skip_if:
if re.search(patt, stderr):
raise exceptions.TestSkipError("Test skipped: found '%s' in test "
"result: %s" %
(patt, all_msg))
if any_error:
if result.exit_status:
return
else:
raise exceptions.TestFail(
"Expect should fail but got: %s" % all_msg)
if result.exit_status:
if expected_fails:
if isinstance(expected_fails, (str, unicode)):
expected_fails = [expected_fails]
if not any(re.search(patt, stderr)
for patt in expected_fails):
raise exceptions.TestFail("Expect should fail with one of %s, "
"but failed with:\n%s" %
(expected_fails, all_msg))
else:
logging.info("Get expect error msg:%s" % stderr)
else:
raise exceptions.TestFail(
"Expect should succeed, but got: %s" % all_msg)
else:
if expected_fails:
raise exceptions.TestFail("Expect should fail with one of %s, "
"but succeeded: %s" %
(expected_fails, all_msg))
elif expected_match:
if isinstance(expected_match, (str, unicode)):
expected_match = [expected_match]
if not any(re.search(patt, stdout)
for patt in expected_match):
raise exceptions.TestFail("Expect should match with one of %s,"
"but failed with: %s" %
(expected_match, all_msg))
def check_exit_status(result, expect_error=False):
"""
Check the exit status of virsh commands.
:param result: Virsh command result object
:param expect_error: Boolean value, expect command success or fail
"""
if not expect_error:
if result.exit_status != 0:
raise exceptions.TestFail(result.stderr_text)
else:
logging.debug("Command output:\n%s",
result.stdout_text.strip())
elif expect_error and result.exit_status == 0:
raise exceptions.TestFail("Run '%s' expect fail, but run "
"successfully." % result.command)
def get_interface_details(vm_name):
"""
Get the interface details from virsh domiflist command output
:return: list of all interfaces details
"""
# Parse the domif-list command output
domiflist_out = virsh.domiflist(vm_name).stdout_text
# Regular expression for the below output
# vnet0 bridge virbr0 virtio 52:54:00:b2:b3:b4
rg = re.compile(r"^(\S+)\s+(\S+)\s+(\S+)\s+(\S+)\s+"
"(([a-fA-F0-9]{2}:?){6})")
iface_cmd = {}
ifaces_cmd = []
for line in domiflist_out.split('\n'):
match_obj = rg.search(line.strip())
        # Skip lines (such as the header) that do not match the pattern
if match_obj is not None:
iface_cmd['interface'] = match_obj.group(1)
iface_cmd['type'] = match_obj.group(2)
iface_cmd['source'] = match_obj.group(3)
iface_cmd['model'] = match_obj.group(4)
iface_cmd['mac'] = match_obj.group(5)
ifaces_cmd.append(iface_cmd)
iface_cmd = {}
return ifaces_cmd
def get_ifname_host(vm_name, mac):
"""
Get the vm interface name on host
:return: interface name, None if not exist
"""
ifaces = get_interface_details(vm_name)
for iface in ifaces:
if iface["mac"] == mac:
return iface["interface"]
return None
def check_iface(iface_name, checkpoint, extra="", **dargs):
"""
Check interface with specified checkpoint.
:param iface_name: Interface name
    :param checkpoint: What to check: whether the interface exists,
                       its MAC address, IP address or state, or its
                       connectivity by ping.
                       Valid checkpoints: [exists, mac, ip, ping, state]
:param extra: Extra string for checking
:return: Boolean value, true for pass, false for fail
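    Example (illustrative; interface name, MAC and IP are placeholders):
    ::
        check_iface("eth0", "mac", "52:54:00:aa:bb:cc")
        check_iface("eth0", "ping", "192.168.122.1", count=2)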
"""
    support_check = ["exists", "mac", "ip", "ping", "state"]
iface = utils_net.Interface(name=iface_name)
check_pass = False
try:
if checkpoint == "exists":
# extra is iface-list option
list_find, ifcfg_find = (False, False)
# Check virsh list output
result = virsh.iface_list(extra, ignore_status=True)
check_exit_status(result, False)
output = re.findall(r"(\S+)\ +(\S+)\ +(\S+|\s+)[\ +\n]",
result.stdout_text)
if list(filter(lambda x: x[0] == iface_name, output[1:])):
list_find = True
logging.debug("Find '%s' in virsh iface-list output: %s",
iface_name, list_find)
# Check network script independent of distro
iface_script = utils_net.get_network_cfg_file(iface_name)
ifcfg_find = os.path.exists(iface_script)
logging.debug("Find '%s': %s", iface_script, ifcfg_find)
check_pass = list_find and ifcfg_find
elif checkpoint == "mac":
# extra is the MAC address to compare
iface_mac = iface.get_mac().lower()
check_pass = iface_mac == extra
logging.debug("MAC address of %s: %s", iface_name, iface_mac)
elif checkpoint == "ip":
# extra is the IP address to compare
iface_ip = iface.get_ip()
check_pass = iface_ip == extra
logging.debug("IP address of %s: %s", iface_name, iface_ip)
elif checkpoint == "state":
# check iface State
result = virsh.iface_list(extra, ignore_status=True)
check_exit_status(result, False)
output = re.findall(r"(\S+)\ +(\S+)\ +(\S+|\s+)[\ +\n]",
result.stdout_text)
iface_state = filter(lambda x: x[0] == iface_name, output[1:])
iface_state = list(iface_state)[0][1]
# active corresponds True, otherwise return False
check_pass = iface_state == "active"
elif checkpoint == "ping":
# extra is the ping destination
count = dargs.get("count", 3)
timeout = dargs.get("timeout", 5)
ping_s, _ = ping(dest=extra, count=count, interface=iface_name,
timeout=timeout,)
check_pass = ping_s == 0
else:
logging.debug("Support check points are: %s", support_check)
logging.error("Unsupport check point: %s", checkpoint)
except Exception as detail:
raise exceptions.TestFail("Interface check failed: %s" % detail)
return check_pass
def create_hostdev_xml(pci_id, boot_order=None, xmlfile=True,
dev_type="pci", managed="yes", alias=None):
"""
Create a hostdev configuration file; supported hostdev types:
a. pci
b. usb
c. scsi
    The named parameter "pci_id" now has an overloaded meaning of "device id".
    :param pci_id: device id on host, naming kept for compatibility reasons
a. "0000:03:04.0" for pci
b. "1d6b:0002:001:002" for usb (vendor:product:bus:device)
c. "0:0:0:1" for scsi (scsi_num:bus_num:target_num:unit_num)
:param boot_order: boot order for hostdev device
:param xmlfile: Return the file path of xmlfile if True
:param dev_type: type of hostdev
:param managed: managed of hostdev
:param alias: alias name of hostdev
:return: xml of hostdev device by default
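    Example (illustrative; the PCI address is a placeholder):
    ::
        xml_file = create_hostdev_xml("0000:03:04.0")
        hostdev_obj = create_hostdev_xml("0000:03:04.0", xmlfile=False)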
"""
hostdev_xml = hostdev.Hostdev()
hostdev_xml.mode = "subsystem"
hostdev_xml.managed = managed
hostdev_xml.type = dev_type
if boot_order:
hostdev_xml.boot_order = boot_order
if alias:
hostdev_xml.alias = dict(name=alias)
# Create attributes dict for device's address element
logging.info("pci_id/device id is %s" % pci_id)
if dev_type in ["pci", "usb"]:
device_domain = pci_id.split(':')[0]
device_domain = "0x%s" % device_domain
device_bus = pci_id.split(':')[1]
device_bus = "0x%s" % device_bus
device_slot = pci_id.split(':')[-1].split('.')[0]
device_slot = "0x%s" % device_slot
device_function = pci_id.split('.')[-1]
device_function = "0x%s" % device_function
if dev_type == "pci":
attrs = {'domain': device_domain, 'slot': device_slot,
'bus': device_bus, 'function': device_function}
hostdev_xml.source = hostdev_xml.new_source(**attrs)
if dev_type == "usb":
addr_bus = pci_id.split(':')[2]
addr_device = pci_id.split(':')[3]
hostdev_xml.source = hostdev_xml.new_source(
**(dict(vendor_id=device_domain, product_id=device_bus,
address_bus=addr_bus, address_device=addr_device)))
if dev_type == "scsi":
id_parts = pci_id.split(':')
hostdev_xml.source = hostdev_xml.new_source(
**(dict(adapter_name="scsi_host%s" % id_parts[0], bus=id_parts[1],
target=id_parts[2], unit=id_parts[3])))
logging.debug("Hostdev XML:\n%s", str(hostdev_xml))
if not xmlfile:
return hostdev_xml
return hostdev_xml.xml
def add_controller(vm_name, contr):
"""
Add a specified controller to the vm xml
:param vm_name: The vm name to be added with a controller
:param contr: The controller object to be added
:raise: exceptions.TestFail if the controller can't be added
"""
vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
org_controllers = vmxml.get_controllers(contr.type)
vmxml.add_device(contr)
vmxml.sync()
vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
updated_controllers = vmxml.get_controllers(contr.type)
    new_added_ctr = None
    for one_contr in updated_controllers:
        if one_contr not in org_controllers:
            new_added_ctr = one_contr
    if new_added_ctr is None:
        raise exceptions.TestFail("Fail to get new added controller.")
def create_controller_xml(contr_dict):
"""
Create a controller xml
    :param contr_dict: A dict of controller configuration parameters
:return: new controller created
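    Example (illustrative):
    ::
        contr = create_controller_xml({"controller_type": "scsi",
                                       "controller_model": "virtio-scsi",
                                       "controller_index": "1"})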
"""
contr_type = contr_dict.get("controller_type", 'scsi')
contr_model = contr_dict.get("controller_model", "virtio-scsi")
contr_index = contr_dict.get("controller_index")
contr_alias = contr_dict.get("contr_alias")
contr_addr = contr_dict.get("controller_addr")
contr_target = contr_dict.get("controller_target")
contr_node = contr_dict.get("controller_node")
contr = controller.Controller(contr_type)
contr.model = contr_model
if contr_index:
contr.index = contr_index
if contr_alias:
contr.alias = dict(name=contr_alias)
if contr_target:
contr.target = eval(contr_target)
if contr_node:
contr.node = contr_node
if contr_addr:
contr.address = contr.new_controller_address(attrs=eval(contr_addr))
return contr
def create_redirdev_xml(redir_type="spicevmc", redir_bus="usb",
redir_alias=None, redir_params={}):
"""
Create redirdev xml
:param redir_type: redirdev type name
:param redir_bus: redirdev bus type
:param redir_alias: redirdev alias name
    :param redir_params: other redirdev xml parameters
    :return: redirdev xml file
"""
redir = redirdev.Redirdev(redir_type)
redir.type = redir_type
redir.bus = redir_bus
if redir_alias:
redir.alias = dict(name=redir_alias)
redir_source = redir_params.get("source")
if redir_source:
redir_source_dict = eval(redir_source)
redir.source = redir.new_source(**redir_source_dict)
redir_protocol = redir_params.get("protocol")
if redir_protocol:
redir.protocol = eval(redir_protocol)
return redir.xml
def alter_boot_order(vm_name, pci_id, boot_order=0):
"""
    Alter the VM boot sequence so that the given PCI device boots first.
    The OS boot element and per-device boot elements are mutually exclusive,
    so all OS boot elements must be removed before setting the PCI device
    boot order.
:param vm_name: VM name
:param pci_id: such as "0000:06:00.1"
:param boot_order: order priority, such as 1, 2, ...
"""
vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
# remove all of OS boots
vmxml.remove_all_boots()
# prepare PCI-device XML with boot order
try:
device_domain = pci_id.split(':')[0]
device_domain = "0x%s" % device_domain
device_bus = pci_id.split(':')[1]
device_bus = "0x%s" % device_bus
device_slot = pci_id.split(':')[-1].split('.')[0]
device_slot = "0x%s" % device_slot
device_function = pci_id.split('.')[-1]
device_function = "0x%s" % device_function
except IndexError:
raise exceptions.TestError("Invalid PCI Info: %s" % pci_id)
attrs = {'domain': device_domain, 'slot': device_slot,
'bus': device_bus, 'function': device_function}
vmxml.add_hostdev(attrs, boot_order=boot_order)
# synchronize XML
vmxml.sync()
def create_disk_xml(params):
"""
    Create a disk configuration (XML) file and return its path.
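    Example (illustrative; the image path is a placeholder):
    ::
        params = {"type_name": "file", "device_type": "disk",
                  "source_file": "/var/lib/libvirt/images/extra.qcow2",
                  "target_dev": "vdb", "target_bus": "virtio",
                  "driver_type": "qcow2"}
        disk_xml_path = create_disk_xml(params)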
"""
# Create attributes dict for disk's address element
type_name = params.get("type_name", "file")
target_dev = params.get("target_dev", "vdb")
target_bus = params.get("target_bus", "virtio")
diskxml = disk.Disk(type_name)
diskxml.device = params.get("device_type", "disk")
snapshot_attr = params.get('disk_snapshot_attr')
slice_in_source = params.get('disk_slice')
# After libvirt 3.9.0, auth element can be placed in source part.
# Use auth_in_source to diff whether it is placed in source or disk itself.
auth_in_source = params.get('auth_in_source')
input_source_file = params.get("input_source_file")
source_encryption_dict = params.get("source_encryption_dict")
if snapshot_attr:
diskxml.snapshot = snapshot_attr
source_attrs = {}
source_host = []
source_seclabel = []
auth_attrs = {}
driver_attrs = {}
try:
if type_name == "file":
source_file = params.get("source_file", "")
source_attrs = {'file': source_file}
if slice_in_source:
source_attrs = {'file': input_source_file}
elif type_name == "block":
source_file = params.get("source_file", "")
source_attrs = {'dev': source_file}
elif type_name == "dir":
source_dir = params.get("source_dir", "")
source_attrs = {'dir': source_dir}
elif type_name == "volume":
source_pool = params.get("source_pool")
source_volume = params.get("source_volume")
source_mode = params.get("source_mode", "")
source_attrs = {'pool': source_pool, 'volume': source_volume}
if source_mode:
source_attrs.update({"mode": source_mode})
elif type_name == "network":
source_protocol = params.get("source_protocol")
source_name = params.get("source_name")
source_host_name = params.get("source_host_name").split()
source_host_port = params.get("source_host_port").split()
transport = params.get("transport")
source_attrs = {'protocol': source_protocol, 'name': source_name}
source_host = []
for host_name, host_port in list(
zip(source_host_name, source_host_port)):
source_host.append({'name': host_name,
'port': host_port})
if transport:
source_host[0].update({'transport': transport})
else:
exceptions.TestSkipError("Unsupport disk type %s" % type_name)
source_startupPolicy = params.get("source_startupPolicy")
if source_startupPolicy:
source_attrs['startupPolicy'] = source_startupPolicy
sec_model = params.get("sec_model")
relabel = params.get("relabel")
label = params.get("sec_label")
if sec_model or relabel:
sec_dict = {}
sec_xml = seclabel.Seclabel()
if sec_model:
sec_dict.update({'model': sec_model})
if relabel:
sec_dict.update({'relabel': relabel})
if label:
sec_dict.update({'label': label})
sec_xml.update(sec_dict)
logging.debug("The sec xml is %s", sec_xml.xmltreefile)
source_seclabel.append(sec_xml)
source_params = {"attrs": source_attrs, "seclabels": source_seclabel,
"hosts": source_host}
src_config_file = params.get("source_config_file")
if src_config_file:
source_params.update({"config_file": src_config_file})
# If we use config file, "hosts" isn't needed
if "hosts" in source_params:
source_params.pop("hosts")
snapshot_name = params.get('source_snap_name')
if snapshot_name:
source_params.update({"snapshot_name": snapshot_name})
disk_source = diskxml.new_disk_source(**source_params)
auth_user = params.get("auth_user")
secret_type = params.get("secret_type")
secret_uuid = params.get("secret_uuid")
secret_usage = params.get("secret_usage")
if auth_user:
auth_attrs['auth_user'] = auth_user
if secret_type:
auth_attrs['secret_type'] = secret_type
if secret_uuid:
auth_attrs['secret_uuid'] = secret_uuid
elif secret_usage:
auth_attrs['secret_usage'] = secret_usage
if auth_attrs:
if auth_in_source:
disk_source.auth = diskxml.new_auth(**auth_attrs)
else:
diskxml.auth = diskxml.new_auth(**auth_attrs)
if slice_in_source:
if slice_in_source.get('slice_size', None):
disk_source.slices = diskxml.new_slices(**slice_in_source)
else:
slice_size_param = process.run("du -b %s" % input_source_file).stdout_text.strip()
slice_size = re.findall(r'^[0-9]+', slice_size_param)
slice_size = ''.join(slice_size)
disk_source.slices = diskxml.new_slices(**{"slice_type": "storage", "slice_offset": "0",
"slice_size": slice_size})
if source_encryption_dict:
disk_source.encryption = diskxml.new_encryption(**source_encryption_dict)
diskxml.source = disk_source
driver_name = params.get("driver_name", "qemu")
driver_type = params.get("driver_type", "")
driver_cache = params.get("driver_cache", "")
driver_discard = params.get("driver_discard", "")
driver_model = params.get("model")
driver_packed = params.get("driver_packed", "")
if driver_name:
driver_attrs['name'] = driver_name
if driver_type:
driver_attrs['type'] = driver_type
if driver_cache:
driver_attrs['cache'] = driver_cache
if driver_discard:
driver_attrs['discard'] = driver_discard
if driver_packed:
driver_attrs['packed'] = driver_packed
if driver_attrs:
diskxml.driver = driver_attrs
if driver_model:
diskxml.model = driver_model
diskxml.readonly = "yes" == params.get("readonly", "no")
diskxml.share = "yes" == params.get("shareable", "no")
diskxml.target = {'dev': target_dev, 'bus': target_bus}
alias = params.get('alias')
if alias:
diskxml.alias = {'name': alias}
sgio = params.get('sgio')
if sgio:
diskxml.sgio = sgio
rawio = params.get('rawio')
if rawio:
diskxml.rawio = rawio
diskxml.xmltreefile.write()
except Exception as detail:
logging.error("Fail to create disk XML:\n%s", detail)
logging.debug("Disk XML %s:\n%s", diskxml.xml, str(diskxml))
    # Wait for the disk XML file to be created
def file_exists():
if not process.run("ls %s" % diskxml.xml,
ignore_status=True).exit_status:
return True
utils_misc.wait_for(file_exists, 5)
    # Wait until the file is fully written, i.e. it ends with the '</disk>' tag
def file_write_over():
if not process.run("grep '</disk>' %s" % diskxml.xml,
ignore_status=True).exit_status:
return True
utils_misc.wait_for(file_write_over, 10)
return diskxml.xml
def set_disk_attr(vmxml, target, tag, attr):
"""
Set value of disk tag attributes for a given target dev.
:param vmxml: domain VMXML instance
:param target: dev of the disk
:param tag: disk tag
:param attr: the tag attribute dict to set
:return: True if success, otherwise, False
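    Example (illustrative; assumes a domain 'avocado-vt-vm1' with a disk on
    target 'vda'):
    ::
        vmxml = vm_xml.VMXML.new_from_dumpxml("avocado-vt-vm1")
        set_disk_attr(vmxml, "vda", "driver", {"cache": "none"})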
"""
key = ""
try:
disk = vmxml.get_disk_all()[target]
if tag in ["driver", "boot", "address", "alias", "source"]:
for key in attr:
disk.find(tag).set(key, attr[key])
logging.debug("key '%s' value '%s' pair is "
"set", key, attr[key])
vmxml.xmltreefile.write()
else:
logging.debug("tag '%s' is not supported now", tag)
return False
except AttributeError:
logging.error("Fail to set attribute '%s' with value "
"'%s'.", key, attr[key])
return False
return True
def create_net_xml(net_name, params):
"""
    Create a new network or update an existing network xml
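    Example (illustrative; addresses are placeholders):
    ::
        params = {"net_ip_address": "192.168.100.1",
                  "dhcp_start_ipv4": "192.168.100.2",
                  "dhcp_end_ipv4": "192.168.100.254",
                  "net_forward": "{'mode': 'nat'}"}
        netxml = create_net_xml("test-net", params)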
"""
dns_dict = {}
host_dict = {}
net_bridge = params.get("net_bridge", '{}')
net_forward = params.get("net_forward", '{}')
net_forward_pf = params.get("net_forward_pf", '{}')
forward_iface = params.get("forward_iface")
net_dns_forward = params.get("net_dns_forward")
net_dns_txt = params.get("net_dns_txt")
net_dns_srv = params.get("net_dns_srv")
net_dns_forwarders = params.get("net_dns_forwarders", "").split()
net_dns_hostip = params.get("net_dns_hostip")
net_dns_hostnames = params.get("net_dns_hostnames", "").split()
net_domain = params.get("net_domain")
net_virtualport = params.get("net_virtualport")
net_bandwidth_inbound = params.get("net_bandwidth_inbound", "{}")
net_bandwidth_outbound = params.get("net_bandwidth_outbound", "{}")
net_ip_family = params.get("net_ip_family")
net_ip_address = params.get("net_ip_address")
net_ip_netmask = params.get("net_ip_netmask", "255.255.255.0")
net_ipv6_address = params.get("net_ipv6_address")
net_ipv6_prefix = params.get("net_ipv6_prefix", "64")
nat_attrs = params.get('nat_attrs', '{}')
nat_port = params.get("nat_port")
guest_name = params.get("guest_name")
guest_ipv4 = params.get("guest_ipv4")
guest_ipv6 = params.get("guest_ipv6")
guest_mac = params.get("guest_mac")
dhcp_start_ipv4 = params.get("dhcp_start_ipv4", "192.168.122.2")
dhcp_end_ipv4 = params.get("dhcp_end_ipv4", "192.168.122.254")
dhcp_start_ipv6 = params.get("dhcp_start_ipv6")
dhcp_end_ipv6 = params.get("dhcp_end_ipv6")
tftp_root = params.get("tftp_root")
bootp_file = params.get("bootp_file")
routes = params.get("routes", "").split()
pg_name = params.get("portgroup_name", "").split()
net_port = params.get('net_port')
try:
if not virsh.net_info(net_name, ignore_status=True).exit_status:
            # Edit an existing network
netxml = network_xml.NetworkXML.new_from_net_dumpxml(net_name)
netxml.del_ip()
else:
netxml = network_xml.NetworkXML(net_name)
if net_dns_forward:
dns_dict["dns_forward"] = net_dns_forward
if net_dns_txt:
dns_dict["txt"] = ast.literal_eval(net_dns_txt)
if net_dns_srv:
dns_dict["srv"] = ast.literal_eval(net_dns_srv)
if net_dns_forwarders:
dns_dict["forwarders"] = [ast.literal_eval(x) for x in
net_dns_forwarders]
if net_dns_hostip:
host_dict["host_ip"] = net_dns_hostip
if net_dns_hostnames:
host_dict["hostnames"] = net_dns_hostnames
dns_obj = netxml.new_dns(**dns_dict)
if host_dict:
host = dns_obj.new_host(**host_dict)
dns_obj.host = host
netxml.dns = dns_obj
bridge = ast.literal_eval(net_bridge)
if bridge:
netxml.bridge = bridge
forward = ast.literal_eval(net_forward)
if forward:
netxml.forward = forward
forward_pf = ast.literal_eval(net_forward_pf)
if forward_pf:
netxml.pf = forward_pf
if forward_iface:
interface = [
{'dev': x} for x in forward_iface.split()]
netxml.forward_interface = interface
nat_attrs = ast.literal_eval(nat_attrs)
if nat_attrs:
netxml.nat_attrs = nat_attrs
if nat_port:
netxml.nat_port = ast.literal_eval(nat_port)
if net_domain:
netxml.domain_name = net_domain
net_inbound = ast.literal_eval(net_bandwidth_inbound)
net_outbound = ast.literal_eval(net_bandwidth_outbound)
if net_inbound:
netxml.bandwidth_inbound = net_inbound
if net_outbound:
netxml.bandwidth_outbound = net_outbound
if net_virtualport:
netxml.virtualport_type = net_virtualport
if net_port:
netxml.port = ast.literal_eval(net_port)
if net_ip_family == "ipv6":
ipxml = network_xml.IPXML()
ipxml.family = net_ip_family
ipxml.prefix = net_ipv6_prefix
del ipxml.netmask
if net_ipv6_address:
ipxml.address = net_ipv6_address
if dhcp_start_ipv6 and dhcp_end_ipv6:
range_6 = network_xml.RangeXML()
range_6.attrs = {"start": dhcp_start_ipv6,
"end": dhcp_end_ipv6}
ipxml.dhcp_ranges = range_6
if guest_name and guest_ipv6 and guest_mac:
host_xml_6 = network_xml.DhcpHostXML()
host_xml_6.attrs = {"name": guest_name,
"ip": guest_ipv6}
ipxml.hosts = [host_xml_6]
netxml.set_ip(ipxml)
if net_ip_address:
ipxml = network_xml.IPXML(net_ip_address,
net_ip_netmask)
if dhcp_start_ipv4 and dhcp_end_ipv4:
range_4 = network_xml.RangeXML()
range_4.attrs = {"start": dhcp_start_ipv4,
"end": dhcp_end_ipv4}
ipxml.dhcp_ranges = range_4
if tftp_root:
ipxml.tftp_root = tftp_root
if bootp_file:
ipxml.dhcp_bootp = bootp_file
if guest_name and guest_ipv4 and guest_mac:
host_xml_4 = network_xml.DhcpHostXML()
host_xml_4.attrs = {"name": guest_name,
"ip": guest_ipv4,
"mac": guest_mac}
ipxml.hosts = [host_xml_4]
netxml.set_ip(ipxml)
if routes:
netxml.routes = [ast.literal_eval(x) for x in routes]
if pg_name:
pg_default = params.get("portgroup_default",
"").split()
pg_virtualport = params.get(
"portgroup_virtualport", "").split()
pg_bandwidth_inbound = params.get(
"portgroup_bandwidth_inbound", "").split()
pg_bandwidth_outbound = params.get(
"portgroup_bandwidth_outbound", "").split()
pg_vlan = params.get("portgroup_vlan", "").split()
for i in range(len(pg_name)):
pgxml = network_xml.PortgroupXML()
pgxml.name = pg_name[i]
if len(pg_default) > i:
pgxml.default = pg_default[i]
if len(pg_virtualport) > i:
pgxml.virtualport_type = pg_virtualport[i]
if len(pg_bandwidth_inbound) > i:
pgxml.bandwidth_inbound = ast.literal_eval(
pg_bandwidth_inbound[i])
if len(pg_bandwidth_outbound) > i:
pgxml.bandwidth_outbound = ast.literal_eval(
pg_bandwidth_outbound[i])
if len(pg_vlan) > i:
pgxml.vlan_tag = ast.literal_eval(pg_vlan[i])
netxml.set_portgroup(pgxml)
logging.debug("New network xml file: %s", netxml)
netxml.xmltreefile.write()
return netxml
except Exception as detail:
stacktrace.log_exc_info(sys.exc_info())
raise exceptions.TestFail("Fail to create network XML: %s" % detail)
def create_nwfilter_xml(params):
"""
    Create a new network filter or update an existing network filter xml
"""
filter_name = params.get("filter_name", "testcase")
exist_filter = params.get("exist_filter", "no-mac-spoofing")
filter_chain = params.get("filter_chain")
filter_priority = params.get("filter_priority", "")
filter_uuid = params.get("filter_uuid")
# process filterref_name
filterrefs_list = []
filterrefs_key = []
for i in list(params.keys()):
if 'filterref_name_' in i:
filterrefs_key.append(i)
filterrefs_key.sort()
for i in filterrefs_key:
filterrefs_dict = {}
filterrefs_dict['filter'] = params[i]
filterrefs_list.append(filterrefs_dict)
# prepare rule and protocol attributes
protocol = {}
rule_dict = {}
rule_dict_tmp = {}
RULE_ATTR = ('rule_action', 'rule_direction', 'rule_priority',
'rule_statematch')
PROTOCOL_TYPES = ['mac', 'vlan', 'stp', 'arp', 'rarp', 'ip', 'ipv6',
'tcp', 'udp', 'sctp', 'icmp', 'igmp', 'esp', 'ah',
'udplite', 'all', 'tcp-ipv6', 'udp-ipv6', 'sctp-ipv6',
'icmpv6', 'esp-ipv6', 'ah-ipv6', 'udplite-ipv6',
'all-ipv6']
# rule should end with 'EOL' as separator, multiple rules are supported
rule = params.get("rule")
if rule:
rule_list = rule.split('EOL')
for i in range(len(rule_list)):
if rule_list[i]:
attr = rule_list[i].split()
for j in range(len(attr)):
attr_list = attr[j].split('=')
rule_dict_tmp[attr_list[0]] = attr_list[1]
rule_dict[i] = rule_dict_tmp
rule_dict_tmp = {}
# process protocol parameter
for i in list(rule_dict.keys()):
if 'protocol' not in rule_dict[i]:
            # Set protocol to the string 'None', since the value parsed
            # from the cfg file is the string 'None'
protocol[i] = 'None'
else:
protocol[i] = rule_dict[i]['protocol']
rule_dict[i].pop('protocol')
if protocol[i] in PROTOCOL_TYPES:
# replace '-' with '_' in ipv6 types as '-' is not
# supposed to be in class name
if '-' in protocol[i]:
protocol[i] = protocol[i].replace('-', '_')
else:
raise exceptions.TestFail("Given protocol type %s"
" is not in supported list %s"
% (protocol[i], PROTOCOL_TYPES))
try:
new_filter = nwfilter_xml.NwfilterXML()
filterxml = new_filter.new_from_filter_dumpxml(exist_filter)
# Set filter attribute
filterxml.filter_name = filter_name
filterxml.filter_priority = filter_priority
if filter_chain:
filterxml.filter_chain = filter_chain
if filter_uuid:
filterxml.uuid = filter_uuid
filterxml.filterrefs = filterrefs_list
# Set rule attribute
index_total = filterxml.get_rule_index()
rule = filterxml.get_rule(0)
rulexml = rule.backup_rule()
for i in index_total:
filterxml.del_rule()
for i in range(len(list(rule_dict.keys()))):
rulexml.rule_action = rule_dict[i].get('rule_action')
rulexml.rule_direction = rule_dict[i].get('rule_direction')
rulexml.rule_priority = rule_dict[i].get('rule_priority')
rulexml.rule_statematch = rule_dict[i].get('rule_statematch')
for j in RULE_ATTR:
if j in list(rule_dict[i].keys()):
rule_dict[i].pop(j)
# set protocol attribute
if protocol[i] != 'None':
protocolxml = rulexml.get_protocol(protocol[i])
new_one = protocolxml.new_attr(**rule_dict[i])
protocolxml.attrs = new_one
rulexml.xmltreefile = protocolxml.xmltreefile
else:
rulexml.del_protocol()
filterxml.add_rule(rulexml)
# Reset rulexml
rulexml = rule.backup_rule()
filterxml.xmltreefile.write()
logging.info("The network filter xml is:\n%s" % filterxml)
return filterxml
except Exception as detail:
stacktrace.log_exc_info(sys.exc_info())
raise exceptions.TestFail("Fail to create nwfilter XML: %s" % detail)
def create_channel_xml(params, alias=False, address=False):
"""
Create a XML contains channel information.
:param params: the params for Channel slot
    :param alias: whether to add an 'alias' element; a string is used as the
                  alias name directly, any other truthy value reuses target_name
    :param address: whether to add an 'address' element
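    Example (illustrative; typical guest agent channel parameters):
    ::
        params = {"channel_type_name": "unix",
                  "target_type": "virtio",
                  "target_name": "org.qemu.guest_agent.0"}
        channelxml = create_channel_xml(params)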
"""
# Create attributes dict for channel's element
channel_source = {}
channel_target = {}
channel_alias = {}
channel_address = {}
channel_params = {}
channel_type_name = params.get("channel_type_name")
source_mode = params.get("source_mode")
source_path = params.get("source_path")
target_type = params.get("target_type")
target_name = params.get("target_name")
if channel_type_name is None:
raise exceptions.TestFail("channel_type_name not specified.")
    # if these params are None, they won't be used.
if source_mode:
channel_source['mode'] = source_mode
if source_path:
channel_source['path'] = source_path
if target_type:
channel_target['type'] = target_type
if target_name:
channel_target['name'] = target_name
channel_params = {'type_name': channel_type_name,
'source': channel_source,
'target': channel_target}
if alias:
if isinstance(alias, str):
channel_alias = alias
else:
channel_alias = target_name
channel_params['alias'] = {'name': channel_alias}
if address:
channel_address = {'type': 'virtio-serial',
'controller': '0',
'bus': '0'}
channel_params['address'] = channel_address
channelxml = channel.Channel.new_from_dict(channel_params)
logging.debug("Channel XML:\n%s", channelxml)
return channelxml
def update_on_crash(vm_name, on_crash):
"""
Update on_crash state of vm
:param vm_name: name of vm
:param on_crash: on crash state, destroy, restart ...
"""
vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
vmxml.on_crash = on_crash
vmxml.sync()
def add_panic_device(vm_name, model='isa', addr_type='isa', addr_iobase='0x505'):
"""
Create panic device xml
:param vm_name: name of vm
:param model: panic model
:param addr_type: address type
:param addr_iobase: address iobase
    :return: False if the panic device already exists, True if it was added
"""
vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
panic_dev = vmxml.xmltreefile.find('devices/panic')
if panic_dev is not None:
logging.info("Panic device already exists")
return False
else:
panic_dev = panic.Panic()
panic_dev.model = model
panic_dev.addr_type = addr_type
panic_dev.addr_iobase = addr_iobase
vmxml.add_device(panic_dev)
vmxml.sync()
return True
def set_domain_state(vm, vm_state):
"""
Set domain state.
:param vm: the vm object
:param vm_state: the given vm state string "shut off", "running"
"paused", "halt" or "pm_suspend"
"""
# reset domain state
if vm.is_alive():
vm.destroy(gracefully=False)
if not vm_state == "shut off":
vm.start()
session = vm.wait_for_login()
if vm_state == "paused":
vm.pause()
elif vm_state == "halt":
try:
session.cmd("halt")
except (aexpect.ShellProcessTerminatedError, aexpect.ShellStatusError):
# The halt command always gets these errors, but execution is OK,
# skip these errors
pass
elif vm_state == "pm_suspend":
# Execute "pm-suspend-hybrid" command directly will get Timeout error,
# so here execute it in background, and wait for 3s manually
if session.cmd_status("which pm-suspend-hybrid"):
raise exceptions.TestSkipError("Cannot execute this test for domain"
" doesn't have pm-suspend-hybrid command!")
session.cmd("pm-suspend-hybrid &")
time.sleep(3)
def create_vsock_xml(model, auto_cid='yes', invalid_cid=False):
"""
Create vsock xml
:param model: device model
:param auto_cid: "yes" or "no"
    :param invalid_cid: True to use an invalid cid, False for a valid one
:return: vsock device
"""
vsock_dev = vsock.Vsock()
vsock_dev.model_type = model
if process.run("modprobe vhost_vsock").exit_status != 0:
raise exceptions.TestError("Failed to load vhost_vsock module")
if invalid_cid:
cid = "-1"
else:
cid = random.randint(3, 10)
vsock_dev.cid = {'auto': auto_cid, 'address': cid}
chars = string.ascii_letters + string.digits + '-_'
alias_name = 'ua-' + ''.join(random.choice(chars) for _ in list(range(64)))
vsock_dev.alias = {'name': alias_name}
logging.debug(vsock_dev)
return vsock_dev
def create_rng_xml(dparams):
"""
Create rng device xml
:param dparams: Rng device parameter dict
"""
rng_model = dparams.get("rng_model", "virtio")
rng_rate = dparams.get("rng_rate")
backend_model = dparams.get("backend_model", "random")
backend_type = dparams.get("backend_type")
backend_dev = dparams.get("backend_dev", "/dev/urandom")
backend_source_list = dparams.get("backend_source",
"").split()
backend_protocol = dparams.get("backend_protocol")
rng_alias = dparams.get("rng_alias")
rng_xml = rng.Rng()
rng_xml.rng_model = rng_model
if rng_rate:
rng_xml.rate = ast.literal_eval(rng_rate)
backend = rng.Rng.Backend()
backend.backend_model = backend_model
if backend_type:
backend.backend_type = backend_type
if backend_dev:
backend.backend_dev = backend_dev
if backend_source_list:
source_list = [ast.literal_eval(source) for source in
backend_source_list]
backend.source = source_list
if backend_protocol:
backend.backend_protocol = backend_protocol
rng_xml.backend = backend
if rng_alias:
rng_xml.alias = dict(name=rng_alias)
logging.debug("Rng xml: %s", rng_xml)
return rng_xml
def update_memballoon_xml(vmxml, membal_dict):
"""
Add/update memballoon attr
:param vmxml: VMXML object
:param membal_dict: memballoon parameter dict
"""
membal_model = membal_dict.get("membal_model")
membal_stats_period = membal_dict.get("membal_stats_period")
vmxml.del_device('memballoon', by_tag=True)
memballoon_xml = vmxml.get_device_class('memballoon')()
if membal_model:
memballoon_xml.model = membal_model
if membal_stats_period:
memballoon_xml.stats_period = membal_stats_period
vmxml.add_device(memballoon_xml)
logging.info(memballoon_xml)
vmxml.sync()
def create_tpm_dev(params):
"""
Create tpm device instance
:param params: tpm parameter dict
:return: tpm device
"""
tpm_model = params.get("tpm_model", 'tpm-crb')
backend_type = params.get("backend_type")
backend_version = params.get("backend_version")
encryption_secret = params.get("encryption_secret")
device_path = params.get("device_path")
tpm_dev = tpm.Tpm()
tpm_dev.tpm_model = tpm_model
if backend_type:
tpm_backend = tpm_dev.Backend()
tpm_backend.backend_type = backend_type
if backend_version:
tpm_backend.backend_version = backend_version
if encryption_secret:
tpm_backend.encryption_secret = encryption_secret
if device_path:
tpm_backend.device_path = device_path
tpm_dev.backend = tpm_backend
return tpm_dev
def get_vm_device(vmxml, dev_tag, index=0):
"""
Get current vm device according to device tag
:param vmxml: domain VMXML instance
:param dev_tag: device tag
:param index: device index
:return: device object
"""
xml_devices = vmxml.devices
dev_index = xml_devices.index(xml_devices.by_device_tag(dev_tag)[index])
dev_obj = xml_devices[dev_index]
return (dev_obj, xml_devices)
def add_vm_device(vmxml, new_device):
"""
Add device in vmxml
:param vmxml: domain VMXML instance
:param new_device: device instance
"""
vmxml.add_device(new_device)
vmxml.xmltreefile.write()
vmxml.sync()
def set_guest_agent(vm):
"""
Set domain xml with guest agent channel and install guest agent rpm
in domain.
:param vm: the vm object
"""
logging.warning("This function is going to be deprecated. "
"Please use vm.prepare_guest_agent() instead.")
# reset domain state
if vm.is_alive():
vm.destroy(gracefully=False)
vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
logging.debug("Attempting to set guest agent channel")
vmxml.set_agent_channel()
vmxml.sync()
vm.start()
session = vm.wait_for_login()
# Install qemu-guest-agent if it is not already installed
cmd = "rpm -q qemu-guest-agent || yum install -y qemu-guest-agent"
stat_install = session.cmd_status(cmd, 300)
if stat_install != 0:
raise exceptions.TestFail("Fail to install qemu-guest-agent, make "
"sure that you have usable repo in guest")
# Check if qemu-ga already started
stat_ps = session.cmd_status("ps aux |grep [q]emu-ga")
if stat_ps != 0:
session.cmd("qemu-ga -d")
# Check if the qemu-ga really started
stat_ps = session.cmd_status("ps aux |grep [q]emu-ga")
if stat_ps != 0:
raise exceptions.TestFail("Fail to run qemu-ga in guest")
def set_vm_disk(vm, params, tmp_dir=None, test=None):
"""
Replace the vm's first disk with one of the given type in the domain xml,
including file type (local, nfs), network type (gluster, iscsi) and block
type (using a connected iscsi block disk).
For all types, the following params are common and need to be specified:
disk_device: default to 'disk'
disk_type: 'block' or 'network'
disk_target: default to 'vda'
disk_target_bus: default to 'virtio'
disk_format: default to 'qcow2'
disk_source_protocol: 'iscsi', 'gluster' or 'netfs'
For the 'gluster' network type, the following gluster-only params need to be
specified:
vol_name: string
pool_name: default to 'gluster-pool'
transport: 'tcp', 'rdma' or '', default to ''
For the 'iscsi' network type, the following params need to be specified:
image_size: default to "10G", 10G is the raw size of the jeos disk
disk_source_host: default to "127.0.0.1"
disk_source_port: default to "3260"
For the 'netfs' network type, the following params need to be specified:
mnt_path_name: the mount dir name, default to "nfs-mount"
export_options: nfs mount options, default to "rw,no_root_squash,fsid=0"
For the 'block' type, using a connected iscsi block disk, the following
params need to be specified:
image_size: default to "10G", 10G is the raw size of the jeos disk
:param vm: the vm object
:param tmp_dir: string, dir path
:param params: dict, dict include setup vm disk xml configurations
"""
vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
logging.debug("original xml is: %s", vmxml.xmltreefile)
disk_device = params.get("disk_device", "disk")
disk_snapshot_attr = params.get("disk_snapshot_attr")
disk_type = params.get("disk_type", "file")
disk_target = params.get("disk_target", 'vda')
disk_target_bus = params.get("disk_target_bus", "virtio")
disk_model = params.get("disk_model")
disk_src_protocol = params.get("disk_source_protocol")
disk_src_name = params.get("disk_source_name")
disk_src_host = params.get("disk_source_host", "127.0.0.1")
disk_src_port = params.get("disk_source_port", "3260")
disk_src_config = params.get("disk_source_config")
disk_snap_name = params.get("disk_snap_name")
emu_image = params.get("emulated_image", "emulated-iscsi")
image_size = params.get("image_size", "10G")
disk_format = params.get("disk_format", "qcow2")
driver_iothread = params.get("driver_iothread", "")
mnt_path_name = params.get("mnt_path_name", "nfs-mount")
exp_opt = params.get("export_options", "rw,no_root_squash,fsid=0")
exp_dir = params.get("export_dir", "nfs-export")
first_disk = vm.get_first_disk_devices()
logging.debug("first disk is %s", first_disk)
blk_source = first_disk['source']
blk_source = params.get("blk_source_name", blk_source)
disk_xml = vmxml.devices.by_device_tag('disk')[0]
src_disk_format = disk_xml.xmltreefile.find('driver').get('type')
sec_model = params.get('sec_model')
relabel = params.get('relabel')
sec_label = params.get('sec_label')
image_owner_group = params.get('image_owner_group')
pool_name = params.get("pool_name", "set-vm-disk-pool")
disk_src_mode = params.get('disk_src_mode', 'host')
auth_user = params.get("auth_user")
secret_type = params.get("secret_type")
secret_usage = params.get("secret_usage")
secret_uuid = params.get("secret_uuid")
enable_cache = "yes" == params.get("enable_cache", "yes")
driver_cache = params.get("driver_cache", "none")
create_controller = "yes" == params.get("create_controller")
del_disks = "yes" == params.get("cleanup_disks", 'no')
disk_params = {'device_type': disk_device,
'disk_snapshot_attr': disk_snapshot_attr,
'type_name': disk_type,
'target_dev': disk_target,
'target_bus': disk_target_bus,
'driver_type': disk_format,
'driver_iothread': driver_iothread,
'sec_model': sec_model,
'relabel': relabel,
'sec_label': sec_label,
'auth_user': auth_user,
'secret_type': secret_type,
'secret_uuid': secret_uuid,
'secret_usage': secret_usage}
if enable_cache:
disk_params['driver_cache'] = driver_cache
if disk_model:
disk_params['model'] = disk_model
if not tmp_dir:
tmp_dir = data_dir.get_tmp_dir()
# gluster only params
vol_name = params.get("vol_name")
transport = params.get("transport", "")
brick_path = os.path.join(tmp_dir, pool_name)
image_convert = "yes" == params.get("image_convert", 'yes')
if vm.is_alive():
vm.destroy(gracefully=False)
# Replace domain disk with iscsi, gluster, block or netfs disk
if disk_src_protocol == 'iscsi':
if disk_type == 'block':
is_login = True
elif disk_type == 'network' or disk_type == 'volume':
is_login = False
else:
raise exceptions.TestFail("Disk type '%s' not expected, only disk "
"type 'block', 'network' or 'volume' work "
"with 'iscsi'" % disk_type)
if disk_type == 'volume':
pvt = PoolVolumeTest(test, params)
pvt.pre_pool(pool_name, 'iscsi', "/dev/disk/by-path",
emulated_image=emu_image,
image_size=image_size)
# Get volume name
vols = get_vol_list(pool_name)
vol_name = list(vols.keys())[0]
emulated_path = vols[vol_name]
else:
# Setup iscsi target
if is_login:
iscsi_target = setup_or_cleanup_iscsi(
is_setup=True, is_login=is_login,
image_size=image_size, emulated_image=emu_image)
else:
iscsi_target, lun_num = setup_or_cleanup_iscsi(
is_setup=True, is_login=is_login,
image_size=image_size, emulated_image=emu_image)
emulated_path = os.path.join(tmp_dir, emu_image)
# Copy first disk to emulated backing store path
cmd = "qemu-img convert -f %s -O %s %s %s" % (src_disk_format,
disk_format,
blk_source,
emulated_path)
process.run(cmd, ignore_status=False)
if disk_type == 'block':
disk_params_src = {'source_file': iscsi_target}
elif disk_type == "volume":
disk_params_src = {'source_pool': pool_name,
'source_volume': vol_name,
'source_mode': disk_src_mode}
else:
disk_params_src = {'source_protocol': disk_src_protocol,
'source_name': iscsi_target + "/" + str(lun_num),
'source_host_name': disk_src_host,
'source_host_port': disk_src_port}
elif disk_src_protocol == 'gluster':
# Setup gluster.
host_ip = gluster.setup_or_cleanup_gluster(True, brick_path=brick_path,
**params)
logging.debug("host ip: %s " % host_ip)
dist_img = "gluster.%s" % disk_format
if image_convert:
# Convert first disk to gluster disk path
disk_cmd = ("qemu-img convert -f %s -O %s %s /mnt/%s" %
(src_disk_format, disk_format, blk_source, dist_img))
else:
# create another disk without convert
disk_cmd = "qemu-img create -f %s /mnt/%s 10M" % (src_disk_format,
dist_img)
# Mount the gluster disk and create the image.
process.run("mount -t glusterfs %s:%s /mnt; %s; umount /mnt"
% (host_ip, vol_name, disk_cmd), shell=True)
disk_params_src = {'source_protocol': disk_src_protocol,
'source_name': "%s/%s" % (vol_name, dist_img),
'source_host_name': host_ip,
'source_host_port': "24007"}
if transport:
disk_params_src.update({"transport": transport})
elif disk_src_protocol == 'netfs':
# For testing multiple VMs in a test, this param can be used
# to setup/cleanup configurations
src_file_list = params.get("source_file_list", [])
# Setup nfs
res = setup_or_cleanup_nfs(True, mnt_path_name,
is_mount=True,
export_options=exp_opt,
export_dir=exp_dir)
exp_path = res["export_dir"]
mnt_path = res["mount_dir"]
params["selinux_status_bak"] = res["selinux_status_bak"]
dist_img = params.get("source_dist_img", "nfs-img")
# Convert the first disk image into the NFS export path
disk_cmd = ("qemu-img convert -f %s -O %s %s %s/%s" %
(src_disk_format, disk_format,
blk_source, exp_path, dist_img))
process.run(disk_cmd, ignore_status=False)
# Change image ownership
if image_owner_group:
update_owner_cmd = ("chown %s %s/%s" %
(image_owner_group, exp_path, dist_img))
process.run(update_owner_cmd, ignore_status=False)
src_file_path = "%s/%s" % (mnt_path, dist_img)
if params.get("change_file_uid") and params.get("change_file_gid"):
logging.debug("Changing the ownership of {} to {}.{}."
.format(src_file_path, params["change_file_uid"],
params["change_file_gid"]))
os.chown(src_file_path, params["change_file_uid"],
params["change_file_gid"])
res = os.stat(src_file_path)
logging.debug("The ownership of {} is updated, uid: {}, gid: {}."
.format(src_file_path, res.st_uid, res.st_gid))
disk_params_src = {'source_file': src_file_path}
params["source_file"] = src_file_path
src_file_list.append(src_file_path)
params["source_file_list"] = src_file_list
elif disk_src_protocol == 'rbd':
mon_host = params.get("mon_host")
if image_convert:
disk_cmd = ("qemu-img convert -f %s -O %s %s rbd:%s:mon_host=%s"
% (src_disk_format, disk_format, blk_source,
disk_src_name, mon_host))
process.run(disk_cmd, ignore_status=False)
disk_params_src = {'source_protocol': disk_src_protocol,
'source_name': disk_src_name,
'source_host_name': disk_src_host,
'source_host_port': disk_src_port,
'source_config_file': disk_src_config}
if disk_snap_name:
disk_params_src.update({'source_snap_name': disk_snap_name})
disk_params.update({'readonly': params.get("read_only", "no")})
else:
"""
If disk_src_name is given, replace current source file
Otherwise, use current source file with update params.
"""
if disk_src_name:
blk_source = disk_src_name
disk_params_src = {'source_file': blk_source}
# Delete disk elements
disk_deleted = False
disks = vmxml.get_devices(device_type="disk")
for disk_ in disks:
if disk_.target['dev'] == disk_target:
vmxml.del_device(disk_)
disk_deleted = True
continue
if del_disks:
vmxml.del_device(disk_)
disk_deleted = True
if disk_deleted:
vmxml.sync()
vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
#Create controller
if create_controller:
contr_type = params.get("controller_type", 'scsi')
contr_model = params.get("controller_model", "virtio-scsi")
contr_index = params.get("controller_index", "0")
contr_dict = {'controller_type': contr_type,
'controller_model': contr_model,
'controller_index': contr_index}
new_added = create_controller_xml(contr_dict)
add_controller(vm.name, new_added)
vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
# New disk xml
new_disk = disk.Disk(type_name=disk_type)
new_disk.new_disk_source(attrs={'file': blk_source})
disk_params.update(disk_params_src)
disk_xml = create_disk_xml(disk_params)
new_disk.xml = disk_xml
# Add new disk xml and redefine vm
vmxml.add_device(new_disk)
# Update disk address
if create_controller:
vmxml.sync()
vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
contr_type_from_address = vmxml.get_disk_attr(vm.name, disk_target,
"address", "type")
if contr_type_from_address == 'pci':
address_params = {'bus': "%0#4x" % int(contr_index)}
elif contr_type_from_address == 'drive':
address_params = {'controller': contr_index}
elif contr_type_from_address == 'usb':
address_params = {'bus': contr_index}
else:
# TODO: Other controller types
pass
if not set_disk_attr(vmxml, disk_target, 'address', address_params):
raise exceptions.TestFail("Fail to update disk address.")
# Set domain options
dom_iothreads = params.get("dom_iothreads")
if dom_iothreads:
vmxml.iothreads = int(dom_iothreads)
logging.debug("The vm xml now is: %s" % vmxml.xmltreefile)
vmxml.sync()
vm.start()
def attach_additional_device(vm_name, targetdev, disk_path, params, config=True):
"""
Create a disk xml with the given params, then attach it to the given vm.
:param vm_name: Libvirt VM name.
:param disk_path: path of attached disk
:param targetdev: target of disk device
:param params: dict include necessary configurations of device
"""
logging.info("Attaching disk...")
# Update params for source file
params['source_file'] = disk_path
params['target_dev'] = targetdev
# Create a file of device
xmlfile = create_disk_xml(params)
# Detach first to make sure the target device does not already exist.
if config:
extra = "--config"
else:
extra = ""
virsh.detach_disk(vm_name, targetdev, extra=extra)
return virsh.attach_device(domain_opt=vm_name, file_opt=xmlfile,
flagstr=extra, debug=True)
def device_exists(vm, target_dev):
"""
Check if given target device exists on vm.
"""
targets = list(vm.get_blk_devices().keys())
if target_dev in targets:
return True
return False
def create_local_disk(disk_type, path=None,
size="10", disk_format="raw",
vgname=None, lvname=None, extra=''):
if disk_type != "lvm" and path is None:
raise exceptions.TestError("Path is needed for creating local disk")
if path:
process.run("mkdir -p %s" % os.path.dirname(path))
try:
size = str(float(size)) + "G"
except ValueError:
pass
cmd = ""
if disk_type == "file":
cmd = "qemu-img create -f %s %s %s %s" % (disk_format, extra, path, size)
elif disk_type == "floppy":
cmd = "dd if=/dev/zero of=%s count=1024 bs=1024" % path
elif disk_type == "iso":
cmd = "mkisofs -o %s /root/*.*" % path
elif disk_type == "lvm":
if vgname is None or lvname is None:
raise exceptions.TestError("Both VG name and LV name are needed")
lv_utils.lv_create(vgname, lvname, size)
path = "/dev/%s/%s" % (vgname, lvname)
else:
raise exceptions.TestError("Unknown disk type %s" % disk_type)
if cmd:
process.run(cmd, ignore_status=True, shell=True)
return path
def delete_local_disk(disk_type, path=None,
vgname=None, lvname=None):
if disk_type in ["file", "floppy", "iso"]:
if path is None:
raise exceptions.TestError(
"Path is needed for deleting local disk")
else:
cmd = "rm -f %s" % path
process.run(cmd, ignore_status=True)
elif disk_type == "lvm":
if vgname is None or lvname is None:
raise exceptions.TestError("Both VG name and LV name needed")
lv_utils.lv_remove(vgname, lvname)
else:
raise exceptions.TestError("Unknown disk type %s" % disk_type)
def create_scsi_disk(scsi_option, scsi_size="2048"):
"""
Get the scsi device created by scsi_debug kernel module
:param scsi_option: The scsi_debug kernel module options.
:param scsi_size: The size of the scsi_debug device in MB.
:return: scsi device if it is created successfully.
"""
try:
utils_path.find_command("lsscsi")
except utils_path.CmdNotFoundError:
raise exceptions.TestSkipError("Missing command 'lsscsi'.")
try:
# Load scsi_debug kernel module.
# Unload it first if it's already loaded.
if linux_modules.module_is_loaded("scsi_debug"):
linux_modules.unload_module("scsi_debug")
linux_modules.load_module("scsi_debug dev_size_mb=%s %s" %
(scsi_size, scsi_option))
# Get the scsi device name
result = process.run("lsscsi|grep scsi_debug|awk '{print $6}'",
shell=True)
scsi_disk = result.stdout_text.strip()
logging.info("scsi disk: %s" % scsi_disk)
return scsi_disk
except Exception as e:
logging.error(str(e))
return None
def delete_scsi_disk():
"""
Delete scsi device by removing scsi_debug kernel module.
"""
scsi_dbg_check = process.run("lsscsi|grep scsi_debug", shell=True)
if scsi_dbg_check.exit_status == 0:
scsi_addr_pattern = '[0-9]+:[0-9]+:[0-9]+:[0-9]+'
for addr in re.findall(scsi_addr_pattern, scsi_dbg_check.stdout_text):
process.run("echo 1>/sys/class/scsi_device/{}/device/delete".format(addr),
shell=True)
if linux_modules.module_is_loaded("scsi_debug"):
linux_modules.unload_module("scsi_debug")
def set_controller_multifunction(vm_name, controller_type='scsi'):
"""
Set multifunction on for controller device and expand to all function.
"""
vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
exist_controllers = vmxml.get_devices("controller")
# Used to contain controllers in format:
# domain:bus:slot:func -> controller object
expanded_controllers = {}
# The index of controller
index = 0
for e_controller in exist_controllers:
if e_controller.type != controller_type:
continue
# Set multifunction on
address_attrs = e_controller.address.attrs
address_attrs['multifunction'] = "on"
domain = address_attrs['domain']
bus = address_attrs['bus']
slot = address_attrs['slot']
all_funcs = ["0x0", "0x1", "0x2", "0x3", "0x4", "0x5", "0x6"]
for func in all_funcs:
key = "%s:%s:%s:%s" % (domain, bus, slot, func)
address_attrs['function'] = func
# Create a new controller instance
new_controller = controller.Controller(controller_type)
new_controller.xml = str(xml_utils.XMLTreeFile(e_controller.xml))
new_controller.index = index
new_controller.address = new_controller.new_controller_address(
attrs=address_attrs)
# Expand controller to all functions with multifunction
if key not in list(expanded_controllers.keys()):
expanded_controllers[key] = new_controller
index += 1
logging.debug("Expanded controllers: %s", list(expanded_controllers.values()))
vmxml.del_controller(controller_type)
vmxml.set_controller(list(expanded_controllers.values()))
vmxml.sync()
def attach_disks(vm, path, vgname, params):
"""
Attach multiple disks. According to the disk_type parameter in params,
it will create lvm or file type disks.
:param path: file type disk's path
:param vgname: lvm type disk's volume group name
"""
# Additional disk on vm
disks_count = int(params.get("added_disks_count", 1)) - 1
multifunction_on = "yes" == params.get("multifunction_on", "no")
disk_size = params.get("added_disk_size", "0.1")
disk_type = params.get("added_disk_type", "file")
disk_target = params.get("added_disk_target", "virtio")
disk_format = params.get("added_disk_format", "raw")
# Whether attaching device with --config
attach_config = "yes" == params.get("attach_disk_config", "yes")
def generate_disks_index(count, target="virtio"):
# Created disks' index
target_list = []
# Used to flag progression
index = 0
# A list to maintain prefix for generating device
# ['a','b','c'] means prefix abc
prefix_list = []
while count > 0:
# Out of range for current prefix_list
if (index // 26) > 0:
# Update prefix_list to expand disks, such as [] -> ['a'],
# ['z'] -> ['a', 'a'], ['z', 'z'] -> ['a', 'a', 'a']
prefix_index = len(prefix_list)
if prefix_index == 0:
prefix_list.append('a')
# Append a new prefix to list, then update pre-'z' in list
# to 'a' to keep the progression 1
while prefix_index > 0:
prefix_index -= 1
prefix_cur = prefix_list[prefix_index]
if prefix_cur == 'z':
prefix_list[prefix_index] = 'a'
# All prefix in prefix_list are 'z',
# it's time to expand it.
if prefix_index == 0:
prefix_list.append('a')
else:
# For whole prefix_list, progression is 1
prefix_list[prefix_index] = chr(ord(prefix_cur) + 1)
break
# Reset for another iteration
index = 0
prefix = "".join(prefix_list)
suffix_index = index % 26
suffix = chr(ord('a') + suffix_index)
index += 1
count -= 1
# Generate device target according to driver type
if target == "virtio":
target_dev = "vd%s" % (prefix + suffix)
elif target == "scsi":
target_dev = "sd%s" % (prefix + suffix)
elif target == "ide":
target_dev = "hd%s" % (prefix + suffix)
target_list.append(target_dev)
return target_list
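# For illustration: with the default "virtio" target, a count of 3 yields
# ['vda', 'vdb', 'vdc']; once 'vdz' is reached the prefix grows, so naming
# continues with 'vdaa', 'vdab', ... similar to spreadsheet column names.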
target_list = generate_disks_index(disks_count, disk_target)
# A dict include disks information: source file and size
added_disks = {}
for target_dev in target_list:
# Do not attach if it already exists
if device_exists(vm, target_dev):
continue
# Prepare controller for special disks like virtio-scsi
# Open multifunction to add more controller for disks(150 or more)
if multifunction_on:
set_controller_multifunction(vm.name, disk_target)
disk_params = {}
disk_params['type_name'] = disk_type if not disk_type == 'lvm' else 'block'
disk_params['target_dev'] = target_dev
disk_params['target_bus'] = disk_target
disk_params['device_type'] = params.get("device_type", "disk")
device_name = "%s_%s" % (target_dev, vm.name)
disk_path = os.path.join(os.path.dirname(path), device_name)
disk_path = create_local_disk(disk_type, disk_path,
disk_size, disk_format,
vgname, device_name)
added_disks[disk_path] = disk_size
result = attach_additional_device(vm.name, target_dev, disk_path,
disk_params, attach_config)
if result.exit_status:
raise exceptions.TestFail("Attach device %s failed."
% target_dev)
logging.debug("New VM XML:\n%s", vm.get_xml())
return added_disks
def define_new_vm(vm_name, new_name):
"""
Just define a new vm from given name
"""
try:
vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
vmxml.vm_name = new_name
del vmxml.uuid
vmxml.define()
return True
except xcepts.LibvirtXMLError as detail:
logging.error(detail)
return False
def remotely_control_libvirtd(server_ip, server_user, server_pwd,
action='restart', status_error='no'):
"""
Remotely restart libvirt service
"""
session = None
try:
session = remote.wait_for_login('ssh', server_ip, '22',
server_user, server_pwd,
r"[\#\$]\s*$")
logging.info("%s libvirt daemon\n", action)
service_libvirtd_control(action, session)
session.close()
except (remote.LoginError, aexpect.ShellError, process.CmdError) as detail:
if session:
session.close()
if status_error == "no":
raise exceptions.TestFail("Failed to %s libvirtd service on "
"server: %s\n", action, detail)
else:
logging.info("It is an expect %s", detail)
def connect_libvirtd(uri, read_only="", virsh_cmd="list", auth_user=None,
auth_pwd=None, vm_name="", status_error="no",
extra="", log_level='LIBVIRT_DEBUG=3', su_user="",
patterns_virsh_cmd=r".*Id\s*Name\s*State\s*.*",
patterns_extra_dict=None):
"""
Connect to libvirt daemon
:param uri: the uri to connect the libvirtd
:param read_only: the read only option for virsh
:param virsh_cmd: the virsh command for virsh
:param auth_user: the user used to connect
:param auth_pwd: the password for the user
:param vm_name: the guest name to operate
:param status_error: if expect error status
:param extra: extra parameters
:param log_level: logging level
:param su_user: the user to su
:param patterns_virsh_cmd: the pattern to match in virsh command output
:param patterns_extra_dict: a mapping with extra patterns and responses
:return: (True, output log) on success, otherwise (False, output log)
"""
patterns_yes_no = r".*[Yy]es.*[Nn]o.*"
patterns_auth_name_comm = r".*username:.*"
patterns_auth_name_xen = r".*name.*root.*:.*"
patterns_auth_pwd = r".*[Pp]assword.*"
command = "%s %s virsh %s -c %s %s %s" % (extra, log_level, read_only,
uri, virsh_cmd, vm_name)
# allow specific user to run virsh command
if su_user != "":
command = "su %s -c '%s'" % (su_user, command)
logging.info("Execute %s", command)
# setup shell session
session = aexpect.ShellSession(command, echo=True)
try:
# requires access authentication
match_list = [patterns_yes_no, patterns_auth_name_comm,
patterns_auth_name_xen, patterns_auth_pwd,
patterns_virsh_cmd]
if patterns_extra_dict:
match_list = match_list + list(patterns_extra_dict.keys())
patterns_list_len = len(match_list)
while True:
match, text = session.read_until_any_line_matches(match_list,
timeout=30,
internal_timeout=1)
if match == -patterns_list_len:
logging.info("Matched 'yes/no', details: <%s>", text)
session.sendline("yes")
continue
elif match == -patterns_list_len + 1 or match == -patterns_list_len + 2:
logging.info("Matched 'username', details: <%s>", text)
session.sendline(auth_user)
continue
elif match == -patterns_list_len + 3:
logging.info("Matched 'password', details: <%s>", text)
session.sendline(auth_pwd)
continue
elif match == -patterns_list_len + 4:
logging.info("Expected output of virsh command: <%s>", text)
break
if (patterns_list_len > 5):
extra_len = len(patterns_extra_dict)
index_in_extra_dict = match + extra_len
key = list(patterns_extra_dict.keys())[index_in_extra_dict]
value = patterns_extra_dict.get(key, "")
logging.info("Matched '%s', details:<%s>", key, text)
session.sendline(value)
continue
else:
logging.error("The real prompt text: <%s>", text)
break
log = session.get_output()
session.close()
return (True, log)
except (aexpect.ShellError, aexpect.ExpectError) as details:
log = session.get_output()
session.close()
logging.error("Failed to connect libvirtd: %s\n%s", details, log)
return (False, log)
def get_all_vol_paths():
"""
Get all volumes' path in host
"""
vol_path = []
sp = libvirt_storage.StoragePool()
for pool_name in list(sp.list_pools().keys()):
if sp.list_pools()[pool_name]['State'] != "active":
logging.warning(
"Inactive pool '%s' cannot be processed" % pool_name)
continue
pv = libvirt_storage.PoolVolume(pool_name)
for path in list(pv.list_volumes().values()):
vol_path.append(path)
return set(vol_path)
def do_migration(vm_name, uri, extra, auth_pwd, auth_user="root",
options="--verbose", virsh_patterns=r".*100\s%.*",
su_user="", timeout=30, extra_opt=""):
"""
Migrate VM to target host.
"""
patterns_yes_no = r".*[Yy]es.*[Nn]o.*"
patterns_auth_name = r".*name:.*"
patterns_auth_pwd = r".*[Pp]assword.*"
command = "%s virsh %s migrate %s %s %s" % (extra, extra_opt,
vm_name, options, uri)
# allow specific user to run virsh command
if su_user != "":
command = "su %s -c '%s'" % (su_user, command)
logging.info("Execute %s", command)
# setup shell session
session = aexpect.ShellSession(command, echo=True)
try:
# requires access authentication
match_list = [patterns_yes_no, patterns_auth_name,
patterns_auth_pwd, virsh_patterns]
while True:
match, text = session.read_until_any_line_matches(match_list,
timeout=timeout,
internal_timeout=1)
if match == -4:
logging.info("Matched 'yes/no', details: <%s>", text)
session.sendline("yes")
elif match == -3:
logging.info("Matched 'username', details: <%s>", text)
session.sendline(auth_user)
elif match == -2:
logging.info("Matched 'password', details: <%s>", text)
session.sendline(auth_pwd)
elif match == -1:
logging.info("Expected output of virsh migrate: <%s>", text)
break
else:
logging.error("The real prompt text: <%s>", text)
break
log = session.get_output()
session.close()
return (True, log)
except (aexpect.ShellError, aexpect.ExpectError) as details:
log = session.get_output()
session.close()
logging.error("Failed to migrate %s: %s\n%s", vm_name, details, log)
return (False, log)
def update_vm_disk_driver_cache(vm_name, driver_cache="none", disk_index=0):
"""
Update disk driver cache of the VM
:param vm_name: vm name
:param driver_cache: new vm disk driver cache mode, default to none if not provided
:param disk_index: vm disk index to be updated, the index of first disk is 0.
"""
vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
try:
# Get the disk to be updated
devices = vmxml.devices
device_index = devices.index(devices.by_device_tag('disk')[disk_index])
disk = devices[device_index]
# Update disk driver cache mode
driver_dict = disk.driver
driver_dict['cache'] = driver_cache
disk.driver = driver_dict
logging.debug("The new vm disk driver cache is %s", disk.driver['cache'])
vmxml.devices = devices
# SYNC VM XML change
logging.debug("The new VM XML:\n%s", vmxml)
vmxml.sync()
return True
except Exception as e:
logging.error("Can't update disk driver cache!! %s", e)
return False
def update_vm_disk_source(vm_name, disk_source_path,
disk_image_name="",
source_type="file"):
"""
Update disk source path of the VM
:param source_type: it may be 'dev' or 'file' type, which is default
"""
if not os.path.isdir(disk_source_path):
logging.error("Require disk source path!!")
return False
# Prepare to update VM first disk source file
vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
devices = vmxml.devices
disk_index = devices.index(devices.by_device_tag('disk')[0])
disks = devices[disk_index]
# Generate a disk image name if it doesn't exist
if not disk_image_name:
disk_source = disks.source.get_attrs().get(source_type)
logging.debug("The disk source file of the VM: %s", disk_source)
disk_image_name = os.path.basename(disk_source)
new_disk_source = os.path.join(disk_source_path, disk_image_name)
logging.debug("The new disk source file of the VM: %s", new_disk_source)
# Update VM disk source file
try:
disks.source = disks.new_disk_source(**{'attrs': {'%s' % source_type:
"%s" % new_disk_source}})
# SYNC VM XML change
vmxml.devices = devices
logging.debug("The new VM XML:\n%s", vmxml.xmltreefile)
vmxml.sync()
return True
except Exception as e:
logging.error("Can't update disk source!! %s", e)
return False
def exec_virsh_edit(source, edit_cmd, connect_uri="qemu:///system"):
"""
Execute edit command.
:param source : virsh edit's option.
:param edit_cmd: Edit command list to execute.
:return: True if the edit succeeds, False otherwise.
"""
logging.info("Trying to edit xml with cmd %s", edit_cmd)
session = aexpect.ShellSession("sudo -s")
try:
session.sendline("virsh -c %s edit %s" % (connect_uri, source))
for cmd in edit_cmd:
session.sendline(cmd)
session.send('\x1b')
session.send('ZZ')
remote.handle_prompts(session, None, None, r"[\#\$]\s*$", debug=True)
session.close()
return True
except Exception as e:
session.close()
logging.error("Error occurred: %s", e)
return False
def new_disk_vol_name(pool_name):
"""
According to BZ#1138523, the new volume name must be the next
created partition(sdb1, etc.), so we need to inspect the original
partitions of the disk then count the new partition number.
:param pool_name: Disk pool name
:return: New volume name or none
"""
poolxml = pool_xml.PoolXML.new_from_dumpxml(pool_name)
if poolxml.get_type(pool_name) != "disk":
logging.error("This is not a disk pool")
return None
disk = poolxml.get_source().device_path[5:]
part_num = len(list(filter(lambda s: s.startswith(disk),
utils_misc.utils_disk.get_parts_list())))
return disk + str(part_num)
def update_polkit_rule(params, pattern, new_value):
"""
This function helps to update the polkit rule during testing.
:param params: Test run params
:param pattern: Regex pattern for updating
:param new_value: New value for updating
"""
polkit = test_setup.LibvirtPolkitConfig(params)
polkit_rules_path = polkit.polkit_rules_path
try:
polkit_f = open(polkit_rules_path, 'r+')
rule = polkit_f.read()
new_rule = re.sub(pattern, new_value, rule)
polkit_f.seek(0)
polkit_f.truncate()
polkit_f.write(new_rule)
polkit_f.close()
logging.debug("New polkit config rule is:\n%s", new_rule)
polkit.polkitd.restart()
except IOError as e:
logging.error(e)
def get_vol_list(pool_name, vol_check=True, timeout=5):
"""
This is a wrapper to get all volumes of a pool, especially for
iscsi type pool as the volume may not appear immediately after
iscsi target login.
:param pool_name: Libvirt pool name
:param vol_check: Check if volume and volume path exist
:param timeout: Timeout in seconds.
:return: A dict include volumes' name(key) and path(value).
"""
poolvol = libvirt_storage.PoolVolume(pool_name=pool_name)
vols = utils_misc.wait_for(poolvol.list_volumes, timeout,
text='Waiting for volume show up')
if not vol_check:
return vols
# Check volume name
if not vols:
raise exceptions.TestError("No volume in pool %s" % pool_name)
# Check volume
for vol_path in six.itervalues(vols):
if not utils_misc.wait_for(lambda: os.path.exists(vol_path), timeout,
text='Waiting for %s show up' % vol_path):
raise exceptions.TestError("Volume path %s not exist" % vol_path)
return vols
def get_iothreadsinfo(vm_name, options=None):
"""
Parse domain iothreadinfo.
:param vm_name: Domain name
:return: The dict of domain iothreads
::
# virsh iothreadinfo vm2
IOThread ID CPU Affinity
---------------------------------------------------
2 3
1 0-4
4 0-7
3 0-7
The function return a dict like:
::
{'2': '3', '1': '0-4', '4': '0-7', '3': '0-7'}
"""
info_dict = {}
ret = virsh.iothreadinfo(vm_name, options,
debug=True, ignore_status=True)
if ret.exit_status:
logging.warning(ret.stderr_text.strip())
return info_dict
info_list = re.findall(r"(\d+) +(\S+)", ret.stdout_text, re.M)
for info in info_list:
info_dict[info[0]] = info[1]
return info_dict
def virsh_cmd_has_option(cmd, option, raise_skip=True):
"""
Check whether virsh command support given option.
:param cmd: Virsh command name
:param option: Virsh command option
:param raise_skip: Whether to raise an exception when the option is not found
:return: True/False or raise TestSkipError
"""
found = False
if virsh.has_command_help_match(cmd, option):
found = True
msg = "command '%s' has option '%s': %s" % (cmd, option, str(found))
if not found and raise_skip:
raise exceptions.TestSkipError(msg)
else:
logging.debug(msg)
return found
def create_secret(params, remote_args=None):
"""
Create a secret with 'virsh secret-define'
:param params: Test run params
:param remote_args: Parameters for remote host
:return: UUID of the secret
"""
sec_usage_type = params.get("sec_usage", "volume")
sec_desc = params.get("sec_desc", "secret_description")
sec_ephemeral = params.get("sec_ephemeral", "no") == "yes"
sec_private = params.get("sec_private", "no") == "yes"
sec_uuid = params.get("sec_uuid", "")
sec_volume = params.get("sec_volume", "/path/to/volume")
sec_name = params.get("sec_name", "secret_name")
sec_target = params.get("sec_target", "secret_target")
supporting_usage_types = ['volume', 'ceph', 'iscsi', 'tls', 'vtpm']
if sec_usage_type not in supporting_usage_types:
raise exceptions.TestError("Supporting secret usage types are: %s" %
supporting_usage_types)
# prepare secret xml
sec_xml = secret_xml.SecretXML("no", "yes")
# set common attributes
sec_xml.description = sec_desc
sec_xml.usage = sec_usage_type
if sec_ephemeral:
sec_xml.secret_ephmeral = "yes"
if sec_private:
sec_xml.secret_private = "yes"
if sec_uuid:
sec_xml.uuid = sec_uuid
sec_xml.usage = sec_usage_type
# set specific attributes for different usage type
if sec_usage_type in ['volume']:
sec_xml.volume = sec_volume
if sec_usage_type in ['ceph', 'tls', 'vtpm']:
sec_xml.usage_name = sec_name
if sec_usage_type in ['iscsi']:
sec_xml.target = sec_target
sec_xml.xmltreefile.write()
logging.debug("The secret xml is: %s" % sec_xml)
# define the secret and get its uuid
if remote_args:
server_ip = remote_args.get("remote_ip", "")
server_user = remote_args.get("remote_user", "")
server_pwd = remote_args.get("remote_pwd", "")
if not all([server_ip, server_user, server_pwd]):
raise exceptions.TestError("remote_[ip|user|pwd] are necessary!")
remote_virsh_session = virsh.VirshPersistent(**remote_args)
remote.scp_to_remote(server_ip, '22', server_user, server_pwd,
sec_xml.xml, sec_xml.xml, limit="",
log_filename=None, timeout=600, interface=None)
ret = remote_virsh_session.secret_define(sec_xml.xml)
remote_virsh_session.close_session()
else:
ret = virsh.secret_define(sec_xml.xml)
check_exit_status(ret)
try:
sec_uuid = re.findall(r".+\S+(\ +\S+)\ +.+\S+",
ret.stdout_text)[0].lstrip()
except IndexError:
raise exceptions.TestError("Fail to get newly created secret uuid")
return sec_uuid
def modify_vm_iface(vm_name, oper, iface_dict, index=0, virsh_instance=virsh):
"""
Modify interface xml and do operations
:param vm_name: name of vm
:param oper: Operation to do; currently there are 2 choices:
1. "update_iface": modify iface according to the dict and update it in the vm
2. "get_xml": modify iface according to the dict and return the xml
:param iface_dict: The dict that stores the items to be updated, like
iface_driver, driver_host, driver_guest and so on
:param index: interface index in xml
:param virsh_instance: virsh instance object. To modify a remote vm's iface
via a remote virsh instance, it is suggested to set "oper"
to "get_xml", scp the xml to the remote host, then sync it up
"""
vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name, virsh_instance=virsh_instance)
xml_devices = vmxml.devices
iface_type = iface_dict.get('type')
try:
iface_index = xml_devices.index(
xml_devices.by_device_tag("interface")[index])
iface = xml_devices[iface_index]
except IndexError:
iface = interface.Interface(iface_type)
iface_driver = iface_dict.get('driver')
driver_host = iface_dict.get('driver_host')
driver_guest = iface_dict.get('driver_guest')
iface_model = iface_dict.get('model')
iface_rom = iface_dict.get('rom')
iface_inbound = iface_dict.get('inbound')
iface_outbound = iface_dict.get('outbound')
iface_link = iface_dict.get('link')
iface_source = iface_dict.get('source')
iface_target = iface_dict.get('target')
iface_addr = iface_dict.get('addr')
iface_filter = iface_dict.get('filter')
boot_order = iface_dict.get('boot')
iface_backend = iface_dict.get('backend')
iface_mac = iface_dict.get('mac')
iface_mtu = iface_dict.get('mtu')
iface_alias = iface_dict.get('alias')
iface_teaming = iface_dict.get('teaming')
iface_virtualport_type = iface_dict.get('virtualport_type')
iface_filter_parameters = iface_dict.get('filter_parameters')
iface_port = iface_dict.get('port')
del_addr = iface_dict.get('del_addr')
del_rom = iface_dict.get('del_rom')
del_filter = iface_dict.get('del_filter')
del_port = iface_dict.get('del_port')
del_mac = "yes" == iface_dict.get('del_mac', "no")
if iface_type:
iface.type_name = iface_type
if iface_driver:
iface.driver = iface.new_driver(
driver_attr=eval(iface_driver),
driver_host=eval(driver_host) if driver_host else {},
driver_guest=eval(driver_guest) if driver_guest else {})
if iface_model:
iface.model = iface_model
if del_rom:
iface.del_rom()
if iface_rom:
iface.rom = eval(iface_rom)
if iface_inbound:
iface.bandwidth = iface.new_bandwidth(
inbound=eval(iface_inbound),
outbound=eval(iface_outbound) if iface_outbound else {})
if iface_link:
iface.link_state = iface_link
if iface_source:
iface.source = eval(iface_source)
if iface_target:
iface.target = eval(iface_target)
if iface_addr:
iface.address = iface.new_iface_address(
**{"attrs": eval(iface_addr)})
if del_addr:
iface.del_address()
if del_filter:
iface.del_filterref()
if del_port:
iface.del_port()
if del_mac:
iface.del_mac_address()
if iface_filter:
if iface_filter_parameters:
iface.filterref = iface.new_filterref(name=iface_filter, parameters=iface_filter_parameters)
else:
iface.filterref = iface.new_filterref(name=iface_filter)
if boot_order:
iface.boot = boot_order
if iface_backend:
iface.backend = eval(iface_backend)
if iface_mac:
iface.mac_address = iface_mac
if iface_mtu:
iface.mtu = eval(iface_mtu)
if iface_alias:
iface.alias = eval(iface_alias)
if iface_teaming:
iface.teaming = eval(iface_teaming)
if iface_virtualport_type:
iface.virtualport_type = iface_virtualport_type
if iface_port:
iface.port = eval(iface_port)
if oper == "update_iface":
vmxml.devices = xml_devices
vmxml.xmltreefile.write()
vmxml.sync()
elif oper == "get_xml":
logging.info("iface xml is %s", iface)
return iface.xml
def change_boot_order(vm_name, device_tag, boot_order, index=0):
"""
Change the order for disk/interface device
:param vm_name: name of vm
:param device_tag: the device tag in xml
:param boot_order: the boot order need to be changed to
:param index: which device to change
"""
vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
vmxml.remove_all_boots()
xml_devices = vmxml.devices
device_index = xml_devices.index(xml_devices.by_device_tag(device_tag)[index])
device = xml_devices[device_index]
device.boot = boot_order
vmxml.devices = xml_devices
vmxml.xmltreefile.write()
vmxml.sync()
def check_machine_type_arch(machine_type):
"""
Determine whether a case designed for a special machine
type can be run on the host
:param machine_type: the machine type the case is designed for
:raise exceptions.TestSkipError: Skip if the host arch does
not match the machine type
"""
if not machine_type:
return
arch_machine_map = {'x86_64': ['pc', 'q35'],
'ppc64le': ['pseries'],
's390x': ['s390-ccw-virtio'],
'aarch64': ['arm64-mmio:virt', 'arm64-pci:virt']}
arch = platform.machine()
if machine_type not in arch_machine_map[arch]:
raise exceptions.TestSkipError("This machine type '%s' is not "
"supported on the host with "
"arch '%s'" % (machine_type,
arch))
def customize_libvirt_config(params,
config_type="virtqemud",
remote_host=False,
extra_params=None,
is_recover=False,
config_object=None):
"""
Customize configuration files for libvirt
on local and remote host if needed
:param params: A dict used to configure
:param config_type: "libvirtd" for /etc/libvirt/libvirtd.conf
"qemu" for /etc/libvirt/qemu.conf
"sysconfig" for /etc/sysconfig/libvirtd
"guestconfig" for /etc/sysconfig/libvirt-guests
:param remote_host: True to also set up the remote host,
False otherwise
:param extra_params: Parameters for remote host
:param is_recover: True for recovering the configuration and
config_object should be provided
False for configuring specified libvirt configuration
:param config_object: an existing utils_config.LibvirtConfigCommon object
:return: utils_config.LibvirtConfigCommon object
"""
# Hardcode config_type to virtqemud under modularity daemon mode when config_type="libvirtd"
# On the contrary, hardcode it to "libvirtd" when config_type="virt*d".
# Otherwise accept config_type as it is.
if utils_split_daemons.is_modular_daemon():
if config_type in ["libvirtd"]:
config_type = "virtqemud"
else:
if config_type in ["virtqemud", "virtproxyd", "virtnetworkd",
"virtstoraged", "virtinterfaced", "virtnodedevd",
"virtnwfilterd", "virtsecretd"]:
config_type = "libvirtd"
config_list_support = ["libvirtd", "qemu", "sysconfig", "guestconfig",
"virtqemud", "virtproxyd", "virtnetworkd",
"virtstoraged", "virtinterfaced", "virtnodedevd",
"virtnwfilterd", "virtsecretd", "libvirt"]
if config_type not in config_list_support:
logging.debug("'%s' is not in the support list '%s'",
config_type, config_list_support)
return None
else:
logging.debug("The '%s' config file will be updated.", config_type)
if not is_recover:
target_conf = None
# Handle local
if not params or not isinstance(params, dict):
return None
target_conf = utils_config.get_conf_obj(config_type)
#if params and isinstance(params, dict):
for key, value in params.items():
target_conf[key] = value
logging.debug("The '%s' config file is updated with:\n %s",
target_conf.conf_path, params)
libvirtd = utils_libvirtd.Libvirtd()
libvirtd.restart()
obj_conf = target_conf
else:
if not isinstance(config_object, utils_config.LibvirtConfigCommon):
return None
# Handle local libvirtd
config_object.restore()
libvirtd = utils_libvirtd.Libvirtd()
libvirtd.restart()
obj_conf = config_object
if remote_host:
server_ip = extra_params.get("server_ip", "")
server_user = extra_params.get("server_user", "")
server_pwd = extra_params.get("server_pwd", "")
local_path = obj_conf.conf_path
remote.scp_to_remote(server_ip, '22', server_user, server_pwd,
local_path, local_path, limit="",
log_filename=None, timeout=600, interface=None)
remotely_control_libvirtd(server_ip, server_user,
server_pwd, action='restart',
status_error='no')
return obj_conf
def check_logfile(search_str, log_file, str_in_log=True,
cmd_parms=None, runner_on_target=None):
"""
Check if the given string exists in the log file
:param search_str: the string to be searched
:param log_file: the given file
:param str_in_log: True if the file should include the given string,
otherwise, False
:param cmd_parms: The parms for remote executing
:param runner_on_target: Remote runner
:raise: test.fail when the result is not expected
"""
cmd = "grep -E '%s' %s" % (search_str, log_file)
if not (cmd_parms and runner_on_target):
cmdRes = process.run(cmd, shell=True, ignore_status=True)
else:
cmdRes = remote_old.run_remote_cmd(cmd, cmd_parms, runner_on_target)
if str_in_log == bool(int(cmdRes.exit_status)):
raise exceptions.TestFail("The string '{}' {} included in {}"
.format(search_str, "is not" if str_in_log else "is",
log_file))
else:
logging.debug('Log check for "%s" PASS', search_str)
def check_qemu_cmd_line(content, err_ignore=False,
remote_params=None, runner_on_target=None):
"""
Check the specified content in the qemu command line
:param content: the desired string to search
:param err_ignore: True to return False when fail
False to raise exception when fail
:param remote_params: The params for remote executing
:param runner_on_target: Remote runner
:return: True if exist, False otherwise
"""
cmd = 'pgrep -a qemu'
if not(remote_params or runner_on_target):
qemu_line = process.run(cmd, shell=True).stdout_text
else:
cmd_result = remote_old.run_remote_cmd(cmd, remote_params, runner_on_target)
qemu_line = cmd_result.stdout
if not re.search(r'%s' % content, qemu_line):
if err_ignore:
return False
else:
raise exceptions.TestFail("Expected '%s' was not found in "
"qemu command line" % content)
return True
def check_cmd_output(cmd, content, err_ignore=False, session=None):
"""
Check the specified content in the output of the cmd
:param cmd: the cmd wants to check
:param content: the desired string or list to search
:param err_ignore: True to return False when fail
False to raise exception when fail
:param session: ShellSession object of VM or remote host
:return: True if exist, False otherwise
:raise: exceptions.TestFail when content is not found in the output of cmd
"""
if cmd is None:
raise exceptions.TestFail("cmd can not be None")
s, cmd_output = utils_misc.cmd_status_output(cmd, shell=True,
ignore_status=err_ignore, session=session)
pattern_list = [content] if not isinstance(content, list) else content
for item in pattern_list:
if not re.search(r'%s' % item, cmd_output):
if err_ignore:
return False
else:
raise exceptions.TestFail("Expected '%s' was not found in "
"output of '%s'" % (item, cmd))
def get_disk_alias(vm, source_file=None):
"""
Get alias name of disk with given source file
:param vm: VM object
:param source_file: domain disk source file
:return: None if not find, else return alias name
"""
if vm.is_alive():
ori_vmxml = vm_xml.VMXML.new_from_dumpxml(vm.name)
disks = ori_vmxml.devices.by_device_tag('disk')
for disk in disks:
find_source = disk.xmltreefile.find('source') is not None
try:
if ((find_source and disk.source.attrs['file'] == source_file) or
(not find_source and not source_file)):
logging.info("Get alias name %s", disk.alias['name'])
return disk.alias['name']
except KeyError as e:
logging.info("Ignore error of source attr getting for file: %s" % e)
pass
return None
def check_domuuid_compliant_with_rfc4122(dom_uuid_value):
"""
Check the domain uuid format comply with RFC4122.
xxxxxxxx-xxxx-Axxx-Bxxx-xxxxxxxxxxxx
A should be the UUID version number; since this check expects RFC 4122
version 4 UUIDs, it should be the number 4.
B should be one of "8, 9, a or b" (the RFC 4122 variant field).
:param dom_uuid_value: value of domain uuid
:return: True or False indicate whether it is compliant with RFC 4122.
"""
dom_uuid_segments = dom_uuid_value.split('-')
return dom_uuid_segments[2].startswith('4') and dom_uuid_segments[3][0] in '89ab'
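# For example (illustrative value): '2c9686f5-2b10-45f2-9b29-1a06d6e9a6b7'
# passes (version nibble '4', variant nibble '9'), while a UUID whose third
# group does not start with '4' would not.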
def check_dumpxml(vm, content, err_ignore=False):
"""
Check the specified content in the VM dumpxml
:param vm: VM object
:param content: the desired string to search
:param err_ignore: True to return False when fail
False to raise exception when fail
:return: True if exist, False otherwise
"""
v_xml = vm_xml.VMXML.new_from_dumpxml(vm.name)
with open(v_xml.xml) as xml_f:
if content not in xml_f.read():
if err_ignore:
return False
else:
raise exceptions.TestFail("Expected '%s' was not found in "
"%s's xml" % (content, vm.name))
return True
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for the operating system path specification implementation."""
import platform
import unittest
from dfvfs.path import os_path_spec
from tests.path import test_lib
class OSPathSpecTest(test_lib.PathSpecTestCase):
"""Tests for the operating system path specification implementation."""
def testInitialize(self):
"""Tests the path specification initialization."""
if platform.system() == u'Windows':
test_location = u'C:\\test'
else:
test_location = u'/test'
path_spec = os_path_spec.OSPathSpec(location=test_location)
self.assertIsNotNone(path_spec)
with self.assertRaises(ValueError):
_ = os_path_spec.OSPathSpec(
location=test_location, parent=self._path_spec)
with self.assertRaises(ValueError):
_ = os_path_spec.OSPathSpec(location=test_location, bogus=u'BOGUS')
def testComparable(self):
"""Tests the path specification comparable property."""
if platform.system() == u'Windows':
test_location = u'C:\\test'
else:
test_location = u'/test'
path_spec = os_path_spec.OSPathSpec(location=test_location)
self.assertIsNotNone(path_spec)
expected_comparable = u'\n'.join([
u'type: OS, location: {0:s}'.format(test_location),
u''])
self.assertEqual(path_spec.comparable, expected_comparable)
def testIsSystemLevel(self):
"""Tests the IsSystemLevel function."""
if platform.system() == u'Windows':
test_location = u'C:\\test'
else:
test_location = u'/test'
path_spec = os_path_spec.OSPathSpec(location=test_location)
self.assertIsNotNone(path_spec)
self.assertTrue(path_spec.IsSystemLevel())
if __name__ == '__main__':
unittest.main()
|
'''
Created on 22/09/2017
@author: Frank Delporte
'''
import thread
import Tkinter as tk
import tkFont
import time
from ButtonHandler import *
from KeyReader import *
from PongGui import *
from SlideShow import *
from ConsoleMenu import *
from Legend import *
try:
import keyboard # pip install keyboard
keyAvailable = True
except ImportError:
keyAvailable = False
class PiGameConsole():
# general vars
pongBusy = False
slideShowBusy = False
keepRunning = False
# frame holders
menu = None
legend = None
win = None
slideShow = None
pong = None
def __init__(self):
print("PiGameConsole initiated")
def preventScreensaver(self):
while (self.keepRunning):
if keyAvailable == True:
keyboard.write('A', delay=0)
time.sleep(10)
def checkInput(self):
btn = ButtonHandler()
key = KeyReader()
while (self.keepRunning):
if btn.getButton(2) == True or key.getKey("1") == True:
#print("Controller red")
if self.slideShowBusy == True and self.slideShow != None:
self.slideShow.stop()
self.startPong()
elif self.pongBusy == True and self.pong != None:
self.pong.stop()
self.startSlideShow()
if btn.getButton(1) == True or key.getKey("2") == True:
#print("Controller green")
print("Controller green")
if btn.getButton(4) == True or key.getKey("3") == True:
#print("Player1 red")
if self.pongBusy == True and self.pong != None:
self.pong.move_player(1, "up")
if btn.getButton(3) == True or key.getKey("4") == True:
#print("Player1 green")
if self.pongBusy == True and self.pong != None:
self.pong.move_player(1, "down")
if btn.getButton(6) == True or key.getKey("5") == True:
#print("Player2 red")
if self.pongBusy == True and self.pong != None:
self.pong.move_player(2, "up")
if btn.getButton(5) == True or key.getKey("6") == True:
#print("Player2 green")
if self.pongBusy == True and self.pong != None:
self.pong.move_player(2, "down")
time.sleep(0.1)
def startGUI(self):
# Start the GUI
self.win = tk.Tk()
self.win.title("PI Gaming console")
self.win.attributes("-fullscreen", True)
self.exitButton = tk.Button(self.win, text = "Quit", command = self.exitProgram)
self.exitButton.grid(row = 0, column = 0, sticky=tk.NW, padx=(10, 0), pady=(10, 0))
self.menu = ConsoleMenu(self.win, 300, 250)
self.menu.grid(row = 1, column = 0, sticky=tk.NW, padx=(10, 10), pady=(0, 0))
self.legend = Legend(self.win, 300, 400)
self.legend.grid(row = 2, column = 0, sticky=tk.NW, padx=(10, 10), pady=(0, 0))
self.startSlideShow()
self.win.mainloop()
def exitProgram(self):
self.keepRunning = False
print "Finished"
self.win.quit()
def clearWindow(self):
if self.slideShow != None:
self.slideShow.stop()
self.slideShow = None
if self.pong != None:
self.pong.stop()
self.pong = None
self.slideShowBusy = False
self.pongBusy = False
time.sleep(0.5)
def startSlideShow(self):
self.clearWindow()
self.menu.setSelected(1)
self.legend.setLegend(1)
self.slideShow = SlideShow(self.win, self.win.winfo_screenwidth() - 300, self.win.winfo_screenheight() - 50)
self.slideShow.grid(row = 0, column = 2, rowspan = 3, sticky=tk.NSEW, pady=(10, 10))
self.slideShowBusy = True
def startPong(self):
self.clearWindow()
self.menu.setSelected(2)
self.legend.setLegend(2)
self.pong = PongGui(self.win, self.win.winfo_screenwidth() - 300, self.win.winfo_screenheight() - 50)
self.pong.grid(row = 0, column = 2, rowspan = 3, sticky=tk.NSEW, pady=(10, 10))
self.pongBusy = True
if __name__ == "__main__":
piGameConsole = PiGameConsole()
# Start a thread to check if a game is running
piGameConsole.keepRunning = True
thread.start_new_thread(piGameConsole.preventScreensaver, ())
thread.start_new_thread(piGameConsole.checkInput, ())
piGameConsole.startGUI()
|
import datetime
from socket import *
from sportorg.utils.time import time_to_hhmmss
"""
Format of WDB data package
- length is 1772 bytes
1) 36b text block at the beginning
2 4132500 0 0 3974600\n
bib - finish_time - disqual_status - 0 - start_time
2) binary part
bytes 128-131 - card number
bytes 136-139 - qty of punches
bytes 144-147 - start in card
bytes 152-155 - finish in card
starting from b172: 8b blocks * 200
- byte 1 control number
- bytes 4-7 punch time
"""
def int_to_time(value):
""" convert value from 1/100 s to time """
today = datetime.datetime.now()
ret = datetime.datetime(
today.year,
today.month,
today.day,
value // 360000 % 24,
(value % 360000) // 6000,
(value % 6000) // 100,
(value % 100) * 10000,
)
return ret
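# Illustrative conversion (hypothetical value): 4132500 hundredths of a second
# corresponds to 11:28:45 of today, i.e.
# int_to_time(4132500).strftime('%H:%M:%S') == '11:28:45'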
host = 'localhost'
port = 1212
addr = (host, port)
udp_socket = socket(AF_INET, SOCK_DGRAM)
udp_socket.bind(addr)
# main loop
while True:
print('wait data...')
# recvfrom - receiving of data
conn, addr = udp_socket.recvfrom(1772)
print('client addr: ', addr)
print('data: ', conn)
# string = ''
# for i in conn:
# string += str( hex(i)) + '-'
# print(string)
text_array = bytes(conn[0:34]).decode().split()
bib = text_array[0]
result = int_to_time(int(text_array[1]))
status = text_array[2]
start = int_to_time(int(text_array[4]))
byteorder = 'little'
punch_qty = int.from_bytes(conn[136:140], byteorder)
card_start = int_to_time(int.from_bytes(conn[144:148], byteorder))
card_finish = int_to_time(int.from_bytes(conn[152:156], byteorder))
init_offset = 172
punches = []
for i in range(punch_qty):
cp = int.from_bytes(
conn[init_offset + i * 8 : init_offset + i * 8 + 1], byteorder
)
time = int_to_time(
int.from_bytes(
conn[init_offset + i * 8 + 4 : init_offset + i * 8 + 8], byteorder
)
)
punches.append((cp, time_to_hhmmss(time)))
print('bib=' + bib + ' result=' + time_to_hhmmss(result) + ' punches=')
print(punches)
    # sendto - response
udp_socket.sendto(b'message received by the server', addr)
# udp_socket.close()
|
#! /usr/bin/env python3
import sys
import re
def uso():
    """Print the program's usage instructions."""
    uso = """
    This program generates the correct insertion order for a database.
    Pass the file names on the command line. To print occurrences of
    circular or missing references, use the -v option.
    """
    print(uso)
def imprime(dic):
"""Imprime um dicionário de listas num formato melhorado."""
for key in dic.keys():
print(key + ": ", end="")
print(', '.join(dic[key]))
def procura(dic, ordem, verb):
"""Procura uma ordem correta de inclusão enquanto existirem tabelas cujas todas as referências já
tenham sido processadas. Quando em modo detalhado imprime a ocorrência de referência circular ou
inexistente."""
if len(dic) == 0:
""" Busca finalizada. """
return ordem
lst = set()
""" Tabelas a serem removidas nesta iteração. """
for table in dic.keys():
if len(dic[table]) == 0:
lst.add(table)
if len(lst) == 0:
""" Caso todas as tabelas restantes possuam referências a serem processadas restaram apenas
referências inexistentes ou circulares."""
if verb:
print("\nAs tabelas a seguir possuem referência circular ou inexistente:")
imprime(dic)
print("\nO resultado obtido foi:")
return ordem
for key in lst:
ordem.append(key)
del(dic[key])
for table in dic.keys():
for key in lst:
if key in dic[table]:
dic[table].remove(key)
    return procura(dic, ordem, verb)
def procedencia(lst, verb):
"""Gera uma lista de procedencia para cada tabela.
Inicialmente a função iria trabalhar com o arquivo separado por linhas,
mas como o arquivo pode ser inteiro feito em apenas uma linha modifiquei
a estratégia para uma varredura de estados. Não me preocupei com erros de
sintaxe.
Lista de estados:
0: Procurando por uma instrução CREATE
1: Verificando se é uma instrução de criação de tabela TABLE
2: Procurando o nome da tabela que está sendo criada, contando que diferente de ";"
3: Procurando se é uma referência a criação de chave estrangeira FOREIGN
4: Verificando se é uma referência a criação de chave estrangeira KEY
5: Procurando as referências REFERENCES
6: Procurando o nome da tabela de referência, contando que diferente de ";"
7: Próxima palavra é o novo delimitador
final: Caso ocorra uma instrução com o delimitador encerra a criação da tabela
"""
delimitador = ";"
status = 0
"""Estado inicial do autômato."""
proc = {}
""" Dicionário de procedentes. """
tabela = ""
""" Tabela sendo montada no estado atual. """
fim = re.compile(".*" + delimitador + ".*")
""" Expressão regular que verifica a ocorrência de um delimitador ";".
Supondo que o delimitador não seja alterado. """
create = re.compile(".*[cC][rR][eE][aA][tT][eE]$")
""" Expressão regular que verifica se a palavra atual termina com CREATE. """
delim = re.compile(".*[dD][eE][lL][iI][mM][iI][tT][eE][rR]$")
""" Expressão regular que verifica se a palavra atual termina com DELIMITER. """
for p in lst:
if status == 0 and create.match(p):
status = 1
elif status == 0 and delim.match(p):
status = 7
elif status == 1:
if p.lower() == "table":
status = 2
else:
status = 0
elif status == 2 and p != delimitador and len(p.replace("`","")) > 0:
tabela = p.replace("`","")
if tabela in proc and verb:
print("TABELA " + tabela + " RECRIADA")
proc[tabela] = set()
status = 3
elif status == 3 and p.lower() == "foreign":
status = 4
elif status == 4:
if p.lower() == "key":
status = 5
else:
status = 0
elif status == 5 and p.lower() == "references":
status = 6
elif status == 6 and p != delimitador and len(p.replace("`","")) > 0:
ref = p.replace("`","")
proc[tabela].add(ref)
status = 3
elif status == 7:
delimitador = p
fim = re.compile(".*" + re.escape(delimitador) + ".*")
status = 0
elif fim.match(p):
if create.match(p):
status = 1
else:
status = 0
tabela = ""
return proc
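# Usage sketch (hypothetical two-table dump, tokenised with str.split() as in main()):
#   words = ("CREATE TABLE a ( id INT ) ; "
#            "CREATE TABLE b ( FOREIGN KEY (x) REFERENCES a (id) ) ;").split()
#   procedencia(words, False)  ->  {'a': set(), 'b': {'a'}}
#   procura(...) then appends 'a' before 'b' to the restore order.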
def main(argv):
    """ Handle the command line and call the program's functions. """
    veb = False
    if "-v" in argv:
        veb = True
    ordem = []
    """ List that will hold the restore order of the files. """
if len(argv) > 0:
for arquivo in argv:
if arquivo == "-v":
continue
ordem = []
if len(argv) > 1:
print("\nARQUIVO: " + arquivo)
with open(arquivo, "r") as myfile:
text=myfile.read().split()
dic = procedencia(text, veb)
procura(dic, ordem, veb)
print('.sql\n'.join(ordem), end=".sql\n")
else:
uso()
if __name__ == "__main__":
main(sys.argv[1:])
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class Ipv6ExpressRouteCircuitPeeringConfig(Model):
"""Contains IPv6 peering config.
:param primary_peer_address_prefix: The primary address prefix.
:type primary_peer_address_prefix: str
:param secondary_peer_address_prefix: The secondary address prefix.
:type secondary_peer_address_prefix: str
:param microsoft_peering_config: The Microsoft peering configuration.
:type microsoft_peering_config:
~azure.mgmt.network.v2017_08_01.models.ExpressRouteCircuitPeeringConfig
:param route_filter: The reference of the RouteFilter resource.
:type route_filter: ~azure.mgmt.network.v2017_08_01.models.RouteFilter
:param state: The state of peering. Possible values are: 'Disabled' and
'Enabled'. Possible values include: 'Disabled', 'Enabled'
:type state: str or
~azure.mgmt.network.v2017_08_01.models.ExpressRouteCircuitPeeringState
"""
_attribute_map = {
'primary_peer_address_prefix': {'key': 'primaryPeerAddressPrefix', 'type': 'str'},
'secondary_peer_address_prefix': {'key': 'secondaryPeerAddressPrefix', 'type': 'str'},
'microsoft_peering_config': {'key': 'microsoftPeeringConfig', 'type': 'ExpressRouteCircuitPeeringConfig'},
'route_filter': {'key': 'routeFilter', 'type': 'RouteFilter'},
'state': {'key': 'state', 'type': 'str'},
}
def __init__(self, primary_peer_address_prefix=None, secondary_peer_address_prefix=None, microsoft_peering_config=None, route_filter=None, state=None):
super(Ipv6ExpressRouteCircuitPeeringConfig, self).__init__()
self.primary_peer_address_prefix = primary_peer_address_prefix
self.secondary_peer_address_prefix = secondary_peer_address_prefix
self.microsoft_peering_config = microsoft_peering_config
self.route_filter = route_filter
self.state = state
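# Usage sketch (illustrative values only):
#   config = Ipv6ExpressRouteCircuitPeeringConfig(
#       primary_peer_address_prefix='3FFE:FFFF:0:CD30::/126',
#       secondary_peer_address_prefix='3FFE:FFFF:0:CD30::4/126',
#       state='Enabled')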
|
# defivelo-intranet -- Outil métier pour la gestion du Défi Vélo
# Copyright (C) 2016 Didier Raboud <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from factory import Faker
from factory.django import DjangoModelFactory
from factory.fuzzy import FuzzyChoice
from apps.common import DV_STATES
from ..models import Organization
class OrganizationFactory(DjangoModelFactory):
class Meta:
model = Organization
name = Faker("name")
address_street = Faker("street_name")
address_no = Faker("random_int", min=0, max=999)
address_zip = Faker("random_int", min=1000, max=9999)
address_city = Faker("city")
address_canton = FuzzyChoice(DV_STATES)
|
# -*- coding: utf-8 -*-
from gettext import gettext as _
LEVEL1 = [
1,
_('Divisions'),
['lineasDepto'],
[],
[
(_('Adilabad'), 1, _('Adilabad'), _("It's in the north")),
(_('Nizamabad'), 1, _('Nizamabad'), _("It's in the northwest")),
(_('Karimnagar'), 1, _('Karimnagar'), _("It's in the north")),
(_('Medak'), 1, _('Medak'), _("It's in the northwest")),
(_('Warangal'), 1, _('Warangal'), _("It's in the north")),
(_('Rangareddy'), 1, _('Rangareddy'), _("It's in the west")),
(_('Nalgonda'), 1, _('Nalgonda'), _("It's in the center")),
(_('Khammam'), 1, _('Khammam'), _("It's in the northeast")),
(_('Mahabubnagar'), 1, _('Mahabubnagar'), _("It's in the west")),
(_('Kamulu'), 1, _('Kamulu'), _("It's in the southwest")),
(_('Ananthapur'), 1, _('Ananthapur'), _("It's in the southwest")),
(_('Kadapa'), 1, _('Kadapa'), _("It's in the south")),
(_('Chittoor'), 1, _('Chittoor'), _("It's in the south")),
(_('Nelluru'), 1, _('Nelluru'), _("It's in the south")),
(_('Prakasam'), 1, _('Prakasam'), _("It's in the south")),
(_('Guntur'), 1, _('Guntur'), _("It's in the southeast")),
(_('Krishna'), 1, _('Krishna'), _("It's in the east")),
(_('West Godavari'), 1, _('West Godavari'), _("It's in the east")),
(_('East Godavari'), 1, _('East Godavari'), _("It's in the east")),
(_('Vishakhapatnam'), 1, _('Vishakhapatnam'), _("It's in the northeast")),
(_('Vijayanagaram'), 1, _('Vijayanagaram'), _("It's in the northeast")),
(_('Srikakulam'), 1, _('Srikakulam'), _("It's in the northeast"))
]
]
LEVEL2 = [
2,
_('Taluka Headquarters'),
['lineasDepto', 'capitales'],
[],
[
(_('Hyderabad'), _("It's easy")),
(_('Adilabad'), _("It's easy")),
(_('Nizamabab'), _("It's easy")),
(_('Karimnagar'), _("It's easy")),
(_('Sangareddi'), _("It's easy")),
(_('Warangal'), _("It's easy")),
(_('Nalgonda'), _("It's easy")),
(_('Mahabubnagar'), _("It's easy")),
(_('Kurnool'), _("It's easy")),
(_('Anantapur'), _("It's easy")),
(_('Cuddapah'), _("It's easy")),
(_('Nellore'), _("It's easy")),
(_('Chittoor'), _("It's easy")),
(_('Ongole'), _("It's easy")),
(_('Guntur'), _("It's easy")),
(_('Khammam'), _("It's easy")),
(_('Mangiripudi'), _("It's easy")),
(_('Krishna'), _("It's easy")),
(_('Kakinada'), _("It's easy")),
(_('Vishakhapatnam'), _("It's easy")),
(_('Vijayanagaram'), _("It's easy")),
(_('Srikakulam'), _("It's easy"))
]
]
LEVEL3 = [
2,
_('Cities'),
['lineasDepto', 'capitales', 'ciudades'],
[],
[
#(_('New Delhi'), _('It is in %s') % _('New Delhi')),
]
]
LEVELS = [LEVEL1, LEVEL2]
|
from contextlib import contextmanager
from io import StringIO
import tempfile
import logging
import time
import os
log = logging.getLogger("harpoon.helpers")
@contextmanager
def a_temp_file():
"""Yield the name of a temporary file and ensure it's removed after use"""
filename = None
try:
tmpfile = tempfile.NamedTemporaryFile(delete=False)
filename = tmpfile.name
yield tmpfile
finally:
if filename and os.path.exists(filename):
os.remove(filename)
def until(timeout=10, step=0.5, action=None, silent=False):
"""Yield until timeout"""
yield
started = time.time()
while True:
if action and not silent:
log.info(action)
if time.time() - started > timeout:
if action and not silent:
log.error("Timedout %s", action)
return
else:
time.sleep(step)
yield
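# Usage sketch (assumes `path` is defined by the caller):
#   for _ in until(timeout=5, step=0.1, action="waiting for the file"):
#       if os.path.exists(path):
#           break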
class memoized_property(object):
"""Decorator to make a descriptor that memoizes it's value"""
def __init__(self, func):
self.func = func
self.name = func.__name__
self.cache_name = "_{0}".format(self.name)
def __get__(self, instance=None, owner=None):
if not instance:
return self
if not getattr(instance, self.cache_name, None):
setattr(instance, self.cache_name, self.func(instance))
return getattr(instance, self.cache_name)
def __set__(self, instance, value):
setattr(instance, self.cache_name, value)
def __delete__(self, instance):
if hasattr(instance, self.cache_name):
delattr(instance, self.cache_name)
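# Usage sketch (hypothetical class and loader):
#   class Config(object):
#       @memoized_property
#       def data(self):
#           return expensive_load()
#   cfg = Config()
#   cfg.data  # first access calls expensive_load(); later accesses return the cached value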
def write_to(output, txt):
"""Write some text to some output"""
if isinstance(txt, bytes) and isinstance(output, StringIO):
output.write(txt.decode("utf-8", "replace"))
elif (
isinstance(txt, str)
and hasattr(output, "file")
and "b" in getattr(output.file, "mode", "w")
):
output.write(txt.encode("utf-8", "replace"))
else:
output.write(txt)
|
import os
import errno
import subprocess
def mkdir_p(path):
'''Recursively make all directories in a path'''
try:
os.makedirs(path)
except OSError as exc:
        if exc.errno == errno.EEXIST:
            pass
        else:
            raise
def dict_sorted(d):
'''Returns dict sorted by key as a list of tuples'''
return sorted(d.items(), key=lambda x: x[0])
def command_check_error(command, cwd=".", fail_on_stderr=True):
    '''Run a command, piping out both stdout and stderr.
    An exception will be raised if the command returns a nonzero exit code.
    An exception will also be raised if the command writes anything to stderr.
    This behavior can be disabled by passing False as the argument to
    fail_on_stderr.
    '''
proc = subprocess.Popen(command, cwd=cwd,
stdout=None, # print to terminal
stderr=subprocess.PIPE)
dup = subprocess.Popen(["tee","/dev/stderr"], stdin=proc.stderr,
stdout=subprocess.PIPE, # catch errors from first
stderr=None) # also print them to terminal
errors = str(dup.stdout.read(),'utf8')
proc.communicate()
if proc.returncode != 0:
raise Exception("{0} returned exit code {1}".format(command,proc.returncode))
elif fail_on_stderr and errors:
raise Exception("There was error output running {0}:\n{1}".format(command,errors))
|
import sys
from termcolor import colored
class Messages():
LOGFILE = "git-events.log"
#Status and operations
RUNNING = 'Successfully started gitevents'
WAS_RUNNING = 'Gitevents is already running'
NOT_RUNNING = 'Git-events is not running'
STOPPED = 'Successfully stopped gitevents'
#Errors
INCOMPATIBLE_OS = 'Your OS is not compatible with Git events'
GITHUB_API_ERROR = 'I\'m unable to access your GitHub account, please check your internet connection and GitHub access token'
GITHUB_LOGIN_ERROR = 'Unable to login. Wrong username/password ?'
CONFIGURATION_ERROR = 'Please configure cfg.ini before starting'
#Success
ACCESS_TOKEN_SET = 'Successfully set access token'
INTERVAL_SET = 'Successfully set polling interval'
#Setup
INPUT_USERNAME = 'Please type your Github account name: '
INPUT_PASSWORD = 'Please type your Github account password: '
SETUP_FAIL = 'Failed to create Github access token'
SETUP_SUCCESS = 'Successfully saved access token. You are all set.'
def abort(self, message=""):
print(colored(message, 'red'))
sys.exit(1)
def print_success(self, message=""):
print(colored(message, 'green'))
def log(self, message=""):
print(message)
def use_logfile(self):
sys.stdout = open(self.LOGFILE, 'w')
sys.stderr = open(self.LOGFILE, 'w')
class MessagesProvider():
def __init__(self):
self.instance = None
def get(self):
if self.instance is None:
self.instance = Messages()
return self.instance
messages_provider = MessagesProvider()
|
from eclcli.common import command
from eclcli.common import utils
from ..networkclient.common import utils as to_obj
class ListCommonFunction(command.Lister):
def get_parser(self, prog_name):
parser = super(ListCommonFunction, self).get_parser(prog_name)
parser.add_argument(
'--description',
metavar="description",
help="filter by description")
parser.add_argument(
'--id',
metavar="id",
help="filter by id")
parser.add_argument(
'--name',
metavar="name",
help="filter by name")
return parser
def take_action(self, parsed_args):
network_client = self.app.client_manager.network
columns = (
'id',
'name',
'status',
)
column_headers = (
'ID',
'Name',
'Status',
)
search_opts = dict()
if parsed_args.description:
search_opts.update({"description": parsed_args.description})
if parsed_args.id:
search_opts.update({"id": parsed_args.id})
if parsed_args.name:
search_opts.update({"name": parsed_args.name})
data = [to_obj.CommonFunction(cfp)
for cfp in network_client.list_common_functions(
**search_opts).get('common_functions')]
return (column_headers,
(utils.get_item_properties(
s, columns,
) for s in data))
class ShowCommonFunction(command.ShowOne):
def get_parser(self, prog_name):
parser = super(ShowCommonFunction, self).get_parser(prog_name)
parser.add_argument(
'common_function_id',
metavar="COMMON_FUNCTION_ID",
help="ID of Common Function to show."
)
return parser
def take_action(self, parsed_args):
network_client = self.app.client_manager.network
common_function_id = parsed_args.common_function_id
dic = network_client.show_common_function(common_function_id).get('common_function')
columns = utils.get_columns(dic)
obj = to_obj.CommonFunction(dic)
data = utils.get_item_properties(
obj, columns,)
return columns, data
|
# vote.py
# Handles server-side logic for voting
# by Chad Wyszynski ([email protected])
import webapp2
from google.appengine.api import users
import datastore
import json
import logging
class RequestHandler(webapp2.RequestHandler):
# valid vote types
vote_types = {"up": 1, "down": -1}
# Handles POST vote/{article_id}
# returns: success - 200, update total votes for that article as JSON
# failure - error code with a short description of error in body
def post(self, article_id):
user = users.get_current_user()
if(user):
vote = self.extract_vote(self.request.body)
if(self.is_valid_vote(vote)):
votes = datastore.vote_article(article_id,
RequestHandler.vote_types[vote["vote"]],
user)
self.response.headers["Content-Type"] = "application/json"
self.response.write(json.dumps({"article": article_id,
"votes": votes}))
else:
logging.debug(vote)
print(vote)
                self.error(400) # bad request: vote must be "up" or "down"
self.response.write('Request must be {"vote": "up/down"}')
else:
self.error(401) # user must be logged in to vote
self.response.write('You must be logged in to vote')
# Checks if request_vote is a valid vote request
# request_vote - vote to check
# returns: true if valid request
def is_valid_vote(self, request_vote):
return "vote" in request_vote and \
request_vote["vote"] in RequestHandler.vote_types
# Extracts the "vote" request from the request body
# returns: dictionary representing json request; empty dictionary if
# unsuccessful
def extract_vote(self, request_body):
try:
            return json.loads(request_body)
        except ValueError: # unable to parse json
return {}
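# Example exchange (sketch, assuming a route like /vote/<article_id>):
#   POST /vote/42 with body {"vote": "up"} from a logged-in user
#     -> 200, body {"article": "42", "votes": <updated total from the datastore>}
#   Malformed body -> 400; not logged in -> 401.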
|
#!/usr/bin/env python
#
# Problem: Ticket Swapping
# Language: Python
# Author: KirarinSnow
# Usage: python thisfile.py <input.in >output.out
for case in range(int(raw_input())):
n, m = map(int, raw_input().split())
s = [map(int, raw_input().split()) for i in range(m)]
z = 0
do = {}
de = {}
o, e, p = map(list, zip(*s))
for i in range(m):
if o[i] not in do:
do[o[i]] = 0
do[o[i]] += p[i]
if e[i] not in de:
de[e[i]] = 0
de[e[i]] += p[i]
k = e[i]-o[i]
z += p[i]*(k*n-k*(k-1)/2)
o = sorted(list(set(o)))
e = sorted(list(set(e)))
q = []
c = 0
while len(o) > 0 or len(e) > 0:
if len(o) == 0 or e[0] < o[0]:
t = e.pop(0)
x = de[t]
while x > 0:
v = min(q[-1][1], x)
x -= v
q[-1][1] -= v
r = q[-1][0]
k = t-r
h = v*(k*n-k*(k-1)/2)
c += h
if q[-1][1] == 0:
q.pop()
else:
t = o.pop(0)
q.append([t, do[t]])
ans = (z-c)%1000002013
print "Case #%d: %s" % (case+1, ans)
|
# -*- coding: utf8 -*-
# Copyright (C) 2013 - Oscar Campos <[email protected]>
# This program is Free Software see LICENSE file for details
"""
This file is a wrapper for autopep8 library.
"""
import os
import sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../linting'))
import threading
from .autopep8_lib import autopep8
class AnacondaAutopep8(threading.Thread):
"""Wrapper class around native autopep8 implementation
"""
def __init__(self, settings, code, callback):
threading.Thread.__init__(self)
self.code = code
self.callback = callback
self.options, _ = autopep8.parse_args(self.parse_settings(settings))
def run(self):
self.callback(autopep8.fix_string(self.code, options=self.options))
def parse_settings(self, settings):
"""Map anaconda settings to autopep8 settings
"""
args = []
args += ['-a'] * settings.get('aggressive', 0)
if len(settings.get('autoformat_ignore', [])) > 0:
args += ['--ignore={}'.format(
','.join(settings.get('autoformat_ignore')))]
if len(settings.get('autoformat_select', [])) > 0:
args += ['--select={}'.format(
','.join(settings.get('autoformat_select')))]
args += ['--max-line-length={}'.format(
settings.get('pep8_max_line_length', 150))]
args += ['anaconda_rocks']
return args
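# Mapping sketch: settings {'aggressive': 2, 'pep8_max_line_length': 120}
# become the autopep8 arguments
#   ['-a', '-a', '--max-line-length=120', 'anaconda_rocks']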
|
import requests
from bloodon.accounts.social.providers.oauth2.views import (OAuth2Adapter,
OAuth2LoginView,
OAuth2CallbackView)
from .provider import GoogleProvider
class GoogleOAuth2Adapter(OAuth2Adapter):
provider_id = GoogleProvider.id
access_token_url = 'https://accounts.google.com/o/oauth2/token'
authorize_url = 'https://accounts.google.com/o/oauth2/auth'
profile_url = 'https://www.googleapis.com/oauth2/v1/userinfo'
def complete_login(self, request, token, **kwargs):
resp = requests.get(self.profile_url,
params={'access_token': token.token,
'alt': 'json'})
extra_data = resp.json()
provider = self.get_provider()
login = provider.social_login_from_response(request, extra_data)
return login
oauth2_login = OAuth2LoginView.adapter_view(GoogleOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(GoogleOAuth2Adapter)
|
# -*- encoding: utf-8 -*-
#
# Copyright (c) 2013 Paul Tagliamonte <[email protected]>
# Copyright (c) 2013 Julien Danjou <[email protected]>
# Copyright (c) 2013 Nicolas Dandrimont <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from hy.errors import HyError
from hy.models.lambdalist import HyLambdaListKeyword
from hy.models.expression import HyExpression
from hy.models.keyword import HyKeyword
from hy.models.integer import HyInteger
from hy.models.complex import HyComplex
from hy.models.string import HyString
from hy.models.symbol import HySymbol
from hy.models.float import HyFloat
from hy.models.list import HyList
from hy.models.dict import HyDict
from hy.macros import require, process
from hy.util import str_type
import hy.importer
import traceback
import importlib
import codecs
import ast
import sys
from collections import defaultdict
_compile_time_ns = {}
def compile_time_ns(module_name):
ns = _compile_time_ns.get(module_name)
if ns is None:
ns = {'hy': hy, '__name__': module_name}
_compile_time_ns[module_name] = ns
return ns
_stdlib = {}
def load_stdlib():
import hy.core
for module in hy.core.STDLIB:
mod = importlib.import_module(module)
for e in mod.EXPORTS:
_stdlib[e] = module
class HyCompileError(HyError):
def __init__(self, exception, traceback=None):
self.exception = exception
self.traceback = traceback
def __str__(self):
if isinstance(self.exception, HyTypeError):
return str(self.exception)
if self.traceback:
tb = "".join(traceback.format_tb(self.traceback)).strip()
else:
tb = "No traceback available. 😟"
return("Internal Compiler Bug 😱\n⤷ %s: %s\nCompilation traceback:\n%s"
% (self.exception.__class__.__name__,
self.exception, tb))
class HyTypeError(TypeError):
def __init__(self, expression, message):
super(HyTypeError, self).__init__(message)
self.expression = expression
def __str__(self):
return (super(HyTypeError, self).__str__() + " (line %s, column %d)"
% (self.expression.start_line,
self.expression.start_column))
_compile_table = {}
def ast_str(foobar):
if sys.version_info[0] >= 3:
return str(foobar)
try:
return str(foobar)
except UnicodeEncodeError:
pass
enc = codecs.getencoder('punycode')
foobar, _ = enc(foobar)
return "hy_%s" % (str(foobar).replace("-", "_"))
def builds(_type):
unpythonic_chars = ["-"]
really_ok = ["-"]
if any(x in unpythonic_chars for x in str_type(_type)):
if _type not in really_ok:
raise TypeError("`build' needs to be *post* translated strings, "
"Mr. / Mrs. Hypser. -- `%s' sucks." % (_type))
def _dec(fn):
_compile_table[_type] = fn
return fn
return _dec
class Result(object):
"""
Smart representation of the result of a hy->AST compilation
This object tries to reconcile the hy world, where everything can be used
as an expression, with the Python world, where statements and expressions
need to coexist.
To do so, we represent a compiler result as a list of statements `stmts`,
terminated by an expression context `expr`. The expression context is used
when the compiler needs to use the result as an expression.
Results are chained by addition: adding two results together returns a
Result representing the succession of the two Results' statements, with
the second Result's expression context.
We make sure that a non-empty expression context does not get clobbered by
adding more results, by checking accesses to the expression context. We
assume that the context has been used, or deliberately ignored, if it has
been accessed.
The Result object is interoperable with python AST objects: when an AST
object gets added to a Result object, it gets converted on-the-fly.
"""
__slots__ = ("imports", "stmts", "temp_variables",
"_expr", "__used_expr", "contains_yield")
def __init__(self, *args, **kwargs):
if args:
# emulate kw-only args for future bits.
raise TypeError("Yo: Hacker: don't pass me real args, dingus")
self.imports = defaultdict(set)
self.stmts = []
self.temp_variables = []
self._expr = None
self.contains_yield = False
self.__used_expr = False
# XXX: Make sure we only have AST where we should.
for kwarg in kwargs:
if kwarg not in ["imports", "contains_yield", "stmts", "expr",
"temp_variables"]:
raise TypeError(
"%s() got an unexpected keyword argument '%s'" % (
self.__class__.__name__, kwarg))
setattr(self, kwarg, kwargs[kwarg])
@property
def expr(self):
self.__used_expr = True
return self._expr
@expr.setter
def expr(self, value):
self.__used_expr = False
self._expr = value
def add_imports(self, mod, imports):
"""Autoimport `imports` from `mod`"""
self.imports[mod].update(imports)
def is_expr(self):
"""Check whether I am a pure expression"""
return self._expr and not (self.imports or self.stmts)
@property
def force_expr(self):
"""Force the expression context of the Result.
If there is no expression context, we return a "None" expression.
"""
if self.expr:
return self.expr
# Spoof the position of the last statement for our generated None
lineno = 0
col_offset = 0
if self.stmts:
lineno = self.stmts[-1].lineno
col_offset = self.stmts[-1].col_offset
return ast.Name(id=ast_str("None"),
arg=ast_str("None"),
ctx=ast.Load(),
lineno=lineno,
col_offset=col_offset)
# XXX: Likely raise Exception here - this will assertionfail
# pypy since the ast will be out of numerical order.
def expr_as_stmt(self):
"""Convert the Result's expression context to a statement
This is useful when we want to use the stored expression in a
statement context (for instance in a code branch).
We drop ast.Names if they are appended to statements, as they
can't have any side effect. "Bare" names still get converted to
statements.
If there is no expression context, return an empty result.
"""
if self.expr and not (isinstance(self.expr, ast.Name) and self.stmts):
return Result() + ast.Expr(lineno=self.expr.lineno,
col_offset=self.expr.col_offset,
value=self.expr)
return Result()
def rename(self, new_name):
"""Rename the Result's temporary variables to a `new_name`.
We know how to handle ast.Names and ast.FunctionDefs.
"""
new_name = ast_str(new_name)
for var in self.temp_variables:
if isinstance(var, ast.Name):
var.id = new_name
var.arg = new_name
elif isinstance(var, ast.FunctionDef):
var.name = new_name
else:
raise TypeError("Don't know how to rename a %s!" % (
var.__class__.__name__))
self.temp_variables = []
def __add__(self, other):
# If we add an ast statement, convert it first
if isinstance(other, ast.stmt):
return self + Result(stmts=[other])
# If we add an ast expression, clobber the expression context
if isinstance(other, ast.expr):
return self + Result(expr=other)
if isinstance(other, ast.excepthandler):
return self + Result(stmts=[other])
if not isinstance(other, Result):
raise TypeError("Can't add %r with non-compiler result %r" % (
self, other))
# Check for expression context clobbering
if self.expr and not self.__used_expr:
traceback.print_stack()
print("Bad boy clobbered expr %s with %s" % (
ast.dump(self.expr),
ast.dump(other.expr)))
# Fairly obvious addition
result = Result()
result.imports = other.imports
result.stmts = self.stmts + other.stmts
result.expr = other.expr
result.temp_variables = other.temp_variables
result.contains_yield = False
if self.contains_yield or other.contains_yield:
result.contains_yield = True
return result
def __str__(self):
return (
"Result(imports=[%s], stmts=[%s], "
"expr=%s, contains_yield=%s)"
) % (
", ".join(ast.dump(x) for x in self.imports),
", ".join(ast.dump(x) for x in self.stmts),
ast.dump(self.expr) if self.expr else None,
self.contains_yield
)
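# Chaining sketch (some_assign / some_name stand for hypothetical ast nodes):
#   r = Result(stmts=[some_assign]) + Result(expr=some_name)
#   r.stmts -> [some_assign]
#   r.expr  -> some_name (the second Result's expression context wins)
# Adding a raw ast.stmt or ast.expr converts it to a Result on the fly (see __add__).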
def _branch(results):
"""Make a branch out of a list of Result objects
This generates a Result from the given sequence of Results, forcing each
expression context as a statement before the next result is used.
We keep the expression context of the last argument for the returned Result
"""
results = list(results)
ret = Result()
for result in results[:-1]:
ret += result
ret += result.expr_as_stmt()
for result in results[-1:]:
ret += result
return ret
def _raise_wrong_args_number(expression, error):
raise HyTypeError(expression,
error % (expression.pop(0),
len(expression)))
def checkargs(exact=None, min=None, max=None):
def _dec(fn):
def checker(self, expression):
if exact is not None and (len(expression) - 1) != exact:
_raise_wrong_args_number(
expression, "`%%s' needs %d arguments, got %%d" % exact)
if min is not None and (len(expression) - 1) < min:
_raise_wrong_args_number(
expression,
"`%%s' needs at least %d arguments, got %%d" % (min))
if max is not None and (len(expression) - 1) > max:
_raise_wrong_args_number(
expression,
"`%%s' needs at most %d arguments, got %%d" % (max))
return fn(self, expression)
return checker
return _dec
class HyASTCompiler(object):
def __init__(self, module_name):
self.anon_fn_count = 0
self.anon_var_count = 0
self.imports = defaultdict(set)
self.module_name = module_name
if not module_name.startswith("hy.core"):
# everything in core needs to be explicit.
load_stdlib()
def get_anon_var(self):
self.anon_var_count += 1
return "_hy_anon_var_%s" % self.anon_var_count
def get_anon_fn(self):
self.anon_fn_count += 1
return "_hy_anon_fn_%d" % self.anon_fn_count
def update_imports(self, result):
"""Retrieve the imports from the result object"""
for mod in result.imports:
self.imports[mod].update(result.imports[mod])
def imports_as_stmts(self, expr):
"""Convert the Result's imports to statements"""
ret = Result()
for module, names in self.imports.items():
if None in names:
ret += self.compile([
HyExpression([
HySymbol("import"),
HySymbol(module),
]).replace(expr)
])
names = sorted(name for name in names if name)
if names:
ret += self.compile([
HyExpression([
HySymbol("import"),
HyList([
HySymbol(module),
HyList([HySymbol(name) for name in names])
])
]).replace(expr)
])
self.imports = defaultdict(set)
return ret.stmts
def compile_atom(self, atom_type, atom):
if atom_type in _compile_table:
ret = _compile_table[atom_type](self, atom)
if not isinstance(ret, Result):
ret = Result() + ret
return ret
def compile(self, tree):
try:
tree = process(tree, self.module_name)
_type = type(tree)
ret = self.compile_atom(_type, tree)
if ret:
self.update_imports(ret)
return ret
except HyCompileError:
# compile calls compile, so we're going to have multiple raise
# nested; so let's re-raise this exception, let's not wrap it in
# another HyCompileError!
raise
except Exception as e:
raise HyCompileError(e, sys.exc_info()[2])
raise HyCompileError(Exception("Unknown type: `%s'" % _type))
def _compile_collect(self, exprs):
"""Collect the expression contexts from a list of compiled expression.
This returns a list of the expression contexts, and the sum of the
Result objects passed as arguments.
"""
compiled_exprs = []
ret = Result()
for expr in exprs:
ret += self.compile(expr)
compiled_exprs.append(ret.force_expr)
return compiled_exprs, ret
def _compile_branch(self, exprs):
return _branch(self.compile(expr) for expr in exprs)
def _parse_lambda_list(self, exprs):
""" Return FunctionDef parameter values from lambda list."""
ret = Result()
args = []
defaults = []
varargs = None
kwargs = None
lambda_keyword = None
for expr in exprs:
if isinstance(expr, HyLambdaListKeyword):
if expr not in expr._valid_types:
raise HyTypeError(expr, "{0} is not a valid "
"lambda-keyword.".format(repr(expr)))
if expr == "&rest" and lambda_keyword is None:
lambda_keyword = expr
elif expr == "&optional":
if len(defaults) > 0:
raise HyTypeError(expr,
"There can only be &optional "
"arguments or one &key argument")
lambda_keyword = expr
elif expr == "&key":
lambda_keyword = expr
elif expr == "&kwargs":
lambda_keyword = expr
else:
raise HyTypeError(expr,
"{0} is in an invalid "
"position.".format(repr(expr)))
# we don't actually care about this token, so we set
# our state and continue to the next token...
continue
if lambda_keyword is None:
args.append(expr)
elif lambda_keyword == "&rest":
if varargs:
raise HyTypeError(expr,
"There can only be one "
"&rest argument")
varargs = str(expr)
elif lambda_keyword == "&key":
if type(expr) != HyDict:
raise HyTypeError(expr,
"There can only be one &key "
"argument")
else:
if len(defaults) > 0:
raise HyTypeError(expr,
"There can only be &optional "
"arguments or one &key argument")
# As you can see, Python has a funny way of
# defining keyword arguments.
it = iter(expr)
for k, v in zip(it, it):
args.append(k)
ret += self.compile(v)
defaults.append(ret.force_expr)
elif lambda_keyword == "&optional":
if isinstance(expr, HyList):
if not len(expr) == 2:
raise HyTypeError(expr,
"optional args should be bare names "
"or 2-item lists")
k, v = expr
else:
k = expr
v = HySymbol("None").replace(k)
args.append(k)
ret += self.compile(v)
defaults.append(ret.force_expr)
elif lambda_keyword == "&kwargs":
if kwargs:
raise HyTypeError(expr,
"There can only be one "
"&kwargs argument")
kwargs = str(expr)
return ret, args, defaults, varargs, kwargs
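    # Mapping sketch (hypothetical lambda lists):
    #   [x y &optional [z 12]]  ->  def f(x, y, z=12)
    #   [args &rest more]       ->  def f(args, *more)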
def _storeize(self, name):
"""Return a new `name` object with an ast.Store() context"""
if isinstance(name, Result):
if not name.is_expr():
raise TypeError("Can't assign to a non-expr")
name = name.expr
if isinstance(name, (ast.Tuple, ast.List)):
typ = type(name)
new_elts = []
for x in name.elts:
new_elts.append(self._storeize(x))
new_name = typ(elts=new_elts)
elif isinstance(name, ast.Name):
new_name = ast.Name(id=name.id, arg=name.arg)
elif isinstance(name, ast.Subscript):
new_name = ast.Subscript(value=name.value, slice=name.slice)
elif isinstance(name, ast.Attribute):
new_name = ast.Attribute(value=name.value, attr=name.attr)
else:
raise TypeError("Can't assign to a %s object" % type(name))
new_name.ctx = ast.Store()
ast.copy_location(new_name, name)
return new_name
@builds(list)
def compile_raw_list(self, entries):
ret = self._compile_branch(entries)
ret += ret.expr_as_stmt()
return ret
def _render_quoted_form(self, form, level):
"""
Render a quoted form as a new HyExpression.
`level` is the level of quasiquoting of the current form. We can
unquote if level is 0.
Returns a three-tuple (`imports`, `expression`, `splice`).
The `splice` return value is used to mark `unquote-splice`d forms.
        We need to distinguish them as we want to concatenate them instead of
just nesting them.
"""
if level == 0:
if isinstance(form, HyExpression):
if form and form[0] in ("unquote", "unquote_splice"):
if len(form) != 2:
raise HyTypeError(form,
("`%s' needs 1 argument, got %s" %
                                       (form[0], len(form) - 1)))
return set(), form[1], (form[0] == "unquote_splice")
if isinstance(form, HyExpression):
if form and form[0] == "quasiquote":
level += 1
if form and form[0] in ("unquote", "unquote_splice"):
level -= 1
name = form.__class__.__name__
imports = set([name])
if isinstance(form, (HyList, HyDict)):
if not form:
contents = HyList()
else:
# If there are arguments, they can be spliced
# so we build a sum...
contents = HyExpression([HySymbol("+"), HyList()])
for x in form:
f_imports, f_contents, splice = self._render_quoted_form(x,
level)
imports.update(f_imports)
if splice:
to_add = f_contents
else:
to_add = HyList([f_contents])
contents.append(to_add)
return imports, HyExpression([HySymbol(name),
contents]).replace(form), False
elif isinstance(form, HySymbol):
return imports, HyExpression([HySymbol(name),
HyString(form)]).replace(form), False
return imports, HyExpression([HySymbol(name),
form]).replace(form), False
@builds("quote")
@builds("quasiquote")
@checkargs(exact=1)
def compile_quote(self, entries):
if entries[0] == "quote":
# Never allow unquoting
level = float("inf")
else:
level = 0
imports, stmts, splice = self._render_quoted_form(entries[1], level)
ret = self.compile(stmts)
ret.add_imports("hy", imports)
return ret
@builds("unquote")
@builds("unquote_splicing")
def compile_unquote(self, expr):
raise HyTypeError(expr,
"`%s' can't be used at the top-level" % expr[0])
@builds("eval")
@checkargs(exact=1)
def compile_eval(self, expr):
expr.pop(0)
ret = self.compile(HyExpression(
[HySymbol("hy_eval")] + expr + [HyExpression([HySymbol("locals")])]
+ [HyString(self.module_name)]).replace(expr)
)
ret.add_imports("hy.importer", ["hy_eval"])
return ret
@builds("do")
@builds("progn")
def compile_progn(self, expression):
expression.pop(0)
return self._compile_branch(expression)
@builds("throw")
@builds("raise")
@checkargs(max=1)
def compile_throw_expression(self, expr):
expr.pop(0)
ret = Result()
if expr:
ret += self.compile(expr.pop(0))
# Use ret.expr to get a literal `None`
ret += ast.Raise(
lineno=expr.start_line,
col_offset=expr.start_column,
type=ret.expr,
exc=ret.expr,
inst=None,
tback=None,
cause=None)
return ret
@builds("try")
def compile_try_expression(self, expr):
expr.pop(0) # try
try:
body = expr.pop(0)
except IndexError:
body = []
# (try something…)
body = self.compile(body)
var = self.get_anon_var()
name = ast.Name(id=ast_str(var), arg=ast_str(var),
ctx=ast.Store(),
lineno=expr.start_line,
col_offset=expr.start_column)
expr_name = ast.Name(id=ast_str(var), arg=ast_str(var),
ctx=ast.Load(),
lineno=expr.start_line,
col_offset=expr.start_column)
returnable = Result(expr=expr_name, temp_variables=[expr_name, name])
body += ast.Assign(targets=[name],
value=body.force_expr,
lineno=expr.start_line,
col_offset=expr.start_column)
body = body.stmts
if not body:
body = [ast.Pass(lineno=expr.start_line,
col_offset=expr.start_column)]
orelse = []
finalbody = []
handlers = []
handler_results = Result()
for e in expr:
if not len(e):
raise HyTypeError(e, "Empty list not allowed in `try'")
if e[0] in (HySymbol("except"), HySymbol("catch")):
handler_results += self._compile_catch_expression(e, name)
handlers.append(handler_results.stmts.pop())
elif e[0] == HySymbol("else"):
if orelse:
raise HyTypeError(
e,
"`try' cannot have more than one `else'")
else:
orelse = self._compile_branch(e[1:])
# XXX tempvar magic
orelse += orelse.expr_as_stmt()
orelse = orelse.stmts
elif e[0] == HySymbol("finally"):
if finalbody:
raise HyTypeError(
e,
"`try' cannot have more than one `finally'")
else:
finalbody = self._compile_branch(e[1:])
# XXX tempvar magic
finalbody += finalbody.expr_as_stmt()
finalbody = finalbody.stmts
else:
raise HyTypeError(e, "Unknown expression in `try'")
# Using (else) without (except) is verboten!
if orelse and not handlers:
raise HyTypeError(
e,
"`try' cannot have `else' without `except'")
# (try) or (try BODY)
# Generate a default handler for Python >= 3.3 and pypy
if not handlers and not finalbody and not orelse:
handlers = [ast.ExceptHandler(
lineno=expr.start_line,
col_offset=expr.start_column,
type=None,
name=None,
body=[ast.Pass(lineno=expr.start_line,
col_offset=expr.start_column)])]
ret = handler_results
if sys.version_info[0] >= 3 and sys.version_info[1] >= 3:
# Python 3.3 features a merge of TryExcept+TryFinally into Try.
return ret + ast.Try(
lineno=expr.start_line,
col_offset=expr.start_column,
body=body,
handlers=handlers,
orelse=orelse,
finalbody=finalbody) + returnable
if finalbody:
if handlers:
return ret + ast.TryFinally(
lineno=expr.start_line,
col_offset=expr.start_column,
body=[ast.TryExcept(
lineno=expr.start_line,
col_offset=expr.start_column,
handlers=handlers,
body=body,
orelse=orelse)],
finalbody=finalbody) + returnable
return ret + ast.TryFinally(
lineno=expr.start_line,
col_offset=expr.start_column,
body=body,
finalbody=finalbody) + returnable
return ret + ast.TryExcept(
lineno=expr.start_line,
col_offset=expr.start_column,
handlers=handlers,
body=body,
orelse=orelse) + returnable
@builds("except")
@builds("catch")
def magic_internal_form(self, expr):
raise HyTypeError(expr,
"Error: `%s' can't be used like that." % (expr[0]))
def _compile_catch_expression(self, expr, var):
catch = expr.pop(0) # catch
try:
exceptions = expr.pop(0)
except IndexError:
exceptions = HyList()
# exceptions catch should be either:
# [[list of exceptions]]
# or
# [variable [list of exceptions]]
# or
# [variable exception]
# or
# [exception]
# or
# []
if not isinstance(exceptions, HyList):
raise HyTypeError(exceptions,
"`%s' exceptions list is not a list" % catch)
if len(exceptions) > 2:
raise HyTypeError(exceptions,
"`%s' exceptions list is too long" % catch)
# [variable [list of exceptions]]
# let's pop variable and use it as name
if len(exceptions) == 2:
name = exceptions.pop(0)
if not isinstance(name, HySymbol):
raise HyTypeError(
exceptions,
"Exception storage target name must be a symbol.")
if sys.version_info[0] >= 3:
# Python3 features a change where the Exception handler
# moved the name from a Name() to a pure Python String type.
#
# We'll just make sure it's a pure "string", and let it work
                # its magic.
name = ast_str(name)
else:
# Python2 requires an ast.Name, set to ctx Store.
name = self._storeize(self.compile(name))
else:
name = None
try:
exceptions_list = exceptions.pop(0)
except IndexError:
exceptions_list = []
if isinstance(exceptions_list, list):
if len(exceptions_list):
# [FooBar BarFoo] → catch Foobar and BarFoo exceptions
elts, _type = self._compile_collect(exceptions_list)
_type += ast.Tuple(elts=elts,
lineno=expr.start_line,
col_offset=expr.start_column,
ctx=ast.Load())
else:
                # [] → all exceptions caught
_type = Result()
elif isinstance(exceptions_list, HySymbol):
_type = self.compile(exceptions_list)
else:
raise HyTypeError(exceptions,
"`%s' needs a valid exception list" % catch)
body = self._compile_branch(expr)
body += ast.Assign(targets=[var],
value=body.force_expr,
lineno=expr.start_line,
col_offset=expr.start_column)
body += body.expr_as_stmt()
body = body.stmts
if not body:
body = [ast.Pass(lineno=expr.start_line,
col_offset=expr.start_column)]
# use _type.expr to get a literal `None`
return _type + ast.ExceptHandler(
lineno=expr.start_line,
col_offset=expr.start_column,
type=_type.expr,
name=name,
body=body)
@builds("if")
@checkargs(min=2, max=3)
def compile_if(self, expression):
expression.pop(0)
cond = self.compile(expression.pop(0))
body = self.compile(expression.pop(0))
orel = Result()
if expression:
orel = self.compile(expression.pop(0))
# We want to hoist the statements from the condition
ret = cond
if body.stmts or orel.stmts:
# We have statements in our bodies
# Get a temporary variable for the result storage
var = self.get_anon_var()
name = ast.Name(id=ast_str(var), arg=ast_str(var),
ctx=ast.Store(),
lineno=expression.start_line,
col_offset=expression.start_column)
# Store the result of the body
body += ast.Assign(targets=[name],
value=body.force_expr,
lineno=expression.start_line,
col_offset=expression.start_column)
# and of the else clause
orel += ast.Assign(targets=[name],
value=orel.force_expr,
lineno=expression.start_line,
col_offset=expression.start_column)
# Then build the if
ret += ast.If(test=ret.force_expr,
body=body.stmts,
orelse=orel.stmts,
lineno=expression.start_line,
col_offset=expression.start_column)
# And make our expression context our temp variable
expr_name = ast.Name(id=ast_str(var), arg=ast_str(var),
ctx=ast.Load(),
lineno=expression.start_line,
col_offset=expression.start_column)
ret += Result(expr=expr_name, temp_variables=[expr_name, name])
else:
# Just make that an if expression
ret += ast.IfExp(test=ret.force_expr,
body=body.force_expr,
orelse=orel.force_expr,
lineno=expression.start_line,
col_offset=expression.start_column)
return ret
@builds("print")
def compile_print_expression(self, expr):
call = expr.pop(0) # print
values, ret = self._compile_collect(expr)
if sys.version_info[0] >= 3:
call = self.compile(call)
ret += call
ret += ast.Call(func=call.expr,
args=values,
keywords=[],
starargs=None,
kwargs=None,
lineno=expr.start_line,
col_offset=expr.start_column)
else:
ret += ast.Print(
lineno=expr.start_line,
col_offset=expr.start_column,
dest=None,
values=values,
nl=True)
return ret
@builds("break")
def compile_break_expression(self, expr):
ret = ast.Break(lineno=expr.start_line,
col_offset=expr.start_column)
return ret
@builds("continue")
def compile_continue_expression(self, expr):
ret = ast.Continue(lineno=expr.start_line,
col_offset=expr.start_column)
return ret
@builds("assert")
@checkargs(1)
def compile_assert_expression(self, expr):
expr.pop(0) # assert
e = expr.pop(0)
ret = self.compile(e)
ret += ast.Assert(test=ret.force_expr,
msg=None,
lineno=e.start_line,
col_offset=e.start_column)
return ret
@builds("global")
@checkargs(1)
def compile_global_expression(self, expr):
expr.pop(0) # global
e = expr.pop(0)
return ast.Global(names=[ast_str(e)],
lineno=e.start_line,
col_offset=e.start_column)
@builds("yield")
@checkargs(max=1)
def compile_yield_expression(self, expr):
expr.pop(0)
ret = Result(contains_yield=True)
value = None
if expr != []:
ret += self.compile(expr.pop(0))
value = ret.force_expr
ret += ast.Yield(
value=value,
lineno=expr.start_line,
col_offset=expr.start_column)
return ret
@builds("import")
def compile_import_expression(self, expr):
def _compile_import(expr, module, names=None, importer=ast.Import):
if not names:
names = [ast.alias(name=ast_str(module), asname=None)]
ret = importer(lineno=expr.start_line,
col_offset=expr.start_column,
module=ast_str(module),
names=names,
level=0)
return Result() + ret
expr.pop(0) # index
rimports = Result()
while len(expr) > 0:
iexpr = expr.pop(0)
if isinstance(iexpr, HySymbol):
rimports += _compile_import(expr, iexpr)
continue
if isinstance(iexpr, HyList) and len(iexpr) == 1:
rimports += _compile_import(expr, iexpr.pop(0))
continue
if isinstance(iexpr, HyList) and iexpr:
module = iexpr.pop(0)
entry = iexpr[0]
if isinstance(entry, HyKeyword) and entry == HyKeyword(":as"):
if not len(iexpr) == 2:
raise HyTypeError(iexpr,
"garbage after aliased import")
iexpr.pop(0) # :as
alias = iexpr.pop(0)
names = [ast.alias(name=ast_str(module),
asname=ast_str(alias))]
rimports += _compile_import(expr, ast_str(module), names)
continue
if isinstance(entry, HyList):
names = []
while entry:
sym = entry.pop(0)
if entry and isinstance(entry[0], HyKeyword):
entry.pop(0)
alias = ast_str(entry.pop(0))
else:
alias = None
names.append(ast.alias(name=ast_str(sym),
asname=alias))
rimports += _compile_import(expr, module,
names, ast.ImportFrom)
continue
raise HyTypeError(
entry,
"Unknown entry (`%s`) in the HyList" % (entry)
)
return rimports
@builds("get")
@checkargs(2)
def compile_index_expression(self, expr):
expr.pop(0) # index
val = self.compile(expr.pop(0)) # target
sli = self.compile(expr.pop(0)) # slice
return val + sli + ast.Subscript(
lineno=expr.start_line,
col_offset=expr.start_column,
value=val.force_expr,
slice=ast.Index(value=sli.force_expr),
ctx=ast.Load())
@builds("slice")
@checkargs(min=1, max=4)
def compile_slice_expression(self, expr):
expr.pop(0) # index
val = self.compile(expr.pop(0)) # target
low = Result()
if expr != []:
low = self.compile(expr.pop(0))
high = Result()
if expr != []:
high = self.compile(expr.pop(0))
step = Result()
if expr != []:
step = self.compile(expr.pop(0))
# use low.expr, high.expr and step.expr to use a literal `None`.
return val + low + high + step + ast.Subscript(
lineno=expr.start_line,
col_offset=expr.start_column,
value=val.force_expr,
slice=ast.Slice(lower=low.expr,
upper=high.expr,
step=step.expr),
ctx=ast.Load())
@builds("assoc")
@checkargs(3)
def compile_assoc_expression(self, expr):
expr.pop(0) # assoc
# (assoc foo bar baz) => foo[bar] = baz
target = self.compile(expr.pop(0))
key = self.compile(expr.pop(0))
val = self.compile(expr.pop(0))
return target + key + val + ast.Assign(
lineno=expr.start_line,
col_offset=expr.start_column,
targets=[
ast.Subscript(
lineno=expr.start_line,
col_offset=expr.start_column,
value=target.force_expr,
slice=ast.Index(value=key.force_expr),
ctx=ast.Store())],
value=val.force_expr)
@builds("with_decorator")
@checkargs(min=1)
def compile_decorate_expression(self, expr):
expr.pop(0) # with-decorator
fn = self.compile(expr.pop(-1))
if not fn.stmts or not isinstance(fn.stmts[-1], ast.FunctionDef):
raise HyTypeError(expr, "Decorated a non-function")
decorators, ret = self._compile_collect(expr)
fn.stmts[-1].decorator_list = decorators
return ret + fn
@builds("with")
@checkargs(min=2)
def compile_with_expression(self, expr):
expr.pop(0) # with
args = expr.pop(0)
if len(args) > 2 or len(args) < 1:
raise HyTypeError(expr, "with needs [arg (expr)] or [(expr)]")
args.reverse()
ctx = self.compile(args.pop(0))
thing = None
if args != []:
thing = self._storeize(self.compile(args.pop(0)))
body = self._compile_branch(expr)
body += body.expr_as_stmt()
if not body.stmts:
body += ast.Pass(lineno=expr.start_line,
col_offset=expr.start_column)
the_with = ast.With(context_expr=ctx.force_expr,
lineno=expr.start_line,
col_offset=expr.start_column,
optional_vars=thing,
body=body.stmts)
if sys.version_info[0] >= 3 and sys.version_info[1] >= 3:
the_with.items = [ast.withitem(context_expr=ctx.force_expr,
optional_vars=thing)]
return ctx + the_with
@builds(",")
def compile_tuple(self, expr):
expr.pop(0)
elts, ret = self._compile_collect(expr)
ret += ast.Tuple(elts=elts,
lineno=expr.start_line,
col_offset=expr.start_column,
ctx=ast.Load())
return ret
@builds("list_comp")
@checkargs(min=2, max=3)
def compile_list_comprehension(self, expr):
# (list-comp expr (target iter) cond?)
expr.pop(0)
expression = expr.pop(0)
tar_it = iter(expr.pop(0))
targets = zip(tar_it, tar_it)
cond = self.compile(expr.pop(0)) if expr != [] else Result()
generator_res = Result()
generators = []
for target, iterable in targets:
comp_target = self.compile(target)
target = self._storeize(comp_target)
generator_res += self.compile(iterable)
generators.append(ast.comprehension(
target=target,
iter=generator_res.force_expr,
ifs=[]))
if cond.expr:
generators[-1].ifs.append(cond.expr)
compiled_expression = self.compile(expression)
ret = compiled_expression + generator_res + cond
ret += ast.ListComp(
lineno=expr.start_line,
col_offset=expr.start_column,
elt=compiled_expression.force_expr,
generators=generators)
return ret
@builds("kwapply")
@checkargs(2)
def compile_kwapply_expression(self, expr):
expr.pop(0) # kwapply
call = self.compile(expr.pop(0))
kwargs = self.compile(expr.pop(0))
if type(call.expr) != ast.Call:
raise HyTypeError(expr, "kwapplying a non-call")
call.expr.kwargs = kwargs.force_expr
return kwargs + call
@builds("not")
@builds("~")
@checkargs(1)
def compile_unary_operator(self, expression):
ops = {"not": ast.Not,
"~": ast.Invert}
operator = expression.pop(0)
operand = self.compile(expression.pop(0))
operand += ast.UnaryOp(op=ops[operator](),
operand=operand.expr,
lineno=operator.start_line,
col_offset=operator.start_column)
return operand
@builds("require")
def compile_require(self, expression):
"""
TODO: keep track of what we've imported in this run and then
"unimport" it after we've completed `thing' so that we don't polute
other envs.
"""
expression.pop(0)
for entry in expression:
__import__(entry) # Import it fo' them macros.
require(entry, self.module_name)
return Result()
@builds("and")
@builds("or")
@checkargs(min=2)
def compile_logical_or_and_and_operator(self, expression):
ops = {"and": ast.And,
"or": ast.Or}
operator = expression.pop(0)
values, ret = self._compile_collect(expression)
ret += ast.BoolOp(op=ops[operator](),
lineno=operator.start_line,
col_offset=operator.start_column,
values=values)
return ret
@builds("=")
@builds("!=")
@builds("<")
@builds("<=")
@builds(">")
@builds(">=")
@builds("is")
@builds("in")
@builds("is_not")
@builds("not_in")
@checkargs(min=2)
def compile_compare_op_expression(self, expression):
ops = {"=": ast.Eq, "!=": ast.NotEq,
"<": ast.Lt, "<=": ast.LtE,
">": ast.Gt, ">=": ast.GtE,
"is": ast.Is, "is_not": ast.IsNot,
"in": ast.In, "not_in": ast.NotIn}
inv = expression.pop(0)
op = ops[inv]
ops = [op() for x in range(1, len(expression))]
e = expression[0]
exprs, ret = self._compile_collect(expression)
return ret + ast.Compare(left=exprs[0],
ops=ops,
comparators=exprs[1:],
lineno=e.start_line,
col_offset=e.start_column)
@builds("+")
@builds("%")
@builds("/")
@builds("//")
@builds("*")
@builds("**")
@builds("<<")
@builds(">>")
@builds("|")
@builds("^")
@builds("&")
@checkargs(min=2)
def compile_maths_expression(self, expression):
ops = {"+": ast.Add,
"/": ast.Div,
"//": ast.FloorDiv,
"*": ast.Mult,
"-": ast.Sub,
"%": ast.Mod,
"**": ast.Pow,
"<<": ast.LShift,
">>": ast.RShift,
"|": ast.BitOr,
"^": ast.BitXor,
"&": ast.BitAnd}
inv = expression.pop(0)
op = ops[inv]
ret = self.compile(expression.pop(0))
for child in expression:
left_expr = ret.force_expr
ret += self.compile(child)
right_expr = ret.force_expr
ret += ast.BinOp(left=left_expr,
op=op(),
right=right_expr,
lineno=child.start_line,
col_offset=child.start_column)
return ret
@builds("-")
@checkargs(min=1)
def compile_maths_expression_sub(self, expression):
if len(expression) > 2:
return self.compile_maths_expression(expression)
else:
arg = expression[1]
ret = self.compile(arg)
ret += ast.UnaryOp(op=ast.USub(),
operand=ret.force_expr,
lineno=arg.start_line,
col_offset=arg.start_column)
return ret
@builds("+=")
@builds("/=")
@builds("//=")
@builds("*=")
@builds("_=")
@builds("%=")
@builds("**=")
@builds("<<=")
@builds(">>=")
@builds("|=")
@builds("^=")
@builds("&=")
@checkargs(2)
def compile_augassign_expression(self, expression):
ops = {"+=": ast.Add,
"/=": ast.Div,
"//=": ast.FloorDiv,
"*=": ast.Mult,
"_=": ast.Sub,
"%=": ast.Mod,
"**=": ast.Pow,
"<<=": ast.LShift,
">>=": ast.RShift,
"|=": ast.BitOr,
"^=": ast.BitXor,
"&=": ast.BitAnd}
op = ops[expression[0]]
target = self._storeize(self.compile(expression[1]))
ret = self.compile(expression[2])
ret += ast.AugAssign(
target=target,
value=ret.force_expr,
op=op(),
lineno=expression.start_line,
col_offset=expression.start_column)
return ret
@checkargs(1)
def _compile_keyword_call(self, expression):
expression.append(expression.pop(0))
expression.insert(0, HySymbol("get"))
return self.compile(expression)
@builds(HyExpression)
def compile_expression(self, expression):
if expression == []:
return self.compile_list(expression)
fn = expression[0]
func = None
if isinstance(fn, HyKeyword):
return self._compile_keyword_call(expression)
if isinstance(fn, HyString):
ret = self.compile_atom(fn, expression)
if ret:
return ret
if fn in _stdlib:
self.imports[_stdlib[fn]].add(fn)
if fn.startswith("."):
# (.split "test test") -> "test test".split()
# Get the attribute name
ofn = fn
fn = HySymbol(ofn[1:])
fn.replace(ofn)
# Get the object we want to take an attribute from
func = self.compile(expression.pop(1))
# And get the attribute
func += ast.Attribute(lineno=fn.start_line,
col_offset=fn.start_column,
value=func.force_expr,
attr=ast_str(fn),
ctx=ast.Load())
if not func:
func = self.compile(fn)
args, ret = self._compile_collect(expression[1:])
ret += ast.Call(func=func.expr,
args=args,
keywords=[],
starargs=None,
kwargs=None,
lineno=expression.start_line,
col_offset=expression.start_column)
return func + ret
@builds("def")
@builds("setv")
@checkargs(2)
def compile_def_expression(self, expression):
return self._compile_assign(expression[1], expression[2],
expression.start_line,
expression.start_column)
def _compile_assign(self, name, result,
start_line, start_column):
result = self.compile(result)
if result.temp_variables and isinstance(name, HyString):
result.rename(name)
return result
ld_name = self.compile(name)
st_name = self._storeize(ld_name)
result += ast.Assign(
lineno=start_line,
col_offset=start_column,
targets=[st_name], value=result.force_expr)
result += ld_name
return result
@builds("foreach")
@checkargs(min=1)
def compile_for_expression(self, expression):
expression.pop(0) # for
target_name, iterable = expression.pop(0)
target = self._storeize(self.compile(target_name))
ret = Result()
orel = Result()
# (foreach [] body (else …))
if expression and expression[-1][0] == HySymbol("else"):
else_expr = expression.pop()
if len(else_expr) > 2:
raise HyTypeError(
else_expr,
"`else' statement in `foreach' is too long")
elif len(else_expr) == 2:
orel += self.compile(else_expr[1])
orel += orel.expr_as_stmt()
ret += self.compile(iterable)
body = self._compile_branch(expression)
body += body.expr_as_stmt()
ret += ast.For(lineno=expression.start_line,
col_offset=expression.start_column,
target=target,
iter=ret.force_expr,
body=body.stmts,
orelse=orel.stmts)
ret.contains_yield = body.contains_yield
return ret
@builds("while")
@checkargs(min=2)
def compile_while_expression(self, expr):
expr.pop(0) # "while"
ret = self.compile(expr.pop(0))
body = self._compile_branch(expr)
body += body.expr_as_stmt()
ret += ast.While(test=ret.force_expr,
body=body.stmts,
orelse=[],
lineno=expr.start_line,
col_offset=expr.start_column)
ret.contains_yield = body.contains_yield
return ret
@builds(HyList)
def compile_list(self, expression):
elts, ret = self._compile_collect(expression)
ret += ast.List(elts=elts,
ctx=ast.Load(),
lineno=expression.start_line,
col_offset=expression.start_column)
return ret
@builds("lambda")
@builds("fn")
@checkargs(min=1)
def compile_function_def(self, expression):
called_as = expression.pop(0)
arglist = expression.pop(0)
ret, args, defaults, stararg, kwargs = self._parse_lambda_list(arglist)
args = ast.arguments(
args=[ast.Name(arg=ast_str(x), id=ast_str(x),
ctx=ast.Param(),
lineno=x.start_line,
col_offset=x.start_column)
for x in args],
vararg=stararg,
kwarg=kwargs,
kwonlyargs=[],
kw_defaults=[],
defaults=defaults)
body = self._compile_branch(expression)
if not body.stmts and called_as == "lambda":
ret += ast.Lambda(
lineno=expression.start_line,
col_offset=expression.start_column,
args=args,
body=body.force_expr)
return ret
if body.expr:
if body.contains_yield:
body += body.expr_as_stmt()
else:
body += ast.Return(value=body.expr,
lineno=body.expr.lineno,
col_offset=body.expr.col_offset)
if not body.stmts:
body += ast.Pass(lineno=expression.start_line,
col_offset=expression.start_column)
name = self.get_anon_fn()
ret += ast.FunctionDef(name=name,
lineno=expression.start_line,
col_offset=expression.start_column,
args=args,
body=body.stmts,
decorator_list=[])
ast_name = ast.Name(id=name,
arg=name,
ctx=ast.Load(),
lineno=expression.start_line,
col_offset=expression.start_column)
ret += Result(expr=ast_name, temp_variables=[ast_name, ret.stmts[-1]])
return ret
@builds("defclass")
@checkargs(min=1)
def compile_class_expression(self, expression):
expression.pop(0) # class
class_name = expression.pop(0)
if expression:
base_list = expression.pop(0)
if not isinstance(base_list, HyList):
raise HyTypeError(expression,
"Bases class must be a list")
bases_expr, bases = self._compile_collect(base_list)
else:
bases_expr = []
bases = Result()
body = Result()
# grab the doc string, if there is one
if expression and isinstance(expression[0], HyString):
docstring = expression.pop(0)
symb = HySymbol("__doc__")
symb.start_line = docstring.start_line
symb.start_column = docstring.start_column
body += self._compile_assign(symb, docstring,
docstring.start_line,
docstring.start_column)
body += body.expr_as_stmt()
if expression:
try:
body_expression = iter(expression.pop(0))
except TypeError:
raise HyTypeError(
expression,
"Wrong argument type for defclass slots definition.")
for b in body_expression:
if len(b) != 2:
raise HyTypeError(
expression,
"Wrong number of argument in defclass slot.")
body += self._compile_assign(b[0], b[1],
b.start_line, b.start_column)
body += body.expr_as_stmt()
if not body.stmts:
body += ast.Pass(lineno=expression.start_line,
col_offset=expression.start_column)
return bases + ast.ClassDef(
lineno=expression.start_line,
col_offset=expression.start_column,
decorator_list=[],
name=ast_str(class_name),
keywords=[],
starargs=None,
kwargs=None,
bases=bases_expr,
body=body.stmts)
@builds("defmacro")
@checkargs(min=1)
def compile_macro(self, expression):
expression.pop(0)
name = expression.pop(0)
if not isinstance(name, HySymbol):
raise HyTypeError(name, ("received a `%s' instead of a symbol "
"for macro name" % type(name).__name__))
name = HyString(name).replace(name)
new_expression = HyExpression([
HySymbol("with_decorator"),
HyExpression([HySymbol("hy.macros.macro"), name]),
HyExpression([HySymbol("fn")] + expression),
]).replace(expression)
# Compile-time hack: we want to get our new macro now
# We must provide __name__ in the namespace to make the Python
# compiler set the __module__ attribute of the macro function.
hy.importer.hy_eval(new_expression,
compile_time_ns(self.module_name),
self.module_name)
        # We really want a `hy` import here so that hy.macros is available at run time
ret = self.compile(new_expression)
ret.add_imports('hy', [None])
return ret
@builds("eval_and_compile")
def compile_eval_and_compile(self, expression):
expression[0] = HySymbol("progn")
hy.importer.hy_eval(expression,
compile_time_ns(self.module_name),
self.module_name)
expression.pop(0)
return self._compile_branch(expression)
@builds("eval_when_compile")
def compile_eval_when_compile(self, expression):
expression[0] = HySymbol("progn")
hy.importer.hy_eval(expression,
compile_time_ns(self.module_name),
self.module_name)
return Result()
@builds(HyInteger)
def compile_integer(self, number):
return ast.Num(n=int(number),
lineno=number.start_line,
col_offset=number.start_column)
@builds(HyFloat)
def compile_float(self, number):
return ast.Num(n=float(number),
lineno=number.start_line,
col_offset=number.start_column)
@builds(HyComplex)
def compile_complex(self, number):
return ast.Num(n=complex(number),
lineno=number.start_line,
col_offset=number.start_column)
@builds(HySymbol)
def compile_symbol(self, symbol):
if "." in symbol:
glob, local = symbol.rsplit(".", 1)
glob = HySymbol(glob).replace(symbol)
ret = self.compile_symbol(glob)
ret = ast.Attribute(
lineno=symbol.start_line,
col_offset=symbol.start_column,
value=ret,
attr=ast_str(local),
ctx=ast.Load()
)
return ret
return ast.Name(id=ast_str(symbol),
arg=ast_str(symbol),
ctx=ast.Load(),
lineno=symbol.start_line,
col_offset=symbol.start_column)
@builds(HyString)
def compile_string(self, string):
return ast.Str(s=str_type(string),
lineno=string.start_line,
col_offset=string.start_column)
@builds(HyKeyword)
def compile_keyword(self, keyword):
return ast.Str(s=str_type(keyword),
lineno=keyword.start_line,
col_offset=keyword.start_column)
@builds(HyDict)
def compile_dict(self, m):
keyvalues, ret = self._compile_collect(m)
ret += ast.Dict(lineno=m.start_line,
col_offset=m.start_column,
keys=keyvalues[::2],
values=keyvalues[1::2])
return ret
def hy_compile(tree, module_name, root=ast.Module, get_expr=False):
"""
Compile a HyObject tree into a Python AST Module.
    If `get_expr` is True, return a tuple (module, last_expression), where
    `last_expression` is the last top-level expression of the tree, wrapped
    in an `ast.Expression` so it can be compiled and evaluated on its own.
"""
if hasattr(sys, "subversion"):
implementation = sys.subversion[0].lower()
elif hasattr(sys, "implementation"):
implementation = sys.implementation.name.lower()
body = []
expr = None
if tree:
compiler = HyASTCompiler(module_name)
result = compiler.compile(tree)
expr = result.force_expr
if not get_expr:
result += result.expr_as_stmt()
if isinstance(tree, list):
spoof_tree = tree[0]
else:
spoof_tree = tree
body = compiler.imports_as_stmts(spoof_tree) + result.stmts
ret = root(body=body)
# PyPy _really_ doesn't like the ast going backwards...
if implementation != "cpython":
for node in ast.walk(ret):
node.lineno = 1
node.col_offset = 1
if get_expr:
expr = ast.Expression(body=expr)
ret = (ret, expr)
return ret
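if __name__ == "__main__":
    # Minimal usage sketch of hy_compile, assuming the reader entry point
    # `hy.importer.import_buffer_to_hst` is available in this Hy version to
    # turn source text into a HyObject tree; swap in the local reader if it
    # is named differently.
    from hy.importer import import_buffer_to_hst

    tree = import_buffer_to_hst("(setv answer (+ 40 2))")
    module = hy_compile(tree, "__main__")
    namespace = {}
    exec(compile(module, "<sketch>", "exec"), namespace)
    print(namespace["answer"])  # 42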
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import base64
import itertools
import functools
from datetime import datetime
import traceback
from PyQt5.QtCore import QByteArray, QTimer
from PyQt5.QtNetwork import (
QNetworkAccessManager,
QNetworkProxyQuery,
QNetworkRequest,
QNetworkReply
)
from twisted.python import log
from splash.qtutils import qurl2ascii, REQUEST_ERRORS, get_request_webframe
from splash.request_middleware import (
AdblockMiddleware,
AllowedDomainsMiddleware,
AllowedSchemesMiddleware,
RequestLoggingMiddleware,
AdblockRulesRegistry,
ResourceTimeoutMiddleware,
ResponseBodyTrackingMiddleware,
)
from splash.response_middleware import ContentTypeMiddleware
from splash import defaults
from splash.utils import to_bytes
from splash.cookies import SplashCookieJar
class NetworkManagerFactory(object):
def __init__(self, filters_path=None, verbosity=None, allowed_schemes=None):
verbosity = defaults.VERBOSITY if verbosity is None else verbosity
self.verbosity = verbosity
self.request_middlewares = []
self.response_middlewares = []
self.adblock_rules = None
# Initialize request and response middlewares
allowed_schemes = (defaults.ALLOWED_SCHEMES if allowed_schemes is None
else allowed_schemes.split(','))
if allowed_schemes:
self.request_middlewares.append(
AllowedSchemesMiddleware(allowed_schemes, verbosity=verbosity)
)
if self.verbosity >= 2:
self.request_middlewares.append(RequestLoggingMiddleware())
self.request_middlewares.append(AllowedDomainsMiddleware(verbosity=verbosity))
self.request_middlewares.append(ResourceTimeoutMiddleware())
self.request_middlewares.append(ResponseBodyTrackingMiddleware())
if filters_path is not None:
self.adblock_rules = AdblockRulesRegistry(filters_path, verbosity=verbosity)
self.request_middlewares.append(
AdblockMiddleware(self.adblock_rules, verbosity=verbosity)
)
self.response_middlewares.append(ContentTypeMiddleware(self.verbosity))
def __call__(self):
manager = SplashQNetworkAccessManager(
request_middlewares=self.request_middlewares,
response_middlewares=self.response_middlewares,
verbosity=self.verbosity,
)
manager.setCache(None)
return manager
class ProxiedQNetworkAccessManager(QNetworkAccessManager):
"""
QNetworkAccessManager subclass with extra features. It
* Enables "splash proxy factories" support. Qt provides similar
functionality via setProxyFactory method, but standard
QNetworkProxyFactory is not flexible enough.
* Sets up extra logging.
* Provides a way to get the "source" request (that was made to Splash
itself).
* Tracks information about requests/responses and stores it in HAR format,
including response content.
    * Allows setting per-request timeouts.
"""
_REQUEST_ID = QNetworkRequest.User + 1
_SHOULD_TRACK = QNetworkRequest.User + 2
def __init__(self, verbosity):
super(ProxiedQNetworkAccessManager, self).__init__()
self.sslErrors.connect(self._on_ssl_errors)
self.finished.connect(self._on_finished)
self.verbosity = verbosity
self._reply_timeout_timers = {} # requestId => timer
self._default_proxy = self.proxy()
self.cookiejar = SplashCookieJar(self)
self.setCookieJar(self.cookiejar)
self._response_bodies = {} # requestId => response content
self._request_ids = itertools.count()
assert self.proxyFactory() is None, "Standard QNetworkProxyFactory is not supported"
def _on_ssl_errors(self, reply, errors):
reply.ignoreSslErrors()
def _on_finished(self, reply):
reply.deleteLater()
def createRequest(self, operation, request, outgoingData=None):
"""
This method is called when a new request is sent;
it must return a reply object to work with.
"""
start_time = datetime.utcnow()
# Proxies are managed per-request, so we're restoring a default
# before each request. This assumes all requests go through
# this method.
self._clear_proxy()
request, req_id = self._wrap_request(request)
self._handle_custom_headers(request)
self._handle_request_cookies(request)
self._run_webpage_callbacks(request, 'on_request',
request, operation, outgoingData)
self._handle_custom_proxies(request)
self._handle_request_response_tracking(request)
har = self._get_har(request)
if har is not None:
har.store_new_request(
req_id=req_id,
start_time=start_time,
operation=operation,
request=request,
outgoingData=outgoingData,
)
reply = super(ProxiedQNetworkAccessManager, self).createRequest(
operation, request, outgoingData
)
if hasattr(request, 'timeout'):
timeout = request.timeout * 1000
if timeout:
self._set_reply_timeout(reply, timeout)
if har is not None:
har.store_new_reply(req_id, reply)
reply.error.connect(self._on_reply_error)
reply.finished.connect(self._on_reply_finished)
if self._should_track_content(request):
self._response_bodies[req_id] = QByteArray()
reply.readyRead.connect(self._on_reply_ready_read)
reply.metaDataChanged.connect(self._on_reply_headers)
reply.downloadProgress.connect(self._on_reply_download_progress)
return reply
def _set_reply_timeout(self, reply, timeout_ms):
request_id = self._get_request_id(reply.request())
# reply is used as a parent for the timer in order to destroy
# the timer when reply is destroyed. It segfaults otherwise.
timer = QTimer(reply)
timer.setSingleShot(True)
timer_callback = functools.partial(self._on_reply_timeout,
reply=reply,
timer=timer,
request_id=request_id)
timer.timeout.connect(timer_callback)
self._reply_timeout_timers[request_id] = timer
timer.start(timeout_ms)
def _on_reply_timeout(self, reply, timer, request_id):
self._reply_timeout_timers.pop(request_id)
self.log("timed out, aborting: {url}", reply, min_level=1)
# FIXME: set proper error code
reply.abort()
def _cancel_reply_timer(self, reply):
request_id = self._get_request_id(reply.request())
timer = self._reply_timeout_timers.pop(request_id, None)
if timer and timer.isActive():
timer.stop()
def _clear_proxy(self):
""" Init default proxy """
self.setProxy(self._default_proxy)
def _wrap_request(self, request):
req = QNetworkRequest(request)
req_id = next(self._request_ids)
req.setAttribute(self._REQUEST_ID, req_id)
for attr in ['timeout', 'track_response_body']:
if hasattr(request, attr):
setattr(req, attr, getattr(request, attr))
return req, req_id
def _handle_custom_proxies(self, request):
proxy = None
# proxies set in proxy profiles or `proxy` HTTP argument
splash_proxy_factory = self._get_webpage_attribute(request, 'splash_proxy_factory')
if splash_proxy_factory:
proxy_query = QNetworkProxyQuery(request.url())
proxy = splash_proxy_factory.queryProxy(proxy_query)[0]
self.setProxy(proxy)
# proxies set in on_request
if hasattr(request, 'custom_proxy'):
proxy = request.custom_proxy
self.setProxy(proxy)
# Handle proxy auth. We're setting Proxy-Authorization header
# explicitly because Qt loves to cache proxy credentials.
if proxy is None:
return
user, password = proxy.user(), proxy.password()
if not user and not password:
return
auth = b"Basic " + base64.b64encode("{}:{}".format(user, password).encode("utf-8"))
request.setRawHeader(b"Proxy-Authorization", auth)
def _handle_custom_headers(self, request):
if self._get_webpage_attribute(request, "skip_custom_headers"):
# XXX: this hack assumes that new requests between
# BrowserTab._create_request and this function are not possible,
# i.e. we don't give control to the event loop in between.
# Unfortunately we can't store this flag on a request itself
# because a new QNetworkRequest instance is created by QWebKit.
self._set_webpage_attribute(request, "skip_custom_headers", False)
return
headers = self._get_webpage_attribute(request, "custom_headers")
if isinstance(headers, dict):
headers = headers.items()
for name, value in headers or []:
request.setRawHeader(to_bytes(name), to_bytes(value))
def _handle_request_cookies(self, request):
self.cookiejar.update_cookie_header(request)
def _handle_reply_cookies(self, reply):
self.cookiejar.fill_from_reply(reply)
def _handle_request_response_tracking(self, request):
track = getattr(request, 'track_response_body', False)
request.setAttribute(self._SHOULD_TRACK, track)
def _should_track_content(self, request):
return request.attribute(self._SHOULD_TRACK)
def _get_request_id(self, request=None):
if request is None:
request = self.sender().request()
return request.attribute(self._REQUEST_ID)
def _get_har(self, request=None):
"""
Return HarBuilder instance.
:rtype: splash.har_builder.HarBuilder | None
"""
if request is None:
request = self.sender().request()
return self._get_webpage_attribute(request, "har")
def _get_webpage_attribute(self, request, attribute):
web_frame = get_request_webframe(request)
if web_frame:
return getattr(web_frame.page(), attribute, None)
def _set_webpage_attribute(self, request, attribute, value):
web_frame = get_request_webframe(request)
if web_frame:
return setattr(web_frame.page(), attribute, value)
def _on_reply_error(self, error_id):
self._response_bodies.pop(self._get_request_id(), None)
if error_id != QNetworkReply.OperationCanceledError:
error_msg = REQUEST_ERRORS.get(error_id, 'unknown error')
self.log('Download error %d: %s ({url})' % (error_id, error_msg),
self.sender(), min_level=2)
def _on_reply_ready_read(self):
reply = self.sender()
self._store_response_chunk(reply)
def _store_response_chunk(self, reply):
req_id = self._get_request_id(reply.request())
if req_id not in self._response_bodies:
self.log("Internal problem in _store_response_chunk: "
"request %s is not tracked" % req_id, reply, min_level=1)
return
chunk = reply.peek(reply.bytesAvailable())
self._response_bodies[req_id].append(chunk)
def _on_reply_finished(self):
reply = self.sender()
request = reply.request()
self._cancel_reply_timer(reply)
har = self._get_har()
        har_entry = None
        req_id = self._get_request_id()
        # Pop the tracked body unconditionally so it does not stay in
        # self._response_bodies forever when no HAR is being recorded.
        content = self._response_bodies.pop(req_id, None)
        if content is not None:
            content = bytes(content)
        if har is not None:
            # FIXME: content is kept in memory at least twice,
            # as raw data and as a base64-encoded copy.
            har.store_reply_finished(req_id, reply, content)
            har_entry = har.get_entry(req_id)
# We're passing HAR entry to the callbacks because reply object
# itself doesn't have all information.
# Content is passed in order to avoid decoding it from base64.
self._run_webpage_callbacks(request, "on_response", reply, har_entry,
content)
self.log("Finished downloading {url}", reply)
def _on_reply_headers(self):
"""Signal emitted before reading response body, after getting headers
"""
reply = self.sender()
request = reply.request()
self._handle_reply_cookies(reply)
self._run_webpage_callbacks(request, "on_response_headers", reply)
har = self._get_har()
if har is not None:
har.store_reply_headers_received(self._get_request_id(request), reply)
self.log("Headers received for {url}", reply, min_level=3)
def _on_reply_download_progress(self, received, total):
har = self._get_har()
if har is not None:
req_id = self._get_request_id()
har.store_reply_download_progress(req_id, received, total)
if total == -1:
total = '?'
self.log("Downloaded %d/%s of {url}" % (received, total),
self.sender(), min_level=4)
def _on_reply_upload_progress(self, sent, total):
# FIXME: is it used?
har = self._get_har()
if har is not None:
req_id = self._get_request_id()
har.store_request_upload_progress(req_id, sent, total)
if total == -1:
total = '?'
self.log("Uploaded %d/%s of {url}" % (sent, total),
self.sender(), min_level=4)
def _get_render_options(self, request):
return self._get_webpage_attribute(request, 'render_options')
def _run_webpage_callbacks(self, request, event_name, *args):
callbacks = self._get_webpage_attribute(request, "callbacks")
if not callbacks:
return
for cb in callbacks.get(event_name, []):
try:
cb(*args)
except:
# TODO unhandled exceptions in lua callbacks
# should we raise errors here?
# https://github.com/scrapinghub/splash/issues/161
self.log("error in %s callback" % event_name, min_level=1)
self.log(traceback.format_exc(), min_level=1, format_msg=False)
def log(self, msg, reply=None, min_level=2, format_msg=True):
if self.verbosity < min_level:
return
if not reply:
url = ''
else:
url = qurl2ascii(reply.url())
if not url:
return
if format_msg:
msg = msg.format(url=url)
log.msg(msg, system='network-manager')
class SplashQNetworkAccessManager(ProxiedQNetworkAccessManager):
"""
This QNetworkAccessManager provides:
* proxy support;
* request middleware support;
* additional logging.
"""
def __init__(self, request_middlewares, response_middlewares, verbosity):
super(SplashQNetworkAccessManager, self).__init__(verbosity=verbosity)
self.request_middlewares = request_middlewares
self.response_middlewares = response_middlewares
def run_response_middlewares(self):
reply = self.sender()
reply.metaDataChanged.disconnect(self.run_response_middlewares)
render_options = self._get_render_options(reply.request())
if render_options:
try:
for middleware in self.response_middlewares:
middleware.process(reply, render_options)
except:
self.log("internal error in response middleware", min_level=1)
self.log(traceback.format_exc(), min_level=1, format_msg=False)
def createRequest(self, operation, request, outgoingData=None):
# XXX: This method MUST return a reply, otherwise PyQT segfaults.
render_options = self._get_render_options(request)
if render_options:
try:
for middleware in self.request_middlewares:
request = middleware.process(request, render_options, operation, outgoingData)
except:
self.log("internal error in request middleware", min_level=1)
self.log(traceback.format_exc(), min_level=1, format_msg=False)
reply = super(SplashQNetworkAccessManager, self).createRequest(operation, request, outgoingData)
if render_options:
reply.metaDataChanged.connect(self.run_response_middlewares)
return reply
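if __name__ == '__main__':
    # Minimal usage sketch: the factory is built once with filtering options
    # and then called per tab/page to get a middleware-equipped access
    # manager. The option values below are illustrative, not Splash defaults,
    # and a Qt application object must exist before issuing requests.
    import sys
    from PyQt5.QtCore import QCoreApplication

    app = QCoreApplication(sys.argv)
    factory = NetworkManagerFactory(verbosity=2, allowed_schemes='http,https')
    manager = factory()  # SplashQNetworkAccessManager with middlewares attached
    print(manager)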
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/home/joncrall/code/hotspotter/hsgui/_frontend/EditPrefSkel.ui'
#
# Created: Mon Feb 10 13:40:41 2014
# by: PyQt4 UI code generator 4.9.1
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_editPrefSkel(object):
def setupUi(self, editPrefSkel):
editPrefSkel.setObjectName(_fromUtf8("editPrefSkel"))
editPrefSkel.resize(668, 530)
self.verticalLayout = QtGui.QVBoxLayout(editPrefSkel)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.prefTreeView = QtGui.QTreeView(editPrefSkel)
self.prefTreeView.setObjectName(_fromUtf8("prefTreeView"))
self.verticalLayout.addWidget(self.prefTreeView)
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.redrawBUT = QtGui.QPushButton(editPrefSkel)
self.redrawBUT.setObjectName(_fromUtf8("redrawBUT"))
self.horizontalLayout.addWidget(self.redrawBUT)
self.unloadFeaturesAndModelsBUT = QtGui.QPushButton(editPrefSkel)
self.unloadFeaturesAndModelsBUT.setObjectName(_fromUtf8("unloadFeaturesAndModelsBUT"))
self.horizontalLayout.addWidget(self.unloadFeaturesAndModelsBUT)
self.defaultPrefsBUT = QtGui.QPushButton(editPrefSkel)
self.defaultPrefsBUT.setObjectName(_fromUtf8("defaultPrefsBUT"))
self.horizontalLayout.addWidget(self.defaultPrefsBUT)
self.verticalLayout.addLayout(self.horizontalLayout)
self.retranslateUi(editPrefSkel)
QtCore.QMetaObject.connectSlotsByName(editPrefSkel)
def retranslateUi(self, editPrefSkel):
editPrefSkel.setWindowTitle(QtGui.QApplication.translate("editPrefSkel", "Edit Preferences", None, QtGui.QApplication.UnicodeUTF8))
self.redrawBUT.setText(QtGui.QApplication.translate("editPrefSkel", "Redraw", None, QtGui.QApplication.UnicodeUTF8))
self.unloadFeaturesAndModelsBUT.setText(QtGui.QApplication.translate("editPrefSkel", "Unload Features and Models", None, QtGui.QApplication.UnicodeUTF8))
self.defaultPrefsBUT.setText(QtGui.QApplication.translate("editPrefSkel", "Defaults", None, QtGui.QApplication.UnicodeUTF8))
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
editPrefSkel = QtGui.QWidget()
ui = Ui_editPrefSkel()
ui.setupUi(editPrefSkel)
editPrefSkel.show()
sys.exit(app.exec_())
|
#!/usr/bin/python
import pyaudio
import wave
import urllib2
import thread
import time
import requests
CHUNK = 1024
FORMAT = pyaudio.paInt16 #paInt8
CHANNELS = 2
RATE = 44100 #sample rate
filename = "output.wav"
website = 'http://ec2-54-71-180-108.us-west-2.compute.amazonaws.com/hearboi/device/record/status'
status = 'STOP'
def recordAudio(threadname):
global status
print (threadname)
while True:
p = pyaudio.PyAudio()
while status=='STOP':
time.sleep(0.5)
print("* start recording")
stream = p.open(format=FORMAT, channels=CHANNELS, rate=RATE, input=True, frames_per_buffer=CHUNK) #buffer
frames = []
#for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
while(status=='START'):
data = stream.read(CHUNK)
frames.append(data) # 2 bytes(16 bits) per channel
print("* done recording")
stream.stop_stream()
stream.close()
p.terminate()
wf = wave.open(filename, 'wb')
wf.setnchannels(CHANNELS)
wf.setsampwidth(p.get_sample_size(FORMAT))
wf.setframerate(RATE)
wf.writeframes(b''.join(frames))
wf.close()
# POST file to server
        uploadSite = 'http://ec2-54-71-180-108.us-west-2.compute.amazonaws.com/hearboi/device/record/upload'
        # open the file in a `with` block so the handle is closed after upload
        with open(filename, 'rb') as audio_file:
            files = {'audioFile': audio_file}
            r = requests.post(uploadSite, files=files)
def monitor(threadname):
global status
print (threadname)
while True:
try:
status = urllib2.urlopen(website).read()
        except urllib2.URLError:
            # HTTPError subclasses URLError, so this also covers connection
            # failures instead of letting them kill the monitor thread
            time.sleep(1)
time.sleep(0.5)
thread.start_new_thread(monitor,("monitor thread",))
thread.start_new_thread(recordAudio,("record thread",))
c = raw_input("Type something to quit.")
|
#
# This sets up how models are displayed
# in the web admin interface.
#
from django import forms
from django.conf import settings
from django.contrib import admin
from src.objects.models import ObjAttribute, ObjectDB, ObjectNick, Alias
from src.utils.utils import mod_import
class ObjAttributeInline(admin.TabularInline):
model = ObjAttribute
fields = ('db_key', 'db_value')
extra = 0
class NickInline(admin.TabularInline):
model = ObjectNick
fields = ('db_nick', 'db_real', 'db_type')
extra = 0
class AliasInline(admin.TabularInline):
model = Alias
fields = ("db_key",)
extra = 0
class ObjectCreateForm(forms.ModelForm):
"This form details the look of the fields"
class Meta:
model = ObjectDB
db_key = forms.CharField(label="Name/Key",
widget=forms.TextInput(attrs={'size':'78'}),
help_text="Main identifier, like 'apple', 'strong guy', 'Elizabeth' etc. If creating a Character, check so the name is unique among characters!",)
db_typeclass_path = forms.CharField(label="Typeclass",initial="Change to (for example) %s or %s." % (settings.BASE_OBJECT_TYPECLASS, settings.BASE_CHARACTER_TYPECLASS),
widget=forms.TextInput(attrs={'size':'78'}),
help_text="This defines what 'type' of entity this is. This variable holds a Python path to a module with a valid Evennia Typeclass. If you are creating a Character you should use the typeclass defined by settings.BASE_CHARACTER_TYPECLASS or one derived from that.")
db_permissions = forms.CharField(label="Permissions",
initial=settings.PERMISSION_PLAYER_DEFAULT,
required=False,
widget=forms.TextInput(attrs={'size':'78'}),
help_text="a comma-separated list of text strings checked by certain locks. They are mainly of use for Character objects. Character permissions overload permissions defined on a controlling Player. Most objects normally don't have any permissions defined.")
db_cmdset_storage = forms.CharField(label="CmdSet",
initial=settings.CMDSET_DEFAULT,
required=False,
widget=forms.TextInput(attrs={'size':'78'}),
help_text="Most non-character objects don't need a cmdset and can leave this field blank.")
class ObjectEditForm(ObjectCreateForm):
"Form used for editing. Extends the create one with more fields"
db_lock_storage = forms.CharField(label="Locks",
required=False,
widget=forms.Textarea(attrs={'cols':'100', 'rows':'2'}),
help_text="In-game lock definition string. If not given, defaults will be used. This string should be on the form <i>type:lockfunction(args);type2:lockfunction2(args);...")
class ObjectDBAdmin(admin.ModelAdmin):
list_display = ('id', 'db_key', 'db_location', 'db_player', 'db_typeclass_path')
list_display_links = ('id', 'db_key')
ordering = ['db_player', 'db_typeclass_path', 'id']
search_fields = ['^db_key', 'db_typeclass_path']
save_as = True
save_on_top = True
list_select_related = True
list_filter = ('db_permissions', 'db_location', 'db_typeclass_path')
# editing fields setup
form = ObjectEditForm
fieldsets = (
(None, {
'fields': (('db_key','db_typeclass_path'), ('db_permissions', 'db_lock_storage'),
('db_location', 'db_home'), 'db_destination','db_cmdset_storage'
)}),
)
#deactivated temporarily, they cause empty objects to be created in admin
inlines = [AliasInline]#, ObjAttributeInline]
    # Custom modification to give two different forms depending on whether we are adding or editing.
add_form = ObjectCreateForm
add_fieldsets = (
(None, {
'fields': (('db_key','db_typeclass_path'), 'db_permissions',
('db_location', 'db_home'), 'db_destination','db_cmdset_storage'
)}),
)
def get_fieldsets(self, request, obj=None):
if not obj:
return self.add_fieldsets
return super(ObjectDBAdmin, self).get_fieldsets(request, obj)
def get_form(self, request, obj=None, **kwargs):
"""
Use special form during creation
"""
defaults = {}
if obj is None:
defaults.update({
'form': self.add_form,
'fields': admin.util.flatten_fieldsets(self.add_fieldsets),
})
defaults.update(kwargs)
return super(ObjectDBAdmin, self).get_form(request, obj, **defaults)
def save_model(self, request, obj, form, change):
if not change:
# adding a new object
obj = obj.typeclass
obj.basetype_setup()
obj.basetype_posthook_setup()
obj.at_object_creation()
obj.at_init()
admin.site.register(ObjectDB, ObjectDBAdmin)
|
# Anibots (anigraf robots) physical/visual sim
#
# Copyright (c) 2007-2012 Samuel H. Kenyon. <[email protected]>
# http://synapticnulship.com
# This is open source, made available under the MIT License (see the
# accompanying file LICENSE).
#
# This python script connects my anibots C++ program (with the help of SWIG)
# to the Breve simulation environment. It also defines low-level physical
# actions triggered by higher-level anigraf actions.
#
# For final project 9.343J Fall 2006
# Experiment 0: The block task
#
from anibots import *
import breve
class AnibotPhysicsSim( breve.PhysicalControl ):
def __init__( self ):
breve.Control.__init__( self )
self.bots = breve.objectList()
self.actionDuration = 45
self.iterCount=self.actionDuration
self.videoLog = breve.Movie()
self.block = None
#configure the anibots
self.env = None
self.numBots = 1
self.iterations = 20
self.kDepth = 3
self.takeTurns = True
self.anibotConfig = AnibotConfig("final-x-g.dat", "final-x-pedge.dat","pweights-alt.dat")
self.anibotConfig.proxify = False
# bool proxyWeightsProportional;
# float proxyWeightsProportion;
#bool randomizeEdges
self.anibotConfig.randomize = False
#self.anibotConfig.quant = 11
#self.anibotConfig.quantDiff = 1
#anibotConfig.quantIrregular;
#self.anibotConfig.randMin = 0
#self.anibotConfig.randMax = 10
# bool singleTops;
AnibotPhysicsSim.init( self )
def init( self ):
print '''Setting up Anibot environment'''
# start the anibots environment (mental simulation)
self.env = AgentEnv("test_anibots_exp0.py",self.kDepth,self.takeTurns)
self.env.NewAnibot(self.numBots,self.anibotConfig)
self.env.InitLoner(0)
#self.env.InitTransaction(0,1)
print '''Setting up Physics Sim.'''
#start the visual/physical environment in Breve
self.setDisplayText( "Anibots Sim", -1.0, 0.8, 1 )
self.setRandomSeedFromDevRandom()
self.enableFastPhysics()
self.setFastPhysicsIterations( 15 )
#self.setGravity( breve.vector(0.0,-3.0,0.0) )
self.enableLighting()
self.enableSmoothDrawing()
self.moveLight( breve.vector( 20, 30, 20 ) )
floor = breve.createInstances( breve.Floor, 1 )
floor.catchShadows()
#floor.setE( 1.000000 )
floor.setMu(0.0)
#floor.showAxis()
self.cloudTexture = breve.createInstances( breve.Image, 1 ).load( 'images/clouds.png' )
self.enableShadowVolumes()
self.enableReflections()
self.setBackgroundColor( breve.vector( 0.400000, 0.600000, 0.900000 ) )
self.setBackgroundTextureImage( self.cloudTexture )
#self.offsetCamera( breve.vector( 3, 13, -13 ) )
self.pointCamera( breve.vector( 0, 0, 0 ), breve.vector( 20, 20, 60 ) )
#the virtual bodies
self.bots = breve.createInstances( breve.AnibotBody, 1 )
self.bots.move( breve.vector( 0.0, self.bots.radius, 14 ) )
self.env.Next()
#the block
self.block = breve.createInstances( breve.Mobile, 1 )
shape = breve.createInstances( breve.Cube, 1 ).initWith( breve.vector(15,3,4) )
shape.setMass(0.5)
self.block.setShape(shape)
self.block.setColor(breve.vector( 1.0, 0.5 ,0.0 ))
self.block.move( breve.vector( 0.0, 1.5 ,0.0 ) )
self.block.setMu(0.0)
#self.block.setE(0.1)
self.block.enablePhysics()
print self.block.getMass()
#self.block.setForce( breve.vector( 500.0, 500.0 , 500.0 ) )
#self.block.setVelocity( breve.vector( 0, 0, -10 ) )
#self.watch( self.bots[0] )
self.watch( self.block )
self.videoLog.record("anibots-pushtest.mpg")
def iterate( self ):
s2 = "block dist: %.2f" % (-self.block.getLocation()[2])
self.setDisplayText(s2, -1.0, 0.5, 6)
breve.Control.iterate( self )
breve.AnibotPhysicsSim = AnibotPhysicsSim
class AnibotBody( breve.Mobile ):
def __init__( self ):
breve.Mobile.__init__( self )
self.radius = 1.5
AnibotBody.init( self )
def init( self ):
shape = breve.createInstances( breve.Sphere, 1 ).initWith( self.radius )
shape.setDensity(100)
self.setShape( shape )
#self.setShape( breve.createInstances( breve.Cube, 1 ).initWith( breve.vector(self.radius,self.radius,self.radius) ))
self.setColor( breve.randomExpression( breve.vector( 1.000000, 1.000000, 1.000000 ) ) )
#self.move( breve.vector( breve.randomExpression(8.0)-4.0, self.radius, breve.randomExpression(20.0) + 8.0 ) )
self.move( breve.vector( 0.0, self.radius, 14.0 ) )
print self.getMass()
self.enablePhysics()
#self.setVelocity( breve.vector( 0.0, 0.0, -2.0 ) )
#self.setForce( breve.vector( 0.0, 0.0, -100.0 ) )
def moveX( self, x ):
if self.getLocation()[0] != x:
z = self.getLocation()[2]
self.move( breve.vector( x, self.radius, z+2 ) )
def iterate( self ):
#print self.getVelocity()
self.setVelocity( breve.vector( 0.0, 0.0, -2.0 ) )
breve.AnibotBody = AnibotBody
# Create an instance of our controller object to initialize the simulation
AnibotPhysicsSim()
|
# Authors:
# Rob Crittenden <[email protected]>
#
# Copyright (C) 2010 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Test the `ipalib.x509` module.
"""
import base64
from binascii import hexlify
from configparser import RawConfigParser
import datetime
from io import StringIO
import pickle
import pytest
from cryptography import x509 as crypto_x509
from cryptography.x509.general_name import DNSName
from ipalib import x509
from ipapython.dn import DN
pytestmark = pytest.mark.tier0
# certutil -
# certificate for CN=ipa.example.com,O=IPA
goodcert = (
b'MIICAjCCAWugAwIBAgICBEUwDQYJKoZIhvcNAQEFBQAwKTEnMCUGA1UEAxMeSVBB'
b'IFRlc3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5MB4XDTEwMDYyNTEzMDA0MloXDTE1'
b'MDYyNTEzMDA0MlowKDEMMAoGA1UEChMDSVBBMRgwFgYDVQQDEw9pcGEuZXhhbXBs'
b'ZS5jb20wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAJcZ+H6+cQaN/BlzR8OY'
b'kVeJgaU5tCaV9FF1m7Ws/ftPtTJUaSL1ncp6603rjA4tH1aa/B8i8xdC46+ZbY2a'
b'u8b9ryGcOsx2uaRpNLEQ2Fy//q1kQC8oM+iD8Nd6osF0a2wnugsgnJHPuJzhViaW'
b'xYgzk5DRdP81debokF3f3FX/AgMBAAGjOjA4MBEGCWCGSAGG+EIBAQQEAwIGQDAT'
b'BgNVHSUEDDAKBggrBgEFBQcDATAOBgNVHQ8BAf8EBAMCBPAwDQYJKoZIhvcNAQEF'
b'BQADgYEALD6X9V9w381AzzQPcHsjIjiX3B/AF9RCGocKZUDXkdDhsD9NZ3PLPEf1'
b'AMjkraKG963HPB8scyiBbbSuSh6m7TCp0eDgRpo77zNuvd3U4Qpm0Qk+KEjtHQDj'
b'NNG6N4ZnCQPmjFPScElvc/GgW7XMbywJy2euF+3/Uip8cnPgSH4='
)
goodcert_headers = (
b'-----BEGIN CERTIFICATE-----\n' +
goodcert +
b'\n-----END CERTIFICATE-----'
)
# The base64-encoded string 'bad cert'
badcert = (
b'-----BEGIN CERTIFICATE-----\n'
b'YmFkIGNlcnQ=\r\n'
b'-----END CERTIFICATE-----'
)
good_pkcs7 = (
b'-----BEGIN PKCS7-----\n'
b'MIIDvAYJKoZIhvcNAQcCoIIDrTCCA6kCAQExADALBgkqhkiG9w0BBwGgggOPMIID\n'
b'izCCAnOgAwIBAgIBATANBgkqhkiG9w0BAQsFADA2MRQwEgYDVQQKDAtFWEFNUExF\n'
b'LkNPTTEeMBwGA1UEAwwVQ2VydGlmaWNhdGUgQXV0aG9yaXR5MB4XDTE3MDkyMDIw\n'
b'NDI1N1oXDTM3MDkyMDIwNDI1N1owNjEUMBIGA1UECgwLRVhBTVBMRS5DT00xHjAc\n'
b'BgNVBAMMFUNlcnRpZmljYXRlIEF1dGhvcml0eTCCASIwDQYJKoZIhvcNAQEBBQAD\n'
b'ggEPADCCAQoCggEBAMNojX57UCCPTtEn9tQJBS4By5NixwodKm1UqOGsiecDrB0i\n'
b'Pw7D6uGP6g4b6srYtbh+YsRJnfekB2L08q1dX3LVEItq2TS0WKqgZuRZkw7DvnGl\n'
b'eANMwjHmE8k6/E0yI3GGxJLAfDZYw6CDspLkyN9anjQwVCz5N5z5bpeqi5BeVwin\n'
b'O8WVF6FNn3iyL66uwOsTGEzCo3Y5HiwqYgaND73TtdsBHcIqOdRql3CC3IdoXXcW\n'
b'044w4Lm2E95MuY729pPBHREtyzVkYtyuoKJ8KApghIY5oCklBkRDjyFK4tE7iF/h\n'
b's+valeT9vcz2bHMIpvbjqAu/kqE8MjcNEFPjLhcCAwEAAaOBozCBoDAfBgNVHSME\n'
b'GDAWgBTUB04/d1eLhbMtBi4AB65tsAt+2TAPBgNVHRMBAf8EBTADAQH/MA4GA1Ud\n'
b'DwEB/wQEAwIBxjAdBgNVHQ4EFgQU1AdOP3dXi4WzLQYuAAeubbALftkwPQYIKwYB\n'
b'BQUHAQEEMTAvMC0GCCsGAQUFBzABhiFodHRwOi8vaXBhLWNhLmdyZXlvYWsuY29t\n'
b'L2NhL29jc3AwDQYJKoZIhvcNAQELBQADggEBADQFwX1uh8tqLq8SqWZWtH95j33o\n'
b'5Ze2dW7sVppb/wVnNauG0wDQW7uIx+Ynr7GgufXLNBMn1aP/mA2CdHk7NZz2IB1s\n'
b'ZvbIfE8dVxzkA+Hh9d6cdgk4eU5rGf6Fw8ScEJ/48Mmncea3uGkHcOmt+BGLA8a1\n'
b'wtruy+iQylOkbv36CbxKV7IsZDP106Zc+cVeOUQZnCLKmvQkotn6UJd8N1X0R2J3\n'
b'4/qv0rUtcCnyEBNSgpTGCRlYM4kd98Dqc5W7wUpMcsQMFxQMSYY7pFQkdLPfJEx2\n'
b'Mg63SPawxfAgUeukrdsF3wTIKkIBu1TVse+kvRvgmRRrfF2a4ZOv5qORe2uhADEA\n'
b'-----END PKCS7-----'
)
long_oid_cert = b'''
-----BEGIN CERTIFICATE-----
MIIFiTCCBHGgAwIBAgITSAAAAAd1bEC5lsOdnQAAAAAABzANBgkqhkiG9w0BAQsF
ADBLMRUwEwYKCZImiZPyLGQBGRYFbG9jYWwxEjAQBgoJkiaJk/IsZAEZFgJhZDEe
MBwGA1UEAxMVYWQtV0lOLVBQSzAxNUY5TURRLUNBMB4XDTE3MDUyNTIzNDg0NVoX
DTE5MDUyNTIzNTg0NVowNDESMBAGA1UEChMJSVBBLkxPQ0FMMR4wHAYDVQQDExVD
ZXJ0aWZpY2F0ZSBBdXRob3JpdHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
AoIBAQDyyuty6irlL89hdaSW0UyAGLsOOMgAuJwBAeuRUorR159rsSnUXLcTHIsm
EszKhwxp3NkkawRWx/s0UN1m2+RUwMl6gvlw+G80Mz0S77C77M+2lO8HRmZGm+Wu
zBNcc9SANHuDQ1NISfZgLiscMS0+l0T3g6/Iqtg1kPWrq/tMevfh6tJEIedSBGo4
3xKEMSDkrvaeTuSVrgn/QT0m+WNccZa0c7X35L/hgR22/l5sr057Ef8F9vL8zUH5
TttFBIuiWJo8A8XX9I1zYIFhWjW3OVDZPBUnhGHH6yNyXGxXMRfcrrc74eTw8ivC
080AQuRtgwvDErB/JPDJ5w5t/ielAgMBAAGjggJ7MIICdzA9BgkrBgEEAYI3FQcE
MDAuBiYrBgEEAYI3FQiEoqJGhYq1PoGllQqGi+F4nacAgRODs5gfgozzAAIBZAIB
BTAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAdBgNVHQ4EFgQUnSrC
yW3CR0e3ilJdN6kL06P3KHMwHwYDVR0jBBgwFoAUj69xtyUNwp8on+NWO+HlxKyg
X7AwgdgGA1UdHwSB0DCBzTCByqCBx6CBxIaBwWxkYXA6Ly8vQ049YWQtV0lOLVBQ
SzAxNUY5TURRLUNBLENOPVdJTi1QUEswMTVGOU1EUSxDTj1DRFAsQ049UHVibGlj
JTIwS2V5JTIwU2VydmljZXMsQ049U2VydmljZXMsQ049Q29uZmlndXJhdGlvbixE
Qz1hZCxEQz1sb2NhbD9jZXJ0aWZpY2F0ZVJldm9jYXRpb25MaXN0P2Jhc2U/b2Jq
ZWN0Q2xhc3M9Y1JMRGlzdHJpYnV0aW9uUG9pbnQwgcQGCCsGAQUFBwEBBIG3MIG0
MIGxBggrBgEFBQcwAoaBpGxkYXA6Ly8vQ049YWQtV0lOLVBQSzAxNUY5TURRLUNB
LENOPUFJQSxDTj1QdWJsaWMlMjBLZXklMjBTZXJ2aWNlcyxDTj1TZXJ2aWNlcyxD
Tj1Db25maWd1cmF0aW9uLERDPWFkLERDPWxvY2FsP2NBQ2VydGlmaWNhdGU/YmFz
ZT9vYmplY3RDbGFzcz1jZXJ0aWZpY2F0aW9uQXV0aG9yaXR5MDMGA1UdIAQsMCow
KAYmKwYBBAGCNxUIhKKiRoWKtT6BpZUKhovheJ2nAIEThrXzUYabpA4wDQYJKoZI
hvcNAQELBQADggEBAIsFS+Qc/ufTrkuHbMmzksOpxq+OIi9rot8zy9/1Vmj6d+iP
kB+vQ1u4/IhdQArJFNhsBzWSY9Pi8ZclovpepFeEZfXPUenyeRCU43HdMXcHXnlP
YZfyLQWOugdo1WxK6S9qQSOSlC7BSGZWvKkiAPAwr4zNbbS+ROA2w0xaYMv0rr5W
A4UAyzZAdqaGRJBRvCZ/uFHM5wMw0LzNCL4CqKW9jfZX0Fc2tdGx8zbTYxIdgr2D
PL25as32r3S/m4uWqoQaK0lxK5Y97eusK2rrmidy32Jctzwl29UWq8kpjRAuD8iR
CSc7sKqOf+fn3+fKITR2/DcSVvb0SGCr5fVVnjQ=
-----END CERTIFICATE-----
'''
ipa_demo_crt = b'''\
-----BEGIN CERTIFICATE-----
MIIGFTCCBP2gAwIBAgISA61CoqWtpZoTEyfLCXliPLYFMA0GCSqGSIb3DQEBCwUA
MEoxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1MZXQncyBFbmNyeXB0MSMwIQYDVQQD
ExpMZXQncyBFbmNyeXB0IEF1dGhvcml0eSBYMzAeFw0xODA3MjUwNTM2NTlaFw0x
ODEwMjMwNTM2NTlaMCAxHjAcBgNVBAMTFWlwYS5kZW1vMS5mcmVlaXBhLm9yZzCC
ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKisvYUdarWE0CS9i+RcNf9Q
41Euw36R4Myf/PUCDVUvGsVXQWSCanbtyxa8Ows4cAHrfqhiKAnSg0IhLqCMJVQ8
8F699FHrP9EfPmZkG3RMLYPxKNrSmOVyNpIEQY9qfkDXZPLung6dk/c225Znoltq
bVWLObXA7eP9C/djupg3gUD7vOAMHFmfZ3OKnx1uktL5p707o2/qlkSiEO4Z5ebD
M8X0dTkN8V3LCCOjzCp88itGUWJM8Tjb86WkmYkJxmeZx6REd37rDXjqgYhwgXOB
bSqDkYKRaihwvd5Up/vE1wApBS1k7b1oEW80teDUbzbaaqp7oBWbZD2Ac1yJF7UC
AwEAAaOCAx0wggMZMA4GA1UdDwEB/wQEAwIFoDAdBgNVHSUEFjAUBggrBgEFBQcD
AQYIKwYBBQUHAwIwDAYDVR0TAQH/BAIwADAdBgNVHQ4EFgQUUmTMI1CB6qFMXc0+
AGmqpfBAwhIwHwYDVR0jBBgwFoAUqEpqYwR93brm0Tm3pkVl7/Oo7KEwbwYIKwYB
BQUHAQEEYzBhMC4GCCsGAQUFBzABhiJodHRwOi8vb2NzcC5pbnQteDMubGV0c2Vu
Y3J5cHQub3JnMC8GCCsGAQUFBzAChiNodHRwOi8vY2VydC5pbnQteDMubGV0c2Vu
Y3J5cHQub3JnLzAgBgNVHREEGTAXghVpcGEuZGVtbzEuZnJlZWlwYS5vcmcwgf4G
A1UdIASB9jCB8zAIBgZngQwBAgEwgeYGCysGAQQBgt8TAQEBMIHWMCYGCCsGAQUF
BwIBFhpodHRwOi8vY3BzLmxldHNlbmNyeXB0Lm9yZzCBqwYIKwYBBQUHAgIwgZ4M
gZtUaGlzIENlcnRpZmljYXRlIG1heSBvbmx5IGJlIHJlbGllZCB1cG9uIGJ5IFJl
bHlpbmcgUGFydGllcyBhbmQgb25seSBpbiBhY2NvcmRhbmNlIHdpdGggdGhlIENl
cnRpZmljYXRlIFBvbGljeSBmb3VuZCBhdCBodHRwczovL2xldHNlbmNyeXB0Lm9y
Zy9yZXBvc2l0b3J5LzCCAQQGCisGAQQB1nkCBAIEgfUEgfIA8AB2AMEWSuCnctLU
OS3ICsEHcNTwxJvemRpIQMH6B1Fk9jNgAAABZNAnsSAAAAQDAEcwRQIgHkd/UkTZ
w8iV1Ox8MPHLrpY33cX6i5FV6w9+7YH3H2kCIQCVcrhsr4fokDyE2ueUqSFxkBVH
WND84/w5rFNAPjyO1QB2ACk8UZZUyDlluqpQ/FgH1Ldvv1h6KXLcpMMM9OVFR/R4
AAABZNAnsyUAAAQDAEcwRQIhALDWY2k55abu7IPwnFvMr4Zqd1DYQXEKWZEQLXUP
s4XGAiAabjpUwrLKVXpbp4WNLkTNlFjrSJafOzLG68H9AnoD4zANBgkqhkiG9w0B
AQsFAAOCAQEAfBNuQn/A2olJHxoBGLfMcQCkkNOfvBpfQeKgni2VVM+r1ZY8YVXx
OtVnV6XQ5M+l+6xlRpP1IwDdmJd/yaQgwbmYf4zl94W/s/qq4nlTd9G4ahmJOhlc
mWeIQMoEtAmQlIOqWto+Knfakz6Xyo+HVCQEyeoBmYFGZcakeAm6tp/6qtpkej+4
wBjShMPAdSYDPRaAqnZ3BAK2UmmlpAA5tkNvqOaHBCi760zYoxT6j1an7FotG0v9
2+W0aL34eMWKz/g4qhwk+Jiz45LLQWhHGIgXIUoNSzHgLIVuVOQI8DPsguvT6GHW
QUs1Hx1wL7mL4U8fKCFDKA+ds2B2xWgoZg==
-----END CERTIFICATE-----
'''
class test_x509:
"""
Test `ipalib.x509`
I created the contents of this certificate with a self-signed CA with:
% certutil -R -s "CN=ipa.example.com,O=IPA" -d . -a -o example.csr
% ./ipa host-add ipa.example.com
% ./ipa cert-request --add --principal=test/ipa.example.com example.csr
"""
def test_1_load_base64_cert(self):
"""
Test loading a base64-encoded certificate.
"""
# Load a good cert
x509.load_pem_x509_certificate(goodcert_headers)
# Load a good cert with headers and leading text
newcert = (
b'leading text\n' + goodcert_headers)
x509.load_pem_x509_certificate(newcert)
# Load a good cert with bad headers
newcert = b'-----BEGIN CERTIFICATE-----' + goodcert_headers
with pytest.raises((TypeError, ValueError)):
x509.load_pem_x509_certificate(newcert)
# Load a bad cert
with pytest.raises(ValueError):
x509.load_pem_x509_certificate(badcert)
def test_1_load_der_cert(self):
"""
Test loading a DER certificate.
"""
der = base64.b64decode(goodcert)
# Load a good cert
x509.load_der_x509_certificate(der)
def test_3_cert_contents(self):
"""
Test the contents of a certificate
"""
# Verify certificate contents. This exercises python-cryptography
# more than anything but confirms our usage of it.
not_before = datetime.datetime(2010, 6, 25, 13, 0, 42)
not_after = datetime.datetime(2015, 6, 25, 13, 0, 42)
cert = x509.load_pem_x509_certificate(goodcert_headers)
assert DN(cert.subject) == DN(('CN', 'ipa.example.com'), ('O', 'IPA'))
assert DN(cert.issuer) == DN(('CN', 'IPA Test Certificate Authority'))
assert cert.serial_number == 1093
assert cert.not_valid_before == not_before
assert cert.not_valid_after == not_after
assert cert.san_general_names == []
assert cert.san_a_label_dns_names == []
assert cert.extended_key_usage == {'1.3.6.1.5.5.7.3.1'}
assert cert.extended_key_usage_bytes == (
b'0\x16\x06\x03U\x1d%\x01\x01\xff\x04\x0c0\n\x06\x08'
b'+\x06\x01\x05\x05\x07\x03\x01'
)
def test_load_pkcs7_pem(self):
certlist = x509.pkcs7_to_certs(good_pkcs7, datatype=x509.PEM)
assert len(certlist) == 1
cert = certlist[0]
assert DN(cert.subject) == DN('CN=Certificate Authority,O=EXAMPLE.COM')
assert cert.serial_number == 1
def test_long_oid(self):
"""
        Test a certificate with a very long OID. In this case we are using a
        certificate from an opened case where one of the X509v3 Certificate
        Policies OIDs is longer than 80 chars.
"""
cert = x509.load_pem_x509_certificate(long_oid_cert)
ext = cert.extensions.get_extension_for_class(crypto_x509.
CertificatePolicies)
assert len(ext.value) == 1
assert ext.value[0].policy_identifier.dotted_string == (
u'1.3.6.1.4.1.311.21.8.8950086.10656446.2706058.12775672.480128.'
'147.13466065.13029902')
def test_ipa_demo_letsencrypt(self):
cert = x509.load_pem_x509_certificate(ipa_demo_crt)
assert DN(cert.subject) == DN('CN=ipa.demo1.freeipa.org')
assert DN(cert.issuer) == DN(
"CN=Let's Encrypt Authority X3,O=Let's Encrypt,C=US")
assert cert.serial_number == 0x03ad42a2a5ada59a131327cb0979623cb605
not_before = datetime.datetime(2018, 7, 25, 5, 36, 59)
not_after = datetime.datetime(2018, 10, 23, 5, 36, 59)
assert cert.not_valid_before == not_before
assert cert.not_valid_after == not_after
assert cert.san_general_names == [DNSName('ipa.demo1.freeipa.org')]
assert cert.san_a_label_dns_names == ['ipa.demo1.freeipa.org']
assert cert.extended_key_usage == {
'1.3.6.1.5.5.7.3.1', '1.3.6.1.5.5.7.3.2'
}
assert cert.extended_key_usage_bytes == (
b'0 \x06\x03U\x1d%\x01\x01\xff\x04\x160\x14\x06\x08+\x06\x01'
b'\x05\x05\x07\x03\x01\x06\x08+\x06\x01\x05\x05\x07\x03\x02'
)
class test_ExternalCAProfile:
def test_MSCSTemplateV1_good(self):
o = x509.MSCSTemplateV1("MySubCA")
assert hexlify(o.get_ext_data()) == b'1e0e004d007900530075006200430041'
def test_MSCSTemplateV1_bad(self):
with pytest.raises(ValueError):
x509.MSCSTemplateV1("MySubCA:1")
def test_MSCSTemplateV1_pickle_roundtrip(self):
o = x509.MSCSTemplateV1("MySubCA")
s = pickle.dumps(o)
assert o.get_ext_data() == pickle.loads(s).get_ext_data()
def test_MSCSTemplateV2_too_few_parts(self):
with pytest.raises(ValueError):
x509.MSCSTemplateV2("1.2.3.4")
def test_MSCSTemplateV2_too_many_parts(self):
with pytest.raises(ValueError):
x509.MSCSTemplateV2("1.2.3.4:100:200:300")
def test_MSCSTemplateV2_bad_oid(self):
with pytest.raises(ValueError):
x509.MSCSTemplateV2("not_an_oid:1")
def test_MSCSTemplateV2_non_numeric_major_version(self):
with pytest.raises(ValueError):
x509.MSCSTemplateV2("1.2.3.4:major:200")
def test_MSCSTemplateV2_non_numeric_minor_version(self):
with pytest.raises(ValueError):
x509.MSCSTemplateV2("1.2.3.4:100:minor")
def test_MSCSTemplateV2_major_version_lt_zero(self):
with pytest.raises(ValueError):
x509.MSCSTemplateV2("1.2.3.4:-1:200")
def test_MSCSTemplateV2_minor_version_lt_zero(self):
with pytest.raises(ValueError):
x509.MSCSTemplateV2("1.2.3.4:100:-1")
def test_MSCSTemplateV2_major_version_gt_max(self):
with pytest.raises(ValueError):
x509.MSCSTemplateV2("1.2.3.4:4294967296:200")
def test_MSCSTemplateV2_minor_version_gt_max(self):
with pytest.raises(ValueError):
x509.MSCSTemplateV2("1.2.3.4:100:4294967296")
def test_MSCSTemplateV2_good_major(self):
o = x509.MSCSTemplateV2("1.2.3.4:4294967295")
assert hexlify(o.get_ext_data()) == b'300c06032a0304020500ffffffff'
def test_MSCSTemplateV2_good_major_minor(self):
o = x509.MSCSTemplateV2("1.2.3.4:4294967295:0")
assert hexlify(o.get_ext_data()) \
== b'300f06032a0304020500ffffffff020100'
def test_MSCSTemplateV2_pickle_roundtrip(self):
o = x509.MSCSTemplateV2("1.2.3.4:4294967295:0")
s = pickle.dumps(o)
assert o.get_ext_data() == pickle.loads(s).get_ext_data()
def test_ExternalCAProfile_dispatch(self):
"""
Test that constructing ExternalCAProfile actually returns an
instance of the appropriate subclass.
"""
assert isinstance(
x509.ExternalCAProfile("MySubCA"),
x509.MSCSTemplateV1)
assert isinstance(
x509.ExternalCAProfile("1.2.3.4:100"),
x509.MSCSTemplateV2)
def test_write_pkispawn_config_file_MSCSTemplateV1(self):
template = x509.MSCSTemplateV1(u"SubCA")
expected = (
'[CA]\n'
'pki_req_ext_oid = 1.3.6.1.4.1.311.20.2\n'
'pki_req_ext_data = 1e0a00530075006200430041\n\n'
)
self._test_write_pkispawn_config_file(template, expected)
def test_write_pkispawn_config_file_MSCSTemplateV2(self):
template = x509.MSCSTemplateV2(u"1.2.3.4:4294967295")
expected = (
'[CA]\n'
'pki_req_ext_oid = 1.3.6.1.4.1.311.21.7\n'
'pki_req_ext_data = 300c06032a0304020500ffffffff\n\n'
)
self._test_write_pkispawn_config_file(template, expected)
def _test_write_pkispawn_config_file(self, template, expected):
"""
Test that the values we read from an ExternalCAProfile
object can be used to produce a reasonable-looking pkispawn
configuration.
"""
config = RawConfigParser()
config.optionxform = str
config.add_section("CA")
config.set("CA", "pki_req_ext_oid", template.ext_oid)
config.set("CA", "pki_req_ext_data",
hexlify(template.get_ext_data()).decode('ascii'))
out = StringIO()
config.write(out)
assert out.getvalue() == expected
|
# Copyright 2004-2006 Joe Wreschnig, Michael Urman, Iñigo Serna
# 2012 Christoph Reiter
# 2013 Nick Boultbee
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
from quodlibet import _
from quodlibet import app
from quodlibet.plugins.events import EventPlugin
from quodlibet.qltk import is_wayland, Icons
from quodlibet.util import (is_unity, is_osx, is_plasma, is_enlightenment,
print_exc, print_w, print_d)
from .prefs import Preferences
from .systemtray import SystemTray
if is_osx():
# Works, but not without problems:
# https://github.com/quodlibet/quodlibet/issues/1870
# The dock menu is more useful so disable.
from quodlibet.plugins import PluginNotSupportedError
raise PluginNotSupportedError
def get_indicator_impl():
"""Returns a BaseIndicator implementation depending on the environ"""
use_app_indicator = (is_unity() or is_wayland() or is_plasma() or
is_enlightenment())
print_d("use app indicator: %s" % use_app_indicator)
if not use_app_indicator:
return SystemTray
else:
try:
from .appindicator import AppIndicator
except ImportError:
print_w("importing app indicator failed")
print_exc()
# no indicator, fall back
return SystemTray
else:
return AppIndicator
class TrayIconPlugin(EventPlugin):
PLUGIN_ID = "Tray Icon"
PLUGIN_NAME = _("Tray Icon")
PLUGIN_DESC = _("Controls Quod Libet from the system tray.")
PLUGIN_ICON = Icons.USER_DESKTOP
def enabled(self):
impl = get_indicator_impl()
self._tray = impl()
self._tray.set_song(app.player.song)
self._tray.set_info_song(app.player.info)
self._tray.set_paused(app.player.paused)
def disabled(self):
self._tray.remove()
del self._tray
def PluginPreferences(self, parent):
return Preferences()
def plugin_on_song_started(self, song):
self._tray.set_song(app.player.song)
self._tray.set_info_song(app.player.info)
def plugin_on_paused(self):
self._tray.set_paused(True)
def plugin_on_unpaused(self):
self._tray.set_paused(False)
|
#!multiconf
from multiconf.envs import EnvFactory
from multiconf import ConfigRoot, ConfigItem, ConfigBuilder
from multiconf.decorators import nested_repeatables, repeat, required, named_as
@nested_repeatables('cloud_servers, jenkins, apache')
@required('git')
class Project(ConfigRoot):
def __init__(self, selected_env, name, valid_envs, **attr):
super(Project, self).__init__(selected_env=selected_env, name=name,
valid_envs=valid_envs, **attr)
def render(self):
# for all items within config:
# collect result of their own render() function
pass
@named_as('cloud_servers')
@repeat()
class CloudServer(ConfigItem):
def __init__(self, host_name, server_num):
print 'DEBUG: in CloudServer.__init__ host_name=', host_name
super(CloudServer, self).__init__(host_name=host_name,
server_num=server_num)
print 'DEBUG: in CloudServer.__init__ self.host_name=', self.host_name
class CloudServers(ConfigBuilder):
'''
    This is a builder - it will insert multiple objects of type CloudServer
    into the config and calculate a host name for each of them
'''
def __init__(self, base_host_name, num_servers):
super(CloudServers, self).__init__(host_name=base_host_name,
num_servers=num_servers)
def build(self):
for server_num in xrange(1, self.num_servers+1):
cs = CloudServer(host_name='%s%s' % (self.host_name, server_num),
server_num=server_num)
print 'DEBUG: cs.host_name=%s' % cs.host_name
@named_as('git')
class GitRepo(ConfigItem):
def __init__(self, origin, branch, branches_mask):
super(GitRepo, self).__init__(origin=origin, branch=branch,
branches_mask=branches_mask)
@named_as('jenkins')
@required('nodes, view')
@repeat()
class Jenkins(ConfigItem):
def __init__(self, num_nodes, base_port=0):
super(Jenkins, self).__init__(num_nodes=num_nodes, base_port=base_port)
@named_as('nodes')
class Nodes(ConfigItem):
def __init__(self, hosts):
super(Nodes, self).__init__(hosts=hosts)
@named_as('view')
@nested_repeatables('sub_view')
class NestedView(ConfigItem):
def __init__(self, name):
super(NestedView, self).__init__(name=name)
@named_as('sub_view')
@repeat()
class JobsView(ConfigItem):
def __init__(self, name):
super(JobsView, self).__init__(name=name)
class Jobs(ConfigItem):
def __init__(self, slaves=None):
super(Jobs, self).__init__(slaves=slaves)
class ProjectJobs(Jobs):
pass
class RepositoryJobs(Jobs):
pass
@named_as('apache')
@repeat()
class Apache(ConfigItem):
def __init__(self, base_port, nodes):
super(Apache, self).__init__(base_port=base_port, nodes=nodes)
class Database(ConfigItem):
def __init__(self, name):
super(Database, self).__init__(name=name)
# Define environments
# Use EnvFactory() to create environment or group of environments
ef = EnvFactory()
# We have five environments and we define them here
devlocal = ef.Env('devlocal') # Local dev box
dev = ef.Env('dev') # Dev build box
cloud = ef.Env('cloud') # Cloud
prod = ef.Env('prod')
# Grouping environments per their roles
g_dev = ef.EnvGroup('g_dev', devlocal, dev, cloud)
g_prod = ef.EnvGroup('g_prod', prod)
# This function is used to describe all environments and return an instantiated environment
# configuration for environment with name 'env_name', which is passed as parameter
def conf(env_name):
env = ef.env(env_name)
with Project(env, 'SampleProject', [g_dev, g_prod]) as project:
        # CloudServers is a multiconf builder - it will not be present in the
        # final configuration. Instead there will be CloudServer objects,
        # one for each server requested via the num_servers parameter
with CloudServers(base_host_name='something', num_servers=0) as cloud_servers:
cloud_servers.setattr('num_servers', devlocal=1, dev=2, cloud=4)
# GitRepo is set to be a required element of a project
# Try to comment out this declaration and see what happens
with GitRepo(origin='[email protected]:lechat/envsample.git',
branch='master', branches_mask='fb_[0-9]*$') as git:
git.setattr('branch', g_dev='develop')
with Jenkins(num_nodes=0) as jenkins:
jenkins.setattr('num_nodes', g_dev=2)
jenkins.setattr('base_port', g_dev=8080)
# Nodes is a builder too
jenkins_nodes = Nodes(hosts=cloud_servers)
with NestedView(project.name) as top_view:
with JobsView('%s_branches' % project.name) as jw1:
ProjectJobs(jenkins_nodes)
with JobsView('%s_common' % project.name) as jw1:
RepositoryJobs()
with Apache(base_port=80, nodes=cloud_servers) as apache:
apache.setattr('base_port', g_dev=18000)
with Database('%s_db' % project.name) as db:
db.setattr('name', g_dev='%s_dev_db' % project.name)
return project
config = conf('devlocal')
print config
assert(config.name=='SampleProject')
# Check that we only have one cloud server in devlocal
assert(len(config.cloud_servers)==1)
print config.cloud_servers
cloud_server = config.cloud_servers.values()[0]
assert(isinstance(cloud_server, CloudServer))
assert(cloud_server.host_name == 'something1')
|
from setuptools import setup, find_packages
VERSION = (1, 0, 0)
# Dynamically calculate the version based on VERSION tuple
if len(VERSION)>2 and VERSION[2] is not None:
str_version = "%d.%d_%s" % VERSION[:3]
else:
str_version = "%d.%d" % VERSION[:2]
version= str_version
setup(
name='django-options',
    version=version,
author='joke2k',
author_email='[email protected]',
packages=find_packages(),
include_package_data=True,
url='http://github.com/joke2k/django-options',
license='MIT',
    description='An easy way to manage Site options in your django applications.',
classifiers=[
# Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities',
'License :: OSI Approved :: MIT License',
'Framework :: Django'
],
keywords='faker fixtures data test django',
install_requires=['django',],
tests_require=['django','fake-factory>=0.2'],
test_suite="runtests.runtests",
zip_safe=False,
)
|
# This file is part of cappuccino.
#
# cappuccino is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cappuccino is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with cappuccino. If not, see <https://www.gnu.org/licenses/>.
import irc3
from cappuccino import Plugin
@irc3.plugin
class NickServ(Plugin):
@irc3.event(
r":(?P<nickserv>NickServ)!\S+@\S+ NOTICE .* :This nickname is registered.*"
)
def login_attempt(self, nickserv):
password = self.config.get("password", None)
if not password:
self.logger.warning(
"This nick is registered but no nickserv password is set in config.ini"
)
return
self.bot.privmsg(nickserv, f"IDENTIFY {password}")
@irc3.event(r":(?P<mask>NickServ!\S+@\S+) NOTICE .* :Password accepted.*")
def login_succeeded(self, mask):
self.logger.info(f"Authenticated with {mask}")
@irc3.event(r":(?P<mask>NickServ!\S+@\S+) NOTICE .* :Password incorrect.*")
def login_failed(self, mask):
self.logger.warning(
f"Failed to authenticate with {mask} due to an incorrect password"
)
|
'''
wallstweet, February 9th, 2014
Data mining twitter with style.
Copyright 2014 Riley Dawson, Kent Rasmussen, Chadwyck Goulet
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
__author__ = 'Riley'
import nltk, MySQLdb
import datetime
# This class is responsible for extracting the price and sentiment score data from the database.
# It takes in optional flags for the stock symbol, exchange, time interval (minutes),
# startTime (datetime), and endTime (datetime).
class plotBot:
def getLines(self,stock="MSFT",exchange="NASDAQ",interval=30,startTime=None,endTime=None):
# If start and/or endtimes are not initialized, default to 3 days
if endTime == None:
endTime = datetime.datetime.utcnow()
if startTime == None:
startTime = endTime - datetime.timedelta(days=3)
print startTime
print endTime
# Open the database
db = MySQLdb.connect(host="localhost",user="wallstweet", db="wallstweet")
cur1 = db.cursor()
lines = list()
lines.append(list())
lines.append(list())
# Construct query for fetching stock prices over time and store result
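        # ROUND(UNIX_TIMESTAMP(time) / (interval * 60)) assigns each row to an
        # interval-minute bucket, so AVG(price) is the mean price per bucket and
        # MIN(time) is used as that bucket's timestamp when plotting.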
cur1.execute("SELECT ROUND(UNIX_TIMESTAMP(time)/(%s * 60)) AS timeinterval, AVG(price), MIN(time) FROM stock_dataset WHERE time >= %s and time <= %s and id=%s GROUP BY timeinterval", [interval, str(startTime), str(endTime), stock])
stockRows = cur1.fetchall()
print(len(stockRows))
for row in stockRows:
lines[0].append([row[2],row[1]])
cur2 = db.cursor()
# Construct query for fetching tweet sentiments over time and store result
if(stock == "MSFT"):
company = "@Microsoft"
if(stock == "YUM"):
company = "Yum Brands"
if(stock == "ATVI"):
company = "Activision"
cur2.execute("SELECT ROUND(UNIX_TIMESTAMP(time)/(%s * 60)) AS timeinterval, AVG(polarity), MIN(time), COUNT(*) FROM tweet_dataset WHERE time >= %s AND time <= %s AND text LIKE %s GROUP BY timeinterval",[interval, str(startTime), str(endTime), "%" + company + "%"])
tweetRows = cur2.fetchall()
for row in tweetRows:
lines[1].append([row[2],row[1],row[3]])
print(len(tweetRows))
# for row in tweetRows:
# print(row)
return lines
#fun= plotBot()
# Debug junk code, feel free to ignore.
lines = plotBot().getLines()
x = list()
y1 = list()
for row in lines[0]:
# print(str(row[0])+" "+str(row[1]))
x.append(row[0])
y1.append(row[1])
#print(len(x))
#print(len(y1))
|
# -*- coding: utf-8 -*-
from django.test import TestCase
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from datetime import datetime, timedelta
from mock import patch
from accounts.models import Account
class TestIndexView(TestCase):
def setUp(self):
self.url = reverse('accounts_account_list')
self.user = User.objects.create_user('user1', '[email protected]', '123456')
self.account1 = Account.objects.create(
user=self.user,
provider='twitter',
provider_id='1',
provider_username='fulano1',
oauth_token='708983112-iaDKF97Shz8cxZu9x86ZAj0MuYZcgNkRXNniXyzI',
oauth_token_secret='u2spIuwwzaRtFvUHwaGFdoA7X4e1uiXXq81oWjJ9aos',
)
self.account2 = Account.objects.create(
user=self.user,
provider='facebook',
provider_id='2',
provider_username='fulano2',
oauth_token='CAACo0zwBRUMBAH2Vs6GoKeeqqd4t0qxgOdqbSUF2iQLMps6pf9IKqtcV8ZAOQ9vlZA6SpqnXlxA2fdpZAbKj5s1XlTAihTWQKSCMKODCddJpZAc0EBGbIvWGxX4LqH9G1MsSIIE3xkM71UQcb10CfV9pevYUPI4ZD',
expires_in=datetime.now() + timedelta(days=60)
)
self.account3 = Account.objects.create(
user=self.user,
provider='youtube',
provider_id='3',
provider_username='fulano3',
oauth_token='ya29.AHES6ZShg0YNnCV1U2tdXGORa8RrRtXnvaBJ4_K2cD9iuns',
refresh_token='1/00Gq_VIN9zRsdRFzhX8v-OrWgz8PfCA19ZSoRa1-Ih4',
expires_in=datetime.now() + timedelta(hours=1)
)
self.client.login(username='user1', password='123456')
def test_without_login(self):
self.client.logout()
response = self.client.get(self.url, follow=True)
self.assertRedirects(response, '/login/?next=' + self.url)
def test_render(self):
response = self.client.get(self.url)
self.assertEquals(response.status_code, 200)
self.assertTrue(self.account1 in response.context['account_list'])
self.assertTrue(self.account2 in response.context['account_list'])
self.assertTrue(self.account3 in response.context['account_list'])
def twitter_get_request_token(*args, **kwargs):
return u'request_token', u'secret_token'
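# Stubs like twitter_get_request_token above replace rauth's network-facing
# methods via mock.patch, so the OAuth view tests in this module never talk to
# the real Twitter, Facebook or Google endpoints.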
class TestTwitterNewView(TestCase):
def setUp(self):
self.url = reverse('accounts_twitter_new')
self.user = User.objects.create_user('user1', '[email protected]', '123456')
self.client.login(username='user1', password='123456')
def test_without_login(self):
self.client.logout()
response = self.client.get(self.url, follow=True)
self.assertRedirects(response, '/login/?next=' + self.url)
@patch('rauth.OAuth1Service.get_request_token', twitter_get_request_token)
def test_render(self):
response = self.client.get(self.url)
self.assertEquals(
response['Location'],
'https://api.twitter.com/oauth/authorize?oauth_token=request_token'
)
def twitter_get_access_token(*args, **kwargs):
return u'oauth_token', u'oauth_token_secret'
def twitter_get(*args, **kwargs):
class TwitterGet(object):
def json(self):
data = {}
data['id'] = 38895958
data['screen_name'] = 'theSeanCook'
return data
return TwitterGet()
def twitter_get_session(*args, **kwargs):
return 'oauth_token_secret'
class TestTwitterCallbackView(TestCase):
def setUp(self):
self.url = reverse('accounts_twitter_callback')
self.user = User.objects.create_user('user1', '[email protected]', '123456')
self.client.login(username='user1', password='123456')
def test_without_login(self):
self.client.logout()
response = self.client.get(self.url, follow=True)
self.assertRedirects(response, '/login/?next=' + self.url)
def test_render_without_tokens(self):
response = self.client.get(self.url, follow=True)
self.assertRedirects(response, reverse('accounts_account_list'))
self.assertContains(response, u'Ocorreu um erro ao adicionar a conta do twitter.')
response = self.client.get(
self.url,
{'oauth_token': 'oauth_token', 'oauth_verifier': 'oauth_verifier'},
follow=True
)
self.assertRedirects(response, reverse('accounts_account_list'))
self.assertContains(response, u'Ocorreu um erro ao adicionar a conta do twitter.')
@patch('rauth.OAuth1Service.get_access_token', twitter_get_access_token)
@patch('rauth.OAuth1Session.get', twitter_get)
@patch('django.contrib.sessions.backends.signed_cookies.SessionStore.get', twitter_get_session)
def test_render(self):
session = self.client.session
session['oauth_token_secret'] = 'oauth_token_secret'
session.save()
response = self.client.get(
self.url,
{'oauth_token': 'oauth_token', 'oauth_verifier': 'oauth_verifier'},
follow=True
)
self.assertRedirects(response, reverse('accounts_account_list'))
self.assertContains(response, u'Conta do twitter adicionada com sucesso.')
self.assertTrue(
Account.objects.filter(
provider='twitter',
provider_id='38895958',
provider_username='theSeanCook'
)
)
class TestFacebookNewView(TestCase):
def setUp(self):
self.url = reverse('accounts_facebook_new')
self.user = User.objects.create_user('user1', '[email protected]', '123456')
self.client.login(username='user1', password='123456')
def test_without_login(self):
self.client.logout()
response = self.client.get(self.url, follow=True)
self.assertRedirects(response, '/login/?next=' + self.url)
def test_render(self):
response = self.client.get(self.url)
self.assertEquals(
response['Location'],
'https://www.facebook.com/dialog/oauth?redirect_uri=http%3A%2F%2Fmutiraopython.org%2Faccounts%2Fnew%2Ffacebook%2Fcallback%2F&client_id=185625198282051'
)
def facebook_get_raw_access_token(*args, **kwargs):
class FacebookGetRaw(object):
@property
def content(self):
data = 'access_token=access_token&expires=5158944'
return data
return FacebookGetRaw()
def facebook_get(*args, **kwargs):
class FacebookGet(object):
def json(self):
data = {}
data['id'] = 38895958
data['username'] = 'theSeanCook'
return data
return FacebookGet()
class TestFacebookCallbackView(TestCase):
def setUp(self):
self.url = reverse('accounts_facebook_callback')
self.user = User.objects.create_user('user1', '[email protected]', '123456')
self.client.login(username='user1', password='123456')
def test_without_login(self):
self.client.logout()
response = self.client.get(self.url, follow=True)
self.assertRedirects(response, '/login/?next=' + self.url)
def test_render_without_code(self):
response = self.client.get(self.url, follow=True)
self.assertRedirects(response, reverse('accounts_account_list'))
self.assertContains(response, u'Ocorreu um erro ao adicionar a conta do facebook.')
@patch('rauth.OAuth2Service.get_raw_access_token', facebook_get_raw_access_token)
@patch('rauth.OAuth2Session.get', facebook_get)
def test_render(self):
response = self.client.get(self.url, {'code': 'code'}, follow=True)
self.assertRedirects(response, reverse('accounts_account_list'))
self.assertContains(response, u'Conta do facebook adicionada com sucesso.')
self.assertTrue(
Account.objects.filter(
provider='facebook',
provider_id='38895958',
provider_username='theSeanCook'
)
)
class TestYoutubeNewView(TestCase):
def setUp(self):
self.url = reverse('accounts_youtube_new')
self.user = User.objects.create_user('user1', '[email protected]', '123456')
self.client.login(username='user1', password='123456')
def test_without_login(self):
self.client.logout()
response = self.client.get(self.url, follow=True)
self.assertRedirects(response, '/login/?next=' + self.url)
def test_render(self):
response = self.client.get(self.url)
self.assertEquals(
response['Location'],
'https://accounts.google.com/o/oauth2/auth?redirect_uri=http%3A%2F%2Fmutiraopython.org%2Faccounts%2Fnew%2Fyoutube%2Fcallback%2F&response_type=code&client_id=863393693267.apps.googleusercontent.com&approval_prompt=force&scope=https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fyoutube+https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fuserinfo.profile+https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fuserinfo.email&access_type=offline'
)
def youtube_get_raw_access_token(*args, **kwargs):
class YoutubeGetRaw(object):
def json(self):
data = {
'access_token': 'access_token',
'expires_in': 5158944,
'refresh_token': 'refresh_token'
}
return data
return YoutubeGetRaw()
def youtube_get(*args, **kwargs):
class YoutubeGet(object):
def json(self):
data = {}
data['id'] = 38895958
data['email'] = '[email protected]'
return data
return YoutubeGet()
class TestYoutubeCallbackView(TestCase):
def setUp(self):
self.url = reverse('accounts_youtube_callback')
self.user = User.objects.create_user('user1', '[email protected]', '123456')
self.client.login(username='user1', password='123456')
def test_without_login(self):
self.client.logout()
response = self.client.get(self.url, follow=True)
self.assertRedirects(response, '/login/?next=' + self.url)
def test_render_without_code(self):
response = self.client.get(self.url, follow=True)
self.assertRedirects(response, reverse('accounts_account_list'))
self.assertContains(response, u'Ocorreu um erro ao adicionar a conta do youtube.')
@patch('rauth.OAuth2Service.get_raw_access_token', youtube_get_raw_access_token)
@patch('rauth.OAuth2Session.get', youtube_get)
def test_render(self):
response = self.client.get(self.url, {'code': 'code'}, follow=True)
self.assertRedirects(response, reverse('accounts_account_list'))
self.assertContains(response, u'Conta do youtube adicionada com sucesso.')
self.assertTrue(
Account.objects.filter(
provider='youtube',
provider_id='38895958',
provider_username='[email protected]'
)
)
|
from datetime import date, timedelta
from django.conf import settings
from django.core.management.base import BaseCommand
from kitsune.kpi.management import utils
from kitsune.kpi.models import L10N_METRIC_CODE, Metric, MetricKind
from kitsune.sumo import googleanalytics
class Command(BaseCommand):
help = "Calculate new l10n coverage numbers and save."
def handle(self, **options):
"""
L10n coverage is a measure of the amount of translations that are
up to date, weighted by the number of visits for each locale.
The "algorithm" (see Bug 727084):
SUMO visits = Total SUMO visits for the last 30 days;
        Total up to date = 0;
        For each locale {
            Total up to date = Total up to date +
                ((Number of up to date articles in the en-US top 50 visited) / 50) *
                (Visitors for that locale / SUMO visits);
}
An up to date article is any of the following:
* An en-US article (by definition it is always up to date)
* The latest en-US revision has been translated
* There are only new revisions with TYPO_SIGNIFICANCE not translated
* There is only one revision of MEDIUM_SIGNIFICANCE not translated
"""
# Get the top 60 visited articles. We will only use the top 50
# but a handful aren't localizable so we get some extras.
top_60_docs = utils._get_top_docs(60)
# Get the visits to each locale in the last 30 days.
end = date.today() - timedelta(days=1) # yesterday
start = end - timedelta(days=30)
locale_visits = googleanalytics.visitors_by_locale(start, end)
# Total visits.
total_visits = sum(locale_visits.values())
# Calculate the coverage.
coverage = 0
for locale, visits in locale_visits.items():
if locale == settings.WIKI_DEFAULT_LANGUAGE:
num_docs = utils.MAX_DOCS_UP_TO_DATE
up_to_date_docs = utils.MAX_DOCS_UP_TO_DATE
else:
up_to_date_docs, num_docs = utils._get_up_to_date_count(top_60_docs, locale)
if num_docs and total_visits:
coverage += (float(up_to_date_docs) / num_docs) * (float(visits) / total_visits)
# Save the value to Metric table.
metric_kind = MetricKind.objects.get_or_create(code=L10N_METRIC_CODE)[0]
day = date.today()
Metric.objects.create(
kind=metric_kind,
start=day,
end=day + timedelta(days=1),
value=int(coverage * 100),
) # Store as a % int.
|
"""
Spin-restricted Hartree-Fock for atoms
"""
import numpy as np
import scipy.linalg as slg
from frankenstein import molecule, scf
from frankenstein.tools.mol_utils import get_norb_l
from frankenstein.tools.scf_utils import get_fock, get_fock_ao_direct, \
get_scf_energy
from frankenstein.data.atom_data import get_atomic_number, get_nelec_by_l
class RHFATOM(scf.RHF):
"""Basic class for spin-restricted Hartree-Fock for atoms.
Note:
        The idea is to fix the MO occupation to satisfy the aufbau principle. For degenerate shells (l > 0), the occupation is spherically averaged. The following example illustrates this using the nitrogen atom.
>>> mfa = RHFATOM("N", "cc-pVDZ")
>>> print(mfa.mol.bas_l)
<<< [0, 0, 0, 1, 1, 2]
>>> print(mfa.mol.bas_pure)
<<< [False, False, False, False, False, True]
>>> print(mfa.idao_by_l)
<<< [[0, 1, 2], [3, 6], [9]]
>>> print(mfa.occ_vec)
<<< [1. 1. 0. 0.5 0.5 0.5 0. 0. 0. 0. 0. 0. 0. 0. ]
"""
def __init__(self, atomsymb, basis, **kwargs):
Z = get_atomic_number(atomsymb)
spin = 2 if Z % 2 else 1
atom = molecule.MOL("{:s} 0 0 0".format(atomsymb), basis, spin=spin, \
verbose=0)
scf.RHF.__init__(self, atom, orth_hV=False, max_iter=10, conv=5, \
guess="core", **kwargs)
self.norb_by_l = self.mol.get_norb_by_l()
self.idao_by_l = self.mol.get_idao_by_l()
self.occ_vec = self.get_occ_vec()
self.initialize()
def initialize(self):
# we need re-define how we compute Fock matrices and etc. since we are now working in raw AOs (i.e., non-orthogonal)
def __rdm1_builder_ez(mo_coeff):
id_occ = self.occ_vec > 0
Cocc = mo_coeff[id_occ]
return (Cocc * self.occ_vec[id_occ]) @ Cocc.T
# Inp: rdm1 in AO; Out: Fock in AO
if self.ao_direct:
def __fock_builder_ez(Dao):
m = self.mol
Iao = np.eye(m.nao)
return get_fock_ao_direct(m.h, m.Zs, m.xyzs, m.basis, Iao, Dao)
else:
def __fock_builder_ez(Dao):
m = self.mol
return get_fock(m.h, m.V, Dao)
def __e_scf_builder_ez(fock, rdm1):
return get_scf_energy(self.mol.h, fock, rdm1)
self.rdm1_builder_ez = __rdm1_builder_ez
self.fock_builder_ez = __fock_builder_ez
self.e_scf_builder_ez = __e_scf_builder_ez
def get_sphave_occ(self):
"""Get spherically averaged occupation
"""
nelec_by_l = get_nelec_by_l(self.mol.atoms[0])
max_l = len(self.norb_by_l)
ndocc = [0] * max_l
nfocc = [0.] * max_l
for l in range(max_l):
norb_l = self.norb_by_l[l]
ndocc[l] = nelec_by_l[l] // (2 * norb_l)
nfocc[l] = (nelec_by_l[l] - ndocc[l]*2*norb_l) / float(norb_l)
return ndocc, nfocc
def get_occ_vec(self):
ndocc, nfocc = self.get_sphave_occ()
occ_vec = np.zeros(self.mol.nmo)
for l,idao in enumerate(self.idao_by_l):
norb_l = self.norb_by_l[l]
for m in range(norb_l):
occ_vec[np.array(idao[:ndocc[l]], dtype=int)+m] = 1.
if len(idao) > ndocc[l]:
occ_vec[idao[ndocc[l]]+m] = nfocc[l] * 0.5
return occ_vec
def Roothaan_step(self):
"""Diagonalize the spherically averaged Fock matrix.
Note:
Since AOs with different l's are orthogonal, this "average and diagonalize" process is performed one l-group at a time, and the final MO coefficient matrix will be block diagonalized.
"""
mo_energy = np.zeros(self.nao)
mo_coeff = np.zeros([self.nao, self.nao])
max_l = len(self.idao_by_l)
for l in range(max_l):
idao = np.array(self.idao_by_l[l], dtype=int)
norb_l = self.norb_by_l[l]
# compute spherically averaged Fock matrix for shell with a.m. = l
fock_l = 0.
ovlp_l = 0.
for m in range(norb_l):
fock_l += self.fock[idao+m,:][:,idao+m]
ovlp_l += self.mol.S[idao+m,:][:,idao+m]
fock_l /= float(norb_l)
ovlp_l /= float(norb_l)
# diagonalize fl
eps_l, C_l = slg.eigh(fock_l, ovlp_l)
# construct mo_coeff and mo_energy
for m in range(norb_l):
mo_energy[idao+m] = eps_l
for i,i1 in enumerate(idao):
mo_coeff[idao+m,i1+m] = C_l[:,i]
self.mo_energy = mo_energy
self.mo_coeff = mo_coeff
def update(self):
if not self.mo_coeff is None:
self.rdm1 = (self.mo_coeff * self.occ_vec) @ self.mo_coeff.T
elif self.rdm1 is None:
raise RuntimeError("Both mo_coeff and rdm1 are None.")
self.fock = self.fock_builder_ez(self.rdm1)
self.e_scf = self.e_scf_builder_ez(self.fock, self.rdm1)
self.S2 = 0. if not self.unrestricted \
else get_uscf_S2(self.rdm1, self.noccs)
def get_diis_errmat(self):
if self.unrestricted:
raise ValueError("Atomic SCF only supports spin-restricted calculations!")
else:
X = self.fock @ self.rdm1 @ self.mol.S
X -= X.T
return X
if __name__ == "__main__":
from frankenstein.data.atom_data import get_atomic_name
from frankenstein.tools.io_utils import dumpMat
# for Z in range(1,10):
for Z in [7]:
atom = get_atomic_name(Z)
mfa = RHFATOM(atom, "cc-pVDZ")
mfa.verbose = 1
print(mfa.mol.bas_l)
print(mfa.mol.bas_pure)
print(mfa.idao_by_l)
print(mfa.occ_vec)
# mfa.kernel()
# print(np.trace([email protected]))
# print(mfa.e_scf, "\n")
# dumpMat(mfa.mo_energy)
# dumpMat(mfa.mo_coeff)
# dumpMat(mfa.occ_vec)
# dumpMat((mfa.mo_coeff*mfa.occ_vec)@mfa.mo_coeff.T*2.)
|
# -*- coding: utf-8 -*-
import os
import zipfile
from django import forms
from django.conf import settings
from django.core.cache import cache
from django.core.urlresolvers import reverse
from mock import Mock, patch
from nose.tools import eq_
import amo.tests
from files.utils import SafeUnzip
from mkt.files.helpers import FileViewer, DiffHelper
root = os.path.join(settings.ROOT, 'apps/files/fixtures/files')
get_file = lambda x: '%s/%s' % (root, x)
def make_file(pk, file_path, **kwargs):
obj = Mock()
obj.id = pk
for k, v in kwargs.items():
setattr(obj, k, v)
obj.file_path = file_path
obj.__str__ = lambda x: x.pk
obj.version = Mock()
obj.version.version = 1
return obj
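# make_file builds a bare Mock that stands in for the file model object the
# viewer helpers expect: an id, a file_path pointing at a fixture, and a
# version attribute. The FileViewer/DiffHelper tests below build on it.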
# TODO: It'd be nice if these used packaged app examples but these addons still
# flex the code so it wasn't converted.
class TestFileHelper(amo.tests.TestCase):
def setUp(self):
self.viewer = FileViewer(make_file(1, get_file('dictionary-test.xpi')))
def tearDown(self):
self.viewer.cleanup()
def test_files_not_extracted(self):
eq_(self.viewer.is_extracted(), False)
def test_files_extracted(self):
self.viewer.extract()
eq_(self.viewer.is_extracted(), True)
def test_recurse_extract(self):
self.viewer.src = get_file('recurse.xpi')
self.viewer.extract()
eq_(self.viewer.is_extracted(), True)
def test_recurse_contents(self):
self.viewer.src = get_file('recurse.xpi')
self.viewer.extract()
files = self.viewer.get_files()
nm = ['recurse/recurse.xpi/chrome/test-root.txt',
'recurse/somejar.jar/recurse/recurse.xpi/chrome/test.jar',
'recurse/somejar.jar/recurse/recurse.xpi/chrome/test.jar/test']
for name in nm:
eq_(name in files, True, 'File %r not extracted' % name)
def test_cleanup(self):
self.viewer.extract()
self.viewer.cleanup()
eq_(self.viewer.is_extracted(), False)
def test_truncate(self):
truncate = self.viewer.truncate
for x, y in (['foo.rdf', 'foo.rdf'],
['somelongfilename.rdf', 'somelongfilenam...rdf'],
[u'unicode삮.txt', u'unicode\uc0ae.txt'],
[u'unicodesomelong삮.txt', u'unicodesomelong...txt'],
['somelongfilename.somelongextension',
'somelongfilenam...somelonge..'],):
eq_(truncate(x), y)
def test_get_files_not_extracted(self):
assert not self.viewer.get_files()
def test_get_files_size(self):
self.viewer.extract()
files = self.viewer.get_files()
eq_(len(files), 14)
def test_get_files_directory(self):
self.viewer.extract()
files = self.viewer.get_files()
eq_(files['install.js']['directory'], False)
eq_(files['install.js']['binary'], False)
eq_(files['__MACOSX']['directory'], True)
eq_(files['__MACOSX']['binary'], False)
def test_url_file(self):
self.viewer.extract()
files = self.viewer.get_files()
url = reverse('mkt.files.list', args=[self.viewer.file.id, 'file',
'install.js'])
assert files['install.js']['url'].endswith(url)
def test_get_files_depth(self):
self.viewer.extract()
files = self.viewer.get_files()
eq_(files['dictionaries/license.txt']['depth'], 1)
def test_bom(self):
dest = os.path.join(settings.TMP_PATH, 'test_bom')
open(dest, 'w').write('foo'.encode('utf-16'))
self.viewer.select('foo')
self.viewer.selected = {'full': dest, 'size': 1}
eq_(self.viewer.read_file(), u'foo')
os.remove(dest)
def test_syntax(self):
for filename, syntax in [('foo.rdf', 'xml'),
('foo.xul', 'xml'),
('foo.json', 'js'),
('foo.jsm', 'js'),
('foo.js', 'js'),
('manifest.webapp', 'js'),
('foo.html', 'html'),
('foo.css', 'css'),
('foo.bar', 'plain')]:
eq_(self.viewer.get_syntax(filename), syntax)
def test_file_order(self):
self.viewer.extract()
dest = self.viewer.dest
open(os.path.join(dest, 'chrome.manifest'), 'w')
subdir = os.path.join(dest, 'chrome')
os.mkdir(subdir)
open(os.path.join(subdir, 'foo'), 'w')
cache.clear()
files = self.viewer.get_files().keys()
rt = files.index(u'chrome')
eq_(files[rt:rt + 3], [u'chrome', u'chrome/foo', u'dictionaries'])
@patch.object(settings, 'FILE_VIEWER_SIZE_LIMIT', 5)
def test_file_size(self):
self.viewer.extract()
self.viewer.get_files()
self.viewer.select('install.js')
res = self.viewer.read_file()
eq_(res, '')
assert self.viewer.selected['msg'].startswith('File size is')
@patch.object(settings, 'FILE_VIEWER_SIZE_LIMIT', 5)
def test_file_size_unicode(self):
with self.activate(locale='he'):
self.viewer.extract()
self.viewer.get_files()
self.viewer.select('install.js')
res = self.viewer.read_file()
eq_(res, '')
assert self.viewer.selected['msg'].startswith('File size is')
@patch.object(settings, 'FILE_UNZIP_SIZE_LIMIT', 5)
def test_contents_size(self):
self.assertRaises(forms.ValidationError, self.viewer.extract)
def test_default(self):
eq_(self.viewer.get_default(None), 'manifest.webapp')
def test_delete_mid_read(self):
self.viewer.extract()
self.viewer.select('install.js')
os.remove(os.path.join(self.viewer.dest, 'install.js'))
res = self.viewer.read_file()
eq_(res, '')
assert self.viewer.selected['msg'].startswith('That file no')
@patch('mkt.files.helpers.get_md5')
def test_delete_mid_tree(self, get_md5):
get_md5.side_effect = IOError('ow')
self.viewer.extract()
eq_({}, self.viewer.get_files())
class TestDiffHelper(amo.tests.TestCase):
def setUp(self):
src = os.path.join(settings.ROOT, get_file('dictionary-test.xpi'))
self.helper = DiffHelper(make_file(1, src), make_file(2, src))
def tearDown(self):
self.helper.cleanup()
def test_files_not_extracted(self):
eq_(self.helper.is_extracted(), False)
def test_files_extracted(self):
self.helper.extract()
eq_(self.helper.is_extracted(), True)
def test_get_files(self):
eq_(self.helper.left.get_files(),
self.helper.get_files())
def test_diffable(self):
self.helper.extract()
self.helper.select('install.js')
assert self.helper.is_diffable()
def test_diffable_one_missing(self):
self.helper.extract()
os.remove(os.path.join(self.helper.right.dest, 'install.js'))
self.helper.select('install.js')
assert self.helper.is_diffable()
def test_diffable_allow_empty(self):
self.helper.extract()
self.assertRaises(AssertionError, self.helper.right.read_file)
eq_(self.helper.right.read_file(allow_empty=True), '')
def test_diffable_both_missing(self):
self.helper.extract()
self.helper.select('foo.js')
assert not self.helper.is_diffable()
def test_diffable_deleted_files(self):
self.helper.extract()
os.remove(os.path.join(self.helper.left.dest, 'install.js'))
eq_('install.js' in self.helper.get_deleted_files(), True)
def test_diffable_one_binary_same(self):
self.helper.extract()
self.helper.select('install.js')
self.helper.left.selected['binary'] = True
assert self.helper.is_binary()
def test_diffable_one_binary_diff(self):
self.helper.extract()
self.change(self.helper.left.dest, 'asd')
cache.clear()
self.helper.select('install.js')
self.helper.left.selected['binary'] = True
assert self.helper.is_binary()
def test_diffable_two_binary_diff(self):
self.helper.extract()
self.change(self.helper.left.dest, 'asd')
self.change(self.helper.right.dest, 'asd123')
cache.clear()
self.helper.select('install.js')
self.helper.left.selected['binary'] = True
self.helper.right.selected['binary'] = True
assert self.helper.is_binary()
def test_diffable_one_directory(self):
self.helper.extract()
self.helper.select('install.js')
self.helper.left.selected['directory'] = True
assert not self.helper.is_diffable()
assert self.helper.left.selected['msg'].startswith('This file')
def test_diffable_parent(self):
self.helper.extract()
self.change(self.helper.left.dest, 'asd',
filename='__MACOSX/._dictionaries')
cache.clear()
files = self.helper.get_files()
eq_(files['__MACOSX/._dictionaries']['diff'], True)
eq_(files['__MACOSX']['diff'], True)
def change(self, file, text, filename='install.js'):
path = os.path.join(file, filename)
data = open(path, 'r').read()
data += text
open(path, 'w').write(data)
class TestSafeUnzipFile(amo.tests.TestCase, amo.tests.AMOPaths):
#TODO(andym): get full coverage for existing SafeUnzip methods, most
# is covered in the file viewer tests.
@patch.object(settings, 'FILE_UNZIP_SIZE_LIMIT', 5)
def test_unzip_limit(self):
zip = SafeUnzip(self.xpi_path('langpack-localepicker'))
self.assertRaises(forms.ValidationError, zip.is_valid)
def test_unzip_fatal(self):
zip = SafeUnzip(self.xpi_path('search.xml'))
self.assertRaises(zipfile.BadZipfile, zip.is_valid)
def test_unzip_not_fatal(self):
zip = SafeUnzip(self.xpi_path('search.xml'))
assert not zip.is_valid(fatal=False)
def test_extract_path(self):
zip = SafeUnzip(self.xpi_path('langpack-localepicker'))
assert zip.is_valid()
        assert 'locale browser de' in zip.extract_path('chrome.manifest')
def test_not_secure(self):
zip = SafeUnzip(self.xpi_path('extension'))
zip.is_valid()
assert not zip.is_signed()
def test_is_secure(self):
zip = SafeUnzip(self.xpi_path('signed'))
zip.is_valid()
assert zip.is_signed()
def test_is_broken(self):
zip = SafeUnzip(self.xpi_path('signed'))
zip.is_valid()
zip.info[2].filename = 'META-INF/foo.sf'
assert not zip.is_signed()
|
from __future__ import print_function
import yaml
import os
from apt_package_mirror.mirror import Mirror
import sys
import argparse
def main():
# When files are created make them with a 022 umask
    os.umask(0o22)
# Add commandline options and help text for them
parser = argparse.ArgumentParser()
parser.add_argument('-U', '--update-packages-only',
dest='update_packages_only', action='store_true',
default=False, help='Grab new packages only')
config_file_help = ('yaml config file that describes what mirror to copy '
'and where to store the data')
parser.add_argument(
'config_file', default='config.yaml', nargs='?',
help=config_file_help
)
args = parser.parse_args()
# Check if the config file exists, if it doesnt fail with a message
try:
with open(args.config_file, "r") as file_stream:
config = yaml.load(file_stream)
except:
print("failed to load the config file")
sys.exit(1)
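    # An illustrative config.yaml covering the keys read below (values are
    # examples only):
    #   mirror_path: /srv/apt-mirror
    #   mirror_url: http://archive.ubuntu.com/ubuntu
    #   temp_files_path: /tmp/apt-mirror
    #   log_file: /var/log/apt-mirror.log
    #   log_level: INFO
    #   package_ttl: 86400
    #   hash_function: sha256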
# Check if the mirror path defined in the config file exists
mirror_path = config['mirror_path']
if not os.path.exists(mirror_path):
print("Mirror path does not exist, please fix it")
sys.exit(1)
# Check if the directory for temp files is defined
try:
temp_indices = config['temp_files_path']
except:
temp_indices = None
# Check if a log_level is defined
try:
log_level = config['log_level']
except:
log_level = None
# Check if a package_ttl is defined
try:
package_ttl = config['package_ttl']
except:
package_ttl = None
# Check if a hash_function is defined
try:
hash_function = config['hash_function']
except:
hash_function = None
# Create a file for logging in the location defined by the config file
try:
log_file = config['log_file']
f = open(log_file, 'a')
f.close()
except:
log_file = None
mirror = Mirror(mirror_path=mirror_path,
mirror_url=config['mirror_url'],
temp_indices=temp_indices,
log_file=log_file, log_level=log_level,
package_ttl=package_ttl, hash_function=hash_function)
# If a -U option is used, only update the 'pool' directory. This only grabs
# new packages
if args.update_packages_only:
mirror.update_pool()
# If a -U option is not used, attempt to update the whole mirror
else:
mirror.sync()
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
#-------------------------------------------------------------------------------
# FILE: gentankdiy.py
# PURPOSE: gentankdiy.py add enhanced external tank data to genmon
#
# AUTHOR: jgyates
# DATE: 06-18-2019
#
# MODIFICATIONS:
#-------------------------------------------------------------------------------
import datetime, time, sys, signal, os, threading, collections, json, ssl
import atexit, getopt, requests
try:
from genmonlib.myclient import ClientInterface
from genmonlib.mylog import SetupLogger
from genmonlib.myconfig import MyConfig
from genmonlib.mysupport import MySupport
from genmonlib.mycommon import MyCommon
from genmonlib.mythread import MyThread
from genmonlib.program_defaults import ProgramDefaults
from genmonlib.gaugediy import GaugeDIY1, GaugeDIY2
import smbus
except Exception as e1:
print("\n\nThis program requires the modules located in the genmonlib directory in the github repository.\n")
print("Please see the project documentation at https://github.com/jgyates/genmon.\n")
print("Error: " + str(e1))
sys.exit(2)
#------------ GenTankData class ------------------------------------------------
class GenTankData(MySupport):
#------------ GenTankData::init---------------------------------------------
def __init__(self,
log = None,
loglocation = ProgramDefaults.LogPath,
ConfigFilePath = MyCommon.DefaultConfPath,
host = ProgramDefaults.LocalHost,
port = ProgramDefaults.ServerPort,
console = None):
super(GenTankData, self).__init__()
self.LogFileName = os.path.join(loglocation, "gentankdiy.log")
self.AccessLock = threading.Lock()
self.log = log
self.console = console
self.MonitorAddress = host
configfile = os.path.join(ConfigFilePath, 'gentankdiy.conf')
try:
if not os.path.isfile(configfile):
self.LogConsole("Missing config file : " + configfile)
self.LogError("Missing config file : " + configfile)
sys.exit(1)
self.config = MyConfig(filename = configfile, section = 'gentankdiy', log = self.log)
self.gauge_type = self.config.ReadValue('gauge_type', return_type = int, default = 1)
self.nb_tanks = self.config.ReadValue('nb_tanks', return_type = int, default = 1)
if self.MonitorAddress == None or not len(self.MonitorAddress):
self.MonitorAddress = ProgramDefaults.LocalHost
except Exception as e1:
self.LogErrorLine("Error reading " + configfile + ": " + str(e1))
self.LogConsole("Error reading " + configfile + ": " + str(e1))
sys.exit(1)
try:
if self.gauge_type == 1:
self.gauge = GaugeDIY1(self.config, log = self.log, console = self.console)
elif self.gauge_type == 2:
self.gauge = GaugeDIY2(self.config, log = self.log, console = self.console)
else:
self.LogError("Invalid gauge type: " + str(self.gauge_type))
sys.exit(1)
if not self.nb_tanks in [1,2]:
self.LogError("Invalid Number of tanks (nb_tanks), 1 or 2 accepted: " + str(self.nb_tanks))
sys.exit(1)
self.debug = self.gauge.debug
self.Generator = ClientInterface(host = self.MonitorAddress, port = port, log = self.log)
# start thread monitor time for exercise
self.Threads["TankCheckThread"] = MyThread(self.TankCheckThread, Name = "TankCheckThread", start = False)
if not self.gauge.InitADC():
self.LogError("InitADC failed, exiting")
sys.exit(1)
self.Threads["TankCheckThread"].Start()
signal.signal(signal.SIGTERM, self.SignalClose)
signal.signal(signal.SIGINT, self.SignalClose)
except Exception as e1:
self.LogErrorLine("Error in GenTankData init: " + str(e1))
self.console.error("Error in GenTankData init: " + str(e1))
sys.exit(1)
#---------- GenTankData::SendCommand --------------------------------------
def SendCommand(self, Command):
if len(Command) == 0:
return "Invalid Command"
try:
with self.AccessLock:
data = self.Generator.ProcessMonitorCommand(Command)
except Exception as e1:
self.LogErrorLine("Error calling ProcessMonitorCommand: " + str(Command))
data = ""
return data
# ---------- GenTankData::TankCheckThread-----------------------------------
def TankCheckThread(self):
time.sleep(1)
while True:
try:
dataforgenmon = {}
tankdata = self.gauge.GetGaugeData()
if tankdata != None:
dataforgenmon["Tank Name"] = "External Tank"
dataforgenmon["Capacity"] = 0
dataforgenmon["Percentage"] = tankdata
if self.nb_tanks == 2:
tankdata2 = self.gauge.GetGaugeData(tanktwo = True)
if tankdata2 != None:
dataforgenmon["Percentage2"] = tankdata2
retVal = self.SendCommand("generator: set_tank_data=" + json.dumps(dataforgenmon))
self.LogDebug(retVal)
if self.WaitForExit("TankCheckThread", float(self.gauge.PollTime * 60)):
return
except Exception as e1:
self.LogErrorLine("Error in TankCheckThread: " + str(e1))
if self.WaitForExit("TankCheckThread", float(self.gauge.PollTime * 60)):
return
# ----------GenTankData::SignalClose----------------------------------------
def SignalClose(self, signum, frame):
self.Close()
sys.exit(1)
# ----------GenTankData::Close----------------------------------------------
def Close(self):
self.KillThread("TankCheckThread")
self.gauge.Close()
self.Generator.Close()
#-------------------------------------------------------------------------------
if __name__ == "__main__":
console, ConfigFilePath, address, port, loglocation, log = MySupport.SetupAddOnProgram("gentankdiy")
GenTankDataInstance = GenTankData(log = log, loglocation = loglocation, ConfigFilePath = ConfigFilePath, host = address, port = port, console = console)
while True:
time.sleep(0.5)
sys.exit(1)
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import unittest
import os
import numpy as np
from pymatgen.core import PeriodicSite
from pymatgen.io.vasp import Vasprun, Poscar, Outcar
from pymatgen.analysis.defects.core import Vacancy, Interstitial, DefectEntry
from pymatgen.analysis.defects.defect_compatibility import DefectCompatibility
from pymatgen.util.testing import PymatgenTest
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..", 'test_files')
class DefectCompatibilityTest(PymatgenTest):
def setUp(self):
struc = PymatgenTest.get_structure("VO2")
struc.make_supercell(3)
struc = struc
self.vac = Vacancy(struc, struc.sites[0], charge=-3)
abc = self.vac.bulk_structure.lattice.abc
axisdata = [np.arange(0., lattval, 0.2) for lattval in abc]
bldata = [np.array([1. for u in np.arange(0., lattval, 0.2)]) for lattval in abc]
dldata = [
np.array([(-1 - np.cos(2 * np.pi * u / lattval)) for u in np.arange(0., lattval, 0.2)]) for lattval in abc
]
self.frey_params = {'axis_grid': axisdata, 'bulk_planar_averages': bldata,
'defect_planar_averages': dldata, 'dielectric': 15,
'initial_defect_structure': struc.copy(),
'defect_frac_sc_coords': struc.sites[0].frac_coords[:]}
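        # frey_params drives the Freysoldt (planar-average) charge correction:
        # bldata is a flat bulk potential and dldata a synthetic cosine defect
        # potential along each lattice vector, both sampled on axis_grid with a
        # 0.2 step.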
kumagai_bulk_struc = Poscar.from_file(os.path.join( test_dir, 'defect', 'CONTCAR_bulk')).structure
bulk_out = Outcar( os.path.join( test_dir, 'defect', 'OUTCAR_bulk.gz'))
defect_out = Outcar( os.path.join( test_dir, 'defect', 'OUTCAR_vac_Ga_-3.gz'))
self.kumagai_vac = Vacancy(kumagai_bulk_struc, kumagai_bulk_struc.sites[0], charge=-3)
kumagai_defect_structure = self.kumagai_vac.generate_defect_structure()
self.kumagai_params = {'bulk_atomic_site_averages': bulk_out.electrostatic_potential,
'defect_atomic_site_averages': defect_out.electrostatic_potential,
'site_matching_indices': [[ind, ind-1] for ind in range(len(kumagai_bulk_struc))],
'defect_frac_sc_coords': [0.,0.,0.],
'initial_defect_structure': kumagai_defect_structure,
'dielectric': 18.118 * np.identity(3),
                               'gamma': 0.153156  # not necessary to load gamma, but speeds up the unit test
}
v = Vasprun(os.path.join(test_dir, 'vasprun.xml'))
eigenvalues = v.eigenvalues.copy()
kptweights = v.actual_kpoints_weights
potalign = -0.1
vbm = v.eigenvalue_band_properties[2]
cbm = v.eigenvalue_band_properties[1]
self.bandfill_params = { 'eigenvalues': eigenvalues,
'kpoint_weights': kptweights,
'potalign': potalign,
'vbm': vbm, 'cbm': cbm }
self.band_edge_params = {'hybrid_cbm': 1., 'hybrid_vbm': -1., 'vbm': -0.5,
'cbm': 0.6, 'num_hole_vbm': 1., 'num_elec_cbm': 1.}
def test_process_entry(self):
# basic process with no corrections
dentry = DefectEntry(self.vac, 0., corrections={}, parameters={'vbm': 0., 'cbm': 0.}, entry_id=None)
dc = DefectCompatibility()
dentry = dc.process_entry( dentry)
self.assertIsNotNone( dentry)
# process with corrections from parameters used in other unit tests
params = self.frey_params.copy()
params.update(self.bandfill_params)
params.update({'hybrid_cbm': params['cbm'] + .2, 'hybrid_vbm': params['vbm'] - .4, })
dentry = DefectEntry(self.vac, 0., corrections={}, parameters=params, entry_id=None)
dc = DefectCompatibility()
dentry = dc.process_entry( dentry)
self.assertAlmostEqual( dentry.corrections['bandedgeshifting_correction'], 1.2)
self.assertAlmostEqual( dentry.corrections['bandfilling_correction'], 0.0)
self.assertAlmostEqual( dentry.corrections['charge_correction'], 5.44595036)
# test over delocalized free carriers which forces skipping charge correction
# modify the eigenvalue list to have free holes
hole_eigenvalues = {}
for spinkey, spinset in params['eigenvalues'].items():
hole_eigenvalues[spinkey] = []
for kptset in spinset:
hole_eigenvalues[spinkey].append([])
for eig in kptset:
if (eig[0] < params['vbm']) and (eig[0] > params['vbm'] - .8):
hole_eigenvalues[spinkey][-1].append([eig[0], 0.5])
else:
hole_eigenvalues[spinkey][-1].append(eig)
params.update( {'eigenvalues': hole_eigenvalues})
dentry = DefectEntry(self.vac, 0., corrections={}, parameters=params, entry_id=None)
dc = DefectCompatibility( free_chg_cutoff=0.8)
dentry = dc.process_entry( dentry)
self.assertAlmostEqual( dentry.corrections['bandedgeshifting_correction'], 1.19999999)
self.assertAlmostEqual( dentry.corrections['bandfilling_correction'], -1.62202400)
self.assertAlmostEqual( dentry.corrections['charge_correction'], 0.)
# turn off band filling and band edge shifting
dc = DefectCompatibility( free_chg_cutoff=0.8, use_bandfilling=False, use_bandedgeshift=False)
dentry = dc.process_entry( dentry)
self.assertAlmostEqual( dentry.corrections['bandedgeshifting_correction'], 0.)
self.assertAlmostEqual( dentry.corrections['bandfilling_correction'], 0.)
self.assertAlmostEqual( dentry.corrections['charge_correction'], 0.)
def test_perform_all_corrections(self):
        # return entry even if insufficient values are provided
# for freysoldt, kumagai, bandfilling, or band edge shifting
de = DefectEntry(self.vac, 0., corrections={}, parameters={}, entry_id=None)
dc = DefectCompatibility()
dentry = dc.perform_all_corrections( de)
self.assertIsNotNone( dentry)
#all other correction applications are tested in unit tests below
def test_perform_freysoldt(self):
de = DefectEntry(self.vac, 0., corrections={}, parameters=self.frey_params, entry_id=None)
dc = DefectCompatibility()
dentry = dc.perform_freysoldt( de)
val = dentry.parameters['freysoldt_meta']
self.assertAlmostEqual(val['freysoldt_electrostatic'], 0.975893)
self.assertAlmostEqual(val['freysoldt_potential_alignment_correction'], 4.4700574)
self.assertAlmostEqual(val['freysoldt_potalign'], 1.4900191)
self.assertTrue('pot_corr_uncertainty_md' in val.keys())
self.assertTrue('pot_plot_data' in val.keys())
def test_perform_kumagai(self):
de = DefectEntry( self.kumagai_vac, 0., parameters=self.kumagai_params)
dc = DefectCompatibility()
dentry = dc.perform_kumagai( de)
val = dentry.parameters['kumagai_meta']
self.assertAlmostEqual(val['kumagai_electrostatic'], 0.88236299)
self.assertAlmostEqual(val['kumagai_potential_alignment_correction'], 2.09704862)
self.assertAlmostEqual(val['kumagai_potalign'], 0.69901620)
self.assertTrue('pot_corr_uncertainty_md' in val.keys())
self.assertTrue('pot_plot_data' in val.keys())
def test_run_bandfilling(self):
de = DefectEntry(self.vac, 0., corrections={}, parameters=self.bandfill_params, entry_id=None)
dc = DefectCompatibility()
dentry = dc.perform_bandfilling( de)
val = dentry.parameters['bandfilling_meta']
self.assertAlmostEqual(val['num_hole_vbm'], 0.)
self.assertAlmostEqual(val['num_elec_cbm'], 0.)
self.assertAlmostEqual(val['bandfilling_correction'], 0.)
def test_run_band_edge_shifting(self):
de = DefectEntry(self.vac, 0., corrections={}, parameters=self.band_edge_params, entry_id=None)
dc = DefectCompatibility()
dentry = dc.perform_band_edge_shifting( de)
val = dentry.parameters['bandshift_meta']
self.assertEqual(val['vbmshift'], -0.5)
self.assertEqual(val['cbmshift'], 0.4)
self.assertEqual(val['bandedgeshifting_correction'], 1.5)
def test_delocalization_analysis(self):
        # return entry even if insufficient values are provided
# for delocalization analysis with freysoldt, kumagai,
# bandfilling, or band edge shifting
de = DefectEntry(self.vac, 0., corrections={}, parameters={}, entry_id=None)
dc = DefectCompatibility()
dentry = dc.delocalization_analysis( de)
self.assertIsNotNone( dentry)
#all other correction applications are tested in unit tests below
def test_check_freysoldt_delocalized(self):
de = DefectEntry(self.vac, 0., corrections={}, parameters=self.frey_params, entry_id=None)
de.parameters.update( {'is_compatible': True}) #needs to be initialized with this here for unittest
dc = DefectCompatibility( plnr_avg_var_tol=0.1, plnr_avg_minmax_tol=0.5)
dentry = dc.perform_freysoldt( de)
# check case which fits under compatibility constraints
dentry = dc.check_freysoldt_delocalized( dentry)
frey_delocal = dentry.parameters['delocalization_meta']['plnr_avg']
self.assertTrue( frey_delocal['is_compatible'])
ans_var = [0.00038993, 0.02119532, 0.02119532]
ans_window = [0.048331509, 0.36797169, 0.36797169]
for ax in range(3):
ax_metadata = frey_delocal['metadata'][ax]
self.assertTrue( ax_metadata['frey_variance_compatible'])
self.assertAlmostEqual( ax_metadata['frey_variance'], ans_var[ax])
self.assertTrue( ax_metadata['frey_minmax_compatible'])
self.assertAlmostEqual( ax_metadata['frey_minmax_window'], ans_window[ax])
self.assertTrue( dentry.parameters['is_compatible'])
# check planar delocalization on 2nd and 3rd axes
dc = DefectCompatibility( plnr_avg_var_tol=0.1, plnr_avg_minmax_tol=0.2)
dentry.parameters.update( {'is_compatible': True})
dentry = dc.check_freysoldt_delocalized( dentry)
frey_delocal = dentry.parameters['delocalization_meta']['plnr_avg']
self.assertFalse( frey_delocal['is_compatible'])
ax_metadata = frey_delocal['metadata'][0]
self.assertTrue( ax_metadata['frey_variance_compatible'])
self.assertTrue( ax_metadata['frey_minmax_compatible'])
for ax in [1,2]:
ax_metadata = frey_delocal['metadata'][ax]
self.assertTrue( ax_metadata['frey_variance_compatible'])
self.assertFalse( ax_metadata['frey_minmax_compatible'])
self.assertFalse( dentry.parameters['is_compatible'])
# check variance based delocalization on 2nd and 3rd axes
dc = DefectCompatibility( plnr_avg_var_tol=0.01, plnr_avg_minmax_tol=0.5)
dentry.parameters.update( {'is_compatible': True})
dentry = dc.check_freysoldt_delocalized( dentry)
frey_delocal = dentry.parameters['delocalization_meta']['plnr_avg']
self.assertFalse( frey_delocal['is_compatible'])
ax_metadata = frey_delocal['metadata'][0]
self.assertTrue( ax_metadata['frey_variance_compatible'])
self.assertTrue( ax_metadata['frey_minmax_compatible'])
for ax in [1,2]:
ax_metadata = frey_delocal['metadata'][ax]
self.assertFalse( ax_metadata['frey_variance_compatible'])
self.assertTrue( ax_metadata['frey_minmax_compatible'])
self.assertFalse( dentry.parameters['is_compatible'])
def test_check_kumagai_delocalized(self):
de = DefectEntry( self.kumagai_vac, 0., parameters=self.kumagai_params)
de.parameters.update( {'is_compatible': True}) #needs to be initialized with this here for unittest
dc = DefectCompatibility( atomic_site_var_tol=13.3, atomic_site_minmax_tol=20.95)
dentry = dc.perform_kumagai( de)
# check case which fits under compatibility constraints
dentry = dc.check_kumagai_delocalized( dentry)
kumagai_delocal = dentry.parameters['delocalization_meta']['atomic_site']
self.assertTrue( kumagai_delocal['is_compatible'])
kumagai_md = kumagai_delocal['metadata']
true_variance = 13.262304401193997
true_minmax = 20.9435
self.assertTrue(kumagai_md['kumagai_variance_compatible'])
self.assertAlmostEqual(kumagai_md['kumagai_variance'], true_variance)
self.assertTrue(kumagai_md['kumagai_minmax_compatible'])
self.assertAlmostEqual(kumagai_md['kumagai_minmax_window'], true_minmax)
self.assertTrue( dentry.parameters['is_compatible'])
# break variable compatibility
dc = DefectCompatibility( atomic_site_var_tol=0.1, atomic_site_minmax_tol=20.95)
de.parameters.update( {'is_compatible': True})
dentry = dc.perform_kumagai( de)
dentry = dc.check_kumagai_delocalized( dentry)
kumagai_delocal = dentry.parameters['delocalization_meta']['atomic_site']
self.assertFalse( kumagai_delocal['is_compatible'])
kumagai_md = kumagai_delocal['metadata']
self.assertFalse(kumagai_md['kumagai_variance_compatible'])
self.assertAlmostEqual(kumagai_md['kumagai_variance'], true_variance)
self.assertTrue(kumagai_md['kumagai_minmax_compatible'])
self.assertAlmostEqual(kumagai_md['kumagai_minmax_window'], true_minmax)
self.assertFalse( dentry.parameters['is_compatible'])
# break maxmin compatibility
dc = DefectCompatibility(atomic_site_var_tol=13.3, atomic_site_minmax_tol=0.5)
de.parameters.update({'is_compatible': True})
dentry = dc.perform_kumagai(de)
dentry = dc.check_kumagai_delocalized(dentry)
kumagai_delocal = dentry.parameters['delocalization_meta']['atomic_site']
self.assertFalse(kumagai_delocal['is_compatible'])
kumagai_md = kumagai_delocal['metadata']
self.assertTrue(kumagai_md['kumagai_variance_compatible'])
self.assertAlmostEqual(kumagai_md['kumagai_variance'], true_variance)
self.assertFalse(kumagai_md['kumagai_minmax_compatible'])
self.assertAlmostEqual(kumagai_md['kumagai_minmax_window'], true_minmax)
self.assertFalse(dentry.parameters['is_compatible'])
def test_check_final_relaxed_structure_delocalized(self):
# test structure delocalization analysis
# first test no movement in atoms
initial_defect_structure = self.vac.generate_defect_structure()
final_defect_structure = initial_defect_structure.copy()
sampling_radius = 4.55
defect_frac_sc_coords = self.vac.site.frac_coords[:]
params = {'initial_defect_structure': initial_defect_structure,
'final_defect_structure': final_defect_structure,
'sampling_radius': sampling_radius,
'defect_frac_sc_coords': defect_frac_sc_coords,
'is_compatible': True}
dentry = DefectEntry(self.vac, 0., corrections={}, parameters=params, entry_id=None)
dc = DefectCompatibility( tot_relax_tol=0.1, perc_relax_tol=0.1, defect_tot_relax_tol=0.1)
dentry = dc.check_final_relaxed_structure_delocalized( dentry)
struc_delocal = dentry.parameters['delocalization_meta']['structure_relax']
self.assertTrue( dentry.parameters['is_compatible'])
self.assertTrue( struc_delocal['is_compatible'])
self.assertTrue( struc_delocal['metadata']['structure_tot_relax_compatible'])
self.assertEqual( struc_delocal['metadata']['tot_relax_outside_rad'], 0.)
self.assertTrue( struc_delocal['metadata']['structure_perc_relax_compatible'])
self.assertEqual( struc_delocal['metadata']['perc_relax_outside_rad'], 0.)
self.assertEqual( len(struc_delocal['metadata']['full_structure_relax_data']), len(initial_defect_structure))
self.assertIsNone( struc_delocal['metadata']['defect_index'])
defect_delocal = dentry.parameters['delocalization_meta']['defectsite_relax']
self.assertTrue( defect_delocal['is_compatible'])
self.assertIsNone( defect_delocal['metadata']['relax_amount'])
# next test for when structure has delocalized outside of radius from defect
pert_struct_fin_struct = initial_defect_structure.copy()
pert_struct_fin_struct.perturb( 0.1)
dentry.parameters.update( {'final_defect_structure': pert_struct_fin_struct})
dentry = dc.check_final_relaxed_structure_delocalized( dentry)
struc_delocal = dentry.parameters['delocalization_meta']['structure_relax']
self.assertFalse( dentry.parameters['is_compatible'])
self.assertFalse( struc_delocal['is_compatible'])
self.assertFalse( struc_delocal['metadata']['structure_tot_relax_compatible'])
self.assertAlmostEqual( struc_delocal['metadata']['tot_relax_outside_rad'], 12.5)
self.assertFalse( struc_delocal['metadata']['structure_perc_relax_compatible'])
self.assertAlmostEqual( struc_delocal['metadata']['perc_relax_outside_rad'], 77.63975155)
# now test for when an interstitial defect has migrated too much
inter_def_site = PeriodicSite('H', [7.58857304, 11.70848069, 12.97817518],
self.vac.bulk_structure.lattice, to_unit_cell=True,
coords_are_cartesian=True)
inter = Interstitial(self.vac.bulk_structure, inter_def_site, charge=0)
initial_defect_structure = inter.generate_defect_structure()
final_defect_structure = initial_defect_structure.copy()
poss_deflist = sorted(
final_defect_structure.get_sites_in_sphere(inter.site.coords,
2, include_index=True), key=lambda x: x[1])
def_index = poss_deflist[0][2]
final_defect_structure.translate_sites(indices=[def_index],
vector=[0., 0., 0.008]) #fractional coords translation
defect_frac_sc_coords = inter_def_site.frac_coords[:]
params = {'initial_defect_structure': initial_defect_structure,
'final_defect_structure': final_defect_structure,
'sampling_radius': sampling_radius,
'defect_frac_sc_coords': defect_frac_sc_coords,
'is_compatible': True}
dentry = DefectEntry(inter, 0., corrections={}, parameters=params, entry_id=None)
dentry = dc.check_final_relaxed_structure_delocalized( dentry)
defect_delocal = dentry.parameters['delocalization_meta']['defectsite_relax']
self.assertFalse( defect_delocal['is_compatible'])
self.assertAlmostEqual( defect_delocal['metadata']['relax_amount'], 0.10836054)
if __name__ == "__main__":
unittest.main()
|
"""Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Main entry module for data specified in app.yaml.
This module contains the Http handlers for data requests (as JSON) in the
Perfkit Explorer application (as well as other consumers). Data URL's are
prefixed with /data/{source} in the REST API, and in general the entities are
referenced with GET requests.
"""
__author__ = '[email protected] (Joe Allan Muharsky)'
import json
import logging
import MySQLdb
import time
from google.appengine.api import urlfetch_errors
from google.appengine.api import users
from google.appengine.runtime import apiproxy_errors
import google.appengine.runtime
import base
from perfkit.common import big_query_client
from perfkit.common import big_query_result_util as result_util
from perfkit.common import big_query_result_pivot
from perfkit.common import data_source_config
from perfkit.common import gae_big_query_client
from perfkit.common import gae_cloud_sql_client
from perfkit.common import http_util
from perfkit.explorer.model import dashboard
from perfkit.explorer.model import explorer_config
from perfkit.explorer.samples_mart import explorer_method
from perfkit.explorer.samples_mart import product_labels
from perfkit.ext.cloudsql.models import cloudsql_config
import webapp2
from google.appengine.api import urlfetch
DATASET_NAME = 'samples_mart'
URLFETCH_TIMEOUT = 50
ERROR_TIMEOUT = 'The request timed out.'
urlfetch.set_default_fetch_deadline(URLFETCH_TIMEOUT)
class Error(Exception):
pass
class SecurityError(Error):
pass
class DataHandlerUtil(object):
"""Class used to allow us to replace clients with test versions."""
# TODO: Refactor this out into a generic class capable of choosing
# the appropriate data_client for tests and/or product code.
@classmethod
def GetDataClient(cls, env):
"""Returns an instance of a data client for the specified environment.
This is used for testability and GAE support purposes to replace the
default GAE-enabled data client with a "local" one for running unit
tests.
Args:
env: The environment to connect to. For more detail, see
perfkit.data_clients.data_source_config.Environments.
Returns:
A valid data client.
"""
return gae_big_query_client.GaeBigQueryClient(env=env)
class FieldDataHandler(base.RequestHandlerBase):
"""Http handler for getting a list of distinct Field values (/data/fields).
This handler allows start/end date, project_name, test and metric to be
supplied as GET parameters for filtering, and field_name determines the
  field to return. It returns an array of dicts in the
following format:
[{'value': 'time-to-complete'},
{'value': 'weight'}]
"""
def get(self):
"""Request handler for GET operations."""
urlfetch.set_default_fetch_deadline(URLFETCH_TIMEOUT)
filters = http_util.GetJsonParam(self.request, 'filters')
start_date = filters['start_date']
end_date = filters['end_date']
product_name = filters['product_name']
test = filters['test']
metric = filters['metric']
field_name = self.request.GET.get('field_name')
config = explorer_config.ExplorerConfigModel.Get()
client = DataHandlerUtil.GetDataClient(self.env)
client.project_id = config.default_project
query = explorer_method.ExplorerQueryBase(
data_client=client,
dataset_name=config.default_dataset)
query.fields = [field_name + ' AS name']
query.tables = ['lookup_field_cube']
query.wheres = []
if start_date:
query.wheres.append(
'day_timestamp >= %s' %
(explorer_method.ExplorerQueryBase
.GetTimestampFromFilterExpression(
start_date)))
if end_date:
query.wheres.append(
'day_timestamp <= %s' %
(explorer_method.ExplorerQueryBase
.GetTimestampFromFilterExpression(
end_date)))
if product_name and field_name != 'product_name':
query.wheres.append('product_name = "%s"' % product_name)
if test and field_name not in ['test', 'product_name']:
query.wheres.append('test = "%s"' % test)
if metric and field_name not in ['metric', 'test', 'product_name']:
query.wheres.append('metric = "%s"' % metric)
query.groups = ['name']
query.orders = ['name']
response = query.Execute()
data = {'rows': response['rows']}
self.RenderJson(data)
class MetadataDataHandler(base.RequestHandlerBase):
"""Http handler for getting a list of Metadata (Label/Values).
This handler requires project_name and test to be supplied as GET
parameters, and returns an array of dicts in the following format:
[{'label': 'time-to-complete'},
{'label': 'weight', 'value': '20'}]
"""
def get(self):
"""Request handler for GET operations."""
urlfetch.set_default_fetch_deadline(URLFETCH_TIMEOUT)
config = explorer_config.ExplorerConfigModel.Get()
client = DataHandlerUtil.GetDataClient(self.env)
client.project_id = config.default_project
query = product_labels.ProductLabelsQuery(
data_client=client,
dataset_name=config.default_dataset)
filters = http_util.GetJsonParam(self.request, 'filters')
start_date = None
if 'start_date' in filters and filters['start_date']:
start_date = filters['start_date']
end_date = None
if 'end_date' in filters and filters['end_date']:
end_date = filters['end_date']
response = query.Execute(
start_date=start_date,
end_date=end_date,
product_name=filters['product_name'],
test=filters['test'],
metric=filters['metric'])
self.RenderJson({'labels': response['labels']})
class SqlDataHandler(base.RequestHandlerBase):
"""Http handler for returning the results of a SQL statement (/data/sql).
This handler will look for a SQL query in the POST data with a datasource
parameter. Notably, the following elements are expected:
{'datasource': {
'query': 'SELECT foo FROM bar',
'config': {
... // Unused properties for a strict SQL statement.
'results': {
'pivot': false,
'pivot_config': {
'row_field': '',
'column_field': '',
'value_field': '',
}
}
}
}
This handler returns an array of arrays in the following format:
[['product_name', 'test', 'min', 'avg'],
['widget-factory', 'create-widget', 2.2, 3.1]]
"""
def post(self):
"""Request handler for POST operations."""
try:
start_time = time.time()
urlfetch.set_default_fetch_deadline(URLFETCH_TIMEOUT)
config = explorer_config.ExplorerConfigModel.Get()
request_data = json.loads(self.request.body)
datasource = request_data.get('datasource')
if not datasource:
raise KeyError('The datasource is required to run a query')
query = datasource.get('query_exec') or datasource.get('query')
if not query:
raise KeyError('datasource.query must be provided.')
if (not config.grant_query_to_public and
not users.is_current_user_admin()):
dashboard_id = request_data.get('dashboard_id')
if not dashboard_id:
raise KeyError('The dashboard id is required to run a query')
widget_id = request_data.get('id')
if not widget_id:
raise KeyError('The widget id is required to run a query')
if dashboard.Dashboard.IsQueryCustom(query, dashboard_id, widget_id):
raise SecurityError('The user is not authorized to run custom queries')
else:
logging.error('Query is identical.')
cache_duration = config.cache_duration or None
logging.debug('Query datasource: %s', datasource)
query_config = datasource['config']
if datasource.get('type', 'BigQuery') == 'Cloud SQL':
logging.debug('Using Cloud SQL backend')
cloudsql_client_config = query_config.get('cloudsql')
if not cloudsql_client_config:
cloudsql_client_config = {}
cloudsql_server_config = cloudsql_config.CloudsqlConfigModel.Get()
client = gae_cloud_sql_client.GaeCloudSqlClient(
instance=cloudsql_client_config.get('instance'),
db_name=cloudsql_client_config.get('database_name'),
db_user=cloudsql_server_config.username,
db_password=cloudsql_server_config.password)
else:
logging.debug('Using BigQuery backend')
client = DataHandlerUtil.GetDataClient(self.env)
client.project_id = config.default_project
response = client.Query(query, cache_duration=cache_duration)
if query_config['results'].get('pivot'):
pivot_config = query_config['results']['pivot_config']
transformer = big_query_result_pivot.BigQueryPivotTransformer(
reply=response,
rows_name=pivot_config['row_field'],
columns_name=pivot_config['column_field'],
values_name=pivot_config['value_field'])
transformer.Transform()
response['results'] = (
result_util.ReplyFormatter.RowsToDataTableFormat(response))
elapsed_time = time.time() - start_time
response['elapsedTime'] = elapsed_time
self.RenderJson(response)
# If 'expected' errors occur (specifically dealing with SQL problems),
# return JSON with descriptive text so that we can give the user a
# constructive error message.
# TODO: Formalize error reporting/handling across the application.
except (big_query_client.BigQueryError, big_query_result_pivot.DuplicateValueError,
ValueError, KeyError, SecurityError) as err:
logging.error(str(err))
self.RenderJson({'error': str(err)})
except MySQLdb.OperationalError as err:
self.RenderJson({'error': 'MySQLdb error %s' % str(err)})
except (google.appengine.runtime.DeadlineExceededError,
apiproxy_errors.DeadlineExceededError,
urlfetch_errors.DeadlineExceededError):
self.RenderText(text=ERROR_TIMEOUT, status=408)
def get(self):
"""Request handler for GET operations."""
self.post()
# Main WSGI app as specified in app.yaml
app = webapp2.WSGIApplication(
[('/data/fields', FieldDataHandler),
('/data/metadata', MetadataDataHandler),
('/data/sql', SqlDataHandler)])
|
# -*- coding: utf-8 -*-
"""
g_octave.description_tree
~~~~~~~~~~~~~~~~~~~~~~~~~
This module implements a Python object with the content of a directory
tree with DESCRIPTION files. The object contains *g_octave.Description*
objects for each DESCRIPTION file.
:copyright: (c) 2009-2010 by Rafael Goncalves Martins
:license: GPL-2, see LICENSE for more details.
"""
from __future__ import absolute_import
__all__ = ['DescriptionTree']
import glob
import os
import re
from .config import Config
from .description import Description
from .log import Log
from portage.versions import vercmp
log = Log('g_octave.description_tree')
config = Config()
# from http://wiki.python.org/moin/HowTo/Sorting/
def cmp_to_key(mycmp):
'Convert a cmp= function into a key= function'
class K(object):
def __init__(self, obj, *args):
self.obj = obj
def __lt__(self, other):
return mycmp(self.obj, other.obj) < 0
def __gt__(self, other):
return mycmp(self.obj, other.obj) > 0
def __eq__(self, other):
return mycmp(self.obj, other.obj) == 0
def __le__(self, other):
return mycmp(self.obj, other.obj) <= 0
def __ge__(self, other):
return mycmp(self.obj, other.obj) >= 0
def __ne__(self, other):
return mycmp(self.obj, other.obj) != 0
return K
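# Example (illustrative): sorted(['1.10', '1.2', '1.9.1'], key=cmp_to_key(vercmp))
# orders the strings by Portage version semantics rather than lexicographically.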
class DescriptionTree(list):
def __init__(self, parse_sysreq=True):
log.info('Parsing the package database.')
list.__init__(self)
self._categories = [i.strip() for i in config.categories.split(',')]
for my_file in glob.glob(os.path.join(config.db, 'octave-forge', \
'**', '**', '*.DESCRIPTION')):
description = Description(my_file, parse_sysreq=parse_sysreq)
if description.CAT in self._categories:
self.append(description)
def package_versions(self, pn):
tmp = []
for pkg in self:
if pkg.PN == pn:
tmp.append(pkg.PV)
tmp.sort(key=cmp_to_key(vercmp))
return tmp
def latest_version(self, pn):
tmp = self.package_versions(pn)
return (len(tmp) > 0) and tmp[-1] or None
def latest_version_from_list(self, pv_list):
tmp = pv_list[:]
tmp.sort(key=cmp_to_key(vercmp))
return (len(tmp) > 0) and tmp[-1] or None
def search(self, term):
# term can be a regular expression
re_term = re.compile(r'%s' % term)
packages = {}
for pkg in self:
if re_term.search(pkg.PN) is not None:
if pkg.PN not in packages:
packages[pkg.PN] = []
packages[pkg.PN].append(pkg.PV)
packages[pkg.PN].sort(key=cmp_to_key(vercmp))
return packages
def list(self):
packages = {}
for category in self._categories:
packages[category] = {}
for pkg in self:
if pkg.PN not in packages[pkg.CAT]:
packages[pkg.CAT][pkg.PN] = []
packages[pkg.CAT][pkg.PN].append(pkg.PV)
packages[pkg.CAT][pkg.PN].sort(key=cmp_to_key(vercmp))
return packages
def get(self, p):
for pkg in self:
if pkg.P == p:
return pkg
return None
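# Minimal usage sketch (illustrative; assumes a populated DESCRIPTION tree under
# config.db and an octave-forge package name such as 'signal'):
#
#   tree = DescriptionTree()
#   print(tree.latest_version('signal'))    # newest version string, or None
#   print(tree.search('^sig'))              # {PN: [versions sorted by vercmp]}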
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import django.db.models.deletion
import django.utils.timezone
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("auth", "0006_require_contenttypes_0002")]
replaces = [("userprofile", "0001_initial")]
operations = [
migrations.CreateModel(
name="User",
fields=[
(
"id",
models.AutoField(
verbose_name="ID",
serialize=False,
auto_created=True,
primary_key=True,
),
),
(
"is_superuser",
models.BooleanField(
default=False,
help_text=(
"Designates that this user has all permissions "
"without explicitly assigning them."
),
verbose_name="superuser status",
),
),
("email", models.EmailField(unique=True, max_length=254)),
(
"is_staff",
models.BooleanField(default=False, verbose_name="staff status"),
),
(
"is_active",
models.BooleanField(default=False, verbose_name="active"),
),
(
"password",
models.CharField(
verbose_name="password", max_length=128, editable=False
),
),
(
"date_joined",
models.DateTimeField(
default=django.utils.timezone.now,
verbose_name="date joined",
editable=False,
),
),
(
"last_login",
models.DateTimeField(
default=django.utils.timezone.now,
verbose_name="last login",
editable=False,
),
),
],
options={"db_table": "userprofile_user", "abstract": False},
),
migrations.CreateModel(
name="Address",
fields=[
(
"id",
models.AutoField(
verbose_name="ID",
serialize=False,
auto_created=True,
primary_key=True,
),
),
(
"first_name",
models.CharField(max_length=256, verbose_name="first name"),
),
(
"last_name",
models.CharField(max_length=256, verbose_name="last name"),
),
(
"company_name",
models.CharField(
max_length=256,
verbose_name="company or organization",
blank=True,
),
),
(
"street_address_1",
models.CharField(max_length=256, verbose_name="address"),
),
(
"street_address_2",
models.CharField(
max_length=256, verbose_name="address", blank=True
),
),
("city", models.CharField(max_length=256, verbose_name="city")),
(
"postal_code",
models.CharField(max_length=20, verbose_name="postal code"),
),
(
"country",
models.CharField(
max_length=2,
verbose_name="country",
choices=[
("AF", "Afghanistan"),
("AX", "\xc5land Islands"),
("AL", "Albania"),
("DZ", "Algeria"),
("AS", "American Samoa"),
("AD", "Andorra"),
("AO", "Angola"),
("AI", "Anguilla"),
("AQ", "Antarctica"),
("AG", "Antigua And Barbuda"),
("AR", "Argentina"),
("AM", "Armenia"),
("AW", "Aruba"),
("AU", "Australia"),
("AT", "Austria"),
("AZ", "Azerbaijan"),
("BS", "Bahamas"),
("BH", "Bahrain"),
("BD", "Bangladesh"),
("BB", "Barbados"),
("BY", "Belarus"),
("BE", "Belgium"),
("BZ", "Belize"),
("BJ", "Benin"),
("BM", "Bermuda"),
("BT", "Bhutan"),
("BO", "Bolivia"),
("BQ", "Bonaire, Saint Eustatius And Saba"),
("BA", "Bosnia And Herzegovina"),
("BW", "Botswana"),
("BV", "Bouvet Island"),
("BR", "Brazil"),
("IO", "British Indian Ocean Territory"),
("BN", "Brunei Darussalam"),
("BG", "Bulgaria"),
("BF", "Burkina Faso"),
("BI", "Burundi"),
("KH", "Cambodia"),
("CM", "Cameroon"),
("CA", "Canada"),
("CV", "Cape Verde"),
("KY", "Cayman Islands"),
("CF", "Central African Republic"),
("TD", "Chad"),
("CL", "Chile"),
("CN", "China"),
("CX", "Christmas Island"),
("CC", "Cocos (Keeling) Islands"),
("CO", "Colombia"),
("KM", "Comoros"),
("CG", "Congo"),
("CD", "Congo, The Democratic Republic of the"),
("CK", "Cook Islands"),
("CR", "Costa Rica"),
("CI", "C\xf4te D'Ivoire"),
("HR", "Croatia"),
("CU", "Cuba"),
("CW", "Cura\xe7o"),
("CY", "Cyprus"),
("CZ", "Czech Republic"),
("DK", "Denmark"),
("DJ", "Djibouti"),
("DM", "Dominica"),
("DO", "Dominican Republic"),
("EC", "Ecuador"),
("EG", "Egypt"),
("SV", "El Salvador"),
("GQ", "Equatorial Guinea"),
("ER", "Eritrea"),
("EE", "Estonia"),
("ET", "Ethiopia"),
("FK", "Falkland Islands (Malvinas)"),
("FO", "Faroe Islands"),
("FJ", "Fiji"),
("FI", "Finland"),
("FR", "France"),
("GF", "French Guiana"),
("PF", "French Polynesia"),
("TF", "French Southern Territories"),
("GA", "Gabon"),
("GM", "Gambia"),
("GE", "Georgia"),
("DE", "Germany"),
("GH", "Ghana"),
("GI", "Gibraltar"),
("GR", "Greece"),
("GL", "Greenland"),
("GD", "Grenada"),
("GP", "Guadeloupe"),
("GU", "Guam"),
("GT", "Guatemala"),
("GG", "Guernsey"),
("GN", "Guinea"),
("GW", "Guinea-Bissau"),
("GY", "Guyana"),
("HT", "Haiti"),
("HM", "Heard Island And Mcdonald Islands"),
("VA", "Holy See (Vatican City State)"),
("HN", "Honduras"),
("HK", "Hong Kong"),
("HU", "Hungary"),
("IS", "Iceland"),
("IN", "India"),
("ID", "Indonesia"),
("IR", "Iran, Islamic Republic of"),
("IQ", "Iraq"),
("IE", "Ireland"),
("IM", "Isle of Man"),
("IL", "Israel"),
("IT", "Italy"),
("JM", "Jamaica"),
("JP", "Japan"),
("JE", "Jersey"),
("JO", "Jordan"),
("KZ", "Kazakhstan"),
("KE", "Kenya"),
("KI", "Kiribati"),
("KP", "Korea, Democratic People's Republic of"),
("KR", "Korea, Republic of"),
("KW", "Kuwait"),
("KG", "Kyrgyzstan"),
("LA", "Lao People's Democratic Republic"),
("LV", "Latvia"),
("LB", "Lebanon"),
("LS", "Lesotho"),
("LR", "Liberia"),
("LY", "Libya"),
("LI", "Liechtenstein"),
("LT", "Lithuania"),
("LU", "Luxembourg"),
("MO", "Macao"),
("MK", "Macedonia, The Former Yugoslav Republic of"),
("MG", "Madagascar"),
("MW", "Malawi"),
("MY", "Malaysia"),
("MV", "Maldives"),
("ML", "Mali"),
("MT", "Malta"),
("MH", "Marshall Islands"),
("MQ", "Martinique"),
("MR", "Mauritania"),
("MU", "Mauritius"),
("YT", "Mayotte"),
("MX", "Mexico"),
("FM", "Micronesia, Federated States of"),
("MD", "Moldova, Republic of"),
("MC", "Monaco"),
("MN", "Mongolia"),
("ME", "Montenegro"),
("MS", "Montserrat"),
("MA", "Morocco"),
("MZ", "Mozambique"),
("MM", "Myanmar"),
("NA", "Namibia"),
("NR", "Nauru"),
("NP", "Nepal"),
("NL", "Netherlands"),
("NC", "New Caledonia"),
("NZ", "New Zealand"),
("NI", "Nicaragua"),
("NE", "Niger"),
("NG", "Nigeria"),
("NU", "Niue"),
("NF", "Norfolk Island"),
("MP", "Northern Mariana Islands"),
("NO", "Norway"),
("OM", "Oman"),
("PK", "Pakistan"),
("PW", "Palau"),
("PS", "Palestinian Territory, Occupied"),
("PA", "Panama"),
("PG", "Papua New Guinea"),
("PY", "Paraguay"),
("PE", "Peru"),
("PH", "Philippines"),
("PN", "Pitcairn"),
("PL", "Poland"),
("PT", "Portugal"),
("PR", "Puerto Rico"),
("QA", "Qatar"),
("RE", "R\xe9union"),
("RO", "Romania"),
("RU", "Russian Federation"),
("RW", "Rwanda"),
("BL", "Saint Barth\xe9lemy"),
("SH", "Saint Helena, Ascension And Tristan Da Cunha"),
("KN", "Saint Kitts And Nevis"),
("LC", "Saint Lucia"),
("MF", "Saint Martin (French Part)"),
("PM", "Saint Pierre And Miquelon"),
("VC", "Saint Vincent And the Grenadines"),
("WS", "Samoa"),
("SM", "San Marino"),
("ST", "Sao Tome And Principe"),
("SA", "Saudi Arabia"),
("SN", "Senegal"),
("RS", "Serbia"),
("SC", "Seychelles"),
("SL", "Sierra Leone"),
("SG", "Singapore"),
("SX", "Sint Maarten (Dutch Part)"),
("SK", "Slovakia"),
("SI", "Slovenia"),
("SB", "Solomon Islands"),
("SO", "Somalia"),
("ZA", "South Africa"),
("GS", "South Georgia and the South Sandwich Islands"),
("ES", "Spain"),
("LK", "Sri Lanka"),
("SD", "Sudan"),
("SR", "Suriname"),
("SJ", "Svalbard and Jan Mayen"),
("SZ", "Swaziland"),
("SE", "Sweden"),
("CH", "Switzerland"),
("SY", "Syria"),
("TW", "Taiwan"),
("TJ", "Tajikistan"),
("TZ", "Tanzania"),
("TH", "Thailand"),
("TL", "Timor-Leste"),
("TG", "Togo"),
("TK", "Tokelau"),
("TO", "Tonga"),
("TT", "Trinidad And Tobago"),
("TN", "Tunisia"),
("TR", "Turkey"),
("TM", "Turkmenistan"),
("TC", "Turks And Caicos Islands"),
("TV", "Tuvalu"),
("UG", "Uganda"),
("UA", "Ukraine"),
("AE", "United Arab Emirates"),
("GB", "United Kingdom"),
("US", "United States"),
("UM", "United States Minor Outlying Islands"),
("UY", "Uruguay"),
("UZ", "Uzbekistan"),
("VU", "Vanuatu"),
("VE", "Venezuela"),
("VN", "Viet Nam"),
("VG", "Virgin Islands, British"),
("VI", "Virgin Islands, U.S."),
("WF", "Wallis And Futuna"),
("EH", "Western Sahara"),
("YE", "Yemen"),
("ZM", "Zambia"),
("ZW", "Zimbabwe"),
],
),
),
(
"country_area",
models.CharField(
max_length=128, verbose_name="state or province", blank=True
),
),
(
"phone",
models.CharField(
max_length=30, verbose_name="phone number", blank=True
),
),
],
options={"db_table": "userprofile_address"},
),
migrations.AddField(
model_name="user",
name="addresses",
field=models.ManyToManyField(to="account.Address"),
),
migrations.AddField(
model_name="user",
name="default_billing_address",
field=models.ForeignKey(
related_name="+",
on_delete=django.db.models.deletion.SET_NULL,
verbose_name="default billing address",
blank=True,
to="account.Address",
null=True,
),
),
migrations.AddField(
model_name="user",
name="default_shipping_address",
field=models.ForeignKey(
related_name="+",
on_delete=django.db.models.deletion.SET_NULL,
verbose_name="default shipping address",
blank=True,
to="account.Address",
null=True,
),
),
migrations.AddField(
model_name="user",
name="groups",
field=models.ManyToManyField(
related_query_name="user",
related_name="user_set",
to="auth.Group",
blank=True,
help_text=(
"The groups this user belongs to. "
"A user will get all permissions granted to each of their groups."
),
verbose_name="groups",
),
),
migrations.AddField(
model_name="user",
name="user_permissions",
field=models.ManyToManyField(
related_query_name="user",
related_name="user_set",
to="auth.Permission",
blank=True,
help_text="Specific permissions for this user.",
verbose_name="user permissions",
),
),
]
|
#!/usr/bin/python
import threading
import time
import logging
logging.basicConfig(
level=logging.DEBUG,
format='%(asctime)s %(levelname)s |%(name)s| %(message)s',
filename='/tmp/mygoclient.log',
filemode='a',
)
logger = logging.getLogger(__name__)
class InputThread(threading.Thread):
'''
Basic input thread
@in: go_client, a go client instance that exposes a socket (sock)
@in: stop_event, a threading.Event which supports set() & isSet()
'''
def __init__(self, go_client, stop_event):
self.go_client = go_client
self.sock = go_client.sock
self.stop = stop_event
threading.Thread.__init__(self)
def run(self):
pass
def task(self):
raise NotImplementedError
class TestInput(InputThread):
'''
input source for command testing
@in: go_client, a go client instance that exposes a socket (sock)
@in: stop_event, a threading.Event which supports set() & isSet()
@in: freq, how often it will fire in seconds
'''
def __init__(self, go_client, stop_event, freq=5):
self.freq = freq
InputThread.__init__(self, go_client, stop_event)
def run(self):
while not self.stop.isSet():
self.task()
time.sleep(self.freq)
def task(self):
logger.debug("who command fired")
self.sock.buffer = "who"
class RawInput(TestInput):
'''
Use this module for interactive tracking
'''
def __init__(self, go_client, stop_event, freq=1):
TestInput.__init__(self, go_client, stop_event, freq)
def task(self):
send_data = raw_input()
logger.debug("raw input received")
if send_data:
send_data = send_data.splitlines()[0]
self.sock.buffer = send_data
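# Wiring sketch (illustrative only; go_client is assumed to expose a .sock
# object with a writable 'buffer' attribute, as used above):
#
#   stop_event = threading.Event()
#   worker = TestInput(go_client, stop_event, freq=5)
#   worker.start()
#   ...                      # run until shutdown is requested
#   stop_event.set()         # the thread exits after its current sleep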
|
# coding=gbk
import re
import string,urlparse
import os.path as osp
nums = string.digits
# Strip extra whitespace from HTML code
def clearBlank(html):
if not html or html == None : return ;
html = re.sub('\r|\n|\t','',html)
html = html.replace(' ','').replace(' ','').replace('\'','"')
return html
def clearInfo(html):
if not html or html == None : return ;
html = re.sub('´òµç»°¸øÎÒʱ£¬ÇëÒ»¶¨ËµÃ÷ÔÚ.*?Íø¿´µ½µÄ£¬Ð»Ð»£¡|·¢²¼ÈÕÆÚ£º.*?<br />|<a .*?>|\[ºô½Ð\]|</a>|<p .*?>','',html).replace('°ÙÐÕ','¿ìËÙ×âÁÞÍø')
return html
# Helpers that extract snippets from HTML code
def rects(html,regx,cls=''):
if not html or html == None or len(html)==0 : return ;
# Extract using a regular expression
if regx[:1]==chr(40) and regx[-1:]==chr(41) :
reHTML = re.search(regx,html,re.I)
if reHTML == None : return
reHTML = reHTML.group()
intRegx = re.search(regx,reHTML,re.I)
# intRegx is a match object; take the matched text instead of indexing with it
R = intRegx.group() if intRegx else reHTML
# Extract using a plain string pattern
else :
# Find the occurrences of the string pattern
pattern =re.compile(regx.lower())
intRegx=pattern.findall(html.lower())
# If the pattern is not found, return None
if not intRegx : return
R = intRegx
# Clean up the matched content
if cls:
RC = []
for item in R:
RC.append(resub(item,cls))
return RC
else:
return R
def rect(html,regx,cls=''):
#regx = regx.encode('utf-8')
if not html or html == None or len(html)==0 : return ;
# Extract using a regular expression
if regx[:1]==chr(40) and regx[-1:]==chr(41) :
reHTML = re.search(regx,html,re.I)
if reHTML == None : return
reHTML = reHTML.group()
intRegx = re.search(regx,reHTML,re.I)
# intRegx is a match object; take the matched text instead of indexing with it
R = intRegx.group() if intRegx else reHTML
# Extract using a plain string pattern
else :
# Find the occurrences of the string pattern
pattern =re.compile(regx.lower())
intRegx=pattern.findall(html)
# If the pattern is not found, return None
if not intRegx : return
R = intRegx[0]
if cls:
R = resub(R,cls)
# Return the extracted characters
return R
# Strip text matching a regular expression
def resub(html,regexs):
if not regexs: return html
html =re.sub(regexs,'',html)
return html
def rereplace(html,regexs):
if not regexs: return html
html = html.replace(regexs,'')
return html
# Build the phone-page redirect URL
def telPageReplace(url):
telUrl=url.split('/')
finalUrl="phone_%s" % telUrl[len(telUrl)-1]
return url.replace(telUrl[len(telUrl)-1],finalUrl)
# Check whether a string consists only of digits
def check(a):
if type(a) is not str:
return False
else:
for i in a:
if i not in nums:
return False
return True
# Extract the numeric characters (e.g. a phone number) from a string
def parseNum(a):
strs=''
if type(a) is not str:
return 0
else:
for i in a:
if i in nums or i == '.':
strs +=i
return strs
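# Examples (illustrative): check('12345') should be True and check('123a') False;
# parseNum('tel:021-1234.56') keeps only digits and dots, giving '0211234.56'.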
def reTel(str,regx):
#regx = '((13[0-9]|15[0-9]|18[89])\\d{8})'
p = re.compile(regx)
#print p
if p.findall(str):
return p.findall(str)[0]
else:
regx = '((13[0-9]|15[0-9]|18[89])\d{8})'
#regx = '(13[0-9]|15[0-9]|18[89])\d{8}'
res = re.search(regx,str).group()
if res:
return res
else:
return ''
def matchURL(tag,url):
print tag
print url
urls = re.findall('(.*)(src|href)=(.+?)( |/>|>).*|(.*)url\(([^\)]+)\)',tag,re.I)
if not urls :
return tag
else :
if urls[0][5] == '' :
urlQuote = urls[0][2]
else:
urlQuote = urls[0][5]
if len(urlQuote) > 0 :
cUrl = re.sub('''['"]''','',urlQuote)
else :
return tag
urls = urlparse.urlparse(url); scheme = urls[0];
if scheme!='' : scheme+='://'
host = urls[1]; host = scheme + host
if len(host)==0 : return tag
path = osp.dirname(urls[2]);
if path=='/' : path = '';
if cUrl.find("#")!=-1 : cUrl = cUrl[:cUrl.find("#")]
# Determine the URL type
if re.search('''^(http|https|ftp):(//|\\\\)(([\w/\\\+\-~`@:%])+\.)+([\w/\\\.\=\?\+\-~`@':!%#]|(&)|&)+''',cUrl,re.I) != None :
# URLs that already start with http are returned unchanged
return tag
elif cUrl[:1] == '/' :
# Root-relative path
cUrl = host + cUrl
elif cUrl[:3]=='../' :
# Relative path (../)
while cUrl[:3]=='../' :
cUrl = cUrl[3:]
if len(path) > 0 :
path = osp.dirname(path)
elif cUrl[:2]=='./' :
cUrl = host + path + cUrl[1:]
elif cUrl.lower()[:7]=='mailto:' or cUrl.lower()[:11]=='javascript:' :
return tag
else :
cUrl = host + path + '/' + cUrl
R = tag.replace(urlQuote,'"' + cUrl + '"')
return R
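# Illustrative behaviour (hypothetical values): given the page URL
# 'http://example.com/list/page.html', matchURL rewrites relative src/href
# values such as src="img/a.png" into absolute URLs under http://example.com,
# while absolute http/https/ftp, mailto: and javascript: references pass
# through unchanged.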
def urlencode(str) :
str=str.decode('utf-8').encode('utf-8')
reprStr = repr(str).replace(r'\x', '%')
return reprStr[1:-1]
|
from __future__ import print_function
import matplotlib.pyplot as plt
import numpy as np
import os
import sys
import time
import tarfile
from IPython.display import display, Image
from scipy import ndimage
from sklearn.linear_model import LogisticRegression
from six.moves.urllib.request import urlretrieve
from six.moves import cPickle as pickle
import tensorflow as tf
import load_datasets as ld
import datetime as dt
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_float('learning_rate', 0.05, 'Initial learning rate')
flags.DEFINE_float('learning_rate_decay', 0.1, 'Learning rate decay, i.e. the fraction of the initial learning rate at the end of training')
flags.DEFINE_integer('max_steps', 1000, 'Number of steps to run trainer')
flags.DEFINE_float('max_loss', 0.01, 'Maximally acceptable validation MSE')
flags.DEFINE_integer('batch_size', 64*193, 'Batch size. Divides evenly into the dataset size of 193')
flags.DEFINE_integer('hidden1', 35, 'Size of the first hidden layer')
flags.DEFINE_integer('hidden2', 10, 'Size of the second hidden layer')
flags.DEFINE_integer('output_vars', 2, 'Size of the output layer')
flags.DEFINE_integer('input_vars', 6, 'Size of the input layer')
#flags.DEFINE_string('train_dir', './data/', 'Directory to put the training data') # not currently used
flags.DEFINE_string('checkpoints_dir', './checkpoints/two-layer/'+dt.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), 'Directory to store checkpoints')
flags.DEFINE_string('summaries_dir','./logs/two-layer/'+dt.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),'Summaries directory')
def fill_feed_dict(data_set, inputs_pl, outputs_pl, train):
"""
Returns feed dictionary for TF.
data_set -- dataset
inputs_pl -- TF placeholder for inputs
outputs_pl -- TF placeholder for outputs
train -- if TRUE, then return DS in batches for training. Otherwise, return the complete DS for validation/testing
"""
if train:
batch_size = FLAGS.batch_size
else:
batch_size = 0
inputs, outputs = data_set.next_batch(batch_size = batch_size)
feed_dict = {
inputs_pl: inputs,
outputs_pl: outputs
}
return feed_dict
def weight_variable(shape):
"""
Returns TF weight variable with given shape. The weights are normally distributed with mean = 0, stddev = 0.1
shape -- shape of the variable, i.e. [4,5] matrix of 4x5
"""
initial = tf.truncated_normal(shape, stddev = 0.1)
return tf.Variable(initial)
def bias_variable(shape):
"""
Returns TF bias variable with given shape. The biases are initially at 0.1
shape -- shape of the variable, i.e. [4] -- vector of length 4
"""
initial = tf.constant(0.1, shape = shape)
return tf.Variable(initial)
def variable_summaries(var, name):
"""
Adds multiple summaries (statistics) for a TF variable
var -- TF variable
name -- variable name
"""
mean = tf.reduce_mean(var)
tf.scalar_summary(name+'/mean', mean)
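# Note: despite the name, the expression below reduces to the sum of squared
# deviations; tf.sqrt of the mean would be needed for a true standard deviation.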
stddev = tf.reduce_mean(tf.reduce_sum(tf.square(var-mean)))
tf.scalar_summary(name+'/stddev', stddev)
_min = tf.reduce_min(var)
#tf.scalar_summary(name+'/min', _min)
_max = tf.reduce_max(var)
#tf.scalar_summary(name+'/max', _max)
tf.histogram_summary(name, var)
def nn_layer(input_tensor, input_dim, output_dim, layer_name, act = tf.tanh):
"""
Creates and returns NN layer
input_tensor -- TF tensor at layer input
input_dim -- size of layer input
output_dim -- size of layer output
layer_name -- name of the layer for summaries (statistics)
act -- nonlinear activation function
"""
with tf.name_scope(layer_name):
with tf.name_scope('weights'):
weights = weight_variable([input_dim, output_dim])
variable_summaries(weights, layer_name+'/weights')
with tf.name_scope('biases'):
biases = bias_variable([output_dim])
variable_summaries(biases, layer_name+'/biases')
with tf.name_scope('WX_plus_b'):
preactivate = tf.matmul(input_tensor, weights)+biases
tf.histogram_summary(layer_name+'/pre_activations', preactivate)
if act is not None:
activations = act(preactivate, 'activation')
else:
activations = preactivate
tf.histogram_summary(layer_name+'/activations', activations)
return activations
def run_training():
"""
Creates a NN and runs its training/running
"""
train_dataset, valid_dataset, test_dataset = ld.read_data_sets()
with tf.Graph().as_default():
with tf.name_scope('input'):
x = tf.placeholder(tf.float32, [None, FLAGS.input_vars], name='x-input')
y_ = tf.placeholder(tf.float32, [None, FLAGS.output_vars], name = 'y-input')
hidden_1 = nn_layer(x, FLAGS.input_vars, FLAGS.hidden1, 'layer1')
hidden_2 = nn_layer(hidden_1, FLAGS.hidden1, FLAGS.hidden2, 'layer2')
train_prediction = nn_layer(hidden_2, FLAGS.hidden2, FLAGS.output_vars, 'output', act = None)
with tf.name_scope('MSE'):
prediction_diff = train_prediction-y_
MSE = tf.cast(tf.reduce_mean(tf.reduce_mean(tf.square(prediction_diff))),tf.float32)
tf.scalar_summary('MSE', MSE)
with tf.name_scope('train'):
global_step = tf.Variable(0.00, trainable=False)
learning_rate = tf.train.exponential_decay(FLAGS.learning_rate,
global_step, FLAGS.max_steps,
FLAGS.learning_rate_decay, staircase=False)
#optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
#optimizer = tf.train.AdagradOptimizer(learning_rate).minimize(loss, global_step=global_step)
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(
MSE, global_step=global_step)
merged = tf.merge_all_summaries()
init = tf.initialize_all_variables()
saver = tf.train.Saver()
sess = tf.Session()
train_writer = tf.train.SummaryWriter(FLAGS.summaries_dir+'/train', sess.graph)
test_writer = tf.train.SummaryWriter(FLAGS.summaries_dir+'/validation', sess.graph)
sess.run(init)
#for step in xrange(FLAGS.max_steps):
train_loss = 1.0
valid_loss = 1.0
step = 0
while valid_loss > FLAGS.max_loss and step < FLAGS.max_steps:
start_time = time.time()
if step%10 != 0:
# regular training
feed_dict = fill_feed_dict(train_dataset, x, y_, train = True)
_, train_loss, lr, summary = sess.run([optimizer, MSE, learning_rate, merged], feed_dict=feed_dict)
train_writer.add_summary(summary,step)
else:
# check model fit
feed_dict = fill_feed_dict(valid_dataset, x, y_, train = False)
valid_loss, summary = sess.run([MSE, merged], feed_dict = feed_dict)
test_writer.add_summary(summary,step)
duration = time.time()-start_time
print('Step %d (%d op/sec): Training MSE: %.5f, Validation MSE: %.5f' % (step, 1/duration, train_loss, valid_loss))
step+=1
feed_dict = fill_feed_dict(test_dataset, x, y_, train = False)
test_loss, summary = sess.run([MSE, merged], feed_dict = feed_dict)
print('Test MSE: %.5f' % (test_loss))
#predicted_vs_actual = np.hstack((test_prediction.eval(session = sess), test_dataset.outputs))
#print("correlation coefficients: ")
#print(np.corrcoef(predicted_vs_actual[:,0],predicted_vs_actual[:,2]))
#print(np.corrcoef(predicted_vs_actual[:,1],predicted_vs_actual[:,3]))
sess.close()
def main(argv):
if tf.gfile.Exists(FLAGS.summaries_dir):
tf.gfile.DeleteRecursively(FLAGS.summaries_dir)
tf.gfile.MakeDirs(FLAGS.summaries_dir)
if tf.gfile.Exists(FLAGS.checkpoints_dir):
tf.gfile.DeleteRecursively(FLAGS.checkpoints_dir)
tf.gfile.MakeDirs(FLAGS.checkpoints_dir)
run_training()
if __name__ == "__main__":
main(sys.argv)
|
import logging
import logging.config
import sys
from flask import Flask,render_template
from werkzeug.contrib.fixers import ProxyFix
from datetime import datetime
from apis import api, db
import os
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)
DATE_FORMAT="%Y-%m-%d %H:%M:%S"
FORMAT = '%(asctime)s - %(filename)s - %(levelname)s:%(lineno)d: %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,format=FORMAT,datefmt=DATE_FORMAT)
LOG = logging.getLogger('app')
app = Flask(__name__)
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
database_url = str(os.environ['DATABASE_URL'])
# str.replace returns a new string; SQLAlchemy expects the postgresql:// scheme
database_url = database_url.replace("postgres://", "postgresql://", 1)
app.config['SQLALCHEMY_DATABASE_URI'] = database_url
app.logger_name = "flask.app"
api.init_app(app)
db.init_app(app)
@app.after_request
def after_request(response):
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Headers'] = 'Content-Type,Authorization'
response.headers['Access-Control-Allow-Methods'] = 'GET,PUT,POST,DELETE'
response.headers['Last-Modified'] = datetime.now()
response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
response.headers['Pragma'] = 'no-cache'
response.headers['Expires'] = '-1'
return response
app.wsgi_app = ProxyFix(app.wsgi_app)
@app.route('/todo')
def index():
LOG.info("Rendering Template")
return render_template('index.html')
#Create schema for database
with app.app_context():
db.create_all()
if __name__ == '__main__':
app.run(debug=True)
|
import sys
import os
from PySide import QtCore, QtGui
from P4 import P4, P4Exception
# http://stackoverflow.com/questions/32229314/pyqt-how-can-i-set-row-heights-of-qtreeview
class TreeItem(object):
def __init__(self, data, parent=None):
self.parentItem = parent
self.data = data
self.childItems = []
def appendChild(self, item):
self.childItems.append(item)
def popChild(self):
if self.childItems:
self.childItems.pop()
def row(self):
if self.parentItem:
return self.parentItem.childItems.index(self)
return 0
def reconnect():
p4.disconnect()
p4.connect()
p4.password = "contact_dev"
p4.run_login()
def epochToTimeStr(time):
import datetime
return datetime.datetime.utcfromtimestamp(int(time)).strftime("%d/%m/%Y %H:%M:%S")
def perforceListDir(p4path):
result = []
if p4path[-1] == '/' or p4path[-1] == '\\':
p4path = p4path[:-1]
path = "{0}/{1}".format(p4path, '*')
isDepotPath = p4path.startswith("//depot")
dirs = []
files = []
# Dir silently does nothing if there are no dirs
try:
dirs = p4.run_dirs(path)
except P4Exception:
pass
# Files will return an exception if there are no files in the dir
# Stupid inconsistency imo
try:
if isDepotPath:
files = p4.run_files(path)
else:
tmp = p4.run_have(path)
for fileItem in tmp:
files += p4.run_fstat(fileItem['clientFile'])
except P4Exception:
pass
result = []
for dir in dirs:
if isDepotPath:
dirName = dir['dir'][8:]
else:
dirName = dir['dir']
tmp = {'name': os.path.basename(dirName),
'path': dir['dir'],
'time': '',
'type': 'Folder',
'change': ''
}
result.append(tmp)
for fileItem in files:
if isDepotPath:
deleteTest = p4.run("filelog", "-t", fileItem['depotFile'])[0]
isDeleted = deleteTest['action'][0] == "delete"
fileType = fileItem['type']
if isDeleted:
fileType = "{0} [Deleted]".format(fileType)
# Remove //depot/ from the path for the 'pretty' name
tmp = {'name': os.path.basename(fileItem['depotFile'][8:]),
'path': fileItem['depotFile'],
'time': epochToTimeStr(fileItem['time']),
'type': fileType,
'change': fileItem['change']
}
result.append(tmp)
else:
deleteTest = p4.run("filelog", "-t", fileItem['clientFile'])[0]
isDeleted = deleteTest['action'][0] == "delete"
fileType = fileItem['headType']
if isDeleted:
fileType = "{0} [Deleted]".format(fileType)
tmp = {'name': os.path.basename(fileItem['clientFile']),
'path': fileItem['clientFile'],
'time': epochToTimeStr(fileItem['headModTime']),
'type': fileType,
'change': fileItem['headChange']
}
result.append(tmp)
return sorted(result, key=lambda k: k['name'])
def perforceIsDir(p4path):
try:
if p4path[-1] == '/' or p4path[-1] == '\\':
p4path = p4path[:-1]
result = p4.run_dirs(p4path)
return len(result) > 0
except P4Exception as e:
print e
return False
def p4Filelist(dir, findDeleted=False):
p4path = '/'.join([dir, '*'])
try:
files = p4.run_filelog("-t", p4path)
except P4Exception as e:
print e
return []
results = []
for x in files:
latestRevision = x.revisions[0]
print latestRevision.action, latestRevision.depotFile
if not findDeleted and latestRevision.action == 'delete':
continue
else:
results.append({'name': latestRevision.depotFile,
'action': latestRevision.action,
'change': latestRevision.change,
'time': latestRevision.time,
'type': latestRevision.type
}
)
filesInCurrentChange = p4.run_opened(p4path)
for x in filesInCurrentChange:
print x
results.append({'name': x['clientFile'],
'action': x['action'],
'change': x['change'],
'time': "",
'type': x['type']
}
)
return results
class TreeModel(QtCore.QAbstractItemModel):
def __init__(self, parent=None):
super(TreeModel, self).__init__(parent)
self.rootItem = TreeItem(None)
self.showDeleted = False
def populate(self, rootdir="//depot", findDeleted=False):
self.rootItem = TreeItem(None)
self.showDeleted = findDeleted
depotPath = False
if "depot" in rootdir:
depotPath = True
p4path = '/'.join([rootdir, '*'])
if depotPath:
dirs = p4.run_dirs(p4path)
else:
dirs = p4.run_dirs('-H', p4path)
for dir in dirs:
dirName = os.path.basename(dir['dir'])
# subDir = '/'.join( [rootdir, dirName )] )
data = [dirName, "Folder", "", "", ""]
treeItem = TreeItem(data, self.rootItem)
self.rootItem.appendChild(treeItem)
treeItem.appendChild(None)
files = p4Filelist(dir['dir'], findDeleted)
for f in files:
fileName = os.path.basename(f['name'])
data = [fileName, f['type'], f[
'time'], f['action'], f['change']]
fileItem = TreeItem(data, treeItem)
treeItem.appendChild(fileItem)
# def populate(self, rootdir):
# rootdir = rootdir.replace('\\', '/')
# print "Scanning subfolders in {0}...".format(rootdir)
# import maya.cmds as cmds
# cmds.refresh()
# def scanDirectoryPerforce(root, treeItem):
# change = p4.run_opened()
# for item in perforceListDir(root):
# itemPath = "{0}/{1}".format(root, item['name'] ) # os.path.join(root, item)
# print "{0}{1}{2}".format( "".join(["\t" for i in range(depth)]), '+'
# if perforceIsDir(itemPath) else '-', item['name'] )
# data = [ item['name'], item['type'], item['time'], item['change'] ]
# childDir = TreeItem( data, treeItem)
# treeItem.appendChild( childDir )
# tmpDir = TreeItem( [ "TMP", "", "", "" ], childDir )
# childDir.appendChild( None )
# print itemPath, perforceIsDir( itemPath )
# if perforceIsDir( itemPath ):
# scanDirectoryPerforce(itemPath, childDir)
# def scanDirectory(root, treeItem):
# for item in os.listdir(root):
# itemPath = os.path.join(root, item)
# print "{0}{1}{2}".format( "".join(["\t" for i in range(depth)]), '+' if os.path.isdir(itemPath) else '-', item)
# childDir = TreeItem( [item], treeItem)
# treeItem.appendChild( childDir )
# if os.path.isdir( itemPath ):
# scanDirectory(itemPath, childDir)
# scanDirectoryPerforce(rootdir, self.rootItem )
# print dirName
# directory = "{0}:{1}".format(i, os.path.basename(dirName))
# childDir = TreeItem( [directory], self.rootItem)
# self.rootItem.appendChild( childDir )
# for fname in fileList:
# childFile = TreeItem(fname, childDir)
# childDir.appendChild([childFile])
# for i,c in enumerate("abcdefg"):
# child = TreeItem([i],self.rootItem)
# self.rootItem.appendChild(child)
def columnCount(self, parent):
return 5
def data(self, index, role):
column = index.column()
if not index.isValid():
return None
if role == QtCore.Qt.DisplayRole:
item = index.internalPointer()
return item.data[column]
elif role == QtCore.Qt.SizeHintRole:
return QtCore.QSize(20, 20)
elif role == QtCore.Qt.DecorationRole:
if column == 1:
itemType = index.internalPointer().data[column]
isDeleted = index.internalPointer().data[3] == 'delete'
if isDeleted:
return QtGui.QIcon(r"/home/i7245143/src/MayaPerforce/Perforce/images/File0104.png")
if itemType == "Folder":
return QtGui.QIcon(r"/home/i7245143/src/MayaPerforce/Perforce/images/File0059.png")
elif "binary" in itemType:
return QtGui.QIcon(r"/home/i7245143/src/MayaPerforce/Perforce/images/File0315.png")
elif "text" in itemType:
return QtGui.QIcon(r"/home/i7245143/src/MayaPerforce/Perforce/images/File0027.png")
else:
return QtGui.QIcon(r"/home/i7245143/src/MayaPerforce/Perforce/images/File0106.png")
icon = QtGui.QFileIconProvider(QtGui.QFileIconProvider.Folder)
return icon
else:
return None
return None
def flags(self, index):
if not index.isValid():
return QtCore.Qt.NoItemFlags
return QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable
def headerData(self, section, orientation, role):
if orientation == QtCore.Qt.Horizontal and role == QtCore.Qt.DisplayRole:
return ["Filename", "Type", "Modification Time", "Action", "Change"][section]
return None
def index(self, row, column, parent):
if not self.hasIndex(row, column, parent):
return QtCore.QModelIndex()
if not parent.isValid():
parentItem = self.rootItem
else:
parentItem = parent.internalPointer()
childItem = parentItem.childItems[row]
if childItem:
return self.createIndex(row, column, childItem)
else:
return QtCore.QModelIndex()
def parent(self, index):
if not index.isValid():
return QtCore.QModelIndex()
parentItem = index.internalPointer().parentItem
if parentItem == self.rootItem:
return QtCore.QModelIndex()
return self.createIndex(parentItem.row(), 0, parentItem)
def rootrowcount(self):
return len(self.rootItem.childItems)
def rowCount(self, parent):
if parent.column() > 0:
return 0
if not parent.isValid():
parentItem = self.rootItem
else:
parentItem = parent.internalPointer()
return len(parentItem.childItems)
# allFiles = p4.run_files("//depot/...")
# hiddenFiles = p4.run_files("//depot/.../.*")
# testData = [['assets', '.place-holder'], ['assets', 'heroTV', 'lookDev', 'heroTV_lookDev.ma'], ['assets', 'heroTV', 'lookDev', 'heroTv_lookdev.ma'], ['assets', 'heroTV', 'modelling', '.place-holder'], ['assets', 'heroTV', 'modelling', 'Old_TV.obj'], ['assets', 'heroTV', 'modelling', 'heroTv_wip.ma'], ['assets', 'heroTV', 'rigging', '.place-holder'], ['assets', 'heroTV', 'texturing', '.place-holder'], ['assets', 'heroTV', 'workspace.mel'], ['assets', 'lookDevSourceimages', 'Garage.EXR'], ['assets', 'lookDevSourceimages', 'UVtile.jpg'], ['assets', 'lookDevSourceimages', 'macbeth_background.jpg'], ['assets', 'lookDevTemplate.ma'], ['assets', 'previs_WIP.ma'], ['assets', 'previs_slapcomp_WIP.ma'], ['audio', '.place-holder'], ['finalEdit', 'delivery', '.place-holder'], ['finalEdit', 'projects', '.place-holder'], ['finalEdit', 'test'], ['finalEdit', 'test.ma'], ['shots', '.place-holder'], ['shots', 'space', 'space_sh_010', 'cg', 'maya', 'scenes', 'spc_sh_010_animBuild_WIP.ma']]
# result = {}
# files = [ item['depotFile'][8:].split('/') for item in allFiles ]
# for item in files:
# print item
# from collections import defaultdict
# deepestIndex, deepestPath = max(enumerate(files), key = lambda tup: len(tup[1]))
try:
print p4
except:
p4 = P4()
p4.user = "tminor"
p4.password = "contact_dev"
p4.port = "ssl:52.17.163.3:1666"
p4.connect()
p4.run_login()
reconnect()
# Iterate upwards until we have the full path to the node
def fullPath(idx):
result = [idx]
parent = idx.parent()
while True:
if not parent.isValid():
break
result.append(parent)
parent = parent.parent()
return list(reversed(result))
def populateSubDir(idx, root="//depot", findDeleted=False):
idxPathModel = fullPath(idx)
idxPathSubDirs = [idxPath.data() for idxPath in idxPathModel]
idxFullPath = os.path.join(*idxPathSubDirs)
if not idxFullPath:
idxFullPath = "."
# children = []
p4path = '/'.join([root, idxFullPath, '*'])
depotPath = False
if "depot" in root:
depotPath = True
if depotPath:
p4subdirs = p4.run_dirs(p4path)
else:
p4subdirs = p4.run_dirs('-H', p4path)
p4subdir_names = [child['dir'] for child in p4subdirs]
treeItem = idx.internalPointer()
# print idx.child(0,0).data(), p4subidrs
if not idx.child(0, 0).data() and p4subdirs:
# Pop empty "None" child
treeItem.popChild()
for p4child in p4subdir_names:
print p4child
data = [os.path.basename(p4child), "Folder", "", "", ""]
childData = TreeItem(data, treeItem)
treeItem.appendChild(childData)
childData.appendChild(None)
files = p4Filelist(p4child, findDeleted)
for f in files:
fileName = os.path.basename(f['name'])
data = [fileName, f['type'], f[
'time'], f['action'], f['change']]
fileData = TreeItem(data, childData)
childData.appendChild(fileData)
def tmp(*args):
idx = args[0]
children = []
i = 1
while True:
child = idx.child(i, 0)
print i, child.data()
if not child.isValid():
break
children.append(child)
i += 1
populateSubDir(child, findDeleted=False)
return
treeItem = idx.internalPointer()
idxPathModel = fullPath(idx, model.showDeleted)
idxPathSubDirs = [idxPath.data() for idxPath in idxPathModel]
idxFullPath = os.path.join(*idxPathSubDirs)
pathDepth = len(idxPathSubDirs)
children = []
p4path = "//{0}/{1}/*".format(p4.client, idxFullPath)
print p4path
p4children = p4.run_dirs("-H", p4path)
p4children_names = [child['dir'] for child in p4children]
if idx.child(0, 0).data() == "TMP":
for p4child in p4children_names:
data = [p4child, "", "", ""]
childData = TreeItem(data, idx)
treeItem.appendChild(childData)
i = 0
while True:
child = idx.child(i, 0)
if not child.isValid():
break
children.append(child)
i += 1
for child in children:
childIdx = child.internalPointer()
data = ["TEST", "TEST", "TEST", "TEST"]
childDir = TreeItem(data, childIdx)
childIdx.appendChild(childDir)
tmpDir = TreeItem(["TMP", "", "", "", ""], childDir)
childDir.appendChild(tmpDir)
# view.setModel(model)
view = QtGui.QTreeView()
view.expandAll()
view.setWindowTitle("Perforce Depot Files")
view.resize(512, 512)
view.expanded.connect(tmp)
model = TreeModel()
# model.populate("//{0}".format(p4.client), findDeleted=True)
model.populate("//depot", findDeleted=True)
view.setModel(model)
# populateSubDir( view.rootIndex() )
for i in range(model.rootrowcount()):
idx = model.index(i, 0, model.parent(QtCore.QModelIndex()))
treeItem = idx.internalPointer()
populateSubDir(idx)
# test = TreeItem( ["TEST", "", "", ""], treeItem )
# treeItem.appendChild( test )
view.setColumnWidth(0, 220)
view.setColumnWidth(1, 100)
view.setColumnWidth(2, 120)
view.setColumnWidth(3, 60)
view.show()
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
ModelerGraphicItem.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from qgis.PyQt.QtCore import Qt, QPointF, QRectF
from qgis.PyQt.QtGui import QIcon, QFont, QFontMetricsF, QPen, QBrush, QColor, QPolygonF
from qgis.PyQt.QtWidgets import QGraphicsItem, QMessageBox, QMenu
from processing.modeler.ModelerAlgorithm import ModelerParameter, Algorithm, ModelerOutput
from processing.modeler.ModelerParameterDefinitionDialog import ModelerParameterDefinitionDialog
from processing.modeler.ModelerParametersDialog import ModelerParametersDialog
pluginPath = os.path.split(os.path.dirname(__file__))[0]
class ModelerGraphicItem(QGraphicsItem):
BOX_HEIGHT = 30
BOX_WIDTH = 200
def __init__(self, element, model):
super(ModelerGraphicItem, self).__init__(None)
self.model = model
self.element = element
if isinstance(element, ModelerParameter):
icon = QIcon(os.path.join(pluginPath, 'images', 'input.png'))
self.pixmap = icon.pixmap(20, 20, state=QIcon.On)
self.text = element.param.description
elif isinstance(element, ModelerOutput):
# Output name
icon = QIcon(os.path.join(pluginPath, 'images', 'output.png'))
self.pixmap = icon.pixmap(20, 20, state=QIcon.On)
self.text = element.description
else:
self.text = element.description
self.pixmap = element.algorithm.getIcon().pixmap(15, 15)
self.arrows = []
self.setFlag(QGraphicsItem.ItemIsMovable, True)
self.setFlag(QGraphicsItem.ItemIsSelectable, True)
self.setFlag(QGraphicsItem.ItemSendsGeometryChanges, True)
self.setZValue(1000)
if not isinstance(element, ModelerOutput):
icon = QIcon(os.path.join(pluginPath, 'images', 'edit.png'))
pt = QPointF(ModelerGraphicItem.BOX_WIDTH / 2
- FlatButtonGraphicItem.WIDTH / 2,
ModelerGraphicItem.BOX_HEIGHT / 2
- FlatButtonGraphicItem.HEIGHT / 2 + 1)
self.editButton = FlatButtonGraphicItem(icon, pt, self.editElement)
self.editButton.setParentItem(self)
icon = QIcon(os.path.join(pluginPath, 'images', 'delete.png'))
pt = QPointF(ModelerGraphicItem.BOX_WIDTH / 2
- FlatButtonGraphicItem.WIDTH / 2,
- ModelerGraphicItem.BOX_HEIGHT / 2
+ FlatButtonGraphicItem.HEIGHT / 2 + 1)
self.deleteButton = FlatButtonGraphicItem(icon, pt,
self.removeElement)
self.deleteButton.setParentItem(self)
if isinstance(element, Algorithm):
alg = element.algorithm
if alg.parameters:
pt = self.getLinkPointForParameter(-1)
pt = QPointF(0, pt.y() + 2)
self.inButton = FoldButtonGraphicItem(pt, self.foldInput, self.element.paramsFolded)
self.inButton.setParentItem(self)
if alg.outputs:
pt = self.getLinkPointForOutput(-1)
pt = QPointF(0, pt.y() + 2)
self.outButton = FoldButtonGraphicItem(pt, self.foldOutput, self.element.outputsFolded)
self.outButton.setParentItem(self)
def foldInput(self, folded):
self.element.paramsFolded = folded
self.prepareGeometryChange()
if self.element.algorithm.outputs:
pt = self.getLinkPointForOutput(-1)
pt = QPointF(0, pt.y())
self.outButton.position = pt
for arrow in self.arrows:
arrow.updatePath()
self.update()
def foldOutput(self, folded):
self.element.outputsFolded = folded
self.prepareGeometryChange()
for arrow in self.arrows:
arrow.updatePath()
self.update()
def addArrow(self, arrow):
self.arrows.append(arrow)
def boundingRect(self):
font = QFont('Verdana', 8)
fm = QFontMetricsF(font)
unfolded = isinstance(self.element, Algorithm) and not self.element.paramsFolded
numParams = len(self.element.algorithm.parameters) if unfolded else 0
unfolded = isinstance(self.element, Algorithm) and not self.element.outputsFolded
numOutputs = len(self.element.algorithm.outputs) if unfolded else 0
hUp = fm.height() * 1.2 * (numParams + 2)
hDown = fm.height() * 1.2 * (numOutputs + 2)
rect = QRectF(-(ModelerGraphicItem.BOX_WIDTH + 2) / 2,
-(ModelerGraphicItem.BOX_HEIGHT + 2) / 2 - hUp,
ModelerGraphicItem.BOX_WIDTH + 2,
ModelerGraphicItem.BOX_HEIGHT + hDown + hUp)
return rect
def mouseDoubleClickEvent(self, event):
self.editElement()
def contextMenuEvent(self, event):
if isinstance(self.element, ModelerOutput):
return
popupmenu = QMenu()
removeAction = popupmenu.addAction('Remove')
removeAction.triggered.connect(self.removeElement)
editAction = popupmenu.addAction('Edit')
editAction.triggered.connect(self.editElement)
if isinstance(self.element, Algorithm):
if not self.element.active:
removeAction = popupmenu.addAction('Activate')
removeAction.triggered.connect(self.activateAlgorithm)
else:
deactivateAction = popupmenu.addAction('Deactivate')
deactivateAction.triggered.connect(self.deactivateAlgorithm)
popupmenu.exec_(event.screenPos())
def deactivateAlgorithm(self):
self.model.deactivateAlgorithm(self.element.name)
self.model.updateModelerView()
def activateAlgorithm(self):
if self.model.activateAlgorithm(self.element.name):
self.model.updateModelerView()
else:
QMessageBox.warning(None, 'Could not activate Algorithm',
'The selected algorithm depends on other currently non-active algorithms.\n'
'Activate them before trying to activate it.')
def editElement(self):
if isinstance(self.element, ModelerParameter):
dlg = ModelerParameterDefinitionDialog(self.model,
param=self.element.param)
dlg.exec_()
if dlg.param is not None:
self.model.updateParameter(dlg.param)
self.element.param = dlg.param
self.text = dlg.param.description
self.update()
elif isinstance(self.element, Algorithm):
dlg = self.element.algorithm.getCustomModelerParametersDialog(self.model, self.element.name)
if not dlg:
dlg = ModelerParametersDialog(self.element.algorithm, self.model, self.element.name)
dlg.exec_()
if dlg.alg is not None:
dlg.alg.name = self.element.name
self.model.updateAlgorithm(dlg.alg)
self.model.updateModelerView()
def removeElement(self):
if isinstance(self.element, ModelerParameter):
if not self.model.removeParameter(self.element.param.name):
QMessageBox.warning(None, 'Could not remove element',
'Other elements depend on the selected one.\n'
'Remove them before trying to remove it.')
else:
self.model.updateModelerView()
elif isinstance(self.element, Algorithm):
if not self.model.removeAlgorithm(self.element.name):
QMessageBox.warning(None, 'Could not remove element',
'Other elements depend on the selected one.\n'
'Remove them before trying to remove it.')
else:
self.model.updateModelerView()
def getAdjustedText(self, text):
font = QFont('Verdana', 8)
fm = QFontMetricsF(font)
w = fm.width(text)
if w < self.BOX_WIDTH - 25 - FlatButtonGraphicItem.WIDTH:
return text
text = text[0:-3] + '...'
w = fm.width(text)
while w > self.BOX_WIDTH - 25 - FlatButtonGraphicItem.WIDTH:
text = text[0:-4] + '...'
w = fm.width(text)
return text
def paint(self, painter, option, widget=None):
rect = QRectF(-(ModelerGraphicItem.BOX_WIDTH + 2) / 2.0,
-(ModelerGraphicItem.BOX_HEIGHT + 2) / 2.0,
ModelerGraphicItem.BOX_WIDTH + 2,
ModelerGraphicItem.BOX_HEIGHT + 2)
painter.setPen(QPen(Qt.gray, 1))
color = QColor(125, 232, 232)
if isinstance(self.element, ModelerParameter):
color = QColor(179, 179, 255)
elif isinstance(self.element, Algorithm):
color = Qt.white
painter.setBrush(QBrush(color, Qt.SolidPattern))
painter.drawRect(rect)
font = QFont('Verdana', 8)
painter.setFont(font)
painter.setPen(QPen(Qt.black))
text = self.getAdjustedText(self.text)
if isinstance(self.element, Algorithm) and not self.element.active:
painter.setPen(QPen(Qt.gray))
text = text + "\n(deactivated)"
elif self.isSelected():
painter.setPen(QPen(Qt.blue))
fm = QFontMetricsF(font)
text = self.getAdjustedText(self.text)
h = fm.height()
pt = QPointF(-ModelerGraphicItem.BOX_WIDTH / 2 + 25, h / 2.0)
painter.drawText(pt, text)
painter.setPen(QPen(Qt.black))
if isinstance(self.element, Algorithm):
h = -(fm.height() * 1.2)
h = h - ModelerGraphicItem.BOX_HEIGHT / 2.0 + 5
pt = QPointF(-ModelerGraphicItem.BOX_WIDTH / 2 + 25, h)
painter.drawText(pt, 'In')
i = 1
if not self.element.paramsFolded:
for param in self.element.algorithm.parameters:
if not param.hidden:
text = self.getAdjustedText(param.description)
h = -(fm.height() * 1.2) * (i + 1)
h = h - ModelerGraphicItem.BOX_HEIGHT / 2.0 + 5
pt = QPointF(-ModelerGraphicItem.BOX_WIDTH / 2
+ 33, h)
painter.drawText(pt, text)
i += 1
h = fm.height() * 1.2
h = h + ModelerGraphicItem.BOX_HEIGHT / 2.0
pt = QPointF(-ModelerGraphicItem.BOX_WIDTH / 2 + 25, h)
painter.drawText(pt, 'Out')
if not self.element.outputsFolded:
for i, out in enumerate(self.element.algorithm.outputs):
text = self.getAdjustedText(out.description)
h = fm.height() * 1.2 * (i + 2)
h = h + ModelerGraphicItem.BOX_HEIGHT / 2.0
pt = QPointF(-ModelerGraphicItem.BOX_WIDTH / 2
+ 33, h)
painter.drawText(pt, text)
if self.pixmap:
painter.drawPixmap(-(ModelerGraphicItem.BOX_WIDTH / 2.0) + 3, -8,
self.pixmap)
def getLinkPointForParameter(self, paramIndex):
offsetX = 25
if isinstance(self.element, Algorithm) and self.element.paramsFolded:
paramIndex = -1
offsetX = 17
font = QFont('Verdana', 8)
fm = QFontMetricsF(font)
if isinstance(self.element, Algorithm):
h = -(fm.height() * 1.2) * (paramIndex + 2) - fm.height() / 2.0 + 8
h = h - ModelerGraphicItem.BOX_HEIGHT / 2.0
else:
h = 0
return QPointF(-ModelerGraphicItem.BOX_WIDTH / 2 + offsetX, h)
def getLinkPointForOutput(self, outputIndex):
if isinstance(self.element, Algorithm) and self.element.algorithm.outputs:
outputIndex = (outputIndex if not self.element.outputsFolded else -1)
text = self.getAdjustedText(self.element.algorithm.outputs[outputIndex].description)
font = QFont('Verdana', 8)
fm = QFontMetricsF(font)
w = fm.width(text)
h = fm.height() * 1.2 * (outputIndex + 1) + fm.height() / 2.0
y = h + ModelerGraphicItem.BOX_HEIGHT / 2.0 + 5
x = (-ModelerGraphicItem.BOX_WIDTH / 2 + 33 + w
+ 5 if not self.element.outputsFolded else 10)
return QPointF(x, y)
else:
return QPointF(0, 0)
def itemChange(self, change, value):
if change == QGraphicsItem.ItemPositionHasChanged:
for arrow in self.arrows:
arrow.updatePath()
self.element.pos = self.pos()
return value
def polygon(self):
font = QFont('Verdana', 8)
fm = QFontMetricsF(font)
hUp = fm.height() * 1.2 * (len(self.element.parameters) + 2)
hDown = fm.height() * 1.2 * (len(self.element.outputs) + 2)
pol = QPolygonF([
QPointF(-(ModelerGraphicItem.BOX_WIDTH + 2) / 2,
-(ModelerGraphicItem.BOX_HEIGHT + 2) / 2 - hUp),
QPointF(-(ModelerGraphicItem.BOX_WIDTH + 2) / 2,
(ModelerGraphicItem.BOX_HEIGHT + 2) / 2 + hDown),
QPointF((ModelerGraphicItem.BOX_WIDTH + 2) / 2,
(ModelerGraphicItem.BOX_HEIGHT + 2) / 2 + hDown),
QPointF((ModelerGraphicItem.BOX_WIDTH + 2) / 2,
-(ModelerGraphicItem.BOX_HEIGHT + 2) / 2 - hUp),
QPointF(-(ModelerGraphicItem.BOX_WIDTH + 2) / 2,
-(ModelerGraphicItem.BOX_HEIGHT + 2) / 2 - hUp)
])
return pol
class FlatButtonGraphicItem(QGraphicsItem):
WIDTH = 16
HEIGHT = 16
def __init__(self, icon, position, action):
super(FlatButtonGraphicItem, self).__init__(None)
self.setAcceptHoverEvents(True)
self.setFlag(QGraphicsItem.ItemIsMovable, False)
self.pixmap = icon.pixmap(self.WIDTH, self.HEIGHT,
state=QIcon.On)
self.position = position
self.isIn = False
self.action = action
def mousePressEvent(self, event):
self.action()
def paint(self, painter, option, widget=None):
pt = QPointF(-self.WIDTH / 2, -self.HEIGHT / 2) + self.position
rect = QRectF(pt.x(), pt.y(), self.WIDTH, self.HEIGHT)
if self.isIn:
painter.setPen(QPen(Qt.transparent, 1))
painter.setBrush(QBrush(Qt.lightGray,
Qt.SolidPattern))
else:
painter.setPen(QPen(Qt.transparent, 1))
painter.setBrush(QBrush(Qt.transparent,
Qt.SolidPattern))
painter.drawRect(rect)
painter.drawPixmap(pt.x(), pt.y(), self.pixmap)
def boundingRect(self):
rect = QRectF(self.position.x() - self.WIDTH / 2,
self.position.y() - self.HEIGHT / 2,
self.WIDTH,
self.HEIGHT)
return rect
def hoverEnterEvent(self, event):
self.isIn = True
self.update()
def hoverLeaveEvent(self, event):
self.isIn = False
self.update()
class FoldButtonGraphicItem(FlatButtonGraphicItem):
WIDTH = 11
HEIGHT = 11
def __init__(self, position, action, folded):
self.icons = {True: QIcon(os.path.join(pluginPath, 'images', 'plus.png')),
False: QIcon(os.path.join(pluginPath, 'images', 'minus.png'))}
self.folded = folded
icon = self.icons[self.folded]
super(FoldButtonGraphicItem, self).__init__(icon, position, action)
def mousePressEvent(self, event):
self.folded = not self.folded
icon = self.icons[self.folded]
self.pixmap = icon.pixmap(self.WIDTH, self.HEIGHT,
state=QIcon.On)
self.action(self.folded)
|
# -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Package contenant le paramètre 'éditer' de la commande 'banc'."""
from primaires.interpreteur.masque.parametre import Parametre
class PrmEditer(Parametre):
"""Paramètre 'éditer de la commande 'banc'"""
def __init__(self):
"""Constructeur du paramètre."""
Parametre.__init__(self, "éditer", "edit")
self.schema = "<cle>"
self.aide_courte = "ouvre l'éditeur de banc de poisson"
self.aide_longue = \
"Cette commande permet d'accéder à l'éditeur " \
"du banc de poisson indiqué."
def interpreter(self, personnage, dic_masques):
"""Méthode d'interprétation de commande"""
cle = dic_masques["cle"].cle
if cle not in importeur.peche.bancs:
personnage << "|err|Ce banc n'existe pas.|ff|"
return
banc = importeur.peche.bancs[cle]
editeur = importeur.interpreteur.construire_editeur(
"schooledit", personnage, banc)
personnage.contextes.ajouter(editeur)
editeur.actualiser()
|
'''Utility functions and classes used internally by Skype4Py.
'''
import sys
import weakref
import threading
from new import instancemethod
def chop(s, n=1, d=None):
'''Chops initial words from a string and returns a list of them and the rest of the string.
@param s: String to chop from.
@type s: str or unicode
@param n: Number of words to chop.
@type n: int
    @param d: Optional delimiter. Any white-char by default.
@type d: str or unicode
@return: A list of n first words from the string followed by the rest of the string
(C{[w1, w2, ..., wn, rest_of_string]}).
@rtype: list of str or unicode
'''
spl = s.split(d, n)
if len(spl) == n:
spl.append(s[:0])
if len(spl) != n + 1:
raise ValueError('chop: Could not chop %d words from \'%s\'' % (n, s))
return spl
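# Editor's illustrative sketch (not part of the original module): expected
# behaviour of chop() with whitespace and explicit delimiters, shown as a
# doctest-style comment with hypothetical input strings.
#     >>> chop('GET USER fred FULLNAME', 2)
#     ['GET', 'USER', 'fred FULLNAME']
#     >>> chop('KEY=value', 1, '=')
#     ['KEY', 'value']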
def args2dict(s):
'''Converts a string in 'ARG="value", ARG2="value2"' format into a dictionary.
@param s: Input string with comma-separated 'ARG="value"' strings.
@type s: str or unicode
@return: C{{'ARG': 'value'}} dictionary.
@rtype: dict
'''
d = {}
while s:
t, s = chop(s, 1, '=')
if s.startswith('"'):
i = 0
while True:
i = s.find('"', i+1)
# XXX How are the double-quotes escaped? The code below implements VisualBasic technique.
try:
if s[i+1] != '"':
break
else:
i += 1
except IndexError:
break
if i > 0:
d[t] = s[1:i]
s = s[i+1:]
else:
d[t] = s
break
else:
i = s.find(', ')
if i >= 0:
d[t] = s[:i]
s = s[i+2:]
else:
d[t] = s
break
return d
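# Editor's illustrative sketch (not part of the original module): how args2dict()
# handles quoted and unquoted values; the argument strings are hypothetical and
# the dictionary key order is not guaranteed.
#     >>> args2dict('NAME="John Doe"')
#     {'NAME': 'John Doe'}
#     >>> args2dict('STATUS=ONLINE, RATING=5')
#     {'STATUS': 'ONLINE', 'RATING': '5'}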
def quote(s, always=False):
'''Adds double-quotes to string if needed.
@param s: String to add double-quotes to.
@type s: str or unicode
@param always: If True, adds quotes even if the input string contains no spaces.
@type always: bool
@return: If the given string contains spaces or always=True, returns the string enclosed
in double-quotes (if it contained quotes too, they are preceded with a backslash).
        Otherwise returns the string unchanged.
@rtype: str or unicode
'''
if always or ' ' in s:
return '"%s"' % s.replace('"', '\\"')
return s
def esplit(s, d=None):
'''Splits a string into words.
@param s: String to split.
@type s: str or unicode
    @param d: Optional delimiter. Any white-char by default.
@type d: str or unicode
@return: A list of words or C{[]} if the string was empty.
@rtype: list of str or unicode
@note: This function works like C{s.split(d)} except that it always returns an
empty list instead of C{['']} for empty strings.
'''
if s:
return s.split(d)
return []
def cndexp(condition, truevalue, falsevalue):
'''Simulates a conditional expression known from C or Python 2.5+.
@param condition: Boolean value telling what should be returned.
@type condition: bool, see note
@param truevalue: Value returned if condition was True.
@type truevalue: any
@param falsevalue: Value returned if condition was False.
@type falsevalue: any
@return: Either truevalue or falsevalue depending on condition.
@rtype: same as type of truevalue or falsevalue
@note: The type of condition parameter can be anything as long as
C{bool(condition)} returns a bool value.
'''
if condition:
return truevalue
return falsevalue
class _WeakMethod(object):
'''Helper class for WeakCallableRef function (see below).
Don't use directly.
'''
def __init__(self, method, callback=None):
'''__init__.
@param method: Method to be referenced.
@type method: method
@param callback: Callback to be called when the method is collected.
@type callback: callable
'''
self.im_func = method.im_func
try:
self.weak_im_self = weakref.ref(method.im_self, self._dies)
except TypeError:
self.weak_im_self = None
self.im_class = method.im_class
self.callback = callback
def __call__(self):
if self.weak_im_self:
im_self = self.weak_im_self()
if im_self is None:
return None
else:
im_self = None
return instancemethod(self.im_func, im_self, self.im_class)
def __repr__(self):
obj = self()
objrepr = repr(obj)
if obj is None:
objrepr = 'dead'
return '<weakref at 0x%x; %s>' % (id(self), objrepr)
def _dies(self, ref):
# weakref to im_self died
self.im_func = self.im_class = None
if self.callback is not None:
self.callback(self)
def WeakCallableRef(c, callback=None):
'''Creates and returns a new weak reference to a callable object.
    In contrast to weakref.ref(), it works on all kinds of callables.
Usage is same as weakref.ref().
@param c: A callable that the weak reference should point at.
@type c: callable
@param callback: Callback called when the callable is collected (freed).
@type callback: callable
@return: A weak callable reference.
@rtype: weakref
'''
try:
return _WeakMethod(c, callback)
except AttributeError:
return weakref.ref(c, callback)
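# Editor's illustrative sketch (not part of the original module): behaviour of a
# weak reference to a bound method once its owner is garbage collected; _Owner
# is a hypothetical class used only for demonstration.
#     >>> class _Owner(object):
#     ...     def handler(self):
#     ...         return 'called'
#     >>> owner = _Owner()
#     >>> ref = WeakCallableRef(owner.handler)
#     >>> ref()()        # owner alive: a working bound method is returned
#     'called'
#     >>> del owner      # owner collected: the reference now reports dead
#     >>> ref() is None
#     True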
class _EventHandlingThread(threading.Thread):
def __init__(self, name=None):
'''__init__.
@param name: name
@type name: unicode
'''
threading.Thread.__init__(self, name='%s event handler' % name)
self.setDaemon(False)
self.lock = threading.Lock()
self.queue = []
def enqueue(self, target, args, kwargs):
'''enqueue.
@param target: Callable to be called.
@type target: callable
@param args: Positional arguments for the callable.
@type args: tuple
@param kwargs: Keyword arguments for the callable.
@type kwargs: dict
'''
self.queue.append((target, args, kwargs))
def run(self):
'''Executes all enqueued targets.
'''
while True:
try:
try:
self.lock.acquire()
h = self.queue[0]
del self.queue[0]
except IndexError:
break
finally:
self.lock.release()
h[0](*h[1], **h[2])
class EventHandlingBase(object):
'''This class is used as a base by all classes implementing event handlers.
Look at known subclasses (above in epydoc) to see which classes will allow you to
    attach your own callables (event handlers) to certain events occurring in them.
Read the respective classes documentations to learn what events are provided by them. The
    events are always defined in a class whose name consists of the name of the class it provides
    events for, followed by C{Events}. For example, class L{ISkype} provides events defined in
L{ISkypeEvents}. The events class is always defined in the same submodule as the main class.
The events class is just informative. It tells you what events you can assign your event
handlers to, when do they occur and what arguments lists should your event handlers
accept.
There are three ways of attaching an event handler to an event.
1. C{Events} object.
Use this method if you need to attach many event handlers to many events.
Write your event handlers as methods of a class. The superclass of your class
    doesn't matter, Skype4Py will just look for methods with appropriate names.
The names of the methods and their arguments lists can be found in respective
events classes (see above).
Pass an instance of this class as the C{Events} argument to the constructor of
a class whose events you are interested in. For example::
import Skype4Py
class MySkypeEvents:
def UserStatus(self, Status):
print 'The status of the user changed'
skype = Skype4Py.Skype(Events=MySkypeEvents())
The C{UserStatus} method will be called when the status of the user currently logged
into skype is changed.
2. C{On...} properties.
This method lets you use any callables as event handlers. Simply assign them to C{On...}
properties (where "C{...}" is the name of the event) of the object whose events you are
interested in. For example::
import Skype4Py
def user_status(Status):
print 'The status of the user changed'
skype = Skype4Py.Skype()
skype.OnUserStatus = user_status
The C{user_status} function will be called when the status of the user currently logged
into skype is changed.
The names of the events and their arguments lists should be taken from respective events
classes (see above). Note that there is no C{self} argument (which can be seen in the events
classes) simply because our event handler is a function, not a method.
3. C{RegisterEventHandler} / C{UnregisterEventHandler} methods.
    This method, like the second one, also lets you use any callables as event handlers. However,
    it additionally lets you assign many event handlers to a single event.
In this case, you use L{RegisterEventHandler} and L{UnregisterEventHandler} methods
of the object whose events you are interested in. For example::
import Skype4Py
def user_status(Status):
print 'The status of the user changed'
skype = Skype4Py.Skype()
skype.RegisterEventHandler('UserStatus', user_status)
The C{user_status} function will be called when the status of the user currently logged
into skype is changed.
The names of the events and their arguments lists should be taken from respective events
classes (see above). Note that there is no C{self} argument (which can be seen in the events
classes) simply because our event handler is a function, not a method.
B{Important notes!}
The event handlers are always called on a separate thread. At any given time, there is at most
one handling thread per event type. This means that when a lot of events of the same type are
generated at once, handling of an event will start only after the previous one is handled.
Handling of events of different types may happen simultaneously.
    In the case of the second and third methods, only weak references to the event handlers are stored. This
means that you must make sure that Skype4Py is not the only one having a reference to the callable
or else it will be garbage collected and silently removed from Skype4Py's handlers list. On the
other hand, it frees you from worrying about cyclic references.
'''
_EventNames = []
def __init__(self):
'''Initializes the object.
'''
self._EventHandlerObj = None
self._DefaultEventHandlers = {}
self._EventHandlers = {}
self._EventThreads = {}
for event in self._EventNames:
self._EventHandlers[event] = []
def _CallEventHandler(self, Event, *args, **kwargs):
        '''Calls all event handlers defined for the given Event (str). Additional
        parameters are passed unchanged to the event handlers; all handlers are
        fired on a separate thread.
'''
# get list of relevant handlers
handlers = dict([(x, x()) for x in self._EventHandlers[Event]])
if None in handlers.values():
# cleanup
self._EventHandlers[Event] = list([x[0] for x in handlers.items() if x[1] is not None])
handlers = filter(None, handlers.values())
# try the On... handlers
try:
h = self._DefaultEventHandlers[Event]()
if h:
handlers.append(h)
except KeyError:
pass
# try the object handlers
try:
handlers.append(getattr(self._EventHandlerObj, Event))
except AttributeError:
pass
# if no handlers, leave
if not handlers:
return
# initialize event handling thread if needed
if Event in self._EventThreads:
t = self._EventThreads[Event]
t.lock.acquire()
if not self._EventThreads[Event].isAlive():
t = self._EventThreads[Event] = _EventHandlingThread(Event)
else:
t = self._EventThreads[Event] = _EventHandlingThread(Event)
# enqueue handlers in thread
for h in handlers:
t.enqueue(h, args, kwargs)
# start serial event processing
try:
t.lock.release()
except:
t.start()
def RegisterEventHandler(self, Event, Target):
'''Registers any callable as an event handler.
@param Event: Name of the event. For event names, see the respective C{...Events} class.
@type Event: str
@param Target: Callable to register as the event handler.
@type Target: callable
        @return: True if the callable was successfully registered, False if it was already registered.
@rtype: bool
@see: L{EventHandlingBase}
'''
if not callable(Target):
raise TypeError('%s is not callable' % repr(Target))
if Event not in self._EventHandlers:
raise ValueError('%s is not a valid %s event name' % (Event, self.__class__.__name__))
# get list of relevant handlers
handlers = dict([(x, x()) for x in self._EventHandlers[Event]])
if None in handlers.values():
# cleanup
self._EventHandlers[Event] = list([x[0] for x in handlers.items() if x[1] is not None])
if Target in handlers.values():
return False
self._EventHandlers[Event].append(WeakCallableRef(Target))
return True
def UnregisterEventHandler(self, Event, Target):
'''Unregisters a previously registered event handler (a callable).
@param Event: Name of the event. For event names, see the respective C{...Events} class.
@type Event: str
@param Target: Callable to unregister.
@type Target: callable
@return: True if callable was successfully unregistered, False if it wasn't registered first.
@rtype: bool
@see: L{EventHandlingBase}
'''
if not callable(Target):
raise TypeError('%s is not callable' % repr(Target))
if Event not in self._EventHandlers:
raise ValueError('%s is not a valid %s event name' % (Event, self.__class__.__name__))
# get list of relevant handlers
handlers = dict([(x, x()) for x in self._EventHandlers[Event]])
if None in handlers.values():
# cleanup
self._EventHandlers[Event] = list([x[0] for x in handlers.items() if x[1] is not None])
for wref, trg in handlers.items():
if trg == Target:
self._EventHandlers[Event].remove(wref)
return True
return False
def _SetDefaultEventHandler(self, Event, Target):
if Target:
if not callable(Target):
raise TypeError('%s is not callable' % repr(Target))
self._DefaultEventHandlers[Event] = WeakCallableRef(Target)
else:
try:
del self._DefaultEventHandlers[Event]
except KeyError:
pass
def _GetDefaultEventHandler(self, Event):
try:
return self._DefaultEventHandlers[Event]()
except KeyError:
pass
def _SetEventHandlerObj(self, Obj):
        '''Registers an object (Obj) as the event handler. The object should contain
        methods with names corresponding to event names; only one object is allowed at a time.
'''
self._EventHandlerObj = Obj
@staticmethod
def __AddEvents_make_event(Event):
# TODO: rework to make compatible with cython
return property(lambda self: self._GetDefaultEventHandler(Event),
lambda self, value: self._SetDefaultEventHandler(Event, value))
@classmethod
def _AddEvents(cls, klass):
'''Adds events to class based on 'klass' attributes.'''
for event in dir(klass):
if not event.startswith('_'):
setattr(cls, 'On%s' % event, cls.__AddEvents_make_event(event))
cls._EventNames.append(event)
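# Editor's illustrative sketch (not part of Skype4Py): the minimal wiring a class
# needs to expose events through EventHandlingBase.  Widget, WidgetEvents and
# Clicked are hypothetical names used only for demonstration.
#     class WidgetEvents(object):
#         def Clicked(self, Button):
#             pass
#     class Widget(EventHandlingBase):
#         _EventNames = []                    # filled in by _AddEvents() below
#         def Click(self, Button):
#             self._CallEventHandler('Clicked', Button)
#     Widget._AddEvents(WidgetEvents)         # adds Widget.OnClicked and registers the event name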
class Cached(object):
'''Base class for all cached objects.
Every object is identified by an Id specified as first parameter of the constructor.
Trying to create two objects with same Id yields the same object. Uses weak references
to allow the objects to be deleted normally.
@warning: C{__init__()} is always called, don't use it to prevent initializing an already
initialized object. Use C{_Init()} instead, it is called only once.
'''
_cache_ = weakref.WeakValueDictionary()
def __new__(cls, Id, *args, **kwargs):
h = cls, Id
try:
return cls._cache_[h]
except KeyError:
o = object.__new__(cls)
cls._cache_[h] = o
if hasattr(o, '_Init'):
o._Init(Id, *args, **kwargs)
return o
def __copy__(self):
return self
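# Editor's illustrative sketch (not part of Skype4Py): identity caching provided
# by Cached; _Thing is a hypothetical subclass used only for demonstration.
#     >>> class _Thing(Cached):
#     ...     def _Init(self, Id):
#     ...         self.Id = Id
#     >>> a = _Thing(42)
#     >>> b = _Thing(42)
#     >>> a is b          # same Id -> the very same (weakly cached) object
#     True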
|
"""Variable transforms. Used for mapping to infinite intervals etc."""
from __future__ import print_function
from numpy import Inf
from numpy import hypot, sqrt, sign
from numpy import array, asfarray, empty_like, isscalar, all, equal
class VarTransform(object):
"""Base class for variable transforms."""
def inv_var_change_with_mask(self, t):
eq = equal.outer(t, self.var_inf)
mask = ~eq.any(axis=-1)
if (~mask).any():
if isscalar(t):
                x = 0 # t is fully masked; any value works, use 0
else:
t = asfarray(t)
x = empty_like(t)
x[mask] = self.inv_var_change(t[mask])
else:
x = self.inv_var_change(t)
return x, mask
def apply_with_inv_transform(self, f, t, def_val = 0, mul_by_deriv = False):
"""Apply function f to vartransform of t.
Accepts vector inputs. Values at infinity are set to def_val."""
x, mask = self.inv_var_change_with_mask(t)
if (~mask).any():
if isscalar(x):
y = def_val
else:
y = empty_like(x)
y[mask] = f(x[mask])
if mul_by_deriv:
y[mask] *= self.inv_var_change_deriv(t[mask])
y[~mask] = def_val
else:
y = f(x)
if mul_by_deriv:
y *= self.inv_var_change_deriv(t)
return y
class VarTransformIdentity(VarTransform):
"""The identity transform."""
def var_change(self, x):
return x
def inv_var_change(self, t):
return t
def inv_var_change_deriv(self, t):
return 1.0
var_min = -1.0
var_max = +1.0
var_inf = [] # parameter values corresponding to infinity. Do
# not distinguish +oo and -oo
### Variable transforms
class VarTransformReciprocal_PMInf(VarTransform):
"""Reciprocal variable transform."""
def __init__(self, exponent = 1):
self.exponent = exponent
def var_change(self, x):
#if x > 0:
# t = x / (x + 1.0)
#else:
# t = x / (1.0 - x)
t = x / (1.0 + abs(x))
return t
def inv_var_change(self, t):
#if t > 0:
# x = t / (1.0 - t)
#else:
# x = t / (1.0 + t)
x = t / (1.0 - abs(t))
return x
def inv_var_change_deriv(self, t):
return 1.0 / ((1.0 - abs(t)) * (1.0 - abs(t)))
var_min = -1.0
var_max = +1.0
var_inf = [-1.0, +1.0] # parameter values corresponding to infinity. Do
# not distinguish +oo and -oo
class VarTransformReciprocal_PInf(VarTransform):
"""Reciprocal variable transform.
Optionally an exponent different from 1 can be specified. If U is
    given, then the transform is over the finite interval [L, U]."""
def __init__(self, L = 0, exponent = 1, U = None):
self.exponent = exponent
self.L = L
self.U = U
if self.L == 0:
self.offset = 1.0
else:
self.offset = abs(self.L) / 2
if U is not None:
self.var_min = self.var_change(U)
self.var_inf = []
def var_change(self, x):
#assert all(x >= self.L)
if self.exponent == 1:
t = self.offset / (x - self.L + self.offset)
elif self.exponent == 2:
t = sqrt(self.offset / (x - self.L + self.offset))
else:
t = (self.offset / (x - self.L + self.offset))**(1.0/self.exponent)
return t
def inv_var_change(self, t):
if self.exponent == 1:
x = self.L - self.offset + self.offset / t
else:
x = self.L - self.offset + self.offset / t**self.exponent
return x
def inv_var_change_deriv(self, t):
if self.exponent == 1:
der = self.offset / (t * t)
else:
der = self.offset * float(self.exponent) / t**(self.exponent + 1)
return der
var_min = 0
var_max = 1
var_inf = [0] # parameter values corresponding to infinity. Do
# not distinguish +oo and -oo
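def _example_reciprocal_round_trip():
    """Editor's illustrative sketch, not part of the original module: for
    VarTransformReciprocal_PInf with L=2 the transform maps [L, +oo) onto (0, 1]
    (t = 1 at x = L, t -> 0 as x -> +oo), and var_change/inv_var_change are
    mutually inverse away from the infinite endpoint."""
    vt = VarTransformReciprocal_PInf(L=2.0)
    xs = array([2.0, 3.0, 10.0, 1e6])
    ts = vt.var_change(xs)          # 1/(x - 1) for L=2, exponent=1
    back = vt.inv_var_change(ts)    # 1 + 1/t recovers the original points
    return all(abs(back - xs) < 1e-6 * xs)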
class VarTransformReciprocal_MInf(VarTransform):
"""Reciprocal variable transform.
Optionally an exponent different from 1 can be specified. If L is
    given, then the transform is over the finite interval [L, U]."""
def __init__(self, U = 0, exponent = 1, L = None):
self.exponent = exponent
self.L = L
self.U = U
if self.U == 0:
self.offset = 1.0
else:
self.offset = abs(self.U) / 2
if L is not None:
self.var_min = self.var_change(L)
self.var_inf = []
def var_change(self, x):
#assert all(x <= self.U)
if self.exponent == 1:
t = -self.offset / (x - self.U - self.offset)
elif self.exponent == 2:
t = sqrt(-self.offset / (x - self.U - self.offset))
else:
t = (self.offset / abs(x - self.U - self.offset))**(1.0/self.exponent)
return t
def inv_var_change(self, t):
if self.exponent == 1:
x = self.U + self.offset - self.offset / t
elif self.exponent == 2:
x = self.U + self.offset - self.offset / (t*t)
else:
x = self.U + self.offset - self.offset / t**self.exponent
return x
def inv_var_change_deriv(self, t):
if self.exponent == 1:
der = self.offset / (t * t)
else:
der = self.offset * float(self.exponent) / t**(self.exponent + 1)
return der
var_min = 0
var_max = 1
var_inf = [0] # parameter values corresponding to infinity. Do
# not distinguish +oo and -oo
# variable transforms suggested by Boyd
class VarTransformAlgebraic_PMInf(VarTransform):
"""Variable transform suggested by Boyd.
Leads to Chebyshev rational functions."""
def __init__(self, c = 1):
self.c = c # this corresponds to Boyd's L param
def var_change(self, x):
t = x / hypot(self.c, x)
return t
def inv_var_change(self, t):
x = self.c * t / sqrt(1.0 - t*t)
return x
def inv_var_change_deriv(self, t):
t2 = t * t
der = t2 / sqrt((1.0 - t2)**3) + 1.0 / sqrt(1.0 - t2)
return self.c * der
var_min = -1.0
var_max = +1.0
var_inf = [-1.0, +1.0] # parameter values corresponding to infinity. Do
# not distinguish +oo and -oo
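def _example_integrate_over_real_line():
    """Editor's illustrative sketch, not part of the original module: use
    VarTransformAlgebraic_PMInf and apply_with_inv_transform() to approximate
    the integral of exp(-x*x) over (-oo, +oo) with a plain trapezoid rule on
    the finite interval [var_min, var_max]; the result should be close to
    sqrt(pi) ~ 1.7725."""
    from numpy import exp, linspace, trapz
    vt = VarTransformAlgebraic_PMInf()
    t = linspace(vt.var_min, vt.var_max, 2001)
    # f(x(t)) * dx/dt, with the infinite endpoints (t = +/-1) forced to 0
    y = vt.apply_with_inv_transform(lambda x: exp(-x * x), t,
                                    def_val=0, mul_by_deriv=True)
    return trapz(y, t)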
class VarTransformAlgebraic_PInf(VarTransform):
"""Variable transform suggested by Boyd."""
def __init__(self, L = 0, c = 1):
self.L = float(L) # lower bound
self.c = c # this corresponds to Boyd's L param
def var_change(self, x):
#assert all(x >= self.L)
if ~all(x >= self.L):
print("assert all(x >= self.L)")
print(x)
print(x < self.L)
t = (x - self.L - self.c) / (x - self.L + self.c)
return t
def inv_var_change(self, t):
x = self.L + self.c * (1.0 + t) / (1.0 - t)
return x
def inv_var_change_deriv(self, t):
der = 2.0 * self.c / (1.0 - t)**2
return der
var_min = -1.0
var_max = +1.0
var_inf = [+1.0] # parameter values corresponding to infinity. Do
# not distinguish +oo and -oo
class VarTransformAlgebraic_MInf(VarTransform):
"""Variable transform suggested by Boyd."""
def __init__(self, U = 0, c = 1):
self.U = float(U) # upper bound
self.c = c # this corresponds to Boyd's L param
def var_change(self, x):
#assert all(x <= self.U)
if ~all(x <= self.U):
print("assert all(x >= self.L)")
print(x)
print(x < self.U)
t = (-(x - self.U) - self.c) / (-(x - self.U) + self.c)
return t
def inv_var_change(self, t):
x = self.U - self.c * (1.0 + t) / (1.0 - t)
return x
def inv_var_change_deriv(self, t):
der = 2.0 * self.c / (1.0 - t)**2
return der
var_min = -1.0
var_max = +1.0
var_inf = [+1.0] # parameter values corresponding to infinity. Do
# not distinguish +oo and -oo
def plot_transformed(f, vt):
"""A debugging plot of f under variable transfom vt."""
from pylab import plot, show, linspace
T = linspace(vt.var_min, vt.var_max, 1000)
Y = [f(vt.inv_var_change(t)) if t not in vt.var_inf else 0 for t in T]
plot(T, Y, linewidth=5)
def plot_transformed_w_deriv(f, vt):
"""A debugging plot of f under variable transfom vt including the
derivative of inverse transform."""
from pylab import plot, show, linspace
T = linspace(vt.var_min, vt.var_max, 1000)
Y = [f(vt.inv_var_change(t))*vt.inv_var_change_deriv(t) if t not in vt.var_inf else 0 for t in T]
plot(T, Y, linewidth=5)
def plot_invtransformed_tail(f, vt):
from pylab import loglog, show, logspace
X = logspace(1, 50, 1000)
Y = f(vt.var_change(X))
loglog(X, Y)
if __name__ == "__main__":
vt = VarTransformAlgebraic_PMInf()
print(vt.inv_var_change_with_mask(array([-1,0,1])))
print(vt.inv_var_change_with_mask(-1))
print(vt.apply_with_inv_transform(lambda x: x+1, array([-1,0,1])))
print(vt.apply_with_inv_transform(lambda x: x+1, 0))
print(vt.apply_with_inv_transform(lambda x: x+1, -1))
from numpy import exp
from pylab import show
#plot_transformed(lambda x: 1.0/(1+x*x), VarTransformAlgebraic_PInf(1))
#plot_transformed(lambda x: exp(-x*x), VarTransformAlgebraic_PMInf())
#plot_transformed_w_deriv(lambda x: 1.0/(1+x*x), VarTransformAlgebraic_PMInf())
#plot_transformed_w_deriv(lambda x: exp(-x*x), VarTransformAlgebraic_PMInf())
#plot_transformed(lambda x: 1.0/(1+x*x), VarTransformReciprocal_PInf())
#plot_transformed(lambda x: exp(-x*x), VarTransformReciprocal_PInf())
#plot_transformed(lambda x: 1.0/(1+x**1.0), VarTransformReciprocal_PInf())
#plot_transformed(lambda x: 1.0/(1+x**1.2), VarTransformReciprocal_PInf())
#plot_transformed(lambda x: 1.0/(1+x**1.5), VarTransformReciprocal_PInf())
#plot_transformed(lambda x: 1.0/(1+x**2.0), VarTransformReciprocal_PInf())
#plot_transformed(lambda x: 1.0/(1+x**2.0), VarTransformIdentity())
#plot_transformed(lambda x: 1.0/(1+x**2.0), VarTransformReciprocal_PInf(U = 2))
#plot_transformed(lambda x: 1.0/(1+x**2.0), VarTransformReciprocal_MInf())
#plot_transformed(lambda x: 1.0/(1+x**2.0), VarTransformReciprocal_MInf(L = -2))
plot_invtransformed_tail(lambda x: x, VarTransformReciprocal_PInf(L = 10))
plot_invtransformed_tail(lambda x: 1-x, VarTransformAlgebraic_PInf(L = 10))
show()
|
from github import Github
g = Github("cobrabot", "dd31ac21736aeeaeac764ce1192c17e370679a25")
cobratoolbox = g.get_user("opencobra").get_repo("cobratoolbox")
contributors = {}
for contributor in cobratoolbox.get_stats_contributors():
a = 0
d = 0
c = 0
for week in contributor.weeks:
a += week.a
d += week.d
c += week.c
contributors[contributor.author.login] = {
'additions': a, 'deletions': d, 'commits': c, 'avatar': contributor.author.avatar_url}
print "name: %20s, additions: %10d, deletions: %10d, commits: %10d" % (contributor.author.login, a, d, c)
sorted_by_commits = sorted(contributors.items(), key=lambda x: x[1]['commits'])
table = '\n.. raw:: html\n\n <table style="margin:0px auto" width="100%">'
for k in range(0, 5):
table += """\n
<tr>
<td width="46px"><img src="%s" width=46 height=46 alt=""></td><td><a href="https://github.com/%s">%s</a></td>
<td width="46px"><img src="%s" width=46 height=46 alt=""></td><td><a href="https://github.com/%s">%s</a></td>
</tr>""" % (sorted_by_commits[-(2 * k + 1)][1]['avatar'], sorted_by_commits[-(2 * k + 1)][0], sorted_by_commits[-(2 * k + 1)][0],
sorted_by_commits[-(2 * (k + 1))][1]['avatar'], sorted_by_commits[-(2 * (k + 1))][0], sorted_by_commits[-(2 * (k + 1))][0])
table += "\n </table>"
with open("docs/source/contributors.rst", "w") as readme:
readme.write(table)
|
# Copyright 2016 Hewlett Packard Enterprise Development, LP.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from _restobject import RestObject
def ex50_get_csr(restobj, filename):
sys.stdout.write("\nEXAMPLE 50: Get CSR\n")
instances = restobj.search_for_type("HpHttpsCert.")
for instance in instances:
response = restobj.rest_get(instance["href"])
try:
csr_response = response.dict["CertificateSigningRequest"]
with open(filename, 'wb') as csroutput:
csroutput.write(csr_response)
csroutput.close()
sys.stdout.write("\tCSR Data saved successfully as \
"+ filename + "\n")
restobj.error_handler(response)
except KeyError:
sys.stdout.write("\tCSR cannot be accessed right now, \
please try again later")
if __name__ == "__main__":
# When running on the server locally use the following commented values
# iLO_https_url = "blobstore://."
# iLO_account = "None"
# iLO_password = "None"
# When running remotely connect using the iLO secured (https://) address,
# iLO account name, and password to send https requests
# iLO_https_url acceptable examples:
# "https://10.0.0.100"
# "https://f250asha.americas.hpqcorp.net"
iLO_https_url = "https://10.0.0.100"
iLO_account = "admin"
iLO_password = "password"
#Create a REST object
REST_OBJ = RestObject(iLO_https_url, iLO_account, iLO_password)
ex50_get_csr(REST_OBJ, "csr.txt")
|
# $Id$
##
## This file is part of pyFormex 0.8.9 (Fri Nov 9 10:49:51 CET 2012)
## pyFormex is a tool for generating, manipulating and transforming 3D
## geometrical models by sequences of mathematical operations.
## Home page: http://pyformex.org
## Project page: http://savannah.nongnu.org/projects/pyformex/
## Copyright 2004-2012 (C) Benedict Verhegghe ([email protected])
## Distributed under the GNU General Public License version 3 or later.
##
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see http://www.gnu.org/licenses/.
##
"""TrussFrame
"""
from __future__ import print_function
_status = 'checked'
_level = 'normal'
_topics = ['geometry']
_techniques = ['color']
from gui.draw import *
def run():
clear()
yf = [ 0.0, 0.2, 1.2, 2.2, 3.2, 4.2, 4.5 ] # y of nodes in frame columns
a = Formex([[[0.0,y]] for y in yf ])
b = connect([a,a],bias=[0,1]).translate([0.5,0.0,0.0])
b.setProp(3)
c = b.reflect(0)
d = connect([b,c],bias=[1,1])
d.setProp(2)
e = connect([b,c],bias=[1,2]).select([0,2]) + connect([b,c],bias=[2,1]).select([1,3])
e.setProp(1)
col = b+c+d+e
frame = col.translate([-4.0,0.0,0.0]) + col.translate([+4.0,0.0,0.0])
    # Roof girder (dakligger)
    h0 = 1.2 # height at mid-span
    h1 = 0.5 # height at the ends
    xd = [ 0, 0.6 ] + [ 0.6+i*1.2 for i in range(5)] # horizontal positions of the nodes
ko = Formex([[[x,0.0]] for x in xd])
ond = connect([ko,ko],bias=[0,1])
bov = ond.translate(1,h0).shear(1,0,(h1-h0)/xd[-1])
tss = connect([ond,bov],bias=[1,1])
ond.setProp(2)
bov.setProp(4)
tss.setProp(5)
dakligger = (ond+bov+tss)
dakligger += dakligger.reflect(0)
frame += dakligger.translate([0,yf[-1],0])
draw(frame)
structure = frame.replic2(2,6,12.,3.,0,2)
clear()
draw(structure)
view('top')
view('right')
view('iso')
if __name__ == 'draw':
run()
# End
|
"""
Database Access Layer
"""
import datetime
import itertools
import operator
from django.db.models import Q
from django.db import transaction
from django.core.exceptions import ObjectDoesNotExist
from openlets.core import models
def get_balance(persona, personb, currency):
"""Load a balance between two persons.
"""
return (models.PersonBalance.objects
.select_related(
            'balance',
            'balance__persons'
)
.get(
person=persona,
balance__persons=personb,
balance__currency=currency
).balance
)
def get_balances(user, include_balanced=False, credited=None):
"""Get the list of balances for a user. Filter out any where the value is
back to 0.
"""
q = models.PersonBalance.objects.filter(person=user.person)
if not include_balanced:
q = q.exclude(balance__value=0)
if credited is not None:
q = q.filter(credited=credited)
return q
def get_balances_many(persons, currency, credited, include_balanced=False):
"""Return a map of person -> balances."""
q = models.PersonBalance.objects.filter(
person__in=persons,
credited=credited,
balance__currency=currency
)
if not include_balanced:
q = q.exclude(balance__value=0)
return q
def get_pending_trans_for_user(user):
"""Get pending transactions for a user which were
created by some other user, and need to be accepted.
"""
return models.TransactionRecord.objects.filter(
target_person=user.person,
transaction__isnull=True,
rejected=False
)
def get_recent_trans_for_user(user, days=10, limit=15, pending_only=False):
"""Get recent transaction records for the user. These transaction records
may be confirmed.
"""
earliest_day = datetime.date.today() - datetime.timedelta(days)
q = models.TransactionRecord.objects.filter(
creator_person=user.person,
time_created__gte=earliest_day
)
if pending_only:
q = q.filter(transaction__isnull=True)
return q.order_by('-transaction_time')[:limit]
def get_exchange_rates(user):
"""Get exchange rates for the user."""
return models.ExchangeRate.objects.filter(
person=user.person
)
# TODO: tests
def get_transfer_history(user, filters):
"""Get a list of all transactions and resolutions for the user filtered
by form filters.
"""
query_sets = []
resolution_query, trans_query = [], []
now = datetime.datetime.now()
def conv(key, trans, resolution, coerce_val=None):
"""Helper to setup filters for both tables."""
val = filters.get(key)
if not val:
return
if coerce_val:
val = coerce_val(val)
if trans:
trans_query.append((trans, val))
if resolution:
resolution_query.append((resolution, val))
# Setup filters
transfer_type = filters.get('transfer_type')
conv('person', 'target_person', 'resolution__persons')
conv('transaction_type', 'from_receiver', 'credited', lambda x: x == 'charge')
conv('currency', 'currency', 'resolution__currency')
if filters.get('status') == 'rejected':
conv('status', 'rejected', None, lambda x: True)
else:
conv('status',
'transaction__time_confirmed__isnull',
'resolution__time_confirmed__isnull',
lambda x: x == 'pending'
)
conv('transaction_time',
'transaction_time__gt',
'resolution__time_confirmed__gt',
lambda d: now - datetime.timedelta(days=d)
)
conv('confirmed_time',
'transaction__time_confirmed__gt',
'resolution__time_confirmed__gt',
lambda d: now - datetime.timedelta(days=d)
)
# Query Transactions
if not transfer_type or transfer_type == 'transaction':
query_sets.append(models.TransactionRecord.objects.filter(
creator_person=user.person,
**dict(trans_query)
))
# Query Resolutions
if not transfer_type or transfer_type == 'resolution':
query_sets.append(models.PersonResolution.objects.filter(
person=user.person,
**dict(resolution_query)
))
# Merge results
return sorted(
itertools.chain.from_iterable(query_sets),
key=operator.attrgetter('transaction_time'),
reverse=True
)
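# Editor's illustrative note (not part of the original module): with a
# hypothetical filters dict such as
#     {'transfer_type': 'transaction', 'status': 'pending', 'transaction_time': 7}
# the conv() helper above produces ORM filters roughly equivalent to
#     models.TransactionRecord.objects.filter(
#         creator_person=user.person,
#         transaction__time_confirmed__isnull=True,
#         transaction_time__gt=now - datetime.timedelta(days=7),
#     )
# and the PersonResolution query set is skipped entirely because transfer_type
# is 'transaction'.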
def get_trans_record_for_user(trans_record_id, user):
"""Get a transaction record for a user."""
return models.TransactionRecord.objects.get(
id=trans_record_id,
target_person=user.person,
rejected=False
)
@transaction.commit_on_success
def reject_trans_record(trans_record_id, user):
"""Reject a transaction record where the user is the target."""
trans_record = get_trans_record_for_user(trans_record_id, user)
trans_record.rejected = True
trans_record.save()
@transaction.commit_on_success
def confirm_trans_record(trans_record):
"""Confirm a transaction record."""
# Build and save matching record
confirm_record = models.TransactionRecord(
creator_person=trans_record.target_person,
target_person=trans_record.creator_person,
from_receiver=not trans_record.from_receiver,
currency=trans_record.currency,
transaction_time=trans_record.transaction_time,
value=trans_record.value
)
transaction = models.Transaction()
transaction.save()
confirm_record.transaction_id = trans_record.transaction_id = transaction.id
confirm_record.save()
trans_record.save()
# Update the balance, or create a new one
update_balance(
trans_record,
trans_record.provider,
trans_record.receiver
)
# TODO: tests!
def update_balance(currency_type, provider, receiver):
"""Update or create a balance between two users for a currency. Should be
    called from a method that has already created a transfer.
"""
try:
balance = get_balance(provider, receiver, currency=currency_type.currency)
except ObjectDoesNotExist:
return new_balance(currency_type, provider, receiver)
# Establish the direction of the transfer
if provider == balance.debted:
balance.value += currency_type.value
balance.save()
return balance
balance.value -= currency_type.value
if (balance.value) < 0:
balance.value = abs(balance.value)
for personbalance in balance.personbalance_set.all():
personbalance.credited = not personbalance.credited
personbalance.save()
balance.save()
# TODO: does this cascade to the personbalance ?
return balance
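# Editor's illustrative note (assumed numbers, not from the original code): when
# the provider is not on the debted side of the balance, update_balance() simply
# decrements the value; if it drops below zero its absolute value is kept and the
# 'credited' flag on every related PersonBalance row is flipped, so e.g. an
# existing 10-unit balance reduced by a 15-unit transfer becomes a 5-unit balance
# in the opposite direction.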
# TODO: tests
def new_balance(currency_type, provider, receiver):
balance = models.Balance(
currency=currency_type.currency,
value=currency_type.value
)
balance.save()
personbalancea = models.PersonBalance(
person=provider,
credited=False,
balance=balance
)
personbalanceb = models.PersonBalance(
person=receiver,
credited=True,
balance=balance
)
personbalancea.save()
personbalanceb.save()
balance.save()
return balance
def get_transaction_count(user):
"""Get a count of transaction records by this user."""
return models.TransactionRecord.objects.filter(
creator_person=user.person
).count()
def get_transaction_notifications(user, days=2):
"""Get recent transaction actions targetted at the user."""
now = datetime.datetime.now()
return models.TransactionRecord.objects.filter(
target_person=user.person,
time_created__gte=now - datetime.timedelta(days=days)
)
def get_recent_resolutions(user, days=2):
"""Get recent resolutions involsing the user."""
now = datetime.datetime.now()
return models.PersonResolution.objects.filter(
person=user.person,
resolution__time_confirmed__gte=now - datetime.timedelta(days=days)
)
|
# encoding: utf-8
# module PyQt4.QtCore
# from /usr/lib/python3/dist-packages/PyQt4/QtCore.cpython-34m-x86_64-linux-gnu.so
# by generator 1.135
# no doc
# imports
import sip as __sip
class QSemaphore(): # skipped bases: <class 'sip.simplewrapper'>
""" QSemaphore(int n=0) """
def acquire(self, int_n=1): # real signature unknown; restored from __doc__
""" QSemaphore.acquire(int n=1) """
pass
def available(self): # real signature unknown; restored from __doc__
""" QSemaphore.available() -> int """
return 0
def release(self, int_n=1): # real signature unknown; restored from __doc__
""" QSemaphore.release(int n=1) """
pass
def tryAcquire(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads
"""
QSemaphore.tryAcquire(int n=1) -> bool
QSemaphore.tryAcquire(int, int) -> bool
"""
return False
def __init__(self, int_n=0): # real signature unknown; restored from __doc__
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
|
"""distutils.ccompiler
Contains CCompiler, an abstract base class that defines the interface
for the Distutils compiler abstraction model."""
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id$"
import sys, os, re
from types import *
from copy import copy
from distutils.errors import *
from distutils.spawn import spawn
from distutils.file_util import move_file
from distutils.dir_util import mkpath
from distutils.dep_util import newer_pairwise, newer_group
from distutils.util import split_quoted, execute
from distutils import log
class CCompiler:
"""Abstract base class to define the interface that must be implemented
by real compiler classes. Also has some utility methods used by
several compiler classes.
The basic idea behind a compiler abstraction class is that each
instance can be used for all the compile/link steps in building a
single project. Thus, attributes common to all of those compile and
link steps -- include directories, macros to define, libraries to link
against, etc. -- are attributes of the compiler instance. To allow for
variability in how individual files are treated, most of those
attributes may be varied on a per-compilation or per-link basis.
"""
# 'compiler_type' is a class attribute that identifies this class. It
# keeps code that wants to know what kind of compiler it's dealing with
# from having to import all possible compiler classes just to do an
# 'isinstance'. In concrete CCompiler subclasses, 'compiler_type'
# should really, really be one of the keys of the 'compiler_class'
# dictionary (see below -- used by the 'new_compiler()' factory
# function) -- authors of new compiler interface classes are
# responsible for updating 'compiler_class'!
compiler_type = None
# XXX things not handled by this compiler abstraction model:
# * client can't provide additional options for a compiler,
# e.g. warning, optimization, debugging flags. Perhaps this
# should be the domain of concrete compiler abstraction classes
# (UnixCCompiler, MSVCCompiler, etc.) -- or perhaps the base
# class should have methods for the common ones.
    # * can't completely override the include or library search
# path, ie. no "cc -I -Idir1 -Idir2" or "cc -L -Ldir1 -Ldir2".
# I'm not sure how widely supported this is even by Unix
# compilers, much less on other platforms. And I'm even less
# sure how useful it is; maybe for cross-compiling, but
# support for that is a ways off. (And anyways, cross
# compilers probably have a dedicated binary with the
# right paths compiled in. I hope.)
# * can't do really freaky things with the library list/library
# dirs, e.g. "-Ldir1 -lfoo -Ldir2 -lfoo" to link against
# different versions of libfoo.a in different locations. I
# think this is useless without the ability to null out the
# library search path anyways.
# Subclasses that rely on the standard filename generation methods
# implemented below should override these; see the comment near
# those methods ('object_filenames()' et. al.) for details:
src_extensions = None # list of strings
obj_extension = None # string
static_lib_extension = None
shared_lib_extension = None # string
static_lib_format = None # format string
shared_lib_format = None # prob. same as static_lib_format
exe_extension = None # string
# Default language settings. language_map is used to detect a source
# file or Extension target language, checking source filenames.
# language_order is used to detect the language precedence, when deciding
# what language to use when mixing source types. For example, if some
# extension has two files with ".c" extension, and one with ".cpp", it
# is still linked as c++.
language_map = {".c" : "c",
".cc" : "c++",
".cpp" : "c++",
".cxx" : "c++",
".m" : "objc",
}
language_order = ["c++", "objc", "c"]
def __init__ (self,
verbose=0,
dry_run=0,
force=0):
self.dry_run = dry_run
self.force = force
self.verbose = verbose
# 'output_dir': a common output directory for object, library,
# shared object, and shared library files
self.output_dir = None
# 'macros': a list of macro definitions (or undefinitions). A
# macro definition is a 2-tuple (name, value), where the value is
# either a string or None (no explicit value). A macro
# undefinition is a 1-tuple (name,).
self.macros = []
# 'include_dirs': a list of directories to search for include files
self.include_dirs = []
# 'libraries': a list of libraries to include in any link
# (library names, not filenames: eg. "foo" not "libfoo.a")
self.libraries = []
# 'library_dirs': a list of directories to search for libraries
self.library_dirs = []
# 'runtime_library_dirs': a list of directories to search for
# shared libraries/objects at runtime
self.runtime_library_dirs = []
# 'objects': a list of object files (or similar, such as explicitly
# named library files) to include on any link
self.objects = []
for key in self.executables.keys():
self.set_executable(key, self.executables[key])
# __init__ ()
def set_executables (self, **args):
"""Define the executables (and options for them) that will be run
to perform the various stages of compilation. The exact set of
executables that may be specified here depends on the compiler
class (via the 'executables' class attribute), but most will have:
compiler the C/C++ compiler
linker_so linker used to create shared objects and libraries
linker_exe linker used to create binary executables
archiver static library creator
On platforms with a command-line (Unix, DOS/Windows), each of these
is a string that will be split into executable name and (optional)
list of arguments. (Splitting the string is done similarly to how
Unix shells operate: words are delimited by spaces, but quotes and
backslashes can override this. See
'distutils.util.split_quoted()'.)
"""
# Note that some CCompiler implementation classes will define class
# attributes 'cpp', 'cc', etc. with hard-coded executable names;
# this is appropriate when a compiler class is for exactly one
# compiler/OS combination (eg. MSVCCompiler). Other compiler
# classes (UnixCCompiler, in particular) are driven by information
# discovered at run-time, since there are many different ways to do
# basically the same things with Unix C compilers.
for key in args.keys():
if key not in self.executables:
raise ValueError, \
"unknown executable '%s' for class %s" % \
(key, self.__class__.__name__)
self.set_executable(key, args[key])
# set_executables ()
def set_executable(self, key, value):
if type(value) is StringType:
setattr(self, key, split_quoted(value))
else:
setattr(self, key, value)
def _find_macro (self, name):
i = 0
for defn in self.macros:
if defn[0] == name:
return i
i = i + 1
return None
def _check_macro_definitions (self, definitions):
"""Ensures that every element of 'definitions' is a valid macro
definition, ie. either (name,value) 2-tuple or a (name,) tuple. Do
nothing if all definitions are OK, raise TypeError otherwise.
"""
for defn in definitions:
if not (type (defn) is TupleType and
(len (defn) == 1 or
(len (defn) == 2 and
(type (defn[1]) is StringType or defn[1] is None))) and
type (defn[0]) is StringType):
raise TypeError, \
("invalid macro definition '%s': " % defn) + \
"must be tuple (string,), (string, string), or " + \
"(string, None)"
# -- Bookkeeping methods -------------------------------------------
def define_macro (self, name, value=None):
"""Define a preprocessor macro for all compilations driven by this
compiler object. The optional parameter 'value' should be a
string; if it is not supplied, then the macro will be defined
without an explicit value and the exact outcome depends on the
compiler used (XXX true? does ANSI say anything about this?)
"""
# Delete from the list of macro definitions/undefinitions if
# already there (so that this one will take precedence).
i = self._find_macro (name)
if i is not None:
del self.macros[i]
defn = (name, value)
self.macros.append (defn)
def undefine_macro (self, name):
"""Undefine a preprocessor macro for all compilations driven by
this compiler object. If the same macro is defined by
'define_macro()' and undefined by 'undefine_macro()' the last call
takes precedence (including multiple redefinitions or
undefinitions). If the macro is redefined/undefined on a
per-compilation basis (ie. in the call to 'compile()'), then that
takes precedence.
"""
# Delete from the list of macro definitions/undefinitions if
# already there (so that this one will take precedence).
i = self._find_macro (name)
if i is not None:
del self.macros[i]
undefn = (name,)
self.macros.append (undefn)
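        # Editor's illustrative note: starting from an empty list,
        #     self.define_macro('DEBUG', '1'); self.undefine_macro('NDEBUG')
        # leaves self.macros == [('DEBUG', '1'), ('NDEBUG',)] -- a 2-tuple
        # defines a macro, a 1-tuple undefines it, and later calls for the
        # same name replace earlier entries.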
def add_include_dir (self, dir):
"""Add 'dir' to the list of directories that will be searched for
header files. The compiler is instructed to search directories in
the order in which they are supplied by successive calls to
'add_include_dir()'.
"""
self.include_dirs.append (dir)
def set_include_dirs (self, dirs):
"""Set the list of directories that will be searched to 'dirs' (a
list of strings). Overrides any preceding calls to
        'add_include_dir()'; subsequent calls to 'add_include_dir()' add
to the list passed to 'set_include_dirs()'. This does not affect
any list of standard include directories that the compiler may
search by default.
"""
self.include_dirs = copy (dirs)
def add_library (self, libname):
"""Add 'libname' to the list of libraries that will be included in
all links driven by this compiler object. Note that 'libname'
should *not* be the name of a file containing a library, but the
name of the library itself: the actual filename will be inferred by
the linker, the compiler, or the compiler class (depending on the
platform).
The linker will be instructed to link against libraries in the
order they were supplied to 'add_library()' and/or
'set_libraries()'. It is perfectly valid to duplicate library
names; the linker will be instructed to link against libraries as
many times as they are mentioned.
"""
self.libraries.append (libname)
def set_libraries (self, libnames):
"""Set the list of libraries to be included in all links driven by
this compiler object to 'libnames' (a list of strings). This does
not affect any standard system libraries that the linker may
include by default.
"""
self.libraries = copy (libnames)
def add_library_dir (self, dir):
"""Add 'dir' to the list of directories that will be searched for
libraries specified to 'add_library()' and 'set_libraries()'. The
linker will be instructed to search for libraries in the order they
are supplied to 'add_library_dir()' and/or 'set_library_dirs()'.
"""
self.library_dirs.append (dir)
def set_library_dirs (self, dirs):
"""Set the list of library search directories to 'dirs' (a list of
strings). This does not affect any standard library search path
that the linker may search by default.
"""
self.library_dirs = copy (dirs)
def add_runtime_library_dir (self, dir):
"""Add 'dir' to the list of directories that will be searched for
shared libraries at runtime.
"""
self.runtime_library_dirs.append (dir)
def set_runtime_library_dirs (self, dirs):
"""Set the list of directories to search for shared libraries at
runtime to 'dirs' (a list of strings). This does not affect any
standard search path that the runtime linker may search by
default.
"""
self.runtime_library_dirs = copy (dirs)
def add_link_object (self, object):
"""Add 'object' to the list of object files (or analogues, such as
explicitly named library files or the output of "resource
compilers") to be included in every link driven by this compiler
object.
"""
self.objects.append (object)
def set_link_objects (self, objects):
"""Set the list of object files (or analogues) to be included in
every link to 'objects'. This does not affect any standard object
files that the linker may include by default (such as system
libraries).
"""
self.objects = copy (objects)
# -- Private utility methods --------------------------------------
# (here for the convenience of subclasses)
# Helper method to prep compiler in subclass compile() methods
def _setup_compile(self, outdir, macros, incdirs, sources, depends,
extra):
"""Process arguments and decide which source files to compile."""
if outdir is None:
outdir = self.output_dir
elif type(outdir) is not StringType:
raise TypeError, "'output_dir' must be a string or None"
if macros is None:
macros = self.macros
elif type(macros) is ListType:
macros = macros + (self.macros or [])
else:
raise TypeError, "'macros' (if supplied) must be a list of tuples"
if incdirs is None:
incdirs = self.include_dirs
elif type(incdirs) in (ListType, TupleType):
incdirs = list(incdirs) + (self.include_dirs or [])
else:
raise TypeError, \
"'include_dirs' (if supplied) must be a list of strings"
if extra is None:
extra = []
# Get the list of expected output (object) files
objects = self.object_filenames(sources,
strip_dir=0,
output_dir=outdir)
assert len(objects) == len(sources)
pp_opts = gen_preprocess_options(macros, incdirs)
build = {}
for i in range(len(sources)):
src = sources[i]
obj = objects[i]
ext = os.path.splitext(src)[1]
self.mkpath(os.path.dirname(obj))
build[obj] = (src, ext)
return macros, objects, extra, pp_opts, build
def _get_cc_args(self, pp_opts, debug, before):
# works for unixccompiler, emxccompiler, cygwinccompiler
cc_args = pp_opts + ['-c']
if debug:
cc_args[:0] = ['-g']
if before:
cc_args[:0] = before
return cc_args
def _fix_compile_args (self, output_dir, macros, include_dirs):
"""Typecheck and fix-up some of the arguments to the 'compile()'
method, and return fixed-up values. Specifically: if 'output_dir'
is None, replaces it with 'self.output_dir'; ensures that 'macros'
is a list, and augments it with 'self.macros'; ensures that
'include_dirs' is a list, and augments it with 'self.include_dirs'.
Guarantees that the returned values are of the correct type,
i.e. for 'output_dir' either string or None, and for 'macros' and
'include_dirs' either list or None.
"""
if output_dir is None:
output_dir = self.output_dir
elif type (output_dir) is not StringType:
raise TypeError, "'output_dir' must be a string or None"
if macros is None:
macros = self.macros
elif type (macros) is ListType:
macros = macros + (self.macros or [])
else:
raise TypeError, "'macros' (if supplied) must be a list of tuples"
if include_dirs is None:
include_dirs = self.include_dirs
elif type (include_dirs) in (ListType, TupleType):
include_dirs = list (include_dirs) + (self.include_dirs or [])
else:
raise TypeError, \
"'include_dirs' (if supplied) must be a list of strings"
return output_dir, macros, include_dirs
# _fix_compile_args ()
def _prep_compile(self, sources, output_dir, depends=None):
"""Decide which souce files must be recompiled.
Determine the list of object files corresponding to 'sources',
and figure out which ones really need to be recompiled.
Return a list of all object files and a dictionary telling
which source files can be skipped.
"""
# Get the list of expected output (object) files
objects = self.object_filenames(sources, output_dir=output_dir)
assert len(objects) == len(sources)
# Return an empty dict for the "which source files can be skipped"
# return value to preserve API compatibility.
return objects, {}
def _fix_object_args (self, objects, output_dir):
"""Typecheck and fix up some arguments supplied to various methods.
Specifically: ensure that 'objects' is a list; if output_dir is
None, replace with self.output_dir. Return fixed versions of
'objects' and 'output_dir'.
"""
if type (objects) not in (ListType, TupleType):
raise TypeError, \
"'objects' must be a list or tuple of strings"
objects = list (objects)
if output_dir is None:
output_dir = self.output_dir
elif type (output_dir) is not StringType:
raise TypeError, "'output_dir' must be a string or None"
return (objects, output_dir)
def _fix_lib_args (self, libraries, library_dirs, runtime_library_dirs):
"""Typecheck and fix up some of the arguments supplied to the
'link_*' methods. Specifically: ensure that all arguments are
lists, and augment them with their permanent versions
(eg. 'self.libraries' augments 'libraries'). Return a tuple with
fixed versions of all arguments.
"""
if libraries is None:
libraries = self.libraries
elif type (libraries) in (ListType, TupleType):
libraries = list (libraries) + (self.libraries or [])
else:
raise TypeError, \
"'libraries' (if supplied) must be a list of strings"
if library_dirs is None:
library_dirs = self.library_dirs
elif type (library_dirs) in (ListType, TupleType):
library_dirs = list (library_dirs) + (self.library_dirs or [])
else:
raise TypeError, \
"'library_dirs' (if supplied) must be a list of strings"
if runtime_library_dirs is None:
runtime_library_dirs = self.runtime_library_dirs
elif type (runtime_library_dirs) in (ListType, TupleType):
runtime_library_dirs = (list (runtime_library_dirs) +
(self.runtime_library_dirs or []))
else:
raise TypeError, \
"'runtime_library_dirs' (if supplied) " + \
"must be a list of strings"
return (libraries, library_dirs, runtime_library_dirs)
# _fix_lib_args ()
def _need_link (self, objects, output_file):
"""Return true if we need to relink the files listed in 'objects'
to recreate 'output_file'.
"""
if self.force:
return 1
else:
if self.dry_run:
newer = newer_group (objects, output_file, missing='newer')
else:
newer = newer_group (objects, output_file)
return newer
# _need_link ()
def detect_language (self, sources):
"""Detect the language of a given file, or list of files. Uses
language_map, and language_order to do the job.
"""
if type(sources) is not ListType:
sources = [sources]
lang = None
index = len(self.language_order)
for source in sources:
base, ext = os.path.splitext(source)
extlang = self.language_map.get(ext)
try:
extindex = self.language_order.index(extlang)
if extindex < index:
lang = extlang
index = extindex
except ValueError:
pass
return lang
# detect_language ()
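# Illustrative sketch, not part of the original source: assuming the usual
# class attributes language_map = {'.c': 'c', '.cc': 'c++', '.m': 'objc'}
# and language_order = ['c++', 'objc', 'c'], mixing C and C++ sources
# reports the "highest" language needed for linking:
#
#     compiler.detect_language(['a.c', 'b.cc'])   # 'c++'
#     compiler.detect_language('a.c')             # 'c'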
# -- Worker methods ------------------------------------------------
# (must be implemented by subclasses)
def preprocess (self,
source,
output_file=None,
macros=None,
include_dirs=None,
extra_preargs=None,
extra_postargs=None):
"""Preprocess a single C/C++ source file, named in 'source'.
Output will be written to file named 'output_file', or stdout if
'output_file' not supplied. 'macros' is a list of macro
definitions as for 'compile()', which will augment the macros set
with 'define_macro()' and 'undefine_macro()'. 'include_dirs' is a
list of directory names that will be added to the default list.
Raises PreprocessError on failure.
"""
pass
def compile(self, sources, output_dir=None, macros=None,
include_dirs=None, debug=0, extra_preargs=None,
extra_postargs=None, depends=None):
"""Compile one or more source files.
'sources' must be a list of filenames, most likely C/C++
files, but in reality anything that can be handled by a
particular compiler and compiler class (eg. MSVCCompiler can
handle resource files in 'sources'). Return a list of object
filenames, one per source filename in 'sources'. Depending on
the implementation, not all source files will necessarily be
compiled, but all corresponding object filenames will be
returned.
If 'output_dir' is given, object files will be put under it, while
retaining their original path component. That is, "foo/bar.c"
normally compiles to "foo/bar.o" (for a Unix implementation); if
'output_dir' is "build", then it would compile to
"build/foo/bar.o".
'macros', if given, must be a list of macro definitions. A macro
definition is either a (name, value) 2-tuple or a (name,) 1-tuple.
The former defines a macro; if the value is None, the macro is
defined without an explicit value. The 1-tuple case undefines a
macro. Later definitions/redefinitions/undefinitions take
precedence.
'include_dirs', if given, must be a list of strings, the
directories to add to the default include file search path for this
compilation only.
'debug' is a boolean; if true, the compiler will be instructed to
output debug symbols in (or alongside) the object file(s).
'extra_preargs' and 'extra_postargs' are implementation-dependent.
On platforms that have the notion of a command line (e.g. Unix,
DOS/Windows), they are most likely lists of strings: extra
command-line arguments to prepend/append to the compiler command
line. On other platforms, consult the implementation class
documentation. In any event, they are intended as an escape hatch
for those occasions when the abstract compiler framework doesn't
cut the mustard.
'depends', if given, is a list of filenames that all targets
depend on. If a source file is older than any file in
depends, then the source file will be recompiled. This
supports dependency tracking, but only at a coarse
granularity.
Raises CompileError on failure.
"""
# A concrete compiler class can either override this method
# entirely or implement _compile().
macros, objects, extra_postargs, pp_opts, build = \
self._setup_compile(output_dir, macros, include_dirs, sources,
depends, extra_postargs)
cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)
for obj in objects:
try:
src, ext = build[obj]
except KeyError:
continue
self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)
# Return *all* object filenames, not just the ones we just built.
return objects
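# Illustrative sketch, not part of the original source: the 'macros' and
# 'include_dirs' formats documented above could be exercised like this on a
# concrete compiler instance (the file names here are hypothetical).
#
#     objects = compiler.compile(
#         ["foo.c", "bar/baz.c"],
#         output_dir="build",
#         macros=[("NDEBUG", None),       # define NDEBUG without a value
#                 ("VERSION", '"1.0"'),   # define VERSION to "1.0"
#                 ("DEBUG",)],            # 1-tuple: undefine DEBUG
#         include_dirs=["include"],
#         debug=1)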
def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
"""Compile 'src' to product 'obj'."""
# A concrete compiler class that does not override compile()
# should implement _compile().
pass
def create_static_lib (self,
objects,
output_libname,
output_dir=None,
debug=0,
target_lang=None):
"""Link a bunch of stuff together to create a static library file.
The "bunch of stuff" consists of the list of object files supplied
as 'objects', the extra object files supplied to
'add_link_object()' and/or 'set_link_objects()', the libraries
supplied to 'add_library()' and/or 'set_libraries()', and the
libraries supplied as 'libraries' (if any).
'output_libname' should be a library name, not a filename; the
filename will be inferred from the library name. 'output_dir' is
the directory where the library file will be put.
'debug' is a boolean; if true, debugging information will be
included in the library (note that on most platforms, it is the
compile step where this matters: the 'debug' flag is included here
just for consistency).
'target_lang' is the target language for which the given objects
are being compiled. This allows specific linkage time treatment of
certain languages.
Raises LibError on failure.
"""
pass
# values for target_desc parameter in link()
SHARED_OBJECT = "shared_object"
SHARED_LIBRARY = "shared_library"
EXECUTABLE = "executable"
def link (self,
target_desc,
objects,
output_filename,
output_dir=None,
libraries=None,
library_dirs=None,
runtime_library_dirs=None,
export_symbols=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
build_temp=None,
target_lang=None):
"""Link a bunch of stuff together to create an executable or
shared library file.
The "bunch of stuff" consists of the list of object files supplied
as 'objects'. 'output_filename' should be a filename. If
'output_dir' is supplied, 'output_filename' is relative to it
(i.e. 'output_filename' can provide directory components if
needed).
'libraries' is a list of libraries to link against. These are
library names, not filenames, since they're translated into
filenames in a platform-specific way (eg. "foo" becomes "libfoo.a"
on Unix and "foo.lib" on DOS/Windows). However, they can include a
directory component, which means the linker will look in that
specific directory rather than searching all the normal locations.
'library_dirs', if supplied, should be a list of directories to
search for libraries that were specified as bare library names
(ie. no directory component). These are on top of the system
default and those supplied to 'add_library_dir()' and/or
'set_library_dirs()'. 'runtime_library_dirs' is a list of
directories that will be embedded into the shared library and used
to search for other shared libraries that *it* depends on at
run-time. (This may only be relevant on Unix.)
'export_symbols' is a list of symbols that the shared library will
export. (This appears to be relevant only on Windows.)
'debug' is as for 'compile()' and 'create_static_lib()', with the
slight distinction that it actually matters on most platforms (as
opposed to 'create_static_lib()', which includes a 'debug' flag
mostly for form's sake).
'extra_preargs' and 'extra_postargs' are as for 'compile()' (except
of course that they supply command-line arguments for the
particular linker being used).
'target_lang' is the target language for which the given objects
are being compiled. This allows specific linkage time treatment of
certain languages.
Raises LinkError on failure.
"""
raise NotImplementedError
# Old 'link_*()' methods, rewritten to use the new 'link()' method.
def link_shared_lib (self,
objects,
output_libname,
output_dir=None,
libraries=None,
library_dirs=None,
runtime_library_dirs=None,
export_symbols=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
build_temp=None,
target_lang=None):
self.link(CCompiler.SHARED_LIBRARY, objects,
self.library_filename(output_libname, lib_type='shared'),
output_dir,
libraries, library_dirs, runtime_library_dirs,
export_symbols, debug,
extra_preargs, extra_postargs, build_temp, target_lang)
def link_shared_object (self,
objects,
output_filename,
output_dir=None,
libraries=None,
library_dirs=None,
runtime_library_dirs=None,
export_symbols=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
build_temp=None,
target_lang=None):
self.link(CCompiler.SHARED_OBJECT, objects,
output_filename, output_dir,
libraries, library_dirs, runtime_library_dirs,
export_symbols, debug,
extra_preargs, extra_postargs, build_temp, target_lang)
def link_executable (self,
objects,
output_progname,
output_dir=None,
libraries=None,
library_dirs=None,
runtime_library_dirs=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
target_lang=None):
self.link(CCompiler.EXECUTABLE, objects,
self.executable_filename(output_progname), output_dir,
libraries, library_dirs, runtime_library_dirs, None,
debug, extra_preargs, extra_postargs, None, target_lang)
# -- Miscellaneous methods -----------------------------------------
# These are all used by the 'gen_lib_options()' function; there is
# no appropriate default implementation so subclasses should
# implement all of these.
def library_dir_option (self, dir):
"""Return the compiler option to add 'dir' to the list of
directories searched for libraries.
"""
raise NotImplementedError
def runtime_library_dir_option (self, dir):
"""Return the compiler option to add 'dir' to the list of
directories searched for runtime libraries.
"""
raise NotImplementedError
def library_option (self, lib):
"""Return the compiler option to add 'dir' to the list of libraries
linked into the shared library or executable.
"""
raise NotImplementedError
def has_function(self, funcname,
includes=None,
include_dirs=None,
libraries=None,
library_dirs=None):
"""Return a boolean indicating whether funcname is supported on
the current platform. The optional arguments can be used to
augment the compilation environment.
"""
# this can't be included at module scope because it tries to
# import math which might not be available at that point - maybe
# the necessary logic should just be inlined?
import tempfile
if includes is None:
includes = []
if include_dirs is None:
include_dirs = []
if libraries is None:
libraries = []
if library_dirs is None:
library_dirs = []
fd, fname = tempfile.mkstemp(".c", funcname, text=True)
f = os.fdopen(fd, "w")
for incl in includes:
f.write("""#include "%s"\n""" % incl)
f.write("""\
main (int argc, char **argv) {
%s();
}
""" % funcname)
f.close()
try:
objects = self.compile([fname], include_dirs=include_dirs)
except CompileError:
return False
try:
self.link_executable(objects, "a.out",
libraries=libraries,
library_dirs=library_dirs)
except (LinkError, TypeError):
return False
return True
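# Illustrative sketch, not part of the original source: has_function() is
# typically used for feature probes; for example, checking for pow() in the
# C math library (the library name "m" is Unix-specific and assumed here).
#
#     if compiler.has_function("pow", includes=["math.h"], libraries=["m"]):
#         compiler.define_macro("HAVE_POW", 1)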
def find_library_file (self, dirs, lib, debug=0):
"""Search the specified list of directories for a static or shared
library file 'lib' and return the full path to that file. If
'debug' true, look for a debugging version (if that makes sense on
the current platform). Return None if 'lib' wasn't found in any of
the specified directories.
"""
raise NotImplementedError
# -- Filename generation methods -----------------------------------
# The default implementation of the filename generating methods are
# prejudiced towards the Unix/DOS/Windows view of the world:
# * object files are named by replacing the source file extension
# (eg. .c/.cpp -> .o/.obj)
# * library files (shared or static) are named by plugging the
# library name and extension into a format string, eg.
# "lib%s.%s" % (lib_name, ".a") for Unix static libraries
# * executables are named by appending an extension (possibly
# empty) to the program name: eg. progname + ".exe" for
# Windows
#
# To reduce redundant code, these methods expect to find
# several attributes in the current object (presumably defined
# as class attributes):
# * src_extensions -
# list of C/C++ source file extensions, eg. ['.c', '.cpp']
# * obj_extension -
# object file extension, eg. '.o' or '.obj'
# * static_lib_extension -
# extension for static library files, eg. '.a' or '.lib'
# * shared_lib_extension -
# extension for shared library/object files, eg. '.so', '.dll'
# * static_lib_format -
# format string for generating static library filenames,
# eg. 'lib%s.%s' or '%s.%s'
# * shared_lib_format
# format string for generating shared library filenames
# (probably same as static_lib_format, since the extension
# is one of the intended parameters to the format string)
# * exe_extension -
# extension for executable files, eg. '' or '.exe'
def object_filenames(self, source_filenames, strip_dir=0, output_dir=''):
if output_dir is None:
output_dir = ''
obj_names = []
for src_name in source_filenames:
base, ext = os.path.splitext(src_name)
base = os.path.splitdrive(base)[1] # Chop off the drive
base = base[os.path.isabs(base):] # If abs, chop off leading /
if ext not in self.src_extensions:
raise UnknownFileError, \
"unknown file type '%s' (from '%s')" % (ext, src_name)
if strip_dir:
base = os.path.basename(base)
obj_names.append(os.path.join(output_dir,
base + self.obj_extension))
return obj_names
def shared_object_filename(self, basename, strip_dir=0, output_dir=''):
assert output_dir is not None
if strip_dir:
basename = os.path.basename (basename)
return os.path.join(output_dir, basename + self.shared_lib_extension)
def executable_filename(self, basename, strip_dir=0, output_dir=''):
assert output_dir is not None
if strip_dir:
basename = os.path.basename (basename)
return os.path.join(output_dir, basename + (self.exe_extension or ''))
def library_filename(self, libname, lib_type='static', # or 'shared'
strip_dir=0, output_dir=''):
assert output_dir is not None
if lib_type not in ("static", "shared", "dylib"):
raise ValueError, "'lib_type' must be \"static\", \"shared\" or \"dylib\""
fmt = getattr(self, lib_type + "_lib_format")
ext = getattr(self, lib_type + "_lib_extension")
dir, base = os.path.split (libname)
filename = fmt % (base, ext)
if strip_dir:
dir = ''
return os.path.join(output_dir, dir, filename)
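# Illustrative sketch, not part of the original source: assuming Unix-style
# class attributes (static_lib_format = 'lib%s%s', static_lib_extension =
# '.a', shared_lib_extension = '.so', obj_extension = '.o'), the filename
# methods would produce roughly:
#
#     compiler.library_filename("foo")                     # 'libfoo.a'
#     compiler.library_filename("foo", lib_type="shared")  # 'libfoo.so'
#     compiler.object_filenames(["src/foo.c"])             # ['src/foo.o']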
# -- Utility methods -----------------------------------------------
def announce (self, msg, level=1):
log.debug(msg)
def debug_print (self, msg):
from distutils.debug import DEBUG
if DEBUG:
print msg
def warn (self, msg):
sys.stderr.write ("warning: %s\n" % msg)
def execute (self, func, args, msg=None, level=1):
execute(func, args, msg, self.dry_run)
def spawn (self, cmd):
spawn (cmd, dry_run=self.dry_run)
def move_file (self, src, dst):
return move_file (src, dst, dry_run=self.dry_run)
def mkpath (self, name, mode=0777):
mkpath (name, mode, dry_run=self.dry_run)
# class CCompiler
# Map a sys.platform/os.name ('posix', 'nt') to the default compiler
# type for that platform. Keys are interpreted as re match
# patterns. Order is important; platform mappings are preferred over
# OS names.
_default_compilers = (
# Platform string mappings
# on a cygwin built python we can use gcc like an ordinary UNIXish
# compiler
('cygwin.*', 'unix'),
('os2emx', 'emx'),
# OS name mappings
('posix', 'unix'),
('nt', 'msvc'),
('mac', 'mwerks'),
)
def get_default_compiler(osname=None, platform=None):
""" Determine the default compiler to use for the given platform.
osname should be one of the standard Python OS names (i.e. the
ones returned by os.name) and platform the common value
returned by sys.platform for the platform in question.
The default values are os.name and sys.platform in case the
parameters are not given.
"""
if osname is None:
osname = os.name
if platform is None:
platform = sys.platform
for pattern, compiler in _default_compilers:
if re.match(pattern, platform) is not None or \
re.match(pattern, osname) is not None:
return compiler
# Default to Unix compiler
return 'unix'
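# Illustrative sketch, not part of the original source: with the mapping
# above, get_default_compiler() resolves roughly as follows.
#
#     get_default_compiler('posix', 'linux2')    # 'unix'  (OS name match)
#     get_default_compiler('posix', 'cygwin')    # 'unix'  (platform match)
#     get_default_compiler('nt', 'win32')        # 'msvc'
#     get_default_compiler('riscos', 'riscos')   # 'unix'  (fallback default)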
# Map compiler types to (module_name, class_name) pairs -- ie. where to
# find the code that implements an interface to this compiler. (The module
# is assumed to be in the 'distutils' package.)
compiler_class = { 'unix': ('unixccompiler', 'UnixCCompiler',
"standard UNIX-style compiler"),
'msvc': ('msvccompiler', 'MSVCCompiler',
"Microsoft Visual C++"),
'cygwin': ('cygwinccompiler', 'CygwinCCompiler',
"Cygwin port of GNU C Compiler for Win32"),
'mingw32': ('cygwinccompiler', 'Mingw32CCompiler',
"Mingw32 port of GNU C Compiler for Win32"),
'bcpp': ('bcppcompiler', 'BCPPCompiler',
"Borland C++ Compiler"),
'mwerks': ('mwerkscompiler', 'MWerksCompiler',
"MetroWerks CodeWarrior"),
'emx': ('emxccompiler', 'EMXCCompiler',
"EMX port of GNU C Compiler for OS/2"),
}
def show_compilers():
"""Print list of available compilers (used by the "--help-compiler"
options to "build", "build_ext", "build_clib").
"""
# XXX this "knows" that the compiler option it's describing is
# "--compiler", which just happens to be the case for the three
# commands that use it.
from distutils.fancy_getopt import FancyGetopt
compilers = []
for compiler in compiler_class.keys():
compilers.append(("compiler="+compiler, None,
compiler_class[compiler][2]))
compilers.sort()
pretty_printer = FancyGetopt(compilers)
pretty_printer.print_help("List of available compilers:")
def new_compiler (plat=None,
compiler=None,
verbose=0,
dry_run=0,
force=0):
"""Generate an instance of some CCompiler subclass for the supplied
platform/compiler combination. 'plat' defaults to 'os.name'
(eg. 'posix', 'nt'), and 'compiler' defaults to the default compiler
for that platform. Currently only 'posix' and 'nt' are supported, and
the default compilers are "traditional Unix interface" (UnixCCompiler
class) and Visual C++ (MSVCCompiler class). Note that it's perfectly
possible to ask for a Unix compiler object under Windows, and a
Microsoft compiler object under Unix -- if you supply a value for
'compiler', 'plat' is ignored.
"""
if plat is None:
plat = os.name
try:
if compiler is None:
compiler = get_default_compiler(plat)
(module_name, class_name, long_description) = compiler_class[compiler]
except KeyError:
msg = "don't know how to compile C/C++ code on platform '%s'" % plat
if compiler is not None:
msg = msg + " with '%s' compiler" % compiler
raise DistutilsPlatformError, msg
try:
module_name = "distutils." + module_name
__import__ (module_name)
module = sys.modules[module_name]
klass = vars(module)[class_name]
except ImportError:
raise DistutilsModuleError, \
"can't compile C/C++ code: unable to load module '%s'" % \
module_name
except KeyError:
raise DistutilsModuleError, \
("can't compile C/C++ code: unable to find class '%s' " +
"in module '%s'") % (class_name, module_name)
# XXX The None is necessary to preserve backwards compatibility
# with classes that expect verbose to be the first positional
# argument.
return klass (None, dry_run, force)
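# Illustrative sketch, not part of the original source: a typical end-to-end
# use of new_compiler() with the abstract interface defined above (the file
# and library names are hypothetical).
#
#     compiler = new_compiler()               # default compiler for this host
#     compiler.add_include_dir("include")
#     compiler.define_macro("NDEBUG")
#     objects = compiler.compile(["src/main.c"], output_dir="build")
#     compiler.link_executable(objects, "myprog", output_dir="build",
#                              libraries=["m"])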
def gen_preprocess_options (macros, include_dirs):
"""Generate C pre-processor options (-D, -U, -I) as used by at least
two types of compilers: the typical Unix compiler and Visual C++.
'macros' is the usual thing, a list of 1- or 2-tuples, where (name,)
means undefine (-U) macro 'name', and (name,value) means define (-D)
macro 'name' to 'value'. 'include_dirs' is just a list of directory
names to be added to the header file search path (-I). Returns a list
of command-line options suitable for either Unix compilers or Visual
C++.
"""
# XXX it would be nice (mainly aesthetic, and so we don't generate
# stupid-looking command lines) to go over 'macros' and eliminate
# redundant definitions/undefinitions (ie. ensure that only the
# latest mention of a particular macro winds up on the command
# line). I don't think it's essential, though, since most (all?)
# Unix C compilers only pay attention to the latest -D or -U
# mention of a macro on their command line. Similar situation for
# 'include_dirs'. I'm punting on both for now. Anyways, weeding out
# redundancies like this should probably be the province of
# CCompiler, since the data structures used are inherited from it
# and therefore common to all CCompiler classes.
pp_opts = []
for macro in macros:
if not (type (macro) is TupleType and
1 <= len (macro) <= 2):
raise TypeError, \
("bad macro definition '%s': " +
"each element of 'macros' list must be a 1- or 2-tuple") % \
macro
if len (macro) == 1: # undefine this macro
pp_opts.append ("-U%s" % macro[0])
elif len (macro) == 2:
if macro[1] is None: # define with no explicit value
pp_opts.append ("-D%s" % macro[0])
else:
# XXX *don't* need to be clever about quoting the
# macro value here, because we're going to avoid the
# shell at all costs when we spawn the command!
pp_opts.append ("-D%s=%s" % macro)
for dir in include_dirs:
pp_opts.append ("-I%s" % dir)
return pp_opts
# gen_preprocess_options ()
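# Illustrative sketch, not part of the original source: sample input and the
# options gen_preprocess_options() would produce for it.
#
#     gen_preprocess_options(
#         macros=[("NDEBUG", None), ("VERSION", "2"), ("DEBUG",)],
#         include_dirs=["include", "/usr/local/include"])
#     # -> ['-DNDEBUG', '-DVERSION=2', '-UDEBUG',
#     #     '-Iinclude', '-I/usr/local/include']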
def gen_lib_options (compiler, library_dirs, runtime_library_dirs, libraries):
"""Generate linker options for searching library directories and
linking with specific libraries. 'libraries' and 'library_dirs' are,
respectively, lists of library names (not filenames!) and search
directories. Returns a list of command-line options suitable for use
with the given compiler instance, whose *_option() methods generate the actual flags.
"""
lib_opts = []
for dir in library_dirs:
lib_opts.append (compiler.library_dir_option (dir))
for dir in runtime_library_dirs:
opt = compiler.runtime_library_dir_option (dir)
if type(opt) is ListType:
lib_opts = lib_opts + opt
else:
lib_opts.append (opt)
# XXX it's important that we *not* remove redundant library mentions!
# sometimes you really do have to say "-lfoo -lbar -lfoo" in order to
# resolve all symbols. I just hope we never have to say "-lfoo obj.o
# -lbar" to get things to work -- that's certainly a possibility, but a
# pretty nasty way to arrange your C code.
for lib in libraries:
(lib_dir, lib_name) = os.path.split (lib)
if lib_dir:
lib_file = compiler.find_library_file ([lib_dir], lib_name)
if lib_file:
lib_opts.append (lib_file)
else:
compiler.warn ("no library file corresponding to "
"'%s' found (skipping)" % lib)
else:
lib_opts.append (compiler.library_option (lib))
return lib_opts
# gen_lib_options ()
|
<%
# Instance On/Off logic
# This code handles turning a machine on or off depending on user input.
# It should be run after all normal create/update/delete logic.
-%>
class InstancePower(object):
def __init__(self, module, current_status):
self.module = module
self.current_status = current_status
self.desired_status = self.module.params.get('status')
def run(self):
# GcpRequest handles unicode text so the status comparison is reliable
if GcpRequest({'status': self.current_status}) == GcpRequest({'status': self.desired_status}):
return
elif self.desired_status == 'RUNNING':
self.start()
elif self.desired_status == 'TERMINATED':
self.stop()
elif self.desired_status == 'SUSPENDED':
self.module.fail_json(msg="Instances cannot be suspended using Ansible")
def start(self):
auth = GcpSession(self.module, 'compute')
wait_for_operation(self.module, auth.post(self._start_url()))
def stop(self):
auth = GcpSession(self.module, 'compute')
wait_for_operation(self.module, auth.post(self._stop_url()))
def _start_url(self):
return "https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/instances/{name}/start".format(**self.module.params)
def _stop_url(self):
return "https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/instances/{name}/stop".format(**self.module.params)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2013 Zuza Software Foundation
#
# This file is part of Pootle.
#
# Pootle is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with translate; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
from django import forms
from django.core.urlresolvers import reverse
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from .models import Agreement
def agreement_form_factory(pages, user, base_class=forms.Form):
"""Factory that builds an agreement form.
:param pages: Legal pages that need to be accepted by users.
:param user: User bound to the agreement form.
:param base_class: Base class for this form to inherit from.
:return: An `AgreementForm` class with `pages` as required checkboxes.
"""
class AgreementForm(base_class):
def __init__(self, *args, **kwargs):
super(AgreementForm, self).__init__(*args, **kwargs)
self._pages = pages
self._user = user
for page in self._pages:
self.add_page_field(page)
def save(self):
"""Saves user agreements."""
if hasattr(super(AgreementForm, self), 'save'):
# HACKISH: This is tightly coupled with `RegistrationForm`
# which returns the newly-registered user in its form's
# `save`. We should listen to the `user_registered` signal
# instead.
self._user = super(AgreementForm, self).save()
for page in self._pages:
agreement, created = Agreement.objects.get_or_create(
user=self._user, document=page,
)
agreement.save()
def legal_fields(self):
"""Returns any fields added by legal pages."""
return [field for field in self
if field.name.startswith('legal_')]
def add_page_field(self, page):
"""Adds `page` as a required field to this form."""
url = page.url and page.url or reverse('staticpages.display',
args=[page.virtual_path])
anchor = u'href="%s" class="fancybox"' % url
# Translators: The second '%s' is the title of a document
label = mark_safe(_("I have read and accept: <a %s>%s</a>",
(anchor, page.title,)))
field_name = 'legal_%d' % page.pk
self.fields[field_name] = forms.BooleanField(label=label,
required=True)
self.fields[field_name].widget.attrs['class'] = 'js-legalfield'
return AgreementForm
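# Illustrative sketch, not part of the original source: a view might build
# and validate the generated form like this ('pending_pages' and 'request'
# are assumptions for the example).
#
#     AgreementForm = agreement_form_factory(pending_pages, request.user)
#     form = AgreementForm(request.POST or None)
#     if form.is_valid():
#         form.save()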
|
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Client side of the compute RPC API.
"""
from oslo_config import cfg
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from nova import exception
from nova.i18n import _, _LW
from nova import objects
from nova.objects import base as objects_base
from nova.openstack.common import log as logging
from nova import rpc
rpcapi_opts = [
cfg.StrOpt('compute_topic',
default='compute',
help='The topic compute nodes listen on'),
]
CONF = cfg.CONF
CONF.register_opts(rpcapi_opts)
rpcapi_cap_opt = cfg.StrOpt('compute',
help='Set a version cap for messages sent to compute services. If you '
'plan to do a live upgrade from havana to icehouse, you should '
'set this option to "icehouse-compat" before beginning the live '
'upgrade procedure.')
CONF.register_opt(rpcapi_cap_opt, 'upgrade_levels')
LOG = logging.getLogger(__name__)
def _compute_host(host, instance):
'''Get the destination host for a message.
:param host: explicit host to send the message to.
:param instance: If an explicit host was not specified, use
instance['host']
:returns: A host
'''
if host:
return host
if not instance:
raise exception.NovaException(_('No compute host specified'))
if not instance['host']:
raise exception.NovaException(_('Unable to find host for '
'Instance %s') % instance['uuid'])
return instance['host']
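# Illustrative sketch, not part of the original source: how _compute_host()
# resolves the target host (the values below are hypothetical).
#
#     _compute_host('node-2', instance)                         # 'node-2' (explicit wins)
#     _compute_host(None, {'host': 'node-1', 'uuid': 'abc'})    # 'node-1'
#     _compute_host(None, {'host': None, 'uuid': 'abc'})        # raises NovaException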
class ComputeAPI(object):
'''Client side of the compute rpc API.
API version history:
* 1.0 - Initial version.
* 1.1 - Adds get_host_uptime()
* 1.2 - Adds check_can_live_migrate_[destination|source]
* 1.3 - Adds change_instance_metadata()
* 1.4 - Remove instance_uuid, add instance argument to
reboot_instance()
* 1.5 - Remove instance_uuid, add instance argument to
pause_instance(), unpause_instance()
* 1.6 - Remove instance_uuid, add instance argument to
suspend_instance()
* 1.7 - Remove instance_uuid, add instance argument to
get_console_output()
* 1.8 - Remove instance_uuid, add instance argument to
add_fixed_ip_to_instance()
* 1.9 - Remove instance_uuid, add instance argument to attach_volume()
* 1.10 - Remove instance_id, add instance argument to
check_can_live_migrate_destination()
* 1.11 - Remove instance_id, add instance argument to
check_can_live_migrate_source()
* 1.12 - Remove instance_uuid, add instance argument to
confirm_resize()
* 1.13 - Remove instance_uuid, add instance argument to detach_volume()
* 1.14 - Remove instance_uuid, add instance argument to finish_resize()
* 1.15 - Remove instance_uuid, add instance argument to
finish_revert_resize()
* 1.16 - Remove instance_uuid, add instance argument to
get_diagnostics()
* 1.17 - Remove instance_uuid, add instance argument to
get_vnc_console()
* 1.18 - Remove instance_uuid, add instance argument to inject_file()
* 1.19 - Remove instance_uuid, add instance argument to
inject_network_info()
* 1.20 - Remove instance_id, add instance argument to
post_live_migration_at_destination()
* 1.21 - Remove instance_uuid, add instance argument to
power_off_instance() and stop_instance()
* 1.22 - Remove instance_uuid, add instance argument to
power_on_instance() and start_instance()
* 1.23 - Remove instance_id, add instance argument to
pre_live_migration()
* 1.24 - Remove instance_uuid, add instance argument to
rebuild_instance()
* 1.25 - Remove instance_uuid, add instance argument to
remove_fixed_ip_from_instance()
* 1.26 - Remove instance_id, add instance argument to
remove_volume_connection()
* 1.27 - Remove instance_uuid, add instance argument to
rescue_instance()
* 1.28 - Remove instance_uuid, add instance argument to reset_network()
* 1.29 - Remove instance_uuid, add instance argument to
resize_instance()
* 1.30 - Remove instance_uuid, add instance argument to
resume_instance()
* 1.31 - Remove instance_uuid, add instance argument to revert_resize()
* 1.32 - Remove instance_id, add instance argument to
rollback_live_migration_at_destination()
* 1.33 - Remove instance_uuid, add instance argument to
set_admin_password()
* 1.34 - Remove instance_uuid, add instance argument to
snapshot_instance()
* 1.35 - Remove instance_uuid, add instance argument to
unrescue_instance()
* 1.36 - Remove instance_uuid, add instance argument to
change_instance_metadata()
* 1.37 - Remove instance_uuid, add instance argument to
terminate_instance()
* 1.38 - Changes to prep_resize():
* remove instance_uuid, add instance
* remove instance_type_id, add instance_type
* remove topic, it was unused
* 1.39 - Remove instance_uuid, add instance argument to run_instance()
* 1.40 - Remove instance_id, add instance argument to live_migration()
* 1.41 - Adds refresh_instance_security_rules()
* 1.42 - Add reservations arg to prep_resize(), resize_instance(),
finish_resize(), confirm_resize(), revert_resize() and
finish_revert_resize()
* 1.43 - Add migrate_data to live_migration()
* 1.44 - Adds reserve_block_device_name()
* 2.0 - Remove 1.x backwards compat
* 2.1 - Adds orig_sys_metadata to rebuild_instance()
* 2.2 - Adds slave_info parameter to add_aggregate_host() and
remove_aggregate_host()
* 2.3 - Adds volume_id to reserve_block_device_name()
* 2.4 - Add bdms to terminate_instance
* 2.5 - Add block device and network info to reboot_instance
* 2.6 - Remove migration_id, add migration to resize_instance
* 2.7 - Remove migration_id, add migration to confirm_resize
* 2.8 - Remove migration_id, add migration to finish_resize
* 2.9 - Add publish_service_capabilities()
* 2.10 - Adds filter_properties and request_spec to prep_resize()
* 2.11 - Adds soft_delete_instance() and restore_instance()
* 2.12 - Remove migration_id, add migration to revert_resize
* 2.13 - Remove migration_id, add migration to finish_revert_resize
* 2.14 - Remove aggregate_id, add aggregate to add_aggregate_host
* 2.15 - Remove aggregate_id, add aggregate to remove_aggregate_host
* 2.16 - Add instance_type to resize_instance
* 2.17 - Add get_backdoor_port()
* 2.18 - Add bdms to rebuild_instance
* 2.19 - Add node to run_instance
* 2.20 - Add node to prep_resize
* 2.21 - Add migrate_data dict param to pre_live_migration()
* 2.22 - Add recreate, on_shared_storage and host arguments to
rebuild_instance()
* 2.23 - Remove network_info from reboot_instance
* 2.24 - Added get_spice_console method
* 2.25 - Add attach_interface() and detach_interface()
* 2.26 - Add validate_console_port to ensure the service connects to
vnc on the correct port
* 2.27 - Adds 'reservations' to terminate_instance() and
soft_delete_instance()
... Grizzly supports message version 2.27. So, any changes to existing
methods in 2.x after that point should be done such that they can
handle the version_cap being set to 2.27.
* 2.28 - Adds check_instance_shared_storage()
* 2.29 - Made start_instance() and stop_instance() take new-world
instance objects
* 2.30 - Adds live_snapshot_instance()
* 2.31 - Adds shelve_instance(), shelve_offload_instance, and
unshelve_instance()
* 2.32 - Make reboot_instance take a new world instance object
* 2.33 - Made suspend_instance() and resume_instance() take new-world
instance objects
* 2.34 - Added swap_volume()
* 2.35 - Made terminate_instance() and soft_delete_instance() take
new-world instance objects
* 2.36 - Made pause_instance() and unpause_instance() take new-world
instance objects
* 2.37 - Added the legacy_bdm_in_spec parameter to run_instance
* 2.38 - Made check_can_live_migrate_[destination|source] take
new-world instance objects
* 2.39 - Made revert_resize() and confirm_resize() take new-world
instance objects
* 2.40 - Made reset_network() take new-world instance object
* 2.41 - Make inject_network_info take new-world instance object
* 2.42 - Splits snapshot_instance() into snapshot_instance() and
backup_instance() and makes them take new-world instance
objects.
* 2.43 - Made prep_resize() take new-world instance object
* 2.44 - Add volume_snapshot_create(), volume_snapshot_delete()
* 2.45 - Made resize_instance() take new-world objects
* 2.46 - Made finish_resize() take new-world objects
* 2.47 - Made finish_revert_resize() take new-world objects
... Havana supports message version 2.47. So, any changes to existing
methods in 2.x after that point should be done such that they can
handle the version_cap being set to 2.47.
* 2.48 - Make add_aggregate_host() and remove_aggregate_host() take
new-world objects
* ... - Remove live_snapshot() that was never actually used
* 3.0 - Remove 2.x compatibility
* 3.1 - Update get_spice_console() to take an instance object
* 3.2 - Update get_vnc_console() to take an instance object
* 3.3 - Update validate_console_port() to take an instance object
* 3.4 - Update rebuild_instance() to take an instance object
* 3.5 - Pass preserve_ephemeral flag to rebuild_instance()
* 3.6 - Make volume_snapshot_{create,delete} use new-world objects
* 3.7 - Update change_instance_metadata() to take an instance object
* 3.8 - Update set_admin_password() to take an instance object
* 3.9 - Update rescue_instance() to take an instance object
* 3.10 - Added get_rdp_console method
* 3.11 - Update unrescue_instance() to take an object
* 3.12 - Update add_fixed_ip_to_instance() to take an object
* 3.13 - Update remove_fixed_ip_from_instance() to take an object
* 3.14 - Update post_live_migration_at_destination() to take an object
* 3.15 - Adds filter_properties and node to unshelve_instance()
* 3.16 - Make reserve_block_device_name and attach_volume use new-world
objects, and add disk_bus and device_type params to
reserve_block_device_name, and bdm param to attach_volume
* 3.17 - Update attach_interface and detach_interface to take an object
* 3.18 - Update get_diagnostics() to take an instance object
* Removed inject_file(), as it was unused.
* 3.19 - Update pre_live_migration to take instance object
* 3.20 - Make restore_instance take an instance object
* 3.21 - Made rebuild take new-world BDM objects
* 3.22 - Made terminate_instance take new-world BDM objects
* 3.23 - Added external_instance_event()
* build_and_run_instance was added in Havana and not used or
documented.
... Icehouse supports message version 3.23. So, any changes to
existing methods in 3.x after that point should be done such that they
can handle the version_cap being set to 3.23.
* 3.24 - Update rescue_instance() to take optional rescue_image_ref
* 3.25 - Make detach_volume take an object
* 3.26 - Make live_migration() and
rollback_live_migration_at_destination() take an object
* ... Removed run_instance()
* 3.27 - Make run_instance() accept a new-world object
* 3.28 - Update get_console_output() to accept a new-world object
* 3.29 - Make check_instance_shared_storage accept a new-world object
* 3.30 - Make remove_volume_connection() accept a new-world object
* 3.31 - Add get_instance_diagnostics
* 3.32 - Add destroy_disks and migrate_data optional parameters to
rollback_live_migration_at_destination()
* 3.33 - Make build_and_run_instance() take a NetworkRequestList object
* 3.34 - Add get_serial_console method
* 3.35 - Make reserve_block_device_name return a BDM object
... Juno supports message version 3.35. So, any changes to
existing methods in 3.x after that point should be done such that they
can handle the version_cap being set to 3.35.
* 3.36 - Make build_and_run_instance() send a Flavor object
* 3.37 - Add clean_shutdown to stop, resize, rescue, shelve, and
shelve_offload
* 3.38 - Add clean_shutdown to prep_resize
'''
VERSION_ALIASES = {
'icehouse': '3.23',
'juno': '3.35',
}
def __init__(self):
super(ComputeAPI, self).__init__()
target = messaging.Target(topic=CONF.compute_topic, version='3.0')
version_cap = self.VERSION_ALIASES.get(CONF.upgrade_levels.compute,
CONF.upgrade_levels.compute)
serializer = objects_base.NovaObjectSerializer()
self.client = self.get_client(target, version_cap, serializer)
# Cells overrides this
def get_client(self, target, version_cap, serializer):
return rpc.get_client(target,
version_cap=version_cap,
serializer=serializer)
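# Illustrative note, not part of the original source: the version_cap set in
# __init__ drives the can_send_version() checks used throughout this class.
# For example, with [upgrade_levels] compute=icehouse the cap resolves to
# '3.23', so self.client.can_send_version('3.32') returns False and callers
# such as check_can_live_migrate_destination() fall back to the '3.0'
# signature.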
def add_aggregate_host(self, ctxt, aggregate, host_param, host,
slave_info=None):
'''Add aggregate host.
:param ctxt: request context
:param aggregate:
:param host_param: This value is placed in the message to be the 'host'
parameter for the remote method.
:param host: This is the host to send the message to.
'''
version = '3.0'
cctxt = self.client.prepare(server=host, version=version)
cctxt.cast(ctxt, 'add_aggregate_host',
aggregate=aggregate, host=host_param,
slave_info=slave_info)
def add_fixed_ip_to_instance(self, ctxt, instance, network_id):
version = '3.12'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'add_fixed_ip_to_instance',
instance=instance, network_id=network_id)
def attach_interface(self, ctxt, instance, network_id, port_id,
requested_ip):
version = '3.17'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
return cctxt.call(ctxt, 'attach_interface',
instance=instance, network_id=network_id,
port_id=port_id, requested_ip=requested_ip)
def attach_volume(self, ctxt, instance, volume_id, mountpoint, bdm=None):
# NOTE(ndipanov): Remove volume_id and mountpoint on the next major
# version bump - they are not needed when using bdm objects.
version = '3.16'
kw = {'instance': instance, 'volume_id': volume_id,
'mountpoint': mountpoint, 'bdm': bdm}
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'attach_volume', **kw)
def change_instance_metadata(self, ctxt, instance, diff):
version = '3.7'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'change_instance_metadata',
instance=instance, diff=diff)
def _warn_buggy_live_migrations(self, data=None):
# NOTE(danms): We know that libvirt live migration with shared block
# storage was buggy (potential loss of data) before version 3.32.
# Since we need to support live migration with older clients, we need
# to warn the operator of this possibility. The logic below tries to
# decide if a warning should be emitted, assuming the positive if
# not sure. This can be removed when we bump to RPC API version 4.0.
if data:
if data.get('is_shared_block_storage') is not False:
# Shared block storage, or unknown
should_warn = True
else:
# Specifically not shared block storage
should_warn = False
else:
# Unknown, so warn to be safe
should_warn = True
if should_warn:
LOG.warning(_LW('Live migration with clients before RPC version '
'3.32 is known to be buggy with shared block '
'storage. See '
'https://bugs.launchpad.net/nova/+bug/1250751 for '
'more information!'))
def check_can_live_migrate_destination(self, ctxt, instance, destination,
block_migration, disk_over_commit):
if self.client.can_send_version('3.32'):
version = '3.32'
else:
version = '3.0'
self._warn_buggy_live_migrations()
cctxt = self.client.prepare(server=destination, version=version)
return cctxt.call(ctxt, 'check_can_live_migrate_destination',
instance=instance,
block_migration=block_migration,
disk_over_commit=disk_over_commit)
def check_can_live_migrate_source(self, ctxt, instance, dest_check_data):
if self.client.can_send_version('3.32'):
version = '3.32'
else:
version = '3.0'
self._warn_buggy_live_migrations()
source = _compute_host(None, instance)
cctxt = self.client.prepare(server=source, version=version)
return cctxt.call(ctxt, 'check_can_live_migrate_source',
instance=instance,
dest_check_data=dest_check_data)
def check_instance_shared_storage(self, ctxt, instance, data):
if self.client.can_send_version('3.29'):
version = '3.29'
else:
version = '3.0'
instance = jsonutils.to_primitive(instance)
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
return cctxt.call(ctxt, 'check_instance_shared_storage',
instance=instance,
data=data)
def confirm_resize(self, ctxt, instance, migration, host,
reservations=None, cast=True):
version = '3.0'
cctxt = self.client.prepare(server=_compute_host(host, instance),
version=version)
rpc_method = cctxt.cast if cast else cctxt.call
return rpc_method(ctxt, 'confirm_resize',
instance=instance, migration=migration,
reservations=reservations)
def detach_interface(self, ctxt, instance, port_id):
version = '3.17'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'detach_interface',
instance=instance, port_id=port_id)
def detach_volume(self, ctxt, instance, volume_id):
if self.client.can_send_version('3.25'):
version = '3.25'
else:
version = '3.0'
instance = jsonutils.to_primitive(instance)
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'detach_volume',
instance=instance, volume_id=volume_id)
def finish_resize(self, ctxt, instance, migration, image, disk_info,
host, reservations=None):
version = '3.0'
cctxt = self.client.prepare(server=host, version=version)
cctxt.cast(ctxt, 'finish_resize',
instance=instance, migration=migration,
image=image, disk_info=disk_info, reservations=reservations)
def finish_revert_resize(self, ctxt, instance, migration, host,
reservations=None):
version = '3.0'
cctxt = self.client.prepare(server=host, version=version)
cctxt.cast(ctxt, 'finish_revert_resize',
instance=instance, migration=migration,
reservations=reservations)
def get_console_output(self, ctxt, instance, tail_length):
if self.client.can_send_version('3.28'):
version = '3.28'
else:
version = '3.0'
instance = jsonutils.to_primitive(instance)
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
return cctxt.call(ctxt, 'get_console_output',
instance=instance, tail_length=tail_length)
def get_console_pool_info(self, ctxt, console_type, host):
version = '3.0'
cctxt = self.client.prepare(server=host, version=version)
return cctxt.call(ctxt, 'get_console_pool_info',
console_type=console_type)
def get_console_topic(self, ctxt, host):
version = '3.0'
cctxt = self.client.prepare(server=host, version=version)
return cctxt.call(ctxt, 'get_console_topic')
def get_diagnostics(self, ctxt, instance):
version = '3.18'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
return cctxt.call(ctxt, 'get_diagnostics', instance=instance)
def get_instance_diagnostics(self, ctxt, instance):
instance_p = jsonutils.to_primitive(instance)
kwargs = {'instance': instance_p}
version = '3.31'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
return cctxt.call(ctxt, 'get_instance_diagnostics', **kwargs)
def get_vnc_console(self, ctxt, instance, console_type):
version = '3.2'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
return cctxt.call(ctxt, 'get_vnc_console',
instance=instance, console_type=console_type)
def get_spice_console(self, ctxt, instance, console_type):
version = '3.1'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
return cctxt.call(ctxt, 'get_spice_console',
instance=instance, console_type=console_type)
def get_rdp_console(self, ctxt, instance, console_type):
version = '3.10'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
return cctxt.call(ctxt, 'get_rdp_console',
instance=instance, console_type=console_type)
def get_serial_console(self, ctxt, instance, console_type):
version = '3.34'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
return cctxt.call(ctxt, 'get_serial_console',
instance=instance, console_type=console_type)
def validate_console_port(self, ctxt, instance, port, console_type):
version = '3.3'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
return cctxt.call(ctxt, 'validate_console_port',
instance=instance, port=port,
console_type=console_type)
def host_maintenance_mode(self, ctxt, host_param, mode, host):
'''Set host maintenance mode
:param ctxt: request context
:param host_param: This value is placed in the message to be the 'host'
parameter for the remote method.
:param mode:
:param host: This is the host to send the message to.
'''
version = '3.0'
cctxt = self.client.prepare(server=host, version=version)
return cctxt.call(ctxt, 'host_maintenance_mode',
host=host_param, mode=mode)
def host_power_action(self, ctxt, action, host):
version = '3.0'
cctxt = self.client.prepare(server=host, version=version)
return cctxt.call(ctxt, 'host_power_action', action=action)
def inject_network_info(self, ctxt, instance):
version = '3.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'inject_network_info', instance=instance)
def live_migration(self, ctxt, instance, dest, block_migration, host,
migrate_data=None):
if self.client.can_send_version('3.26'):
version = '3.26'
else:
version = '3.0'
instance = jsonutils.to_primitive(instance)
cctxt = self.client.prepare(server=host, version=version)
cctxt.cast(ctxt, 'live_migration', instance=instance,
dest=dest, block_migration=block_migration,
migrate_data=migrate_data)
def pause_instance(self, ctxt, instance):
version = '3.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'pause_instance', instance=instance)
def post_live_migration_at_destination(self, ctxt, instance,
block_migration, host):
version = '3.14'
cctxt = self.client.prepare(server=host, version=version)
cctxt.cast(ctxt, 'post_live_migration_at_destination',
instance=instance, block_migration=block_migration)
def pre_live_migration(self, ctxt, instance, block_migration, disk,
host, migrate_data=None):
version = '3.19'
cctxt = self.client.prepare(server=host, version=version)
return cctxt.call(ctxt, 'pre_live_migration',
instance=instance,
block_migration=block_migration,
disk=disk, migrate_data=migrate_data)
def prep_resize(self, ctxt, image, instance, instance_type, host,
reservations=None, request_spec=None,
filter_properties=None, node=None,
clean_shutdown=True):
instance_type_p = jsonutils.to_primitive(instance_type)
image_p = jsonutils.to_primitive(image)
msg_args = {'instance': instance,
'instance_type': instance_type_p,
'image': image_p,
'reservations': reservations,
'request_spec': request_spec,
'filter_properties': filter_properties,
'node': node,
'clean_shutdown': clean_shutdown}
version = '3.38'
if not self.client.can_send_version(version):
del msg_args['clean_shutdown']
version = '3.0'
cctxt = self.client.prepare(server=host, version=version)
cctxt.cast(ctxt, 'prep_resize', **msg_args)
def reboot_instance(self, ctxt, instance, block_device_info,
reboot_type):
version = '3.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'reboot_instance',
instance=instance,
block_device_info=block_device_info,
reboot_type=reboot_type)
def rebuild_instance(self, ctxt, instance, new_pass, injected_files,
image_ref, orig_image_ref, orig_sys_metadata, bdms,
recreate=False, on_shared_storage=False, host=None,
preserve_ephemeral=False, kwargs=None):
# NOTE(danms): kwargs is only here for cells compatibility, don't
# actually send it to compute
extra = {'preserve_ephemeral': preserve_ephemeral}
version = '3.21'
cctxt = self.client.prepare(server=_compute_host(host, instance),
version=version)
cctxt.cast(ctxt, 'rebuild_instance',
instance=instance, new_pass=new_pass,
injected_files=injected_files, image_ref=image_ref,
orig_image_ref=orig_image_ref,
orig_sys_metadata=orig_sys_metadata, bdms=bdms,
recreate=recreate, on_shared_storage=on_shared_storage,
**extra)
def refresh_provider_fw_rules(self, ctxt, host):
version = '3.0'
cctxt = self.client.prepare(server=host, version=version)
cctxt.cast(ctxt, 'refresh_provider_fw_rules')
def remove_aggregate_host(self, ctxt, aggregate, host_param, host,
slave_info=None):
'''Remove aggregate host.
:param ctxt: request context
:param aggregate:
:param host_param: This value is placed in the message to be the 'host'
parameter for the remote method.
:param host: This is the host to send the message to.
'''
version = '3.0'
cctxt = self.client.prepare(server=host, version=version)
cctxt.cast(ctxt, 'remove_aggregate_host',
aggregate=aggregate, host=host_param,
slave_info=slave_info)
def remove_fixed_ip_from_instance(self, ctxt, instance, address):
version = '3.13'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'remove_fixed_ip_from_instance',
instance=instance, address=address)
def remove_volume_connection(self, ctxt, instance, volume_id, host):
if self.client.can_send_version('3.30'):
version = '3.30'
else:
version = '3.0'
instance = jsonutils.to_primitive(instance)
cctxt = self.client.prepare(server=host, version=version)
return cctxt.call(ctxt, 'remove_volume_connection',
instance=instance, volume_id=volume_id)
def rescue_instance(self, ctxt, instance, rescue_password,
rescue_image_ref=None, clean_shutdown=True):
msg_args = {'rescue_password': rescue_password}
if self.client.can_send_version('3.37'):
version = '3.37'
msg_args['clean_shutdown'] = clean_shutdown
msg_args['rescue_image_ref'] = rescue_image_ref
elif self.client.can_send_version('3.24'):
version = '3.24'
msg_args['rescue_image_ref'] = rescue_image_ref
else:
version = '3.9'
msg_args['instance'] = instance
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'rescue_instance', **msg_args)
def reset_network(self, ctxt, instance):
version = '3.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'reset_network', instance=instance)
def resize_instance(self, ctxt, instance, migration, image, instance_type,
reservations=None, clean_shutdown=True):
instance_type_p = jsonutils.to_primitive(instance_type)
msg_args = {'instance': instance, 'migration': migration,
'image': image, 'reservations': reservations,
'instance_type': instance_type_p}
if self.client.can_send_version('3.37'):
version = '3.37'
msg_args['clean_shutdown'] = clean_shutdown
else:
version = '3.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'resize_instance', **msg_args)
def resume_instance(self, ctxt, instance):
version = '3.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'resume_instance', instance=instance)
def revert_resize(self, ctxt, instance, migration, host,
reservations=None):
version = '3.0'
cctxt = self.client.prepare(server=_compute_host(host, instance),
version=version)
cctxt.cast(ctxt, 'revert_resize',
instance=instance, migration=migration,
reservations=reservations)
def rollback_live_migration_at_destination(self, ctxt, instance, host,
destroy_disks=True,
migrate_data=None):
if self.client.can_send_version('3.32'):
version = '3.32'
extra = {'destroy_disks': destroy_disks,
'migrate_data': migrate_data,
}
else:
version = '3.0'
extra = {}
self._warn_buggy_live_migrations(migrate_data)
cctxt = self.client.prepare(server=host, version=version)
cctxt.cast(ctxt, 'rollback_live_migration_at_destination',
instance=instance, **extra)
# NOTE(alaski): Remove this method when the scheduler rpc interface is
# bumped to 4.x as the only callers of this method will be removed.
def run_instance(self, ctxt, instance, host, request_spec,
filter_properties, requested_networks,
injected_files, admin_password,
is_first_time, node=None, legacy_bdm_in_spec=True):
if self.client.can_send_version('3.27'):
version = '3.27'
else:
version = '3.0'
instance = jsonutils.to_primitive(instance)
msg_kwargs = {'instance': instance, 'request_spec': request_spec,
'filter_properties': filter_properties,
'requested_networks': requested_networks,
'injected_files': injected_files,
'admin_password': admin_password,
'is_first_time': is_first_time, 'node': node,
'legacy_bdm_in_spec': legacy_bdm_in_spec}
cctxt = self.client.prepare(server=host, version=version)
cctxt.cast(ctxt, 'run_instance', **msg_kwargs)
def set_admin_password(self, ctxt, instance, new_pass):
version = '3.8'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
return cctxt.call(ctxt, 'set_admin_password',
instance=instance, new_pass=new_pass)
def set_host_enabled(self, ctxt, enabled, host):
version = '3.0'
cctxt = self.client.prepare(server=host, version=version)
return cctxt.call(ctxt, 'set_host_enabled', enabled=enabled)
def swap_volume(self, ctxt, instance, old_volume_id, new_volume_id):
version = '3.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'swap_volume',
instance=instance, old_volume_id=old_volume_id,
new_volume_id=new_volume_id)
def get_host_uptime(self, ctxt, host):
version = '3.0'
cctxt = self.client.prepare(server=host, version=version)
return cctxt.call(ctxt, 'get_host_uptime')
def reserve_block_device_name(self, ctxt, instance, device, volume_id,
disk_bus=None, device_type=None):
kw = {'instance': instance, 'device': device,
'volume_id': volume_id, 'disk_bus': disk_bus,
'device_type': device_type, 'return_bdm_object': True}
if self.client.can_send_version('3.35'):
version = '3.35'
else:
del kw['return_bdm_object']
version = '3.16'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
volume_bdm = cctxt.call(ctxt, 'reserve_block_device_name', **kw)
if not isinstance(volume_bdm, objects.BlockDeviceMapping):
volume_bdm = objects.BlockDeviceMapping.get_by_volume_id(
ctxt, volume_id)
return volume_bdm
def backup_instance(self, ctxt, instance, image_id, backup_type,
rotation):
version = '3.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'backup_instance',
instance=instance,
image_id=image_id,
backup_type=backup_type,
rotation=rotation)
def snapshot_instance(self, ctxt, instance, image_id):
version = '3.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'snapshot_instance',
instance=instance,
image_id=image_id)
def start_instance(self, ctxt, instance):
version = '3.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'start_instance', instance=instance)
def stop_instance(self, ctxt, instance, do_cast=True, clean_shutdown=True):
msg_args = {'instance': instance}
if self.client.can_send_version('3.37'):
version = '3.37'
msg_args['clean_shutdown'] = clean_shutdown
else:
version = '3.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
rpc_method = cctxt.cast if do_cast else cctxt.call
return rpc_method(ctxt, 'stop_instance', **msg_args)
def suspend_instance(self, ctxt, instance):
version = '3.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'suspend_instance', instance=instance)
def terminate_instance(self, ctxt, instance, bdms, reservations=None):
version = '3.22'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'terminate_instance',
instance=instance, bdms=bdms,
reservations=reservations)
def unpause_instance(self, ctxt, instance):
version = '3.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'unpause_instance', instance=instance)
def unrescue_instance(self, ctxt, instance):
version = '3.11'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'unrescue_instance', instance=instance)
def soft_delete_instance(self, ctxt, instance, reservations=None):
version = '3.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'soft_delete_instance',
instance=instance, reservations=reservations)
def restore_instance(self, ctxt, instance):
version = '3.20'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'restore_instance', instance=instance)
def shelve_instance(self, ctxt, instance, image_id=None,
clean_shutdown=True):
msg_args = {'instance': instance, 'image_id': image_id}
if self.client.can_send_version('3.37'):
version = '3.37'
msg_args['clean_shutdown'] = clean_shutdown
else:
version = '3.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'shelve_instance', **msg_args)
def shelve_offload_instance(self, ctxt, instance,
clean_shutdown=True):
msg_args = {'instance': instance}
if self.client.can_send_version('3.37'):
version = '3.37'
msg_args['clean_shutdown'] = clean_shutdown
else:
version = '3.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'shelve_offload_instance', **msg_args)
def unshelve_instance(self, ctxt, instance, host, image=None,
filter_properties=None, node=None):
version = '3.15'
msg_kwargs = {
'instance': instance,
'image': image,
'filter_properties': filter_properties,
'node': node,
}
cctxt = self.client.prepare(server=host, version=version)
cctxt.cast(ctxt, 'unshelve_instance', **msg_kwargs)
def volume_snapshot_create(self, ctxt, instance, volume_id,
create_info):
version = '3.6'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'volume_snapshot_create', instance=instance,
volume_id=volume_id, create_info=create_info)
def volume_snapshot_delete(self, ctxt, instance, volume_id, snapshot_id,
delete_info):
version = '3.6'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'volume_snapshot_delete', instance=instance,
volume_id=volume_id, snapshot_id=snapshot_id,
delete_info=delete_info)
def external_instance_event(self, ctxt, instances, events):
cctxt = self.client.prepare(
server=_compute_host(None, instances[0]),
version='3.23')
cctxt.cast(ctxt, 'external_instance_event', instances=instances,
events=events)
def build_and_run_instance(self, ctxt, instance, host, image, request_spec,
filter_properties, admin_password=None, injected_files=None,
requested_networks=None, security_groups=None,
block_device_mapping=None, node=None, limits=None):
version = '3.36'
if not self.client.can_send_version(version):
version = '3.33'
if 'instance_type' in filter_properties:
flavor = filter_properties['instance_type']
flavor_p = objects_base.obj_to_primitive(flavor)
filter_properties = dict(filter_properties,
instance_type=flavor_p)
if not self.client.can_send_version(version):
version = '3.23'
if requested_networks is not None:
requested_networks = [(network_id, address, port_id)
for (network_id, address, port_id, _) in
requested_networks.as_tuples()]
cctxt = self.client.prepare(server=host, version=version)
cctxt.cast(ctxt, 'build_and_run_instance', instance=instance,
image=image, request_spec=request_spec,
filter_properties=filter_properties,
admin_password=admin_password,
injected_files=injected_files,
requested_networks=requested_networks,
security_groups=security_groups,
block_device_mapping=block_device_mapping, node=node,
limits=limits)
class SecurityGroupAPI(object):
'''Client side of the security group rpc API.
API version history:
1.0 - Initial version.
1.41 - Adds refresh_instance_security_rules()
2.0 - Remove 1.x backwards compat
3.0 - Identical to 2.x, but has to be bumped at the same time as the
compute API since it's all together on the server side.
'''
def __init__(self):
super(SecurityGroupAPI, self).__init__()
target = messaging.Target(topic=CONF.compute_topic, version='3.0')
version_cap = ComputeAPI.VERSION_ALIASES.get(
CONF.upgrade_levels.compute, CONF.upgrade_levels.compute)
self.client = rpc.get_client(target, version_cap)
def refresh_security_group_rules(self, ctxt, security_group_id, host):
version = '3.0'
cctxt = self.client.prepare(server=host, version=version)
cctxt.cast(ctxt, 'refresh_security_group_rules',
security_group_id=security_group_id)
def refresh_security_group_members(self, ctxt, security_group_id,
host):
version = '3.0'
cctxt = self.client.prepare(server=host, version=version)
cctxt.cast(ctxt, 'refresh_security_group_members',
security_group_id=security_group_id)
def refresh_instance_security_rules(self, ctxt, host, instance):
version = '3.0'
instance_p = jsonutils.to_primitive(instance)
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'refresh_instance_security_rules',
instance=instance_p)
|
'''
Wifi Facade.
=============
The :class:`Wifi` facade provides access to the wifi of your mobile/desktop
devices.
It currently supports `connecting`, `disconnecting`, `scanning`, `getting the
available wifi network list` and `getting network information`.
Simple examples
---------------
To enable/ turn on wifi scanning::
>>> from plyer import wifi
>>> wifi.start_scanning()
Once the wifi is enabled/turned on, this command starts scanning
all the nearby available wifi networks.
To get network info::
>>> from plyer import wifi
>>> wifi.start_scanning()
>>> return wifi.get_network_info(name)
Returns network details of the network whose name/ssid is provided in the
`name` parameter.
To connect to a network::
>>> from plyer import wifi
>>> wifi.start_scanning()
>>> wifi.connect(network, parameters)
This connects to the network whose name/ssid is provided in the `network`
parameter, along with any other parameters required for the connection,
which vary from platform to platform.
Please visit the following files for more details about the requirements of
the `parameters` argument of the `connect` method:
plyer/platforms/win/wifi.py
plyer/platforms/macosx/wifi.py
plyer/platforms/linux/wifi.py
To disconnect from wifi::
>>> from plyer import wifi
>>> wifi.disconnect()
This disconnects your device from any wifi network.
To get available wifi networks::
>>> from plyer import wifi
>>> wifi.start_scanning()
>>> return wifi.get_available_wifi()
This returns all the available wifi networks near the device.
Supported Platforms
-------------------
Windows, OS X, Linux
Ex: 6
----------
>>> from plyer import wifi
>>> wifi.enable()
This enables the wifi device.
Ex: 7
----------
>>> from plyer import wifi
>>> wifi.disable()
This disables the wifi device.
'''
class Wifi:
'''
Wifi Facade.
'''
def is_enabled(self):
'''
Return enabled status of WiFi hardware.
'''
return self._is_enabled()
def is_connected(self, interface=None):
'''
Return connection state of WiFi interface.
.. versionadded:: 1.4.0
'''
return self._is_connected(interface=interface)
@property
def interfaces(self):
'''
List all available WiFi interfaces.
.. versionadded:: 1.4.0
'''
raise NotImplementedError()
def start_scanning(self, interface=None):
'''
Turn on scanning.
'''
return self._start_scanning(interface=interface)
def get_network_info(self, name):
'''
Return a dictionary of specified network.
'''
return self._get_network_info(name=name)
def get_available_wifi(self):
'''
        Returns a list of all the available wifi networks.
'''
return self._get_available_wifi()
def connect(self, network, parameters, interface=None):
'''
Method to connect to some network.
'''
self._connect(
network=network,
parameters=parameters,
interface=interface
)
def disconnect(self, interface=None):
'''
To disconnect from some network.
'''
self._disconnect(interface=interface)
def enable(self):
'''
Wifi interface power state is set to "ON".
'''
self._enable()
def disable(self):
'''
Wifi interface power state is set to "OFF".
'''
self._disable()
# private
def _is_enabled(self):
raise NotImplementedError()
def _is_connected(self, interface=None):
raise NotImplementedError()
def _start_scanning(self, interface=None):
raise NotImplementedError()
def _get_network_info(self, **kwargs):
raise NotImplementedError()
def _get_available_wifi(self):
raise NotImplementedError()
def _connect(self, **kwargs):
raise NotImplementedError()
def _disconnect(self, interface=None):
raise NotImplementedError()
def _enable(self):
raise NotImplementedError()
def _disable(self):
raise NotImplementedError()
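# A minimal sketch (not part of plyer) of how a platform backend could build
# on the facade above: a subclass only needs to override the private hooks.
# The `DummyWifi` name and its hard-coded network data are illustrative
# assumptions, not a real plyer platform implementation.
class DummyWifi(Wifi):
    '''In-memory backend, e.g. for tests or unsupported platforms.'''
    def __init__(self):
        self._networks = {'home': {'ssid': 'home', 'signal': -40}}
        self._powered = True
        self._connected_to = None
    def _is_enabled(self):
        return self._powered
    def _is_connected(self, interface=None):
        return self._connected_to is not None
    def _start_scanning(self, interface=None):
        pass  # networks are static in this sketch, nothing to scan
    def _get_network_info(self, **kwargs):
        return self._networks[kwargs['name']]
    def _get_available_wifi(self):
        return list(self._networks)
    def _connect(self, **kwargs):
        self._connected_to = kwargs['network']
    def _disconnect(self, interface=None):
        self._connected_to = None
    def _enable(self):
        self._powered = True
    def _disable(self):
        self._powered = False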
|
#!/usr/bin/env python
"""
Functions for creating temporary datasets
Used in test_views
"""
import os
import time
import argparse
from collections import defaultdict
import numpy as np
import PIL.Image
IMAGE_SIZE = 10
IMAGE_COUNT = 10 # per category
def create_classification_imageset(folder, image_size=None, image_count=None):
"""
Creates a folder of folders of images for classification
"""
if image_size is None:
image_size = IMAGE_SIZE
if image_count is None:
image_count = IMAGE_COUNT
# Stores the relative path of each image of the dataset
paths = defaultdict(list)
for class_name, pixel_index, rotation in [
('red-to-right', 0, 0),
('green-to-top', 1, 90),
('blue-to-left', 2, 180),
]:
os.makedirs(os.path.join(folder, class_name))
colors = np.linspace(200, 255, image_count)
for i, color in enumerate(colors):
pixel = [0, 0, 0]
pixel[pixel_index] = color
pil_img = _create_gradient_image(image_size, (0, 0, 0), pixel, rotation)
img_path = os.path.join(class_name, str(i) + '.png')
pil_img.save(os.path.join(folder, img_path))
paths[class_name].append(img_path)
return paths
def _create_gradient_image(size, color_from, color_to, rotation):
"""
Make an image with a color gradient with a specific rotation
"""
# create gradient
rgb_arrays = [np.linspace(color_from[x], color_to[x], size).astype('uint8') for x in range(3)]
gradient = np.concatenate(rgb_arrays)
# extend to 2d
picture = np.repeat(gradient, size)
picture.shape = (3, size, size)
# make image and rotate
image = PIL.Image.fromarray(picture.T)
image = image.rotate(rotation)
return image
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Create-Imageset tool - DIGITS')
### Positional arguments
parser.add_argument('folder',
help='Where to save the images'
)
### Optional arguments
parser.add_argument('-s', '--image_size',
type=int,
help='Size of the images')
parser.add_argument('-c', '--image_count',
type=int,
help='How many images')
args = vars(parser.parse_args())
print 'Creating images at "%s" ...' % args['folder']
start_time = time.time()
create_classification_imageset(args['folder'],
image_size=args['image_size'],
image_count=args['image_count'],
)
print 'Done after %s seconds' % (time.time() - start_time,)
|
#!/usr/bin/python
# Author: Thomas Dimson [[email protected]]
# Date: January 2011
# For distribution details, see LICENSE
"""Queue server for npsgd modelling tasks.
The queue server is the data backend for NPSGD. It listens to both workers and
the web interface. The web interface populates it with requests while the workers
poll for requests and pull them off the queue. Additionally, the queue is
responsible for sending out confirmation code e-mail messages.
"""
import os
import sys
import anydbm
import shelve
import pickle
import time
import logging
import tornado.web
import tornado.ioloop
import tornado.escape
import tornado.httpserver
import threading
from datetime import datetime
from optparse import OptionParser
import npsgd.email_manager
from npsgd.email_manager import Email
from npsgd import model_manager
from npsgd.config import config
from npsgd.task_queue import TaskQueue
from npsgd.task_queue import TaskQueueException
from npsgd.confirmation_map import ConfirmationMap
from npsgd.model_manager import modelManager
glb = None
"""Queue globals object - assigned at startup."""
class QueueGlobals(object):
"""Queue state objects along with disk serialization mechanisms for them."""
def __init__(self, shelve):
self.shelve = shelve
self.shelveLock = threading.RLock()
self.idLock = threading.RLock()
self.taskQueue = TaskQueue()
self.confirmationMap = ConfirmationMap()
if shelve.has_key("idCounter"):
self.idCounter = shelve["idCounter"]
else:
self.idCounter = 0
self.loadDiskTaskQueue()
self.loadConfirmationMap()
self.expireWorkerTaskThread = ExpireWorkerTaskThread(self.taskQueue)
self.lastWorkerCheckin = datetime(1,1,1)
def loadDiskTaskQueue(self):
"""Load task queue from disk using the shelve reserved for the queue."""
if not self.shelve.has_key("taskQueue"):
logging.info("Unable to read task queue from disk db, starting fresh")
return
logging.info("Reading task queue from disk")
readTasks = 0
failedTasks = 0
taskDicts = self.shelve["taskQueue"]
for taskDict in taskDicts:
try:
task = modelManager.getModelFromTaskDict(taskDict)
except model_manager.InvalidModelError, e:
emailAddress = taskDict["emailAddress"]
subject = config.lostTaskEmailSubject.generate(full_name=taskDict["modelFullName"],
visibleId=taskDict["visibleId"])
body = config.lostTaskEmailTemplate.generate()
emailObject = Email(emailAddress, subject, body)
logging.info("Invalid model-version pair, notifying %s", emailAddress)
                npsgd.email_manager.backgroundEmailSend(emailObject)
failedTasks += 1
continue
readTasks += 1
self.taskQueue.putTask(task)
logging.info("Read %s tasks, failed while reading %s tasks", readTasks, failedTasks)
def loadConfirmationMap(self):
"""Load confirmation map ([code, modelDict] pairs) from shelve reserved for the queue."""
if not self.shelve.has_key("confirmationMap"):
logging.info("Unable to read confirmation map from disk db, starting fresh")
return
logging.info("Reading confirmation map from disk")
confirmationMapEntries = self.shelve["confirmationMap"]
readCodes = 0
failedCodes = 0
for code, taskDict in confirmationMapEntries.iteritems():
try:
task = modelManager.getModelFromTaskDict(taskDict)
except model_manager.InvalidModelError, e:
emailAddress = taskDict["emailAddress"]
subject = config.confirmationFailedEmailSubject.generate(full_name=taskDict["modelFullName"],
visibleId=taskDict["visibleId"])
body = config.confirmationFailedEmailTemplate.generate(code=code)
emailObject = Email(emailAddress, subject, body)
logging.info("Invalid model-version pair, notifying %s", emailAddress)
                npsgd.email_manager.backgroundEmailSend(emailObject)
failedCodes += 1
continue
readCodes += 1
self.confirmationMap.putRequestWithCode(task, code)
logging.info("Read %s codes, failed while reading %s codes", readCodes, failedCodes)
def syncShelve(self):
"""Serializes the task queue, confirmation map and id counter to disk using the queue shelve."""
try:
with self.shelveLock:
self.shelve["taskQueue"] = [e.asDict() \
for e in self.taskQueue.allRequests()]
self.shelve["confirmationMap"] = dict( (code, task.asDict())\
for (code, task) in self.confirmationMap.getRequestsWithCodes())
with self.idLock:
self.shelve["idCounter"] = self.idCounter
except pickle.PicklingError, e:
logging.warning("Unable sync task queue and confirmation error to disk due to a pickling (serialization error): %s", e)
return
logging.info("Synced queue and confirmation map to disk")
def touchWorkerCheckin(self):
self.lastWorkerCheckin = datetime.now()
def newTaskId(self):
with self.idLock:
self.idCounter += 1
return self.idCounter
class ExpireWorkerTaskThread(threading.Thread):
"""Task Expiration Thread
    Moves tasks back into the queue whenever
    we haven't heard from a worker in a while.
"""
def __init__(self, taskQueue):
threading.Thread.__init__(self)
self.daemon = True
self.taskQueue = taskQueue
self.done = threading.Event()
def run(self):
logging.info("Expire worker task thread booting up...")
while True:
self.done.wait(config.keepAliveInterval)
if self.done.isSet():
break
badTasks = self.taskQueue.pullProcessingTasksOlderThan(
time.time() - config.keepAliveTimeout)
if len(badTasks) > 0:
logging.info("Found %d tasks to expire", len(badTasks))
for task in badTasks:
task.failureCount += 1
logging.warning("Task '%s' failed due to timeout (failure #%d)", task.taskId, task.failureCount)
if task.failureCount > config.maxJobFailures:
logging.warning("Exceeded max job failures, sending fail email")
npsgd.email_manager.backgroundEmailSend(task.failureEmail())
else:
logging.warning("Inserting task back in to queue with new taskId")
task.taskId = glb.newTaskId()
self.taskQueue.putTask(task)
class QueueRequestHandler(tornado.web.RequestHandler):
"""Superclass to all queue request methods."""
def checkSecret(self):
"""Checks the request for a 'secret' parameter that matches the queue's own."""
if self.get_argument("secret") == config.requestSecret:
return True
else:
self.write(tornado.escape.json_encode({"error": "bad_secret"}))
return False
class ClientModelCreate(QueueRequestHandler):
"""HTTP handler for clients creating a model request (before confirmation)."""
def post(self):
"""Post handler for model requests from the web daemon.
Attempts to build a model from its known models (essentially performing
parameter verification) then places a request in the queue if it succeeds.
Additionally, it will send out an e-mail to the user for confirmation of
the request
"""
if not self.checkSecret():
return
task_json = tornado.escape.json_decode(self.get_argument("task_json"))
task = modelManager.getModelFromTaskDict(task_json)
task.taskId = glb.newTaskId()
code = glb.confirmationMap.putRequest(task)
emailAddress = task.emailAddress
logging.info("Generated a request for %s, confirmation %s required", emailAddress, code)
subject = config.confirmEmailSubject.generate(task=task)
body = config.confirmEmailTemplate.generate(code=code, task=task, expireDelta=config.confirmTimeout)
emailObject = Email(emailAddress, subject, body)
npsgd.email_manager.backgroundEmailSend(emailObject)
glb.syncShelve()
self.write(tornado.escape.json_encode({
"response": {
"task" : task.asDict(),
"code" : code
}
}))
class ClientQueueHasWorkers(QueueRequestHandler):
"""Request handler for the web daemon to check if workers are available.
We keep track of the last time workers checked into the queue in order
to ensure that all requests can be processed.
"""
def get(self):
if not self.checkSecret():
return
td = datetime.now() - glb.lastWorkerCheckin
hasWorkers = (td.seconds + td.days * 24 * 3600) < config.keepAliveTimeout
self.write(tornado.escape.json_encode({
"response": {
"has_workers" : hasWorkers
}
}))
previouslyConfirmed = set()
class ClientConfirm(QueueRequestHandler):
"""HTTP handler for clients confirming a model request.
This handler moves requests from the confirmation map to the general
request queue for processing.
"""
def get(self, code):
global previouslyConfirmed
if not self.checkSecret():
return
try:
#Expire old confirmations first, just in case
glb.confirmationMap.expireConfirmations()
confirmedRequest = glb.confirmationMap.getRequest(code)
previouslyConfirmed.add(code)
except KeyError, e:
if code in previouslyConfirmed:
self.write(tornado.escape.json_encode({
"response": "already_confirmed"
}))
return
else:
raise tornado.web.HTTPError(404)
glb.taskQueue.putTask(confirmedRequest)
glb.syncShelve()
self.write(tornado.escape.json_encode({
"response": "okay"
}))
class WorkerInfo(QueueRequestHandler):
"""HTTP handler for workers checking into the queue."""
def get(self):
if not self.checkSecret():
return
glb.touchWorkerCheckin()
self.write("{}")
class WorkerTaskKeepAlive(QueueRequestHandler):
"""HTTP handler for workers pinging the queue while working on a task.
Having this request makes sure that we don't time out any jobs that
are currently being handled by some worker. If a worker goes down,
we will put the job back into the queue because this request won't have
been made.
"""
def get(self, taskIdString):
if not self.checkSecret():
return
glb.touchWorkerCheckin()
taskId = int(taskIdString)
logging.info("Got heartbeat for task id '%s'", taskId)
try:
task = glb.taskQueue.touchProcessingTaskById(taskId)
except TaskQueueException, e:
logging.info("Bad keep alive request: no such task id '%s' exists" % taskId)
self.write(tornado.escape.json_encode({
"error": {"type" : "bad_id" }
}))
self.write("{}")
class WorkerSucceededTask(QueueRequestHandler):
"""HTTP handler for workers telling the queue that they have succeeded processing.
After this request, the queue no longer needs to keep track of the job in any way
and declares it complete.
"""
def get(self, taskIdString):
if not self.checkSecret():
return
glb.touchWorkerCheckin()
taskId = int(taskIdString)
try:
task = glb.taskQueue.pullProcessingTaskById(taskId)
except TaskQueueException, e:
logging.info("Bad succeed request: no task id exists")
self.write(tornado.escape.json_encode({
"error": {"type" : "bad_id" }
}))
return
glb.syncShelve()
self.write(tornado.escape.json_encode({
"status": "okay"
}))
class WorkerHasTask(QueueRequestHandler):
"""HTTP handler for workers ensuring that a job still exists.
This handler helps eliminate certain race conditions in NPSGD. Before a
worker sends an e-mail with job results, it checks back with the queue to
    make sure that the job hasn't already been handled by another worker
(this could happen if the queue declares that the first worker had timed out).
If there is no task with that id still in the processing list then
an e-mail being sent out would be a duplicate.
"""
def get(self, taskIdString):
if not self.checkSecret():
return
glb.touchWorkerCheckin()
taskId = int(taskIdString)
logging.info("Got 'has task' request for task of id '%d'", taskId)
if glb.taskQueue.hasProcessingTaskById(taskId):
self.write(tornado.escape.json_encode({
"response": "yes"
}))
else:
self.write(tornado.escape.json_encode({
"response": "no"
}))
class WorkerFailedTask(QueueRequestHandler):
"""HTTP handler for workers reporting failure to complete a job.
Upon failure, we will either recycle the request into the queue or we will
report a failure (with an e-mail message to the user).
"""
def get(self, taskIdString):
if not self.checkSecret():
return
glb.touchWorkerCheckin()
taskId = int(taskIdString)
try:
task = glb.taskQueue.pullProcessingTaskById(taskId)
except TaskQueueException, e:
logging.info("Bad failed request: no such task id exists, ignoring request")
self.write(tornado.escape.json_encode({
"error": {"type" : "bad_id" }
}))
return
task.failureCount += 1
logging.warning("Worker had a failure while processing task '%s' (failure #%d)",\
task.taskId, task.failureCount)
if task.failureCount >= config.maxJobFailures:
logging.warning("Max job failures found, sending failure email")
npsgd.email_manager.backgroundEmailSend(task.failureEmail())
else:
logging.warning("Returning task to queue for another attempt")
glb.taskQueue.putTask(task)
self.write(tornado.escape.json_encode({
"status": "okay"
}))
class WorkerTaskRequest(QueueRequestHandler):
"""HTTP handler for workers grabbings tasks off the queue."""
def post(self):
if not self.checkSecret():
return
modelVersions = tornado.escape.json_decode(self.get_argument("model_versions_json"))
glb.touchWorkerCheckin()
logging.info("Received worker task request with models %s", modelVersions)
if glb.taskQueue.isEmpty():
self.write(tornado.escape.json_encode({
"status": "empty_queue"
}))
else:
task = glb.taskQueue.pullNextVersioned(modelVersions)
            if task is None:
logging.info("Found no models in queue matching worker's supported versions")
self.write(tornado.escape.json_encode({
"status": "no_version"
}))
else:
glb.taskQueue.putProcessingTask(task)
self.write(tornado.escape.json_encode({
"task": task.asDict()
}))
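# A minimal sketch of how a worker process could poll this queue over HTTP.
# It is illustrative only: the queue URL, the model/version list and the use
# of urllib2 are assumptions, but the '/worker_work_task' endpoint, the
# 'secret' parameter and the JSON shapes match the handlers defined above.
def example_worker_poll(queue_url, secret, model_versions):
    import json
    import urllib
    import urllib2
    body = urllib.urlencode({
        "secret": secret,
        "model_versions_json": json.dumps(model_versions),
    })
    response = json.loads(urllib2.urlopen(queue_url + "/worker_work_task", body).read())
    if "task" in response:
        return response["task"]  # a task dict ready for processing
    return None  # queue replied with "empty_queue" or "no_version"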
def main():
global glb
parser = OptionParser()
parser.add_option("-c", "--config", dest="config",
help="Config file", default="config.cfg")
parser.add_option("-p", "--port", dest="port",
help="Queue port number", default=9000)
parser.add_option('-l', '--log-filename', dest='log',
help="Log filename (use '-' for stderr)", default="-")
(options, args) = parser.parse_args()
config.loadConfig(options.config)
config.setupLogging(options.log)
model_manager.setupModels()
model_manager.startScannerThread()
if not os.path.exists(os.path.dirname(config.queueFile)):
logging.warning("Queue directory does not exist, attempting to create")
os.makedirs(os.path.dirname(config.queueFile))
try:
queueShelve = shelve.open(config.queueFile)
except anydbm.error:
logging.warning("Queue file '%s' is corrupt, removing and starting afresh", config.queueFile)
os.remove(config.queueFile)
queueShelve = shelve.open(config.queueFile)
try:
glb = QueueGlobals(queueShelve)
queueHTTP = tornado.httpserver.HTTPServer(tornado.web.Application([
(r"/worker_info", WorkerInfo),
(r"/client_model_create", ClientModelCreate),
(r"/client_queue_has_workers", ClientQueueHasWorkers),
(r"/client_confirm/(\w+)", ClientConfirm),
(r"/worker_failed_task/(\d+)", WorkerFailedTask),
(r"/worker_succeed_task/(\d+)", WorkerSucceededTask),
(r"/worker_has_task/(\d+)", WorkerHasTask),
(r"/worker_keep_alive_task/(\d+)", WorkerTaskKeepAlive),
(r"/worker_work_task", WorkerTaskRequest)
]))
queueHTTP.listen(options.port)
logging.info("NPSGD Queue Booted up, serving on port %d", options.port)
print >>sys.stderr, "NPSGD queue server listening on %d" % options.port
tornado.ioloop.IOLoop.instance().start()
finally:
queueShelve.close()
if __name__ == "__main__":
main()
|
# -*- coding: utf-8 -*-
#
## This file is part of INSPIRE.
## Copyright (C) 2012, 2013 CERN.
##
## INSPIRE is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## INSPIRE is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
##
## In applying this licence, CERN does not waive the privileges and immunities
## granted to it by virtue of its status as an Intergovernmental Organization
## or submit itself to any jurisdiction.
"""
INSPIRE overlay
----------------
INSPIRE overlay repository for Invenio.
"""
import os
from setuptools import setup, find_packages
packages = find_packages(exclude=['docs'])
# Load __version__, should not be done using import.
# http://python-packaging-user-guide.readthedocs.org/en/latest/tutorial.html
g = {}
with open(os.path.join('inspire', 'version.py'), 'rt') as fp:
exec(fp.read(), g)
version = g['__version__']
setup(
name='Inspire',
version=version,
url='https://github.com/inspirehep/inspire-next',
license='GPLv2',
author='CERN',
author_email='[email protected]',
description=__doc__,
long_description=open('README.rst', 'rt').read(),
packages=packages,
namespace_packages=["inspire", "inspire.ext", ],
include_package_data=True,
zip_safe=False,
platforms='any',
install_requires=[
"rt",
"HarvestingKit>=0.3",
"mixer==4.9.5",
"requests==2.3",
"raven==5.0.0",
"orcid",
"retrying"
],
extras_require={
'development': [
'Flask-DebugToolbar>=0.9',
'ipython',
'ipdb',
'kwalitee'
],
},
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: GPLv2 License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
entry_points={
'invenio.config': [
"inspire = inspire.config"
]
},
test_suite='inspire.testsuite',
tests_require=[
'nose',
'Flask-Testing'
]
)
|
#!/usr/bin/python2
"""
bootalert
Sends email with hostname and IP address
Brian Parsons <[email protected]>
"""
import ConfigParser
import datetime
import re
import smtplib
import socket
import sys
import urllib2
# Get Hostname
hostname = socket.gethostname()
# Get current IP
try:
ipsite = urllib2.urlopen('http://ip.brian.is')
response = ipsite.read()
ips = re.findall("(?:\d{1,3}\.){3}\d{1,3}", response)
if type(ips) in [list, tuple, set]:
for record in ips:
newip = record
except IOError as e:
print('Connection error getting IP address: %s' % e.reason)
newip = 'Fetching IP address failed with: ' + e.reason[1]
try:
newip
except NameError:
print('Unable to find IP address in response from ip.brian.is.')
newip = 'Fetching IP address failed - no IP found in response from ip.brian.is'
print('Current IP: %s' % newip)
# Parse Config File
config = ConfigParser.ConfigParser()
config.read("/etc/conf.d/bootalert")
try:
confmailto = config.get("bootalert", "mailto")
confmailfrom = config.get("bootalert", "mailfrom")
except ConfigParser.NoSectionError:
print("Config file /etc/conf.d/bootalert not found")
# Send Message
# Get mail to address from conf file or default to root
try:
mailto = confmailto
except NameError:
mailto = "root"
# Get mail from address from conf file or default to root
try:
mailfrom = confmailfrom
except NameError:
mailfrom = "root"
now = datetime.datetime.now()
print("Sending mail from " + mailfrom + " to " + mailto + ".")
# compose boot email
messageheader = "From: Boot Alert <" + mailfrom + ">\n"
messageheader += "To: " + mailto + "\n"
messageheader += "Subject: " + hostname + "\n\n"
message = messageheader + hostname + " booted " + now.strftime("%a %b %d %H:%M:%S %Z %Y") + " with IP: " + newip + ".\n\n"
# send boot email
try:
smtpObj = smtplib.SMTP('localhost')
smtpObj.sendmail(mailfrom, mailto, message)
except:
print("Error: unable to send boot alert email. Mail server running?")
sys.exit(1)
|
import symath
from symath.graph.algorithms import *
import symath.graph.generation as graphgen
import unittest
class TestDirectedGraph(unittest.TestCase):
def setUp(self):
self.x, self.y, self.z, self.w, self.e1, self.e2 = symath.symbols('x y z w e1 e2')
self.g = symath.graph.directed.DirectedGraph()
self.g.connect(self.x, self.y, self.e1)
self.g.connect(self.y, self.z, self.e2)
self.g.connect(self.x, self.y, self.e2)
self.g.connect(self.z, self.w)
self.g.connect(self.x, self.w)
def test_edges(self):
self.assertEqual(len(self.g.nodes[self.x].outgoing), 2)
def test_union(self):
og = symath.graph.directed.DirectedGraph()
og.connect(self.x, symath.symbols('ognode'))
og.union(self.g)
self.assertTrue(og.connectedQ(self.x, self.y))
def test_pathq(self):
self.assertTrue(pathQ(self.g, self.x, self.z))
def test_adj_matrix(self):
mp,m = self.g.adjacency_matrix()
self.assertEqual(m.shape[0], 4)
self.assertEqual(m[mp[self.x],mp[self.y]], 1)
self.assertEqual(m[mp[self.x],mp[self.x]], 0)
def test_random_generation(self):
randg = graphgen.random_graph(100, 0.05)
def test_edgevalue_disconnect(self):
g = symath.graph.directed.DirectedGraph()
g.connect(self.x, self.y, self.e1)
g.connect(self.x, self.y, self.e2)
g.disconnect(self.x, self.y)
self.assertFalse(g.connectedQ(self.x, self.y))
g.connect(self.x, self.y, self.e1)
g.connect(self.x, self.y, self.e2)
g.disconnect(self.x, self.y, self.e1)
self.assertTrue(g.connectedQ(self.x, self.y))
g.disconnect(self.x, self.y, self.e2)
self.assertFalse(g.connectedQ(self.x, self.y))
|
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper for Admin Directory API client."""
from builtins import object
from googleapiclient import errors
from httplib2 import HttpLib2Error
from google.auth.exceptions import RefreshError
from google.cloud.forseti.common.gcp_api import _base_repository
from google.cloud.forseti.common.gcp_api import api_helpers
from google.cloud.forseti.common.gcp_api import errors as api_errors
from google.cloud.forseti.common.gcp_api import repository_mixins
from google.cloud.forseti.common.util import logger
LOGGER = logger.get_logger(__name__)
API_NAME = 'admin'
REQUIRED_SCOPES = frozenset([
'https://www.googleapis.com/auth/admin.directory.group.readonly',
'https://www.googleapis.com/auth/admin.directory.user.readonly'
])
GSUITE_AUTH_FAILURE_MESSAGE = (
'Failed to retrieve G Suite data due to authentication '
'failure. Please make sure your forseti_server_config.yaml '
'file contains the most updated information and enable G '
'Suite Groups Collection if you haven\'t done so. Instructions'
' on how to enable: https://forsetisecurity.org/docs/latest/'
'configure/inventory/gsuite.html')
class AdminDirectoryRepositoryClient(_base_repository.BaseRepositoryClient):
"""Admin Directory API Respository Client."""
def __init__(self,
credentials,
quota_max_calls=None,
quota_period=1.0,
use_rate_limiter=True,
cache_discovery=False,
cache=None):
"""Constructor.
Args:
            credentials (object): A google.auth credentials object. The admin
directory API needs a service account credential with delegated
super admin role.
quota_max_calls (int): Allowed requests per <quota_period> for the
API.
quota_period (float): The time period to track requests over.
use_rate_limiter (bool): Set to false to disable the use of a rate
limiter for this service.
cache_discovery (bool): When set to true, googleapiclient will cache
HTTP requests to API discovery endpoints.
cache (googleapiclient.discovery_cache.base.Cache): instance of a
class that can cache API discovery documents. If None,
googleapiclient will attempt to choose a default.
"""
if not quota_max_calls:
use_rate_limiter = False
self._groups = None
self._members = None
self._users = None
super(AdminDirectoryRepositoryClient, self).__init__(
API_NAME, versions=['directory_v1'],
credentials=credentials,
quota_max_calls=quota_max_calls,
quota_period=quota_period,
use_rate_limiter=use_rate_limiter,
cache_discovery=cache_discovery,
cache=cache)
# Turn off docstrings for properties.
# pylint: disable=missing-return-doc, missing-return-type-doc
@property
def groups(self):
"""Returns an _AdminDirectoryGroupsRepository instance."""
if not self._groups:
self._groups = self._init_repository(
_AdminDirectoryGroupsRepository)
return self._groups
@property
def members(self):
"""Returns an _AdminDirectoryMembersRepository instance."""
if not self._members:
self._members = self._init_repository(
_AdminDirectoryMembersRepository)
return self._members
@property
def users(self):
"""Returns an _AdminDirectoryUsersRepository instance."""
if not self._users:
self._users = self._init_repository(
_AdminDirectoryUsersRepository)
return self._users
# pylint: enable=missing-return-doc, missing-return-type-doc
class _AdminDirectoryGroupsRepository(
repository_mixins.ListQueryMixin,
_base_repository.GCPRepository):
"""Implementation of Admin Directory Groups repository."""
def __init__(self, **kwargs):
"""Constructor.
Args:
**kwargs (dict): The args to pass into GCPRepository.__init__()
"""
super(_AdminDirectoryGroupsRepository, self).__init__(
key_field='', component='groups', **kwargs)
class _AdminDirectoryMembersRepository(
repository_mixins.ListQueryMixin,
_base_repository.GCPRepository):
"""Implementation of Admin Directory Members repository."""
def __init__(self, **kwargs):
"""Constructor.
Args:
**kwargs (dict): The args to pass into GCPRepository.__init__()
"""
super(_AdminDirectoryMembersRepository, self).__init__(
key_field='groupKey', component='members', **kwargs)
class _AdminDirectoryUsersRepository(
repository_mixins.ListQueryMixin,
_base_repository.GCPRepository):
"""Implementation of Admin Directory Users repository."""
def __init__(self, **kwargs):
"""Constructor.
Args:
**kwargs (dict): The args to pass into GCPRepository.__init__()
"""
super(_AdminDirectoryUsersRepository, self).__init__(
key_field='', component='users', **kwargs)
class AdminDirectoryClient(object):
"""GSuite Admin Directory API Client."""
def __init__(self, global_configs, **kwargs):
"""Initialize.
Args:
global_configs (dict): Global configurations.
**kwargs (dict): The kwargs.
"""
credentials = api_helpers.get_delegated_credential(
global_configs.get('domain_super_admin_email'),
REQUIRED_SCOPES)
max_calls, quota_period = api_helpers.get_ratelimiter_config(
global_configs, API_NAME)
cache_discovery = global_configs[
'cache_discovery'] if 'cache_discovery' in global_configs else False
self.repository = AdminDirectoryRepositoryClient(
credentials=credentials,
quota_max_calls=max_calls,
quota_period=quota_period,
use_rate_limiter=kwargs.get('use_rate_limiter', True),
cache_discovery=cache_discovery,
cache=global_configs.get('cache'))
def get_group_members(self, group_key):
"""Get all the members for specified groups.
Args:
group_key (str): The group's unique id assigned by the Admin API.
Returns:
list: A list of member objects from the API.
Raises:
api_errors.ApiExecutionError: If group member retrieval fails.
"""
try:
paged_results = self.repository.members.list(group_key)
result = api_helpers.flatten_list_results(paged_results, 'members')
LOGGER.debug('Getting all the members for group_key = %s,'
' result = %s', group_key, result)
return result
except (errors.HttpError, HttpLib2Error) as e:
raise api_errors.ApiExecutionError(group_key, e)
def get_groups(self, customer_id='my_customer'):
"""Get all the groups for a given customer_id.
A note on customer_id='my_customer'. This is a magic string instead
of using the real customer id. See:
https://developers.google.com/admin-sdk/directory/v1/guides/manage-groups#get_all_domain_groups
Args:
customer_id (str): The customer id to scope the request to.
Returns:
list: A list of group objects returned from the API.
Raises:
api_errors.ApiExecutionError: If groups retrieval fails.
RefreshError: If the authentication fails.
"""
try:
paged_results = self.repository.groups.list(customer=customer_id)
flattened_results = api_helpers.flatten_list_results(
paged_results, 'groups')
LOGGER.debug('Getting all the groups for customer_id = %s,'
' flattened_results = %s',
customer_id, flattened_results)
return flattened_results
except RefreshError as e:
# Authentication failed, log before raise.
LOGGER.exception(GSUITE_AUTH_FAILURE_MESSAGE)
raise e
except (errors.HttpError, HttpLib2Error) as e:
raise api_errors.ApiExecutionError('groups', e)
def get_users(self, customer_id='my_customer'):
"""Get all the users for a given customer_id.
A note on customer_id='my_customer'. This is a magic string instead
of using the real customer id. See:
https://developers.google.com/admin-sdk/directory/v1/guides/manage-groups#get_all_domain_groups
Args:
customer_id (str): The customer id to scope the request to.
Returns:
list: A list of user objects returned from the API.
Raises:
            api_errors.ApiExecutionError: If users retrieval fails.
RefreshError: If the authentication fails.
"""
try:
paged_results = self.repository.users.list(customer=customer_id,
viewType='admin_view')
flattened_results = api_helpers.flatten_list_results(
paged_results, 'users')
LOGGER.debug('Getting all the users for customer_id = %s,'
' flattened_results = %s',
customer_id, flattened_results)
return flattened_results
except RefreshError as e:
# Authentication failed, log before raise.
LOGGER.exception(GSUITE_AUTH_FAILURE_MESSAGE)
raise e
except (errors.HttpError, HttpLib2Error) as e:
raise api_errors.ApiExecutionError('users', e)
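# A minimal usage sketch, assuming a Forseti server configuration has already
# been loaded: the global_configs dict (domain_super_admin_email plus the
# rate-limit settings read by get_ratelimiter_config) is supplied by the
# caller, and the 'id' key on each group follows the Admin SDK group resource.
def example_list_gsuite_inventory(global_configs):
    client = AdminDirectoryClient(global_configs)
    groups = client.get_groups()  # all groups for the 'my_customer' alias
    users = client.get_users()  # all users, admin view
    members = {}
    for group in groups:
        members[group['id']] = client.get_group_members(group['id'])
    return groups, users, members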
|
# -*- coding: utf-8 -*-
#
#
# Copyright 2017 Rosen Vladimirov
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from openerp import models, fields, api, _
import logging
_logger = logging.getLogger(__name__)
class purchase_order_line(models.Model):
_inherit = 'purchase.order.line'
margin_classification_id = fields.Many2one(
comodel_name='product.margin.classification',
string='Margin Classification',
readonly=True)
def onchange_product_id(self, cr, uid, ids, pricelist_id, product_id, qty, uom_id,
partner_id, date_order=False, fiscal_position_id=False, date_planned=False,
name=False, price_unit=False, state='draft', context=None
):
vals = super(purchase_order_line, self).onchange_product_id(
cr, uid, ids, pricelist_id, product_id, qty, uom_id, partner_id, date_order=date_order,
fiscal_position_id=fiscal_position_id, date_planned=date_planned,name=name, price_unit=price_unit,
state=state, context=context
)
if not product_id:
return vals
product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
#call name_get() with partner in the context to eventually match name and description in the seller_ids field
if product:
            vals['value'].update({'margin_classification_id': product.product_tmpl_id.margin_classification_id.id})
return vals
# @api.model
# def _check_product_template(self):
# lines = []
# for line in self.order_line:
# template = False
# tmpl = line.product_id.product_tmpl_id
# if tmpl.margin_state in ('cheap', 'expensive'):
# if not template:
# lines.append((0, 0, {
# 'product_tmpl_id': tmpl.id,
# }))
# return lines
#
# @api.multi
# def purchase_confirm(self):
# self.ensure_one()
# super(PurchaseOrder, self).purchase_confirm()
# lines_for_update = self._check_product_template()
# if lines_for_update:
# ctx = {'default_wizard_line_ids': lines_for_update}
# pmc_checker_form = self.env.ref(
# 'product_margin_classification_bg.'
# 'view_product_template_mc_check_form', False)
# return {
# 'name': _("There is probably a changed cost price. Please check for possible consequences for final customer prices."),
# 'type': 'ir.actions.act_window',
# 'view_mode': 'form',
# 'res_model': 'product.margin.classification.check',
# 'views': [(pmc_checker_form.id, 'form')],
# 'view_id': pmc_checker_form.id,
# 'target': 'new',
# 'context': ctx,
# }
# else:
# self.signal_workflow('purchase_confirm')
|
# -*- coding: utf-8 -*-
import asyncio
import ccxt
import ccxt.async_support as ccxta # noqa: E402
import time
import os
import sys
root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(root + '/python')
def sync_client(exchange):
client = getattr(ccxt, exchange)()
tickers = client.fetch_tickers()
return tickers
async def async_client(exchange):
client = getattr(ccxta, exchange)()
tickers = await client.fetch_tickers()
await client.close()
return tickers
async def multi_tickers(exchanges):
input_coroutines = [async_client(exchange) for exchange in exchanges]
tickers = await asyncio.gather(*input_coroutines, return_exceptions=True)
return tickers
if __name__ == '__main__':
    # Consider reviewing the request rate limits of the methods you call
exchanges = ["coinex", "bittrex", "bitfinex", "poloniex", "hitbtc"]
tic = time.time()
a = asyncio.get_event_loop().run_until_complete(multi_tickers(exchanges))
print("async call spend:", time.time() - tic)
time.sleep(1)
tic = time.time()
a = [sync_client(exchange) for exchange in exchanges]
print("sync call spend:", time.time() - tic)
|
"""
@todo Clean up the LimitTreeMaker python file to not depend on these extra variables in cuts.py
"""
import os
from .. import Load, DirFromEnv
newLimitTreeMaker = Load('LimitTreeMaker')
def SetupFromEnv(ltm):
"""A function that sets up the LimitTreeMaker after sourcing a config file
@param ltm The LimitTreeMaker object to set up
"""
from ..CommonTools.FileConfigReader import SetupConfigFromEnv, SetFunctionFromEnv, LoadConfig
SetupConfigFromEnv(ltm)
DirFromEnv('CrombieOutLimitTreeDir')
SetFunctionFromEnv([
(ltm.SetOutDirectory, 'CrombieOutLimitTreeDir'),
])
for region in LoadConfig.cuts.regions:
if os.environ.get('CrombieExcept_' + region):
ltm.ReadExceptionConfig(os.environ['CrombieExcept_' + region], region)
def SetCuts(ltm, category):
from .. import LoadConfig
cuts = LoadConfig.cuts
for region in cuts.regions:
ltm.AddRegion(region,cuts.cut(category, region))
if region in cuts.additionKeys:
ltm.AddExceptionDataCut(region, cuts.additions[region][0])
ltm.AddExceptionWeightBranch(region, cuts.additions[region][1])
|
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import tensorflow as tf
from nets import vgg
from sys import argv
from util import run_model
def main():
"""
You can also run these commands manually to generate the pb file
1. git clone https://github.com/tensorflow/models.git
2. export PYTHONPATH=Path_to_your_model_folder
3. python alexnet.py
"""
height, width = 224, 224
inputs = tf.Variable(tf.random_uniform((1, height, width, 3)), name='input')
net, end_points = vgg.vgg_19(inputs, is_training = False)
print("nodes in the graph")
for n in end_points:
print(n + " => " + str(end_points[n]))
net_outputs = map(lambda x: tf.get_default_graph().get_tensor_by_name(x), argv[2].split(','))
run_model(net_outputs, argv[1], 'vgg_19', argv[3] == 'True')
if __name__ == "__main__":
main()
|
#!/usr/bin/env python
# encoding: utf-8
"""
cli.py
Created by Thomas Mangin on 2014-12-22.
Copyright (c) 2009-2015 Exa Networks. All rights reserved.
"""
import sys
from exabgp.dep.cmd2 import cmd
from exabgp.version import version
class Completed (cmd.Cmd):
# use_rawinput = False
# prompt = ''
# doc_header = 'doc_header'
# misc_header = 'misc_header'
# undoc_header = 'undoc_header'
ruler = '-'
def __init__ (self,intro):
self.prompt = '%s> ' % intro
cmd.Cmd.__init__(self)
def completedefault (self, text, line, begidx, endidx):
commands = line.split()
local = self.completion
for command in commands:
if command in local:
local = local[command]
continue
break
return [_ for _ in local.keys() if _.startswith(text)]
def default (self,line):
print 'unrecognised syntax: ', line
def do_EOF (self):
return True
class SubMenu (Completed):
def do_exit (self,line):
return True
do_x = do_exit
class Attribute (SubMenu):
chars = ''.join(chr(_) for _ in range(ord('a'),ord('z')+1) + range(ord('0'),ord('9')+1) + [ord ('-')])
attribute = None
completion = {
'origin' : {
'igp': {
},
'egp': {
},
'incomplete': {
},
},
}
def __init__ (self,name):
self.name = name
SubMenu.__init__(self,'attribute %s' % name)
def do_origin (self,line):
if line in ('igp','egp','incomplete'):
self.attribute['origin'] = line
else:
print 'invalid origin'
def do_as_path (self,line):
pass
# next-hop
def do_med (self,line):
if not line.isdigit():
print 'invalid med, %s is not a number' % line
return
med = int(line)
		if not 0 <= med < 65536:
			print 'invalid med, %s is not a valid number' % line
			return
		self.attribute['med'] = med
# local-preference
# atomic-aggregate
# aggregator
# community
# originator-id
# cluster-list
# extended-community
# psmi
# aigp
def do_show (self,line):
print 'attribute %s ' % self.name + ' '.join('%s %s' % (key,value) for key,value in self.attribute.iteritems())
class ExaBGP (Completed):
completion = {
'announce' : {
'route' : {
},
'l2vpn' : {
},
},
'neighbor': {
'include': {
},
'exclude': {
},
'reset': {
},
'list': {
},
},
'attribute' : {
},
'show': {
'routes' : {
'extensive': {
},
'minimal': {
},
},
},
'reload': {
},
'restart': {
},
}
def _update_prompt (self):
if self._neighbors:
self.prompt = '\n# neighbor ' + ', '.join(self._neighbors) + '\n> '
else:
self.prompt = '\n> '
##
## repeat last command
##
last = 'help'
def do_last (self, line):
"Print the input, replacing '$out' with the output of the last shell command"
# Obviously not robust
if hasattr(self, 'last_output'):
print line.replace('$out', self.last_output)
##
##
##
_neighbors = set()
def do_neighbor (self,line):
try:
action,ip = line.split()
except ValueError:
if line == 'reset':
print 'removed neighbors', ', '.join(self._neighbors)
self._neighbors = set()
self._update_prompt()
else:
print 'invalid syntax'
self.help_neighbor()
return
if action == 'include':
# check ip is an IP
# check ip is a known IP
self._neighbors.add(ip)
self._update_prompt()
elif action == 'exclude':
if ip in self._neighbors:
self._neighbors.remove(ip)
print 'neighbor excluded'
self._update_prompt()
else:
print 'invalid neighbor'
elif action == 'list':
print 'removed neighbors', ', '.join(self._neighbors)
else:
print 'invalid syntax'
self.help_neighbor()
def help_neighbor (self):
print "neighbor include <ip> : limit the action to the defined neighbors"
print "neighbor exclude <ip> : remove a particular neighbor"
print "neighbor reset : clear the neighbor previous set "
_attribute = {}
def do_attribute (self,name):
if not name:
self.help_attribute()
return
invalid = ''.join([_ for _ in name if _ not in Attribute.chars])
if invalid:
print 'invalid character(s) in attribute name: %s' % invalid
return
cli = Attribute(name)
cli.attribute = self._attribute.get(name,{})
cli.cmdloop()
def help_attribute (self):
print 'attribute <name>'
def do_quit (self,line):
return True
do_q = do_quit
if __name__ == '__main__':
if len(sys.argv) > 1:
ExaBGP().onecmd(' '.join(sys.argv[1:]))
else:
print "ExaBGP %s CLI" % version
ExaBGP('').cmdloop()
|
"""
Django settings for web project.
Generated by 'django-admin startproject' using Django 1.10.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '2aw71z$!wz-rw9z)v+!6n#p)ke117ee%59lo+(ejlo0l65@_r^'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ["127.0.0.1"]
DEBUG_TOOLBAR_CONFIG = {
    'SHOW_TOOLBAR_CALLBACK': lambda r: True,  # always show the toolbar
}
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_extensions',
'debug_toolbar',
#'haystack',
'web',
]
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.whoosh_backend.WhooshEngine',
'PATH': os.path.join(os.path.dirname(__file__), 'whoosh_index'),
# 'ENGINE': 'haystack.backends.solr_backend.SolrEngine',
# 'URL': 'http://127.0.0.1:8983/solr'
# ...or for multicore...
# 'URL': 'http://127.0.0.1:8983/solr/mysite',
},
}
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
SESSION_ENGINE = "django.contrib.sessions.backends.file"
ROOT_URLCONF = 'web.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'web.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
|
from datetime import timedelta
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand
from django.db.models import Count
from django.utils import timezone
class Command(BaseCommand):
help = """Prune old, inactive user accounts.
    Conditions for removing a user account:
- created 1+ month ago and never logged in.
Use case: visitor types in their email at the website but
never follows through with login.
- not logged in for 1 month, and has no checks
Use case: user wants to remove their account. So they
remove all checks and leave the account at that.
"""
def handle(self, *args, **options):
cutoff = timezone.now() - timedelta(days=31)
# Old accounts, never logged in
q = User.objects
q = q.filter(date_joined__lt=cutoff, last_login=None)
n1, _ = q.delete()
# Not logged in for 1 month, 0 checks
q = User.objects
q = q.annotate(n_checks=Count("check"))
q = q.filter(last_login__lt=cutoff, n_checks=0)
n2, _ = q.delete()
return "Done! Pruned %d user accounts." % (n1 + n2)
|
from __future__ import print_function
from setuptools import setup, find_packages
from acky import __version__
import sys
if sys.version_info < (2, 7):
error = "ERROR: acky requires Python 2.7 or later"
print(error, file=sys.stderr)
sys.exit(1)
with open('README.rst') as f:
long_description = f.read()
setup(
name="acky",
version=__version__,
description="A consistent API to AWS",
long_description=long_description,
author="Matthew Wedgwood",
author_email="[email protected]",
url="http://github.com/RetailMeNot/acky",
install_requires=[
"botocore == 0.45.0",
],
packages=find_packages(),
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Topic :: Internet",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Topic :: Internet",
],
license="MIT",
)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-09-12 19:47
from __future__ import unicode_literals
from django.conf import settings
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('async_notifications', '0002_auto_20160515_0018'),
('contenttypes', '0002_remove_content_type_name'),
]
operations = [
migrations.CreateModel(
name='Answer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('annotation', models.TextField(blank=True)),
('text', models.TextField(blank=True)),
('display_text', models.TextField(blank=True)),
],
options={
'verbose_name_plural': 'Answers',
'verbose_name': 'Answer',
},
),
migrations.CreateModel(
name='Observation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField()),
('context', models.TextField()),
('aproved', models.BooleanField(default=False)),
('answer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='report_builder.Answer')),
],
options={
'verbose_name_plural': 'Observations',
'verbose_name': 'Observation',
},
),
migrations.CreateModel(
name='Project',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.CharField(max_length=500)),
('object_id', models.PositiveIntegerField()),
('content_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType')),
],
options={
'verbose_name_plural': 'Projects',
'verbose_name': 'Project',
},
),
migrations.CreateModel(
name='Question',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('class_to_load', models.CharField(max_length=30)),
('text', models.TextField()),
('help', models.TextField(blank=True)),
('answer_options', django.contrib.postgres.fields.jsonb.JSONField()),
('required', models.IntegerField(choices=[(0, 'Optional'), (1, 'Required'), (2, 'Required by hierarchy')], default=0)),
('order', models.CharField(blank=True, max_length=10)),
('auto', models.BooleanField(default=False)),
],
options={
'verbose_name_plural': 'Questions',
'verbose_name': 'Question',
},
),
migrations.CreateModel(
name='Report',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('template', django.contrib.postgres.fields.jsonb.JSONField(default=[{'human_name': 'General information', 'name': 'categ0', 'order': 0, 'subcategories': [{'human_name': 'General information', 'name': 'categ0_categ0', 'order': 0, 'question': [], 'questions': []}], 'subcategories_count': 1}])),
('questions', django.contrib.postgres.fields.jsonb.JSONField(default={})),
('opening_date', models.DateField()),
],
options={
'verbose_name_plural': 'Reports',
'verbose_name': 'Report',
},
),
migrations.CreateModel(
name='ReportByProject',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('start_date', models.DateField(verbose_name='Opening date')),
('submit_date', models.DateField(verbose_name='Submit date')),
('state', models.SmallIntegerField(choices=[(0, 'Submit pending'), (1, 'Unsubmitted'), (2, 'Aproved'), (3, 'Editing'), (4, 'Canceled'), (5, 'Rejected'), (6, 'In review')], default=0)),
('actions', models.TextField(blank=True, null=True)),
('review_percentage', models.SmallIntegerField(default=0)),
('complete', models.BooleanField(default=False)),
('make_another', models.BooleanField(default=False)),
('created_automatically', models.BooleanField(default=False)),
('creation_date', models.DateField(auto_now=True)),
('additional_info', models.TextField(blank=True, null=True)),
('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='report_builder.Project')),
('report', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='report_builder.Report')),
],
options={
'verbose_name_plural': 'Reports by project',
'verbose_name': 'Report by project',
},
),
migrations.CreateModel(
name='ReportType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type', models.TextField()),
('app_name', models.SlugField()),
('name', models.SlugField()),
('action_ok', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='action_ok', to='async_notifications.EmailTemplate')),
('report_end', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='report_end', to='async_notifications.EmailTemplate')),
('report_start', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='report_start', to='async_notifications.EmailTemplate')),
('responsable_change', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='responsable_change', to='async_notifications.EmailTemplate')),
('revision_turn', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='revision_turn', to='async_notifications.EmailTemplate')),
],
options={
'verbose_name_plural': 'Report types',
'verbose_name': 'Report type',
},
),
migrations.CreateModel(
name='Reviewer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('order', models.SmallIntegerField(choices=[(1, 'First'), (2, 'Second'), (3, 'Third'), (4, 'Fourth'), (5, 'Fifth'), (6, 'Sixth'), (7, 'Seventh')])),
('state', models.SmallIntegerField(choices=[(0, 'Unsupported'), (1, 'Supported'), (2, 'In review'), (3, 'Supported by the system'), (4, 'Unsupported by the system')], default=0)),
('active', models.BooleanField(default=True)),
('make_observations', models.BooleanField(default=False)),
('can_ask', models.BooleanField(default=False)),
('can_review', models.BooleanField(default=False)),
('assigned_automatically', models.BooleanField(default=False)),
('report', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='report_builder.ReportByProject')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name_plural': 'Reviewers',
'verbose_name': 'Reviewer',
},
),
migrations.CreateModel(
name='RevisionTree',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('assignment_criteria', models.CharField(max_length=100)),
('description', models.TextField()),
('report_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='report_builder.ReportType')),
],
options={
'verbose_name_plural': 'Revision Tree',
'verbose_name': 'Revision Tree',
},
),
migrations.CreateModel(
name='RevisionTreeUser',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('order', models.SmallIntegerField(choices=[(1, 'First'), (2, 'Second'), (3, 'Third'), (4, 'Fourth'), (5, 'Fifth'), (6, 'Sixth'), (7, 'Seventh')])),
('make_observations', models.BooleanField(default=True)),
('can_ask', models.BooleanField(default=True)),
('can_review', models.BooleanField(default=True)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name_plural': 'Revision Tree Users',
'verbose_name': 'Revision Tree User',
},
),
migrations.AddField(
model_name='revisiontree',
name='revision_tree_user',
field=models.ManyToManyField(to='report_builder.RevisionTreeUser'),
),
migrations.AddField(
model_name='report',
name='type',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='report_builder.ReportType'),
),
migrations.AddField(
model_name='question',
name='report',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='report_builder.Report'),
),
migrations.AddField(
model_name='observation',
name='reviewer',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='report_builder.Reviewer'),
),
migrations.AddField(
model_name='answer',
name='question',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='report_builder.Question'),
),
migrations.AddField(
model_name='answer',
name='report',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='report_builder.ReportByProject'),
),
migrations.AddField(
model_name='answer',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
|
#!/usr/bin/env python
'''
DigitalOcean external inventory script
======================================
Generates Ansible inventory of DigitalOcean Droplets.
In addition to the --list and --host options used by Ansible, there are options
for generating JSON of other DigitalOcean data. This is useful when creating
droplets. For example, --regions will return all the DigitalOcean Regions.
This information can also be easily found in the cache file, whose default
location is /tmp/ansible-digital_ocean.cache.
The --pretty (-p) option pretty-prints the output for better human readability.
----
Although the cache stores all the information received from DigitalOcean,
the cache is not used for current droplet information (in --list, --host,
--all, and --droplets). This is so that accurate droplet information is always
found. You can force this script to use the cache with --force-cache.
----
Configuration is read from `digital_ocean.ini`, then from environment variables,
and finally from command-line arguments.
Most notably, the DigitalOcean Client ID and API Key must be specified. They
can be specified in the INI file or with the following environment variables:
export DO_CLIENT_ID='DO123' DO_API_KEY='abc123'
Alternatively, they can be passed on the command-line with --client-id and
--api-key.
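For example, a minimal digital_ocean.ini might look like this (placeholder values;
only the options this script reads are shown):
[digital_ocean]
client_id = DO123
api_key = abc123
cache_path = /tmp
cache_max_age = 300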
If you specify DigitalOcean credentials in the INI file, a handy way to
get them into your environment (e.g., to use the digital_ocean module)
is to use the output of the --env option with export:
export $(digital_ocean.py --env)
----
The following groups are generated from --list:
- ID (droplet ID)
- NAME (droplet NAME)
- image_ID
- image_NAME
- distro_NAME (distribution NAME from image)
- region_ID
- region_NAME
- size_ID
- size_NAME
- status_STATUS
When run against a specific host, this script returns the following variables:
- do_created_at
- do_distro
- do_id
- do_image
- do_image_id
- do_ip_address
- do_name
- do_region
- do_region_id
- do_size
- do_size_id
- do_status
-----
```
usage: digital_ocean.py [-h] [--list] [--host HOST] [--all]
[--droplets] [--regions] [--images] [--sizes]
[--ssh-keys] [--domains] [--pretty]
[--cache-path CACHE_PATH]
[--cache-max_age CACHE_MAX_AGE]
[--refresh-cache] [--client-id CLIENT_ID]
[--api-key API_KEY]
Produce an Ansible Inventory file based on DigitalOcean credentials
optional arguments:
-h, --help show this help message and exit
--list List all active Droplets as Ansible inventory
(default: True)
--host HOST Get all Ansible inventory variables about a specific
Droplet
--all List all DigitalOcean information as JSON
--droplets List Droplets as JSON
--regions List Regions as JSON
--images List Images as JSON
--sizes List Sizes as JSON
--ssh-keys List SSH keys as JSON
--domains List Domains as JSON
--pretty, -p Pretty-print results
--cache-path CACHE_PATH
Path to the cache files (default: .)
--cache-max_age CACHE_MAX_AGE
Maximum age of the cached items (default: 0)
--refresh-cache Force refresh of cache by making API requests to
DigitalOcean (default: False - use cache files)
--client-id CLIENT_ID, -c CLIENT_ID
DigitalOcean Client ID
--api-key API_KEY, -a API_KEY
DigitalOcean API Key
```
'''
# (c) 2013, Evan Wies <[email protected]>
#
# Inspired by the EC2 inventory plugin:
# https://github.com/ansible/ansible/blob/devel/plugins/inventory/ec2.py
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
######################################################################
import os
import sys
import re
import argparse
from time import time
import ConfigParser
try:
import json
except ImportError:
import simplejson as json
try:
from dopy.manager import DoError, DoManager
except ImportError as e:
print "failed=True msg='`dopy` library required for this script'"
sys.exit(1)
class DigitalOceanInventory(object):
###########################################################################
# Main execution path
###########################################################################
def __init__(self):
''' Main execution path '''
# DigitalOceanInventory data
self.data = {} # All DigitalOcean data
self.inventory = {} # Ansible Inventory
self.index = {} # Various indices of Droplet metadata
# Define defaults
self.cache_path = '.'
self.cache_max_age = 0
# Read settings, environment variables, and CLI arguments
self.read_settings()
self.read_environment()
self.read_cli_args()
# Verify credentials were set
if not hasattr(self, 'client_id') or not hasattr(self, 'api_key'):
print '''Could not find values for DigitalOcean client_id and api_key.
They must be specified via either ini file, command line argument (--client-id and --api-key),
or environment variables (DO_CLIENT_ID and DO_API_KEY)'''
sys.exit(-1)
# env command, show DigitalOcean credentials
if self.args.env:
print "DO_CLIENT_ID=%s DO_API_KEY=%s" % (self.client_id, self.api_key)
sys.exit(0)
# Manage cache
self.cache_filename = self.cache_path + "/ansible-digital_ocean.cache"
self.cache_refreshed = False
if (not self.args.force_cache and self.args.refresh_cache) or not self.is_cache_valid():
self.load_all_data_from_digital_ocean()
else:
self.load_from_cache()
if len(self.data) == 0:
if self.args.force_cache:
print '''Cache is empty and --force-cache was specified'''
sys.exit(-1)
self.load_all_data_from_digital_ocean()
else:
# We always get fresh droplets for --list, --host, --all, and --droplets
# unless --force-cache is specified
if not self.args.force_cache and (
self.args.list or self.args.host or self.args.all or self.args.droplets):
self.load_droplets_from_digital_ocean()
# Pick the json_data to print based on the CLI command
if self.args.droplets: json_data = { 'droplets': self.data['droplets'] }
elif self.args.regions: json_data = { 'regions': self.data['regions'] }
elif self.args.images: json_data = { 'images': self.data['images'] }
elif self.args.sizes: json_data = { 'sizes': self.data['sizes'] }
elif self.args.ssh_keys: json_data = { 'ssh_keys': self.data['ssh_keys'] }
elif self.args.domains: json_data = { 'domains': self.data['domains'] }
elif self.args.all: json_data = self.data
elif self.args.host: json_data = self.load_droplet_variables_for_host()
else: # '--list' this is last to make it default
json_data = self.inventory
if self.args.pretty:
print json.dumps(json_data, sort_keys=True, indent=2)
else:
print json.dumps(json_data)
# That's all she wrote...
###########################################################################
# Script configuration
###########################################################################
def read_settings(self):
''' Reads the settings from the digital_ocean.ini file '''
config = ConfigParser.SafeConfigParser()
config.read(os.path.dirname(os.path.realpath(__file__)) + '/digital_ocean.ini')
# Credentials
if config.has_option('digital_ocean', 'client_id'):
self.client_id = config.get('digital_ocean', 'client_id')
if config.has_option('digital_ocean', 'api_key'):
self.api_key = config.get('digital_ocean', 'api_key')
# Cache related
if config.has_option('digital_ocean', 'cache_path'):
self.cache_path = config.get('digital_ocean', 'cache_path')
if config.has_option('digital_ocean', 'cache_max_age'):
self.cache_max_age = config.getint('digital_ocean', 'cache_max_age')
def read_environment(self):
''' Reads the settings from environment variables '''
# Setup credentials
if os.getenv("DO_CLIENT_ID"): self.client_id = os.getenv("DO_CLIENT_ID")
if os.getenv("DO_API_KEY"): self.api_key = os.getenv("DO_API_KEY")
def read_cli_args(self):
''' Command line argument processing '''
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on DigitalOcean credentials')
parser.add_argument('--list', action='store_true', help='List all active Droplets as Ansible inventory (default: True)')
parser.add_argument('--host', action='store', help='Get all Ansible inventory variables about a specific Droplet')
parser.add_argument('--all', action='store_true', help='List all DigitalOcean information as JSON')
parser.add_argument('--droplets','-d', action='store_true', help='List Droplets as JSON')
parser.add_argument('--regions', action='store_true', help='List Regions as JSON')
parser.add_argument('--images', action='store_true', help='List Images as JSON')
parser.add_argument('--sizes', action='store_true', help='List Sizes as JSON')
parser.add_argument('--ssh-keys', action='store_true', help='List SSH keys as JSON')
parser.add_argument('--domains', action='store_true',help='List Domains as JSON')
parser.add_argument('--pretty','-p', action='store_true', help='Pretty-print results')
parser.add_argument('--cache-path', action='store', help='Path to the cache files (default: .)')
parser.add_argument('--cache-max_age', action='store', help='Maximum age of the cached items (default: 0)')
parser.add_argument('--force-cache', action='store_true', default=False, help='Only use data from the cache')
parser.add_argument('--refresh-cache','-r', action='store_true', default=False, help='Force refresh of cache by making API requests to DigitalOcean (default: False - use cache files)')
parser.add_argument('--env','-e', action='store_true', help='Display DO_CLIENT_ID and DO_API_KEY')
parser.add_argument('--client-id','-c', action='store', help='DigitalOcean Client ID')
parser.add_argument('--api-key','-a', action='store', help='DigitalOcean API Key')
self.args = parser.parse_args()
if self.args.client_id: self.client_id = self.args.client_id
if self.args.api_key: self.api_key = self.args.api_key
if self.args.cache_path: self.cache_path = self.args.cache_path
if self.args.cache_max_age: self.cache_max_age = self.args.cache_max_age
# Make --list default if none of the other commands are specified
if (not self.args.droplets and not self.args.regions and not self.args.images and
not self.args.sizes and not self.args.ssh_keys and not self.args.domains and
not self.args.all and not self.args.host):
self.args.list = True
###########################################################################
# Data Management
###########################################################################
def load_all_data_from_digital_ocean(self):
''' Use dopy to get all the information from DigitalOcean and save data in cache files '''
manager = DoManager(self.client_id, self.api_key)
self.data = {}
self.data['droplets'] = self.sanitize_list(manager.all_active_droplets())
self.data['regions'] = self.sanitize_list(manager.all_regions())
self.data['images'] = self.sanitize_list(manager.all_images(filter=None))
self.data['sizes'] = self.sanitize_list(manager.sizes())
self.data['ssh_keys'] = self.sanitize_list(manager.all_ssh_keys())
self.data['domains'] = self.sanitize_list(manager.all_domains())
self.index = {}
self.index['region_to_name'] = self.build_index(self.data['regions'], 'id', 'name')
self.index['size_to_name'] = self.build_index(self.data['sizes'], 'id', 'name')
self.index['image_to_name'] = self.build_index(self.data['images'], 'id', 'name')
self.index['image_to_distro'] = self.build_index(self.data['images'], 'id', 'distribution')
self.index['host_to_droplet'] = self.build_index(self.data['droplets'], 'ip_address', 'id', False)
self.build_inventory()
self.write_to_cache()
def load_droplets_from_digital_ocean(self):
''' Use dopy to get droplet information from DigitalOcean and save data in cache files '''
manager = DoManager(self.client_id, self.api_key)
self.data['droplets'] = self.sanitize_list(manager.all_active_droplets())
self.index['host_to_droplet'] = self.build_index(self.data['droplets'], 'ip_address', 'id', False)
self.build_inventory()
self.write_to_cache()
def build_index(self, source_seq, key_from, key_to, use_slug=True):
dest_dict = {}
for item in source_seq:
name = (use_slug and item.has_key('slug')) and item['slug'] or item[key_to]
key = item[key_from]
dest_dict[key] = name
return dest_dict
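# e.g. build_index([{'id': '123', 'slug': 'nyc2', 'name': 'New York 2'}], 'id', 'name')
# returns {'123': 'nyc2'}; the slug is preferred when present and use_slug is True,
# otherwise the key_to value ('name' here) is used.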
def build_inventory(self):
'''Build Ansible inventory of droplets'''
self.inventory = {}
self.inventory['localhost'] = ['127.0.0.1']
# add all droplets by id and name
for droplet in self.data['droplets']:
dest = droplet['ip_address']
self.inventory[droplet['id']] = [dest]
self.push(self.inventory, droplet['name'], dest)
self.push(self.inventory, 'region_'+droplet['region_id'], dest)
self.push(self.inventory, 'image_' +droplet['image_id'], dest)
self.push(self.inventory, 'size_' +droplet['size_id'], dest)
self.push(self.inventory, 'status_'+droplet['status'], dest)
region_name = self.index['region_to_name'].get(droplet['region_id'])
if region_name:
self.push(self.inventory, 'region_'+region_name, dest)
size_name = self.index['size_to_name'].get(droplet['size_id'])
if size_name:
self.push(self.inventory, 'size_'+size_name, dest)
image_name = self.index['image_to_name'].get(droplet['image_id'])
if image_name:
self.push(self.inventory, 'image_'+image_name, dest)
distro_name = self.index['image_to_distro'].get(droplet['image_id'])
if distro_name:
self.push(self.inventory, 'distro_'+distro_name, dest)
def load_droplet_variables_for_host(self):
'''Generate a JSON response to a --host call'''
host = self.to_safe(str(self.args.host))
if not host in self.index['host_to_droplet']:
# try updating cache
if not self.args.force_cache:
self.load_all_data_from_digital_ocean()
if not host in self.index['host_to_droplet']:
# host might not exist anymore
return {}
droplet = None
if self.cache_refreshed:
for drop in self.data['droplets']:
if drop['ip_address'] == host:
droplet = self.sanitize_dict(drop)
break
else:
# Cache wasn't refreshed this run, so hit DigitalOcean API
manager = DoManager(self.client_id, self.api_key)
droplet_id = self.index['host_to_droplet'][host]
droplet = self.sanitize_dict(manager.show_droplet(droplet_id))
if not droplet:
return {}
# Put all the information in a 'do_' namespace
info = {}
for k, v in droplet.items():
info['do_'+k] = v
# Generate user-friendly variables (i.e. not the ID's)
if droplet.has_key('region_id'):
info['do_region'] = self.index['region_to_name'].get(droplet['region_id'])
if droplet.has_key('size_id'):
info['do_size'] = self.index['size_to_name'].get(droplet['size_id'])
if droplet.has_key('image_id'):
info['do_image'] = self.index['image_to_name'].get(droplet['image_id'])
info['do_distro'] = self.index['image_to_distro'].get(droplet['image_id'])
return info
###########################################################################
# Cache Management
###########################################################################
def is_cache_valid(self):
''' Determine whether the cache file has expired or is still valid '''
if os.path.isfile(self.cache_filename):
mod_time = os.path.getmtime(self.cache_filename)
current_time = time()
if (mod_time + self.cache_max_age) > current_time:
return True
return False
def load_from_cache(self):
''' Reads the data from the cache file and assigns it to member variables as Python Objects'''
cache = open(self.cache_filename, 'r')
json_data = cache.read()
cache.close()
data = json.loads(json_data)
self.data = data['data']
self.inventory = data['inventory']
self.index = data['index']
def write_to_cache(self):
''' Writes data in JSON format to a file '''
data = { 'data': self.data, 'index': self.index, 'inventory': self.inventory }
json_data = json.dumps(data, sort_keys=True, indent=2)
cache = open(self.cache_filename, 'w')
cache.write(json_data)
cache.close()
###########################################################################
# Utilities
###########################################################################
def push(self, my_dict, key, element):
''' Push an element onto a list that may not yet exist in the dict '''
if key in my_dict:
my_dict[key].append(element)
else:
my_dict[key] = [element]
def to_safe(self, word):
''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups '''
return re.sub("[^A-Za-z0-9\-\.]", "_", word)
def sanitize_dict(self, d):
new_dict = {}
for k, v in d.items():
if v != None:
new_dict[self.to_safe(str(k))] = self.to_safe(str(v))
return new_dict
def sanitize_list(self, seq):
new_seq = []
for d in seq:
new_seq.append(self.sanitize_dict(d))
return new_seq
###########################################################################
# Run the script
DigitalOceanInventory()
|
"""mapa URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from rest_framework import routers
from cronotacografo.views import RegistroViewSet
router = routers.SimpleRouter()
router.register(r'registro', RegistroViewSet)
urlpatterns = [
url(r'^admin/', admin.site.urls),
# API endpoints
url(r'^api/', include(router.urls)),
# API authentication
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework'))
]
|
# -*- coding: utf-8 -*-
# Copyright 2019 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.6@@
import inspect
import logging
import new
import os
import pprint
import time
import traceback
from datetime import datetime
import autopep8
from google.appengine.api import users
from google.appengine.ext import ndb
from google.appengine.ext.deferred import deferred
from mcfw.exceptions import HttpBadRequestException, HttpNotFoundException, HttpConflictException
from mcfw.rpc import arguments, returns
from models import Script, ScriptFunction, LastScriptRun
from to import CreateScriptTO, RunResultTO, RunScriptTO, UpdateScriptTO
@returns(Script)
@arguments(script=CreateScriptTO)
def create_script(script):
return _put_script(Script(author=users.get_current_user()), script)
def get_script(script_id):
# type: (long) -> Script
script = Script.create_key(script_id).get()
if not script:
raise HttpNotFoundException('oca.error', {'message': 'Script with id %s not found' % script_id})
return script
@returns(Script)
@arguments(script_id=(int, long), script=UpdateScriptTO)
def update_script(script_id, script):
model = get_script(script_id)
if model.version != script.version:
msg = 'Cannot save script, it has been modified by %s on %s. Please reload the page.' % (model.modified_by,
model.modified_on)
raise HttpConflictException('oca.error', {'message': msg})
return _put_script(model, script)
def _get_script_function_models(script_name, source, old_functions, ignore_errors=False):
script_module = new.module(str(script_name))
try:
exec source in script_module.__dict__
except Exception:
logging.warn('Compilation failed for \'%s\'', script_name, exc_info=True)
if ignore_errors:
return []
msg = 'Could not compile script: %s' % traceback.format_exc()
raise HttpBadRequestException('oca.error', {'message': msg})
functions = inspect.getmembers(script_module,
lambda x: inspect.isfunction(x) and x.__module__ == script_module.__name__)
function_models = []
old_funcs = {f.name: f for f in old_functions}
lines = source.splitlines()
for f in functions:
f_name = unicode(f[0])
line_number = 1
for i, line in enumerate(lines):
if 'def %s' % f_name in line:
line_number = i + 1
break
if f_name in old_funcs:
updated_function = old_funcs[f_name]
updated_function.line_number = line_number
function_models.append(updated_function)
else:
function_models.append(ScriptFunction(name=f_name, line_number=line_number))
return function_models
def _put_script(model, script):
# type: (Script, UpdateScriptTO) -> Script
assert users.is_current_user_admin()
formatted_source = autopep8.fix_code(script.source, options={'max_line_length': 120})
model.populate(name=script.name,
source=formatted_source,
modified_by=users.get_current_user(),
modified_on=datetime.now(),
version=model.version + 1,
functions=_get_script_function_models(script.name, formatted_source, model.functions))
model.put()
return model
def get_scripts():
return Script.list().fetch(None, projection=[Script.name]) or _migrate_scripts()
def delete_script(script_id):
return Script.create_key(script_id).delete()
@arguments(script_id=(int, long), data=RunScriptTO)
def run_script(script_id, data):
# type: (long, RunScriptTO) -> RunResultTO
assert users.is_current_user_admin()
script = get_script(script_id)
task_id = None
run_result = None
if data.deferred:
task_id = deferred.defer(_run_deferred, script_id, data).name.decode('utf-8')
else:
run_result = _run_script(script, data)
for f in script.functions:
if f.name == data.function:
f.last_run = LastScriptRun(date=datetime.now(),
user=users.get_current_user(),
task_id=task_id,
request_id=run_result.request_id if run_result else None,
time=run_result.time if run_result else None,
succeeded=run_result.succeeded if run_result else True)
break
script.put()
result = f.last_run.to_dict()
if run_result:
result.update(run_result.to_dict())
result.update({'script': script.to_dict()})
result['user'] = unicode(result['user'])
return RunResultTO.from_dict(result)
def _run_script(script, function):
# type: (Script, RunScriptTO) -> RunResultTO
script_module = new.module(str(script.name))
exec script.source in script_module.__dict__
func = getattr(script_module, str(function.function))
start = time.time()
try:
result = pprint.pformat(func()).decode(errors='replace')
succeeded = True
except Exception:
result = traceback.format_exc().decode(errors='replace')
succeeded = False
return RunResultTO(result=result,
succeeded=succeeded,
time=time.time() - start,
request_id=os.environ.get('REQUEST_LOG_ID'))
def _run_deferred(script_id, function):
# type: (long, RunScriptTO) -> None
script = get_script(script_id)
run_result = _run_script(script, function)
logging.info('Result from running function "%s" in script "%s"', function.function, script.name)
logging.info(run_result.to_dict(exclude=['result']))
logging.info(run_result.result)
def _migrate_scripts():
from rogerthat.models import Code
scripts = []
for code in Code.all():
scripts.append(Script(name=code.name,
author=code.author,
modified_on=datetime.utcfromtimestamp(code.timestamp),
modified_by=code.author,
source=code.source,
functions=_get_script_function_models(code.name, code.source, [], ignore_errors=True),
version=code.version))
ndb.put_multi(scripts)
return scripts
|
from FileName import FileName
from MetaInfo import MetaInfo
from ProtectFlags import ProtectFlags
from TimeStamp import TimeStamp
from FSError import *
from FSString import FSString
import amitools.util.ByteSize as ByteSize
class ADFSNode:
def __init__(self, volume, parent):
self.volume = volume
self.blkdev = volume.blkdev
self.parent = parent
self.block_bytes = self.blkdev.block_bytes
self.block = None
self.name = None
self.valid = False
self.meta_info = None
def __str__(self):
return "%s:'%s'(@%d)" % (self.__class__.__name__, self.get_node_path_name(), self.block.blk_num)
def set_block(self, block):
self.block = block
self.name = FileName(FSString(self.block.name), is_intl=self.volume.is_intl)
self.valid = True
self.create_meta_info()
def create_meta_info(self):
self.meta_info = MetaInfo(self.block.protect, self.block.mod_ts, FSString(self.block.comment))
def get_file_name(self):
return self.name
def delete(self, wipe=False, all=False, update_ts=True):
if all:
self.delete_children(wipe, all, update_ts)
self.parent._delete(self, wipe, update_ts)
def delete_children(self, wipe, all, update_ts):
pass
def get_meta_info(self):
return self.meta_info
def change_meta_info(self, meta_info):
dirty = False
# dircache?
rebuild_dircache = False
if self.volume.is_dircache and self.parent != None:
record = self.parent.get_dircache_record(self.name.get_ami_str_name())
if record == None:
raise FSError(INTERNAL_ERROR, node=self)
else:
record = None
# alter protect flags
protect = meta_info.get_protect()
if protect != None and hasattr(self.block, 'protect'):
self.block.protect = protect
self.meta_info.set_protect(protect)
dirty = True
if record != None:
record.protect = protect
# alter mod time
mod_ts = meta_info.get_mod_ts()
if mod_ts != None:
self.block.mod_ts = mod_ts
self.meta_info.set_mod_ts(mod_ts)
dirty = True
if record != None:
record.mod_ts = mod_ts
# alter comment
comment = meta_info.get_comment()
if comment != None and hasattr(self.block, "comment"):
self.block.comment = comment.get_ami_str()
self.meta_info.set_comment(comment)
dirty = True
if record != None:
rebuild_dircache = len(record.comment) < len(comment.get_ami_str())
record.comment = comment.get_ami_str()
# really need update?
if dirty:
self.block.write()
# dirache update
if record != None:
self.parent.update_dircache_record(record, rebuild_dircache)
def change_comment(self, comment):
self.change_meta_info(MetaInfo(comment=comment))
def change_protect(self, protect):
self.change_meta_info(MetaInfo(protect=protect))
def change_protect_by_string(self, pr_str):
p = ProtectFlags()
p.parse(pr_str)
self.change_protect(p.mask)
def change_mod_ts(self, mod_ts):
self.change_meta_info(MetaInfo(mod_ts=mod_ts))
def change_mod_ts_by_string(self, tm_str):
t = TimeStamp()
t.parse(tm_str)
self.change_meta_info(MetaInfo(mod_ts=t))
def get_list_str(self, indent=0, all=False, detail=False):
istr = u' ' * indent
if detail:
extra = self.get_detail_str()
else:
extra = self.meta_info.get_str_line()
return u'%-40s %8s %s' % (istr + self.name.get_unicode_name(), self.get_size_str(), extra)
def list(self, indent=0, all=False, detail=False, encoding="UTF-8"):
print(self.get_list_str(indent=indent, all=all, detail=detail).encode(encoding))
def get_size_str(self):
# re-implemented in derived classes!
return ""
def get_blocks(self, with_data=False):
# re-implemented in derived classes!
return 0
def get_file_data(self):
return None
def dump_blocks(self, with_data=False):
blks = self.get_blocks(with_data)
for b in blks:
b.dump()
def get_node_path(self, with_vol=False):
if self.parent != None:
if not with_vol and self.parent.parent == None:
r = []
else:
r = self.parent.get_node_path()
else:
if not with_vol:
return []
r = []
r.append(self.name.get_unicode_name())
return r
def get_node_path_name(self, with_vol=False):
r = self.get_node_path(with_vol)
return FSString(u"/".join(r))
def get_detail_str(self):
return ""
def get_block_usage(self, all=False, first=True):
return (0,0)
def get_file_bytes(self, all=False, first=True):
return (0,0)
def is_file(self):
return False
def is_dir(self):
return False
def get_info(self, all=False):
# block usage: data + fs blocks
(data,fs) = self.get_block_usage(all=all)
total = data + fs
bb = self.blkdev.block_bytes
btotal = total * bb
bdata = data * bb
bfs = fs * bb
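# percentages are computed in hundredths of a percent using integer math and are
# divided by 100.0 below so they print with two decimal places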
prc_data = 10000 * data / total
prc_fs = 10000 - prc_data
res = []
res.append("sum: %10d %s %12d" % (total, ByteSize.to_byte_size_str(btotal), btotal))
res.append("data: %10d %s %12d %5.2f%%" % (data, ByteSize.to_byte_size_str(bdata), bdata, prc_data / 100.0))
res.append("fs: %10d %s %12d %5.2f%%" % (fs, ByteSize.to_byte_size_str(bfs), bfs, prc_fs / 100.0))
return res
|
import os
import sys
import logging
__all__ = ['logger']
try:
from colorama import init, Fore, Style
init(autoreset=False)
colors = {
'good' : Fore.GREEN,
'bad' : Fore.RED,
'vgood' : Fore.GREEN + Style.BRIGHT,
'vbad' : Fore.RED + Style.BRIGHT,
'std' : '', # Do not color "standard" text
'warn' : Fore.YELLOW + Style.BRIGHT,
'reset' : Style.RESET_ALL,
}
except ImportError:
colors = {
'good' : '',
'bad' : '',
'vgood' : '',
'vbad' : '',
'std' : '',
'warn' : '',
'reset' : '',
}
def get_console_width():
"""
Return width of available window area. Autodetection works for
Windows and POSIX platforms. Returns 80 for others
Code from http://bitbucket.org/techtonik/python-wget
"""
if os.name == 'nt':
STD_INPUT_HANDLE = -10
STD_OUTPUT_HANDLE = -11
STD_ERROR_HANDLE = -12
# get console handle
from ctypes import windll, Structure, byref
try:
from ctypes.wintypes import SHORT, WORD, DWORD
except ImportError:
# workaround for missing types in Python 2.5
from ctypes import (
c_short as SHORT, c_ushort as WORD, c_ulong as DWORD)
console_handle = windll.kernel32.GetStdHandle(STD_OUTPUT_HANDLE)
# CONSOLE_SCREEN_BUFFER_INFO Structure
class COORD(Structure):
_fields_ = [("X", SHORT), ("Y", SHORT)]
class SMALL_RECT(Structure):
_fields_ = [("Left", SHORT), ("Top", SHORT),
("Right", SHORT), ("Bottom", SHORT)]
class CONSOLE_SCREEN_BUFFER_INFO(Structure):
_fields_ = [("dwSize", COORD),
("dwCursorPosition", COORD),
("wAttributes", WORD),
("srWindow", SMALL_RECT),
("dwMaximumWindowSize", DWORD)]
sbi = CONSOLE_SCREEN_BUFFER_INFO()
ret = windll.kernel32.GetConsoleScreenBufferInfo(console_handle, byref(sbi))
if ret == 0:
return 0
return sbi.srWindow.Right+1
elif os.name == 'posix':
from fcntl import ioctl
from termios import TIOCGWINSZ
from array import array
winsize = array("H", [0] * 4)
try:
ioctl(sys.stdout.fileno(), TIOCGWINSZ, winsize)
except IOError:
pass
return (winsize[1], winsize[0])[0]
return 80
class Logger(object):
VERBOSE = logging.DEBUG - 1
DEBUG = logging.DEBUG
INFO = logging.INFO
WARN = logging.WARNING
ERROR = logging.ERROR
FATAL = logging.FATAL
## This attribute is set to True when the user does not want colors
## by __init__.py
_NO_COLORS = False
def __init__(self,level=None):
self.indent = 0
self.level = level or Logger.INFO
self._stack = ['']
self.enabled = True
def disable_colors(self):
self._NO_COLORS = True
for k in colors.keys():
colors[k] = ''
def newline(self):
'''Print a newline character (\n) on Standard Output.'''
sys.stdout.write('\n')
def raise_last(self, exc):
raise exc(self.last_msg)
@property
def last_msg(self):
return self._stack[-1]
def ask(self, message=None, bool=None, choices=None, dont_ask=False):
if bool is not None:
if bool in (True, False) or (isinstance(bool, (list, tuple)) and len(bool) == 1):
if bool == False:
txt = "Cancel"
elif bool == True:
txt = "OK"
else:
txt = bool[0]
self.log(self.info, 'std', "%s, %s..."%(message, txt), addn=False)
if not dont_ask:
raw_input()
return
else:
if dont_ask:
self.log(self.info, 'std', '%s ? Yes'%message)
return True
while True:
self.log(self.info, 'std', "yes: "+bool[0])
self.log(self.info, 'std', "no: "+bool[1])
try:
self.log(self.info, 'std', '%s ? (y/[n]) '%message, addn=False)
ans = raw_input()
except Exception:
continue
# default choice : no
if not ans.strip():
return False
if ans not in 'yYnN':
continue
return ans in 'yY'
if choices:
if isinstance(choices, dict):
_data = choices
choices = choices.keys()
else:
_data = None
self.log(self.info, 'std', message)
for n, choice in enumerate(choices):
self.log(self.info, 'std', "%2d - %s"%(n+1, choice))
while True:
try:
ans = input('Your choice ? ')
except Exception:
self.log(self.info, 'std', "Please enter selected option's number.")
continue
if ans < 0 or ans > len(choices):
continue
break
idx = choices[ans-1]
return (_data[idx] if _data else idx)
def verbose(self, msg, *a, **kw):
self.log(self.VERBOSE, 'std', msg, *a, **kw)
def debug(self, msg, *a, **kw):
self.log(self.DEBUG, 'std', msg, *a, **kw)
def info(self, msg, *a, **kw):
self.log(self.INFO, 'std', msg, *a, **kw)
def success(self, msg, *a, **kw):
self.log(self.INFO, 'good', msg, *a, **kw)
def warn(self, msg, *a, **kw):
self.log(self.WARN, 'warn', msg, *a, **kw)
def error(self, msg, *a, **kw):
self.log(self.ERROR, 'bad', msg, *a, **kw)
exc = kw.get('exc', None)
if exc is not None:
raise exc(self.last_msg)
def fatal(self, msg, *a, **kw):
self.log(self.FATAL, 'vbad', msg, *a, **kw)
exc = kw.get('exc', None)
if exc is not None:
raise exc(self.last_msg)
def exit(self, msg=None, status=1):
if msg != None:
self.log(self.FATAL, 'vbad', msg)
sys.exit(status)
def log(self, level, col, msg, *a, **kw):
'''
This is the base function that logs all messages. This function prints a newline character too,
unless you specify ``addn=False``. When the message starts with a carriage return (\r) it automatically
clears the line before writing.
'''
if level >= self.level and self.enabled:
std = sys.stdout
if level >= self.ERROR:
std = sys.stderr
## We can pass to logger.log any object: it must have at least
## a __repr__ or a __str__ method.
msg = str(msg)
if msg.startswith('\r') or self.last_msg.startswith('\r'):
## We have to clear the line in case this message is longer than
## the previous
std.write('\r' + ' ' * get_console_width())
msg = '\r' + ' ' * self.indent + msg.lstrip('\r').format(*a)
else:
try:
msg = ' ' * self.indent + msg.format(*a)
except KeyError:
msg = ' ' * self.indent + msg
col, col_reset = colors[col], colors['reset']
if self._NO_COLORS:
col, col_reset = '', ''
std.write(col + msg + col_reset)
## Automatically adds a newline character
if kw.get('addn', True):
self.newline()
## flush() makes the log immediately readable
std.flush()
self._stack.append(msg)
logger = Logger()
if __name__ == '__main__':
print logger.ask("Beware, you enter a secret place", bool=True)
print logger.ask("Sorry, can't install this package", bool=False)
print logger.ask("Sorry, can't install this package", bool=['Press any key to continue'])
print logger.ask('Proceed', bool=('remove files', 'cancel'))
print logger.ask('Do you want to upgrade', bool=('upgrade version', 'keep working version'))
print logger.ask('Installation method', choices=('Egg based', 'Flat directory'))
print logger.ask('some dict', choices={'choice a': 'a', 'choice b': 'b', 'choice c': 'c'})
|
from typing import Dict, Optional
import networkx
from randovania.game_description.area import Area
from randovania.game_description.game_patches import GamePatches
from randovania.game_description.node import Node, DockNode, TeleporterNode, PickupNode, ResourceNode
from randovania.game_description.resources.pickup_index import PickupIndex
from randovania.game_description.resources.resource_info import ResourceInfo
from randovania.game_description.world_list import WorldList
def distances_to_node(world_list: WorldList, starting_node: Node,
*,
ignore_elevators: bool = True,
cutoff: Optional[int] = None,
patches: Optional[GamePatches] = None,
) -> Dict[Area, int]:
"""
Compute the shortest distance from a node to all reachable areas.
:param world_list:
:param starting_node:
:param ignore_elevators:
:param cutoff: Exclude areas whose distance is longer than cutoff.
:param patches:
:return: Dict mapping each reachable Area to its shortest distance from starting_node.
"""
g = networkx.DiGraph()
dock_connections = patches.dock_connection if patches is not None else {}
elevator_connections = patches.elevator_connection if patches is not None else {}
for area in world_list.all_areas:
g.add_node(area)
for world in world_list.worlds:
for area in world.areas:
new_areas = set()
for node in area.nodes:
if isinstance(node, DockNode):
connection = dock_connections.get((area.area_asset_id, node.dock_index), node.default_connection)
new_areas.add(world.area_by_asset_id(connection.area_asset_id))
elif isinstance(node, TeleporterNode) and not ignore_elevators:
connection = elevator_connections.get(node.teleporter_instance_id, node.default_connection)
new_areas.add(world_list.area_by_area_location(connection))
for next_area in new_areas:
g.add_edge(area, next_area)
return networkx.single_source_shortest_path_length(g, world_list.nodes_to_area(starting_node), cutoff)
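# Example usage (hypothetical names): for a loaded game description `gd` and a start node,
# distances_to_node(gd.world_list, start_node, cutoff=2) returns a dict mapping each Area
# reachable within two transitions to its hop count from the starting node's area.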
def pickup_index_to_node(world_list: WorldList, index: PickupIndex) -> PickupNode:
for node in world_list.all_nodes:
if isinstance(node, PickupNode) and node.pickup_index == index:
return node
raise ValueError(f"PickupNode with {index} not found.")
def node_with_resource(world_list: WorldList, resource: ResourceInfo) -> ResourceNode:
for node in world_list.all_nodes:
if isinstance(node, ResourceNode) and node.resource() == resource:
return node
raise ValueError(f"ResourceNode with {resource} not found.")
|
# Copyright (C) 2010-2013 Claudio Guarnieri.
# Copyright (C) 2014-2016 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
import select
import socket
import threading
try:
import pycares
HAVE_CARES = True
except:
HAVE_CARES = False
# try:
# import gevent, gevent.socket
# HAVE_GEVENT = True
# except:
HAVE_GEVENT = False
# these are used by all resolvers
DNS_TIMEOUT = 5
DNS_TIMEOUT_VALUE = ""
def set_timeout(value):
global DNS_TIMEOUT
DNS_TIMEOUT = value
def set_timeout_value(value):
global DNS_TIMEOUT_VALUE
DNS_TIMEOUT_VALUE = value
# standard gethostbyname in thread
# http://code.activestate.com/recipes/473878/
def with_timeout(func, args=(), kwargs={}):
"""This function will spawn a thread and run the given function
using the args, kwargs and return the given default value if the
timeout_duration is exceeded.
"""
class ResultThread(threading.Thread):
daemon = True
def __init__(self):
threading.Thread.__init__(self)
self.result, self.error = None, None
def run(self):
try:
self.result = func(*args, **kwargs)
except Exception, e:
self.error = e
it = ResultThread()
it.start()
it.join(DNS_TIMEOUT)
if it.isAlive():
return DNS_TIMEOUT_VALUE
else:
if it.error:
raise it.error
return it.result
def resolve_thread(name):
return with_timeout(gethostbyname, (name,))
def gethostbyname(name):
try:
ip = socket.gethostbyname(name)
except socket.gaierror:
ip = ""
return ip
# C-ARES (http://c-ares.haxx.se/)
def resolve_cares(name):
# create new c-ares channel
careschan = pycares.Channel(timeout=DNS_TIMEOUT, tries=1)
# if we don't get a response we return the default value
result = Resultholder()
result.value = DNS_TIMEOUT_VALUE
def setresult_cb(res, error):
# ignore error and just take first result ip (randomized anyway)
if res and res.addresses:
result.value = res.addresses[0]
# resolve with cb
careschan.gethostbyname(name, socket.AF_INET, setresult_cb)
# now do the actual work
readfds, writefds = careschan.getsock()
canreadfds, canwritefds, _ = select.select(readfds, writefds, [],
DNS_TIMEOUT)
for rfd in canreadfds:
careschan.process_fd(rfd, -1)
# if the query did not succeed, setresult was not called and we just
# return result destroy the channel first to not leak anything
careschan.destroy()
return result.value
# workaround until py3 nonlocal (for c-ares and gevent)
class Resultholder:
pass
# gevent based resolver with timeout
"""def resolve_gevent(name):
result = resolve_gevent_real(name)
# if it failed, do this a second time because of strange libevent behavior
# basically sometimes the Timeout fires immediately instead of after
# DNS_TIMEOUT
if result == DNS_TIMEOUT_VALUE:
result = resolve_gevent_real(name)
return result
def resolve_gevent_real(name):
result = DNS_TIMEOUT_VALUE
with gevent.Timeout(DNS_TIMEOUT, False):
try:
result = gevent.socket.gethostbyname(name)
except socket.gaierror:
pass
return result
"""
# choose resolver automatically
def resolve(name):
if HAVE_CARES:
return resolve_cares(name)
# elif HAVE_GEVENT:
# return resolve_gevent(name)
else:
return resolve_thread(name)
# another alias
resolve_best = resolve
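# Example (sketch): resolve("example.org") returns the first resolved IPv4 address as a string,
# or DNS_TIMEOUT_VALUE ("") when resolution fails or exceeds DNS_TIMEOUT seconds.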
|
import sys
import argh
import asyncio
import warnings
from Bio import SeqIO
import pandas as pd
from tictax import tictax
def configure_warnings(show_warnings):
'''Show or suppress warnings, mainly for TreeSwift Tree.mrca() operations'''
if show_warnings:
warnings.filterwarnings('always')
else:
warnings.filterwarnings('ignore')
def kmer_lca(seqs_path: 'path to (optionally gzipped) fasta/fastq input',
fastq: 'input is fastq; disable autodetection' = False,
progress: 'show progress bar (sent to stderr)' = False):
'''
Lowest common ancestor sequence assignment using the One Codex API.
Streams annotated records to stdout in fasta format.
Taxa assigned using the One Codex 31mer LCA database.
'''
conf = tictax.config()
records = tictax.parse_seqs(seqs_path, fastq)
print('Classifying sequences…', file=sys.stderr)
asyncio.get_event_loop().run_until_complete(tictax.oc_classify(records,
conf['one_codex_api_key'],
progress,
True))
print('✓📌 ✓📌 ✓📌 ✓📌 ✓📌 ✓📌 ✓📌 ✓📌 ✓📌 ✓📌', file=sys.stderr)
def annotate_diamond(fasta_path: 'path to fasta input',
diamond_path: 'path to Diamond taxonomic classification output'):
'''
Annotate fasta headers with taxonomy information from Diamond
'''
records = tictax.parse_seqs(fasta_path)
annotated_records = tictax.annotate_diamond(records, diamond_path)
SeqIO.write(annotated_records, sys.stdout, 'fasta')
@argh.named('filter') # Avoids namespace collision in CLI
def filter_taxa(fasta_path: 'path to fasta input',
taxids: 'comma delimited list of taxon IDs',
unclassified: 'pass sequences unclassified at superkingdom level >(0)' = False,
discard: 'discard specified taxa' = False,
warnings: 'show warnings' = False):
'''
Customisable filtering of tictax flavoured fasta files
'''
configure_warnings(warnings)
records = SeqIO.parse(fasta_path, 'fasta')
filtered_records = tictax.filter_taxa(records,
map(int, taxids.split(',')),
unclassified,
discard)
SeqIO.write(filtered_records, sys.stdout, 'fasta')
def matrix(fasta_path: 'path to tictax annotated fasta input',
scafstats_path: 'path to BBMap scaftstats file'):
'''
Generate taxonomic count matrix from tictax classified contigs
'''
records = SeqIO.parse(fasta_path, 'fasta')
df = tictax.matrix(records, scafstats_path)
df.to_csv(sys.stdout)
def main():
argh.dispatch_commands([kmer_lca,
annotate_diamond,
filter_taxa,
matrix])
if __name__ == '__main__':
main()
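# Example (hypothetical) command lines, assuming argh's default underscore-to-dash mapping
# and that this module is saved as cli.py:
# python cli.py kmer-lca contigs.fasta > classified.fasta
# python cli.py filter classified.fasta 2,2157 --discard > filtered.fasta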
|
import string
import os.path
import copy
import requests
import mimetypes
from file import settings
from django.http import HttpResponseRedirect, HttpResponse
from decorators import HttpOptionsDecorator, VoolksAPIAuthRequired
from django.views.decorators.csrf import csrf_exempt
def get_api_credentials(request):
''' Get app id and api key '''
app = request.META.get('HTTP_X_VOOLKS_APP_ID')
key = request.META.get('HTTP_X_VOOLKS_API_KEY')
if not key:
try:
key = request.GET.get('VoolksApiKey')
app = request.GET.get('VoolksAppId')
except Exception:
pass
return (app, key)
@csrf_exempt
@HttpOptionsDecorator
@VoolksAPIAuthRequired
def get(request, name):
(app, key) = get_api_credentials(request)
try:
path = settings.MEDIA_ROOT + app + "-" + key + "-" + name
dest = open(path, 'r')
fileContent = dest.read()
dest.close()
mimeType = mimetypes.guess_type(path)[0]
if mimeType is None:
mimeType = "text/plain"
return HttpResponse(fileContent, content_type=mimeType)
except:
return HttpResponse("FILE NOT FOUND")
@csrf_exempt
@HttpOptionsDecorator
@VoolksAPIAuthRequired
def delete(request, name):
(app, key) = get_api_credentials(request)
try:
path = settings.MEDIA_ROOT + app + "-" + key + "-" + name
os.remove(path)
return HttpResponse("OK")
except:
return HttpResponse("ERROR")
@csrf_exempt
@HttpOptionsDecorator
@VoolksAPIAuthRequired
def create(request):
(app, key) = get_api_credentials(request)
if len(request.FILES) < 1:
return HttpResponse("NO_FILES_FOUND")
else:
fileKey = request.FILES.keys()[0]
file = request.FILES[fileKey]
# Check file size
if file.size > settings.MEDIA_MAX_FILE_SIZE:
return HttpResponse("FILE_SIZE_EXCEEDED")
# Check file extension
if file.name.split(".")[-1].lower() not in settings.MEDIA_PERMITTED_EXTENSIONS:
return HttpResponse("FILE_EXTENSION_NOT_PERMITTED")
path = settings.MEDIA_ROOT + app + "-" + key + "-" + file.name
dest = open(path, 'w+')
if file.multiple_chunks():
for c in file.chunks():
dest.write(c)
else:
dest.write(file.read())
dest.close()
return HttpResponseRedirect("/" + file.name + "?VoolksAppId=" + app + "&VoolksApiKey=" + key)
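# Example (hypothetical; assumes the URLconf routes this view at /files/ and uses the
# headers read by get_api_credentials above):
# requests.post('http://host/files/', files={'upload': open('photo.jpg', 'rb')},
# headers={'X-Voolks-App-Id': 'myapp', 'X-Voolks-Api-Key': 'secret'})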
@csrf_exempt
@HttpOptionsDecorator
@VoolksAPIAuthRequired
def createBase64(request):
(app, key) = get_api_credentials(request)
#import pdb; pdb.set_trace();
if len(request.POST) < 1:
return HttpResponse("NO_FILES_FOUND")
else:
fileKey = request.POST.keys()[0]
# Check file size
if len(request.POST[fileKey]) > settings.MEDIA_MAX_FILE_SIZE:
return HttpResponse("FILE_SIZE_EXCEEDED")
filename = fileKey
# Check file extension
if filename.split(".")[-1].lower() not in settings.MEDIA_PERMITTED_EXTENSIONS:
return HttpResponse("FILE_EXTENSION_NOT_PERMITTED")
path = settings.MEDIA_ROOT + app + "-" + key + "-" + filename
dest = open(path, 'w+')
try:
dest.write(request.POST[fileKey][22:].decode('base64'))
except:
dest.write(request.POST[fileKey].decode('base64'))
dest.close()
#return HttpResponse("OK")
return HttpResponse(filename)
return HttpResponse("ERROR")
|
# Don't import unicode_literals because of a bug in py2 setuptools
# where package_data is expected to be str and not unicode.
from __future__ import absolute_import, division, print_function
# Ensure setuptools is available
import sys
try:
from ez_setup import use_setuptools
use_setuptools()
except ImportError:
# Try to use ez_setup, but if not, continue anyway. The import is known
# to fail when installing from a tar.gz.
print('Could not import ez_setup', file=sys.stderr)
from setuptools import setup
install_reqs = ['urllib3',
'requests>=2.5.1',
'six>=1.3.0']
assert sys.version_info >= (2, 6), "We only support Python 2.6+"
with open('LICENSE') as f:
license = f.read()
dist = setup(
name='dropbox',
version='3.27',
description='Official Dropbox API Client',
author='Dropbox',
author_email='[email protected]',
url='http://www.dropbox.com/developers',
install_requires=install_reqs,
license=license,
zip_safe=False,
packages=['dropbox'],
package_data={'dropbox': ['trusted-certs.crt']},
platforms=['CPython 2.6', 'CPython 2.7'],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Operating System :: POSIX',
'Operating System :: Microsoft :: Windows',
'Operating System :: MacOS :: MacOS X',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
|
from JumpScale import j
import socket
import time
# import urllib.request, urllib.parse, urllib.error
try:
import urllib.request
import urllib.parse
import urllib.error
except:
import urllib.parse as urllib
class GraphiteClient:
def __init__(self):
self.__jslocation__ = "j.clients.graphite"
self._SERVER = '127.0.0.1'
self._CARBON_PORT = 2003
self._GRAPHITE_PORT = 8081
self._url = "http://%s:%s/render" % (self._SERVER, self._GRAPHITE_PORT)
# self.sock.connect((self.CARBON_SERVER, self.CARBON_PORT))
def send(self, msg):
"""
e.g. foo.bar.baz 20
"""
out = ""
for line in msg.split("\n"):
out += '%s %d\n' % (line, int(time.time()))
sock = socket.socket()
sock.connect((self._SERVER, self._CARBON_PORT))
sock.sendall(out)
sock.close()
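# Example (hedged; assuming JumpScale exposes this client at its __jslocation__):
# j.clients.graphite.send("foo.bar.baz 20")
# Each metric line is suffixed with the current unix timestamp before being written to carbon.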
def close(self):
pass
def query(self, query_=None, **kwargs):
import requests
query = query_.copy() if query_ else dict()
query.update(kwargs)
query['format'] = 'json'
if 'from_' in query:
query['from'] = query.pop('from_')
qs = urllib.parse.urlencode(query)
url = "%s?%s" % (self._url, qs)
return requests.get(url).json()
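# Example (hedged): query({'target': 'foo.bar.baz', 'from_': '-1h'}) calls the render API at
# http://127.0.0.1:8081/render with target=foo.bar.baz, from=-1h and format=json,
# and returns the parsed JSON response.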
|
#!/usr/bin/python
## Math
import numpy as np
## Display
import pygame
import time
import math
## Ros
import rospy
from tf import transformations as tf_trans
## Ros Msgs
from std_msgs.msg import Header, Float64, Int64
from ieee2015_end_effector_servos.msg import Num
from geometry_msgs.msg import Point, PointStamped, PoseStamped, Pose, Quaternion
from dynamixel_msgs.msg import JointState
to_radians_one = 512
to_radians_two = 512
side_control = 1
large_control = 1
small_control = 1
past_location_one = 0
past_location_two = 0
SCREEN_DIM = (750, 750)
ORIGIN = np.array([SCREEN_DIM[0]/2.0, SCREEN_DIM[1]/2.0])
def round_point((x, y)):
'''Round and change point to centered coordinate system'''
return map(int, ((1000 * x) + ORIGIN[0], -(1000 * y) + ORIGIN[1]))
def unround_point((x, y)):
'''Change center-origin coordinates to pygame coordinates'''
return ((x - ORIGIN[0])/1000.0, (-y + ORIGIN[1])/1000.0)
def to_degrees(param):
if param < 0:
temp = 180 - math.fabs(param)
temp2 = temp * 2
return math.fabs(param) + temp2
else:
return param
def check_size(param, servo):
global past_location_one
global past_location_two
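# 3.405 is roughly 1023 / 300: presumably a conversion from a 0-300 degree angle to the
# 0-1023 position range of the servo; values beyond that range fall back to the last stored position below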
temp = int(param * 3.405)
if temp < 1023:
if servo == 1:
past_location_one = temp
if servo == 2:
past_location_two = temp
return temp
if temp > 1024:
if servo == 1:
return past_location_one
if servo == 2:
return past_location_two
class END(object):
def __init__(self):
rospy.init_node('SCARA_simulator')
self.base = np.array([0.0, 0.0], np.float32)
self.point = np.array([0.0, 0.0], np.float32)
self.point_two = np.array([0.0, 0.0], np.float32)
self.starting = np.array([2, -3.5], np.float32)
self.starting_two = np.array([-2, -3.5], np.float32)
self.starting_three = np.array([0, 1], np.float32)
self.desired_pos = rospy.Subscriber('/end_des_pose', PointStamped, self.got_des_pose)
self.desired_pos_two = rospy.Subscriber('/end_des_pose_two', PointStamped, self.got_des_pose_two)
    def got_des_pose(self, msg):
        '''Received desired large-servo pose; convert it to a servo position and publish'''
        self.point = (msg.point.x, msg.point.y)
        global to_radians_one
        # atan2 returns radians; convert to degrees before scaling to servo
        # units, matching the small-servo handler below
        to_radians_one = math.atan2(msg.point.y, msg.point.x) * (180 / np.pi)
        degrees_one = to_degrees(to_radians_one)
        xl_format = check_size(degrees_one, 1)
        to_radians_one = xl_format
        print "TARGETING POSITION: ({}, {})".format(*self.point)
        print "LARGE SERVO POSITION: ", degrees_one, "degrees"
        print "LARGE SERVO POSITION (servo units): ", xl_format
        base_pub = rospy.Publisher('/ieee2015_end_effector_servos', Num, queue_size=1)
        base_pub.publish(side_control, to_radians_one, to_radians_two, large_control, small_control)
    def got_des_pose_two(self, msg):
        '''Received desired small-servo pose; convert it to a servo position and publish'''
        self.point = (msg.point.x, msg.point.y)
        global to_radians_two
        to_radians_two = math.atan2(msg.point.y, msg.point.x) * (180 / np.pi) + 60
        degrees_two = to_degrees(to_radians_two)
        xl_format = check_size(degrees_two, 2)
        to_radians_two = xl_format
        print "TARGETING POSITION: ({}, {})".format(*self.point)
        print "SMALL SERVO POSITION: ", degrees_two, "degrees"
        print "SMALL SERVO POSITION (servo units): ", xl_format
        base_pub = rospy.Publisher('/ieee2015_end_effector_servos', Num, queue_size=1)
        # Publish the full command like the large-servo handler so no fields are dropped
        base_pub.publish(side_control, to_radians_one, to_radians_two, large_control, small_control)
def draw(self, display, new_base=(0, 0)):
'''Draw the whole arm'''
# Update positions given current
pygame.draw.circle(display, (255, 255, 50), round_point(self.base), int(300), 2)
pygame.draw.line(display, (255, 162, 0), round_point(self.base), round_point(self.point), 3)
pygame.draw.line(display, (255, 130, 0), round_point(self.base), round_point(self.point_two), 3)
pygame.draw.line(display, (255, 255, 255), round_point(self.base), round_point(self.starting), 1)
pygame.draw.line(display, (255, 255, 255), round_point(self.base), round_point(self.starting_two), 1)
pygame.draw.line(display, (255, 255, 255), round_point(self.base), round_point(self.starting_three), 1)
def main():
'''In principle, we can support an arbitrary number of servos in simulation'''
end_one = [END()]
global side_control
global large_control
global small_control
display = pygame.display.set_mode(SCREEN_DIM)
des_pose_pub_end = rospy.Publisher('/end_des_pose', PointStamped, queue_size=1)
des_pose_pub_end_two = rospy.Publisher('/end_des_pose_two', PointStamped, queue_size=1)
def publish_des_pos_end(pos):
'''Publish desired position of the arm end-effector based on click position'''
des_pose_pub_end.publish(
PointStamped(
header = Header(
stamp=rospy.Time.now(),
frame_id='/robot',
),
point=Point(
x=pos[0],
y=pos[1],
z=0
)
)
)
def publish_des_pos_two(pos):
des_pose_pub_end_two.publish(
PointStamped(
header = Header(
stamp=rospy.Time.now(),
frame_id='/robot',
),
point=Point(
x=pos[0],
y=pos[1],
z=0
)
)
)
clock = pygame.time.Clock()
while not rospy.is_shutdown():
for event in pygame.event.get():
if event.type == pygame.QUIT:
return
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_a:
                    pt = pygame.mouse.get_pos()
                    publish_des_pos_end(unround_point(pt))
                elif event.key == pygame.K_s:
                    pt = pygame.mouse.get_pos()
                    publish_des_pos_two(unround_point(pt))
                elif event.key == pygame.K_z:
                    side_control = 1
                    print "CONTROL MODE: Wheel"
                elif event.key == pygame.K_x:
                    side_control = 2
                    print "CONTROL MODE: Angle"
                elif event.key == pygame.K_q:
                    large_control = 1
                elif event.key == pygame.K_w:
                    large_control = 2
                elif event.key == pygame.K_e:
                    small_control = 1
                elif event.key == pygame.K_r:
                    small_control = 2
t = time.time()
for arm in end_one:
arm.draw(display)
pygame.display.update()
clock.tick(20)
display.fill((0, 0, 0))
if __name__ == '__main__':
main()
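# Example (sketch; assumes a running roscore plus the ieee2015_end_effector_servos
# and dynamixel_msgs packages on the ROS path, and that this file is saved as an
# executable node, e.g. end_effector_sim.py -- the filename and package name
# below are placeholders):
#
#   rosrun <your_package> end_effector_sim.py
#
# Click in the pygame window and press 'a' or 's' to publish the mouse position
# as a desired pose on /end_des_pose or /end_des_pose_two; z/x switch
# side_control and q/w/e/r switch the large/small control modes.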
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.dialogflowcx_v3.types import flow
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
__protobuf__ = proto.module(
package='google.cloud.dialogflow.cx.v3',
manifest={
'CreateVersionOperationMetadata',
'Version',
'ListVersionsRequest',
'ListVersionsResponse',
'GetVersionRequest',
'CreateVersionRequest',
'UpdateVersionRequest',
'DeleteVersionRequest',
'LoadVersionRequest',
},
)
class CreateVersionOperationMetadata(proto.Message):
r"""Metadata associated with the long running operation for
[Versions.CreateVersion][google.cloud.dialogflow.cx.v3.Versions.CreateVersion].
Attributes:
version (str):
Name of the created version. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/flows/<Flow ID>/versions/<Version ID>``.
"""
version = proto.Field(
proto.STRING,
number=1,
)
class Version(proto.Message):
r"""Represents a version of a flow.
Attributes:
name (str):
Format: projects/<Project
ID>/locations/<Location ID>/agents/<Agent
ID>/flows/<Flow ID>/versions/<Version ID>.
Version ID is a self-increasing number generated
by Dialogflow upon version creation.
display_name (str):
Required. The human-readable name of the
version. Limit of 64 characters.
description (str):
The description of the version. The maximum
length is 500 characters. If exceeded, the
request is rejected.
nlu_settings (google.cloud.dialogflowcx_v3.types.NluSettings):
Output only. The NLU settings of the flow at
version creation.
create_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Create time of the version.
state (google.cloud.dialogflowcx_v3.types.Version.State):
Output only. The state of this version. This
field is read-only and cannot be set by create
and update methods.
"""
class State(proto.Enum):
r"""The state of the version."""
STATE_UNSPECIFIED = 0
RUNNING = 1
SUCCEEDED = 2
FAILED = 3
name = proto.Field(
proto.STRING,
number=1,
)
display_name = proto.Field(
proto.STRING,
number=2,
)
description = proto.Field(
proto.STRING,
number=3,
)
nlu_settings = proto.Field(
proto.MESSAGE,
number=4,
message=flow.NluSettings,
)
create_time = proto.Field(
proto.MESSAGE,
number=5,
message=timestamp_pb2.Timestamp,
)
state = proto.Field(
proto.ENUM,
number=6,
enum=State,
)
class ListVersionsRequest(proto.Message):
r"""The request message for
[Versions.ListVersions][google.cloud.dialogflow.cx.v3.Versions.ListVersions].
Attributes:
parent (str):
Required. The [Flow][google.cloud.dialogflow.cx.v3.Flow] to
list all versions for. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/flows/<Flow ID>``.
page_size (int):
The maximum number of items to return in a
single page. By default 20 and at most 100.
page_token (str):
The next_page_token value returned from a previous list
request.
"""
parent = proto.Field(
proto.STRING,
number=1,
)
page_size = proto.Field(
proto.INT32,
number=2,
)
page_token = proto.Field(
proto.STRING,
number=3,
)
class ListVersionsResponse(proto.Message):
r"""The response message for
[Versions.ListVersions][google.cloud.dialogflow.cx.v3.Versions.ListVersions].
Attributes:
versions (Sequence[google.cloud.dialogflowcx_v3.types.Version]):
A list of versions. There will be a maximum number of items
returned based on the page_size field in the request. The
list may in some cases be empty or contain fewer entries
than page_size even if this isn't the last page.
next_page_token (str):
Token to retrieve the next page of results,
or empty if there are no more results in the
list.
"""
@property
def raw_page(self):
return self
versions = proto.RepeatedField(
proto.MESSAGE,
number=1,
message='Version',
)
next_page_token = proto.Field(
proto.STRING,
number=2,
)
class GetVersionRequest(proto.Message):
r"""The request message for
[Versions.GetVersion][google.cloud.dialogflow.cx.v3.Versions.GetVersion].
Attributes:
name (str):
Required. The name of the
[Version][google.cloud.dialogflow.cx.v3.Version]. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/flows/<Flow ID>/versions/<Version ID>``.
"""
name = proto.Field(
proto.STRING,
number=1,
)
class CreateVersionRequest(proto.Message):
r"""The request message for
[Versions.CreateVersion][google.cloud.dialogflow.cx.v3.Versions.CreateVersion].
Attributes:
parent (str):
Required. The [Flow][google.cloud.dialogflow.cx.v3.Flow] to
            create a [Version][google.cloud.dialogflow.cx.v3.Version]
for. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/flows/<Flow ID>``.
version (google.cloud.dialogflowcx_v3.types.Version):
Required. The version to create.
"""
parent = proto.Field(
proto.STRING,
number=1,
)
version = proto.Field(
proto.MESSAGE,
number=2,
message='Version',
)
class UpdateVersionRequest(proto.Message):
r"""The request message for
[Versions.UpdateVersion][google.cloud.dialogflow.cx.v3.Versions.UpdateVersion].
Attributes:
version (google.cloud.dialogflowcx_v3.types.Version):
Required. The version to update.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Required. The mask to control which fields get updated.
Currently only ``description`` and ``display_name`` can be
updated.
"""
version = proto.Field(
proto.MESSAGE,
number=1,
message='Version',
)
update_mask = proto.Field(
proto.MESSAGE,
number=2,
message=field_mask_pb2.FieldMask,
)
class DeleteVersionRequest(proto.Message):
r"""The request message for
[Versions.DeleteVersion][google.cloud.dialogflow.cx.v3.Versions.DeleteVersion].
Attributes:
name (str):
Required. The name of the
[Version][google.cloud.dialogflow.cx.v3.Version] to delete.
Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/flows/<Flow ID>/versions/<Version ID>``.
"""
name = proto.Field(
proto.STRING,
number=1,
)
class LoadVersionRequest(proto.Message):
r"""The request message for
[Versions.LoadVersion][google.cloud.dialogflow.cx.v3.Versions.LoadVersion].
Attributes:
name (str):
Required. The
[Version][google.cloud.dialogflow.cx.v3.Version] to be
loaded to draft flow. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/flows/<Flow ID>/versions/<Version ID>``.
allow_override_agent_resources (bool):
This field is used to prevent accidental overwrite of other
agent resources, which can potentially impact other flow's
behavior. If ``allow_override_agent_resources`` is false,
conflicted agent-level resources will not be overridden
(i.e. intents, entities, webhooks).
"""
name = proto.Field(
proto.STRING,
number=1,
)
allow_override_agent_resources = proto.Field(
proto.BOOL,
number=2,
)
__all__ = tuple(sorted(__protobuf__.manifest))
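# Example usage (sketch; assumes the google-cloud-dialogflow-cx client library
# is installed and application default credentials are configured -- the
# project, location, agent and flow IDs below are placeholders):
#
#   from google.cloud import dialogflowcx_v3
#
#   client = dialogflowcx_v3.VersionsClient()
#   request = dialogflowcx_v3.ListVersionsRequest(
#       parent="projects/<Project ID>/locations/<Location ID>"
#              "/agents/<Agent ID>/flows/<Flow ID>",
#   )
#   for version in client.list_versions(request=request):
#       print(version.display_name)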
|