repo_name | path | copies | size | content | license
---|---|---|---|---|---
ChristianKniep/QNIB | serverfiles/usr/local/lib/networkx-1.6/build/lib/networkx/algorithms/tests/test_product.py | 3 | 10011 | import networkx as nx
from networkx import tensor_product,cartesian_product,lexicographic_product,strong_product
from nose.tools import assert_raises, assert_true, assert_equal
def test_tensor_product_raises():
G = nx.DiGraph()
H = nx.Graph()
assert_raises(nx.NetworkXError,tensor_product,G,H)
def test_tensor_product_null():
null=nx.null_graph()
empty10=nx.empty_graph(10)
K3=nx.complete_graph(3)
K10=nx.complete_graph(10)
P3=nx.path_graph(3)
P10=nx.path_graph(10)
# null graph
G=tensor_product(null,null)
assert_true(nx.is_isomorphic(G,null))
# null_graph X anything = null_graph and v.v.
G=tensor_product(null,empty10)
assert_true(nx.is_isomorphic(G,null))
G=tensor_product(null,K3)
assert_true(nx.is_isomorphic(G,null))
G=tensor_product(null,K10)
assert_true(nx.is_isomorphic(G,null))
G=tensor_product(null,P3)
assert_true(nx.is_isomorphic(G,null))
G=tensor_product(null,P10)
assert_true(nx.is_isomorphic(G,null))
G=tensor_product(empty10,null)
assert_true(nx.is_isomorphic(G,null))
G=tensor_product(K3,null)
assert_true(nx.is_isomorphic(G,null))
G=tensor_product(K10,null)
assert_true(nx.is_isomorphic(G,null))
G=tensor_product(P3,null)
assert_true(nx.is_isomorphic(G,null))
G=tensor_product(P10,null)
assert_true(nx.is_isomorphic(G,null))
def test_tensor_product_size():
P5 = nx.path_graph(5)
K3 = nx.complete_graph(3)
K5 = nx.complete_graph(5)
G=tensor_product(P5,K3)
assert_equal(nx.number_of_nodes(G),5*3)
G=tensor_product(K3,K5)
assert_equal(nx.number_of_nodes(G),3*5)
def test_tensor_product_classic_result():
K2 = nx.complete_graph(2)
G = nx.petersen_graph()
G = tensor_product(G,K2)
assert_true(nx.is_isomorphic(G,nx.desargues_graph()))
G = nx.cycle_graph(5)
G = tensor_product(G,K2)
assert_true(nx.is_isomorphic(G,nx.cycle_graph(10)))
G = nx.tetrahedral_graph()
G = tensor_product(G,K2)
assert_true(nx.is_isomorphic(G,nx.cubical_graph()))
def test_tensor_product_random():
G = nx.erdos_renyi_graph(10,2/10.)
H = nx.erdos_renyi_graph(10,2/10.)
GH = tensor_product(G,H)
for (u_G,u_H) in GH.nodes_iter():
for (v_G,v_H) in GH.nodes_iter():
if H.has_edge(u_H,v_H) and G.has_edge(u_G,v_G):
assert_true(GH.has_edge((u_G,u_H),(v_G,v_H)))
else:
assert_true(not GH.has_edge((u_G,u_H),(v_G,v_H)))
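# Illustrative note (not part of the original test suite): the edge rule
# checked above requires both coordinates to change. For example, if P2 is
# the single edge 0-1, tensor_product(P2, P2) has nodes
# (0,0),(0,1),(1,0),(1,1) and exactly two edges, ((0,0),(1,1)) and
# ((0,1),(1,0)), i.e. two disjoint edges.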
def test_cartesian_product_multigraph():
G=nx.MultiGraph()
G.add_edge(1,2,key=0)
G.add_edge(1,2,key=1)
H=nx.MultiGraph()
H.add_edge(3,4,key=0)
H.add_edge(3,4,key=1)
GH=cartesian_product(G,H)
assert_equal( set(GH) , set([(1, 3), (2, 3), (2, 4), (1, 4)]))
assert_equal( set(GH.edges(keys=True)) ,
set([((1, 3), (2, 3), 0), ((1, 3), (2, 3), 1),
((1, 3), (1, 4), 0), ((1, 3), (1, 4), 1),
((2, 3), (2, 4), 0), ((2, 3), (2, 4), 1),
((2, 4), (1, 4), 0), ((2, 4), (1, 4), 1)]))
def test_cartesian_product_raises():
G = nx.DiGraph()
H = nx.Graph()
assert_raises(nx.NetworkXError,cartesian_product,G,H)
def test_cartesian_product_null():
null=nx.null_graph()
empty10=nx.empty_graph(10)
K3=nx.complete_graph(3)
K10=nx.complete_graph(10)
P3=nx.path_graph(3)
P10=nx.path_graph(10)
# null graph
G=cartesian_product(null,null)
assert_true(nx.is_isomorphic(G,null))
# null_graph X anything = null_graph and v.v.
G=cartesian_product(null,empty10)
assert_true(nx.is_isomorphic(G,null))
G=cartesian_product(null,K3)
assert_true(nx.is_isomorphic(G,null))
G=cartesian_product(null,K10)
assert_true(nx.is_isomorphic(G,null))
G=cartesian_product(null,P3)
assert_true(nx.is_isomorphic(G,null))
G=cartesian_product(null,P10)
assert_true(nx.is_isomorphic(G,null))
G=cartesian_product(empty10,null)
assert_true(nx.is_isomorphic(G,null))
G=cartesian_product(K3,null)
assert_true(nx.is_isomorphic(G,null))
G=cartesian_product(K10,null)
assert_true(nx.is_isomorphic(G,null))
G=cartesian_product(P3,null)
assert_true(nx.is_isomorphic(G,null))
G=cartesian_product(P10,null)
assert_true(nx.is_isomorphic(G,null))
def test_cartesian_product_size():
# order(GXH)=order(G)*order(H)
K5=nx.complete_graph(5)
P5=nx.path_graph(5)
K3=nx.complete_graph(3)
G=cartesian_product(P5,K3)
assert_equal(nx.number_of_nodes(G),5*3)
assert_equal(nx.number_of_edges(G),
nx.number_of_edges(P5)*nx.number_of_nodes(K3)+
nx.number_of_edges(K3)*nx.number_of_nodes(P5))
G=cartesian_product(K3,K5)
assert_equal(nx.number_of_nodes(G),3*5)
assert_equal(nx.number_of_edges(G),
nx.number_of_edges(K5)*nx.number_of_nodes(K3)+
nx.number_of_edges(K3)*nx.number_of_nodes(K5))
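# Quick arithmetic check of the identity used above (illustrative, not part
# of the original tests): for cartesian_product(P5, K3) the expected edge
# count is |E(P5)|*|V(K3)| + |E(K3)|*|V(P5)| = 4*3 + 3*5 = 27.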
def test_cartesian_product_classic():
# test some classic product graphs
P2 = nx.path_graph(2)
P3 = nx.path_graph(3)
# cube = 2-path X 2-path
G=cartesian_product(P2,P2)
G=cartesian_product(P2,G)
assert_true(nx.is_isomorphic(G,nx.cubical_graph()))
# 3x3 grid
G=cartesian_product(P3,P3)
assert_true(nx.is_isomorphic(G,nx.grid_2d_graph(3,3)))
def test_cartesian_product_random():
G = nx.erdos_renyi_graph(10,2/10.)
H = nx.erdos_renyi_graph(10,2/10.)
GH = cartesian_product(G,H)
for (u_G,u_H) in GH.nodes_iter():
for (v_G,v_H) in GH.nodes_iter():
if (u_G==v_G and H.has_edge(u_H,v_H)) or \
(u_H==v_H and G.has_edge(u_G,v_G)):
assert_true(GH.has_edge((u_G,u_H),(v_G,v_H)))
else:
assert_true(not GH.has_edge((u_G,u_H),(v_G,v_H)))
def test_lexicographic_product_raises():
G = nx.DiGraph()
H = nx.Graph()
assert_raises(nx.NetworkXError,lexicographic_product,G,H)
def test_lexicographic_product_null():
null=nx.null_graph()
empty10=nx.empty_graph(10)
K3=nx.complete_graph(3)
K10=nx.complete_graph(10)
P3=nx.path_graph(3)
P10=nx.path_graph(10)
# null graph
G=lexicographic_product(null,null)
assert_true(nx.is_isomorphic(G,null))
# null_graph X anything = null_graph and v.v.
G=lexicographic_product(null,empty10)
assert_true(nx.is_isomorphic(G,null))
G=lexicographic_product(null,K3)
assert_true(nx.is_isomorphic(G,null))
G=lexicographic_product(null,K10)
assert_true(nx.is_isomorphic(G,null))
G=lexicographic_product(null,P3)
assert_true(nx.is_isomorphic(G,null))
G=lexicographic_product(null,P10)
assert_true(nx.is_isomorphic(G,null))
G=lexicographic_product(empty10,null)
assert_true(nx.is_isomorphic(G,null))
G=lexicographic_product(K3,null)
assert_true(nx.is_isomorphic(G,null))
G=lexicographic_product(K10,null)
assert_true(nx.is_isomorphic(G,null))
G=lexicographic_product(P3,null)
assert_true(nx.is_isomorphic(G,null))
G=lexicographic_product(P10,null)
assert_true(nx.is_isomorphic(G,null))
def test_lexicographic_product_size():
K5=nx.complete_graph(5)
P5=nx.path_graph(5)
K3=nx.complete_graph(3)
G=lexicographic_product(P5,K3)
assert_equal(nx.number_of_nodes(G),5*3)
G=lexicographic_product(K3,K5)
assert_equal(nx.number_of_nodes(G),3*5)
# No easily found classic results for the lexicographic product
def test_lexicographic_product_random():
G = nx.erdos_renyi_graph(10,2/10.)
H = nx.erdos_renyi_graph(10,2/10.)
GH = lexicographic_product(G,H)
for (u_G,u_H) in GH.nodes_iter():
for (v_G,v_H) in GH.nodes_iter():
if G.has_edge(u_G,v_G) or (u_G==v_G and H.has_edge(u_H,v_H)):
assert_true(GH.has_edge((u_G,u_H),(v_G,v_H)))
else:
assert_true(not GH.has_edge((u_G,u_H),(v_G,v_H)))
def test_strong_product_raises():
G = nx.DiGraph()
H = nx.Graph()
assert_raises(nx.NetworkXError,strong_product,G,H)
def test_strong_product_null():
null=nx.null_graph()
empty10=nx.empty_graph(10)
K3=nx.complete_graph(3)
K10=nx.complete_graph(10)
P3=nx.path_graph(3)
P10=nx.path_graph(10)
# null graph
G=strong_product(null,null)
assert_true(nx.is_isomorphic(G,null))
# null_graph X anything = null_graph and v.v.
G=strong_product(null,empty10)
assert_true(nx.is_isomorphic(G,null))
G=strong_product(null,K3)
assert_true(nx.is_isomorphic(G,null))
G=strong_product(null,K10)
assert_true(nx.is_isomorphic(G,null))
G=strong_product(null,P3)
assert_true(nx.is_isomorphic(G,null))
G=strong_product(null,P10)
assert_true(nx.is_isomorphic(G,null))
G=strong_product(empty10,null)
assert_true(nx.is_isomorphic(G,null))
G=strong_product(K3,null)
assert_true(nx.is_isomorphic(G,null))
G=strong_product(K10,null)
assert_true(nx.is_isomorphic(G,null))
G=strong_product(P3,null)
assert_true(nx.is_isomorphic(G,null))
G=strong_product(P10,null)
assert_true(nx.is_isomorphic(G,null))
def test_strong_product_size():
K5=nx.complete_graph(5)
P5=nx.path_graph(5)
K3 = nx.complete_graph(3)
G=strong_product(P5,K3)
assert_equal(nx.number_of_nodes(G),5*3)
G=strong_product(K3,K5)
assert_equal(nx.number_of_nodes(G),3*5)
# No easily found classic results for the strong product
def test_strong_product_random():
G = nx.erdos_renyi_graph(10,2/10.)
H = nx.erdos_renyi_graph(10,2/10.)
GH = strong_product(G,H)
for (u_G,u_H) in GH.nodes_iter():
for (v_G,v_H) in GH.nodes_iter():
if (u_G==v_G and H.has_edge(u_H,v_H)) or \
(u_H==v_H and G.has_edge(u_G,v_G)) or \
(G.has_edge(u_G,v_G) and H.has_edge(u_H,v_H)):
assert_true(GH.has_edge((u_G,u_H),(v_G,v_H)))
else:
assert_true(not GH.has_edge((u_G,u_H),(v_G,v_H)))
| gpl-2.0 |
tafaRU/odoo | addons/l10n_multilang/__init__.py | 438 | 1082 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account
import l10n_multilang
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
shiora/The-Perfect-Pokemon-Team-Balancer | libs/env/Lib/site-packages/whoosh/lang/porter2.py | 117 | 8314 | """An implementation of the Porter2 stemming algorithm.
See http://snowball.tartarus.org/algorithms/english/stemmer.html
Adapted from pyporter2 by Michael Dirolf.
This algorithm is more correct but (at least in this implementation)
several times slower than the original porter algorithm as implemented
in stemming.porter.
"""
import re
r_exp = re.compile(r"[^aeiouy]*[aeiouy]+[^aeiouy](\w*)")
ewss_exp1 = re.compile(r"^[aeiouy][^aeiouy]$")
ewss_exp2 = re.compile(r".*[^aeiouy][aeiouy][^aeiouywxY]$")
ccy_exp = re.compile(r"([aeiouy])y")
s1a_exp = re.compile(r"[aeiouy].")
s1b_exp = re.compile(r"[aeiouy]")
def get_r1(word):
# exceptional forms
if word.startswith('gener') or word.startswith('arsen'):
return 5
if word.startswith('commun'):
return 6
# normal form
match = r_exp.match(word)
if match:
return match.start(1)
return len(word)
def get_r2(word):
match = r_exp.match(word, get_r1(word))
if match:
return match.start(1)
return len(word)
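# Illustrative example (the word is the one used in the Snowball R1/R2
# definition and is assumed to behave the same way here): for "beautiful",
# get_r1() returns 5 (region "iful") and get_r2() returns 7 (region "ul").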
def ends_with_short_syllable(word):
if len(word) == 2:
if ewss_exp1.match(word):
return True
if ewss_exp2.match(word):
return True
return False
def is_short_word(word):
if ends_with_short_syllable(word):
if get_r1(word) == len(word):
return True
return False
def remove_initial_apostrophe(word):
if word.startswith("'"):
return word[1:]
return word
def capitalize_consonant_ys(word):
if word.startswith('y'):
word = 'Y' + word[1:]
return ccy_exp.sub(r'\g<1>Y', word)
def step_0(word):
if word.endswith("'s'"):
return word[:-3]
if word.endswith("'s"):
return word[:-2]
if word.endswith("'"):
return word[:-1]
return word
def step_1a(word):
if word.endswith('sses'):
return word[:-4] + 'ss'
if word.endswith('ied') or word.endswith('ies'):
if len(word) > 4:
return word[:-3] + 'i'
else:
return word[:-3] + 'ie'
if word.endswith('us') or word.endswith('ss'):
return word
if word.endswith('s'):
preceding = word[:-1]
if s1a_exp.search(preceding):
return preceding
return word
return word
doubles = ('bb', 'dd', 'ff', 'gg', 'mm', 'nn', 'pp', 'rr', 'tt')
def ends_with_double(word):
for double in doubles:
if word.endswith(double):
return True
return False
def step_1b_helper(word):
if word.endswith('at') or word.endswith('bl') or word.endswith('iz'):
return word + 'e'
if ends_with_double(word):
return word[:-1]
if is_short_word(word):
return word + 'e'
return word
s1b_suffixes = ('ed', 'edly', 'ing', 'ingly')
def step_1b(word, r1):
if word.endswith('eedly'):
if len(word) - 5 >= r1:
return word[:-3]
return word
if word.endswith('eed'):
if len(word) - 3 >= r1:
return word[:-1]
return word
for suffix in s1b_suffixes:
if word.endswith(suffix):
preceding = word[:-len(suffix)]
if s1b_exp.search(preceding):
return step_1b_helper(preceding)
return word
return word
def step_1c(word):
if word.endswith('y') or word.endswith('Y') and len(word) > 1:
if word[-2] not in 'aeiouy':
if len(word) > 2:
return word[:-1] + 'i'
return word
def step_2_helper(word, r1, end, repl, prev):
if word.endswith(end):
if len(word) - len(end) >= r1:
if prev == []:
return word[:-len(end)] + repl
for p in prev:
if word[:-len(end)].endswith(p):
return word[:-len(end)] + repl
return word
return None
s2_triples = (('ization', 'ize', []),
('ational', 'ate', []),
('fulness', 'ful', []),
('ousness', 'ous', []),
('iveness', 'ive', []),
('tional', 'tion', []),
('biliti', 'ble', []),
('lessli', 'less', []),
('entli', 'ent', []),
('ation', 'ate', []),
('alism', 'al', []),
('aliti', 'al', []),
('ousli', 'ous', []),
('iviti', 'ive', []),
('fulli', 'ful', []),
('enci', 'ence', []),
('anci', 'ance', []),
('abli', 'able', []),
('izer', 'ize', []),
('ator', 'ate', []),
('alli', 'al', []),
('bli', 'ble', []),
('ogi', 'og', ['l']),
('li', '', ['c', 'd', 'e', 'g', 'h', 'k', 'm', 'n', 'r', 't']))
def step_2(word, r1):
for trip in s2_triples:
attempt = step_2_helper(word, r1, trip[0], trip[1], trip[2])
if attempt:
return attempt
return word
def step_3_helper(word, r1, r2, end, repl, r2_necessary):
if word.endswith(end):
if len(word) - len(end) >= r1:
if not r2_necessary:
return word[:-len(end)] + repl
else:
if len(word) - len(end) >= r2:
return word[:-len(end)] + repl
return word
return None
s3_triples = (('ational', 'ate', False),
('tional', 'tion', False),
('alize', 'al', False),
('icate', 'ic', False),
('iciti', 'ic', False),
('ative', '', True),
('ical', 'ic', False),
('ness', '', False),
('ful', '', False))
def step_3(word, r1, r2):
for trip in s3_triples:
attempt = step_3_helper(word, r1, r2, trip[0], trip[1], trip[2])
if attempt:
return attempt
return word
s4_delete_list = ('al', 'ance', 'ence', 'er', 'ic', 'able', 'ible', 'ant', 'ement',
'ment', 'ent', 'ism', 'ate', 'iti', 'ous', 'ive', 'ize')
def step_4(word, r2):
for end in s4_delete_list:
if word.endswith(end):
if len(word) - len(end) >= r2:
return word[:-len(end)]
return word
if word.endswith('sion') or word.endswith('tion'):
if len(word) - 3 >= r2:
return word[:-3]
return word
def step_5(word, r1, r2):
if word.endswith('l'):
if len(word) - 1 >= r2 and word[-2] == 'l':
return word[:-1]
return word
if word.endswith('e'):
if len(word) - 1 >= r2:
return word[:-1]
if len(word) - 1 >= r1 and not ends_with_short_syllable(word[:-1]):
return word[:-1]
return word
def normalize_ys(word):
return word.replace('Y', 'y')
exceptional_forms = {'skis': 'ski',
'skies': 'sky',
'dying': 'die',
'lying': 'lie',
'tying': 'tie',
'idly': 'idl',
'gently': 'gentl',
'ugly': 'ugli',
'early': 'earli',
'only': 'onli',
'singly': 'singl',
'sky': 'sky',
'news': 'news',
'howe': 'howe',
'atlas': 'atlas',
'cosmos': 'cosmos',
'bias': 'bias',
'andes': 'andes'}
exceptional_early_exit_post_1a = frozenset(['inning', 'outing', 'canning', 'herring',
'earring', 'proceed', 'exceed', 'succeed'])
def stem(word):
if len(word) <= 2:
return word
word = remove_initial_apostrophe(word)
# handle some exceptional forms
if word in exceptional_forms:
return exceptional_forms[word]
word = capitalize_consonant_ys(word)
r1 = get_r1(word)
r2 = get_r2(word)
word = step_0(word)
word = step_1a(word)
# handle some more exceptional forms
if word in exceptional_early_exit_post_1a:
return word
word = step_1b(word, r1)
word = step_1c(word)
word = step_2(word, r1)
word = step_3(word, r1, r2)
word = step_4(word, r2)
word = step_5(word, r1, r2)
word = normalize_ys(word)
return word
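# A minimal usage sketch, added for illustration only; it is not part of the
# original module and the sample words are arbitrary.
if __name__ == "__main__":
    for sample in ("caresses", "ponies", "running", "generously"):
        # Print each word next to the stem this implementation produces.
        print("%s -> %s" % (sample, stem(sample)))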
| gpl-2.0 |
nxnfufunezn/servo | tests/wpt/harness/wptrunner/wptmanifest/tests/test_static.py | 139 | 2863 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import unittest
from cStringIO import StringIO
from ..backends import static
# There aren't many tests here because it turns out to be way more convenient to
# use test_serializer for the majority of cases
class TestStatic(unittest.TestCase):
def parse(self, input_str):
return self.parser.parse(StringIO(input_str))
def compile(self, input_text, input_data):
return static.compile(input_text, input_data)
def test_get_0(self):
data = """
key: value
[Heading 1]
other_key:
if a == 1: value_1
if a == 2: value_2
value_3
"""
manifest = self.compile(data, {"a": 2})
self.assertEquals(manifest.get("key"), "value")
children = list(item for item in manifest.iterchildren())
self.assertEquals(len(children), 1)
section = children[0]
self.assertEquals(section.name, "Heading 1")
self.assertEquals(section.get("other_key"), "value_2")
self.assertEquals(section.get("key"), "value")
def test_get_1(self):
data = """
key: value
[Heading 1]
other_key:
if a == 1: value_1
if a == 2: value_2
value_3
"""
manifest = self.compile(data, {"a": 3})
children = list(item for item in manifest.iterchildren())
section = children[0]
self.assertEquals(section.get("other_key"), "value_3")
def test_get_3(self):
data = """key:
if a == "1": value_1
if a[0] == "ab"[0]: value_2
"""
manifest = self.compile(data, {"a": "1"})
self.assertEquals(manifest.get("key"), "value_1")
manifest = self.compile(data, {"a": "ac"})
self.assertEquals(manifest.get("key"), "value_2")
def test_get_4(self):
data = """key:
if not a: value_1
value_2
"""
manifest = self.compile(data, {"a": True})
self.assertEquals(manifest.get("key"), "value_2")
manifest = self.compile(data, {"a": False})
self.assertEquals(manifest.get("key"), "value_1")
def test_api(self):
data = """key:
if a == 1.5: value_1
value_2
key_1: other_value
"""
manifest = self.compile(data, {"a": 1.5})
self.assertFalse(manifest.is_empty)
self.assertEquals(manifest.root, manifest)
self.assertTrue(manifest.has_key("key_1"))
self.assertFalse(manifest.has_key("key_2"))
self.assertEquals(set(manifest.iterkeys()), set(["key", "key_1"]))
self.assertEquals(set(manifest.itervalues()), set(["value_1", "other_value"]))
def test_is_empty_1(self):
data = """
[Section]
[Subsection]
"""
manifest = self.compile(data, {})
self.assertTrue(manifest.is_empty)
| mpl-2.0 |
Azure/azure-sdk-for-python | sdk/databoxedge/azure-mgmt-databoxedge/tests/test_cli_mgmt_databoxedge.py | 1 | 16519 | # coding: utf-8
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
# TEST SCENARIO COVERAGE
# ----------------------
# Methods Total : 49
# Methods Covered : 49
# Examples Total : 49
# Examples Tested : 49
# Coverage % : 100
# ----------------------
import unittest
import azure.mgmt.databoxedge
from devtools_testutils import AzureMgmtTestCase, ResourceGroupPreparer
AZURE_LOCATION = 'eastus'
class MgmtDataBoxEdgeTest(AzureMgmtTestCase):
def setUp(self):
super(MgmtDataBoxEdgeTest, self).setUp()
self.mgmt_client = self.create_mgmt_client(
azure.mgmt.databoxedge.DataBoxEdgeManagementClient
)
@unittest.skip("skip test")
@ResourceGroupPreparer(location=AZURE_LOCATION)
def test_databoxedge(self, resource_group):
SERVICE_NAME = "myapimrndxyz"
DATA_BOX_EDGE_DEVICE_NAME = "mydivicename"
USER_NAME = "username"
ROLE_NAME = "rolename"
SHARE_NAME = "sharename"
ORDER_NAME = "ordername"
TRIGGER_NAME = "triggername"
STORAGE_ACCOUNT_NAME = "storageaccountname"
STORAGE_ACCOUNT_CREDENTIAL_NAME = "storageaccountcredentialname"
BANDWIDTH_SCHEDULE_NAME = "bandwidthschedulename"
CONTAINER_NAME = "containername"
OPERATIONS_STATUS_NAME = "operationsstatusname"
NETWORK_SETTING_NAME = "networksettingname"
UPDATE_SUMMARY_NAME = "updatesummaryname"
ALERT_NAME = "alertname"
JOB_NAME = "jobname"
SECURITY_SETTING_NAME = "securitysettingname"
# DataBoxEdgeDevicePut[put]
BODY = {
"location": "eastus",
"sku": {
"name": "Edge",
"tier": "Standard"
}
}
result = self.mgmt_client.devices.create_or_update(DATA_BOX_EDGE_DEVICE_NAME, BODY, resource_group.name)
result = result.result()
"""
# UserPut[put]
BODY = {
"encrypted_password": {
"value": "Password@1",
"encryption_algorithm": "None",
"encryption_cert_thumbprint": "blah"
},
"share_access_rights": []
}
result = self.mgmt_client.users.create_or_update(DATA_BOX_EDGE_DEVICE_NAME, USER_NAME, BODY, resource_group.name)
result = result.result()
# RolePut[put]
BODY = {
"kind": "IOT",
"host_platform": "Linux",
"io_tdevice_details": {
"device_id": "iotdevice",
"io_thost_hub": "iothub.azure-devices.net",
"authentication": {
"symmetric_key": {
"connection_string": {
"value": "Encrypted<<HostName=iothub.azure-devices.net;DeviceId=iotDevice;SharedAccessKey=2C750FscEas3JmQ8Bnui5yQWZPyml0/UiRt1bQwd8=>>",
"encryption_cert_thumbprint": "348586569999244",
"encryption_algorithm": "AES256"
}
}
}
},
"io_tedge_device_details": {
"device_id": "iotEdge",
"io_thost_hub": "iothub.azure-devices.net",
"authentication": {
"symmetric_key": {
"connection_string": {
"value": "Encrypted<<HostName=iothub.azure-devices.net;DeviceId=iotEdge;SharedAccessKey=2C750FscEas3JmQ8Bnui5yQWZPyml0/UiRt1bQwd8=>>",
"encryption_cert_thumbprint": "1245475856069999244",
"encryption_algorithm": "AES256"
}
}
}
},
"share_mappings": [],
"role_status": "Enabled"
}
result = self.mgmt_client.roles.create_or_update(DATA_BOX_EDGE_DEVICE_NAME, ROLE_NAME, BODY, resource_group.name)
result = result.result()
# SharePut[put]
BODY = {
"description": "",
"share_status": "Online",
"monitoring_status": "Enabled",
"azure_container_info": {
"storage_account_credential_id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/" + DATA_BOX_EDGE_DEVICE_NAME + "/storageAccountCredentials/" + STORAGE_ACCOUNT_CREDENTIAL_NAME + "",
"container_name": "testContainerSMB",
"data_format": "BlockBlob"
},
"access_protocol": "SMB",
"user_access_rights": [
{
"user_id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + resource_group.name + "/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/" + DATA_BOX_EDGE_DEVICE_NAME + "/users/" + USER_NAME + "",
"access_type": "Change"
}
],
"data_policy": "Cloud"
}
result = self.mgmt_client.shares.create_or_update(resource_group.name, DATA_BOX_EDGE_DEVICE_NAME, SHARE_NAME, BODY)
result = result.result()
# OrderPut[put]
BODY = {
"contact_information": {
"contact_person": "John Mcclane",
"company_name": "Microsoft",
"phone": "(800) 426-9400",
"email_list": [
"[email protected]"
]
},
"shipping_address": {
"address_line1": "Microsoft Corporation",
"address_line2": "One Microsoft Way",
"address_line3": "Redmond",
"postal_code": "98052",
"city": "WA",
"state": "WA",
"country": "USA"
}
}
result = self.mgmt_client.orders.create_or_update(DATA_BOX_EDGE_DEVICE_NAME, BODY, resource_group.name)
result = result.result()
# TriggerPut[put]
BODY = {
"properties": {
"custom_context_tag": "CustomContextTags-1235346475",
"source_info": {
"share_id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/" + DATA_BOX_EDGE_DEVICE_NAME + "/shares/" + SHARE_NAME + ""
},
"sink_info": {
"role_id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/" + DATA_BOX_EDGE_DEVICE_NAME + "/roles/" + ROLE_NAME + ""
}
},
"kind": "FileEvent"
}
result = self.mgmt_client.triggers.create_or_update(resource_group.name, DATA_BOX_EDGE_DEVICE_NAME, TRIGGER_NAME, BODY)
result = result.result()
# BandwidthSchedulePut[put]
BODY = {
"start": "0:0:0",
"stop": "13:59:0",
"rate_in_mbps": "100",
"days": [
"Sunday",
"Monday"
]
}
result = self.mgmt_client.bandwidth_schedules.create_or_update(DATA_BOX_EDGE_DEVICE_NAME, BANDWIDTH_SCHEDULE_NAME, BODY, resource_group.name)
result = result.result()
# SACPut[put]
BODY = {
"properties": {
"alias": "sac1",
"user_name": "cisbvt",
"account_key": {
"value": "lAeZEYi6rNP1/EyNaVUYmTSZEYyaIaWmwUsGwek0+xiZj54GM9Ue9/UA2ed/ClC03wuSit2XzM/cLRU5eYiFBwks23rGwiQOr3sruEL2a74EjPD050xYjA6M1I2hu/w2yjVHhn5j+DbXS4Xzi+rHHNZK3DgfDO3PkbECjPck+PbpSBjy9+6Mrjcld5DIZhUAeMlMHrFlg+WKRKB14o/og56u5/xX6WKlrMLEQ+y6E18dUwvWs2elTNoVO8PBE8SM/CfooX4AMNvaNdSObNBPdP+F6Lzc556nFNWXrBLRt0vC7s9qTiVRO4x/qCNaK/B4y7IqXMllwQFf4Np9UQ2ECA==",
"encryption_cert_thumbprint": "2A9D8D6BE51574B5461230AEF02F162C5F01AD31",
"encryption_algorithm": "AES256"
},
"ssl_status": "Disabled",
"account_type": "BlobStorage"
}
}
result = self.mgmt_client.storage_account_credentials.create_or_update(resource_group.name, DATA_BOX_EDGE_DEVICE_NAME, STORAGE_ACCOUNT_CREDENTIAL_NAME, BODY)
result = result.result()
# SACGet[get]
result = self.mgmt_client.storage_account_credentials.get(DATA_BOX_EDGE_DEVICE_NAME, STORAGE_ACCOUNT_CREDENTIAL_NAME, resource_group.name)
# BandwidthScheduleGet[get]
result = self.mgmt_client.bandwidth_schedules.get(resource_group.name, DATA_BOX_EDGE_DEVICE_NAME, BANDWIDTH_SCHEDULE_NAME)
# OperationsStatusGet[get]
result = self.mgmt_client.operations_status.get(resource_group.name, DATA_BOX_EDGE_DEVICE_NAME, OPERATIONS_STATUS_NAME)
# NetworkSettingsGet[get]
result = self.mgmt_client.devices.get_network_settings(resource_group.name, DATA_BOX_EDGE_DEVICE_NAME, NETWORK_SETTING_NAME)
# UpdateSummaryGet[get]
result = self.mgmt_client.devices.get_update_summary(resource_group.name, DATA_BOX_EDGE_DEVICE_NAME, UPDATE_SUMMARY_NAME)
# TriggerGet[get]
result = self.mgmt_client.triggers.get(resource_group.name, DATA_BOX_EDGE_DEVICE_NAME, TRIGGER_NAME)
# SACGetAllInDevice[get]
result = self.mgmt_client.storage_account_credentials.list_by_data_box_edge_device(resource_group.name, DATA_BOX_EDGE_DEVICE_NAME)
# AlertGet[get]
result = self.mgmt_client.alerts.get(resource_group.name, DATA_BOX_EDGE_DEVICE_NAME, ALERT_NAME)
# ShareGet[get]
result = self.mgmt_client.shares.get(resource_group.name, DATA_BOX_EDGE_DEVICE_NAME, SHARE_NAME)
# OrderGet[get]
result = self.mgmt_client.orders.get(resource_group.name, DATA_BOX_EDGE_DEVICE_NAME, ORDER_NAME)
# UserGet[get]
result = self.mgmt_client.users.get(resource_group.name, DATA_BOX_EDGE_DEVICE_NAME, USER_NAME)
# RoleGet[get]
result = self.mgmt_client.roles.get(resource_group.name, DATA_BOX_EDGE_DEVICE_NAME, ROLE_NAME)
# JobsGet[get]
result = self.mgmt_client.jobs.get(resource_group.name, DATA_BOX_EDGE_DEVICE_NAME, JOB_NAME)
# BandwidthScheduleGetAllInDevice[get]
result = self.mgmt_client.bandwidth_schedules.list_by_data_box_edge_device(resource_group.name, DATA_BOX_EDGE_DEVICE_NAME)
# TriggerGetAllInDevice[get]
result = self.mgmt_client.triggers.list_by_data_box_edge_device(resource_group.name, DATA_BOX_EDGE_DEVICE_NAME)
# OrderGetAllInDevice[get]
result = self.mgmt_client.orders.list_by_data_box_edge_device(resource_group.name, DATA_BOX_EDGE_DEVICE_NAME)
# AlertGetAllInDevice[get]
result = self.mgmt_client.alerts.list_by_data_box_edge_device(resource_group.name, DATA_BOX_EDGE_DEVICE_NAME)
# ShareGetAllInDevice[get]
result = self.mgmt_client.shares.list_by_data_box_edge_device(resource_group.name, DATA_BOX_EDGE_DEVICE_NAME)
# NodesGetAllInDevice[get]
result = self.mgmt_client.nodes.list_by_data_box_edge_device(resource_group.name, DATA_BOX_EDGE_DEVICE_NAME)
# RoleGetAllInDevice[get]
result = self.mgmt_client.roles.list_by_data_box_edge_device(resource_group.name, DATA_BOX_EDGE_DEVICE_NAME)
# DataBoxEdgeDeviceGetByName[get]
result = self.mgmt_client.devices.get(resource_group.name, DATA_BOX_EDGE_DEVICE_NAME)
# DataBoxEdgeDeviceGetByResourceGroup[get]
result = self.mgmt_client.devices.list_by_resource_group(resource_group.name)
# DataBoxEdgeDeviceGetBySubscription[get]
result = self.mgmt_client.devices.list_by_subscription()
# OperationsGet[get]
result = self.mgmt_client.operations.list()
# CreateOrUpdateSecuritySettings[post]
BODY = {
"properties": {
"device_admin_password": {
"value": "jJ5MvXa/AEWvwxviS92uCjatCXeyLYTy8jx/k105MjQRXT7i6Do8qpEcQ8d+OBbwmQTnwKW0CYyzzVRCc0uZcPCf6PsWtP4l6wvcKGAP66PwK68eEkTUOmp+wUHc4hk02kWmTWeAjBZkuDBP3xK1RnZo95g2RE4i1UgKNP5BEKCLd71O104DW3AWW41mh9XLWNOaxw+VjQY7wmvlE6XkvpkMhcGuha2u7lx8zi9ZkcMvJVYDYK36Fb/K3KhBAmDjjDmVq04jtBlcSTXQObt0nlj4BwGGtdrpeIpr67zqr5i3cPm6e6AleIaIhp6sI/uyGSMiT3oev2eg49u2ii7kVA==",
"encryption_algorithm": "AES256",
"encryption_cert_thumbprint": "7DCBDFC44ED968D232C9A998FC105B5C70E84BE0"
}
}
}
result = self.mgmt_client.devices.create_or_update_security_settings(resource_group.name, DATA_BOX_EDGE_DEVICE_NAME, SECURITY_SETTING_NAME, BODY)
result = result.result()
# ShareRefreshPost[post]
result = self.mgmt_client.shares.refresh(resource_group.name, DATA_BOX_EDGE_DEVICE_NAME, SHARE_NAME)
result = result.result()
# ExtendedInfoPost[post]
result = self.mgmt_client.devices.get_extended_information(resource_group.name, DATA_BOX_EDGE_DEVICE_NAME)
# UploadCertificatePost[post]
BODY = {
"properties": {
"certificate": "MIIC9DCCAdygAwIBAgIQWJae7GNjiI9Mcv/gJyrOPTANBgkqhkiG9w0BAQUFADASMRAwDgYDVQQDDAdXaW5kb3dzMB4XDTE4MTEyNzAwMTA0NVoXDTIxMTEyODAwMTA0NVowEjEQMA4GA1UEAwwHV2luZG93czCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKxkRExqxf0qH1avnyORptIbRC2yQwqe3EIbJ2FPKr5jtAppGeX/dGKrFSnX+7/0HFr77aJHafdpEAtOiLyJ4zCAVs0obZCCIq4qJdmjYUTU0UXH/w/YzXfQA0d9Zh9AN+NJBX9xj05NzgsT24fkgsK2v6mWJQXT7YcWAsl5sEYPnx1e+MrupNyVSL/RUJmrS+etJSysHtFeWRhsUhVAs1DD5ExJvBLU3WH0IsojEvpXcjrutB5/MDQNrd/StGI6WovoSSPH7FyT9tgERx+q+Yg3YUGzfaIPCctlrRGehcdtzdNoKd0rsX62yCq0U6POoSfwe22NJu41oAUMd7e6R8cCAwEAAaNGMEQwEwYDVR0lBAwwCgYIKwYBBQUHAwIwHQYDVR0OBBYEFDd0VxnS3LnMIfwc7xW4b4IZWG5GMA4GA1UdDwEB/wQEAwIFIDANBgkqhkiG9w0BAQUFAAOCAQEAPQRby2u9celvtvL/DLEb5Vt3/tPStRQC5MyTD62L5RT/q8E6EMCXVZNkXF5WlWucLJi/18tY+9PNgP9xWLJh7kpSWlWdi9KPtwMqKDlEH8L2TnQdjimt9XuiCrTnoFy/1X2BGLY/rCaUJNSd15QCkz2xeW+Z+YSk2GwAc/A/4YfNpqSIMfNuPrT76o02VdD9WmJUA3fS/HY0sU9qgQRS/3F5/0EPS+HYQ0SvXCK9tggcCd4O050ytNBMJC9qMOJ7yE0iOrFfOJSCfDAuPhn/rHFh79Kn1moF+/CE+nc0/2RPiLC8r54/rt5dYyyxJDfXg0a3VrrX39W69WZGW5OXiw=="
}
}
result = self.mgmt_client.devices.upload_certificate(resource_group.name, DATA_BOX_EDGE_DEVICE_NAME, BODY)
# DownloadUpdatesPost[post]
result = self.mgmt_client.devices.download_updates(resource_group.name, DATA_BOX_EDGE_DEVICE_NAME)
result = result.result()
# ScanForUpdatesPost[post]
result = self.mgmt_client.devices.scan_for_updates(resource_group.name, DATA_BOX_EDGE_DEVICE_NAME)
result = result.result()
# InstallUpdatesPost[post]
result = self.mgmt_client.devices.install_updates(resource_group.name, DATA_BOX_EDGE_DEVICE_NAME)
result = result.result()
"""
# DataBoxEdgeDevicePatch[patch]
BODY = {
"tags": {
"key1": "value1",
"key2": "value2"
}
}
result = self.mgmt_client.devices.update(DATA_BOX_EDGE_DEVICE_NAME, BODY, resource_group.name)
"""
# SACDelete[delete]
result = self.mgmt_client.storage_account_credentials.delete(resource_group.name, DATA_BOX_EDGE_DEVICE_NAME, STORAGE_ACCOUNT_CREDENTIAL_NAME)
result = result.result()
# BandwidthScheduleDelete[delete]
result = self.mgmt_client.bandwidth_schedules.delete(resource_group.name, DATA_BOX_EDGE_DEVICE_NAME, BANDWIDTH_SCHEDULE_NAME)
result = result.result()
# TriggerDelete[delete]
result = self.mgmt_client.triggers.delete(resource_group.name, DATA_BOX_EDGE_DEVICE_NAME, TRIGGER_NAME)
result = result.result()
# ShareDelete[delete]
result = self.mgmt_client.shares.delete(resource_group.name, DATA_BOX_EDGE_DEVICE_NAME, SHARE_NAME)
result = result.result()
# OrderDelete[delete]
result = self.mgmt_client.orders.delete(resource_group.name, DATA_BOX_EDGE_DEVICE_NAME, ORDER_NAME)
result = result.result()
# UserDelete[delete]
result = self.mgmt_client.users.delete(resource_group.name, DATA_BOX_EDGE_DEVICE_NAME, USER_NAME)
result = result.result()
# RoleDelete[delete]
result = self.mgmt_client.roles.delete(resource_group.name, DATA_BOX_EDGE_DEVICE_NAME, ROLE_NAME)
result = result.result()
"""
# DataBoxEdgeDeviceDelete[delete]
result = self.mgmt_client.devices.delete(DATA_BOX_EDGE_DEVICE_NAME, resource_group.name)
result = result.result()
#------------------------------------------------------------------------------
if __name__ == '__main__':
unittest.main()
| mit |
dmacvicar/spacewalk | client/solaris/smartpm/smart/channels/rpm_md_info.py | 6 | 1125 | #
# Copyright (c) 2004 Conectiva, Inc.
#
# Written by Gustavo Niemeyer <[email protected]>
#
# This file is part of Smart Package Manager.
#
# Smart Package Manager is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# Smart Package Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Smart Package Manager; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
from smart import _
kind = "package"
name = _("RPM MetaData")
description = _("""
Repository created with the rpm-metadata project.
""")
fields = [("baseurl", _("Base URL"), str, None,
_("URL where repodata/ subdirectory is found"))]
| gpl-2.0 |
nicolargo/intellij-community | python/helpers/docutils/parsers/rst/languages/sv.py | 57 | 3988 | # $Id: sv.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: Adam Chodorowski <[email protected]>
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Swedish language mappings for language-dependent features of reStructuredText.
"""
__docformat__ = 'reStructuredText'
directives = {
u'observera': 'attention',
u'caution (translation required)': 'caution',
u'fara': 'danger',
u'fel': 'error',
u'v\u00e4gledning': 'hint',
u'viktigt': 'important',
u'notera': 'note',
u'tips': 'tip',
u'varning': 'warning',
u'admonition (translation required)': 'admonition',
u'sidebar (translation required)': 'sidebar',
u'\u00e4mne': 'topic',
u'line-block (translation required)': 'line-block',
u'parsed-literal (translation required)': 'parsed-literal',
u'mellanrubrik': 'rubric',
u'epigraph (translation required)': 'epigraph',
u'highlights (translation required)': 'highlights',
u'pull-quote (translation required)': 'pull-quote',
u'compound (translation required)': 'compound',
u'container (translation required)': 'container',
# u'fr\u00e5gor': 'questions',
# NOTE: A bit long, but recommended by http://www.nada.kth.se/dataterm/:
# u'fr\u00e5gor-och-svar': 'questions',
# u'vanliga-fr\u00e5gor': 'questions',
u'table (translation required)': 'table',
u'csv-table (translation required)': 'csv-table',
u'list-table (translation required)': 'list-table',
u'meta': 'meta',
# u'bildkarta': 'imagemap', # FIXME: Translation might be too literal.
u'bild': 'image',
u'figur': 'figure',
u'inkludera': 'include',
u'r\u00e5': 'raw', # FIXME: Translation might be too literal.
u'ers\u00e4tt': 'replace',
u'unicode': 'unicode',
u'datum': 'date',
u'class (translation required)': 'class',
u'role (translation required)': 'role',
u'default-role (translation required)': 'default-role',
u'title (translation required)': 'title',
u'inneh\u00e5ll': 'contents',
u'sektionsnumrering': 'sectnum',
u'target-notes (translation required)': 'target-notes',
u'header (translation required)': 'header',
u'footer (translation required)': 'footer',
# u'fotnoter': 'footnotes',
# u'citeringar': 'citations',
}
"""Swedish name to registered (in directives/__init__.py) directive name
mapping."""
roles = {
u'abbreviation (translation required)': 'abbreviation',
u'acronym (translation required)': 'acronym',
u'index (translation required)': 'index',
u'subscript (translation required)': 'subscript',
u'superscript (translation required)': 'superscript',
u'title-reference (translation required)': 'title-reference',
u'pep-reference (translation required)': 'pep-reference',
u'rfc-reference (translation required)': 'rfc-reference',
u'emphasis (translation required)': 'emphasis',
u'strong (translation required)': 'strong',
u'literal (translation required)': 'literal',
u'named-reference (translation required)': 'named-reference',
u'anonymous-reference (translation required)': 'anonymous-reference',
u'footnote-reference (translation required)': 'footnote-reference',
u'citation-reference (translation required)': 'citation-reference',
u'substitution-reference (translation required)': 'substitution-reference',
u'target (translation required)': 'target',
u'uri-reference (translation required)': 'uri-reference',
u'r\u00e5': 'raw',}
"""Mapping of Swedish role names to canonical role names for interpreted text.
"""
| apache-2.0 |
MrLoick/python-for-android | python-build/python-libs/gdata/src/gdata/tlslite/integration/POP3_TLS.py | 271 | 5466 | """TLS Lite + poplib."""
import socket
from poplib import POP3
from gdata.tlslite.TLSConnection import TLSConnection
from gdata.tlslite.integration.ClientHelper import ClientHelper
# POP TLS PORT
POP3_TLS_PORT = 995
class POP3_TLS(POP3, ClientHelper):
"""This class extends L{poplib.POP3} with TLS support."""
def __init__(self, host, port = POP3_TLS_PORT,
username=None, password=None, sharedKey=None,
certChain=None, privateKey=None,
cryptoID=None, protocol=None,
x509Fingerprint=None,
x509TrustList=None, x509CommonName=None,
settings=None):
"""Create a new POP3_TLS.
For client authentication, use one of these argument
combinations:
- username, password (SRP)
- username, sharedKey (shared-key)
- certChain, privateKey (certificate)
For server authentication, you can either rely on the
implicit mutual authentication performed by SRP or
shared-keys, or you can do certificate-based server
authentication with one of these argument combinations:
- cryptoID[, protocol] (requires cryptoIDlib)
- x509Fingerprint
- x509TrustList[, x509CommonName] (requires cryptlib_py)
Certificate-based server authentication is compatible with
SRP or certificate-based client authentication. It is
not compatible with shared-keys.
The caller should be prepared to handle TLS-specific
exceptions. See the client handshake functions in
L{tlslite.TLSConnection.TLSConnection} for details on which
exceptions might be raised.
@type host: str
@param host: Server to connect to.
@type port: int
@param port: Port to connect to.
@type username: str
@param username: SRP or shared-key username. Requires the
'password' or 'sharedKey' argument.
@type password: str
@param password: SRP password for mutual authentication.
Requires the 'username' argument.
@type sharedKey: str
@param sharedKey: Shared key for mutual authentication.
Requires the 'username' argument.
@type certChain: L{tlslite.X509CertChain.X509CertChain} or
L{cryptoIDlib.CertChain.CertChain}
@param certChain: Certificate chain for client authentication.
Requires the 'privateKey' argument. Excludes the SRP or
shared-key related arguments.
@type privateKey: L{tlslite.utils.RSAKey.RSAKey}
@param privateKey: Private key for client authentication.
Requires the 'certChain' argument. Excludes the SRP or
shared-key related arguments.
@type cryptoID: str
@param cryptoID: cryptoID for server authentication. Mutually
exclusive with the 'x509...' arguments.
@type protocol: str
@param protocol: cryptoID protocol URI for server
authentication. Requires the 'cryptoID' argument.
@type x509Fingerprint: str
@param x509Fingerprint: Hex-encoded X.509 fingerprint for
server authentication. Mutually exclusive with the 'cryptoID'
and 'x509TrustList' arguments.
@type x509TrustList: list of L{tlslite.X509.X509}
@param x509TrustList: A list of trusted root certificates. The
other party must present a certificate chain which extends to
one of these root certificates. The cryptlib_py module must be
installed to use this parameter. Mutually exclusive with the
'cryptoID' and 'x509Fingerprint' arguments.
@type x509CommonName: str
@param x509CommonName: The end-entity certificate's 'CN' field
must match this value. For a web server, this is typically a
server name such as 'www.amazon.com'. Mutually exclusive with
the 'cryptoID' and 'x509Fingerprint' arguments. Requires the
'x509TrustList' argument.
@type settings: L{tlslite.HandshakeSettings.HandshakeSettings}
@param settings: Various settings which can be used to control
the ciphersuites, certificate types, and SSL/TLS versions
offered by the client.
"""
self.host = host
self.port = port
msg = "getaddrinfo returns an empty list"
self.sock = None
for res in socket.getaddrinfo(self.host, self.port, 0, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
try:
self.sock = socket.socket(af, socktype, proto)
self.sock.connect(sa)
except socket.error, msg:
if self.sock:
self.sock.close()
self.sock = None
continue
break
if not self.sock:
raise socket.error, msg
### New code below (all else copied from poplib)
ClientHelper.__init__(self,
username, password, sharedKey,
certChain, privateKey,
cryptoID, protocol,
x509Fingerprint,
x509TrustList, x509CommonName,
settings)
self.sock = TLSConnection(self.sock)
self.sock.closeSocket = True
ClientHelper._handshake(self, self.sock)
###
self.file = self.sock.makefile('rb')
self._debugging = 0
self.welcome = self._getresp()
| apache-2.0 |
hkemmel/tal | affichage.py | 1 | 2209 | # -*- coding: utf-8 -*-
"""
Created on Tue Apr 25 14:34:25 2017
@author: manfred.madelaine
"""
import time
def affStart():
msg1 = "*** Binvenue dans i-Opinion ou Opinion Way ***"
msg2 = "Le logiciel d'analyse et de classification des revues cinématographiques !"
listMsg = []
listMsg.append("")
listMsg.append(msg1)
listMsg.append("")
listMsg.append(msg2)
listMsg.append("")
print(affBox(listMsg, 1, 1, len(msg2)))
delai()
def affEnd():
msg1 = "*** Opinion Way vous remercie de votre viste, à bientôt ! ***"
msg = []
msg.append(msg1)
box = affBox(msg, 1, 1, len(msg1)-1)
print(box)
def affMessage(msg):
deb = "\n\t--- "
fin = " ---\n\n"
print(deb + msg + fin)
delai()
def delai():
time.sleep(0.8)
"""
Displays a message inside a box
msg : message to display
x : horizontal offset
y : vertical offset
L : width of the box
"""
def affBox(msg, x, y, L):
box = ""
# vertical offset
box += multChaine("\n", y)
indiceLine = 0
# handle one line of the message
for txt in msg:
# top border
if(indiceLine == 0):
# horizontal offset
box += "\n" + multChaine("\t", x)
box += multChaine("-", L+3)
# horizontal offset
box += "\n" + multChaine("\t", x)
esp = ""
mult = 1
#message
if(len(txt) < L ):
esp = " "
mult = (L - len(txt)) / 2
box += "| " + multChaine(esp, mult) + txt + multChaine(esp, mult) + " |"
# bottom border
if(indiceLine == len(msg) - 1 ):
# horizontal offset
box += "\n" + multChaine("\t", x)
box += multChaine("-", L+3)
indiceLine += 1
box+="\n"
return(box)
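# Example call (illustrative only): print(affBox(["Hello"], 1, 1, 20))
# prints the word "Hello" centered inside a small ASCII box.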
def affErr():
affMessage("Votre réponse est incorrecte !")
def multChaine(chaine, mult):
i = 0
msg = ""
while i < mult:
msg += chaine
i += 1
return msg
| gpl-3.0 |
wangyikai/linux | tools/perf/scripts/python/net_dropmonitor.py | 1812 | 1749 | # Monitor the system for dropped packets and produce a report of drop locations and counts
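# Typical invocation (an assumption about how perf Python scripts are run,
# not taken from this file):
# perf record -e skb:kfree_skb -a -- sleep 10
# perf script -s net_dropmonitor.py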
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
drop_log = {}
kallsyms = []
def get_kallsyms_table():
global kallsyms
try:
f = open("/proc/kallsyms", "r")
except:
return
for line in f:
loc = int(line.split()[0], 16)
name = line.split()[2]
kallsyms.append((loc, name))
kallsyms.sort()
def get_sym(sloc):
loc = int(sloc)
# Invariant: kallsyms[i][0] <= loc for all 0 <= i <= start
# kallsyms[i][0] > loc for all end <= i < len(kallsyms)
start, end = -1, len(kallsyms)
while end != start + 1:
pivot = (start + end) // 2
if loc < kallsyms[pivot][0]:
end = pivot
else:
start = pivot
# Now (start == -1 or kallsyms[start][0] <= loc)
# and (start == len(kallsyms) - 1 or loc < kallsyms[start + 1][0])
if start >= 0:
symloc, name = kallsyms[start]
return (name, loc - symloc)
else:
return (None, 0)
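# Worked example (illustrative, with a made-up symbol table): if kallsyms is
# [(0x1000, "foo"), (0x2000, "bar")], get_sym(0x1500) returns ("foo", 0x500)
# because 0x1000 <= 0x1500 < 0x2000.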
def print_drop_table():
print "%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT")
for i in drop_log.keys():
(sym, off) = get_sym(i)
if sym == None:
sym = i
print "%25s %25s %25s" % (sym, off, drop_log[i])
def trace_begin():
print "Starting trace (Ctrl-C to dump results)"
def trace_end():
print "Gathering kallsyms data"
get_kallsyms_table()
print_drop_table()
# called from perf, when it finds a corresponding event
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm, callchain,
skbaddr, location, protocol):
slocation = str(location)
try:
drop_log[slocation] = drop_log[slocation] + 1
except:
drop_log[slocation] = 1
| gpl-2.0 |
LuizGsa21/p4-conference-central | models.py | 1 | 7226 | #!/usr/bin/env python
"""models.py
Udacity conference server-side Python App Engine data & ProtoRPC models
$Id: models.py,v 1.1 2014/05/24 22:01:10 wesc Exp $
created/forked from conferences.py by wesc on 2014 may 24
"""
__author__ = '[email protected] (Wesley Chun)'
import httplib
import endpoints
from protorpc import messages
from google.appengine.ext import ndb
import datetime
class ConflictException(endpoints.ServiceException):
"""ConflictException -- exception mapped to HTTP 409 response"""
http_status = httplib.CONFLICT
class StringMessage(messages.Message):
"""StringMessage-- outbound (single) string message"""
data = messages.StringField(1, required=True)
class BooleanMessage(messages.Message):
"""BooleanMessage-- outbound Boolean value message"""
data = messages.BooleanField(1)
class TeeShirtSize(messages.Enum):
"""TeeShirtSize -- t-shirt size enumeration value"""
NOT_SPECIFIED = 1
XS_M = 2
XS_W = 3
S_M = 4
S_W = 5
M_M = 6
M_W = 7
L_M = 8
L_W = 9
XL_M = 10
XL_W = 11
XXL_M = 12
XXL_W = 13
XXXL_M = 14
XXXL_W = 15
class Profile(ndb.Model):
"""Profile -- User profile object"""
displayName = ndb.StringProperty(default='')
mainEmail = ndb.StringProperty()
teeShirtSize = ndb.StringProperty(default='NOT_SPECIFIED')
conferenceKeysToAttend = ndb.KeyProperty(kind='Conference', repeated=True)
wishList = ndb.KeyProperty(kind='Session', repeated=True)
def toForm(self):
form = ProfileForm(
displayName=self.displayName,
mainEmail=self.mainEmail,
teeShirtSize=getattr(TeeShirtSize, self.teeShirtSize),
conferenceKeysToAttend=[key.urlsafe() for key in self.conferenceKeysToAttend]
)
form.check_initialized()
return form
def toMiniForm(self):
form = ProfileMiniForm(
displayName=self.displayName,
teeShirtSize=getattr(TeeShirtSize, self.teeShirtSize)
)
form.check_initialized()
return form
class ProfileMiniForm(messages.Message):
"""ProfileMiniForm -- update Profile form message"""
displayName = messages.StringField(1)
teeShirtSize = messages.EnumField('TeeShirtSize', 2)
class ProfileForm(messages.Message):
"""ProfileForm -- Profile outbound form message"""
displayName = messages.StringField(1)
mainEmail = messages.StringField(2)
teeShirtSize = messages.EnumField('TeeShirtSize', 3)
conferenceKeysToAttend = messages.StringField(4, repeated=True)
class Conference(ndb.Model):
"""Conference -- Conference object"""
required_fields_schema = ('name', 'organizerUserId', 'startDate', 'endDate')
name = ndb.StringProperty(required=True)
description = ndb.StringProperty()
organizerUserId = ndb.StringProperty(required=True)
topics = ndb.StringProperty(repeated=True)
city = ndb.StringProperty()
startDate = ndb.DateProperty(required=True)
month = ndb.IntegerProperty()
endDate = ndb.DateProperty(required=True)
maxAttendees = ndb.IntegerProperty()
seatsAvailable = ndb.IntegerProperty()
@property
def sessions(self):
return Session.query(ancestor=self.key)
def toForm(self, display_name=''):
form = ConferenceForm(
websafeKey=self.key.urlsafe(),
name=self.name,
description=self.description,
organizerUserId=self.organizerUserId,
topics=self.topics,
city=self.city,
startDate=self.startDate.strftime('%Y-%m-%d'),
month=self.month,
endDate=self.endDate.strftime('%Y-%m-%d'),
maxAttendees=self.maxAttendees,
seatsAvailable=self.seatsAvailable,
organizerDisplayName=display_name
)
form.check_initialized()
return form
class ConferenceForm(messages.Message):
"""ConferenceForm -- Conference outbound form message"""
name = messages.StringField(1)
description = messages.StringField(2)
organizerUserId = messages.StringField(3)
topics = messages.StringField(4, repeated=True)
city = messages.StringField(5)
startDate = messages.StringField(6) # DateTimeField()
month = messages.IntegerField(7)
maxAttendees = messages.IntegerField(8)
seatsAvailable = messages.IntegerField(9)
endDate = messages.StringField(10) # DateTimeField()
websafeKey = messages.StringField(11)
organizerDisplayName = messages.StringField(12)
class ConferenceForms(messages.Message):
"""ConferenceForms -- multiple Conference outbound form message"""
items = messages.MessageField(ConferenceForm, 1, repeated=True)
class ConferenceQueryForm(messages.Message):
"""ConferenceQueryForm -- Conference query inbound form message"""
field = messages.StringField(1)
operator = messages.StringField(2)
value = messages.StringField(3)
class ConferenceQueryForms(messages.Message):
"""ConferenceQueryForms -- multiple ConferenceQueryForm inbound form message"""
filters = messages.MessageField(ConferenceQueryForm, 1, repeated=True)
class Speaker(ndb.Model):
"""Speaker -- Speaker object"""
name = ndb.StringProperty(required=True)
class Session(ndb.Model):
"""Session -- Session object"""
required_fields_schema = ('name', 'speaker', 'duration', 'typeOfSession', 'date', 'startTime')
name = ndb.StringProperty(required=True)
highlights = ndb.StringProperty()
speaker = ndb.StructuredProperty(modelclass=Speaker, required=True)
duration = ndb.IntegerProperty(required=True)
typeOfSession = ndb.StringProperty(required=True)
date = ndb.DateProperty(required=True)
startTime = ndb.TimeProperty(required=True)
def toForm(self):
form = SessionForm(
websafeKey=self.key.urlsafe(),
name=self.name,
highlights=self.highlights,
speaker=self.speaker.name,
duration=self.duration,
typeOfSession=self.typeOfSession,
date=self.date.strftime('%Y-%m-%d'),
startTime=self.startTime.strftime('%H:%M')
)
form.check_initialized()
return form
class SessionForm(messages.Message):
"""SessionForm -- Session outbound form message"""
websafeKey = messages.StringField(1)
name = messages.StringField(2)
highlights = messages.StringField(3)
speaker = messages.StringField(4)
duration = messages.IntegerField(5)
typeOfSession = messages.StringField(6)
date = messages.StringField(7)
startTime = messages.StringField(8)
class SessionForms(messages.Message):
"""SessionForm -- multiple SessionForm outbound form message"""
items = messages.MessageField(SessionForm, 1, repeated=True)
class SessionQueryForm(messages.Message):
"""SessionQueryForm -- Session query inbound form message"""
field = messages.StringField(1)
operator = messages.StringField(2)
value = messages.StringField(3)
class SessionQueryForms(messages.Message):
"""SessionQueryForms -- multiple SessionQueryForm inbound form message"""
filters = messages.MessageField(SessionQueryForm, 1, repeated=True)
| apache-2.0 |
tequa/ammisoft | ammimain/WinPython-64bit-2.7.13.1Zero/python-2.7.13.amd64/Lib/site-packages/matplotlib/axis.py | 4 | 85084 | """
Classes for the ticks and x and y axis
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from matplotlib import rcParams
import matplotlib.artist as artist
from matplotlib.artist import allow_rasterization
import matplotlib.cbook as cbook
import matplotlib.font_manager as font_manager
import matplotlib.lines as mlines
import matplotlib.patches as mpatches
import matplotlib.scale as mscale
import matplotlib.text as mtext
import matplotlib.ticker as mticker
import matplotlib.transforms as mtransforms
import matplotlib.units as munits
import numpy as np
import warnings
GRIDLINE_INTERPOLATION_STEPS = 180
class Tick(artist.Artist):
"""
Abstract base class for the axis ticks, grid lines and labels
1 refers to the bottom of the plot for xticks and the left for yticks
2 refers to the top of the plot for xticks and the right for yticks
Publicly accessible attributes:
:attr:`tick1line`
a Line2D instance
:attr:`tick2line`
a Line2D instance
:attr:`gridline`
a Line2D instance
:attr:`label1`
a Text instance
:attr:`label2`
a Text instance
:attr:`gridOn`
a boolean which determines whether to draw the tickline
:attr:`tick1On`
a boolean which determines whether to draw the 1st tickline
:attr:`tick2On`
a boolean which determines whether to draw the 2nd tickline
:attr:`label1On`
a boolean which determines whether to draw tick label
:attr:`label2On`
a boolean which determines whether to draw tick label
"""
def __init__(self, axes, loc, label,
size=None, # points
width=None,
color=None,
tickdir=None,
pad=None,
labelsize=None,
labelcolor=None,
zorder=None,
gridOn=None, # defaults to axes.grid depending on
# axes.grid.which
tick1On=True,
tick2On=True,
label1On=True,
label2On=False,
major=True,
):
"""
bbox is the Bound2D bounding box in display coords of the Axes
loc is the tick location in data coords
size is the tick size in points
"""
artist.Artist.__init__(self)
if gridOn is None:
if major and (rcParams['axes.grid.which'] in ('both', 'major')):
gridOn = rcParams['axes.grid']
elif (not major) and (rcParams['axes.grid.which']
in ('both', 'minor')):
gridOn = rcParams['axes.grid']
else:
gridOn = False
self.set_figure(axes.figure)
self.axes = axes
name = self.__name__.lower()
self._name = name
self._loc = loc
if size is None:
if major:
size = rcParams['%s.major.size' % name]
else:
size = rcParams['%s.minor.size' % name]
self._size = size
if width is None:
if major:
width = rcParams['%s.major.width' % name]
else:
width = rcParams['%s.minor.width' % name]
self._width = width
if color is None:
color = rcParams['%s.color' % name]
self._color = color
if pad is None:
if major:
pad = rcParams['%s.major.pad' % name]
else:
pad = rcParams['%s.minor.pad' % name]
self._base_pad = pad
if labelcolor is None:
labelcolor = rcParams['%s.color' % name]
self._labelcolor = labelcolor
if labelsize is None:
labelsize = rcParams['%s.labelsize' % name]
self._labelsize = labelsize
if zorder is None:
if major:
zorder = mlines.Line2D.zorder + 0.01
else:
zorder = mlines.Line2D.zorder
self._zorder = zorder
self.apply_tickdir(tickdir)
self.tick1line = self._get_tick1line()
self.tick2line = self._get_tick2line()
self.gridline = self._get_gridline()
self.label1 = self._get_text1()
self.label = self.label1 # legacy name
self.label2 = self._get_text2()
self.gridOn = gridOn
self.tick1On = tick1On
self.tick2On = tick2On
self.label1On = label1On
self.label2On = label2On
self.update_position(loc)
def apply_tickdir(self, tickdir):
"""
Calculate self._pad and self._tickmarkers
"""
pass
def get_tickdir(self):
return self._tickdir
def get_tick_padding(self):
"""
Get the length of the tick outside of the axes.
"""
padding = {
'in': 0.0,
'inout': 0.5,
'out': 1.0
}
return self._size * padding[self._tickdir]
def get_children(self):
children = [self.tick1line, self.tick2line,
self.gridline, self.label1, self.label2]
return children
def set_clip_path(self, clippath, transform=None):
artist.Artist.set_clip_path(self, clippath, transform)
self.gridline.set_clip_path(clippath, transform)
self.stale = True
set_clip_path.__doc__ = artist.Artist.set_clip_path.__doc__
def get_pad_pixels(self):
return self.figure.dpi * self._base_pad / 72.0
def contains(self, mouseevent):
"""
Test whether the mouse event occurred in the Tick marks.
This function always returns false. It is more useful to test if the
axis as a whole contains the mouse rather than the set of tick marks.
"""
if six.callable(self._contains):
return self._contains(self, mouseevent)
return False, {}
def set_pad(self, val):
"""
Set the tick label pad in points
ACCEPTS: float
"""
self._apply_params(pad=val)
self.stale = True
def get_pad(self):
'Get the value of the tick label pad in points'
return self._base_pad
def _get_text1(self):
'Get the default Text 1 instance'
pass
def _get_text2(self):
'Get the default Text 2 instance'
pass
def _get_tick1line(self):
'Get the default line2D instance for tick1'
pass
def _get_tick2line(self):
'Get the default line2D instance for tick2'
pass
def _get_gridline(self):
'Get the default grid Line2d instance for this tick'
pass
def get_loc(self):
'Return the tick location (data coords) as a scalar'
return self._loc
@allow_rasterization
def draw(self, renderer):
if not self.get_visible():
self.stale = False
return
renderer.open_group(self.__name__)
if self.gridOn:
self.gridline.draw(renderer)
if self.tick1On:
self.tick1line.draw(renderer)
if self.tick2On:
self.tick2line.draw(renderer)
if self.label1On:
self.label1.draw(renderer)
if self.label2On:
self.label2.draw(renderer)
renderer.close_group(self.__name__)
self.stale = False
def set_label1(self, s):
"""
Set the text of ticklabel
ACCEPTS: str
"""
self.label1.set_text(s)
self.stale = True
set_label = set_label1
def set_label2(self, s):
"""
Set the text of ticklabel2
ACCEPTS: str
"""
self.label2.set_text(s)
self.stale = True
def _set_artist_props(self, a):
a.set_figure(self.figure)
def get_view_interval(self):
'return the view Interval instance for the axis this tick is ticking'
raise NotImplementedError('Derived must override')
def _apply_params(self, **kw):
switchkw = ['gridOn', 'tick1On', 'tick2On', 'label1On', 'label2On']
switches = [k for k in kw if k in switchkw]
for k in switches:
setattr(self, k, kw.pop(k))
newmarker = [k for k in kw if k in ['size', 'width', 'pad', 'tickdir']]
if newmarker:
self._size = kw.pop('size', self._size)
# Width could be handled outside this block, but it is
# convenient to leave it here.
self._width = kw.pop('width', self._width)
self._base_pad = kw.pop('pad', self._base_pad)
# apply_tickdir uses _size and _base_pad to make _pad,
# and also makes _tickmarkers.
self.apply_tickdir(kw.pop('tickdir', self._tickdir))
self.tick1line.set_marker(self._tickmarkers[0])
self.tick2line.set_marker(self._tickmarkers[1])
for line in (self.tick1line, self.tick2line):
line.set_markersize(self._size)
line.set_markeredgewidth(self._width)
# _get_text1_transform uses _pad from apply_tickdir.
trans = self._get_text1_transform()[0]
self.label1.set_transform(trans)
trans = self._get_text2_transform()[0]
self.label2.set_transform(trans)
tick_kw = dict([kv for kv in six.iteritems(kw)
if kv[0] in ['color', 'zorder']])
if tick_kw:
self.tick1line.set(**tick_kw)
self.tick2line.set(**tick_kw)
for k, v in six.iteritems(tick_kw):
setattr(self, '_' + k, v)
label_list = [k for k in six.iteritems(kw)
if k[0] in ['labelsize', 'labelcolor']]
if label_list:
label_kw = dict([(k[5:], v) for (k, v) in label_list])
self.label1.set(**label_kw)
self.label2.set(**label_kw)
for k, v in six.iteritems(label_kw):
                # for labelsize the text objects convert str ('small')
# -> points. grab the integer from the `Text` object
# instead of saving the string representation
v = getattr(self.label1, 'get_' + k)()
setattr(self, '_label' + k, v)
def update_position(self, loc):
'Set the location of tick in data coords with scalar *loc*'
raise NotImplementedError('Derived must override')
def _get_text1_transform(self):
raise NotImplementedError('Derived must override')
def _get_text2_transform(self):
raise NotImplementedError('Derived must override')
class XTick(Tick):
"""
Contains all the Artists needed to make an x tick - the tick line,
the label text and the grid line
"""
__name__ = 'xtick'
def _get_text1_transform(self):
return self.axes.get_xaxis_text1_transform(self._pad)
def _get_text2_transform(self):
return self.axes.get_xaxis_text2_transform(self._pad)
def apply_tickdir(self, tickdir):
if tickdir is None:
tickdir = rcParams['%s.direction' % self._name]
self._tickdir = tickdir
if self._tickdir == 'in':
self._tickmarkers = (mlines.TICKUP, mlines.TICKDOWN)
elif self._tickdir == 'inout':
self._tickmarkers = ('|', '|')
else:
self._tickmarkers = (mlines.TICKDOWN, mlines.TICKUP)
self._pad = self._base_pad + self.get_tick_padding()
self.stale = True
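    # Illustrative arithmetic (editor's addition, not part of the original
    # source): for a hypothetical x tick with ``size`` 4 and ``_base_pad``
    # 4, apply_tickdir('inout') selects the ('|', '|') markers and sets
    # ``_pad`` = 4 + 0.5 * 4 = 6 points; 'in' leaves ``_pad`` at 4, while
    # 'out' gives 4 + 4 = 8.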
def _get_text1(self):
'Get the default Text instance'
# the y loc is 3 points below the min of y axis
# get the affine as an a,b,c,d,tx,ty list
# x in data coords, y in axes coords
trans, vert, horiz = self._get_text1_transform()
t = mtext.Text(
x=0, y=0,
fontproperties=font_manager.FontProperties(size=self._labelsize),
color=self._labelcolor,
verticalalignment=vert,
horizontalalignment=horiz,
)
t.set_transform(trans)
self._set_artist_props(t)
return t
def _get_text2(self):
'Get the default Text 2 instance'
# x in data coords, y in axes coords
trans, vert, horiz = self._get_text2_transform()
t = mtext.Text(
x=0, y=1,
fontproperties=font_manager.FontProperties(size=self._labelsize),
color=self._labelcolor,
verticalalignment=vert,
horizontalalignment=horiz,
)
t.set_transform(trans)
self._set_artist_props(t)
return t
def _get_tick1line(self):
'Get the default line2D instance'
# x in data coords, y in axes coords
l = mlines.Line2D(xdata=(0,), ydata=(0,), color=self._color,
linestyle='None', marker=self._tickmarkers[0],
markersize=self._size,
markeredgewidth=self._width, zorder=self._zorder)
l.set_transform(self.axes.get_xaxis_transform(which='tick1'))
self._set_artist_props(l)
return l
def _get_tick2line(self):
'Get the default line2D instance'
# x in data coords, y in axes coords
l = mlines.Line2D(xdata=(0,), ydata=(1,),
color=self._color,
linestyle='None',
marker=self._tickmarkers[1],
markersize=self._size,
markeredgewidth=self._width,
zorder=self._zorder)
l.set_transform(self.axes.get_xaxis_transform(which='tick2'))
self._set_artist_props(l)
return l
def _get_gridline(self):
'Get the default line2D instance'
# x in data coords, y in axes coords
l = mlines.Line2D(xdata=(0.0, 0.0), ydata=(0, 1.0),
color=rcParams['grid.color'],
linestyle=rcParams['grid.linestyle'],
linewidth=rcParams['grid.linewidth'],
alpha=rcParams['grid.alpha'],
markersize=0)
l.set_transform(self.axes.get_xaxis_transform(which='grid'))
l.get_path()._interpolation_steps = GRIDLINE_INTERPOLATION_STEPS
self._set_artist_props(l)
return l
def update_position(self, loc):
'Set the location of tick in data coords with scalar *loc*'
x = loc
nonlinear = (hasattr(self.axes, 'yaxis') and
self.axes.yaxis.get_scale() != 'linear' or
hasattr(self.axes, 'xaxis') and
self.axes.xaxis.get_scale() != 'linear')
if self.tick1On:
self.tick1line.set_xdata((x,))
if self.tick2On:
self.tick2line.set_xdata((x,))
if self.gridOn:
self.gridline.set_xdata((x,))
if self.label1On:
self.label1.set_x(x)
if self.label2On:
self.label2.set_x(x)
if nonlinear:
self.tick1line._invalid = True
self.tick2line._invalid = True
self.gridline._invalid = True
self._loc = loc
self.stale = True
def get_view_interval(self):
'return the Interval instance for this axis view limits'
return self.axes.viewLim.intervalx
class YTick(Tick):
"""
Contains all the Artists needed to make a Y tick - the tick line,
the label text and the grid line
"""
__name__ = 'ytick'
def _get_text1_transform(self):
return self.axes.get_yaxis_text1_transform(self._pad)
def _get_text2_transform(self):
return self.axes.get_yaxis_text2_transform(self._pad)
def apply_tickdir(self, tickdir):
if tickdir is None:
tickdir = rcParams['%s.direction' % self._name]
self._tickdir = tickdir
if self._tickdir == 'in':
self._tickmarkers = (mlines.TICKRIGHT, mlines.TICKLEFT)
elif self._tickdir == 'inout':
self._tickmarkers = ('_', '_')
else:
self._tickmarkers = (mlines.TICKLEFT, mlines.TICKRIGHT)
self._pad = self._base_pad + self.get_tick_padding()
self.stale = True
    # how far the right edge of the tick labels sits from the y axis line
def _get_text1(self):
'Get the default Text instance'
# x in axes coords, y in data coords
trans, vert, horiz = self._get_text1_transform()
t = mtext.Text(
x=0, y=0,
fontproperties=font_manager.FontProperties(size=self._labelsize),
color=self._labelcolor,
verticalalignment=vert,
horizontalalignment=horiz,
)
t.set_transform(trans)
self._set_artist_props(t)
return t
def _get_text2(self):
'Get the default Text instance'
# x in axes coords, y in data coords
trans, vert, horiz = self._get_text2_transform()
t = mtext.Text(
x=1, y=0,
fontproperties=font_manager.FontProperties(size=self._labelsize),
color=self._labelcolor,
verticalalignment=vert,
horizontalalignment=horiz,
)
t.set_transform(trans)
self._set_artist_props(t)
return t
def _get_tick1line(self):
'Get the default line2D instance'
# x in axes coords, y in data coords
l = mlines.Line2D((0,), (0,),
color=self._color,
marker=self._tickmarkers[0],
linestyle='None',
markersize=self._size,
markeredgewidth=self._width,
zorder=self._zorder)
l.set_transform(self.axes.get_yaxis_transform(which='tick1'))
self._set_artist_props(l)
return l
def _get_tick2line(self):
'Get the default line2D instance'
# x in axes coords, y in data coords
l = mlines.Line2D((1,), (0,),
color=self._color,
marker=self._tickmarkers[1],
linestyle='None',
markersize=self._size,
markeredgewidth=self._width,
zorder=self._zorder)
l.set_transform(self.axes.get_yaxis_transform(which='tick2'))
self._set_artist_props(l)
return l
def _get_gridline(self):
'Get the default line2D instance'
# x in axes coords, y in data coords
l = mlines.Line2D(xdata=(0, 1), ydata=(0, 0),
color=rcParams['grid.color'],
linestyle=rcParams['grid.linestyle'],
linewidth=rcParams['grid.linewidth'],
alpha=rcParams['grid.alpha'],
markersize=0)
l.set_transform(self.axes.get_yaxis_transform(which='grid'))
l.get_path()._interpolation_steps = GRIDLINE_INTERPOLATION_STEPS
self._set_artist_props(l)
return l
def update_position(self, loc):
'Set the location of tick in data coords with scalar loc'
y = loc
nonlinear = (hasattr(self.axes, 'yaxis') and
self.axes.yaxis.get_scale() != 'linear' or
hasattr(self.axes, 'xaxis') and
self.axes.xaxis.get_scale() != 'linear')
if self.tick1On:
self.tick1line.set_ydata((y,))
if self.tick2On:
self.tick2line.set_ydata((y,))
if self.gridOn:
self.gridline.set_ydata((y, ))
if self.label1On:
self.label1.set_y(y)
if self.label2On:
self.label2.set_y(y)
if nonlinear:
self.tick1line._invalid = True
self.tick2line._invalid = True
self.gridline._invalid = True
self._loc = loc
self.stale = True
def get_view_interval(self):
'return the Interval instance for this axis view limits'
return self.axes.viewLim.intervaly
class Ticker(object):
locator = None
formatter = None
class Axis(artist.Artist):
"""
Public attributes
* :attr:`axes.transData` - transform data coords to display coords
* :attr:`axes.transAxes` - transform axis coords to display coords
* :attr:`labelpad` - number of points between the axis and its label
"""
OFFSETTEXTPAD = 3
def __str__(self):
return self.__class__.__name__ \
+ "(%f,%f)" % tuple(self.axes.transAxes.transform_point((0, 0)))
def __init__(self, axes, pickradius=15):
"""
Init the axis with the parent Axes instance
"""
artist.Artist.__init__(self)
self.set_figure(axes.figure)
        # Keep track of setting to the default value.  This allows us to know
        # if any of the following values is explicitly set by the user, so as
        # to not overwrite their settings with any of our 'auto' settings.
self.isDefault_majloc = True
self.isDefault_minloc = True
self.isDefault_majfmt = True
self.isDefault_minfmt = True
self.isDefault_label = True
self.axes = axes
self.major = Ticker()
self.minor = Ticker()
self.callbacks = cbook.CallbackRegistry()
self._autolabelpos = True
self._smart_bounds = False
self.label = self._get_label()
self.labelpad = rcParams['axes.labelpad']
self.offsetText = self._get_offset_text()
self.majorTicks = []
self.minorTicks = []
self.pickradius = pickradius
# Initialize here for testing; later add API
self._major_tick_kw = dict()
self._minor_tick_kw = dict()
self.cla()
self._set_scale('linear')
def set_label_coords(self, x, y, transform=None):
"""
Set the coordinates of the label. By default, the x
coordinate of the y label is determined by the tick label
bounding boxes, but this can lead to poor alignment of
multiple ylabels if there are multiple axes. Ditto for the y
        coordinate of the x label.
        You can also specify the coordinate system of the label with
        the transform.  If None, the default coordinate system will be
        the axes coordinate system, where (0, 0) is (left, bottom),
        (0.5, 0.5) is the middle, etc.
"""
self._autolabelpos = False
if transform is None:
transform = self.axes.transAxes
self.label.set_transform(transform)
self.label.set_position((x, y))
self.stale = True
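    # Illustrative usage sketch (editor's addition, not part of the original
    # source), shown as comments so the module is unaffected.  It assumes
    # the standard pyplot API and demonstrates the use case described above:
    # pinning the y labels of stacked subplots to a common x position so
    # they line up regardless of tick-label width.
    #
    #     >>> import matplotlib.pyplot as plt
    #     >>> fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
    #     >>> ax1.plot([0, 1], [0, 10000]); ax1.set_ylabel('counts')
    #     >>> ax2.plot([0, 1], [0, 1]);     ax2.set_ylabel('fraction')
    #     >>> for ax in (ax1, ax2):
    #     ...     ax.yaxis.set_label_coords(-0.1, 0.5)   # axes coords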
def get_transform(self):
return self._scale.get_transform()
def get_scale(self):
return self._scale.name
def _set_scale(self, value, **kwargs):
self._scale = mscale.scale_factory(value, self, **kwargs)
self._scale.set_default_locators_and_formatters(self)
self.isDefault_majloc = True
self.isDefault_minloc = True
self.isDefault_majfmt = True
self.isDefault_minfmt = True
def limit_range_for_scale(self, vmin, vmax):
return self._scale.limit_range_for_scale(vmin, vmax, self.get_minpos())
def get_children(self):
children = [self.label, self.offsetText]
majorticks = self.get_major_ticks()
minorticks = self.get_minor_ticks()
children.extend(majorticks)
children.extend(minorticks)
return children
def cla(self):
'clear the current axis'
self.set_major_locator(mticker.AutoLocator())
self.set_major_formatter(mticker.ScalarFormatter())
self.set_minor_locator(mticker.NullLocator())
self.set_minor_formatter(mticker.NullFormatter())
self.set_label_text('')
self._set_artist_props(self.label)
        # Keep track of setting to the default value.  This allows us to know
        # if any of the following values is explicitly set by the user, so as
        # to not overwrite their settings with any of our 'auto' settings.
self.isDefault_majloc = True
self.isDefault_minloc = True
self.isDefault_majfmt = True
self.isDefault_minfmt = True
self.isDefault_label = True
# Clear the callback registry for this axis, or it may "leak"
self.callbacks = cbook.CallbackRegistry()
# whether the grids are on
self._gridOnMajor = (rcParams['axes.grid'] and
rcParams['axes.grid.which'] in ('both', 'major'))
self._gridOnMinor = (rcParams['axes.grid'] and
rcParams['axes.grid.which'] in ('both', 'minor'))
self.label.set_text('')
self._set_artist_props(self.label)
self.reset_ticks()
self.converter = None
self.units = None
self.set_units(None)
self.stale = True
def reset_ticks(self):
# build a few default ticks; grow as necessary later; only
# define 1 so properties set on ticks will be copied as they
# grow
cbook.popall(self.majorTicks)
cbook.popall(self.minorTicks)
self.majorTicks.extend([self._get_tick(major=True)])
self.minorTicks.extend([self._get_tick(major=False)])
self._lastNumMajorTicks = 1
self._lastNumMinorTicks = 1
def set_tick_params(self, which='major', reset=False, **kw):
"""
Set appearance parameters for ticks and ticklabels.
For documentation of keyword arguments, see
:meth:`matplotlib.axes.Axes.tick_params`.
"""
dicts = []
if which == 'major' or which == 'both':
dicts.append(self._major_tick_kw)
if which == 'minor' or which == 'both':
dicts.append(self._minor_tick_kw)
kwtrans = self._translate_tick_kw(kw, to_init_kw=True)
for d in dicts:
if reset:
d.clear()
d.update(kwtrans)
if reset:
self.reset_ticks()
else:
if which == 'major' or which == 'both':
for tick in self.majorTicks:
tick._apply_params(**self._major_tick_kw)
if which == 'minor' or which == 'both':
for tick in self.minorTicks:
tick._apply_params(**self._minor_tick_kw)
if 'labelcolor' in kwtrans:
self.offsetText.set_color(kwtrans['labelcolor'])
self.stale = True
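    # Illustrative usage sketch (editor's addition, not part of the original
    # source), assuming the standard pyplot API; shown as comments so the
    # module is unaffected.  set_tick_params is normally reached through
    # Axes.tick_params:
    #
    #     >>> import matplotlib.pyplot as plt
    #     >>> fig, ax = plt.subplots()
    #     >>> ax.xaxis.set_tick_params(which='both', direction='in',
    #     ...                          length=6, labelsize=8, colors='0.3')
    #
    # The keyword names are translated to Tick.__init__ names by
    # _translate_tick_kw below (e.g. 'length' -> 'size',
    # 'direction' -> 'tickdir') and applied to every existing tick.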
@staticmethod
def _translate_tick_kw(kw, to_init_kw=True):
# We may want to move the following function to
# a more visible location; or maybe there already
# is something like this.
def _bool(arg):
if cbook.is_string_like(arg):
if arg.lower() == 'on':
return True
if arg.lower() == 'off':
return False
raise ValueError('String "%s" should be "on" or "off"' % arg)
return bool(arg)
# The following lists may be moved to a more
# accessible location.
kwkeys0 = ['size', 'width', 'color', 'tickdir', 'pad',
'labelsize', 'labelcolor', 'zorder', 'gridOn',
'tick1On', 'tick2On', 'label1On', 'label2On']
kwkeys1 = ['length', 'direction', 'left', 'bottom', 'right', 'top',
'labelleft', 'labelbottom', 'labelright', 'labeltop']
kwkeys = kwkeys0 + kwkeys1
kwtrans = dict()
if to_init_kw:
if 'length' in kw:
kwtrans['size'] = kw.pop('length')
if 'direction' in kw:
kwtrans['tickdir'] = kw.pop('direction')
if 'left' in kw:
kwtrans['tick1On'] = _bool(kw.pop('left'))
if 'bottom' in kw:
kwtrans['tick1On'] = _bool(kw.pop('bottom'))
if 'right' in kw:
kwtrans['tick2On'] = _bool(kw.pop('right'))
if 'top' in kw:
kwtrans['tick2On'] = _bool(kw.pop('top'))
if 'labelleft' in kw:
kwtrans['label1On'] = _bool(kw.pop('labelleft'))
if 'labelbottom' in kw:
kwtrans['label1On'] = _bool(kw.pop('labelbottom'))
if 'labelright' in kw:
kwtrans['label2On'] = _bool(kw.pop('labelright'))
if 'labeltop' in kw:
kwtrans['label2On'] = _bool(kw.pop('labeltop'))
if 'colors' in kw:
c = kw.pop('colors')
kwtrans['color'] = c
kwtrans['labelcolor'] = c
# Maybe move the checking up to the caller of this method.
for key in kw:
if key not in kwkeys:
raise ValueError(
"keyword %s is not recognized; valid keywords are %s"
% (key, kwkeys))
kwtrans.update(kw)
else:
raise NotImplementedError("Inverse translation is deferred")
return kwtrans
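    # Illustrative example of the translation above (editor's addition);
    # the input values are hypothetical:
    #
    #     >>> Axis._translate_tick_kw(
    #     ...     {'length': 6, 'direction': 'in', 'labelbottom': 'off'},
    #     ...     to_init_kw=True)
    #     {'size': 6, 'tickdir': 'in', 'label1On': False}
    #
    # (dict ordering may differ; 'off'/'on' strings are mapped to booleans
    # by the _bool helper above.)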
def set_clip_path(self, clippath, transform=None):
artist.Artist.set_clip_path(self, clippath, transform)
for child in self.majorTicks + self.minorTicks:
child.set_clip_path(clippath, transform)
self.stale = True
def get_view_interval(self):
'return the Interval instance for this axis view limits'
raise NotImplementedError('Derived must override')
def set_view_interval(self, vmin, vmax, ignore=False):
raise NotImplementedError('Derived must override')
def get_data_interval(self):
'return the Interval instance for this axis data limits'
raise NotImplementedError('Derived must override')
def set_data_interval(self):
'''set the axis data limits'''
raise NotImplementedError('Derived must override')
def set_default_intervals(self):
'''set the default limits for the axis data and view interval if they
are not mutated'''
# this is mainly in support of custom object plotting. For
# example, if someone passes in a datetime object, we do not
# know automagically how to set the default min/max of the
# data and view limits. The unit conversion AxisInfo
# interface provides a hook for custom types to register
# default limits through the AxisInfo.default_limits
# attribute, and the derived code below will check for that
        # and use it if it is available (else just use 0..1)
pass
def _set_artist_props(self, a):
if a is None:
return
a.set_figure(self.figure)
def iter_ticks(self):
"""
Iterate through all of the major and minor ticks.
"""
majorLocs = self.major.locator()
majorTicks = self.get_major_ticks(len(majorLocs))
self.major.formatter.set_locs(majorLocs)
majorLabels = [self.major.formatter(val, i)
for i, val in enumerate(majorLocs)]
minorLocs = self.minor.locator()
minorTicks = self.get_minor_ticks(len(minorLocs))
self.minor.formatter.set_locs(minorLocs)
minorLabels = [self.minor.formatter(val, i)
for i, val in enumerate(minorLocs)]
major_minor = [
(majorTicks, majorLocs, majorLabels),
(minorTicks, minorLocs, minorLabels)]
for group in major_minor:
for tick in zip(*group):
yield tick
def get_ticklabel_extents(self, renderer):
"""
Get the extents of the tick labels on either side
of the axes.
"""
ticks_to_draw = self._update_ticks(renderer)
ticklabelBoxes, ticklabelBoxes2 = self._get_tick_bboxes(ticks_to_draw,
renderer)
if len(ticklabelBoxes):
bbox = mtransforms.Bbox.union(ticklabelBoxes)
else:
bbox = mtransforms.Bbox.from_extents(0, 0, 0, 0)
if len(ticklabelBoxes2):
bbox2 = mtransforms.Bbox.union(ticklabelBoxes2)
else:
bbox2 = mtransforms.Bbox.from_extents(0, 0, 0, 0)
return bbox, bbox2
def set_smart_bounds(self, value):
"""set the axis to have smart bounds"""
self._smart_bounds = value
self.stale = True
def get_smart_bounds(self):
"""get whether the axis has smart bounds"""
return self._smart_bounds
def _update_ticks(self, renderer):
"""
Update ticks (position and labels) using the current data
interval of the axes. Returns a list of ticks that will be
drawn.
"""
interval = self.get_view_interval()
tick_tups = [t for t in self.iter_ticks()]
if self._smart_bounds:
# handle inverted limits
view_low, view_high = min(*interval), max(*interval)
data_low, data_high = self.get_data_interval()
if data_low > data_high:
data_low, data_high = data_high, data_low
locs = [ti[1] for ti in tick_tups]
locs.sort()
locs = np.array(locs)
if len(locs):
if data_low <= view_low:
# data extends beyond view, take view as limit
ilow = view_low
else:
# data stops within view, take best tick
cond = locs <= data_low
good_locs = locs[cond]
if len(good_locs) > 0:
# last tick prior or equal to first data point
ilow = good_locs[-1]
else:
# No ticks (why not?), take first tick
ilow = locs[0]
if data_high >= view_high:
# data extends beyond view, take view as limit
ihigh = view_high
else:
# data stops within view, take best tick
cond = locs >= data_high
good_locs = locs[cond]
if len(good_locs) > 0:
# first tick after or equal to last data point
ihigh = good_locs[0]
else:
# No ticks (why not?), take last tick
ihigh = locs[-1]
tick_tups = [ti for ti in tick_tups
if (ti[1] >= ilow) and (ti[1] <= ihigh)]
# so that we don't lose ticks on the end, expand out the interval ever
# so slightly. The "ever so slightly" is defined to be the width of a
        # half of a pixel.  We don't want to lose a tick that sits only a
        # fraction of a pixel outside of the defined axis interval.
if interval[0] <= interval[1]:
interval_expanded = interval
else:
interval_expanded = interval[1], interval[0]
if hasattr(self, '_get_pixel_distance_along_axis'):
# normally, one does not want to catch all exceptions that
# could possibly happen, but it is not clear exactly what
# exceptions might arise from a user's projection (their
# rendition of the Axis object). So, we catch all, with
# the idea that one would rather potentially lose a tick
# from one side of the axis or another, rather than see a
# stack trace.
            # We also catch user warnings here.  These are the result of
# invalid numpy calculations that may be the result of out of
# bounds on axis with finite allowed intervals such as geo
# projections i.e. Mollweide.
with np.errstate(invalid='ignore'):
try:
ds1 = self._get_pixel_distance_along_axis(
interval_expanded[0], -0.5)
except:
warnings.warn("Unable to find pixel distance along axis "
"for interval padding of ticks; assuming no "
"interval padding needed.")
ds1 = 0.0
if np.isnan(ds1):
ds1 = 0.0
try:
ds2 = self._get_pixel_distance_along_axis(
interval_expanded[1], +0.5)
except:
warnings.warn("Unable to find pixel distance along axis "
"for interval padding of ticks; assuming no "
"interval padding needed.")
ds2 = 0.0
if np.isnan(ds2):
ds2 = 0.0
interval_expanded = (interval_expanded[0] - ds1,
interval_expanded[1] + ds2)
ticks_to_draw = []
for tick, loc, label in tick_tups:
if tick is None:
continue
if not mtransforms.interval_contains(interval_expanded, loc):
continue
tick.update_position(loc)
tick.set_label1(label)
tick.set_label2(label)
ticks_to_draw.append(tick)
return ticks_to_draw
def _get_tick_bboxes(self, ticks, renderer):
"""
Given the list of ticks, return two lists of bboxes. One for
        tick label1's and another for tick label2's.
"""
ticklabelBoxes = []
ticklabelBoxes2 = []
for tick in ticks:
if tick.label1On and tick.label1.get_visible():
extent = tick.label1.get_window_extent(renderer)
ticklabelBoxes.append(extent)
if tick.label2On and tick.label2.get_visible():
extent = tick.label2.get_window_extent(renderer)
ticklabelBoxes2.append(extent)
return ticklabelBoxes, ticklabelBoxes2
def get_tightbbox(self, renderer):
"""
        Return a bounding box that encloses the axis.  It only accounts
        for the tick labels, axis label, and offsetText.
"""
if not self.get_visible():
return
ticks_to_draw = self._update_ticks(renderer)
ticklabelBoxes, ticklabelBoxes2 = self._get_tick_bboxes(ticks_to_draw,
renderer)
self._update_label_position(ticklabelBoxes, ticklabelBoxes2)
self._update_offset_text_position(ticklabelBoxes, ticklabelBoxes2)
self.offsetText.set_text(self.major.formatter.get_offset())
bb = []
for a in [self.label, self.offsetText]:
if a.get_visible():
bb.append(a.get_window_extent(renderer))
bb.extend(ticklabelBoxes)
bb.extend(ticklabelBoxes2)
bb = [b for b in bb if b.width != 0 or b.height != 0]
if bb:
_bbox = mtransforms.Bbox.union(bb)
return _bbox
else:
return None
def get_tick_padding(self):
values = []
if len(self.majorTicks):
values.append(self.majorTicks[0].get_tick_padding())
if len(self.minorTicks):
values.append(self.minorTicks[0].get_tick_padding())
if len(values):
return max(values)
return 0.0
@allow_rasterization
def draw(self, renderer, *args, **kwargs):
'Draw the axis lines, grid lines, tick lines and labels'
if not self.get_visible():
return
renderer.open_group(__name__)
ticks_to_draw = self._update_ticks(renderer)
ticklabelBoxes, ticklabelBoxes2 = self._get_tick_bboxes(ticks_to_draw,
renderer)
for tick in ticks_to_draw:
tick.draw(renderer)
        # Scale up the axis label box to also find the neighbors, not just
        # the tick labels that actually overlap.  Note that we need a *copy*
        # of the axis label box because we don't want to scale the actual
        # bbox.
self._update_label_position(ticklabelBoxes, ticklabelBoxes2)
self.label.draw(renderer)
self._update_offset_text_position(ticklabelBoxes, ticklabelBoxes2)
self.offsetText.set_text(self.major.formatter.get_offset())
self.offsetText.draw(renderer)
if 0: # draw the bounding boxes around the text for debug
for tick in self.majorTicks:
label = tick.label1
mpatches.bbox_artist(label, renderer)
mpatches.bbox_artist(self.label, renderer)
renderer.close_group(__name__)
self.stale = False
def _get_label(self):
raise NotImplementedError('Derived must override')
def _get_offset_text(self):
raise NotImplementedError('Derived must override')
def get_gridlines(self):
'Return the grid lines as a list of Line2D instance'
ticks = self.get_major_ticks()
return cbook.silent_list('Line2D gridline',
[tick.gridline for tick in ticks])
def get_label(self):
'Return the axis label as a Text instance'
return self.label
def get_offset_text(self):
'Return the axis offsetText as a Text instance'
return self.offsetText
def get_pickradius(self):
'Return the depth of the axis used by the picker'
return self.pickradius
def get_majorticklabels(self):
'Return a list of Text instances for the major ticklabels'
ticks = self.get_major_ticks()
labels1 = [tick.label1 for tick in ticks if tick.label1On]
labels2 = [tick.label2 for tick in ticks if tick.label2On]
return cbook.silent_list('Text major ticklabel', labels1 + labels2)
def get_minorticklabels(self):
'Return a list of Text instances for the minor ticklabels'
ticks = self.get_minor_ticks()
labels1 = [tick.label1 for tick in ticks if tick.label1On]
labels2 = [tick.label2 for tick in ticks if tick.label2On]
return cbook.silent_list('Text minor ticklabel', labels1 + labels2)
def get_ticklabels(self, minor=False, which=None):
"""
        Get the tick labels as a list of :class:`~matplotlib.text.Text`
instances.
Parameters
----------
minor : bool
If True return the minor ticklabels,
else return the major ticklabels
which : None, ('minor', 'major', 'both')
Overrides `minor`.
Selects which ticklabels to return
Returns
-------
ret : list
List of :class:`~matplotlib.text.Text` instances.
"""
if which is not None:
if which == 'minor':
return self.get_minorticklabels()
elif which == 'major':
return self.get_majorticklabels()
elif which == 'both':
return self.get_majorticklabels() + self.get_minorticklabels()
else:
raise ValueError("`which` must be one of ('minor', 'major', "
"'both') not " + str(which))
if minor:
return self.get_minorticklabels()
return self.get_majorticklabels()
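    # Illustrative usage sketch (editor's addition, not part of the original
    # source), assuming the standard pyplot API:
    #
    #     >>> import matplotlib.pyplot as plt
    #     >>> fig, ax = plt.subplots()
    #     >>> for text in ax.xaxis.get_ticklabels(which='both'):
    #     ...     text.set_rotation(45)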
def get_majorticklines(self):
'Return the major tick lines as a list of Line2D instances'
lines = []
ticks = self.get_major_ticks()
for tick in ticks:
lines.append(tick.tick1line)
lines.append(tick.tick2line)
return cbook.silent_list('Line2D ticklines', lines)
def get_minorticklines(self):
'Return the minor tick lines as a list of Line2D instances'
lines = []
ticks = self.get_minor_ticks()
for tick in ticks:
lines.append(tick.tick1line)
lines.append(tick.tick2line)
return cbook.silent_list('Line2D ticklines', lines)
def get_ticklines(self, minor=False):
'Return the tick lines as a list of Line2D instances'
if minor:
return self.get_minorticklines()
return self.get_majorticklines()
def get_majorticklocs(self):
"Get the major tick locations in data coordinates as a numpy array"
return self.major.locator()
def get_minorticklocs(self):
"Get the minor tick locations in data coordinates as a numpy array"
return self.minor.locator()
def get_ticklocs(self, minor=False):
"Get the tick locations in data coordinates as a numpy array"
if minor:
return self.minor.locator()
return self.major.locator()
def _get_tick(self, major):
'return the default tick instance'
raise NotImplementedError('derived must override')
def _copy_tick_props(self, src, dest):
'Copy the props from src tick to dest tick'
if src is None or dest is None:
return
dest.label1.update_from(src.label1)
dest.label2.update_from(src.label2)
dest.tick1line.update_from(src.tick1line)
dest.tick2line.update_from(src.tick2line)
dest.gridline.update_from(src.gridline)
dest.tick1On = src.tick1On
dest.tick2On = src.tick2On
dest.label1On = src.label1On
dest.label2On = src.label2On
def get_label_text(self):
'Get the text of the label'
return self.label.get_text()
def get_major_locator(self):
'Get the locator of the major ticker'
return self.major.locator
def get_minor_locator(self):
'Get the locator of the minor ticker'
return self.minor.locator
def get_major_formatter(self):
'Get the formatter of the major ticker'
return self.major.formatter
def get_minor_formatter(self):
'Get the formatter of the minor ticker'
return self.minor.formatter
def get_major_ticks(self, numticks=None):
'get the tick instances; grow as necessary'
if numticks is None:
numticks = len(self.get_major_locator()())
if len(self.majorTicks) < numticks:
# update the new tick label properties from the old
for i in range(numticks - len(self.majorTicks)):
tick = self._get_tick(major=True)
self.majorTicks.append(tick)
if self._lastNumMajorTicks < numticks:
protoTick = self.majorTicks[0]
for i in range(self._lastNumMajorTicks, len(self.majorTicks)):
tick = self.majorTicks[i]
if self._gridOnMajor:
tick.gridOn = True
self._copy_tick_props(protoTick, tick)
self._lastNumMajorTicks = numticks
ticks = self.majorTicks[:numticks]
return ticks
def get_minor_ticks(self, numticks=None):
'get the minor tick instances; grow as necessary'
if numticks is None:
numticks = len(self.get_minor_locator()())
if len(self.minorTicks) < numticks:
# update the new tick label properties from the old
for i in range(numticks - len(self.minorTicks)):
tick = self._get_tick(major=False)
self.minorTicks.append(tick)
if self._lastNumMinorTicks < numticks:
protoTick = self.minorTicks[0]
for i in range(self._lastNumMinorTicks, len(self.minorTicks)):
tick = self.minorTicks[i]
if self._gridOnMinor:
tick.gridOn = True
self._copy_tick_props(protoTick, tick)
self._lastNumMinorTicks = numticks
ticks = self.minorTicks[:numticks]
return ticks
def grid(self, b=None, which='major', **kwargs):
"""
Set the axis grid on or off; b is a boolean. Use *which* =
'major' | 'minor' | 'both' to set the grid for major or minor ticks.
If *b* is *None* and len(kwargs)==0, toggle the grid state. If
*kwargs* are supplied, it is assumed you want the grid on and *b*
will be set to True.
*kwargs* are used to set the line properties of the grids, e.g.,
xax.grid(color='r', linestyle='-', linewidth=2)
"""
if len(kwargs):
b = True
which = which.lower()
if which in ['minor', 'both']:
if b is None:
self._gridOnMinor = not self._gridOnMinor
else:
self._gridOnMinor = b
for tick in self.minorTicks: # don't use get_ticks here!
if tick is None:
continue
tick.gridOn = self._gridOnMinor
if len(kwargs):
tick.gridline.update(kwargs)
self._minor_tick_kw['gridOn'] = self._gridOnMinor
if which in ['major', 'both']:
if b is None:
self._gridOnMajor = not self._gridOnMajor
else:
self._gridOnMajor = b
for tick in self.majorTicks: # don't use get_ticks here!
if tick is None:
continue
tick.gridOn = self._gridOnMajor
if len(kwargs):
tick.gridline.update(kwargs)
self._major_tick_kw['gridOn'] = self._gridOnMajor
self.stale = True
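    # Illustrative usage sketch (editor's addition, not part of the original
    # source), assuming the standard pyplot API.  Passing line-property
    # kwargs implies b=True, as the docstring above notes:
    #
    #     >>> import matplotlib.pyplot as plt
    #     >>> fig, ax = plt.subplots()
    #     >>> ax.yaxis.grid(color='0.8', linestyle=':', which='major')
    #     >>> ax.yaxis.grid()    # no args and no kwargs -> toggle back off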
def update_units(self, data):
"""
introspect *data* for units converter and update the
axis.converter instance if necessary. Return *True*
if *data* is registered for unit conversion.
"""
converter = munits.registry.get_converter(data)
if converter is None:
return False
neednew = self.converter != converter
self.converter = converter
default = self.converter.default_units(data, self)
if default is not None and self.units is None:
self.set_units(default)
if neednew:
self._update_axisinfo()
self.stale = True
return True
def _update_axisinfo(self):
"""
check the axis converter for the stored units to see if the
axis info needs to be updated
"""
if self.converter is None:
return
info = self.converter.axisinfo(self.units, self)
if info is None:
return
if info.majloc is not None and \
self.major.locator != info.majloc and self.isDefault_majloc:
self.set_major_locator(info.majloc)
self.isDefault_majloc = True
if info.minloc is not None and \
self.minor.locator != info.minloc and self.isDefault_minloc:
self.set_minor_locator(info.minloc)
self.isDefault_minloc = True
if info.majfmt is not None and \
self.major.formatter != info.majfmt and self.isDefault_majfmt:
self.set_major_formatter(info.majfmt)
self.isDefault_majfmt = True
if info.minfmt is not None and \
self.minor.formatter != info.minfmt and self.isDefault_minfmt:
self.set_minor_formatter(info.minfmt)
self.isDefault_minfmt = True
if info.label is not None and self.isDefault_label:
self.set_label_text(info.label)
self.isDefault_label = True
self.set_default_intervals()
def have_units(self):
return self.converter is not None or self.units is not None
def convert_units(self, x):
if self.converter is None:
self.converter = munits.registry.get_converter(x)
if self.converter is None:
return x
ret = self.converter.convert(x, self.units, self)
return ret
def set_units(self, u):
"""
set the units for axis
ACCEPTS: a units tag
"""
pchanged = False
if u is None:
self.units = None
pchanged = True
else:
if u != self.units:
self.units = u
pchanged = True
if pchanged:
self._update_axisinfo()
self.callbacks.process('units')
self.callbacks.process('units finalize')
self.stale = True
def get_units(self):
'return the units for axis'
return self.units
def set_label_text(self, label, fontdict=None, **kwargs):
""" Sets the text value of the axis label
ACCEPTS: A string value for the label
"""
self.isDefault_label = False
self.label.set_text(label)
if fontdict is not None:
self.label.update(fontdict)
self.label.update(kwargs)
self.stale = True
return self.label
def set_major_formatter(self, formatter):
"""
Set the formatter of the major ticker
ACCEPTS: A :class:`~matplotlib.ticker.Formatter` instance
"""
self.isDefault_majfmt = False
self.major.formatter = formatter
formatter.set_axis(self)
self.stale = True
def set_minor_formatter(self, formatter):
"""
Set the formatter of the minor ticker
ACCEPTS: A :class:`~matplotlib.ticker.Formatter` instance
"""
self.isDefault_minfmt = False
self.minor.formatter = formatter
formatter.set_axis(self)
self.stale = True
def set_major_locator(self, locator):
"""
Set the locator of the major ticker
ACCEPTS: a :class:`~matplotlib.ticker.Locator` instance
"""
self.isDefault_majloc = False
self.major.locator = locator
locator.set_axis(self)
self.stale = True
def set_minor_locator(self, locator):
"""
Set the locator of the minor ticker
ACCEPTS: a :class:`~matplotlib.ticker.Locator` instance
"""
self.isDefault_minloc = False
self.minor.locator = locator
locator.set_axis(self)
self.stale = True
def set_pickradius(self, pickradius):
"""
Set the depth of the axis used by the picker
ACCEPTS: a distance in points
"""
self.pickradius = pickradius
def set_ticklabels(self, ticklabels, *args, **kwargs):
"""
Set the text values of the tick labels. Return a list of Text
instances. Use *kwarg* *minor=True* to select minor ticks.
All other kwargs are used to update the text object properties.
As for get_ticklabels, label1 (left or bottom) is
affected for a given tick only if its label1On attribute
is True, and similarly for label2. The list of returned
label text objects consists of all such label1 objects followed
by all such label2 objects.
The input *ticklabels* is assumed to match the set of
tick locations, regardless of the state of label1On and
label2On.
ACCEPTS: sequence of strings or Text objects
"""
get_labels = []
for t in ticklabels:
# try calling get_text() to check whether it is Text object
# if it is Text, get label content
try:
get_labels.append(t.get_text())
# otherwise add the label to the list directly
except AttributeError:
get_labels.append(t)
# replace the ticklabels list with the processed one
ticklabels = get_labels
minor = kwargs.pop('minor', False)
if minor:
self.set_minor_formatter(mticker.FixedFormatter(ticklabels))
ticks = self.get_minor_ticks()
else:
self.set_major_formatter(mticker.FixedFormatter(ticklabels))
ticks = self.get_major_ticks()
ret = []
for tick_label, tick in zip(ticklabels, ticks):
# deal with label1
tick.label1.set_text(tick_label)
tick.label1.update(kwargs)
# deal with label2
tick.label2.set_text(tick_label)
tick.label2.update(kwargs)
# only return visible tick labels
if tick.label1On:
ret.append(tick.label1)
if tick.label2On:
ret.append(tick.label2)
self.stale = True
return ret
def set_ticks(self, ticks, minor=False):
"""
Set the locations of the tick marks from sequence ticks
ACCEPTS: sequence of floats
"""
# XXX if the user changes units, the information will be lost here
ticks = self.convert_units(ticks)
if len(ticks) > 1:
xleft, xright = self.get_view_interval()
if xright > xleft:
self.set_view_interval(min(ticks), max(ticks))
else:
self.set_view_interval(max(ticks), min(ticks))
if minor:
self.set_minor_locator(mticker.FixedLocator(ticks))
return self.get_minor_ticks(len(ticks))
else:
self.set_major_locator(mticker.FixedLocator(ticks))
return self.get_major_ticks(len(ticks))
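    # Illustrative usage sketch (editor's addition, not part of the original
    # source), assuming the standard pyplot API.  set_ticks and
    # set_ticklabels are usually paired so the FixedFormatter labels line up
    # with the fixed locations:
    #
    #     >>> import matplotlib.pyplot as plt
    #     >>> fig, ax = plt.subplots()
    #     >>> ax.xaxis.set_ticks([0.0, 0.5, 1.0])
    #     >>> ax.xaxis.set_ticklabels(['low', 'mid', 'high'], rotation=30)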
def _update_label_position(self, bboxes, bboxes2):
"""
Update the label position based on the bounding box enclosing
all the ticklabels and axis spine
"""
raise NotImplementedError('Derived must override')
    def _update_offset_text_position(self, bboxes, bboxes2):
        """
        Update the offset_text position based on the sequence of bounding
        boxes of all the ticklabels
"""
raise NotImplementedError('Derived must override')
def pan(self, numsteps):
'Pan *numsteps* (can be positive or negative)'
self.major.locator.pan(numsteps)
def zoom(self, direction):
"Zoom in/out on axis; if *direction* is >0 zoom in, else zoom out"
self.major.locator.zoom(direction)
def axis_date(self, tz=None):
"""
        Sets up ticks and labels that treat the data along this axis as
        dates.
*tz* is a :class:`tzinfo` instance or a timezone string.
This timezone is used to create date labels.
"""
# By providing a sample datetime instance with the desired
# timezone, the registered converter can be selected,
# and the "units" attribute, which is the timezone, can
# be set.
import datetime
if isinstance(tz, six.string_types):
import pytz
tz = pytz.timezone(tz)
self.update_units(datetime.datetime(2009, 1, 1, 0, 0, 0, 0, tz))
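    # Illustrative usage sketch (editor's addition, not part of the original
    # source), assuming the standard pyplot API and an installed pytz; the
    # timezone name is arbitrary:
    #
    #     >>> import matplotlib.pyplot as plt
    #     >>> fig, ax = plt.subplots()
    #     >>> ax.xaxis.axis_date('US/Eastern')  # install date converter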
def get_tick_space(self):
"""
Return the estimated number of ticks that can fit on the axis.
"""
# Must be overridden in the subclass
raise NotImplementedError()
def get_label_position(self):
"""
        Return the label position (top/bottom for the x-axis,
        left/right for the y-axis)
"""
return self.label_position
def set_label_position(self, position):
"""
Set the label position (top or bottom)
ACCEPTS: [ 'top' | 'bottom' ]
"""
raise NotImplementedError()
def get_minpos(self):
raise NotImplementedError()
class XAxis(Axis):
__name__ = 'xaxis'
axis_name = 'x'
def contains(self, mouseevent):
"""Test whether the mouse event occured in the x axis.
"""
if six.callable(self._contains):
return self._contains(self, mouseevent)
x, y = mouseevent.x, mouseevent.y
try:
trans = self.axes.transAxes.inverted()
xaxes, yaxes = trans.transform_point((x, y))
except ValueError:
return False, {}
l, b = self.axes.transAxes.transform_point((0, 0))
r, t = self.axes.transAxes.transform_point((1, 1))
inaxis = xaxes >= 0 and xaxes <= 1 and (
(y < b and y > b - self.pickradius) or
(y > t and y < t + self.pickradius))
return inaxis, {}
def _get_tick(self, major):
if major:
tick_kw = self._major_tick_kw
else:
tick_kw = self._minor_tick_kw
return XTick(self.axes, 0, '', major=major, **tick_kw)
def _get_label(self):
        # x in axes coords, y in display coords (to be updated at draw
        # time by _update_label_position)
label = mtext.Text(x=0.5, y=0,
fontproperties=font_manager.FontProperties(
size=rcParams['axes.labelsize'],
weight=rcParams['axes.labelweight']),
color=rcParams['axes.labelcolor'],
verticalalignment='top',
horizontalalignment='center')
label.set_transform(mtransforms.blended_transform_factory(
self.axes.transAxes, mtransforms.IdentityTransform()))
self._set_artist_props(label)
self.label_position = 'bottom'
return label
def _get_offset_text(self):
# x in axes coords, y in display coords (to be updated at draw time)
offsetText = mtext.Text(x=1, y=0,
fontproperties=font_manager.FontProperties(
size=rcParams['xtick.labelsize']),
color=rcParams['xtick.color'],
verticalalignment='top',
horizontalalignment='right')
offsetText.set_transform(mtransforms.blended_transform_factory(
self.axes.transAxes, mtransforms.IdentityTransform())
)
self._set_artist_props(offsetText)
self.offset_text_position = 'bottom'
return offsetText
def _get_pixel_distance_along_axis(self, where, perturb):
"""
Returns the amount, in data coordinates, that a single pixel
corresponds to in the locality given by "where", which is also given
in data coordinates, and is an x coordinate. "perturb" is the amount
to perturb the pixel. Usually +0.5 or -0.5.
Implementing this routine for an axis is optional; if present, it will
ensure that no ticks are lost due to round-off at the extreme ends of
an axis.
"""
# Note that this routine does not work for a polar axis, because of
# the 1e-10 below. To do things correctly, we need to use rmax
# instead of 1e-10 for a polar axis. But since we do not have that
# kind of information at this point, we just don't try to pad anything
# for the theta axis of a polar plot.
if self.axes.name == 'polar':
return 0.0
#
# first figure out the pixel location of the "where" point. We use
# 1e-10 for the y point, so that we remain compatible with log axes.
# transformation from data coords to display coords
trans = self.axes.transData
# transformation from display coords to data coords
transinv = trans.inverted()
pix = trans.transform_point((where, 1e-10))
# perturb the pixel
ptp = transinv.transform_point((pix[0] + perturb, pix[1]))
dx = abs(ptp[0] - where)
return dx
def set_label_position(self, position):
"""
Set the label position (top or bottom)
ACCEPTS: [ 'top' | 'bottom' ]
"""
if position == 'top':
self.label.set_verticalalignment('baseline')
elif position == 'bottom':
self.label.set_verticalalignment('top')
else:
msg = "Position accepts only [ 'top' | 'bottom' ]"
raise ValueError(msg)
self.label_position = position
self.stale = True
def _update_label_position(self, bboxes, bboxes2):
"""
Update the label position based on the bounding box enclosing
all the ticklabels and axis spine
"""
if not self._autolabelpos:
return
x, y = self.label.get_position()
if self.label_position == 'bottom':
try:
spine = self.axes.spines['bottom']
spinebbox = spine.get_transform().transform_path(
spine.get_path()).get_extents()
except KeyError:
# use axes if spine doesn't exist
spinebbox = self.axes.bbox
bbox = mtransforms.Bbox.union(bboxes + [spinebbox])
bottom = bbox.y0
self.label.set_position(
(x, bottom - self.labelpad * self.figure.dpi / 72.0)
)
else:
try:
spine = self.axes.spines['top']
spinebbox = spine.get_transform().transform_path(
spine.get_path()).get_extents()
except KeyError:
# use axes if spine doesn't exist
spinebbox = self.axes.bbox
bbox = mtransforms.Bbox.union(bboxes2 + [spinebbox])
top = bbox.y1
self.label.set_position(
(x, top + self.labelpad * self.figure.dpi / 72.0)
)
def _update_offset_text_position(self, bboxes, bboxes2):
"""
Update the offset_text position based on the sequence of bounding
boxes of all the ticklabels
"""
x, y = self.offsetText.get_position()
if not len(bboxes):
bottom = self.axes.bbox.ymin
else:
bbox = mtransforms.Bbox.union(bboxes)
bottom = bbox.y0
self.offsetText.set_position(
(x, bottom - self.OFFSETTEXTPAD * self.figure.dpi / 72.0)
)
def get_text_heights(self, renderer):
"""
Returns the amount of space one should reserve for text
above and below the axes. Returns a tuple (above, below)
"""
bbox, bbox2 = self.get_ticklabel_extents(renderer)
# MGDTODO: Need a better way to get the pad
padPixels = self.majorTicks[0].get_pad_pixels()
above = 0.0
if bbox2.height:
above += bbox2.height + padPixels
below = 0.0
if bbox.height:
below += bbox.height + padPixels
if self.get_label_position() == 'top':
above += self.label.get_window_extent(renderer).height + padPixels
else:
below += self.label.get_window_extent(renderer).height + padPixels
return above, below
def set_ticks_position(self, position):
"""
Set the ticks position (top, bottom, both, default or none)
        'both' sets the ticks to appear on both positions, but does not
change the tick labels. 'default' resets the tick positions to
the default: ticks on both positions, labels at bottom. 'none'
can be used if you don't want any ticks. 'none' and 'both'
affect only the ticks, not the labels.
ACCEPTS: [ 'top' | 'bottom' | 'both' | 'default' | 'none' ]
"""
if position == 'top':
self.set_tick_params(which='both', top=True, labeltop=True,
bottom=False, labelbottom=False)
elif position == 'bottom':
self.set_tick_params(which='both', top=False, labeltop=False,
bottom=True, labelbottom=True)
elif position == 'both':
self.set_tick_params(which='both', top=True,
bottom=True)
elif position == 'none':
self.set_tick_params(which='both', top=False,
bottom=False)
elif position == 'default':
self.set_tick_params(which='both', top=True, labeltop=False,
bottom=True, labelbottom=True)
else:
raise ValueError("invalid position: %s" % position)
self.stale = True
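    # Illustrative usage sketch (editor's addition, not part of the original
    # source), assuming the standard pyplot API:
    #
    #     >>> import matplotlib.pyplot as plt
    #     >>> fig, ax = plt.subplots()
    #     >>> ax.xaxis.set_ticks_position('top')   # ticks and labels on top
    #     >>> ax.xaxis.get_ticks_position()
    #     'top'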
def tick_top(self):
'use ticks only on top'
self.set_ticks_position('top')
def tick_bottom(self):
'use ticks only on bottom'
self.set_ticks_position('bottom')
def get_ticks_position(self):
"""
Return the ticks position (top, bottom, default or unknown)
"""
majt = self.majorTicks[0]
mT = self.minorTicks[0]
majorTop = ((not majt.tick1On) and majt.tick2On and
(not majt.label1On) and majt.label2On)
minorTop = ((not mT.tick1On) and mT.tick2On and
(not mT.label1On) and mT.label2On)
if majorTop and minorTop:
return 'top'
        majorBottom = (majt.tick1On and (not majt.tick2On) and
                       majt.label1On and (not majt.label2On))
        minorBottom = (mT.tick1On and (not mT.tick2On) and
                       mT.label1On and (not mT.label2On))
        if majorBottom and minorBottom:
return 'bottom'
majorDefault = (majt.tick1On and majt.tick2On and
majt.label1On and (not majt.label2On))
minorDefault = (mT.tick1On and mT.tick2On and
mT.label1On and (not mT.label2On))
if majorDefault and minorDefault:
return 'default'
return 'unknown'
def get_view_interval(self):
'return the Interval instance for this axis view limits'
return self.axes.viewLim.intervalx
def set_view_interval(self, vmin, vmax, ignore=False):
"""
If *ignore* is *False*, the order of vmin, vmax
does not matter; the original axis orientation will
be preserved. In addition, the view limits can be
expanded, but will not be reduced. This method is
for mpl internal use; for normal use, see
:meth:`~matplotlib.axes.Axes.set_xlim`.
"""
if ignore:
self.axes.viewLim.intervalx = vmin, vmax
else:
Vmin, Vmax = self.get_view_interval()
if Vmin < Vmax:
self.axes.viewLim.intervalx = (min(vmin, vmax, Vmin),
max(vmin, vmax, Vmax))
else:
self.axes.viewLim.intervalx = (max(vmin, vmax, Vmin),
min(vmin, vmax, Vmax))
def get_minpos(self):
return self.axes.dataLim.minposx
def get_data_interval(self):
'return the Interval instance for this axis data limits'
return self.axes.dataLim.intervalx
def set_data_interval(self, vmin, vmax, ignore=False):
'set the axis data limits'
if ignore:
self.axes.dataLim.intervalx = vmin, vmax
else:
Vmin, Vmax = self.get_data_interval()
self.axes.dataLim.intervalx = min(vmin, Vmin), max(vmax, Vmax)
self.stale = True
def set_default_intervals(self):
'set the default limits for the axis interval if they are not mutated'
xmin, xmax = 0., 1.
dataMutated = self.axes.dataLim.mutatedx()
viewMutated = self.axes.viewLim.mutatedx()
if not dataMutated or not viewMutated:
if self.converter is not None:
info = self.converter.axisinfo(self.units, self)
if info.default_limits is not None:
valmin, valmax = info.default_limits
xmin = self.converter.convert(valmin, self.units, self)
xmax = self.converter.convert(valmax, self.units, self)
if not dataMutated:
self.axes.dataLim.intervalx = xmin, xmax
if not viewMutated:
self.axes.viewLim.intervalx = xmin, xmax
self.stale = True
def get_tick_space(self):
ends = self.axes.transAxes.transform([[0, 0], [1, 0]])
length = ((ends[1][0] - ends[0][0]) / self.axes.figure.dpi) * 72.0
tick = self._get_tick(True)
# There is a heuristic here that the aspect ratio of tick text
# is no more than 3:1
size = tick.label1.get_size() * 3
if size > 0:
return int(np.floor(length / size))
else:
return 2**31 - 1
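    # Illustrative arithmetic (editor's addition, not part of the original
    # source): for a hypothetical axes spanning 6 inches horizontally,
    # ``length`` is 6 * 72 = 432 points; with a 10 pt tick label and the
    # 3:1 aspect heuristic above, ``size`` is 30, so roughly
    # floor(432 / 30) = 14 major ticks are estimated to fit.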
class YAxis(Axis):
__name__ = 'yaxis'
axis_name = 'y'
def contains(self, mouseevent):
"""Test whether the mouse event occurred in the y axis.
Returns *True* | *False*
"""
if six.callable(self._contains):
return self._contains(self, mouseevent)
x, y = mouseevent.x, mouseevent.y
try:
trans = self.axes.transAxes.inverted()
xaxes, yaxes = trans.transform_point((x, y))
except ValueError:
return False, {}
l, b = self.axes.transAxes.transform_point((0, 0))
r, t = self.axes.transAxes.transform_point((1, 1))
inaxis = yaxes >= 0 and yaxes <= 1 and (
(x < l and x > l - self.pickradius) or
(x > r and x < r + self.pickradius))
return inaxis, {}
def _get_tick(self, major):
if major:
tick_kw = self._major_tick_kw
else:
tick_kw = self._minor_tick_kw
return YTick(self.axes, 0, '', major=major, **tick_kw)
def _get_label(self):
# x in display coords (updated by _update_label_position)
# y in axes coords
label = mtext.Text(x=0, y=0.5,
# todo: get the label position
fontproperties=font_manager.FontProperties(
size=rcParams['axes.labelsize'],
weight=rcParams['axes.labelweight']),
color=rcParams['axes.labelcolor'],
verticalalignment='bottom',
horizontalalignment='center',
rotation='vertical',
rotation_mode='anchor')
label.set_transform(mtransforms.blended_transform_factory(
mtransforms.IdentityTransform(), self.axes.transAxes))
self._set_artist_props(label)
self.label_position = 'left'
return label
def _get_offset_text(self):
# x in display coords, y in axes coords (to be updated at draw time)
offsetText = mtext.Text(x=0, y=0.5,
fontproperties=font_manager.FontProperties(
size=rcParams['ytick.labelsize']
),
color=rcParams['ytick.color'],
verticalalignment='baseline',
horizontalalignment='left')
offsetText.set_transform(mtransforms.blended_transform_factory(
self.axes.transAxes, mtransforms.IdentityTransform())
)
self._set_artist_props(offsetText)
self.offset_text_position = 'left'
return offsetText
def _get_pixel_distance_along_axis(self, where, perturb):
"""
Returns the amount, in data coordinates, that a single pixel
corresponds to in the locality given by *where*, which is also given
in data coordinates, and is a y coordinate.
*perturb* is the amount to perturb the pixel. Usually +0.5 or -0.5.
Implementing this routine for an axis is optional; if present, it will
ensure that no ticks are lost due to round-off at the extreme ends of
an axis.
"""
#
# first figure out the pixel location of the "where" point. We use
# 1e-10 for the x point, so that we remain compatible with log axes.
# transformation from data coords to display coords
trans = self.axes.transData
# transformation from display coords to data coords
transinv = trans.inverted()
pix = trans.transform_point((1e-10, where))
# perturb the pixel
ptp = transinv.transform_point((pix[0], pix[1] + perturb))
dy = abs(ptp[1] - where)
return dy
def set_label_position(self, position):
"""
Set the label position (left or right)
ACCEPTS: [ 'left' | 'right' ]
"""
self.label.set_rotation_mode('anchor')
self.label.set_horizontalalignment('center')
if position == 'left':
self.label.set_verticalalignment('bottom')
elif position == 'right':
self.label.set_verticalalignment('top')
else:
msg = "Position accepts only [ 'left' | 'right' ]"
raise ValueError(msg)
self.label_position = position
self.stale = True
def _update_label_position(self, bboxes, bboxes2):
"""
Update the label position based on the bounding box enclosing
all the ticklabels and axis spine
"""
if not self._autolabelpos:
return
x, y = self.label.get_position()
if self.label_position == 'left':
try:
spine = self.axes.spines['left']
spinebbox = spine.get_transform().transform_path(
spine.get_path()).get_extents()
except KeyError:
# use axes if spine doesn't exist
spinebbox = self.axes.bbox
bbox = mtransforms.Bbox.union(bboxes + [spinebbox])
left = bbox.x0
self.label.set_position(
(left - self.labelpad * self.figure.dpi / 72.0, y)
)
else:
try:
spine = self.axes.spines['right']
spinebbox = spine.get_transform().transform_path(
spine.get_path()).get_extents()
except KeyError:
# use axes if spine doesn't exist
spinebbox = self.axes.bbox
bbox = mtransforms.Bbox.union(bboxes2 + [spinebbox])
right = bbox.x1
self.label.set_position(
(right + self.labelpad * self.figure.dpi / 72.0, y)
)
def _update_offset_text_position(self, bboxes, bboxes2):
"""
Update the offset_text position based on the sequence of bounding
boxes of all the ticklabels
"""
x, y = self.offsetText.get_position()
top = self.axes.bbox.ymax
self.offsetText.set_position(
(x, top + self.OFFSETTEXTPAD * self.figure.dpi / 72.0)
)
def set_offset_position(self, position):
x, y = self.offsetText.get_position()
if position == 'left':
x = 0
elif position == 'right':
x = 1
else:
msg = "Position accepts only [ 'left' | 'right' ]"
raise ValueError(msg)
self.offsetText.set_ha(position)
self.offsetText.set_position((x, y))
self.stale = True
def get_text_widths(self, renderer):
bbox, bbox2 = self.get_ticklabel_extents(renderer)
# MGDTODO: Need a better way to get the pad
padPixels = self.majorTicks[0].get_pad_pixels()
left = 0.0
if bbox.width:
left += bbox.width + padPixels
right = 0.0
if bbox2.width:
right += bbox2.width + padPixels
if self.get_label_position() == 'left':
left += self.label.get_window_extent(renderer).width + padPixels
else:
right += self.label.get_window_extent(renderer).width + padPixels
return left, right
def set_ticks_position(self, position):
"""
Set the ticks position (left, right, both, default or none)
'both' sets the ticks to appear on both positions, but does not
change the tick labels. 'default' resets the tick positions to
the default: ticks on both positions, labels at left. 'none'
can be used if you don't want any ticks. 'none' and 'both'
affect only the ticks, not the labels.
ACCEPTS: [ 'left' | 'right' | 'both' | 'default' | 'none' ]
"""
if position == 'right':
self.set_tick_params(which='both', right=True, labelright=True,
left=False, labelleft=False)
self.set_offset_position(position)
elif position == 'left':
self.set_tick_params(which='both', right=False, labelright=False,
left=True, labelleft=True)
self.set_offset_position(position)
elif position == 'both':
self.set_tick_params(which='both', right=True,
left=True)
elif position == 'none':
self.set_tick_params(which='both', right=False,
left=False)
elif position == 'default':
self.set_tick_params(which='both', right=True, labelright=False,
left=True, labelleft=True)
else:
raise ValueError("invalid position: %s" % position)
self.stale = True
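    # Illustrative usage sketch (editor's addition, not part of the original
    # source), assuming the standard pyplot API:
    #
    #     >>> import matplotlib.pyplot as plt
    #     >>> fig, ax = plt.subplots()
    #     >>> ax.yaxis.set_ticks_position('right')   # also moves offset text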
def tick_right(self):
'use ticks only on right'
self.set_ticks_position('right')
def tick_left(self):
'use ticks only on left'
self.set_ticks_position('left')
def get_ticks_position(self):
"""
Return the ticks position (left, right, both or unknown)
"""
majt = self.majorTicks[0]
mT = self.minorTicks[0]
majorRight = ((not majt.tick1On) and majt.tick2On and
(not majt.label1On) and majt.label2On)
minorRight = ((not mT.tick1On) and mT.tick2On and
(not mT.label1On) and mT.label2On)
if majorRight and minorRight:
return 'right'
majorLeft = (majt.tick1On and (not majt.tick2On) and
majt.label1On and (not majt.label2On))
minorLeft = (mT.tick1On and (not mT.tick2On) and
mT.label1On and (not mT.label2On))
if majorLeft and minorLeft:
return 'left'
majorDefault = (majt.tick1On and majt.tick2On and
majt.label1On and (not majt.label2On))
minorDefault = (mT.tick1On and mT.tick2On and
mT.label1On and (not mT.label2On))
if majorDefault and minorDefault:
return 'default'
return 'unknown'
def get_view_interval(self):
'return the Interval instance for this axis view limits'
return self.axes.viewLim.intervaly
def set_view_interval(self, vmin, vmax, ignore=False):
"""
If *ignore* is *False*, the order of vmin, vmax
does not matter; the original axis orientation will
be preserved. In addition, the view limits can be
expanded, but will not be reduced. This method is
for mpl internal use; for normal use, see
:meth:`~matplotlib.axes.Axes.set_ylim`.
"""
if ignore:
self.axes.viewLim.intervaly = vmin, vmax
else:
Vmin, Vmax = self.get_view_interval()
if Vmin < Vmax:
self.axes.viewLim.intervaly = (min(vmin, vmax, Vmin),
max(vmin, vmax, Vmax))
else:
self.axes.viewLim.intervaly = (max(vmin, vmax, Vmin),
min(vmin, vmax, Vmax))
self.stale = True
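    # Worked example of the expand-but-not-reduce behaviour documented above
    # (illustrative values, not from the original source); with current view
    # limits (0, 10):
    #
    #     yaxis.set_view_interval(2, 8)               # limits stay (0, 10)
    #     yaxis.set_view_interval(-5, 12)             # limits grow to (-5, 12)
    #     yaxis.set_view_interval(2, 8, ignore=True)  # limits forced to (2, 8)
    #
    # End-user code should call Axes.set_ylim() instead, as noted above.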
def get_minpos(self):
return self.axes.dataLim.minposy
def get_data_interval(self):
'return the Interval instance for this axis data limits'
return self.axes.dataLim.intervaly
def set_data_interval(self, vmin, vmax, ignore=False):
'set the axis data limits'
if ignore:
self.axes.dataLim.intervaly = vmin, vmax
else:
Vmin, Vmax = self.get_data_interval()
self.axes.dataLim.intervaly = min(vmin, Vmin), max(vmax, Vmax)
self.stale = True
def set_default_intervals(self):
'set the default limits for the axis interval if they are not mutated'
ymin, ymax = 0., 1.
dataMutated = self.axes.dataLim.mutatedy()
viewMutated = self.axes.viewLim.mutatedy()
if not dataMutated or not viewMutated:
if self.converter is not None:
info = self.converter.axisinfo(self.units, self)
if info.default_limits is not None:
valmin, valmax = info.default_limits
ymin = self.converter.convert(valmin, self.units, self)
ymax = self.converter.convert(valmax, self.units, self)
if not dataMutated:
self.axes.dataLim.intervaly = ymin, ymax
if not viewMutated:
self.axes.viewLim.intervaly = ymin, ymax
self.stale = True
def get_tick_space(self):
ends = self.axes.transAxes.transform([[0, 0], [0, 1]])
length = ((ends[1][1] - ends[0][1]) / self.axes.figure.dpi) * 72.0
tick = self._get_tick(True)
# Having a spacing of at least 2 just looks good.
size = tick.label1.get_size() * 2.0
if size > 0:
return int(np.floor(length / size))
else:
return 2**31 - 1
| bsd-3-clause |
brettwooldridge/buck | third-party/py/pywatchman/pywatchman/encoding.py | 29 | 2957 | # Copyright 2016-present Facebook, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name Facebook nor the names of its contributors may be used to
# endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# no unicode literals
'''Module to deal with filename encoding on the local system, as returned by
Watchman.'''
import sys
from . import (
compat,
)
if compat.PYTHON3:
default_local_errors = 'surrogateescape'
def get_local_encoding():
if sys.platform == 'win32':
# Watchman always returns UTF-8 encoded strings on Windows.
return 'utf-8'
# On the Python 3 versions we support, sys.getfilesystemencoding never
# returns None.
return sys.getfilesystemencoding()
else:
# Python 2 doesn't support surrogateescape, so use 'strict' by
# default. Users can register a custom surrogateescape error handler and use
# that if they so desire.
default_local_errors = 'strict'
def get_local_encoding():
if sys.platform == 'win32':
# Watchman always returns UTF-8 encoded strings on Windows.
return 'utf-8'
fsencoding = sys.getfilesystemencoding()
if fsencoding is None:
# This is very unlikely to happen, but if it does, just use UTF-8
fsencoding = 'utf-8'
return fsencoding
def encode_local(s):
return s.encode(get_local_encoding(), default_local_errors)
def decode_local(bs):
return bs.decode(get_local_encoding(), default_local_errors)
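# Minimal round-trip sketch (not part of the upstream module). It only uses the
# helpers defined above; the example path is an arbitrary placeholder.
def _example_roundtrip(path=u'some/watched/file.txt'):
    """Encode a unicode path for the local filesystem and decode it back."""
    encoded = encode_local(path)     # bytes in the local filesystem encoding
    decoded = decode_local(encoded)  # back to unicode with the same error mode
    assert decoded == path
    return encoded, decoded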
| apache-2.0 |
gratteur/zim-desktop | zim/config/manager.py | 5 | 10560 | # -*- coding: utf-8 -*-
# Copyright 2013 Jaap Karssenberg <[email protected]>
from __future__ import with_statement
from weakref import WeakValueDictionary
from . import basedirs
from .dicts import INIConfigFile
from zim.fs import FileNotFoundError
from zim.signals import ConnectorMixin, SignalEmitter, SignalHandler
class ConfigManager(object):
'''This class defines an object that manages a set of config files.
The config manager abstracts the lookup of files using the XDG
search paths and ensures that there is only a single instance used
for each config file.
The config manager can switch the config file based on the config
X{profile} that is used. The profile is determined by the notebook
	properties. However, this object relies on its creator to set up
the hooks to get the property from the notebook. Changes to the
profile are communicated to all users of the config by means of the
"changed" signals on L{ConfigFile} and L{ConfigDict} objects.
'''
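	# Hedged usage sketch (comments only, not part of the original API docs):
	#
	#     manager = ConfigManager(dir=config_dir, profile='MyProfile')
	#     preferences = manager.get_config_dict('<profile>/preferences.conf')
	#     manager.set_profile('OtherProfile')  # cascades "changed" signals
	#
	# so every caller asking for '<profile>/preferences.conf' shares one object;
	# ``config_dir`` stands for any Dir-like object and is illustrative only.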
def __init__(self, dir=None, dirs=None, profile=None):
'''Constructor
@param dir: the folder for reading and writing config files,
e.g. a C{Dir} or a C{VirtualConfigBackend} objects.
If no dir is given, the XDG basedirs are used and C{dirs} is
ignored.
@param dirs: list or generator of C{Dir} objects used as
search path when a config file does not exist on C{dir}
@param profile: initial profile name
'''
self.profile = profile
self._config_files = WeakValueDictionary()
self._config_dicts = WeakValueDictionary()
if dir is None:
assert dirs is None, "Do not provide 'dirs' without 'dir'"
self._dir = dir
self._dirs = dirs
def set_profile(self, profile):
'''Set the profile to use for the configuration
@param profile: the profile name or C{None}
'''
assert profile is None or isinstance(profile, basestring)
if profile != self.profile:
self.profile = profile
for path, conffile in self._config_files.items():
if path.startswith('<profile>/'):
file, defaults = self._get_file(path)
conffile.set_files(file, defaults)
# Updates will cascade through the dicts by the
# "changed" signals on various objects
def _get_file(self, filename):
basepath = filename.replace('<profile>/', '')
if self.profile:
path = filename.replace('<profile>/', 'profiles/%s/' % self.profile)
else:
path = basepath
if self._dir:
file = self._dir.file(path)
if self._dirs:
defaults = DefaultFileIter(self._dirs, path)
else:
defaults = DefaultFileIter([], path)
if self.profile and filename.startswith('<profile>/'):
mypath = filename.replace('<profile>/', '')
defaults.extra.insert(0, self._dir.file(mypath))
else:
file = basedirs.XDG_CONFIG_HOME.file('zim/' + path)
defaults = XDGConfigFileIter(basepath)
## Backward compatibility for profiles
if self.profile \
and filename in (
'<profile>/preferences.conf',
'<profile>/style.conf'
):
backwardfile = self._get_backward_file(filename)
defaults.extra.insert(0, backwardfile)
return file, defaults
def _get_backward_file(self, filename):
if filename == '<profile>/preferences.conf':
path = 'profiles/%s.conf' % self.profile
elif filename == '<profile>/style.conf':
path = 'styles/%s.conf' % self.profile
else:
raise AssertionError
if self._dir:
return self._dir.file(path)
else:
return basedirs.XDG_CONFIG_HOME.file('zim/' + path)
def get_config_file(self, filename):
'''Returns a C{ConfigFile} object for C{filename}'''
if filename not in self._config_files:
file, defaults = self._get_file(filename)
config_file = ConfigFile(file, defaults)
self._config_files[filename] = config_file
return self._config_files[filename]
def get_config_dict(self, filename):
'''Returns a C{SectionedConfigDict} object for C{filename}'''
if filename not in self._config_dicts:
file = self.get_config_file(filename)
config_dict = ConfigManagerINIConfigFile(file)
self._config_dicts[filename] = config_dict
return self._config_dicts[filename]
#def get_all_config_files(filename) - iterate multiple values ?
#def get_config_section(filename, section): - return section
def VirtualConfigManager(**data):
return ConfigManager(VirtualConfigBackend(**data))
class DefaultFileIter(object):
'''Generator for iterating default files
Will yield first the files in C{extra} followed by files that
are based on C{path} and C{dirs}. Yields only existing files.
'''
def __init__(self, dirs, path, extra=None):
self.path = path
self.dirs = dirs
self.extra = extra or []
def __iter__(self):
for file in self.extra:
if file.exists():
yield file
for dir in self.dirs:
file = dir.file(self.path)
if file.exists():
yield file
class XDGConfigDirsIter(object):
'''Generator for iterating XDG config dirs
	Yields the "zim" subdir of each XDG config dir.
'''
def __iter__(self):
from . import data_dirs # XXX
yield basedirs.XDG_CONFIG_HOME.subdir(('zim'))
for dir in basedirs.XDG_CONFIG_DIRS:
yield dir.subdir(('zim'))
for dir in data_dirs():
yield dir
class XDGConfigFileIter(DefaultFileIter):
'''Like C{DefaultFileIter}, but uses XDG config dirs'''
def __init__(self, path, extra=None):
self.path = path
self.dirs = XDGConfigDirsIter()
self.extra = extra or []
class ConfigManagerINIConfigFile(INIConfigFile):
'''Like L{INIConfigFile} but with autosave when the dict changes'''
def __init__(self, file):
INIConfigFile.__init__(self, file, monitor=True)
self.connect_after('changed', self.on_changed)
# autosave on changing the dict, connect after
# regular handlers to avoid getting stuck with a set
@SignalHandler
def on_changed(self, *a):
with self.on_file_changed.blocked():
self.write()
@SignalHandler
def on_file_changed(self, *a):
with self.on_changed.blocked():
INIConfigFile.on_file_changed(self, *a)
class ConfigFile(ConnectorMixin, SignalEmitter):
'''Container object for a config file
Maps to a "base" file in the home folder, used to write new values,
and an optional default file, which is used for reading only.
@ivar file: the underlying file object for the base config file
in the home folder
@ivar defaults: a generator that yields default files
	@note: this class implements an API similar to the L{File} class but
is explicitly not a sub-class of L{File} because config files should
typically not be moved, renamed, etc. It just implements the reading
and writing methods.
@signal: C{changed ()}: emitted when the
underlying file changed (based on C{gio} monitoring support)
	or for file monitors or when the profile is switched
'''
# TODO __signals__
def __init__(self, file, defaults=None):
self.file = None
self.defaults = None
with self.blocked_signals('changed'):
self.set_files(file, defaults)
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self.file.path)
def __eq__(self, other):
return isinstance(other, ConfigFile) \
and other.file == self.file
def set_files(self, file, defaults=None):
if self.file:
self.disconnect_from(self.file)
self.file = file
self.defaults = defaults or []
#~ self.connectto(self.file, 'changed', self.on_file_changed)
self.emit('changed')
#~ def on_file_changed(self, file, *a):
#~ print "CONF FILE changed:", file
# TODO verify etag (we didn't write ourselves)
#~ self.emit('changed')
def check_has_changed_on_disk(self):
return True # we do not emit the signal if it is not real...
@property
def basename(self):
return self.file.basename
def touch(self):
'''Ensure the custom file in the home folder exists. Either by
copying a default config file, or touching an empty file.
Intended to be called before trying to edit the file with an
external editor.
'''
if not self.file.exists():
for default in self.defaults:
default.copyto(self.file)
break
else:
self.file.touch() # create empty file
def read(self, fail=False):
'''Read the base file or first default file
@param fail: if C{True} a L{FileNotFoundError} error is raised
when neither the base file or a default file are found. If
C{False} it will return C{''} for a non-existing file.
@returns: file content as a string
'''
try:
return self.file.read()
except FileNotFoundError:
for default in self.defaults:
return default.read()
else:
if fail:
raise
else:
return ''
def readlines(self, fail=False):
'''Read the base file or first default file
@param fail: if C{True} a L{FileNotFoundError} error is raised
when neither the base file or a default file are found. If
C{False} it will return C{[]} for a non-existing file.
@returns: file content as a list of lines
'''
try:
return self.file.readlines()
except FileNotFoundError:
for default in self.defaults:
return default.readlines()
else:
if fail:
raise
else:
return []
def write(self, text):
'''Write base file, see L{File.write()}'''
self.file.write(text)
def writelines(self, lines):
'''Write base file, see L{File.writelines()}'''
self.file.writelines(lines)
def remove(self):
'''Remove user file, leaves default files in place'''
if self.file.exists():
return self.file.remove()
class VirtualConfigBackend(object):
'''Virtual dir, mainly used for testing'''
def __init__(self, **data):
self._data = data
def file(self, path):
return VirtualConfigBackendFile(self._data, path)
class VirtualConfigBackendFile(object):
'''Virtual file, mainly used for testing'''
def __init__(self, data, path):
self._key = path
self._data = data
@property
def path(self):
return '<virtual>/' + self._key
@property
def basename(self):
import os
return os.path.basename(self.path)
def connect(self, handler, *a):
pass
def disconnect(self, handler):
pass
def exists(self):
return self._key in self._data \
and self._data[self._key] is not None
def touch(self):
self._data.setdefault(self._key, '')
def copyto(self, other):
text = self.read()
other.write(text)
def read(self):
try:
text = self._data[self._key]
except KeyError:
raise FileNotFoundError(self)
else:
if text is None:
raise FileNotFoundError(self)
else:
return text
def readlines(self):
text = self.read()
return text.splitlines(True)
def write(self, text):
self._data[self._key] = text or ''
def writelines(self, lines):
self._data[self._key] = ''.join(lines) or ''
def remove(self):
del self._data[self._key]
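def _example_virtual_config(): # pragma: no cover
	'''Hedged sketch, not part of the original module: shows the virtual
	backend above driving a ConfigManager in a test. The file name and INI
	text are illustrative assumptions, not real zim defaults.
	'''
	manager = VirtualConfigManager(**{'preferences.conf': '[General]\n'})
	conffile = manager.get_config_file('preferences.conf')
	return conffile.read() # -> '[General]\n'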
| gpl-2.0 |
PyBossa/pybossa | pybossa/default_settings.py | 1 | 4813 | # -*- coding: utf8 -*-
# This file is part of PYBOSSA.
#
# Copyright (C) 2015 Scifabric LTD.
#
# PYBOSSA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PYBOSSA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PYBOSSA. If not, see <http://www.gnu.org/licenses/>.
DEBUG = False
# webserver host and port
HOST = '0.0.0.0'
PORT = 5000
SECRET = 'foobar'
SECRET_KEY = 'my-session-secret'
ITSDANGEROUSKEY = 'its-dangerous-key'
## project configuration
BRAND = 'PYBOSSA'
TITLE = 'PYBOSSA'
COPYRIGHT = 'Set Your Institution'
DESCRIPTION = 'Set the description in your config'
TERMSOFUSE = 'http://okfn.org/terms-of-use/'
DATAUSE = 'http://opendatacommons.org/licenses/by/'
LOGO = ''
DEFAULT_LOCALE = 'en'
LOCALES = [('en', 'English'), ('es', u'Español'),
('it', 'Italiano'), ('fr', u'Français'),
('ja', u'日本語'), ('el', u'ελληνικά')]
## Default THEME
THEME = 'default'
## Default number of apps per page
APPS_PER_PAGE = 20
## Default allowed extensions
ALLOWED_EXTENSIONS = ['js', 'css', 'png', 'jpg', 'jpeg', 'gif', 'zip']
UPLOAD_METHOD = 'local'
## Default number of users shown in the leaderboard
LEADERBOARD = 20
## Default configuration for debug toolbar
ENABLE_DEBUG_TOOLBAR = False
# Cache default key prefix
REDIS_SENTINEL = [('localhost', 26379)]
REDIS_MASTER = 'mymaster'
REDIS_DB = 0
REDIS_KEYPREFIX = 'pybossa_cache'
## Default cache timeouts
# Project cache
AVATAR_TIMEOUT = 30 * 24 * 60 * 60
APP_TIMEOUT = 15 * 60
REGISTERED_USERS_TIMEOUT = 15 * 60
ANON_USERS_TIMEOUT = 5 * 60 * 60
STATS_FRONTPAGE_TIMEOUT = APP_TIMEOUT
STATS_APP_TIMEOUT = 12 * 60 * 60
STATS_DRAFT_TIMEOUT = 24 * 60 * 60
N_APPS_PER_CATEGORY_TIMEOUT = 60 * 60
BROWSE_TASKS_TIMEOUT = 3 * 60 * 60
# Category cache
CATEGORY_TIMEOUT = 24 * 60 * 60
# User cache
USER_TIMEOUT = 15 * 60
USER_TOP_TIMEOUT = 24 * 60 * 60
USER_TOTAL_TIMEOUT = 24 * 60 * 60
# Project Presenters
PRESENTERS = ["basic", "image", "sound", "video", "map", "pdf"]
# Default Google Docs spreadsheet template tasks URLs
TEMPLATE_TASKS = {
'image': "https://docs.google.com/spreadsheet/ccc?key=0AsNlt0WgPAHwdHFEN29mZUF0czJWMUhIejF6dWZXdkE&usp=sharing",
'sound': "https://docs.google.com/spreadsheet/ccc?key=0AsNlt0WgPAHwdEczcWduOXRUb1JUc1VGMmJtc2xXaXc&usp=sharing",
'video': "https://docs.google.com/spreadsheet/ccc?key=0AsNlt0WgPAHwdGZ2UGhxSTJjQl9YNVhfUVhGRUdoRWc&usp=sharing",
'map': "https://docs.google.com/spreadsheet/ccc?key=0AsNlt0WgPAHwdGZnbjdwcnhKRVNlN1dGXy0tTnNWWXc&usp=sharing",
'pdf': "https://docs.google.com/spreadsheet/ccc?key=0AsNlt0WgPAHwdEVVamc0R0hrcjlGdXRaUXlqRXlJMEE&usp=sharing"}
# Rate limits default values
LIMIT = 300
PER = 15 * 60
# Expiration time for password protected project cookies
PASSWD_COOKIE_TIMEOUT = 60 * 30
# Expiration time for account confirmation / password recovery links
ACCOUNT_LINK_EXPIRATION = 5 * 60 * 60
# Disable new account confirmation (via email)
ACCOUNT_CONFIRMATION_DISABLED = True
# Send weekly update emails every
WEEKLY_UPDATE_STATS = 'Sunday'
# Enable Server Sent Events
SSE = False
# Pro user features. False will make the feature available to all regular users,
# while True will make it available only to pro users
PRO_FEATURES = {
'auditlog': True,
'webhooks': True,
'updated_exports': True,
'notify_blog_updates': True,
'project_weekly_report': True,
'autoimporter': True,
'better_stats': True
}
CORS_RESOURCES = {r"/api/*": {"origins": "*",
"allow_headers": ['Content-Type',
'Authorization'],
"max_age": 21600
}}
FAILED_JOBS_RETRIES = 3
FAILED_JOBS_MAILS = 7
FULLTEXTSEARCH_LANGUAGE = 'english'
STRICT_SLASHES = True
# Background jobs default time outs
MINUTE = 60
TIMEOUT = 10 * MINUTE
# OneSignal GCM Sender ID
# DO NOT MODIFY THIS
GCM_SENDER_ID = "482941778795"
# Unpublish inactive projects
UNPUBLISH_PROJECTS = True
# TTL for ZIP files of personal data
TTL_ZIP_SEC_FILES = 3
# Default cryptopan key
CRYPTOPAN_KEY = '32-char-str-for-AES-key-and-pad.'
# Instruct PYBOSSA to generate absolute paths or not for avatars
AVATAR_ABSOLUTE = True
# Spam accounts to avoid
SPAM = []
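# Hedged note (not part of upstream): these module-level constants are meant to
# be copied into the Flask application config, conventionally with something
# like
#
#     app.config.from_object('pybossa.default_settings')
#     app.config.from_pyfile(local_settings_path)  # optional local overrides
#
# Flask's from_object() only picks up UPPERCASE names, which is why every
# setting above is upper-case. The exact loading code lives elsewhere in
# PYBOSSA and is assumed here.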
| agpl-3.0 |
computersalat/ansible | test/support/integration/plugins/modules/postgresql_query.py | 53 | 10477 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Felix Archambault
# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'supported_by': 'community',
'status': ['preview']
}
DOCUMENTATION = r'''
---
module: postgresql_query
short_description: Run PostgreSQL queries
description:
- Runs arbitrary PostgreSQL queries.
- Can run queries from SQL script files.
- Does not run against backup files. Use M(postgresql_db) with I(state=restore)
to run queries on files made by pg_dump/pg_dumpall utilities.
version_added: '2.8'
options:
query:
description:
- SQL query to run. Variables can be escaped with psycopg2 syntax
U(http://initd.org/psycopg/docs/usage.html).
type: str
positional_args:
description:
- List of values to be passed as positional arguments to the query.
When the value is a list, it will be converted to PostgreSQL array.
- Mutually exclusive with I(named_args).
type: list
elements: raw
named_args:
description:
- Dictionary of key-value arguments to pass to the query.
When the value is a list, it will be converted to PostgreSQL array.
- Mutually exclusive with I(positional_args).
type: dict
path_to_script:
description:
- Path to SQL script on the remote host.
- Returns result of the last query in the script.
- Mutually exclusive with I(query).
type: path
session_role:
description:
- Switch to session_role after connecting. The specified session_role must
be a role that the current login_user is a member of.
- Permissions checking for SQL commands is carried out as though
the session_role were the one that had logged in originally.
type: str
db:
description:
- Name of database to connect to and run queries against.
type: str
aliases:
- login_db
autocommit:
description:
- Execute in autocommit mode when the query can't be run inside a transaction block
(e.g., VACUUM).
- Mutually exclusive with I(check_mode).
type: bool
default: no
version_added: '2.9'
encoding:
description:
- Set the client encoding for the current session (e.g. C(UTF-8)).
- The default is the encoding defined by the database.
type: str
version_added: '2.10'
seealso:
- module: postgresql_db
author:
- Felix Archambault (@archf)
- Andrew Klychkov (@Andersson007)
- Will Rouesnel (@wrouesnel)
extends_documentation_fragment: postgres
'''
EXAMPLES = r'''
- name: Simple select query to acme db
postgresql_query:
db: acme
query: SELECT version()
- name: Select query to db acme with positional arguments and non-default credentials
postgresql_query:
db: acme
login_user: django
login_password: mysecretpass
query: SELECT * FROM acme WHERE id = %s AND story = %s
positional_args:
- 1
- test
- name: Select query to test_db with named_args
postgresql_query:
db: test_db
query: SELECT * FROM test WHERE id = %(id_val)s AND story = %(story_val)s
named_args:
id_val: 1
story_val: test
- name: Insert query to test_table in db test_db
postgresql_query:
db: test_db
query: INSERT INTO test_table (id, story) VALUES (2, 'my_long_story')
- name: Run queries from SQL script using UTF-8 client encoding for session
postgresql_query:
db: test_db
path_to_script: /var/lib/pgsql/test.sql
positional_args:
- 1
encoding: UTF-8
- name: Example of using autocommit parameter
postgresql_query:
db: test_db
query: VACUUM
autocommit: yes
- name: >
    Insert data into the column of array type using positional_args.
Note that we use quotes here, the same as for passing JSON, etc.
postgresql_query:
query: INSERT INTO test_table (array_column) VALUES (%s)
positional_args:
- '{1,2,3}'
# Pass list and string vars as positional_args
- name: Set vars
set_fact:
my_list:
- 1
- 2
- 3
my_arr: '{1, 2, 3}'
- name: Select from test table by passing positional_args as arrays
postgresql_query:
query: SELECT * FROM test_array_table WHERE arr_col1 = %s AND arr_col2 = %s
positional_args:
- '{{ my_list }}'
- '{{ my_arr|string }}'
'''
RETURN = r'''
query:
description: Query that was tried to be executed.
returned: always
type: str
sample: 'SELECT * FROM bar'
statusmessage:
description: Attribute containing the message returned by the command.
returned: always
type: str
sample: 'INSERT 0 1'
query_result:
description:
- List of dictionaries in column:value form representing returned rows.
returned: changed
type: list
sample: [{"Column": "Value1"},{"Column": "Value2"}]
rowcount:
description: Number of affected rows.
returned: changed
type: int
sample: 5
'''
try:
from psycopg2 import ProgrammingError as Psycopg2ProgrammingError
from psycopg2.extras import DictCursor
except ImportError:
# it is needed for checking 'no result to fetch' in main(),
# psycopg2 availability will be checked by connect_to_db() into
# ansible.module_utils.postgres
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.postgres import (
connect_to_db,
get_conn_params,
postgres_common_argument_spec,
)
from ansible.module_utils._text import to_native
from ansible.module_utils.six import iteritems
# ===========================================
# Module execution.
#
def list_to_pg_array(elem):
"""Convert the passed list to PostgreSQL array
represented as a string.
Args:
elem (list): List that needs to be converted.
Returns:
elem (str): String representation of PostgreSQL array.
"""
elem = str(elem).strip('[]')
elem = '{' + elem + '}'
return elem
def convert_elements_to_pg_arrays(obj):
"""Convert list elements of the passed object
to PostgreSQL arrays represented as strings.
Args:
obj (dict or list): Object whose elements need to be converted.
Returns:
obj (dict or list): Object with converted elements.
"""
if isinstance(obj, dict):
for (key, elem) in iteritems(obj):
if isinstance(elem, list):
obj[key] = list_to_pg_array(elem)
elif isinstance(obj, list):
for i, elem in enumerate(obj):
if isinstance(elem, list):
obj[i] = list_to_pg_array(elem)
return obj
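# Worked example for the two helpers above (comments only, values checked
# against the code as written):
#
#   list_to_pg_array([1, 2, 3])                     -> '{1, 2, 3}'
#   convert_elements_to_pg_arrays({'ids': [1, 2]})  -> {'ids': '{1, 2}'}
#   convert_elements_to_pg_arrays([[1, 2], 'x'])    -> ['{1, 2}', 'x']
#
# i.e. list arguments end up in the same quoted '{...}' string form shown in
# the EXAMPLES section, which PostgreSQL then casts to an array.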
def main():
argument_spec = postgres_common_argument_spec()
argument_spec.update(
query=dict(type='str'),
db=dict(type='str', aliases=['login_db']),
positional_args=dict(type='list', elements='raw'),
named_args=dict(type='dict'),
session_role=dict(type='str'),
path_to_script=dict(type='path'),
autocommit=dict(type='bool', default=False),
encoding=dict(type='str'),
)
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive=(('positional_args', 'named_args'),),
supports_check_mode=True,
)
query = module.params["query"]
positional_args = module.params["positional_args"]
named_args = module.params["named_args"]
path_to_script = module.params["path_to_script"]
autocommit = module.params["autocommit"]
encoding = module.params["encoding"]
if autocommit and module.check_mode:
module.fail_json(msg="Using autocommit is mutually exclusive with check_mode")
if path_to_script and query:
module.fail_json(msg="path_to_script is mutually exclusive with query")
if positional_args:
positional_args = convert_elements_to_pg_arrays(positional_args)
elif named_args:
named_args = convert_elements_to_pg_arrays(named_args)
if path_to_script:
try:
with open(path_to_script, 'rb') as f:
query = to_native(f.read())
except Exception as e:
module.fail_json(msg="Cannot read file '%s' : %s" % (path_to_script, to_native(e)))
conn_params = get_conn_params(module, module.params)
db_connection = connect_to_db(module, conn_params, autocommit=autocommit)
if encoding is not None:
db_connection.set_client_encoding(encoding)
cursor = db_connection.cursor(cursor_factory=DictCursor)
# Prepare args:
if module.params.get("positional_args"):
arguments = module.params["positional_args"]
elif module.params.get("named_args"):
arguments = module.params["named_args"]
else:
arguments = None
# Set defaults:
changed = False
# Execute query:
try:
cursor.execute(query, arguments)
except Exception as e:
if not autocommit:
db_connection.rollback()
cursor.close()
db_connection.close()
module.fail_json(msg="Cannot execute SQL '%s' %s: %s" % (query, arguments, to_native(e)))
statusmessage = cursor.statusmessage
rowcount = cursor.rowcount
try:
query_result = [dict(row) for row in cursor.fetchall()]
except Psycopg2ProgrammingError as e:
if to_native(e) == 'no results to fetch':
query_result = {}
except Exception as e:
module.fail_json(msg="Cannot fetch rows from cursor: %s" % to_native(e))
if 'SELECT' not in statusmessage:
if 'UPDATE' in statusmessage or 'INSERT' in statusmessage or 'DELETE' in statusmessage:
s = statusmessage.split()
if len(s) == 3:
if statusmessage.split()[2] != '0':
changed = True
elif len(s) == 2:
if statusmessage.split()[1] != '0':
changed = True
else:
changed = True
else:
changed = True
if module.check_mode:
db_connection.rollback()
else:
if not autocommit:
db_connection.commit()
kw = dict(
changed=changed,
query=cursor.query,
statusmessage=statusmessage,
query_result=query_result,
rowcount=rowcount if rowcount >= 0 else 0,
)
cursor.close()
db_connection.close()
module.exit_json(**kw)
if __name__ == '__main__':
main()
| gpl-3.0 |
Naoto-Imamachi/MIRAGE | scripts/module/preparation/phastcons_score_list.py | 1 | 3683 | #!usr/bin/env python
import sys
import re
import shelve
from parameter.common_parameters import common_parameters
import utils.setting_utils as utils
utils.now_time("phastcons_score_list script starting...")
p = utils.Bunch(common_parameters)
def main():
utils.now_time("Input_file: " + p.phastcons_score_list_db_input)
utils.now_time("Reference_file: " + p.phastcons_score_list_reference)
utils.now_time("Output_file: " + p.phastcons_score_list_db_output)
output_merge = p.phastcons_score_list_db_output + 'phastCons46way_Refseq_for_MIRAGE_CDS.db' #'phastCons46way_miRBase_v21_hg38Tohg19_for_MIRAGE.db'
output_merge_shelve = shelve.open(output_merge)
#for x in ['chr21']:
for x in ['chr1','chr2','chr3','chr4','chr5','chr6','chr7','chr8','chr9','chr10','chr11','chr12','chr13','chr14','chr15','chr16','chr17','chr18','chr19','chr20','chr21','chr22','chrX','chrY','chrM']:
ref_s = p.phastcons_score_list_reference #mirBase, Refseq etc...
ref_file = open(ref_s,'r')
input_s = p.phastcons_score_list_db_input + x + '.phastCons46way_Refseq_CDS.db' #'.phastCons46way_miRBase_v21_hg38Tohg19.db'
output_s = p.phastcons_score_list_db_output + x + '.phastCons46way_Refseq_for_MIRAGE_CDS.db' #'.phastCons46way_miRBase_v21_hg38Tohg19_for_MIRAGE.db'
input_shelve = shelve.open(input_s)
output_shelve = shelve.open(output_s)
score_list_dict = {}
for line in ref_file:
line = line.rstrip()
data = line.split("\t")
chrom = data[0]
if not chrom == x:
continue
strand = data[5]
if len(data) >= 12: #12bed format
exon_block = data[10].split(',')
exon_block.pop() #Remove the last item ''
exon_st = data[11].split(',')
exon_st.pop() #Remove the last item ''
name = data[3]
score_list_dict[name] = []
for y in range(len(exon_block)):
st = int(data[1]) + int(exon_st[y])
ed = int(data[1]) + int(exon_st[y]) + int(exon_block[y])
length = ed - st
for z in range(length):
score = input_shelve[str(st)]
score_list_dict[name].append(score)
st += 1
if strand == '-':
rev_score = score_list_dict[name][::-1]
score_list_dict[name] = rev_score
elif len(data) >= 3: #6bed format
st = int(data[1])
ed = int(data[2])
length = ed - st
name = data[3]
score_list_dict[name] = []
for z in range(length):
score = input_shelve[str(st)]
score_list_dict[name].append(score)
st += 1
if strand == '-':
rev_score = score_list_dict[name][::-1]
score_list_dict[name] = rev_score
else:
            print('ERROR: Your BED format file has fewer than three columns.')
            print('A BED format file needs at least three columns [chr, st, ed]...')
sys.exit(1)
output_shelve.update(score_list_dict)
output_merge_shelve.update(score_list_dict)
input_shelve.close()
output_shelve.close()
utils.now_time("phastcons_score_list script was successfully finished!!")
output_merge_shelve.close()
if __name__ == '__main__':
main()
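# Hedged usage sketch (not part of the original script): each output .db file
# is a plain shelve database keyed by the BED "name" column (data[3]), with a
# 5'->3' list of per-base phastCons scores as the value, so downstream code
# could read it back roughly like
#
#     import shelve
#     db = shelve.open('chr21.phastCons46way_Refseq_for_MIRAGE_CDS.db')
#     scores = db['NM_000014']  # hypothetical RefSeq accession used as a key
#
# the accession shown is illustrative only.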
| mit |
guewen/odoo | addons/project_issue/res_config.py | 441 | 1492 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2012 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class project_issue_settings(osv.osv_memory):
_name = 'project.config.settings'
_inherit = ['project.config.settings', 'fetchmail.config.settings']
_columns = {
'fetchmail_issue': fields.boolean("Create issues from an incoming email account ",
fetchmail_model='project.issue', fetchmail_name='Incoming Issues',
help="""Allows you to configure your incoming mail server, and create issues from incoming emails."""),
}
| agpl-3.0 |
matthiasdiener/spack | var/spack/repos/builtin/packages/paml/package.py | 5 | 2221 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Paml(MakefilePackage):
"""PAML is a package of programs for phylogenetic analyses of DNA or
protein sewuences using maximum likelihood."""
homepage = "http://abacus.gene.ucl.ac.uk/software/paml.html"
url = "http://abacus.gene.ucl.ac.uk/software/paml4.9e.tgz"
version('4.9e', 'ac5a062bfea1f4eaac79008434030acf')
build_directory = 'src'
def install(self, spec, prefix):
mkdirp(prefix.bin)
with working_dir(self.build_directory):
install('baseml', prefix.bin)
install('basemlg', prefix.bin)
install('chi2', prefix.bin)
install('codeml', prefix.bin)
install('evolver', prefix.bin)
install('infinitesites', prefix.bin)
install('mcmctree', prefix.bin)
install('pamp', prefix.bin)
install('yn00', prefix.bin)
install_tree('dat', prefix.dat)
install_tree('Technical', prefix.Technical)
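    # Hedged usage note (not part of the upstream recipe): with this package
    # file in a Spack repository, the tool chain is typically exercised as
    #
    #     spack install paml@4.9e
    #     spack load paml   # puts codeml, baseml, etc. on PATH
    #
    # `spack load` assumes shell integration/environment modules are set up.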
| lgpl-2.1 |
nyasara/azuremono-docker | IronPython-2.7.4/Lib/encodings/cp861.py | 93 | 35587 | """ Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP861.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp861',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
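### Usage sketch (illustrative comments, not part of the generated file): once
### the encodings package registers this module, the tables below drive normal
### codec calls, e.g. under Python 2:
###
### '\x8d'.decode('cp861') == u'\xde' # LATIN CAPITAL LETTER THORN
### u'\xde'.encode('cp861') == '\x8d'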
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
0x0086: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE
0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
0x008b: 0x00d0, # LATIN CAPITAL LETTER ETH
0x008c: 0x00f0, # LATIN SMALL LETTER ETH
0x008d: 0x00de, # LATIN CAPITAL LETTER THORN
0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x008f: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
0x0091: 0x00e6, # LATIN SMALL LIGATURE AE
0x0092: 0x00c6, # LATIN CAPITAL LIGATURE AE
0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
0x0095: 0x00fe, # LATIN SMALL LETTER THORN
0x0096: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x0097: 0x00dd, # LATIN CAPITAL LETTER Y WITH ACUTE
0x0098: 0x00fd, # LATIN SMALL LETTER Y WITH ACUTE
0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x009b: 0x00f8, # LATIN SMALL LETTER O WITH STROKE
0x009c: 0x00a3, # POUND SIGN
0x009d: 0x00d8, # LATIN CAPITAL LETTER O WITH STROKE
0x009e: 0x20a7, # PESETA SIGN
0x009f: 0x0192, # LATIN SMALL LETTER F WITH HOOK
0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
0x00a4: 0x00c1, # LATIN CAPITAL LETTER A WITH ACUTE
0x00a5: 0x00cd, # LATIN CAPITAL LETTER I WITH ACUTE
0x00a6: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE
0x00a7: 0x00da, # LATIN CAPITAL LETTER U WITH ACUTE
0x00a8: 0x00bf, # INVERTED QUESTION MARK
0x00a9: 0x2310, # REVERSED NOT SIGN
0x00aa: 0x00ac, # NOT SIGN
0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
0x00ad: 0x00a1, # INVERTED EXCLAMATION MARK
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x258c, # LEFT HALF BLOCK
0x00de: 0x2590, # RIGHT HALF BLOCK
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x03b1, # GREEK SMALL LETTER ALPHA
0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
0x00e2: 0x0393, # GREEK CAPITAL LETTER GAMMA
0x00e3: 0x03c0, # GREEK SMALL LETTER PI
0x00e4: 0x03a3, # GREEK CAPITAL LETTER SIGMA
0x00e5: 0x03c3, # GREEK SMALL LETTER SIGMA
0x00e6: 0x00b5, # MICRO SIGN
0x00e7: 0x03c4, # GREEK SMALL LETTER TAU
0x00e8: 0x03a6, # GREEK CAPITAL LETTER PHI
0x00e9: 0x0398, # GREEK CAPITAL LETTER THETA
0x00ea: 0x03a9, # GREEK CAPITAL LETTER OMEGA
0x00eb: 0x03b4, # GREEK SMALL LETTER DELTA
0x00ec: 0x221e, # INFINITY
0x00ed: 0x03c6, # GREEK SMALL LETTER PHI
0x00ee: 0x03b5, # GREEK SMALL LETTER EPSILON
0x00ef: 0x2229, # INTERSECTION
0x00f0: 0x2261, # IDENTICAL TO
0x00f1: 0x00b1, # PLUS-MINUS SIGN
0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO
0x00f3: 0x2264, # LESS-THAN OR EQUAL TO
0x00f4: 0x2320, # TOP HALF INTEGRAL
0x00f5: 0x2321, # BOTTOM HALF INTEGRAL
0x00f6: 0x00f7, # DIVISION SIGN
0x00f7: 0x2248, # ALMOST EQUAL TO
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x2219, # BULLET OPERATOR
0x00fa: 0x00b7, # MIDDLE DOT
0x00fb: 0x221a, # SQUARE ROOT
0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N
0x00fd: 0x00b2, # SUPERSCRIPT TWO
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
decoding_table = (
u'\x00' # 0x0000 -> NULL
u'\x01' # 0x0001 -> START OF HEADING
u'\x02' # 0x0002 -> START OF TEXT
u'\x03' # 0x0003 -> END OF TEXT
u'\x04' # 0x0004 -> END OF TRANSMISSION
u'\x05' # 0x0005 -> ENQUIRY
u'\x06' # 0x0006 -> ACKNOWLEDGE
u'\x07' # 0x0007 -> BELL
u'\x08' # 0x0008 -> BACKSPACE
u'\t' # 0x0009 -> HORIZONTAL TABULATION
u'\n' # 0x000a -> LINE FEED
u'\x0b' # 0x000b -> VERTICAL TABULATION
u'\x0c' # 0x000c -> FORM FEED
u'\r' # 0x000d -> CARRIAGE RETURN
u'\x0e' # 0x000e -> SHIFT OUT
u'\x0f' # 0x000f -> SHIFT IN
u'\x10' # 0x0010 -> DATA LINK ESCAPE
u'\x11' # 0x0011 -> DEVICE CONTROL ONE
u'\x12' # 0x0012 -> DEVICE CONTROL TWO
u'\x13' # 0x0013 -> DEVICE CONTROL THREE
u'\x14' # 0x0014 -> DEVICE CONTROL FOUR
u'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x0016 -> SYNCHRONOUS IDLE
u'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x0018 -> CANCEL
u'\x19' # 0x0019 -> END OF MEDIUM
u'\x1a' # 0x001a -> SUBSTITUTE
u'\x1b' # 0x001b -> ESCAPE
u'\x1c' # 0x001c -> FILE SEPARATOR
u'\x1d' # 0x001d -> GROUP SEPARATOR
u'\x1e' # 0x001e -> RECORD SEPARATOR
u'\x1f' # 0x001f -> UNIT SEPARATOR
u' ' # 0x0020 -> SPACE
u'!' # 0x0021 -> EXCLAMATION MARK
u'"' # 0x0022 -> QUOTATION MARK
u'#' # 0x0023 -> NUMBER SIGN
u'$' # 0x0024 -> DOLLAR SIGN
u'%' # 0x0025 -> PERCENT SIGN
u'&' # 0x0026 -> AMPERSAND
u"'" # 0x0027 -> APOSTROPHE
u'(' # 0x0028 -> LEFT PARENTHESIS
u')' # 0x0029 -> RIGHT PARENTHESIS
u'*' # 0x002a -> ASTERISK
u'+' # 0x002b -> PLUS SIGN
u',' # 0x002c -> COMMA
u'-' # 0x002d -> HYPHEN-MINUS
u'.' # 0x002e -> FULL STOP
u'/' # 0x002f -> SOLIDUS
u'0' # 0x0030 -> DIGIT ZERO
u'1' # 0x0031 -> DIGIT ONE
u'2' # 0x0032 -> DIGIT TWO
u'3' # 0x0033 -> DIGIT THREE
u'4' # 0x0034 -> DIGIT FOUR
u'5' # 0x0035 -> DIGIT FIVE
u'6' # 0x0036 -> DIGIT SIX
u'7' # 0x0037 -> DIGIT SEVEN
u'8' # 0x0038 -> DIGIT EIGHT
u'9' # 0x0039 -> DIGIT NINE
u':' # 0x003a -> COLON
u';' # 0x003b -> SEMICOLON
u'<' # 0x003c -> LESS-THAN SIGN
u'=' # 0x003d -> EQUALS SIGN
u'>' # 0x003e -> GREATER-THAN SIGN
u'?' # 0x003f -> QUESTION MARK
u'@' # 0x0040 -> COMMERCIAL AT
u'A' # 0x0041 -> LATIN CAPITAL LETTER A
u'B' # 0x0042 -> LATIN CAPITAL LETTER B
u'C' # 0x0043 -> LATIN CAPITAL LETTER C
u'D' # 0x0044 -> LATIN CAPITAL LETTER D
u'E' # 0x0045 -> LATIN CAPITAL LETTER E
u'F' # 0x0046 -> LATIN CAPITAL LETTER F
u'G' # 0x0047 -> LATIN CAPITAL LETTER G
u'H' # 0x0048 -> LATIN CAPITAL LETTER H
u'I' # 0x0049 -> LATIN CAPITAL LETTER I
u'J' # 0x004a -> LATIN CAPITAL LETTER J
u'K' # 0x004b -> LATIN CAPITAL LETTER K
u'L' # 0x004c -> LATIN CAPITAL LETTER L
u'M' # 0x004d -> LATIN CAPITAL LETTER M
u'N' # 0x004e -> LATIN CAPITAL LETTER N
u'O' # 0x004f -> LATIN CAPITAL LETTER O
u'P' # 0x0050 -> LATIN CAPITAL LETTER P
u'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
u'R' # 0x0052 -> LATIN CAPITAL LETTER R
u'S' # 0x0053 -> LATIN CAPITAL LETTER S
u'T' # 0x0054 -> LATIN CAPITAL LETTER T
u'U' # 0x0055 -> LATIN CAPITAL LETTER U
u'V' # 0x0056 -> LATIN CAPITAL LETTER V
u'W' # 0x0057 -> LATIN CAPITAL LETTER W
u'X' # 0x0058 -> LATIN CAPITAL LETTER X
u'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
u'Z' # 0x005a -> LATIN CAPITAL LETTER Z
u'[' # 0x005b -> LEFT SQUARE BRACKET
u'\\' # 0x005c -> REVERSE SOLIDUS
u']' # 0x005d -> RIGHT SQUARE BRACKET
u'^' # 0x005e -> CIRCUMFLEX ACCENT
u'_' # 0x005f -> LOW LINE
u'`' # 0x0060 -> GRAVE ACCENT
u'a' # 0x0061 -> LATIN SMALL LETTER A
u'b' # 0x0062 -> LATIN SMALL LETTER B
u'c' # 0x0063 -> LATIN SMALL LETTER C
u'd' # 0x0064 -> LATIN SMALL LETTER D
u'e' # 0x0065 -> LATIN SMALL LETTER E
u'f' # 0x0066 -> LATIN SMALL LETTER F
u'g' # 0x0067 -> LATIN SMALL LETTER G
u'h' # 0x0068 -> LATIN SMALL LETTER H
u'i' # 0x0069 -> LATIN SMALL LETTER I
u'j' # 0x006a -> LATIN SMALL LETTER J
u'k' # 0x006b -> LATIN SMALL LETTER K
u'l' # 0x006c -> LATIN SMALL LETTER L
u'm' # 0x006d -> LATIN SMALL LETTER M
u'n' # 0x006e -> LATIN SMALL LETTER N
u'o' # 0x006f -> LATIN SMALL LETTER O
u'p' # 0x0070 -> LATIN SMALL LETTER P
u'q' # 0x0071 -> LATIN SMALL LETTER Q
u'r' # 0x0072 -> LATIN SMALL LETTER R
u's' # 0x0073 -> LATIN SMALL LETTER S
u't' # 0x0074 -> LATIN SMALL LETTER T
u'u' # 0x0075 -> LATIN SMALL LETTER U
u'v' # 0x0076 -> LATIN SMALL LETTER V
u'w' # 0x0077 -> LATIN SMALL LETTER W
u'x' # 0x0078 -> LATIN SMALL LETTER X
u'y' # 0x0079 -> LATIN SMALL LETTER Y
u'z' # 0x007a -> LATIN SMALL LETTER Z
u'{' # 0x007b -> LEFT CURLY BRACKET
u'|' # 0x007c -> VERTICAL LINE
u'}' # 0x007d -> RIGHT CURLY BRACKET
u'~' # 0x007e -> TILDE
u'\x7f' # 0x007f -> DELETE
u'\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
u'\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE
u'\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe0' # 0x0085 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe5' # 0x0086 -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA
u'\xea' # 0x0088 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xe8' # 0x008a -> LATIN SMALL LETTER E WITH GRAVE
u'\xd0' # 0x008b -> LATIN CAPITAL LETTER ETH
u'\xf0' # 0x008c -> LATIN SMALL LETTER ETH
u'\xde' # 0x008d -> LATIN CAPITAL LETTER THORN
u'\xc4' # 0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0x008f -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xe6' # 0x0091 -> LATIN SMALL LIGATURE AE
u'\xc6' # 0x0092 -> LATIN CAPITAL LIGATURE AE
u'\xf4' # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xfe' # 0x0095 -> LATIN SMALL LETTER THORN
u'\xfb' # 0x0096 -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xdd' # 0x0097 -> LATIN CAPITAL LETTER Y WITH ACUTE
u'\xfd' # 0x0098 -> LATIN SMALL LETTER Y WITH ACUTE
u'\xd6' # 0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xf8' # 0x009b -> LATIN SMALL LETTER O WITH STROKE
u'\xa3' # 0x009c -> POUND SIGN
u'\xd8' # 0x009d -> LATIN CAPITAL LETTER O WITH STROKE
u'\u20a7' # 0x009e -> PESETA SIGN
u'\u0192' # 0x009f -> LATIN SMALL LETTER F WITH HOOK
u'\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE
u'\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE
u'\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
u'\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
u'\xc1' # 0x00a4 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xcd' # 0x00a5 -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xd3' # 0x00a6 -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xda' # 0x00a7 -> LATIN CAPITAL LETTER U WITH ACUTE
u'\xbf' # 0x00a8 -> INVERTED QUESTION MARK
u'\u2310' # 0x00a9 -> REVERSED NOT SIGN
u'\xac' # 0x00aa -> NOT SIGN
u'\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF
u'\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER
u'\xa1' # 0x00ad -> INVERTED EXCLAMATION MARK
u'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2591' # 0x00b0 -> LIGHT SHADE
u'\u2592' # 0x00b1 -> MEDIUM SHADE
u'\u2593' # 0x00b2 -> DARK SHADE
u'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
u'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
u'\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
u'\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
u'\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
u'\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
u'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
u'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
u'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
u'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
u'\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
u'\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
u'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
u'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
u'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
u'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
u'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
u'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
u'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
u'\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
u'\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
u'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
u'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
u'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
u'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
u'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
u'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
u'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
u'\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
u'\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
u'\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
u'\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
u'\u2559' # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
u'\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
u'\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
u'\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
u'\u256b' # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
u'\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
u'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
u'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
u'\u2588' # 0x00db -> FULL BLOCK
u'\u2584' # 0x00dc -> LOWER HALF BLOCK
u'\u258c' # 0x00dd -> LEFT HALF BLOCK
u'\u2590' # 0x00de -> RIGHT HALF BLOCK
u'\u2580' # 0x00df -> UPPER HALF BLOCK
u'\u03b1' # 0x00e0 -> GREEK SMALL LETTER ALPHA
u'\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S
u'\u0393' # 0x00e2 -> GREEK CAPITAL LETTER GAMMA
u'\u03c0' # 0x00e3 -> GREEK SMALL LETTER PI
u'\u03a3' # 0x00e4 -> GREEK CAPITAL LETTER SIGMA
u'\u03c3' # 0x00e5 -> GREEK SMALL LETTER SIGMA
u'\xb5' # 0x00e6 -> MICRO SIGN
u'\u03c4' # 0x00e7 -> GREEK SMALL LETTER TAU
u'\u03a6' # 0x00e8 -> GREEK CAPITAL LETTER PHI
u'\u0398' # 0x00e9 -> GREEK CAPITAL LETTER THETA
u'\u03a9' # 0x00ea -> GREEK CAPITAL LETTER OMEGA
u'\u03b4' # 0x00eb -> GREEK SMALL LETTER DELTA
u'\u221e' # 0x00ec -> INFINITY
u'\u03c6' # 0x00ed -> GREEK SMALL LETTER PHI
u'\u03b5' # 0x00ee -> GREEK SMALL LETTER EPSILON
u'\u2229' # 0x00ef -> INTERSECTION
u'\u2261' # 0x00f0 -> IDENTICAL TO
u'\xb1' # 0x00f1 -> PLUS-MINUS SIGN
u'\u2265' # 0x00f2 -> GREATER-THAN OR EQUAL TO
u'\u2264' # 0x00f3 -> LESS-THAN OR EQUAL TO
u'\u2320' # 0x00f4 -> TOP HALF INTEGRAL
u'\u2321' # 0x00f5 -> BOTTOM HALF INTEGRAL
u'\xf7' # 0x00f6 -> DIVISION SIGN
u'\u2248' # 0x00f7 -> ALMOST EQUAL TO
u'\xb0' # 0x00f8 -> DEGREE SIGN
u'\u2219' # 0x00f9 -> BULLET OPERATOR
u'\xb7' # 0x00fa -> MIDDLE DOT
u'\u221a' # 0x00fb -> SQUARE ROOT
u'\u207f' # 0x00fc -> SUPERSCRIPT LATIN SMALL LETTER N
u'\xb2' # 0x00fd -> SUPERSCRIPT TWO
u'\u25a0' # 0x00fe -> BLACK SQUARE
u'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00a1: 0x00ad, # INVERTED EXCLAMATION MARK
0x00a3: 0x009c, # POUND SIGN
0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00ac: 0x00aa, # NOT SIGN
0x00b0: 0x00f8, # DEGREE SIGN
0x00b1: 0x00f1, # PLUS-MINUS SIGN
0x00b2: 0x00fd, # SUPERSCRIPT TWO
0x00b5: 0x00e6, # MICRO SIGN
0x00b7: 0x00fa, # MIDDLE DOT
0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER
0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF
0x00bf: 0x00a8, # INVERTED QUESTION MARK
0x00c1: 0x00a4, # LATIN CAPITAL LETTER A WITH ACUTE
0x00c4: 0x008e, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x00c5: 0x008f, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x00c6: 0x0092, # LATIN CAPITAL LIGATURE AE
0x00c7: 0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA
0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE
0x00cd: 0x00a5, # LATIN CAPITAL LETTER I WITH ACUTE
0x00d0: 0x008b, # LATIN CAPITAL LETTER ETH
0x00d3: 0x00a6, # LATIN CAPITAL LETTER O WITH ACUTE
0x00d6: 0x0099, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x00d8: 0x009d, # LATIN CAPITAL LETTER O WITH STROKE
0x00da: 0x00a7, # LATIN CAPITAL LETTER U WITH ACUTE
0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x00dd: 0x0097, # LATIN CAPITAL LETTER Y WITH ACUTE
0x00de: 0x008d, # LATIN CAPITAL LETTER THORN
0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S
0x00e0: 0x0085, # LATIN SMALL LETTER A WITH GRAVE
0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE
0x00e2: 0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x00e4: 0x0084, # LATIN SMALL LETTER A WITH DIAERESIS
0x00e5: 0x0086, # LATIN SMALL LETTER A WITH RING ABOVE
0x00e6: 0x0091, # LATIN SMALL LIGATURE AE
0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA
0x00e8: 0x008a, # LATIN SMALL LETTER E WITH GRAVE
0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE
0x00ea: 0x0088, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x00eb: 0x0089, # LATIN SMALL LETTER E WITH DIAERESIS
0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE
0x00f0: 0x008c, # LATIN SMALL LETTER ETH
0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x00f6: 0x0094, # LATIN SMALL LETTER O WITH DIAERESIS
0x00f7: 0x00f6, # DIVISION SIGN
0x00f8: 0x009b, # LATIN SMALL LETTER O WITH STROKE
0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE
0x00fb: 0x0096, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS
0x00fd: 0x0098, # LATIN SMALL LETTER Y WITH ACUTE
0x00fe: 0x0095, # LATIN SMALL LETTER THORN
0x0192: 0x009f, # LATIN SMALL LETTER F WITH HOOK
0x0393: 0x00e2, # GREEK CAPITAL LETTER GAMMA
0x0398: 0x00e9, # GREEK CAPITAL LETTER THETA
0x03a3: 0x00e4, # GREEK CAPITAL LETTER SIGMA
0x03a6: 0x00e8, # GREEK CAPITAL LETTER PHI
0x03a9: 0x00ea, # GREEK CAPITAL LETTER OMEGA
0x03b1: 0x00e0, # GREEK SMALL LETTER ALPHA
0x03b4: 0x00eb, # GREEK SMALL LETTER DELTA
0x03b5: 0x00ee, # GREEK SMALL LETTER EPSILON
0x03c0: 0x00e3, # GREEK SMALL LETTER PI
0x03c3: 0x00e5, # GREEK SMALL LETTER SIGMA
0x03c4: 0x00e7, # GREEK SMALL LETTER TAU
0x03c6: 0x00ed, # GREEK SMALL LETTER PHI
0x207f: 0x00fc, # SUPERSCRIPT LATIN SMALL LETTER N
0x20a7: 0x009e, # PESETA SIGN
0x2219: 0x00f9, # BULLET OPERATOR
0x221a: 0x00fb, # SQUARE ROOT
0x221e: 0x00ec, # INFINITY
0x2229: 0x00ef, # INTERSECTION
0x2248: 0x00f7, # ALMOST EQUAL TO
0x2261: 0x00f0, # IDENTICAL TO
0x2264: 0x00f3, # LESS-THAN OR EQUAL TO
0x2265: 0x00f2, # GREATER-THAN OR EQUAL TO
0x2310: 0x00a9, # REVERSED NOT SIGN
0x2320: 0x00f4, # TOP HALF INTEGRAL
0x2321: 0x00f5, # BOTTOM HALF INTEGRAL
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2552: 0x00d5, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x2553: 0x00d6, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2555: 0x00b8, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x2556: 0x00b7, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x2558: 0x00d4, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x2559: 0x00d3, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255b: 0x00be, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x255c: 0x00bd, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x255e: 0x00c6, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x255f: 0x00c7, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2561: 0x00b5, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x2562: 0x00b6, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2564: 0x00d1, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x2565: 0x00d2, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2567: 0x00cf, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x2568: 0x00d0, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256a: 0x00d8, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x256b: 0x00d7, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x258c: 0x00dd, # LEFT HALF BLOCK
0x2590: 0x00de, # RIGHT HALF BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
}
| mit |
alrusdi/lettuce | tests/integration/lib/Django-1.2.5/tests/regressiontests/templates/unicode.py | 39 | 1290 | # -*- coding: utf-8 -*-
from unittest import TestCase
from django.template import Template, TemplateEncodingError, Context
from django.utils.safestring import SafeData
class UnicodeTests(TestCase):
def test_template(self):
# Templates can be created from unicode strings.
t1 = Template(u'ŠĐĆŽćžšđ {{ var }}')
# Templates can also be created from bytestrings. These are assumed to
# be encoded using UTF-8.
s = '\xc5\xa0\xc4\x90\xc4\x86\xc5\xbd\xc4\x87\xc5\xbe\xc5\xa1\xc4\x91 {{ var }}'
t2 = Template(s)
s = '\x80\xc5\xc0'
self.assertRaises(TemplateEncodingError, Template, s)
# Contexts can be constructed from unicode or UTF-8 bytestrings.
c1 = Context({"var": "foo"})
c2 = Context({u"var": "foo"})
c3 = Context({"var": u"Đđ"})
c4 = Context({u"var": "\xc4\x90\xc4\x91"})
# Since both templates and all four contexts represent the same thing,
# they all render the same (and are returned as unicode objects and
# "safe" objects as well, for auto-escaping purposes).
self.assertEqual(t1.render(c3), t2.render(c3))
self.assertTrue(isinstance(t1.render(c3), unicode))
self.assertTrue(isinstance(t1.render(c3), SafeData))
| gpl-3.0 |
HarborYuan/cashier | env/Lib/site-packages/wheel/signatures/__init__.py | 70 | 3766 | """
Create and verify jws-js format Ed25519 signatures.
"""
import json
from ..util import urlsafe_b64decode, urlsafe_b64encode, native, binary
__all__ = ['sign', 'verify']
ed25519ll = None
ALG = "Ed25519"
def get_ed25519ll():
"""Lazy import-and-test of ed25519 module"""
global ed25519ll
if not ed25519ll:
try:
import ed25519ll # fast (thousands / s)
except (ImportError, OSError): # pragma nocover
from . import ed25519py as ed25519ll # pure Python (hundreds / s)
test()
return ed25519ll
def sign(payload, keypair):
"""Return a JWS-JS format signature given a JSON-serializable payload and
an Ed25519 keypair."""
get_ed25519ll()
#
header = {
"alg": ALG,
"jwk": {
"kty": ALG, # alg -> kty in jwk-08.
"vk": native(urlsafe_b64encode(keypair.vk))
}
}
encoded_header = urlsafe_b64encode(binary(json.dumps(header, sort_keys=True)))
encoded_payload = urlsafe_b64encode(binary(json.dumps(payload, sort_keys=True)))
secured_input = b".".join((encoded_header, encoded_payload))
sig_msg = ed25519ll.crypto_sign(secured_input, keypair.sk)
signature = sig_msg[:ed25519ll.SIGNATUREBYTES]
encoded_signature = urlsafe_b64encode(signature)
return {"recipients":
[{"header": native(encoded_header),
"signature": native(encoded_signature)}],
"payload": native(encoded_payload)}
def assertTrue(condition, message=""):
if not condition:
raise ValueError(message)
def verify(jwsjs):
"""Return (decoded headers, payload) if all signatures in jwsjs are
consistent, else raise ValueError.
Caller must decide whether the keys are actually trusted."""
get_ed25519ll()
# XXX forbid duplicate keys in JSON input using object_pairs_hook (2.7+)
recipients = jwsjs["recipients"]
encoded_payload = binary(jwsjs["payload"])
headers = []
for recipient in recipients:
assertTrue(len(recipient) == 2, "Unknown recipient key {0}".format(recipient))
h = binary(recipient["header"])
s = binary(recipient["signature"])
header = json.loads(native(urlsafe_b64decode(h)))
assertTrue(header["alg"] == ALG,
"Unexpected algorithm {0}".format(header["alg"]))
if "alg" in header["jwk"] and "kty" not in header["jwk"]:
header["jwk"]["kty"] = header["jwk"]["alg"] # b/w for JWK < -08
assertTrue(header["jwk"]["kty"] == ALG, # true for Ed25519
"Unexpected key type {0}".format(header["jwk"]["kty"]))
vk = urlsafe_b64decode(binary(header["jwk"]["vk"]))
secured_input = b".".join((h, encoded_payload))
sig = urlsafe_b64decode(s)
sig_msg = sig+secured_input
verified_input = native(ed25519ll.crypto_sign_open(sig_msg, vk))
verified_header, verified_payload = verified_input.split('.')
verified_header = binary(verified_header)
decoded_header = native(urlsafe_b64decode(verified_header))
headers.append(json.loads(decoded_header))
verified_payload = binary(verified_payload)
# only return header, payload that have passed through the crypto library.
payload = json.loads(native(urlsafe_b64decode(verified_payload)))
return headers, payload
def test():
kp = ed25519ll.crypto_sign_keypair()
payload = {'test': 'onstartup'}
jwsjs = json.loads(json.dumps(sign(payload, kp)))
verify(jwsjs)
jwsjs['payload'] += 'x'
try:
verify(jwsjs)
except ValueError:
pass
else: # pragma no cover
raise RuntimeError("No error from bad wheel.signatures payload.")
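# --- Illustrative usage sketch (not part of the original module) ---
# A minimal sign/verify round trip, assuming an Ed25519 backend importable by
# get_ed25519ll() is available; it mirrors test() above but also prints the
# JWS-JS structure produced by sign() and what verify() recovers from it.
if __name__ == '__main__':
    backend = get_ed25519ll()
    demo_keypair = backend.crypto_sign_keypair()
    signed = sign({'demo': 'payload'}, demo_keypair)
    print(json.dumps(signed, indent=2, sort_keys=True))
    recovered_headers, recovered_payload = verify(signed)
    print(recovered_headers)
    print(recovered_payload)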
| mit |
DerekK88/PICwriter | picwriter/components/stripslotconverter.py | 1 | 9317 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import gdspy
import picwriter.toolkit as tk
class StripSlotConverter(tk.Component):
"""Strip-to-Slot Side Converter Cell class. Adiabatically transforms a strip to a slot waveguide mode, with two sections. Section 1 introduces a narrow waveguide alongside the input strip waveguide and gradually lowers the gap between the strip waveguide and narrow side waveguide. Section 2 gradually converts the widths of the two waveguides until they are equal to the slot rail widths.
Args:
* **wgt_input** (WaveguideTemplate): WaveguideTemplate object for the input waveguide (should be either of type `strip` or `slot`).
* **wgt_output** (WaveguideTemplate): WaveguideTemplate object for the output waveguide (should be either of type `strip` or `slot`, opposite of the input type).
* **length1** (float): Length of section 1 that gradually changes the distance between the two waveguides.
* **length2** (float): Length of section 2 that gradually changes the widths of the two waveguides until equal to the slot waveguide rail widths.
* **start_rail_width** (float): Width of the narrow waveguide appearing next to the strip waveguide.
* **end_strip_width** (float): Width of the strip waveguide at the end of `length1` and before `length2`
* **d** (float): Distance between the outer edge of the strip waveguide and the start of the slot waveguide rail.
Keyword Args:
* **input_strip** (Boolean): If `True`, sets the input port to be the strip waveguide side. If `False`, slot waveguide is on the input. Defaults to `None`, in which case the input port waveguide template is used to choose.
* **port** (tuple): Cartesian coordinate of the input port. Defaults to (0,0).
* **direction** (string): Direction that the component will point *towards*, can be of type `'NORTH'`, `'WEST'`, `'SOUTH'`, `'EAST'`, OR an angle (float, in radians)
Members:
* **portlist** (dict): Dictionary with the relevant port information
Portlist format:
* portlist['input'] = {'port': (x1,y1), 'direction': 'dir1'}
* portlist['output'] = {'port': (x2, y2), 'direction': 'dir2'}
Where in the above (x1,y1) is the same as the 'port' input, (x2, y2) is the end of the taper, and 'dir1', 'dir2' are of type `'NORTH'`, `'WEST'`, `'SOUTH'`, `'EAST'`, *or* an angle in *radians*.
'Direction' points *towards* the waveguide that will connect to it.
Note: The waveguide and cladding layer/datatype are taken from the `wgt_slot` by default.
"""
def __init__(
self,
wgt_input,
wgt_output,
length1,
length2,
start_rail_width,
end_strip_width,
d,
input_strip=None,
port=(0, 0),
direction="EAST",
):
tk.Component.__init__(self, "StripSlotConverter", locals())
self.portlist = {}
        if (not isinstance(input_strip, bool)) and (input_strip is not None):
raise ValueError(
"Invalid input provided for `input_strip`. Please specify a boolean."
)
        if input_strip is None:
# Auto-detect based on wgt_input
self.input_strip = (
wgt_input.wg_type == "strip" or wgt_input.wg_type == "swg"
)
else:
# User-override
self.input_strip = input_strip
if self.input_strip:
self.wgt_strip = wgt_input
self.wgt_slot = wgt_output
else:
self.wgt_strip = wgt_output
self.wgt_slot = wgt_input
self.wg_spec = {
"layer": wgt_output.wg_layer,
"datatype": wgt_output.wg_datatype,
}
self.clad_spec = {
"layer": wgt_output.clad_layer,
"datatype": wgt_output.clad_datatype,
}
self.length1 = length1
self.length2 = length2
self.d = d
self.start_rail_width = start_rail_width
self.end_strip_width = end_strip_width
self.port = port
self.direction = direction
self.__build_cell()
self.__build_ports()
""" Translate & rotate the ports corresponding to this specific component object
"""
self._auto_transform_()
def __build_cell(self):
# Sequentially build all the geometric shapes using polygons
# Add strip waveguide taper for region 1
x0, y0 = (0, 0)
pts = [
(x0, y0 - self.wgt_strip.wg_width / 2.0),
(x0, y0 + self.wgt_strip.wg_width / 2.0),
(
x0 + self.length1,
y0 - self.wgt_strip.wg_width / 2.0 + self.end_strip_width,
),
(x0 + self.length1, y0 - self.wgt_strip.wg_width / 2.0),
]
strip1 = gdspy.Polygon(
pts, layer=self.wgt_strip.wg_layer, datatype=self.wgt_strip.wg_datatype
)
# Add the thin side waveguide for region 1
pts = [
(x0, y0 + self.wgt_strip.wg_width / 2.0 + self.d),
(x0, y0 + self.wgt_strip.wg_width / 2.0 + self.d + self.start_rail_width),
(
x0 + self.length1,
y0
- self.wgt_strip.wg_width / 2.0
+ self.end_strip_width
+ self.wgt_slot.slot
+ self.start_rail_width,
),
(
x0 + self.length1,
y0
- self.wgt_strip.wg_width / 2.0
+ self.end_strip_width
+ self.wgt_slot.slot,
),
]
thin_strip = gdspy.Polygon(
pts, layer=self.wgt_strip.wg_layer, datatype=self.wgt_strip.wg_datatype
)
# Add the bottom rail for region 2
pts = [
(
x0 + self.length1,
y0 - self.wgt_strip.wg_width / 2.0 + self.end_strip_width,
),
(x0 + self.length1, y0 - self.wgt_strip.wg_width / 2.0),
(x0 + self.length1 + self.length2, y0 - self.wgt_slot.wg_width / 2.0),
(
x0 + self.length1 + self.length2,
y0 - self.wgt_slot.wg_width / 2.0 + self.wgt_slot.rail,
),
]
rail1 = gdspy.Polygon(
pts, layer=self.wgt_strip.wg_layer, datatype=self.wgt_strip.wg_datatype
)
# Add the top rail for region 2
pts = [
(
x0 + self.length1,
y0
- self.wgt_strip.wg_width / 2.0
+ self.end_strip_width
+ self.wgt_slot.slot
+ self.start_rail_width,
),
(
x0 + self.length1,
y0
- self.wgt_strip.wg_width / 2.0
+ self.end_strip_width
+ self.wgt_slot.slot,
),
(
x0 + self.length1 + self.length2,
y0 + self.wgt_slot.wg_width / 2.0 - self.wgt_slot.rail,
),
(x0 + self.length1 + self.length2, y0 + self.wgt_slot.wg_width / 2.0),
]
rail2 = gdspy.Polygon(
pts, layer=self.wgt_strip.wg_layer, datatype=self.wgt_strip.wg_datatype
)
# Add a cladding polygon
pts = [
(x0, y0 + self.wgt_strip.clad_width + self.wgt_strip.wg_width / 2.0),
(
x0 + self.length1 + self.length2,
y0 + self.wgt_slot.clad_width + self.wgt_slot.wg_width / 2.0,
),
(
x0 + self.length1 + self.length2,
y0 - self.wgt_slot.clad_width - self.wgt_slot.wg_width / 2.0,
),
(x0, y0 - self.wgt_strip.clad_width - self.wgt_strip.wg_width / 2.0),
]
clad = gdspy.Polygon(
pts, layer=self.wgt_strip.clad_layer, datatype=self.wgt_strip.clad_datatype
)
self.add(strip1)
self.add(thin_strip)
self.add(rail1)
self.add(rail2)
self.add(clad)
def __build_ports(self):
# Portlist format:
# example: example: {'port':(x_position, y_position), 'direction': 'NORTH'}
self.portlist["input"] = {"port": (0, 0), "direction": "WEST"}
self.portlist["output"] = {
"port": (self.length1 + self.length2, 0),
"direction": "EAST",
}
if __name__ == "__main__":
from . import *
top = gdspy.Cell("top")
wgt_strip = WaveguideTemplate(bend_radius=50, wg_type="strip", wg_width=0.7)
wgt_slot = WaveguideTemplate(bend_radius=50, wg_type="slot", wg_width=0.7, slot=0.2)
wg1 = Waveguide([(0, 0), (100, 0)], wgt_strip)
tk.add(top, wg1)
ssc = StripSlotConverter(
wgt_strip,
wgt_slot,
length1=15.0,
length2=15.0,
start_rail_width=0.1,
end_strip_width=0.4,
d=1.0,
**wg1.portlist["output"]
)
tk.add(top, ssc)
(x1, y1) = ssc.portlist["output"]["port"]
wg2 = Waveguide([(x1, y1), (x1 + 100, y1)], wgt_slot)
tk.add(top, wg2)
gdspy.LayoutViewer(cells=top)
# gdspy.write_gds('StripSlotConverter.gds', unit=1.0e-6, precision=1.0e-9)
| mit |
EricForgy/JuliaBox | container/interactive/IJulia/tornado/src/gdrivesync.py | 4 | 9487 | import base64
import shutil
import os
import hashlib
import time
import datetime
import pytz
import isodate
from oauth2client.client import OAuth2Credentials
from oauth2client import GOOGLE_REVOKE_URI, GOOGLE_TOKEN_URI, GOOGLE_AUTH_URI
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
class GDriveSync:
"""Synchronizes folders from Google Drive.
Requires credentials to be provided as base64 encoded JSON representation of OAuth2Credentials, in form field gauth.
If credentials are not found, the Google authentication plugin is invoked
with state as ask_gdrive (/jboxauth/google?state=ask_gdrive). On successful
authentication and authorization, the plugin must call JuliaBox.init_gauth_tok
on the browser with appropriately formatted credentials.
"""
CREDSB64 = None
CREDS = None
GAUTH = None
DRIVE = None
LOCAL_TZ_OFFSET = 0
def __init__(self, loc):
self.loc = loc
with open(os.path.join(loc, '.gdrive')) as f:
self.gfolder = f.read().strip()
def repo_hash(self):
return hashlib.sha1('_'.join([self.loc, self.gfolder])).hexdigest()
def repo_name(self):
return os.path.basename(self.loc) + ' (' + self.gfolder + ')'
def sync(self):
self._sync_folder(self.loc, GDriveSync.folder_id(self.gfolder))
def _sync_folder(self, loc, gfolder):
# list local folder
loc_flist = {}
for f in os.listdir(loc):
if f.startswith('.'):
continue
full_path = os.path.join(loc, f)
is_dir = os.path.isdir(full_path)
mtime = datetime.datetime.fromtimestamp(os.path.getmtime(full_path), pytz.utc)
# + datetime.timedelta(seconds=GDriveSync.LOCAL_TZ_OFFSET)
loc_flist[f] = {'fullpath': full_path, 'is_dir': is_dir, 'mtime': mtime}
# list remote folder
gdrive_flist = {}
for f in GDriveSync.DRIVE.ListFile({'q': "'" + gfolder + "' in parents and trashed=false"}).GetList():
fname = f['title']
full_path = os.path.join(loc, fname)
is_dir = ('application/vnd.google-apps.folder' in f['mimeType'])
mtime = GDriveSync.parse_gdrive_time(f['modifiedDate'])
gdrive_flist[fname] = {'fullpath': full_path, 'is_dir': is_dir, 'mtime': mtime, 'id': f['id']}
parent_spec = [{"kind": "drive#fileLink", "id": gfolder}]
# for all files in local folder
for f, attrs in loc_flist.items():
# if it is a folder
if attrs['is_dir']:
# if file not on remote create remote folder, remove file from local list, add to remote list
if f not in gdrive_flist:
gdrive_file = GDriveSync.DRIVE.CreateFile({
'title': f,
'mimeType': 'application/vnd.google-apps.folder',
'parents': parent_spec,
'modifiedDate': attrs['mtime']
})
gdrive_file.Upload()
gdrive_flist[f] = {
                        'fullpath': attrs['fullpath'],
'is_dir': attrs['is_dir'],
'mtime': attrs['mtime'],
'id': gdrive_file['id']
}
del loc_flist[f]
else: # it is a file
# if file not on remote, upload local file, remove file from local list
if f not in gdrive_flist:
GDriveSync._upload(attrs['fullpath'], parents=parent_spec)
del loc_flist[f]
else:
gf_attrs = gdrive_flist[f]
# if file in remote is older, upload local file
tdiff = (attrs['mtime'] - gf_attrs['mtime']).total_seconds()
# print("existing file tdiff: " + str(tdiff))
if tdiff >= 1:
GDriveSync._upload(attrs['fullpath'], parents=None, remid=gf_attrs['id'])
# if file on remote is newer, download remote file
elif tdiff <= -1:
GDriveSync._download(attrs['fullpath'], gf_attrs['id'])
#else:
# print("already in sync " + attrs['fullpath'])
# remove file from both lists
del loc_flist[f]
del gdrive_flist[f]
# for files remaining in remote list
for f, gf_attrs in gdrive_flist.items():
# create local folder if it does not exist
fullpath = gf_attrs['fullpath']
if gf_attrs['is_dir']:
if not os.path.exists(fullpath):
os.makedirs(fullpath)
# download remote file, remove from remote list
else:
GDriveSync._download(fullpath, gf_attrs['id'])
del gdrive_flist[f]
# gdrive_flist should only have folders if any
# for folders remaining in remote list call _sync_folder recursively on them
for f, gf_attrs in gdrive_flist.items():
self._sync_folder(gf_attrs['fullpath'], gf_attrs['id'])
@staticmethod
def _upload(locpath, parents=None, remid=None):
fname = os.path.basename(locpath)
# print("uploading " + fname + " to " + locpath + ", parents: " + str(parents) + ", remid: " + str(remid))
gdrive_file = GDriveSync.DRIVE.CreateFile({'id': remid}) if (remid is not None) else \
GDriveSync.DRIVE.CreateFile({'title': fname, 'parents': parents})
gdrive_file.SetContentFile(locpath)
gdrive_file.Upload()
GDriveSync._sync_file_time(locpath, gdrive_file)
@staticmethod
def _download(locpath, remid):
# print("downloading " + locpath + " from " + remid)
gdrive_file = GDriveSync.DRIVE.CreateFile({'id': remid})
gdrive_file.GetContentFile(locpath)
GDriveSync._sync_file_time(locpath, gdrive_file)
@staticmethod
def _sync_file_time(locpath, gdrive_file):
gdrive_file.FetchMetadata()
mtime = GDriveSync.parse_gdrive_time(gdrive_file['modifiedDate'])
timestamp = (mtime - datetime.datetime.fromtimestamp(0, pytz.utc)).total_seconds()
# print("setting file time to " + str(mtime) + " timestamp: " + str(timestamp))
os.utime(locpath, (timestamp, timestamp))
@staticmethod
def parse_gdrive_time(tm):
        if tm is not None:
tm = isodate.parse_datetime(tm)
return tm
@staticmethod
def local_time_offset():
"""Return offset of local zone from GMT"""
if time.localtime().tm_isdst and time.daylight:
return time.altzone
else:
return time.timezone
@staticmethod
def init_creds(credsb64):
GDriveSync.LOCAL_TZ_OFFSET = GDriveSync.local_time_offset()
if GDriveSync.CREDSB64 == credsb64:
return
creds_json = base64.b64decode(credsb64)
creds = OAuth2Credentials.from_json(creds_json)
GDriveSync.CREDS = creds
GDriveSync.CREDSB64 = credsb64
gauth = GoogleAuth()
gauth.settings = {
'client_config_backend': 'settings',
'client_config_file': 'client_secrets.json',
'save_credentials': False,
'oauth_scope': ['https://www.googleapis.com/auth/drive'],
'client_config': {
'client_id': creds.client_id,
'client_secret': creds.client_secret,
'auth_uri': GOOGLE_AUTH_URI,
'token_uri': GOOGLE_TOKEN_URI,
'revoke_uri': GOOGLE_REVOKE_URI,
'redirect_uri': 'http://juliabox.org/jboxauth/google/'
}
}
gauth.LoadClientConfigSettings()
gauth.credentials = creds
GDriveSync.GAUTH = gauth
GDriveSync.DRIVE = GoogleDrive(gauth)
@staticmethod
def folder_name(gfolder):
return gfolder.split('/')[-2]
@staticmethod
def folder_id(gfolder):
return gfolder.split('/')[-1]
@staticmethod
def clone(gfolder, loc, overwrite=False):
if overwrite and os.path.exists(loc):
shutil.rmtree(loc)
# create the folder and .gdrive file
os.mkdir(loc)
with open(os.path.join(loc, '.gdrive'), 'w') as f:
f.write(gfolder)
GDriveSync._clone_gfolder(GDriveSync.folder_id(gfolder), loc)
return GDriveSync(loc)
@staticmethod
def _clone_gfolder(gfolder, loc):
drive = GDriveSync.DRIVE
for f in drive.ListFile({'q': "'" + gfolder + "' in parents and trashed=false"}).GetList():
fpath = os.path.join(loc, f['title'])
if 'application/vnd.google-apps.folder' in f['mimeType']:
os.mkdir(fpath)
GDriveSync._clone_gfolder(f['id'], fpath)
else:
GDriveSync._download(fpath, f['id'])
@staticmethod
def scan_repo_paths(dirs):
repos = []
for d in dirs:
for pth in os.listdir(d):
if pth.startswith('.'):
continue
fpth = os.path.join(d, pth)
if os.path.isdir(fpth):
gdrive_pth = os.path.join(fpth, '.gdrive')
if os.path.isfile(gdrive_pth):
repos.append(fpth)
return repos
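# --- Illustrative usage sketch (not part of the original module) ---
# A minimal end-to-end flow under stated assumptions: the base64 token and the
# "<name>/<folder-id>" Google Drive reference below are placeholders and would
# have to come from the Google auth plugin described in the class docstring.
if __name__ == '__main__':
    creds_b64 = '<base64-encoded OAuth2Credentials JSON>'  # placeholder
    GDriveSync.init_creds(creds_b64)
    # Clone the remote folder into a fresh local directory, then sync both ways.
    gsync = GDriveSync.clone('Notebooks/<gdrive-folder-id>', '/tmp/gdrive_demo', overwrite=True)
    gsync.sync()
    print(gsync.repo_name())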
| mit |
dzamie/weasyl | weasyl/blocktag.py | 1 | 4024 | # blocktag.py
from error import PostgresError
import define as d
import profile
import searchtag
from libweasyl import ratings
from weasyl.cache import region
# For blocked tags, `rating` refers to the lowest rating for which that tag is
# blocked; for example, (X, Y, 10) would block tag Y for all ratings, whereas
# (X, Y, 30) would block tag Y for only adult ratings.
def check(userid, submitid=None, charid=None, journalid=None):
"""
Returns True if the submission, character, or journal contains a search tag
that the user has blocked, else False.
"""
if not userid:
return False
if submitid:
map_table = "searchmapsubmit"
content_table = "submission"
id_field = "submitid"
target = submitid
elif charid:
map_table = "searchmapchar"
content_table = "character"
id_field = "charid"
target = charid
else:
map_table = "searchmapjournal"
content_table = "journal"
id_field = "journalid"
target = journalid
query = """
SELECT EXISTS (
SELECT 0 FROM {map_table} searchmap
INNER JOIN {content_table} content ON searchmap.targetid = content.{id_field}
WHERE searchmap.targetid = %(id)s
AND content.userid != %(user)s
AND searchmap.tagid IN (
SELECT blocktag.tagid FROM blocktag
WHERE userid = %(user)s AND blocktag.rating <= content.rating)) AS block
""".format(map_table=map_table, content_table=content_table, id_field=id_field)
return d.engine.execute(query, id=target, user=userid).first().block
def check_list(rating, tags, blocked_tags):
return any(rating >= b['rating'] and b['title'] in tags for b in blocked_tags)
def suggest(userid, target):
if not target:
return []
return d.execute("SELECT title FROM searchtag"
" WHERE title LIKE '%s%%' AND tagid NOT IN (SELECT tagid FROM blocktag WHERE userid = %i)"
" ORDER BY title LIMIT 10", [target, userid], options="within")
def select(userid):
return [{
"title": i[0],
"rating": i[1],
} for i in d.execute("SELECT st.title, bt.rating FROM searchtag st "
" INNER JOIN blocktag bt ON st.tagid = bt.tagid"
" WHERE bt.userid = %i"
" ORDER BY st.title", [userid])]
@region.cache_on_arguments()
@d.record_timing
def cached_select(userid):
return select(userid)
def insert(userid, tagid=None, title=None, rating=None):
if rating not in ratings.CODE_MAP:
rating = ratings.GENERAL.code
profile.check_user_rating_allowed(userid, rating)
if tagid:
tag = int(tagid)
try:
d.engine.execute("INSERT INTO blocktag VALUES (%s, %s, %s)", userid, tag, rating)
except PostgresError:
return
elif title:
tag_name = d.get_search_tag(title)
try:
d.engine.execute("""
INSERT INTO blocktag (userid, tagid, rating)
VALUES (
%(user)s,
(SELECT tagid FROM searchtag WHERE title = %(tag_name)s),
%(rating)s
)
""", user=userid, tag_name=tag_name, rating=rating)
except PostgresError:
try:
tag = searchtag.create(title)
except PostgresError:
return
d.engine.execute("INSERT INTO blocktag VALUES (%s, %s, %s)", userid, tag, rating)
cached_select.invalidate(userid)
def remove(userid, tagid=None, title=None):
if tagid:
d.execute("DELETE FROM blocktag WHERE (userid, tagid) = (%i, %i)", [userid, tagid])
elif title:
d.execute("DELETE FROM blocktag WHERE (userid, tagid) = (%i, (SELECT tagid FROM searchtag WHERE title = '%s'))",
[userid, d.get_search_tag(title)])
cached_select.invalidate(userid)
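# --- Illustrative sketch (not part of the original module) ---
# check_list() works on plain data, so the blocking semantics described in the
# comment at the top of this file can be shown without a database. The tag
# titles below are made up; the rating codes follow that same convention
# (10 blocks the tag at every rating, 30 blocks it for adult ratings only).
if __name__ == '__main__':
    blocked = [
        {'title': 'spiders', 'rating': 10},   # blocked for all content ratings
        {'title': 'violence', 'rating': 30},  # blocked for adult content only
    ]
    print(check_list(10, {'spiders'}, blocked))   # True: 10 >= 10 and tag present
    print(check_list(10, {'violence'}, blocked))  # False: 10 < 30
    print(check_list(40, {'violence'}, blocked))  # True: 40 >= 30 and tag present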
| apache-2.0 |
phihag/youtube-dl | youtube_dl/extractor/primesharetv.py | 73 | 1853 | from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
ExtractorError,
sanitized_Request,
urlencode_postdata,
)
class PrimeShareTVIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?primeshare\.tv/download/(?P<id>[\da-zA-Z]+)'
_TEST = {
'url': 'http://primeshare.tv/download/238790B611',
'md5': 'b92d9bf5461137c36228009f31533fbc',
'info_dict': {
'id': '238790B611',
'ext': 'mp4',
'title': 'Public Domain - 1960s Commercial - Crest Toothpaste-YKsuFona',
},
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
if '>File not exist<' in webpage:
raise ExtractorError('Video %s does not exist' % video_id, expected=True)
fields = self._hidden_inputs(webpage)
headers = {
'Referer': url,
'Content-Type': 'application/x-www-form-urlencoded',
}
wait_time = int(self._search_regex(
r'var\s+cWaitTime\s*=\s*(\d+)',
webpage, 'wait time', default=7)) + 1
self._sleep(wait_time, video_id)
req = sanitized_Request(
url, urlencode_postdata(fields), headers)
video_page = self._download_webpage(
req, video_id, 'Downloading video page')
video_url = self._search_regex(
r"url\s*:\s*'([^']+\.primeshare\.tv(?::443)?/file/[^']+)'",
video_page, 'video url')
title = self._html_search_regex(
r'<h1>Watch\s*(?: )?\s*\((.+?)(?:\s*\[\.\.\.\])?\)\s*(?: )?\s*<strong>',
video_page, 'title')
return {
'id': video_id,
'url': video_url,
'title': title,
'ext': 'mp4',
}
| unlicense |
atruberg/django-custom | django/contrib/admin/templatetags/log.py | 114 | 2125 | from django import template
from django.contrib.admin.models import LogEntry
register = template.Library()
class AdminLogNode(template.Node):
def __init__(self, limit, varname, user):
self.limit, self.varname, self.user = limit, varname, user
def __repr__(self):
return "<GetAdminLog Node>"
def render(self, context):
if self.user is None:
context[self.varname] = LogEntry.objects.all().select_related('content_type', 'user')[:self.limit]
else:
user_id = self.user
if not user_id.isdigit():
user_id = context[self.user].pk
context[self.varname] = LogEntry.objects.filter(user__pk__exact=user_id).select_related('content_type', 'user')[:int(self.limit)]
return ''
@register.tag
def get_admin_log(parser, token):
"""
Populates a template variable with the admin log for the given criteria.
Usage::
{% get_admin_log [limit] as [varname] for_user [context_var_containing_user_obj] %}
Examples::
{% get_admin_log 10 as admin_log for_user 23 %}
{% get_admin_log 10 as admin_log for_user user %}
{% get_admin_log 10 as admin_log %}
Note that ``context_var_containing_user_obj`` can be a hard-coded integer
(user ID) or the name of a template context variable containing the user
object whose ID you want.
"""
tokens = token.contents.split()
if len(tokens) < 4:
raise template.TemplateSyntaxError(
"'get_admin_log' statements require two arguments")
if not tokens[1].isdigit():
raise template.TemplateSyntaxError(
"First argument to 'get_admin_log' must be an integer")
if tokens[2] != 'as':
raise template.TemplateSyntaxError(
"Second argument to 'get_admin_log' must be 'as'")
if len(tokens) > 4:
if tokens[4] != 'for_user':
raise template.TemplateSyntaxError(
"Fourth argument to 'get_admin_log' must be 'for_user'")
return AdminLogNode(limit=tokens[1], varname=tokens[3], user=(tokens[5] if len(tokens) > 5 else None))
| bsd-3-clause |
henrytao-me/openerp.positionq | openerp/addons/pad_project/__openerp__.py | 119 | 1478 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Pad on tasks',
'version': '1.0',
'category': 'Project Management',
'description': """
This module adds a PAD in all project kanban views.
===================================================
""",
'author': 'OpenERP SA',
'website': 'http://www.openerp.com',
'depends': ['project', 'pad'],
'data': ['project_task.xml'],
'demo': [],
'installable': True,
'auto_install': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
guewen/OpenUpgrade | openerp/service/server.py | 32 | 35650 | #-----------------------------------------------------------
# Threaded, Gevent and Prefork Servers
#-----------------------------------------------------------
import datetime
import errno
import logging
import os
import os.path
import platform
import psutil
import random
if os.name == 'posix':
import resource
else:
resource = None
import select
import signal
import socket
import subprocess
import sys
import threading
import time
import unittest2
import werkzeug.serving
try:
import fcntl
except ImportError:
pass
try:
from setproctitle import setproctitle
except ImportError:
setproctitle = lambda x: None
import openerp
from openerp.modules.registry import RegistryManager
from openerp.release import nt_service_name
import openerp.tools.config as config
from openerp.tools.misc import stripped_sys_argv, dumpstacks
_logger = logging.getLogger(__name__)
SLEEP_INTERVAL = 60 # 1 min
#----------------------------------------------------------
# Werkzeug WSGI servers patched
#----------------------------------------------------------
class BaseWSGIServerNoBind(werkzeug.serving.BaseWSGIServer):
""" werkzeug Base WSGI Server patched to skip socket binding. PreforkServer
    uses this class: it sets the socket and calls process_request() manually
"""
def __init__(self, app):
werkzeug.serving.BaseWSGIServer.__init__(self, "1", "1", app)
def server_bind(self):
        # we don't bind because we use the listen socket of PreforkServer#socket
# instead we close the socket
if self.socket:
self.socket.close()
def server_activate(self):
        # don't listen, as we use PreforkServer#socket
pass
class RequestHandler(werkzeug.serving.WSGIRequestHandler):
def setup(self):
# flag the current thread as handling a http request
super(RequestHandler, self).setup()
me = threading.currentThread()
me.name = 'openerp.service.http.request.%s' % (me.ident,)
# _reexec() should set LISTEN_* to avoid connection refused during reload time. It
# should also work with systemd socket activation. This is currently untested
# and not yet used.
class ThreadedWSGIServerReloadable(werkzeug.serving.ThreadedWSGIServer):
""" werkzeug Threaded WSGI Server patched to allow reusing a listen socket
    given by the environment; this is used by autoreload to keep the listen
socket open when a reload happens.
"""
def __init__(self, host, port, app):
super(ThreadedWSGIServerReloadable, self).__init__(host, port, app,
handler=RequestHandler)
def server_bind(self):
envfd = os.environ.get('LISTEN_FDS')
if envfd and os.environ.get('LISTEN_PID') == str(os.getpid()):
self.reload_socket = True
self.socket = socket.fromfd(int(envfd), socket.AF_INET, socket.SOCK_STREAM)
            # should we os.close(int(envfd))? it seems python duplicates the fd.
else:
self.reload_socket = False
super(ThreadedWSGIServerReloadable, self).server_bind()
def server_activate(self):
if not self.reload_socket:
super(ThreadedWSGIServerReloadable, self).server_activate()
#----------------------------------------------------------
# AutoReload watcher
#----------------------------------------------------------
class AutoReload(object):
def __init__(self, server):
self.server = server
self.files = {}
self.modules = {}
import pyinotify
class EventHandler(pyinotify.ProcessEvent):
def __init__(self, autoreload):
self.autoreload = autoreload
def process_IN_CREATE(self, event):
_logger.debug('File created: %s', event.pathname)
self.autoreload.files[event.pathname] = 1
def process_IN_MODIFY(self, event):
_logger.debug('File modified: %s', event.pathname)
self.autoreload.files[event.pathname] = 1
self.wm = pyinotify.WatchManager()
self.handler = EventHandler(self)
self.notifier = pyinotify.Notifier(self.wm, self.handler, timeout=0)
mask = pyinotify.IN_MODIFY | pyinotify.IN_CREATE # IN_MOVED_FROM, IN_MOVED_TO ?
for path in openerp.modules.modules.ad_paths:
_logger.info('Watching addons folder %s', path)
self.wm.add_watch(path, mask, rec=True)
def process_data(self, files):
xml_files = [i for i in files if i.endswith('.xml')]
for i in xml_files:
for path in openerp.modules.modules.ad_paths:
if i.startswith(path):
                    # find out which addons path the file belongs to
                    # and extract its module name
right = i[len(path) + 1:].split('/')
if len(right) < 2:
continue
module = right[0]
self.modules[module] = 1
if self.modules:
_logger.info('autoreload: xml change detected, autoreload activated')
restart()
def process_python(self, files):
# process python changes
py_files = [i for i in files if i.endswith('.py')]
py_errors = []
# TODO keep python errors until they are ok
if py_files:
for i in py_files:
try:
source = open(i, 'rb').read() + '\n'
compile(source, i, 'exec')
except SyntaxError:
py_errors.append(i)
if py_errors:
_logger.info('autoreload: python code change detected, errors found')
for i in py_errors:
_logger.info('autoreload: SyntaxError %s', i)
else:
_logger.info('autoreload: python code updated, autoreload activated')
restart()
def check_thread(self):
# Check if some files have been touched in the addons path.
# If true, check if the touched file belongs to an installed module
        # in any of the databases used in the registry manager.
while 1:
while self.notifier.check_events(1000):
self.notifier.read_events()
self.notifier.process_events()
l = self.files.keys()
self.files.clear()
self.process_data(l)
self.process_python(l)
def run(self):
t = threading.Thread(target=self.check_thread)
t.setDaemon(True)
t.start()
_logger.info('AutoReload watcher running')
#----------------------------------------------------------
# Servers: Threaded, Gevented and Prefork
#----------------------------------------------------------
class CommonServer(object):
def __init__(self, app):
# TODO Change the xmlrpc_* options to http_*
self.app = app
# config
self.interface = config['xmlrpc_interface'] or '0.0.0.0'
self.port = config['xmlrpc_port']
# runtime
self.pid = os.getpid()
def close_socket(self, sock):
""" Closes a socket instance cleanly
:param sock: the network socket to close
:type sock: socket.socket
"""
try:
sock.shutdown(socket.SHUT_RDWR)
except socket.error, e:
# On OSX, socket shutdowns both sides if any side closes it
# causing an error 57 'Socket is not connected' on shutdown
# of the other side (or something), see
# http://bugs.python.org/issue4397
# note: stdlib fixed test, not behavior
if e.errno != errno.ENOTCONN or platform.system() not in ['Darwin', 'Windows']:
raise
sock.close()
class ThreadedServer(CommonServer):
def __init__(self, app):
super(ThreadedServer, self).__init__(app)
self.main_thread_id = threading.currentThread().ident
# Variable keeping track of the number of calls to the signal handler defined
# below. This variable is monitored by ``quit_on_signals()``.
self.quit_signals_received = 0
#self.socket = None
self.httpd = None
def signal_handler(self, sig, frame):
if sig in [signal.SIGINT, signal.SIGTERM]:
# shutdown on kill -INT or -TERM
self.quit_signals_received += 1
if self.quit_signals_received > 1:
# logging.shutdown was already called at this point.
sys.stderr.write("Forced shutdown.\n")
os._exit(0)
elif sig == signal.SIGHUP:
# restart on kill -HUP
openerp.phoenix = True
self.quit_signals_received += 1
def cron_thread(self, number):
while True:
time.sleep(SLEEP_INTERVAL + number) # Steve Reich timing style
registries = openerp.modules.registry.RegistryManager.registries
_logger.debug('cron%d polling for jobs', number)
for db_name, registry in registries.items():
while True and registry.ready:
acquired = openerp.addons.base.ir.ir_cron.ir_cron._acquire_job(db_name)
if not acquired:
break
def cron_spawn(self):
""" Start the above runner function in a daemon thread.
The thread is a typical daemon thread: it will never quit and must be
terminated when the main process exits - with no consequence (the processing
threads it spawns are not marked daemon).
"""
# Force call to strptime just before starting the cron thread
# to prevent time.strptime AttributeError within the thread.
# See: http://bugs.python.org/issue7980
datetime.datetime.strptime('2012-01-01', '%Y-%m-%d')
for i in range(openerp.tools.config['max_cron_threads']):
def target():
self.cron_thread(i)
t = threading.Thread(target=target, name="openerp.service.cron.cron%d" % i)
t.setDaemon(True)
t.start()
_logger.debug("cron%d started!" % i)
def http_thread(self):
def app(e, s):
return self.app(e, s)
self.httpd = ThreadedWSGIServerReloadable(self.interface, self.port, app)
self.httpd.serve_forever()
def http_spawn(self):
t = threading.Thread(target=self.http_thread, name="openerp.service.httpd")
t.setDaemon(True)
t.start()
_logger.info('HTTP service (werkzeug) running on %s:%s', self.interface, self.port)
def start(self, stop=False):
_logger.debug("Setting signal handlers")
if os.name == 'posix':
signal.signal(signal.SIGINT, self.signal_handler)
signal.signal(signal.SIGTERM, self.signal_handler)
signal.signal(signal.SIGCHLD, self.signal_handler)
signal.signal(signal.SIGHUP, self.signal_handler)
signal.signal(signal.SIGQUIT, dumpstacks)
elif os.name == 'nt':
import win32api
win32api.SetConsoleCtrlHandler(lambda sig: self.signal_handler(sig, None), 1)
test_mode = config['test_enable'] or config['test_file']
if not stop or test_mode:
            # some tests need the http daemon to be available...
self.http_spawn()
if not stop:
# only relevant if we are not in "--stop-after-init" mode
self.cron_spawn()
def stop(self):
""" Shutdown the WSGI server. Wait for non deamon threads.
"""
_logger.info("Initiating shutdown")
_logger.info("Hit CTRL-C again or send a second signal to force the shutdown.")
if self.httpd:
self.httpd.shutdown()
self.close_socket(self.httpd.socket)
# Manually join() all threads before calling sys.exit() to allow a second signal
# to trigger _force_quit() in case some non-daemon threads won't exit cleanly.
# threading.Thread.join() should not mask signals (at least in python 2.5).
me = threading.currentThread()
_logger.debug('current thread: %r', me)
for thread in threading.enumerate():
_logger.debug('process %r (%r)', thread, thread.isDaemon())
if thread != me and not thread.isDaemon() and thread.ident != self.main_thread_id:
while thread.isAlive():
_logger.debug('join and sleep')
# Need a busyloop here as thread.join() masks signals
# and would prevent the forced shutdown.
thread.join(0.05)
time.sleep(0.05)
_logger.debug('--')
openerp.modules.registry.RegistryManager.delete_all()
logging.shutdown()
def run(self, preload=None, stop=False):
""" Start the http server and the cron thread then wait for a signal.
The first SIGINT or SIGTERM signal will initiate a graceful shutdown while
a second one if any will force an immediate exit.
"""
self.start(stop=stop)
rc = preload_registries(preload)
if stop:
self.stop()
return rc
# Wait for a first signal to be handled. (time.sleep will be interrupted
# by the signal handler.) The try/except is for the win32 case.
try:
while self.quit_signals_received == 0:
time.sleep(60)
except KeyboardInterrupt:
pass
self.stop()
def reload(self):
os.kill(self.pid, signal.SIGHUP)
class GeventServer(CommonServer):
def __init__(self, app):
super(GeventServer, self).__init__(app)
self.port = config['longpolling_port']
self.httpd = None
def watch_parent(self, beat=4):
import gevent
ppid = os.getppid()
while True:
if ppid != os.getppid():
pid = os.getpid()
_logger.info("LongPolling (%s) Parent changed", pid)
# suicide !!
os.kill(pid, signal.SIGTERM)
return
gevent.sleep(beat)
def start(self):
import gevent
from gevent.wsgi import WSGIServer
if os.name == 'posix':
signal.signal(signal.SIGQUIT, dumpstacks)
gevent.spawn(self.watch_parent)
self.httpd = WSGIServer((self.interface, self.port), self.app)
_logger.info('Evented Service (longpolling) running on %s:%s', self.interface, self.port)
self.httpd.serve_forever()
def stop(self):
import gevent
self.httpd.stop()
gevent.shutdown()
def run(self, preload, stop):
self.start()
self.stop()
class PreforkServer(CommonServer):
""" Multiprocessing inspired by (g)unicorn.
PreforkServer (aka Multicorn) currently uses accept(2) as dispatching
method between workers but we plan to replace it by a more intelligent
dispatcher to will parse the first HTTP request line.
"""
def __init__(self, app):
# config
self.address = (config['xmlrpc_interface'] or '0.0.0.0', config['xmlrpc_port'])
self.population = config['workers']
self.timeout = config['limit_time_real']
self.limit_request = config['limit_request']
# working vars
self.beat = 4
self.app = app
self.pid = os.getpid()
self.socket = None
self.workers_http = {}
self.workers_cron = {}
self.workers = {}
self.generation = 0
self.queue = []
self.long_polling_pid = None
def pipe_new(self):
pipe = os.pipe()
for fd in pipe:
# non_blocking
flags = fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK
fcntl.fcntl(fd, fcntl.F_SETFL, flags)
# close_on_exec
flags = fcntl.fcntl(fd, fcntl.F_GETFD) | fcntl.FD_CLOEXEC
fcntl.fcntl(fd, fcntl.F_SETFD, flags)
return pipe
def pipe_ping(self, pipe):
try:
os.write(pipe[1], '.')
except IOError, e:
if e.errno not in [errno.EAGAIN, errno.EINTR]:
raise
def signal_handler(self, sig, frame):
if len(self.queue) < 5 or sig == signal.SIGCHLD:
self.queue.append(sig)
self.pipe_ping(self.pipe)
else:
_logger.warn("Dropping signal: %s", sig)
def worker_spawn(self, klass, workers_registry):
self.generation += 1
worker = klass(self)
pid = os.fork()
if pid != 0:
worker.pid = pid
self.workers[pid] = worker
workers_registry[pid] = worker
return worker
else:
worker.run()
sys.exit(0)
def long_polling_spawn(self):
nargs = stripped_sys_argv()
cmd = nargs[0]
cmd = os.path.join(os.path.dirname(cmd), "openerp-gevent")
nargs[0] = cmd
popen = subprocess.Popen(nargs)
self.long_polling_pid = popen.pid
def worker_pop(self, pid):
if pid in self.workers:
_logger.debug("Worker (%s) unregistered", pid)
try:
self.workers_http.pop(pid, None)
self.workers_cron.pop(pid, None)
u = self.workers.pop(pid)
u.close()
except OSError:
return
def worker_kill(self, pid, sig):
try:
os.kill(pid, sig)
except OSError, e:
if e.errno == errno.ESRCH:
self.worker_pop(pid)
def process_signals(self):
while len(self.queue):
sig = self.queue.pop(0)
if sig in [signal.SIGINT, signal.SIGTERM]:
raise KeyboardInterrupt
elif sig == signal.SIGHUP:
# restart on kill -HUP
openerp.phoenix = True
raise KeyboardInterrupt
elif sig == signal.SIGQUIT:
# dump stacks on kill -3
self.dumpstacks()
elif sig == signal.SIGTTIN:
# increase number of workers
self.population += 1
elif sig == signal.SIGTTOU:
# decrease number of workers
self.population -= 1
def process_zombie(self):
# reap dead workers
while 1:
try:
wpid, status = os.waitpid(-1, os.WNOHANG)
if not wpid:
break
if (status >> 8) == 3:
msg = "Critial worker error (%s)"
_logger.critical(msg, wpid)
raise Exception(msg % wpid)
self.worker_pop(wpid)
except OSError, e:
if e.errno == errno.ECHILD:
break
raise
def process_timeout(self):
now = time.time()
for (pid, worker) in self.workers.items():
if worker.watchdog_timeout is not None and \
(now - worker.watchdog_time) >= worker.watchdog_timeout:
_logger.error("Worker (%s) timeout", pid)
self.worker_kill(pid, signal.SIGKILL)
def process_spawn(self):
while len(self.workers_http) < self.population:
self.worker_spawn(WorkerHTTP, self.workers_http)
while len(self.workers_cron) < config['max_cron_threads']:
self.worker_spawn(WorkerCron, self.workers_cron)
if not self.long_polling_pid:
self.long_polling_spawn()
def sleep(self):
try:
# map of fd -> worker
fds = dict([(w.watchdog_pipe[0], w) for k, w in self.workers.items()])
fd_in = fds.keys() + [self.pipe[0]]
# check for ping or internal wakeups
ready = select.select(fd_in, [], [], self.beat)
# update worker watchdogs
for fd in ready[0]:
if fd in fds:
fds[fd].watchdog_time = time.time()
try:
# empty pipe
while os.read(fd, 1):
pass
except OSError, e:
if e.errno not in [errno.EAGAIN]:
raise
except select.error, e:
if e[0] not in [errno.EINTR]:
raise
def start(self):
        # wakeup pipe: python doesn't throw EINTR when a syscall is interrupted
        # by a signal, simulating a pseudo SA_RESTART. We write to a pipe in the
# signal handler to overcome this behaviour
self.pipe = self.pipe_new()
# set signal handlers
signal.signal(signal.SIGINT, self.signal_handler)
signal.signal(signal.SIGTERM, self.signal_handler)
signal.signal(signal.SIGHUP, self.signal_handler)
signal.signal(signal.SIGCHLD, self.signal_handler)
signal.signal(signal.SIGTTIN, self.signal_handler)
signal.signal(signal.SIGTTOU, self.signal_handler)
signal.signal(signal.SIGQUIT, dumpstacks)
# listen to socket
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.setblocking(0)
self.socket.bind(self.address)
self.socket.listen(8 * self.population)
def stop(self, graceful=True):
if self.long_polling_pid is not None:
# FIXME make longpolling process handle SIGTERM correctly
self.worker_kill(self.long_polling_pid, signal.SIGKILL)
self.long_polling_pid = None
if graceful:
_logger.info("Stopping gracefully")
limit = time.time() + self.timeout
for pid in self.workers.keys():
self.worker_kill(pid, signal.SIGTERM)
while self.workers and time.time() < limit:
self.process_zombie()
time.sleep(0.1)
else:
_logger.info("Stopping forcefully")
for pid in self.workers.keys():
self.worker_kill(pid, signal.SIGTERM)
self.socket.close()
def run(self, preload, stop):
self.start()
rc = preload_registries(preload)
if stop:
self.stop()
return rc
        # Empty the cursor pool, we don't want them to be shared among forked workers.
openerp.sql_db.close_all()
_logger.debug("Multiprocess starting")
while 1:
try:
#_logger.debug("Multiprocess beat (%s)",time.time())
self.process_signals()
self.process_zombie()
self.process_timeout()
self.process_spawn()
self.sleep()
except KeyboardInterrupt:
_logger.debug("Multiprocess clean stop")
self.stop()
break
except Exception, e:
_logger.exception(e)
self.stop(False)
return -1
class Worker(object):
""" Workers """
def __init__(self, multi):
self.multi = multi
self.watchdog_time = time.time()
self.watchdog_pipe = multi.pipe_new()
# Can be set to None if no watchdog is desired.
self.watchdog_timeout = multi.timeout
self.ppid = os.getpid()
self.pid = None
self.alive = True
# should we rename into lifetime ?
self.request_max = multi.limit_request
self.request_count = 0
def setproctitle(self, title=""):
setproctitle('openerp: %s %s %s' % (self.__class__.__name__, self.pid, title))
def close(self):
os.close(self.watchdog_pipe[0])
os.close(self.watchdog_pipe[1])
def signal_handler(self, sig, frame):
self.alive = False
def sleep(self):
try:
select.select([self.multi.socket], [], [], self.multi.beat)
except select.error, e:
if e[0] not in [errno.EINTR]:
raise
def process_limit(self):
if resource is None:
return
        # If our parent changed, commit suicide
if self.ppid != os.getppid():
_logger.info("Worker (%s) Parent changed", self.pid)
self.alive = False
# check for lifetime
if self.request_count >= self.request_max:
_logger.info("Worker (%d) max request (%s) reached.", self.pid, self.request_count)
self.alive = False
# Reset the worker if it consumes too much memory (e.g. caused by a memory leak).
rss, vms = psutil.Process(os.getpid()).get_memory_info()
if vms > config['limit_memory_soft']:
_logger.info('Worker (%d) virtual memory limit (%s) reached.', self.pid, vms)
self.alive = False # Commit suicide after the request.
# VMS and RLIMIT_AS are the same thing: virtual memory, a.k.a. address space
soft, hard = resource.getrlimit(resource.RLIMIT_AS)
resource.setrlimit(resource.RLIMIT_AS, (config['limit_memory_hard'], hard))
# SIGXCPU (exceeded CPU time) signal handler will raise an exception.
r = resource.getrusage(resource.RUSAGE_SELF)
cpu_time = r.ru_utime + r.ru_stime
def time_expired(n, stack):
            _logger.info('Worker (%d) CPU time limit (%s) reached.', self.pid, config['limit_time_cpu'])
            # We don't commit suicide in such a case
raise Exception('CPU time limit exceeded.')
signal.signal(signal.SIGXCPU, time_expired)
soft, hard = resource.getrlimit(resource.RLIMIT_CPU)
resource.setrlimit(resource.RLIMIT_CPU, (cpu_time + config['limit_time_cpu'], hard))
def process_work(self):
pass
def start(self):
self.pid = os.getpid()
self.setproctitle()
_logger.info("Worker %s (%s) alive", self.__class__.__name__, self.pid)
# Reseed the random number generator
random.seed()
        # Prevent fd inheritance: set close_on_exec
flags = fcntl.fcntl(self.multi.socket, fcntl.F_GETFD) | fcntl.FD_CLOEXEC
fcntl.fcntl(self.multi.socket, fcntl.F_SETFD, flags)
# reset blocking status
self.multi.socket.setblocking(0)
signal.signal(signal.SIGINT, self.signal_handler)
signal.signal(signal.SIGTERM, signal.SIG_DFL)
signal.signal(signal.SIGCHLD, signal.SIG_DFL)
def stop(self):
pass
def run(self):
try:
self.start()
while self.alive:
self.process_limit()
self.multi.pipe_ping(self.watchdog_pipe)
self.sleep()
self.process_work()
_logger.info("Worker (%s) exiting. request_count: %s.", self.pid, self.request_count)
self.stop()
except Exception:
_logger.exception("Worker (%s) Exception occured, exiting..." % self.pid)
# should we use 3 to abort everything ?
sys.exit(1)
class WorkerHTTP(Worker):
""" HTTP Request workers """
def process_request(self, client, addr):
client.setblocking(1)
client.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
        # Prevent fd inheritance: set close_on_exec
flags = fcntl.fcntl(client, fcntl.F_GETFD) | fcntl.FD_CLOEXEC
fcntl.fcntl(client, fcntl.F_SETFD, flags)
# do request using BaseWSGIServerNoBind monkey patched with socket
self.server.socket = client
# tolerate broken pipe when the http client closes the socket before
# receiving the full reply
try:
self.server.process_request(client, addr)
except IOError, e:
if e.errno != errno.EPIPE:
raise
self.request_count += 1
def process_work(self):
try:
client, addr = self.multi.socket.accept()
self.process_request(client, addr)
except socket.error, e:
if e[0] not in (errno.EAGAIN, errno.ECONNABORTED):
raise
def start(self):
Worker.start(self)
self.server = BaseWSGIServerNoBind(self.multi.app)
class WorkerCron(Worker):
""" Cron workers """
def __init__(self, multi):
super(WorkerCron, self).__init__(multi)
        # process_work() below processes a single database per call.
# The variable db_index is keeping track of the next database to
# process.
self.db_index = 0
def sleep(self):
# Really sleep once all the databases have been processed.
if self.db_index == 0:
interval = SLEEP_INTERVAL + self.pid % 10 # chorus effect
time.sleep(interval)
def _db_list(self):
if config['db_name']:
db_names = config['db_name'].split(',')
else:
db_names = openerp.service.db.exp_list(True)
return db_names
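        # Illustrative sketch (database names are hypothetical):
        #
        #   config['db_name'] = 'prod,staging'
        #   self._db_list()  # -> ['prod', 'staging']
        #
        # With db_name unset, the list is fetched from openerp.service.db.exp_list(True).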
def process_work(self):
rpc_request = logging.getLogger('openerp.netsvc.rpc.request')
rpc_request_flag = rpc_request.isEnabledFor(logging.DEBUG)
_logger.debug("WorkerCron (%s) polling for jobs", self.pid)
db_names = self._db_list()
if len(db_names):
self.db_index = (self.db_index + 1) % len(db_names)
db_name = db_names[self.db_index]
self.setproctitle(db_name)
if rpc_request_flag:
start_time = time.time()
start_rss, start_vms = psutil.Process(os.getpid()).get_memory_info()
import openerp.addons.base as base
base.ir.ir_cron.ir_cron._acquire_job(db_name)
openerp.modules.registry.RegistryManager.delete(db_name)
            # don't keep cursors in multi-database mode
if len(db_names) > 1:
openerp.sql_db.close_db(db_name)
if rpc_request_flag:
run_time = time.time() - start_time
end_rss, end_vms = psutil.Process(os.getpid()).get_memory_info()
vms_diff = (end_vms - start_vms) / 1024
logline = '%s time:%.3fs mem: %sk -> %sk (diff: %sk)' % \
(db_name, run_time, start_vms / 1024, end_vms / 1024, vms_diff)
_logger.debug("WorkerCron (%s) %s", self.pid, logline)
self.request_count += 1
if self.request_count >= self.request_max and self.request_max < len(db_names):
_logger.error("There are more dabatases to process than allowed "
"by the `limit_request` configuration variable: %s more.",
len(db_names) - self.request_max)
else:
self.db_index = 0
def start(self):
os.nice(10) # mommy always told me to be nice with others...
Worker.start(self)
self.multi.socket.close()
#----------------------------------------------------------
# start/stop public api
#----------------------------------------------------------
server = None
def load_server_wide_modules():
for m in openerp.conf.server_wide_modules:
try:
openerp.modules.module.load_openerp_module(m)
except Exception:
msg = ''
if m == 'web':
msg = """
The `web` module is provided by the addons found in the `openerp-web` project.
Maybe you forgot to add those addons in your addons_path configuration."""
_logger.exception('Failed to load server-wide module `%s`.%s', m, msg)
def _reexec(updated_modules=None):
"""reexecute openerp-server process with (nearly) the same arguments"""
if openerp.tools.osutil.is_running_as_nt_service():
subprocess.call('net stop {0} && net start {0}'.format(nt_service_name), shell=True)
exe = os.path.basename(sys.executable)
args = stripped_sys_argv()
args += ["-u", ','.join(updated_modules)]
if not args or args[0] != exe:
args.insert(0, exe)
os.execv(sys.executable, args)
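    # Illustrative sketch (argv and paths are hypothetical): with sys.executable
    # set to '/usr/bin/python' and stripped_sys_argv() returning
    # ['openerp-server', '-c', '/etc/openerp.conf'], _reexec(['sale']) ends up
    # calling roughly:
    #
    #   os.execv('/usr/bin/python',
    #            ['python', 'openerp-server', '-c', '/etc/openerp.conf', '-u', 'sale'])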
def load_test_file_yml(registry, test_file):
with registry.cursor() as cr:
openerp.tools.convert_yaml_import(cr, 'base', file(test_file), 'test', {}, 'init')
if config['test_commit']:
            _logger.info('test %s has been committed', test_file)
cr.commit()
else:
            _logger.info('test %s has been rolled back', test_file)
cr.rollback()
def load_test_file_py(registry, test_file):
# Locate python module based on its filename and run the tests
test_path, _ = os.path.splitext(os.path.abspath(test_file))
for mod_name, mod_mod in sys.modules.items():
if mod_mod:
mod_path, _ = os.path.splitext(getattr(mod_mod, '__file__', ''))
if test_path == mod_path:
suite = unittest2.TestSuite()
for t in unittest2.TestLoader().loadTestsFromModule(mod_mod):
suite.addTest(t)
_logger.log(logging.INFO, 'running tests %s.', mod_mod.__name__)
stream = openerp.modules.module.TestStream()
result = unittest2.TextTestRunner(verbosity=2, stream=stream).run(suite)
success = result.wasSuccessful()
if hasattr(registry._assertion_report,'report_result'):
registry._assertion_report.report_result(success)
if not success:
_logger.error('%s: at least one error occurred in a test', test_file)
def preload_registries(dbnames):
""" Preload a registries, possibly run a test file."""
# TODO: move all config checks to args dont check tools.config here
config = openerp.tools.config
test_file = config['test_file']
dbnames = dbnames or []
rc = 0
for dbname in dbnames:
try:
update_module = config['init'] or config['update']
registry = RegistryManager.new(dbname, update_module=update_module)
# run test_file if provided
if test_file:
_logger.info('loading test file %s', test_file)
if test_file.endswith('yml'):
load_test_file_yml(registry, test_file)
elif test_file.endswith('py'):
load_test_file_py(registry, test_file)
if registry._assertion_report.failures:
rc += 1
except Exception:
_logger.critical('Failed to initialize database `%s`.', dbname, exc_info=True)
return -1
return rc
def start(preload=None, stop=False):
""" Start the openerp http server and cron processor.
"""
global server
load_server_wide_modules()
if openerp.evented:
server = GeventServer(openerp.service.wsgi_server.application)
elif config['workers']:
server = PreforkServer(openerp.service.wsgi_server.application)
else:
server = ThreadedServer(openerp.service.wsgi_server.application)
if config['auto_reload']:
autoreload = AutoReload(server)
autoreload.run()
rc = server.run(preload, stop)
# like the legend of the phoenix, all ends with beginnings
if getattr(openerp, 'phoenix', False):
modules = []
if config['auto_reload']:
modules = autoreload.modules.keys()
_reexec(modules)
return rc if rc else 0
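    # Illustrative usage sketch (the database name is hypothetical): an entry
    # point would typically drive this module with something like
    #
    #   rc = start(preload=['mydb'], stop=True)
    #   sys.exit(rc)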
def restart():
""" Restart the server
"""
if os.name == 'nt':
# run in a thread to let the current thread return response to the caller.
threading.Thread(target=_reexec).start()
else:
os.kill(server.pid, signal.SIGHUP)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
MoamerEncsConcordiaCa/tensorflow | tensorflow/python/ops/spectral_ops.py | 38 | 4414 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Spectral operators (e.g. FFT, RFFT).
@@fft
@@ifft
@@fft2d
@@ifft2d
@@fft3d
@@ifft3d
@@rfft
@@irfft
@@rfft2d
@@irfft2d
@@rfft3d
@@irfft3d
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import ops as _ops
from tensorflow.python.ops import array_ops as _array_ops
from tensorflow.python.ops import gen_spectral_ops
from tensorflow.python.ops import math_ops as _math_ops
from tensorflow.python.util.all_util import remove_undocumented
def _infer_fft_length_for_rfft(input_tensor, fft_rank):
"""Infers the `fft_length` argument for a `rank` RFFT from `input_tensor`."""
# A TensorShape for the inner fft_rank dimensions.
fft_shape = input_tensor.get_shape()[-fft_rank:]
# If any dim is unknown, fall back to tensor-based math.
if not fft_shape.is_fully_defined():
return _array_ops.shape(input_tensor)[-fft_rank:]
# Otherwise, return a constant.
return _ops.convert_to_tensor(fft_shape.as_list(), _dtypes.int32)
def _infer_fft_length_for_irfft(input_tensor, fft_rank):
"""Infers the `fft_length` argument for a `rank` IRFFT from `input_tensor`."""
# A TensorShape for the inner fft_rank dimensions.
fft_shape = input_tensor.get_shape()[-fft_rank:]
# If any dim is unknown, fall back to tensor-based math.
if not fft_shape.is_fully_defined():
fft_length = _array_ops.unstack(_array_ops.shape(input_tensor)[-fft_rank:])
fft_length[-1] = _math_ops.maximum(0, 2 * (fft_length[-1] - 1))
return _array_ops.stack(fft_length)
# Otherwise, return a constant.
fft_length = fft_shape.as_list()
if fft_length:
fft_length[-1] = max(0, 2 * (fft_length[-1] - 1))
return _ops.convert_to_tensor(fft_length, _dtypes.int32)
def _rfft_wrapper(fft_fn, fft_rank, default_name):
"""Wrapper around gen_spectral_ops.rfft* that infers fft_length argument."""
def _rfft(input_tensor, fft_length=None, name=None):
with _ops.name_scope(name, default_name,
[input_tensor, fft_length]) as name:
input_tensor = _ops.convert_to_tensor(input_tensor, _dtypes.float32)
if fft_length is None:
fft_length = _infer_fft_length_for_rfft(input_tensor, fft_rank)
else:
fft_length = _ops.convert_to_tensor(fft_length, _dtypes.int32)
return fft_fn(input_tensor, fft_length, name)
_rfft.__doc__ = fft_fn.__doc__
return _rfft
def _irfft_wrapper(ifft_fn, fft_rank, default_name):
"""Wrapper around gen_spectral_ops.irfft* that infers fft_length argument."""
def _irfft(input_tensor, fft_length=None, name=None):
with _ops.name_scope(name, default_name,
[input_tensor, fft_length]) as name:
input_tensor = _ops.convert_to_tensor(input_tensor, _dtypes.complex64)
if fft_length is None:
fft_length = _infer_fft_length_for_irfft(input_tensor, fft_rank)
else:
fft_length = _ops.convert_to_tensor(fft_length, _dtypes.int32)
return ifft_fn(input_tensor, fft_length, name)
_irfft.__doc__ = ifft_fn.__doc__
return _irfft
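# Illustrative sketch (shapes and values are examples only): the rfft/irfft
# wrappers defined below infer fft_length when it is not supplied, so a simple
# round trip preserves the original inner dimension.
#
#   signal = _ops.convert_to_tensor([1.0, 2.0, 3.0, 4.0])
#   spectrum = rfft(signal)      # fft_length inferred as [4], output has inner dim 3
#   recovered = irfft(spectrum)  # fft_length inferred as [2 * (3 - 1)] == [4]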
fft = gen_spectral_ops.fft
ifft = gen_spectral_ops.ifft
fft2d = gen_spectral_ops.fft2d
ifft2d = gen_spectral_ops.ifft2d
fft3d = gen_spectral_ops.fft3d
ifft3d = gen_spectral_ops.ifft3d
rfft = _rfft_wrapper(gen_spectral_ops.rfft, 1, "rfft")
irfft = _irfft_wrapper(gen_spectral_ops.irfft, 1, "irfft")
rfft2d = _rfft_wrapper(gen_spectral_ops.rfft2d, 2, "rfft2d")
irfft2d = _irfft_wrapper(gen_spectral_ops.irfft2d, 2, "irfft2d")
rfft3d = _rfft_wrapper(gen_spectral_ops.rfft3d, 3, "rfft3d")
irfft3d = _irfft_wrapper(gen_spectral_ops.irfft3d, 3, "irfft3d")
remove_undocumented(__name__)
| apache-2.0 |
ryfeus/lambda-packs | Tensorflow_LightGBM_Scipy_nightly/source/scipy/stats/_binned_statistic.py | 10 | 25912 | from __future__ import division, print_function, absolute_import
import numpy as np
from scipy._lib.six import callable, xrange
from scipy._lib._numpy_compat import suppress_warnings
from collections import namedtuple
__all__ = ['binned_statistic',
'binned_statistic_2d',
'binned_statistic_dd']
BinnedStatisticResult = namedtuple('BinnedStatisticResult',
('statistic', 'bin_edges', 'binnumber'))
def binned_statistic(x, values, statistic='mean',
bins=10, range=None):
"""
Compute a binned statistic for one or more sets of data.
This is a generalization of a histogram function. A histogram divides
the space into bins, and returns the count of the number of points in
each bin. This function allows the computation of the sum, mean, median,
or other statistic of the values (or set of values) within each bin.
Parameters
----------
x : (N,) array_like
A sequence of values to be binned.
values : (N,) array_like or list of (N,) array_like
The data on which the statistic will be computed. This must be
the same shape as `x`, or a set of sequences - each the same shape as
`x`. If `values` is a set of sequences, the statistic will be computed
on each independently.
statistic : string or callable, optional
The statistic to compute (default is 'mean').
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
* 'sum' : compute the sum of values for points within each bin.
This is identical to a weighted histogram.
* 'min' : compute the minimum of values for points within each bin.
Empty bins will be represented by NaN.
          * 'max' : compute the maximum of values for points within each bin.
Empty bins will be represented by NaN.
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
represented by function([]), or NaN if this returns an error.
bins : int or sequence of scalars, optional
If `bins` is an int, it defines the number of equal-width bins in the
given range (10 by default). If `bins` is a sequence, it defines the
bin edges, including the rightmost edge, allowing for non-uniform bin
widths. Values in `x` that are smaller than lowest bin edge are
assigned to bin number 0, values beyond the highest bin are assigned to
``bins[-1]``. If the bin edges are specified, the number of bins will
be, (nx = len(bins)-1).
range : (float, float) or [(float, float)], optional
The lower and upper range of the bins. If not provided, range
is simply ``(x.min(), x.max())``. Values outside the range are
ignored.
Returns
-------
statistic : array
The values of the selected statistic in each bin.
bin_edges : array of dtype float
Return the bin edges ``(length(statistic)+1)``.
binnumber: 1-D ndarray of ints
Indices of the bins (corresponding to `bin_edges`) in which each value
of `x` belongs. Same length as `values`. A binnumber of `i` means the
corresponding value is between (bin_edges[i-1], bin_edges[i]).
See Also
--------
numpy.digitize, numpy.histogram, binned_statistic_2d, binned_statistic_dd
Notes
-----
All but the last (righthand-most) bin is half-open. In other words, if
`bins` is ``[1, 2, 3, 4]``, then the first bin is ``[1, 2)`` (including 1,
but excluding 2) and the second ``[2, 3)``. The last bin, however, is
``[3, 4]``, which *includes* 4.
.. versionadded:: 0.11.0
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
First some basic examples:
Create two evenly spaced bins in the range of the given sample, and sum the
corresponding values in each of those bins:
>>> values = [1.0, 1.0, 2.0, 1.5, 3.0]
>>> stats.binned_statistic([1, 1, 2, 5, 7], values, 'sum', bins=2)
(array([ 4. , 4.5]), array([ 1., 4., 7.]), array([1, 1, 1, 2, 2]))
Multiple arrays of values can also be passed. The statistic is calculated
on each set independently:
>>> values = [[1.0, 1.0, 2.0, 1.5, 3.0], [2.0, 2.0, 4.0, 3.0, 6.0]]
>>> stats.binned_statistic([1, 1, 2, 5, 7], values, 'sum', bins=2)
(array([[ 4. , 4.5], [ 8. , 9. ]]), array([ 1., 4., 7.]),
array([1, 1, 1, 2, 2]))
>>> stats.binned_statistic([1, 2, 1, 2, 4], np.arange(5), statistic='mean',
... bins=3)
(array([ 1., 2., 4.]), array([ 1., 2., 3., 4.]),
array([1, 2, 1, 2, 3]))
As a second example, we now generate some random data of sailing boat speed
as a function of wind speed, and then determine how fast our boat is for
certain wind speeds:
>>> windspeed = 8 * np.random.rand(500)
>>> boatspeed = .3 * windspeed**.5 + .2 * np.random.rand(500)
>>> bin_means, bin_edges, binnumber = stats.binned_statistic(windspeed,
... boatspeed, statistic='median', bins=[1,2,3,4,5,6,7])
>>> plt.figure()
>>> plt.plot(windspeed, boatspeed, 'b.', label='raw data')
>>> plt.hlines(bin_means, bin_edges[:-1], bin_edges[1:], colors='g', lw=5,
... label='binned statistic of data')
>>> plt.legend()
Now we can use ``binnumber`` to select all datapoints with a windspeed
below 1:
>>> low_boatspeed = boatspeed[binnumber == 0]
As a final example, we will use ``bin_edges`` and ``binnumber`` to make a
plot of a distribution that shows the mean and distribution around that
mean per bin, on top of a regular histogram and the probability
distribution function:
>>> x = np.linspace(0, 5, num=500)
>>> x_pdf = stats.maxwell.pdf(x)
>>> samples = stats.maxwell.rvs(size=10000)
>>> bin_means, bin_edges, binnumber = stats.binned_statistic(x, x_pdf,
... statistic='mean', bins=25)
>>> bin_width = (bin_edges[1] - bin_edges[0])
>>> bin_centers = bin_edges[1:] - bin_width/2
>>> plt.figure()
>>> plt.hist(samples, bins=50, normed=True, histtype='stepfilled',
... alpha=0.2, label='histogram of data')
>>> plt.plot(x, x_pdf, 'r-', label='analytical pdf')
>>> plt.hlines(bin_means, bin_edges[:-1], bin_edges[1:], colors='g', lw=2,
... label='binned statistic of data')
>>> plt.plot((binnumber - 0.5) * bin_width, x_pdf, 'g.', alpha=0.5)
>>> plt.legend(fontsize=10)
>>> plt.show()
"""
try:
N = len(bins)
except TypeError:
N = 1
if N != 1:
bins = [np.asarray(bins, float)]
if range is not None:
if len(range) == 2:
range = [range]
medians, edges, binnumbers = binned_statistic_dd(
[x], values, statistic, bins, range)
return BinnedStatisticResult(medians, edges[0], binnumbers)
BinnedStatistic2dResult = namedtuple('BinnedStatistic2dResult',
('statistic', 'x_edge', 'y_edge',
'binnumber'))
def binned_statistic_2d(x, y, values, statistic='mean',
bins=10, range=None, expand_binnumbers=False):
"""
Compute a bidimensional binned statistic for one or more sets of data.
This is a generalization of a histogram2d function. A histogram divides
the space into bins, and returns the count of the number of points in
each bin. This function allows the computation of the sum, mean, median,
or other statistic of the values (or set of values) within each bin.
Parameters
----------
x : (N,) array_like
A sequence of values to be binned along the first dimension.
y : (N,) array_like
A sequence of values to be binned along the second dimension.
values : (N,) array_like or list of (N,) array_like
The data on which the statistic will be computed. This must be
the same shape as `x`, or a list of sequences - each with the same
shape as `x`. If `values` is such a list, the statistic will be
computed on each independently.
statistic : string or callable, optional
The statistic to compute (default is 'mean').
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
* 'sum' : compute the sum of values for points within each bin.
This is identical to a weighted histogram.
* 'min' : compute the minimum of values for points within each bin.
Empty bins will be represented by NaN.
          * 'max' : compute the maximum of values for points within each bin.
Empty bins will be represented by NaN.
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
represented by function([]), or NaN if this returns an error.
bins : int or [int, int] or array_like or [array, array], optional
The bin specification:
* the number of bins for the two dimensions (nx = ny = bins),
* the number of bins in each dimension (nx, ny = bins),
* the bin edges for the two dimensions (x_edge = y_edge = bins),
* the bin edges in each dimension (x_edge, y_edge = bins).
If the bin edges are specified, the number of bins will be,
(nx = len(x_edge)-1, ny = len(y_edge)-1).
range : (2,2) array_like, optional
The leftmost and rightmost edges of the bins along each dimension
(if not specified explicitly in the `bins` parameters):
[[xmin, xmax], [ymin, ymax]]. All values outside of this range will be
considered outliers and not tallied in the histogram.
expand_binnumbers : bool, optional
'False' (default): the returned `binnumber` is a shape (N,) array of
linearized bin indices.
'True': the returned `binnumber` is 'unraveled' into a shape (2,N)
ndarray, where each row gives the bin numbers in the corresponding
dimension.
See the `binnumber` returned value, and the `Examples` section.
.. versionadded:: 0.17.0
Returns
-------
statistic : (nx, ny) ndarray
The values of the selected statistic in each two-dimensional bin.
x_edge : (nx + 1) ndarray
The bin edges along the first dimension.
y_edge : (ny + 1) ndarray
The bin edges along the second dimension.
binnumber : (N,) array of ints or (2,N) ndarray of ints
This assigns to each element of `sample` an integer that represents the
bin in which this observation falls. The representation depends on the
`expand_binnumbers` argument. See `Notes` for details.
See Also
--------
numpy.digitize, numpy.histogram2d, binned_statistic, binned_statistic_dd
Notes
-----
Binedges:
All but the last (righthand-most) bin is half-open. In other words, if
`bins` is ``[1, 2, 3, 4]``, then the first bin is ``[1, 2)`` (including 1,
but excluding 2) and the second ``[2, 3)``. The last bin, however, is
``[3, 4]``, which *includes* 4.
`binnumber`:
This returned argument assigns to each element of `sample` an integer that
represents the bin in which it belongs. The representation depends on the
`expand_binnumbers` argument. If 'False' (default): The returned
`binnumber` is a shape (N,) array of linearized indices mapping each
element of `sample` to its corresponding bin (using row-major ordering).
If 'True': The returned `binnumber` is a shape (2,N) ndarray where
each row indicates bin placements for each dimension respectively. In each
dimension, a binnumber of `i` means the corresponding value is between
(D_edge[i-1], D_edge[i]), where 'D' is either 'x' or 'y'.
.. versionadded:: 0.11.0
Examples
--------
>>> from scipy import stats
Calculate the counts with explicit bin-edges:
>>> x = [0.1, 0.1, 0.1, 0.6]
>>> y = [2.1, 2.6, 2.1, 2.1]
>>> binx = [0.0, 0.5, 1.0]
>>> biny = [2.0, 2.5, 3.0]
>>> ret = stats.binned_statistic_2d(x, y, None, 'count', bins=[binx,biny])
>>> ret.statistic
array([[ 2., 1.],
[ 1., 0.]])
The bin in which each sample is placed is given by the `binnumber`
returned parameter. By default, these are the linearized bin indices:
>>> ret.binnumber
array([5, 6, 5, 9])
The bin indices can also be expanded into separate entries for each
dimension using the `expand_binnumbers` parameter:
>>> ret = stats.binned_statistic_2d(x, y, None, 'count', bins=[binx,biny],
... expand_binnumbers=True)
>>> ret.binnumber
array([[1, 1, 1, 2],
[1, 2, 1, 1]])
Which shows that the first three elements belong in the xbin 1, and the
fourth into xbin 2; and so on for y.
"""
# This code is based on np.histogram2d
try:
N = len(bins)
except TypeError:
N = 1
if N != 1 and N != 2:
xedges = yedges = np.asarray(bins, float)
bins = [xedges, yedges]
medians, edges, binnumbers = binned_statistic_dd(
[x, y], values, statistic, bins, range,
expand_binnumbers=expand_binnumbers)
return BinnedStatistic2dResult(medians, edges[0], edges[1], binnumbers)
BinnedStatisticddResult = namedtuple('BinnedStatisticddResult',
('statistic', 'bin_edges',
'binnumber'))
def binned_statistic_dd(sample, values, statistic='mean',
bins=10, range=None, expand_binnumbers=False):
"""
Compute a multidimensional binned statistic for a set of data.
This is a generalization of a histogramdd function. A histogram divides
the space into bins, and returns the count of the number of points in
each bin. This function allows the computation of the sum, mean, median,
or other statistic of the values within each bin.
Parameters
----------
sample : array_like
Data to histogram passed as a sequence of D arrays of length N, or
as an (N,D) array.
values : (N,) array_like or list of (N,) array_like
The data on which the statistic will be computed. This must be
the same shape as `x`, or a list of sequences - each with the same
shape as `x`. If `values` is such a list, the statistic will be
computed on each independently.
statistic : string or callable, optional
The statistic to compute (default is 'mean').
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
* 'sum' : compute the sum of values for points within each bin.
This is identical to a weighted histogram.
* 'min' : compute the minimum of values for points within each bin.
Empty bins will be represented by NaN.
          * 'max' : compute the maximum of values for points within each bin.
Empty bins will be represented by NaN.
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
represented by function([]), or NaN if this returns an error.
bins : sequence or int, optional
The bin specification must be in one of the following forms:
* A sequence of arrays describing the bin edges along each dimension.
* The number of bins for each dimension (nx, ny, ... = bins).
* The number of bins for all dimensions (nx = ny = ... = bins).
range : sequence, optional
A sequence of lower and upper bin edges to be used if the edges are
        not given explicitly in `bins`. Defaults to the minimum and maximum
values along each dimension.
expand_binnumbers : bool, optional
'False' (default): the returned `binnumber` is a shape (N,) array of
linearized bin indices.
'True': the returned `binnumber` is 'unraveled' into a shape (D,N)
ndarray, where each row gives the bin numbers in the corresponding
dimension.
See the `binnumber` returned value, and the `Examples` section of
`binned_statistic_2d`.
.. versionadded:: 0.17.0
Returns
-------
statistic : ndarray, shape(nx1, nx2, nx3,...)
        The values of the selected statistic in each bin.
bin_edges : list of ndarrays
A list of D arrays describing the (nxi + 1) bin edges for each
dimension.
binnumber : (N,) array of ints or (D,N) ndarray of ints
This assigns to each element of `sample` an integer that represents the
bin in which this observation falls. The representation depends on the
`expand_binnumbers` argument. See `Notes` for details.
See Also
--------
numpy.digitize, numpy.histogramdd, binned_statistic, binned_statistic_2d
Notes
-----
Binedges:
All but the last (righthand-most) bin is half-open in each dimension. In
other words, if `bins` is ``[1, 2, 3, 4]``, then the first bin is
``[1, 2)`` (including 1, but excluding 2) and the second ``[2, 3)``. The
last bin, however, is ``[3, 4]``, which *includes* 4.
`binnumber`:
This returned argument assigns to each element of `sample` an integer that
represents the bin in which it belongs. The representation depends on the
`expand_binnumbers` argument. If 'False' (default): The returned
`binnumber` is a shape (N,) array of linearized indices mapping each
element of `sample` to its corresponding bin (using row-major ordering).
If 'True': The returned `binnumber` is a shape (D,N) ndarray where
each row indicates bin placements for each dimension respectively. In each
dimension, a binnumber of `i` means the corresponding value is between
(bin_edges[D][i-1], bin_edges[D][i]), for each dimension 'D'.
.. versionadded:: 0.11.0
"""
    known_stats = ['mean', 'median', 'count', 'sum', 'std', 'min', 'max']
if not callable(statistic) and statistic not in known_stats:
raise ValueError('invalid statistic %r' % (statistic,))
# `Ndim` is the number of dimensions (e.g. `2` for `binned_statistic_2d`)
# `Dlen` is the length of elements along each dimension.
# This code is based on np.histogramdd
try:
# `sample` is an ND-array.
Dlen, Ndim = sample.shape
except (AttributeError, ValueError):
# `sample` is a sequence of 1D arrays.
sample = np.atleast_2d(sample).T
Dlen, Ndim = sample.shape
# Store initial shape of `values` to preserve it in the output
values = np.asarray(values)
input_shape = list(values.shape)
# Make sure that `values` is 2D to iterate over rows
values = np.atleast_2d(values)
Vdim, Vlen = values.shape
# Make sure `values` match `sample`
    if statistic != 'count' and Vlen != Dlen:
raise AttributeError('The number of `values` elements must match the '
'length of each `sample` dimension.')
nbin = np.empty(Ndim, int) # Number of bins in each dimension
edges = Ndim * [None] # Bin edges for each dim (will be 2D array)
dedges = Ndim * [None] # Spacing between edges (will be 2D array)
try:
M = len(bins)
if M != Ndim:
raise AttributeError('The dimension of bins must be equal '
'to the dimension of the sample x.')
except TypeError:
bins = Ndim * [bins]
# Select range for each dimension
# Used only if number of bins is given.
if range is None:
smin = np.atleast_1d(np.array(sample.min(axis=0), float))
smax = np.atleast_1d(np.array(sample.max(axis=0), float))
else:
smin = np.zeros(Ndim)
smax = np.zeros(Ndim)
for i in xrange(Ndim):
smin[i], smax[i] = range[i]
# Make sure the bins have a finite width.
for i in xrange(len(smin)):
if smin[i] == smax[i]:
smin[i] = smin[i] - .5
smax[i] = smax[i] + .5
# Create edge arrays
for i in xrange(Ndim):
if np.isscalar(bins[i]):
nbin[i] = bins[i] + 2 # +2 for outlier bins
edges[i] = np.linspace(smin[i], smax[i], nbin[i] - 1)
else:
edges[i] = np.asarray(bins[i], float)
nbin[i] = len(edges[i]) + 1 # +1 for outlier bins
dedges[i] = np.diff(edges[i])
nbin = np.asarray(nbin)
# Compute the bin number each sample falls into, in each dimension
sampBin = [
np.digitize(sample[:, i], edges[i])
for i in xrange(Ndim)
]
# Using `digitize`, values that fall on an edge are put in the right bin.
# For the rightmost bin, we want values equal to the right
# edge to be counted in the last bin, and not as an outlier.
for i in xrange(Ndim):
# Find the rounding precision
decimal = int(-np.log10(dedges[i].min())) + 6
# Find which points are on the rightmost edge.
on_edge = np.where(np.around(sample[:, i], decimal) ==
np.around(edges[i][-1], decimal))[0]
# Shift these points one bin to the left.
sampBin[i][on_edge] -= 1
# Compute the sample indices in the flattened statistic matrix.
binnumbers = np.ravel_multi_index(sampBin, nbin)
result = np.empty([Vdim, nbin.prod()], float)
if statistic == 'mean':
result.fill(np.nan)
flatcount = np.bincount(binnumbers, None)
a = flatcount.nonzero()
for vv in xrange(Vdim):
flatsum = np.bincount(binnumbers, values[vv])
result[vv, a] = flatsum[a] / flatcount[a]
elif statistic == 'std':
result.fill(0)
flatcount = np.bincount(binnumbers, None)
a = flatcount.nonzero()
for vv in xrange(Vdim):
flatsum = np.bincount(binnumbers, values[vv])
flatsum2 = np.bincount(binnumbers, values[vv] ** 2)
result[vv, a] = np.sqrt(flatsum2[a] / flatcount[a] -
(flatsum[a] / flatcount[a]) ** 2)
elif statistic == 'count':
result.fill(0)
flatcount = np.bincount(binnumbers, None)
a = np.arange(len(flatcount))
result[:, a] = flatcount[np.newaxis, :]
elif statistic == 'sum':
result.fill(0)
for vv in xrange(Vdim):
flatsum = np.bincount(binnumbers, values[vv])
a = np.arange(len(flatsum))
result[vv, a] = flatsum
elif statistic == 'median':
result.fill(np.nan)
for i in np.unique(binnumbers):
for vv in xrange(Vdim):
result[vv, i] = np.median(values[vv, binnumbers == i])
elif statistic == 'min':
result.fill(np.nan)
for i in np.unique(binnumbers):
for vv in xrange(Vdim):
result[vv, i] = np.min(values[vv, binnumbers == i])
elif statistic == 'max':
result.fill(np.nan)
for i in np.unique(binnumbers):
for vv in xrange(Vdim):
result[vv, i] = np.max(values[vv, binnumbers == i])
elif callable(statistic):
with np.errstate(invalid='ignore'), suppress_warnings() as sup:
sup.filter(RuntimeWarning)
try:
null = statistic([])
except:
null = np.nan
result.fill(null)
for i in np.unique(binnumbers):
for vv in xrange(Vdim):
result[vv, i] = statistic(values[vv, binnumbers == i])
# Shape into a proper matrix
result = result.reshape(np.append(Vdim, nbin))
# Remove outliers (indices 0 and -1 for each bin-dimension).
core = [slice(None)] + Ndim * [slice(1, -1)]
result = result[core]
# Unravel binnumbers into an ndarray, each row the bins for each dimension
    if expand_binnumbers and Ndim > 1:
binnumbers = np.asarray(np.unravel_index(binnumbers, nbin))
if np.any(result.shape[1:] != nbin - 2):
raise RuntimeError('Internal Shape Error')
    # Reshape to have output (`result`) match input (`values`) shape
result = result.reshape(input_shape[:-1] + list(nbin-2))
return BinnedStatisticddResult(result, edges, binnumbers)
| mit |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/idlelib/IdleHistory.py | 122 | 4052 | "Implement Idle Shell history mechanism with History class"
from idlelib.configHandler import idleConf
class History:
''' Implement Idle Shell history mechanism.
store - Store source statement (called from PyShell.resetoutput).
fetch - Fetch stored statement matching prefix already entered.
history_next - Bound to <<history-next>> event (default Alt-N).
    history_prev - Bound to <<history-previous>> event (default Alt-P).
'''
def __init__(self, text):
'''Initialize data attributes and bind event methods.
.text - Idle wrapper of tk Text widget, with .bell().
.history - source statements, possibly with multiple lines.
.prefix - source already entered at prompt; filters history list.
.pointer - index into history.
.cyclic - wrap around history list (or not).
'''
self.text = text
self.history = []
self.prefix = None
self.pointer = None
self.cyclic = idleConf.GetOption("main", "History", "cyclic", 1, "bool")
text.bind("<<history-previous>>", self.history_prev)
text.bind("<<history-next>>", self.history_next)
def history_next(self, event):
"Fetch later statement; start with ealiest if cyclic."
self.fetch(reverse=False)
return "break"
def history_prev(self, event):
"Fetch earlier statement; start with most recent."
self.fetch(reverse=True)
return "break"
def fetch(self, reverse):
        '''Fetch statement and replace current line in text widget.
Set prefix and pointer as needed for successive fetches.
Reset them to None, None when returning to the start line.
        Sound the bell when returning to the start line or when unable to
        leave a line because cyclic is False.
'''
nhist = len(self.history)
pointer = self.pointer
prefix = self.prefix
if pointer is not None and prefix is not None:
if self.text.compare("insert", "!=", "end-1c") or \
self.text.get("iomark", "end-1c") != self.history[pointer]:
pointer = prefix = None
self.text.mark_set("insert", "end-1c") # != after cursor move
if pointer is None or prefix is None:
prefix = self.text.get("iomark", "end-1c")
if reverse:
pointer = nhist # will be decremented
else:
if self.cyclic:
pointer = -1 # will be incremented
else: # abort history_next
self.text.bell()
return
nprefix = len(prefix)
while 1:
pointer += -1 if reverse else 1
if pointer < 0 or pointer >= nhist:
self.text.bell()
if not self.cyclic and pointer < 0: # abort history_prev
return
else:
if self.text.get("iomark", "end-1c") != prefix:
self.text.delete("iomark", "end-1c")
self.text.insert("iomark", prefix)
pointer = prefix = None
break
item = self.history[pointer]
if item[:nprefix] == prefix and len(item) > nprefix:
self.text.delete("iomark", "end-1c")
self.text.insert("iomark", item)
break
self.text.see("insert")
self.text.tag_remove("sel", "1.0", "end")
self.pointer = pointer
self.prefix = prefix
def store(self, source):
"Store Shell input statement into history list."
source = source.strip()
if len(source) > 2:
# avoid duplicates
try:
self.history.remove(source)
except ValueError:
pass
self.history.append(source)
self.pointer = None
self.prefix = None
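# Illustrative usage sketch (widget wiring is omitted; `shell_text` is a
# hypothetical Idle text wrapper providing the expected bind/get/insert methods):
#
#   history = History(shell_text)
#   history.store("print('a')")
#   history.store("x = 1")
#   # <<history-previous>> at an empty prompt now recalls "x = 1", then "print('a')"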
if __name__ == "__main__":
from unittest import main
main('idlelib.idle_test.test_idlehistory', verbosity=2, exit=False)
| gpl-3.0 |
HesselTjeerdsma/Cyber-Physical-Pacman-Game | Algor/flask/lib/python2.7/site-packages/flask/blueprints.py | 169 | 16872 | # -*- coding: utf-8 -*-
"""
flask.blueprints
~~~~~~~~~~~~~~~~
Blueprints are the recommended way to implement larger or more
pluggable applications in Flask 0.7 and later.
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from functools import update_wrapper
from .helpers import _PackageBoundObject, _endpoint_from_view_func
class BlueprintSetupState(object):
"""Temporary holder object for registering a blueprint with the
application. An instance of this class is created by the
:meth:`~flask.Blueprint.make_setup_state` method and later passed
to all register callback functions.
"""
def __init__(self, blueprint, app, options, first_registration):
#: a reference to the current application
self.app = app
#: a reference to the blueprint that created this setup state.
self.blueprint = blueprint
#: a dictionary with all options that were passed to the
#: :meth:`~flask.Flask.register_blueprint` method.
self.options = options
#: as blueprints can be registered multiple times with the
#: application and not everything wants to be registered
#: multiple times on it, this attribute can be used to figure
#: out if the blueprint was registered in the past already.
self.first_registration = first_registration
subdomain = self.options.get('subdomain')
if subdomain is None:
subdomain = self.blueprint.subdomain
#: The subdomain that the blueprint should be active for, ``None``
#: otherwise.
self.subdomain = subdomain
url_prefix = self.options.get('url_prefix')
if url_prefix is None:
url_prefix = self.blueprint.url_prefix
#: The prefix that should be used for all URLs defined on the
#: blueprint.
self.url_prefix = url_prefix
#: A dictionary with URL defaults that is added to each and every
#: URL that was defined with the blueprint.
self.url_defaults = dict(self.blueprint.url_values_defaults)
self.url_defaults.update(self.options.get('url_defaults', ()))
def add_url_rule(self, rule, endpoint=None, view_func=None, **options):
"""A helper method to register a rule (and optionally a view function)
to the application. The endpoint is automatically prefixed with the
blueprint's name.
"""
if self.url_prefix:
rule = self.url_prefix + rule
options.setdefault('subdomain', self.subdomain)
if endpoint is None:
endpoint = _endpoint_from_view_func(view_func)
defaults = self.url_defaults
if 'defaults' in options:
defaults = dict(defaults, **options.pop('defaults'))
self.app.add_url_rule(rule, '%s.%s' % (self.blueprint.name, endpoint),
view_func, defaults=defaults, **options)
class Blueprint(_PackageBoundObject):
"""Represents a blueprint. A blueprint is an object that records
functions that will be called with the
:class:`~flask.blueprints.BlueprintSetupState` later to register functions
or other things on the main application. See :ref:`blueprints` for more
information.
.. versionadded:: 0.7
"""
warn_on_modifications = False
_got_registered_once = False
def __init__(self, name, import_name, static_folder=None,
static_url_path=None, template_folder=None,
url_prefix=None, subdomain=None, url_defaults=None,
root_path=None):
_PackageBoundObject.__init__(self, import_name, template_folder,
root_path=root_path)
self.name = name
self.url_prefix = url_prefix
self.subdomain = subdomain
self.static_folder = static_folder
self.static_url_path = static_url_path
self.deferred_functions = []
if url_defaults is None:
url_defaults = {}
self.url_values_defaults = url_defaults
def record(self, func):
"""Registers a function that is called when the blueprint is
registered on the application. This function is called with the
state as argument as returned by the :meth:`make_setup_state`
method.
"""
if self._got_registered_once and self.warn_on_modifications:
from warnings import warn
warn(Warning('The blueprint was already registered once '
'but is getting modified now. These changes '
'will not show up.'))
self.deferred_functions.append(func)
def record_once(self, func):
"""Works like :meth:`record` but wraps the function in another
function that will ensure the function is only called once. If the
blueprint is registered a second time on the application, the
function passed is not called.
"""
def wrapper(state):
if state.first_registration:
func(state)
return self.record(update_wrapper(wrapper, func))
def make_setup_state(self, app, options, first_registration=False):
"""Creates an instance of :meth:`~flask.blueprints.BlueprintSetupState`
object that is later passed to the register callback functions.
Subclasses can override this to return a subclass of the setup state.
"""
return BlueprintSetupState(self, app, options, first_registration)
def register(self, app, options, first_registration=False):
"""Called by :meth:`Flask.register_blueprint` to register a blueprint
on the application. This can be overridden to customize the register
behavior. Keyword arguments from
:func:`~flask.Flask.register_blueprint` are directly forwarded to this
method in the `options` dictionary.
"""
self._got_registered_once = True
state = self.make_setup_state(app, options, first_registration)
if self.has_static_folder:
state.add_url_rule(self.static_url_path + '/<path:filename>',
view_func=self.send_static_file,
endpoint='static')
for deferred in self.deferred_functions:
deferred(state)
def route(self, rule, **options):
"""Like :meth:`Flask.route` but for a blueprint. The endpoint for the
:func:`url_for` function is prefixed with the name of the blueprint.
"""
def decorator(f):
endpoint = options.pop("endpoint", f.__name__)
self.add_url_rule(rule, endpoint, f, **options)
return f
return decorator
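    # Illustrative usage sketch (blueprint, app, and view names are hypothetical):
    #
    #   bp = Blueprint('admin', __name__, url_prefix='/admin')
    #
    #   @bp.route('/users')
    #   def list_users():
    #       return 'users'
    #
    #   app.register_blueprint(bp)
    #   # the view is served at /admin/users under the endpoint 'admin.list_users'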
def add_url_rule(self, rule, endpoint=None, view_func=None, **options):
"""Like :meth:`Flask.add_url_rule` but for a blueprint. The endpoint for
the :func:`url_for` function is prefixed with the name of the blueprint.
"""
if endpoint:
assert '.' not in endpoint, "Blueprint endpoints should not contain dots"
self.record(lambda s:
s.add_url_rule(rule, endpoint, view_func, **options))
def endpoint(self, endpoint):
"""Like :meth:`Flask.endpoint` but for a blueprint. This does not
prefix the endpoint with the blueprint name, this has to be done
explicitly by the user of this method. If the endpoint is prefixed
with a `.` it will be registered to the current blueprint, otherwise
it's an application independent endpoint.
"""
def decorator(f):
def register_endpoint(state):
state.app.view_functions[endpoint] = f
self.record_once(register_endpoint)
return f
return decorator
def app_template_filter(self, name=None):
"""Register a custom template filter, available application wide. Like
:meth:`Flask.template_filter` but for a blueprint.
:param name: the optional name of the filter, otherwise the
function name will be used.
"""
def decorator(f):
self.add_app_template_filter(f, name=name)
return f
return decorator
def add_app_template_filter(self, f, name=None):
"""Register a custom template filter, available application wide. Like
:meth:`Flask.add_template_filter` but for a blueprint. Works exactly
like the :meth:`app_template_filter` decorator.
:param name: the optional name of the filter, otherwise the
function name will be used.
"""
def register_template(state):
state.app.jinja_env.filters[name or f.__name__] = f
self.record_once(register_template)
def app_template_test(self, name=None):
"""Register a custom template test, available application wide. Like
:meth:`Flask.template_test` but for a blueprint.
.. versionadded:: 0.10
:param name: the optional name of the test, otherwise the
function name will be used.
"""
def decorator(f):
self.add_app_template_test(f, name=name)
return f
return decorator
def add_app_template_test(self, f, name=None):
"""Register a custom template test, available application wide. Like
:meth:`Flask.add_template_test` but for a blueprint. Works exactly
like the :meth:`app_template_test` decorator.
.. versionadded:: 0.10
:param name: the optional name of the test, otherwise the
function name will be used.
"""
def register_template(state):
state.app.jinja_env.tests[name or f.__name__] = f
self.record_once(register_template)
def app_template_global(self, name=None):
"""Register a custom template global, available application wide. Like
:meth:`Flask.template_global` but for a blueprint.
.. versionadded:: 0.10
:param name: the optional name of the global, otherwise the
function name will be used.
"""
def decorator(f):
self.add_app_template_global(f, name=name)
return f
return decorator
def add_app_template_global(self, f, name=None):
"""Register a custom template global, available application wide. Like
:meth:`Flask.add_template_global` but for a blueprint. Works exactly
like the :meth:`app_template_global` decorator.
.. versionadded:: 0.10
:param name: the optional name of the global, otherwise the
function name will be used.
"""
def register_template(state):
state.app.jinja_env.globals[name or f.__name__] = f
self.record_once(register_template)
def before_request(self, f):
"""Like :meth:`Flask.before_request` but for a blueprint. This function
is only executed before each request that is handled by a function of
that blueprint.
"""
self.record_once(lambda s: s.app.before_request_funcs
.setdefault(self.name, []).append(f))
return f
def before_app_request(self, f):
"""Like :meth:`Flask.before_request`. Such a function is executed
before each request, even if outside of a blueprint.
"""
self.record_once(lambda s: s.app.before_request_funcs
.setdefault(None, []).append(f))
return f
def before_app_first_request(self, f):
"""Like :meth:`Flask.before_first_request`. Such a function is
executed before the first request to the application.
"""
self.record_once(lambda s: s.app.before_first_request_funcs.append(f))
return f
def after_request(self, f):
"""Like :meth:`Flask.after_request` but for a blueprint. This function
is only executed after each request that is handled by a function of
that blueprint.
"""
self.record_once(lambda s: s.app.after_request_funcs
.setdefault(self.name, []).append(f))
return f
def after_app_request(self, f):
"""Like :meth:`Flask.after_request` but for a blueprint. Such a function
is executed after each request, even if outside of the blueprint.
"""
self.record_once(lambda s: s.app.after_request_funcs
.setdefault(None, []).append(f))
return f
def teardown_request(self, f):
"""Like :meth:`Flask.teardown_request` but for a blueprint. This
function is only executed when tearing down requests handled by a
function of that blueprint. Teardown request functions are executed
when the request context is popped, even when no actual request was
performed.
"""
self.record_once(lambda s: s.app.teardown_request_funcs
.setdefault(self.name, []).append(f))
return f
def teardown_app_request(self, f):
"""Like :meth:`Flask.teardown_request` but for a blueprint. Such a
function is executed when tearing down each request, even if outside of
the blueprint.
"""
self.record_once(lambda s: s.app.teardown_request_funcs
.setdefault(None, []).append(f))
return f
def context_processor(self, f):
"""Like :meth:`Flask.context_processor` but for a blueprint. This
function is only executed for requests handled by a blueprint.
"""
self.record_once(lambda s: s.app.template_context_processors
.setdefault(self.name, []).append(f))
return f
def app_context_processor(self, f):
"""Like :meth:`Flask.context_processor` but for a blueprint. Such a
function is executed each request, even if outside of the blueprint.
"""
self.record_once(lambda s: s.app.template_context_processors
.setdefault(None, []).append(f))
return f
def app_errorhandler(self, code):
"""Like :meth:`Flask.errorhandler` but for a blueprint. This
handler is used for all requests, even if outside of the blueprint.
"""
def decorator(f):
self.record_once(lambda s: s.app.errorhandler(code)(f))
return f
return decorator
def url_value_preprocessor(self, f):
"""Registers a function as URL value preprocessor for this
blueprint. It's called before the view functions are called and
can modify the url values provided.
"""
self.record_once(lambda s: s.app.url_value_preprocessors
.setdefault(self.name, []).append(f))
return f
def url_defaults(self, f):
"""Callback function for URL defaults for this blueprint. It's called
with the endpoint and values and should update the values passed
in place.
"""
self.record_once(lambda s: s.app.url_default_functions
.setdefault(self.name, []).append(f))
return f
def app_url_value_preprocessor(self, f):
"""Same as :meth:`url_value_preprocessor` but application wide.
"""
self.record_once(lambda s: s.app.url_value_preprocessors
.setdefault(None, []).append(f))
return f
def app_url_defaults(self, f):
"""Same as :meth:`url_defaults` but application wide.
"""
self.record_once(lambda s: s.app.url_default_functions
.setdefault(None, []).append(f))
return f
def errorhandler(self, code_or_exception):
"""Registers an error handler that becomes active for this blueprint
        only. Please be aware that routing does not happen locally to a
        blueprint, so an error handler for 404 usually is not handled by
a blueprint unless it is caused inside a view function. Another
special case is the 500 internal server error which is always looked
up from the application.
Otherwise works as the :meth:`~flask.Flask.errorhandler` decorator
of the :class:`~flask.Flask` object.
"""
def decorator(f):
self.record_once(lambda s: s.app._register_error_handler(
self.name, code_or_exception, f))
return f
return decorator
def register_error_handler(self, code_or_exception, f):
"""Non-decorator version of the :meth:`errorhandler` error attach
function, akin to the :meth:`~flask.Flask.register_error_handler`
application-wide function of the :class:`~flask.Flask` object but
for error handlers limited to this blueprint.
.. versionadded:: 0.11
"""
self.record_once(lambda s: s.app._register_error_handler(
self.name, code_or_exception, f))
| apache-2.0 |
cloudbase/nova-virtualbox | nova/virt/ironic/patcher.py | 7 | 7408 | # coding=utf-8
#
# Copyright 2014 Hewlett-Packard Development Company, L.P.
# Copyright 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Helper classes for Ironic HTTP PATCH creation.
"""
from oslo_config import cfg
from oslo_serialization import jsonutils
import six
CONF = cfg.CONF
CONF.import_opt('default_ephemeral_format', 'nova.virt.driver')
def create(node):
"""Create an instance of the appropriate DriverFields class.
:param node: a node object returned from ironicclient
:returns: GenericDriverFields or a subclass thereof, as appropriate
for the supplied node.
"""
if 'pxe' in node.driver:
return PXEDriverFields(node)
else:
return GenericDriverFields(node)
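# Illustrative sketch (the driver name is an example): a node whose driver
# string contains 'pxe', e.g. 'pxe_ipmitool', gets PXEDriverFields; any other
# driver falls back to GenericDriverFields.
#
#   patcher = create(node)  # node.driver == 'pxe_ipmitool' -> PXEDriverFields
#   patch = patcher.get_deploy_patch(instance, image_meta, flavor)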
class GenericDriverFields(object):
def __init__(self, node):
self.node = node
def get_deploy_patch(self, instance, image_meta, flavor,
preserve_ephemeral=None):
"""Build a patch to add the required fields to deploy a node.
:param instance: the instance object.
:param image_meta: the metadata associated with the instance
image.
:param flavor: the flavor object.
:param preserve_ephemeral: preserve_ephemeral status (bool) to be
specified during rebuild.
:returns: a json-patch with the fields that needs to be updated.
"""
patch = []
patch.append({'path': '/instance_info/image_source', 'op': 'add',
'value': image_meta['id']})
patch.append({'path': '/instance_info/root_gb', 'op': 'add',
'value': str(instance.root_gb)})
patch.append({'path': '/instance_info/swap_mb', 'op': 'add',
'value': str(flavor['swap'])})
if instance.ephemeral_gb:
patch.append({'path': '/instance_info/ephemeral_gb',
'op': 'add',
'value': str(instance.ephemeral_gb)})
if CONF.default_ephemeral_format:
patch.append({'path': '/instance_info/ephemeral_format',
'op': 'add',
'value': CONF.default_ephemeral_format})
if preserve_ephemeral is not None:
patch.append({'path': '/instance_info/preserve_ephemeral',
'op': 'add', 'value': str(preserve_ephemeral)})
capabilities = {}
# read the flavor and get the extra_specs value.
extra_specs = flavor.get('extra_specs')
# scan through the extra_specs values and ignore the keys
# not starting with keyword 'capabilities'.
for key, val in six.iteritems(extra_specs):
if not key.startswith('capabilities:'):
continue
# split the extra_spec key to remove the keyword
# 'capabilities' and get the actual key.
capabilities_string, capabilities_key = key.split(':', 1)
if capabilities_key:
capabilities[capabilities_key] = val
if capabilities:
patch.append({'path': '/instance_info/capabilities',
'op': 'add', 'value': jsonutils.dumps(capabilities)})
return patch
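    # Illustrative sketch (flavor contents are hypothetical): extra_specs keys
    # prefixed with 'capabilities:' are folded into instance_info/capabilities.
    #
    #   flavor = {'swap': 0, 'extra_specs': {'capabilities:boot_mode': 'uefi'}}
    #   # -> the returned patch includes
    #   # {'path': '/instance_info/capabilities', 'op': 'add',
    #   #  'value': '{"boot_mode": "uefi"}'}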
def get_cleanup_patch(self, instance, network_info, flavor):
"""Build a patch to clean up the fields.
:param instance: the instance object.
:param network_info: the instance network information.
:param flavor: the flavor object.
:returns: a json-patch with the fields that needs to be updated.
"""
return []
class PXEDriverFields(GenericDriverFields):
def _get_kernel_ramdisk_dict(self, flavor):
"""Get the deploy ramdisk and kernel IDs from the flavor.
:param flavor: the flavor object.
:returns: a dict with the pxe options for the deploy ramdisk and
kernel if the IDs were found in the flavor, otherwise an empty
dict is returned.
"""
extra_specs = flavor['extra_specs']
deploy_kernel = extra_specs.get('baremetal:deploy_kernel_id')
deploy_ramdisk = extra_specs.get('baremetal:deploy_ramdisk_id')
deploy_ids = {}
if deploy_kernel and deploy_ramdisk:
deploy_ids['pxe_deploy_kernel'] = deploy_kernel
deploy_ids['pxe_deploy_ramdisk'] = deploy_ramdisk
return deploy_ids
def get_deploy_patch(self, instance, image_meta, flavor,
preserve_ephemeral=None):
"""Build a patch to add the required fields to deploy a node.
Build a json-patch to add the required fields to deploy a node
using the PXE driver.
:param instance: the instance object.
:param image_meta: the metadata associated with the instance
image.
:param flavor: the flavor object.
:param preserve_ephemeral: preserve_ephemeral status (bool) to be
specified during rebuild.
:returns: a json-patch with the fields that needs to be updated.
"""
patch = super(PXEDriverFields, self).get_deploy_patch(
instance, image_meta, flavor, preserve_ephemeral)
# TODO(lucasagomes): Remove it in Kilo. This is for backwards
# compatibility with Icehouse. If flavor contains both ramdisk
# and kernel ids, use them.
for key, value in self._get_kernel_ramdisk_dict(flavor).items():
patch.append({'path': '/driver_info/%s' % key,
'op': 'add', 'value': value})
return patch
def get_cleanup_patch(self, instance, network_info, flavor):
"""Build a patch to clean up the fields.
Build a json-patch to remove the fields used to deploy a node
using the PXE driver. Note that the fields added to the Node's
instance_info don't need to be removed because they are purged
during the Node's tear down.
:param instance: the instance object.
:param network_info: the instance network information.
:param flavor: the flavor object.
:returns: a json-patch with the fields that needs to be updated.
"""
patch = super(PXEDriverFields, self).get_cleanup_patch(
instance, network_info, flavor)
# TODO(lucasagomes): Remove it in Kilo. This is for backwards
# compatibility with Icehouse. If flavor contains a ramdisk and
# kernel id remove it from nodes as part of the tear down process
for key in self._get_kernel_ramdisk_dict(flavor):
if key in self.node.driver_info:
patch.append({'op': 'remove',
'path': '/driver_info/%s' % key})
return patch
| apache-2.0 |
KyleJamesWalker/ansible | lib/ansible/modules/cloud/openstack/_quantum_subnet.py | 12 | 10250 | #!/usr/bin/python
#coding: utf-8 -*-
# (c) 2013, Benno Joy <[email protected]>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['deprecated'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: quantum_subnet
author: "Benno Joy (@bennojoy)"
deprecated: Deprecated in 2.0. Use M(os_subnet) instead.
version_added: "1.2"
short_description: Add/remove subnet from a network
description:
- Add/remove subnet from a network
options:
login_username:
description:
- login username to authenticate to keystone
required: true
default: admin
login_password:
description:
- Password of login user
required: true
default: True
login_tenant_name:
description:
- The tenant name of the login user
required: true
default: True
auth_url:
description:
- The keystone URL for authentication
required: false
default: http://127.0.0.1:35357/v2.0/
region_name:
description:
- Name of the region
required: false
default: None
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
network_name:
description:
- Name of the network to which the subnet should be attached
required: true
default: None
name:
description:
- The name of the subnet that should be created
required: true
default: None
cidr:
description:
      - The CIDR representation of the network address that should be assigned to the subnet
required: true
default: None
tenant_name:
description:
- The name of the tenant for whom the subnet should be created
required: false
default: None
ip_version:
description:
- The IP version of the subnet 4 or 6
required: false
default: 4
enable_dhcp:
description:
- Whether DHCP should be enabled for this subnet.
required: false
default: true
gateway_ip:
description:
- The ip that would be assigned to the gateway for this subnet
required: false
default: None
dns_nameservers:
description:
- DNS nameservers for this subnet, comma-separated
required: false
default: None
version_added: "1.4"
allocation_pool_start:
description:
      - The starting address of the allocation pool from which IPs should be allocated
required: false
default: None
allocation_pool_end:
description:
      - The last address of the allocation pool that should be assigned to the virtual machines
required: false
default: None
requirements:
- "python >= 2.6"
- "python-neutronclient or python-quantumclient"
- "python-keystoneclient"
'''
EXAMPLES = '''
- name: Create a subnet for a tenant with the specified subnet
quantum_subnet:
state: present
login_username: admin
login_password: admin
login_tenant_name: admin
tenant_name: tenant1
network_name: network1
name: net1subnet
cidr: 192.168.0.0/24
'''
try:
try:
from neutronclient.neutron import client
except ImportError:
from quantumclient.quantum import client
from keystoneclient.v2_0 import client as ksclient
HAVE_DEPS = True
except ImportError:
HAVE_DEPS = False
_os_keystone = None
_os_tenant_id = None
_os_network_id = None
def _get_ksclient(module, kwargs):
try:
kclient = ksclient.Client(username=kwargs.get('login_username'),
password=kwargs.get('login_password'),
tenant_name=kwargs.get('login_tenant_name'),
auth_url=kwargs.get('auth_url'))
except Exception as e:
module.fail_json(msg = "Error authenticating to the keystone: %s" %e.message)
global _os_keystone
_os_keystone = kclient
return kclient
def _get_endpoint(module, ksclient):
try:
endpoint = ksclient.service_catalog.url_for(service_type='network', endpoint_type='publicURL')
except Exception as e:
module.fail_json(msg = "Error getting network endpoint: %s" % e.message)
return endpoint
def _get_neutron_client(module, kwargs):
_ksclient = _get_ksclient(module, kwargs)
token = _ksclient.auth_token
endpoint = _get_endpoint(module, _ksclient)
kwargs = {
'token': token,
'endpoint_url': endpoint
}
try:
neutron = client.Client('2.0', **kwargs)
except Exception as e:
module.fail_json(msg = " Error in connecting to neutron: %s" % e.message)
return neutron
def _set_tenant_id(module):
global _os_tenant_id
if not module.params['tenant_name']:
_os_tenant_id = _os_keystone.tenant_id
else:
tenant_name = module.params['tenant_name']
for tenant in _os_keystone.tenants.list():
if tenant.name == tenant_name:
_os_tenant_id = tenant.id
break
if not _os_tenant_id:
module.fail_json(msg = "The tenant id cannot be found, please check the parameters")
def _get_net_id(neutron, module):
kwargs = {
'tenant_id': _os_tenant_id,
'name': module.params['network_name'],
}
try:
networks = neutron.list_networks(**kwargs)
except Exception as e:
module.fail_json(msg="Error in listing neutron networks: %s" % e.message)
if not networks['networks']:
return None
return networks['networks'][0]['id']
def _get_subnet_id(module, neutron):
global _os_network_id
subnet_id = None
_os_network_id = _get_net_id(neutron, module)
if not _os_network_id:
module.fail_json(msg = "network id of network not found.")
else:
kwargs = {
'tenant_id': _os_tenant_id,
'name': module.params['name'],
}
try:
subnets = neutron.list_subnets(**kwargs)
except Exception as e:
module.fail_json( msg = " Error in getting the subnet list:%s " % e.message)
if not subnets['subnets']:
return None
return subnets['subnets'][0]['id']
def _create_subnet(module, neutron):
neutron.format = 'json'
subnet = {
'name': module.params['name'],
'ip_version': module.params['ip_version'],
'enable_dhcp': module.params['enable_dhcp'],
'tenant_id': _os_tenant_id,
'gateway_ip': module.params['gateway_ip'],
'dns_nameservers': module.params['dns_nameservers'],
'network_id': _os_network_id,
'cidr': module.params['cidr'],
}
if module.params['allocation_pool_start'] and module.params['allocation_pool_end']:
allocation_pools = [
{
'start' : module.params['allocation_pool_start'],
'end' : module.params['allocation_pool_end']
}
]
subnet.update({'allocation_pools': allocation_pools})
if not module.params['gateway_ip']:
subnet.pop('gateway_ip')
if module.params['dns_nameservers']:
subnet['dns_nameservers'] = module.params['dns_nameservers'].split(',')
else:
subnet.pop('dns_nameservers')
try:
new_subnet = neutron.create_subnet(dict(subnet=subnet))
except Exception as e:
module.fail_json(msg = "Failure in creating subnet: %s" % e.message)
return new_subnet['subnet']['id']
def _delete_subnet(module, neutron, subnet_id):
try:
neutron.delete_subnet(subnet_id)
except Exception as e:
module.fail_json( msg = "Error in deleting subnet: %s" % e.message)
return True
def main():
argument_spec = openstack_argument_spec()
argument_spec.update(dict(
name = dict(required=True),
network_name = dict(required=True),
cidr = dict(required=True),
tenant_name = dict(default=None),
state = dict(default='present', choices=['absent', 'present']),
ip_version = dict(default='4', choices=['4', '6']),
enable_dhcp = dict(default='true', type='bool'),
gateway_ip = dict(default=None),
dns_nameservers = dict(default=None),
allocation_pool_start = dict(default=None),
allocation_pool_end = dict(default=None),
))
module = AnsibleModule(argument_spec=argument_spec)
if not HAVE_DEPS:
module.fail_json(msg='python-keystoneclient and either python-neutronclient or python-quantumclient are required')
neutron = _get_neutron_client(module, module.params)
_set_tenant_id(module)
if module.params['state'] == 'present':
subnet_id = _get_subnet_id(module, neutron)
if not subnet_id:
subnet_id = _create_subnet(module, neutron)
module.exit_json(changed = True, result = "Created" , id = subnet_id)
else:
module.exit_json(changed = False, result = "success" , id = subnet_id)
else:
subnet_id = _get_subnet_id(module, neutron)
if not subnet_id:
module.exit_json(changed = False, result = "success")
else:
_delete_subnet(module, neutron, subnet_id)
module.exit_json(changed = True, result = "deleted")
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
| gpl-3.0 |
IronLanguages/ironpython3 | Src/StdLib/Lib/email/encoders.py | 146 | 1786 | # Copyright (C) 2001-2006 Python Software Foundation
# Author: Barry Warsaw
# Contact: [email protected]
"""Encodings and related functions."""
__all__ = [
'encode_7or8bit',
'encode_base64',
'encode_noop',
'encode_quopri',
]
from base64 import encodebytes as _bencode
from quopri import encodestring as _encodestring
def _qencode(s):
enc = _encodestring(s, quotetabs=True)
# Must encode spaces, which quopri.encodestring() doesn't do
return enc.replace(b' ', b'=20')
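# Illustrative example (added, not part of the stdlib module): for a typical
# payload one would expect, e.g.
#   _qencode(b'hello world\n')  ->  b'hello=20world\n'
# i.e. embedded spaces end up quoted as '=20' in the returned bytes.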
def encode_base64(msg):
"""Encode the message's payload in Base64.
Also, add an appropriate Content-Transfer-Encoding header.
"""
orig = msg.get_payload(decode=True)
encdata = str(_bencode(orig), 'ascii')
msg.set_payload(encdata)
msg['Content-Transfer-Encoding'] = 'base64'
def encode_quopri(msg):
"""Encode the message's payload in quoted-printable.
Also, add an appropriate Content-Transfer-Encoding header.
"""
orig = msg.get_payload(decode=True)
encdata = _qencode(orig)
msg.set_payload(encdata)
msg['Content-Transfer-Encoding'] = 'quoted-printable'
def encode_7or8bit(msg):
"""Set the Content-Transfer-Encoding header to 7bit or 8bit."""
orig = msg.get_payload(decode=True)
if orig is None:
# There's no payload. For backwards compatibility we use 7bit
msg['Content-Transfer-Encoding'] = '7bit'
return
# We play a trick to make this go fast. If decoding from ASCII succeeds,
# we know the data must be 7bit, otherwise treat it as 8bit.
try:
orig.decode('ascii')
except UnicodeError:
msg['Content-Transfer-Encoding'] = '8bit'
else:
msg['Content-Transfer-Encoding'] = '7bit'
def encode_noop(msg):
"""Do nothing."""
| apache-2.0 |
YehudaItkin/virt-test | virttest/staging/backports/__init__.py | 23 | 3497 | """
This module contains backported functions that are not present in Python 2.4
but are standard in more recent versions.
"""
import re
import sys
# Import backported modules
import simplejson
import collections
import itertools
if not hasattr(itertools, 'product'):
import _itertools
itertools.product = _itertools.product
# pylint: disable=I0011,W0622
# noinspection PyShadowingBuiltins
def _next(*args):
"""
Retrieve the next item from the iterator by calling its next() method.
If default is given, it is returned if the iterator is exhausted,
otherwise StopIteration is raised.
New in version 2.6.
:param iterator: the iterator
:type iterator: iterator
:param default: the value to return if the iterator raises StopIteration
:type default: object
:return: The object returned by iterator.next()
:rtype: object
"""
if len(args) == 2:
try:
return args[0].next()
except StopIteration:
return args[1]
elif len(args) > 2:
raise TypeError("next expected at most 2 arguments, %s" % len(args))
else:
return args[0].next()
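# Illustrative usage (added): the backport mirrors the builtin next():
#   _next(iter([1, 2]))         ->  1
#   _next(iter([]), 'default')  ->  'default'
#   _next(iter([]))             ->  raises StopIteration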
# pylint: disable=W0622
# noinspection PyShadowingBuiltins
def _any(iterable):
"""
From http://stackoverflow.com/questions/3785433/python-backports-for-some-methods
:codeauthor: Tim Pietzcker http://stackoverflow.com/users/20670/tim-pietzcker
licensed under cc-wiki with attribution required
"""
for element in iterable:
if element:
return True
return False
# pylint: disable=W0622
# noinspection PyShadowingBuiltins
def _all(iterable):
"""
From http://stackoverflow.com/questions/3785433/python-backports-for-some-methods
:codeauthor: Tim Pietzcker http://stackoverflow.com/users/20670/tim-pietzcker
licensed under cc-wiki with attribution required
"""
for element in iterable:
if not element:
return False
return True
# Adapted from http://code.activestate.com/recipes/576847/
# :codeauthor: Vishal Sapre
# :license: MIT
BIN_HEX_DICT = {
'0': '0000', '1': '0001', '2': '0010', '3': '0011', '4': '0100',
'5': '0101', '6': '0110', '7': '0111', '8': '1000', '9': '1001',
'a': '1010', 'b': '1011', 'c': '1100', 'd': '1101', 'e': '1110',
'f': '1111', 'L': ''}
# match left leading zeroes, but don't match a single 0 for the case of
# bin(0) == '0b0'
BIN_ZSTRIP = re.compile(r'^0*(?=[01])')
# pylint: disable=W0622
# noinspection PyShadowingBuiltins
def _bin(number):
"""
Adapted from http://code.activestate.com/recipes/576847/
:codeauthor: Vishal Sapre
:license: MIT
A foolishly simple look-up method of getting binary string from an integer
This happens to be faster than all other ways!!!
"""
# =========================================================
# create hex of int, remove '0x'. now for each hex char,
# look up binary string, append in list and join at the end.
# =========================================================
# replace leading left zeroes with '0b'
tmp = [BIN_HEX_DICT[hstr] for hstr in hex(number)[2:]]
return BIN_ZSTRIP.sub('0b', ''.join(tmp))
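# Illustrative usage (added): the lookup-table backport matches the builtin
# bin() for non-negative integers, e.g.
#   _bin(0)   ->  '0b0'
#   _bin(5)   ->  '0b101'
#   _bin(10)  ->  '0b1010'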
if not hasattr(__builtins__, 'next'):
next = _next
else:
next = next
if not hasattr(__builtins__, 'any'):
any = _any
else:
any = any
if not hasattr(__builtins__, 'all'):
all = _all
else:
all = all
if not hasattr(__builtins__, 'bin'):
bin = _bin
else:
bin = bin
| gpl-2.0 |
EndyKaufman/django-postgres-angularjs-blog | app/manager/migrations/0006_properties.py | 1 | 1170 | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-04-24 14:05
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('manager', '0005_add_fields_and_set_defaults'),
]
operations = [
migrations.CreateModel(
name='Properties',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.TextField(max_length=512, unique=True)),
('value', models.TextField(blank=True, null=True)),
('created', models.DateTimeField(auto_now_add=True, null=True, verbose_name='date created')),
('updated', models.DateTimeField(auto_now=True, null=True, verbose_name='date updated')),
('created_user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
],
),
]
| mit |
interfect/cjdns | node_build/dependencies/libuv/build/gyp/pylib/gyp/input.py | 292 | 114315 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from compiler.ast import Const
from compiler.ast import Dict
from compiler.ast import Discard
from compiler.ast import List
from compiler.ast import Module
from compiler.ast import Node
from compiler.ast import Stmt
import compiler
import gyp.common
import gyp.simple_copy
import multiprocessing
import optparse
import os.path
import re
import shlex
import signal
import subprocess
import sys
import threading
import time
import traceback
from gyp.common import GypError
from gyp.common import OrderedSet
# A list of types that are treated as linkable.
linkable_types = ['executable', 'shared_library', 'loadable_module']
# A list of sections that contain links to other targets.
dependency_sections = ['dependencies', 'export_dependent_settings']
# base_path_sections is a list of sections defined by GYP that contain
# pathnames. The generators can provide more keys, the two lists are merged
# into path_sections, but you should call IsPathSection instead of using either
# list directly.
base_path_sections = [
'destination',
'files',
'include_dirs',
'inputs',
'libraries',
'outputs',
'sources',
]
path_sections = set()
# These per-process dictionaries are used to cache build file data when loading
# in parallel mode.
per_process_data = {}
per_process_aux_data = {}
def IsPathSection(section):
# If section ends in one of the '=+?!' characters, it's applied to a section
# without the trailing characters. '/' is notably absent from this list,
# because there's no way for a regular expression to be treated as a path.
while section[-1:] in '=+?!':
section = section[:-1]
if section in path_sections:
return True
  # Sections matching the regexp '_(dir|file|path)s?$' are also
# considered PathSections. Using manual string matching since that
# is much faster than the regexp and this can be called hundreds of
# thousands of times so micro performance matters.
if "_" in section:
tail = section[-6:]
if tail[-1] == 's':
tail = tail[:-1]
if tail[-5:] in ('_file', '_path'):
return True
return tail[-4:] == '_dir'
return False
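# Illustrative behaviour (added): the suffix heuristic above means that,
# for example,
#   IsPathSection('include_dirs')  ->  True   (suffix '_dir' plus plural 's')
#   IsPathSection('foo_path')      ->  True   (suffix '_path')
#   IsPathSection('defines')       ->  False
# (assuming path_sections has not been extended by a generator to cover such
#  keys directly)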
# base_non_configuration_keys is a list of key names that belong in the target
# itself and should not be propagated into its configurations. It is merged
# with a list that can come from the generator to
# create non_configuration_keys.
base_non_configuration_keys = [
# Sections that must exist inside targets and not configurations.
'actions',
'configurations',
'copies',
'default_configuration',
'dependencies',
'dependencies_original',
'libraries',
'postbuilds',
'product_dir',
'product_extension',
'product_name',
'product_prefix',
'rules',
'run_as',
'sources',
'standalone_static_library',
'suppress_wildcard',
'target_name',
'toolset',
'toolsets',
'type',
# Sections that can be found inside targets or configurations, but that
# should not be propagated from targets into their configurations.
'variables',
]
non_configuration_keys = []
# Keys that do not belong inside a configuration dictionary.
invalid_configuration_keys = [
'actions',
'all_dependent_settings',
'configurations',
'dependencies',
'direct_dependent_settings',
'libraries',
'link_settings',
'sources',
'standalone_static_library',
'target_name',
'type',
]
# Controls whether or not the generator supports multiple toolsets.
multiple_toolsets = False
# Paths for converting filelist paths to output paths: {
# toplevel,
# qualified_output_dir,
# }
generator_filelist_paths = None
def GetIncludedBuildFiles(build_file_path, aux_data, included=None):
"""Return a list of all build files included into build_file_path.
The returned list will contain build_file_path as well as all other files
that it included, either directly or indirectly. Note that the list may
contain files that were included into a conditional section that evaluated
to false and was not merged into build_file_path's dict.
aux_data is a dict containing a key for each build file or included build
file. Those keys provide access to dicts whose "included" keys contain
lists of all other files included by the build file.
included should be left at its default None value by external callers. It
is used for recursion.
The returned list will not contain any duplicate entries. Each build file
in the list will be relative to the current directory.
"""
if included == None:
included = []
if build_file_path in included:
return included
included.append(build_file_path)
for included_build_file in aux_data[build_file_path].get('included', []):
GetIncludedBuildFiles(included_build_file, aux_data, included)
return included
def CheckedEval(file_contents):
"""Return the eval of a gyp file.
The gyp file is restricted to dictionaries and lists only, and
repeated keys are not allowed.
Note that this is slower than eval() is.
"""
ast = compiler.parse(file_contents)
assert isinstance(ast, Module)
c1 = ast.getChildren()
assert c1[0] is None
assert isinstance(c1[1], Stmt)
c2 = c1[1].getChildren()
assert isinstance(c2[0], Discard)
c3 = c2[0].getChildren()
assert len(c3) == 1
return CheckNode(c3[0], [])
def CheckNode(node, keypath):
if isinstance(node, Dict):
c = node.getChildren()
dict = {}
for n in range(0, len(c), 2):
assert isinstance(c[n], Const)
key = c[n].getChildren()[0]
if key in dict:
raise GypError("Key '" + key + "' repeated at level " +
repr(len(keypath) + 1) + " with key path '" +
'.'.join(keypath) + "'")
kp = list(keypath) # Make a copy of the list for descending this node.
kp.append(key)
dict[key] = CheckNode(c[n + 1], kp)
return dict
elif isinstance(node, List):
c = node.getChildren()
children = []
for index, child in enumerate(c):
kp = list(keypath) # Copy list.
kp.append(repr(index))
children.append(CheckNode(child, kp))
return children
elif isinstance(node, Const):
return node.getChildren()[0]
else:
raise TypeError("Unknown AST node at key path '" + '.'.join(keypath) +
"': " + repr(node))
def LoadOneBuildFile(build_file_path, data, aux_data, includes,
is_target, check):
if build_file_path in data:
return data[build_file_path]
if os.path.exists(build_file_path):
build_file_contents = open(build_file_path).read()
else:
raise GypError("%s not found (cwd: %s)" % (build_file_path, os.getcwd()))
build_file_data = None
try:
if check:
build_file_data = CheckedEval(build_file_contents)
else:
build_file_data = eval(build_file_contents, {'__builtins__': None},
None)
except SyntaxError, e:
e.filename = build_file_path
raise
except Exception, e:
gyp.common.ExceptionAppend(e, 'while reading ' + build_file_path)
raise
if type(build_file_data) is not dict:
raise GypError("%s does not evaluate to a dictionary." % build_file_path)
data[build_file_path] = build_file_data
aux_data[build_file_path] = {}
# Scan for includes and merge them in.
if ('skip_includes' not in build_file_data or
not build_file_data['skip_includes']):
try:
if is_target:
LoadBuildFileIncludesIntoDict(build_file_data, build_file_path, data,
aux_data, includes, check)
else:
LoadBuildFileIncludesIntoDict(build_file_data, build_file_path, data,
aux_data, None, check)
except Exception, e:
gyp.common.ExceptionAppend(e,
'while reading includes of ' + build_file_path)
raise
return build_file_data
def LoadBuildFileIncludesIntoDict(subdict, subdict_path, data, aux_data,
includes, check):
includes_list = []
if includes != None:
includes_list.extend(includes)
if 'includes' in subdict:
for include in subdict['includes']:
# "include" is specified relative to subdict_path, so compute the real
# path to include by appending the provided "include" to the directory
# in which subdict_path resides.
relative_include = \
os.path.normpath(os.path.join(os.path.dirname(subdict_path), include))
includes_list.append(relative_include)
# Unhook the includes list, it's no longer needed.
del subdict['includes']
# Merge in the included files.
for include in includes_list:
if not 'included' in aux_data[subdict_path]:
aux_data[subdict_path]['included'] = []
aux_data[subdict_path]['included'].append(include)
gyp.DebugOutput(gyp.DEBUG_INCLUDES, "Loading Included File: '%s'", include)
MergeDicts(subdict,
LoadOneBuildFile(include, data, aux_data, None, False, check),
subdict_path, include)
# Recurse into subdictionaries.
for k, v in subdict.iteritems():
if type(v) is dict:
LoadBuildFileIncludesIntoDict(v, subdict_path, data, aux_data,
None, check)
elif type(v) is list:
LoadBuildFileIncludesIntoList(v, subdict_path, data, aux_data,
check)
# This recurses into lists so that it can look for dicts.
def LoadBuildFileIncludesIntoList(sublist, sublist_path, data, aux_data, check):
for item in sublist:
if type(item) is dict:
LoadBuildFileIncludesIntoDict(item, sublist_path, data, aux_data,
None, check)
elif type(item) is list:
LoadBuildFileIncludesIntoList(item, sublist_path, data, aux_data, check)
# Processes toolsets in all the targets. This recurses into condition entries
# since they can contain toolsets as well.
def ProcessToolsetsInDict(data):
if 'targets' in data:
target_list = data['targets']
new_target_list = []
for target in target_list:
# If this target already has an explicit 'toolset', and no 'toolsets'
# list, don't modify it further.
if 'toolset' in target and 'toolsets' not in target:
new_target_list.append(target)
continue
if multiple_toolsets:
toolsets = target.get('toolsets', ['target'])
else:
toolsets = ['target']
# Make sure this 'toolsets' definition is only processed once.
if 'toolsets' in target:
del target['toolsets']
if len(toolsets) > 0:
# Optimization: only do copies if more than one toolset is specified.
for build in toolsets[1:]:
new_target = gyp.simple_copy.deepcopy(target)
new_target['toolset'] = build
new_target_list.append(new_target)
target['toolset'] = toolsets[0]
new_target_list.append(target)
data['targets'] = new_target_list
if 'conditions' in data:
for condition in data['conditions']:
if type(condition) is list:
for condition_dict in condition[1:]:
if type(condition_dict) is dict:
ProcessToolsetsInDict(condition_dict)
# TODO(mark): I don't love this name. It just means that it's going to load
# a build file that contains targets and is expected to provide a targets dict
# that contains the targets...
def LoadTargetBuildFile(build_file_path, data, aux_data, variables, includes,
depth, check, load_dependencies):
# If depth is set, predefine the DEPTH variable to be a relative path from
# this build file's directory to the directory identified by depth.
if depth:
# TODO(dglazkov) The backslash/forward-slash replacement at the end is a
# temporary measure. This should really be addressed by keeping all paths
# in POSIX until actual project generation.
d = gyp.common.RelativePath(depth, os.path.dirname(build_file_path))
if d == '':
variables['DEPTH'] = '.'
else:
variables['DEPTH'] = d.replace('\\', '/')
# The 'target_build_files' key is only set when loading target build files in
# the non-parallel code path, where LoadTargetBuildFile is called
# recursively. In the parallel code path, we don't need to check whether the
# |build_file_path| has already been loaded, because the 'scheduled' set in
# ParallelState guarantees that we never load the same |build_file_path|
# twice.
if 'target_build_files' in data:
if build_file_path in data['target_build_files']:
# Already loaded.
return False
data['target_build_files'].add(build_file_path)
gyp.DebugOutput(gyp.DEBUG_INCLUDES,
"Loading Target Build File '%s'", build_file_path)
build_file_data = LoadOneBuildFile(build_file_path, data, aux_data,
includes, True, check)
# Store DEPTH for later use in generators.
build_file_data['_DEPTH'] = depth
# Set up the included_files key indicating which .gyp files contributed to
# this target dict.
if 'included_files' in build_file_data:
raise GypError(build_file_path + ' must not contain included_files key')
included = GetIncludedBuildFiles(build_file_path, aux_data)
build_file_data['included_files'] = []
for included_file in included:
# included_file is relative to the current directory, but it needs to
# be made relative to build_file_path's directory.
included_relative = \
gyp.common.RelativePath(included_file,
os.path.dirname(build_file_path))
build_file_data['included_files'].append(included_relative)
# Do a first round of toolsets expansion so that conditions can be defined
# per toolset.
ProcessToolsetsInDict(build_file_data)
# Apply "pre"/"early" variable expansions and condition evaluations.
ProcessVariablesAndConditionsInDict(
build_file_data, PHASE_EARLY, variables, build_file_path)
# Since some toolsets might have been defined conditionally, perform
# a second round of toolsets expansion now.
ProcessToolsetsInDict(build_file_data)
# Look at each project's target_defaults dict, and merge settings into
# targets.
if 'target_defaults' in build_file_data:
if 'targets' not in build_file_data:
raise GypError("Unable to find targets in build file %s" %
build_file_path)
index = 0
while index < len(build_file_data['targets']):
# This procedure needs to give the impression that target_defaults is
# used as defaults, and the individual targets inherit from that.
# The individual targets need to be merged into the defaults. Make
# a deep copy of the defaults for each target, merge the target dict
# as found in the input file into that copy, and then hook up the
# copy with the target-specific data merged into it as the replacement
# target dict.
old_target_dict = build_file_data['targets'][index]
new_target_dict = gyp.simple_copy.deepcopy(
build_file_data['target_defaults'])
MergeDicts(new_target_dict, old_target_dict,
build_file_path, build_file_path)
build_file_data['targets'][index] = new_target_dict
index += 1
# No longer needed.
del build_file_data['target_defaults']
# Look for dependencies. This means that dependency resolution occurs
# after "pre" conditionals and variable expansion, but before "post" -
# in other words, you can't put a "dependencies" section inside a "post"
# conditional within a target.
dependencies = []
if 'targets' in build_file_data:
for target_dict in build_file_data['targets']:
if 'dependencies' not in target_dict:
continue
for dependency in target_dict['dependencies']:
dependencies.append(
gyp.common.ResolveTarget(build_file_path, dependency, None)[0])
if load_dependencies:
for dependency in dependencies:
try:
LoadTargetBuildFile(dependency, data, aux_data, variables,
includes, depth, check, load_dependencies)
except Exception, e:
gyp.common.ExceptionAppend(
e, 'while loading dependencies of %s' % build_file_path)
raise
else:
return (build_file_path, dependencies)
def CallLoadTargetBuildFile(global_flags,
build_file_path, variables,
includes, depth, check,
generator_input_info):
"""Wrapper around LoadTargetBuildFile for parallel processing.
This wrapper is used when LoadTargetBuildFile is executed in
a worker process.
"""
try:
signal.signal(signal.SIGINT, signal.SIG_IGN)
# Apply globals so that the worker process behaves the same.
for key, value in global_flags.iteritems():
globals()[key] = value
SetGeneratorGlobals(generator_input_info)
result = LoadTargetBuildFile(build_file_path, per_process_data,
per_process_aux_data, variables,
includes, depth, check, False)
if not result:
return result
(build_file_path, dependencies) = result
# We can safely pop the build_file_data from per_process_data because it
# will never be referenced by this process again, so we don't need to keep
# it in the cache.
build_file_data = per_process_data.pop(build_file_path)
# This gets serialized and sent back to the main process via a pipe.
# It's handled in LoadTargetBuildFileCallback.
return (build_file_path,
build_file_data,
dependencies)
except GypError, e:
sys.stderr.write("gyp: %s\n" % e)
return None
except Exception, e:
print >>sys.stderr, 'Exception:', e
print >>sys.stderr, traceback.format_exc()
return None
class ParallelProcessingError(Exception):
pass
class ParallelState(object):
"""Class to keep track of state when processing input files in parallel.
If build files are loaded in parallel, use this to keep track of
state during farming out and processing parallel jobs. It's stored
in a global so that the callback function can have access to it.
"""
def __init__(self):
# The multiprocessing pool.
self.pool = None
# The condition variable used to protect this object and notify
# the main loop when there might be more data to process.
self.condition = None
# The "data" dict that was passed to LoadTargetBuildFileParallel
self.data = None
# The number of parallel calls outstanding; decremented when a response
# was received.
self.pending = 0
# The set of all build files that have been scheduled, so we don't
# schedule the same one twice.
self.scheduled = set()
# A list of dependency build file paths that haven't been scheduled yet.
self.dependencies = []
# Flag to indicate if there was an error in a child process.
self.error = False
def LoadTargetBuildFileCallback(self, result):
"""Handle the results of running LoadTargetBuildFile in another process.
"""
self.condition.acquire()
if not result:
self.error = True
self.condition.notify()
self.condition.release()
return
(build_file_path0, build_file_data0, dependencies0) = result
self.data[build_file_path0] = build_file_data0
self.data['target_build_files'].add(build_file_path0)
for new_dependency in dependencies0:
if new_dependency not in self.scheduled:
self.scheduled.add(new_dependency)
self.dependencies.append(new_dependency)
self.pending -= 1
self.condition.notify()
self.condition.release()
def LoadTargetBuildFilesParallel(build_files, data, variables, includes, depth,
check, generator_input_info):
parallel_state = ParallelState()
parallel_state.condition = threading.Condition()
# Make copies of the build_files argument that we can modify while working.
parallel_state.dependencies = list(build_files)
parallel_state.scheduled = set(build_files)
parallel_state.pending = 0
parallel_state.data = data
try:
parallel_state.condition.acquire()
while parallel_state.dependencies or parallel_state.pending:
if parallel_state.error:
break
if not parallel_state.dependencies:
parallel_state.condition.wait()
continue
dependency = parallel_state.dependencies.pop()
parallel_state.pending += 1
global_flags = {
'path_sections': globals()['path_sections'],
'non_configuration_keys': globals()['non_configuration_keys'],
'multiple_toolsets': globals()['multiple_toolsets']}
if not parallel_state.pool:
parallel_state.pool = multiprocessing.Pool(multiprocessing.cpu_count())
parallel_state.pool.apply_async(
CallLoadTargetBuildFile,
args = (global_flags, dependency,
variables, includes, depth, check, generator_input_info),
callback = parallel_state.LoadTargetBuildFileCallback)
except KeyboardInterrupt, e:
parallel_state.pool.terminate()
raise e
parallel_state.condition.release()
parallel_state.pool.close()
parallel_state.pool.join()
parallel_state.pool = None
if parallel_state.error:
sys.exit(1)
# Look for the bracket that matches the first bracket seen in a
# string, and return the start and end as a tuple. For example, if
# the input is something like "<(foo <(bar)) blah", then it would
# return (1, 13), indicating the entire string except for the leading
# "<" and trailing " blah".
LBRACKETS= set('{[(')
BRACKETS = {'}': '{', ']': '[', ')': '('}
def FindEnclosingBracketGroup(input_str):
stack = []
start = -1
for index, char in enumerate(input_str):
if char in LBRACKETS:
stack.append(char)
if start == -1:
start = index
elif char in BRACKETS:
if not stack:
return (-1, -1)
if stack.pop() != BRACKETS[char]:
return (-1, -1)
if not stack:
return (start, index + 1)
return (-1, -1)
def IsStrCanonicalInt(string):
"""Returns True if |string| is in its canonical integer form.
The canonical form is such that str(int(string)) == string.
"""
if type(string) is str:
# This function is called a lot so for maximum performance, avoid
# involving regexps which would otherwise make the code much
# shorter. Regexps would need twice the time of this function.
if string:
if string == "0":
return True
if string[0] == "-":
string = string[1:]
if not string:
return False
if '1' <= string[0] <= '9':
return string.isdigit()
return False
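# Illustrative behaviour (added):
#   IsStrCanonicalInt('10')   ->  True
#   IsStrCanonicalInt('-7')   ->  True
#   IsStrCanonicalInt('010')  ->  False  (leading zero is not canonical)
#   IsStrCanonicalInt(10)     ->  False  (only str instances qualify)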
# This matches things like "<(asdf)", "<!(cmd)", "<!@(cmd)", "<|(list)",
# "<!interpreter(arguments)", "<([list])", and even "<([)" and "<(<())".
# In the last case, the inner "<()" is captured in match['content'].
early_variable_re = re.compile(
r'(?P<replace>(?P<type><(?:(?:!?@?)|\|)?)'
r'(?P<command_string>[-a-zA-Z0-9_.]+)?'
r'\((?P<is_array>\s*\[?)'
r'(?P<content>.*?)(\]?)\))')
# This matches the same as early_variable_re, but with '>' instead of '<'.
late_variable_re = re.compile(
r'(?P<replace>(?P<type>>(?:(?:!?@?)|\|)?)'
r'(?P<command_string>[-a-zA-Z0-9_.]+)?'
r'\((?P<is_array>\s*\[?)'
r'(?P<content>.*?)(\]?)\))')
# This matches the same as early_variable_re, but with '^' instead of '<'.
latelate_variable_re = re.compile(
r'(?P<replace>(?P<type>[\^](?:(?:!?@?)|\|)?)'
r'(?P<command_string>[-a-zA-Z0-9_.]+)?'
r'\((?P<is_array>\s*\[?)'
r'(?P<content>.*?)(\]?)\))')
# Global cache of results from running commands so they don't have to be run
# more then once.
cached_command_results = {}
def FixupPlatformCommand(cmd):
if sys.platform == 'win32':
if type(cmd) is list:
cmd = [re.sub('^cat ', 'type ', cmd[0])] + cmd[1:]
else:
cmd = re.sub('^cat ', 'type ', cmd)
return cmd
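# Illustrative behaviour (added): on win32 a leading 'cat ' is rewritten to
# the native 'type ' so shelled-out commands keep working, e.g.
#   FixupPlatformCommand('cat foo.txt')  ->  'type foo.txt'   (on win32)
# while on other platforms the command is returned unchanged.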
PHASE_EARLY = 0
PHASE_LATE = 1
PHASE_LATELATE = 2
def ExpandVariables(input, phase, variables, build_file):
# Look for the pattern that gets expanded into variables
if phase == PHASE_EARLY:
variable_re = early_variable_re
expansion_symbol = '<'
elif phase == PHASE_LATE:
variable_re = late_variable_re
expansion_symbol = '>'
elif phase == PHASE_LATELATE:
variable_re = latelate_variable_re
expansion_symbol = '^'
else:
assert False
input_str = str(input)
if IsStrCanonicalInt(input_str):
return int(input_str)
# Do a quick scan to determine if an expensive regex search is warranted.
if expansion_symbol not in input_str:
return input_str
# Get the entire list of matches as a list of MatchObject instances.
# (using findall here would return strings instead of MatchObjects).
matches = list(variable_re.finditer(input_str))
if not matches:
return input_str
output = input_str
# Reverse the list of matches so that replacements are done right-to-left.
# That ensures that earlier replacements won't mess up the string in a
# way that causes later calls to find the earlier substituted text instead
# of what's intended for replacement.
matches.reverse()
for match_group in matches:
match = match_group.groupdict()
gyp.DebugOutput(gyp.DEBUG_VARIABLES, "Matches: %r", match)
# match['replace'] is the substring to look for, match['type']
# is the character code for the replacement type (< > <! >! <| >| <@
# >@ <!@ >!@), match['is_array'] contains a '[' for command
# arrays, and match['content'] is the name of the variable (< >)
# or command to run (<! >!). match['command_string'] is an optional
# command string. Currently, only 'pymod_do_main' is supported.
# run_command is true if a ! variant is used.
run_command = '!' in match['type']
command_string = match['command_string']
# file_list is true if a | variant is used.
file_list = '|' in match['type']
# Capture these now so we can adjust them later.
replace_start = match_group.start('replace')
replace_end = match_group.end('replace')
# Find the ending paren, and re-evaluate the contained string.
(c_start, c_end) = FindEnclosingBracketGroup(input_str[replace_start:])
# Adjust the replacement range to match the entire command
# found by FindEnclosingBracketGroup (since the variable_re
# probably doesn't match the entire command if it contained
# nested variables).
replace_end = replace_start + c_end
# Find the "real" replacement, matching the appropriate closing
# paren, and adjust the replacement start and end.
replacement = input_str[replace_start:replace_end]
# Figure out what the contents of the variable parens are.
contents_start = replace_start + c_start + 1
contents_end = replace_end - 1
contents = input_str[contents_start:contents_end]
# Do filter substitution now for <|().
    # Admittedly, this is different from the evaluation order in other
# contexts. However, since filtration has no chance to run on <|(),
# this seems like the only obvious way to give them access to filters.
if file_list:
processed_variables = gyp.simple_copy.deepcopy(variables)
ProcessListFiltersInDict(contents, processed_variables)
# Recurse to expand variables in the contents
contents = ExpandVariables(contents, phase,
processed_variables, build_file)
else:
# Recurse to expand variables in the contents
contents = ExpandVariables(contents, phase, variables, build_file)
# Strip off leading/trailing whitespace so that variable matches are
# simpler below (and because they are rarely needed).
contents = contents.strip()
# expand_to_list is true if an @ variant is used. In that case,
# the expansion should result in a list. Note that the caller
    # is expected to handle a list in return, though not all callers do
# because not all are working in list context. Also, for list
# expansions, there can be no other text besides the variable
# expansion in the input string.
expand_to_list = '@' in match['type'] and input_str == replacement
if run_command or file_list:
# Find the build file's directory, so commands can be run or file lists
# generated relative to it.
build_file_dir = os.path.dirname(build_file)
if build_file_dir == '' and not file_list:
# If build_file is just a leaf filename indicating a file in the
# current directory, build_file_dir might be an empty string. Set
# it to None to signal to subprocess.Popen that it should run the
# command in the current directory.
build_file_dir = None
# Support <|(listfile.txt ...) which generates a file
# containing items from a gyp list, generated at gyp time.
# This works around actions/rules which have more inputs than will
# fit on the command line.
if file_list:
if type(contents) is list:
contents_list = contents
else:
contents_list = contents.split(' ')
replacement = contents_list[0]
if os.path.isabs(replacement):
raise GypError('| cannot handle absolute paths, got "%s"' % replacement)
if not generator_filelist_paths:
path = os.path.join(build_file_dir, replacement)
else:
if os.path.isabs(build_file_dir):
toplevel = generator_filelist_paths['toplevel']
rel_build_file_dir = gyp.common.RelativePath(build_file_dir, toplevel)
else:
rel_build_file_dir = build_file_dir
qualified_out_dir = generator_filelist_paths['qualified_out_dir']
path = os.path.join(qualified_out_dir, rel_build_file_dir, replacement)
gyp.common.EnsureDirExists(path)
replacement = gyp.common.RelativePath(path, build_file_dir)
f = gyp.common.WriteOnDiff(path)
for i in contents_list[1:]:
f.write('%s\n' % i)
f.close()
elif run_command:
use_shell = True
if match['is_array']:
contents = eval(contents)
use_shell = False
# Check for a cached value to avoid executing commands, or generating
# file lists more than once. The cache key contains the command to be
# run as well as the directory to run it from, to account for commands
# that depend on their current directory.
# TODO(http://code.google.com/p/gyp/issues/detail?id=111): In theory,
# someone could author a set of GYP files where each time the command
# is invoked it produces different output by design. When the need
# arises, the syntax should be extended to support no caching off a
# command's output so it is run every time.
cache_key = (str(contents), build_file_dir)
cached_value = cached_command_results.get(cache_key, None)
if cached_value is None:
gyp.DebugOutput(gyp.DEBUG_VARIABLES,
"Executing command '%s' in directory '%s'",
contents, build_file_dir)
replacement = ''
if command_string == 'pymod_do_main':
# <!pymod_do_main(modulename param eters) loads |modulename| as a
# python module and then calls that module's DoMain() function,
# passing ["param", "eters"] as a single list argument. For modules
# that don't load quickly, this can be faster than
# <!(python modulename param eters). Do this in |build_file_dir|.
oldwd = os.getcwd() # Python doesn't like os.open('.'): no fchdir.
if build_file_dir: # build_file_dir may be None (see above).
os.chdir(build_file_dir)
try:
parsed_contents = shlex.split(contents)
try:
py_module = __import__(parsed_contents[0])
except ImportError as e:
raise GypError("Error importing pymod_do_main"
"module (%s): %s" % (parsed_contents[0], e))
replacement = str(py_module.DoMain(parsed_contents[1:])).rstrip()
finally:
os.chdir(oldwd)
assert replacement != None
elif command_string:
raise GypError("Unknown command string '%s' in '%s'." %
(command_string, contents))
else:
# Fix up command with platform specific workarounds.
contents = FixupPlatformCommand(contents)
p = subprocess.Popen(contents, shell=use_shell,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE,
cwd=build_file_dir)
p_stdout, p_stderr = p.communicate('')
if p.wait() != 0 or p_stderr:
sys.stderr.write(p_stderr)
# Simulate check_call behavior, since check_call only exists
# in python 2.5 and later.
raise GypError("Call to '%s' returned exit status %d." %
(contents, p.returncode))
replacement = p_stdout.rstrip()
cached_command_results[cache_key] = replacement
else:
gyp.DebugOutput(gyp.DEBUG_VARIABLES,
"Had cache value for command '%s' in directory '%s'",
contents,build_file_dir)
replacement = cached_value
else:
if not contents in variables:
if contents[-1] in ['!', '/']:
# In order to allow cross-compiles (nacl) to happen more naturally,
# we will allow references to >(sources/) etc. to resolve to
          # an empty list if undefined. This allows actions to:
# 'action!': [
# '>@(_sources!)',
# ],
# 'action/': [
# '>@(_sources/)',
# ],
replacement = []
else:
raise GypError('Undefined variable ' + contents +
' in ' + build_file)
else:
replacement = variables[contents]
if type(replacement) is list:
for item in replacement:
if not contents[-1] == '/' and type(item) not in (str, int):
raise GypError('Variable ' + contents +
' must expand to a string or list of strings; ' +
'list contains a ' +
item.__class__.__name__)
# Run through the list and handle variable expansions in it. Since
# the list is guaranteed not to contain dicts, this won't do anything
# with conditions sections.
ProcessVariablesAndConditionsInList(replacement, phase, variables,
build_file)
elif type(replacement) not in (str, int):
raise GypError('Variable ' + contents +
' must expand to a string or list of strings; ' +
'found a ' + replacement.__class__.__name__)
if expand_to_list:
# Expanding in list context. It's guaranteed that there's only one
# replacement to do in |input_str| and that it's this replacement. See
# above.
if type(replacement) is list:
# If it's already a list, make a copy.
output = replacement[:]
else:
# Split it the same way sh would split arguments.
output = shlex.split(str(replacement))
else:
# Expanding in string context.
encoded_replacement = ''
if type(replacement) is list:
# When expanding a list into string context, turn the list items
# into a string in a way that will work with a subprocess call.
#
# TODO(mark): This isn't completely correct. This should
# call a generator-provided function that observes the
# proper list-to-argument quoting rules on a specific
# platform instead of just calling the POSIX encoding
# routine.
encoded_replacement = gyp.common.EncodePOSIXShellList(replacement)
else:
encoded_replacement = replacement
output = output[:replace_start] + str(encoded_replacement) + \
output[replace_end:]
# Prepare for the next match iteration.
input_str = output
if output == input:
gyp.DebugOutput(gyp.DEBUG_VARIABLES,
"Found only identity matches on %r, avoiding infinite "
"recursion.",
output)
else:
# Look for more matches now that we've replaced some, to deal with
# expanding local variables (variables defined in the same
# variables block as this one).
gyp.DebugOutput(gyp.DEBUG_VARIABLES, "Found output %r, recursing.", output)
if type(output) is list:
if output and type(output[0]) is list:
# Leave output alone if it's a list of lists.
# We don't want such lists to be stringified.
pass
else:
new_output = []
for item in output:
new_output.append(
ExpandVariables(item, phase, variables, build_file))
output = new_output
else:
output = ExpandVariables(output, phase, variables, build_file)
# Convert all strings that are canonically-represented integers into integers.
if type(output) is list:
for index in xrange(0, len(output)):
if IsStrCanonicalInt(output[index]):
output[index] = int(output[index])
elif IsStrCanonicalInt(output):
output = int(output)
return output
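# Illustrative sketch (added): with variables = {'foo': 'bar', 'num': 5} and
# phase == PHASE_EARLY one would expect, roughly,
#   ExpandVariables('<(foo)/baz', ...)   ->  'bar/baz'
#   ExpandVariables('<(num)', ...)       ->  5      (canonical ints become int)
#   ExpandVariables('<!(echo hi)', ...)  ->  'hi'   (command output, cached)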
# The same condition is often evaluated over and over again so it
# makes sense to cache as much as possible between evaluations.
cached_conditions_asts = {}
def EvalCondition(condition, conditions_key, phase, variables, build_file):
"""Returns the dict that should be used or None if the result was
that nothing should be used."""
if type(condition) is not list:
raise GypError(conditions_key + ' must be a list')
if len(condition) < 2:
# It's possible that condition[0] won't work in which case this
# attempt will raise its own IndexError. That's probably fine.
raise GypError(conditions_key + ' ' + condition[0] +
' must be at least length 2, not ' + str(len(condition)))
i = 0
result = None
while i < len(condition):
cond_expr = condition[i]
true_dict = condition[i + 1]
if type(true_dict) is not dict:
raise GypError('{} {} must be followed by a dictionary, not {}'.format(
conditions_key, cond_expr, type(true_dict)))
if len(condition) > i + 2 and type(condition[i + 2]) is dict:
false_dict = condition[i + 2]
i = i + 3
if i != len(condition):
raise GypError('{} {} has {} unexpected trailing items'.format(
conditions_key, cond_expr, len(condition) - i))
else:
false_dict = None
i = i + 2
if result == None:
result = EvalSingleCondition(
cond_expr, true_dict, false_dict, phase, variables, build_file)
return result
def EvalSingleCondition(
cond_expr, true_dict, false_dict, phase, variables, build_file):
"""Returns true_dict if cond_expr evaluates to true, and false_dict
otherwise."""
  # Do expansions on the condition itself. Since the condition can naturally
# contain variable references without needing to resort to GYP expansion
# syntax, this is of dubious value for variables, but someone might want to
# use a command expansion directly inside a condition.
cond_expr_expanded = ExpandVariables(cond_expr, phase, variables,
build_file)
if type(cond_expr_expanded) not in (str, int):
raise ValueError(
'Variable expansion in this context permits str and int ' + \
'only, found ' + cond_expr_expanded.__class__.__name__)
try:
if cond_expr_expanded in cached_conditions_asts:
ast_code = cached_conditions_asts[cond_expr_expanded]
else:
ast_code = compile(cond_expr_expanded, '<string>', 'eval')
cached_conditions_asts[cond_expr_expanded] = ast_code
if eval(ast_code, {'__builtins__': None}, variables):
return true_dict
return false_dict
except SyntaxError, e:
syntax_error = SyntaxError('%s while evaluating condition \'%s\' in %s '
'at character %d.' %
(str(e.args[0]), e.text, build_file, e.offset),
e.filename, e.lineno, e.offset, e.text)
raise syntax_error
except NameError, e:
gyp.common.ExceptionAppend(e, 'while evaluating condition \'%s\' in %s' %
(cond_expr_expanded, build_file))
raise GypError(e)
def ProcessConditionsInDict(the_dict, phase, variables, build_file):
# Process a 'conditions' or 'target_conditions' section in the_dict,
# depending on phase.
# early -> conditions
# late -> target_conditions
# latelate -> no conditions
#
# Each item in a conditions list consists of cond_expr, a string expression
# evaluated as the condition, and true_dict, a dict that will be merged into
# the_dict if cond_expr evaluates to true. Optionally, a third item,
# false_dict, may be present. false_dict is merged into the_dict if
# cond_expr evaluates to false.
#
# Any dict merged into the_dict will be recursively processed for nested
# conditionals and other expansions, also according to phase, immediately
# prior to being merged.
if phase == PHASE_EARLY:
conditions_key = 'conditions'
elif phase == PHASE_LATE:
conditions_key = 'target_conditions'
elif phase == PHASE_LATELATE:
return
else:
assert False
if not conditions_key in the_dict:
return
conditions_list = the_dict[conditions_key]
# Unhook the conditions list, it's no longer needed.
del the_dict[conditions_key]
for condition in conditions_list:
merge_dict = EvalCondition(condition, conditions_key, phase, variables,
build_file)
if merge_dict != None:
      # Expand variables and nested conditionals in the merge_dict before
# merging it.
ProcessVariablesAndConditionsInDict(merge_dict, phase,
variables, build_file)
MergeDicts(the_dict, merge_dict, build_file, build_file)
def LoadAutomaticVariablesFromDict(variables, the_dict):
# Any keys with plain string values in the_dict become automatic variables.
# The variable name is the key name with a "_" character prepended.
for key, value in the_dict.iteritems():
if type(value) in (str, int, list):
variables['_' + key] = value
def LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key):
# Any keys in the_dict's "variables" dict, if it has one, becomes a
# variable. The variable name is the key name in the "variables" dict.
# Variables that end with the % character are set only if they are unset in
# the variables dict. the_dict_key is the name of the key that accesses
# the_dict in the_dict's parent dict. If the_dict's parent is not a dict
# (it could be a list or it could be parentless because it is a root dict),
# the_dict_key will be None.
for key, value in the_dict.get('variables', {}).iteritems():
if type(value) not in (str, int, list):
continue
if key.endswith('%'):
variable_name = key[:-1]
if variable_name in variables:
# If the variable is already set, don't set it.
continue
      if the_dict_key == 'variables' and variable_name in the_dict:
        # If the variable is set without a % in the_dict, and the_dict is a
        # variables dict (making |variables| a variables sub-dict of a
# variables dict), use the_dict's definition.
value = the_dict[variable_name]
else:
variable_name = key
variables[variable_name] = value
def ProcessVariablesAndConditionsInDict(the_dict, phase, variables_in,
build_file, the_dict_key=None):
"""Handle all variable and command expansion and conditional evaluation.
This function is the public entry point for all variable expansions and
conditional evaluations. The variables_in dictionary will not be modified
by this function.
"""
# Make a copy of the variables_in dict that can be modified during the
# loading of automatics and the loading of the variables dict.
variables = variables_in.copy()
LoadAutomaticVariablesFromDict(variables, the_dict)
if 'variables' in the_dict:
# Make sure all the local variables are added to the variables
# list before we process them so that you can reference one
# variable from another. They will be fully expanded by recursion
# in ExpandVariables.
for key, value in the_dict['variables'].iteritems():
variables[key] = value
# Handle the associated variables dict first, so that any variable
# references within can be resolved prior to using them as variables.
# Pass a copy of the variables dict to avoid having it be tainted.
# Otherwise, it would have extra automatics added for everything that
# should just be an ordinary variable in this scope.
ProcessVariablesAndConditionsInDict(the_dict['variables'], phase,
variables, build_file, 'variables')
LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)
for key, value in the_dict.iteritems():
# Skip "variables", which was already processed if present.
if key != 'variables' and type(value) is str:
expanded = ExpandVariables(value, phase, variables, build_file)
if type(expanded) not in (str, int):
raise ValueError(
'Variable expansion in this context permits str and int ' + \
'only, found ' + expanded.__class__.__name__ + ' for ' + key)
the_dict[key] = expanded
# Variable expansion may have resulted in changes to automatics. Reload.
# TODO(mark): Optimization: only reload if no changes were made.
variables = variables_in.copy()
LoadAutomaticVariablesFromDict(variables, the_dict)
LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)
# Process conditions in this dict. This is done after variable expansion
# so that conditions may take advantage of expanded variables. For example,
# if the_dict contains:
# {'type': '<(library_type)',
# 'conditions': [['_type=="static_library"', { ... }]]},
# _type, as used in the condition, will only be set to the value of
# library_type if variable expansion is performed before condition
# processing. However, condition processing should occur prior to recursion
# so that variables (both automatic and "variables" dict type) may be
# adjusted by conditions sections, merged into the_dict, and have the
# intended impact on contained dicts.
#
# This arrangement means that a "conditions" section containing a "variables"
# section will only have those variables effective in subdicts, not in
# the_dict. The workaround is to put a "conditions" section within a
# "variables" section. For example:
# {'conditions': [['os=="mac"', {'variables': {'define': 'IS_MAC'}}]],
# 'defines': ['<(define)'],
# 'my_subdict': {'defines': ['<(define)']}},
# will not result in "IS_MAC" being appended to the "defines" list in the
# current scope but would result in it being appended to the "defines" list
# within "my_subdict". By comparison:
# {'variables': {'conditions': [['os=="mac"', {'define': 'IS_MAC'}]]},
# 'defines': ['<(define)'],
# 'my_subdict': {'defines': ['<(define)']}},
# will append "IS_MAC" to both "defines" lists.
# Evaluate conditions sections, allowing variable expansions within them
# as well as nested conditionals. This will process a 'conditions' or
# 'target_conditions' section, perform appropriate merging and recursive
# conditional and variable processing, and then remove the conditions section
# from the_dict if it is present.
ProcessConditionsInDict(the_dict, phase, variables, build_file)
# Conditional processing may have resulted in changes to automatics or the
# variables dict. Reload.
variables = variables_in.copy()
LoadAutomaticVariablesFromDict(variables, the_dict)
LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)
# Recurse into child dicts, or process child lists which may result in
# further recursion into descendant dicts.
for key, value in the_dict.iteritems():
# Skip "variables" and string values, which were already processed if
# present.
if key == 'variables' or type(value) is str:
continue
if type(value) is dict:
# Pass a copy of the variables dict so that subdicts can't influence
# parents.
ProcessVariablesAndConditionsInDict(value, phase, variables,
build_file, key)
elif type(value) is list:
# The list itself can't influence the variables dict, and
# ProcessVariablesAndConditionsInList will make copies of the variables
# dict if it needs to pass it to something that can influence it. No
# copy is necessary here.
ProcessVariablesAndConditionsInList(value, phase, variables,
build_file)
elif type(value) is not int:
raise TypeError('Unknown type ' + value.__class__.__name__ + \
' for ' + key)
def ProcessVariablesAndConditionsInList(the_list, phase, variables,
build_file):
# Iterate using an index so that new values can be assigned into the_list.
index = 0
while index < len(the_list):
item = the_list[index]
if type(item) is dict:
# Make a copy of the variables dict so that it won't influence anything
# outside of its own scope.
ProcessVariablesAndConditionsInDict(item, phase, variables, build_file)
elif type(item) is list:
ProcessVariablesAndConditionsInList(item, phase, variables, build_file)
elif type(item) is str:
expanded = ExpandVariables(item, phase, variables, build_file)
if type(expanded) in (str, int):
the_list[index] = expanded
elif type(expanded) is list:
the_list[index:index+1] = expanded
index += len(expanded)
# index now identifies the next item to examine. Continue right now
# without falling into the index increment below.
continue
else:
raise ValueError(
'Variable expansion in this context permits strings and ' + \
'lists only, found ' + expanded.__class__.__name__ + ' at ' + \
            str(index))
elif type(item) is not int:
raise TypeError('Unknown type ' + item.__class__.__name__ + \
                      ' at index ' + str(index))
index = index + 1
def BuildTargetsDict(data):
"""Builds a dict mapping fully-qualified target names to their target dicts.
  |data| is a dict keyed by build file pathname, relative to the current
  directory; its values are the loaded build file contents.  For each
|data| value with a "targets" key, the value of the "targets" key is taken
as a list containing target dicts. Each target's fully-qualified name is
constructed from the pathname of the build file (|data| key) and its
"target_name" property. These fully-qualified names are used as the keys
in the returned dict. These keys provide access to the target dicts,
the dicts in the "targets" lists.
"""
targets = {}
for build_file in data['target_build_files']:
for target in data[build_file].get('targets', []):
target_name = gyp.common.QualifiedTarget(build_file,
target['target_name'],
target['toolset'])
if target_name in targets:
raise GypError('Duplicate target definitions for ' + target_name)
targets[target_name] = target
return targets
def QualifyDependencies(targets):
"""Make dependency links fully-qualified relative to the current directory.
|targets| is a dict mapping fully-qualified target names to their target
dicts. For each target in this dict, keys known to contain dependency
links are examined, and any dependencies referenced will be rewritten
so that they are fully-qualified and relative to the current directory.
All rewritten dependencies are suitable for use as keys to |targets| or a
similar dict.
"""
all_dependency_sections = [dep + op
for dep in dependency_sections
for op in ('', '!', '/')]
for target, target_dict in targets.iteritems():
target_build_file = gyp.common.BuildFile(target)
toolset = target_dict['toolset']
for dependency_key in all_dependency_sections:
dependencies = target_dict.get(dependency_key, [])
for index in xrange(0, len(dependencies)):
dep_file, dep_target, dep_toolset = gyp.common.ResolveTarget(
target_build_file, dependencies[index], toolset)
if not multiple_toolsets:
# Ignore toolset specification in the dependency if it is specified.
dep_toolset = toolset
dependency = gyp.common.QualifiedTarget(dep_file,
dep_target,
dep_toolset)
dependencies[index] = dependency
# Make sure anything appearing in a list other than "dependencies" also
# appears in the "dependencies" list.
if dependency_key != 'dependencies' and \
dependency not in target_dict['dependencies']:
raise GypError('Found ' + dependency + ' in ' + dependency_key +
' of ' + target + ', but not in dependencies')
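# Illustrative sketch (hypothetical names, not part of the input): inside
# chrome/chrome.gyp, a dependency written as '../base/base.gyp:base' would be
# rewritten by QualifyDependencies to a fully-qualified form along the lines of
# 'base/base.gyp:base#target' -- build-file path relative to the current
# directory, target name, and toolset -- matching the keys of |targets|.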
def ExpandWildcardDependencies(targets, data):
"""Expands dependencies specified as build_file:*.
For each target in |targets|, examines sections containing links to other
targets. If any such section contains a link of the form build_file:*, it
is taken as a wildcard link, and is expanded to list each target in
build_file. The |data| dict provides access to build file dicts.
Any target that does not wish to be included by wildcard can provide an
optional "suppress_wildcard" key in its target dict. When present and
true, a wildcard dependency link will not include such targets.
All dependency names, including the keys to |targets| and the values in each
dependency list, must be qualified when this function is called.
"""
for target, target_dict in targets.iteritems():
toolset = target_dict['toolset']
target_build_file = gyp.common.BuildFile(target)
for dependency_key in dependency_sections:
dependencies = target_dict.get(dependency_key, [])
# Loop this way instead of "for dependency in" or "for index in xrange"
# because the dependencies list will be modified within the loop body.
index = 0
while index < len(dependencies):
(dependency_build_file, dependency_target, dependency_toolset) = \
gyp.common.ParseQualifiedTarget(dependencies[index])
if dependency_target != '*' and dependency_toolset != '*':
# Not a wildcard. Keep it moving.
index = index + 1
continue
if dependency_build_file == target_build_file:
# It's an error for a target to depend on all other targets in
# the same file, because a target cannot depend on itself.
raise GypError('Found wildcard in ' + dependency_key + ' of ' +
target + ' referring to same build file')
# Take the wildcard out and adjust the index so that the next
# dependency in the list will be processed the next time through the
# loop.
del dependencies[index]
index = index - 1
# Loop through the targets in the other build file, adding them to
# this target's list of dependencies in place of the removed
# wildcard.
dependency_target_dicts = data[dependency_build_file]['targets']
for dependency_target_dict in dependency_target_dicts:
if int(dependency_target_dict.get('suppress_wildcard', False)):
continue
dependency_target_name = dependency_target_dict['target_name']
if (dependency_target != '*' and
dependency_target != dependency_target_name):
continue
dependency_target_toolset = dependency_target_dict['toolset']
if (dependency_toolset != '*' and
dependency_toolset != dependency_target_toolset):
continue
dependency = gyp.common.QualifiedTarget(dependency_build_file,
dependency_target_name,
dependency_target_toolset)
index = index + 1
dependencies.insert(index, dependency)
index = index + 1
def Unify(l):
"""Removes duplicate elements from l, keeping the first element."""
seen = {}
return [seen.setdefault(e, e) for e in l if e not in seen]
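# For illustration (hypothetical values): Unify(['a', 'b', 'a', 'c']) returns
# ['a', 'b', 'c'], keeping the first occurrence of each element and preserving
# the original order.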
def RemoveDuplicateDependencies(targets):
"""Makes sure every dependency appears only once in all targets's dependency
lists."""
for target_name, target_dict in targets.iteritems():
for dependency_key in dependency_sections:
dependencies = target_dict.get(dependency_key, [])
if dependencies:
target_dict[dependency_key] = Unify(dependencies)
def Filter(l, item):
"""Removes item from l."""
res = {}
return [res.setdefault(e, e) for e in l if e != item]
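# For illustration (hypothetical values): Filter(['a', 'b', 'a', 'c'], 'a')
# returns ['b', 'c']; every occurrence of the item is dropped, but other
# duplicates in l are left alone.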
def RemoveSelfDependencies(targets):
"""Remove self dependencies from targets that have the prune_self_dependency
variable set."""
for target_name, target_dict in targets.iteritems():
for dependency_key in dependency_sections:
dependencies = target_dict.get(dependency_key, [])
if dependencies:
for t in dependencies:
if t == target_name:
if targets[t].get('variables', {}).get('prune_self_dependency', 0):
target_dict[dependency_key] = Filter(dependencies, target_name)
def RemoveLinkDependenciesFromNoneTargets(targets):
"""Remove dependencies having the 'link_dependency' attribute from the 'none'
targets."""
for target_name, target_dict in targets.iteritems():
for dependency_key in dependency_sections:
dependencies = target_dict.get(dependency_key, [])
if dependencies:
for t in dependencies:
if target_dict.get('type', None) == 'none':
if targets[t].get('variables', {}).get('link_dependency', 0):
target_dict[dependency_key] = \
Filter(target_dict[dependency_key], t)
class DependencyGraphNode(object):
"""
Attributes:
ref: A reference to an object that this DependencyGraphNode represents.
dependencies: List of DependencyGraphNodes on which this one depends.
dependents: List of DependencyGraphNodes that depend on this one.
"""
class CircularException(GypError):
pass
def __init__(self, ref):
self.ref = ref
self.dependencies = []
self.dependents = []
def __repr__(self):
return '<DependencyGraphNode: %r>' % self.ref
def FlattenToList(self):
# flat_list is the sorted list of dependencies - actually, the list items
# are the "ref" attributes of DependencyGraphNodes. Every target will
# appear in flat_list after all of its dependencies, and before all of its
# dependents.
flat_list = OrderedSet()
# in_degree_zeros is the list of DependencyGraphNodes that have no
# dependencies not in flat_list. Initially, it is a copy of the children
# of this node, because when the graph was built, nodes with no
# dependencies were made implicit dependents of the root node.
in_degree_zeros = set(self.dependents[:])
while in_degree_zeros:
# Nodes in in_degree_zeros have no dependencies not in flat_list, so they
# can be appended to flat_list. Take these nodes out of in_degree_zeros
# as work progresses, so that the next node to process from the list can
# always be accessed at a consistent position.
node = in_degree_zeros.pop()
flat_list.add(node.ref)
# Look at dependents of the node just added to flat_list. Some of them
# may now belong in in_degree_zeros.
for node_dependent in node.dependents:
is_in_degree_zero = True
# TODO: We want to check through the
# node_dependent.dependencies list but if it's long and we
# always start at the beginning, then we get O(n^2) behaviour.
for node_dependent_dependency in node_dependent.dependencies:
if not node_dependent_dependency.ref in flat_list:
            # The dependent has one or more dependencies not in flat_list.  There
# will be more chances to add it to flat_list when examining
# it again as a dependent of those other dependencies, provided
# that there are no cycles.
is_in_degree_zero = False
break
if is_in_degree_zero:
# All of the dependent's dependencies are already in flat_list. Add
# it to in_degree_zeros where it will be processed in a future
# iteration of the outer loop.
in_degree_zeros.add(node_dependent)
return list(flat_list)
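  # Illustrative sketch of FlattenToList (hypothetical graph): if A and B both
  # depend on C, and C depends on D, flattening from the root yields an order
  # such as [D, C, A, B] -- every ref appears after all of its dependencies and
  # before all of its dependents. A and B may come out in either order because
  # in_degree_zeros is an unordered set.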
def FindCycles(self):
"""
Returns a list of cycles in the graph, where each cycle is its own list.
"""
results = []
visited = set()
def Visit(node, path):
for child in node.dependents:
if child in path:
results.append([child] + path[:path.index(child) + 1])
elif not child in visited:
visited.add(child)
Visit(child, [child] + path)
visited.add(self)
Visit(self, [self])
return results
def DirectDependencies(self, dependencies=None):
"""Returns a list of just direct dependencies."""
    if dependencies is None:
dependencies = []
for dependency in self.dependencies:
# Check for None, corresponding to the root node.
      if dependency.ref is not None and dependency.ref not in dependencies:
dependencies.append(dependency.ref)
return dependencies
def _AddImportedDependencies(self, targets, dependencies=None):
"""Given a list of direct dependencies, adds indirect dependencies that
other dependencies have declared to export their settings.
This method does not operate on self. Rather, it operates on the list
of dependencies in the |dependencies| argument. For each dependency in
that list, if any declares that it exports the settings of one of its
own dependencies, those dependencies whose settings are "passed through"
are added to the list. As new items are added to the list, they too will
be processed, so it is possible to import settings through multiple levels
of dependencies.
    This method is not terribly useful on its own; it depends on being
"primed" with a list of direct dependencies such as one provided by
DirectDependencies. DirectAndImportedDependencies is intended to be the
public entry point.
"""
    if dependencies is None:
dependencies = []
index = 0
while index < len(dependencies):
dependency = dependencies[index]
dependency_dict = targets[dependency]
# Add any dependencies whose settings should be imported to the list
# if not already present. Newly-added items will be checked for
# their own imports when the list iteration reaches them.
# Rather than simply appending new items, insert them after the
# dependency that exported them. This is done to more closely match
# the depth-first method used by DeepDependencies.
add_index = 1
for imported_dependency in \
dependency_dict.get('export_dependent_settings', []):
if imported_dependency not in dependencies:
dependencies.insert(index + add_index, imported_dependency)
add_index = add_index + 1
index = index + 1
return dependencies
def DirectAndImportedDependencies(self, targets, dependencies=None):
"""Returns a list of a target's direct dependencies and all indirect
dependencies that a dependency has advertised settings should be exported
through the dependency for.
"""
dependencies = self.DirectDependencies(dependencies)
return self._AddImportedDependencies(targets, dependencies)
def DeepDependencies(self, dependencies=None):
"""Returns an OrderedSet of all of a target's dependencies, recursively."""
if dependencies is None:
# Using a list to get ordered output and a set to do fast "is it
# already added" checks.
dependencies = OrderedSet()
for dependency in self.dependencies:
# Check for None, corresponding to the root node.
if dependency.ref is None:
continue
if dependency.ref not in dependencies:
dependencies.add(dependency.ref)
dependency.DeepDependencies(dependencies)
return dependencies
def _LinkDependenciesInternal(self, targets, include_shared_libraries,
dependencies=None, initial=True):
"""Returns an OrderedSet of dependency targets that are linked
into this target.
This function has a split personality, depending on the setting of
|initial|. Outside callers should always leave |initial| at its default
setting.
When adding a target to the list of dependencies, this function will
recurse into itself with |initial| set to False, to collect dependencies
that are linked into the linkable target for which the list is being built.
If |include_shared_libraries| is False, the resulting dependencies will not
include shared_library targets that are linked into this target.
"""
if dependencies is None:
# Using a list to get ordered output and a set to do fast "is it
# already added" checks.
dependencies = OrderedSet()
# Check for None, corresponding to the root node.
if self.ref is None:
return dependencies
# It's kind of sucky that |targets| has to be passed into this function,
# but that's presently the easiest way to access the target dicts so that
# this function can find target types.
if 'target_name' not in targets[self.ref]:
raise GypError("Missing 'target_name' field in target.")
if 'type' not in targets[self.ref]:
raise GypError("Missing 'type' field in target %s" %
targets[self.ref]['target_name'])
target_type = targets[self.ref]['type']
is_linkable = target_type in linkable_types
if initial and not is_linkable:
# If this is the first target being examined and it's not linkable,
# return an empty list of link dependencies, because the link
# dependencies are intended to apply to the target itself (initial is
# True) and this target won't be linked.
return dependencies
# Don't traverse 'none' targets if explicitly excluded.
if (target_type == 'none' and
not targets[self.ref].get('dependencies_traverse', True)):
dependencies.add(self.ref)
return dependencies
# Executables and loadable modules are already fully and finally linked.
# Nothing else can be a link dependency of them, there can only be
# dependencies in the sense that a dependent target might run an
# executable or load the loadable_module.
if not initial and target_type in ('executable', 'loadable_module'):
return dependencies
# Shared libraries are already fully linked. They should only be included
# in |dependencies| when adjusting static library dependencies (in order to
# link against the shared_library's import lib), but should not be included
# in |dependencies| when propagating link_settings.
# The |include_shared_libraries| flag controls which of these two cases we
# are handling.
if (not initial and target_type == 'shared_library' and
not include_shared_libraries):
return dependencies
# The target is linkable, add it to the list of link dependencies.
if self.ref not in dependencies:
dependencies.add(self.ref)
if initial or not is_linkable:
# If this is a subsequent target and it's linkable, don't look any
# further for linkable dependencies, as they'll already be linked into
      # this linkable target.  Always look at dependencies of the initial
# target, and always look at dependencies of non-linkables.
for dependency in self.dependencies:
dependency._LinkDependenciesInternal(targets,
include_shared_libraries,
dependencies, False)
return dependencies
def DependenciesForLinkSettings(self, targets):
"""
Returns a list of dependency targets whose link_settings should be merged
into this target.
"""
# TODO(sbaig) Currently, chrome depends on the bug that shared libraries'
# link_settings are propagated. So for now, we will allow it, unless the
# 'allow_sharedlib_linksettings_propagation' flag is explicitly set to
# False. Once chrome is fixed, we can remove this flag.
include_shared_libraries = \
targets[self.ref].get('allow_sharedlib_linksettings_propagation', True)
return self._LinkDependenciesInternal(targets, include_shared_libraries)
def DependenciesToLinkAgainst(self, targets):
"""
Returns a list of dependency targets that are linked into this target.
"""
return self._LinkDependenciesInternal(targets, True)
def BuildDependencyList(targets):
# Create a DependencyGraphNode for each target. Put it into a dict for easy
# access.
dependency_nodes = {}
for target, spec in targets.iteritems():
if target not in dependency_nodes:
dependency_nodes[target] = DependencyGraphNode(target)
# Set up the dependency links. Targets that have no dependencies are treated
# as dependent on root_node.
root_node = DependencyGraphNode(None)
for target, spec in targets.iteritems():
target_node = dependency_nodes[target]
target_build_file = gyp.common.BuildFile(target)
dependencies = spec.get('dependencies')
if not dependencies:
target_node.dependencies = [root_node]
root_node.dependents.append(target_node)
else:
for dependency in dependencies:
dependency_node = dependency_nodes.get(dependency)
if not dependency_node:
raise GypError("Dependency '%s' not found while "
"trying to load target %s" % (dependency, target))
target_node.dependencies.append(dependency_node)
dependency_node.dependents.append(target_node)
flat_list = root_node.FlattenToList()
# If there's anything left unvisited, there must be a circular dependency
# (cycle).
if len(flat_list) != len(targets):
if not root_node.dependents:
# If all targets have dependencies, add the first target as a dependent
# of root_node so that the cycle can be discovered from root_node.
target = targets.keys()[0]
target_node = dependency_nodes[target]
target_node.dependencies.append(root_node)
root_node.dependents.append(target_node)
cycles = []
for cycle in root_node.FindCycles():
paths = [node.ref for node in cycle]
cycles.append('Cycle: %s' % ' -> '.join(paths))
raise DependencyGraphNode.CircularException(
'Cycles in dependency graph detected:\n' + '\n'.join(cycles))
return [dependency_nodes, flat_list]
def VerifyNoGYPFileCircularDependencies(targets):
# Create a DependencyGraphNode for each gyp file containing a target. Put
# it into a dict for easy access.
dependency_nodes = {}
for target in targets.iterkeys():
build_file = gyp.common.BuildFile(target)
if not build_file in dependency_nodes:
dependency_nodes[build_file] = DependencyGraphNode(build_file)
# Set up the dependency links.
for target, spec in targets.iteritems():
build_file = gyp.common.BuildFile(target)
build_file_node = dependency_nodes[build_file]
target_dependencies = spec.get('dependencies', [])
for dependency in target_dependencies:
try:
dependency_build_file = gyp.common.BuildFile(dependency)
except GypError, e:
gyp.common.ExceptionAppend(
e, 'while computing dependencies of .gyp file %s' % build_file)
raise
if dependency_build_file == build_file:
# A .gyp file is allowed to refer back to itself.
continue
dependency_node = dependency_nodes.get(dependency_build_file)
if not dependency_node:
raise GypError("Dependancy '%s' not found" % dependency_build_file)
if dependency_node not in build_file_node.dependencies:
build_file_node.dependencies.append(dependency_node)
dependency_node.dependents.append(build_file_node)
# Files that have no dependencies are treated as dependent on root_node.
root_node = DependencyGraphNode(None)
for build_file_node in dependency_nodes.itervalues():
if len(build_file_node.dependencies) == 0:
build_file_node.dependencies.append(root_node)
root_node.dependents.append(build_file_node)
flat_list = root_node.FlattenToList()
# If there's anything left unvisited, there must be a circular dependency
# (cycle).
if len(flat_list) != len(dependency_nodes):
if not root_node.dependents:
# If all files have dependencies, add the first file as a dependent
# of root_node so that the cycle can be discovered from root_node.
file_node = dependency_nodes.values()[0]
file_node.dependencies.append(root_node)
root_node.dependents.append(file_node)
cycles = []
for cycle in root_node.FindCycles():
paths = [node.ref for node in cycle]
cycles.append('Cycle: %s' % ' -> '.join(paths))
raise DependencyGraphNode.CircularException(
'Cycles in .gyp file dependency graph detected:\n' + '\n'.join(cycles))
def DoDependentSettings(key, flat_list, targets, dependency_nodes):
# key should be one of all_dependent_settings, direct_dependent_settings,
# or link_settings.
for target in flat_list:
target_dict = targets[target]
build_file = gyp.common.BuildFile(target)
if key == 'all_dependent_settings':
dependencies = dependency_nodes[target].DeepDependencies()
elif key == 'direct_dependent_settings':
dependencies = \
dependency_nodes[target].DirectAndImportedDependencies(targets)
elif key == 'link_settings':
dependencies = \
dependency_nodes[target].DependenciesForLinkSettings(targets)
else:
raise GypError("DoDependentSettings doesn't know how to determine "
'dependencies for ' + key)
for dependency in dependencies:
dependency_dict = targets[dependency]
if not key in dependency_dict:
continue
dependency_build_file = gyp.common.BuildFile(dependency)
MergeDicts(target_dict, dependency_dict[key],
build_file, dependency_build_file)
def AdjustStaticLibraryDependencies(flat_list, targets, dependency_nodes,
sort_dependencies):
# Recompute target "dependencies" properties. For each static library
# target, remove "dependencies" entries referring to other static libraries,
# unless the dependency has the "hard_dependency" attribute set. For each
# linkable target, add a "dependencies" entry referring to all of the
  # target's computed list of link dependencies (including static libraries),
  # if no such entry is already present.
for target in flat_list:
target_dict = targets[target]
target_type = target_dict['type']
if target_type == 'static_library':
if not 'dependencies' in target_dict:
continue
target_dict['dependencies_original'] = target_dict.get(
'dependencies', [])[:]
# A static library should not depend on another static library unless
# the dependency relationship is "hard," which should only be done when
# a dependent relies on some side effect other than just the build
# product, like a rule or action output. Further, if a target has a
# non-hard dependency, but that dependency exports a hard dependency,
# the non-hard dependency can safely be removed, but the exported hard
# dependency must be added to the target to keep the same dependency
# ordering.
dependencies = \
dependency_nodes[target].DirectAndImportedDependencies(targets)
index = 0
while index < len(dependencies):
dependency = dependencies[index]
dependency_dict = targets[dependency]
# Remove every non-hard static library dependency and remove every
# non-static library dependency that isn't a direct dependency.
if (dependency_dict['type'] == 'static_library' and \
not dependency_dict.get('hard_dependency', False)) or \
(dependency_dict['type'] != 'static_library' and \
not dependency in target_dict['dependencies']):
# Take the dependency out of the list, and don't increment index
# because the next dependency to analyze will shift into the index
# formerly occupied by the one being removed.
del dependencies[index]
else:
index = index + 1
# Update the dependencies. If the dependencies list is empty, it's not
# needed, so unhook it.
if len(dependencies) > 0:
target_dict['dependencies'] = dependencies
else:
del target_dict['dependencies']
elif target_type in linkable_types:
# Get a list of dependency targets that should be linked into this
# target. Add them to the dependencies list if they're not already
# present.
link_dependencies = \
dependency_nodes[target].DependenciesToLinkAgainst(targets)
for dependency in link_dependencies:
if dependency == target:
continue
if not 'dependencies' in target_dict:
target_dict['dependencies'] = []
if not dependency in target_dict['dependencies']:
target_dict['dependencies'].append(dependency)
# Sort the dependencies list in the order from dependents to dependencies.
# e.g. If A and B depend on C and C depends on D, sort them in A, B, C, D.
# Note: flat_list is already sorted in the order from dependencies to
# dependents.
if sort_dependencies and 'dependencies' in target_dict:
target_dict['dependencies'] = [dep for dep in reversed(flat_list)
if dep in target_dict['dependencies']]
# Initialize this here to speed up MakePathRelative.
exception_re = re.compile(r'''["']?[-/$<>^]''')
def MakePathRelative(to_file, fro_file, item):
# If item is a relative path, it's relative to the build file dict that it's
# coming from. Fix it up to make it relative to the build file dict that
# it's going into.
# Exception: any |item| that begins with these special characters is
# returned without modification.
# / Used when a path is already absolute (shortcut optimization;
# such paths would be returned as absolute anyway)
# $ Used for build environment variables
# - Used for some build environment flags (such as -lapr-1 in a
# "libraries" section)
# < Used for our own variable and command expansions (see ExpandVariables)
# > Used for our own variable and command expansions (see ExpandVariables)
# ^ Used for our own variable and command expansions (see ExpandVariables)
#
  #   " or '  Used when a value is quoted.  If either is present, then we
# check the second character instead.
#
if to_file == fro_file or exception_re.match(item):
return item
else:
# TODO(dglazkov) The backslash/forward-slash replacement at the end is a
# temporary measure. This should really be addressed by keeping all paths
# in POSIX until actual project generation.
ret = os.path.normpath(os.path.join(
gyp.common.RelativePath(os.path.dirname(fro_file),
os.path.dirname(to_file)),
item)).replace('\\', '/')
if item[-1] == '/':
ret += '/'
return ret
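# Illustrative sketch (hypothetical paths): merging the item 'icon.png' from
# 'src/gfx/gfx.gyp' into 'src/app/app.gyp' produces '../gfx/icon.png', while
# items such as '-lpthread', '$(SDK)/lib', '<(some_var)' or '/usr/lib' match
# exception_re and are returned unchanged.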
def MergeLists(to, fro, to_file, fro_file, is_paths=False, append=True):
  # The Python documentation recommends that objects which do not support
  # hashing set __hash__ to None.  Python library objects follow this rule.
is_hashable = lambda val: val.__hash__
# If x is hashable, returns whether x is in s. Else returns whether x is in l.
def is_in_set_or_list(x, s, l):
if is_hashable(x):
return x in s
return x in l
prepend_index = 0
# Make membership testing of hashables in |to| (in particular, strings)
# faster.
hashable_to_set = set(x for x in to if is_hashable(x))
for item in fro:
singleton = False
if type(item) in (str, int):
# The cheap and easy case.
if is_paths:
to_item = MakePathRelative(to_file, fro_file, item)
else:
to_item = item
if not (type(item) is str and item.startswith('-')):
# Any string that doesn't begin with a "-" is a singleton - it can
# only appear once in a list, to be enforced by the list merge append
# or prepend.
singleton = True
elif type(item) is dict:
# Make a copy of the dictionary, continuing to look for paths to fix.
# The other intelligent aspects of merge processing won't apply because
# item is being merged into an empty dict.
to_item = {}
MergeDicts(to_item, item, to_file, fro_file)
elif type(item) is list:
# Recurse, making a copy of the list. If the list contains any
# descendant dicts, path fixing will occur. Note that here, custom
# values for is_paths and append are dropped; those are only to be
# applied to |to| and |fro|, not sublists of |fro|. append shouldn't
# matter anyway because the new |to_item| list is empty.
to_item = []
MergeLists(to_item, item, to_file, fro_file)
else:
raise TypeError(
'Attempt to merge list item of unsupported type ' + \
item.__class__.__name__)
if append:
# If appending a singleton that's already in the list, don't append.
# This ensures that the earliest occurrence of the item will stay put.
if not singleton or not is_in_set_or_list(to_item, hashable_to_set, to):
to.append(to_item)
if is_hashable(to_item):
hashable_to_set.add(to_item)
else:
# If prepending a singleton that's already in the list, remove the
# existing instance and proceed with the prepend. This ensures that the
# item appears at the earliest possible position in the list.
while singleton and to_item in to:
to.remove(to_item)
# Don't just insert everything at index 0. That would prepend the new
# items to the list in reverse order, which would be an unwelcome
# surprise.
to.insert(prepend_index, to_item)
if is_hashable(to_item):
hashable_to_set.add(to_item)
prepend_index = prepend_index + 1
def MergeDicts(to, fro, to_file, fro_file):
# I wanted to name the parameter "from" but it's a Python keyword...
for k, v in fro.iteritems():
# It would be nice to do "if not k in to: to[k] = v" but that wouldn't give
# copy semantics. Something else may want to merge from the |fro| dict
# later, and having the same dict ref pointed to twice in the tree isn't
# what anyone wants considering that the dicts may subsequently be
# modified.
if k in to:
bad_merge = False
if type(v) in (str, int):
if type(to[k]) not in (str, int):
bad_merge = True
elif type(v) is not type(to[k]):
bad_merge = True
if bad_merge:
raise TypeError(
'Attempt to merge dict value of type ' + v.__class__.__name__ + \
' into incompatible type ' + to[k].__class__.__name__ + \
' for key ' + k)
if type(v) in (str, int):
# Overwrite the existing value, if any. Cheap and easy.
is_path = IsPathSection(k)
if is_path:
to[k] = MakePathRelative(to_file, fro_file, v)
else:
to[k] = v
elif type(v) is dict:
# Recurse, guaranteeing copies will be made of objects that require it.
if not k in to:
to[k] = {}
MergeDicts(to[k], v, to_file, fro_file)
elif type(v) is list:
# Lists in dicts can be merged with different policies, depending on
# how the key in the "from" dict (k, the from-key) is written.
#
# If the from-key has ...the to-list will have this action
# this character appended:... applied when receiving the from-list:
# = replace
# + prepend
# ? set, only if to-list does not yet exist
# (none) append
#
# This logic is list-specific, but since it relies on the associated
# dict key, it's checked in this dict-oriented function.
ext = k[-1]
append = True
if ext == '=':
list_base = k[:-1]
lists_incompatible = [list_base, list_base + '?']
to[list_base] = []
elif ext == '+':
list_base = k[:-1]
lists_incompatible = [list_base + '=', list_base + '?']
append = False
elif ext == '?':
list_base = k[:-1]
lists_incompatible = [list_base, list_base + '=', list_base + '+']
else:
list_base = k
lists_incompatible = [list_base + '=', list_base + '?']
# Some combinations of merge policies appearing together are meaningless.
# It's stupid to replace and append simultaneously, for example. Append
# and prepend are the only policies that can coexist.
for list_incompatible in lists_incompatible:
if list_incompatible in fro:
raise GypError('Incompatible list policies ' + k + ' and ' +
list_incompatible)
if list_base in to:
if ext == '?':
# If the key ends in "?", the list will only be merged if it doesn't
# already exist.
continue
elif type(to[list_base]) is not list:
# This may not have been checked above if merging in a list with an
# extension character.
raise TypeError(
'Attempt to merge dict value of type ' + v.__class__.__name__ + \
' into incompatible type ' + to[list_base].__class__.__name__ + \
          ' for key ' + list_base + ' (' + k + ')')
else:
to[list_base] = []
# Call MergeLists, which will make copies of objects that require it.
# MergeLists can recurse back into MergeDicts, although this will be
# to make copies of dicts (with paths fixed), there will be no
# subsequent dict "merging" once entering a list because lists are
# always replaced, appended to, or prepended to.
is_paths = IsPathSection(list_base)
MergeLists(to[list_base], v, to_file, fro_file, is_paths, append)
else:
raise TypeError(
'Attempt to merge dict value of unsupported type ' + \
v.__class__.__name__ + ' for key ' + k)
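# Illustrative sketch of the list-merge policies (hypothetical keys): merging
# {'defines': ['B']} into {'defines': ['A']} appends, giving ['A', 'B'];
# {'defines+': ['B']} prepends, giving ['B', 'A']; {'defines=': ['B']} replaces,
# giving ['B']; and {'defines?': ['B']} is skipped because a 'defines' list
# already exists in the destination dict.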
def MergeConfigWithInheritance(new_configuration_dict, build_file,
target_dict, configuration, visited):
  # Skip if previously visited.
if configuration in visited:
return
# Look at this configuration.
configuration_dict = target_dict['configurations'][configuration]
# Merge in parents.
for parent in configuration_dict.get('inherit_from', []):
MergeConfigWithInheritance(new_configuration_dict, build_file,
target_dict, parent, visited + [configuration])
# Merge it into the new config.
MergeDicts(new_configuration_dict, configuration_dict,
build_file, build_file)
# Drop abstract.
if 'abstract' in new_configuration_dict:
del new_configuration_dict['abstract']
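# Illustrative sketch (hypothetical configurations): with
#   'configurations': {'Base': {'abstract': 1, 'defines': ['COMMON']},
#                      'Debug': {'inherit_from': ['Base'], 'defines': ['DBG']}}
# MergeConfigWithInheritance gives the merged Debug configuration a 'defines'
# list of ['COMMON', 'DBG'] (parents are merged first), and SetUpConfigurations
# later drops the abstract 'Base' configuration entirely.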
def SetUpConfigurations(target, target_dict):
# key_suffixes is a list of key suffixes that might appear on key names.
# These suffixes are handled in conditional evaluations (for =, +, and ?)
# and rules/exclude processing (for ! and /). Keys with these suffixes
# should be treated the same as keys without.
key_suffixes = ['=', '+', '?', '!', '/']
build_file = gyp.common.BuildFile(target)
# Provide a single configuration by default if none exists.
# TODO(mark): Signal an error if default_configurations exists but
# configurations does not.
if not 'configurations' in target_dict:
target_dict['configurations'] = {'Default': {}}
if not 'default_configuration' in target_dict:
concrete = [i for (i, config) in target_dict['configurations'].iteritems()
if not config.get('abstract')]
target_dict['default_configuration'] = sorted(concrete)[0]
merged_configurations = {}
configs = target_dict['configurations']
for (configuration, old_configuration_dict) in configs.iteritems():
# Skip abstract configurations (saves work only).
if old_configuration_dict.get('abstract'):
continue
# Configurations inherit (most) settings from the enclosing target scope.
# Get the inheritance relationship right by making a copy of the target
# dict.
new_configuration_dict = {}
for (key, target_val) in target_dict.iteritems():
key_ext = key[-1:]
if key_ext in key_suffixes:
key_base = key[:-1]
else:
key_base = key
if not key_base in non_configuration_keys:
new_configuration_dict[key] = gyp.simple_copy.deepcopy(target_val)
# Merge in configuration (with all its parents first).
MergeConfigWithInheritance(new_configuration_dict, build_file,
target_dict, configuration, [])
merged_configurations[configuration] = new_configuration_dict
# Put the new configurations back into the target dict as a configuration.
for configuration in merged_configurations.keys():
target_dict['configurations'][configuration] = (
merged_configurations[configuration])
# Now drop all the abstract ones.
for configuration in target_dict['configurations'].keys():
old_configuration_dict = target_dict['configurations'][configuration]
if old_configuration_dict.get('abstract'):
del target_dict['configurations'][configuration]
# Now that all of the target's configurations have been built, go through
# the target dict's keys and remove everything that's been moved into a
# "configurations" section.
delete_keys = []
for key in target_dict:
key_ext = key[-1:]
if key_ext in key_suffixes:
key_base = key[:-1]
else:
key_base = key
if not key_base in non_configuration_keys:
delete_keys.append(key)
for key in delete_keys:
del target_dict[key]
# Check the configurations to see if they contain invalid keys.
for configuration in target_dict['configurations'].keys():
configuration_dict = target_dict['configurations'][configuration]
for key in configuration_dict.keys():
if key in invalid_configuration_keys:
raise GypError('%s not allowed in the %s configuration, found in '
'target %s' % (key, configuration, target))
def ProcessListFiltersInDict(name, the_dict):
"""Process regular expression and exclusion-based filters on lists.
An exclusion list is in a dict key named with a trailing "!", like
"sources!". Every item in such a list is removed from the associated
main list, which in this example, would be "sources". Removed items are
placed into a "sources_excluded" list in the dict.
Regular expression (regex) filters are contained in dict keys named with a
trailing "/", such as "sources/" to operate on the "sources" list. Regex
filters in a dict take the form:
'sources/': [ ['exclude', '_(linux|mac|win)\\.cc$'],
['include', '_mac\\.cc$'] ],
The first filter says to exclude all files ending in _linux.cc, _mac.cc, and
_win.cc. The second filter then includes all files ending in _mac.cc that
are now or were once in the "sources" list. Items matching an "exclude"
filter are subject to the same processing as would occur if they were listed
by name in an exclusion list (ending in "!"). Items matching an "include"
filter are brought back into the main list if previously excluded by an
exclusion list or exclusion regex filter. Subsequent matching "exclude"
patterns can still cause items to be excluded after matching an "include".
"""
# Look through the dictionary for any lists whose keys end in "!" or "/".
# These are lists that will be treated as exclude lists and regular
# expression-based exclude/include lists. Collect the lists that are
# needed first, looking for the lists that they operate on, and assemble
  # them into |lists|.  This is done in a separate loop up front, because
# the _included and _excluded keys need to be added to the_dict, and that
# can't be done while iterating through it.
lists = []
del_lists = []
for key, value in the_dict.iteritems():
operation = key[-1]
if operation != '!' and operation != '/':
continue
if type(value) is not list:
raise ValueError(name + ' key ' + key + ' must be list, not ' + \
value.__class__.__name__)
list_key = key[:-1]
if list_key not in the_dict:
# This happens when there's a list like "sources!" but no corresponding
# "sources" list. Since there's nothing for it to operate on, queue up
# the "sources!" list for deletion now.
del_lists.append(key)
continue
if type(the_dict[list_key]) is not list:
value = the_dict[list_key]
raise ValueError(name + ' key ' + list_key + \
' must be list, not ' + \
value.__class__.__name__ + ' when applying ' + \
{'!': 'exclusion', '/': 'regex'}[operation])
if not list_key in lists:
lists.append(list_key)
# Delete the lists that are known to be unneeded at this point.
for del_list in del_lists:
del the_dict[del_list]
for list_key in lists:
the_list = the_dict[list_key]
# Initialize the list_actions list, which is parallel to the_list. Each
# item in list_actions identifies whether the corresponding item in
# the_list should be excluded, unconditionally preserved (included), or
# whether no exclusion or inclusion has been applied. Items for which
# no exclusion or inclusion has been applied (yet) have value -1, items
# excluded have value 0, and items included have value 1. Includes and
# excludes override previous actions. All items in list_actions are
# initialized to -1 because no excludes or includes have been processed
# yet.
list_actions = list((-1,) * len(the_list))
exclude_key = list_key + '!'
if exclude_key in the_dict:
for exclude_item in the_dict[exclude_key]:
for index in xrange(0, len(the_list)):
if exclude_item == the_list[index]:
# This item matches the exclude_item, so set its action to 0
# (exclude).
list_actions[index] = 0
# The "whatever!" list is no longer needed, dump it.
del the_dict[exclude_key]
regex_key = list_key + '/'
if regex_key in the_dict:
for regex_item in the_dict[regex_key]:
[action, pattern] = regex_item
pattern_re = re.compile(pattern)
if action == 'exclude':
# This item matches an exclude regex, so set its value to 0 (exclude).
action_value = 0
elif action == 'include':
# This item matches an include regex, so set its value to 1 (include).
action_value = 1
else:
# This is an action that doesn't make any sense.
raise ValueError('Unrecognized action ' + action + ' in ' + name + \
' key ' + regex_key)
for index in xrange(0, len(the_list)):
list_item = the_list[index]
if list_actions[index] == action_value:
# Even if the regex matches, nothing will change so continue (regex
# searches are expensive).
continue
if pattern_re.search(list_item):
# Regular expression match.
list_actions[index] = action_value
# The "whatever/" list is no longer needed, dump it.
del the_dict[regex_key]
# Add excluded items to the excluded list.
#
# Note that exclude_key ("sources!") is different from excluded_key
# ("sources_excluded"). The exclude_key list is input and it was already
# processed and deleted; the excluded_key list is output and it's about
# to be created.
excluded_key = list_key + '_excluded'
if excluded_key in the_dict:
raise GypError(name + ' key ' + excluded_key +
                     ' must not be present prior to applying '
                     'exclusion/regex filters for ' + list_key)
excluded_list = []
# Go backwards through the list_actions list so that as items are deleted,
# the indices of items that haven't been seen yet don't shift. That means
# that things need to be prepended to excluded_list to maintain them in the
# same order that they existed in the_list.
for index in xrange(len(list_actions) - 1, -1, -1):
if list_actions[index] == 0:
# Dump anything with action 0 (exclude). Keep anything with action 1
# (include) or -1 (no include or exclude seen for the item).
excluded_list.insert(0, the_list[index])
del the_list[index]
# If anything was excluded, put the excluded list into the_dict at
# excluded_key.
if len(excluded_list) > 0:
the_dict[excluded_key] = excluded_list
# Now recurse into subdicts and lists that may contain dicts.
for key, value in the_dict.iteritems():
if type(value) is dict:
ProcessListFiltersInDict(key, value)
elif type(value) is list:
ProcessListFiltersInList(key, value)
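# Illustrative sketch (hypothetical file names): given
#   {'sources': ['a.cc', 'b_win.cc', 'c_mac.cc'],
#    'sources!': ['a.cc'],
#    'sources/': [['exclude', '_(win|mac)\\.cc$'], ['include', '_mac\\.cc$']]}
# ProcessListFiltersInDict leaves {'sources': ['c_mac.cc'],
# 'sources_excluded': ['a.cc', 'b_win.cc']} behind and deletes the 'sources!'
# and 'sources/' keys.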
def ProcessListFiltersInList(name, the_list):
for item in the_list:
if type(item) is dict:
ProcessListFiltersInDict(name, item)
elif type(item) is list:
ProcessListFiltersInList(name, item)
def ValidateTargetType(target, target_dict):
"""Ensures the 'type' field on the target is one of the known types.
Arguments:
target: string, name of target.
target_dict: dict, target spec.
Raises an exception on error.
"""
VALID_TARGET_TYPES = ('executable', 'loadable_module',
'static_library', 'shared_library',
'none')
target_type = target_dict.get('type', None)
if target_type not in VALID_TARGET_TYPES:
raise GypError("Target %s has an invalid target type '%s'. "
"Must be one of %s." %
(target, target_type, '/'.join(VALID_TARGET_TYPES)))
if (target_dict.get('standalone_static_library', 0) and
not target_type == 'static_library'):
raise GypError('Target %s has type %s but standalone_static_library flag is'
' only valid for static_library type.' % (target,
target_type))
def ValidateRulesInTarget(target, target_dict, extra_sources_for_rules):
"""Ensures that the rules sections in target_dict are valid and consistent,
and determines which sources they apply to.
Arguments:
target: string, name of target.
target_dict: dict, target spec containing "rules" and "sources" lists.
extra_sources_for_rules: a list of keys to scan for rule matches in
addition to 'sources'.
"""
# Dicts to map between values found in rules' 'rule_name' and 'extension'
# keys and the rule dicts themselves.
rule_names = {}
rule_extensions = {}
rules = target_dict.get('rules', [])
for rule in rules:
# Make sure that there's no conflict among rule names and extensions.
rule_name = rule['rule_name']
if rule_name in rule_names:
raise GypError('rule %s exists in duplicate, target %s' %
(rule_name, target))
rule_names[rule_name] = rule
rule_extension = rule['extension']
if rule_extension.startswith('.'):
rule_extension = rule_extension[1:]
if rule_extension in rule_extensions:
raise GypError(('extension %s associated with multiple rules, ' +
'target %s rules %s and %s') %
(rule_extension, target,
rule_extensions[rule_extension]['rule_name'],
rule_name))
rule_extensions[rule_extension] = rule
# Make sure rule_sources isn't already there. It's going to be
# created below if needed.
if 'rule_sources' in rule:
raise GypError(
'rule_sources must not exist in input, target %s rule %s' %
(target, rule_name))
rule_sources = []
source_keys = ['sources']
source_keys.extend(extra_sources_for_rules)
for source_key in source_keys:
for source in target_dict.get(source_key, []):
(source_root, source_extension) = os.path.splitext(source)
if source_extension.startswith('.'):
source_extension = source_extension[1:]
if source_extension == rule_extension:
rule_sources.append(source)
if len(rule_sources) > 0:
rule['rule_sources'] = rule_sources
def ValidateRunAsInTarget(target, target_dict, build_file):
target_name = target_dict.get('target_name')
run_as = target_dict.get('run_as')
if not run_as:
return
if type(run_as) is not dict:
raise GypError("The 'run_as' in target %s from file %s should be a "
"dictionary." %
(target_name, build_file))
action = run_as.get('action')
if not action:
raise GypError("The 'run_as' in target %s from file %s must have an "
"'action' section." %
(target_name, build_file))
if type(action) is not list:
raise GypError("The 'action' for 'run_as' in target %s from file %s "
"must be a list." %
(target_name, build_file))
working_directory = run_as.get('working_directory')
if working_directory and type(working_directory) is not str:
raise GypError("The 'working_directory' for 'run_as' in target %s "
"in file %s should be a string." %
(target_name, build_file))
environment = run_as.get('environment')
if environment and type(environment) is not dict:
raise GypError("The 'environment' for 'run_as' in target %s "
"in file %s should be a dictionary." %
(target_name, build_file))
def ValidateActionsInTarget(target, target_dict, build_file):
'''Validates the inputs to the actions in a target.'''
target_name = target_dict.get('target_name')
actions = target_dict.get('actions', [])
for action in actions:
action_name = action.get('action_name')
if not action_name:
raise GypError("Anonymous action in target %s. "
"An action must have an 'action_name' field." %
target_name)
inputs = action.get('inputs', None)
if inputs is None:
raise GypError('Action in target %s has no inputs.' % target_name)
action_command = action.get('action')
if action_command and not action_command[0]:
raise GypError("Empty action as command in target %s." % target_name)
def TurnIntIntoStrInDict(the_dict):
"""Given dict the_dict, recursively converts all integers into strings.
"""
  # Use items instead of iteritems because the dict is modified while
  # iterating; items() makes a copy up front, and there's no need to look at
  # reinserted keys and their associated values anyway.
for k, v in the_dict.items():
if type(v) is int:
v = str(v)
the_dict[k] = v
elif type(v) is dict:
TurnIntIntoStrInDict(v)
elif type(v) is list:
TurnIntIntoStrInList(v)
if type(k) is int:
del the_dict[k]
the_dict[str(k)] = v
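# For illustration (hypothetical dict): TurnIntIntoStrInDict applied to
# {'answer': 42, 7: ['a', 3, {'n': 1}]} mutates it in place into
# {'answer': '42', '7': ['a', '3', {'n': '1'}]} -- integer keys and values are
# both converted, recursively.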
def TurnIntIntoStrInList(the_list):
"""Given list the_list, recursively converts all integers into strings.
"""
for index in xrange(0, len(the_list)):
item = the_list[index]
if type(item) is int:
the_list[index] = str(item)
elif type(item) is dict:
TurnIntIntoStrInDict(item)
elif type(item) is list:
TurnIntIntoStrInList(item)
def PruneUnwantedTargets(targets, flat_list, dependency_nodes, root_targets,
data):
"""Return only the targets that are deep dependencies of |root_targets|."""
qualified_root_targets = []
for target in root_targets:
target = target.strip()
qualified_targets = gyp.common.FindQualifiedTargets(target, flat_list)
if not qualified_targets:
raise GypError("Could not find target %s" % target)
qualified_root_targets.extend(qualified_targets)
wanted_targets = {}
for target in qualified_root_targets:
wanted_targets[target] = targets[target]
for dependency in dependency_nodes[target].DeepDependencies():
wanted_targets[dependency] = targets[dependency]
wanted_flat_list = [t for t in flat_list if t in wanted_targets]
# Prune unwanted targets from each build_file's data dict.
for build_file in data['target_build_files']:
if not 'targets' in data[build_file]:
continue
new_targets = []
for target in data[build_file]['targets']:
qualified_name = gyp.common.QualifiedTarget(build_file,
target['target_name'],
target['toolset'])
if qualified_name in wanted_targets:
new_targets.append(target)
data[build_file]['targets'] = new_targets
return wanted_targets, wanted_flat_list
def VerifyNoCollidingTargets(targets):
"""Verify that no two targets in the same directory share the same name.
Arguments:
targets: A list of targets in the form 'path/to/file.gyp:target_name'.
"""
# Keep a dict going from 'subdirectory:target_name' to 'foo.gyp'.
used = {}
for target in targets:
# Separate out 'path/to/file.gyp, 'target_name' from
# 'path/to/file.gyp:target_name'.
path, name = target.rsplit(':', 1)
# Separate out 'path/to', 'file.gyp' from 'path/to/file.gyp'.
subdir, gyp = os.path.split(path)
# Use '.' for the current directory '', so that the error messages make
# more sense.
if not subdir:
subdir = '.'
# Prepare a key like 'path/to:target_name'.
key = subdir + ':' + name
if key in used:
# Complain if this target is already used.
raise GypError('Duplicate target name "%s" in directory "%s" used both '
'in "%s" and "%s".' % (name, subdir, gyp, used[key]))
used[key] = gyp
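# Illustrative sketch (hypothetical targets): 'a/b/foo.gyp:util' and
# 'a/b/bar.gyp:util' collide because both reduce to the key 'a/b:util', so
# VerifyNoCollidingTargets raises a GypError naming both foo.gyp and bar.gyp.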
def SetGeneratorGlobals(generator_input_info):
# Set up path_sections and non_configuration_keys with the default data plus
# the generator-specific data.
global path_sections
path_sections = set(base_path_sections)
path_sections.update(generator_input_info['path_sections'])
global non_configuration_keys
non_configuration_keys = base_non_configuration_keys[:]
non_configuration_keys.extend(generator_input_info['non_configuration_keys'])
global multiple_toolsets
multiple_toolsets = generator_input_info[
'generator_supports_multiple_toolsets']
global generator_filelist_paths
generator_filelist_paths = generator_input_info['generator_filelist_paths']
def Load(build_files, variables, includes, depth, generator_input_info, check,
circular_check, parallel, root_targets):
SetGeneratorGlobals(generator_input_info)
# A generator can have other lists (in addition to sources) be processed
# for rules.
extra_sources_for_rules = generator_input_info['extra_sources_for_rules']
# Load build files. This loads every target-containing build file into
# the |data| dictionary such that the keys to |data| are build file names,
# and the values are the entire build file contents after "early" or "pre"
# processing has been done and includes have been resolved.
# NOTE: data contains both "target" files (.gyp) and "includes" (.gypi), as
# well as meta-data (e.g. 'included_files' key). 'target_build_files' keeps
# track of the keys corresponding to "target" files.
data = {'target_build_files': set()}
# Normalize paths everywhere. This is important because paths will be
# used as keys to the data dict and for references between input files.
build_files = set(map(os.path.normpath, build_files))
if parallel:
LoadTargetBuildFilesParallel(build_files, data, variables, includes, depth,
check, generator_input_info)
else:
aux_data = {}
for build_file in build_files:
try:
LoadTargetBuildFile(build_file, data, aux_data,
variables, includes, depth, check, True)
except Exception, e:
gyp.common.ExceptionAppend(e, 'while trying to load %s' % build_file)
raise
# Build a dict to access each target's subdict by qualified name.
targets = BuildTargetsDict(data)
# Fully qualify all dependency links.
QualifyDependencies(targets)
# Remove self-dependencies from targets that have 'prune_self_dependencies'
# set to 1.
RemoveSelfDependencies(targets)
# Expand dependencies specified as build_file:*.
ExpandWildcardDependencies(targets, data)
# Remove all dependencies marked as 'link_dependency' from the targets of
# type 'none'.
RemoveLinkDependenciesFromNoneTargets(targets)
# Apply exclude (!) and regex (/) list filters only for dependency_sections.
for target_name, target_dict in targets.iteritems():
tmp_dict = {}
for key_base in dependency_sections:
for op in ('', '!', '/'):
key = key_base + op
if key in target_dict:
tmp_dict[key] = target_dict[key]
del target_dict[key]
ProcessListFiltersInDict(target_name, tmp_dict)
# Write the results back to |target_dict|.
for key in tmp_dict:
target_dict[key] = tmp_dict[key]
# Make sure every dependency appears at most once.
RemoveDuplicateDependencies(targets)
if circular_check:
# Make sure that any targets in a.gyp don't contain dependencies in other
# .gyp files that further depend on a.gyp.
VerifyNoGYPFileCircularDependencies(targets)
[dependency_nodes, flat_list] = BuildDependencyList(targets)
if root_targets:
# Remove, from |targets| and |flat_list|, the targets that are not deep
# dependencies of the targets specified in |root_targets|.
targets, flat_list = PruneUnwantedTargets(
targets, flat_list, dependency_nodes, root_targets, data)
# Check that no two targets in the same directory have the same name.
VerifyNoCollidingTargets(flat_list)
# Handle dependent settings of various types.
for settings_type in ['all_dependent_settings',
'direct_dependent_settings',
'link_settings']:
DoDependentSettings(settings_type, flat_list, targets, dependency_nodes)
# Take out the dependent settings now that they've been published to all
# of the targets that require them.
for target in flat_list:
if settings_type in targets[target]:
del targets[target][settings_type]
# Make sure static libraries don't declare dependencies on other static
# libraries, but that linkables depend on all unlinked static libraries
# that they need so that their link steps will be correct.
gii = generator_input_info
if gii['generator_wants_static_library_dependencies_adjusted']:
AdjustStaticLibraryDependencies(flat_list, targets, dependency_nodes,
gii['generator_wants_sorted_dependencies'])
# Apply "post"/"late"/"target" variable expansions and condition evaluations.
for target in flat_list:
target_dict = targets[target]
build_file = gyp.common.BuildFile(target)
ProcessVariablesAndConditionsInDict(
target_dict, PHASE_LATE, variables, build_file)
# Move everything that can go into a "configurations" section into one.
for target in flat_list:
target_dict = targets[target]
SetUpConfigurations(target, target_dict)
# Apply exclude (!) and regex (/) list filters.
for target in flat_list:
target_dict = targets[target]
ProcessListFiltersInDict(target, target_dict)
# Apply "latelate" variable expansions and condition evaluations.
for target in flat_list:
target_dict = targets[target]
build_file = gyp.common.BuildFile(target)
ProcessVariablesAndConditionsInDict(
target_dict, PHASE_LATELATE, variables, build_file)
# Make sure that the rules make sense, and build up rule_sources lists as
# needed. Not all generators will need to use the rule_sources lists, but
# some may, and it seems best to build the list in a common spot.
# Also validate actions and run_as elements in targets.
for target in flat_list:
target_dict = targets[target]
build_file = gyp.common.BuildFile(target)
ValidateTargetType(target, target_dict)
ValidateRulesInTarget(target, target_dict, extra_sources_for_rules)
ValidateRunAsInTarget(target, target_dict, build_file)
ValidateActionsInTarget(target, target_dict, build_file)
# Generators might not expect ints. Turn them into strs.
TurnIntIntoStrInDict(data)
# TODO(mark): Return |data| for now because the generator needs a list of
# build files that came in. In the future, maybe it should just accept
# a list, and not the whole data dict.
return [flat_list, targets, data]
| gpl-3.0 |
Andy-Amoy/shadowsocks | shadowsocks/asyncdns.py | 655 | 17416 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2014-2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import os
import socket
import struct
import re
import logging
from shadowsocks import common, lru_cache, eventloop, shell
CACHE_SWEEP_INTERVAL = 30
VALID_HOSTNAME = re.compile(br"(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE)
common.patch_socket()
# rfc1035
# format
# +---------------------+
# | Header |
# +---------------------+
# | Question | the question for the name server
# +---------------------+
# | Answer | RRs answering the question
# +---------------------+
# | Authority | RRs pointing toward an authority
# +---------------------+
# | Additional | RRs holding additional information
# +---------------------+
#
# header
# 1 1 1 1 1 1
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | ID |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# |QR| Opcode |AA|TC|RD|RA| Z | RCODE |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | QDCOUNT |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | ANCOUNT |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | NSCOUNT |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | ARCOUNT |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
QTYPE_ANY = 255
QTYPE_A = 1
QTYPE_AAAA = 28
QTYPE_CNAME = 5
QTYPE_NS = 2
QCLASS_IN = 1
def build_address(address):
address = address.strip(b'.')
labels = address.split(b'.')
results = []
for label in labels:
l = len(label)
if l > 63:
return None
results.append(common.chr(l))
results.append(label)
results.append(b'\0')
return b''.join(results)
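# Illustrative sketch (a minimal, hypothetical helper; not part of the original
# module): build_address encodes a hostname into the length-prefixed DNS label
# wire format described by rfc1035.
def _example_build_address():
    encoded = build_address(b'example.com')
    assert encoded == b'\x07example\x03com\x00'
    return encoded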
def build_request(address, qtype):
request_id = os.urandom(2)
header = struct.pack('!BBHHHH', 1, 0, 1, 0, 0, 0)
addr = build_address(address)
qtype_qclass = struct.pack('!HH', qtype, QCLASS_IN)
return request_id + header + addr + qtype_qclass
def parse_ip(addrtype, data, length, offset):
if addrtype == QTYPE_A:
return socket.inet_ntop(socket.AF_INET, data[offset:offset + length])
elif addrtype == QTYPE_AAAA:
return socket.inet_ntop(socket.AF_INET6, data[offset:offset + length])
elif addrtype in [QTYPE_CNAME, QTYPE_NS]:
return parse_name(data, offset)[1]
else:
return data[offset:offset + length]
def parse_name(data, offset):
p = offset
labels = []
l = common.ord(data[p])
while l > 0:
if (l & (128 + 64)) == (128 + 64):
# pointer
pointer = struct.unpack('!H', data[p:p + 2])[0]
pointer &= 0x3FFF
r = parse_name(data, pointer)
labels.append(r[1])
p += 2
# pointer is the end
return p - offset, b'.'.join(labels)
else:
labels.append(data[p + 1:p + 1 + l])
p += 1 + l
l = common.ord(data[p])
return p - offset + 1, b'.'.join(labels)
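# Illustrative sketch (a minimal, hypothetical helper; not part of the original
# module): parse_name walks the length-prefixed labels and returns the number
# of bytes consumed together with the dotted hostname.
def _example_parse_name():
    data = b'\x03www\x07example\x03com\x00'
    consumed, name = parse_name(data, 0)
    assert (consumed, name) == (17, b'www.example.com')
    return name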
# rfc1035
# record
# 1 1 1 1 1 1
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | |
# / /
# / NAME /
# | |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | TYPE |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | CLASS |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | TTL |
# | |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | RDLENGTH |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--|
# / RDATA /
# / /
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
def parse_record(data, offset, question=False):
nlen, name = parse_name(data, offset)
if not question:
record_type, record_class, record_ttl, record_rdlength = struct.unpack(
'!HHiH', data[offset + nlen:offset + nlen + 10]
)
ip = parse_ip(record_type, data, record_rdlength, offset + nlen + 10)
return nlen + 10 + record_rdlength, \
(name, ip, record_type, record_class, record_ttl)
else:
record_type, record_class = struct.unpack(
'!HH', data[offset + nlen:offset + nlen + 4]
)
return nlen + 4, (name, None, record_type, record_class, None, None)
def parse_header(data):
if len(data) >= 12:
header = struct.unpack('!HBBHHHH', data[:12])
res_id = header[0]
res_qr = header[1] & 128
res_tc = header[1] & 2
res_ra = header[2] & 128
res_rcode = header[2] & 15
# assert res_tc == 0
# assert res_rcode in [0, 3]
res_qdcount = header[3]
res_ancount = header[4]
res_nscount = header[5]
res_arcount = header[6]
return (res_id, res_qr, res_tc, res_ra, res_rcode, res_qdcount,
res_ancount, res_nscount, res_arcount)
return None
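# Illustrative sketch (a minimal, hypothetical helper; not part of the original
# module): a query produced by build_request can be fed back through
# parse_header. The flags byte packed there (value 1, i.e. RD set) yields
# QR=0, TC=0, RA=0, RCODE=0 and QDCOUNT=1.
def _example_parse_request_header():
    req = build_request(b'example.com', QTYPE_A)
    # (id, qr, tc, ra, rcode, qdcount, ancount, nscount, arcount)
    header = parse_header(req)
    assert header[1:] == (0, 0, 0, 0, 1, 0, 0, 0)
    return header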
def parse_response(data):
try:
if len(data) >= 12:
header = parse_header(data)
if not header:
return None
res_id, res_qr, res_tc, res_ra, res_rcode, res_qdcount, \
res_ancount, res_nscount, res_arcount = header
qds = []
ans = []
offset = 12
for i in range(0, res_qdcount):
l, r = parse_record(data, offset, True)
offset += l
if r:
qds.append(r)
for i in range(0, res_ancount):
l, r = parse_record(data, offset)
offset += l
if r:
ans.append(r)
for i in range(0, res_nscount):
l, r = parse_record(data, offset)
offset += l
for i in range(0, res_arcount):
l, r = parse_record(data, offset)
offset += l
response = DNSResponse()
if qds:
response.hostname = qds[0][0]
for an in qds:
response.questions.append((an[1], an[2], an[3]))
for an in ans:
response.answers.append((an[1], an[2], an[3]))
return response
except Exception as e:
shell.print_exception(e)
return None
def is_valid_hostname(hostname):
if len(hostname) > 255:
return False
if hostname[-1:] == b'.':  # slice so the comparison also works on Python 3 bytes
hostname = hostname[:-1]
return all(VALID_HOSTNAME.match(x) for x in hostname.split(b'.'))
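# Illustrative sketch (a minimal, hypothetical helper; not part of the original
# module): hostname validation checks the overall length and each
# dot-separated label against VALID_HOSTNAME.
def _example_is_valid_hostname():
    assert is_valid_hostname(b'www.example.com')
    assert not is_valid_hostname(b'invalid.@!#.example.com')
    assert not is_valid_hostname(b'x' * 256)
    return True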
class DNSResponse(object):
def __init__(self):
self.hostname = None
self.questions = [] # each: (addr, type, class)
self.answers = [] # each: (addr, type, class)
def __str__(self):
return '%s: %s' % (self.hostname, str(self.answers))
STATUS_IPV4 = 0
STATUS_IPV6 = 1
class DNSResolver(object):
def __init__(self, server_list=None):
self._loop = None
self._hosts = {}
self._hostname_status = {}
self._hostname_to_cb = {}
self._cb_to_hostname = {}
self._cache = lru_cache.LRUCache(timeout=300)
self._sock = None
if server_list is None:
self._servers = None
self._parse_resolv()
else:
self._servers = server_list
self._parse_hosts()
# TODO monitor hosts change and reload hosts
# TODO parse /etc/gai.conf and follow its rules
def _parse_resolv(self):
self._servers = []
try:
with open('/etc/resolv.conf', 'rb') as f:
content = f.readlines()
for line in content:
line = line.strip()
if line:
if line.startswith(b'nameserver'):
parts = line.split()
if len(parts) >= 2:
server = parts[1]
if common.is_ip(server) == socket.AF_INET:
if type(server) != str:
server = server.decode('utf8')
self._servers.append(server)
except IOError:
pass
if not self._servers:
self._servers = ['8.8.4.4', '8.8.8.8']
def _parse_hosts(self):
etc_path = '/etc/hosts'
if 'WINDIR' in os.environ:
etc_path = os.environ['WINDIR'] + '/system32/drivers/etc/hosts'
try:
with open(etc_path, 'rb') as f:
for line in f.readlines():
line = line.strip()
parts = line.split()
if len(parts) >= 2:
ip = parts[0]
if common.is_ip(ip):
for i in range(1, len(parts)):
hostname = parts[i]
if hostname:
self._hosts[hostname] = ip
except IOError:
self._hosts['localhost'] = '127.0.0.1'
def add_to_loop(self, loop):
if self._loop:
raise Exception('already add to loop')
self._loop = loop
# TODO when dns server is IPv6
self._sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM,
socket.SOL_UDP)
self._sock.setblocking(False)
loop.add(self._sock, eventloop.POLL_IN, self)
loop.add_periodic(self.handle_periodic)
def _call_callback(self, hostname, ip, error=None):
callbacks = self._hostname_to_cb.get(hostname, [])
for callback in callbacks:
if callback in self._cb_to_hostname:
del self._cb_to_hostname[callback]
if ip or error:
callback((hostname, ip), error)
else:
callback((hostname, None),
Exception('unknown hostname %s' % hostname))
if hostname in self._hostname_to_cb:
del self._hostname_to_cb[hostname]
if hostname in self._hostname_status:
del self._hostname_status[hostname]
def _handle_data(self, data):
response = parse_response(data)
if response and response.hostname:
hostname = response.hostname
ip = None
for answer in response.answers:
if answer[1] in (QTYPE_A, QTYPE_AAAA) and \
answer[2] == QCLASS_IN:
ip = answer[0]
break
if not ip and self._hostname_status.get(hostname, STATUS_IPV6) \
== STATUS_IPV4:
self._hostname_status[hostname] = STATUS_IPV6
self._send_req(hostname, QTYPE_AAAA)
else:
if ip:
self._cache[hostname] = ip
self._call_callback(hostname, ip)
elif self._hostname_status.get(hostname, None) == STATUS_IPV6:
for question in response.questions:
if question[1] == QTYPE_AAAA:
self._call_callback(hostname, None)
break
def handle_event(self, sock, fd, event):
if sock != self._sock:
return
if event & eventloop.POLL_ERR:
logging.error('dns socket err')
self._loop.remove(self._sock)
self._sock.close()
# TODO when dns server is IPv6
self._sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM,
socket.SOL_UDP)
self._sock.setblocking(False)
self._loop.add(self._sock, eventloop.POLL_IN, self)
else:
data, addr = sock.recvfrom(1024)
if addr[0] not in self._servers:
logging.warning('received a packet other than our dns')
return
self._handle_data(data)
def handle_periodic(self):
self._cache.sweep()
def remove_callback(self, callback):
hostname = self._cb_to_hostname.get(callback)
if hostname:
del self._cb_to_hostname[callback]
arr = self._hostname_to_cb.get(hostname, None)
if arr:
arr.remove(callback)
if not arr:
del self._hostname_to_cb[hostname]
if hostname in self._hostname_status:
del self._hostname_status[hostname]
def _send_req(self, hostname, qtype):
req = build_request(hostname, qtype)
for server in self._servers:
logging.debug('resolving %s with type %d using server %s',
hostname, qtype, server)
self._sock.sendto(req, (server, 53))
def resolve(self, hostname, callback):
if type(hostname) != bytes:
hostname = hostname.encode('utf8')
if not hostname:
callback(None, Exception('empty hostname'))
elif common.is_ip(hostname):
callback((hostname, hostname), None)
elif hostname in self._hosts:
logging.debug('hit hosts: %s', hostname)
ip = self._hosts[hostname]
callback((hostname, ip), None)
elif hostname in self._cache:
logging.debug('hit cache: %s', hostname)
ip = self._cache[hostname]
callback((hostname, ip), None)
else:
if not is_valid_hostname(hostname):
callback(None, Exception('invalid hostname: %s' % hostname))
return
arr = self._hostname_to_cb.get(hostname, None)
if not arr:
self._hostname_status[hostname] = STATUS_IPV4
self._send_req(hostname, QTYPE_A)
self._hostname_to_cb[hostname] = [callback]
self._cb_to_hostname[callback] = hostname
else:
arr.append(callback)
# TODO send again only if waited too long
self._send_req(hostname, QTYPE_A)
def close(self):
if self._sock:
if self._loop:
self._loop.remove_periodic(self.handle_periodic)
self._loop.remove(self._sock)
self._sock.close()
self._sock = None
def test():
dns_resolver = DNSResolver()
loop = eventloop.EventLoop()
dns_resolver.add_to_loop(loop)
global counter
counter = 0
def make_callback():
global counter
def callback(result, error):
global counter
# TODO: what can we assert?
print(result, error)
counter += 1
if counter == 9:
dns_resolver.close()
loop.stop()
a_callback = callback
return a_callback
assert(make_callback() != make_callback())
dns_resolver.resolve(b'google.com', make_callback())
dns_resolver.resolve('google.com', make_callback())
dns_resolver.resolve('example.com', make_callback())
dns_resolver.resolve('ipv6.google.com', make_callback())
dns_resolver.resolve('www.facebook.com', make_callback())
dns_resolver.resolve('ns2.google.com', make_callback())
dns_resolver.resolve('invalid.@!#$%^&[email protected]', make_callback())
dns_resolver.resolve('toooooooooooooooooooooooooooooooooooooooooooooooooo'
'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
'long.hostname', make_callback())
dns_resolver.resolve('toooooooooooooooooooooooooooooooooooooooooooooooooo'
'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
'long.hostname', make_callback())
loop.run()
if __name__ == '__main__':
test()
| apache-2.0 |
robinro/ansible-modules-extras | network/f5/bigip_sys_db.py | 23 | 5861 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2016 F5 Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: bigip_sys_db
short_description: Manage BIG-IP system database variables
description:
- Manage BIG-IP system database variables
version_added: "2.2"
options:
key:
description:
- The database variable to manipulate.
required: true
state:
description:
- The state of the variable on the system. When C(present), guarantees
that an existing variable is set to C(value). When C(reset), sets the
variable back to the default value. At least one of value and state
C(reset) is required.
required: false
default: present
choices:
- present
- reset
value:
description:
- The value to set the key to. At least one of value and state C(reset)
is required.
required: false
notes:
- Requires the f5-sdk Python package on the host. This is as easy as pip
install f5-sdk.
- Requires BIG-IP version 12.0.0 or greater
extends_documentation_fragment: f5
requirements:
- f5-sdk
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = '''
- name: Set the boot.quiet DB variable on the BIG-IP
bigip_sys_db:
user: "admin"
password: "secret"
server: "lb.mydomain.com"
key: "boot.quiet"
value: "disable"
delegate_to: localhost
- name: Disable the initial setup screen
bigip_sys_db:
user: "admin"
password: "secret"
server: "lb.mydomain.com"
key: "setup.run"
value: "false"
delegate_to: localhost
- name: Reset the initial setup screen
bigip_sys_db:
user: "admin"
password: "secret"
server: "lb.mydomain.com"
key: "setup.run"
state: "reset"
delegate_to: localhost
'''
RETURN = '''
name:
description: The key in the system database that was specified
returned: changed and success
type: string
sample: "setup.run"
default_value:
description: The default value of the key
returned: changed and success
type: string
sample: "true"
value:
description: The value that you set the key to
returned: changed and success
type: string
sample: "false"
'''
try:
from f5.bigip import ManagementRoot
HAS_F5SDK = True
except ImportError:
HAS_F5SDK = False
class BigIpSysDb(object):
def __init__(self, *args, **kwargs):
if not HAS_F5SDK:
raise F5ModuleError("The python f5-sdk module is required")
self.params = kwargs
self.api = ManagementRoot(kwargs['server'],
kwargs['user'],
kwargs['password'],
port=kwargs['server_port'])
def flush(self):
result = dict()
state = self.params['state']
value = self.params['value']
if not state == 'reset' and not value:
raise F5ModuleError(
"When setting a key, a value must be supplied"
)
current = self.read()
if self.params['check_mode']:
if value == current:
changed = False
else:
changed = True
else:
if state == "present":
changed = self.present()
elif state == "reset":
changed = self.reset()
current = self.read()
result.update(
name=current.name,
default_value=current.defaultValue,
value=current.value
)
result.update(dict(changed=changed))
return result
def read(self):
dbs = self.api.tm.sys.dbs.db.load(
name=self.params['key']
)
return dbs
def present(self):
current = self.read()
if current.value == self.params['value']:
return False
current.update(value=self.params['value'])
current.refresh()
if current.value != self.params['value']:
raise F5ModuleError(
"Failed to set the DB variable"
)
return True
def reset(self):
current = self.read()
default = current.defaultValue
if current.value == default:
return False
current.update(value=default)
current.refresh()
if current.value != current.defaultValue:
raise F5ModuleError(
"Failed to reset the DB variable"
)
return True
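# Illustrative sketch (hypothetical, not part of the original module): the
# wrapper class can also be driven outside of Ansible. The server, credentials
# and key below are placeholders, and a reachable BIG-IP device is assumed.
def _example_set_db_variable():
    obj = BigIpSysDb(check_mode=False, server='lb.mydomain.com',
                     server_port=443, user='admin', password='secret',
                     key='setup.run', value='false', state='present')
    return obj.flush()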
def main():
argument_spec = f5_argument_spec()
meta_args = dict(
key=dict(required=True),
state=dict(default='present', choices=['present', 'reset']),
value=dict(required=False, default=None)
)
argument_spec.update(meta_args)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True
)
try:
obj = BigIpSysDb(check_mode=module.check_mode, **module.params)
result = obj.flush()
module.exit_json(**result)
except F5ModuleError as e:
module.fail_json(msg=str(e))
from ansible.module_utils.basic import *
from ansible.module_utils.f5 import *
if __name__ == '__main__':
main()
| gpl-3.0 |
bert9bert/statsmodels | statsmodels/tsa/statespace/kalman_filter.py | 2 | 86079 | """
State Space Representation and Kalman Filter
Author: Chad Fulton
License: Simplified-BSD
"""
from __future__ import division, absolute_import, print_function
from warnings import warn
import numpy as np
from .representation import OptionWrapper, Representation, FrozenRepresentation
from .tools import (validate_vector_shape, validate_matrix_shape,
reorder_missing_matrix, reorder_missing_vector)
from . import tools
from statsmodels.tools.sm_exceptions import ValueWarning
# Define constants
FILTER_CONVENTIONAL = 0x01 # Durbin and Koopman (2012), Chapter 4
FILTER_EXACT_INITIAL = 0x02 # ibid., Chapter 5.6
FILTER_AUGMENTED = 0x04 # ibid., Chapter 5.7
FILTER_SQUARE_ROOT = 0x08 # ibid., Chapter 6.3
FILTER_UNIVARIATE = 0x10 # ibid., Chapter 6.4
FILTER_COLLAPSED = 0x20 # ibid., Chapter 6.5
FILTER_EXTENDED = 0x40 # ibid., Chapter 10.2
FILTER_UNSCENTED = 0x80 # ibid., Chapter 10.3
INVERT_UNIVARIATE = 0x01
SOLVE_LU = 0x02
INVERT_LU = 0x04
SOLVE_CHOLESKY = 0x08
INVERT_CHOLESKY = 0x10
STABILITY_FORCE_SYMMETRY = 0x01
MEMORY_STORE_ALL = 0
MEMORY_NO_FORECAST = 0x01
MEMORY_NO_PREDICTED = 0x02
MEMORY_NO_FILTERED = 0x04
MEMORY_NO_LIKELIHOOD = 0x08
MEMORY_NO_GAIN = 0x10
MEMORY_NO_SMOOTHING = 0x20
MEMORY_NO_STD_FORECAST = 0x40
MEMORY_CONSERVE = (
MEMORY_NO_FORECAST | MEMORY_NO_PREDICTED | MEMORY_NO_FILTERED |
MEMORY_NO_LIKELIHOOD | MEMORY_NO_GAIN | MEMORY_NO_SMOOTHING |
MEMORY_NO_STD_FORECAST
)
TIMING_INIT_PREDICTED = 0
TIMING_INIT_FILTERED = 1
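# Illustrative sketch (hypothetical helper, not part of the original module):
# the constants above are bit flags, so a combined setting is built with
# bitwise OR and individual options are tested with bitwise AND.
def _example_filter_method_bitmask():
    method = FILTER_CONVENTIONAL | FILTER_COLLAPSED  # 0x01 | 0x20 == 33
    assert method & FILTER_CONVENTIONAL
    assert method & FILTER_COLLAPSED
    assert not method & FILTER_UNIVARIATE
    return method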
class KalmanFilter(Representation):
r"""
State space representation of a time series process, with Kalman filter
Parameters
----------
k_endog : array_like or integer
The observed time-series process :math:`y` if array like or the
number of variables in the process if an integer.
k_states : int
The dimension of the unobserved state process.
k_posdef : int, optional
The dimension of a guaranteed positive definite covariance matrix
describing the shocks in the transition equation. Must be less than
or equal to `k_states`. Default is `k_states`.
loglikelihood_burn : int, optional
The number of initial periods during which the loglikelihood is not
recorded. Default is 0.
tolerance : float, optional
The tolerance at which the Kalman filter determines convergence to
steady-state. Default is 1e-19.
results_class : class, optional
Default results class to use to save filtering output. Default is
`FilterResults`. If specified, class must extend from `FilterResults`.
**kwargs
Keyword arguments may be used to provide values for the filter,
inversion, and stability methods. See `set_filter_method`,
`set_inversion_method`, and `set_stability_method`.
Keyword arguments may be used to provide default values for state space
matrices. See `Representation` for more details.
Notes
-----
There are several types of options available for controlling the Kalman
filter operation. All options are internally held as bitmasks, but can be
manipulated by setting class attributes, which act like boolean flags. For
more information, see the `set_*` class method documentation. The options
are:
filter_method
The filtering method controls aspects of which
Kalman filtering approach will be used.
inversion_method
The Kalman filter may contain one matrix inversion: that of the
forecast error covariance matrix. The inversion method controls how and
if that inverse is performed.
stability_method
The Kalman filter is a recursive algorithm that may in some cases
suffer issues with numerical stability. The stability method controls
what, if any, measures are taken to promote stability.
conserve_memory
By default, the Kalman filter computes a number of intermediate
matrices at each iteration. The memory conservation options control
which of those matrices are stored.
filter_timing
By default, the Kalman filter follows Durbin and Koopman, 2012, in
initializing the filter with predicted values. Kim and Nelson, 1999,
instead initialize the filter with filtered values, which is
essentially just a different timing convention.
The `filter_method` and `inversion_method` options intentionally allow
the possibility that multiple methods will be indicated. In the case that
multiple methods are selected, the underlying Kalman filter will attempt to
select the optimal method given the input data.
For example, it may be that INVERT_UNIVARIATE and SOLVE_CHOLESKY are
indicated (this is in fact the default case). In this case, if the
endogenous vector is 1-dimensional (`k_endog` = 1), then INVERT_UNIVARIATE
is used and inversion reduces to simple division, and if it has a larger
dimension, the Cholesky decomposition along with linear solving (rather
than explicit matrix inversion) is used. If only SOLVE_CHOLESKY had been
set, then the Cholesky decomposition method would *always* be used, even in
the case of 1-dimensional data.
See Also
--------
FilterResults
statsmodels.tsa.statespace.representation.Representation
"""
filter_methods = [
'filter_conventional', 'filter_exact_initial', 'filter_augmented',
'filter_square_root', 'filter_univariate', 'filter_collapsed',
'filter_extended', 'filter_unscented'
]
filter_conventional = OptionWrapper('filter_method', FILTER_CONVENTIONAL)
"""
(bool) Flag for conventional Kalman filtering.
"""
filter_exact_initial = OptionWrapper('filter_method', FILTER_EXACT_INITIAL)
"""
(bool) Flag for exact initial Kalman filtering. Not implemented.
"""
filter_augmented = OptionWrapper('filter_method', FILTER_AUGMENTED)
"""
(bool) Flag for augmented Kalman filtering. Not implemented.
"""
filter_square_root = OptionWrapper('filter_method', FILTER_SQUARE_ROOT)
"""
(bool) Flag for square-root Kalman filtering. Not implemented.
"""
filter_univariate = OptionWrapper('filter_method', FILTER_UNIVARIATE)
"""
(bool) Flag for univariate filtering of multivariate observation vector.
"""
filter_collapsed = OptionWrapper('filter_method', FILTER_COLLAPSED)
"""
(bool) Flag for Kalman filtering with collapsed observation vector.
"""
filter_extended = OptionWrapper('filter_method', FILTER_EXTENDED)
"""
(bool) Flag for extended Kalman filtering. Not implemented.
"""
filter_unscented = OptionWrapper('filter_method', FILTER_UNSCENTED)
"""
(bool) Flag for unscented Kalman filtering. Not implemented.
"""
inversion_methods = [
'invert_univariate', 'solve_lu', 'invert_lu', 'solve_cholesky',
'invert_cholesky'
]
invert_univariate = OptionWrapper('inversion_method', INVERT_UNIVARIATE)
"""
(bool) Flag for univariate inversion method (recommended).
"""
solve_lu = OptionWrapper('inversion_method', SOLVE_LU)
"""
(bool) Flag for LU and linear solver inversion method.
"""
invert_lu = OptionWrapper('inversion_method', INVERT_LU)
"""
(bool) Flag for LU inversion method.
"""
solve_cholesky = OptionWrapper('inversion_method', SOLVE_CHOLESKY)
"""
(bool) Flag for Cholesky and linear solver inversion method (recommended).
"""
invert_cholesky = OptionWrapper('inversion_method', INVERT_CHOLESKY)
"""
(bool) Flag for Cholesky inversion method.
"""
stability_methods = ['stability_force_symmetry']
stability_force_symmetry = (
OptionWrapper('stability_method', STABILITY_FORCE_SYMMETRY)
)
"""
(bool) Flag for enforcing covariance matrix symmetry
"""
memory_options = [
'memory_store_all', 'memory_no_forecast', 'memory_no_predicted',
'memory_no_filtered', 'memory_no_likelihood', 'memory_no_gain',
'memory_no_smoothing', 'memory_no_std_forecast', 'memory_conserve'
]
memory_store_all = OptionWrapper('conserve_memory', MEMORY_STORE_ALL)
"""
(bool) Flag for storing all intermediate results in memory (default).
"""
memory_no_forecast = OptionWrapper('conserve_memory', MEMORY_NO_FORECAST)
"""
(bool) Flag to prevent storing forecasts.
"""
memory_no_predicted = OptionWrapper('conserve_memory', MEMORY_NO_PREDICTED)
"""
(bool) Flag to prevent storing predicted state and covariance matrices.
"""
memory_no_filtered = OptionWrapper('conserve_memory', MEMORY_NO_FILTERED)
"""
(bool) Flag to prevent storing filtered state and covariance matrices.
"""
memory_no_likelihood = (
OptionWrapper('conserve_memory', MEMORY_NO_LIKELIHOOD)
)
"""
(bool) Flag to prevent storing likelihood values for each observation.
"""
memory_no_gain = OptionWrapper('conserve_memory', MEMORY_NO_GAIN)
"""
(bool) Flag to prevent storing the Kalman gain matrices.
"""
memory_no_smoothing = OptionWrapper('conserve_memory', MEMORY_NO_SMOOTHING)
"""
(bool) Flag to prevent storing temporary variables required for smoothing.
"""
memory_no_std_forecast = (
OptionWrapper('conserve_memory', MEMORY_NO_STD_FORECAST))
"""
(bool) Flag to prevent storing standardized forecast errors.
"""
memory_conserve = OptionWrapper('conserve_memory', MEMORY_CONSERVE)
"""
(bool) Flag to conserve the maximum amount of memory.
"""
timing_options = [
'timing_init_predicted', 'timing_init_filtered'
]
timing_init_predicted = OptionWrapper('filter_timing',
TIMING_INIT_PREDICTED)
"""
(bool) Flag for the default timing convention (Durbin and Koopman, 2012).
"""
timing_init_filtered = OptionWrapper('filter_timing', TIMING_INIT_FILTERED)
"""
(bool) Flag for the alternate timing convention (Kim and Nelson, 1999).
"""
# Default filter options
filter_method = FILTER_CONVENTIONAL
"""
(int) Filtering method bitmask.
"""
inversion_method = INVERT_UNIVARIATE | SOLVE_CHOLESKY
"""
(int) Inversion method bitmask.
"""
stability_method = STABILITY_FORCE_SYMMETRY
"""
(int) Stability method bitmask.
"""
conserve_memory = MEMORY_STORE_ALL
"""
(int) Memory conservation bitmask.
"""
filter_timing = TIMING_INIT_PREDICTED
"""
(int) Filter timing.
"""
def __init__(self, k_endog, k_states, k_posdef=None,
loglikelihood_burn=0, tolerance=1e-19, results_class=None,
kalman_filter_classes=None, **kwargs):
super(KalmanFilter, self).__init__(
k_endog, k_states, k_posdef, **kwargs
)
# Setup the underlying Kalman filter storage
self._kalman_filters = {}
# Filter options
self.loglikelihood_burn = loglikelihood_burn
self.results_class = (
results_class if results_class is not None else FilterResults
)
# Options
self.prefix_kalman_filter_map = (
kalman_filter_classes
if kalman_filter_classes is not None
else tools.prefix_kalman_filter_map.copy())
self.set_filter_method(**kwargs)
self.set_inversion_method(**kwargs)
self.set_stability_method(**kwargs)
self.set_conserve_memory(**kwargs)
self.set_filter_timing(**kwargs)
self.tolerance = tolerance
@property
def _kalman_filter(self):
prefix = self.prefix
if prefix in self._kalman_filters:
return self._kalman_filters[prefix]
return None
def _initialize_filter(self, filter_method=None, inversion_method=None,
stability_method=None, conserve_memory=None,
tolerance=None, filter_timing=None,
loglikelihood_burn=None):
if filter_method is None:
filter_method = self.filter_method
if inversion_method is None:
inversion_method = self.inversion_method
if stability_method is None:
stability_method = self.stability_method
if conserve_memory is None:
conserve_memory = self.conserve_memory
if loglikelihood_burn is None:
loglikelihood_burn = self.loglikelihood_burn
if filter_timing is None:
filter_timing = self.filter_timing
if tolerance is None:
tolerance = self.tolerance
# Make sure we have endog
if self.endog is None:
raise RuntimeError('Must bind a dataset to the model before'
' filtering or smoothing.')
# Initialize the representation matrices
prefix, dtype, create_statespace = self._initialize_representation()
# Determine if we need to (re-)create the filter
# (definitely need to recreate if we recreated the _statespace object)
create_filter = create_statespace or prefix not in self._kalman_filters
if not create_filter:
kalman_filter = self._kalman_filters[prefix]
create_filter = (
not kalman_filter.conserve_memory == conserve_memory or
not kalman_filter.loglikelihood_burn == loglikelihood_burn
)
# If the dtype-specific _kalman_filter does not exist (or if we need
# to re-create it), create it
if create_filter:
if prefix in self._kalman_filters:
# Delete the old filter
del self._kalman_filters[prefix]
# Setup the filter
cls = self.prefix_kalman_filter_map[prefix]
self._kalman_filters[prefix] = cls(
self._statespaces[prefix], filter_method, inversion_method,
stability_method, conserve_memory, filter_timing, tolerance,
loglikelihood_burn
)
# Otherwise, update the filter parameters
else:
kalman_filter = self._kalman_filters[prefix]
kalman_filter.set_filter_method(filter_method, False)
kalman_filter.inversion_method = inversion_method
kalman_filter.stability_method = stability_method
kalman_filter.filter_timing = filter_timing
kalman_filter.tolerance = tolerance
# conserve_memory and loglikelihood_burn changes always lead to
# re-created filters
return prefix, dtype, create_filter, create_statespace
def set_filter_method(self, filter_method=None, **kwargs):
r"""
Set the filtering method
The filtering method controls aspects of which Kalman filtering
approach will be used.
Parameters
----------
filter_method : integer, optional
Bitmask value to set the filter method to. See notes for details.
**kwargs
Keyword arguments may be used to influence the filter method by
setting individual boolean flags. See notes for details.
Notes
-----
The filtering method is defined by a collection of boolean flags, and
is internally stored as a bitmask. The methods available are:
FILTER_CONVENTIONAL = 0x01
Conventional Kalman filter.
FILTER_UNIVARIATE = 0x10
Univariate approach to Kalman filtering. Overrides conventional
method if both are specified.
FILTER_COLLAPSED = 0x20
Collapsed approach to Kalman filtering. Will be used *in addition*
to conventional or univariate filtering.
Note that only the first method is available if using a Scipy version
older than 0.16.
If the bitmask is set directly via the `filter_method` argument, then
the full method must be provided.
If keyword arguments are used to set individual boolean flags, then
the lowercase of the method must be used as an argument name, and the
value is the desired value of the boolean flag (True or False).
Note that the filter method may also be specified by directly modifying
the class attributes which are defined similarly to the keyword
arguments.
The default filtering method is FILTER_CONVENTIONAL.
Examples
--------
>>> mod = sm.tsa.statespace.SARIMAX(range(10))
>>> mod.ssm.filter_method
1
>>> mod.ssm.filter_conventional
True
>>> mod.ssm.filter_univariate = True
>>> mod.ssm.filter_method
17
>>> mod.ssm.set_filter_method(filter_univariate=False,
... filter_collapsed=True)
>>> mod.ssm.filter_method
33
>>> mod.ssm.set_filter_method(filter_method=1)
>>> mod.ssm.filter_conventional
True
>>> mod.ssm.filter_univariate
False
>>> mod.ssm.filter_collapsed
False
>>> mod.ssm.filter_univariate = True
>>> mod.ssm.filter_method
17
"""
if filter_method is not None:
self.filter_method = filter_method
for name in KalmanFilter.filter_methods:
if name in kwargs:
setattr(self, name, kwargs[name])
if self._compatibility_mode and not self.filter_method == 1:
raise NotImplementedError('Only conventional Kalman filtering'
' is available. Consider updating'
' dependencies for more options.')
def set_inversion_method(self, inversion_method=None, **kwargs):
r"""
Set the inversion method
The Kalman filter may contain one matrix inversion: that of the
forecast error covariance matrix. The inversion method controls how and
if that inverse is performed.
Parameters
----------
inversion_method : integer, optional
Bitmask value to set the inversion method to. See notes for
details.
**kwargs
Keyword arguments may be used to influence the inversion method by
setting individual boolean flags. See notes for details.
Notes
-----
The inversion method is defined by a collection of boolean flags, and
is internally stored as a bitmask. The methods available are:
INVERT_UNIVARIATE = 0x01
If the endogenous time series is univariate, then inversion can be
performed by simple division. If this flag is set and the time
series is univariate, then division will always be used even if
other flags are also set.
SOLVE_LU = 0x02
Use an LU decomposition along with a linear solver (rather than
ever actually inverting the matrix).
INVERT_LU = 0x04
Use an LU decomposition along with typical matrix inversion.
SOLVE_CHOLESKY = 0x08
Use a Cholesky decomposition along with a linear solver.
INVERT_CHOLESKY = 0x10
Use an Cholesky decomposition along with typical matrix inversion.
If the bitmask is set directly via the `inversion_method` argument,
then the full method must be provided.
If keyword arguments are used to set individual boolean flags, then
the lowercase of the method must be used as an argument name, and the
value is the desired value of the boolean flag (True or False).
Note that the inversion method may also be specified by directly
modifying the class attributes which are defined similarly to the
keyword arguments.
The default inversion method is `INVERT_UNIVARIATE | SOLVE_CHOLESKY`
Several things to keep in mind are:
- If the filtering method is specified to be univariate, then simple
division is always used regardless of the dimension of the endogenous
time series.
- Cholesky decomposition is about twice as fast as LU decomposition,
but it requires that the matrix be positive definite. While this
should generally be true, it may not be in every case.
- Using a linear solver rather than true matrix inversion is generally
faster and is numerically more stable.
Examples
--------
>>> mod = sm.tsa.statespace.SARIMAX(range(10))
>>> mod.ssm.inversion_method
1
>>> mod.ssm.solve_cholesky
True
>>> mod.ssm.invert_univariate
True
>>> mod.ssm.invert_lu
False
>>> mod.ssm.invert_univariate = False
>>> mod.ssm.inversion_method
8
>>> mod.ssm.set_inversion_method(solve_cholesky=False,
... invert_cholesky=True)
>>> mod.ssm.inversion_method
16
"""
if inversion_method is not None:
self.inversion_method = inversion_method
for name in KalmanFilter.inversion_methods:
if name in kwargs:
setattr(self, name, kwargs[name])
def set_stability_method(self, stability_method=None, **kwargs):
r"""
Set the numerical stability method
The Kalman filter is a recursive algorithm that may in some cases
suffer issues with numerical stability. The stability method controls
what, if any, measures are taken to promote stability.
Parameters
----------
stability_method : integer, optional
Bitmask value to set the stability method to. See notes for
details.
**kwargs
Keyword arguments may be used to influence the stability method by
setting individual boolean flags. See notes for details.
Notes
-----
The stability method is defined by a collection of boolean flags, and
is internally stored as a bitmask. The methods available are:
STABILITY_FORCE_SYMMETRY = 0x01
If this flag is set, symmetry of the predicted state covariance
matrix is enforced at each iteration of the filter, where each
element is set to the average of the corresponding elements in the
upper and lower triangle.
If the bitmask is set directly via the `stability_method` argument,
then the full method must be provided.
If keyword arguments are used to set individual boolean flags, then
the lowercase of the method must be used as an argument name, and the
value is the desired value of the boolean flag (True or False).
Note that the stability method may also be specified by directly
modifying the class attributes which are defined similarly to the
keyword arguments.
The default stability method is `STABILITY_FORCE_SYMMETRY`
Examples
--------
>>> mod = sm.tsa.statespace.SARIMAX(range(10))
>>> mod.ssm.stability_method
1
>>> mod.ssm.stability_force_symmetry
True
>>> mod.ssm.stability_force_symmetry = False
>>> mod.ssm.stability_method
0
"""
if stability_method is not None:
self.stability_method = stability_method
for name in KalmanFilter.stability_methods:
if name in kwargs:
setattr(self, name, kwargs[name])
def set_conserve_memory(self, conserve_memory=None, **kwargs):
r"""
Set the memory conservation method
By default, the Kalman filter computes a number of intermediate
matrices at each iteration. The memory conservation options control
which of those matrices are stored.
Parameters
----------
conserve_memory : integer, optional
Bitmask value to set the memory conservation method to. See notes
for details.
**kwargs
Keyword arguments may be used to influence the memory conservation
method by setting individual boolean flags. See notes for details.
Notes
-----
The memory conservation method is defined by a collection of boolean
flags, and is internally stored as a bitmask. The methods available
are:
MEMORY_STORE_ALL = 0
Store all intermediate matrices. This is the default value.
MEMORY_NO_FORECAST = 0x01
Do not store the forecast, forecast error, or forecast error
covariance matrices. If this option is used, the `predict` method
from the results class is unavailable.
MEMORY_NO_PREDICTED = 0x02
Do not store the predicted state or predicted state covariance
matrices.
MEMORY_NO_FILTERED = 0x04
Do not store the filtered state or filtered state covariance
matrices.
MEMORY_NO_LIKELIHOOD = 0x08
Do not store the vector of loglikelihood values for each
observation. Only the sum of the loglikelihood values is stored.
MEMORY_NO_GAIN = 0x10
Do not store the Kalman gain matrices.
MEMORY_NO_SMOOTHING = 0x20
Do not store temporary variables related to Kalman smoothing. If
this option is used, smoothing is unavailable.
MEMORY_NO_STD_FORECAST = 0x40
Do not store standardized forecast errors.
MEMORY_CONSERVE
Do not store any intermediate matrices.
Note that if using a Scipy version less than 0.16, the options
MEMORY_NO_GAIN, MEMORY_NO_SMOOTHING, and MEMORY_NO_STD_FORECAST
have no effect.
If the bitmask is set directly via the `conserve_memory` argument,
then the full method must be provided.
If keyword arguments are used to set individual boolean flags, then
the lowercase of the method must be used as an argument name, and the
value is the desired value of the boolean flag (True or False).
Note that the memory conservation method may also be specified by
directly modifying the class attributes which are defined similarly to
the keyword arguments.
The default memory conservation method is `MEMORY_STORE_ALL`, so that
all intermediate matrices are stored.
Examples
--------
>>> mod = sm.tsa.statespace.SARIMAX(range(10))
>>> mod.ssm.conserve_memory
0
>>> mod.ssm.memory_no_predicted
False
>>> mod.ssm.memory_no_predicted = True
>>> mod.ssm.conserve_memory
2
>>> mod.ssm.set_conserve_memory(memory_no_filtered=True,
... memory_no_forecast=True)
>>> mod.ssm.conserve_memory
7
"""
if conserve_memory is not None:
self.conserve_memory = conserve_memory
for name in KalmanFilter.memory_options:
if name in kwargs:
setattr(self, name, kwargs[name])
def set_filter_timing(self, alternate_timing=None, **kwargs):
r"""
Set the filter timing convention
By default, the Kalman filter follows Durbin and Koopman, 2012, in
initializing the filter with predicted values. Kim and Nelson, 1999,
instead initialize the filter with filtered values, which is
essentially just a different timing convention.
Parameters
----------
alternate_timing : integer, optional
Whether or not to use the alternate timing convention. Default is
unspecified.
**kwargs
Keyword arguments may be used to influence the memory conservation
method by setting individual boolean flags. See notes for details.
"""
if alternate_timing is not None:
self.filter_timing = int(alternate_timing)
if 'timing_init_predicted' in kwargs:
self.filter_timing = int(not kwargs['timing_init_predicted'])
if 'timing_init_filtered' in kwargs:
self.filter_timing = int(kwargs['timing_init_filtered'])
if (self._compatibility_mode and
self.filter_timing == TIMING_INIT_FILTERED):
raise NotImplementedError('Only "predicted" Kalman filter'
' timing is available. Consider'
' updating dependencies for more'
' options.')
def _filter(self, filter_method=None, inversion_method=None,
stability_method=None, conserve_memory=None,
filter_timing=None, tolerance=None, loglikelihood_burn=None,
complex_step=False):
# Initialize the filter
prefix, dtype, create_filter, create_statespace = (
self._initialize_filter(
filter_method, inversion_method, stability_method,
conserve_memory, filter_timing, tolerance, loglikelihood_burn
)
)
kfilter = self._kalman_filters[prefix]
# Initialize the state
self._initialize_state(prefix=prefix, complex_step=complex_step)
# Run the filter
kfilter()
tmp = np.array(kfilter.loglikelihood)
tmp2 = np.array(kfilter.predicted_state)
return kfilter
def filter(self, filter_method=None, inversion_method=None,
stability_method=None, conserve_memory=None, filter_timing=None,
tolerance=None, loglikelihood_burn=None, complex_step=False):
r"""
Apply the Kalman filter to the statespace model.
Parameters
----------
filter_method : int, optional
Determines which Kalman filter to use. Default is conventional.
inversion_method : int, optional
Determines which inversion technique to use. Default is by Cholesky
decomposition.
stability_method : int, optional
Determines which numerical stability techniques to use. Default is
to enforce symmetry of the predicted state covariance matrix.
conserve_memory : int, optional
Determines what output from the filter to store. Default is to
store everything.
filter_timing : int, optional
Determines the timing convention of the filter. Default is that
from Durbin and Koopman (2012), in which the filter is initialized
with predicted values.
tolerance : float, optional
The tolerance at which the Kalman filter determines convergence to
steady-state. Default is 1e-19.
loglikelihood_burn : int, optional
The number of initial periods during which the loglikelihood is not
recorded. Default is 0.
Notes
-----
This function by default does not compute variables required for
smoothing.
"""
if conserve_memory is None:
conserve_memory = self.conserve_memory | MEMORY_NO_SMOOTHING
# Run the filter
kfilter = self._filter(
filter_method, inversion_method, stability_method, conserve_memory,
filter_timing, tolerance, loglikelihood_burn, complex_step)
tmp = np.array(kfilter.loglikelihood)
# Create the results object
results = self.results_class(self)
results.update_representation(self)
results.update_filter(kfilter)
return results
def loglike(self, **kwargs):
r"""
Calculate the loglikelihood associated with the statespace model.
Parameters
----------
**kwargs
Additional keyword arguments to pass to the Kalman filter. See
`KalmanFilter.filter` for more details.
Returns
-------
loglike : float
The joint loglikelihood.
"""
if self.memory_no_likelihood:
raise RuntimeError('Cannot compute loglikelihood if'
' MEMORY_NO_LIKELIHOOD option is selected.')
kwargs['conserve_memory'] = MEMORY_CONSERVE ^ MEMORY_NO_LIKELIHOOD
kfilter = self._filter(**kwargs)
loglikelihood_burn = kwargs.get('loglikelihood_burn',
self.loglikelihood_burn)
return np.sum(kfilter.loglikelihood[loglikelihood_burn:])
def loglikeobs(self, **kwargs):
r"""
Calculate the loglikelihood for each observation associated with the
statespace model.
Parameters
----------
**kwargs
Additional keyword arguments to pass to the Kalman filter. See
`KalmanFilter.filter` for more details.
Notes
-----
If `loglikelihood_burn` is positive, then the entries in the returned
loglikelihood vector are set to be zero for those initial time periods.
Returns
-------
loglike : array of float
Array of loglikelihood values for each observation.
"""
if self.memory_no_likelihood:
raise RuntimeError('Cannot compute loglikelihood if'
' MEMORY_NO_LIKELIHOOD option is selected.')
kwargs['conserve_memory'] = MEMORY_CONSERVE ^ MEMORY_NO_LIKELIHOOD
kfilter = self._filter(**kwargs)
llf_obs = np.array(kfilter.loglikelihood, copy=True)
# Set any burned observations to have zero likelihood
loglikelihood_burn = kwargs.get('loglikelihood_burn',
self.loglikelihood_burn)
llf_obs[:loglikelihood_burn] = 0
return llf_obs
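# Illustrative sketch (hypothetical helper, not part of the original class):
# `loglike` is simply the sum of the per-observation values returned by
# `loglikeobs`, since the burn-in entries are zeroed out there. Assumes a
# model with data already bound.
def _example_loglike_decomposition(self, **kwargs):
    llf_obs = self.loglikeobs(**kwargs)
    return np.allclose(self.loglike(**kwargs), llf_obs.sum())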
def simulate(self, nsimulations, measurement_shocks=None,
state_shocks=None, initial_state=None):
r"""
Simulate a new time series following the state space model
Parameters
----------
nsimulations : int
The number of observations to simulate. If the model is
time-invariant this can be any number. If the model is
time-varying, then this number must be less than or equal to the
number of observations.
measurement_shocks : array_like, optional
If specified, these are the shocks to the measurement equation,
:math:`\varepsilon_t`. If unspecified, these are automatically
generated using a pseudo-random number generator. If specified,
must be shaped `nsimulations` x `k_endog`, where `k_endog` is the
same as in the state space model.
state_shocks : array_like, optional
If specified, these are the shocks to the state equation,
:math:`\eta_t`. If unspecified, these are automatically
generated using a pseudo-random number generator. If specified,
must be shaped `nsimulations` x `k_posdef` where `k_posdef` is the
same as in the state space model.
initial_state : array_like, optional
If specified, this is the state vector at time zero, which should
be shaped (`k_states` x 1), where `k_states` is the same as in the
state space model. If unspecified, but the model has been
initialized, then that initialization is used. If unspecified and
the model has not been initialized, then a vector of zeros is used.
Note that this is not included in the returned `simulated_states`
array.
Returns
-------
simulated_obs : array
An (nsimulations x k_endog) array of simulated observations.
simulated_states : array
An (nsimulations x k_states) array of simulated states.
"""
time_invariant = self.time_invariant
# Check for valid number of simulations
if not time_invariant and nsimulations > self.nobs:
raise ValueError('In a time-varying model, cannot create more'
' simulations than there are observations.')
# Check / generate measurement shocks
if measurement_shocks is not None:
measurement_shocks = np.array(measurement_shocks)
if measurement_shocks.ndim == 0:
measurement_shocks = measurement_shocks[np.newaxis, np.newaxis]
elif measurement_shocks.ndim == 1:
measurement_shocks = measurement_shocks[:, np.newaxis]
if not measurement_shocks.shape == (nsimulations, self.k_endog):
raise ValueError('Invalid shape of provided measurement'
' shocks. Required (%d, %d)'
% (nsimulations, self.k_endog))
elif self.shapes['obs_cov'][-1] == 1:
measurement_shocks = np.random.multivariate_normal(
mean=np.zeros(self.k_endog), cov=self['obs_cov'],
size=nsimulations)
# Check / generate state shocks
if state_shocks is not None:
state_shocks = np.array(state_shocks)
if state_shocks.ndim == 0:
state_shocks = state_shocks[np.newaxis, np.newaxis]
elif state_shocks.ndim == 1:
state_shocks = state_shocks[:, np.newaxis]
if not state_shocks.shape == (nsimulations, self.k_posdef):
raise ValueError('Invalid shape of provided state shocks.'
' Required (%d, %d).'
% (nsimulations, self.k_posdef))
elif self.shapes['state_cov'][-1] == 1:
state_shocks = np.random.multivariate_normal(
mean=np.zeros(self.k_posdef), cov=self['state_cov'],
size=nsimulations)
# Get the initial states
if initial_state is not None:
initial_state = np.array(initial_state)
if initial_state.ndim == 0:
initial_state = initial_state[np.newaxis]
elif (initial_state.ndim > 1 and
not initial_state.shape == (self.k_states, 1)):
raise ValueError('Invalid shape of provided initial state'
' vector. Required (%d, 1)' % self.k_states)
elif self.initialization == 'known':
initial_state = np.random.multivariate_normal(
self._initial_state, self._initial_state_cov)
elif self.initialization == 'stationary':
from scipy.linalg import solve_discrete_lyapunov
# (I - T)^{-1} c = x => (I - T) x = c
initial_state_mean = np.linalg.solve(
np.eye(self.k_states) - self['transition', :, :, 0],
self['state_intercept', :, 0])
R = self['selection', :, :, 0]
Q = self['state_cov', :, :, 0]
selected_state_cov = R.dot(Q).dot(R.T)
initial_state_cov = solve_discrete_lyapunov(
self['transition', :, :, 0], selected_state_cov)
initial_state = np.random.multivariate_normal(
initial_state_mean, initial_state_cov)
elif self.initialization == 'approximate_diffuse':
initial_state = np.zeros(self.k_states)
else:
initial_state = np.zeros(self.k_states)
return self._simulate(nsimulations, measurement_shocks, state_shocks,
initial_state)
def _simulate(self, nsimulations, measurement_shocks, state_shocks,
initial_state):
time_invariant = self.time_invariant
# Holding variables for the simulations
simulated_obs = np.zeros((nsimulations, self.k_endog),
dtype=self.dtype)
simulated_states = np.zeros((nsimulations+1, self.k_states),
dtype=self.dtype)
simulated_states[0] = initial_state
# Perform iterations to create the new time series
obs_intercept_t = 0
design_t = 0
state_intercept_t = 0
transition_t = 0
selection_t = 0
for t in range(nsimulations):
# Get the current shocks (this accommodates time-varying matrices)
if measurement_shocks is None:
measurement_shock = np.random.multivariate_normal(
mean=np.zeros(self.k_endog), cov=self['obs_cov', :, :, t])
else:
measurement_shock = measurement_shocks[t]
if state_shocks is None:
state_shock = np.random.multivariate_normal(
mean=np.zeros(self.k_posdef),
cov=self['state_cov', :, :, t])
else:
state_shock = state_shocks[t]
# Get current-iteration matrices
if not time_invariant:
obs_intercept_t = 0 if self.obs_intercept.shape[-1] == 1 else t
design_t = 0 if self.design.shape[-1] == 1 else t
state_intercept_t = (
0 if self.state_intercept.shape[-1] == 1 else t)
transition_t = 0 if self.transition.shape[-1] == 1 else t
selection_t = 0 if self.selection.shape[-1] == 1 else t
obs_intercept = self['obs_intercept', :, obs_intercept_t]
design = self['design', :, :, design_t]
state_intercept = self['state_intercept', :, state_intercept_t]
transition = self['transition', :, :, transition_t]
selection = self['selection', :, :, selection_t]
# Iterate the measurement equation
simulated_obs[t] = (
obs_intercept + np.dot(design, simulated_states[t]) +
measurement_shock)
# Iterate the state equation
simulated_states[t+1] = (
state_intercept + np.dot(transition, simulated_states[t]) +
np.dot(selection, state_shock))
return simulated_obs, simulated_states[:-1]
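# Illustrative sketch (hypothetical helper, not part of the original class):
# the 'stationary' branch of `simulate` obtains the initial state covariance
# by solving the discrete Lyapunov equation P = T P T' + R Q R'. For a scalar
# AR(1) with T = 0.5 and Q = 1 this gives P = Q / (1 - T**2) = 4/3.
def _example_stationary_initial_cov(self):
    from scipy.linalg import solve_discrete_lyapunov
    T = self['transition', :, :, 0]
    R = self['selection', :, :, 0]
    Q = self['state_cov', :, :, 0]
    return solve_discrete_lyapunov(T, R.dot(Q).dot(R.T))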
def impulse_responses(self, steps=10, impulse=0, orthogonalized=False,
cumulative=False, **kwargs):
r"""
Impulse response function
Parameters
----------
steps : int, optional
The number of steps for which impulse responses are calculated.
Default is 10. Note that the initial impulse is not counted as a
step, so if `steps=1`, the output will have 2 entries.
impulse : int or array_like
If an integer, the state innovation to pulse; must be between 0
and `k_posdef-1` where `k_posdef` is the same as in the state
space model. Alternatively, a custom impulse vector may be
provided; must be a column vector with shape `(k_posdef, 1)`.
orthogonalized : boolean, optional
Whether or not to perform impulse using orthogonalized innovations.
Note that this will also affect custom `impulse` vectors. Default
is False.
cumulative : boolean, optional
Whether or not to return cumulative impulse responses. Default is
False.
**kwargs
If the model is time-varying and `steps` is greater than the number
of observations, any of the state space representation matrices
that are time-varying must have updated values provided for the
out-of-sample steps.
For example, if `design` is a time-varying component, `nobs` is 10,
and `steps` is 15, a (`k_endog` x `k_states` x 5) matrix must be
provided with the new design matrix values.
Returns
-------
impulse_responses : array
Responses for each endogenous variable due to the impulse
given by the `impulse` argument. A (steps + 1 x k_endog) array.
Notes
-----
Intercepts in the measurement and state equation are ignored when
calculating impulse responses.
"""
# Since the first step is the impulse itself, we actually want steps+1
steps += 1
# Check for what kind of impulse we want
if type(impulse) == int:
if impulse >= self.k_posdef or impulse < 0:
raise ValueError('Invalid value for `impulse`. Must be the'
' index of one of the state innovations.')
# Create the (non-orthogonalized) impulse vector
idx = impulse
impulse = np.zeros(self.k_posdef)
impulse[idx] = 1
else:
impulse = np.array(impulse)
if impulse.ndim > 1:
impulse = np.squeeze(impulse)
if not impulse.shape == (self.k_posdef,):
raise ValueError('Invalid impulse vector. Must be shaped'
' (%d,)' % self.k_posdef)
# Orthogonalize the impulses, if requested, using Cholesky on the
# first state covariance matrix
if orthogonalized:
state_chol = np.linalg.cholesky(self.state_cov[:, :, 0])
impulse = np.dot(state_chol, impulse)
# If we have a time-invariant system, we can solve for the IRF directly
if self.time_invariant:
# Get the state space matrices
design = self.design[:, :, 0]
transition = self.transition[:, :, 0]
selection = self.selection[:, :, 0]
# Holding arrays
irf = np.zeros((steps, self.k_endog), dtype=self.dtype)
states = np.zeros((steps, self.k_states), dtype=self.dtype)
# First iteration
states[0] = np.dot(selection, impulse)
irf[0] = np.dot(design, states[0])
# Iterations
for t in range(1, steps):
states[t] = np.dot(transition, states[t-1])
irf[t] = np.dot(design, states[t])
# Otherwise, create a new model
else:
# Get the basic model components
representation = {}
for name, shape in self.shapes.items():
if name in ['obs', 'obs_intercept', 'state_intercept']:
continue
representation[name] = getattr(self, name)
# Allow additional specification
warning = ('Model has time-invariant %s matrix, so the %s'
' argument to `irf` has been ignored.')
exception = ('Impulse response functions for models with'
' time-varying %s matrix requires an updated'
' time-varying matrix for any periods beyond those in'
' the original model.')
for name, shape in self.shapes.items():
if name in ['obs', 'obs_intercept', 'state_intercept']:
continue
if representation[name].shape[-1] == 1:
if name in kwargs:
warn(warning % (name, name), ValueWarning)
elif name not in kwargs:
raise ValueError(exception % name)
else:
mat = np.asarray(kwargs[name])
validate_matrix_shape(name, mat.shape, shape[0],
shape[1], steps)
if mat.ndim < 3 or not mat.shape[2] == steps:
raise ValueError(exception % name)
representation[name] = np.c_[representation[name], mat]
# Setup the new statespace representation
model_kwargs = {
'filter_method': self.filter_method,
'inversion_method': self.inversion_method,
'stability_method': self.stability_method,
'conserve_memory': self.conserve_memory,
'tolerance': self.tolerance,
'loglikelihood_burn': self.loglikelihood_burn
}
model_kwargs.update(representation)
model = KalmanFilter(np.zeros(self.endog.T.shape), self.k_states,
self.k_posdef, **model_kwargs)
model.initialize_approximate_diffuse()
model._initialize_filter()
model._initialize_state()
# Get the impulse response function via simulation of the state
# space model, but with other shocks set to zero
# Since simulate returns the zero-th period, we need to simulate
# steps + 1 periods and exclude the zero-th observation.
steps += 1
measurement_shocks = np.zeros((steps, self.k_endog))
state_shocks = np.zeros((steps, self.k_posdef))
state_shocks[0] = impulse
irf, _ = model.simulate(
steps, measurement_shocks=measurement_shocks,
state_shocks=state_shocks)
irf = irf[1:]
# Get the cumulative response if requested
if cumulative:
irf = np.cumsum(irf, axis=0)
return irf
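# Illustrative sketch (hypothetical helper, not part of the original module):
# the time-invariant IRF recursion above, written out for a scalar AR(1)
# state space (y_t = alpha_t, alpha_t = 0.5 * alpha_{t-1} + eta_t).
def _example_ar1_impulse_responses(steps=5):
    design = np.array([[1.0]])
    transition = np.array([[0.5]])
    selection = np.array([[1.0]])
    impulse = np.array([1.0])
    irf = np.zeros((steps, 1))
    states = np.zeros((steps, 1))
    states[0] = selection.dot(impulse)
    irf[0] = design.dot(states[0])
    for t in range(1, steps):
        states[t] = transition.dot(states[t - 1])
        irf[t] = design.dot(states[t])
    return irf  # [[1.0], [0.5], [0.25], [0.125], [0.0625]]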
class FilterResults(FrozenRepresentation):
"""
Results from applying the Kalman filter to a state space model.
Parameters
----------
model : Representation
A Statespace representation
Attributes
----------
nobs : int
Number of observations.
k_endog : int
The dimension of the observation series.
k_states : int
The dimension of the unobserved state process.
k_posdef : int
The dimension of a guaranteed positive definite
covariance matrix describing the shocks in the
transition equation.
dtype : dtype
Datatype of representation matrices
prefix : str
BLAS prefix of representation matrices
shapes : dictionary of name,tuple
A dictionary recording the shapes of each of the
representation matrices as tuples.
endog : array
The observation vector.
design : array
The design matrix, :math:`Z`.
obs_intercept : array
The intercept for the observation equation, :math:`d`.
obs_cov : array
The covariance matrix for the observation equation :math:`H`.
transition : array
The transition matrix, :math:`T`.
state_intercept : array
The intercept for the transition equation, :math:`c`.
selection : array
The selection matrix, :math:`R`.
state_cov : array
The covariance matrix for the state equation :math:`Q`.
missing : array of bool
An array of the same size as `endog`, filled
with boolean values that are True if the
corresponding entry in `endog` is NaN and False
otherwise.
nmissing : array of int
An array of size `nobs`, where the ith entry
is the number (between 0 and `k_endog`) of NaNs in
the ith row of the `endog` array.
time_invariant : bool
Whether or not the representation matrices are time-invariant
initialization : str
Kalman filter initialization method.
    initial_state : array_like
        The state vector used to initialize the Kalman filter.
    initial_state_cov : array_like
        The state covariance matrix used to initialize the Kalman filter.
filter_method : int
Bitmask representing the Kalman filtering method
inversion_method : int
Bitmask representing the method used to
invert the forecast error covariance matrix.
stability_method : int
Bitmask representing the methods used to promote
numerical stability in the Kalman filter
recursions.
conserve_memory : int
Bitmask representing the selected memory conservation method.
filter_timing : int
Whether or not to use the alternate timing convention.
tolerance : float
The tolerance at which the Kalman filter
determines convergence to steady-state.
loglikelihood_burn : int
The number of initial periods during which
the loglikelihood is not recorded.
converged : bool
Whether or not the Kalman filter converged.
period_converged : int
The time period in which the Kalman filter converged.
filtered_state : array
The filtered state vector at each time period.
filtered_state_cov : array
The filtered state covariance matrix at each time period.
predicted_state : array
The predicted state vector at each time period.
predicted_state_cov : array
The predicted state covariance matrix at each time period.
kalman_gain : array
The Kalman gain at each time period.
forecasts : array
The one-step-ahead forecasts of observations at each time period.
forecasts_error : array
The forecast errors at each time period.
forecasts_error_cov : array
The forecast error covariance matrices at each time period.
llf_obs : array
The loglikelihood values at each time period.
"""
_filter_attributes = [
'filter_method', 'inversion_method', 'stability_method',
'conserve_memory', 'filter_timing', 'tolerance', 'loglikelihood_burn',
'converged', 'period_converged', 'filtered_state',
'filtered_state_cov', 'predicted_state', 'predicted_state_cov',
'tmp1', 'tmp2', 'tmp3', 'tmp4', 'forecasts',
'forecasts_error', 'forecasts_error_cov', 'llf_obs',
'collapsed_forecasts', 'collapsed_forecasts_error',
'collapsed_forecasts_error_cov',
]
_filter_options = (
KalmanFilter.filter_methods + KalmanFilter.stability_methods +
KalmanFilter.inversion_methods + KalmanFilter.memory_options
)
_attributes = FrozenRepresentation._model_attributes + _filter_attributes
def __init__(self, model):
super(FilterResults, self).__init__(model)
# Setup caches for uninitialized objects
self._kalman_gain = None
self._standardized_forecasts_error = None
def update_representation(self, model, only_options=False):
"""
Update the results to match a given model
Parameters
----------
model : Representation
The model object from which to take the updated values.
only_options : boolean, optional
If set to true, only the filter options are updated, and the state
space representation is not updated. Default is False.
Notes
-----
This method is rarely required except for internal usage.
"""
if not only_options:
super(FilterResults, self).update_representation(model)
# Save the options as boolean variables
for name in self._filter_options:
setattr(self, name, getattr(model, name, None))
def update_filter(self, kalman_filter):
"""
Update the filter results
Parameters
----------
kalman_filter : KalmanFilter
The model object from which to take the updated values.
Notes
-----
This method is rarely required except for internal usage.
"""
# State initialization
self.initial_state = np.array(
kalman_filter.model.initial_state, copy=True
)
self.initial_state_cov = np.array(
kalman_filter.model.initial_state_cov, copy=True
)
# Save Kalman filter parameters
self.filter_method = kalman_filter.filter_method
self.inversion_method = kalman_filter.inversion_method
self.stability_method = kalman_filter.stability_method
self.conserve_memory = kalman_filter.conserve_memory
self.filter_timing = kalman_filter.filter_timing
self.tolerance = kalman_filter.tolerance
self.loglikelihood_burn = kalman_filter.loglikelihood_burn
# Save Kalman filter output
self.converged = bool(kalman_filter.converged)
self.period_converged = kalman_filter.period_converged
self.filtered_state = np.array(kalman_filter.filtered_state, copy=True)
self.filtered_state_cov = np.array(
kalman_filter.filtered_state_cov, copy=True
)
self.predicted_state = np.array(
kalman_filter.predicted_state, copy=True
)
self.predicted_state_cov = np.array(
kalman_filter.predicted_state_cov, copy=True
)
# Reset caches
has_missing = np.sum(self.nmissing) > 0
if not self._compatibility_mode and not (self.memory_no_std_forecast or
self.invert_lu or
self.solve_lu or
self.filter_collapsed):
if has_missing:
self._standardized_forecasts_error = np.array(
reorder_missing_vector(
kalman_filter.standardized_forecast_error,
self.missing, prefix=self.prefix))
else:
self._standardized_forecasts_error = np.array(
kalman_filter.standardized_forecast_error, copy=True)
else:
self._standardized_forecasts_error = None
if not self._compatibility_mode:
# In the partially missing data case, all entries will
# be in the upper left submatrix rather than the correct placement
# Re-ordering does not make sense in the collapsed case.
if has_missing and (not self.memory_no_gain and
not self.filter_collapsed):
self._kalman_gain = np.array(reorder_missing_matrix(
kalman_filter.kalman_gain, self.missing, reorder_cols=True,
prefix=self.prefix))
self.tmp1 = np.array(reorder_missing_matrix(
kalman_filter.tmp1, self.missing, reorder_cols=True,
prefix=self.prefix))
self.tmp2 = np.array(reorder_missing_vector(
kalman_filter.tmp2, self.missing, prefix=self.prefix))
self.tmp3 = np.array(reorder_missing_matrix(
kalman_filter.tmp3, self.missing, reorder_rows=True,
prefix=self.prefix))
self.tmp4 = np.array(reorder_missing_matrix(
kalman_filter.tmp4, self.missing, reorder_cols=True,
reorder_rows=True, prefix=self.prefix))
else:
self._kalman_gain = np.array(
kalman_filter.kalman_gain, copy=True)
self.tmp1 = np.array(kalman_filter.tmp1, copy=True)
self.tmp2 = np.array(kalman_filter.tmp2, copy=True)
self.tmp3 = np.array(kalman_filter.tmp3, copy=True)
self.tmp4 = np.array(kalman_filter.tmp4, copy=True)
else:
self._kalman_gain = None
        # Note: use forecasts rather than forecast, so as not to interfere
        # with the `forecast` methods in subclasses
self.forecasts = np.array(kalman_filter.forecast, copy=True)
self.forecasts_error = np.array(
kalman_filter.forecast_error, copy=True
)
self.forecasts_error_cov = np.array(
kalman_filter.forecast_error_cov, copy=True
)
self.llf_obs = np.array(kalman_filter.loglikelihood, copy=True)
# If there was missing data, save the original values from the Kalman
        # filter output, since the code below will set the values
        # corresponding to the missing observations to nans.
self.missing_forecasts = None
self.missing_forecasts_error = None
self.missing_forecasts_error_cov = None
if np.sum(self.nmissing) > 0:
# Copy the provided arrays (which are as the Kalman filter dataset)
# into new variables
self.missing_forecasts = np.copy(self.forecasts)
self.missing_forecasts_error = np.copy(self.forecasts_error)
self.missing_forecasts_error_cov = (
np.copy(self.forecasts_error_cov)
)
# Save the collapsed values
self.collapsed_forecasts = None
self.collapsed_forecasts_error = None
self.collapsed_forecasts_error_cov = None
if self.filter_collapsed:
# Copy the provided arrays (which are from the collapsed dataset)
# into new variables
self.collapsed_forecasts = self.forecasts[:self.k_states, :]
self.collapsed_forecasts_error = (
self.forecasts_error[:self.k_states, :]
)
self.collapsed_forecasts_error_cov = (
self.forecasts_error_cov[:self.k_states, :self.k_states, :]
)
# Recreate the original arrays (which should be from the original
# dataset) in the appropriate dimension
self.forecasts = np.zeros((self.k_endog, self.nobs))
self.forecasts_error = np.zeros((self.k_endog, self.nobs))
self.forecasts_error_cov = (
np.zeros((self.k_endog, self.k_endog, self.nobs))
)
# Fill in missing values in the forecast, forecast error, and
# forecast error covariance matrix (this is required due to how the
# Kalman filter implements observations that are either partly or
# completely missing)
# Construct the predictions, forecasts
if not (self.memory_no_forecast or self.memory_no_predicted):
for t in range(self.nobs):
design_t = 0 if self.design.shape[2] == 1 else t
obs_cov_t = 0 if self.obs_cov.shape[2] == 1 else t
obs_intercept_t = 0 if self.obs_intercept.shape[1] == 1 else t
# For completely missing observations, the Kalman filter will
# produce forecasts, but forecast errors and the forecast
# error covariance matrix will be zeros - make them nan to
# improve clarity of results.
if self.nmissing[t] > 0:
mask = ~self.missing[:, t].astype(bool)
# We can recover forecasts
# For partially missing observations, the Kalman filter
# will produce all elements (forecasts, forecast errors,
# forecast error covariance matrices) as usual, but their
# dimension will only be equal to the number of non-missing
# elements, and their location in memory will be in the
# first blocks (e.g. for the forecasts_error, the first
# k_endog - nmissing[t] columns will be filled in),
# regardless of which endogenous variables they refer to
                    # (i.e. the non-missing endogenous variables for that
# observation). Furthermore, the forecast error covariance
# matrix is only valid for those elements. What is done is
# to set all elements to nan for these observations so that
# they are flagged as missing. The variables
# missing_forecasts, etc. then provide the forecasts, etc.
# provided by the Kalman filter, from which the data can be
# retrieved if desired.
self.forecasts[:, t] = np.dot(
self.design[:, :, design_t], self.predicted_state[:, t]
) + self.obs_intercept[:, obs_intercept_t]
self.forecasts_error[:, t] = np.nan
self.forecasts_error[mask, t] = (
self.endog[mask, t] - self.forecasts[mask, t])
self.forecasts_error_cov[:, :, t] = np.dot(
np.dot(self.design[:, :, design_t],
self.predicted_state_cov[:, :, t]),
self.design[:, :, design_t].T
) + self.obs_cov[:, :, obs_cov_t]
# In the collapsed case, everything just needs to be rebuilt
# for the original observed data, since the Kalman filter
# produced these values for the collapsed data.
elif self.filter_collapsed:
self.forecasts[:, t] = np.dot(
self.design[:, :, design_t], self.predicted_state[:, t]
) + self.obs_intercept[:, obs_intercept_t]
self.forecasts_error[:, t] = (
self.endog[:, t] - self.forecasts[:, t]
)
self.forecasts_error_cov[:, :, t] = np.dot(
np.dot(self.design[:, :, design_t],
self.predicted_state_cov[:, :, t]),
self.design[:, :, design_t].T
) + self.obs_cov[:, :, obs_cov_t]
@property
def kalman_gain(self):
"""
Kalman gain matrices
"""
if self._kalman_gain is None:
# k x n
self._kalman_gain = np.zeros(
(self.k_states, self.k_endog, self.nobs), dtype=self.dtype)
for t in range(self.nobs):
# In the case of entirely missing observations, let the Kalman
# gain be zeros.
if self.nmissing[t] == self.k_endog:
continue
design_t = 0 if self.design.shape[2] == 1 else t
transition_t = 0 if self.transition.shape[2] == 1 else t
if self.nmissing[t] == 0:
self._kalman_gain[:, :, t] = np.dot(
np.dot(
self.transition[:, :, transition_t],
self.predicted_state_cov[:, :, t]
),
np.dot(
np.transpose(self.design[:, :, design_t]),
np.linalg.inv(self.forecasts_error_cov[:, :, t])
)
)
else:
mask = ~self.missing[:, t].astype(bool)
F = self.forecasts_error_cov[np.ix_(mask, mask, [t])]
self._kalman_gain[:, mask, t] = np.dot(
np.dot(
self.transition[:, :, transition_t],
self.predicted_state_cov[:, :, t]
),
np.dot(
np.transpose(self.design[mask, :, design_t]),
np.linalg.inv(F[:, :, 0])
)
)
return self._kalman_gain
@property
def standardized_forecasts_error(self):
"""
Standardized forecast errors
Notes
-----
The forecast errors produced by the Kalman filter are
.. math::
v_t \sim N(0, F_t)
Hypothesis tests are usually applied to the standardized residuals
.. math::
v_t^s = B_t v_t \sim N(0, I)
where :math:`B_t = L_t^{-1}` and :math:`F_t = L_t L_t'`; then
:math:`F_t^{-1} = (L_t')^{-1} L_t^{-1} = B_t' B_t`; :math:`B_t`
and :math:`L_t` are lower triangular. Finally,
:math:`B_t v_t \sim N(0, B_t F_t B_t')` and
:math:`B_t F_t B_t' = L_t^{-1} L_t L_t' (L_t')^{-1} = I`.
Thus we can rewrite :math:`v_t^s = L_t^{-1} v_t` or
:math:`L_t v_t^s = v_t`; the latter equation is the form required to
use a linear solver to recover :math:`v_t^s`. Since :math:`L_t` is
lower triangular, we can use a triangular solver (?TRTRS).
"""
if self._standardized_forecasts_error is None:
if self.k_endog == 1:
self._standardized_forecasts_error = (
self.forecasts_error /
self.forecasts_error_cov[0, 0, :]**0.5)
else:
from scipy import linalg
self._standardized_forecasts_error = np.zeros(
self.forecasts_error.shape, dtype=self.dtype)
for t in range(self.forecasts_error_cov.shape[2]):
if self.nmissing[t] > 0:
self._standardized_forecasts_error[:, t] = np.nan
if self.nmissing[t] < self.k_endog:
mask = ~self.missing[:, t].astype(bool)
F = self.forecasts_error_cov[np.ix_(mask, mask, [t])]
upper, _ = linalg.cho_factor(F[:, :, 0])
self._standardized_forecasts_error[mask, t] = (
linalg.solve_triangular(
upper, self.forecasts_error[mask, t], trans=1))
return self._standardized_forecasts_error
def predict(self, start=None, end=None, dynamic=None, **kwargs):
r"""
In-sample and out-of-sample prediction for state space models generally
Parameters
----------
start : int, optional
Zero-indexed observation number at which to start forecasting,
i.e., the first forecast will be at start.
end : int, optional
Zero-indexed observation number at which to end forecasting, i.e.,
the last forecast will be at end.
dynamic : int, optional
Offset relative to `start` at which to begin dynamic prediction.
Prior to this observation, true endogenous values will be used for
prediction; starting with this observation and continuing through
the end of prediction, forecasted endogenous values will be used
instead.
**kwargs
If the prediction range is outside of the sample range, any
of the state space representation matrices that are time-varying
must have updated values provided for the out-of-sample range.
            For example, if `obs_intercept` is a time-varying component and
the prediction range extends 10 periods beyond the end of the
sample, a (`k_endog` x 10) matrix must be provided with the new
intercept values.
Returns
-------
results : PredictionResults
A PredictionResults object.
Notes
-----
All prediction is performed by applying the deterministic part of the
measurement equation using the predicted state variables.
Out-of-sample prediction first applies the Kalman filter to missing
data for the number of periods desired to obtain the predicted states.
"""
# Cannot predict if we do not have appropriate arrays
if self.memory_no_forecast or self.memory_no_predicted:
raise ValueError('Predict is not possible if memory conservation'
' has been used to avoid storing forecasts or'
' predicted values.')
# Get the start and the end of the entire prediction range
if start is None:
start = 0
elif start < 0:
raise ValueError('Cannot predict values previous to the sample.')
if end is None:
end = self.nobs
        # Prediction and forecasting are performed by iterating the Kalman
        # filter through the entire range [0, end]
# Then, everything is returned corresponding to the range [start, end].
# In order to perform the calculations, the range is separately split
# up into the following categories:
# - static: (in-sample) the Kalman filter is run as usual
# - dynamic: (in-sample) the Kalman filter is run, but on missing data
# - forecast: (out-of-sample) the Kalman filter is run, but on missing
# data
# Short-circuit if end is before start
if end <= start:
raise ValueError('End of prediction must be after start.')
# Get the number of forecasts to make after the end of the sample
nforecast = max(0, end - self.nobs)
# Get the number of dynamic prediction periods
# If `dynamic=True`, then assume that we want to begin dynamic
# prediction at the start of the sample prediction.
if dynamic is True:
dynamic = 0
# If `dynamic=False`, then assume we want no dynamic prediction
if dynamic is False:
dynamic = None
ndynamic = 0
if dynamic is not None:
# Replace the relative dynamic offset with an absolute offset
dynamic = start + dynamic
# Validate the `dynamic` parameter
if dynamic < 0:
raise ValueError('Dynamic prediction cannot begin prior to the'
' first observation in the sample.')
elif dynamic > end:
warn('Dynamic prediction specified to begin after the end of'
' prediction, and so has no effect.', ValueWarning)
dynamic = None
elif dynamic > self.nobs:
warn('Dynamic prediction specified to begin during'
' out-of-sample forecasting period, and so has no'
' effect.', ValueWarning)
dynamic = None
# Get the total size of the desired dynamic forecasting component
# Note: the first `dynamic` periods of prediction are actually
# *not* dynamic, because dynamic prediction begins at observation
# `dynamic`.
if dynamic is not None:
ndynamic = max(0, min(end, self.nobs) - dynamic)
# Get the number of in-sample static predictions
nstatic = min(end, self.nobs) if dynamic is None else dynamic
# Construct the design and observation intercept and covariance
# matrices for start-npadded:end. If not time-varying in the original
# model, then they will be copied over if none are provided in
# `kwargs`. Otherwise additional matrices must be provided in `kwargs`.
representation = {}
for name, shape in self.shapes.items():
if name == 'obs':
continue
representation[name] = getattr(self, name)
# Update the matrices from kwargs for forecasts
warning = ('Model has time-invariant %s matrix, so the %s'
' argument to `predict` has been ignored.')
exception = ('Forecasting for models with time-varying %s matrix'
' requires an updated time-varying matrix for the'
' period to be forecasted.')
if nforecast > 0:
for name, shape in self.shapes.items():
if name == 'obs':
continue
if representation[name].shape[-1] == 1:
if name in kwargs:
warn(warning % (name, name), ValueWarning)
elif name not in kwargs:
raise ValueError(exception % name)
else:
mat = np.asarray(kwargs[name])
if len(shape) == 2:
validate_vector_shape(name, mat.shape,
shape[0], nforecast)
if mat.ndim < 2 or not mat.shape[1] == nforecast:
raise ValueError(exception % name)
representation[name] = np.c_[representation[name], mat]
else:
validate_matrix_shape(name, mat.shape, shape[0],
shape[1], nforecast)
if mat.ndim < 3 or not mat.shape[2] == nforecast:
raise ValueError(exception % name)
representation[name] = np.c_[representation[name], mat]
# Update the matrices from kwargs for dynamic prediction in the case
# that `end` is less than `nobs` and `dynamic` is less than `end`. In
# this case, any time-varying matrices in the default `representation`
# will be too long, causing an error to be thrown below in the
# KalmanFilter(...) construction call, because the endog has length
# nstatic + ndynamic + nforecast, whereas the time-varying matrices
# from `representation` have length nobs.
if ndynamic > 0 and end < self.nobs:
for name, shape in self.shapes.items():
if not name == 'obs' and representation[name].shape[-1] > 1:
representation[name] = representation[name][..., :end]
# Construct the predicted state and covariance matrix for each time
# period depending on whether that time period corresponds to
# one-step-ahead prediction, dynamic prediction, or out-of-sample
# forecasting.
# If we only have simple prediction, then we can use the already saved
# Kalman filter output
if ndynamic == 0 and nforecast == 0:
results = self
else:
# Construct the new endogenous array.
endog = np.empty((self.k_endog, ndynamic + nforecast))
endog.fill(np.nan)
endog = np.asfortranarray(np.c_[self.endog[:, :nstatic], endog])
# Setup the new statespace representation
model_kwargs = {
'filter_method': self.filter_method,
'inversion_method': self.inversion_method,
'stability_method': self.stability_method,
'conserve_memory': self.conserve_memory,
'filter_timing': self.filter_timing,
'tolerance': self.tolerance,
'loglikelihood_burn': self.loglikelihood_burn
}
model_kwargs.update(representation)
model = KalmanFilter(
endog, self.k_states, self.k_posdef, **model_kwargs
)
model.initialize_known(
self.initial_state,
self.initial_state_cov
)
model._initialize_filter()
model._initialize_state()
results = self._predict(nstatic, ndynamic, nforecast, model)
return PredictionResults(results, start, end, nstatic, ndynamic,
nforecast)
def _predict(self, nstatic, ndynamic, nforecast, model):
        # Note: this doesn't use self, and could either be made a static
        # method or moved outside the class altogether.
# Get the underlying filter
kfilter = model._kalman_filter
# Save this (which shares memory with the memoryview on which the
# Kalman filter will be operating) so that we can replace actual data
# with predicted data during dynamic forecasting
endog = model._representations[model.prefix]['obs']
for t in range(kfilter.model.nobs):
# Run the Kalman filter for the first `nstatic` periods (for
# which dynamic computation will not be performed)
if t < nstatic:
next(kfilter)
# Perform dynamic prediction
elif t < nstatic + ndynamic:
design_t = 0 if model.design.shape[2] == 1 else t
obs_intercept_t = 0 if model.obs_intercept.shape[1] == 1 else t
# Unconditional value is the intercept (often zeros)
endog[:, t] = model.obs_intercept[:, obs_intercept_t]
# If t > 0, then we can condition the forecast on the state
if t > 0:
# Predict endog[:, t] given `predicted_state` calculated in
# previous iteration (i.e. t-1)
endog[:, t] += np.dot(
model.design[:, :, design_t],
kfilter.predicted_state[:, t]
)
# Advance Kalman filter
next(kfilter)
# Perform any (one-step-ahead) forecasting
else:
next(kfilter)
# Return the predicted state and predicted state covariance matrices
results = FilterResults(model)
results.update_representation(model)
results.update_filter(kfilter)
return results
class PredictionResults(FilterResults):
r"""
Results of in-sample and out-of-sample prediction for state space models
generally
Parameters
----------
results : FilterResults
Output from filtering, corresponding to the prediction desired
start : int
Zero-indexed observation number at which to start forecasting,
i.e., the first forecast will be at start.
end : int
Zero-indexed observation number at which to end forecasting, i.e.,
the last forecast will be at end.
nstatic : int
Number of in-sample static predictions (these are always the first
elements of the prediction output).
ndynamic : int
Number of in-sample dynamic predictions (these always follow the static
predictions directly, and are directly followed by the forecasts).
nforecast : int
        Number of out-of-sample forecasts (these always follow the dynamic
        predictions directly).
Attributes
----------
npredictions : int
Number of observations in the predicted series; this is not necessarily
the same as the number of observations in the original model from which
prediction was performed.
start : int
Zero-indexed observation number at which to start prediction,
i.e., the first predict will be at `start`; this is relative to the
original model from which prediction was performed.
end : int
Zero-indexed observation number at which to end prediction,
i.e., the last predict will be at `end`; this is relative to the
original model from which prediction was performed.
nstatic : int
Number of in-sample static predictions.
ndynamic : int
Number of in-sample dynamic predictions.
nforecast : int
        Number of out-of-sample forecasts.
endog : array
The observation vector.
design : array
The design matrix, :math:`Z`.
obs_intercept : array
The intercept for the observation equation, :math:`d`.
obs_cov : array
The covariance matrix for the observation equation :math:`H`.
transition : array
The transition matrix, :math:`T`.
state_intercept : array
The intercept for the transition equation, :math:`c`.
selection : array
The selection matrix, :math:`R`.
state_cov : array
The covariance matrix for the state equation :math:`Q`.
filtered_state : array
The filtered state vector at each time period.
filtered_state_cov : array
The filtered state covariance matrix at each time period.
predicted_state : array
The predicted state vector at each time period.
predicted_state_cov : array
The predicted state covariance matrix at each time period.
forecasts : array
The one-step-ahead forecasts of observations at each time period.
forecasts_error : array
The forecast errors at each time period.
forecasts_error_cov : array
The forecast error covariance matrices at each time period.
Notes
-----
    The provided ranges must be conformable, meaning that
    `end - start == nstatic + ndynamic + nforecast`.
This class is essentially a view to the FilterResults object, but
returning the appropriate ranges for everything.
"""
representation_attributes = [
        'endog', 'design', 'obs_intercept',
'obs_cov', 'transition', 'state_intercept', 'selection',
'state_cov'
]
filter_attributes = [
'filtered_state', 'filtered_state_cov',
'predicted_state', 'predicted_state_cov',
'forecasts', 'forecasts_error', 'forecasts_error_cov'
]
def __init__(self, results, start, end, nstatic, ndynamic, nforecast):
# Save the filter results object
self.results = results
# Save prediction ranges
        self.npredictions = end - start
self.start = start
self.end = end
self.nstatic = nstatic
self.ndynamic = ndynamic
self.nforecast = nforecast
def __getattr__(self, attr):
"""
Provide access to the representation and filtered output in the
appropriate range (`start` - `end`).
"""
# Prevent infinite recursive lookups
if attr[0] == '_':
raise AttributeError("'%s' object has no attribute '%s'" %
(self.__class__.__name__, attr))
_attr = '_' + attr
# Cache the attribute
if not hasattr(self, _attr):
if attr == 'endog' or attr in self.filter_attributes:
# Get a copy
value = getattr(self.results, attr).copy()
# Subset to the correct time frame
value = value[..., self.start:self.end]
elif attr in self.representation_attributes:
value = getattr(self.results, attr).copy()
# If a time-invariant matrix, return it. Otherwise, subset to
# the correct period.
if value.shape[-1] == 1:
value = value[..., 0]
else:
value = value[..., self.start:self.end]
else:
raise AttributeError("'%s' object has no attribute '%s'" %
(self.__class__.__name__, attr))
setattr(self, _attr, value)
return getattr(self, _attr)
| bsd-3-clause |
rjw57/cubbie | migrations/versions/316bb58e84f_add_user_identities.py | 1 | 1110 | """add user_identities
Revision ID: 316bb58e84f
Revises: 38c8ec357e0
Create Date: 2015-03-11 01:40:12.157458
"""
# revision identifiers, used by Alembic.
revision = '316bb58e84f'
down_revision = '38c8ec357e0'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('user_identities',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('provider', sa.Text(), nullable=False),
sa.Column('provider_user_id', sa.Text(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index('idx_user_identities_provider_provider_id', 'user_identities', ['provider', 'provider_user_id'], unique=False)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_index('idx_user_identities_provider_provider_id', table_name='user_identities')
op.drop_table('user_identities')
### end Alembic commands ###
| mit |
njl/pycon | symposion/schedule/tests/factories.py | 3 | 1750 | import datetime
import factory
import factory.fuzzy
from pycon.tests.factories import PyConTutorialProposalFactory
from symposion.conference.models import Section
from symposion.conference.tests.factories import SectionFactory
from symposion.speakers.tests.factories import SpeakerFactory
from ..models import Presentation, Slot, SlotKind, Day, Schedule
class ScheduleFactory(factory.DjangoModelFactory):
class Meta:
model = Schedule
section = factory.SubFactory(SectionFactory)
class DayFactory(factory.DjangoModelFactory):
class Meta:
model = Day
schedule = factory.SubFactory(ScheduleFactory)
date = factory.fuzzy.FuzzyDate(start_date=datetime.date(1900, 1, 1))
class SlotKindFactory(factory.DjangoModelFactory):
class Meta:
model = SlotKind
schedule = factory.SubFactory(ScheduleFactory)
label = factory.fuzzy.FuzzyText()
class SlotFactory(factory.DjangoModelFactory):
class Meta:
model = Slot
day = factory.SubFactory(DayFactory)
# .kind and .day both need to point at the same schedule
kind = factory.SubFactory(
SlotKindFactory,
schedule=factory.LazyAttribute(lambda kind: kind.factory_parent.day.schedule)
)
start = factory.LazyAttribute(lambda n: datetime.time())
end = factory.LazyAttribute(lambda n: datetime.time())
class PresentationFactory(factory.DjangoModelFactory):
class Meta:
model = Presentation
title = 'Presentation'
description = 'Description'
abstract = 'Abstract'
speaker = factory.SubFactory(SpeakerFactory)
proposal_base = factory.SubFactory(PyConTutorialProposalFactory)
section = Section.objects.get(slug='tutorials')
slot = factory.SubFactory(SlotFactory)
| bsd-3-clause |
cwgreene/Nanostructure-Simulator | utils/plot_trajectories.py | 1 | 1140 | import os
import sys
import re
import pylab
def parse_trajectory_line(line):
trajectory = []
for x,y in re.findall("\(([0-9.]+), ([0-9.]+)\)",line):
trajectory.append((float(x),float(y)))
return trajectory
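# Illustrative example (hypothetical input):
# parse_trajectory_line("(1.0, 2.0) (3.5, 4.25)") -> [(1.0, 2.0), (3.5, 4.25)]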
def generate_trajectories(file):
	#get rid of the first two lines
file.readline()
file.readline()
#parse each line
for line in file:
yield parse_trajectory_line(line)
def open_trajectory_file(n):
for filename in os.listdir("results"):
if re.match(str(n)+"traj",filename):
return open("results/"+filename)
raise "File not found"
def display_trajectories(n):
input =""
file = open_trajectory_file(n)
trajectory_gen = generate_trajectories(file)
trajectory = trajectory_gen.next()
interactive = True
i = 0
while input != 'q':
first = map(lambda x: x[0],trajectory)
second = map(lambda x: x[1],trajectory)
pylab.plot(first,second)
if interactive:
input = raw_input()
if input == "go":
i += 1
interactive=False
if i %100 == 0:
print i
raw_input()
try:
trajectory=trajectory_gen.next()
except:
print "Done"
break
if __name__=="__main__":
display_trajectories(sys.argv[1])
| mit |
guymakam/Kodi-Israel | plugin.video.israelive/resources/lib/livestreamer/requests/packages/chardet/big5prober.py | 2931 | 1684 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import Big5DistributionAnalysis
from .mbcssm import Big5SMModel
class Big5Prober(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(Big5SMModel)
self._mDistributionAnalyzer = Big5DistributionAnalysis()
self.reset()
def get_charset_name(self):
return "Big5"
| gpl-2.0 |
danieljaouen/ansible | lib/ansible/plugins/inventory/auto.py | 25 | 2196 | # Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
name: auto
plugin_type: inventory
author:
- Matt Davis <@nitzmahone>
short_description: Loads and executes an inventory plugin specified in a YAML config
description:
- By whitelisting C(auto) as the final inventory plugin, any YAML inventory config file with a
C(plugin) key at its root will automatically cause the named plugin to be loaded and executed with that
config. This effectively provides automatic whitelisting of all installed/accessible inventory plugins.
- To disable this behavior, remove C(auto) from the C(INVENTORY_ENABLED) config element.
'''
EXAMPLES = '''
# This plugin is not intended for direct use; it is a fallback mechanism for automatic whitelisting of
# all installed inventory plugins.
'''
from ansible.errors import AnsibleParserError
from ansible.plugins.inventory import BaseInventoryPlugin
from ansible.plugins.loader import inventory_loader
class InventoryModule(BaseInventoryPlugin):
NAME = 'auto'
def verify_file(self, path):
if not path.endswith('.yml') and not path.endswith('.yaml'):
return False
return super(InventoryModule, self).verify_file(path)
def parse(self, inventory, loader, path, cache=True):
config_data = loader.load_from_file(path, cache=False)
plugin_name = config_data.get('plugin')
if not plugin_name:
raise AnsibleParserError("no root 'plugin' key found, '{0}' is not a valid YAML inventory plugin config file".format(path))
plugin = inventory_loader.get(plugin_name)
if not plugin:
raise AnsibleParserError("inventory config '{0}' specifies unknown plugin '{1}'".format(path, plugin_name))
if not plugin.verify_file(path):
raise AnsibleParserError("inventory config '{0}' could not be verified by plugin '{1}'".format(path, plugin_name))
plugin.parse(inventory, loader, path, cache=cache)
| gpl-3.0 |
jonathansick/androcmd | scripts/phat_baseline_test.py | 1 | 3612 | #!/usr/bin/env python
# encoding: utf-8
"""
Grid computation of dust attenuation for old vs. young stellar populations.
2015-05-12 - Created by Jonathan Sick
"""
import argparse
from androcmd.phatpipeline import PhatCatalog
from androcmd.baselineexp import SolarZPipeline, ThreeZPipeline
def main():
args = parse_args()
if args.pipeline == 'solarz':
# Use the single-Z solar pipeline
Pipeline = SolarZPipeline
elif args.pipeline == 'threez':
# Use the three-metallicity track pipeline
Pipeline = ThreeZPipeline
isoc = dict(isoc_kind='parsec_CAF09_v1.2S',
photsys_version='yang')
pipeline = Pipeline(brick=23,
root_dir=args.model_name,
isoc_args=isoc)
if args.fit is not None:
dataset = PhatCatalog(args.brick)
pipeline.fit(args.fit, [args.fit], dataset)
if args.plot_hess is not None:
from androcmd.baselineexp import plot_fit_hess_grid
dataset = PhatCatalog(args.brick)
plot_fit_hess_grid(args.plot_hess, pipeline, dataset)
if args.plot_diff is not None:
from androcmd.baselineexp import plot_diff_hess_grid
dataset = PhatCatalog(args.brick)
plot_diff_hess_grid(args.plot_diff, pipeline, dataset)
if args.plot_sfh is not None:
from androcmd.baselineexp import sfh_comparison_plot
dataset = PhatCatalog(args.brick)
sfh_comparison_plot(args.plot_sfh, pipeline, dataset)
if args.plot_zsfh is not None:
from androcmd.baselineexp import plot_sfh_metallicity_trends
dataset = PhatCatalog(args.brick)
for fit_key in args.plot_zsfh:
plot_path = "{model}_b{brick:d}_zsfh_{key}".format(
model=args.model_name, brick=args.brick, key=fit_key)
plot_sfh_metallicity_trends(plot_path, pipeline, dataset, fit_key)
if args.chi_table is not None:
from androcmd.baselineexp import tabulate_fit_chi
dataset = PhatCatalog(args.brick)
tabulate_fit_chi(args.chi_table, pipeline, dataset)
if args.plot_isoc is not None:
from androcmd.baselineexp import plot_isocs, plot_isocs_lewis
dataset = PhatCatalog(args.brick)
plot_isocs(args.plot_isoc, pipeline, dataset)
plot_isocs_lewis(args.plot_isoc + '_lewis', pipeline, dataset)
if args.plot_lock is not None:
from androcmd.baselineexp import plot_lockfile
plot_lockfile(args.plot_lock, pipeline)
def parse_args():
parser = argparse.ArgumentParser(
description="Model a brick with differential old/young dust.")
parser.add_argument('model_name')
parser.add_argument('brick', type=int)
parser.add_argument('--fit',
choices=['lewis', 'acs_rgb', 'acs_all',
'oir_all', 'ir_rgb'],
default=None)
parser.add_argument('--pipeline',
choices=['solarz', 'threez'],
default='solarz')
parser.add_argument('--plot-hess', default=None)
parser.add_argument('--plot-diff', default=None)
parser.add_argument('--plot-sfh', default=None)
parser.add_argument('--chi-table', default=None)
parser.add_argument('--plot-zsfh', nargs='*', default=None,
choices=['lewis', 'acs_rgb', 'acs_all',
'oir_all', 'ir_rgb'])
parser.add_argument('--plot-isoc', default=None)
parser.add_argument('--plot-lock', default=None)
return parser.parse_args()
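# Example invocation (hypothetical model name and options):
# python phat_baseline_test.py mymodel 23 --fit lewis --pipeline threez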
if __name__ == '__main__':
main()
| mit |
vlinhd11/vlinhd11-android-scripting | python/gdata/tests/gdata_tests/codesearch_test.py | 133 | 1930 | #!/usr/bin/python
#
# Copyright (C) 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'api.jscudder (Jeffrey Scudder)'
import unittest
import gdata.codesearch
import gdata.test_data
class CodeSearchDataTest(unittest.TestCase):
def setUp(self):
self.feed = gdata.codesearch.CodesearchFeedFromString(
gdata.test_data.CODE_SEARCH_FEED)
def testCorrectXmlConversion(self):
self.assert_(self.feed.id.text ==
'http://www.google.com/codesearch/feeds/search?q=malloc')
self.assert_(len(self.feed.entry) == 10)
for entry in self.feed.entry:
if entry.id.text == ('http://www.google.com/codesearch?hl=en&q=+ma'
'lloc+show:LDjwp-Iqc7U:84hEYaYsZk8:xDGReDhvNi0&sa=N&ct=rx&cd=1'
'&cs_p=http://www.gnu.org&cs_f=software/autoconf/manual/autoco'
'nf-2.60/autoconf.html-002&cs_p=http://www.gnu.org&cs_f=softwa'
're/autoconf/manual/autoconf-2.60/autoconf.html-002#first'):
self.assert_(len(entry.match) == 4)
for match in entry.match:
if match.line_number == '4':
self.assert_(match.type == 'text/html')
self.assert_(entry.file.name ==
'software/autoconf/manual/autoconf-2.60/autoconf.html-002')
self.assert_(entry.package.name == 'http://www.gnu.org')
self.assert_(entry.package.uri == 'http://www.gnu.org')
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
jeffery-do/Vizdoombot | doom/lib/python3.5/site-packages/scipy/stats/_stats_mstats_common.py | 12 | 8157 | from collections import namedtuple
import numpy as np
from . import distributions
__all__ = ['_find_repeats', 'linregress', 'theilslopes']
def linregress(x, y=None):
"""
Calculate a linear least-squares regression for two sets of measurements.
Parameters
----------
x, y : array_like
Two sets of measurements. Both arrays should have the same length.
If only x is given (and y=None), then it must be a two-dimensional
array where one dimension has length 2. The two sets of measurements
are then found by splitting the array along the length-2 dimension.
Returns
-------
slope : float
slope of the regression line
intercept : float
intercept of the regression line
rvalue : float
correlation coefficient
pvalue : float
two-sided p-value for a hypothesis test whose null hypothesis is
that the slope is zero.
stderr : float
Standard error of the estimated gradient.
See also
--------
optimize.curve_fit : Use non-linear least squares to fit a function to data.
optimize.leastsq : Minimize the sum of squares of a set of equations.
Examples
--------
>>> from scipy import stats
>>> np.random.seed(12345678)
>>> x = np.random.random(10)
>>> y = np.random.random(10)
>>> slope, intercept, r_value, p_value, std_err = stats.linregress(x,y)
# To get coefficient of determination (r_squared)
>>> print("r-squared:", r_value**2)
('r-squared:', 0.080402268539028335)
"""
TINY = 1.0e-20
if y is None: # x is a (2, N) or (N, 2) shaped array_like
x = np.asarray(x)
if x.shape[0] == 2:
x, y = x
elif x.shape[1] == 2:
x, y = x.T
else:
msg = ("If only `x` is given as input, it has to be of shape "
"(2, N) or (N, 2), provided shape was %s" % str(x.shape))
raise ValueError(msg)
else:
x = np.asarray(x)
y = np.asarray(y)
if x.size == 0 or y.size == 0:
raise ValueError("Inputs must not be empty.")
n = len(x)
xmean = np.mean(x, None)
ymean = np.mean(y, None)
# average sum of squares:
ssxm, ssxym, ssyxm, ssym = np.cov(x, y, bias=1).flat
r_num = ssxym
r_den = np.sqrt(ssxm * ssym)
if r_den == 0.0:
r = 0.0
else:
r = r_num / r_den
# test for numerical error propagation
if r > 1.0:
r = 1.0
elif r < -1.0:
r = -1.0
df = n - 2
t = r * np.sqrt(df / ((1.0 - r + TINY)*(1.0 + r + TINY)))
prob = 2 * distributions.t.sf(np.abs(t), df)
slope = r_num / ssxm
intercept = ymean - slope*xmean
sterrest = np.sqrt((1 - r**2) * ssym / ssxm / df)
LinregressResult = namedtuple('LinregressResult', ('slope', 'intercept',
'rvalue', 'pvalue',
'stderr'))
return LinregressResult(slope, intercept, r, prob, sterrest)
def theilslopes(y, x=None, alpha=0.95):
r"""
Computes the Theil-Sen estimator for a set of points (x, y).
`theilslopes` implements a method for robust linear regression. It
computes the slope as the median of all slopes between paired values.
Parameters
----------
y : array_like
Dependent variable.
x : array_like or None, optional
Independent variable. If None, use ``arange(len(y))`` instead.
alpha : float, optional
Confidence degree between 0 and 1. Default is 95% confidence.
Note that `alpha` is symmetric around 0.5, i.e. both 0.1 and 0.9 are
interpreted as "find the 90% confidence interval".
Returns
-------
medslope : float
Theil slope.
medintercept : float
Intercept of the Theil line, as ``median(y) - medslope*median(x)``.
lo_slope : float
Lower bound of the confidence interval on `medslope`.
up_slope : float
Upper bound of the confidence interval on `medslope`.
Notes
-----
The implementation of `theilslopes` follows [1]_. The intercept is
not defined in [1]_, and here it is defined as ``median(y) -
medslope*median(x)``, which is given in [3]_. Other definitions of
the intercept exist in the literature. A confidence interval for
the intercept is not given as this question is not addressed in
[1]_.
References
----------
.. [1] P.K. Sen, "Estimates of the regression coefficient based on Kendall's tau",
J. Am. Stat. Assoc., Vol. 63, pp. 1379-1389, 1968.
.. [2] H. Theil, "A rank-invariant method of linear and polynomial
regression analysis I, II and III", Nederl. Akad. Wetensch., Proc.
53:, pp. 386-392, pp. 521-525, pp. 1397-1412, 1950.
.. [3] W.L. Conover, "Practical nonparametric statistics", 2nd ed.,
John Wiley and Sons, New York, pp. 493.
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-5, 5, num=150)
>>> y = x + np.random.normal(size=x.size)
>>> y[11:15] += 10 # add outliers
>>> y[-5:] -= 7
Compute the slope, intercept and 90% confidence interval. For comparison,
also compute the least-squares fit with `linregress`:
>>> res = stats.theilslopes(y, x, 0.90)
>>> lsq_res = stats.linregress(x, y)
Plot the results. The Theil-Sen regression line is shown in red, with the
dashed red lines illustrating the confidence interval of the slope (note
that the dashed red lines are not the confidence interval of the regression
as the confidence interval of the intercept is not included). The green
line shows the least-squares fit for comparison.
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, y, 'b.')
>>> ax.plot(x, res[1] + res[0] * x, 'r-')
>>> ax.plot(x, res[1] + res[2] * x, 'r--')
>>> ax.plot(x, res[1] + res[3] * x, 'r--')
>>> ax.plot(x, lsq_res[1] + lsq_res[0] * x, 'g-')
>>> plt.show()
"""
# We copy both x and y so we can use _find_repeats.
y = np.array(y).flatten()
if x is None:
x = np.arange(len(y), dtype=float)
else:
x = np.array(x, dtype=float).flatten()
if len(x) != len(y):
raise ValueError("Incompatible lengths ! (%s<>%s)" % (len(y), len(x)))
# Compute sorted slopes only when deltax > 0
deltax = x[:, np.newaxis] - x
deltay = y[:, np.newaxis] - y
slopes = deltay[deltax > 0] / deltax[deltax > 0]
slopes.sort()
medslope = np.median(slopes)
medinter = np.median(y) - medslope * np.median(x)
# Now compute confidence intervals
if alpha > 0.5:
alpha = 1. - alpha
z = distributions.norm.ppf(alpha / 2.)
# This implements (2.6) from Sen (1968)
_, nxreps = _find_repeats(x)
_, nyreps = _find_repeats(y)
nt = len(slopes) # N in Sen (1968)
ny = len(y) # n in Sen (1968)
# Equation 2.6 in Sen (1968):
sigsq = 1/18. * (ny * (ny-1) * (2*ny+5) -
np.sum(k * (k-1) * (2*k + 5) for k in nxreps) -
np.sum(k * (k-1) * (2*k + 5) for k in nyreps))
# Find the confidence interval indices in `slopes`
sigma = np.sqrt(sigsq)
Ru = min(int(np.round((nt - z*sigma)/2.)), len(slopes)-1)
Rl = max(int(np.round((nt + z*sigma)/2.)) - 1, 0)
delta = slopes[[Rl, Ru]]
return medslope, medinter, delta[0], delta[1]
def _find_repeats(arr):
# This function assumes it may clobber its input.
if len(arr) == 0:
return np.array(0, np.float64), np.array(0, np.intp)
# XXX This cast was previously needed for the Fortran implementation,
# should we ditch it?
arr = np.asarray(arr, np.float64).ravel()
arr.sort()
# Taken from NumPy 1.9's np.unique.
change = np.concatenate(([True], arr[1:] != arr[:-1]))
unique = arr[change]
change_idx = np.concatenate(np.nonzero(change) + ([arr.size],))
freq = np.diff(change_idx)
atleast2 = freq > 1
return unique[atleast2], freq[atleast2]
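# Illustrative example (hypothetical input):
# _find_repeats([1, 2, 2, 3, 3, 3]) -> (array([ 2., 3.]), array([2, 3]))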
| mit |
lhellebr/spacewalk | backend/server/rhnLib.py | 1 | 8211 | #
# Copyright (c) 2008--2015 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
import os
import hashlib
import string
import base64
import posixpath
from spacewalk.common.rhnLib import parseRPMName
from spacewalk.common.rhnLog import log_debug
from spacewalk.common.rhnException import rhnFault
# architecture work
from rhnMapping import check_package_arch
def computeSignature(*fields):
# Init the hash
m = hashlib.new('sha256')
for i in fields:
# use str(i) since some of the fields may be non-string
m.update(str(i))
return base64.encodestring(m.digest()).rstrip()
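# Illustrative example (hypothetical input): computeSignature('abc', 123)
# base64-encodes the SHA-256 digest of the concatenated string forms of the
# fields, i.e. of the byte string 'abc123'.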
# 'n_n-n-v.v.v-r_r.r:e.ARCH.rpm' ---> [n,v,r,e,a]
def parseRPMFilename(pkgFilename):
"""
IN: Package Name: xxx-yyy-ver.ver.ver-rel.rel_rel:e.ARCH.rpm (string)
Understood rules:
     o Name can have nearly any char, but end in a - (well separated by).
Any character; may include - as well.
o Version cannot have a -, but ends in one.
o Release should be an actual number, and can't have any -'s.
o Release can include the Epoch, e.g.: 2:4 (4 is the epoch)
     o Epoch: Can include anything except a - and the : separator???
XXX: Is epoch info above correct?
OUT: [n,e,v,r, arch].
"""
if type(pkgFilename) != type(''):
raise rhnFault(21, str(pkgFilename)) # Invalid arg.
pkgFilename = os.path.basename(pkgFilename)
# Check that this is a package NAME (with arch.rpm) and strip
# that crap off.
pkg = string.split(pkgFilename, '.')
# 'rpm' at end?
if string.lower(pkg[-1]) not in ['rpm', 'deb']:
raise rhnFault(21, 'neither an rpm nor a deb package name: %s' % pkgFilename)
# Valid architecture next?
if check_package_arch(pkg[-2]) is None:
raise rhnFault(21, 'Incompatible architecture found: %s' % pkg[-2])
_arch = pkg[-2]
# Nuke that arch.rpm.
pkg = string.join(pkg[:-2], '.')
ret = list(parseRPMName(pkg))
if ret:
ret.append(_arch)
return ret
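# Illustrative example (hypothetical filename):
# parseRPMFilename('bash-4.2-1.el6.x86_64.rpm') strips the trailing
# '.x86_64.rpm', validates the architecture, and returns the name, epoch,
# version and release fields from parseRPMName() plus the architecture
# 'x86_64'.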
# XXX TBD where to place this function - it has to be accessible from several
# places
def normalize_server_arch(arch):
log_debug(4, 'server arch', arch)
if arch is None:
return ''
arch = str(arch)
if '-' in arch:
# Already normalized
return arch
# Fix the arch if need be
suffix = '-redhat-linux'
arch = arch + suffix
return arch
class InvalidAction(Exception):
""" An error class to signal when we can not handle an action """
pass
class EmptyAction(Exception):
""" An error class that signals that we encountered an internal error
trying to handle an action through no fault of the client
"""
pass
class ShadowAction(Exception):
""" An error class for actions that should not get to the client """
pass
def transpose_to_hash(arr, column_names):
""" Handy function to transpose an array from row-based to column-based,
with named columns.
"""
result = []
for c in column_names:
result.append([])
colnum = len(column_names)
for r in arr:
if len(r) != colnum:
raise Exception(
"Mismatching number of columns: expected %s, got %s; %s" % (
colnum, len(r), r))
for i in range(len(r)):
result[i].append(r[i])
# Now build the hash labeled with the column names
rh = {}
for i in range(len(column_names)):
rh[column_names[i]] = result[i]
return rh
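# Illustrative example (hypothetical input):
# transpose_to_hash([(1, 'a'), (2, 'b')], ['id', 'label'])
#   -> {'id': [1, 2], 'label': ['a', 'b']}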
def get_package_path(nevra, org_id, source=0, prepend="", omit_epoch=None,
package_type='rpm', checksum_type=None, checksum=None):
""" Computes a package path, optionally prepending a prefix
The path will look like
<prefix>/<org_id>/checksum[:3]/n/e:v-r/a/checksum/n-v-r.a.rpm if not omit_epoch
<prefix>/<org_id>/checksum[:3]/n/v-r/a/checksum/n-v-r.a.rpm if omit_epoch
"""
name, epoch, version, release, pkgarch = nevra
# dirarch and pkgarch are special-cased for source rpms
if source:
dirarch = 'SRPMS'
else:
dirarch = pkgarch
if org_id in ['', None]:
org = "NULL"
else:
org = org_id
if not omit_epoch and epoch not in [None, '']:
version = str(epoch) + ':' + version
# normpath sanitizes the path (removing duplicated / and such)
template = os.path.normpath(prepend +
"/%s/%s/%s/%s-%s/%s/%s/%s-%s-%s.%s.%s")
return template % (org, checksum[:3], name, version, release, dirarch, checksum,
name, nevra[2], release, pkgarch, package_type)
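# Illustrative example (hypothetical org id and checksum):
# get_package_path(('bash', '0', '4.2', '1', 'x86_64'), 1, checksum='deadbeef')
#   -> '/1/dea/bash/0:4.2-1/x86_64/deadbeef/bash-4.2-1.x86_64.rpm'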
# bug #161989
# It seems that our software was written specifically for rpms in far too many
# ways. Here's a little bit of a hack function that will return the package path
# (as in from get_package_path) but without the filename appended.
# This enables us to append an arbitrary file name that is not restricted to the
# form: name-version-release.arch.type
def get_package_path_without_package_name(nevra, org_id, prepend="",
checksum_type=None, checksum=None):
"""return a package path without the package name appended"""
return os.path.dirname(get_package_path(nevra, org_id, prepend=prepend,
checksum_type=checksum_type, checksum=checksum))
class CallableObj:
""" Generic callable object """
def __init__(self, name, func):
self.func = func
self.name = name
def __call__(self, *args, **kwargs):
return self.func(self.name, *args, **kwargs)
def make_evr(nvre, source=False):
""" IN: 'e:name-version-release' or 'name-version-release:e'
OUT: {'name':name, 'version':version, 'release':release, 'epoch':epoch }
"""
if ":" in nvre:
nvr, epoch = nvre.rsplit(":", 1)
if "-" in epoch:
nvr, epoch = epoch, nvr
else:
nvr, epoch = nvre, ""
nvr_parts = nvr.rsplit("-", 2)
if len(nvr_parts) != 3:
raise rhnFault(err_code=21, err_text="NVRE is missing name, version, or release.")
result = dict(zip(["name", "version", "release"], nvr_parts))
result["epoch"] = epoch
if source and result["release"].endswith(".src"):
result["release"] = result["release"][:-4]
return result
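# Illustrative examples (hypothetical inputs):
# make_evr('2:bash-4.2-1')
#   -> {'name': 'bash', 'version': '4.2', 'release': '1', 'epoch': '2'}
# make_evr('bash-4.2-1.src', source=True) additionally strips the trailing
# '.src' from the release.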
def _is_secure_path(path):
path = posixpath.normpath(path)
return not (path.startswith('/') or path.startswith('../'))
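# Illustrative examples (hypothetical paths):
# _is_secure_path('systems/1/2/crashes/c1') -> True
# _is_secure_path('../../etc/passwd') -> False (escapes the base directory)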
def get_crash_path(org_id, system_id, crash):
"""For a given org_id, system_id and crash, return relative path to a crash directory."""
path = os.path.join('systems', org_id, system_id, 'crashes', crash)
if _is_secure_path(path):
return path
else:
return None
def get_crashfile_path(org_id, system_id, crash, filename):
"""For a given org_id, system_id, crash and filename, return relative path to a crash file."""
path = os.path.join(get_crash_path(org_id, system_id, crash), filename)
if _is_secure_path(path):
return path
else:
return None
def get_action_path(org_id, system_id, action_id):
"""For a given org_id, system_id, and action_id, return relative path to a store directory."""
path = os.path.join('systems', str(org_id), str(system_id), 'actions', str(action_id))
if _is_secure_path(path):
return path
def get_actionfile_path(org_id, system_id, action_id, filename):
"""For a given org_id, system_id, action_id, and file, return relative path to a file."""
path = os.path.join(get_action_path(org_id, system_id, action_id), str(filename))
if _is_secure_path(path):
return path
| gpl-2.0 |
h4ck3rm1k3/MapNickAutotools | scons/scons-local-1.2.0/SCons/Tool/suncc.py | 12 | 1857 | """SCons.Tool.suncc
Tool-specific initialization for Sun Solaris (Forte) CC and cc.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/suncc.py 3842 2008/12/20 22:59:52 scons"
import SCons.Util
import cc
def generate(env):
"""
Add Builders and construction variables for Forte C and C++ compilers
to an Environment.
"""
cc.generate(env)
env['CXX'] = 'CC'
env['SHCCFLAGS'] = SCons.Util.CLVar('$CCFLAGS -KPIC')
env['SHOBJPREFIX'] = 'so_'
env['SHOBJSUFFIX'] = '.o'
def exists(env):
return env.Detect('CC')
| lgpl-2.1 |
SabunMacTavish/CTF-Platform | api/autogenerators/rtfm.py | 2 | 1943 | __author__ = "Collin Petty"
__copyright__ = "Carnegie Mellon University"
__license__ = "MIT"
__maintainer__ = ["Collin Petty", "Peter Chapman"]
__credits__ = ["David Brumely", "Collin Petty", "Peter Chapman"]
__email__ = ["[email protected]", "[email protected]"]
__status__ = "Production"
import tempfile
import os
import random
import string
template_file = "rtfm.txt"
templates = "autogenerators/templates/"
def validate_dependencies():
print "DEPENDENCY CHECK - rtfm.py (autogen)"
if not os.path.exists(_template_path()):
print "ERROR - Read the Manual - Could not find the template file (%s)" % template_file
return False
return True
def generate():
template = open(_template_path(), 'r').read()
key = ''.join(random.choice(string.ascii_lowercase) for _ in range(12))
template = template.replace('###KEY###', key)
shift = random.randint(1, 26)
out_text = _caesar(template, shift)
output = tempfile.NamedTemporaryFile(delete=False, suffix=".txt")
output.write(out_text)
output.close()
return [os.path.abspath(output.name)], key, """<p>On the back of the broken panel you see a recovery\
<a href='###file_1_url###' target='_blank'>manual</a>. You need to find the emergency repair key in\
order to put the robot into <code>autoboot</code> mode, but it appears to be ciphered using a Caesar cipher.</p>"""
def _template_path():
return templates + template_file
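# Illustrative behaviour of the _caesar helper below (hypothetical input):
# _caesar('abc123', 1) -> 'bcd234' (letters shift mod 26, digits mod 10)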
def _caesar(text, shift):
ret = list()
for t in text:
t = ord(t)
if t in range(ord('a'), ord('z')+1):
ret.append(((t - ord('a') + shift) % 26) + ord('a'))
elif t in range(ord('A'), ord('Z')+1):
ret.append(((t - ord('A') + shift) % 26) + ord('A'))
elif t in range(ord('0'), ord('9')+1):
ret.append(((t - ord('0') + shift) % 10) + ord('0'))
else:
ret.append(t)
return string.joinfields(map(chr, ret), "") | mit |
esthermm/odoomrp-wip | stock_quant_valuation/models/stock_quant.py | 8 | 1040 | # -*- coding: utf-8 -*-
# (c) 2016 Ainara Galdona - AvanzOSC
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from openerp import fields, api, models
from openerp.addons import decimal_precision as dp
class StockQuant(models.Model):
_inherit = 'stock.quant'
@api.multi
@api.depends("product_id", "product_id.manual_standard_cost", "qty")
def _compute_manual_value(self):
for record in self:
record.manual_value = (record.product_id.manual_standard_cost *
record.qty)
@api.multi
@api.depends('cost', 'qty')
def _compute_real_value(self):
for record in self:
record.real_value = record.cost * record.qty
manual_value = fields.Float(
string="Manual Value", store=True, compute="_compute_manual_value",
digits=dp.get_precision('Product Price'))
real_value = fields.Float(
string="Real Value", store=True, compute="_compute_real_value",
digits=dp.get_precision('Product Price'))
| agpl-3.0 |
jy723/ardupilot | mk/PX4/Tools/genmsg/src/genmsg/names.py | 215 | 5223 | # Software License Agreement (BSD License)
#
# Copyright (c) 2008, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
PRN_SEPARATOR = '/'
import re
def normalize_package_context(package_context):
package_context = package_context.strip()
while package_context.endswith(PRN_SEPARATOR):
package_context = package_context[:-1]
return package_context
#######################################################################
# RESOURCE NAMES
# resource names refer to entities in a file system
def resource_name(res_pkg_name, name, my_pkg=None):
"""
Convert package name + resource into a fully qualified resource name
@param res_pkg_name: name of package resource is located in
@type res_pkg_name: str
@param name: resource base name
@type name: str
@param my_pkg: name of package resource is being referred to
in. If specified, name will be returned in local form if
res_pkg_name is my_pkg
@type my_pkg: str
@return: name for resource
@rtype: str
"""
if res_pkg_name != my_pkg:
return res_pkg_name+PRN_SEPARATOR+name
return name
def resource_name_base(name):
"""
pkg/typeName -> typeName, typeName -> typeName
Convert fully qualified resource name into the package-less resource name
@param name: package resource name, e.g. 'std_msgs/String'
@type name: str
@return: resource name sans package-name scope
@rtype: str
"""
return name[name.rfind(PRN_SEPARATOR)+1:]
def resource_name_package(name):
"""
pkg/typeName -> pkg, typeName -> None
@param name: package resource name, e.g. 'std_msgs/String'
@type name: str
@return: package name of resource
@rtype: str
"""
if not PRN_SEPARATOR in name:
return None
return name[:name.find(PRN_SEPARATOR)]
def package_resource_name(name):
"""
Split a name into its package and resource name parts, e.g. 'std_msgs/String -> std_msgs, String'
@param name: package resource name, e.g. 'std_msgs/String'
@type name: str
@return: package name, resource name
@rtype: str
@raise ValueError: if name is invalid
"""
if PRN_SEPARATOR in name:
val = tuple(name.split(PRN_SEPARATOR))
if len(val) != 2:
raise ValueError("invalid name [%s]"%name)
else:
return val
else:
return '', name
################################################################################
# NAME VALIDATORS
#ascii char followed by (alphanumeric, _, /)
RESOURCE_NAME_LEGAL_CHARS_P = re.compile('^[A-Za-z][\w_\/]*$')
def is_legal_resource_name(name):
"""
Check if name is a legal ROS name for filesystem resources
(alphabetical character followed by alphanumeric, underscore, or
forward slashes). This constraint is currently not being enforced,
but may start getting enforced in later versions of ROS.
@param name: Name
@type name: str
"""
# resource names can be unicode due to filesystem
if name is None:
return False
m = RESOURCE_NAME_LEGAL_CHARS_P.match(name)
# '//' check makes sure there isn't double-slashes
return m is not None and m.group(0) == name and not '//' in name
BASE_RESOURCE_NAME_LEGAL_CHARS_P = re.compile('^[A-Za-z][\w_]*$') #ascii char followed by (alphanumeric, _)
def is_legal_resource_base_name(name):
"""
Validates that name is a legal resource base name. A base name has
no package context, e.g. "String".
"""
# resource names can be unicode due to filesystem
if name is None:
return False
m = BASE_RESOURCE_NAME_LEGAL_CHARS_P.match(name)
return m is not None and m.group(0) == name
| gpl-3.0 |
INM-6/nest-git-migration | topology/examples/conncomp.py | 13 | 4213 | # -*- coding: utf-8 -*-
#
# conncomp.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
'''
NEST Topology Module Example
Create two 30x30 layers with nodes composed of one pyramidal cell
and one interneuron. Connect with two projections, one pyr->pyr, one
pyr->in, and visualize.
BCCN Tutorial @ CNS*09
Hans Ekkehard Plesser, UMB
'''
import pylab
pylab.ion()
import nest
import nest.topology as topo
nest.ResetKernel()
nest.set_verbosity('M_WARNING')
# create two test layers
nest.CopyModel('iaf_neuron', 'pyr')
nest.CopyModel('iaf_neuron', 'in')
a = topo.CreateLayer({'columns': 30, 'rows': 30, 'extent': [3.0, 3.0],
'elements': ['pyr', 'in']})
b = topo.CreateLayer({'columns': 30, 'rows': 30, 'extent': [3.0, 3.0],
'elements': ['pyr', 'in']})
topo.ConnectLayers(a, b, {'connection_type': 'divergent',
'sources': {'model': 'pyr'},
'targets': {'model': 'pyr'},
'mask': {'circular': {'radius': 0.5}},
'kernel': 0.5,
'weights': 1.0,
'delays': 1.0})
topo.ConnectLayers(a, b, {'connection_type': 'divergent',
'sources': {'model': 'pyr'},
'targets': {'model': 'in'},
'mask': {'circular': {'radius': 1.0}},
'kernel': 0.2,
'weights': 1.0,
'delays': 1.0})
pylab.clf()
# plot targets of neurons in different grid locations
for ctr in [[15,15]]:
# obtain node id for center: pick first node of composite
ctr_id = topo.GetElement(a, ctr)
# get all projection targets of center neuron
tgts = [ci[1] for ci in nest.GetConnections(ctr_id)]
# get positions of targets
tpyr = pylab.array(tuple(zip(*[topo.GetPosition([n])[0] for n in tgts
if nest.GetStatus([n],'model')[0]=='pyr'])))
tin = pylab.array(tuple(zip(*[topo.GetPosition([n])[0] for n in tgts
if nest.GetStatus([n],'model')[0]=='in'])))
# scatter-plot
pylab.scatter(tpyr[0]-0.02, tpyr[1]-0.02, 20, 'b', zorder = 10)
pylab.scatter(tin[0] +0.02, tin[1] +0.02, 20, 'r', zorder = 10)
# mark locations with background grey circle
pylab.plot(tpyr[0],tpyr[1],'o',markerfacecolor=(0.7,0.7,0.7),
markersize=10,markeredgewidth=0,zorder=1,label='_nolegend_')
pylab.plot(tin[0], tin[1] ,'o',markerfacecolor=(0.7,0.7,0.7),
markersize=10,markeredgewidth=0,zorder=1,label='_nolegend_')
# mark sender position with transparent red circle
ctrpos = topo.GetPosition(ctr_id)[0]
pylab.gca().add_patch(pylab.Circle(ctrpos, radius=0.15, zorder = 99,
fc = 'r', alpha = 0.4, ec = 'none'))
# mark mask positions with open red/blue circles
pylab.gca().add_patch(pylab.Circle(ctrpos, radius=0.5, zorder = 2,
fc = 'none', ec = 'b', lw=3))
pylab.gca().add_patch(pylab.Circle(ctrpos, radius=1.0, zorder = 2,
fc = 'none', ec = 'r', lw=3))
# mark layer edge
pylab.gca().add_patch(pylab.Rectangle((-1.5,-1.5), 3.0, 3.0, zorder = 1,
fc = 'none', ec = 'k', lw=3))
# beautify
pylab.axes().set_xticks(pylab.arange(-1.5, 1.55, 0.5))
pylab.axes().set_yticks(pylab.arange(-1.5, 1.55, 0.5))
pylab.grid(True)
pylab.axis([-1.6, 1.6, -1.6, 1.6])
pylab.axes().set_aspect('equal', 'box')
| gpl-2.0 |
geomagpy/magpy | magpy/lib/format_dtu.py | 3 | 5567 | """
MagPy
Auxiliary input filter - WIC/WIK
Written by Roman Leonhardt June 2012
- contains test and read function, toDo: write function
"""
from __future__ import print_function
from magpy.stream import *
def isDTU1(filename):
"""
Checks whether a file is ASCII DTU (type1) format used within the DTU's FGE network
Characteristic features are:
"""
try:
temp = open(filename, 'rt').readline()
except:
return False
try:
if not temp.startswith('FILENAME: '):
elem = temp.split()
if len(elem) == 6:
try:
testtime = datetime.strptime(elem[0],"%H:%M:%S")
except:
return False
else:
return False
except:
return False
return True
def readDTU1(filename, headonly=False, **kwargs):
"""
Reading DTU1 format data.
Looks like:
FILENAME: GDH4_20091215.sec
INST. TYPE: Primary magnetometer
INSTRUMENT: FGE S0120 E0192
FILTER: Electronic lowpass
ADC: ICP 7017 vers. B2.3
SOFTWARE: FG_ComData vers. 3.04
CHANNELS: 6 Time,x,y,z,T1,T2
TIME 1 hh:mm:ss PC clock, UT, timeserver
x 400 nT/V variation horizontal magnetic north in nT
y 400 nT/V variation horizontal magnetic east in nT
z 400 nT/V variation vertical in nT
T1 0 Kelvin/v no temp sensor on pendulum
T2 320 Kelvin/V electronic temp in Kelvin, sensor: AD592
DATA:
00:00:01 124.04 134.08 -17.68 0.00 291.90
00:00:02 124.00 134.00 -17.68 0.00 291.90
00:00:03 124.08 134.00 -17.64 0.00 291.90
"""
fh = open(filename, 'rt')
# read file and split text into channels
data = []
getfile = True
key = None
stream = DataStream()
    # Check whether header information is already present
headers = {}
# get day from filename (platform independent)
starttime = kwargs.get('starttime')
endtime = kwargs.get('endtime')
splitpath = os.path.split(filename)
daystring = splitpath[1].split('.')
daystring = daystring[0].split('_')
print(daystring[1])
try:
day = datetime.strftime(datetime.strptime(daystring[1] , "%Y%m%d"),"%Y-%m-%d")
except:
logging.warning("Wrong dateformat in Filename %s" % daystring[0])
return []
# Select only files within eventually defined time range
if starttime:
if not datetime.strptime(day,'%Y-%m-%d') >= datetime.strptime(datetime.strftime(stream._testtime(starttime),'%Y-%m-%d'),'%Y-%m-%d'):
getfile = False
if endtime:
if not datetime.strptime(day,'%Y-%m-%d') <= datetime.strptime(datetime.strftime(stream._testtime(endtime),'%Y-%m-%d'),'%Y-%m-%d'):
getfile = False
if getfile:
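        # Header lines (INST. TYPE, INSTRUMENT, FILTER, ...) fill the headers dict;
        # each data line becomes a LineStruct row (time, x, y, z, t1, t2) with NaN
        # substituted for values that cannot be parsed.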
for line in fh:
elem = line.split()
if line.isspace():
# blank line
pass
elif line.startswith('FILENAME:'):
pass
elif line.startswith('INST. TYPE:'):
tmp = line.split(':')[1]
headers['InstrumentType'] = tmp.lstrip()
elif line.startswith('INSTRUMENT:'):
tmp = line.split(':')[1]
headers['Instrument'] = tmp.lstrip()
elif line.startswith('FILTER:'):
tmp = line.split(':')[1]
headers['Filter'] = tmp.lstrip()
elif line.startswith('ADC:'):
tmp = line.split(':')[1]
headers['ADC'] = tmp.lstrip()
elif line.startswith('SOFTWARE:'):
tmp = line.split(':')[1]
headers['Software'] = tmp.lstrip()
elif line.startswith('CHANNELS:'):
tmp = line.split(':')[1]
headers['Channels'] = tmp.lstrip()
elif line.startswith('TIME'):
pass
elif line.startswith('x'):
pass
elif line.startswith('y'):
pass
elif line.startswith('z'):
pass
elif line.startswith('T1'):
pass
elif line.startswith('T2'):
pass
elif line.startswith('DATA:'):
pass
elif headonly:
# skip data for option headonly
continue
else:
row = LineStruct()
try:
row.time=date2num(datetime.strptime(day+'T'+elem[0],"%Y-%m-%dT%H:%M:%S"))
try:
row.x = float(elem[1])
except:
row.x = float('nan')
try:
row.y = float(elem[2])
except:
row.y = float('nan')
try:
row.z = float(elem[3])
except:
row.z = float('nan')
try:
row.t1 = float(elem[4])
except:
row.t1 = float('nan')
try:
row.t2 = float(elem[5])
except:
row.t2 = float('nan')
except:
#raise ValueError, "Wrong date format in %s" % filename
pass
stream.add(row)
fh.close()
else:
headers = stream.header
stream =[]
return DataStream(stream, headers)
| bsd-3-clause |
m-r-hunt/invaders | enemies.py | 1 | 6646 | # Invaders
# Copyright (C) 2013 Maximilian Hunt
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import os, random, pygame, projectiles, score_counter
class EnemySprite(pygame.sprite.Sprite):
# Class for one enemy invader.
def __init__(self, image, position, bullet_group):
# image: relative path to an image pygame can load
# position: (x, y) coordinates on screen
# bullet_group: pygame.sprite.Group to put fired bullets in
pygame.sprite.Sprite.__init__(self)
self.image = pygame.image.load(image)
self.position = position
self.rect = self.image.get_rect()
self.rect.center = position
self.bullet_group = bullet_group
def update(self, dv, score, collisions):
# Update this enemy. Should be called once per frame.
# dv: (x, y) vector for movement this frame
# score: a Score to increment on death
# collisions: a dictionary of collisions, possibly containing this object
# Handle any collisions given
if self in collisions:
death = False
for bullet in collisions[self]:
if (bullet.origin != self):
bullet.kill()
death = True
if (death == True):
score.increment()
self.kill()
# Update position
self.position = (self.position[0] + dv[0], self.position[1] + dv[1])
self.rect.center = self.position
def y(self):
# Return height (y coordinate).
return self.position[1]
def fire(self):
# (Possibly) fire a bullet down.
if (random.randrange(100) < 2):
bounds = (0-100, 800+100, 0-100, 600+100)
bullet = projectiles.Bullet(os.path.join("Resources", "Enemy Bullet.png"), self.position, (0, 5), bounds, self)
self.bullet_group.add(bullet)
class EnemyColumn(pygame.sprite.Group):
# Class for one column in a formation of enemies.
# Exists so we can easily fire only the lowest enemy in each column
# Remembers its own x coordinate, everything else happens inside the actual enemies
def __init__(self, x_position):
# x_position: integer x coordinate
pygame.sprite.Group.__init__(self)
self.x_position = x_position
def update(self, dv, score, collisions):
# Update this column. Should be called once per frame.
# dv: (x, y) vector for movement this frame
# score: a Score to pass to contained EnemySprites
# collisions: a dictionary of collisions to pass to contained EnemySprites
# Return (x, y), x of this column and y of lowest contained Sprite.
self.x_position += dv[0]
# Update contained sprites
for i in self.sprites():
i.update(dv, score, collisions)
# Compute biggest y, ask that EnemySprite to fire.
max_y = 0
if (len(self) != 0):
for i in self.sprites():
if (i.y() > max_y):
max_y = i.y()
bottom_enemy = i
bottom_enemy.fire()
return self.x_position, max_y
class EnemyFormation(pygame.sprite.Group):
# Class for a whole formation of enemies.
# Contains both EnemyColumns and EnemySprites
# Magic numbers: Base speed stepped horizontally or vertically each frame.
H_STEP = 2
V_STEP = 10
def __init__(self, topleft, layout, bounds, bullet_group):
pygame.sprite.Group.__init__(self)
self.columns = []
columns, rows = layout
# Generate all the enemies and columns.
for i in range(0, columns):
column_x = topleft[0] + i*64
enemy_column = EnemyColumn(topleft[0] + i*64)
for j in range(0, rows):
new_enemy = EnemySprite(os.path.join("resources", "Enemy.png"), (column_x, topleft[1] + j*64), bullet_group)
enemy_column.add(new_enemy)
self.add(new_enemy)
self.columns.append(enemy_column)
# Direction: +1 for right, -1 for left (i.e. +-ve x direction)
self.current_direction = +1
self.left_bound, self.right_bound, self.bottom_bound = bounds
self.total = columns * rows
def update(self, score, collisions):
# Update this formation. Should be called once per frame.
# score: a Score to pass to contained EnemyColumns
# collisions: a dictionary of collisions to pass to contained EnemyColumns
# Returns (bool, bool). First is True if this formation is still in a good state, False if it needs resetting.
# Second is True if this is because it's now empty, False if it has reached the bottom of the screen.
direction_change = too_low = False
# Compute factor to move faster when we have fewer remaining members.
scale = int(float(self.total)/float(len(self)))
# Update columns
for i in self.columns:
x, y = i.update((scale*self.current_direction*self.H_STEP, 0), score, collisions)
# Remove empty columns
if (len(i.sprites()) == 0):
self.columns.remove(i)
# Notice if we've gone too low
elif (y > self.bottom_bound):
too_low = True
# Remember to change direction when we reach screen edges
elif (x < self.left_bound or x > self.right_bound):
direction_change = True
# Indicate we're empty
if (len(self.columns) == 0):
return False, True
# Indicate we reached the bottom of the screen.
elif too_low:
return False, False
# Drop down and change direction
elif direction_change:
self.current_direction *= -1
for i in self.columns:
i.update((scale*self.current_direction*self.H_STEP, self.V_STEP), score, [])
# If we made it here, everything's fine.
return True, True | gpl-2.0 |
MERegistro/meregistro | django/contrib/admin/templatetags/admin_list.py | 43 | 12835 | import datetime
from django.conf import settings
from django.contrib.admin.util import lookup_field, display_for_field, label_for_field
from django.contrib.admin.views.main import ALL_VAR, EMPTY_CHANGELIST_VALUE
from django.contrib.admin.views.main import ORDER_VAR, ORDER_TYPE_VAR, PAGE_VAR, SEARCH_VAR
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.forms.forms import pretty_name
from django.utils import formats
from django.utils.html import escape, conditional_escape
from django.utils.safestring import mark_safe
from django.utils.text import capfirst
from django.utils.translation import ugettext as _
from django.utils.encoding import smart_unicode, force_unicode
from django.template import Library
register = Library()
DOT = '.'
def paginator_number(cl,i):
"""
Generates an individual page index link in a paginated list.
"""
if i == DOT:
return u'... '
elif i == cl.page_num:
return mark_safe(u'<span class="this-page">%d</span> ' % (i+1))
else:
return mark_safe(u'<a href="%s"%s>%d</a> ' % (escape(cl.get_query_string({PAGE_VAR: i})), (i == cl.paginator.num_pages-1 and ' class="end"' or ''), i+1))
paginator_number = register.simple_tag(paginator_number)
def pagination(cl):
"""
Generates the series of links to the pages in a paginated list.
"""
paginator, page_num = cl.paginator, cl.page_num
pagination_required = (not cl.show_all or not cl.can_show_all) and cl.multi_page
if not pagination_required:
page_range = []
else:
ON_EACH_SIDE = 3
ON_ENDS = 2
# If there are 10 or fewer pages, display links to every page.
        # Otherwise, do some fancy pagination.
if paginator.num_pages <= 10:
page_range = range(paginator.num_pages)
else:
# Insert "smart" pagination links, so that there are always ON_ENDS
# links at either end of the list of pages, and there are always
# ON_EACH_SIDE links at either end of the "current page" link.
page_range = []
if page_num > (ON_EACH_SIDE + ON_ENDS):
page_range.extend(range(0, ON_EACH_SIDE - 1))
page_range.append(DOT)
page_range.extend(range(page_num - ON_EACH_SIDE, page_num + 1))
else:
page_range.extend(range(0, page_num + 1))
if page_num < (paginator.num_pages - ON_EACH_SIDE - ON_ENDS - 1):
page_range.extend(range(page_num + 1, page_num + ON_EACH_SIDE + 1))
page_range.append(DOT)
page_range.extend(range(paginator.num_pages - ON_ENDS, paginator.num_pages))
else:
page_range.extend(range(page_num + 1, paginator.num_pages))
need_show_all_link = cl.can_show_all and not cl.show_all and cl.multi_page
return {
'cl': cl,
'pagination_required': pagination_required,
'show_all_url': need_show_all_link and cl.get_query_string({ALL_VAR: ''}),
'page_range': page_range,
'ALL_VAR': ALL_VAR,
'1': 1,
}
pagination = register.inclusion_tag('admin/pagination.html')(pagination)
def result_headers(cl):
"""
Generates the list column headers.
"""
lookup_opts = cl.lookup_opts
for i, field_name in enumerate(cl.list_display):
header, attr = label_for_field(field_name, cl.model,
model_admin = cl.model_admin,
return_attr = True
)
if attr:
# if the field is the action checkbox: no sorting and special class
if field_name == 'action_checkbox':
yield {
"text": header,
"class_attrib": mark_safe(' class="action-checkbox-column"')
}
continue
# It is a non-field, but perhaps one that is sortable
admin_order_field = getattr(attr, "admin_order_field", None)
if not admin_order_field:
yield {"text": header}
continue
# So this _is_ a sortable non-field. Go to the yield
# after the else clause.
else:
admin_order_field = None
th_classes = []
new_order_type = 'asc'
if field_name == cl.order_field or admin_order_field == cl.order_field:
th_classes.append('sorted %sending' % cl.order_type.lower())
new_order_type = {'asc': 'desc', 'desc': 'asc'}[cl.order_type.lower()]
yield {
"text": header,
"sortable": True,
"url": cl.get_query_string({ORDER_VAR: i, ORDER_TYPE_VAR: new_order_type}),
"class_attrib": mark_safe(th_classes and ' class="%s"' % ' '.join(th_classes) or '')
}
def _boolean_icon(field_val):
BOOLEAN_MAPPING = {True: 'yes', False: 'no', None: 'unknown'}
return mark_safe(u'<img src="%simg/admin/icon-%s.gif" alt="%s" />' % (settings.ADMIN_MEDIA_PREFIX, BOOLEAN_MAPPING[field_val], field_val))
def items_for_result(cl, result, form):
"""
Generates the actual list of data.
"""
first = True
pk = cl.lookup_opts.pk.attname
for field_name in cl.list_display:
row_class = ''
try:
f, attr, value = lookup_field(field_name, result, cl.model_admin)
except (AttributeError, ObjectDoesNotExist):
result_repr = EMPTY_CHANGELIST_VALUE
else:
if f is None:
allow_tags = getattr(attr, 'allow_tags', False)
boolean = getattr(attr, 'boolean', False)
if boolean:
allow_tags = True
result_repr = _boolean_icon(value)
else:
result_repr = smart_unicode(value)
# Strip HTML tags in the resulting text, except if the
# function has an "allow_tags" attribute set to True.
if not allow_tags:
result_repr = escape(result_repr)
else:
result_repr = mark_safe(result_repr)
else:
if value is None:
result_repr = EMPTY_CHANGELIST_VALUE
if isinstance(f.rel, models.ManyToOneRel):
field_val = getattr(result, f.name)
if field_val is None:
result_repr = EMPTY_CHANGELIST_VALUE
else:
result_repr = escape(field_val)
else:
result_repr = display_for_field(value, f)
if isinstance(f, models.DateField) or isinstance(f, models.TimeField):
row_class = ' class="nowrap"'
if force_unicode(result_repr) == '':
result_repr = mark_safe(' ')
# If list_display_links not defined, add the link tag to the first field
if (first and not cl.list_display_links) or field_name in cl.list_display_links:
table_tag = {True:'th', False:'td'}[first]
first = False
url = cl.url_for_result(result)
# Convert the pk to something that can be used in Javascript.
# Problem cases are long ints (23L) and non-ASCII strings.
if cl.to_field:
attr = str(cl.to_field)
else:
attr = pk
value = result.serializable_value(attr)
result_id = repr(force_unicode(value))[1:]
yield mark_safe(u'<%s%s><a href="%s"%s>%s</a></%s>' % \
(table_tag, row_class, url, (cl.is_popup and ' onclick="opener.dismissRelatedLookupPopup(window, %s); return false;"' % result_id or ''), conditional_escape(result_repr), table_tag))
else:
# By default the fields come from ModelAdmin.list_editable, but if we pull
# the fields out of the form instead of list_editable custom admins
# can provide fields on a per request basis
if form and field_name in form.fields:
bf = form[field_name]
result_repr = mark_safe(force_unicode(bf.errors) + force_unicode(bf))
else:
result_repr = conditional_escape(result_repr)
yield mark_safe(u'<td%s>%s</td>' % (row_class, result_repr))
if form and not form[cl.model._meta.pk.name].is_hidden:
yield mark_safe(u'<td>%s</td>' % force_unicode(form[cl.model._meta.pk.name]))
def results(cl):
if cl.formset:
for res, form in zip(cl.result_list, cl.formset.forms):
yield list(items_for_result(cl, res, form))
else:
for res in cl.result_list:
yield list(items_for_result(cl, res, None))
def result_hidden_fields(cl):
if cl.formset:
for res, form in zip(cl.result_list, cl.formset.forms):
if form[cl.model._meta.pk.name].is_hidden:
yield mark_safe(force_unicode(form[cl.model._meta.pk.name]))
def result_list(cl):
"""
Displays the headers and data list together
"""
return {'cl': cl,
'result_hidden_fields': list(result_hidden_fields(cl)),
'result_headers': list(result_headers(cl)),
'results': list(results(cl))}
result_list = register.inclusion_tag("admin/change_list_results.html")(result_list)
def date_hierarchy(cl):
"""
Displays the date hierarchy for date drill-down functionality.
"""
if cl.date_hierarchy:
field_name = cl.date_hierarchy
year_field = '%s__year' % field_name
month_field = '%s__month' % field_name
day_field = '%s__day' % field_name
field_generic = '%s__' % field_name
year_lookup = cl.params.get(year_field)
month_lookup = cl.params.get(month_field)
day_lookup = cl.params.get(day_field)
link = lambda d: cl.get_query_string(d, [field_generic])
if year_lookup and month_lookup and day_lookup:
day = datetime.date(int(year_lookup), int(month_lookup), int(day_lookup))
return {
'show': True,
'back': {
'link': link({year_field: year_lookup, month_field: month_lookup}),
'title': capfirst(formats.date_format(day, 'YEAR_MONTH_FORMAT'))
},
'choices': [{'title': capfirst(formats.date_format(day, 'MONTH_DAY_FORMAT'))}]
}
elif year_lookup and month_lookup:
days = cl.query_set.filter(**{year_field: year_lookup, month_field: month_lookup}).dates(field_name, 'day')
return {
'show': True,
'back': {
'link': link({year_field: year_lookup}),
'title': str(year_lookup)
},
'choices': [{
'link': link({year_field: year_lookup, month_field: month_lookup, day_field: day.day}),
'title': capfirst(formats.date_format(day, 'MONTH_DAY_FORMAT'))
} for day in days]
}
elif year_lookup:
months = cl.query_set.filter(**{year_field: year_lookup}).dates(field_name, 'month')
return {
'show' : True,
'back': {
'link' : link({}),
'title': _('All dates')
},
'choices': [{
'link': link({year_field: year_lookup, month_field: month.month}),
'title': capfirst(formats.date_format(month, 'YEAR_MONTH_FORMAT'))
} for month in months]
}
else:
years = cl.query_set.dates(field_name, 'year')
return {
'show': True,
'choices': [{
'link': link({year_field: str(year.year)}),
'title': str(year.year),
} for year in years]
}
date_hierarchy = register.inclusion_tag('admin/date_hierarchy.html')(date_hierarchy)
def search_form(cl):
"""
Displays a search form for searching the list.
"""
return {
'cl': cl,
'show_result_count': cl.result_count != cl.full_result_count,
'search_var': SEARCH_VAR
}
search_form = register.inclusion_tag('admin/search_form.html')(search_form)
def admin_list_filter(cl, spec):
return {'title': spec.title(), 'choices' : list(spec.choices(cl))}
admin_list_filter = register.inclusion_tag('admin/filter.html')(admin_list_filter)
def admin_actions(context):
"""
Track the number of times the action field has been rendered on the page,
so we know which value to use.
"""
context['action_index'] = context.get('action_index', -1) + 1
return context
admin_actions = register.inclusion_tag("admin/actions.html", takes_context=True)(admin_actions)
| bsd-3-clause |
devs1991/test_edx_docmode | venv/lib/python2.7/site-packages/sklearn/datasets/tests/test_samples_generator.py | 3 | 7262 | import numpy as np
from numpy.testing import assert_equal, assert_approx_equal, \
assert_array_almost_equal
from nose.tools import assert_true
from sklearn.utils.testing import assert_less
from .. import make_classification
from .. import make_multilabel_classification
from .. import make_hastie_10_2
from .. import make_regression
from .. import make_blobs
from .. import make_friedman1
from .. import make_friedman2
from .. import make_friedman3
from .. import make_low_rank_matrix
from .. import make_sparse_coded_signal
from .. import make_sparse_uncorrelated
from .. import make_spd_matrix
from .. import make_swiss_roll
from .. import make_s_curve
def test_make_classification():
X, y = make_classification(n_samples=100, n_features=20, n_informative=5,
n_redundant=1, n_repeated=1, n_classes=3,
n_clusters_per_class=1, hypercube=False,
shift=None, scale=None, weights=[0.1, 0.25],
random_state=0)
assert_equal(X.shape, (100, 20), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of classes")
assert_equal(sum(y == 0), 10, "Unexpected number of samples in class #0")
assert_equal(sum(y == 1), 25, "Unexpected number of samples in class #1")
assert_equal(sum(y == 2), 65, "Unexpected number of samples in class #2")
def test_make_multilabel_classification():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=100, n_features=20,
n_classes=3, random_state=0,
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (100, 20), "X shape mismatch")
if not allow_unlabeled:
assert_equal(max([max(y) for y in Y]), 2)
assert_equal(min([len(y) for y in Y]), min_length)
assert_true(max([len(y) for y in Y]) <= 3)
def test_make_hastie_10_2():
X, y = make_hastie_10_2(n_samples=100, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (2,), "Unexpected number of classes")
def test_make_regression():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
effective_rank=5, coef=True, bias=0.0,
noise=1.0, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(c.shape, (10,), "coef shape mismatch")
assert_equal(sum(c != 0.0), 3, "Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0)
assert_approx_equal(np.std(y - np.dot(X, c)), 1.0, significant=2)
def test_make_regression_multitarget():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
n_targets=3, coef=True, noise=1., random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100, 3), "y shape mismatch")
assert_equal(c.shape, (10, 3), "coef shape mismatch")
assert_equal(sum(c != 0.0), 3, "Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0)
assert_approx_equal(np.std(y - np.dot(X, c)), 1.0, significant=2)
def test_make_blobs():
X, y = make_blobs(n_samples=50, n_features=2,
centers=[[0.0, 0.0], [1.0, 1.0], [0.0, 1.0]],
random_state=0)
assert_equal(X.shape, (50, 2), "X shape mismatch")
assert_equal(y.shape, (50,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of blobs")
def test_make_friedman1():
X, y = make_friedman1(n_samples=5, n_features=10, noise=0.0,
random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y, 10 * np.sin(np.pi * X[:, 0] * X[:, 1])
+ 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4])
def test_make_friedman2():
X, y = make_friedman2(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y, (X[:, 0] ** 2
+ (X[:, 1] * X[:, 2]
- 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5)
def test_make_friedman3():
X, y = make_friedman3(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y, np.arctan((X[:, 1] * X[:, 2]
- 1 / (X[:, 1] * X[:, 3]))
/ X[:, 0]))
def test_make_low_rank_matrix():
X = make_low_rank_matrix(n_samples=50, n_features=25, effective_rank=5,
tail_strength=0.01, random_state=0)
assert_equal(X.shape, (50, 25), "X shape mismatch")
from numpy.linalg import svd
u, s, v = svd(X)
assert_less(sum(s) - 5, 0.1, "X rank is not approximately 5")
def test_make_sparse_coded_signal():
Y, D, X = make_sparse_coded_signal(n_samples=5, n_components=8,
n_features=10, n_nonzero_coefs=3,
random_state=0)
assert_equal(Y.shape, (10, 5), "Y shape mismatch")
assert_equal(D.shape, (10, 8), "D shape mismatch")
assert_equal(X.shape, (8, 5), "X shape mismatch")
for col in X.T:
assert_equal(len(np.flatnonzero(col)), 3, 'Non-zero coefs mismatch')
assert_equal(np.dot(D, X), Y)
assert_array_almost_equal(np.sqrt((D ** 2).sum(axis=0)),
np.ones(D.shape[1]))
def test_make_sparse_uncorrelated():
X, y = make_sparse_uncorrelated(n_samples=5, n_features=10, random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
def test_make_spd_matrix():
X = make_spd_matrix(n_dim=5, random_state=0)
assert_equal(X.shape, (5, 5), "X shape mismatch")
assert_array_almost_equal(X, X.T)
from numpy.linalg import eig
eigenvalues, _ = eig(X)
assert_equal(eigenvalues > 0, np.array([True] * 5),
"X is not positive-definite")
def test_make_swiss_roll():
X, t = make_swiss_roll(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_equal(X[:, 0], t * np.cos(t))
assert_equal(X[:, 2], t * np.sin(t))
def test_make_s_curve():
X, t = make_s_curve(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_equal(X[:, 0], np.sin(t))
assert_equal(X[:, 2], np.sign(t) * (np.cos(t) - 1))
| agpl-3.0 |
CalvinHsu1223/LinuxCNC-EtherCAT-HAL-Driver | configs/sim/gscreen_custom/gscreen_handler.py | 25 | 4194 | # This is a handler file for using Gscreen's infrastructure
# to load a completely custom glade screen
# The only things that really matter are that it's saved as a GTK Builder project,
# the toplevel window is called window1 (the default name) and you connect a destroy
# signal to the window, else you can't close down linuxcnc
class HandlerClass:
# this will be pretty standard to gain access to everything
# emc is for control and status of linuxcnc
# data is important data from gscreen and linuxcnc
# widgets is all the widgets from the glade files
# gscreen is for access to gscreens methods
#
# we added setting the gremlin DRO on from the startup,
# a global variable for the number of key presses,
# and make only the active axis buttons visible
def __init__(self, halcomp,builder,useropts,gscreen):
self.emc = gscreen.emc
self.data = gscreen.data
self.widgets = gscreen.widgets
self.gscreen = gscreen
self.nhits = 0
self.widgets.gremlin.set_property('enable_dro',True)
for i in ("x","y","z","a","b","c","u","v","w","s"):
if i in self.data.axis_list:
self.widgets["axis_%s"%i].set_visible(True)
self.widgets.offsetpage1.set_row_visible("1",False)
# This is a new method for a couple of widgets we added callbacks to.
# The argument 'widget' is a reference to the actual widget that called.
# In this way we can use this method on a bunch of widgets without knowing
# their name ahead of time.
def on_button_press(self,widget,data=None):
        self.nhits += 1
widget.set_label("hits: %d" % self.nhits)
# This method is overriden from gscreen
# We selected this method name in the glade file as a callback.
# Since this method name is the same as one in gscreen,
# gscreen won't connect a callback to it's method.
# Meaning this is the only one called.
def on_estop_clicked(self,*args):
print "estop"
if self.data.estopped:
self.emc.estop_reset(1)
else:
self.emc.machine_off(1)
self.emc.estop(1)
self.widgets.on_label.set_text("Machine Off")
return True
# This is a new method for our new button
# we selected this method name in the glade file as a callback
def on_machine_state_clicked(self,*args):
if self.data.estopped:
return
elif not self.data.machine_on:
self.emc.machine_on(1)
self.widgets.on_label.set_text("Machine On")
else:
self.emc.machine_off(1)
self.widgets.on_label.set_text("Machine Off")
# here we override gscreen's method of hiding the cursor
# by writing a method with the same name that gscreen connects a signal to.
# and our new method in fact calls a sound method and then the hide cursor method
# that are both in gscreen
# So now we get a sound when we hide and show the pointer
def on_hide_cursor(self,widget):
self.gscreen.audio.set_sound(self.data.alert_sound)
self.gscreen.audio.run()
self.gscreen.on_hide_cursor(None)
# every 100 milli seconds this gets called
# we add calls to the regular functions for the widgets we are using.
# and add any extra calls/code
def periodic(self):
self.gscreen.update_mdi_spindle_button()
self.gscreen.update_spindle_bar()
self.gscreen.update_active_gcodes()
self.gscreen.update_active_mcodes()
self.gscreen.update_aux_coolant_pins()
self.gscreen.update_feed_speed_label()
self.gscreen.update_tool_label()
self.gscreen.update_coolant_leds()
self.gscreen.update_estop_led()
self.gscreen.update_machine_on_led()
self.gscreen.update_limit_override()
self.gscreen.update_override_label()
self.gscreen.update_jog_rate_label()
self.gscreen.update_mode_label()
self.gscreen.update_units_button_label()
def get_handlers(halcomp,builder,useropts,gscreen):
return [HandlerClass(halcomp,builder,useropts,gscreen)]
| gpl-2.0 |
bowang/tensorflow | tensorflow/compiler/tests/ternary_ops_test.py | 101 | 4286 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test cases for ternary operators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests.xla_test import XLATestCase
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import googletest
class TernaryOpsTest(XLATestCase):
def _testTernary(self, op, a, b, c, expected):
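    # Build placeholders matching a, b and c, evaluate op(pa, pb, pc) inside the
    # XLA test scope and compare the result against `expected` (rtol=1e-3).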
with self.test_session() as session:
with self.test_scope():
pa = array_ops.placeholder(dtypes.as_dtype(a.dtype), a.shape, name="a")
pb = array_ops.placeholder(dtypes.as_dtype(b.dtype), b.shape, name="b")
pc = array_ops.placeholder(dtypes.as_dtype(c.dtype), c.shape, name="c")
output = op(pa, pb, pc)
result = session.run(output, {pa: a, pb: b, pc: c})
self.assertAllClose(result, expected, rtol=1e-3)
def testLinspace(self):
self._testTernary(
math_ops.linspace,
np.float32(1),
np.float32(2),
np.int32(1),
expected=np.array([1], dtype=np.float32))
self._testTernary(
math_ops.linspace,
np.float32(1),
np.float32(4),
np.int32(3),
expected=np.array([1, 2.5, 4], dtype=np.float32))
def testRange(self):
self._testTernary(
math_ops.range,
np.int32(1),
np.int32(2),
np.int32(1),
expected=np.array([1], dtype=np.int32))
self._testTernary(
math_ops.range,
np.int32(1),
np.int32(7),
np.int32(2),
expected=np.array([1, 3, 5], dtype=np.int32))
def testSelect(self):
self._testTernary(
array_ops.where,
np.array(0, dtype=np.bool),
np.array(2, dtype=np.float32),
np.array(7, dtype=np.float32),
expected=np.array(7, dtype=np.float32))
self._testTernary(
array_ops.where,
np.array(1, dtype=np.bool),
np.array([1, 2, 3, 4], dtype=np.float32),
np.array([5, 6, 7, 8], dtype=np.float32),
expected=np.array([1, 2, 3, 4], dtype=np.float32))
self._testTernary(
array_ops.where,
np.array(0, dtype=np.bool),
np.array([[1, 2], [3, 4], [5, 6]], dtype=np.float32),
np.array([[7, 8], [9, 10], [11, 12]], dtype=np.float32),
expected=np.array([[7, 8], [9, 10], [11, 12]], dtype=np.float32))
self._testTernary(
array_ops.where,
np.array([0, 1, 1, 0], dtype=np.bool),
np.array([1, 2, 3, 4], dtype=np.float32),
np.array([5, 6, 7, 8], dtype=np.float32),
expected=np.array([5, 2, 3, 8], dtype=np.float32))
self._testTernary(
array_ops.where,
np.array([0, 1, 0], dtype=np.bool),
np.array([[1, 2], [3, 4], [5, 6]], dtype=np.float32),
np.array([[7, 8], [9, 10], [11, 12]], dtype=np.float32),
expected=np.array([[7, 8], [3, 4], [11, 12]], dtype=np.float32))
def testSlice(self):
for dtype in self.numeric_types:
self._testTernary(
array_ops.slice,
np.array([[], [], []], dtype=dtype),
np.array([1, 0], dtype=np.int32),
np.array([2, 0], dtype=np.int32),
expected=np.array([[], []], dtype=dtype))
self._testTernary(
array_ops.slice,
np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=dtype),
np.array([0, 1], dtype=np.int32),
np.array([2, 1], dtype=np.int32),
expected=np.array([[2], [5]], dtype=dtype))
if __name__ == "__main__":
googletest.main()
| apache-2.0 |
thaihungle/deepexp | rare-mann/mimic_gen.py | 1 | 5981 | import numpy as np
import os
import random
import pickle
class MimicDataLoader(object):
def __init__(self, data_folder, batch_size=1, max_sequence=10, max_iter=None, split = 0.75, train_keep=1):
super(MimicDataLoader, self).__init__()
self.data_folder = data_folder
self.batch_size = batch_size
self.num_step = max_sequence
self.max_iter = max_iter
self.num_iter = 0
self.input_map=pickle.load(open(data_folder+'/dig_map.pkl','rb'))
self.ouput_map = pickle.load(open(data_folder + '/proc_map.pkl', 'rb'))
self.all_input = pickle.load(open(data_folder+'/dig_input.pkl','rb'))
self.all_output = pickle.load(open(data_folder + '/proc_output.pkl', 'rb'))
self.output_size = self.all_output.shape[1]
if len(np.shape(self.all_output))>1:
self.all_output = np.argmax(self.all_output, axis=1)
print(self.all_output[:10])
print(self.all_output.shape)
self.num_samples=self.all_input.shape[0]
print('num samples {}'.format(self.num_samples))
lindex=list(range(self.num_samples))
# random.shuffle(lindex)
self.train_data_indexes = lindex[:int(self.num_samples*split*train_keep)]
self.test_data_indexes = lindex[int(self.num_samples*split):]
self.is_training=True
self.data_offset=0
self.input_size=self.all_input.shape[1]
print('num train samples: {}'.format(len(self.train_data_indexes)))
print('train index: {} ...'.format(self.train_data_indexes[:10]))
print('num test samples: {}'.format(len(self.test_data_indexes)))
print('test index: {} ...'.format(self.test_data_indexes[:10]))
print('num classes: {}'.format(self.output_size))
print('num steps per episode: {}'.format(self.num_step))
print('batch size: {}'.format(self.batch_size))
def fetch_all(self):
train_x=[]
train_y=[]
test_x=[]
test_y=[]
for ind in self.train_data_indexes:
train_x.append(self.all_input[ind])
train_y.append(self.all_output[ind])
for ind in self.test_data_indexes:
test_x.append(self.all_input[ind])
test_y.append(self.all_output[ind])
return np.asarray(train_x), np.asarray(train_y), np.asarray(test_x), np.asarray(test_y)
def fetch_batch(self, is_training=True):
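        # Sample `num_step` random examples per batch row from the chosen split and
        # return (inputs, outputs): two lists of length num_step holding arrays of
        # shape (batch_size, input_size) and (batch_size,) respectively.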
if is_training:
list_index=self.train_data_indexes
else:
list_index=self.test_data_indexes
indexes = np.zeros((self.batch_size, self.num_step), dtype=np.int32)
for i in range(self.batch_size):
indexes[i, :] = np.random.choice(len(list_index), self.num_step, replace=False)
# print('-------------')
# print(indexes[:10])
all_inputs=[]
all_outputs=[]
for s in range(self.num_step):
example_inputs = np.zeros((self.batch_size, self.input_size))
example_outputs = np.zeros(self.batch_size)
            for b in range(self.batch_size):
                example_inputs[b,:]=self.all_input[list_index[indexes[b,s]]]
                example_outputs[b] = self.all_output[list_index[indexes[b, s]]]
            all_inputs.append(example_inputs.astype('float32'))
            all_outputs.append(example_outputs.astype('int32'))
return all_inputs, all_outputs
def fetch_batch_full(self, is_training, is_rand=True):
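        # Like fetch_batch, but iterate over every sample of the chosen split; each
        # batch row gets its own random ordering unless is_rand is False.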
if is_training:
list_index=self.train_data_indexes
else:
list_index=self.test_data_indexes
num_t = len(list_index)
indexes = np.zeros((self.batch_size, num_t),dtype=np.int32)
for i in range(self.batch_size):
if is_rand:
indexes[i, :] = np.random.choice(len(list_index), num_t, replace=False)
else:
indexes[i, :] = np.asarray(list(range(len(list_index))))
# indexes = np.zeros((self.batch_size, num_t), dtype=np.int32)
# for i in range(self.batch_size):
# indexes[i,:]=np.arange(num_t)
all_inputs=[]
all_outputs=[]
for s in range(num_t):
example_inputs = np.zeros((self.batch_size, self.input_size))
example_outputs = np.zeros(self.batch_size)
            for b in range(self.batch_size):
                example_inputs[b,:]=self.all_input[list_index[indexes[b,s]]]
                example_outputs[b] = self.all_output[list_index[indexes[b, s]]]
            all_inputs.append(example_inputs.astype('float32'))
            all_outputs.append(example_outputs.astype('int32'))
return all_inputs, all_outputs
# indexes just have shape (batch,)
def predict_index2data(self, list_index, indexes):
all_inputs = []
all_outputs = []
for s in range(self.num_step):
example_inputs = np.zeros((self.batch_size, self.input_size))
example_outputs = np.zeros(self.batch_size)
            for b in range(self.batch_size):
                example_inputs[b, :] = self.all_input[list_index[indexes[b]]]
                example_outputs[b] = self.all_output[list_index[indexes[b]]]
            all_inputs.append(example_inputs)
            all_outputs.append(example_outputs)
return all_inputs, all_outputs
def predict_online_index2data(self, list_index, indexes):
all_inputs = []
all_outputs = []
for s in range(self.num_step):
example_inputs = np.zeros((self.batch_size, self.input_size))
example_outputs = np.zeros(self.batch_size)
            for b in range(self.batch_size):
                example_inputs[b, :] = self.all_input[list_index[indexes[b]]]
                example_outputs[b] = self.all_output[list_index[indexes[b]]]
            all_inputs.append(example_inputs)
            all_outputs.append(example_outputs)
return all_inputs, all_outputs
| mit |
persandstrom/home-assistant | homeassistant/components/device_tracker/luci.py | 4 | 5240 | """
Support for OpenWRT (luci) routers.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/device_tracker.luci/
"""
import json
import logging
import re
import requests
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.exceptions import HomeAssistantError
from homeassistant.components.device_tracker import (
DOMAIN, PLATFORM_SCHEMA, DeviceScanner)
from homeassistant.const import (
CONF_HOST, CONF_USERNAME, CONF_PASSWORD, CONF_SSL)
_LOGGER = logging.getLogger(__name__)
DEFAULT_SSL = False
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_SSL, default=DEFAULT_SSL): cv.boolean
})
class InvalidLuciTokenError(HomeAssistantError):
"""When an invalid token is detected."""
pass
def get_scanner(hass, config):
"""Validate the configuration and return a Luci scanner."""
scanner = LuciDeviceScanner(config[DOMAIN])
return scanner if scanner.success_init else None
class LuciDeviceScanner(DeviceScanner):
"""This class queries a wireless router running OpenWrt firmware."""
def __init__(self, config):
"""Initialize the scanner."""
host = config[CONF_HOST]
protocol = 'http' if not config[CONF_SSL] else 'https'
self.origin = '{}://{}'.format(protocol, host)
self.username = config[CONF_USERNAME]
self.password = config[CONF_PASSWORD]
self.parse_api_pattern = re.compile(r"(?P<param>\w*) = (?P<value>.*);")
self.last_results = {}
self.refresh_token()
self.mac2name = None
self.success_init = self.token is not None
def refresh_token(self):
"""Get a new token."""
self.token = _get_token(self.origin, self.username, self.password)
def scan_devices(self):
"""Scan for new devices and return a list with found device IDs."""
self._update_info()
return self.last_results
def get_device_name(self, device):
"""Return the name of the given device or None if we don't know."""
if self.mac2name is None:
url = '{}/cgi-bin/luci/rpc/uci'.format(self.origin)
result = _req_json_rpc(
url, 'get_all', 'dhcp', params={'auth': self.token})
if result:
hosts = [x for x in result.values()
if x['.type'] == 'host' and
'mac' in x and 'name' in x]
mac2name_list = [
(x['mac'].upper(), x['name']) for x in hosts]
self.mac2name = dict(mac2name_list)
else:
# Error, handled in the _req_json_rpc
return
return self.mac2name.get(device.upper(), None)
def _update_info(self):
"""Ensure the information from the Luci router is up to date.
Returns boolean if scanning successful.
"""
if not self.success_init:
return False
_LOGGER.info("Checking ARP")
url = '{}/cgi-bin/luci/rpc/sys'.format(self.origin)
try:
result = _req_json_rpc(
url, 'net.arptable', params={'auth': self.token})
except InvalidLuciTokenError:
_LOGGER.info("Refreshing token")
self.refresh_token()
return False
if result:
self.last_results = []
for device_entry in result:
# Check if the Flags for each device contain
# NUD_REACHABLE and if so, add it to last_results
if int(device_entry['Flags'], 16) & 0x2:
self.last_results.append(device_entry['HW address'])
return True
return False
def _req_json_rpc(url, method, *args, **kwargs):
"""Perform one JSON RPC operation."""
data = json.dumps({'method': method, 'params': args})
try:
res = requests.post(url, data=data, timeout=5, **kwargs)
except requests.exceptions.Timeout:
_LOGGER.exception("Connection to the router timed out")
return
if res.status_code == 200:
try:
result = res.json()
except ValueError:
# If json decoder could not parse the response
_LOGGER.exception("Failed to parse response from luci")
return
try:
return result['result']
except KeyError:
_LOGGER.exception("No result in response from luci")
return
elif res.status_code == 401:
# Authentication error
_LOGGER.exception(
"Failed to authenticate, check your username and password")
return
elif res.status_code == 403:
_LOGGER.error("Luci responded with a 403 Invalid token")
raise InvalidLuciTokenError
else:
_LOGGER.error("Invalid response from luci: %s", res)
def _get_token(origin, username, password):
"""Get authentication token for the given configuration."""
url = '{}/cgi-bin/luci/rpc/auth'.format(origin)
return _req_json_rpc(url, 'login', username, password)
| apache-2.0 |
abadger/ansible-modules-core | network/nxos/nxos_vxlan_vtep_vni.py | 19 | 19617 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: nxos_vxlan_vtep_vni
version_added: "2.2"
short_description: Creates a Virtual Network Identifier member (VNI)
description:
- Creates a Virtual Network Identifier member (VNI) for an NVE
overlay interface.
author: Gabriele Gerbino (@GGabriele)
extends_documentation_fragment: nxos
notes:
    - default, where supported, restores the parameter's default value.
options:
interface:
description:
- Interface name for the VXLAN Network Virtualization Endpoint.
required: true
vni:
description:
- ID of the Virtual Network Identifier.
required: true
assoc_vrf:
description:
- This attribute is used to identify and separate processing VNIs
that are associated with a VRF and used for routing. The VRF
and VNI specified with this command must match the configuration
of the VNI under the VRF.
required: false
choices: ['true','false']
default: null
ingress_replication:
description:
- Specifies mechanism for host reachability advertisement.
required: false
choices: ['bgp','static']
default: null
multicast_group:
description:
- The multicast group (range) of the VNI. Valid values are
string and keyword 'default'.
required: false
default: null
peer_list:
description:
- Set the ingress-replication static peer list. Valid values
are an array, a space-separated string of ip addresses,
or the keyword 'default'.
required: false
default: null
suppress_arp:
description:
- Suppress arp under layer 2 VNI.
required: false
choices: ['true','false']
default: null
state:
description:
- Determines whether the config should be present or not
on the device.
required: false
default: present
choices: ['present','absent']
include_defaults:
description:
            - Specify whether or not to use the complete running configuration
for module operations.
required: false
default: true
        choices: ['true','false']
config:
description:
- Configuration string to be used for module operations. If not
specified, the module will use the current running configuration.
required: false
default: null
save:
description:
- Specify to save the running configuration after
module operations.
required: false
default: false
choices: ['true','false']
'''
EXAMPLES = '''
- nxos_vxlan_vtep_vni:
interface: nve1
vni: 6000
ingress_replication: default
username: "{{ un }}"
password: "{{ pwd }}"
host: "{{ inventory_hostname }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: verbose mode
type: dict
sample: {"ingress_replication": "default", "interface": "nve1", "vni": "6000"}
existing:
description: k/v pairs of existing configuration
returned: verbose mode
type: dict
sample: {}
end_state:
description: k/v pairs of configuration after module execution
returned: verbose mode
type: dict
sample: {"assoc_vrf": false, "ingress_replication": "", "interface": "nve1",
"multicast_group": "", "peer_list": [],
"suppress_arp": false, "vni": "6000"}
updates:
description: commands sent to the device
returned: always
type: list
sample: ["interface nve1", "member vni 6000"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
# COMMON CODE FOR MIGRATION
import re
from ansible.module_utils.basic import get_exception
from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
from ansible.module_utils.shell import ShellError
try:
from ansible.module_utils.nxos import get_module
except ImportError:
from ansible.module_utils.nxos import NetworkModule
def to_list(val):
if isinstance(val, (list, tuple)):
return list(val)
elif val is not None:
return [val]
else:
return list()
class CustomNetworkConfig(NetworkConfig):
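    # Extends NetworkConfig with helpers to expand a config section into its child
    # lines and to add new (possibly nested) statements under parent sections.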
def expand_section(self, configobj, S=None):
if S is None:
S = list()
S.append(configobj)
for child in configobj.children:
if child in S:
continue
self.expand_section(child, S)
return S
def get_object(self, path):
for item in self.items:
if item.text == path[-1]:
parents = [p.text for p in item.parents]
if parents == path[:-1]:
return item
def to_block(self, section):
return '\n'.join([item.raw for item in section])
def get_section(self, path):
try:
section = self.get_section_objects(path)
return self.to_block(section)
except ValueError:
return list()
def get_section_objects(self, path):
if not isinstance(path, list):
path = [path]
obj = self.get_object(path)
if not obj:
raise ValueError('path does not exist in config')
return self.expand_section(obj)
def add(self, lines, parents=None):
"""Adds one or lines of configuration
"""
ancestors = list()
offset = 0
obj = None
## global config command
if not parents:
for line in to_list(lines):
item = ConfigLine(line)
item.raw = line
if item not in self.items:
self.items.append(item)
else:
for index, p in enumerate(parents):
try:
i = index + 1
obj = self.get_section_objects(parents[:i])[0]
ancestors.append(obj)
except ValueError:
# add parent to config
offset = index * self.indent
obj = ConfigLine(p)
obj.raw = p.rjust(len(p) + offset)
if ancestors:
obj.parents = list(ancestors)
ancestors[-1].children.append(obj)
self.items.append(obj)
ancestors.append(obj)
# add child objects
for line in to_list(lines):
# check if child already exists
for child in ancestors[-1].children:
if child.text == line:
break
else:
offset = len(parents) * self.indent
item = ConfigLine(line)
item.raw = line.rjust(len(line) + offset)
item.parents = ancestors
ancestors[-1].children.append(item)
self.items.append(item)
def get_network_module(**kwargs):
try:
return get_module(**kwargs)
except NameError:
return NetworkModule(**kwargs)
def get_config(module, include_defaults=False):
config = module.params['config']
if not config:
try:
config = module.get_config()
except AttributeError:
defaults = module.params['include_defaults']
config = module.config.get_config(include_defaults=defaults)
return CustomNetworkConfig(indent=2, contents=config)
def load_config(module, candidate):
config = get_config(module)
commands = candidate.difference(config)
commands = [str(c).strip() for c in commands]
save_config = module.params['save']
result = dict(changed=False)
if commands:
if not module.check_mode:
try:
module.configure(commands)
except AttributeError:
module.config(commands)
if save_config:
try:
module.config.save_config()
except AttributeError:
module.execute(['copy running-config startup-config'])
result['changed'] = True
result['updates'] = commands
return result
# END OF COMMON CODE
BOOL_PARAMS = ['suppress_arp']
PARAM_TO_COMMAND_KEYMAP = {
'assoc_vrf': 'associate-vrf',
'interface': 'interface',
'vni': 'member vni',
'ingress_replication': 'ingress-replication protocol',
'multicast_group': 'mcast-group',
'peer_list': 'peer-ip',
'suppress_arp': 'suppress-arp'
}
PARAM_TO_DEFAULT_KEYMAP = {}
WARNINGS = []
def invoke(name, *args, **kwargs):
func = globals().get(name)
if func:
return func(*args, **kwargs)
def get_value(arg, config, module):
if arg in BOOL_PARAMS:
REGEX = re.compile(r'\s+{0}\s*$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M)
value = False
try:
if REGEX.search(config):
value = True
except TypeError:
value = False
else:
REGEX = re.compile(r'(?:{0}\s)(?P<value>.*)$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M)
value = ''
if PARAM_TO_COMMAND_KEYMAP[arg] in config:
value = REGEX.search(config).group('value')
return value
def check_interface(module, netcfg):
config = str(netcfg)
REGEX = re.compile(r'(?:interface nve)(?P<value>.*)$', re.M)
value = ''
if 'interface nve' in config:
value = 'nve{0}'.format(REGEX.search(config).group('value'))
return value
def get_custom_value(arg, config, module):
splitted_config = config.splitlines()
if arg == 'assoc_vrf':
value = False
if 'associate-vrf' in config:
value = True
elif arg == 'peer_list':
value = []
REGEX = re.compile(r'(?:peer-ip\s)(?P<peer_value>.*)$', re.M)
for line in splitted_config:
peer_value = ''
if PARAM_TO_COMMAND_KEYMAP[arg] in line:
peer_value = REGEX.search(line).group('peer_value')
if peer_value:
value.append(peer_value)
return value
def get_existing(module, args):
existing = {}
netcfg = get_config(module)
custom = [
'assoc_vrf',
'peer_list'
]
interface_exist = check_interface(module, netcfg)
if interface_exist:
parents = ['interface {0}'.format(interface_exist)]
temp_config = netcfg.get_section(parents)
if 'associate-vrf' in temp_config:
parents.append('member vni {0} associate-vrf'.format(
module.params['vni']))
config = netcfg.get_section(parents)
elif 'member vni' in temp_config:
parents.append('member vni {0}'.format(module.params['vni']))
config = netcfg.get_section(parents)
else:
config = {}
if config:
for arg in args:
if arg not in ['interface', 'vni']:
if arg in custom:
existing[arg] = get_custom_value(arg, config, module)
else:
existing[arg] = get_value(arg, config, module)
existing['interface'] = interface_exist
existing['vni'] = module.params['vni']
return existing, interface_exist
def apply_key_map(key_map, table):
new_dict = {}
for key, value in table.items():
new_key = key_map.get(key)
if new_key:
value = table.get(key)
if value:
new_dict[new_key] = value
else:
new_dict[new_key] = value
return new_dict
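# Illustrative sketch (hypothetical values, not part of the original module):
# apply_key_map() renames module parameters to the NX-OS keywords defined in
# PARAM_TO_COMMAND_KEYMAP, e.g.
#   apply_key_map(PARAM_TO_COMMAND_KEYMAP,
#                 {'suppress_arp': True, 'multicast_group': '239.1.1.1'})
#   # -> {'suppress-arp': True, 'mcast-group': '239.1.1.1'}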
def state_present(module, existing, proposed, candidate):
commands = list()
proposed_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, proposed)
existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing)
for key, value in proposed_commands.iteritems():
if key == 'associate-vrf':
command = 'member vni {0} {1}'.format(module.params['vni'], key)
if value:
commands.append(command)
else:
commands.append('no {0}'.format(command))
elif key == 'peer-ip' and value != 'default':
for peer in value:
commands.append('{0} {1}'.format(key, peer))
elif value is True:
commands.append(key)
elif value is False:
commands.append('no {0}'.format(key))
elif value == 'default':
if existing_commands.get(key):
existing_value = existing_commands.get(key)
if key == 'peer-ip':
for peer in existing_value:
commands.append('no {0} {1}'.format(key, peer))
else:
commands.append('no {0} {1}'.format(key, existing_value))
else:
if key.replace(' ', '_').replace('-', '_') in BOOL_PARAMS:
commands.append('no {0}'.format(key.lower()))
else:
command = '{0} {1}'.format(key, value.lower())
commands.append(command)
if commands:
vni_command = 'member vni {0}'.format(module.params['vni'])
ingress_replication_command = 'ingress-replication protocol static'
interface_command = 'interface {0}'.format(module.params['interface'])
if ingress_replication_command in commands:
static_level_cmds = [cmd for cmd in commands if 'peer' in cmd]
parents = [interface_command, vni_command, ingress_replication_command]
candidate.add(static_level_cmds, parents=parents)
commands = [cmd for cmd in commands if 'peer' not in cmd]
if vni_command in commands:
parents = [interface_command]
commands.remove(vni_command)
if module.params['assoc_vrf'] is None:
parents.append(vni_command)
candidate.add(commands, parents=parents)
def state_absent(module, existing, proposed, candidate):
if existing['assoc_vrf']:
commands = ['no member vni {0} associate-vrf'.format(
module.params['vni'])]
else:
commands = ['no member vni {0}'.format(module.params['vni'])]
parents = ['interface {0}'.format(module.params['interface'])]
candidate.add(commands, parents=parents)
def main():
argument_spec = dict(
interface=dict(required=True, type='str'),
vni=dict(required=True, type='str'),
assoc_vrf=dict(required=False, type='bool'),
multicast_group=dict(required=False, type='str'),
peer_list=dict(required=False, type='list'),
suppress_arp=dict(required=False, type='bool'),
ingress_replication=dict(required=False, type='str',
choices=['bgp', 'static', 'default']),
state=dict(choices=['present', 'absent'], default='present',
required=False),
include_defaults=dict(default=True),
config=dict(),
save=dict(type='bool', default=False)
)
module = get_network_module(argument_spec=argument_spec,
supports_check_mode=True)
if module.params['assoc_vrf']:
mutually_exclusive_params = ['multicast_group',
'suppress_arp',
'ingress_replication']
for param in mutually_exclusive_params:
if module.params[param]:
module.fail_json(msg='assoc_vrf cannot be used with '
'{0} param'.format(param))
if module.params['peer_list']:
if module.params['ingress_replication'] != 'static':
module.fail_json(msg='ingress_replication=static is required '
'when using peer_list param')
else:
peer_list = module.params['peer_list']
if peer_list[0] == 'default':
module.params['peer_list'] = 'default'
else:
stripped_peer_list = map(str.strip, peer_list)
module.params['peer_list'] = stripped_peer_list
state = module.params['state']
args = [
'assoc_vrf',
'interface',
'vni',
'ingress_replication',
'multicast_group',
'peer_list',
'suppress_arp'
]
existing, interface_exist = invoke('get_existing', module, args)
end_state = existing
proposed_args = dict((k, v) for k, v in module.params.iteritems()
if v is not None and k in args)
proposed = {}
for key, value in proposed_args.iteritems():
if key != 'interface':
if str(value).lower() == 'default':
value = PARAM_TO_DEFAULT_KEYMAP.get(key)
if value is None:
value = 'default'
if existing.get(key) or (not existing.get(key) and value):
proposed[key] = value
result = {}
if state == 'present' or (state == 'absent' and existing):
if not interface_exist:
WARNINGS.append("The proposed NVE interface does not exist. "
"Use nxos_interface to create it first.")
elif interface_exist != module.params['interface']:
module.fail_json(msg='Only 1 NVE interface is allowed on '
'the switch.')
elif (existing and state == 'absent' and
existing['vni'] != module.params['vni']):
module.fail_json(msg="ERROR: VNI delete failed: Could not find"
" vni node for {0}".format(
module.params['vni']),
existing_vni=existing['vni'])
else:
candidate = CustomNetworkConfig(indent=3)
invoke('state_%s' % state, module, existing, proposed, candidate)
try:
response = load_config(module, candidate)
result.update(response)
except ShellError:
exc = get_exception()
module.fail_json(msg=str(exc))
else:
result['updates'] = []
result['connected'] = module.connected
if module._verbosity > 0:
end_state, interface_exist = invoke('get_existing', module, args)
result['end_state'] = end_state
result['existing'] = existing
result['proposed'] = proposed_args
if WARNINGS:
result['warnings'] = WARNINGS
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
ukanga/SickRage | lib/html5lib/sanitizer.py | 805 | 16428 | from __future__ import absolute_import, division, unicode_literals
import re
from xml.sax.saxutils import escape, unescape
from .tokenizer import HTMLTokenizer
from .constants import tokenTypes
class HTMLSanitizerMixin(object):
""" sanitization of XHTML+MathML+SVG and of inline style attributes."""
acceptable_elements = ['a', 'abbr', 'acronym', 'address', 'area',
'article', 'aside', 'audio', 'b', 'big', 'blockquote', 'br', 'button',
'canvas', 'caption', 'center', 'cite', 'code', 'col', 'colgroup',
'command', 'datagrid', 'datalist', 'dd', 'del', 'details', 'dfn',
'dialog', 'dir', 'div', 'dl', 'dt', 'em', 'event-source', 'fieldset',
'figcaption', 'figure', 'footer', 'font', 'form', 'header', 'h1',
'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input', 'ins',
'keygen', 'kbd', 'label', 'legend', 'li', 'm', 'map', 'menu', 'meter',
'multicol', 'nav', 'nextid', 'ol', 'output', 'optgroup', 'option',
'p', 'pre', 'progress', 'q', 's', 'samp', 'section', 'select',
'small', 'sound', 'source', 'spacer', 'span', 'strike', 'strong',
'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'time', 'tfoot',
'th', 'thead', 'tr', 'tt', 'u', 'ul', 'var', 'video']
mathml_elements = ['maction', 'math', 'merror', 'mfrac', 'mi',
'mmultiscripts', 'mn', 'mo', 'mover', 'mpadded', 'mphantom',
'mprescripts', 'mroot', 'mrow', 'mspace', 'msqrt', 'mstyle', 'msub',
'msubsup', 'msup', 'mtable', 'mtd', 'mtext', 'mtr', 'munder',
'munderover', 'none']
svg_elements = ['a', 'animate', 'animateColor', 'animateMotion',
'animateTransform', 'clipPath', 'circle', 'defs', 'desc', 'ellipse',
'font-face', 'font-face-name', 'font-face-src', 'g', 'glyph', 'hkern',
'linearGradient', 'line', 'marker', 'metadata', 'missing-glyph',
'mpath', 'path', 'polygon', 'polyline', 'radialGradient', 'rect',
'set', 'stop', 'svg', 'switch', 'text', 'title', 'tspan', 'use']
acceptable_attributes = ['abbr', 'accept', 'accept-charset', 'accesskey',
'action', 'align', 'alt', 'autocomplete', 'autofocus', 'axis',
'background', 'balance', 'bgcolor', 'bgproperties', 'border',
'bordercolor', 'bordercolordark', 'bordercolorlight', 'bottompadding',
'cellpadding', 'cellspacing', 'ch', 'challenge', 'char', 'charoff',
'choff', 'charset', 'checked', 'cite', 'class', 'clear', 'color',
'cols', 'colspan', 'compact', 'contenteditable', 'controls', 'coords',
'data', 'datafld', 'datapagesize', 'datasrc', 'datetime', 'default',
'delay', 'dir', 'disabled', 'draggable', 'dynsrc', 'enctype', 'end',
'face', 'for', 'form', 'frame', 'galleryimg', 'gutter', 'headers',
'height', 'hidefocus', 'hidden', 'high', 'href', 'hreflang', 'hspace',
'icon', 'id', 'inputmode', 'ismap', 'keytype', 'label', 'leftspacing',
'lang', 'list', 'longdesc', 'loop', 'loopcount', 'loopend',
'loopstart', 'low', 'lowsrc', 'max', 'maxlength', 'media', 'method',
'min', 'multiple', 'name', 'nohref', 'noshade', 'nowrap', 'open',
'optimum', 'pattern', 'ping', 'point-size', 'poster', 'pqg', 'preload',
'prompt', 'radiogroup', 'readonly', 'rel', 'repeat-max', 'repeat-min',
'replace', 'required', 'rev', 'rightspacing', 'rows', 'rowspan',
'rules', 'scope', 'selected', 'shape', 'size', 'span', 'src', 'start',
'step', 'style', 'summary', 'suppress', 'tabindex', 'target',
'template', 'title', 'toppadding', 'type', 'unselectable', 'usemap',
'urn', 'valign', 'value', 'variable', 'volume', 'vspace', 'vrml',
'width', 'wrap', 'xml:lang']
mathml_attributes = ['actiontype', 'align', 'columnalign', 'columnalign',
'columnalign', 'columnlines', 'columnspacing', 'columnspan', 'depth',
'display', 'displaystyle', 'equalcolumns', 'equalrows', 'fence',
'fontstyle', 'fontweight', 'frame', 'height', 'linethickness', 'lspace',
'mathbackground', 'mathcolor', 'mathvariant', 'mathvariant', 'maxsize',
'minsize', 'other', 'rowalign', 'rowalign', 'rowalign', 'rowlines',
'rowspacing', 'rowspan', 'rspace', 'scriptlevel', 'selection',
'separator', 'stretchy', 'width', 'width', 'xlink:href', 'xlink:show',
'xlink:type', 'xmlns', 'xmlns:xlink']
svg_attributes = ['accent-height', 'accumulate', 'additive', 'alphabetic',
'arabic-form', 'ascent', 'attributeName', 'attributeType',
'baseProfile', 'bbox', 'begin', 'by', 'calcMode', 'cap-height',
'class', 'clip-path', 'color', 'color-rendering', 'content', 'cx',
'cy', 'd', 'dx', 'dy', 'descent', 'display', 'dur', 'end', 'fill',
'fill-opacity', 'fill-rule', 'font-family', 'font-size',
'font-stretch', 'font-style', 'font-variant', 'font-weight', 'from',
'fx', 'fy', 'g1', 'g2', 'glyph-name', 'gradientUnits', 'hanging',
'height', 'horiz-adv-x', 'horiz-origin-x', 'id', 'ideographic', 'k',
'keyPoints', 'keySplines', 'keyTimes', 'lang', 'marker-end',
'marker-mid', 'marker-start', 'markerHeight', 'markerUnits',
'markerWidth', 'mathematical', 'max', 'min', 'name', 'offset',
'opacity', 'orient', 'origin', 'overline-position',
'overline-thickness', 'panose-1', 'path', 'pathLength', 'points',
'preserveAspectRatio', 'r', 'refX', 'refY', 'repeatCount',
'repeatDur', 'requiredExtensions', 'requiredFeatures', 'restart',
'rotate', 'rx', 'ry', 'slope', 'stemh', 'stemv', 'stop-color',
'stop-opacity', 'strikethrough-position', 'strikethrough-thickness',
'stroke', 'stroke-dasharray', 'stroke-dashoffset', 'stroke-linecap',
'stroke-linejoin', 'stroke-miterlimit', 'stroke-opacity',
'stroke-width', 'systemLanguage', 'target', 'text-anchor', 'to',
'transform', 'type', 'u1', 'u2', 'underline-position',
'underline-thickness', 'unicode', 'unicode-range', 'units-per-em',
'values', 'version', 'viewBox', 'visibility', 'width', 'widths', 'x',
'x-height', 'x1', 'x2', 'xlink:actuate', 'xlink:arcrole',
'xlink:href', 'xlink:role', 'xlink:show', 'xlink:title', 'xlink:type',
'xml:base', 'xml:lang', 'xml:space', 'xmlns', 'xmlns:xlink', 'y',
'y1', 'y2', 'zoomAndPan']
attr_val_is_uri = ['href', 'src', 'cite', 'action', 'longdesc', 'poster',
'xlink:href', 'xml:base']
svg_attr_val_allows_ref = ['clip-path', 'color-profile', 'cursor', 'fill',
'filter', 'marker', 'marker-start', 'marker-mid', 'marker-end',
'mask', 'stroke']
svg_allow_local_href = ['altGlyph', 'animate', 'animateColor',
'animateMotion', 'animateTransform', 'cursor', 'feImage', 'filter',
'linearGradient', 'pattern', 'radialGradient', 'textpath', 'tref',
'set', 'use']
acceptable_css_properties = ['azimuth', 'background-color',
'border-bottom-color', 'border-collapse', 'border-color',
'border-left-color', 'border-right-color', 'border-top-color', 'clear',
'color', 'cursor', 'direction', 'display', 'elevation', 'float', 'font',
'font-family', 'font-size', 'font-style', 'font-variant', 'font-weight',
'height', 'letter-spacing', 'line-height', 'overflow', 'pause',
'pause-after', 'pause-before', 'pitch', 'pitch-range', 'richness',
'speak', 'speak-header', 'speak-numeral', 'speak-punctuation',
'speech-rate', 'stress', 'text-align', 'text-decoration', 'text-indent',
'unicode-bidi', 'vertical-align', 'voice-family', 'volume',
'white-space', 'width']
acceptable_css_keywords = ['auto', 'aqua', 'black', 'block', 'blue',
'bold', 'both', 'bottom', 'brown', 'center', 'collapse', 'dashed',
'dotted', 'fuchsia', 'gray', 'green', '!important', 'italic', 'left',
'lime', 'maroon', 'medium', 'none', 'navy', 'normal', 'nowrap', 'olive',
'pointer', 'purple', 'red', 'right', 'solid', 'silver', 'teal', 'top',
'transparent', 'underline', 'white', 'yellow']
acceptable_svg_properties = ['fill', 'fill-opacity', 'fill-rule',
'stroke', 'stroke-width', 'stroke-linecap', 'stroke-linejoin',
'stroke-opacity']
acceptable_protocols = ['ed2k', 'ftp', 'http', 'https', 'irc',
'mailto', 'news', 'gopher', 'nntp', 'telnet', 'webcal',
'xmpp', 'callto', 'feed', 'urn', 'aim', 'rsync', 'tag',
'ssh', 'sftp', 'rtsp', 'afs']
# subclasses may define their own versions of these constants
allowed_elements = acceptable_elements + mathml_elements + svg_elements
allowed_attributes = acceptable_attributes + mathml_attributes + svg_attributes
allowed_css_properties = acceptable_css_properties
allowed_css_keywords = acceptable_css_keywords
allowed_svg_properties = acceptable_svg_properties
allowed_protocols = acceptable_protocols
    # Sanitize the +html+, escaping all elements not in ALLOWED_ELEMENTS, and
    # stripping out all attributes not in ALLOWED_ATTRIBUTES. Style attributes
    # are parsed, and a restricted set, specified by ALLOWED_CSS_PROPERTIES
    # and ALLOWED_CSS_KEYWORDS, are allowed through. Attributes in
    # ATTR_VAL_IS_URI are scanned, and only URI schemes specified in
    # ALLOWED_PROTOCOLS are allowed.
    #
    #   sanitize_html('<script> do_nasty_stuff() </script>')
    #    => &lt;script&gt; do_nasty_stuff() &lt;/script&gt;
    #   sanitize_html('<a href="javascript: sucker();">Click here for $100</a>')
    #    => <a>Click here for $100</a>
def sanitize_token(self, token):
# accommodate filters which use token_type differently
token_type = token["type"]
if token_type in list(tokenTypes.keys()):
token_type = tokenTypes[token_type]
if token_type in (tokenTypes["StartTag"], tokenTypes["EndTag"],
tokenTypes["EmptyTag"]):
if token["name"] in self.allowed_elements:
return self.allowed_token(token, token_type)
else:
return self.disallowed_token(token, token_type)
elif token_type == tokenTypes["Comment"]:
pass
else:
return token
def allowed_token(self, token, token_type):
if "data" in token:
attrs = dict([(name, val) for name, val in
token["data"][::-1]
if name in self.allowed_attributes])
for attr in self.attr_val_is_uri:
if attr not in attrs:
continue
val_unescaped = re.sub("[`\000-\040\177-\240\s]+", '',
unescape(attrs[attr])).lower()
# remove replacement characters from unescaped characters
val_unescaped = val_unescaped.replace("\ufffd", "")
if (re.match("^[a-z0-9][-+.a-z0-9]*:", val_unescaped) and
(val_unescaped.split(':')[0] not in
self.allowed_protocols)):
del attrs[attr]
for attr in self.svg_attr_val_allows_ref:
if attr in attrs:
attrs[attr] = re.sub(r'url\s*\(\s*[^#\s][^)]+?\)',
' ',
unescape(attrs[attr]))
if (token["name"] in self.svg_allow_local_href and
'xlink:href' in attrs and re.search('^\s*[^#\s].*',
attrs['xlink:href'])):
del attrs['xlink:href']
if 'style' in attrs:
attrs['style'] = self.sanitize_css(attrs['style'])
token["data"] = [[name, val] for name, val in list(attrs.items())]
return token
def disallowed_token(self, token, token_type):
if token_type == tokenTypes["EndTag"]:
token["data"] = "</%s>" % token["name"]
elif token["data"]:
attrs = ''.join([' %s="%s"' % (k, escape(v)) for k, v in token["data"]])
token["data"] = "<%s%s>" % (token["name"], attrs)
else:
token["data"] = "<%s>" % token["name"]
if token.get("selfClosing"):
token["data"] = token["data"][:-1] + "/>"
if token["type"] in list(tokenTypes.keys()):
token["type"] = "Characters"
else:
token["type"] = tokenTypes["Characters"]
del token["name"]
return token
def sanitize_css(self, style):
# disallow urls
style = re.compile('url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ', style)
# gauntlet
if not re.match("""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style):
return ''
if not re.match("^\s*([-\w]+\s*:[^:;]*(;\s*|$))*$", style):
return ''
clean = []
for prop, value in re.findall("([-\w]+)\s*:\s*([^:;]*)", style):
if not value:
continue
if prop.lower() in self.allowed_css_properties:
clean.append(prop + ': ' + value + ';')
elif prop.split('-')[0].lower() in ['background', 'border', 'margin',
'padding']:
for keyword in value.split():
if not keyword in self.acceptable_css_keywords and \
not re.match("^(#[0-9a-f]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$", keyword):
break
else:
clean.append(prop + ': ' + value + ';')
elif prop.lower() in self.allowed_svg_properties:
clean.append(prop + ': ' + value + ';')
return ' '.join(clean)
class HTMLSanitizer(HTMLTokenizer, HTMLSanitizerMixin):
def __init__(self, stream, encoding=None, parseMeta=True, useChardet=True,
lowercaseElementName=False, lowercaseAttrName=False, parser=None):
# Change case matching defaults as we only output lowercase html anyway
# This solution doesn't seem ideal...
HTMLTokenizer.__init__(self, stream, encoding, parseMeta, useChardet,
lowercaseElementName, lowercaseAttrName, parser=parser)
def __iter__(self):
for token in HTMLTokenizer.__iter__(self):
token = self.sanitize_token(token)
if token:
yield token
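# Minimal usage sketch (an assumption based on this module, not part of the
# original file): HTMLSanitizer subclasses HTMLTokenizer, so it can be iterated
# directly over markup; disallowed elements come back escaped as Characters
# tokens and disallowed attributes are dropped.
#   for token in HTMLSanitizer(u'<a href="javascript:alert(1)">x</a>'):
#       pass  # inspect or re-serialize the sanitized token stream here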
| gpl-3.0 |
openprocurement/restkit | restkit/filters.py | 2 | 3801 | # -*- coding: utf-8 -
#
# This file is part of restkit released under the MIT license.
# See the NOTICE for more information.
import base64
import re
try:
from urlparse import parse_qsl
except ImportError:
from cgi import parse_qsl
from urlparse import urlunparse
from restkit.oauth2 import Request, SignatureMethod_HMAC_SHA1
class BasicAuth(object):
""" Simple filter to manage basic authentification"""
def __init__(self, username, password):
self.credentials = (username, password)
def on_request(self, request):
encode = base64.b64encode("%s:%s" % self.credentials)
request.headers['Authorization'] = 'Basic %s' % encode
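# Usage sketch (assumption: restkit's Resource accepts a ``filters`` list, as
# in the project's documentation; the URL and credentials are placeholders):
#   from restkit import Resource
#   res = Resource('https://example.com', filters=[BasicAuth('user', 'secret')])
#   res.get('/protected')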
def validate_consumer(consumer):
""" validate a consumer agains oauth2.Consumer object """
if not hasattr(consumer, "key"):
raise ValueError("Invalid consumer.")
return consumer
def validate_token(token):
""" validate a token agains oauth2.Token object """
if token is not None and not hasattr(token, "key"):
raise ValueError("Invalid token.")
return token
class OAuthFilter(object):
""" oauth filter """
def __init__(self, path, consumer, token=None, method=None,
realm=""):
""" Init OAuthFilter
        :param path: path or regexp. * means all paths on which oauth can be
        applied.
:param consumer: oauth consumer, instance of oauth2.Consumer
:param token: oauth token, instance of oauth2.Token
:param method: oauth signature method
        token and signature method are optional. Consumer should be an
        instance of `oauth2.Consumer`, token an instance of `oauth2.Token`,
        and signature method an instance of `oauth2.SignatureMethod`.
"""
if path.endswith('*'):
self.match = re.compile("%s.*" % path.rsplit('*', 1)[0])
else:
self.match = re.compile("%s$" % path)
self.consumer = validate_consumer(consumer)
self.token = validate_token(token)
self.method = method or SignatureMethod_HMAC_SHA1()
self.realm = realm
def on_path(self, request):
path = request.parsed_url.path or "/"
return (self.match.match(path) is not None)
def on_request(self, request):
if not self.on_path(request):
return
params = {}
form = False
parsed_url = request.parsed_url
if request.body and request.body is not None:
ctype = request.headers.iget('content-type')
if ctype is not None and \
ctype.startswith('application/x-www-form-urlencoded'):
                # we are in a form, try to get oauth params from here
form = True
params = dict(parse_qsl(request.body))
        # update params from query parameters
params.update(parse_qsl(parsed_url.query))
raw_url = urlunparse((parsed_url.scheme, parsed_url.netloc,
parsed_url.path, '', '', ''))
oauth_req = Request.from_consumer_and_token(self.consumer,
token=self.token, http_method=request.method,
http_url=raw_url, parameters=params,
is_form_encoded=form)
oauth_req.sign_request(self.method, self.consumer, self.token)
if form:
request.body = oauth_req.to_postdata()
request.headers['Content-Length'] = len(request.body)
elif request.method in ('GET', 'HEAD'):
request.original_url = request.url
request.url = oauth_req.to_url()
else:
oauth_headers = oauth_req.to_header(realm=self.realm)
request.headers.update(oauth_headers)
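# Usage sketch (assumption: key, secret, URL and path are placeholders;
# Consumer comes from restkit.oauth2 like the classes imported above):
#   from restkit import Resource
#   from restkit.oauth2 import Consumer
#   consumer = Consumer(key='consumer-key', secret='consumer-secret')
#   res = Resource('https://api.example.com', filters=[OAuthFilter('*', consumer)])
#   res.get('/resource')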
| apache-2.0 |
DxCx/nzbToMedia | libs/beets/ui/commands.py | 4 | 50834 | # This file is part of beets.
# Copyright 2014, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""This module provides the default commands for beets' command-line
interface.
"""
from __future__ import print_function
import logging
import os
import time
import itertools
import codecs
import platform
import beets
from beets import ui
from beets.ui import print_, input_, decargs
from beets import autotag
from beets.autotag import recommendation
from beets.autotag import hooks
from beets import plugins
from beets import importer
from beets import util
from beets.util import syspath, normpath, ancestry, displayable_path
from beets.util.functemplate import Template
from beets import library
from beets import config
from beets.util.confit import _package_path
# Global logger.
log = logging.getLogger('beets')
# The list of default subcommands. This is populated with Subcommand
# objects that can be fed to a SubcommandsOptionParser.
default_commands = []
# Utilities.
def _do_query(lib, query, album, also_items=True):
"""For commands that operate on matched items, performs a query
and returns a list of matching items and a list of matching
albums. (The latter is only nonempty when album is True.) Raises
a UserError if no items match. also_items controls whether, when
fetching albums, the associated items should be fetched also.
"""
if album:
albums = list(lib.albums(query))
items = []
if also_items:
for al in albums:
items += al.items()
else:
albums = []
items = list(lib.items(query))
if album and not albums:
raise ui.UserError('No matching albums found.')
elif not album and not items:
raise ui.UserError('No matching items found.')
return items, albums
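# Illustrative sketch (hypothetical query string, not part of the original
# file): callers typically do
#   items, albums = _do_query(lib, u'artist:beatles', album=True)
# and get the matching items plus, because album=True, the matching albums.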
# fields: Shows a list of available fields for queries and format strings.
fields_cmd = ui.Subcommand('fields',
help='show fields available for queries and format strings')
def fields_func(lib, opts, args):
def _print_rows(names):
print(" " + "\n ".join(names))
def _show_plugin_fields(album):
plugin_fields = []
for plugin in plugins.find_plugins():
if album:
fdict = plugin.album_template_fields
else:
fdict = plugin.template_fields
plugin_fields += fdict.keys()
if plugin_fields:
print("Template fields from plugins:")
_print_rows(plugin_fields)
print("Item fields:")
_print_rows(library.ITEM_KEYS)
_show_plugin_fields(False)
print("\nAlbum fields:")
_print_rows(library.ALBUM_KEYS)
_show_plugin_fields(True)
fields_cmd.func = fields_func
default_commands.append(fields_cmd)
# import: Autotagger and importer.
VARIOUS_ARTISTS = u'Various Artists'
# Importer utilities and support.
def disambig_string(info):
"""Generate a string for an AlbumInfo or TrackInfo object that
provides context that helps disambiguate similar-looking albums and
tracks.
"""
disambig = []
if info.data_source and info.data_source != 'MusicBrainz':
disambig.append(info.data_source)
if isinstance(info, hooks.AlbumInfo):
if info.media:
if info.mediums > 1:
disambig.append(u'{0}x{1}'.format(
info.mediums, info.media
))
else:
disambig.append(info.media)
if info.year:
disambig.append(unicode(info.year))
if info.country:
disambig.append(info.country)
if info.label:
disambig.append(info.label)
if info.albumdisambig:
disambig.append(info.albumdisambig)
if disambig:
return u', '.join(disambig)
def dist_string(dist):
"""Formats a distance (a float) as a colorized similarity percentage
string.
"""
out = '%.1f%%' % ((1 - dist) * 100)
if dist <= config['match']['strong_rec_thresh'].as_number():
out = ui.colorize('green', out)
elif dist <= config['match']['medium_rec_thresh'].as_number():
out = ui.colorize('yellow', out)
else:
out = ui.colorize('red', out)
return out
def penalty_string(distance, limit=None):
"""Returns a colorized string that indicates all the penalties
applied to a distance object.
"""
penalties = []
for key in distance.keys():
key = key.replace('album_', '')
key = key.replace('track_', '')
key = key.replace('_', ' ')
penalties.append(key)
if penalties:
if limit and len(penalties) > limit:
penalties = penalties[:limit] + ['...']
return ui.colorize('yellow', '(%s)' % ', '.join(penalties))
def show_change(cur_artist, cur_album, match):
"""Print out a representation of the changes that will be made if an
album's tags are changed according to `match`, which must be an AlbumMatch
object.
"""
def show_album(artist, album):
if artist:
album_description = u' %s - %s' % (artist, album)
elif album:
album_description = u' %s' % album
else:
album_description = u' (unknown album)'
print_(album_description)
def format_index(track_info):
"""Return a string representing the track index of the given
TrackInfo or Item object.
"""
if isinstance(track_info, hooks.TrackInfo):
index = track_info.index
medium_index = track_info.medium_index
medium = track_info.medium
mediums = match.info.mediums
else:
index = medium_index = track_info.track
medium = track_info.disc
mediums = track_info.disctotal
if config['per_disc_numbering']:
if mediums > 1:
return u'{0}-{1}'.format(medium, medium_index)
else:
return unicode(medium_index)
else:
return unicode(index)
# Identify the album in question.
if cur_artist != match.info.artist or \
(cur_album != match.info.album and
match.info.album != VARIOUS_ARTISTS):
artist_l, artist_r = cur_artist or '', match.info.artist
album_l, album_r = cur_album or '', match.info.album
if artist_r == VARIOUS_ARTISTS:
# Hide artists for VA releases.
artist_l, artist_r = u'', u''
artist_l, artist_r = ui.colordiff(artist_l, artist_r)
album_l, album_r = ui.colordiff(album_l, album_r)
print_("Correcting tags from:")
show_album(artist_l, album_l)
print_("To:")
show_album(artist_r, album_r)
else:
print_(u"Tagging:\n {0.artist} - {0.album}".format(match.info))
# Data URL.
if match.info.data_url:
print_('URL:\n %s' % match.info.data_url)
# Info line.
info = []
# Similarity.
info.append('(Similarity: %s)' % dist_string(match.distance))
# Penalties.
penalties = penalty_string(match.distance)
if penalties:
info.append(penalties)
# Disambiguation.
disambig = disambig_string(match.info)
if disambig:
info.append(ui.colorize('lightgray', '(%s)' % disambig))
print_(' '.join(info))
# Tracks.
pairs = match.mapping.items()
pairs.sort(key=lambda (_, track_info): track_info.index)
# Build up LHS and RHS for track difference display. The `lines` list
# contains ``(lhs, rhs, width)`` tuples where `width` is the length (in
# characters) of the uncolorized LHS.
lines = []
medium = disctitle = None
for item, track_info in pairs:
# Medium number and title.
if medium != track_info.medium or disctitle != track_info.disctitle:
media = match.info.media or 'Media'
if match.info.mediums > 1 and track_info.disctitle:
lhs = '%s %s: %s' % (media, track_info.medium,
track_info.disctitle)
elif match.info.mediums > 1:
lhs = '%s %s' % (media, track_info.medium)
elif track_info.disctitle:
lhs = '%s: %s' % (media, track_info.disctitle)
else:
lhs = None
if lhs:
lines.append((lhs, '', 0))
medium, disctitle = track_info.medium, track_info.disctitle
# Titles.
new_title = track_info.title
if not item.title.strip():
# If there's no title, we use the filename.
cur_title = displayable_path(os.path.basename(item.path))
lhs, rhs = cur_title, new_title
else:
cur_title = item.title.strip()
lhs, rhs = ui.colordiff(cur_title, new_title)
lhs_width = len(cur_title)
# Track number change.
cur_track, new_track = format_index(item), format_index(track_info)
if cur_track != new_track:
if item.track in (track_info.index, track_info.medium_index):
color = 'lightgray'
else:
color = 'red'
if (cur_track + new_track).count('-') == 1:
lhs_track, rhs_track = ui.colorize(color, cur_track), \
ui.colorize(color, new_track)
else:
color = 'red'
lhs_track, rhs_track = ui.color_diff_suffix(cur_track,
new_track)
templ = ui.colorize(color, u' (#') + u'{0}' + \
ui.colorize(color, u')')
lhs += templ.format(lhs_track)
rhs += templ.format(rhs_track)
lhs_width += len(cur_track) + 4
# Length change.
if item.length and track_info.length and \
abs(item.length - track_info.length) > \
config['ui']['length_diff_thresh'].as_number():
cur_length = ui.human_seconds_short(item.length)
new_length = ui.human_seconds_short(track_info.length)
lhs_length, rhs_length = ui.color_diff_suffix(cur_length,
new_length)
templ = ui.colorize('red', u' (') + u'{0}' + \
ui.colorize('red', u')')
lhs += templ.format(lhs_length)
rhs += templ.format(rhs_length)
lhs_width += len(cur_length) + 3
# Penalties.
penalties = penalty_string(match.distance.tracks[track_info])
if penalties:
rhs += ' %s' % penalties
if lhs != rhs:
lines.append((' * %s' % lhs, rhs, lhs_width))
elif config['import']['detail']:
lines.append((' * %s' % lhs, '', lhs_width))
# Print each track in two columns, or across two lines.
col_width = (ui.term_width() - len(''.join([' * ', ' -> ']))) // 2
if lines:
max_width = max(w for _, _, w in lines)
for lhs, rhs, lhs_width in lines:
if not rhs:
print_(lhs)
elif max_width > col_width:
print_(u'%s ->\n %s' % (lhs, rhs))
else:
pad = max_width - lhs_width
print_(u'%s%s -> %s' % (lhs, ' ' * pad, rhs))
# Missing and unmatched tracks.
if match.extra_tracks:
print_('Missing tracks:')
for track_info in match.extra_tracks:
line = ' ! %s (#%s)' % (track_info.title, format_index(track_info))
if track_info.length:
line += ' (%s)' % ui.human_seconds_short(track_info.length)
print_(ui.colorize('yellow', line))
if match.extra_items:
print_('Unmatched tracks:')
for item in match.extra_items:
line = ' ! %s (#%s)' % (item.title, format_index(item))
if item.length:
line += ' (%s)' % ui.human_seconds_short(item.length)
print_(ui.colorize('yellow', line))
def show_item_change(item, match):
"""Print out the change that would occur by tagging `item` with the
metadata from `match`, a TrackMatch object.
"""
cur_artist, new_artist = item.artist, match.info.artist
cur_title, new_title = item.title, match.info.title
if cur_artist != new_artist or cur_title != new_title:
cur_artist, new_artist = ui.colordiff(cur_artist, new_artist)
cur_title, new_title = ui.colordiff(cur_title, new_title)
print_("Correcting track tags from:")
print_(" %s - %s" % (cur_artist, cur_title))
print_("To:")
print_(" %s - %s" % (new_artist, new_title))
else:
print_("Tagging track: %s - %s" % (cur_artist, cur_title))
# Data URL.
if match.info.data_url:
print_('URL:\n %s' % match.info.data_url)
# Info line.
info = []
# Similarity.
info.append('(Similarity: %s)' % dist_string(match.distance))
# Penalties.
penalties = penalty_string(match.distance)
if penalties:
info.append(penalties)
# Disambiguation.
disambig = disambig_string(match.info)
if disambig:
info.append(ui.colorize('lightgray', '(%s)' % disambig))
print_(' '.join(info))
def _summary_judment(rec):
"""Determines whether a decision should be made without even asking
the user. This occurs in quiet mode and when an action is chosen for
NONE recommendations. Return an action or None if the user should be
queried. May also print to the console if a summary judgment is
made.
"""
if config['import']['quiet']:
if rec == recommendation.strong:
return importer.action.APPLY
else:
action = config['import']['quiet_fallback'].as_choice({
'skip': importer.action.SKIP,
'asis': importer.action.ASIS,
})
elif rec == recommendation.none:
action = config['import']['none_rec_action'].as_choice({
'skip': importer.action.SKIP,
'asis': importer.action.ASIS,
'ask': None,
})
else:
return None
if action == importer.action.SKIP:
print_('Skipping.')
elif action == importer.action.ASIS:
print_('Importing as-is.')
return action
def choose_candidate(candidates, singleton, rec, cur_artist=None,
cur_album=None, item=None, itemcount=None):
"""Given a sorted list of candidates, ask the user for a selection
of which candidate to use. Applies to both full albums and
singletons (tracks). Candidates are either AlbumMatch or TrackMatch
objects depending on `singleton`. for albums, `cur_artist`,
`cur_album`, and `itemcount` must be provided. For singletons,
`item` must be provided.
    Returns the result of the choice, which may be SKIP, ASIS, TRACKS,
    MANUAL, or a candidate (an AlbumMatch/TrackMatch object).
"""
# Sanity check.
if singleton:
assert item is not None
else:
assert cur_artist is not None
assert cur_album is not None
# Zero candidates.
if not candidates:
if singleton:
print_("No matching recordings found.")
opts = ('Use as-is', 'Skip', 'Enter search', 'enter Id',
'aBort')
else:
print_("No matching release found for {0} tracks."
.format(itemcount))
print_('For help, see: '
'http://beets.readthedocs.org/en/latest/faq.html#nomatch')
opts = ('Use as-is', 'as Tracks', 'Group albums', 'Skip',
'Enter search', 'enter Id', 'aBort')
sel = ui.input_options(opts)
if sel == 'u':
return importer.action.ASIS
elif sel == 't':
assert not singleton
return importer.action.TRACKS
elif sel == 'e':
return importer.action.MANUAL
elif sel == 's':
return importer.action.SKIP
elif sel == 'b':
raise importer.ImportAbort()
elif sel == 'i':
return importer.action.MANUAL_ID
elif sel == 'g':
return importer.action.ALBUMS
else:
assert False
# Is the change good enough?
bypass_candidates = False
if rec != recommendation.none:
match = candidates[0]
bypass_candidates = True
while True:
# Display and choose from candidates.
require = rec <= recommendation.low
if not bypass_candidates:
# Display list of candidates.
print_(u'Finding tags for {0} "{1} - {2}".'.format(
u'track' if singleton else u'album',
item.artist if singleton else cur_artist,
item.title if singleton else cur_album,
))
print_(u'Candidates:')
for i, match in enumerate(candidates):
# Index, metadata, and distance.
line = [
u'{0}.'.format(i + 1),
u'{0} - {1}'.format(
match.info.artist,
match.info.title if singleton else match.info.album,
),
u'({0})'.format(dist_string(match.distance)),
]
# Penalties.
penalties = penalty_string(match.distance, 3)
if penalties:
line.append(penalties)
# Disambiguation
disambig = disambig_string(match.info)
if disambig:
line.append(ui.colorize('lightgray', '(%s)' % disambig))
print_(' '.join(line))
# Ask the user for a choice.
if singleton:
opts = ('Skip', 'Use as-is', 'Enter search', 'enter Id',
'aBort')
else:
opts = ('Skip', 'Use as-is', 'as Tracks', 'Group albums',
'Enter search', 'enter Id', 'aBort')
sel = ui.input_options(opts, numrange=(1, len(candidates)))
if sel == 's':
return importer.action.SKIP
elif sel == 'u':
return importer.action.ASIS
elif sel == 'm':
pass
elif sel == 'e':
return importer.action.MANUAL
elif sel == 't':
assert not singleton
return importer.action.TRACKS
elif sel == 'b':
raise importer.ImportAbort()
elif sel == 'i':
return importer.action.MANUAL_ID
elif sel == 'g':
return importer.action.ALBUMS
else: # Numerical selection.
match = candidates[sel - 1]
if sel != 1:
# When choosing anything but the first match,
# disable the default action.
require = True
bypass_candidates = False
# Show what we're about to do.
if singleton:
show_item_change(item, match)
else:
show_change(cur_artist, cur_album, match)
# Exact match => tag automatically if we're not in timid mode.
if rec == recommendation.strong and not config['import']['timid']:
return match
# Ask for confirmation.
if singleton:
opts = ('Apply', 'More candidates', 'Skip', 'Use as-is',
'Enter search', 'enter Id', 'aBort')
else:
opts = ('Apply', 'More candidates', 'Skip', 'Use as-is',
'as Tracks', 'Group albums', 'Enter search', 'enter Id',
'aBort')
default = config['import']['default_action'].as_choice({
'apply': 'a',
'skip': 's',
'asis': 'u',
'none': None,
})
if default is None:
require = True
sel = ui.input_options(opts, require=require, default=default)
if sel == 'a':
return match
elif sel == 'g':
return importer.action.ALBUMS
elif sel == 's':
return importer.action.SKIP
elif sel == 'u':
return importer.action.ASIS
elif sel == 't':
assert not singleton
return importer.action.TRACKS
elif sel == 'e':
return importer.action.MANUAL
elif sel == 'b':
raise importer.ImportAbort()
elif sel == 'i':
return importer.action.MANUAL_ID
def manual_search(singleton):
"""Input either an artist and album (for full albums) or artist and
track name (for singletons) for manual search.
"""
artist = input_('Artist:')
name = input_('Track:' if singleton else 'Album:')
return artist.strip(), name.strip()
def manual_id(singleton):
"""Input an ID, either for an album ("release") or a track ("recording").
"""
prompt = u'Enter {0} ID:'.format('recording' if singleton else 'release')
return input_(prompt).strip()
class TerminalImportSession(importer.ImportSession):
"""An import session that runs in a terminal.
"""
def choose_match(self, task):
"""Given an initial autotagging of items, go through an interactive
dance with the user to ask for a choice of metadata. Returns an
AlbumMatch object, ASIS, or SKIP.
"""
# Show what we're tagging.
print_()
print_(displayable_path(task.paths, u'\n') +
u' ({0} items)'.format(len(task.items)))
# Take immediate action if appropriate.
action = _summary_judment(task.rec)
if action == importer.action.APPLY:
match = task.candidates[0]
show_change(task.cur_artist, task.cur_album, match)
return match
elif action is not None:
return action
# Loop until we have a choice.
candidates, rec = task.candidates, task.rec
while True:
# Ask for a choice from the user.
choice = choose_candidate(candidates, False, rec, task.cur_artist,
task.cur_album, itemcount=len(task.items))
# Choose which tags to use.
if choice in (importer.action.SKIP, importer.action.ASIS,
importer.action.TRACKS, importer.action.ALBUMS):
# Pass selection to main control flow.
return choice
elif choice is importer.action.MANUAL:
# Try again with manual search terms.
search_artist, search_album = manual_search(False)
_, _, candidates, rec = autotag.tag_album(
task.items, search_artist, search_album
)
elif choice is importer.action.MANUAL_ID:
# Try a manually-entered ID.
search_id = manual_id(False)
if search_id:
_, _, candidates, rec = autotag.tag_album(
task.items, search_id=search_id
)
else:
# We have a candidate! Finish tagging. Here, choice is an
# AlbumMatch object.
assert isinstance(choice, autotag.AlbumMatch)
return choice
def choose_item(self, task):
"""Ask the user for a choice about tagging a single item. Returns
either an action constant or a TrackMatch object.
"""
print_()
print_(task.item.path)
candidates, rec = task.candidates, task.rec
# Take immediate action if appropriate.
action = _summary_judment(task.rec)
if action == importer.action.APPLY:
match = candidates[0]
show_item_change(task.item, match)
return match
elif action is not None:
return action
while True:
# Ask for a choice.
choice = choose_candidate(candidates, True, rec, item=task.item)
if choice in (importer.action.SKIP, importer.action.ASIS):
return choice
elif choice == importer.action.TRACKS:
assert False # TRACKS is only legal for albums.
elif choice == importer.action.MANUAL:
# Continue in the loop with a new set of candidates.
search_artist, search_title = manual_search(True)
candidates, rec = autotag.tag_item(task.item, search_artist,
search_title)
elif choice == importer.action.MANUAL_ID:
# Ask for a track ID.
search_id = manual_id(True)
if search_id:
candidates, rec = autotag.tag_item(task.item,
search_id=search_id)
else:
# Chose a candidate.
assert isinstance(choice, autotag.TrackMatch)
return choice
def resolve_duplicate(self, task):
"""Decide what to do when a new album or item seems similar to one
that's already in the library.
"""
log.warn("This %s is already in the library!" %
("album" if task.is_album else "item"))
if config['import']['quiet']:
# In quiet mode, don't prompt -- just skip.
log.info('Skipping.')
sel = 's'
else:
sel = ui.input_options(
('Skip new', 'Keep both', 'Remove old')
)
if sel == 's':
# Skip new.
task.set_choice(importer.action.SKIP)
elif sel == 'k':
# Keep both. Do nothing; leave the choice intact.
pass
elif sel == 'r':
# Remove old.
task.remove_duplicates = True
else:
assert False
def should_resume(self, path):
return ui.input_yn(u"Import of the directory:\n{0}\n"
"was interrupted. Resume (Y/n)?"
.format(displayable_path(path)))
# The import command.
def import_files(lib, paths, query):
"""Import the files in the given list of paths or matching the
query.
"""
# Check the user-specified directories.
for path in paths:
fullpath = syspath(normpath(path))
if not config['import']['singletons'] and not os.path.isdir(fullpath):
raise ui.UserError(u'not a directory: {0}'.format(
displayable_path(path)))
elif config['import']['singletons'] and not os.path.exists(fullpath):
raise ui.UserError(u'no such file: {0}'.format(
displayable_path(path)))
# Check parameter consistency.
if config['import']['quiet'] and config['import']['timid']:
raise ui.UserError("can't be both quiet and timid")
# Open the log.
if config['import']['log'].get() is not None:
logpath = config['import']['log'].as_filename()
try:
logfile = codecs.open(syspath(logpath), 'a', 'utf8')
except IOError:
raise ui.UserError(u"could not open log file for writing: %s" %
displayable_path(logpath))
print(u'import started', time.asctime(), file=logfile)
else:
logfile = None
# Never ask for input in quiet mode.
if config['import']['resume'].get() == 'ask' and \
config['import']['quiet']:
config['import']['resume'] = False
session = TerminalImportSession(lib, logfile, paths, query)
try:
session.run()
finally:
# If we were logging, close the file.
if logfile:
print(u'', file=logfile)
logfile.close()
# Emit event.
plugins.send('import', lib=lib, paths=paths)
import_cmd = ui.Subcommand('import', help='import new music',
aliases=('imp', 'im'))
import_cmd.parser.add_option('-c', '--copy', action='store_true',
default=None, help="copy tracks into library directory (default)")
import_cmd.parser.add_option('-C', '--nocopy', action='store_false',
dest='copy', help="don't copy tracks (opposite of -c)")
import_cmd.parser.add_option('-w', '--write', action='store_true',
default=None, help="write new metadata to files' tags (default)")
import_cmd.parser.add_option('-W', '--nowrite', action='store_false',
dest='write', help="don't write metadata (opposite of -w)")
import_cmd.parser.add_option('-a', '--autotag', action='store_true',
dest='autotag', help="infer tags for imported files (default)")
import_cmd.parser.add_option('-A', '--noautotag', action='store_false',
dest='autotag',
help="don't infer tags for imported files (opposite of -a)")
import_cmd.parser.add_option('-p', '--resume', action='store_true',
default=None, help="resume importing if interrupted")
import_cmd.parser.add_option('-P', '--noresume', action='store_false',
dest='resume', help="do not try to resume importing")
import_cmd.parser.add_option('-q', '--quiet', action='store_true',
dest='quiet', help="never prompt for input: skip albums instead")
import_cmd.parser.add_option('-l', '--log', dest='log',
help='file to log untaggable albums for later review')
import_cmd.parser.add_option('-s', '--singletons', action='store_true',
help='import individual tracks instead of full albums')
import_cmd.parser.add_option('-t', '--timid', dest='timid',
action='store_true', help='always confirm all actions')
import_cmd.parser.add_option('-L', '--library', dest='library',
action='store_true', help='retag items matching a query')
import_cmd.parser.add_option('-i', '--incremental', dest='incremental',
action='store_true', help='skip already-imported directories')
import_cmd.parser.add_option('-I', '--noincremental', dest='incremental',
action='store_false', help='do not skip already-imported directories')
import_cmd.parser.add_option('--flat', dest='flat',
action='store_true', help='import an entire tree as a single album')
import_cmd.parser.add_option('-g', '--group-albums', dest='group_albums',
    action='store_true', help='group tracks in a folder into separate albums')
def import_func(lib, opts, args):
config['import'].set_args(opts)
# Special case: --copy flag suppresses import_move (which would
# otherwise take precedence).
if opts.copy:
config['import']['move'] = False
if opts.library:
query = decargs(args)
paths = []
else:
query = None
paths = args
if not paths:
raise ui.UserError('no path specified')
import_files(lib, paths, query)
import_cmd.func = import_func
default_commands.append(import_cmd)
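# Illustrative command-line sketches (paths are placeholders), using only the
# flags defined above:
#   beet import ~/new-music        # interactive, autotagged import
#   beet import -A ~/new-music     # import without autotagging
#   beet import -s ~/singles       # import individual tracks as singletons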
# list: Query and show library contents.
def list_items(lib, query, album, fmt):
"""Print out items in lib matching query. If album, then search for
albums instead of single items.
"""
tmpl = Template(ui._pick_format(album, fmt))
if album:
for album in lib.albums(query):
ui.print_obj(album, lib, tmpl)
else:
for item in lib.items(query):
ui.print_obj(item, lib, tmpl)
list_cmd = ui.Subcommand('list', help='query the library', aliases=('ls',))
list_cmd.parser.add_option('-a', '--album', action='store_true',
help='show matching albums instead of tracks')
list_cmd.parser.add_option('-p', '--path', action='store_true',
help='print paths for matched items or albums')
list_cmd.parser.add_option('-f', '--format', action='store',
help='print with custom format', default=None)
def list_func(lib, opts, args):
if opts.path:
fmt = '$path'
else:
fmt = opts.format
list_items(lib, decargs(args), opts.album, fmt)
list_cmd.func = list_func
default_commands.append(list_cmd)
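# Illustrative command-line sketches (queries and format strings are
# placeholders), using only the flags defined above:
#   beet ls artist:beatles                   # list matching tracks
#   beet ls -a year:1969                     # list matching albums
#   beet ls -f '$artist - $album - $title'   # custom output format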
# update: Update library contents according to on-disk tags.
def update_items(lib, query, album, move, pretend):
"""For all the items matched by the query, update the library to
reflect the item's embedded tags.
"""
with lib.transaction():
items, _ = _do_query(lib, query, album)
# Walk through the items and pick up their changes.
affected_albums = set()
for item in items:
# Item deleted?
if not os.path.exists(syspath(item.path)):
ui.print_obj(item, lib)
ui.print_(ui.colorize('red', u' deleted'))
if not pretend:
item.remove(True)
affected_albums.add(item.album_id)
continue
# Did the item change since last checked?
if item.current_mtime() <= item.mtime:
log.debug(u'skipping %s because mtime is up to date (%i)' %
(displayable_path(item.path), item.mtime))
continue
# Read new data.
try:
item.read()
except Exception as exc:
log.error(u'error reading {0}: {1}'.format(
displayable_path(item.path), exc))
continue
# Special-case album artist when it matches track artist. (Hacky
# but necessary for preserving album-level metadata for non-
# autotagged imports.)
if not item.albumartist:
old_item = lib.get_item(item.id)
if old_item.albumartist == old_item.artist == item.artist:
item.albumartist = old_item.albumartist
item._dirty.discard('albumartist')
# Check for and display changes.
changed = ui.show_model_changes(item,
fields=library.ITEM_KEYS_META)
# Save changes.
if not pretend:
if changed:
# Move the item if it's in the library.
if move and lib.directory in ancestry(item.path):
item.move()
item.store()
affected_albums.add(item.album_id)
else:
# The file's mtime was different, but there were no
# changes to the metadata. Store the new mtime,
# which is set in the call to read(), so we don't
# check this again in the future.
item.store()
# Skip album changes while pretending.
if pretend:
return
# Modify affected albums to reflect changes in their items.
for album_id in affected_albums:
if album_id is None: # Singletons.
continue
album = lib.get_album(album_id)
if not album: # Empty albums have already been removed.
log.debug('emptied album %i' % album_id)
continue
first_item = album.items().get()
# Update album structure to reflect an item in it.
for key in library.ALBUM_KEYS_ITEM:
album[key] = first_item[key]
album.store()
# Move album art (and any inconsistent items).
if move and lib.directory in ancestry(first_item.path):
log.debug('moving album %i' % album_id)
album.move()
update_cmd = ui.Subcommand('update',
help='update the library', aliases=('upd','up',))
update_cmd.parser.add_option('-a', '--album', action='store_true',
help='match albums instead of tracks')
update_cmd.parser.add_option('-M', '--nomove', action='store_false',
default=True, dest='move', help="don't move files in library")
update_cmd.parser.add_option('-p', '--pretend', action='store_true',
help="show all changes but do nothing")
update_cmd.parser.add_option('-f', '--format', action='store',
help='print with custom format', default=None)
def update_func(lib, opts, args):
update_items(lib, decargs(args), opts.album, opts.move, opts.pretend)
update_cmd.func = update_func
default_commands.append(update_cmd)
# remove: Remove items from library, delete files.
def remove_items(lib, query, album, delete):
"""Remove items matching query from lib. If album, then match and
remove whole albums. If delete, also remove files from disk.
"""
# Get the matching items.
items, albums = _do_query(lib, query, album)
# Show all the items.
for item in items:
ui.print_obj(item, lib)
# Confirm with user.
print_()
if delete:
prompt = 'Really DELETE %i files (y/n)?' % len(items)
else:
prompt = 'Really remove %i items from the library (y/n)?' % \
len(items)
if not ui.input_yn(prompt, True):
return
# Remove (and possibly delete) items.
with lib.transaction():
for obj in (albums if album else items):
obj.remove(delete)
remove_cmd = ui.Subcommand('remove',
help='remove matching items from the library', aliases=('rm',))
remove_cmd.parser.add_option("-d", "--delete", action="store_true",
help="also remove files from disk")
remove_cmd.parser.add_option('-a', '--album', action='store_true',
help='match albums instead of tracks')
def remove_func(lib, opts, args):
remove_items(lib, decargs(args), opts.album, opts.delete)
remove_cmd.func = remove_func
default_commands.append(remove_cmd)
# stats: Show library/query statistics.
def show_stats(lib, query, exact):
"""Shows some statistics about the matched items."""
items = lib.items(query)
total_size = 0
total_time = 0.0
total_items = 0
artists = set()
albums = set()
for item in items:
if exact:
total_size += os.path.getsize(item.path)
else:
total_size += int(item.length * item.bitrate / 8)
total_time += item.length
total_items += 1
artists.add(item.artist)
albums.add(item.album)
size_str = '' + ui.human_bytes(total_size)
if exact:
size_str += ' ({0} bytes)'.format(total_size)
print_("""Tracks: {0}
Total time: {1} ({2:.2f} seconds)
Total size: {3}
Artists: {4}
Albums: {5}""".format(total_items, ui.human_seconds(total_time), total_time,
size_str, len(artists), len(albums)))
stats_cmd = ui.Subcommand('stats',
help='show statistics about the library or a query')
stats_cmd.parser.add_option('-e', '--exact', action='store_true',
help='get exact file sizes')
def stats_func(lib, opts, args):
show_stats(lib, decargs(args), opts.exact)
stats_cmd.func = stats_func
default_commands.append(stats_cmd)
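# Illustrative command-line sketch (the query is a placeholder):
#   beet stats -e genre:rock    # exact on-disk sizes for matching items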
# version: Show current beets version.
def show_version(lib, opts, args):
print_('beets version %s' % beets.__version__)
# Show plugins.
names = [p.name for p in plugins.find_plugins()]
if names:
print_('plugins:', ', '.join(names))
else:
print_('no plugins loaded')
version_cmd = ui.Subcommand('version',
help='output version information')
version_cmd.func = show_version
default_commands.append(version_cmd)
# modify: Declaratively change metadata.
def modify_items(lib, mods, dels, query, write, move, album, confirm):
"""Modifies matching items according to key=value assignments."""
# Parse key=value specifications into a dictionary.
model_cls = library.Album if album else library.Item
fsets = {}
for mod in mods:
key, value = mod.split('=', 1)
fsets[key] = model_cls._parse(key, value)
# Get the items to modify.
items, albums = _do_query(lib, query, album, False)
objs = albums if album else items
# Apply changes *temporarily*, preview them, and collect modified
# objects.
print_('Modifying %i %ss.' % (len(objs), 'album' if album else 'item'))
changed = set()
for obj in objs:
for field, value in fsets.iteritems():
obj[field] = value
for field in dels:
del obj[field]
if ui.show_model_changes(obj):
changed.add(obj)
# Still something to do?
if not changed:
print_('No changes to make.')
return
# Confirm action.
if confirm:
extra = ' and write tags' if write else ''
if not ui.input_yn('Really modify%s (Y/n)?' % extra):
return
# Apply changes to database.
with lib.transaction():
for obj in changed:
if move:
cur_path = obj.path
if lib.directory in ancestry(cur_path): # In library?
log.debug('moving object %s' % cur_path)
obj.move()
obj.store()
# Apply tags if requested.
if write:
if album:
changed_items = itertools.chain(*(a.items() for a in changed))
else:
changed_items = changed
for item in changed_items:
try:
item.write()
except library.FileOperationError as exc:
log.error(exc)
modify_cmd = ui.Subcommand('modify',
help='change metadata fields', aliases=('mod',))
modify_cmd.parser.add_option('-M', '--nomove', action='store_false',
default=True, dest='move', help="don't move files in library")
modify_cmd.parser.add_option('-w', '--write', action='store_true',
default=None, help="write new metadata to files' tags (default)")
modify_cmd.parser.add_option('-W', '--nowrite', action='store_false',
dest='write', help="don't write metadata (opposite of -w)")
modify_cmd.parser.add_option('-a', '--album', action='store_true',
help='modify whole albums instead of tracks')
modify_cmd.parser.add_option('-y', '--yes', action='store_true',
help='skip confirmation')
modify_cmd.parser.add_option('-f', '--format', action='store',
help='print with custom format', default=None)
def modify_func(lib, opts, args):
args = decargs(args)
mods = []
dels = []
query = []
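    # Classify each argument: a trailing '!' (with no '=' or ':') marks a field deletion,
    # 'key=value' is a modification, and anything else becomes part of the query.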
for arg in args:
if arg.endswith('!') and '=' not in arg and ':' not in arg:
dels.append(arg[:-1])
elif '=' in arg:
mods.append(arg)
else:
query.append(arg)
if not mods and not dels:
raise ui.UserError('no modifications specified')
write = opts.write if opts.write is not None else \
config['import']['write'].get(bool)
modify_items(lib, mods, dels, query, write, opts.move, opts.album,
not opts.yes)
modify_cmd.func = modify_func
default_commands.append(modify_cmd)
# move: Move/copy files to the library or a new base directory.
def move_items(lib, dest, query, copy, album):
"""Moves or copies items to a new base directory, given by dest. If
    dest is None, then the library's base directory is used, in which case
    the command effectively "consolidates" files.
"""
items, albums = _do_query(lib, query, album, False)
objs = albums if album else items
action = 'Copying' if copy else 'Moving'
entity = 'album' if album else 'item'
log.info('%s %i %ss.' % (action, len(objs), entity))
for obj in objs:
log.debug('moving: %s' % obj.path)
obj.move(copy, basedir=dest)
obj.store()
move_cmd = ui.Subcommand('move',
help='move or copy items', aliases=('mv',))
move_cmd.parser.add_option('-d', '--dest', metavar='DIR', dest='dest',
help='destination directory')
move_cmd.parser.add_option('-c', '--copy', default=False, action='store_true',
help='copy instead of moving')
move_cmd.parser.add_option('-a', '--album', default=False, action='store_true',
help='match whole albums instead of tracks')
def move_func(lib, opts, args):
dest = opts.dest
if dest is not None:
dest = normpath(dest)
if not os.path.isdir(dest):
raise ui.UserError('no such directory: %s' % dest)
move_items(lib, dest, decargs(args), opts.copy, opts.album)
move_cmd.func = move_func
default_commands.append(move_cmd)
# write: Write tags into files.
def write_items(lib, query, pretend):
"""Write tag information from the database to the respective files
in the filesystem.
"""
items, albums = _do_query(lib, query, False, False)
for item in items:
# Item deleted?
if not os.path.exists(syspath(item.path)):
log.info(u'missing file: {0}'.format(
util.displayable_path(item.path)
))
continue
# Get an Item object reflecting the "clean" (on-disk) state.
try:
clean_item = library.Item.from_path(item.path)
except Exception as exc:
log.error(u'error reading {0}: {1}'.format(
displayable_path(item.path), exc
))
continue
# Check for and display changes.
changed = ui.show_model_changes(item, clean_item,
library.ITEM_KEYS_WRITABLE, always=True)
if changed and not pretend:
try:
item.write()
except library.FileOperationError as exc:
log.error(exc)
write_cmd = ui.Subcommand('write', help='write tag information to files')
write_cmd.parser.add_option('-p', '--pretend', action='store_true',
help="show all changes but do nothing")
def write_func(lib, opts, args):
write_items(lib, decargs(args), opts.pretend)
write_cmd.func = write_func
default_commands.append(write_cmd)
# config: Show and edit user configuration.
config_cmd = ui.Subcommand('config',
help='show or edit the user configuration')
config_cmd.parser.add_option('-p', '--paths', action='store_true',
help='show files that configuration was loaded from')
config_cmd.parser.add_option('-e', '--edit', action='store_true',
help='edit user configuration with $EDITOR')
config_cmd.parser.add_option('-d', '--defaults', action='store_true',
help='include the default configuration')
def config_func(lib, opts, args):
# Make sure lazy configuration is loaded
config.resolve()
# Print paths.
if opts.paths:
filenames = []
for source in config.sources:
if not opts.defaults and source.default:
continue
if source.filename:
filenames.append(source.filename)
# In case the user config file does not exist, prepend it to the
# list.
user_path = config.user_config_path()
if user_path not in filenames:
filenames.insert(0, user_path)
for filename in filenames:
print(filename)
# Open in editor.
elif opts.edit:
path = config.user_config_path()
if 'EDITOR' in os.environ:
editor = os.environ['EDITOR']
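            # The editor appears twice because os.execlp() takes the file to run followed by argv[0].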
args = [editor, editor, path]
elif platform.system() == 'Darwin':
args = ['open', 'open', '-n', path]
elif platform.system() == 'Windows':
            # On Windows we can execute arbitrary files; the OS will
            # take care of starting an appropriate application.
args = [path, path]
else:
# Assume Unix
args = ['xdg-open', 'xdg-open', path]
try:
os.execlp(*args)
except OSError:
raise ui.UserError("Could not edit configuration. Please"
"set the EDITOR environment variable.")
# Dump configuration.
else:
print(config.dump(full=opts.defaults))
config_cmd.func = config_func
default_commands.append(config_cmd)
# completion: print completion script
completion_cmd = ui.Subcommand('completion',
help='print shell script that provides command line completion')
def print_completion(*args):
for line in completion_script(default_commands + plugins.commands()):
print(line, end='')
if not (os.path.isfile(u'/etc/bash_completion') or
os.path.isfile(u'/usr/share/bash-completion/bash_completion') or
os.path.isfile(u'/usr/share/local/bash-completion/bash_completion')):
log.warn(u'Warning: Unable to find the bash-completion package. '
u'Command line completion might not work.')
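# Typical usage (assumed, not enforced here): source the emitted script from your shell
# profile, e.g. with something like `eval "$(beet completion)"`.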
def completion_script(commands):
"""Yield the full completion shell script as strings.
    ``commands`` is a list of ``ui.Subcommand`` instances to generate
completion data for.
"""
base_script = os.path.join(_package_path('beets.ui'), 'completion_base.sh')
with open(base_script, 'r') as base_script:
yield base_script.read()
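    # Everything below emits shell variable definitions (command names, aliases and
    # per-command options) that the _beet_dispatch helper from the base script is
    # expected to consume.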
options = {}
aliases = {}
command_names = []
# Collect subcommands
for cmd in commands:
name = cmd.name
command_names.append(name)
for alias in cmd.aliases:
aliases[alias] = name
options[name] = {'flags': [], 'opts': []}
for opts in cmd.parser._get_all_options()[1:]:
if opts.action in ('store_true', 'store_false'):
option_type = 'flags'
else:
option_type = 'opts'
options[name][option_type].extend(
opts._short_opts + opts._long_opts
)
# Add global options
options['_global'] = {
'flags': ['-v', '--verbose'],
'opts': '-l --library -c --config -d --directory -h --help'.split(' ')
}
# Help subcommand
command_names.append('help')
# Add flags common to all commands
options['_common'] = {
'flags': ['-h', '--help']
}
# Start generating the script
yield "_beet() {\n"
# Command names
yield " local commands='%s'\n" % ' '.join(command_names)
yield "\n"
# Command aliases
yield " local aliases='%s'\n" % ' '.join(aliases.keys())
for alias, cmd in aliases.items():
yield " local alias__%s=%s\n" % (alias, cmd)
yield '\n'
# Fields
yield " fields='%s'\n" % ' '.join(
set(library.ITEM_KEYS + library.ALBUM_KEYS))
# Command options
for cmd, opts in options.items():
for option_type, option_list in opts.items():
if option_list:
option_list = ' '.join(option_list)
yield " local %s__%s='%s'\n" % (option_type, cmd, option_list)
yield ' _beet_dispatch\n'
yield '}\n'
completion_cmd.func = print_completion
completion_cmd.hide = True
default_commands.append(completion_cmd)
| gpl-3.0 |
miguelinux/vbox | src/VBox/ValidationKit/testmanager/batch/regen_sched_queues.py | 3 | 4295 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# $Id: regen_sched_queues.py $
# pylint: disable=C0301
"""
Interface used by the admin to regenerate scheduling queues.
"""
__copyright__ = \
"""
Copyright (C) 2012-2015 Oracle Corporation
This file is part of VirtualBox Open Source Edition (OSE), as
available from http://www.virtualbox.org. This file is free software;
you can redistribute it and/or modify it under the terms of the GNU
General Public License (GPL) as published by the Free Software
Foundation, in version 2 as it comes in the "COPYING" file of the
VirtualBox OSE distribution. VirtualBox OSE is distributed in the
hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
The contents of this file may alternatively be used under the terms
of the Common Development and Distribution License Version 1.0
(CDDL) only, as it comes in the "COPYING.CDDL" file of the
VirtualBox OSE distribution, in which case the provisions of the
CDDL are applicable instead of those of the GPL.
You may elect to license modified versions of this file under the
terms and conditions of either the GPL or the CDDL or both.
"""
__version__ = "$Revision: 101450 $"
# Standard python imports
import sys;
import os;
from optparse import OptionParser;
# Add Test Manager's modules path
g_ksTestManagerDir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))));
sys.path.append(g_ksTestManagerDir);
# Test Manager imports
from testmanager.core.db import TMDatabaseConnection;
from testmanager.core.schedulerbase import SchedulerBase;
from testmanager.core.schedgroup import SchedGroupLogic;
class RegenSchedQueues(object): # pylint: disable=R0903
"""
Regenerates all the scheduling queues.
"""
def __init__(self):
"""
Parse command line.
"""
oParser = OptionParser();
oParser.add_option('-q', '--quiet', dest = 'fQuiet', action = 'store_true', default = False,
help = 'Quiet execution');
oParser.add_option('-u', '--uid', dest = 'uid', action = 'store', type = 'int', default = 1,
help = 'User ID to accredit with this job');
oParser.add_option('--profile', dest = 'fProfile', action = 'store_true', default = False,
                           help = 'Profile the execution of this job');
(self.oConfig, _) = oParser.parse_args();
def doIt(self):
"""
Does the job.
"""
oDb = TMDatabaseConnection();
aoGroups = SchedGroupLogic(oDb).getAll();
iRc = 0;
for oGroup in aoGroups:
if not self.oConfig.fQuiet:
print '%s (ID %#d):' % (oGroup.sName, oGroup.idSchedGroup,);
try:
(aoErrors, asMessages) = SchedulerBase.recreateQueue(oDb, self.oConfig.uid, oGroup.idSchedGroup, 2);
except Exception as oXcpt:
oDb.rollback();
print ' !!Hit exception processing "%s": %s' % (oGroup.sName, oXcpt,);
else:
if len(aoErrors) == 0:
if not self.oConfig.fQuiet:
print ' Successfully regenerated.';
else:
iRc = 1;
print ' %d errors:' % (len(aoErrors,));
for oError in aoErrors:
if oError[1] is None:
print ' !!%s' % (oError[0],);
else:
print ' !!%s (%s)' % (oError[0], oError[1]);
if len(asMessages) > 0 and not self.oConfig.fQuiet:
print ' %d messages:' % (len(asMessages),);
for sMsg in asMessages:
print ' ##%s' % (sMsg,);
return iRc;
@staticmethod
def main():
""" Main function. """
oMain = RegenSchedQueues();
if oMain.oConfig.fProfile is not True:
iRc = oMain.doIt();
else:
import cProfile;
oProfiler = cProfile.Profile();
iRc = oProfiler.runcall(oMain.doIt);
oProfiler.print_stats(sort = 'time');
oProfiler = None;
return iRc;
if __name__ == '__main__':
sys.exit(RegenSchedQueues().main());
| gpl-2.0 |
tadeas482/android_kernel_u8500 | tools/perf/scripts/python/futex-contention.py | 11261 | 1486 | # futex contention
# (c) 2010, Arnaldo Carvalho de Melo <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Translation of:
#
# http://sourceware.org/systemtap/wiki/WSFutexContention
#
# to perf python scripting.
#
# Measures futex contention
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Util import *
process_names = {}
thread_thislock = {}
thread_blocktime = {}
lock_waits = {} # long-lived stats on (tid,lock) blockage elapsed time
process_names = {} # long-lived pid-to-execname mapping
def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm,
nr, uaddr, op, val, utime, uaddr2, val3):
cmd = op & FUTEX_CMD_MASK
if cmd != FUTEX_WAIT:
return # we don't care about originators of WAKE events
process_names[tid] = comm
thread_thislock[tid] = uaddr
thread_blocktime[tid] = nsecs(s, ns)
def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm,
nr, ret):
if thread_blocktime.has_key(tid):
elapsed = nsecs(s, ns) - thread_blocktime[tid]
add_stats(lock_waits, (tid, thread_thislock[tid]), elapsed)
del thread_blocktime[tid]
del thread_thislock[tid]
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
for (tid, lock) in lock_waits:
min, max, avg, count = lock_waits[tid, lock]
print "%s[%d] lock %x contended %d times, %d avg ns" % \
(process_names[tid], tid, lock, count, avg)
| gpl-2.0 |
arunhotra/tensorflow | tensorflow/python/ops/sparse_ops_test.py | 5 | 7592 | """Tests for Python ops defined in sparse_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.python.platform
import numpy as np
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.framework import types
from tensorflow.python.ops import constant_op
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import googletest
class SparseToIndicatorTest(test_util.TensorFlowTestCase):
def _SparseTensor_5x6(self, dtype):
ind = np.array([
[0, 0],
[1, 0], [1, 3], [1, 4],
[3, 2], [3, 3]])
val = np.array([0, 10, 13, 14, 32, 33])
shape = np.array([5, 6])
return ops.SparseTensor(
constant_op.constant(ind, types.int64),
constant_op.constant(val, dtype),
constant_op.constant(shape, types.int64))
def _SparseTensor_2x3x4(self, dtype):
ind = np.array([
[0, 0, 1],
[0, 1, 0], [0, 1, 2],
[1, 0, 3],
[1, 1, 1], [1, 1, 3],
[1, 2, 2]])
val = np.array([1, 10, 12, 103, 111, 113, 122])
shape = np.array([2, 3, 4])
return ops.SparseTensor(
constant_op.constant(ind, types.int64),
constant_op.constant(val, dtype),
constant_op.constant(shape, types.int64))
def testInt32(self):
with self.test_session(use_gpu=False):
sp_input = self._SparseTensor_5x6(types.int32)
output = sparse_ops.sparse_to_indicator(sp_input, 50).eval()
expected_output = np.zeros((5, 50), dtype=np.bool)
expected_trues = ((0, 0), (1, 10), (1, 13), (1, 14), (3, 32), (3, 33))
for expected_true in expected_trues:
expected_output[expected_true] = True
self.assertAllEqual(output, expected_output)
def testInt64(self):
with self.test_session(use_gpu=False):
sp_input = self._SparseTensor_5x6(types.int64)
output = sparse_ops.sparse_to_indicator(sp_input, 50).eval()
expected_output = np.zeros((5, 50), dtype=np.bool)
expected_trues = [(0, 0), (1, 10), (1, 13), (1, 14), (3, 32), (3, 33)]
for expected_true in expected_trues:
expected_output[expected_true] = True
self.assertAllEqual(output, expected_output)
def testHigherRank(self):
with self.test_session(use_gpu=False):
sp_input = self._SparseTensor_2x3x4(types.int64)
output = sparse_ops.sparse_to_indicator(sp_input, 200).eval()
expected_output = np.zeros((2, 3, 200), dtype=np.bool)
expected_trues = [(0, 0, 1), (0, 1, 10), (0, 1, 12),
(1, 0, 103), (1, 1, 111), (1, 1, 113), (1, 2, 122)]
for expected_true in expected_trues:
expected_output[expected_true] = True
self.assertAllEqual(output, expected_output)
class SparseRetainTest(test_util.TensorFlowTestCase):
def _SparseTensor_5x6(self):
ind = np.array([
[0, 0],
[1, 0], [1, 3], [1, 4],
[3, 2], [3, 3]])
val = np.array([0, 10, 13, 14, 32, 33])
shape = np.array([5, 6])
return ops.SparseTensor(
constant_op.constant(ind, types.int64),
constant_op.constant(val, types.int32),
constant_op.constant(shape, types.int64))
def testBasic(self):
with self.test_session(use_gpu=False) as sess:
sp_input = self._SparseTensor_5x6()
to_retain = np.array([1, 0, 0, 1, 1, 0], dtype=np.bool)
sp_output = sparse_ops.sparse_retain(sp_input, to_retain)
output = sess.run(sp_output)
self.assertAllEqual(output.indices, [[0, 0], [1, 4], [3, 2]])
self.assertAllEqual(output.values, [0, 14, 32])
self.assertAllEqual(output.shape, [5, 6])
def testRetainNone(self):
with self.test_session(use_gpu=False) as sess:
sp_input = self._SparseTensor_5x6()
to_retain = np.zeros((6,), dtype=np.bool)
sp_output = sparse_ops.sparse_retain(sp_input, to_retain)
output = sess.run(sp_output)
self.assertAllEqual(output.indices, np.array([]).reshape((0, 2)))
self.assertAllEqual(output.values, [])
self.assertAllEqual(output.shape, [5, 6])
def testMismatchedRetainShape(self):
with self.test_session(use_gpu=False):
sp_input = self._SparseTensor_5x6()
to_retain = np.array([1, 0, 0, 1, 0], dtype=np.bool)
with self.assertRaises(ValueError):
sparse_ops.sparse_retain(sp_input, to_retain)
class SparseFillEmptyRowsTest(test_util.TensorFlowTestCase):
def _SparseTensor_5x6(self):
ind = np.array([
[0, 0],
[1, 0], [1, 3], [1, 4],
[3, 2], [3, 3]])
val = np.array([0, 10, 13, 14, 32, 33])
shape = np.array([5, 6])
return ops.SparseTensor(
constant_op.constant(ind, types.int64),
constant_op.constant(val, types.int32),
constant_op.constant(shape, types.int64))
def _SparseTensor_String5x6(self):
ind = np.array([
[0, 0],
[1, 0], [1, 3], [1, 4],
[3, 2], [3, 3]])
val = np.array(["a", "b", "c", "d", "e", "f"])
shape = np.array([5, 6])
return ops.SparseTensor(
constant_op.constant(ind, types.int64),
constant_op.constant(val, types.string),
constant_op.constant(shape, types.int64))
def _SparseTensor_2x6(self):
ind = np.array([[0, 0], [1, 0], [1, 3], [1, 4]])
val = np.array([0, 10, 13, 14])
shape = np.array([2, 6])
return ops.SparseTensor(
constant_op.constant(ind, types.int64),
constant_op.constant(val, types.int32),
constant_op.constant(shape, types.int64))
def testFillNumber(self):
with self.test_session(use_gpu=False) as sess:
sp_input = self._SparseTensor_5x6()
sp_output, empty_row_indicator = (
sparse_ops.sparse_fill_empty_rows(sp_input, -1))
output, empty_row_indicator_out = sess.run(
[sp_output, empty_row_indicator])
self.assertAllEqual(
output.indices,
[[0, 0], [1, 0], [1, 3], [1, 4], [2, 0], [3, 2], [3, 3], [4, 0]])
self.assertAllEqual(output.values, [0, 10, 13, 14, -1, 32, 33, -1])
self.assertAllEqual(output.shape, [5, 6])
self.assertAllEqual(empty_row_indicator_out,
np.array([0, 0, 1, 0, 1]).astype(np.bool))
def testFillString(self):
with self.test_session(use_gpu=False) as sess:
sp_input = self._SparseTensor_String5x6()
sp_output, empty_row_indicator = (
sparse_ops.sparse_fill_empty_rows(sp_input, ""))
output, empty_row_indicator_out = sess.run(
[sp_output, empty_row_indicator])
self.assertAllEqual(
output.indices,
[[0, 0], [1, 0], [1, 3], [1, 4], [2, 0], [3, 2], [3, 3], [4, 0]])
self.assertAllEqual(output.values, ["a", "b", "c", "d", "", "e", "f", ""])
self.assertAllEqual(output.shape, [5, 6])
self.assertAllEqual(empty_row_indicator_out,
np.array([0, 0, 1, 0, 1]).astype(np.bool))
def testNoEmptyRows(self):
with self.test_session(use_gpu=False) as sess:
sp_input = self._SparseTensor_2x6()
sp_output, empty_row_indicator = (
sparse_ops.sparse_fill_empty_rows(sp_input, -1))
output, empty_row_indicator_out = sess.run(
[sp_output, empty_row_indicator])
self.assertAllEqual(output.indices, [[0, 0], [1, 0], [1, 3], [1, 4]])
self.assertAllEqual(output.values, [0, 10, 13, 14])
self.assertAllEqual(output.shape, [2, 6])
self.assertAllEqual(empty_row_indicator_out, np.zeros(2).astype(np.bool))
if __name__ == "__main__":
googletest.main()
| apache-2.0 |
nzavagli/UnrealPy | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/django-1.8.2/docs/_ext/applyxrefs.py | 322 | 1834 | """Adds xref targets to the top of files."""
import os
import sys
testing = False
DONT_TOUCH = (
'./index.txt',
)
def target_name(fn):
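    # e.g. './ref/models.txt' -> '_ref-models' (illustrative path)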
if fn.endswith('.txt'):
fn = fn[:-4]
return '_' + fn.lstrip('./').replace('/', '-')
def process_file(fn, lines):
lines.insert(0, '\n')
lines.insert(0, '.. %s:\n' % target_name(fn))
try:
with open(fn, 'w') as fp:
fp.writelines(lines)
except IOError:
print("Can't open %s for writing. Not touching it." % fn)
def has_target(fn):
try:
with open(fn, 'r') as fp:
lines = fp.readlines()
except IOError:
print("Can't open or read %s. Not touching it." % fn)
return (True, None)
# print fn, len(lines)
if len(lines) < 1:
print("Not touching empty file %s." % fn)
return (True, None)
if lines[0].startswith('.. _'):
return (True, None)
return (False, lines)
def main(argv=None):
if argv is None:
argv = sys.argv
if len(argv) == 1:
argv.extend('.')
files = []
for root in argv[1:]:
for (dirpath, dirnames, filenames) in os.walk(root):
files.extend((dirpath, f) for f in filenames)
files.sort()
files = [os.path.join(p, fn) for p, fn in files if fn.endswith('.txt')]
# print files
for fn in files:
if fn in DONT_TOUCH:
print("Skipping blacklisted file %s." % fn)
continue
target_found, lines = has_target(fn)
if not target_found:
if testing:
print('%s: %s' % (fn, lines[0]))
else:
print("Adding xref to %s" % fn)
process_file(fn, lines)
else:
print("Skipping %s: already has a xref" % fn)
if __name__ == '__main__':
sys.exit(main())
| mit |
inmomentsoftware/teams | wtforms/ext/django/fields.py | 175 | 4580 | """
Useful form fields for use with the Django ORM.
"""
from __future__ import unicode_literals
import datetime
import operator
try:
from django.conf import settings
from django.utils import timezone
has_timezone = True
except ImportError:
has_timezone = False
from wtforms import fields, widgets
from wtforms.compat import string_types
from wtforms.validators import ValidationError
__all__ = (
'ModelSelectField', 'QuerySetSelectField', 'DateTimeField'
)
class QuerySetSelectField(fields.SelectFieldBase):
"""
Given a QuerySet either at initialization or inside a view, will display a
select drop-down field of choices. The `data` property actually will
store/keep an ORM model instance, not the ID. Submitting a choice which is
not in the queryset will result in a validation error.
Specify `get_label` to customize the label associated with each option. If
a string, this is the name of an attribute on the model object to use as
the label text. If a one-argument callable, this callable will be passed
model instance and expected to return the label text. Otherwise, the model
object's `__str__` or `__unicode__` will be used.
If `allow_blank` is set to `True`, then a blank choice will be added to the
top of the list. Selecting this choice will result in the `data` property
being `None`. The label for the blank choice can be set by specifying the
`blank_text` parameter.
"""
widget = widgets.Select()
def __init__(self, label=None, validators=None, queryset=None, get_label=None, allow_blank=False, blank_text='', **kwargs):
super(QuerySetSelectField, self).__init__(label, validators, **kwargs)
self.allow_blank = allow_blank
self.blank_text = blank_text
self._set_data(None)
if queryset is not None:
self.queryset = queryset.all() # Make sure the queryset is fresh
if get_label is None:
self.get_label = lambda x: x
elif isinstance(get_label, string_types):
self.get_label = operator.attrgetter(get_label)
else:
self.get_label = get_label
def _get_data(self):
if self._formdata is not None:
for obj in self.queryset:
if obj.pk == self._formdata:
self._set_data(obj)
break
return self._data
def _set_data(self, data):
self._data = data
self._formdata = None
data = property(_get_data, _set_data)
def iter_choices(self):
if self.allow_blank:
yield ('__None', self.blank_text, self.data is None)
for obj in self.queryset:
yield (obj.pk, self.get_label(obj), obj == self.data)
def process_formdata(self, valuelist):
if valuelist:
if valuelist[0] == '__None':
self.data = None
else:
self._data = None
self._formdata = int(valuelist[0])
def pre_validate(self, form):
if not self.allow_blank or self.data is not None:
for obj in self.queryset:
if self.data == obj:
break
else:
raise ValidationError(self.gettext('Not a valid choice'))
class ModelSelectField(QuerySetSelectField):
"""
Like a QuerySetSelectField, except takes a model class instead of a
queryset and lists everything in it.
"""
def __init__(self, label=None, validators=None, model=None, **kwargs):
super(ModelSelectField, self).__init__(label, validators, queryset=model._default_manager.all(), **kwargs)
class DateTimeField(fields.DateTimeField):
"""
Adds support for Django's timezone utilities.
Requires Django >= 1.5
"""
def __init__(self, *args, **kwargs):
if not has_timezone:
raise ImportError('DateTimeField requires Django >= 1.5')
super(DateTimeField, self).__init__(*args, **kwargs)
def process_formdata(self, valuelist):
super(DateTimeField, self).process_formdata(valuelist)
date = self.data
if settings.USE_TZ and date is not None and timezone.is_naive(date):
current_timezone = timezone.get_current_timezone()
self.data = timezone.make_aware(date, current_timezone)
def _value(self):
date = self.data
if settings.USE_TZ and isinstance(date, datetime.datetime) and timezone.is_aware(date):
self.data = timezone.localtime(date)
return super(DateTimeField, self)._value()
| agpl-3.0 |
WhiteNeo-/NeoKernel-L | scripts/gcc-wrapper.py | 234 | 4095 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Linux Foundation nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Invoke gcc, looking for warnings, and causing a failure if there are
# non-whitelisted warnings.
import errno
import re
import os
import sys
import subprocess
# Note that gcc uses unicode, which may depend on the locale. TODO:
# force LANG to be set to en_US.UTF-8 to get consistent warnings.
allowed_warnings = set([
"alignment.c:327",
"mmu.c:602",
"return_address.c:62",
"swab.h:49",
"SemaLambda.cpp:946",
"CGObjCGNU.cpp:1414",
"BugReporter.h:146",
"RegionStore.cpp:1904",
"SymbolManager.cpp:484",
"RewriteObjCFoundationAPI.cpp:737",
"RewriteObjCFoundationAPI.cpp:696",
"CommentParser.cpp:394",
"CommentParser.cpp:391",
"CommentParser.cpp:356",
"LegalizeDAG.cpp:3646",
"IRBuilder.h:844",
"DataLayout.cpp:193",
"transport.c:653",
"xt_socket.c:307",
"xt_socket.c:161",
"inet_hashtables.h:356",
"xc4000.c:1049",
"xc4000.c:1063",
"f_qdss.c:586",
"mipi_tc358764_dsi2lvds.c:746",
"dynamic_debug.h:75",
"hci_conn.c:407",
"f_qdss.c:740",
"mipi_novatek.c:569",
"swab.h:34",
])
# Capture the name of the object file, can find it.
ofile = None
warning_re = re.compile(r'''(.*/|)([^/]+\.[a-z]+:\d+):(\d+:)? warning:''')
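# Group 1 captures the (possibly empty) directory prefix and group 2 the 'file.ext:line'
# token that interpret_warning() checks against allowed_warnings above.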
def interpret_warning(line):
"""Decode the message from gcc. The messages we care about have a filename, and a warning"""
line = line.rstrip('\n')
m = warning_re.match(line)
if m and m.group(2) not in allowed_warnings:
print "error, forbidden warning:", m.group(2)
# If there is a warning, remove any object if it exists.
if ofile:
try:
os.remove(ofile)
except OSError:
pass
sys.exit(1)
def run_gcc():
args = sys.argv[1:]
# Look for -o
try:
i = args.index('-o')
global ofile
ofile = args[i+1]
except (ValueError, IndexError):
pass
compiler = sys.argv[0]
try:
proc = subprocess.Popen(args, stderr=subprocess.PIPE)
for line in proc.stderr:
print line,
interpret_warning(line)
result = proc.wait()
except OSError as e:
result = e.errno
if result == errno.ENOENT:
print args[0] + ':',e.strerror
print 'Is your PATH set correctly?'
else:
print ' '.join(args), str(e)
return result
if __name__ == '__main__':
status = run_gcc()
sys.exit(status)
| gpl-2.0 |
boppreh/keyboard | setup.py | 1 | 1333 | """
Usage instructions:
- If you are installing: `python setup.py install`
- If you are developing: `python setup.py sdist --format=zip bdist_wheel --universal bdist_wininst && twine check dist/*`
"""
import keyboard
from setuptools import setup
setup(
name='keyboard',
version=keyboard.version,
author='BoppreH',
author_email='[email protected]',
packages=['keyboard'],
url='https://github.com/boppreh/keyboard',
license='MIT',
description='Hook and simulate keyboard events on Windows and Linux',
keywords = 'keyboard hook simulate hotkey',
# Wheel creation breaks with Windows newlines.
# https://github.com/pypa/setuptools/issues/1126
long_description=keyboard.__doc__.replace('\r\n', '\n'),
long_description_content_type='text/markdown',
install_requires=["pyobjc; sys_platform=='darwin'"], # OSX-specific dependency
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Operating System :: Microsoft :: Windows',
'Operating System :: Unix',
'Operating System :: MacOS :: MacOS X',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities',
],
)
| mit |
srsman/odoo | addons/l10n_be/wizard/l10n_be_vat_intra.py | 332 | 14728 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# Adapted by Noviat to
# - make the 'mand_id' field optional
# - support Noviat tax code scheme
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
import base64
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.report import report_sxw
class partner_vat_intra(osv.osv_memory):
"""
Partner Vat Intra
"""
_name = "partner.vat.intra"
_description = 'Partner VAT Intra'
def _get_xml_data(self, cr, uid, context=None):
if context.get('file_save', False):
return base64.encodestring(context['file_save'].encode('utf8'))
return ''
def _get_europe_country(self, cursor, user, context=None):
return self.pool.get('res.country').search(cursor, user, [('code', 'in', ['AT', 'BG', 'CY', 'CZ', 'DK', 'EE', 'FI', 'FR', 'DE', 'GR', 'HU', 'IE', 'IT', 'LV', 'LT', 'LU', 'MT', 'NL', 'PL', 'PT', 'RO', 'SK', 'SI', 'ES', 'SE', 'GB'])])
_columns = {
'name': fields.char('File Name'),
'period_code': fields.char('Period Code', size=6, required=True, help='''This is where you have to set the period code for the intracom declaration using the format: ppyyyy
PP can stand for a month: from '01' to '12'.
PP can stand for a trimester: '31','32','33','34'
The first figure means that it is a trimester,
The second figure identify the trimester.
PP can stand for a complete fiscal year: '00'.
YYYY stands for the year (4 positions).
'''
),
'period_ids': fields.many2many('account.period', 'account_period_rel', 'acc_id', 'period_id', 'Period (s)', help = 'Select here the period(s) you want to include in your intracom declaration'),
'tax_code_id': fields.many2one('account.tax.code', 'Company', domain=[('parent_id', '=', False)], help="Keep empty to use the user's company", required=True),
'test_xml': fields.boolean('Test XML file', help="Sets the XML output as test file"),
'mand_id' : fields.char('Reference', help="Reference given by the Representative of the sending company."),
'msg': fields.text('File created', readonly=True),
'no_vat': fields.text('Partner With No VAT', readonly=True, help="The Partner whose VAT number is not defined and they are not included in XML File."),
'file_save' : fields.binary('Save File', readonly=True),
'country_ids': fields.many2many('res.country', 'vat_country_rel', 'vat_id', 'country_id', 'European Countries'),
'comments': fields.text('Comments'),
}
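    # Period code examples, following the help text above: '022017' = February 2017,
    # '322017' = the second trimester of 2017, '002017' = the full fiscal year 2017.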
def _get_tax_code(self, cr, uid, context=None):
obj_tax_code = self.pool.get('account.tax.code')
obj_user = self.pool.get('res.users')
company_id = obj_user.browse(cr, uid, uid, context=context).company_id.id
tax_code_ids = obj_tax_code.search(cr, uid, [('company_id', '=', company_id), ('parent_id', '=', False)], context=context)
return tax_code_ids and tax_code_ids[0] or False
_defaults = {
'country_ids': _get_europe_country,
'file_save': _get_xml_data,
'name': 'vat_intra.xml',
'tax_code_id': _get_tax_code,
}
def _get_datas(self, cr, uid, ids, context=None):
"""Collects require data for vat intra xml
:param ids: id of wizard.
:return: dict of all data to be used to generate xml for Partner VAT Intra.
:rtype: dict
"""
if context is None:
context = {}
obj_user = self.pool.get('res.users')
obj_sequence = self.pool.get('ir.sequence')
obj_partner = self.pool.get('res.partner')
xmldict = {}
post_code = street = city = country = data_clientinfo = ''
seq = amount_sum = 0
wiz_data = self.browse(cr, uid, ids[0], context=context)
comments = wiz_data.comments
if wiz_data.tax_code_id:
data_company = wiz_data.tax_code_id.company_id
else:
data_company = obj_user.browse(cr, uid, uid, context=context).company_id
# Get Company vat
company_vat = data_company.partner_id.vat
if not company_vat:
raise osv.except_osv(_('Insufficient Data!'),_('No VAT number associated with your company.'))
company_vat = company_vat.replace(' ','').upper()
issued_by = company_vat[:2]
if len(wiz_data.period_code) != 6:
raise osv.except_osv(_('Error!'), _('Period code is not valid.'))
if not wiz_data.period_ids:
raise osv.except_osv(_('Insufficient Data!'),_('Please select at least one Period.'))
p_id_list = obj_partner.search(cr, uid, [('vat','!=',False)], context=context)
if not p_id_list:
raise osv.except_osv(_('Insufficient Data!'),_('No partner has a VAT number associated with him.'))
seq_declarantnum = obj_sequence.get(cr, uid, 'declarantnum')
dnum = company_vat[2:] + seq_declarantnum[-4:]
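        # Declarant reference: the company VAT number without its country prefix, followed
        # by the last four characters of the 'declarantnum' sequence number.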
addr = obj_partner.address_get(cr, uid, [data_company.partner_id.id], ['invoice'])
email = data_company.partner_id.email or ''
phone = data_company.partner_id.phone or ''
if addr.get('invoice',False):
ads = obj_partner.browse(cr, uid, [addr['invoice']])[0]
city = (ads.city or '')
post_code = (ads.zip or '')
if ads.street:
street = ads.street
if ads.street2:
street += ' '
street += ads.street2
if ads.country_id:
country = ads.country_id.code
if not country:
country = company_vat[:2]
if not email:
raise osv.except_osv(_('Insufficient Data!'),_('No email address associated with the company.'))
if not phone:
raise osv.except_osv(_('Insufficient Data!'),_('No phone associated with the company.'))
xmldict.update({
'company_name': data_company.name,
'company_vat': company_vat,
'vatnum': company_vat[2:],
'mand_id': wiz_data.mand_id,
'sender_date': str(time.strftime('%Y-%m-%d')),
'street': street,
'city': city,
'post_code': post_code,
'country': country,
'email': email,
'phone': phone.replace('/','').replace('.','').replace('(','').replace(')','').replace(' ',''),
'period': wiz_data.period_code,
'clientlist': [],
'comments': comments,
'issued_by': issued_by,
})
#tax code 44: services
#tax code 46L: normal good deliveries
#tax code 46T: ABC good deliveries
#tax code 48xxx: credite note on tax code xxx
codes = ('44', '46L', '46T', '48s44', '48s46L', '48s46T')
cr.execute('''SELECT p.name As partner_name, l.partner_id AS partner_id, p.vat AS vat,
(CASE WHEN t.code = '48s44' THEN '44'
WHEN t.code = '48s46L' THEN '46L'
WHEN t.code = '48s46T' THEN '46T'
ELSE t.code END) AS intra_code,
SUM(CASE WHEN t.code in ('48s44','48s46L','48s46T') THEN -l.tax_amount ELSE l.tax_amount END) AS amount
FROM account_move_line l
LEFT JOIN account_tax_code t ON (l.tax_code_id = t.id)
LEFT JOIN res_partner p ON (l.partner_id = p.id)
WHERE t.code IN %s
AND l.period_id IN %s
AND t.company_id = %s
GROUP BY p.name, l.partner_id, p.vat, intra_code''', (codes, tuple([p.id for p in wiz_data.period_ids]), data_company.id))
p_count = 0
for row in cr.dictfetchall():
if not row['vat']:
row['vat'] = ''
p_count += 1
seq += 1
amt = row['amount'] or 0.0
amount_sum += amt
intra_code = row['intra_code'] == '44' and 'S' or (row['intra_code'] == '46L' and 'L' or (row['intra_code'] == '46T' and 'T' or ''))
xmldict['clientlist'].append({
'partner_name': row['partner_name'],
'seq': seq,
'vatnum': row['vat'][2:].replace(' ','').upper(),
'vat': row['vat'],
'country': row['vat'][:2],
'amount': round(amt,2),
'intra_code': row['intra_code'],
'code': intra_code})
xmldict.update({'dnum': dnum, 'clientnbr': str(seq), 'amountsum': round(amount_sum,2), 'partner_wo_vat': p_count})
return xmldict
def create_xml(self, cursor, user, ids, context=None):
"""Creates xml that is to be exported and sent to estate for partner vat intra.
:return: Value for next action.
:rtype: dict
"""
mod_obj = self.pool.get('ir.model.data')
xml_data = self._get_datas(cursor, user, ids, context=context)
month_quarter = xml_data['period'][:2]
year = xml_data['period'][2:]
data_file = ''
# Can't we do this by etree?
data_head = """<?xml version="1.0" encoding="ISO-8859-1"?>
<ns2:IntraConsignment xmlns="http://www.minfin.fgov.be/InputCommon" xmlns:ns2="http://www.minfin.fgov.be/IntraConsignment" IntraListingsNbr="1">
<ns2:Representative>
<RepresentativeID identificationType="NVAT" issuedBy="%(issued_by)s">%(vatnum)s</RepresentativeID>
<Name>%(company_name)s</Name>
<Street>%(street)s</Street>
<PostCode>%(post_code)s</PostCode>
<City>%(city)s</City>
<CountryCode>%(country)s</CountryCode>
<EmailAddress>%(email)s</EmailAddress>
<Phone>%(phone)s</Phone>
</ns2:Representative>""" % (xml_data)
if xml_data['mand_id']:
data_head += '\n\t\t<ns2:RepresentativeReference>%(mand_id)s</ns2:RepresentativeReference>' % (xml_data)
data_comp_period = '\n\t\t<ns2:Declarant>\n\t\t\t<VATNumber>%(vatnum)s</VATNumber>\n\t\t\t<Name>%(company_name)s</Name>\n\t\t\t<Street>%(street)s</Street>\n\t\t\t<PostCode>%(post_code)s</PostCode>\n\t\t\t<City>%(city)s</City>\n\t\t\t<CountryCode>%(country)s</CountryCode>\n\t\t\t<EmailAddress>%(email)s</EmailAddress>\n\t\t\t<Phone>%(phone)s</Phone>\n\t\t</ns2:Declarant>' % (xml_data)
if month_quarter.startswith('3'):
data_comp_period += '\n\t\t<ns2:Period>\n\t\t\t<ns2:Quarter>'+month_quarter[1]+'</ns2:Quarter> \n\t\t\t<ns2:Year>'+year+'</ns2:Year>\n\t\t</ns2:Period>'
elif month_quarter.startswith('0') and month_quarter.endswith('0'):
data_comp_period+= '\n\t\t<ns2:Period>\n\t\t\t<ns2:Year>'+year+'</ns2:Year>\n\t\t</ns2:Period>'
else:
data_comp_period += '\n\t\t<ns2:Period>\n\t\t\t<ns2:Month>'+month_quarter+'</ns2:Month> \n\t\t\t<ns2:Year>'+year+'</ns2:Year>\n\t\t</ns2:Period>'
data_clientinfo = ''
for client in xml_data['clientlist']:
if not client['vatnum']:
raise osv.except_osv(_('Insufficient Data!'),_('No vat number defined for %s.') % client['partner_name'])
data_clientinfo +='\n\t\t<ns2:IntraClient SequenceNumber="%(seq)s">\n\t\t\t<ns2:CompanyVATNumber issuedBy="%(country)s">%(vatnum)s</ns2:CompanyVATNumber>\n\t\t\t<ns2:Code>%(code)s</ns2:Code>\n\t\t\t<ns2:Amount>%(amount).2f</ns2:Amount>\n\t\t</ns2:IntraClient>' % (client)
data_decl = '\n\t<ns2:IntraListing SequenceNumber="1" ClientsNbr="%(clientnbr)s" DeclarantReference="%(dnum)s" AmountSum="%(amountsum).2f">' % (xml_data)
data_file += data_head + data_decl + data_comp_period + data_clientinfo + '\n\t\t<ns2:Comment>%(comments)s</ns2:Comment>\n\t</ns2:IntraListing>\n</ns2:IntraConsignment>' % (xml_data)
context = dict(context or {})
context['file_save'] = data_file
model_data_ids = mod_obj.search(cursor, user,[('model','=','ir.ui.view'),('name','=','view_vat_intra_save')], context=context)
resource_id = mod_obj.read(cursor, user, model_data_ids, fields=['res_id'], context=context)[0]['res_id']
return {
'name': _('Save'),
'context': context,
'view_type': 'form',
'view_mode': 'form',
'res_model': 'partner.vat.intra',
'views': [(resource_id,'form')],
'view_id': 'view_vat_intra_save',
'type': 'ir.actions.act_window',
'target': 'new',
}
def preview(self, cr, uid, ids, context=None):
xml_data = self._get_datas(cr, uid, ids, context=context)
datas = {
'ids': [],
'model': 'partner.vat.intra',
'form': xml_data
}
return self.pool['report'].get_action(
cr, uid, [], 'l10n_be.report_l10nvatintraprint', data=datas, context=context
)
class vat_intra_print(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(vat_intra_print, self).__init__(cr, uid, name, context=context)
self.localcontext.update({
'time': time,
})
class wrapped_vat_intra_print(osv.AbstractModel):
_name = 'report.l10n_be.report_l10nvatintraprint'
_inherit = 'report.abstract_report'
_template = 'l10n_be.report_l10nvatintraprint'
_wrapped_report_class = vat_intra_print
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
2Minutes/davos-dev | davos/core/utils.py | 1 | 7692 |
import re
import sys
import os
import os.path as osp
from fnmatch import fnmatch
from pytd.gui.dialogs import promptDialog
from pytd.util.logutils import logMsg
from pytd.util.sysutils import importModule, toStr, inDevMode, getCaller
from pytd.util.fsutils import pathSplitDirs, pathResolve, pathNorm, pathJoin
from pytd.util.fsutils import jsonRead, jsonWrite, isDirStat, parseDirContent
from pytd.util.strutils import padded
_VERS_SPLIT_REXP = re.compile(r'-(v[0-9]+)')
def getConfigModule(sProjectName):
try:
sConfPkg = os.environ.get("DAVOS_CONF_PACKAGE", "davos.config")
sConfigModule = sConfPkg + '.' + sProjectName
modobj = importModule(sConfigModule)
except ImportError:
raise ImportError("No config module named '{}'".format(sConfigModule))
return modobj
def versionFromName(sFileName):
vers = _VERS_SPLIT_REXP.findall(sFileName)
return int(vers[-1].strip('v')) if vers else None
def mkVersionSuffix(v):
if not isinstance(v, int):
raise TypeError("argument must be of type <int>. Got {}.".format(type(v)))
return "".join(('-v', padded(v)))
def findVersionFields(s):
return _VERS_SPLIT_REXP.findall(s)
def promptForComment(**kwargs):
sComment = ""
bOk = False
result = promptDialog(title='Please...',
message='Leave a comment: ',
button=['OK', 'Cancel'],
defaultButton='OK',
cancelButton='Cancel',
dismissString='Cancel',
scrollableField=True,
**kwargs)
if result == 'Cancel':
logMsg("Cancelled !" , warning=True)
elif result == 'OK':
sComment = promptDialog(query=True, text=True)
bOk = True
return sComment, bOk
def projectNameFromPath(p):
sConfPkg = os.environ.get("DAVOS_CONF_PACKAGE", "davos.config")
pkg = importModule(sConfPkg)
sPkgDirPath = os.path.dirname(pkg.__file__)
sDirList = pathSplitDirs(p)
for sFilename in os.listdir(sPkgDirPath):
bIgnored = False
for sPatrn in ("__*", ".*", "*.pyc"):
if fnmatch(sFilename, sPatrn):
bIgnored = True
break
if bIgnored:
continue
sModName = os.path.splitext(sFilename)[0]
m = importModule(sConfPkg + '.' + sModName)
sProjDir = m.project.dir_name
if sProjDir in sDirList:
return sModName
return ""
def splitStep(sTaskName):
return sTaskName.rsplit("|", 1) if ("|" in sTaskName) else ("", sTaskName)
def damasServerPort():
return os.getenv("DAMAS_DEV_PORT", "8443") if inDevMode() else "8443"
def loadPrefs():
global DAVOS_PREFS
try:
p = pathResolve(r"%USERPROFILE%\davos_prefs.json")
DAVOS_PREFS = jsonRead(p)
except EnvironmentError:
DAVOS_PREFS = {}
return DAVOS_PREFS
def savePrefs():
global DAVOS_PREFS
if DAVOS_PREFS:
p = pathResolve(r"%USERPROFILE%\davos_prefs.json")
jsonWrite(p, DAVOS_PREFS)
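# Preference keys may address nested dictionaries by joining the path with '|', e.g.
# setPref('someTool|window|geometry', value) stores the value under
# DAVOS_PREFS['someTool']['window']['geometry'] (the key names here are only illustrative).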
def setPref(in_sKey, value):
global DAVOS_PREFS
if "|" not in in_sKey:
DAVOS_PREFS[in_sKey] = value
return
sKeyList = in_sKey.split("|")
iLastKey = len(sKeyList) - 1
currPrefs = DAVOS_PREFS
sPrevKey = ""
prevPrefs = None
for i, sKey in enumerate(sKeyList):
if not isinstance(currPrefs, dict):
prevPrefs[sPrevKey] = {}
currPrefs = prevPrefs[sPrevKey]
if i == iLastKey:
currPrefs[sKey] = value
return
if sKey not in currPrefs:
currPrefs[sKey] = {}
prevPrefs = currPrefs
sPrevKey = sKey
currPrefs = currPrefs[sKey]
def getPref(in_sKey, default=None):
global DAVOS_PREFS
if "|" not in in_sKey:
return DAVOS_PREFS.get(in_sKey, default)
sKeyList = in_sKey.split("|")
iLastKey = len(sKeyList) - 1
currPrefs = DAVOS_PREFS
for i, sKey in enumerate(sKeyList):
if not isinstance(currPrefs, dict):
k = "|".join(sKeyList[:(i + 1)])
logMsg("Not a pref dictionary: '{}'.".format(k), warning=True)
return default
if i == iLastKey:
return currPrefs.get(sKey, default)
if sKey in currPrefs:
currPrefs = currPrefs[sKey]
else:
logMsg("No such pref: '{}'.".format(in_sKey), warning=True)
return default
_ICON_DIR_PATH = ""
def mkIconPath(sRelPath):
global _ICON_DIR_PATH
if (not _ICON_DIR_PATH) or (not osp.exists(_ICON_DIR_PATH)):
p = sys.modules["davos"].__file__
p = osp.abspath(osp.join(osp.dirname(p), "..", "resources", "icon"))
_ICON_DIR_PATH = p
return pathJoin(_ICON_DIR_PATH, sRelPath)
def writePackContent(sPackDirPath, dirStat=None):
sPackDirPath = pathNorm(sPackDirPath)
if not dirStat:
dirStat = os.stat(sPackDirPath)
sJsonPath = mkPackFilePath(sPackDirPath)
iMtime = 0
if not osp.exists(sJsonPath):
iMtime = dirStat.st_mtime
iAtime = dirStat.st_atime
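        # Remember the directory's original timestamps so they can be restored once the
        # package file has been written below.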
try:
open(sJsonPath, 'a+b').close() # create json file so it is listed by parseDirContent()
dirContent = parseDirContent(sPackDirPath)
jsonWrite(sJsonPath, dirContent, sort_keys=True)
finally:
if iMtime:
os.utime(sPackDirPath, (iAtime, iMtime))
return dirContent
def readPackContent(sPackDirPath, fail=True):
try:
dirContent = jsonRead(mkPackFilePath(sPackDirPath))
except EnvironmentError as e:
if fail:
raise
logMsg(toStr(e), warning=True)
dirContent = parseDirContent(sPackDirPath)
return dirContent
def mkPackFilePath(sPackDirPath):
return pathJoin(sPackDirPath, "_package.json")
_ISPACK_REXP = re.compile(r".+_pkg[^/\w].+", re.I)
def assertPack(p, dirStat=None):
if not dirStat:
dirStat = os.stat(pathNorm(p))
if isPack(p, fail=True, dirStat=dirStat):
return dirStat
return None
def belowPack(p):
p = pathNorm(p)
if os.environ["IN_SEB_MODE"]:
return True if _belowPack(p) else _belowOldPack(p)
else:
return _belowPack(p)
def isPack(p, fail=False, dirStat=None):
p = pathNorm(p)
if os.environ["IN_SEB_MODE"]:
bPackPath = True if _isPack(p) else _isOldPack(p)
else:
bPackPath = _isPack(p)
if not bPackPath:
if fail:
sMsg = ("Directory NOT a package (should start with 'pkg_' or 'lyr_'): '{}'."
.format(osp.basename(p)))
raise EnvironmentError(sMsg)
else:
return False
if dirStat and not isDirStat(dirStat):
if fail:
raise EnvironmentError("Package path NOT a directory: '{}'".format(p))
else:
return False
return True
def _belowPack(p):
p = osp.dirname(p)
for sDirName in pathSplitDirs(p):
if _isPack(sDirName):
return True
return False
def _isPack(p):
sBaseName = osp.basename(p) if "/" in p else p
if "_" not in sBaseName:
return False
sPrefix = sBaseName.split("_", 1)[0]
if not sPrefix:
return False
return (sPrefix.lower() + "_") in ("pkg_", "lyr_")
def _belowOldPack(p):
p = osp.dirname(p)
if "_pkg/" in p.lower():
return True
if _ISPACK_REXP.match(p):
return True
return False
def _isOldPack(p):
sName = osp.basename(p)
if sName.lower().endswith("_pkg"):
return True
if _ISPACK_REXP.match(sName):
return True
return False
| gpl-3.0 |
openstack/poppy | poppy/metrics/blueflood/services.py | 2 | 4570 | # Copyright (c) 2016 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_context import context as context_utils
from oslo_log import log
from poppy.metrics import base
from poppy.metrics.blueflood.utils import client
from poppy.metrics.blueflood.utils import errors
from poppy.metrics.blueflood.utils import helper
LOG = log.getLogger(__name__)
class ServicesController(base.ServicesController):
def __init__(self, driver):
super(ServicesController, self).__init__(driver)
self.driver = driver
def _result_formatter(self, response):
resp_dict = []
if not response.ok:
LOG.warning("BlueFlood Metrics Response status Code:{0} "
"Response Text: {1} "
"Request URL: {2}".format(response.status_code,
response.text,
response.url))
return resp_dict
else:
serialized_response = response.json()
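            # Assumed Blueflood payload shape, inferred from the parsing below:
            #   {"values": [{"timestamp": <epoch>, "sum": <number>, ...}, ...]}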
try:
values = serialized_response['values']
for val in values:
m = {}
m['timestamp'] = helper.datetime_from_epoch(
int(val['timestamp']))
m['count'] = val['sum']
resp_dict.append(m)
except KeyError:
msg = 'content from {0} not conforming ' \
'to API contracts'.format(response.url)
LOG.warning(msg)
raise errors.BlueFloodApiSchemaError(msg)
# sort the resp_dict by timestamp ascending
resp_dict = sorted(resp_dict, key=lambda x: x['timestamp'])
return resp_dict
def read(self, metric_names, from_timestamp, to_timestamp, resolution):
"""read metrics from metrics driver.
"""
curr_resolution = \
helper.resolution_converter_seconds_to_enum(resolution)
context_dict = context_utils.get_current().to_dict()
project_id = context_dict['tenant']
auth_token = None
if self.driver.metrics_conf.use_keystone_auth:
auth_token = context_dict['auth_token']
tenanted_blueflood_url = \
self.driver.metrics_conf.blueflood_url.format(
project_id=project_id
)
from_timestamp = int(helper.datetime_to_epoch(from_timestamp))
to_timestamp = int(helper.datetime_to_epoch(to_timestamp))
urls = []
params = {
'to': to_timestamp,
'from': from_timestamp,
'resolution': curr_resolution
}
for metric_name in metric_names:
tenanted_blueflood_url_with_metric = helper.join_url(
tenanted_blueflood_url, metric_name.strip().replace(" ", ""))
LOG.info("Querying BlueFlood Metric: {0}".format(
tenanted_blueflood_url_with_metric))
urls.append(helper.set_qs_on_url(
tenanted_blueflood_url_with_metric,
**params))
executors = self.driver.metrics_conf.no_of_executors
blueflood_client = client.BlueFloodMetricsClient(token=auth_token,
project_id=project_id,
executors=executors)
results = blueflood_client.async_requests(urls)
reordered_metric_names = []
for result in results:
metric_name = helper.retrieve_last_relative_url(result.url)
reordered_metric_names.append(metric_name)
formatted_results = []
for metric_name, result in zip(reordered_metric_names, results):
formatted_result = self._result_formatter(result)
# NOTE(TheSriram): Tuple to pass the associated metric name, along
# with the formatted result
formatted_results.append((metric_name, formatted_result))
return formatted_results
| apache-2.0 |
solarsail/aerosol-tools | clustatlib/clucsv.py | 1 | 3752 | import numpy as np
import os
import os.path
class csvbuilder:
def __init__(self, cs):
self.cs = cs
if not os.path.isdir('csv'):
os.mkdir('csv')
def month_type_csv(self, site = None):
label = 'all' if site == None else site
values, percentages = self.cs.month_type_stat(site)
header = ",".join(["type{},%".format(t) for t in range(1, len(values)+1)])
header = "month," + header
all = []
for i in range(len(values)):
all.append(values[i])
all.append(percentages[i])
mat = np.matrix(all)
mat = mat.transpose().tolist()
content = []
for i in range(12):
content.append("%d,%s" % (i+1, ','.join([str(field) for field in mat[i]])))
content = '\n'.join(content)
with open("csv/month_type_%s.csv" % label, 'w') as outfile:
outfile.write('\n'.join((header, content)))
def year_type_csv(self, start_year, end_year, site = None):
label = 'all' if site == None else site
values, percentages = self.cs.year_type_stat(start_year, end_year, site)
header = ",".join(["type{},%".format(t) for t in range(1, len(values)+1)])
header = "year," + header
all = []
for i in range(len(values)):
all.append(values[i])
all.append(percentages[i])
mat = np.matrix(all)
mat = mat.transpose().tolist()
content = []
for i in range(start_year, end_year+1):
content.append("%d,%s" % (i, ','.join([str(field) for field in mat[i-start_year]])))
content = '\n'.join(content)
with open("csv/year_type_%s.csv" % label, 'w') as outfile:
outfile.write('\n'.join((header, content)))
def type_csv(self):
header = "type,count,percentage%"
all = self.cs.type_stat()
content = '\n'.join([','.join([str(field) for field in row]) for row in all])
with open("csv/type_count.csv", 'w') as outfile:
outfile.write('\n'.join((header, content)))
def site_type_csv(self):
all, types = self.cs.site_type_stat()
header = ",".join(["type{},%".format(t) for t in range(1, types+1)])
header = "site," + header
content = '\n'.join([','.join([str(field) for field in row]) for row in all])
with open("csv/site_type_count.csv", 'w') as outfile:
outfile.write('\n'.join((header, content)))
def type_stat_csv(self):
header = "type,refr440,refr675,refr870,refr1020,refi440,refi675,refi870,refi1020,volmedianradf,stddevf,volconf,volmedianradc,stddevc,volconc,ssa675,ssa870,ssa1020,asy440,asy675,asy870,sphericity"
list1 = self.cs.type_means()
list2 = self.cs.type_stddev()
l = []
for i in range(len(list1)):
l.append(list1[i])
stddevline = list(list2[i])
stddevline[0] = "stddev"
l.append(stddevline)
content = '\n'.join([','.join([str(field) for field in row]) for row in l])
with open("csv/type_stat.csv", 'w') as outfile:
outfile.write('\n'.join((header, content)))
def distances_csv(self):
clus, dist_mat = self.cs.all_distances()
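        # dist_mat is treated as a flattened upper triangle (diagonal included): row i
        # consumes len(clus) - i values, and each CSV row is left-padded with commas so
        # the values line up under the right cluster columns.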
header = "," + ",".join([str(cid) for cid in clus])
lines = []
first = 1
cur = 0
for clu in clus:
lines.append(str(clu) + ',' * first + ','.join(str(d) for d in dist_mat[cur:cur+len(clus)-first+1]))
cur += len(clus) - first + 1
first += 1
content = '\n'.join(lines)
with open("csv/distance_stat.csv", 'w') as outfile:
            outfile.write('\n'.join((header, content)))
 | gpl-3.0 |
stenskjaer/scrapy | scrapy/commands/parse.py | 108 | 8286 | from __future__ import print_function
import logging
from w3lib.url import is_url
from scrapy.commands import ScrapyCommand
from scrapy.http import Request
from scrapy.item import BaseItem
from scrapy.utils import display
from scrapy.utils.conf import arglist_to_dict
from scrapy.utils.spider import iterate_spider_output, spidercls_for_request
from scrapy.exceptions import UsageError
logger = logging.getLogger(__name__)
class Command(ScrapyCommand):
requires_project = True
spider = None
items = {}
requests = {}
first_response = None
def syntax(self):
return "[options] <url>"
def short_desc(self):
return "Parse URL (using its spider) and print the results"
def add_options(self, parser):
ScrapyCommand.add_options(self, parser)
parser.add_option("--spider", dest="spider", default=None, \
help="use this spider without looking for one")
parser.add_option("-a", dest="spargs", action="append", default=[], metavar="NAME=VALUE", \
help="set spider argument (may be repeated)")
parser.add_option("--pipelines", action="store_true", \
help="process items through pipelines")
parser.add_option("--nolinks", dest="nolinks", action="store_true", \
help="don't show links to follow (extracted requests)")
parser.add_option("--noitems", dest="noitems", action="store_true", \
help="don't show scraped items")
parser.add_option("--nocolour", dest="nocolour", action="store_true", \
help="avoid using pygments to colorize the output")
parser.add_option("-r", "--rules", dest="rules", action="store_true", \
help="use CrawlSpider rules to discover the callback")
parser.add_option("-c", "--callback", dest="callback", \
help="use this callback for parsing, instead looking for a callback")
parser.add_option("-d", "--depth", dest="depth", type="int", default=1, \
help="maximum depth for parsing requests [default: %default]")
parser.add_option("-v", "--verbose", dest="verbose", action="store_true", \
help="print each depth level one by one")
@property
def max_level(self):
levels = self.items.keys() + self.requests.keys()
if levels: return max(levels)
else: return 0
def add_items(self, lvl, new_items):
old_items = self.items.get(lvl, [])
self.items[lvl] = old_items + new_items
def add_requests(self, lvl, new_reqs):
old_reqs = self.requests.get(lvl, [])
self.requests[lvl] = old_reqs + new_reqs
def print_items(self, lvl=None, colour=True):
if lvl is None:
items = [item for lst in self.items.values() for item in lst]
else:
items = self.items.get(lvl, [])
print("# Scraped Items ", "-"*60)
display.pprint([dict(x) for x in items], colorize=colour)
def print_requests(self, lvl=None, colour=True):
if lvl is None:
levels = self.requests.keys()
if levels:
requests = self.requests[max(levels)]
else:
requests = []
else:
requests = self.requests.get(lvl, [])
print("# Requests ", "-"*65)
display.pprint(requests, colorize=colour)
def print_results(self, opts):
colour = not opts.nocolour
if opts.verbose:
for level in xrange(1, self.max_level+1):
print('\n>>> DEPTH LEVEL: %s <<<' % level)
if not opts.noitems:
self.print_items(level, colour)
if not opts.nolinks:
self.print_requests(level, colour)
else:
print('\n>>> STATUS DEPTH LEVEL %s <<<' % self.max_level)
if not opts.noitems:
self.print_items(colour=colour)
if not opts.nolinks:
self.print_requests(colour=colour)
def run_callback(self, response, cb):
items, requests = [], []
for x in iterate_spider_output(cb(response)):
if isinstance(x, (BaseItem, dict)):
items.append(x)
elif isinstance(x, Request):
requests.append(x)
return items, requests
def get_callback_from_rules(self, spider, response):
if getattr(spider, 'rules', None):
for rule in spider.rules:
if rule.link_extractor.matches(response.url) and rule.callback:
return rule.callback
else:
logger.error('No CrawlSpider rules found in spider %(spider)r, '
'please specify a callback to use for parsing',
{'spider': spider.name})
def set_spidercls(self, url, opts):
spider_loader = self.crawler_process.spider_loader
if opts.spider:
try:
self.spidercls = spider_loader.load(opts.spider)
except KeyError:
logger.error('Unable to find spider: %(spider)s',
{'spider': opts.spider})
else:
self.spidercls = spidercls_for_request(spider_loader, Request(url))
if not self.spidercls:
logger.error('Unable to find spider for: %(url)s',
{'url': url})
request = Request(url, opts.callback)
_start_requests = lambda s: [self.prepare_request(s, request, opts)]
self.spidercls.start_requests = _start_requests
def start_parsing(self, url, opts):
self.crawler_process.crawl(self.spidercls, **opts.spargs)
self.pcrawler = list(self.crawler_process.crawlers)[0]
self.crawler_process.start()
if not self.first_response:
logger.error('No response downloaded for: %(url)s',
{'url': url})
def prepare_request(self, spider, request, opts):
def callback(response):
# memorize first request
if not self.first_response:
self.first_response = response
# determine real callback
cb = response.meta['_callback']
if not cb:
if opts.rules and self.first_response == response:
cb = self.get_callback_from_rules(spider, response)
else:
cb = 'parse'
if not callable(cb):
cb_method = getattr(spider, cb, None)
if callable(cb_method):
cb = cb_method
else:
logger.error('Cannot find callback %(callback)r in spider: %(spider)s',
                        {'callback': cb, 'spider': spider.name})
return
# parse items and requests
depth = response.meta['_depth']
items, requests = self.run_callback(response, cb)
if opts.pipelines:
itemproc = self.pcrawler.engine.scraper.itemproc
for item in items:
itemproc.process_item(item, spider)
self.add_items(depth, items)
self.add_requests(depth, requests)
if depth < opts.depth:
for req in requests:
req.meta['_depth'] = depth + 1
req.meta['_callback'] = req.callback
req.callback = callback
return requests
request.meta['_depth'] = 1
request.meta['_callback'] = request.callback
request.callback = callback
return request
def process_options(self, args, opts):
ScrapyCommand.process_options(self, args, opts)
try:
opts.spargs = arglist_to_dict(opts.spargs)
except ValueError:
raise UsageError("Invalid -a value, use -a NAME=VALUE", print_help=False)
def run(self, args, opts):
# parse arguments
if not len(args) == 1 or not is_url(args[0]):
raise UsageError()
else:
url = args[0]
# prepare spidercls
self.set_spidercls(url, opts)
if self.spidercls and opts.depth > 0:
self.start_parsing(url, opts)
self.print_results(opts)
| bsd-3-clause |
Benster900/mhn | server/mhn/api/views.py | 9 | 12576 | import json
from StringIO import StringIO
import csv
from uuid import uuid1
from sqlalchemy import func
from sqlalchemy.exc import IntegrityError
from flask import Blueprint, request, jsonify, make_response
from bson.errors import InvalidId
from mhn import db, csrf
from mhn.api import errors
from mhn.api.models import (
Sensor, Rule, DeployScript as Script,
DeployScript, RuleSource)
from mhn.api.decorators import deploy_auth, sensor_auth, token_auth
from mhn.common.utils import error_response
from mhn.common.clio import Clio
from mhn.auth import current_user, login_required
api = Blueprint('api', __name__, url_prefix='/api')
# Endpoints for the Sensor resource.
@api.route('/sensor/', methods=['POST'])
@csrf.exempt
@deploy_auth
def create_sensor():
missing = Sensor.check_required(request.json)
if missing:
return error_response(
errors.API_FIELDS_MISSING.format(missing), 400)
else:
sensor = Sensor(**request.json)
sensor.uuid = str(uuid1())
sensor.ip = request.remote_addr
Clio().authkey.new(**sensor.new_auth_dict()).post()
try:
db.session.add(sensor)
db.session.commit()
except IntegrityError:
return error_response(
errors.API_SENSOR_EXISTS.format(request.json['name']), 400)
else:
return jsonify(sensor.to_dict())
@api.route('/sensor/', methods=['GET'])
@token_auth
def get_sensors():
req = request.args.to_dict()
if 'api_key' in req:
del req['api_key']
resp = make_response(json.dumps([s.to_dict() for s in Sensor.query.filter_by(**req)]))
resp.headers['Content-Type'] = "application/json"
return resp
@api.route('/sensor/<uuid>/', methods=['PUT'])
@csrf.exempt
def update_sensor(uuid):
sensor = Sensor.query.filter_by(uuid=uuid).first_or_404()
for field in request.json.keys():
if field in Sensor.editable_fields():
setattr(sensor, field, request.json[field])
elif field in Sensor.fields():
return error_response(
errors.API_FIELD_NOT_EDITABLE.format(field), 400)
else:
return error_response(
errors.API_FIELD_INVALID.format(field), 400)
else:
try:
db.session.commit()
except IntegrityError:
return error_response(
errors.API_SENSOR_EXISTS.format(request.json['name']), 400)
return jsonify(sensor.to_dict())
@api.route('/sensor/<uuid>/', methods=['DELETE'])
@login_required
def delete_sensor(uuid):
sensor = Sensor.query.filter_by(uuid=uuid).first_or_404()
Clio().authkey.delete(identifier=uuid)
db.session.delete(sensor)
db.session.commit()
return jsonify({})
@api.route('/sensor/<uuid>/connect/', methods=['POST'])
@csrf.exempt
@sensor_auth
def connect_sensor(uuid):
sensor = Sensor.query.filter_by(uuid=uuid).first_or_404()
sensor.ip = request.remote_addr
db.session.commit()
return jsonify(sensor.to_dict())
# Utility functions that generalize the GET
# requests of resources from Mnemosyne.
def _get_one_resource(resource, res_id):
try:
res = resource.get(_id=res_id)
except InvalidId:
res = None
if not res:
return error_response(errors.API_RESOURCE_NOT_FOUND, 404)
else:
return jsonify(res.to_dict())
def _get_query_resource(resource, query):
options = {}
if 'limit' in query:
options['limit'] = int(query['limit'])
results = list(resource.get(options, **query))
return jsonify(
data=[r.to_dict() for r in results],
meta={
'size': len(results),
'query': query,
'options': options
}
)
# Now let's make use these methods in the views.
@api.route('/feed/<feed_id>/', methods=['GET'])
@token_auth
def get_feed(feed_id):
return _get_one_resource(Clio().hpfeed, feed_id)
@api.route('/session/<session_id>/', methods=['GET'])
@token_auth
def get_session(session_id):
return _get_one_resource(Clio().session, session_id)
@api.route('/url/<url_id>/', methods=['GET'])
@token_auth
def get_url(url_id):
return _get_one_resource(Clio().url, url_id)
@api.route('/file/<file_id>/', methods=['GET'])
@token_auth
def get_file(file_id):
return _get_one_resource(Clio().file, file_id)
@api.route('/dork/<dork_id>/', methods=['GET'])
@token_auth
def get_dork(dork_id):
return _get_one_resource(Clio().dork, dork_id)
@api.route('/metadata/<metadata_id>/', methods=['GET'])
@token_auth
def get_metadatum(metadata_id):
return _get_one_resource(Clio().metadata, metadata_id)
@api.route('/feed/', methods=['GET'])
@token_auth
def get_feeds():
return _get_query_resource(Clio().hpfeed, request.args.to_dict())
@api.route('/session/', methods=['GET'])
@token_auth
def get_sessions():
return _get_query_resource(Clio().session, request.args.to_dict())
@api.route('/url/', methods=['GET'])
@token_auth
def get_urls():
return _get_query_resource(Clio().url, request.args.to_dict())
@api.route('/file/', methods=['GET'])
@token_auth
def get_files():
return _get_query_resource(Clio().file, request.args.to_dict())
@api.route('/dork/', methods=['GET'])
@token_auth
def get_dorks():
return _get_query_resource(Clio().dork, request.args.to_dict())
@api.route('/metadata/', methods=['GET'])
@token_auth
def get_metadata():
return _get_query_resource(Clio().metadata, request.args.to_dict())
@api.route('/top_attackers/', methods=['GET'])
@token_auth
def top_attackers():
options = request.args.to_dict()
limit = int(options.get('limit', '1000'))
hours_ago = int(options.get('hours_ago', '4'))
extra = dict(options)
for name in ('hours_ago', 'limit', 'api_key',):
if name in extra:
del extra[name]
for name in options.keys():
if name not in ('hours_ago', 'limit',):
del options[name]
results = Clio().session._tops(['source_ip', 'honeypot'], top=limit, hours_ago=hours_ago, **extra)
return jsonify(
data=results,
meta={
'size': len(results),
'query': 'top_attackers',
'options': options
}
)
@api.route('/attacker_stats/<ip>/', methods=['GET'])
@token_auth
def attacker_stats(ip):
options = request.args.to_dict()
hours_ago = int(options.get('hours_ago', '720')) # 30 days
for name in options.keys():
if name not in ('hours_ago', 'limit',):
del options[name]
results = Clio().session.attacker_stats(ip, hours_ago=hours_ago)
return jsonify(
data=results,
meta={
'query': 'attacker_stats',
'options': options
}
)
def get_tags(rec):
tags = [rec['honeypot'], rec['protocol'], 'port-{}'.format(rec['destination_port']),]
meta = rec['meta']
if len(meta) > 0:
meta = meta[0]
else:
meta = {}
for meta_key in ['app', 'os', 'link',]:
value = meta.get(meta_key)
if value:
tags.append(value.replace(',', '').replace('\t', ' '))
return tags
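# Minimal sketch of the record shape get_tags() expects; the field names are taken
# from the lookups above and the sample values are invented.
def _example_get_tags():
    rec = {
        'honeypot': 'dionaea',
        'protocol': 'smbd',
        'destination_port': 445,
        'meta': [{'app': 'Windows', 'os': 'Windows 7 or 8', 'link': 'Ethernet or modem'}],
    }
    # Returns ['dionaea', 'smbd', 'port-445', 'Windows', 'Windows 7 or 8', 'Ethernet or modem']
    return get_tags(rec)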
@api.route('/intel_feed.csv/', methods=['GET'])
@token_auth
def intel_feed_csv():
fieldnames = ['source_ip', 'count', 'tags', ]
results = get_intel_feed()
outf = StringIO()
wr = csv.DictWriter(outf, fieldnames=fieldnames, delimiter='\t', lineterminator='\n')
wr.writeheader()
for rec in results['data']:
wr.writerow({
'count': rec['count'],
'source_ip': rec['source_ip'],
'tags': ','.join(get_tags(rec)),
})
response_data = outf.getvalue()
outf.close()
response = make_response(response_data)
response.headers['Content-type'] = 'text/plain'
return response
@api.route('/intel_feed/', methods=['GET'])
@token_auth
def intel_feed():
results = get_intel_feed()
return jsonify(**results)
def get_intel_feed():
options = request.args.to_dict()
limit = int(options.get('limit', '1000'))
hours_ago = int(options.get('hours_ago', '4'))
extra = dict(options)
for name in ('hours_ago', 'limit', 'api_key',):
if name in extra:
del extra[name]
for name in options.keys():
if name not in ('hours_ago', 'limit',):
del options[name]
extra['ne__protocol'] = 'pcap'
results = Clio().session._tops(['source_ip', 'honeypot', 'protocol', 'destination_port'], top=limit, hours_ago=hours_ago, **extra)
results = [r for r in results if r['protocol'] != 'ftpdatalisten']
cache = {}
for r in results:
source_ip = r['source_ip']
if source_ip not in cache:
# TODO: may want to make one big query to mongo here...
cache[source_ip] = [m.to_dict() for m in Clio().metadata.get(ip=r['source_ip'], honeypot='p0f')]
r['meta'] = cache[source_ip]
return {
'data':results,
'meta':{
'size': len(results),
'query': 'intel_feed',
'options': options
}
}
@api.route('/rule/<rule_id>/', methods=['PUT'])
@token_auth
def update_rule(rule_id):
rule = Rule.query.filter_by(id=rule_id).first_or_404()
for field in request.json.keys():
if field in Rule.editable_fields():
setattr(rule, field, request.json[field])
elif field in Rule.fields():
return error_response(
errors.API_FIELD_NOT_EDITABLE.format(field), 400)
else:
return error_response(
errors.API_FIELD_INVALID.format(field), 400)
else:
db.session.commit()
return jsonify(rule.to_dict())
@api.route('/rule/', methods=['GET'])
@sensor_auth
def get_rules():
# Getting active rules.
if request.args.get('plaintext') in ['1', 'true']:
# Requested rendered rules in plaintext.
resp = make_response(Rule.renderall())
resp.headers['Content-Disposition'] = "attachment; filename=mhn.rules"
return resp
else:
# Responding with active rules.
rules = Rule.query.filter_by(is_active=True).\
group_by(Rule.sid).\
having(func.max(Rule.rev))
resp = make_response(json.dumps([ru.to_dict() for ru in rules]))
resp.headers['Content-Type'] = "application/json"
return resp
@api.route('/rulesources/', methods=['POST'])
@login_required
def create_rule_source():
missing = RuleSource.check_required(request.json)
if missing:
return error_response(
errors.API_FIELDS_MISSING.format(missing), 400)
else:
rsource = RuleSource(**request.json)
try:
db.session.add(rsource)
db.session.commit()
except IntegrityError:
return error_response(
errors.API_SOURCE_EXISTS.format(request.json['uri']), 400)
else:
return jsonify(rsource.to_dict())
@api.route('/rulesources/<rs_id>/', methods=['DELETE'])
@login_required
def delete_rule_source(rs_id):
source = RuleSource.query.filter_by(id=rs_id).first_or_404()
db.session.delete(source)
db.session.commit()
return jsonify({})
@api.route('/script/', methods=['POST'])
@login_required
def create_script():
missing = Script.check_required(request.json)
if missing:
return error_response(
errors.API_FIELDS_MISSING.format(missing), 400)
else:
script = Script(**request.json)
script.user = current_user
db.session.add(script)
db.session.commit()
return jsonify(script.to_dict())
@api.route('/script/', methods=['PUT', 'PATCH'])
@login_required
def update_script():
script = Script.query.get(request.json.get('id'))
script.user = current_user
for editable in Script.editable_fields():
if editable in request.json:
setattr(script, editable, request.json[editable])
db.session.add(script)
db.session.commit()
return jsonify(script.to_dict())
@api.route('/script/', methods=['GET'])
def get_script():
if request.args.get('script_id'):
script = DeployScript.query.get(request.args.get('script_id'))
else:
script = DeployScript.query.order_by(DeployScript.date.desc()).first()
if request.args.get('text') in ['1', 'true']:
resp = make_response(script.script)
resp.headers['Content-Disposition'] = "attachment; filename=deploy.sh"
return resp
else:
return jsonify(script.to_dict())
| lgpl-2.1 |
HazyResearch/metal | metal/logging/writer.py | 1 | 4223 | import copy
import json
import os
from collections import defaultdict
from subprocess import check_output
from time import strftime
from metal.utils import recursive_transform
class LogWriter(object):
"""Class for writing simple JSON logs at end of runs, with interface for
storing per-iter data as well.
Config contains:
log_dir: (str) The path to the base log directory, or defaults to
current working directory.
run_dir: (str) The name of the sub-directory, or defaults to the date,
strftime("%Y_%m_%d").
run_name: (str) The name of the run + the time, or defaults to the time,
strftime("%H_%M_%S).
writer_metrics: (list) An optional whitelist of metrics to write,
ignoring all others. (If None, write all available metrics).
Log is saved to 'log_dir/run_dir/{run_name}_H_M_S.json'
"""
def __init__(
self,
log_dir=None,
run_dir=None,
run_name=None,
writer_metrics=[],
verbose=True,
**kwargs,
):
start_date = strftime("%Y_%m_%d")
start_time = strftime("%H_%M_%S")
# Set logging subdirectory + make sure exists
log_dir = log_dir or os.getcwd()
run_dir = run_dir or start_date
if run_name is not None:
run_name = f"{run_name}_{start_time}"
else:
run_name = start_time
self.log_subdir = os.path.join(log_dir, run_dir, run_name)
if not os.path.exists(self.log_subdir):
os.makedirs(self.log_subdir)
# Save other settings
self.writer_metrics = writer_metrics
self.verbose = verbose
# Initialize log
# Note we have a separate section for during-run metrics
commit = check_output(["git", "rev-parse", "--short", "HEAD"]).strip()
self.log_dict = {
"start_date": start_date,
"start_time": start_time,
"commit": str(commit),
"config": None,
"run_log": defaultdict(list),
}
def add_scalar(self, name, val, i):
# Note: Does not handle deduplication of (name, val) entries w same i
        if not self.writer_metrics or name in self.writer_metrics:
if val is not None:
val = float(val)
self.log_dict["run_log"][name].append((i, val))
return True
else:
return False
def write(self, config=None, metrics=None):
        self.write_log()
if config is not None:
self.write_config(config)
if metrics is not None:
self.write_metrics(metrics)
def write_log(self):
"""Dump log output to file"""
log_path = os.path.join(self.log_subdir, "log.json")
if self.verbose:
print(f"Writing log to {log_path}")
with open(log_path, "w") as f:
json.dump(self.log_dict, f, indent=1)
def write_config(self, config, config_name="config"):
"""Dump config dict to file"""
config_path = os.path.join(self.log_subdir, f"{config_name}.json")
if self.verbose:
print(f"Writing config to {config_path}")
with open(config_path, "w") as f:
config = self._sanitize_config(config)
json.dump(config, f, indent=1)
def write_metrics(self, metrics):
metrics_path = os.path.join(self.log_subdir, "metrics.json")
if self.verbose:
print(f"Writing metrics to {metrics_path}")
with open(metrics_path, "w") as f:
json.dump(metrics, f, indent=1)
def close(self):
pass
def _sanitize_config(self, config):
config = copy.deepcopy(config)
# Replace individual functions
is_func = lambda x: callable(x)
replace_with_name = lambda f: str(f)
config = recursive_transform(config, is_func, replace_with_name)
# Replace lists of functions
is_func_list = lambda x: isinstance(x, list) and all(is_func(f) for f in x)
replace_with_names = lambda x: [replace_with_name(f) for f in x]
config = recursive_transform(config, is_func_list, replace_with_names)
return config
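# Rough usage sketch for LogWriter (paths, run names and metric values are arbitrary
# examples; the constructor shells out to git, so this assumes a git checkout).
def _example_log_writer_usage():
    writer = LogWriter(log_dir="logs", run_dir="demo", run_name="example", verbose=False)
    for step in range(3):
        writer.add_scalar("train/loss", 1.0 / (step + 1), step)
    writer.write(config={"lr": 0.01, "model": "mlp"}, metrics={"accuracy": 0.9})
    writer.close()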
| apache-2.0 |
alex1818/capstone | bindings/python/capstone/xcore_const.py | 37 | 3353 | # For Capstone Engine. AUTO-GENERATED FILE, DO NOT EDIT [xcore_const.py]
# Operand type for instruction's operands
XCORE_OP_INVALID = 0
XCORE_OP_REG = 1
XCORE_OP_IMM = 2
XCORE_OP_MEM = 3
# XCore registers
XCORE_REG_INVALID = 0
XCORE_REG_CP = 1
XCORE_REG_DP = 2
XCORE_REG_LR = 3
XCORE_REG_SP = 4
XCORE_REG_R0 = 5
XCORE_REG_R1 = 6
XCORE_REG_R2 = 7
XCORE_REG_R3 = 8
XCORE_REG_R4 = 9
XCORE_REG_R5 = 10
XCORE_REG_R6 = 11
XCORE_REG_R7 = 12
XCORE_REG_R8 = 13
XCORE_REG_R9 = 14
XCORE_REG_R10 = 15
XCORE_REG_R11 = 16
# pseudo registers
XCORE_REG_PC = 17
XCORE_REG_SCP = 18
XCORE_REG_SSR = 19
XCORE_REG_ET = 20
XCORE_REG_ED = 21
XCORE_REG_SED = 22
XCORE_REG_KEP = 23
XCORE_REG_KSP = 24
XCORE_REG_ID = 25
XCORE_REG_ENDING = 26
# XCore instruction
XCORE_INS_INVALID = 0
XCORE_INS_ADD = 1
XCORE_INS_ANDNOT = 2
XCORE_INS_AND = 3
XCORE_INS_ASHR = 4
XCORE_INS_BAU = 5
XCORE_INS_BITREV = 6
XCORE_INS_BLA = 7
XCORE_INS_BLAT = 8
XCORE_INS_BL = 9
XCORE_INS_BF = 10
XCORE_INS_BT = 11
XCORE_INS_BU = 12
XCORE_INS_BRU = 13
XCORE_INS_BYTEREV = 14
XCORE_INS_CHKCT = 15
XCORE_INS_CLRE = 16
XCORE_INS_CLRPT = 17
XCORE_INS_CLRSR = 18
XCORE_INS_CLZ = 19
XCORE_INS_CRC8 = 20
XCORE_INS_CRC32 = 21
XCORE_INS_DCALL = 22
XCORE_INS_DENTSP = 23
XCORE_INS_DGETREG = 24
XCORE_INS_DIVS = 25
XCORE_INS_DIVU = 26
XCORE_INS_DRESTSP = 27
XCORE_INS_DRET = 28
XCORE_INS_ECALLF = 29
XCORE_INS_ECALLT = 30
XCORE_INS_EDU = 31
XCORE_INS_EEF = 32
XCORE_INS_EET = 33
XCORE_INS_EEU = 34
XCORE_INS_ENDIN = 35
XCORE_INS_ENTSP = 36
XCORE_INS_EQ = 37
XCORE_INS_EXTDP = 38
XCORE_INS_EXTSP = 39
XCORE_INS_FREER = 40
XCORE_INS_FREET = 41
XCORE_INS_GETD = 42
XCORE_INS_GET = 43
XCORE_INS_GETN = 44
XCORE_INS_GETR = 45
XCORE_INS_GETSR = 46
XCORE_INS_GETST = 47
XCORE_INS_GETTS = 48
XCORE_INS_INCT = 49
XCORE_INS_INIT = 50
XCORE_INS_INPW = 51
XCORE_INS_INSHR = 52
XCORE_INS_INT = 53
XCORE_INS_IN = 54
XCORE_INS_KCALL = 55
XCORE_INS_KENTSP = 56
XCORE_INS_KRESTSP = 57
XCORE_INS_KRET = 58
XCORE_INS_LADD = 59
XCORE_INS_LD16S = 60
XCORE_INS_LD8U = 61
XCORE_INS_LDA16 = 62
XCORE_INS_LDAP = 63
XCORE_INS_LDAW = 64
XCORE_INS_LDC = 65
XCORE_INS_LDW = 66
XCORE_INS_LDIVU = 67
XCORE_INS_LMUL = 68
XCORE_INS_LSS = 69
XCORE_INS_LSUB = 70
XCORE_INS_LSU = 71
XCORE_INS_MACCS = 72
XCORE_INS_MACCU = 73
XCORE_INS_MJOIN = 74
XCORE_INS_MKMSK = 75
XCORE_INS_MSYNC = 76
XCORE_INS_MUL = 77
XCORE_INS_NEG = 78
XCORE_INS_NOT = 79
XCORE_INS_OR = 80
XCORE_INS_OUTCT = 81
XCORE_INS_OUTPW = 82
XCORE_INS_OUTSHR = 83
XCORE_INS_OUTT = 84
XCORE_INS_OUT = 85
XCORE_INS_PEEK = 86
XCORE_INS_REMS = 87
XCORE_INS_REMU = 88
XCORE_INS_RETSP = 89
XCORE_INS_SETCLK = 90
XCORE_INS_SET = 91
XCORE_INS_SETC = 92
XCORE_INS_SETD = 93
XCORE_INS_SETEV = 94
XCORE_INS_SETN = 95
XCORE_INS_SETPSC = 96
XCORE_INS_SETPT = 97
XCORE_INS_SETRDY = 98
XCORE_INS_SETSR = 99
XCORE_INS_SETTW = 100
XCORE_INS_SETV = 101
XCORE_INS_SEXT = 102
XCORE_INS_SHL = 103
XCORE_INS_SHR = 104
XCORE_INS_SSYNC = 105
XCORE_INS_ST16 = 106
XCORE_INS_ST8 = 107
XCORE_INS_STW = 108
XCORE_INS_SUB = 109
XCORE_INS_SYNCR = 110
XCORE_INS_TESTCT = 111
XCORE_INS_TESTLCL = 112
XCORE_INS_TESTWCT = 113
XCORE_INS_TSETMR = 114
XCORE_INS_START = 115
XCORE_INS_WAITEF = 116
XCORE_INS_WAITET = 117
XCORE_INS_WAITEU = 118
XCORE_INS_XOR = 119
XCORE_INS_ZEXT = 120
XCORE_INS_ENDING = 121
# Group of XCore instructions
XCORE_GRP_INVALID = 0
# Generic groups
XCORE_GRP_JUMP = 1
XCORE_GRP_ENDING = 2
| bsd-3-clause |
hawkeyexp/plugin.video.netflix | resources/lib/services/nfsession/session/base.py | 1 | 2055 | # -*- coding: utf-8 -*-
"""
Copyright (C) 2017 Sebastian Golasch (plugin.video.netflix)
Copyright (C) 2018 Caphm (original implementation module)
Copyright (C) 2019 Stefano Gottardo - @CastagnaIT
Initialize the netflix session
SPDX-License-Identifier: MIT
See LICENSES/MIT.md for more information.
"""
from __future__ import absolute_import, division, unicode_literals
import resources.lib.common as common
from resources.lib.database.db_utils import TABLE_SESSION
from resources.lib.globals import G
from resources.lib.utils.logging import LOG
class SessionBase(object):
"""Initialize the netflix session"""
session = None
"""The requests.session object to handle communication to Netflix"""
verify_ssl = True
"""Use SSL verification when performing requests"""
# Functions from derived classes to allow perform particular operations in parent classes
external_func_activate_profile = None # (set by nfsession_op.py)
def __init__(self):
self.verify_ssl = bool(G.ADDON.getSettingBool('ssl_verification'))
self._init_session()
def _init_session(self):
"""Initialize the session to use for all future connections"""
try:
self.session.close()
LOG.info('Session closed')
except AttributeError:
pass
from requests import session
self.session = session()
self.session.max_redirects = 10 # Too much redirects should means some problem
self.session.headers.update({
'User-Agent': common.get_user_agent(enable_android_mediaflag_fix=True),
'Accept-Encoding': 'gzip, deflate, br',
'Host': 'www.netflix.com'
})
LOG.info('Initialized new session')
@property
def auth_url(self):
"""Access rights to make HTTP requests on an endpoint"""
return G.LOCAL_DB.get_value('auth_url', table=TABLE_SESSION)
@auth_url.setter
def auth_url(self, value):
G.LOCAL_DB.set_value('auth_url', value, TABLE_SESSION)
| mit |
Wuguanping/Server_Manage_Plugin | Openstack_Plugin/ironic-plugin-pike/ironic/tests/unit/drivers/modules/irmc/test_common.py | 3 | 10969 | # Copyright 2015 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Test class for common methods used by iRMC modules.
"""
import mock
from oslo_config import cfg
from ironic.common import exception
from ironic.conductor import task_manager
from ironic.drivers.modules.irmc import common as irmc_common
from ironic.tests.unit.conductor import mgr_utils
from ironic.tests.unit.db import base as db_base
from ironic.tests.unit.db import utils as db_utils
from ironic.tests.unit.drivers import third_party_driver_mock_specs \
as mock_specs
from ironic.tests.unit.objects import utils as obj_utils
class IRMCValidateParametersTestCase(db_base.DbTestCase):
def setUp(self):
super(IRMCValidateParametersTestCase, self).setUp()
self.node = obj_utils.create_test_node(
self.context,
driver='fake_irmc',
driver_info=db_utils.get_test_irmc_info())
def test_parse_driver_info(self):
info = irmc_common.parse_driver_info(self.node)
self.assertEqual('1.2.3.4', info['irmc_address'])
self.assertEqual('admin0', info['irmc_username'])
self.assertEqual('fake0', info['irmc_password'])
self.assertEqual(60, info['irmc_client_timeout'])
self.assertEqual(80, info['irmc_port'])
self.assertEqual('digest', info['irmc_auth_method'])
self.assertEqual('ipmitool', info['irmc_sensor_method'])
self.assertEqual('v2c', info['irmc_snmp_version'])
self.assertEqual(161, info['irmc_snmp_port'])
self.assertEqual('public', info['irmc_snmp_community'])
self.assertFalse(info['irmc_snmp_security'])
def test_parse_driver_option_default(self):
self.node.driver_info = {
"irmc_address": "1.2.3.4",
"irmc_username": "admin0",
"irmc_password": "fake0",
}
info = irmc_common.parse_driver_info(self.node)
self.assertEqual('basic', info['irmc_auth_method'])
self.assertEqual(443, info['irmc_port'])
self.assertEqual(60, info['irmc_client_timeout'])
self.assertEqual('ipmitool', info['irmc_sensor_method'])
def test_parse_driver_info_missing_address(self):
del self.node.driver_info['irmc_address']
self.assertRaises(exception.MissingParameterValue,
irmc_common.parse_driver_info, self.node)
def test_parse_driver_info_missing_username(self):
del self.node.driver_info['irmc_username']
self.assertRaises(exception.MissingParameterValue,
irmc_common.parse_driver_info, self.node)
def test_parse_driver_info_missing_password(self):
del self.node.driver_info['irmc_password']
self.assertRaises(exception.MissingParameterValue,
irmc_common.parse_driver_info, self.node)
def test_parse_driver_info_invalid_timeout(self):
self.node.driver_info['irmc_client_timeout'] = 'qwe'
self.assertRaises(exception.InvalidParameterValue,
irmc_common.parse_driver_info, self.node)
def test_parse_driver_info_invalid_port(self):
self.node.driver_info['irmc_port'] = 'qwe'
self.assertRaises(exception.InvalidParameterValue,
irmc_common.parse_driver_info, self.node)
def test_parse_driver_info_invalid_auth_method(self):
self.node.driver_info['irmc_auth_method'] = 'qwe'
self.assertRaises(exception.InvalidParameterValue,
irmc_common.parse_driver_info, self.node)
def test_parse_driver_info_invalid_sensor_method(self):
self.node.driver_info['irmc_sensor_method'] = 'qwe'
self.assertRaises(exception.InvalidParameterValue,
irmc_common.parse_driver_info, self.node)
def test_parse_driver_info_missing_multiple_params(self):
del self.node.driver_info['irmc_password']
del self.node.driver_info['irmc_address']
e = self.assertRaises(exception.MissingParameterValue,
irmc_common.parse_driver_info, self.node)
self.assertIn('irmc_password', str(e))
self.assertIn('irmc_address', str(e))
def test_parse_driver_info_invalid_snmp_version(self):
self.node.driver_info['irmc_snmp_version'] = 'v3x'
self.assertRaises(exception.InvalidParameterValue,
irmc_common.parse_driver_info, self.node)
def test_parse_driver_info_invalid_snmp_port(self):
self.node.driver_info['irmc_snmp_port'] = '161'
self.assertRaises(exception.InvalidParameterValue,
irmc_common.parse_driver_info, self.node)
def test_parse_driver_info_invalid_snmp_community(self):
self.node.driver_info['irmc_snmp_version'] = 'v2c'
self.node.driver_info['irmc_snmp_community'] = 100
self.assertRaises(exception.InvalidParameterValue,
irmc_common.parse_driver_info, self.node)
def test_parse_driver_info_invalid_snmp_security(self):
self.node.driver_info['irmc_snmp_version'] = 'v3'
self.node.driver_info['irmc_snmp_security'] = 100
self.assertRaises(exception.InvalidParameterValue,
irmc_common.parse_driver_info, self.node)
def test_parse_driver_info_empty_snmp_security(self):
self.node.driver_info['irmc_snmp_version'] = 'v3'
self.node.driver_info['irmc_snmp_security'] = ''
self.assertRaises(exception.InvalidParameterValue,
irmc_common.parse_driver_info, self.node)
class IRMCCommonMethodsTestCase(db_base.DbTestCase):
def setUp(self):
super(IRMCCommonMethodsTestCase, self).setUp()
mgr_utils.mock_the_extension_manager(driver="fake_irmc")
self.info = db_utils.get_test_irmc_info()
self.node = obj_utils.create_test_node(
self.context,
driver='fake_irmc',
driver_info=self.info)
@mock.patch.object(irmc_common, 'scci',
spec_set=mock_specs.SCCICLIENT_IRMC_SCCI_SPEC)
def test_get_irmc_client(self, mock_scci):
self.info['irmc_port'] = 80
self.info['irmc_auth_method'] = 'digest'
self.info['irmc_client_timeout'] = 60
mock_scci.get_client.return_value = 'get_client'
returned_mock_scci_get_client = irmc_common.get_irmc_client(self.node)
mock_scci.get_client.assert_called_with(
self.info['irmc_address'],
self.info['irmc_username'],
self.info['irmc_password'],
port=self.info['irmc_port'],
auth_method=self.info['irmc_auth_method'],
client_timeout=self.info['irmc_client_timeout'])
self.assertEqual('get_client', returned_mock_scci_get_client)
def test_update_ipmi_properties(self):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
ipmi_info = {
"ipmi_address": "1.2.3.4",
"ipmi_username": "admin0",
"ipmi_password": "fake0",
}
task.node.driver_info = self.info
irmc_common.update_ipmi_properties(task)
actual_info = task.node.driver_info
expected_info = dict(self.info, **ipmi_info)
self.assertEqual(expected_info, actual_info)
@mock.patch.object(irmc_common, 'scci',
spec_set=mock_specs.SCCICLIENT_IRMC_SCCI_SPEC)
def test_get_irmc_report(self, mock_scci):
self.info['irmc_port'] = 80
self.info['irmc_auth_method'] = 'digest'
self.info['irmc_client_timeout'] = 60
mock_scci.get_report.return_value = 'get_report'
returned_mock_scci_get_report = irmc_common.get_irmc_report(self.node)
mock_scci.get_report.assert_called_with(
self.info['irmc_address'],
self.info['irmc_username'],
self.info['irmc_password'],
port=self.info['irmc_port'],
auth_method=self.info['irmc_auth_method'],
client_timeout=self.info['irmc_client_timeout'])
self.assertEqual('get_report', returned_mock_scci_get_report)
def test_out_range_port(self):
self.assertRaises(ValueError, cfg.CONF.set_override,
'port', 60, 'irmc')
def test_out_range_auth_method(self):
self.assertRaises(ValueError, cfg.CONF.set_override,
'auth_method', 'fake', 'irmc')
def test_out_range_sensor_method(self):
self.assertRaises(ValueError, cfg.CONF.set_override,
'sensor_method', 'fake', 'irmc')
@mock.patch.object(irmc_common, 'elcm',
spec_set=mock_specs.SCCICLIENT_IRMC_ELCM_SPEC)
def test_set_secure_boot_mode_enable(self, mock_elcm):
mock_elcm.set_secure_boot_mode.return_value = 'set_secure_boot_mode'
info = irmc_common.parse_driver_info(self.node)
irmc_common.set_secure_boot_mode(self.node, True)
mock_elcm.set_secure_boot_mode.assert_called_once_with(
info, True)
@mock.patch.object(irmc_common, 'elcm',
spec_set=mock_specs.SCCICLIENT_IRMC_ELCM_SPEC)
def test_set_secure_boot_mode_disable(self, mock_elcm):
mock_elcm.set_secure_boot_mode.return_value = 'set_secure_boot_mode'
info = irmc_common.parse_driver_info(self.node)
irmc_common.set_secure_boot_mode(self.node, False)
mock_elcm.set_secure_boot_mode.assert_called_once_with(
info, False)
@mock.patch.object(irmc_common, 'elcm',
spec_set=mock_specs.SCCICLIENT_IRMC_ELCM_SPEC)
@mock.patch.object(irmc_common, 'scci',
spec_set=mock_specs.SCCICLIENT_IRMC_SCCI_SPEC)
def test_set_secure_boot_mode_fail(self, mock_scci, mock_elcm):
irmc_common.scci.SCCIError = Exception
mock_elcm.set_secure_boot_mode.side_effect = Exception
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
self.assertRaises(exception.IRMCOperationError,
irmc_common.set_secure_boot_mode,
task.node, True)
info = irmc_common.parse_driver_info(task.node)
mock_elcm.set_secure_boot_mode.assert_called_once_with(
info, True)
| apache-2.0 |
erickt/hue | apps/beeswax/src/beeswax/design.py | 26 | 9530 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The HQLdesign class can (de)serialize a design to/from a QueryDict.
"""
import json
import logging
import os
import re
import urlparse
import django.http
from django import forms
from desktop.lib.django_forms import BaseSimpleFormSet, MultiForm
from desktop.lib.django_mako import render_to_string
from hadoop.cluster import get_hdfs
LOG = logging.getLogger(__name__)
SERIALIZATION_VERSION = '0.4.1'
def hql_query(hql, database='default', query_type=None):
data_dict = json.loads('{"query": {"email_notify": false, "query": null, "type": 0, "is_parameterized": true, "database": "default"}, '
'"functions": [], "VERSION": "0.4.1", "file_resources": [], "settings": []}')
if not (isinstance(hql, str) or isinstance(hql, unicode)):
raise Exception('Requires a SQL text query of type <str>, <unicode> and not %s' % type(hql))
data_dict['query']['query'] = strip_trailing_semicolon(hql)
data_dict['query']['database'] = database
if query_type:
data_dict['query']['type'] = query_type
hql_design = HQLdesign()
hql_design._data_dict = data_dict
return hql_design
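# Small usage sketch: build a design from a raw SQL string (the statement text is
# only an illustration) and inspect the parsed statements.
def _example_hql_design():
  design = hql_query("USE default; SELECT COUNT(*) FROM sample_07;")
  return design.statement_count, design.get_query_statement(0)  # (2, 'USE default')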
class HQLdesign(object):
"""
Represents an HQL design, with methods to perform (de)serialization.
We support queries that aren't parameterized, in case users
want to use "$" natively, but we leave that as an advanced
option to turn off.
"""
_QUERY_ATTRS = [ 'query', 'type', 'is_parameterized', 'email_notify', 'database' ]
_SETTINGS_ATTRS = [ 'key', 'value' ]
_FILE_RES_ATTRS = [ 'type', 'path' ]
_FUNCTIONS_ATTRS = [ 'name', 'class_name' ]
def __init__(self, form=None, query_type=None):
"""Initialize the design from a valid form data."""
if form is not None:
assert isinstance(form, MultiForm)
self._data_dict = {
'query': normalize_form_dict(form.query, HQLdesign._QUERY_ATTRS),
'settings': normalize_formset_dict(form.settings, HQLdesign._SETTINGS_ATTRS),
'file_resources': normalize_formset_dict(form.file_resources, HQLdesign._FILE_RES_ATTRS),
'functions': normalize_formset_dict(form.functions, HQLdesign._FUNCTIONS_ATTRS)
}
if query_type is not None:
self._data_dict['query']['type'] = query_type
def dumps(self):
"""Returns the serialized form of the design in a string"""
dic = self._data_dict.copy()
dic['VERSION'] = SERIALIZATION_VERSION
return json.dumps(dic)
@property
def hql_query(self):
return self._data_dict['query']['query']
@hql_query.setter
def hql_query(self, query):
self._data_dict['query']['query'] = query
@property
def query(self):
return self._data_dict['query'].copy()
@property
def settings(self):
return list(self._data_dict['settings'])
@property
def file_resources(self):
return list(self._data_dict['file_resources'])
@property
def functions(self):
return list(self._data_dict['functions'])
def get_configuration_statements(self):
configuration = []
for f in self.file_resources:
if not urlparse.urlsplit(f['path']).scheme:
scheme = get_hdfs().fs_defaultfs
else:
scheme = ''
configuration.append(render_to_string("hql_resource.mako", dict(type=f['type'], path=f['path'], scheme=scheme)))
for f in self.functions:
configuration.append(render_to_string("hql_function.mako", f))
return configuration
def get_query_dict(self):
# We construct the mform to use its structure and prefix. We don't actually bind data to the forms.
from beeswax.forms import QueryForm
mform = QueryForm()
mform.bind()
res = django.http.QueryDict('', mutable=True)
res.update(denormalize_form_dict(
self._data_dict['query'], mform.query, HQLdesign._QUERY_ATTRS))
res.update(denormalize_formset_dict(
self._data_dict['settings'], mform.settings, HQLdesign._SETTINGS_ATTRS))
res.update(denormalize_formset_dict(
self._data_dict['file_resources'], mform.file_resources, HQLdesign._FILE_RES_ATTRS))
res.update(denormalize_formset_dict(
self._data_dict['functions'], mform.functions, HQLdesign._FUNCTIONS_ATTRS))
return res
@staticmethod
def loads(data):
"""Returns an HQLdesign from the serialized form"""
dic = json.loads(data)
dic = dict(map(lambda k: (str(k), dic.get(k)), dic.keys()))
if dic['VERSION'] != SERIALIZATION_VERSION:
LOG.error('Design version mismatch. Found %s; expect %s' % (dic['VERSION'], SERIALIZATION_VERSION))
# Convert to latest version
del dic['VERSION']
if 'type' not in dic['query'] or dic['query']['type'] is None:
dic['query']['type'] = 0
if 'database' not in dic['query']:
dic['query']['database'] = 'default'
design = HQLdesign()
design._data_dict = dic
return design
def get_query(self):
return self._data_dict["query"]
@property
def statement_count(self):
return len(self.statements)
def get_query_statement(self, n=0):
return self.statements[n]
@property
def statements(self):
hql_query = strip_trailing_semicolon(self.hql_query)
return [strip_trailing_semicolon(statement.strip()) for statement in split_statements(hql_query)]
def __eq__(self, other):
return (isinstance(other, self.__class__) and self.__dict__ == other.__dict__)
def __ne__(self, other):
return not self.__eq__(other)
def split_statements(hql):
"""
Split statments at semicolons ignoring the ones inside
quotes and comments. The comment symbols that come
inside quotes should be ignored.
"""
statements = []
current = ''
prev = ''
between_quotes = None
is_comment = None
lines = hql.splitlines()
for line in lines:
for c in line:
current += c
if c in ('"', "'") and prev != '\\' and is_comment is None:
if between_quotes == c:
between_quotes = None
elif between_quotes is None:
between_quotes = c
elif c == '-' and prev == '-' and between_quotes is None and is_comment is None:
is_comment = True
elif c == ';':
if between_quotes is None and is_comment is None:
current = current.strip()
# Strip off the trailing semicolon
current = current[:-1]
if len(current) > 1:
statements.append(current)
current = ''
# This character holds no significance if it was escaped within a string
if prev == '\\' and between_quotes is not None:
c = ''
prev = c
is_comment = None
prev = os.linesep
if current != '':
current += os.linesep
if current and current != ';':
current = current.strip()
statements.append(current)
return statements
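# Small illustration of the splitting rules above (the query text is arbitrary):
# the semicolon inside the quoted literal does not end a statement.
def _example_split_statements():
  hql = "SELECT 'a;b' FROM t1;\nSELECT * FROM t2"
  return split_statements(hql)  # ["SELECT 'a;b' FROM t1", "SELECT * FROM t2"]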
def normalize_form_dict(form, attr_list):
"""
normalize_form_dict(form, attr_list) -> A dictionary of (attr, value)
Each attr is a field name. And the value is obtained by looking up the form's data dict.
"""
assert isinstance(form, forms.Form)
res = { }
for attr in attr_list:
res[attr] = form.cleaned_data.get(attr)
return res
def normalize_formset_dict(formset, attr_list):
"""
normalize_formset_dict(formset, attr_list) -> A list of dictionary of (attr, value)
"""
assert isinstance(formset, BaseSimpleFormSet)
res = [ ]
for form in formset.forms:
res.append(normalize_form_dict(form, attr_list))
return res
def denormalize_form_dict(data_dict, form, attr_list):
"""
denormalize_form_dict(data_dict, form, attr_list) -> A QueryDict with the attributes set
"""
assert isinstance(form, forms.Form)
res = django.http.QueryDict('', mutable=True)
for attr in attr_list:
try:
res[str(form.add_prefix(attr))] = data_dict[attr]
except KeyError:
pass
return res
def denormalize_formset_dict(data_dict_list, formset, attr_list):
"""
denormalize_formset_dict(data_dict, form, attr_list) -> A QueryDict with the attributes set
"""
assert isinstance(formset, BaseSimpleFormSet)
res = django.http.QueryDict('', mutable=True)
for i, data_dict in enumerate(data_dict_list):
prefix = formset.make_prefix(i)
form = formset.form(prefix=prefix)
res.update(denormalize_form_dict(data_dict, form, attr_list))
res[prefix + '-_exists'] = 'True'
res[str(formset.management_form.add_prefix('next_form_id'))] = str(len(data_dict_list))
return res
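# For illustration, denormalizing two settings entries yields QueryDict keys shaped
# roughly like the following (exact prefixes depend on the formset configuration):
#   settings-0-key, settings-0-value, settings-0-_exists,
#   settings-1-key, settings-1-value, settings-1-_exists,
#   settings-next_form_id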
def __str__(self):
return '%s: %s' % (self.__class__, self.query)
_SEMICOLON_WHITESPACE = re.compile(";\s*$")
def strip_trailing_semicolon(query):
"""As a convenience, we remove trailing semicolons from queries."""
s = _SEMICOLON_WHITESPACE.split(query, 2)
if len(s) > 1:
assert len(s) == 2
assert s[1] == ''
return s[0]
| apache-2.0 |
playerNaN/NaNPyGameEngine | engine.py | 1 | 5921 | import pygame
import sys
import os
from collections import namedtuple
import time
import resourcemanager
ColorList = namedtuple("ColorList", "black white red green blue")
colors = ColorList((0,0,0),(0xFF,0xFF,0xFF),(0xFF,0,0),(0,0xFF,0),(0,0,0xFF))
PyListener = namedtuple("PyListener", "condition effect")
PyEventListener = namedtuple("PyEventListener","events condition effect")
class Pyengine:
def __init__(self,size):
pygame.init()
self.__size = size
self.__fps = 60
self.__bg = colors.white
self.__fg = colors.black
self.__on_update = []
self.__on_draw = []
self.__keys_down = {}
self.__listeners = []
self.__event_handlers = []
self.__mouse_down = {}
self.__display = None
self.__screen_centerX = size[0]/2
self.__scaleX = 1.0
self.__scaleY = 1.0
self.__screen_centerY = size[1]/2
self.__clock = pygame.time.Clock()
self.__buffer_surface = None
self.__resource_manager = resourcemanager.ResourceManager()
self.__animators = {}
def add_animator(self,name,animator):
self.__animators[name] = animator
def remove_animator(self,name):
del self.__animators[name]
def get_animator(self,name):
return self.__animators[name]
def set_scale_x(self,x):
self.__scaleX = x
def get_scale_x(self):
return self.__scaleX
def set_scale_y(self,y):
self.__scaleY = y
def get_scale_y(self):
return self.__scaleY
def set_scale(self,s):
self.__scaleX = s[0]
self.__scaleY = s[1]
def get_scale(self):
return (self.__scaleX,self.__scaleY)
def set_fg(self,fg):
self.__fg = fg
def get_fg(self):
return self.__fg
def set_bg(self,bg):
self.__bg = bg
def get_bg(self):
return self.__bg
def get_display(self):
        return self.__display
def set_screen_center_x(self,x):
self.__screen_centerX = x
def get_screen_center_x(self):
return self.__screen_centerX
def set_screen_center_y(self,y):
self.__screen_centerY = y
def get_screen_center_y(self):
return self.__screen_centerY
def set_screen_center(self,pos):
self.__screen_centerX = pos[0]
self.__screen_centerY = pos[1]
def get_screen_center(self):
return (self.__screen_centerX,self.__screen_centerY)
def get_buffer_surface(self):
return self.__buffer_surface
def get_resource_manager(self):
return self.__resource_manager
def update_all_animators(self):
ms = self.__clock.get_time()
for i in self.__animators:
self.__animators[i].update(ms)
def draw_all_animators(self):
for i in self.__animators:
self.draw_image(self.__animators[i].get_current_image(),self.__animators[i].get_position())
def handle_events(self):
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.exit()
elif event.type == pygame.KEYDOWN:
self.__keys_down[event.key] = True
elif event.type == pygame.KEYUP:
self.__keys_down[event.key] = False
            elif event.type == pygame.MOUSEBUTTONDOWN:
                self.__mouse_down[event.button] = True
            elif event.type == pygame.MOUSEBUTTONUP:
                self.__mouse_down[event.button] = False
for handler in self.__event_handlers:
if event.type in handler.events and handler.condition(self,event):
handler.effect(self,event)
def draw_image(self,name,pos):
self.__buffer_surface.blit(self.__resource_manager.get_image(name),pos)
def is_key_down(self,key):
if not key in self.__keys_down:
return False
return self.__keys_down[key]
def is_mouse_button_down(self,button):
if not button in self.__mouse_down:
return False
return self.__mouse_down[button]
def run(self):
screen = pygame.display.set_mode(self.__size)
self.__display = screen
oldTime = time.time()
while True:
spf = 1.0 / self.__fps
self.handle_events()
self.update()
self.draw(screen)
self.__clock.tick(self.__fps)
def exit(self):
pygame.display.quit()
pygame.quit()
sys.exit()
def update(self):
self.update_all_animators()
for l in self.__listeners:
if l.condition(self):
l.effect(self)
def draw(self,display):
self.__buffer_surface = pygame.Surface(display.get_size())
display.fill(colors.red)
self.__buffer_surface.fill(self.__bg)
for od in self.__on_draw:
od(self,self.__buffer_surface)
self.draw_all_animators()
src_size = (self.__size[0]/self.__scaleX,self.__size[1]/self.__scaleY)
top = self.__screen_centerY - src_size[1] / 2
left = self.__screen_centerX - src_size[0] / 2
cropped = pygame.Surface(src_size)
cropped.blit(self.__buffer_surface,(0,0),(left,top,src_size[0],src_size[1]))
cropped = pygame.transform.scale(cropped,self.__size)
display.blit(cropped,(0,0))
pygame.display.update((0,0,self.__size[0],self.__size[1]))
def add_draw_listener(self,f):
self.__on_draw.append(f)
def add_listener(self,condition,effect):
self.__listeners.append(PyListener(condition,effect))
def add_on_update(self,effect):
        self.add_listener(lambda s:True,effect)
def add_event_listener(self,events,condition,effect):
self.__event_handlers.append(PyEventListener(events,condition,effect))
def set_fps(self,fps):
self.__fps = fps
def get_fps(self):
return self.__fps
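# Rough usage sketch (window size, key binding and FPS are arbitrary choices):
# quit when Escape is pressed and draw a rectangle each frame.
def _example_engine():
    engine = Pyengine((640, 480))
    engine.set_fps(30)
    engine.add_event_listener([pygame.KEYDOWN],
                              lambda eng, event: event.key == pygame.K_ESCAPE,
                              lambda eng, event: eng.exit())
    engine.add_draw_listener(lambda eng, surface: pygame.draw.rect(surface, colors.blue, (10, 10, 50, 50)))
    engine.run()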
| unlicense |
pinnamur/titanium_mobile | support/iphone/provisioner.py | 34 | 3613 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Install a provisioning profile
#
import os, sys, subprocess, re, time, poorjson, types
from xml.dom.minidom import parseString
import codecs
from OpenSSL import crypto
def dequote(s):
if s[0:1] == '"':
return s[1:-1]
return s
def getText(nodelist):
rc = ""
for node in nodelist:
if node.nodeType == node.TEXT_NODE:
rc = rc + node.data
return rc
def make_map(dict):
props = {}
curkey = None
for i in dict.childNodes:
if i.nodeType == 1:
if i.nodeName == 'key':
curkey = str(getText(i.childNodes)).strip()
elif i.nodeName == 'dict':
props[curkey] = make_map(i)
curkey = None
elif i.nodeName == 'array':
s = i.getElementsByTagName('string')
if len(s):
txt = ''
for t in s:
txt+=getText(t.childNodes)
props[curkey]=txt
else:
props[curkey]=None
curkey = None
else:
props[curkey] = getText(i.childNodes)
curkey = None
return props
def find_dict_element(dict,name):
found = False
for i in dict.childNodes:
if i.nodeType == 1:
if i.nodeName == 'key':
if str(getText(i.childNodes)).strip() == name:
found = True
elif found:
return i
return None
def get_cert(dict):
certs_array = find_dict_element(dict, 'DeveloperCertificates')
if certs_array:
certs_array = certs_array.getElementsByTagName('data')
if not certs_array or not len(certs_array):
return None
cert_text = str(getText(certs_array[0].childNodes)).strip()
cert_text = "-----BEGIN CERTIFICATE-----\n" + cert_text + "\n-----END CERTIFICATE-----\n"
cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert_text)
return cert
def main(args):
if len(args)!=2:
print "%s <provisioning_file>" % os.path.basename(args[0])
sys.exit(1)
try:
xml = os.path.abspath(os.path.expanduser(dequote(args[1].decode("utf-8"))))
f = open(xml,'rb').read()
b = f.index('<?xml')
e = f.index('</plist>')
xml_content = f[b:e+8]
dom = parseString(xml_content)
dict = dom.getElementsByTagName('dict')[0]
props = make_map(dict)
profile_type = 'unknown'
if len(re.findall('ProvisionedDevices',xml_content)) > 0:
profile_type = 'development'
try:
cert = get_cert(dict)
if cert and re.search('Distribution:', cert.get_subject().commonName):
profile_type = 'adhoc'
except Exception, e:
sys.stderr.write('ERROR: %s\n' % str(e))
else:
profile_type = 'distribution'
name = props['Name']
name = name.decode('string_escape').decode('utf-8')
entitlements = props['Entitlements']
appid = entitlements['application-identifier']
appid_prefix = props['ApplicationIdentifierPrefix']
uuid = props['UUID']
bundle_id = appid.replace(appid_prefix+'.','')
# check to see if xcode is already running
output = subprocess.Popen(["ps", "-ef"], stdout=subprocess.PIPE).communicate()[0]
is_xcode = re.findall(r'Xcode.app',output)
xcode = len(is_xcode) > 0
# now we need to install the cert
# we essentially open xcode causing the cert to be installed
# automagically (but -g tells it to stay in the background)
cmd = "open -g \"%s\"" % xml
os.system(cmd)
# only kill Xcode if it wasn't already running
if xcode == False:
# give it a sec to install before killing it
time.sleep(1.5)
cmd = "killall Xcode"
os.system(cmd)
print poorjson.PoorJSON().dump({'type':profile_type,'appid':bundle_id, 'prefix':appid_prefix, 'name':name, 'uuid': uuid})
sys.exit(0)
except Exception, e:
print e
sys.exit(10)
if __name__ == "__main__":
main(sys.argv)
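# Example invocation and output shape (all values below are invented placeholders):
#
#   python provisioner.py ~/Downloads/MyApp.mobileprovision
#   {"type": "development", "appid": "com.example.myapp", "prefix": "ABCDE12345",
#    "name": "My App Dev Profile", "uuid": "00000000-0000-0000-0000-000000000000"}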
| apache-2.0 |
gsehub/edx-platform | lms/djangoapps/lti_provider/tests/test_tasks.py | 12 | 4312 | """
Tests for the LTI outcome service handlers, both in outcomes.py and in tasks.py
"""
import ddt
from django.test import TestCase
from mock import MagicMock, patch
from opaque_keys.edx.locator import BlockUsageLocator, CourseLocator
import lti_provider.tasks as tasks
from lti_provider.models import GradedAssignment, LtiConsumer, OutcomeService
from student.tests.factories import UserFactory
class BaseOutcomeTest(TestCase):
"""
Super type for tests of both the leaf and composite outcome celery tasks.
"""
def setUp(self):
super(BaseOutcomeTest, self).setUp()
self.course_key = CourseLocator(
org='some_org',
course='some_course',
run='some_run'
)
self.usage_key = BlockUsageLocator(
course_key=self.course_key,
block_type='problem',
block_id='block_id'
)
self.user = UserFactory.create()
self.consumer = LtiConsumer(
consumer_name='Lti Consumer Name',
consumer_key='consumer_key',
consumer_secret='consumer_secret',
instance_guid='tool_instance_guid'
)
self.consumer.save()
outcome = OutcomeService(
lis_outcome_service_url='http://example.com/service_url',
lti_consumer=self.consumer
)
outcome.save()
self.assignment = GradedAssignment(
user=self.user,
course_key=self.course_key,
usage_key=self.usage_key,
outcome_service=outcome,
lis_result_sourcedid='sourcedid',
version_number=1,
)
self.assignment.save()
self.send_score_update_mock = self.setup_patch(
'lti_provider.outcomes.send_score_update', None
)
def setup_patch(self, function_name, return_value):
"""
Patch a method with a given return value, and return the mock
"""
mock = MagicMock(return_value=return_value)
new_patch = patch(function_name, new=mock)
new_patch.start()
self.addCleanup(new_patch.stop)
return mock
@ddt.ddt
class SendLeafOutcomeTest(BaseOutcomeTest):
"""
Tests for the send_leaf_outcome method in tasks.py
"""
shard = 4
@ddt.data(
(2.0, 2.0, 1.0),
(2.0, 0.0, 0.0),
(1, 2, 0.5),
)
@ddt.unpack
def test_outcome_with_score(self, earned, possible, expected):
tasks.send_leaf_outcome(
self.assignment.id,
earned,
possible
)
self.send_score_update_mock.assert_called_once_with(self.assignment, expected)
@ddt.ddt
class SendCompositeOutcomeTest(BaseOutcomeTest):
"""
Tests for the send_composite_outcome method in tasks.py
"""
shard = 4
def setUp(self):
super(SendCompositeOutcomeTest, self).setUp()
self.descriptor = MagicMock()
self.descriptor.location = BlockUsageLocator(
course_key=self.course_key,
block_type='problem',
block_id='problem',
)
self.course_grade = MagicMock()
self.course_grade_mock = self.setup_patch(
'lti_provider.tasks.CourseGradeFactory.read', self.course_grade
)
self.module_store = MagicMock()
self.module_store.get_item = MagicMock(return_value=self.descriptor)
self.check_result_mock = self.setup_patch(
'lti_provider.tasks.modulestore',
self.module_store
)
@ddt.data(
(2.0, 2.0, 1.0),
(2.0, 0.0, 0.0),
(1, 2, 0.5),
)
@ddt.unpack
def test_outcome_with_score_score(self, earned, possible, expected):
self.course_grade.score_for_module = MagicMock(return_value=(earned, possible))
tasks.send_composite_outcome(
self.user.id, unicode(self.course_key), self.assignment.id, 1
)
self.send_score_update_mock.assert_called_once_with(self.assignment, expected)
def test_outcome_with_outdated_version(self):
self.assignment.version_number = 2
self.assignment.save()
tasks.send_composite_outcome(
self.user.id, unicode(self.course_key), self.assignment.id, 1
)
self.assertEqual(self.course_grade_mock.call_count, 0)
| agpl-3.0 |
Azure/azure-sdk-for-python | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2018_06_01/aio/operations/_virtual_machine_scale_set_rolling_upgrades_operations.py | 1 | 21363 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class VirtualMachineScaleSetRollingUpgradesOperations:
"""VirtualMachineScaleSetRollingUpgradesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2018_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _cancel_initial(
self,
resource_group_name: str,
vm_scale_set_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-06-01"
# Construct URL
url = self._cancel_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_cancel_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/rollingUpgrades/cancel'} # type: ignore
async def begin_cancel(
self,
resource_group_name: str,
vm_scale_set_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Cancels the current virtual machine scale set rolling upgrade.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set.
:type vm_scale_set_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._cancel_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_cancel.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/rollingUpgrades/cancel'} # type: ignore
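    # Illustrative usage sketch (not part of the generated operations class): assuming an
    # already-configured async ComputeManagementClient named `compute_client` and
    # hypothetical resource names, the cancel operation above is typically driven as:
    #
    #     poller = await compute_client.virtual_machine_scale_set_rolling_upgrades.begin_cancel(
    #         resource_group_name="example-rg",   # hypothetical resource group
    #         vm_scale_set_name="example-vmss",   # hypothetical scale set
    #     )
    #     await poller.result()                   # waits until the long-running cancel completes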
async def _start_os_upgrade_initial(
self,
resource_group_name: str,
vm_scale_set_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-06-01"
# Construct URL
url = self._start_os_upgrade_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_start_os_upgrade_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/osRollingUpgrade'} # type: ignore
async def begin_start_os_upgrade(
self,
resource_group_name: str,
vm_scale_set_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Starts a rolling upgrade to move all virtual machine scale set instances to the latest
available Platform Image OS version. Instances which are already running the latest available
OS version are not affected.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set.
:type vm_scale_set_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._start_os_upgrade_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_start_os_upgrade.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/osRollingUpgrade'} # type: ignore
async def _start_extension_upgrade_initial(
self,
resource_group_name: str,
vm_scale_set_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-06-01"
# Construct URL
url = self._start_extension_upgrade_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_start_extension_upgrade_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/extensionRollingUpgrade'} # type: ignore
async def begin_start_extension_upgrade(
self,
resource_group_name: str,
vm_scale_set_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Starts a rolling upgrade to move all extensions for all virtual machine scale set instances to
the latest available extension version. Instances which are already running the latest
extension versions are not affected.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set.
:type vm_scale_set_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._start_extension_upgrade_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_start_extension_upgrade.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/extensionRollingUpgrade'} # type: ignore
async def get_latest(
self,
resource_group_name: str,
vm_scale_set_name: str,
**kwargs: Any
) -> "_models.RollingUpgradeStatusInfo":
"""Gets the status of the latest virtual machine scale set rolling upgrade.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set.
:type vm_scale_set_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RollingUpgradeStatusInfo, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2018_06_01.models.RollingUpgradeStatusInfo
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RollingUpgradeStatusInfo"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-06-01"
accept = "application/json"
# Construct URL
url = self.get_latest.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RollingUpgradeStatusInfo', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_latest.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/rollingUpgrades/latest'} # type: ignore
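    # Illustrative sketch (same hypothetical `compute_client` as assumed above): get_latest
    # is a plain awaitable call rather than an LRO, so it returns the deserialized
    # RollingUpgradeStatusInfo directly:
    #
    #     status = await compute_client.virtual_machine_scale_set_rolling_upgrades.get_latest(
    #         resource_group_name="example-rg",
    #         vm_scale_set_name="example-vmss",
    #     )
    #     print(status.running_status)            # inspect the current rolling-upgrade state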
| mit |
hyqneuron/pylearn2-maxsom | pylearn2/sandbox/cuda_convnet/tests/test_common.py | 49 | 2802 | __authors__ = "Ian Goodfellow, David Warde-Farley"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow, David Warde-Farley"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
from pylearn2.testing.skip import skip_if_no_gpu
skip_if_no_gpu()
import numpy as np
from theano import shared
from pylearn2.sandbox.cuda_convnet.filter_acts import FilterActs
from pylearn2.sandbox.cuda_convnet.img_acts import ImageActs
from theano.sandbox.cuda import gpu_from_host
from theano import function
from theano.tensor import as_tensor_variable
def test_reject_rect():
for cls in (FilterActs, ImageActs):
# Tests that running FilterActs with a non-square
# kernel is an error
rng = np.random.RandomState([2012, 10, 9])
batch_size = 5
rows = 10
cols = 9
channels = 3
filter_rows = 4
filter_cols = filter_rows + 1
num_filters = 6
images = shared(rng.uniform(-1., 1., (channels, rows, cols,
batch_size)).astype('float32'), name='images')
filters = shared(rng.uniform(-1., 1., (channels, filter_rows,
filter_cols, num_filters)).astype('float32'), name='filters')
gpu_images = gpu_from_host(images)
gpu_filters = gpu_from_host(filters)
if cls is ImageActs:
output = cls()(gpu_images, gpu_filters,
as_tensor_variable((rows, cols)))
else:
output = cls()(gpu_images, gpu_filters)
f = function([], output)
try:
output = f()
except ValueError:
continue
assert False
def test_reject_bad_filt_number():
for cls in (FilterActs, ImageActs):
# Tests that running FilterActs with a # of filters per
# group that is not 16 is an error
rng = np.random.RandomState([2012, 10, 9])
batch_size = 5
rows = 10
cols = 9
channels = 3
filter_rows = 4
filter_cols = filter_rows
num_filters = 6
images = shared(rng.uniform(-1., 1., (channels, rows, cols,
batch_size)).astype('float32'), name='images')
filters = shared(rng.uniform(-1., 1., (channels, filter_rows,
filter_cols, num_filters)).astype('float32'), name='filters')
gpu_images = gpu_from_host(images)
gpu_filters = gpu_from_host(filters)
if cls is ImageActs:
output = cls()(gpu_images, gpu_filters,
as_tensor_variable((rows, cols)))
else:
output = cls()(gpu_images, gpu_filters)
f = function([], output)
try:
output = f()
except ValueError:
continue
assert False
| bsd-3-clause |
pianomania/scikit-learn | sklearn/utils/tests/test_random.py | 85 | 7349 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from scipy.misc import comb as combinations
from numpy.testing import assert_array_almost_equal
from sklearn.utils.random import sample_without_replacement
from sklearn.utils.random import random_choice_csc
from sklearn.utils.testing import (
assert_raises,
assert_equal,
assert_true)
###############################################################################
# test custom sampling without replacement algorithm
###############################################################################
def test_invalid_sample_without_replacement_algorithm():
assert_raises(ValueError, sample_without_replacement, 5, 4, "unknown")
def test_sample_without_replacement_algorithms():
methods = ("auto", "tracking_selection", "reservoir_sampling", "pool")
for m in methods:
def sample_without_replacement_method(n_population, n_samples,
random_state=None):
return sample_without_replacement(n_population, n_samples,
method=m,
random_state=random_state)
check_edge_case_of_sample_int(sample_without_replacement_method)
check_sample_int(sample_without_replacement_method)
check_sample_int_distribution(sample_without_replacement_method)
def check_edge_case_of_sample_int(sample_without_replacement):
# n_population < n_sample
assert_raises(ValueError, sample_without_replacement, 0, 1)
assert_raises(ValueError, sample_without_replacement, 1, 2)
# n_population == n_samples
assert_equal(sample_without_replacement(0, 0).shape, (0, ))
assert_equal(sample_without_replacement(1, 1).shape, (1, ))
# n_population >= n_samples
assert_equal(sample_without_replacement(5, 0).shape, (0, ))
assert_equal(sample_without_replacement(5, 1).shape, (1, ))
# n_population < 0 or n_samples < 0
assert_raises(ValueError, sample_without_replacement, -1, 5)
assert_raises(ValueError, sample_without_replacement, 5, -1)
def check_sample_int(sample_without_replacement):
# This test is heavily inspired from test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# the sample is of the correct length and contains only unique items
n_population = 100
for n_samples in range(n_population + 1):
s = sample_without_replacement(n_population, n_samples)
assert_equal(len(s), n_samples)
unique = np.unique(s)
assert_equal(np.size(unique), n_samples)
assert_true(np.all(unique < n_population))
# test edge case n_population == n_samples == 0
assert_equal(np.size(sample_without_replacement(0, 0)), 0)
def check_sample_int_distribution(sample_without_replacement):
# This test is heavily inspired from test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# sample generates all possible permutations
n_population = 10
# a large number of trials prevents false negatives without slowing normal
# case
n_trials = 10000
for n_samples in range(n_population):
# Counting the number of combinations is not as good as counting the
        # number of permutations. However, it works with a sampling algorithm
        # that does not provide a random permutation of the subset of integers.
n_expected = combinations(n_population, n_samples, exact=True)
output = {}
for i in range(n_trials):
output[frozenset(sample_without_replacement(n_population,
n_samples))] = None
if len(output) == n_expected:
break
else:
raise AssertionError(
"number of combinations != number of expected (%s != %s)" %
(len(output), n_expected))
def test_random_choice_csc(n_samples=10000, random_state=24):
# Explicit class probabilities
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
got = random_choice_csc(n_samples, classes, class_probabilites,
random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# Implicit class probabilities
classes = [[0, 1], [1, 2]] # test for array-like support
class_probabilites = [np.array([0.5, 0.5]), np.array([0, 1/2, 1/2])]
got = random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# Edge case probabilities 1.0 and 0.0
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([1.0, 0.0]), np.array([0.0, 1.0, 0.0])]
got = random_choice_csc(n_samples, classes, class_probabilites,
random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel(),
minlength=len(class_probabilites[k])) / n_samples
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# One class target data
classes = [[1], [0]] # test for array-like support
class_probabilites = [np.array([0.0, 1.0]), np.array([1.0])]
got = random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / n_samples
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
def test_random_choice_csc_errors():
# the length of an array in classes and class_probabilites is mismatched
classes = [np.array([0, 1]), np.array([0, 1, 2, 3])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# the class dtype is not supported
classes = [np.array(["a", "1"]), np.array(["z", "1", "2"])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# the class dtype is not supported
classes = [np.array([4.2, 0.1]), np.array([0.1, 0.2, 9.4])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# Given probabilities don't sum to 1
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([0.5, 0.6]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
| bsd-3-clause |
Affix/CouchPotatoServer | libs/pyutil/repeatable_random.py | 106 | 3622 | """
If you execute force_repeatability() then the following things are changed in the runtime:
1. random.random() and its sibling functions, and random.Random.seed() in the random module are seeded with a known seed so that they will return the same sequence on each run.
2. os.urandom() is replaced by a fake urandom that returns a pseudorandom sequence.
3. time.time() is replaced by a fake time that returns an incrementing number. (Original time.time is available as time.realtime.)
Which seed will be used?
If the environment variable REPEATABLE_RANDOMNESS_SEED is set, then it will use that. Else, it will use the current real time. In either case it logs the seed that it used.
Caveats:
1. If some code has acquired a random.Random object before force_repeatability() is executed, then that Random object will produce non-reproducible results. For example, the tempfile module in the Python Standard Library does this.
2. Likewise if some code called time.time() before force_repeatability() was called, then it will have gotten a real time stamp. For example, trial does this. (Then it later subtracts that real timestamp from a faketime timestamp to calculate elapsed time, resulting in a large negative elapsed time.)
3. Fake urandom has an added constraint for performance reasons -- you can't ask it for more than 64 bytes of randomness at a time. (I couldn't figure out how to generate large fake random strings efficiently.)
"""
import os, random, time
if not hasattr(time, "realtime"):
time.realtime = time.time
if not hasattr(os, "realurandom"):
os.realurandom = os.urandom
if not hasattr(random, "realseed"):
random.realseed = random.seed
tdelta = 0
seeded = False
def force_repeatability():
now = 1043659734.0
def faketime():
global tdelta
tdelta += 1
return now + tdelta
time.faketime = faketime
time.time = faketime
from idlib import i2b
def fakeurandom(n):
if n > 64:
raise ("Can't produce more than 64 bytes of pseudorandomness efficiently.")
elif n == 0:
return ''
else:
z = i2b(random.getrandbits(n*8))
x = z + "0" * (n-len(z))
assert len(x) == n
return x
os.fakeurandom = fakeurandom
os.urandom = fakeurandom
global seeded
if not seeded:
SEED = os.environ.get('REPEATABLE_RANDOMNESS_SEED', None)
if SEED is None:
# Generate a seed which is integral and fairly short (to ease cut-and-paste, writing it down, etc.).
t = time.realtime()
subsec = t % 1
t += (subsec * 1000000)
t %= 1000000
SEED = long(t)
import sys
sys.stdout.write("REPEATABLE_RANDOMNESS_SEED: %s\n" % SEED) ; sys.stdout.flush()
sys.stdout.write("In order to reproduce this run of the code, set the environment variable \"REPEATABLE_RANDOMNESS_SEED\" to %s before executing.\n" % SEED) ; sys.stdout.flush()
random.seed(SEED)
def seed_which_refuses(a):
sys.stdout.write("I refuse to reseed to %s. Go away!\n" % (a,)) ; sys.stdout.flush()
return
random.realseed = random.seed
random.seed = seed_which_refuses
seeded = True
import setutil
setutil.RandomSet.DETERMINISTIC = True
def restore_real_clock():
time.time = time.realtime
def restore_real_urandom():
os.urandom = os.realurandom
def restore_real_seed():
random.seed = random.realseed
def restore_non_repeatability():
restore_real_seed()
restore_real_urandom()
restore_real_clock()
| gpl-3.0 |
avoinsystems/odoo | addons/account_voucher/account_voucher.py | 132 | 85482 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from lxml import etree
from openerp.osv import fields, osv
import openerp.addons.decimal_precision as dp
from openerp.tools.translate import _
from openerp.tools import float_compare
from openerp.report import report_sxw
import openerp
class res_currency(osv.osv):
_inherit = "res.currency"
def _get_current_rate(self, cr, uid, ids, raise_on_no_rate=True, context=None):
if context is None:
context = {}
res = super(res_currency, self)._get_current_rate(cr, uid, ids, raise_on_no_rate, context=context)
if context.get('voucher_special_currency') in ids and context.get('voucher_special_currency_rate'):
res[context.get('voucher_special_currency')] = context.get('voucher_special_currency_rate')
return res
class account_voucher(osv.osv):
def _check_paid(self, cr, uid, ids, name, args, context=None):
res = {}
for voucher in self.browse(cr, uid, ids, context=context):
            res[voucher.id] = any([(line.account_id.type in ('receivable', 'payable') and line.reconcile_id) for line in voucher.move_ids])
return res
def _get_type(self, cr, uid, context=None):
if context is None:
context = {}
return context.get('type', False)
def _get_period(self, cr, uid, context=None):
if context is None: context = {}
if context.get('period_id', False):
return context.get('period_id')
periods = self.pool.get('account.period').find(cr, uid, context=context)
return periods and periods[0] or False
def _make_journal_search(self, cr, uid, ttype, context=None):
journal_pool = self.pool.get('account.journal')
return journal_pool.search(cr, uid, [('type', '=', ttype)], limit=1)
def _get_journal(self, cr, uid, context=None):
if context is None: context = {}
invoice_pool = self.pool.get('account.invoice')
journal_pool = self.pool.get('account.journal')
if context.get('invoice_id', False):
invoice = invoice_pool.browse(cr, uid, context['invoice_id'], context=context)
journal_id = journal_pool.search(cr, uid, [
('currency', '=', invoice.currency_id.id), ('company_id', '=', invoice.company_id.id)
], limit=1, context=context)
return journal_id and journal_id[0] or False
if context.get('journal_id', False):
return context.get('journal_id')
if not context.get('journal_id', False) and context.get('search_default_journal_id', False):
return context.get('search_default_journal_id')
ttype = context.get('type', 'bank')
if ttype in ('payment', 'receipt'):
ttype = 'bank'
res = self._make_journal_search(cr, uid, ttype, context=context)
return res and res[0] or False
def _get_tax(self, cr, uid, context=None):
if context is None: context = {}
journal_pool = self.pool.get('account.journal')
journal_id = context.get('journal_id', False)
if not journal_id:
ttype = context.get('type', 'bank')
res = journal_pool.search(cr, uid, [('type', '=', ttype)], limit=1)
if not res:
return False
journal_id = res[0]
if not journal_id:
return False
journal = journal_pool.browse(cr, uid, journal_id, context=context)
account_id = journal.default_credit_account_id or journal.default_debit_account_id
if account_id and account_id.tax_ids:
tax_id = account_id.tax_ids[0].id
return tax_id
return False
def _get_payment_rate_currency(self, cr, uid, context=None):
"""
Return the default value for field payment_rate_currency_id: the currency of the journal
if there is one, otherwise the currency of the user's company
"""
if context is None: context = {}
journal_pool = self.pool.get('account.journal')
journal_id = context.get('journal_id', False)
if journal_id:
journal = journal_pool.browse(cr, uid, journal_id, context=context)
if journal.currency:
return journal.currency.id
#no journal given in the context, use company currency as default
return self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.currency_id.id
def _get_currency(self, cr, uid, context=None):
if context is None: context = {}
journal_pool = self.pool.get('account.journal')
journal_id = context.get('journal_id', False)
if journal_id:
if isinstance(journal_id, (list, tuple)):
# sometimes journal_id is a pair (id, display_name)
journal_id = journal_id[0]
journal = journal_pool.browse(cr, uid, journal_id, context=context)
if journal.currency:
return journal.currency.id
return self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.currency_id.id
def _get_partner(self, cr, uid, context=None):
if context is None: context = {}
return context.get('partner_id', False)
def _get_reference(self, cr, uid, context=None):
if context is None: context = {}
return context.get('reference', False)
def _get_narration(self, cr, uid, context=None):
if context is None: context = {}
return context.get('narration', False)
def _get_amount(self, cr, uid, context=None):
if context is None:
context= {}
return context.get('amount', 0.0)
def name_get(self, cr, uid, ids, context=None):
if not ids:
return []
if context is None: context = {}
return [(r['id'], (r['number'] or _('Voucher'))) for r in self.read(cr, uid, ids, ['number'], context, load='_classic_write')]
def fields_view_get(self, cr, uid, view_id=None, view_type=False, context=None, toolbar=False, submenu=False):
mod_obj = self.pool.get('ir.model.data')
if context is None: context = {}
if view_type == 'form':
if not view_id and context.get('invoice_type'):
if context.get('invoice_type') in ('out_invoice', 'out_refund'):
result = mod_obj.get_object_reference(cr, uid, 'account_voucher', 'view_vendor_receipt_form')
else:
result = mod_obj.get_object_reference(cr, uid, 'account_voucher', 'view_vendor_payment_form')
result = result and result[1] or False
view_id = result
if not view_id and context.get('line_type'):
if context.get('line_type') == 'customer':
result = mod_obj.get_object_reference(cr, uid, 'account_voucher', 'view_vendor_receipt_form')
else:
result = mod_obj.get_object_reference(cr, uid, 'account_voucher', 'view_vendor_payment_form')
result = result and result[1] or False
view_id = result
res = super(account_voucher, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=submenu)
doc = etree.XML(res['arch'])
if context.get('type', 'sale') in ('purchase', 'payment'):
nodes = doc.xpath("//field[@name='partner_id']")
for node in nodes:
node.set('context', "{'default_customer': 0, 'search_default_supplier': 1, 'default_supplier': 1}")
if context.get('invoice_type','') in ('in_invoice', 'in_refund'):
node.set('string', _("Supplier"))
res['arch'] = etree.tostring(doc)
return res
def _compute_writeoff_amount(self, cr, uid, line_dr_ids, line_cr_ids, amount, type):
debit = credit = 0.0
sign = type == 'payment' and -1 or 1
for l in line_dr_ids:
if isinstance(l, dict):
debit += l['amount']
for l in line_cr_ids:
if isinstance(l, dict):
credit += l['amount']
return amount - sign * (credit - debit)
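    # Worked example (illustrative numbers only): for a customer 'receipt' of amount=100.0
    # allocating 80.0 on credit lines and nothing on debit lines, sign is +1 and the
    # write-off amount is 100.0 - (80.0 - 0.0) = 20.0. For a supplier 'payment' of 100.0
    # allocating 80.0 on debit lines, sign is -1 and the result is the same:
    # 100.0 - (-1) * (0.0 - 80.0) = 20.0 left to reconcile or to keep open.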
def onchange_line_ids(self, cr, uid, ids, line_dr_ids, line_cr_ids, amount, voucher_currency, type, context=None):
context = context or {}
if not line_dr_ids and not line_cr_ids:
return {'value':{'writeoff_amount': 0.0}}
# resolve lists of commands into lists of dicts
line_dr_ids = self.resolve_2many_commands(cr, uid, 'line_dr_ids', line_dr_ids, ['amount'], context)
line_cr_ids = self.resolve_2many_commands(cr, uid, 'line_cr_ids', line_cr_ids, ['amount'], context)
#compute the field is_multi_currency that is used to hide/display options linked to secondary currency on the voucher
is_multi_currency = False
#loop on the voucher lines to see if one of these has a secondary currency. If yes, we need to see the options
for voucher_line in line_dr_ids+line_cr_ids:
line_id = voucher_line.get('id') and self.pool.get('account.voucher.line').browse(cr, uid, voucher_line['id'], context=context).move_line_id.id or voucher_line.get('move_line_id')
if line_id and self.pool.get('account.move.line').browse(cr, uid, line_id, context=context).currency_id:
is_multi_currency = True
break
return {'value': {'writeoff_amount': self._compute_writeoff_amount(cr, uid, line_dr_ids, line_cr_ids, amount, type), 'is_multi_currency': is_multi_currency}}
def _get_journal_currency(self, cr, uid, ids, name, args, context=None):
res = {}
for voucher in self.browse(cr, uid, ids, context=context):
res[voucher.id] = voucher.journal_id.currency and voucher.journal_id.currency.id or voucher.company_id.currency_id.id
return res
def _get_writeoff_amount(self, cr, uid, ids, name, args, context=None):
if not ids: return {}
currency_obj = self.pool.get('res.currency')
res = {}
for voucher in self.browse(cr, uid, ids, context=context):
debit = credit = 0.0
sign = voucher.type == 'payment' and -1 or 1
for l in voucher.line_dr_ids:
debit += l.amount
for l in voucher.line_cr_ids:
credit += l.amount
currency = voucher.currency_id or voucher.company_id.currency_id
res[voucher.id] = currency_obj.round(cr, uid, currency, voucher.amount - sign * (credit - debit))
return res
def _paid_amount_in_company_currency(self, cr, uid, ids, name, args, context=None):
if context is None:
context = {}
res = {}
ctx = context.copy()
for v in self.browse(cr, uid, ids, context=context):
ctx.update({'date': v.date})
#make a new call to browse in order to have the right date in the context, to get the right currency rate
voucher = self.browse(cr, uid, v.id, context=ctx)
ctx.update({
'voucher_special_currency': voucher.payment_rate_currency_id and voucher.payment_rate_currency_id.id or False,
'voucher_special_currency_rate': voucher.currency_id.rate * voucher.payment_rate,})
res[voucher.id] = self.pool.get('res.currency').compute(cr, uid, voucher.currency_id.id, voucher.company_id.currency_id.id, voucher.amount, context=ctx)
return res
def _get_currency_help_label(self, cr, uid, currency_id, payment_rate, payment_rate_currency_id, context=None):
"""
This function builds a string to help the users to understand the behavior of the payment rate fields they can specify on the voucher.
This string is only used to improve the usability in the voucher form view and has no other effect.
:param currency_id: the voucher currency
:type currency_id: integer
:param payment_rate: the value of the payment_rate field of the voucher
:type payment_rate: float
:param payment_rate_currency_id: the value of the payment_rate_currency_id field of the voucher
:type payment_rate_currency_id: integer
:return: translated string giving a tip on what's the effect of the current payment rate specified
:rtype: str
"""
rml_parser = report_sxw.rml_parse(cr, uid, 'currency_help_label', context=context)
currency_pool = self.pool.get('res.currency')
currency_str = payment_rate_str = ''
if currency_id:
currency_str = rml_parser.formatLang(1, currency_obj=currency_pool.browse(cr, uid, currency_id, context=context))
if payment_rate_currency_id:
payment_rate_str = rml_parser.formatLang(payment_rate, currency_obj=currency_pool.browse(cr, uid, payment_rate_currency_id, context=context))
currency_help_label = _('At the operation date, the exchange rate was\n%s = %s') % (currency_str, payment_rate_str)
return currency_help_label
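    # Example of the resulting helper sentence (hypothetical currencies and rate; the exact
    # formatting depends on formatLang and the currency symbols): with a USD voucher and a
    # payment rate of 0.92 expressed in EUR, the label reads roughly
    # "At the operation date, the exchange rate was\n1.00 USD = 0.92 EUR".
    # The sentence is informational only and has no effect on the generated journal entries.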
def _fnct_currency_help_label(self, cr, uid, ids, name, args, context=None):
res = {}
for voucher in self.browse(cr, uid, ids, context=context):
res[voucher.id] = self._get_currency_help_label(cr, uid, voucher.currency_id.id, voucher.payment_rate, voucher.payment_rate_currency_id.id, context=context)
return res
_name = 'account.voucher'
_description = 'Accounting Voucher'
_inherit = ['mail.thread']
_order = "date desc, id desc"
# _rec_name = 'number'
_track = {
'state': {
'account_voucher.mt_voucher_state_change': lambda self, cr, uid, obj, ctx=None: True,
},
}
_columns = {
'type':fields.selection([
('sale','Sale'),
('purchase','Purchase'),
('payment','Payment'),
('receipt','Receipt'),
],'Default Type', readonly=True, states={'draft':[('readonly',False)]}),
'name':fields.char('Memo', readonly=True, states={'draft':[('readonly',False)]}),
'date':fields.date('Date', readonly=True, select=True, states={'draft':[('readonly',False)]},
help="Effective date for accounting entries", copy=False),
'journal_id':fields.many2one('account.journal', 'Journal', required=True, readonly=True, states={'draft':[('readonly',False)]}),
'account_id':fields.many2one('account.account', 'Account', required=True, readonly=True, states={'draft':[('readonly',False)]}),
'line_ids':fields.one2many('account.voucher.line', 'voucher_id', 'Voucher Lines',
readonly=True, copy=True,
states={'draft':[('readonly',False)]}),
'line_cr_ids':fields.one2many('account.voucher.line','voucher_id','Credits',
domain=[('type','=','cr')], context={'default_type':'cr'}, readonly=True, states={'draft':[('readonly',False)]}),
'line_dr_ids':fields.one2many('account.voucher.line','voucher_id','Debits',
domain=[('type','=','dr')], context={'default_type':'dr'}, readonly=True, states={'draft':[('readonly',False)]}),
'period_id': fields.many2one('account.period', 'Period', required=True, readonly=True, states={'draft':[('readonly',False)]}),
'narration':fields.text('Notes', readonly=True, states={'draft':[('readonly',False)]}),
'currency_id': fields.function(_get_journal_currency, type='many2one', relation='res.currency', string='Currency', readonly=True, required=True),
'company_id': fields.many2one('res.company', 'Company', required=True, readonly=True, states={'draft':[('readonly',False)]}),
'state':fields.selection(
[('draft','Draft'),
('cancel','Cancelled'),
('proforma','Pro-forma'),
('posted','Posted')
], 'Status', readonly=True, track_visibility='onchange', copy=False,
help=' * The \'Draft\' status is used when a user is encoding a new and unconfirmed Voucher. \
                        \n* The \'Pro-forma\' status is used when the voucher is in Pro-forma state; the voucher does not have a voucher number yet. \
                        \n* The \'Posted\' status is used when the user creates the voucher; a voucher number is generated and voucher entries are created in the accounts. \
                        \n* The \'Cancelled\' status is used when the user cancels the voucher.'),
'amount': fields.float('Total', digits_compute=dp.get_precision('Account'), required=True, readonly=True, states={'draft':[('readonly',False)]}),
'tax_amount':fields.float('Tax Amount', digits_compute=dp.get_precision('Account'), readonly=True),
'reference': fields.char('Ref #', readonly=True, states={'draft':[('readonly',False)]},
help="Transaction reference number.", copy=False),
'number': fields.char('Number', readonly=True, copy=False),
'move_id':fields.many2one('account.move', 'Account Entry', copy=False),
'move_ids': fields.related('move_id','line_id', type='one2many', relation='account.move.line', string='Journal Items', readonly=True),
'partner_id':fields.many2one('res.partner', 'Partner', change_default=1, readonly=True, states={'draft':[('readonly',False)]}),
'audit': fields.related('move_id','to_check', type='boolean', help='Check this box if you are unsure of that journal entry and if you want to note it as \'to be reviewed\' by an accounting expert.', relation='account.move', string='To Review'),
'paid': fields.function(_check_paid, string='Paid', type='boolean', help="The Voucher has been totally paid."),
'pay_now':fields.selection([
('pay_now','Pay Directly'),
('pay_later','Pay Later or Group Funds'),
],'Payment', select=True, readonly=True, states={'draft':[('readonly',False)]}),
'tax_id': fields.many2one('account.tax', 'Tax', readonly=True, states={'draft':[('readonly',False)]}, domain=[('price_include','=', False)], help="Only for tax excluded from price"),
'pre_line':fields.boolean('Previous Payments ?', required=False),
'date_due': fields.date('Due Date', readonly=True, select=True, states={'draft':[('readonly',False)]}),
'payment_option':fields.selection([
('without_writeoff', 'Keep Open'),
('with_writeoff', 'Reconcile Payment Balance'),
                                           ], 'Payment Difference', required=True, readonly=True, states={'draft': [('readonly', False)]}, help="This field helps you to choose what to do with any difference between the paid amount and the sum of allocated amounts. You can either choose to keep this difference open on the partner's account, or reconcile it with the payment(s)."),
'writeoff_acc_id': fields.many2one('account.account', 'Counterpart Account', readonly=True, states={'draft': [('readonly', False)]}),
'comment': fields.char('Counterpart Comment', required=True, readonly=True, states={'draft': [('readonly', False)]}),
'analytic_id': fields.many2one('account.analytic.account','Write-Off Analytic Account', readonly=True, states={'draft': [('readonly', False)]}),
'writeoff_amount': fields.function(_get_writeoff_amount, string='Difference Amount', type='float', readonly=True, help="Computed as the difference between the amount stated in the voucher and the sum of allocation on the voucher lines."),
'payment_rate_currency_id': fields.many2one('res.currency', 'Payment Rate Currency', required=True, readonly=True, states={'draft':[('readonly',False)]}),
'payment_rate': fields.float('Exchange Rate', digits=(12,6), required=True, readonly=True, states={'draft': [('readonly', False)]},
help='The specific rate that will be used, in this voucher, between the selected currency (in \'Payment Rate Currency\' field) and the voucher currency.'),
'paid_amount_in_company_currency': fields.function(_paid_amount_in_company_currency, string='Paid Amount in Company Currency', type='float', readonly=True),
        'is_multi_currency': fields.boolean('Multi Currency Voucher', help='Field with internal purpose only that indicates whether the voucher is a multi-currency one or not'),
'currency_help_label': fields.function(_fnct_currency_help_label, type='text', string="Helping Sentence", help="This sentence helps you to know how to specify the payment rate by giving you the direct effect it has"),
}
_defaults = {
'period_id': _get_period,
'partner_id': _get_partner,
'journal_id':_get_journal,
'currency_id': _get_currency,
'reference': _get_reference,
'narration':_get_narration,
'amount': _get_amount,
'type':_get_type,
'state': 'draft',
'pay_now': 'pay_now',
'name': '',
'date': fields.date.context_today,
'company_id': lambda self,cr,uid,c: self.pool.get('res.company')._company_default_get(cr, uid, 'account.voucher',context=c),
'tax_id': _get_tax,
'payment_option': 'without_writeoff',
'comment': _('Write-Off'),
'payment_rate': 1.0,
'payment_rate_currency_id': _get_payment_rate_currency,
}
def compute_tax(self, cr, uid, ids, context=None):
tax_pool = self.pool.get('account.tax')
partner_pool = self.pool.get('res.partner')
position_pool = self.pool.get('account.fiscal.position')
voucher_line_pool = self.pool.get('account.voucher.line')
voucher_pool = self.pool.get('account.voucher')
if context is None: context = {}
for voucher in voucher_pool.browse(cr, uid, ids, context=context):
voucher_amount = 0.0
for line in voucher.line_ids:
voucher_amount += line.untax_amount or line.amount
line.amount = line.untax_amount or line.amount
voucher_line_pool.write(cr, uid, [line.id], {'amount':line.amount, 'untax_amount':line.untax_amount})
if not voucher.tax_id:
self.write(cr, uid, [voucher.id], {'amount':voucher_amount, 'tax_amount':0.0})
continue
tax = [tax_pool.browse(cr, uid, voucher.tax_id.id, context=context)]
partner = partner_pool.browse(cr, uid, voucher.partner_id.id, context=context) or False
taxes = position_pool.map_tax(cr, uid, partner and partner.property_account_position or False, tax)
tax = tax_pool.browse(cr, uid, taxes, context=context)
total = voucher_amount
total_tax = 0.0
if not tax[0].price_include:
for line in voucher.line_ids:
for tax_line in tax_pool.compute_all(cr, uid, tax, line.amount, 1).get('taxes', []):
total_tax += tax_line.get('amount', 0.0)
total += total_tax
else:
for line in voucher.line_ids:
line_total = 0.0
line_tax = 0.0
for tax_line in tax_pool.compute_all(cr, uid, tax, line.untax_amount or line.amount, 1).get('taxes', []):
line_tax += tax_line.get('amount', 0.0)
line_total += tax_line.get('price_unit')
total_tax += line_tax
untax_amount = line.untax_amount or line.amount
voucher_line_pool.write(cr, uid, [line.id], {'amount':line_total, 'untax_amount':untax_amount})
self.write(cr, uid, [voucher.id], {'amount':total, 'tax_amount':total_tax})
return True
def onchange_price(self, cr, uid, ids, line_ids, tax_id, partner_id=False, context=None):
context = context or {}
tax_pool = self.pool.get('account.tax')
partner_pool = self.pool.get('res.partner')
position_pool = self.pool.get('account.fiscal.position')
if not line_ids:
line_ids = []
res = {
'tax_amount': False,
'amount': False,
}
voucher_total = 0.0
# resolve the list of commands into a list of dicts
line_ids = self.resolve_2many_commands(cr, uid, 'line_ids', line_ids, ['amount'], context)
total_tax = 0.0
for line in line_ids:
            line_amount = line.get('amount', 0.0)
if tax_id:
tax = [tax_pool.browse(cr, uid, tax_id, context=context)]
if partner_id:
partner = partner_pool.browse(cr, uid, partner_id, context=context) or False
taxes = position_pool.map_tax(cr, uid, partner and partner.property_account_position or False, tax)
tax = tax_pool.browse(cr, uid, taxes, context=context)
if not tax[0].price_include:
for tax_line in tax_pool.compute_all(cr, uid, tax, line_amount, 1).get('taxes', []):
total_tax += tax_line.get('amount')
voucher_total += line_amount
total = voucher_total + total_tax
res.update({
'amount': total or voucher_total,
'tax_amount': total_tax
})
return {
'value': res
}
def onchange_term_id(self, cr, uid, ids, term_id, amount):
term_pool = self.pool.get('account.payment.term')
terms = False
due_date = False
default = {'date_due':False}
if term_id and amount:
terms = term_pool.compute(cr, uid, term_id, amount)
if terms:
due_date = terms[-1][0]
default.update({
'date_due':due_date
})
return {'value':default}
def onchange_journal_voucher(self, cr, uid, ids, line_ids=False, tax_id=False, price=0.0, partner_id=False, journal_id=False, ttype=False, company_id=False, context=None):
"""price
Returns a dict that contains new values and context
@param partner_id: latest value from user input for field partner_id
@param args: other arguments
@param context: context arguments, like lang, time zone
@return: Returns a dict which contains new values, and context
"""
default = {
'value':{},
}
if not partner_id or not journal_id:
return default
partner_pool = self.pool.get('res.partner')
journal_pool = self.pool.get('account.journal')
journal = journal_pool.browse(cr, uid, journal_id, context=context)
partner = partner_pool.browse(cr, uid, partner_id, context=context)
account_id = False
tr_type = False
if journal.type in ('sale','sale_refund'):
account_id = partner.property_account_receivable.id
tr_type = 'sale'
elif journal.type in ('purchase', 'purchase_refund','expense'):
account_id = partner.property_account_payable.id
tr_type = 'purchase'
else:
if not journal.default_credit_account_id or not journal.default_debit_account_id:
raise osv.except_osv(_('Error!'), _('Please define default credit/debit accounts on the journal "%s".') % (journal.name))
if ttype in ('sale', 'receipt'):
account_id = journal.default_debit_account_id.id
elif ttype in ('purchase', 'payment'):
account_id = journal.default_credit_account_id.id
else:
account_id = journal.default_credit_account_id.id or journal.default_debit_account_id.id
tr_type = 'receipt'
default['value']['account_id'] = account_id
default['value']['type'] = ttype or tr_type
vals = self.onchange_journal(cr, uid, ids, journal_id, line_ids, tax_id, partner_id, time.strftime('%Y-%m-%d'), price, ttype, company_id, context)
default['value'].update(vals.get('value'))
return default
def onchange_rate(self, cr, uid, ids, rate, amount, currency_id, payment_rate_currency_id, company_id, context=None):
res = {'value': {'paid_amount_in_company_currency': amount, 'currency_help_label': self._get_currency_help_label(cr, uid, currency_id, rate, payment_rate_currency_id, context=context)}}
if rate and amount and currency_id:
company_currency = self.pool.get('res.company').browse(cr, uid, company_id, context=context).currency_id
#context should contain the date, the payment currency and the payment rate specified on the voucher
amount_in_company_currency = self.pool.get('res.currency').compute(cr, uid, currency_id, company_currency.id, amount, context=context)
res['value']['paid_amount_in_company_currency'] = amount_in_company_currency
return res
def onchange_amount(self, cr, uid, ids, amount, rate, partner_id, journal_id, currency_id, ttype, date, payment_rate_currency_id, company_id, context=None):
if context is None:
context = {}
ctx = context.copy()
ctx.update({'date': date})
#read the voucher rate with the right date in the context
currency_id = currency_id or self.pool.get('res.company').browse(cr, uid, company_id, context=ctx).currency_id.id
voucher_rate = self.pool.get('res.currency').read(cr, uid, [currency_id], ['rate'], context=ctx)[0]['rate']
ctx.update({
'voucher_special_currency': payment_rate_currency_id,
'voucher_special_currency_rate': rate * voucher_rate})
res = self.recompute_voucher_lines(cr, uid, ids, partner_id, journal_id, amount, currency_id, ttype, date, context=ctx)
vals = self.onchange_rate(cr, uid, ids, rate, amount, currency_id, payment_rate_currency_id, company_id, context=ctx)
for key in vals.keys():
res[key].update(vals[key])
return res
def recompute_payment_rate(self, cr, uid, ids, vals, currency_id, date, ttype, journal_id, amount, context=None):
if context is None:
context = {}
#on change of the journal, we need to set also the default value for payment_rate and payment_rate_currency_id
currency_obj = self.pool.get('res.currency')
journal = self.pool.get('account.journal').browse(cr, uid, journal_id, context=context)
company_id = journal.company_id.id
payment_rate = 1.0
currency_id = currency_id or journal.company_id.currency_id.id
payment_rate_currency_id = currency_id
ctx = context.copy()
ctx.update({'date': date})
o2m_to_loop = False
if ttype == 'receipt':
o2m_to_loop = 'line_cr_ids'
elif ttype == 'payment':
o2m_to_loop = 'line_dr_ids'
if o2m_to_loop and 'value' in vals and o2m_to_loop in vals['value']:
for voucher_line in vals['value'][o2m_to_loop]:
if not isinstance(voucher_line, dict):
continue
if voucher_line['currency_id'] != currency_id:
# we take as default value for the payment_rate_currency_id, the currency of the first invoice that
# is not in the voucher currency
payment_rate_currency_id = voucher_line['currency_id']
tmp = currency_obj.browse(cr, uid, payment_rate_currency_id, context=ctx).rate
payment_rate = tmp / currency_obj.browse(cr, uid, currency_id, context=ctx).rate
break
vals['value'].update({
'payment_rate': payment_rate,
'currency_id': currency_id,
'payment_rate_currency_id': payment_rate_currency_id
})
#read the voucher rate with the right date in the context
voucher_rate = self.pool.get('res.currency').read(cr, uid, [currency_id], ['rate'], context=ctx)[0]['rate']
ctx.update({
'voucher_special_currency_rate': payment_rate * voucher_rate,
'voucher_special_currency': payment_rate_currency_id})
res = self.onchange_rate(cr, uid, ids, payment_rate, amount, currency_id, payment_rate_currency_id, company_id, context=ctx)
for key in res.keys():
vals[key].update(res[key])
return vals
def basic_onchange_partner(self, cr, uid, ids, partner_id, journal_id, ttype, context=None):
partner_pool = self.pool.get('res.partner')
journal_pool = self.pool.get('account.journal')
res = {'value': {'account_id': False}}
if not partner_id or not journal_id:
return res
journal = journal_pool.browse(cr, uid, journal_id, context=context)
partner = partner_pool.browse(cr, uid, partner_id, context=context)
account_id = False
if journal.type in ('sale','sale_refund'):
account_id = partner.property_account_receivable.id
elif journal.type in ('purchase', 'purchase_refund','expense'):
account_id = partner.property_account_payable.id
elif ttype in ('sale', 'receipt'):
account_id = journal.default_debit_account_id.id
elif ttype in ('purchase', 'payment'):
account_id = journal.default_credit_account_id.id
else:
account_id = journal.default_credit_account_id.id or journal.default_debit_account_id.id
res['value']['account_id'] = account_id
return res
def onchange_partner_id(self, cr, uid, ids, partner_id, journal_id, amount, currency_id, ttype, date, context=None):
if not journal_id:
return {}
if context is None:
context = {}
#TODO: comment me and use me directly in the sales/purchases views
res = self.basic_onchange_partner(cr, uid, ids, partner_id, journal_id, ttype, context=context)
if ttype in ['sale', 'purchase']:
return res
ctx = context.copy()
# we do not pass the payment_rate currency and the payment_rate in the context, but it's ok because they are reset in recompute_payment_rate
ctx.update({'date': date})
vals = self.recompute_voucher_lines(cr, uid, ids, partner_id, journal_id, amount, currency_id, ttype, date, context=ctx)
vals2 = self.recompute_payment_rate(cr, uid, ids, vals, currency_id, date, ttype, journal_id, amount, context=context)
for key in vals.keys():
res[key].update(vals[key])
for key in vals2.keys():
res[key].update(vals2[key])
#TODO: can probably be removed now
#TODO: onchange_partner_id() should not return [pre_line, line_dr_ids, payment_rate...] for type sale, nor
# [pre_line, line_cr_ids, payment_rate...] for type purchase.
# We should definitely split the account.voucher object in two and make distinct on_change functions. In the
# meantime, the lines below must be there because the fields aren't present in the view, which crashes if the
# onchange returns a value for them
if ttype == 'sale':
del(res['value']['line_dr_ids'])
del(res['value']['pre_line'])
del(res['value']['payment_rate'])
elif ttype == 'purchase':
del(res['value']['line_cr_ids'])
del(res['value']['pre_line'])
del(res['value']['payment_rate'])
return res
def recompute_voucher_lines(self, cr, uid, ids, partner_id, journal_id, price, currency_id, ttype, date, context=None):
"""
Returns a dict that contains new values and context
@param partner_id: latest value from user input for field partner_id
@param args: other arguments
@param context: context arguments, like lang, time zone
@return: Returns a dict which contains new values, and context
"""
def _remove_noise_in_o2m():
"""if the line is partially reconciled, then we must pay attention to display it only once and
in the good o2m.
This function returns True if the line is considered as noise and should not be displayed
"""
if line.reconcile_partial_id:
if currency_id == line.currency_id.id:
if line.amount_residual_currency <= 0:
return True
else:
if line.amount_residual <= 0:
return True
return False
if context is None:
context = {}
context_multi_currency = context.copy()
currency_pool = self.pool.get('res.currency')
move_line_pool = self.pool.get('account.move.line')
partner_pool = self.pool.get('res.partner')
journal_pool = self.pool.get('account.journal')
line_pool = self.pool.get('account.voucher.line')
#set default values
default = {
'value': {'line_dr_ids': [], 'line_cr_ids': [], 'pre_line': False},
}
# drop existing lines
line_ids = ids and line_pool.search(cr, uid, [('voucher_id', '=', ids[0])])
for line in line_pool.browse(cr, uid, line_ids, context=context):
if line.type == 'cr':
default['value']['line_cr_ids'].append((2, line.id))
else:
default['value']['line_dr_ids'].append((2, line.id))
if not partner_id or not journal_id:
return default
journal = journal_pool.browse(cr, uid, journal_id, context=context)
partner = partner_pool.browse(cr, uid, partner_id, context=context)
currency_id = currency_id or journal.company_id.currency_id.id
total_credit = 0.0
total_debit = 0.0
account_type = None
if context.get('account_id'):
account_type = self.pool['account.account'].browse(cr, uid, context['account_id'], context=context).type
if ttype == 'payment':
if not account_type:
account_type = 'payable'
total_debit = price or 0.0
else:
total_credit = price or 0.0
if not account_type:
account_type = 'receivable'
if not context.get('move_line_ids', False):
ids = move_line_pool.search(cr, uid, [('state','=','valid'), ('account_id.type', '=', account_type), ('reconcile_id', '=', False), ('partner_id', '=', partner_id)], context=context)
else:
ids = context['move_line_ids']
invoice_id = context.get('invoice_id', False)
company_currency = journal.company_id.currency_id.id
move_lines_found = []
#order the lines oldest first
ids.reverse()
account_move_lines = move_line_pool.browse(cr, uid, ids, context=context)
#compute the total debit/credit and look for a matching open amount or invoice
for line in account_move_lines:
if _remove_noise_in_o2m():
continue
if invoice_id:
if line.invoice.id == invoice_id:
#if the invoice linked to the voucher line is equal to the invoice_id in context
#then we assign the amount on that line, regardless of the other voucher lines
move_lines_found.append(line.id)
elif currency_id == company_currency:
#otherwise the treatment is the same but with other field names
if line.amount_residual == price:
#if the residual amount is equal to the voucher amount, we assign it to that voucher
#line, regardless of the other voucher lines
move_lines_found.append(line.id)
break
#otherwise we will split the voucher amount over each line (oldest first)
total_credit += line.credit or 0.0
total_debit += line.debit or 0.0
elif currency_id == line.currency_id.id:
if line.amount_residual_currency == price:
move_lines_found.append(line.id)
break
total_credit += line.credit and line.amount_currency or 0.0
total_debit += line.debit and line.amount_currency or 0.0
remaining_amount = price
#voucher line creation
for line in account_move_lines:
if _remove_noise_in_o2m():
continue
if line.currency_id and currency_id == line.currency_id.id:
amount_original = abs(line.amount_currency)
amount_unreconciled = abs(line.amount_residual_currency)
else:
#always use the amount booked in the company currency as the basis of the conversion into the voucher currency
amount_original = currency_pool.compute(cr, uid, company_currency, currency_id, line.credit or line.debit or 0.0, context=context_multi_currency)
amount_unreconciled = currency_pool.compute(cr, uid, company_currency, currency_id, abs(line.amount_residual), context=context_multi_currency)
line_currency_id = line.currency_id and line.currency_id.id or company_currency
rs = {
'name':line.move_id.name,
'type': line.credit and 'dr' or 'cr',
'move_line_id':line.id,
'account_id':line.account_id.id,
'amount_original': amount_original,
'amount': (line.id in move_lines_found) and min(abs(remaining_amount), amount_unreconciled) or 0.0,
'date_original':line.date,
'date_due':line.date_maturity,
'amount_unreconciled': amount_unreconciled,
'currency_id': line_currency_id,
}
remaining_amount -= rs['amount']
#in case a corresponding move_line hasn't been found, we now try to assign the voucher amount
#on existing invoices: we split the voucher amount oldest first, but only for lines in the same currency
if not move_lines_found:
if currency_id == line_currency_id:
if line.credit:
amount = min(amount_unreconciled, abs(total_debit))
rs['amount'] = amount
total_debit -= amount
else:
amount = min(amount_unreconciled, abs(total_credit))
rs['amount'] = amount
total_credit -= amount
if rs['amount_unreconciled'] == rs['amount']:
rs['reconcile'] = True
if rs['type'] == 'cr':
default['value']['line_cr_ids'].append(rs)
else:
default['value']['line_dr_ids'].append(rs)
if len(default['value']['line_cr_ids']) > 0:
default['value']['pre_line'] = 1
elif len(default['value']['line_dr_ids']) > 0:
default['value']['pre_line'] = 1
default['value']['writeoff_amount'] = self._compute_writeoff_amount(cr, uid, default['value']['line_dr_ids'], default['value']['line_cr_ids'], price, ttype)
return default
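# --- Editor's illustrative sketch (not part of the original module) ---
# recompute_voucher_lines() above spreads the paid amount over the open move lines, oldest first,
# unless a single line exactly matches the paid amount. A simplified standalone version of that
# allocation loop (hypothetical helper, plain floats, no ORM):
def _allocate_oldest_first_sketch(price, open_amounts):
    remaining = price
    allocation = []
    for open_amount in open_amounts:
        amount = min(remaining, open_amount)
        allocation.append(amount)
        remaining -= amount
    return allocation, remaining
# _allocate_oldest_first_sketch(100.0, [40.0, 80.0]) -> ([40.0, 60.0], 0.0)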
def onchange_payment_rate_currency(self, cr, uid, ids, currency_id, payment_rate, payment_rate_currency_id, date, amount, company_id, context=None):
if context is None:
context = {}
res = {'value': {}}
if currency_id:
#set the default payment rate of the voucher and compute the paid amount in company currency
ctx = context.copy()
ctx.update({'date': date})
#read the voucher rate with the right date in the context
voucher_rate = self.pool.get('res.currency').read(cr, uid, [currency_id], ['rate'], context=ctx)[0]['rate']
ctx.update({
'voucher_special_currency_rate': payment_rate * voucher_rate,
'voucher_special_currency': payment_rate_currency_id})
vals = self.onchange_rate(cr, uid, ids, payment_rate, amount, currency_id, payment_rate_currency_id, company_id, context=ctx)
for key in vals.keys():
res[key].update(vals[key])
return res
def onchange_date(self, cr, uid, ids, date, currency_id, payment_rate_currency_id, amount, company_id, context=None):
"""
@param date: latest value from user input for field date
@param args: other arguments
@param context: context arguments, like lang, time zone
@return: Returns a dict which contains new values, and context
"""
if context is None:
context ={}
res = {'value': {}}
#set the period of the voucher
period_pool = self.pool.get('account.period')
currency_obj = self.pool.get('res.currency')
ctx = context.copy()
ctx.update({'company_id': company_id, 'account_period_prefer_normal': True})
voucher_currency_id = currency_id or self.pool.get('res.company').browse(cr, uid, company_id, context=ctx).currency_id.id
pids = period_pool.find(cr, uid, date, context=ctx)
if pids:
res['value'].update({'period_id':pids[0]})
if payment_rate_currency_id:
ctx.update({'date': date})
payment_rate = 1.0
if payment_rate_currency_id != currency_id:
tmp = currency_obj.browse(cr, uid, payment_rate_currency_id, context=ctx).rate
payment_rate = tmp / currency_obj.browse(cr, uid, voucher_currency_id, context=ctx).rate
vals = self.onchange_payment_rate_currency(cr, uid, ids, voucher_currency_id, payment_rate, payment_rate_currency_id, date, amount, company_id, context=context)
vals['value'].update({'payment_rate': payment_rate})
for key in vals.keys():
res[key].update(vals[key])
return res
def onchange_journal(self, cr, uid, ids, journal_id, line_ids, tax_id, partner_id, date, amount, ttype, company_id, context=None):
if context is None:
context = {}
if not journal_id:
return False
journal_pool = self.pool.get('account.journal')
journal = journal_pool.browse(cr, uid, journal_id, context=context)
if ttype in ('sale', 'receipt'):
account_id = journal.default_debit_account_id
elif ttype in ('purchase', 'payment'):
account_id = journal.default_credit_account_id
else:
account_id = journal.default_credit_account_id or journal.default_debit_account_id
tax_id = False
if account_id and account_id.tax_ids:
tax_id = account_id.tax_ids[0].id
vals = {'value':{} }
if ttype in ('sale', 'purchase'):
vals = self.onchange_price(cr, uid, ids, line_ids, tax_id, partner_id, context)
vals['value'].update({'tax_id':tax_id,'amount': amount})
currency_id = False
if journal.currency:
currency_id = journal.currency.id
else:
currency_id = journal.company_id.currency_id.id
period_ids = self.pool['account.period'].find(cr, uid, dt=date, context=dict(context, company_id=company_id))
vals['value'].update({
'currency_id': currency_id,
'payment_rate_currency_id': currency_id,
'period_id': period_ids and period_ids[0] or False
})
#in case we want to register the payment directly from an invoice, it's confusing to allow switching the journal
#without seeing that the amount is expressed in the journal currency, and not in the invoice currency. So to avoid
#this common mistake, we simply reset the amount to 0 if the currency is not the invoice currency.
if context.get('payment_expected_currency') and currency_id != context.get('payment_expected_currency'):
vals['value']['amount'] = 0
amount = 0
if partner_id:
res = self.onchange_partner_id(cr, uid, ids, partner_id, journal_id, amount, currency_id, ttype, date, context)
for key in res.keys():
vals[key].update(res[key])
return vals
def onchange_company(self, cr, uid, ids, partner_id, journal_id, currency_id, company_id, context=None):
"""
If the company changes, check that the journal is in the right company.
If not, fetch a new journal.
"""
journal_pool = self.pool['account.journal']
journal = journal_pool.browse(cr, uid, journal_id, context=context)
if journal.company_id.id != company_id:
# cannot guess the type of journal; better to remove it
return {'value': {'journal_id': False}}
return {}
def button_proforma_voucher(self, cr, uid, ids, context=None):
self.signal_workflow(cr, uid, ids, 'proforma_voucher')
return {'type': 'ir.actions.act_window_close'}
def proforma_voucher(self, cr, uid, ids, context=None):
self.action_move_line_create(cr, uid, ids, context=context)
return True
def action_cancel_draft(self, cr, uid, ids, context=None):
self.create_workflow(cr, uid, ids)
self.write(cr, uid, ids, {'state':'draft'})
return True
def cancel_voucher(self, cr, uid, ids, context=None):
reconcile_pool = self.pool.get('account.move.reconcile')
move_pool = self.pool.get('account.move')
move_line_pool = self.pool.get('account.move.line')
for voucher in self.browse(cr, uid, ids, context=context):
# refresh to make sure you don't unlink an already removed move
voucher.refresh()
for line in voucher.move_ids:
# refresh to make sure you don't unreconcile an already unreconciled entry
line.refresh()
if line.reconcile_id:
move_lines = [move_line.id for move_line in line.reconcile_id.line_id]
move_lines.remove(line.id)
reconcile_pool.unlink(cr, uid, [line.reconcile_id.id])
if len(move_lines) >= 2:
move_line_pool.reconcile_partial(cr, uid, move_lines, 'auto',context=context)
if voucher.move_id:
move_pool.button_cancel(cr, uid, [voucher.move_id.id])
move_pool.unlink(cr, uid, [voucher.move_id.id])
res = {
'state':'cancel',
'move_id':False,
}
self.write(cr, uid, ids, res)
return True
def unlink(self, cr, uid, ids, context=None):
for t in self.read(cr, uid, ids, ['state'], context=context):
if t['state'] not in ('draft', 'cancel'):
raise osv.except_osv(_('Invalid Action!'), _('Cannot delete voucher(s) which are already opened or paid.'))
return super(account_voucher, self).unlink(cr, uid, ids, context=context)
def onchange_payment(self, cr, uid, ids, pay_now, journal_id, partner_id, ttype='sale'):
res = {}
if not partner_id:
return res
res = {}
partner_pool = self.pool.get('res.partner')
journal_pool = self.pool.get('account.journal')
if pay_now == 'pay_later':
partner = partner_pool.browse(cr, uid, partner_id)
journal = journal_pool.browse(cr, uid, journal_id)
if journal.type in ('sale','sale_refund'):
account_id = partner.property_account_receivable.id
elif journal.type in ('purchase', 'purchase_refund','expense'):
account_id = partner.property_account_payable.id
elif ttype in ('sale', 'receipt'):
account_id = journal.default_debit_account_id.id
elif ttype in ('purchase', 'payment'):
account_id = journal.default_credit_account_id.id
else:
account_id = journal.default_credit_account_id.id or journal.default_debit_account_id.id
if account_id:
res['account_id'] = account_id
return {'value':res}
def _sel_context(self, cr, uid, voucher_id, context=None):
"""
Select the context to use depending on whether the voucher is multicurrency or not.
:param voucher_id: Id of the voucher being processed
:return: The returned context is the same as the one given in parameter if the voucher currency is the same
as the company currency; otherwise it is a copy of the parameter with an extra key 'date' containing
the date of the voucher.
:rtype: dict
"""
company_currency = self._get_company_currency(cr, uid, voucher_id, context)
current_currency = self._get_current_currency(cr, uid, voucher_id, context)
if current_currency <> company_currency:
context_multi_currency = context.copy()
voucher = self.pool.get('account.voucher').browse(cr, uid, voucher_id, context)
context_multi_currency.update({'date': voucher.date})
return context_multi_currency
return context
def first_move_line_get(self, cr, uid, voucher_id, move_id, company_currency, current_currency, context=None):
'''
Return a dict to be used to create the first account move line of the given voucher.
:param voucher_id: Id of the voucher for which we are creating the account move.
:param move_id: Id of the account move where this line will be added.
:param company_currency: id of the currency of the company to which the voucher belongs
:param current_currency: id of currency of the voucher
:return: mapping between fieldname and value of account move line to create
:rtype: dict
'''
voucher = self.pool.get('account.voucher').browse(cr,uid,voucher_id,context)
debit = credit = 0.0
# TODO: is there any other alternative than the voucher type ??
# ANSWER: We can have payment and receipt "In Advance".
# TODO: Make this logic available.
# -for sale and purchase we have it, but for payment and receipt we do not, as based on the bank/cash journal we cannot know whether it is a payment or a receipt
if voucher.type in ('purchase', 'payment'):
credit = voucher.paid_amount_in_company_currency
elif voucher.type in ('sale', 'receipt'):
debit = voucher.paid_amount_in_company_currency
if debit < 0: credit = -debit; debit = 0.0
if credit < 0: debit = -credit; credit = 0.0
sign = debit - credit < 0 and -1 or 1
#set the first line of the voucher
move_line = {
'name': voucher.name or '/',
'debit': debit,
'credit': credit,
'account_id': voucher.account_id.id,
'move_id': move_id,
'journal_id': voucher.journal_id.id,
'period_id': voucher.period_id.id,
'partner_id': voucher.partner_id.id,
'currency_id': company_currency <> current_currency and current_currency or False,
'amount_currency': (sign * abs(voucher.amount) # amount < 0 for refunds
if company_currency != current_currency else 0.0),
'date': voucher.date,
'date_maturity': voucher.date_due
}
return move_line
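# --- Editor's illustrative sketch (not part of the original module) ---
# first_move_line_get() above never books a negative debit or credit: a negative value is flipped
# to the other column, which is how refunds end up on the opposite side. Standalone version of
# that normalization (hypothetical helper name):
def _normalize_debit_credit_sketch(debit, credit):
    if debit < 0:
        credit, debit = -debit, 0.0
    if credit < 0:
        debit, credit = -credit, 0.0
    return debit, credit
# _normalize_debit_credit_sketch(-50.0, 0.0) -> (0.0, 50.0)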
def account_move_get(self, cr, uid, voucher_id, context=None):
'''
This method prepares the creation of the account move related to the given voucher.
:param voucher_id: Id of the voucher for which we are creating the account move.
:return: mapping between fieldname and value of account move to create
:rtype: dict
'''
seq_obj = self.pool.get('ir.sequence')
voucher = self.pool.get('account.voucher').browse(cr,uid,voucher_id,context)
if voucher.number:
name = voucher.number
elif voucher.journal_id.sequence_id:
if not voucher.journal_id.sequence_id.active:
raise osv.except_osv(_('Configuration Error !'),
_('Please activate the sequence of selected journal !'))
c = dict(context)
c.update({'fiscalyear_id': voucher.period_id.fiscalyear_id.id})
name = seq_obj.next_by_id(cr, uid, voucher.journal_id.sequence_id.id, context=c)
else:
raise osv.except_osv(_('Error!'),
_('Please define a sequence on the journal.'))
if not voucher.reference:
ref = name.replace('/','')
else:
ref = voucher.reference
move = {
'name': name,
'journal_id': voucher.journal_id.id,
'narration': voucher.narration,
'date': voucher.date,
'ref': ref,
'period_id': voucher.period_id.id,
}
return move
def _get_exchange_lines(self, cr, uid, line, move_id, amount_residual, company_currency, current_currency, context=None):
'''
Prepare the two lines in company currency due to currency rate difference.
:param line: browse record of the voucher.line for which we want to create currency rate difference accounting
entries
:param move_id: Account move where the move lines will be added.
:param amount_residual: Amount to be posted.
:param company_currency: id of the currency of the company to which the voucher belongs
:param current_currency: id of currency of the voucher
:return: the account move line and its counterpart to create, depicted as mapping between fieldname and value
:rtype: tuple of dict
'''
if amount_residual > 0:
account_id = line.voucher_id.company_id.expense_currency_exchange_account_id
if not account_id:
model, action_id = self.pool['ir.model.data'].get_object_reference(cr, uid, 'account', 'action_account_form')
msg = _("You should configure the 'Loss Exchange Rate Account' to manage automatically the booking of accounting entries related to differences between exchange rates.")
raise openerp.exceptions.RedirectWarning(msg, action_id, _('Go to the configuration panel'))
else:
account_id = line.voucher_id.company_id.income_currency_exchange_account_id
if not account_id:
model, action_id = self.pool['ir.model.data'].get_object_reference(cr, uid, 'account', 'action_account_form')
msg = _("You should configure the 'Gain Exchange Rate Account' to manage automatically the booking of accounting entries related to differences between exchange rates.")
raise openerp.exceptions.RedirectWarning(msg, action_id, _('Go to the configuration panel'))
# Even if the amount_currency is never filled, we need to pass the foreign currency because otherwise
# the receivable/payable account may have a secondary currency, which renders this field mandatory
if line.account_id.currency_id:
account_currency_id = line.account_id.currency_id.id
else:
account_currency_id = company_currency <> current_currency and current_currency or False
move_line = {
'journal_id': line.voucher_id.journal_id.id,
'period_id': line.voucher_id.period_id.id,
'name': _('change')+': '+(line.name or '/'),
'account_id': line.account_id.id,
'move_id': move_id,
'partner_id': line.voucher_id.partner_id.id,
'currency_id': account_currency_id,
'amount_currency': 0.0,
'quantity': 1,
'credit': amount_residual > 0 and amount_residual or 0.0,
'debit': amount_residual < 0 and -amount_residual or 0.0,
'date': line.voucher_id.date,
}
move_line_counterpart = {
'journal_id': line.voucher_id.journal_id.id,
'period_id': line.voucher_id.period_id.id,
'name': _('change')+': '+(line.name or '/'),
'account_id': account_id.id,
'move_id': move_id,
'amount_currency': 0.0,
'partner_id': line.voucher_id.partner_id.id,
'currency_id': account_currency_id,
'quantity': 1,
'debit': amount_residual > 0 and amount_residual or 0.0,
'credit': amount_residual < 0 and -amount_residual or 0.0,
'date': line.voucher_id.date,
}
return (move_line, move_line_counterpart)
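# --- Editor's illustrative sketch (not part of the original module) ---
# _get_exchange_lines() above picks the P&L account from the sign of the residual: a positive
# residual is booked against the loss account (expense_currency_exchange_account_id), a negative
# one against the gain account (income_currency_exchange_account_id). Minimal sketch:
def _exchange_account_sketch(amount_residual):
    return 'loss' if amount_residual > 0 else 'gain'
# _exchange_account_sketch(2.5) -> 'loss'; _exchange_account_sketch(-2.5) -> 'gain'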
def _convert_amount(self, cr, uid, amount, voucher_id, context=None):
'''
This function converts the given amount into the company currency. It uses either the rate on the voucher (if the
payment_rate_currency_id is relevant) or the rate encoded in the system.
:param amount: float. The amount to convert
:param voucher: id of the voucher on which we want the conversion
:param context: the context to use for the conversion. It may contain the key 'date' set to the voucher date
field in order to select the right rate to use.
:return: the amount in the currency of the voucher's company
:rtype: float
'''
if context is None:
context = {}
currency_obj = self.pool.get('res.currency')
voucher = self.browse(cr, uid, voucher_id, context=context)
return currency_obj.compute(cr, uid, voucher.currency_id.id, voucher.company_id.currency_id.id, amount, context=context)
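# --- Editor's illustrative sketch (not part of the original module) ---
# _convert_amount() above delegates to res.currency.compute(), which conceptually converts
# through the rates of both currencies against a common base. A rough standalone approximation,
# with hypothetical rates and without rounding to the currency precision:
def _convert_sketch(amount, from_rate, to_rate):
    # rates are "units of that currency per 1 unit of the base currency"
    return amount / from_rate * to_rate
# _convert_sketch(110.0, from_rate=1.10, to_rate=1.0) -> 100.0 in the target currency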
def voucher_move_line_create(self, cr, uid, voucher_id, line_total, move_id, company_currency, current_currency, context=None):
'''
Create one account move line, on the given account move, per voucher line where amount is not 0.0.
It returns a tuple with tot_line, which is the total difference between debit and credit, and
a list of lists of ids to be reconciled, in this format (total_deb_cred, list_of_lists).
:param voucher_id: Id of the voucher we are working with
:param line_total: Amount of the first line, which corresponds to the amount we should split among all voucher lines.
:param move_id: Account move where those lines will be added.
:param company_currency: id of the currency of the company to which the voucher belongs
:param current_currency: id of the currency of the voucher
:return: Tuple built as (remaining amount not allocated on voucher lines, list of account_move_line created in this method)
:rtype: tuple(float, list of int)
'''
if context is None:
context = {}
move_line_obj = self.pool.get('account.move.line')
currency_obj = self.pool.get('res.currency')
tax_obj = self.pool.get('account.tax')
tot_line = line_total
rec_lst_ids = []
date = self.read(cr, uid, [voucher_id], ['date'], context=context)[0]['date']
ctx = context.copy()
ctx.update({'date': date})
voucher = self.pool.get('account.voucher').browse(cr, uid, voucher_id, context=ctx)
voucher_currency = voucher.journal_id.currency or voucher.company_id.currency_id
ctx.update({
'voucher_special_currency_rate': voucher_currency.rate * voucher.payment_rate ,
'voucher_special_currency': voucher.payment_rate_currency_id and voucher.payment_rate_currency_id.id or False,})
prec = self.pool.get('decimal.precision').precision_get(cr, uid, 'Account')
for line in voucher.line_ids:
#create one move line per voucher line where amount is not 0.0
# AND (second part of the clause) only if the original move line did not have debit = credit = 0 (which is a legal value)
if not line.amount and not (line.move_line_id and not float_compare(line.move_line_id.debit, line.move_line_id.credit, precision_digits=prec) and not float_compare(line.move_line_id.debit, 0.0, precision_digits=prec)):
continue
# convert the amount set on the voucher line into the currency of the voucher's company
# this calls res_currency.compute() with the right context, so that it will take either the rate on the voucher if it is relevant or will use the default behaviour
amount = self._convert_amount(cr, uid, line.untax_amount or line.amount, voucher.id, context=ctx)
# if the amount encoded in voucher is equal to the amount unreconciled, we need to compute the
# currency rate difference
if line.amount == line.amount_unreconciled:
if not line.move_line_id:
raise osv.except_osv(_('Wrong voucher line'),_("The invoice you are willing to pay is not valid anymore."))
sign = line.type =='dr' and -1 or 1
currency_rate_difference = sign * (line.move_line_id.amount_residual - amount)
else:
currency_rate_difference = 0.0
move_line = {
'journal_id': voucher.journal_id.id,
'period_id': voucher.period_id.id,
'name': line.name or '/',
'account_id': line.account_id.id,
'move_id': move_id,
'partner_id': voucher.partner_id.id,
'currency_id': line.move_line_id and (company_currency <> line.move_line_id.currency_id.id and line.move_line_id.currency_id.id) or False,
'analytic_account_id': line.account_analytic_id and line.account_analytic_id.id or False,
'quantity': 1,
'credit': 0.0,
'debit': 0.0,
'date': voucher.date
}
if amount < 0:
amount = -amount
if line.type == 'dr':
line.type = 'cr'
else:
line.type = 'dr'
if (line.type=='dr'):
tot_line += amount
move_line['debit'] = amount
else:
tot_line -= amount
move_line['credit'] = amount
if voucher.tax_id and voucher.type in ('sale', 'purchase'):
move_line.update({
'account_tax_id': voucher.tax_id.id,
})
# compute the amount in foreign currency
foreign_currency_diff = 0.0
amount_currency = False
if line.move_line_id:
# We want to set it on the account move line as soon as the original line had a foreign currency
if line.move_line_id.currency_id and line.move_line_id.currency_id.id != company_currency:
# we compute the amount in that foreign currency.
if line.move_line_id.currency_id.id == current_currency:
# if the voucher and the voucher line share the same currency, there is no computation to do
sign = (move_line['debit'] - move_line['credit']) < 0 and -1 or 1
amount_currency = sign * (line.amount)
else:
# if the rate is specified on the voucher, it will be used thanks to the special keys in the context
# otherwise we use the rates of the system
amount_currency = currency_obj.compute(cr, uid, company_currency, line.move_line_id.currency_id.id, move_line['debit']-move_line['credit'], context=ctx)
if line.amount == line.amount_unreconciled:
foreign_currency_diff = line.move_line_id.amount_residual_currency - abs(amount_currency)
move_line['amount_currency'] = amount_currency
voucher_line = move_line_obj.create(cr, uid, move_line)
rec_ids = [voucher_line, line.move_line_id.id]
if not currency_obj.is_zero(cr, uid, voucher.company_id.currency_id, currency_rate_difference):
# Change difference entry in company currency
exch_lines = self._get_exchange_lines(cr, uid, line, move_id, currency_rate_difference, company_currency, current_currency, context=context)
new_id = move_line_obj.create(cr, uid, exch_lines[0],context)
move_line_obj.create(cr, uid, exch_lines[1], context)
rec_ids.append(new_id)
if line.move_line_id and line.move_line_id.currency_id and not currency_obj.is_zero(cr, uid, line.move_line_id.currency_id, foreign_currency_diff):
# Change difference entry in voucher currency
move_line_foreign_currency = {
'journal_id': line.voucher_id.journal_id.id,
'period_id': line.voucher_id.period_id.id,
'name': _('change')+': '+(line.name or '/'),
'account_id': line.account_id.id,
'move_id': move_id,
'partner_id': line.voucher_id.partner_id.id,
'currency_id': line.move_line_id.currency_id.id,
'amount_currency': -1 * foreign_currency_diff,
'quantity': 1,
'credit': 0.0,
'debit': 0.0,
'date': line.voucher_id.date,
}
new_id = move_line_obj.create(cr, uid, move_line_foreign_currency, context=context)
rec_ids.append(new_id)
if line.move_line_id.id:
rec_lst_ids.append(rec_ids)
return (tot_line, rec_lst_ids)
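# --- Editor's illustrative sketch (not part of the original module) ---
# The currency_rate_difference computed in voucher_move_line_create() above is the signed gap
# between what is still booked on the reconciled move line (in company currency) and what the
# voucher pays for it. Standalone sketch of that formula (hypothetical helper name):
def _rate_difference_sketch(line_type, amount_residual_company, paid_amount_company):
    sign = -1 if line_type == 'dr' else 1
    return sign * (amount_residual_company - paid_amount_company)
# _rate_difference_sketch('cr', 100.0, 98.0) -> 2.0, booked via _get_exchange_lines()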
def writeoff_move_line_get(self, cr, uid, voucher_id, line_total, move_id, name, company_currency, current_currency, context=None):
'''
Build a dict to be used to create the writeoff move line.
:param voucher_id: Id of the voucher for which we are creating the account move.
:param line_total: Amount remaining to be allocated on lines.
:param move_id: Id of account move where this line will be added.
:param name: Description of account move line.
:param company_currency: id of the currency of the company to which the voucher belongs
:param current_currency: id of currency of the voucher
:return: mapping between fieldname and value of account move line to create
:rtype: dict
'''
currency_obj = self.pool.get('res.currency')
move_line = {}
voucher = self.pool.get('account.voucher').browse(cr,uid,voucher_id,context)
current_currency_obj = voucher.currency_id or voucher.journal_id.company_id.currency_id
if not currency_obj.is_zero(cr, uid, current_currency_obj, line_total):
diff = line_total
account_id = False
write_off_name = ''
if voucher.payment_option == 'with_writeoff':
account_id = voucher.writeoff_acc_id.id
write_off_name = voucher.comment
elif voucher.partner_id:
if voucher.type in ('sale', 'receipt'):
account_id = voucher.partner_id.property_account_receivable.id
else:
account_id = voucher.partner_id.property_account_payable.id
else:
# fallback on account of voucher
account_id = voucher.account_id.id
sign = voucher.type == 'payment' and -1 or 1
move_line = {
'name': write_off_name or name,
'account_id': account_id,
'move_id': move_id,
'partner_id': voucher.partner_id.id,
'date': voucher.date,
'credit': diff > 0 and diff or 0.0,
'debit': diff < 0 and -diff or 0.0,
'amount_currency': company_currency <> current_currency and (sign * -1 * voucher.writeoff_amount) or 0.0,
'currency_id': company_currency <> current_currency and current_currency or False,
'analytic_account_id': voucher.analytic_id and voucher.analytic_id.id or False,
}
return move_line
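# --- Editor's illustrative sketch (not part of the original module) ---
# writeoff_move_line_get() above turns whatever remains of line_total into a balancing line:
# a positive remainder becomes a credit, a negative one a debit. Standalone sketch:
def _writeoff_side_sketch(line_total):
    credit = line_total if line_total > 0 else 0.0
    debit = -line_total if line_total < 0 else 0.0
    return debit, credit
# _writeoff_side_sketch(-12.5) -> (12.5, 0.0)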
def _get_company_currency(self, cr, uid, voucher_id, context=None):
'''
Get the currency of the voucher's company.
:param voucher_id: Id of the voucher for which we want to obtain the company currency.
:return: currency id of the company of the voucher
:rtype: int
'''
return self.pool.get('account.voucher').browse(cr,uid,voucher_id,context).journal_id.company_id.currency_id.id
def _get_current_currency(self, cr, uid, voucher_id, context=None):
'''
Get the currency of the voucher.
:param voucher_id: Id of the voucher for which we want to obtain the current currency.
:return: currency id of the voucher
:rtype: int
'''
voucher = self.pool.get('account.voucher').browse(cr,uid,voucher_id,context)
return voucher.currency_id.id or self._get_company_currency(cr,uid,voucher.id,context)
def action_move_line_create(self, cr, uid, ids, context=None):
'''
Confirm the vouchers given in ids and create the journal entries for each of them
'''
if context is None:
context = {}
move_pool = self.pool.get('account.move')
move_line_pool = self.pool.get('account.move.line')
for voucher in self.browse(cr, uid, ids, context=context):
local_context = dict(context, force_company=voucher.journal_id.company_id.id)
if voucher.move_id:
continue
company_currency = self._get_company_currency(cr, uid, voucher.id, context)
current_currency = self._get_current_currency(cr, uid, voucher.id, context)
# we select the context to use accordingly if it's a multicurrency case or not
context = self._sel_context(cr, uid, voucher.id, context)
# But for the operations made by _convert_amount, we always need to give the date in the context
ctx = context.copy()
ctx.update({'date': voucher.date})
# Create the account move record.
move_id = move_pool.create(cr, uid, self.account_move_get(cr, uid, voucher.id, context=context), context=context)
# Get the name of the account_move just created
name = move_pool.browse(cr, uid, move_id, context=context).name
# Create the first line of the voucher
move_line_id = move_line_pool.create(cr, uid, self.first_move_line_get(cr,uid,voucher.id, move_id, company_currency, current_currency, local_context), local_context)
move_line_brw = move_line_pool.browse(cr, uid, move_line_id, context=context)
line_total = move_line_brw.debit - move_line_brw.credit
rec_list_ids = []
if voucher.type == 'sale':
line_total = line_total - self._convert_amount(cr, uid, voucher.tax_amount, voucher.id, context=ctx)
elif voucher.type == 'purchase':
line_total = line_total + self._convert_amount(cr, uid, voucher.tax_amount, voucher.id, context=ctx)
# Create one move line per voucher line where amount is not 0.0
line_total, rec_list_ids = self.voucher_move_line_create(cr, uid, voucher.id, line_total, move_id, company_currency, current_currency, context)
# Create the writeoff line if needed
ml_writeoff = self.writeoff_move_line_get(cr, uid, voucher.id, line_total, move_id, name, company_currency, current_currency, local_context)
if ml_writeoff:
move_line_pool.create(cr, uid, ml_writeoff, local_context)
# We post the voucher.
self.write(cr, uid, [voucher.id], {
'move_id': move_id,
'state': 'posted',
'number': name,
})
if voucher.journal_id.entry_posted:
move_pool.post(cr, uid, [move_id], context={})
# We automatically reconcile the account move lines.
reconcile = False
for rec_ids in rec_list_ids:
if len(rec_ids) >= 2:
reconcile = move_line_pool.reconcile_partial(cr, uid, rec_ids, writeoff_acc_id=voucher.writeoff_acc_id.id, writeoff_period_id=voucher.period_id.id, writeoff_journal_id=voucher.journal_id.id)
return True
class account_voucher_line(osv.osv):
_name = 'account.voucher.line'
_description = 'Voucher Lines'
_order = "move_line_id"
# If the payment is in the same currency as the invoice, we keep the same amount
# Otherwise, we compute from invoice currency to payment currency
def _compute_balance(self, cr, uid, ids, name, args, context=None):
currency_pool = self.pool.get('res.currency')
rs_data = {}
for line in self.browse(cr, uid, ids, context=context):
ctx = context.copy()
ctx.update({'date': line.voucher_id.date})
voucher_rate = self.pool.get('res.currency').read(cr, uid, line.voucher_id.currency_id.id, ['rate'], context=ctx)['rate']
ctx.update({
'voucher_special_currency': line.voucher_id.payment_rate_currency_id and line.voucher_id.payment_rate_currency_id.id or False,
'voucher_special_currency_rate': line.voucher_id.payment_rate * voucher_rate})
res = {}
company_currency = line.voucher_id.journal_id.company_id.currency_id.id
voucher_currency = line.voucher_id.currency_id and line.voucher_id.currency_id.id or company_currency
move_line = line.move_line_id or False
if not move_line:
res['amount_original'] = 0.0
res['amount_unreconciled'] = 0.0
elif move_line.currency_id and voucher_currency==move_line.currency_id.id:
res['amount_original'] = abs(move_line.amount_currency)
res['amount_unreconciled'] = abs(move_line.amount_residual_currency)
else:
#always use the amount booked in the company currency as the basis of the conversion into the voucher currency
res['amount_original'] = currency_pool.compute(cr, uid, company_currency, voucher_currency, move_line.credit or move_line.debit or 0.0, context=ctx)
res['amount_unreconciled'] = currency_pool.compute(cr, uid, company_currency, voucher_currency, abs(move_line.amount_residual), context=ctx)
rs_data[line.id] = res
return rs_data
def _currency_id(self, cr, uid, ids, name, args, context=None):
'''
This function returns the currency id of a voucher line. It's either the currency of the
associated move line (if any) or the currency of the voucher or the company currency.
'''
res = {}
for line in self.browse(cr, uid, ids, context=context):
move_line = line.move_line_id
if move_line:
res[line.id] = move_line.currency_id and move_line.currency_id.id or move_line.company_id.currency_id.id
else:
res[line.id] = line.voucher_id.currency_id and line.voucher_id.currency_id.id or line.voucher_id.company_id.currency_id.id
return res
_columns = {
'voucher_id':fields.many2one('account.voucher', 'Voucher', required=1, ondelete='cascade'),
'name':fields.char('Description',),
'account_id':fields.many2one('account.account','Account', required=True),
'partner_id':fields.related('voucher_id', 'partner_id', type='many2one', relation='res.partner', string='Partner'),
'untax_amount':fields.float('Untax Amount'),
'amount':fields.float('Amount', digits_compute=dp.get_precision('Account')),
'reconcile': fields.boolean('Full Reconcile'),
'type':fields.selection([('dr','Debit'),('cr','Credit')], 'Dr/Cr'),
'account_analytic_id': fields.many2one('account.analytic.account', 'Analytic Account'),
'move_line_id': fields.many2one('account.move.line', 'Journal Item', copy=False),
'date_original': fields.related('move_line_id','date', type='date', relation='account.move.line', string='Date', readonly=1),
'date_due': fields.related('move_line_id','date_maturity', type='date', relation='account.move.line', string='Due Date', readonly=1),
'amount_original': fields.function(_compute_balance, multi='dc', type='float', string='Original Amount', store=True, digits_compute=dp.get_precision('Account')),
'amount_unreconciled': fields.function(_compute_balance, multi='dc', type='float', string='Open Balance', store=True, digits_compute=dp.get_precision('Account')),
'company_id': fields.related('voucher_id','company_id', relation='res.company', type='many2one', string='Company', store=True, readonly=True),
'currency_id': fields.function(_currency_id, string='Currency', type='many2one', relation='res.currency', readonly=True),
}
_defaults = {
'name': '',
}
def onchange_reconcile(self, cr, uid, ids, reconcile, amount, amount_unreconciled, context=None):
vals = {'amount': 0.0}
if reconcile:
vals = { 'amount': amount_unreconciled}
return {'value': vals}
def onchange_amount(self, cr, uid, ids, amount, amount_unreconciled, context=None):
vals = {}
if amount:
vals['reconcile'] = (amount == amount_unreconciled)
return {'value': vals}
def onchange_move_line_id(self, cr, user, ids, move_line_id, context=None):
"""
Returns a dict that contains new values and context
@param move_line_id: latest value from user input for field move_line_id
@param args: other arguments
@param context: context arguments, like lang, time zone
@return: Returns a dict which contains new values, and context
"""
res = {}
move_line_pool = self.pool.get('account.move.line')
if move_line_id:
move_line = move_line_pool.browse(cr, user, move_line_id, context=context)
if move_line.credit:
ttype = 'dr'
else:
ttype = 'cr'
res.update({
'account_id': move_line.account_id.id,
'type': ttype,
'currency_id': move_line.currency_id and move_line.currency_id.id or move_line.company_id.currency_id.id,
})
return {
'value':res,
}
def default_get(self, cr, user, fields_list, context=None):
"""
Returns default values for fields
@param fields_list: list of fields, for which default values are required to be read
@param context: context arguments, like lang, time zone
@return: Returns a dict that contains default values for fields
"""
if context is None:
context = {}
journal_id = context.get('journal_id', False)
partner_id = context.get('partner_id', False)
journal_pool = self.pool.get('account.journal')
partner_pool = self.pool.get('res.partner')
values = super(account_voucher_line, self).default_get(cr, user, fields_list, context=context)
if (not journal_id) or ('account_id' not in fields_list):
return values
journal = journal_pool.browse(cr, user, journal_id, context=context)
account_id = False
ttype = 'cr'
if journal.type in ('sale', 'sale_refund'):
account_id = journal.default_credit_account_id and journal.default_credit_account_id.id or False
ttype = 'cr'
elif journal.type in ('purchase', 'expense', 'purchase_refund'):
account_id = journal.default_debit_account_id and journal.default_debit_account_id.id or False
ttype = 'dr'
elif partner_id:
partner = partner_pool.browse(cr, user, partner_id, context=context)
if context.get('type') == 'payment':
ttype = 'dr'
account_id = partner.property_account_payable.id
elif context.get('type') == 'receipt':
account_id = partner.property_account_receivable.id
values.update({
'account_id':account_id,
'type':ttype
})
return values
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
chouseknecht/ansible | lib/ansible/module_utils/network/junos/facts/vlans/vlans.py | 21 | 3564 | #
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
The junos vlans fact class
It is in this file the configuration is collected from the device
for a given resource, parsed, and the facts tree is populated
based on the configuration.
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from copy import deepcopy
from ansible.module_utils._text import to_bytes
from ansible.module_utils.network.common import utils
from ansible.module_utils.network.junos.argspec.vlans.vlans import VlansArgs
from ansible.module_utils.network.junos.utils.utils import get_resource_config
from ansible.module_utils.six import string_types
try:
from lxml import etree
HAS_LXML = True
except ImportError:
HAS_LXML = False
class VlansFacts(object):
""" The junos vlans fact class
"""
def __init__(self, module, subspec='config', options='options'):
self._module = module
self.argument_spec = VlansArgs.argument_spec
spec = deepcopy(self.argument_spec)
if subspec:
if options:
facts_argument_spec = spec[subspec][options]
else:
facts_argument_spec = spec[subspec]
else:
facts_argument_spec = spec
self.generated_spec = utils.generate_dict(facts_argument_spec)
def populate_facts(self, connection, ansible_facts, data=None):
""" Populate the facts for vlans
:param connection: the device connection
:param ansible_facts: Facts dictionary
:param data: previously collected conf
:rtype: dictionary
:returns: facts
"""
if not HAS_LXML:
self._module.fail_json(msg='lxml is not installed.')
if not data:
config_filter = """
<configuration>
<vlans>
</vlans>
</configuration>
"""
data = get_resource_config(connection, config_filter=config_filter)
if isinstance(data, string_types):
data = etree.fromstring(to_bytes(data,
errors='surrogate_then_replace'))
resources = data.xpath('configuration/vlans/vlan')
objs = []
for resource in resources:
if resource is not None:
obj = self.render_config(self.generated_spec, resource)
if obj:
objs.append(obj)
facts = {}
if objs:
facts['vlans'] = []
params = utils.validate_config(self.argument_spec,
{'config': objs})
for cfg in params['config']:
facts['vlans'].append(utils.remove_empties(cfg))
ansible_facts['ansible_network_resources'].update(facts)
return ansible_facts
def render_config(self, spec, conf):
"""
Render config as dictionary structure and delete keys
from spec for null values
:param spec: The facts tree, generated from the argspec
:param conf: The configuration
:rtype: dictionary
:returns: The generated config
"""
config = deepcopy(spec)
config['name'] = utils.get_xml_conf_arg(conf, 'name')
config['vlan_id'] = utils.get_xml_conf_arg(conf, 'vlan-id')
config['description'] = utils.get_xml_conf_arg(conf, 'description')
return utils.remove_empties(config)
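# --- Editor's illustrative sketch (not part of the original module) ---
# render_config() above reads each <vlan> element with utils.get_xml_conf_arg and drops empty
# keys. A rough standalone equivalent using plain lxml (tag names as in the config_filter above),
# kept as comments so nothing runs at import time:
# vlan = etree.fromstring("<vlan><name>v10</name><vlan-id>10</vlan-id></vlan>")
# {tag: vlan.findtext(tag) for tag in ('name', 'vlan-id', 'description') if vlan.findtext(tag)}
# -> {'name': 'v10', 'vlan-id': '10'}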
| gpl-3.0 |
caseydavenport/calico-containers | tests/st/calicoctl/test_autodetection.py | 2 | 4864 | # Copyright (c) 2015-2016 Tigera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nose.plugins.attrib import attr
from tests.st.test_base import TestBase
from tests.st.utils.docker_host import DockerHost
from tests.st.utils.utils import ETCD_CA, ETCD_CERT, \
ETCD_KEY, ETCD_HOSTNAME_SSL, ETCD_SCHEME, get_ip
from tests.st.utils.exceptions import CommandExecError
if ETCD_SCHEME == "https":
ADDITIONAL_DOCKER_OPTIONS = "--cluster-store=etcd://%s:2379 " \
"--cluster-store-opt kv.cacertfile=%s " \
"--cluster-store-opt kv.certfile=%s " \
"--cluster-store-opt kv.keyfile=%s " % \
(ETCD_HOSTNAME_SSL, ETCD_CA, ETCD_CERT,
ETCD_KEY)
else:
ADDITIONAL_DOCKER_OPTIONS = "--cluster-store=etcd://%s:2379 " % \
get_ip()
class TestAutodetection(TestBase):
@attr('slow')
def test_autodetection(self):
"""
Test using different IP autodetection methods.
We run a multi-host test for this to test explicit selection of
"first-found" and also "interface" and "can-reach" detection methods.
"""
with DockerHost('host1',
additional_docker_options=ADDITIONAL_DOCKER_OPTIONS,
start_calico=False) as host1, \
DockerHost('host2',
additional_docker_options=ADDITIONAL_DOCKER_OPTIONS,
start_calico=False) as host2, \
DockerHost('host3',
additional_docker_options=ADDITIONAL_DOCKER_OPTIONS,
start_calico=False) as host3:
# Start the node on host1 using first-found auto-detection
# method.
host1.start_calico_node(
"--ip=autodetect --ip-autodetection-method=first-found")
# Attempt to start the node on host2 using can-reach auto-detection
# method using a bogus DNS name. This should fail.
try:
host2.start_calico_node(
"--ip=autodetect --ip-autodetection-method=can-reach=XXX.YYY.ZZZ.XXX")
except CommandExecError:
pass
else:
raise AssertionError("Command expected to fail but did not")
# Start the node on host2 using can-reach auto-detection method
# using the IP address of host1. This should succeed.
host2.start_calico_node(
"--ip=autodetect --ip-autodetection-method=can-reach=" + host1.ip)
# Attempt to start the node on host3 using interface auto-detection
# method using a bogus interface name. This should fail.
try:
host3.start_calico_node(
"--ip=autodetect --ip-autodetection-method=interface=BogusInterface")
except CommandExecError:
pass
else:
raise AssertionError("Command expected to fail but did not")
# Start the node on host3 using the interface auto-detection method
# with interface eth0. This should succeed.
host3.start_calico_node(
"--ip=autodetect --ip-autodetection-method=interface=eth0")
# Create a network and a workload on each host.
network1 = host1.create_network("subnet1")
workload_host1 = host1.create_workload("workload1", network=network1)
workload_host2 = host2.create_workload("workload2", network=network1)
workload_host3 = host3.create_workload("workload3", network=network1)
# Allow network to converge
self.assert_true(workload_host1.check_can_ping(workload_host3.ip, retries=10))
# Check connectivity in both directions
self.assert_ip_connectivity(workload_list=[workload_host1,
workload_host2,
workload_host3],
ip_pass_list=[workload_host1.ip,
workload_host2.ip,
workload_host3.ip])
| apache-2.0 |
hyperized/ansible | lib/ansible/modules/network/f5/bigip_ucs_fetch.py | 38 | 18824 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_ucs_fetch
short_description: Fetches a UCS file from remote nodes
description:
- This module is used for fetching UCS files from remote machines and
storing them locally in a file tree, organized by hostname. Note that
this module is written to transfer UCS files that might not be present,
so a missing remote UCS won't be an error unless fail_on_missing is
set to 'yes'.
version_added: 2.5
options:
backup:
description:
- Create a backup file including the timestamp information so you can
get the original file back if you somehow clobbered it incorrectly.
type: bool
default: no
create_on_missing:
description:
- Creates the UCS based on the value of C(src) if the file does not already
exist on the remote system.
type: bool
default: yes
dest:
description:
- A directory to save the UCS file into.
type: path
required: True
encryption_password:
description:
- Password to use to encrypt the UCS file if desired.
type: str
fail_on_missing:
description:
- Make the module fail if the UCS file on the remote system is missing.
type: bool
default: no
force:
description:
- If C(no), the file will only be transferred if the destination does not
exist.
type: bool
default: yes
src:
description:
- The name of the UCS file to create on the remote server for downloading
type: str
notes:
- BIG-IP provides no way to get a checksum of the UCS files on the system
via any interface except, perhaps, logging in directly to the box (which
would not support appliance mode). Therefore, the best this module can
do is check for the existence of the file on disk; no check-summing.
- If you are using this module with either Ansible Tower or Ansible AWX, you
should be aware of how these Ansible products execute jobs in restricted
environments. More information can be found here
https://clouddocs.f5.com/products/orchestration/ansible/devel/usage/module-usage-with-tower.html
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Download a new UCS
bigip_ucs_fetch:
src: cs_backup.ucs
dest: /tmp/cs_backup.ucs
provider:
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
'''
RETURN = r'''
checksum:
description: The SHA1 checksum of the downloaded file
returned: success or changed
type: str
sample: 7b46bbe4f8ebfee64761b5313855618f64c64109
dest:
description: Location on the ansible host that the UCS was saved to
returned: success
type: str
sample: /path/to/file.txt
src:
description:
- Name of the UCS file on the remote BIG-IP to download. If not
specified, then this will be a randomly generated filename
returned: changed
type: str
sample: cs_backup.ucs
backup_file:
description: Name of backup file created
returned: changed and if backup=yes
type: str
sample: /path/to/file.txt.2015-02-12@22:09~
gid:
description: Group id of the UCS file, after execution
returned: success
type: int
sample: 100
group:
description: Group of the UCS file, after execution
returned: success
type: str
sample: httpd
owner:
description: Owner of the UCS file, after execution
returned: success
type: str
sample: httpd
uid:
description: Owner id of the UCS file, after execution
returned: success
type: int
sample: 100
md5sum:
description: The MD5 checksum of the downloaded file
returned: changed or success
type: str
sample: 96cacab4c259c4598727d7cf2ceb3b45
mode:
description: Permissions of the target UCS, after execution
returned: success
type: str
sample: 0644
size:
description: Size of the target UCS, after execution
returned: success
type: int
sample: 1220
'''
import os
import re
import tempfile
from ansible.module_utils.basic import AnsibleModule
from distutils.version import LooseVersion
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import transform_name
from library.module_utils.network.f5.icontrol import download_file
from library.module_utils.network.f5.icontrol import tmos_version
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import transform_name
from ansible.module_utils.network.f5.icontrol import download_file
from ansible.module_utils.network.f5.icontrol import tmos_version
class Parameters(AnsibleF5Parameters):
updatables = []
returnables = [
'dest',
'src',
'md5sum',
'checksum',
'backup_file']
api_attributes = []
api_map = {}
@property
def options(self):
result = []
if self.passphrase:
result.append(dict(
passphrase=self.want.passphrase
))
return result
@property
def src(self):
if self._values['src'] is not None:
return self._values['src']
result = next(tempfile._get_candidate_names()) + '.ucs'
self._values['src'] = result
return result
@property
def fulldest(self):
result = None
if os.path.isdir(self.dest):
result = os.path.join(self.dest, self.src)
else:
if os.path.exists(os.path.dirname(self.dest)):
result = self.dest
else:
try:
# os.path.exists() can return false in some
# circumstances where the directory does not have
# the execute bit for the current user set, in
# which case the stat() call will raise an OSError
os.stat(os.path.dirname(result))
except OSError as e:
if "permission denied" in str(e).lower():
raise F5ModuleError(
"Destination directory {0} is not accessible".format(os.path.dirname(result))
)
raise F5ModuleError(
"Destination directory {0} does not exist".format(os.path.dirname(result))
)
if not os.access(os.path.dirname(result), os.W_OK):
raise F5ModuleError(
"Destination {0} not writable".format(os.path.dirname(result))
)
return result
class Changes(Parameters):
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
pass
return result
class UsableChanges(Changes):
pass
class ReportableChanges(Changes):
pass
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.kwargs = kwargs
self.module = kwargs.get('module', None)
self.client = F5RestClient(**self.module.params)
def exec_module(self):
if self.is_version_v1():
manager = self.get_manager('v1')
else:
manager = self.get_manager('v2')
return manager.exec_module()
def get_manager(self, type):
if type == 'v1':
return V1Manager(**self.kwargs)
elif type == 'v2':
return V2Manager(**self.kwargs)
def is_version_v1(self):
"""Checks to see if the TMOS version is less than 12.1.0
Versions prior to 12.1.0 have a bug which prevents the REST
API from properly listing any UCS files when you query the
/mgmt/tm/sys/ucs endpoint. Therefore you need to do everything
through tmsh over REST.
:return: bool
"""
version = tmos_version(self.client)
if LooseVersion(version) < LooseVersion('12.1.0'):
return True
else:
return False
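# --- Editor's illustrative sketch (not part of the original module) ---
# is_version_v1() above only compares the reported TMOS version against 12.1.0 with
# distutils.version.LooseVersion; anything older uses the tmsh-over-REST workaround.
# Standalone illustration (hypothetical helper name):
def _is_pre_12_1_sketch(version_string):
    from distutils.version import LooseVersion
    return LooseVersion(version_string) < LooseVersion('12.1.0')
# _is_pre_12_1_sketch('12.0.0') -> True (V1Manager); _is_pre_12_1_sketch('13.1.1') -> False (V2Manager)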
class BaseManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = F5RestClient(**self.module.params)
self.want = Parameters(params=self.module.params)
self.changes = UsableChanges()
def exec_module(self):
result = dict()
self.present()
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=True))
return result
def present(self):
if self.exists():
self.update()
else:
self.create()
def update(self):
if os.path.exists(self.want.fulldest):
if not self.want.force:
raise F5ModuleError(
"File '{0}' already exists".format(self.want.fulldest)
)
self.execute()
def _get_backup_file(self):
return self.module.backup_local(self.want.fulldest)
def execute(self):
try:
if self.want.backup:
if os.path.exists(self.want.fulldest):
backup_file = self._get_backup_file()
self.changes.update({'backup_file': backup_file})
self.download()
except IOError:
raise F5ModuleError(
"Failed to copy: {0} to {1}".format(self.want.src, self.want.fulldest)
)
self._set_checksum()
self._set_md5sum()
file_args = self.module.load_file_common_arguments(self.module.params)
return self.module.set_fs_attributes_if_different(file_args, True)
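    # Record the SHA1 checksum of the downloaded file; errors are ignored
    # because the value is informational only.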
def _set_checksum(self):
try:
result = self.module.sha1(self.want.fulldest)
self.want.update({'checksum': result})
except ValueError:
pass
def _set_md5sum(self):
try:
result = self.module.md5(self.want.fulldest)
self.want.update({'md5sum': result})
except ValueError:
pass
def create(self):
if self.want.fail_on_missing:
raise F5ModuleError(
"UCS '{0}' was not found".format(self.want.src)
)
if not self.want.create_on_missing:
raise F5ModuleError(
"UCS '{0}' was not found".format(self.want.src)
)
if self.module.check_mode:
return True
if self.want.create_on_missing:
self.create_on_device()
self.execute()
return True
def create_on_device(self):
        if self.want.encryption_password:
params = dict(
command='save',
name=self.want.src,
options=[{'passphrase': self.want.encryption_password}]
)
else:
params = dict(
command='save',
name=self.want.src,
)
uri = "https://{0}:{1}/mgmt/tm/sys/ucs".format(
self.client.provider['server'],
self.client.provider['server_port']
)
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] in [400, 403]:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def download(self):
self.download_from_device(self.want.dest)
if os.path.exists(self.want.dest):
return True
raise F5ModuleError(
"Failed to download the remote file"
)
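# V1Manager supports TMOS versions prior to 12.1.0, where UCS files must be
# listed via tmsh and staged under /var/config/rest/madm before download.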
class V1Manager(BaseManager):
def __init__(self, *args, **kwargs):
super(V1Manager, self).__init__(**kwargs)
self.remote_dir = '/var/config/rest/madm'
def read_current(self):
result = None
output = self.read_current_from_device()
if 'commandResult' in output:
result = self._read_ucs_files_from_output(output['commandResult'])
return result
def read_current_from_device(self):
params = dict(
command='run',
utilCmdArgs='-c "tmsh list sys ucs"'
)
uri = "https://{0}:{1}/mgmt/tm/util/bash".format(
self.client.provider['server'],
self.client.provider['server_port']
)
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] in [400, 403]:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return response
def _read_ucs_files_from_output(self, output):
search = re.compile(r'filename\s+(.*)').search
lines = output.split("\n")
result = [m.group(1) for m in map(search, lines) if m]
return result
def exists(self):
collection = self.read_current()
base = os.path.basename(self.want.src)
if any(base == os.path.basename(x) for x in collection):
return True
return False
def download_from_device(self, dest):
url = 'https://{0}:{1}/mgmt/shared/file-transfer/madm/{2}'.format(
self.client.provider['server'],
self.client.provider['server_port'],
self.want.filename
)
try:
download_file(self.client, url, dest)
except F5ModuleError:
raise F5ModuleError(
"Failed to download the file."
)
if os.path.exists(self.want.dest):
return True
return False
def _move_to_download(self):
move_path = '/var/local/ucs/{0} {1}/{0}'.format(
self.want.filename, self.remote_dir
)
params = dict(
command='run',
utilCmdArgs=move_path
)
uri = "https://{0}:{1}/mgmt/tm/util/unix-mv/".format(
self.client.provider['server'],
self.client.provider['server_port']
)
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
if 'commandResult' in response:
if 'cannot stat' in response['commandResult']:
raise F5ModuleError(response['commandResult'])
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] in [400, 403]:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return True
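# V2Manager supports TMOS 12.1.0 and later, where /mgmt/tm/sys/ucs can be
# queried directly and files are served from the ucs-downloads endpoint.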
class V2Manager(BaseManager):
def read_current_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/sys/ucs".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return response
def read_current(self):
collection = self.read_current_from_device()
if 'items' not in collection:
return []
resources = collection['items']
result = [x['apiRawValues']['filename'] for x in resources]
return result
def exists(self):
collection = self.read_current()
base = os.path.basename(self.want.src)
if any(base == os.path.basename(x) for x in collection):
return True
return False
def download_from_device(self, dest):
url = 'https://{0}:{1}/mgmt/shared/file-transfer/ucs-downloads/{2}'.format(
self.client.provider['server'],
self.client.provider['server_port'],
self.want.src
)
try:
download_file(self.client, url, dest)
except F5ModuleError:
raise F5ModuleError(
"Failed to download the file."
)
if os.path.exists(self.want.dest):
return True
return False
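# ArgumentSpec defines the module options and merges in the common F5
# connection arguments.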
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
backup=dict(
default='no',
type='bool'
),
create_on_missing=dict(
default='yes',
type='bool'
),
encryption_password=dict(no_log=True),
dest=dict(
required=True,
type='path'
),
force=dict(
default='yes',
type='bool'
),
fail_on_missing=dict(
default='no',
type='bool'
),
src=dict()
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
self.add_file_common_args = True
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
add_file_common_args=spec.add_file_common_args
)
try:
mm = ModuleManager(module=module)
results = mm.exec_module()
module.exit_json(**results)
except F5ModuleError as ex:
module.fail_json(msg=str(ex))
if __name__ == '__main__':
main()
| gpl-3.0 |
muminoff/savollar | savollar/pipelines.py | 1 | 2093 | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
from scrapy.exceptions import DropItem
from scrapy.conf import settings
from scrapy import log
from elasticsearch import Elasticsearch
from uuid import uuid1
from savollar.models import SavolModel
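# Index each scraped item into Elasticsearch, using a random UUID as the
# document id.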
class ElasticSearchIndexPipeline(object):
def process_item(self, item, spider):
es = Elasticsearch([
{"host": settings["ELASTICSEARCH_HOST"]},
])
valid = True
for data in item:
if not data:
raise DropItem("Missing %s of item from %s" %(data, item["link"]))
if valid:
es.index(
index=settings["ELASTICSEARCH_INDEX"],
doc_type="info",
id=str(uuid1()),
body=dict(item)
)
log.msg("Item indexed to ElasticSearch database %s:%s" %
(settings["ELASTICSEARCH_HOST"], settings["ELASTICSEARCH_PORT"]),
level=log.DEBUG, spider=spider)
return item
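# Persist each scraped item to Cassandra through the SavolModel mapper.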
class CassandraExportPipleline(object):
def process_item(self, item, spider):
valid = True
for data in item:
if not data:
raise DropItem("Missing %s of item from %s" %(data, item["link"]))
if valid:
model = SavolModel()
model.title = item["title"]
model.question = item["question"]
model.answer = item["answer"]
model.author = item["author"]
model.permalink = item["permalink"]
model.year = int(item["year"])
model.month = int(item["month"])
model.date = int(item["date"])
model.tags = item["title"].split()
model.save()
log.msg("Item exported to Cassandra database %s/%s" %
(settings["CASSANDRA_HOST"], settings["CASSANDRA_KEYSPACE"]),
level=log.DEBUG, spider=spider)
return item
| apache-2.0 |
thnee/ansible | lib/ansible/modules/cloud/vmware/vmware_guest_custom_attributes.py | 31 | 8326 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright, (c) 2018, Ansible Project
# Copyright, (c) 2018, Abhijeet Kasurde <[email protected]>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: vmware_guest_custom_attributes
short_description: Manage custom attributes from VMware for the given virtual machine
description:
- This module can be used to add, remove and update custom attributes for the given virtual machine.
version_added: 2.7
author:
- Jimmy Conner (@cigamit)
- Abhijeet Kasurde (@Akasurde)
notes:
- Tested on vSphere 6.5
requirements:
- "python >= 2.6"
- PyVmomi
options:
name:
description:
- Name of the virtual machine to work with.
    - This is a required parameter if C(uuid) or C(moid) is not supplied.
type: str
state:
description:
- The action to take.
- If set to C(present), then custom attribute is added or updated.
- If set to C(absent), then custom attribute is removed.
default: 'present'
choices: ['present', 'absent']
type: str
uuid:
description:
- UUID of the virtual machine to manage if known. This is VMware's unique identifier.
    - This is a required parameter if C(name) or C(moid) is not supplied.
type: str
moid:
description:
    - Managed Object ID of the instance to manage if known. This is a unique identifier only within a single vCenter instance.
- This is required if C(name) or C(uuid) is not supplied.
version_added: '2.9'
type: str
use_instance_uuid:
description:
- Whether to use the VMware instance UUID rather than the BIOS UUID.
default: no
type: bool
version_added: '2.8'
folder:
description:
- Absolute path to find an existing guest.
    - This is a required parameter if C(name) is supplied and multiple virtual machines with the same name are found.
type: str
datacenter:
description:
    - Datacenter name where the virtual machine is located.
required: True
type: str
attributes:
description:
    - A list of name and value pairs of custom attributes that need to be managed.
    - The value of a custom attribute is not required and will be ignored if C(state) is set to C(absent).
default: []
type: list
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
- name: Add virtual machine custom attributes
vmware_guest_custom_attributes:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
uuid: 421e4592-c069-924d-ce20-7e7533fab926
state: present
attributes:
- name: MyAttribute
value: MyValue
delegate_to: localhost
register: attributes
- name: Add multiple virtual machine custom attributes
vmware_guest_custom_attributes:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
uuid: 421e4592-c069-924d-ce20-7e7533fab926
state: present
attributes:
- name: MyAttribute
value: MyValue
- name: MyAttribute2
value: MyValue2
delegate_to: localhost
register: attributes
- name: Remove virtual machine Attribute
vmware_guest_custom_attributes:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
uuid: 421e4592-c069-924d-ce20-7e7533fab926
state: absent
attributes:
- name: MyAttribute
delegate_to: localhost
register: attributes
- name: Remove virtual machine Attribute using Virtual Machine MoID
vmware_guest_custom_attributes:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
moid: vm-42
state: absent
attributes:
- name: MyAttribute
delegate_to: localhost
register: attributes
'''
RETURN = """
custom_attributes:
description: metadata about the virtual machine attributes
returned: always
type: dict
sample: {
"mycustom": "my_custom_value",
"mycustom_2": "my_custom_value_2",
"sample_1": "sample_1_value",
"sample_2": "sample_2_value",
"sample_3": "sample_3_value"
}
"""
try:
from pyVmomi import vim
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import PyVmomi, vmware_argument_spec
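# VmAttributeManager wraps vCenter's customFieldsManager to read and update
# per-VM custom attribute values.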
class VmAttributeManager(PyVmomi):
def __init__(self, module):
super(VmAttributeManager, self).__init__(module)
def set_custom_field(self, vm, user_fields):
result_fields = dict()
change_list = list()
changed = False
for field in user_fields:
field_key = self.check_exists(field['name'])
found = False
field_value = field.get('value', '')
for k, v in [(x.name, v.value) for x in self.custom_field_mgr for v in vm.customValue if x.key == v.key]:
if k == field['name']:
found = True
if v != field_value:
if not self.module.check_mode:
self.content.customFieldsManager.SetField(entity=vm, key=field_key.key, value=field_value)
result_fields[k] = field_value
change_list.append(True)
if not found and field_value != "":
if not field_key and not self.module.check_mode:
field_key = self.content.customFieldsManager.AddFieldDefinition(name=field['name'], moType=vim.VirtualMachine)
change_list.append(True)
if not self.module.check_mode:
self.content.customFieldsManager.SetField(entity=vm, key=field_key.key, value=field_value)
result_fields[field['name']] = field_value
if any(change_list):
changed = True
return {'changed': changed, 'failed': False, 'custom_attributes': result_fields}
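    # Return the existing custom field definition with the given name, or
    # False if it has not been defined yet.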
def check_exists(self, field):
for x in self.custom_field_mgr:
if x.name == field:
return x
return False
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(
datacenter=dict(type='str'),
name=dict(type='str'),
folder=dict(type='str'),
uuid=dict(type='str'),
moid=dict(type='str'),
use_instance_uuid=dict(type='bool', default=False),
state=dict(type='str', default='present',
choices=['absent', 'present']),
attributes=dict(
type='list',
default=[],
options=dict(
name=dict(type='str', required=True),
value=dict(type='str'),
)
),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_one_of=[
['name', 'uuid', 'moid']
],
)
if module.params.get('folder'):
# FindByInventoryPath() does not require an absolute path
# so we should leave the input folder path unmodified
module.params['folder'] = module.params['folder'].rstrip('/')
pyv = VmAttributeManager(module)
results = {'changed': False, 'failed': False, 'instance': dict()}
# Check if the virtual machine exists before continuing
vm = pyv.get_vm()
if vm:
# virtual machine already exists
if module.params['state'] == "present":
results = pyv.set_custom_field(vm, module.params['attributes'])
elif module.params['state'] == "absent":
results = pyv.set_custom_field(vm, module.params['attributes'])
module.exit_json(**results)
else:
# virtual machine does not exists
vm_id = (module.params.get('name') or module.params.get('uuid') or module.params.get('moid'))
module.fail_json(msg="Unable to manage custom attributes for non-existing"
" virtual machine %s" % vm_id)
if __name__ == '__main__':
main()
| gpl-3.0 |
shanot/imp | modules/rmf/examples/link.py | 2 | 1236 | ## \example rmf/link.py
# This example is like module/rmf/pdb.py except that instead of creating a
# new hierarchy from the rmf file, it simply links the existing hierarchy
# to the file. This mechanism can be used for loading multiple
# conformations for scoring or other analysis without having to set up
# restraints and things each time.
from __future__ import print_function
import IMP.atom
import IMP.rmf
import RMF
import sys
IMP.setup_from_argv(sys.argv, "link")
m = IMP.Model()
# Create a new IMP.atom.Hierarchy from the contents of the pdb file
h = IMP.atom.read_pdb(IMP.rmf.get_example_path("simple.pdb"), m)
tfn = "link.rmf"
print("File name is", tfn)
# open the file, clearing any existing contents
rh = RMF.create_rmf_file(tfn)
# add the hierarchy to the file
IMP.rmf.add_hierarchies(rh, [h])
# add the current configuration to the file as frame 0
IMP.rmf.save_frame(rh)
# close the file
del rh
# reopen it, don't clear the file when opening it
rh = RMF.open_rmf_file_read_only(tfn)
# link to the existing pdb hierarchy
IMP.rmf.link_hierarchies(rh, [h])
# load the same coordinates in, ok, that is not very exciting
IMP.rmf.load_frame(rh, RMF.FrameID(0))
print("Try running rmf_display or rmf_show on", tfn)
| gpl-3.0 |