repo_name (stringlengths 6-100) | path (stringlengths 4-294) | copies (stringclasses 981 values) | size (stringlengths 4-6) | content (stringlengths 606-896k) | license (stringclasses 15 values)
---|---|---|---|---|---|
franciscod/python-telegram-bot | telegram/inlinequeryresultvideo.py | 2 | 2581 | #!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2016
# Leandro Toledo de Souza <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""This module contains the classes that represent Telegram
InlineQueryResultVideo"""
from telegram import InlineQueryResult, InlineKeyboardMarkup, InputMessageContent
class InlineQueryResultVideo(InlineQueryResult):
def __init__(self,
id,
video_url,
mime_type,
thumb_url,
title,
caption=None,
video_width=None,
video_height=None,
video_duration=None,
description=None,
reply_markup=None,
input_message_content=None,
**kwargs):
# Required
super(InlineQueryResultVideo, self).__init__('video', id)
self.video_url = video_url
self.mime_type = mime_type
self.thumb_url = thumb_url
self.title = title
# Optional
if caption:
self.caption = caption
if video_width:
self.video_width = video_width
if video_height:
self.video_height = video_height
if video_duration:
self.video_duration = video_duration
if description:
self.description = description
if reply_markup:
self.reply_markup = reply_markup
if input_message_content:
self.input_message_content = input_message_content
@staticmethod
def de_json(data):
data = super(InlineQueryResultVideo, InlineQueryResultVideo).de_json(data)
data['reply_markup'] = InlineKeyboardMarkup.de_json(data.get('reply_markup'))
data['input_message_content'] = InputMessageContent.de_json(data.get(
'input_message_content'))
return InlineQueryResultVideo(**data)
| gpl-2.0 |
shingonoide/odoo | addons/purchase/report/purchase_report.py | 50 | 7689 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
#
# Please note that these reports are not multi-currency !!!
#
from openerp.osv import fields,osv
from openerp import tools
class purchase_report(osv.osv):
_name = "purchase.report"
_description = "Purchase Orders"
_auto = False
_columns = {
'date': fields.datetime('Order Date', readonly=True, help="Date on which this document has been created"), # TDE FIXME master: rename into date_order
'state': fields.selection([('draft', 'Request for Quotation'),
('confirmed', 'Waiting Supplier Ack'),
('approved', 'Approved'),
('except_picking', 'Shipping Exception'),
('except_invoice', 'Invoice Exception'),
('done', 'Done'),
('cancel', 'Cancelled')],'Order Status', readonly=True),
'product_id':fields.many2one('product.product', 'Product', readonly=True),
'picking_type_id': fields.many2one('stock.warehouse', 'Warehouse', readonly=True),
'location_id': fields.many2one('stock.location', 'Destination', readonly=True),
'partner_id':fields.many2one('res.partner', 'Supplier', readonly=True),
'pricelist_id':fields.many2one('product.pricelist', 'Pricelist', readonly=True),
'date_approve':fields.date('Date Approved', readonly=True),
'expected_date':fields.date('Expected Date', readonly=True),
'validator' : fields.many2one('res.users', 'Validated By', readonly=True),
'product_uom' : fields.many2one('product.uom', 'Reference Unit of Measure', required=True),
'company_id':fields.many2one('res.company', 'Company', readonly=True),
'user_id':fields.many2one('res.users', 'Responsible', readonly=True),
'delay':fields.float('Days to Validate', digits=(16,2), readonly=True),
'delay_pass':fields.float('Days to Deliver', digits=(16,2), readonly=True),
'quantity': fields.integer('Unit Quantity', readonly=True), # TDE FIXME master: rename into unit_quantity
'price_total': fields.float('Total Price', readonly=True),
'price_average': fields.float('Average Price', readonly=True, group_operator="avg"),
'negociation': fields.float('Purchase-Standard Price', readonly=True, group_operator="avg"),
'price_standard': fields.float('Products Value', readonly=True, group_operator="sum"),
'nbr': fields.integer('# of Lines', readonly=True), # TDE FIXME master: rename into nbr_lines
'category_id': fields.many2one('product.category', 'Category', readonly=True)
}
_order = 'date desc, price_total desc'
def init(self, cr):
tools.sql.drop_view_if_exists(cr, 'purchase_report')
cr.execute("""
create or replace view purchase_report as (
WITH currency_rate (currency_id, rate, date_start, date_end) AS (
SELECT r.currency_id, r.rate, r.name AS date_start,
(SELECT name FROM res_currency_rate r2
WHERE r2.name > r.name AND
r2.currency_id = r.currency_id
ORDER BY r2.name ASC
LIMIT 1) AS date_end
FROM res_currency_rate r
)
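-- The currency_rate CTE above gives each (currency_id, rate) row a validity window:
-- date_start is the rate's own date; date_end is the next rate's date for that
-- currency, or NULL for the most recent rate.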
select
min(l.id) as id,
s.date_order as date,
l.state,
s.date_approve,
s.minimum_planned_date as expected_date,
s.dest_address_id,
s.pricelist_id,
s.validator,
spt.warehouse_id as picking_type_id,
s.partner_id as partner_id,
s.create_uid as user_id,
s.company_id as company_id,
l.product_id,
t.categ_id as category_id,
t.uom_id as product_uom,
s.location_id as location_id,
sum(l.product_qty/u.factor*u2.factor) as quantity,
extract(epoch from age(s.date_approve,s.date_order))/(24*60*60)::decimal(16,2) as delay,
extract(epoch from age(l.date_planned,s.date_order))/(24*60*60)::decimal(16,2) as delay_pass,
count(*) as nbr,
sum(l.price_unit/cr.rate*l.product_qty)::decimal(16,2) as price_total,
avg(100.0 * (l.price_unit/cr.rate*l.product_qty) / NULLIF(ip.value_float*l.product_qty/u.factor*u2.factor, 0.0))::decimal(16,2) as negociation,
sum(ip.value_float*l.product_qty/u.factor*u2.factor)::decimal(16,2) as price_standard,
(sum(l.product_qty*l.price_unit/cr.rate)/NULLIF(sum(l.product_qty/u.factor*u2.factor),0.0))::decimal(16,2) as price_average
from purchase_order_line l
join purchase_order s on (l.order_id=s.id)
left join product_product p on (l.product_id=p.id)
left join product_template t on (p.product_tmpl_id=t.id)
LEFT JOIN ir_property ip ON (ip.name='standard_price' AND ip.res_id=CONCAT('product.template,',t.id) AND ip.company_id=s.company_id)
left join product_uom u on (u.id=l.product_uom)
left join product_uom u2 on (u2.id=t.uom_id)
left join stock_picking_type spt on (spt.id=s.picking_type_id)
join currency_rate cr on (cr.currency_id = s.currency_id and
cr.date_start <= coalesce(s.date_order, now()) and
(cr.date_end is null or cr.date_end > coalesce(s.date_order, now())))
group by
s.company_id,
s.create_uid,
s.partner_id,
u.factor,
s.location_id,
l.price_unit,
s.date_approve,
l.date_planned,
l.product_uom,
s.minimum_planned_date,
s.pricelist_id,
s.validator,
s.dest_address_id,
l.product_id,
t.categ_id,
s.date_order,
l.state,
spt.warehouse_id,
u.uom_type,
u.category_id,
t.uom_id,
u.id,
u2.factor
)
""")
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
2ndQuadrant/ansible | test/runner/lib/docker_util.py | 29 | 7331 | """Functions for accessing docker via the docker cli."""
from __future__ import absolute_import, print_function
import json
import os
import time
from lib.executor import (
SubprocessError,
)
from lib.util import (
ApplicationError,
run_command,
common_environment,
display,
find_executable,
)
from lib.config import (
EnvironmentConfig,
)
BUFFER_SIZE = 256 * 256
def docker_available():
"""
:rtype: bool
"""
return find_executable('docker', required=False)
def get_docker_container_id():
"""
:rtype: str | None
"""
path = '/proc/self/cgroup'
if not os.path.exists(path):
return None
with open(path) as cgroup_fd:
contents = cgroup_fd.read()
paths = [line.split(':')[2] for line in contents.splitlines()]
container_ids = set(path.split('/')[2] for path in paths if path.startswith('/docker/'))
if not container_ids:
return None
if len(container_ids) == 1:
return container_ids.pop()
raise ApplicationError('Found multiple container_id candidates: %s\n%s' % (sorted(container_ids), contents))
def get_docker_container_ip(args, container_id):
"""
:type args: EnvironmentConfig
:type container_id: str
:rtype: str
"""
results = docker_inspect(args, container_id)
ipaddress = results[0]['NetworkSettings']['IPAddress']
return ipaddress
def get_docker_networks(args, container_id):
"""
:param args: EnvironmentConfig
:param container_id: str
:rtype: list[str]
"""
results = docker_inspect(args, container_id)
networks = sorted(results[0]['NetworkSettings']['Networks'])
return networks
def docker_pull(args, image):
"""
:type args: EnvironmentConfig
:type image: str
"""
if ('@' in image or ':' in image) and docker_images(args, image):
display.info('Skipping docker pull of existing image with tag or digest: %s' % image, verbosity=2)
return
if not args.docker_pull:
display.warning('Skipping docker pull for "%s". Image may be out-of-date.' % image)
return
for _ in range(1, 10):
try:
docker_command(args, ['pull', image])
return
except SubprocessError:
display.warning('Failed to pull docker image "%s". Waiting a few seconds before trying again.' % image)
time.sleep(3)
raise ApplicationError('Failed to pull docker image "%s".' % image)
def docker_put(args, container_id, src, dst):
"""
:type args: EnvironmentConfig
:type container_id: str
:type src: str
:type dst: str
"""
# avoid 'docker cp' due to a bug which causes 'docker rm' to fail
with open(src, 'rb') as src_fd:
docker_exec(args, container_id, ['dd', 'of=%s' % dst, 'bs=%s' % BUFFER_SIZE],
options=['-i'], stdin=src_fd, capture=True)
def docker_get(args, container_id, src, dst):
"""
:type args: EnvironmentConfig
:type container_id: str
:type src: str
:type dst: str
"""
# avoid 'docker cp' due to a bug which causes 'docker rm' to fail
with open(dst, 'wb') as dst_fd:
docker_exec(args, container_id, ['dd', 'if=%s' % src, 'bs=%s' % BUFFER_SIZE],
options=['-i'], stdout=dst_fd, capture=True)
def docker_run(args, image, options, cmd=None):
"""
:type args: EnvironmentConfig
:type image: str
:type options: list[str] | None
:type cmd: list[str] | None
:rtype: str | None, str | None
"""
if not options:
options = []
if not cmd:
cmd = []
for _ in range(1, 3):
try:
return docker_command(args, ['run'] + options + [image] + cmd, capture=True)
except SubprocessError as ex:
display.error(ex)
display.warning('Failed to run docker image "%s". Waiting a few seconds before trying again.' % image)
time.sleep(3)
raise ApplicationError('Failed to run docker image "%s".' % image)
def docker_images(args, image):
"""
:param args: CommonConfig
:param image: str
:rtype: list[dict[str, any]]
"""
stdout, _dummy = docker_command(args, ['images', image, '--format', '{{json .}}'], capture=True, always=True)
results = [json.loads(line) for line in stdout.splitlines()]
return results
def docker_rm(args, container_id):
"""
:type args: EnvironmentConfig
:type container_id: str
"""
docker_command(args, ['rm', '-f', container_id], capture=True)
def docker_inspect(args, container_id):
"""
:type args: EnvironmentConfig
:type container_id: str
:rtype: list[dict]
"""
if args.explain:
return []
try:
stdout, _ = docker_command(args, ['inspect', container_id], capture=True)
return json.loads(stdout)
except SubprocessError as ex:
try:
return json.loads(ex.stdout)
except Exception:
raise ex
def docker_network_disconnect(args, container_id, network):
"""
:param args: EnvironmentConfig
:param container_id: str
:param network: str
"""
docker_command(args, ['network', 'disconnect', network, container_id], capture=True)
def docker_network_inspect(args, network):
"""
:type args: EnvironmentConfig
:type network: str
:rtype: list[dict]
"""
if args.explain:
return []
try:
stdout, _ = docker_command(args, ['network', 'inspect', network], capture=True)
return json.loads(stdout)
except SubprocessError as ex:
try:
return json.loads(ex.stdout)
except Exception:
raise ex
def docker_exec(args, container_id, cmd, options=None, capture=False, stdin=None, stdout=None):
"""
:type args: EnvironmentConfig
:type container_id: str
:type cmd: list[str]
:type options: list[str] | None
:type capture: bool
:type stdin: file | None
:type stdout: file | None
:rtype: str | None, str | None
"""
if not options:
options = []
return docker_command(args, ['exec'] + options + [container_id] + cmd, capture=capture, stdin=stdin, stdout=stdout)
def docker_info(args):
"""
:param args: CommonConfig
:rtype: dict[str, any]
"""
stdout, _dummy = docker_command(args, ['info', '--format', '{{json .}}'], capture=True, always=True)
return json.loads(stdout)
def docker_version(args):
"""
:param args: CommonConfig
:rtype: dict[str, any]
"""
stdout, _dummy = docker_command(args, ['version', '--format', '{{json .}}'], capture=True, always=True)
return json.loads(stdout)
def docker_command(args, cmd, capture=False, stdin=None, stdout=None, always=False):
"""
:type args: CommonConfig
:type cmd: list[str]
:type capture: bool
:type stdin: file | None
:type stdout: file | None
:type always: bool
:rtype: str | None, str | None
"""
env = docker_environment()
return run_command(args, ['docker'] + cmd, env=env, capture=capture, stdin=stdin, stdout=stdout, always=always)
def docker_environment():
"""
:rtype: dict[str, str]
"""
env = common_environment()
env.update(dict((key, os.environ[key]) for key in os.environ if key.startswith('DOCKER_')))
return env
| gpl-3.0 |
huongttlan/statsmodels | statsmodels/sandbox/examples/example_sysreg.py | 31 | 8043 | """Example: statsmodels.sandbox.sysreg
"""
#TODO: this is going to change significantly once we have a panel data structure
from statsmodels.compat.python import lmap, asbytes
import numpy as np
import statsmodels.api as sm
from statsmodels.sandbox.sysreg import *
#for Python 3 compatibility
# Seemingly Unrelated Regressions (SUR) Model
# This example uses the subset of the Grunfeld data in Greene's Econometric
# Analysis Chapter 14 (5th Edition)
grun_data = sm.datasets.grunfeld.load()
firms = ['General Motors', 'Chrysler', 'General Electric', 'Westinghouse',
'US Steel']
#for Python 3 compatibility
firms = lmap(asbytes, firms)
grun_exog = grun_data.exog
grun_endog = grun_data.endog
# Right now SUR takes a list of arrays
# The array alternates between the LHS of an equation and RHS side of an
# equation
# This is very likely to change
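# Illustrative layout (the names below are placeholders, not variables in this script):
#   grun_sys = [endog_firm1, exog_firm1, endog_firm2, exog_firm2, ...]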
grun_sys = []
for i in firms:
index = grun_exog['firm'] == i
grun_sys.append(grun_endog[index])
exog = grun_exog[index][['value','capital']].view(float).reshape(-1,2)
exog = sm.add_constant(exog, prepend=True)
grun_sys.append(exog)
# Note that the results in Greene (5th edition) uses a slightly different
# version of the Grunfeld data. To reproduce Table 14.1 the following changes
# are necessary.
grun_sys[-2][5] = 261.6
grun_sys[-2][-3] = 645.2
grun_sys[-1][11,2] = 232.6
grun_mod = SUR(grun_sys)
grun_res = grun_mod.fit()
print("Results for the 2-step GLS")
print("Compare to Greene Table 14.1, 5th edition")
print(grun_res.params)
# or you can do an iterative fit
# you have to define a new model though this will be fixed
# TODO: note the above
print("Results for iterative GLS (equivalent to MLE)")
print("Compare to Greene Table 14.3")
#TODO: these are slightly off, could be a convergence issue
# or might use a different default DOF correction?
grun_imod = SUR(grun_sys)
grun_ires = grun_imod.fit(igls=True)
print(grun_ires.params)
# Two-Stage Least Squares for Simultaneous Equations
#TODO: we are going to need *some kind* of formula framework
# This follows the simple macroeconomic model given in
# Greene Example 15.1 (5th Edition)
# The data however is from statsmodels and is not the same as
# Greene's
# The model is
# consumption: c_{t} = \alpha_{0} + \alpha_{1}y_{t} + \alpha_{2}c_{t-1} + \epsilon_{t1}
# investment: i_{t} = \beta_{0} + \beta_{1}r_{t} + \beta_{2}\left(y_{t}-y_{t-1}\right) + \epsilon_{t2}
# demand: y_{t} = c_{t} + I_{t} + g_{t}
# See Greene's Econometric Analysis for more information
# Load the data
macrodata = sm.datasets.macrodata.load().data
# Not needed, but make sure the data is sorted
macrodata = np.sort(macrodata, order=['year','quarter'])
# Impose the demand restriction
y = macrodata['realcons'] + macrodata['realinv'] + macrodata['realgovt']
# Build the system
macro_sys = []
# First equation LHS
macro_sys.append(macrodata['realcons'][1:]) # leave off first date
# First equation RHS
exog1 = np.column_stack((y[1:],macrodata['realcons'][:-1]))
#TODO: it might be nice to have "lag" and "lead" functions
exog1 = sm.add_constant(exog1, prepend=True)
macro_sys.append(exog1)
# Second equation LHS
macro_sys.append(macrodata['realinv'][1:])
# Second equation RHS
exog2 = np.column_stack((macrodata['tbilrate'][1:], np.diff(y)))
exog2 = sm.add_constant(exog2, prepend=True)
macro_sys.append(exog2)
# We need to say that y_{t} in the RHS of equation 1 is an endogenous regressor
# We will call these independent endogenous variables
# Right now, we use a dictionary to declare these
indep_endog = {0 : [1]}
# We also need to create a design of our instruments
# This will be done automatically in the future
instruments = np.column_stack((macrodata[['realgovt',
'tbilrate']][1:].view(float).reshape(-1,2),macrodata['realcons'][:-1],
y[:-1]))
instruments = sm.add_constant(instruments, prepend=True)
macro_mod = Sem2SLS(macro_sys, indep_endog=indep_endog, instruments=instruments)
# Right now this only returns parameters
macro_params = macro_mod.fit()
print("The parameters for the first equation are correct.")
print("The parameters for the second equation are not.")
print(macro_params)
#TODO: Note that the above is incorrect, because we have no way of telling the
# model that *part* of the y_{t} - y_{t-1} is an independent endogenous variable
# To correct for this we would have to do the following
y_instrumented = macro_mod.wexog[0][:,1]
whitened_ydiff = y_instrumented - y[:-1]
wexog = np.column_stack((macrodata['tbilrate'][1:],whitened_ydiff))
wexog = sm.add_constant(wexog, prepend=True)
correct_params = sm.GLS(macrodata['realinv'][1:], wexog).fit().params
print("If we correctly instrument everything, then these are the parameters")
print("for the second equation")
print(correct_params)
print("Compare to output of R script statsmodels/sandbox/tests/macrodata.s")
print('\nUsing IV2SLS')
from statsmodels.sandbox.regression.gmm import IV2SLS
miv = IV2SLS(macro_sys[0], macro_sys[1], instruments)
resiv = miv.fit()
print("equation 1")
print(resiv.params)
miv2 = IV2SLS(macro_sys[2], macro_sys[3], instruments)
resiv2 = miv2.fit()
print("equation 2")
print(resiv2.params)
### Below is the same example using Greene's data ###
run_greene = 0
if run_greene:
try:
data3 = np.genfromtxt('/home/skipper/school/MetricsII/Greene \
TableF5-1.txt', names=True)
except:
raise ValueError("Based on Greene TableF5-1. You should download it "
"from his web site and edit this script accordingly.")
# Example 15.1 in Greene 5th Edition
# c_t = constant + y_t + c_t-1
# i_t = constant + r_t + (y_t - y_t-1)
# y_t = c_t + i_t + g_t
sys3 = []
sys3.append(data3['realcons'][1:]) # have to leave off a beg. date
# impose 3rd equation on y
y = data3['realcons'] + data3['realinvs'] + data3['realgovt']
exog1 = np.column_stack((y[1:],data3['realcons'][:-1]))
exog1 = sm.add_constant(exog1, prepend=False)
sys3.append(exog1)
sys3.append(data3['realinvs'][1:])
exog2 = np.column_stack((data3['tbilrate'][1:],
np.diff(y)))
# realint is missing 1st observation
exog2 = sm.add_constant(exog2, prepend=False)
sys3.append(exog2)
indep_endog = {0 : [0]} # need to be able to say that y_1 is an instrument..
instruments = np.column_stack((data3[['realgovt',
'tbilrate']][1:].view(float).reshape(-1,2),data3['realcons'][:-1],
y[:-1]))
instruments = sm.add_constant(instruments, prepend=False)
sem_mod = Sem2SLS(sys3, indep_endog = indep_endog, instruments=instruments)
sem_params = sem_mod.fit() # first equation is right, but not second?
# should y_t in the diff be instrumented?
# how would R know this in the script?
# well, let's check...
y_instr = sem_mod.wexog[0][:,0]
wyd = y_instr - y[:-1]
wexog = np.column_stack((data3['tbilrate'][1:],wyd))
wexog = sm.add_constant(wexog, prepend=False)
params = sm.GLS(data3['realinvs'][1:], wexog).fit().params
print("These are the simultaneous equation estimates for Greene's \
example 13-1 (also application 13-1 in the 6th edition).")
print(sem_params)
print("The first set of parameters is correct. The second set is not.")
print("Compare to the solution manual at \
http://pages.stern.nyu.edu/~wgreene/Text/econometricanalysis.htm")
print("The reason is the restriction on (y_t - y_1)")
print("Compare to R script GreeneEx15_1.s")
print("Somehow R carries y.1 in yd to know that it needs to be \
instrumented")
print("If we replace our estimate with the instrumented one")
print(params)
print("We get the right estimate")
print("Without a formula framework we have to be able to do restrictions.")
# yep!, but how in the world does R know this when we just fed it yd??
# must be implicit in the formula framework...
# we are going to need to keep the two equations separate and use
# a restrictions matrix. Ugh, is a formula framework really, necessary to get
# around this?
| bsd-3-clause |
ossdemura/django-miniblog | Lib/site-packages/pip/_vendor/html5lib/treeadapters/sax.py | 1835 | 1661 | from __future__ import absolute_import, division, unicode_literals
from xml.sax.xmlreader import AttributesNSImpl
from ..constants import adjustForeignAttributes, unadjustForeignAttributes
prefix_mapping = {}
for prefix, localName, namespace in adjustForeignAttributes.values():
if prefix is not None:
prefix_mapping[prefix] = namespace
def to_sax(walker, handler):
"""Call SAX-like content handler based on treewalker walker"""
handler.startDocument()
for prefix, namespace in prefix_mapping.items():
handler.startPrefixMapping(prefix, namespace)
for token in walker:
type = token["type"]
if type == "Doctype":
continue
elif type in ("StartTag", "EmptyTag"):
attrs = AttributesNSImpl(token["data"],
unadjustForeignAttributes)
handler.startElementNS((token["namespace"], token["name"]),
token["name"],
attrs)
if type == "EmptyTag":
handler.endElementNS((token["namespace"], token["name"]),
token["name"])
elif type == "EndTag":
handler.endElementNS((token["namespace"], token["name"]),
token["name"])
elif type in ("Characters", "SpaceCharacters"):
handler.characters(token["data"])
elif type == "Comment":
pass
else:
assert False, "Unknown token type"
for prefix, namespace in prefix_mapping.items():
handler.endPrefixMapping(prefix)
handler.endDocument()
| mit |
freakynit/kaggle-ndsb | configurations/bagging_15_convroll4_big_weightdecay_resume.py | 6 | 5502 | import numpy as np
import theano
import theano.tensor as T
import lasagne as nn
import data
import load
import nn_plankton
import dihedral
import dihedral_fast
import tmp_dnn
import tta
resume_path = "metadata/bagging_15_convroll4_big_weightdecay-schaap-20150306-105118.pkl"
validation_split_path = "splits/bagging_split_15.pkl"
patch_size = (95, 95)
augmentation_params = {
'zoom_range': (1 / 1.6, 1.6),
'rotation_range': (0, 360),
'shear_range': (-20, 20),
'translation_range': (-10, 10),
'do_flip': True,
'allow_stretch': 1.3,
}
batch_size = 128 // 4
chunk_size = 32768 // 4
num_chunks_train = 840
momentum = 0.9
learning_rate_schedule = {
0: 0.003,
700: 0.0003,
800: 0.00003,
}
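# The schedule keys appear to be training chunk indices: the learning rate starts
# at 3e-3, drops to 3e-4 after chunk 700 and to 3e-5 after chunk 800, out of
# num_chunks_train = 840 chunks in total.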
validate_every = 20
save_every = 20
def estimate_scale(img):
return np.maximum(img.shape[0], img.shape[1]) / 85.0
# augmentation_transforms_test = []
# for flip in [True, False]:
# for zoom in [1/1.3, 1/1.2, 1/1.1, 1.0, 1.1, 1.2, 1.3]:
# for rot in np.linspace(0.0, 360.0, 5, endpoint=False):
# tf = data.build_augmentation_transform(zoom=(zoom, zoom), rotation=rot, flip=flip)
# augmentation_transforms_test.append(tf)
augmentation_transforms_test = tta.build_quasirandom_transforms(70, **{
'zoom_range': (1 / 1.4, 1.4),
'rotation_range': (0, 360),
'shear_range': (-10, 10),
'translation_range': (-8, 8),
'do_flip': True,
'allow_stretch': 1.2,
})
data_loader = load.ZmuvRescaledDataLoader(estimate_scale=estimate_scale, num_chunks_train=num_chunks_train,
patch_size=patch_size, chunk_size=chunk_size, augmentation_params=augmentation_params,
augmentation_transforms_test=augmentation_transforms_test, validation_split_path=validation_split_path)
# Conv2DLayer = nn.layers.cuda_convnet.Conv2DCCLayer
# MaxPool2DLayer = nn.layers.cuda_convnet.MaxPool2DCCLayer
Conv2DLayer = tmp_dnn.Conv2DDNNLayer
MaxPool2DLayer = tmp_dnn.MaxPool2DDNNLayer
def build_model():
l0 = nn.layers.InputLayer((batch_size, 1, patch_size[0], patch_size[1]))
l0c = dihedral.CyclicSliceLayer(l0)
l1a = Conv2DLayer(l0c, num_filters=32, filter_size=(3, 3), border_mode="same", W=nn_plankton.Conv2DOrthogonal(1.0), b=nn.init.Constant(0.1), nonlinearity=nn_plankton.leaky_relu, untie_biases=True)
l1b = Conv2DLayer(l1a, num_filters=16, filter_size=(3, 3), border_mode="same", W=nn_plankton.Conv2DOrthogonal(1.0), b=nn.init.Constant(0.1), nonlinearity=nn_plankton.leaky_relu, untie_biases=True)
l1 = MaxPool2DLayer(l1b, ds=(3, 3), strides=(2, 2))
l1r = dihedral_fast.CyclicConvRollLayer(l1)
l2a = Conv2DLayer(l1r, num_filters=64, filter_size=(3, 3), border_mode="same", W=nn_plankton.Conv2DOrthogonal(1.0), b=nn.init.Constant(0.1), nonlinearity=nn_plankton.leaky_relu, untie_biases=True)
l2b = Conv2DLayer(l2a, num_filters=32, filter_size=(3, 3), border_mode="same", W=nn_plankton.Conv2DOrthogonal(1.0), b=nn.init.Constant(0.1), nonlinearity=nn_plankton.leaky_relu, untie_biases=True)
l2 = MaxPool2DLayer(l2b, ds=(3, 3), strides=(2, 2))
l2r = dihedral_fast.CyclicConvRollLayer(l2)
l3a = Conv2DLayer(l2r, num_filters=128, filter_size=(3, 3), border_mode="same", W=nn_plankton.Conv2DOrthogonal(1.0), b=nn.init.Constant(0.1), nonlinearity=nn_plankton.leaky_relu, untie_biases=True)
l3b = Conv2DLayer(l3a, num_filters=128, filter_size=(3, 3), border_mode="same", W=nn_plankton.Conv2DOrthogonal(1.0), b=nn.init.Constant(0.1), nonlinearity=nn_plankton.leaky_relu, untie_biases=True)
l3c = Conv2DLayer(l3b, num_filters=64, filter_size=(3, 3), border_mode="same", W=nn_plankton.Conv2DOrthogonal(1.0), b=nn.init.Constant(0.1), nonlinearity=nn_plankton.leaky_relu, untie_biases=True)
l3 = MaxPool2DLayer(l3c, ds=(3, 3), strides=(2, 2))
l3r = dihedral_fast.CyclicConvRollLayer(l3)
l4a = Conv2DLayer(l3r, num_filters=256, filter_size=(3, 3), border_mode="same", W=nn_plankton.Conv2DOrthogonal(1.0), b=nn.init.Constant(0.1), nonlinearity=nn_plankton.leaky_relu, untie_biases=True)
l4b = Conv2DLayer(l4a, num_filters=256, filter_size=(3, 3), border_mode="same", W=nn_plankton.Conv2DOrthogonal(1.0), b=nn.init.Constant(0.1), nonlinearity=nn_plankton.leaky_relu, untie_biases=True)
l4c = Conv2DLayer(l4b, num_filters=128, filter_size=(3, 3), border_mode="same", W=nn_plankton.Conv2DOrthogonal(1.0), b=nn.init.Constant(0.1), nonlinearity=nn_plankton.leaky_relu, untie_biases=True)
l4 = MaxPool2DLayer(l4c, ds=(3, 3), strides=(2, 2))
l4r = dihedral_fast.CyclicConvRollLayer(l4)
l4f = nn.layers.flatten(l4r)
l5 = nn.layers.DenseLayer(nn.layers.dropout(l4f, p=0.5), num_units=512, W=nn_plankton.Orthogonal(1.0), b=nn.init.Constant(0.1), nonlinearity=nn_plankton.leaky_relu)
l5r = dihedral_fast.CyclicRollLayer(l5)
l6 = nn.layers.DenseLayer(nn.layers.dropout(l5r, p=0.5), num_units=512, W=nn_plankton.Orthogonal(1.0), b=nn.init.Constant(0.1), nonlinearity=nn_plankton.leaky_relu)
l6m = dihedral.CyclicPoolLayer(l6, pool_function=nn_plankton.rms)
l7 = nn.layers.DenseLayer(nn.layers.dropout(l6m, p=0.5), num_units=data.num_classes, nonlinearity=T.nnet.softmax, W=nn_plankton.Orthogonal(1.0))
return [l0], l7
def build_objective(l_ins, l_out):
lambda_reg = 0.0005
params = nn.layers.get_all_non_bias_params(l_out)
reg_term = sum(T.sum(p**2) for p in params)
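# L2 weight decay: reg_term sums the squared non-bias parameters, so the loss below
# is log_loss + lambda_reg * sum_j ||W_j||^2.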
def loss(y, t):
return nn_plankton.log_loss(y, t) + lambda_reg * reg_term
return nn.objectives.Objective(l_out, loss_function=loss)
| mit |
stefan-jonasson/home-assistant | homeassistant/components/telegram_bot/__init__.py | 2 | 26538 | """
Component to send and receive Telegram messages.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/telegram_bot/
"""
import asyncio
import io
from functools import partial
import logging
import os
import requests
from requests.auth import HTTPBasicAuth, HTTPDigestAuth
import voluptuous as vol
from homeassistant.components.notify import (
ATTR_DATA, ATTR_MESSAGE, ATTR_TITLE)
from homeassistant.config import load_yaml_config_file
from homeassistant.const import (
ATTR_COMMAND, ATTR_LATITUDE, ATTR_LONGITUDE, CONF_API_KEY,
CONF_PLATFORM, CONF_TIMEOUT, HTTP_DIGEST_AUTHENTICATION)
import homeassistant.helpers.config_validation as cv
from homeassistant.exceptions import TemplateError
from homeassistant.setup import async_prepare_setup_platform
REQUIREMENTS = ['python-telegram-bot==8.1.1']
_LOGGER = logging.getLogger(__name__)
ATTR_ARGS = 'args'
ATTR_AUTHENTICATION = 'authentication'
ATTR_CALLBACK_QUERY = 'callback_query'
ATTR_CALLBACK_QUERY_ID = 'callback_query_id'
ATTR_CAPTION = 'caption'
ATTR_CHAT_ID = 'chat_id'
ATTR_CHAT_INSTANCE = 'chat_instance'
ATTR_DISABLE_NOTIF = 'disable_notification'
ATTR_DISABLE_WEB_PREV = 'disable_web_page_preview'
ATTR_EDITED_MSG = 'edited_message'
ATTR_FILE = 'file'
ATTR_FROM_FIRST = 'from_first'
ATTR_FROM_LAST = 'from_last'
ATTR_KEYBOARD = 'keyboard'
ATTR_KEYBOARD_INLINE = 'inline_keyboard'
ATTR_MESSAGEID = 'message_id'
ATTR_MSG = 'message'
ATTR_MSGID = 'id'
ATTR_PARSER = 'parse_mode'
ATTR_PASSWORD = 'password'
ATTR_REPLY_TO_MSGID = 'reply_to_message_id'
ATTR_REPLYMARKUP = 'reply_markup'
ATTR_SHOW_ALERT = 'show_alert'
ATTR_TARGET = 'target'
ATTR_TEXT = 'text'
ATTR_URL = 'url'
ATTR_USER_ID = 'user_id'
ATTR_USERNAME = 'username'
CONF_ALLOWED_CHAT_IDS = 'allowed_chat_ids'
CONF_PROXY_URL = 'proxy_url'
CONF_PROXY_PARAMS = 'proxy_params'
DOMAIN = 'telegram_bot'
SERVICE_SEND_MESSAGE = 'send_message'
SERVICE_SEND_PHOTO = 'send_photo'
SERVICE_SEND_DOCUMENT = 'send_document'
SERVICE_SEND_LOCATION = 'send_location'
SERVICE_EDIT_MESSAGE = 'edit_message'
SERVICE_EDIT_CAPTION = 'edit_caption'
SERVICE_EDIT_REPLYMARKUP = 'edit_replymarkup'
SERVICE_ANSWER_CALLBACK_QUERY = 'answer_callback_query'
SERVICE_DELETE_MESSAGE = 'delete_message'
EVENT_TELEGRAM_CALLBACK = 'telegram_callback'
EVENT_TELEGRAM_COMMAND = 'telegram_command'
EVENT_TELEGRAM_TEXT = 'telegram_text'
PARSER_HTML = 'html'
PARSER_MD = 'markdown'
PLATFORM_SCHEMA = cv.PLATFORM_SCHEMA.extend({
vol.Required(CONF_PLATFORM): cv.string,
vol.Required(CONF_API_KEY): cv.string,
vol.Required(CONF_ALLOWED_CHAT_IDS):
vol.All(cv.ensure_list, [vol.Coerce(int)]),
vol.Optional(ATTR_PARSER, default=PARSER_MD): cv.string,
vol.Optional(CONF_PROXY_URL): cv.string,
vol.Optional(CONF_PROXY_PARAMS): dict,
})
BASE_SERVICE_SCHEMA = vol.Schema({
vol.Optional(ATTR_TARGET): vol.All(cv.ensure_list, [vol.Coerce(int)]),
vol.Optional(ATTR_PARSER): cv.string,
vol.Optional(ATTR_DISABLE_NOTIF): cv.boolean,
vol.Optional(ATTR_DISABLE_WEB_PREV): cv.boolean,
vol.Optional(ATTR_KEYBOARD): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(ATTR_KEYBOARD_INLINE): cv.ensure_list,
}, extra=vol.ALLOW_EXTRA)
SERVICE_SCHEMA_SEND_MESSAGE = BASE_SERVICE_SCHEMA.extend({
vol.Required(ATTR_MESSAGE): cv.template,
vol.Optional(ATTR_TITLE): cv.template,
})
SERVICE_SCHEMA_SEND_FILE = BASE_SERVICE_SCHEMA.extend({
vol.Optional(ATTR_URL): cv.template,
vol.Optional(ATTR_FILE): cv.template,
vol.Optional(ATTR_CAPTION): cv.template,
vol.Optional(ATTR_USERNAME): cv.string,
vol.Optional(ATTR_PASSWORD): cv.string,
vol.Optional(ATTR_AUTHENTICATION): cv.string,
})
SERVICE_SCHEMA_SEND_LOCATION = BASE_SERVICE_SCHEMA.extend({
vol.Required(ATTR_LONGITUDE): cv.template,
vol.Required(ATTR_LATITUDE): cv.template,
})
SERVICE_SCHEMA_EDIT_MESSAGE = SERVICE_SCHEMA_SEND_MESSAGE.extend({
vol.Required(ATTR_MESSAGEID):
vol.Any(cv.positive_int, vol.All(cv.string, 'last')),
vol.Required(ATTR_CHAT_ID): vol.Coerce(int),
})
SERVICE_SCHEMA_EDIT_CAPTION = vol.Schema({
vol.Required(ATTR_MESSAGEID):
vol.Any(cv.positive_int, vol.All(cv.string, 'last')),
vol.Required(ATTR_CHAT_ID): vol.Coerce(int),
vol.Required(ATTR_CAPTION): cv.template,
vol.Optional(ATTR_KEYBOARD_INLINE): cv.ensure_list,
}, extra=vol.ALLOW_EXTRA)
SERVICE_SCHEMA_EDIT_REPLYMARKUP = vol.Schema({
vol.Required(ATTR_MESSAGEID):
vol.Any(cv.positive_int, vol.All(cv.string, 'last')),
vol.Required(ATTR_CHAT_ID): vol.Coerce(int),
vol.Required(ATTR_KEYBOARD_INLINE): cv.ensure_list,
}, extra=vol.ALLOW_EXTRA)
SERVICE_SCHEMA_ANSWER_CALLBACK_QUERY = vol.Schema({
vol.Required(ATTR_MESSAGE): cv.template,
vol.Required(ATTR_CALLBACK_QUERY_ID): vol.Coerce(int),
vol.Optional(ATTR_SHOW_ALERT): cv.boolean,
}, extra=vol.ALLOW_EXTRA)
SERVICE_SCHEMA_DELETE_MESSAGE = vol.Schema({
vol.Required(ATTR_CHAT_ID): vol.Coerce(int),
vol.Required(ATTR_MESSAGEID):
vol.Any(cv.positive_int, vol.All(cv.string, 'last')),
}, extra=vol.ALLOW_EXTRA)
SERVICE_MAP = {
SERVICE_SEND_MESSAGE: SERVICE_SCHEMA_SEND_MESSAGE,
SERVICE_SEND_PHOTO: SERVICE_SCHEMA_SEND_FILE,
SERVICE_SEND_DOCUMENT: SERVICE_SCHEMA_SEND_FILE,
SERVICE_SEND_LOCATION: SERVICE_SCHEMA_SEND_LOCATION,
SERVICE_EDIT_MESSAGE: SERVICE_SCHEMA_EDIT_MESSAGE,
SERVICE_EDIT_CAPTION: SERVICE_SCHEMA_EDIT_CAPTION,
SERVICE_EDIT_REPLYMARKUP: SERVICE_SCHEMA_EDIT_REPLYMARKUP,
SERVICE_ANSWER_CALLBACK_QUERY: SERVICE_SCHEMA_ANSWER_CALLBACK_QUERY,
SERVICE_DELETE_MESSAGE: SERVICE_SCHEMA_DELETE_MESSAGE,
}
def load_data(hass, url=None, filepath=None, username=None, password=None,
authentication=None, num_retries=5):
"""Load photo/document into ByteIO/File container from a source."""
try:
if url is not None:
# Load photo from URL
params = {"timeout": 15}
if username is not None and password is not None:
if authentication == HTTP_DIGEST_AUTHENTICATION:
params["auth"] = HTTPDigestAuth(username, password)
else:
params["auth"] = HTTPBasicAuth(username, password)
retry_num = 0
while retry_num < num_retries:
req = requests.get(url, **params)
if not req.ok:
_LOGGER.warning("Status code %s (retry #%s) loading %s.",
req.status_code, retry_num + 1, url)
else:
data = io.BytesIO(req.content)
if data.read():
data.seek(0)
data.name = url
return data
_LOGGER.warning("Empty data (retry #%s) in %s).",
retry_num + 1, url)
retry_num += 1
_LOGGER.warning("Can't load photo in %s after %s retries.",
url, retry_num)
elif filepath is not None:
if hass.config.is_allowed_path(filepath):
return open(filepath, "rb")
_LOGGER.warning("'%s' are not secure to load data from!", filepath)
else:
_LOGGER.warning("Can't load photo. No photo found in params!")
except (OSError, TypeError) as error:
_LOGGER.error("Can't load photo into ByteIO: %s", error)
return None
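# Minimal usage sketch for load_data (the URL and credentials are illustrative only):
#   photo = load_data(hass, url="https://example.org/cam.jpg",
#                     username="user", password="secret")
#   # -> a BytesIO ready to hand to bot.sendPhoto(), or None if every retry failed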
@asyncio.coroutine
def async_setup(hass, config):
"""Set up the Telegram bot component."""
if not config[DOMAIN]:
return False
p_config = config[DOMAIN][0]
descriptions = yield from hass.async_add_job(
load_yaml_config_file,
os.path.join(os.path.dirname(__file__), 'services.yaml'))
p_type = p_config.get(CONF_PLATFORM)
platform = yield from async_prepare_setup_platform(
hass, config, DOMAIN, p_type)
if platform is None:
return
_LOGGER.info("Setting up %s.%s", DOMAIN, p_type)
try:
receiver_service = yield from \
platform.async_setup_platform(hass, p_config)
if receiver_service is False:
_LOGGER.error(
"Failed to initialize Telegram bot %s", p_type)
return False
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Error setting up platform %s", p_type)
return False
notify_service = TelegramNotificationService(
hass,
p_config.get(CONF_API_KEY),
p_config.get(CONF_ALLOWED_CHAT_IDS),
p_config.get(ATTR_PARSER),
p_config.get(CONF_PROXY_URL),
p_config.get(CONF_PROXY_PARAMS)
)
@asyncio.coroutine
def async_send_telegram_message(service):
"""Handle sending Telegram Bot message service calls."""
def _render_template_attr(data, attribute):
attribute_templ = data.get(attribute)
if attribute_templ:
if any([isinstance(attribute_templ, vtype)
for vtype in [float, int, str]]):
data[attribute] = attribute_templ
else:
attribute_templ.hass = hass
try:
data[attribute] = attribute_templ.async_render()
except TemplateError as exc:
_LOGGER.error(
"TemplateError in %s: %s -> %s",
attribute, attribute_templ.template, exc)
data[attribute] = attribute_templ.template
msgtype = service.service
kwargs = dict(service.data)
for attribute in [ATTR_MESSAGE, ATTR_TITLE, ATTR_URL, ATTR_FILE,
ATTR_CAPTION, ATTR_LONGITUDE, ATTR_LATITUDE]:
_render_template_attr(kwargs, attribute)
_LOGGER.debug("New telegram message %s: %s", msgtype, kwargs)
if msgtype == SERVICE_SEND_MESSAGE:
yield from hass.async_add_job(
partial(notify_service.send_message, **kwargs))
elif msgtype == SERVICE_SEND_PHOTO:
yield from hass.async_add_job(
partial(notify_service.send_file, True, **kwargs))
elif msgtype == SERVICE_SEND_DOCUMENT:
yield from hass.async_add_job(
partial(notify_service.send_file, False, **kwargs))
elif msgtype == SERVICE_SEND_LOCATION:
yield from hass.async_add_job(
partial(notify_service.send_location, **kwargs))
elif msgtype == SERVICE_ANSWER_CALLBACK_QUERY:
yield from hass.async_add_job(
partial(notify_service.answer_callback_query, **kwargs))
elif msgtype == SERVICE_DELETE_MESSAGE:
yield from hass.async_add_job(
partial(notify_service.delete_message, **kwargs))
else:
yield from hass.async_add_job(
partial(notify_service.edit_message, msgtype, **kwargs))
# Register notification services
for service_notif, schema in SERVICE_MAP.items():
hass.services.async_register(
DOMAIN, service_notif, async_send_telegram_message,
descriptions.get(service_notif), schema=schema)
return True
class TelegramNotificationService:
"""Implement the notification services for the Telegram Bot domain."""
def __init__(self, hass, api_key, allowed_chat_ids, parser,
proxy_url=None, proxy_params=None):
"""Initialize the service."""
from telegram import Bot
from telegram.parsemode import ParseMode
from telegram.utils.request import Request
self.allowed_chat_ids = allowed_chat_ids
self._default_user = self.allowed_chat_ids[0]
self._last_message_id = {user: None for user in self.allowed_chat_ids}
self._parsers = {PARSER_HTML: ParseMode.HTML,
PARSER_MD: ParseMode.MARKDOWN}
self._parse_mode = self._parsers.get(parser)
request = None
if proxy_url is not None:
request = Request(proxy_url=proxy_url,
urllib3_proxy_kwargs=proxy_params)
self.bot = Bot(token=api_key, request=request)
self.hass = hass
def _get_msg_ids(self, msg_data, chat_id):
"""Get the message id to edit.
Returns a (message_id, inline_message_id) tuple taken from the msg dict;
only one of the two is set.
**You can use 'last' as message_id** to edit
the last sent message in the chat_id.
"""
message_id = inline_message_id = None
if ATTR_MESSAGEID in msg_data:
message_id = msg_data[ATTR_MESSAGEID]
if (isinstance(message_id, str) and (message_id == 'last') and
(self._last_message_id[chat_id] is not None)):
message_id = self._last_message_id[chat_id]
else:
inline_message_id = msg_data['inline_message_id']
return message_id, inline_message_id
def _get_target_chat_ids(self, target):
"""Validate chat_id targets or return default target (first).
:param target: optional list of integers ([12234, -12345])
:return list of chat_id targets (integers)
"""
if target is not None:
if isinstance(target, int):
target = [target]
chat_ids = [t for t in target if t in self.allowed_chat_ids]
if chat_ids:
return chat_ids
_LOGGER.warning("Unallowed targets: %s, using default: %s",
target, self._default_user)
return [self._default_user]
def _get_msg_kwargs(self, data):
"""Get parameters in message data kwargs."""
def _make_row_inline_keyboard(row_keyboard):
"""Make a list of InlineKeyboardButtons.
It can accept:
- a list of tuples like:
`[(text_b1, data_callback_b1),
(text_b2, data_callback_b2), ...]
- a string like: `/cmd1, /cmd2, /cmd3`
- or a string like: `text_b1:/cmd1, text_b2:/cmd2`
"""
from telegram import InlineKeyboardButton
buttons = []
if isinstance(row_keyboard, str):
for key in row_keyboard.split(","):
if ':/' in key:
# commands like: 'Label:/cmd' become ('Label', '/cmd')
label = key.split(':/')[0]
command = key[len(label) + 1:]
buttons.append(
InlineKeyboardButton(label, callback_data=command))
else:
# commands like: '/cmd' become ('CMD', '/cmd')
label = key.strip()[1:].upper()
buttons.append(
InlineKeyboardButton(label, callback_data=key))
elif isinstance(row_keyboard, list):
for entry in row_keyboard:
text_btn, data_btn = entry
buttons.append(
InlineKeyboardButton(text_btn, callback_data=data_btn))
else:
raise ValueError(str(row_keyboard))
return buttons
# Defaults
params = {
ATTR_PARSER: self._parse_mode,
ATTR_DISABLE_NOTIF: False,
ATTR_DISABLE_WEB_PREV: None,
ATTR_REPLY_TO_MSGID: None,
ATTR_REPLYMARKUP: None,
CONF_TIMEOUT: None
}
if data is not None:
if ATTR_PARSER in data:
params[ATTR_PARSER] = self._parsers.get(
data[ATTR_PARSER], self._parse_mode)
if CONF_TIMEOUT in data:
params[CONF_TIMEOUT] = data[CONF_TIMEOUT]
if ATTR_DISABLE_NOTIF in data:
params[ATTR_DISABLE_NOTIF] = data[ATTR_DISABLE_NOTIF]
if ATTR_DISABLE_WEB_PREV in data:
params[ATTR_DISABLE_WEB_PREV] = data[ATTR_DISABLE_WEB_PREV]
if ATTR_REPLY_TO_MSGID in data:
params[ATTR_REPLY_TO_MSGID] = data[ATTR_REPLY_TO_MSGID]
# Keyboards:
if ATTR_KEYBOARD in data:
from telegram import ReplyKeyboardMarkup
keys = data.get(ATTR_KEYBOARD)
keys = keys if isinstance(keys, list) else [keys]
params[ATTR_REPLYMARKUP] = ReplyKeyboardMarkup(
[[key.strip() for key in row.split(",")] for row in keys])
elif ATTR_KEYBOARD_INLINE in data:
from telegram import InlineKeyboardMarkup
keys = data.get(ATTR_KEYBOARD_INLINE)
keys = keys if isinstance(keys, list) else [keys]
params[ATTR_REPLYMARKUP] = InlineKeyboardMarkup(
[_make_row_inline_keyboard(row) for row in keys])
return params
def _send_msg(self, func_send, msg_error, *args_msg, **kwargs_msg):
"""Send one message."""
from telegram.error import TelegramError
try:
out = func_send(*args_msg, **kwargs_msg)
if not isinstance(out, bool) and hasattr(out, ATTR_MESSAGEID):
chat_id = out.chat_id
self._last_message_id[chat_id] = out[ATTR_MESSAGEID]
_LOGGER.debug("Last message ID: %s (from chat_id %s)",
self._last_message_id, chat_id)
elif not isinstance(out, bool):
_LOGGER.warning("Update last message: out_type:%s, out=%s",
type(out), out)
return out
except TelegramError as exc:
_LOGGER.error("%s: %s. Args: %s, kwargs: %s",
msg_error, exc, args_msg, kwargs_msg)
def send_message(self, message="", target=None, **kwargs):
"""Send a message to one or multiple pre-allowed chat IDs."""
title = kwargs.get(ATTR_TITLE)
text = '{}\n{}'.format(title, message) if title else message
params = self._get_msg_kwargs(kwargs)
for chat_id in self._get_target_chat_ids(target):
_LOGGER.debug("Send message in chat ID %s with params: %s",
chat_id, params)
self._send_msg(self.bot.sendMessage,
"Error sending message",
chat_id, text, **params)
def delete_message(self, chat_id=None, **kwargs):
"""Delete a previously sent message."""
chat_id = self._get_target_chat_ids(chat_id)[0]
message_id, _ = self._get_msg_ids(kwargs, chat_id)
_LOGGER.debug("Delete message %s in chat ID %s", message_id, chat_id)
deleted = self._send_msg(self.bot.deleteMessage,
"Error deleting message",
chat_id, message_id)
# reduce message_id anyway:
if self._last_message_id[chat_id] is not None:
# change last msg_id for deque(n_msgs)?
self._last_message_id[chat_id] -= 1
return deleted
def edit_message(self, type_edit, chat_id=None, **kwargs):
"""Edit a previously sent message."""
chat_id = self._get_target_chat_ids(chat_id)[0]
message_id, inline_message_id = self._get_msg_ids(kwargs, chat_id)
params = self._get_msg_kwargs(kwargs)
_LOGGER.debug("Edit message %s in chat ID %s with params: %s",
message_id or inline_message_id, chat_id, params)
if type_edit == SERVICE_EDIT_MESSAGE:
message = kwargs.get(ATTR_MESSAGE)
title = kwargs.get(ATTR_TITLE)
text = '{}\n{}'.format(title, message) if title else message
_LOGGER.debug("Editing message with ID %s.",
message_id or inline_message_id)
return self._send_msg(self.bot.editMessageText,
"Error editing text message",
text, chat_id=chat_id, message_id=message_id,
inline_message_id=inline_message_id,
**params)
elif type_edit == SERVICE_EDIT_CAPTION:
func_send = self.bot.editMessageCaption
params[ATTR_CAPTION] = kwargs.get(ATTR_CAPTION)
else:
func_send = self.bot.editMessageReplyMarkup
return self._send_msg(func_send,
"Error editing message attributes",
chat_id=chat_id, message_id=message_id,
inline_message_id=inline_message_id,
**params)
def answer_callback_query(self, message, callback_query_id,
show_alert=False, **kwargs):
"""Answer a callback originated with a press in an inline keyboard."""
params = self._get_msg_kwargs(kwargs)
_LOGGER.debug("Answer callback query with callback ID %s: %s, "
"alert: %s.", callback_query_id, message, show_alert)
self._send_msg(self.bot.answerCallbackQuery,
"Error sending answer callback query",
callback_query_id,
text=message, show_alert=show_alert, **params)
def send_file(self, is_photo=True, target=None, **kwargs):
"""Send a photo or a document."""
params = self._get_msg_kwargs(kwargs)
caption = kwargs.get(ATTR_CAPTION)
func_send = self.bot.sendPhoto if is_photo else self.bot.sendDocument
file_content = load_data(
self.hass,
url=kwargs.get(ATTR_URL),
filepath=kwargs.get(ATTR_FILE),
username=kwargs.get(ATTR_USERNAME),
password=kwargs.get(ATTR_PASSWORD),
authentication=kwargs.get(ATTR_AUTHENTICATION),
)
if file_content:
for chat_id in self._get_target_chat_ids(target):
_LOGGER.debug("Send file to chat ID %s. Caption: %s.",
chat_id, caption)
self._send_msg(func_send, "Error sending file",
chat_id, file_content,
caption=caption, **params)
file_content.seek(0)
else:
_LOGGER.error("Can't send file with kwargs: %s", kwargs)
def send_location(self, latitude, longitude, target=None, **kwargs):
"""Send a location."""
latitude = float(latitude)
longitude = float(longitude)
params = self._get_msg_kwargs(kwargs)
for chat_id in self._get_target_chat_ids(target):
_LOGGER.debug("Send location %s/%s to chat ID %s.",
latitude, longitude, chat_id)
self._send_msg(self.bot.sendLocation,
"Error sending location",
chat_id=chat_id,
latitude=latitude, longitude=longitude, **params)
class BaseTelegramBotEntity:
"""The base class for the telegram bot."""
def __init__(self, hass, allowed_chat_ids):
"""Initialize the bot base class."""
self.allowed_chat_ids = allowed_chat_ids
self.hass = hass
def _get_message_data(self, msg_data):
"""Return boolean msg_data_is_ok and dict msg_data."""
if not msg_data:
return False, None
bad_fields = ('text' not in msg_data and
'data' not in msg_data and
'chat' not in msg_data)
if bad_fields or 'from' not in msg_data:
# Message is not correct.
_LOGGER.error("Incoming message does not have required data (%s)",
msg_data)
return False, None
if (msg_data['from'].get('id') not in self.allowed_chat_ids or
('chat' in msg_data and
msg_data['chat'].get('id') not in self.allowed_chat_ids)):
# Origin is not allowed.
_LOGGER.error("Incoming message is not allowed (%s)", msg_data)
return True, None
data = {
ATTR_USER_ID: msg_data['from']['id'],
ATTR_FROM_FIRST: msg_data['from']['first_name']
}
if 'last_name' in msg_data['from']:
data[ATTR_FROM_LAST] = msg_data['from']['last_name']
if 'chat' in msg_data:
data[ATTR_CHAT_ID] = msg_data['chat']['id']
elif ATTR_MESSAGE in msg_data and 'chat' in msg_data[ATTR_MESSAGE]:
data[ATTR_CHAT_ID] = msg_data[ATTR_MESSAGE]['chat']['id']
return True, data
def process_message(self, data):
"""Check for basic message rules and fire an event if message is ok."""
if ATTR_MSG in data or ATTR_EDITED_MSG in data:
event = EVENT_TELEGRAM_COMMAND
if ATTR_MSG in data:
data = data.get(ATTR_MSG)
else:
data = data.get(ATTR_EDITED_MSG)
message_ok, event_data = self._get_message_data(data)
if event_data is None:
return message_ok
if 'text' in data:
if data['text'][0] == '/':
pieces = data['text'].split(' ')
event_data[ATTR_COMMAND] = pieces[0]
event_data[ATTR_ARGS] = pieces[1:]
else:
event_data[ATTR_TEXT] = data['text']
event = EVENT_TELEGRAM_TEXT
else:
_LOGGER.warning("Message without text data received: %s", data)
event_data[ATTR_TEXT] = str(data)
event = EVENT_TELEGRAM_TEXT
self.hass.bus.async_fire(event, event_data)
return True
elif ATTR_CALLBACK_QUERY in data:
event = EVENT_TELEGRAM_CALLBACK
data = data.get(ATTR_CALLBACK_QUERY)
message_ok, event_data = self._get_message_data(data)
if event_data is None:
return message_ok
event_data[ATTR_DATA] = data[ATTR_DATA]
event_data[ATTR_MSG] = data[ATTR_MSG]
event_data[ATTR_CHAT_INSTANCE] = data[ATTR_CHAT_INSTANCE]
event_data[ATTR_MSGID] = data[ATTR_MSGID]
self.hass.bus.async_fire(event, event_data)
return True
else:
_LOGGER.warning("Message with unknown data received: %s", data)
return True
| mit |
grangier/django-11599 | django/utils/version.py | 320 | 1361 | import django
import os.path
import re
def get_svn_revision(path=None):
"""
Returns the SVN revision in the form SVN-XXXX,
where XXXX is the revision number.
Returns SVN-unknown if anything goes wrong, such as an unexpected
format of internal SVN files.
If path is provided, it should be a directory whose SVN info you want to
inspect. If it's not provided, this will use the root django/ package
directory.
"""
rev = None
if path is None:
path = django.__path__[0]
entries_path = '%s/.svn/entries' % path
try:
entries = open(entries_path, 'r').read()
except IOError:
pass
else:
# Versions >= 7 of the entries file are flat text. The first line is
# the version number. The next set of digits after 'dir' is the revision.
if re.match('(\d+)', entries):
rev_match = re.search('\d+\s+dir\s+(\d+)', entries)
if rev_match:
rev = rev_match.groups()[0]
# Older XML versions of the file specify revision as an attribute of
# the first entries node.
else:
from xml.dom import minidom
dom = minidom.parse(entries_path)
rev = dom.getElementsByTagName('entry')[0].getAttribute('revision')
if rev:
return u'SVN-%s' % rev
return u'SVN-unknown'
| bsd-3-clause |
zaxtax/scikit-learn | sklearn/utils/tests/test_seq_dataset.py | 47 | 2486 | # Author: Tom Dupre la Tour <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import scipy.sparse as sp
from sklearn.utils.seq_dataset import ArrayDataset, CSRDataset
from sklearn.datasets import load_iris
from numpy.testing import assert_array_equal
from nose.tools import assert_equal
iris = load_iris()
X = iris.data.astype(np.float64)
y = iris.target.astype(np.float64)
X_csr = sp.csr_matrix(X)
sample_weight = np.arange(y.size, dtype=np.float64)
def assert_csr_equal(X, Y):
X.eliminate_zeros()
Y.eliminate_zeros()
assert_equal(X.shape[0], Y.shape[0])
assert_equal(X.shape[1], Y.shape[1])
assert_array_equal(X.data, Y.data)
assert_array_equal(X.indices, Y.indices)
assert_array_equal(X.indptr, Y.indptr)
def test_seq_dataset():
dataset1 = ArrayDataset(X, y, sample_weight, seed=42)
dataset2 = CSRDataset(X_csr.data, X_csr.indptr, X_csr.indices,
y, sample_weight, seed=42)
for dataset in (dataset1, dataset2):
for i in range(5):
# next sample
xi_, yi, swi, idx = dataset._next_py()
xi = sp.csr_matrix((xi_), shape=(1, X.shape[1]))
assert_csr_equal(xi, X_csr[idx])
assert_equal(yi, y[idx])
assert_equal(swi, sample_weight[idx])
# random sample
xi_, yi, swi, idx = dataset._random_py()
xi = sp.csr_matrix((xi_), shape=(1, X.shape[1]))
assert_csr_equal(xi, X_csr[idx])
assert_equal(yi, y[idx])
assert_equal(swi, sample_weight[idx])
def test_seq_dataset_shuffle():
dataset1 = ArrayDataset(X, y, sample_weight, seed=42)
dataset2 = CSRDataset(X_csr.data, X_csr.indptr, X_csr.indices,
y, sample_weight, seed=42)
# not shuffled
for i in range(5):
_, _, _, idx1 = dataset1._next_py()
_, _, _, idx2 = dataset2._next_py()
assert_equal(idx1, i)
assert_equal(idx2, i)
for i in range(5):
_, _, _, idx1 = dataset1._random_py()
_, _, _, idx2 = dataset2._random_py()
assert_equal(idx1, idx2)
seed = 77
dataset1._shuffle_py(seed)
dataset2._shuffle_py(seed)
for i in range(5):
_, _, _, idx1 = dataset1._next_py()
_, _, _, idx2 = dataset2._next_py()
assert_equal(idx1, idx2)
_, _, _, idx1 = dataset1._random_py()
_, _, _, idx2 = dataset2._random_py()
assert_equal(idx1, idx2)
| bsd-3-clause |
CydarLtd/ansible | lib/ansible/module_utils/connection.py | 61 | 2929 | #
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2017 Red Hat Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import socket
import struct
import signal
from ansible.module_utils.basic import get_exception
from ansible.module_utils._text import to_bytes, to_native
def send_data(s, data):
packed_len = struct.pack('!Q',len(data))
return s.sendall(packed_len + data)
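# Wire format shared by send_data()/recv_data(): an 8-byte network-order unsigned
# length ('!Q') followed by exactly that many payload bytes.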
def recv_data(s):
header_len = 8 # size of a packed unsigned long long
data = to_bytes("")
while len(data) < header_len:
d = s.recv(header_len - len(data))
if not d:
return None
data += d
data_len = struct.unpack('!Q',data[:header_len])[0]
data = data[header_len:]
while len(data) < data_len:
d = s.recv(data_len - len(data))
if not d:
return None
data += d
return data
def exec_command(module, command):
try:
sf = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sf.connect(module._socket_path)
data = "EXEC: %s" % command
send_data(sf, to_bytes(data.strip()))
rc = int(recv_data(sf), 10)
stdout = recv_data(sf)
stderr = recv_data(sf)
except socket.error:
exc = get_exception()
sf.close()
module.fail_json(msg='unable to connect to socket', err=str(exc))
sf.close()
return (rc, to_native(stdout), to_native(stderr))
| gpl-3.0 |
SonarOpenCommunity/sonar-cxx | cxx-sensors/src/tools/clangsa_createrules.py | 1 | 6838 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# SonarQube C++ Community Plugin (cxx plugin)
# Copyright (C) 2010-2021 SonarOpenCommunity
# http://github.com/SonarOpenCommunity/sonar-cxx
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
"""
Simple script to generate the rules xml file for SonarQube cxx plugin
from the Clang Static Analyzer checkers.
The clang compiler should be available in the PATH
or output of clang -cc1 -analyzer-checker-help
as input file.
"""
from xml.dom import minidom
import argparse
import re
import subprocess
import sys
import xml.etree.ElementTree as ET
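# ElementTree has no native CDATA support, so the CDATA() helper and the
# _serialize_xml() override below monkey-patch the serializer: descriptions are
# wrapped in a fake '![CDATA[' element which the patched function writes out
# verbatim as "<![CDATA[...]]>".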
def CDATA(text=None):
element = ET.Element('![CDATA[')
element.text = text
return element
ET._original_serialize_xml = ET._serialize_xml
def _serialize_xml(write, elem, qnames, namespaces,
short_empty_elements, **kwargs):
if elem.tag == '![CDATA[':
write("<%s%s]]>" % (elem.tag, elem.text))
return
return ET._original_serialize_xml(
write, elem, qnames, namespaces, short_empty_elements, **kwargs)
ET._serialize_xml = ET._serialize['xml'] = _serialize_xml
def collect_checkers(clangsa_output):
"""
Parse clang static analyzer output.
Return the list of checkers and the description.
"""
checkers_data = {}
# Checker name and description in one line.
pattern = re.compile(r'^\s\s(?P<checker_name>\S*)\s*(?P<description>.*)')
checker_name = None
for line in clangsa_output.splitlines():
line = line.decode(encoding='UTF-8')
if re.match(r'^CHECKERS:', line) or line == '':
continue
elif checker_name and not re.match(r'^\s\s\S', line):
# Collect description for the checker name.
checkers_data[checker_name] = line.strip()
checker_name = None
elif re.match(r'^\s\s\S+$', line.rstrip()):
# Only checker name is in the line.
checker_name = line.strip()
else:
            # Checker name and description are on one line.
match = pattern.match(line.rstrip())
if match:
current = match.groupdict()
checkers_data[current['checker_name']] = current['description']
# Filter out debug checkers.
non_debug = {k: v for k, v in checkers_data.items() if 'debug' not in k}
return non_debug
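# Illustrative input/output for collect_checkers (format inferred from the
# regexes above): given output such as
#
#   CHECKERS:
#     core.DivideZero          Check for division by zero
#     debug.DumpCFG            Display Control-Flow Graphs
#
# it returns {'core.DivideZero': 'Check for division by zero'}, with all
# debug.* checkers filtered out.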
def main():
parser = argparse.ArgumentParser(
description="""Generate the rules xml file for cxx plugin
plugin from the Clang Static Analyzer checkers.
https://clang-analyzer.llvm.org/""",
usage='%(prog)s -o clangsa.xml')
parser.add_argument('-i', '--input', dest='input_file', action='store',
required=False,
help="""Input file to read rules.
If parameter does not exist
it tries to call clang.""")
parser.add_argument('-o', '--output', dest='output_file', action='store',
required=True,
help="""Output file to write the xml rules.
If the file already exists
it will be overwritten.""")
args = parser.parse_args()
clang_version = "clang version ???".encode('utf-8')
if args.input_file:
        with open(args.input_file, 'r') as checkers_file:
            checker_data = collect_checkers(checkers_file.read().encode('utf-8'))
else:
try:
            version_cmd = ['clang', '--version']
            version_info = subprocess.run(
                version_cmd, stdout=subprocess.PIPE, check=True).stdout
except subprocess.CalledProcessError as cpe:
sys.exit(cpe.returncode)
# Only the first line is interesting.
clang_version = version_info.splitlines()[0]
try:
clang_checkers = ['clang', '-cc1', '-analyzer-checker-help']
checkers_output = subprocess.run(clang_checkers,
stdout=subprocess.PIPE,
check=True).stdout
print("Collecting clang checkers ...", end='')
checker_data = collect_checkers(checkers_output)
except subprocess.CalledProcessError as cpe:
sys.exit(cpe.returncode)
if not checker_data:
print("No checkers could be processed.")
sys.exit(1)
print(" done.")
print("Generating rules xml ...", end='')
# build a tree structure
rules = ET.Element("rules")
comment = " C and C++ rules for Clang Static Analyzer. " \
"https://clang-analyzer.llvm.org/\n" + \
"Rules list was generated based on " + \
clang_version.decode("utf-8") + " "
rules.append(ET.Comment(comment))
for checker_name, description in checker_data.items():
rule = ET.SubElement(rules, "rule")
key = ET.SubElement(rule, "key")
name = ET.SubElement(rule, "name")
desc = ET.SubElement(rule, "description")
sev = ET.SubElement(rule, "severity")
c_type = ET.SubElement(rule, "type")
key.text = checker_name
name.text = checker_name
sev.text = "MAJOR"
c_type.text = "BUG"
if sev.text != 'INFO':
ET.SubElement(rule, 'remediationFunction').text = 'LINEAR'
ET.SubElement(rule, 'remediationFunctionGapMultiplier').text = '5min'
auto_tag = checker_name.split('.')[0]
tag = ET.SubElement(rule, "tag")
tag.text = auto_tag.lower()
cdata = CDATA('\n<p>' + description.strip() +
'\n</p>\n <h2>References</h2>'
' <p><a href="https://clang-analyzer.llvm.org/"'
' target="_blank">clang-analyzer.llvm.org</a></p> \n')
desc.append(cdata)
xmlstr = minidom.parseString(
ET.tostring(rules, method='xml')).toprettyxml(indent=" ")
print(" done.")
with open(args.output_file, 'w') as out:
out.write(xmlstr)
if __name__ == '__main__':
main()
| lgpl-3.0 |
CingHu/neutron-ustack | neutron/db/migration/alembic_migrations/versions/1d6ee1ae5da5_db_healing.py | 6 | 1060 | # Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Include all tables and make migrations unconditional.
Revision ID: db_healing
Revises: 5446f2a45467
Create Date: 2014-05-29 10:52:43.898980
"""
# revision identifiers, used by Alembic.
revision = 'db_healing'
down_revision = '5446f2a45467'
from neutron.db.migration.alembic_migrations import heal_script
def upgrade(active_plugins=None, options=None):
heal_script.heal()
def downgrade(active_plugins=None, options=None):
pass
| apache-2.0 |
TEAM-Gummy/platform_external_chromium_org | ppapi/generators/generator.py | 165 | 1776 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
import traceback
# Note: some of these files are imported to register cmdline options.
from idl_generator import Generator
from idl_option import ParseOptions
from idl_outfile import IDLOutFile
from idl_parser import ParseFiles
from idl_c_header import HGen
from idl_thunk import TGen
from idl_gen_pnacl import PnaclGen
def Main(args):
# If no arguments are provided, assume we are trying to rebuild the
# C headers with warnings off.
try:
if not args:
args = [
'--wnone', '--cgen', '--range=start,end',
'--pnacl', '--pnaclshim',
'../native_client/src/untrusted/pnacl_irt_shim/pnacl_shim.c',
'--tgen',
]
current_dir = os.path.abspath(os.getcwd())
script_dir = os.path.abspath(os.path.dirname(__file__))
if current_dir != script_dir:
print '\nIncorrect CWD, default run skipped.'
print 'When running with no arguments set CWD to the scripts directory:'
print '\t' + script_dir + '\n'
print 'This ensures correct default paths and behavior.\n'
return 1
filenames = ParseOptions(args)
ast = ParseFiles(filenames)
if ast.errors:
print 'Found %d errors. Aborting build.\n' % ast.errors
return 1
return Generator.Run(ast)
except SystemExit, ec:
print 'Exiting with %d' % ec.code
sys.exit(ec.code)
except:
typeinfo, value, tb = sys.exc_info()
traceback.print_exception(typeinfo, value, tb)
print 'Called with: ' + ' '.join(sys.argv)
if __name__ == '__main__':
sys.exit(Main(sys.argv[1:]))
| bsd-3-clause |
pyfidelity/rest-seed | backend/backrest/tests/test_change_password.py | 1 | 1714 | from pytest import fixture, mark
from transaction import commit
@fixture(scope='module')
def url(testing):
return testing.route_url('password-change')
@mark.user('alice')
def test_change_password(browser, url, alice):
data = dict(password='foo!', current='alice')
browser.put_json(url, data)
assert alice.validate_password('foo!')
@mark.user('alice')
def test_change_password_twice(browser, url, alice):
data = dict(password='foo!', current='alice')
browser.put_json(url, data)
assert alice.validate_password('foo!')
commit()
data = dict(password='alice', current='foo!')
browser.put_json(url, data)
alice = alice.query.one() # refetch alice after `commit`
assert alice.validate_password('alice')
@mark.user('alice')
def test_change_password_with_wrong_current_password(browser, url, alice):
data = dict(password='foo!', current='hurz?')
result = browser.put_json(url, data, status=400).json
assert [(e['name'], e['description']) for e in result['errors']] == [
('current', 'Password does not match')]
assert alice.validate_password('alice')
@mark.user('alice')
def test_change_password_without_current_password(browser, url, alice):
data = dict(password='foo!')
result = browser.put_json(url, data, status=400).json
assert [(e['name'], e['description']) for e in result['errors']] == [
('current', 'current is missing')]
assert alice.validate_password('alice')
@mark.user('alice')
def test_set_password_without_existing_password(browser, url, alice):
alice.password = None
data = dict(password='foo!', current=None)
browser.put_json(url, data)
assert alice.validate_password('foo!')
| bsd-2-clause |
gsnbng/erpnext | erpnext/buying/report/subcontracted_item_to_be_received/subcontracted_item_to_be_received.py | 9 | 2552 | # Copyright (c) 2013, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
def execute(filters=None):
if filters.from_date >= filters.to_date:
frappe.msgprint(_("To Date must be greater than From Date"))
data = []
columns = get_columns()
get_data(data , filters)
return columns, data
def get_columns():
return [
{
"label": _("Purchase Order"),
"fieldtype": "Link",
"fieldname": "purchase_order",
"options": "Purchase Order",
"width": 150
},
{
"label": _("Date"),
"fieldtype": "Date",
"fieldname": "date",
"hidden": 1,
"width": 150
},
{
"label": _("Supplier"),
"fieldtype": "Link",
"fieldname": "supplier",
"options": "Supplier",
"width": 150
},
{
"label": _("Finished Good Item Code"),
"fieldtype": "Data",
"fieldname": "fg_item_code",
"width": 100
},
{
"label": _("Item name"),
"fieldtype": "Data",
"fieldname": "item_name",
"width": 100
},
{
"label": _("Required Quantity"),
"fieldtype": "Float",
"fieldname": "required_qty",
"width": 100
},
{
"label": _("Received Quantity"),
"fieldtype": "Float",
"fieldname": "received_qty",
"width": 100
},
{
"label": _("Pending Quantity"),
"fieldtype": "Float",
"fieldname": "pending_qty",
"width": 100
}
]
def get_data(data, filters):
po = get_po(filters)
po_name = [v.name for v in po]
sub_items = get_purchase_order_item_supplied(po_name)
for item in sub_items:
for order in po:
if order.name == item.parent and item.received_qty < item.qty:
row ={
'purchase_order': item.parent,
'date': order.transaction_date,
'supplier': order.supplier,
'fg_item_code': item.item_code,
'item_name': item.item_name,
'required_qty': item.qty,
'received_qty':item.received_qty,
'pending_qty':item.qty - item.received_qty
}
data.append(row)
def get_po(filters):
record_filters = [
["is_subcontracted", "=", "Yes"],
["supplier", "=", filters.supplier],
["transaction_date", "<=", filters.to_date],
["transaction_date", ">=", filters.from_date],
["docstatus", "=", 1]
]
return frappe.get_all("Purchase Order", filters=record_filters, fields=["name", "transaction_date", "supplier"])
def get_purchase_order_item_supplied(po):
return frappe.get_all("Purchase Order Item", filters=[
('parent', 'IN', po)
], fields=["parent", "item_code", "item_name", "qty", "received_qty"])
| agpl-3.0 |
wscullin/spack | var/spack/repos/builtin/packages/py-pytz/package.py | 3 | 1982 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyPytz(PythonPackage):
"""World timezone definitions, modern and historical."""
homepage = "http://pythonhosted.org/pytz"
url = "https://pypi.io/packages/source/p/pytz/pytz-2016.10.tar.gz"
import_modules = ['pytz']
version('2017.2', 'f89bde8a811c8a1a5bac17eaaa94383c',
url="https://pypi.io/packages/source/p/pytz/pytz-2017.2.zip")
version('2016.10', 'cc9f16ba436efabdcef3c4d32ae4919c')
version('2016.6.1', 'b6c28a3b968bc1d8badfb61b93874e03')
version('2014.10', 'eb1cb941a20c5b751352c52486aa1dd7')
version('2015.4', '417a47b1c432d90333e42084a605d3d8')
version('2016.3', 'abae92c3301b27bd8a9f56b14f52cb29')
depends_on('py-setuptools', type='build')
| lgpl-2.1 |
weidnem/IntroPython2016 | students/baumel/session_07/HTML_lab/test_html_render.py | 3 | 1663 | """
test code for html_render
"""
import io
from html_render import Element
def test_init():
e = Element()
e = Element("this is some text")
def test_content():
    # FIXME: this test inspects the element's internal state directly
e = Element("this is some text")
assert "this is some text" in e.content
def test_append():
e = Element("this is some text")
e.append("some more text, wooHoo!")
assert "some more text, wooHoo!" in e.content
def test_two_instances():
e = Element("this is some text")
e2 = Element("this is some text")
e.append("some more text")
assert "some more text" not in e2.content
def test_render():
outfile = io.StringIO()
e = Element("this is some text")
e.append("and this is some more text, WooHoo!!")
e.render(outfile)
outfile.seek(0)
file_contents = outfile.read()
#f = open('test1.html', 'w')
#f.write(file_contents)
open('test1.html', 'w').write(file_contents)
print(file_contents)
assert ("this is some text") in file_contents
assert ("and this is some more text, WooHoo!!") in file_contents
assert file_contents.startswith("<html>")
assert file_contents.strip().endswith("</html>")
def test_tag():
outfile = io.StringIO()
e = Element("this is some text", "body")
e.append("and this is some more text, WooHoo!!")
e.render(outfile)
outfile.seek(0)
file_contents = outfile.read()
#f = open('test1.html', 'w')
#f.write(file_contents)
open('test1.html', 'w').write(file_contents)
print(file_contents)
assert ("this is some text") in file_contents
assert ("and this is some more text, WooHoo!!") in file_contents
assert file_contents.startswith("<body>")
assert file_contents.strip().endswith("</body>")
| unlicense |
mindnervestech/mnrp | addons/sale_crm/wizard/crm_make_sale.py | 74 | 7132 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class crm_make_sale(osv.osv_memory):
""" Make sale order for crm """
_name = "crm.make.sale"
_description = "Make sales"
def _selectPartner(self, cr, uid, context=None):
"""
This function gets default value for partner_id field.
@param self: The object pointer
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param context: A standard dictionary for contextual values
@return: default value of partner_id field.
"""
if context is None:
context = {}
lead_obj = self.pool.get('crm.lead')
active_id = context and context.get('active_id', False) or False
if not active_id:
return False
lead = lead_obj.read(cr, uid, [active_id], ['partner_id'], context=context)[0]
return lead['partner_id'][0] if lead['partner_id'] else False
def view_init(self, cr, uid, fields_list, context=None):
return super(crm_make_sale, self).view_init(cr, uid, fields_list, context=context)
def makeOrder(self, cr, uid, ids, context=None):
"""
This function create Quotation on given case.
@param self: The object pointer
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of crm make sales' ids
@param context: A standard dictionary for contextual values
@return: Dictionary value of created sales order.
"""
# update context: if come from phonecall, default state values can make the quote crash lp:1017353
context = dict(context or {})
context.pop('default_state', False)
case_obj = self.pool.get('crm.lead')
sale_obj = self.pool.get('sale.order')
partner_obj = self.pool.get('res.partner')
data = context and context.get('active_ids', []) or []
for make in self.browse(cr, uid, ids, context=context):
partner = make.partner_id
partner_addr = partner_obj.address_get(cr, uid, [partner.id],
['default', 'invoice', 'delivery', 'contact'])
pricelist = partner.property_product_pricelist.id
fpos = partner.property_account_position and partner.property_account_position.id or False
payment_term = partner.property_payment_term and partner.property_payment_term.id or False
new_ids = []
for case in case_obj.browse(cr, uid, data, context=context):
if not partner and case.partner_id:
partner = case.partner_id
fpos = partner.property_account_position and partner.property_account_position.id or False
payment_term = partner.property_payment_term and partner.property_payment_term.id or False
partner_addr = partner_obj.address_get(cr, uid, [partner.id],
['default', 'invoice', 'delivery', 'contact'])
pricelist = partner.property_product_pricelist.id
if False in partner_addr.values():
raise osv.except_osv(_('Insufficient Data!'), _('No address(es) defined for this customer.'))
vals = {
'origin': _('Opportunity: %s') % str(case.id),
'section_id': case.section_id and case.section_id.id or False,
'categ_ids': [(6, 0, [categ_id.id for categ_id in case.categ_ids])],
'partner_id': partner.id,
'pricelist_id': pricelist,
'partner_invoice_id': partner_addr['invoice'],
'partner_shipping_id': partner_addr['delivery'],
'date_order': fields.date.context_today(self,cr,uid,context=context),
'fiscal_position': fpos,
'payment_term':payment_term,
}
if partner.id:
vals['user_id'] = partner.user_id and partner.user_id.id or uid
new_id = sale_obj.create(cr, uid, vals, context=context)
sale_order = sale_obj.browse(cr, uid, new_id, context=context)
case_obj.write(cr, uid, [case.id], {'ref': 'sale.order,%s' % new_id})
new_ids.append(new_id)
message = _("Opportunity has been <b>converted</b> to the quotation <em>%s</em>.") % (sale_order.name)
case.message_post(body=message)
if make.close:
case_obj.case_mark_won(cr, uid, data, context=context)
if not new_ids:
return {'type': 'ir.actions.act_window_close'}
if len(new_ids)<=1:
value = {
'domain': str([('id', 'in', new_ids)]),
'view_type': 'form',
'view_mode': 'form',
'res_model': 'sale.order',
'view_id': False,
'type': 'ir.actions.act_window',
'name' : _('Quotation'),
'res_id': new_ids and new_ids[0]
}
else:
value = {
'domain': str([('id', 'in', new_ids)]),
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'sale.order',
'view_id': False,
'type': 'ir.actions.act_window',
'name' : _('Quotation'),
'res_id': new_ids
}
return value
_columns = {
'partner_id': fields.many2one('res.partner', 'Customer', required=True, domain=[('customer','=',True)]),
'close': fields.boolean('Mark Won', help='Check this to close the opportunity after having created the sales order.'),
}
_defaults = {
'close': False,
'partner_id': _selectPartner,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
mirkix/ardupilot | Tools/scripts/frame_sizes.py | 351 | 1117 | #!/usr/bin/env python
import re, sys, operator, os
code_line = re.compile(r"^\s*\d+:/")
frame_line = re.compile(r"^\s*\d+\s+/\* frame size = (\d+) \*/")
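# Illustrative .lst content these patterns are meant to match (assumed format):
#   120:/ArduCopter/motors.cpp ****   void Copter::arm_motors()
#   130                /* frame size = 64 */
# i.e. a numbered source line followed later by a GCC frame-size annotation;
# the 64-byte frame would then be attributed to that source line.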
class frame(object):
def __init__(self, code, frame_size):
self.code = code
self.frame_size = int(frame_size)
frames = []
def process_lst(filename):
'''process one lst file'''
last_code = ''
h = open(filename, mode='r')
for line in h:
if code_line.match(line):
last_code = line.strip()
elif frame_line.match(line):
frames.append(frame(last_code, frame_line.match(line).group(1)))
h.close()
if len(sys.argv) > 1:
dname = sys.argv[1]
else:
dname = '.'
for root, dirs, files in os.walk(dname):
for f in files:
if f.endswith(".lst"):
process_lst(os.path.join(root, f))
sorted_frames = sorted(frames,
key=operator.attrgetter('frame_size'),
reverse=True)
print("FrameSize Code")
for frame in sorted_frames:
if frame.frame_size > 0:
print("%9u %s" % (frame.frame_size, frame.code))
| gpl-3.0 |
antb/TPT----My-old-mod | src/python/stdlib/plat-mac/Carbon/Appearance.py | 81 | 27268 | # Generated from 'Appearance.h'
def FOUR_CHAR_CODE(x): return x
kAppearanceEventClass = FOUR_CHAR_CODE('appr')
kAEAppearanceChanged = FOUR_CHAR_CODE('thme')
kAESystemFontChanged = FOUR_CHAR_CODE('sysf')
kAESmallSystemFontChanged = FOUR_CHAR_CODE('ssfn')
kAEViewsFontChanged = FOUR_CHAR_CODE('vfnt')
kThemeDataFileType = FOUR_CHAR_CODE('thme')
kThemePlatinumFileType = FOUR_CHAR_CODE('pltn')
kThemeCustomThemesFileType = FOUR_CHAR_CODE('scen')
kThemeSoundTrackFileType = FOUR_CHAR_CODE('tsnd')
kThemeBrushDialogBackgroundActive = 1
kThemeBrushDialogBackgroundInactive = 2
kThemeBrushAlertBackgroundActive = 3
kThemeBrushAlertBackgroundInactive = 4
kThemeBrushModelessDialogBackgroundActive = 5
kThemeBrushModelessDialogBackgroundInactive = 6
kThemeBrushUtilityWindowBackgroundActive = 7
kThemeBrushUtilityWindowBackgroundInactive = 8
kThemeBrushListViewSortColumnBackground = 9
kThemeBrushListViewBackground = 10
kThemeBrushIconLabelBackground = 11
kThemeBrushListViewSeparator = 12
kThemeBrushChasingArrows = 13
kThemeBrushDragHilite = 14
kThemeBrushDocumentWindowBackground = 15
kThemeBrushFinderWindowBackground = 16
kThemeBrushScrollBarDelimiterActive = 17
kThemeBrushScrollBarDelimiterInactive = 18
kThemeBrushFocusHighlight = 19
kThemeBrushPopupArrowActive = 20
kThemeBrushPopupArrowPressed = 21
kThemeBrushPopupArrowInactive = 22
kThemeBrushAppleGuideCoachmark = 23
kThemeBrushIconLabelBackgroundSelected = 24
kThemeBrushStaticAreaFill = 25
kThemeBrushActiveAreaFill = 26
kThemeBrushButtonFrameActive = 27
kThemeBrushButtonFrameInactive = 28
kThemeBrushButtonFaceActive = 29
kThemeBrushButtonFaceInactive = 30
kThemeBrushButtonFacePressed = 31
kThemeBrushButtonActiveDarkShadow = 32
kThemeBrushButtonActiveDarkHighlight = 33
kThemeBrushButtonActiveLightShadow = 34
kThemeBrushButtonActiveLightHighlight = 35
kThemeBrushButtonInactiveDarkShadow = 36
kThemeBrushButtonInactiveDarkHighlight = 37
kThemeBrushButtonInactiveLightShadow = 38
kThemeBrushButtonInactiveLightHighlight = 39
kThemeBrushButtonPressedDarkShadow = 40
kThemeBrushButtonPressedDarkHighlight = 41
kThemeBrushButtonPressedLightShadow = 42
kThemeBrushButtonPressedLightHighlight = 43
kThemeBrushBevelActiveLight = 44
kThemeBrushBevelActiveDark = 45
kThemeBrushBevelInactiveLight = 46
kThemeBrushBevelInactiveDark = 47
kThemeBrushNotificationWindowBackground = 48
kThemeBrushMovableModalBackground = 49
kThemeBrushSheetBackgroundOpaque = 50
kThemeBrushDrawerBackground = 51
kThemeBrushToolbarBackground = 52
kThemeBrushSheetBackgroundTransparent = 53
kThemeBrushMenuBackground = 54
kThemeBrushMenuBackgroundSelected = 55
kThemeBrushSheetBackground = kThemeBrushSheetBackgroundOpaque
kThemeBrushBlack = -1
kThemeBrushWhite = -2
kThemeBrushPrimaryHighlightColor = -3
kThemeBrushSecondaryHighlightColor = -4
kThemeTextColorDialogActive = 1
kThemeTextColorDialogInactive = 2
kThemeTextColorAlertActive = 3
kThemeTextColorAlertInactive = 4
kThemeTextColorModelessDialogActive = 5
kThemeTextColorModelessDialogInactive = 6
kThemeTextColorWindowHeaderActive = 7
kThemeTextColorWindowHeaderInactive = 8
kThemeTextColorPlacardActive = 9
kThemeTextColorPlacardInactive = 10
kThemeTextColorPlacardPressed = 11
kThemeTextColorPushButtonActive = 12
kThemeTextColorPushButtonInactive = 13
kThemeTextColorPushButtonPressed = 14
kThemeTextColorBevelButtonActive = 15
kThemeTextColorBevelButtonInactive = 16
kThemeTextColorBevelButtonPressed = 17
kThemeTextColorPopupButtonActive = 18
kThemeTextColorPopupButtonInactive = 19
kThemeTextColorPopupButtonPressed = 20
kThemeTextColorIconLabel = 21
kThemeTextColorListView = 22
kThemeTextColorDocumentWindowTitleActive = 23
kThemeTextColorDocumentWindowTitleInactive = 24
kThemeTextColorMovableModalWindowTitleActive = 25
kThemeTextColorMovableModalWindowTitleInactive = 26
kThemeTextColorUtilityWindowTitleActive = 27
kThemeTextColorUtilityWindowTitleInactive = 28
kThemeTextColorPopupWindowTitleActive = 29
kThemeTextColorPopupWindowTitleInactive = 30
kThemeTextColorRootMenuActive = 31
kThemeTextColorRootMenuSelected = 32
kThemeTextColorRootMenuDisabled = 33
kThemeTextColorMenuItemActive = 34
kThemeTextColorMenuItemSelected = 35
kThemeTextColorMenuItemDisabled = 36
kThemeTextColorPopupLabelActive = 37
kThemeTextColorPopupLabelInactive = 38
kThemeTextColorTabFrontActive = 39
kThemeTextColorTabNonFrontActive = 40
kThemeTextColorTabNonFrontPressed = 41
kThemeTextColorTabFrontInactive = 42
kThemeTextColorTabNonFrontInactive = 43
kThemeTextColorIconLabelSelected = 44
kThemeTextColorBevelButtonStickyActive = 45
kThemeTextColorBevelButtonStickyInactive = 46
kThemeTextColorNotification = 47
kThemeTextColorBlack = -1
kThemeTextColorWhite = -2
kThemeStateInactive = 0
kThemeStateActive = 1
kThemeStatePressed = 2
kThemeStateRollover = 6
kThemeStateUnavailable = 7
kThemeStateUnavailableInactive = 8
kThemeStateDisabled = 0
kThemeStatePressedUp = 2
kThemeStatePressedDown = 3
kThemeArrowCursor = 0
kThemeCopyArrowCursor = 1
kThemeAliasArrowCursor = 2
kThemeContextualMenuArrowCursor = 3
kThemeIBeamCursor = 4
kThemeCrossCursor = 5
kThemePlusCursor = 6
kThemeWatchCursor = 7
kThemeClosedHandCursor = 8
kThemeOpenHandCursor = 9
kThemePointingHandCursor = 10
kThemeCountingUpHandCursor = 11
kThemeCountingDownHandCursor = 12
kThemeCountingUpAndDownHandCursor = 13
kThemeSpinningCursor = 14
kThemeResizeLeftCursor = 15
kThemeResizeRightCursor = 16
kThemeResizeLeftRightCursor = 17
kThemeMenuBarNormal = 0
kThemeMenuBarSelected = 1
kThemeMenuSquareMenuBar = (1 << 0)
kThemeMenuActive = 0
kThemeMenuSelected = 1
kThemeMenuDisabled = 3
kThemeMenuTypePullDown = 0
kThemeMenuTypePopUp = 1
kThemeMenuTypeHierarchical = 2
kThemeMenuTypeInactive = 0x0100
kThemeMenuItemPlain = 0
kThemeMenuItemHierarchical = 1
kThemeMenuItemScrollUpArrow = 2
kThemeMenuItemScrollDownArrow = 3
kThemeMenuItemAtTop = 0x0100
kThemeMenuItemAtBottom = 0x0200
kThemeMenuItemHierBackground = 0x0400
kThemeMenuItemPopUpBackground = 0x0800
kThemeMenuItemHasIcon = 0x8000
kThemeMenuItemNoBackground = 0x4000
kThemeBackgroundTabPane = 1
kThemeBackgroundPlacard = 2
kThemeBackgroundWindowHeader = 3
kThemeBackgroundListViewWindowHeader = 4
kThemeBackgroundSecondaryGroupBox = 5
kThemeNameTag = FOUR_CHAR_CODE('name')
kThemeVariantNameTag = FOUR_CHAR_CODE('varn')
kThemeVariantBaseTintTag = FOUR_CHAR_CODE('tint')
kThemeHighlightColorTag = FOUR_CHAR_CODE('hcol')
kThemeScrollBarArrowStyleTag = FOUR_CHAR_CODE('sbar')
kThemeScrollBarThumbStyleTag = FOUR_CHAR_CODE('sbth')
kThemeSoundsEnabledTag = FOUR_CHAR_CODE('snds')
kThemeDblClickCollapseTag = FOUR_CHAR_CODE('coll')
kThemeAppearanceFileNameTag = FOUR_CHAR_CODE('thme')
kThemeSystemFontTag = FOUR_CHAR_CODE('lgsf')
kThemeSmallSystemFontTag = FOUR_CHAR_CODE('smsf')
kThemeViewsFontTag = FOUR_CHAR_CODE('vfnt')
kThemeViewsFontSizeTag = FOUR_CHAR_CODE('vfsz')
kThemeDesktopPatternNameTag = FOUR_CHAR_CODE('patn')
kThemeDesktopPatternTag = FOUR_CHAR_CODE('patt')
kThemeDesktopPictureNameTag = FOUR_CHAR_CODE('dpnm')
kThemeDesktopPictureAliasTag = FOUR_CHAR_CODE('dpal')
kThemeDesktopPictureAlignmentTag = FOUR_CHAR_CODE('dpan')
kThemeHighlightColorNameTag = FOUR_CHAR_CODE('hcnm')
kThemeExamplePictureIDTag = FOUR_CHAR_CODE('epic')
kThemeSoundTrackNameTag = FOUR_CHAR_CODE('sndt')
kThemeSoundMaskTag = FOUR_CHAR_CODE('smsk')
kThemeUserDefinedTag = FOUR_CHAR_CODE('user')
kThemeSmoothFontEnabledTag = FOUR_CHAR_CODE('smoo')
kThemeSmoothFontMinSizeTag = FOUR_CHAR_CODE('smos')
kTiledOnScreen = 1
kCenterOnScreen = 2
kFitToScreen = 3
kFillScreen = 4
kUseBestGuess = 5
kThemeCheckBoxClassicX = 0
kThemeCheckBoxCheckMark = 1
kThemeScrollBarArrowsSingle = 0
kThemeScrollBarArrowsLowerRight = 1
kThemeScrollBarThumbNormal = 0
kThemeScrollBarThumbProportional = 1
kThemeSystemFont = 0
kThemeSmallSystemFont = 1
kThemeSmallEmphasizedSystemFont = 2
kThemeViewsFont = 3
kThemeEmphasizedSystemFont = 4
kThemeApplicationFont = 5
kThemeLabelFont = 6
kThemeMenuTitleFont = 100
kThemeMenuItemFont = 101
kThemeMenuItemMarkFont = 102
kThemeMenuItemCmdKeyFont = 103
kThemeWindowTitleFont = 104
kThemePushButtonFont = 105
kThemeUtilityWindowTitleFont = 106
kThemeAlertHeaderFont = 107
kThemeCurrentPortFont = 200
kThemeTabNonFront = 0
kThemeTabNonFrontPressed = 1
kThemeTabNonFrontInactive = 2
kThemeTabFront = 3
kThemeTabFrontInactive = 4
kThemeTabNonFrontUnavailable = 5
kThemeTabFrontUnavailable = 6
kThemeTabNorth = 0
kThemeTabSouth = 1
kThemeTabEast = 2
kThemeTabWest = 3
kThemeSmallTabHeight = 16
kThemeLargeTabHeight = 21
kThemeTabPaneOverlap = 3
kThemeSmallTabHeightMax = 19
kThemeLargeTabHeightMax = 24
kThemeMediumScrollBar = 0
kThemeSmallScrollBar = 1
kThemeMediumSlider = 2
kThemeMediumProgressBar = 3
kThemeMediumIndeterminateBar = 4
kThemeRelevanceBar = 5
kThemeSmallSlider = 6
kThemeLargeProgressBar = 7
kThemeLargeIndeterminateBar = 8
kThemeTrackActive = 0
kThemeTrackDisabled = 1
kThemeTrackNothingToScroll = 2
kThemeTrackInactive = 3
kThemeLeftOutsideArrowPressed = 0x01
kThemeLeftInsideArrowPressed = 0x02
kThemeLeftTrackPressed = 0x04
kThemeThumbPressed = 0x08
kThemeRightTrackPressed = 0x10
kThemeRightInsideArrowPressed = 0x20
kThemeRightOutsideArrowPressed = 0x40
kThemeTopOutsideArrowPressed = kThemeLeftOutsideArrowPressed
kThemeTopInsideArrowPressed = kThemeLeftInsideArrowPressed
kThemeTopTrackPressed = kThemeLeftTrackPressed
kThemeBottomTrackPressed = kThemeRightTrackPressed
kThemeBottomInsideArrowPressed = kThemeRightInsideArrowPressed
kThemeBottomOutsideArrowPressed = kThemeRightOutsideArrowPressed
kThemeThumbPlain = 0
kThemeThumbUpward = 1
kThemeThumbDownward = 2
kThemeTrackHorizontal = (1 << 0)
kThemeTrackRightToLeft = (1 << 1)
kThemeTrackShowThumb = (1 << 2)
kThemeTrackThumbRgnIsNotGhost = (1 << 3)
kThemeTrackNoScrollBarArrows = (1 << 4)
kThemeWindowHasGrow = (1 << 0)
kThemeWindowHasHorizontalZoom = (1 << 3)
kThemeWindowHasVerticalZoom = (1 << 4)
kThemeWindowHasFullZoom = kThemeWindowHasHorizontalZoom + kThemeWindowHasVerticalZoom
kThemeWindowHasCloseBox = (1 << 5)
kThemeWindowHasCollapseBox = (1 << 6)
kThemeWindowHasTitleText = (1 << 7)
kThemeWindowIsCollapsed = (1 << 8)
kThemeWindowHasDirty = (1 << 9)
kThemeDocumentWindow = 0
kThemeDialogWindow = 1
kThemeMovableDialogWindow = 2
kThemeAlertWindow = 3
kThemeMovableAlertWindow = 4
kThemePlainDialogWindow = 5
kThemeShadowDialogWindow = 6
kThemePopupWindow = 7
kThemeUtilityWindow = 8
kThemeUtilitySideWindow = 9
kThemeSheetWindow = 10
kThemeDrawerWindow = 11
kThemeWidgetCloseBox = 0
kThemeWidgetZoomBox = 1
kThemeWidgetCollapseBox = 2
kThemeWidgetDirtyCloseBox = 6
kThemeArrowLeft = 0
kThemeArrowDown = 1
kThemeArrowRight = 2
kThemeArrowUp = 3
kThemeArrow3pt = 0
kThemeArrow5pt = 1
kThemeArrow7pt = 2
kThemeArrow9pt = 3
kThemeGrowLeft = (1 << 0)
kThemeGrowRight = (1 << 1)
kThemeGrowUp = (1 << 2)
kThemeGrowDown = (1 << 3)
kThemePushButton = 0
kThemeCheckBox = 1
kThemeRadioButton = 2
kThemeBevelButton = 3
kThemeArrowButton = 4
kThemePopupButton = 5
kThemeDisclosureButton = 6
kThemeIncDecButton = 7
kThemeSmallBevelButton = 8
kThemeMediumBevelButton = 3
kThemeLargeBevelButton = 9
kThemeListHeaderButton = 10
kThemeRoundButton = 11
kThemeLargeRoundButton = 12
kThemeSmallCheckBox = 13
kThemeSmallRadioButton = 14
kThemeRoundedBevelButton = 15
kThemeNormalCheckBox = kThemeCheckBox
kThemeNormalRadioButton = kThemeRadioButton
kThemeButtonOff = 0
kThemeButtonOn = 1
kThemeButtonMixed = 2
kThemeDisclosureRight = 0
kThemeDisclosureDown = 1
kThemeDisclosureLeft = 2
kThemeAdornmentNone = 0
kThemeAdornmentDefault = (1 << 0)
kThemeAdornmentFocus = (1 << 2)
kThemeAdornmentRightToLeft = (1 << 4)
kThemeAdornmentDrawIndicatorOnly = (1 << 5)
kThemeAdornmentHeaderButtonLeftNeighborSelected = (1 << 6)
kThemeAdornmentHeaderButtonRightNeighborSelected = (1 << 7)
kThemeAdornmentHeaderButtonSortUp = (1 << 8)
kThemeAdornmentHeaderMenuButton = (1 << 9)
kThemeAdornmentHeaderButtonNoShadow = (1 << 10)
kThemeAdornmentHeaderButtonShadowOnly = (1 << 11)
kThemeAdornmentNoShadow = kThemeAdornmentHeaderButtonNoShadow
kThemeAdornmentShadowOnly = kThemeAdornmentHeaderButtonShadowOnly
kThemeAdornmentArrowLeftArrow = (1 << 6)
kThemeAdornmentArrowDownArrow = (1 << 7)
kThemeAdornmentArrowDoubleArrow = (1 << 8)
kThemeAdornmentArrowUpArrow = (1 << 9)
kThemeNoSounds = 0
kThemeWindowSoundsMask = (1 << 0)
kThemeMenuSoundsMask = (1 << 1)
kThemeControlSoundsMask = (1 << 2)
kThemeFinderSoundsMask = (1 << 3)
kThemeDragSoundNone = 0
kThemeDragSoundMoveWindow = FOUR_CHAR_CODE('wmov')
kThemeDragSoundGrowWindow = FOUR_CHAR_CODE('wgro')
kThemeDragSoundMoveUtilWindow = FOUR_CHAR_CODE('umov')
kThemeDragSoundGrowUtilWindow = FOUR_CHAR_CODE('ugro')
kThemeDragSoundMoveDialog = FOUR_CHAR_CODE('dmov')
kThemeDragSoundMoveAlert = FOUR_CHAR_CODE('amov')
kThemeDragSoundMoveIcon = FOUR_CHAR_CODE('imov')
kThemeDragSoundSliderThumb = FOUR_CHAR_CODE('slth')
kThemeDragSoundSliderGhost = FOUR_CHAR_CODE('slgh')
kThemeDragSoundScrollBarThumb = FOUR_CHAR_CODE('sbth')
kThemeDragSoundScrollBarGhost = FOUR_CHAR_CODE('sbgh')
kThemeDragSoundScrollBarArrowDecreasing = FOUR_CHAR_CODE('sbad')
kThemeDragSoundScrollBarArrowIncreasing = FOUR_CHAR_CODE('sbai')
kThemeDragSoundDragging = FOUR_CHAR_CODE('drag')
kThemeSoundNone = 0
kThemeSoundMenuOpen = FOUR_CHAR_CODE('mnuo')
kThemeSoundMenuClose = FOUR_CHAR_CODE('mnuc')
kThemeSoundMenuItemHilite = FOUR_CHAR_CODE('mnui')
kThemeSoundMenuItemRelease = FOUR_CHAR_CODE('mnus')
kThemeSoundWindowClosePress = FOUR_CHAR_CODE('wclp')
kThemeSoundWindowCloseEnter = FOUR_CHAR_CODE('wcle')
kThemeSoundWindowCloseExit = FOUR_CHAR_CODE('wclx')
kThemeSoundWindowCloseRelease = FOUR_CHAR_CODE('wclr')
kThemeSoundWindowZoomPress = FOUR_CHAR_CODE('wzmp')
kThemeSoundWindowZoomEnter = FOUR_CHAR_CODE('wzme')
kThemeSoundWindowZoomExit = FOUR_CHAR_CODE('wzmx')
kThemeSoundWindowZoomRelease = FOUR_CHAR_CODE('wzmr')
kThemeSoundWindowCollapsePress = FOUR_CHAR_CODE('wcop')
kThemeSoundWindowCollapseEnter = FOUR_CHAR_CODE('wcoe')
kThemeSoundWindowCollapseExit = FOUR_CHAR_CODE('wcox')
kThemeSoundWindowCollapseRelease = FOUR_CHAR_CODE('wcor')
kThemeSoundWindowDragBoundary = FOUR_CHAR_CODE('wdbd')
kThemeSoundUtilWinClosePress = FOUR_CHAR_CODE('uclp')
kThemeSoundUtilWinCloseEnter = FOUR_CHAR_CODE('ucle')
kThemeSoundUtilWinCloseExit = FOUR_CHAR_CODE('uclx')
kThemeSoundUtilWinCloseRelease = FOUR_CHAR_CODE('uclr')
kThemeSoundUtilWinZoomPress = FOUR_CHAR_CODE('uzmp')
kThemeSoundUtilWinZoomEnter = FOUR_CHAR_CODE('uzme')
kThemeSoundUtilWinZoomExit = FOUR_CHAR_CODE('uzmx')
kThemeSoundUtilWinZoomRelease = FOUR_CHAR_CODE('uzmr')
kThemeSoundUtilWinCollapsePress = FOUR_CHAR_CODE('ucop')
kThemeSoundUtilWinCollapseEnter = FOUR_CHAR_CODE('ucoe')
kThemeSoundUtilWinCollapseExit = FOUR_CHAR_CODE('ucox')
kThemeSoundUtilWinCollapseRelease = FOUR_CHAR_CODE('ucor')
kThemeSoundUtilWinDragBoundary = FOUR_CHAR_CODE('udbd')
kThemeSoundWindowOpen = FOUR_CHAR_CODE('wopn')
kThemeSoundWindowClose = FOUR_CHAR_CODE('wcls')
kThemeSoundWindowZoomIn = FOUR_CHAR_CODE('wzmi')
kThemeSoundWindowZoomOut = FOUR_CHAR_CODE('wzmo')
kThemeSoundWindowCollapseUp = FOUR_CHAR_CODE('wcol')
kThemeSoundWindowCollapseDown = FOUR_CHAR_CODE('wexp')
kThemeSoundWindowActivate = FOUR_CHAR_CODE('wact')
kThemeSoundUtilWindowOpen = FOUR_CHAR_CODE('uopn')
kThemeSoundUtilWindowClose = FOUR_CHAR_CODE('ucls')
kThemeSoundUtilWindowZoomIn = FOUR_CHAR_CODE('uzmi')
kThemeSoundUtilWindowZoomOut = FOUR_CHAR_CODE('uzmo')
kThemeSoundUtilWindowCollapseUp = FOUR_CHAR_CODE('ucol')
kThemeSoundUtilWindowCollapseDown = FOUR_CHAR_CODE('uexp')
kThemeSoundUtilWindowActivate = FOUR_CHAR_CODE('uact')
kThemeSoundDialogOpen = FOUR_CHAR_CODE('dopn')
kThemeSoundDialogClose = FOUR_CHAR_CODE('dlgc')
kThemeSoundAlertOpen = FOUR_CHAR_CODE('aopn')
kThemeSoundAlertClose = FOUR_CHAR_CODE('altc')
kThemeSoundPopupWindowOpen = FOUR_CHAR_CODE('pwop')
kThemeSoundPopupWindowClose = FOUR_CHAR_CODE('pwcl')
kThemeSoundButtonPress = FOUR_CHAR_CODE('btnp')
kThemeSoundButtonEnter = FOUR_CHAR_CODE('btne')
kThemeSoundButtonExit = FOUR_CHAR_CODE('btnx')
kThemeSoundButtonRelease = FOUR_CHAR_CODE('btnr')
kThemeSoundDefaultButtonPress = FOUR_CHAR_CODE('dbtp')
kThemeSoundDefaultButtonEnter = FOUR_CHAR_CODE('dbte')
kThemeSoundDefaultButtonExit = FOUR_CHAR_CODE('dbtx')
kThemeSoundDefaultButtonRelease = FOUR_CHAR_CODE('dbtr')
kThemeSoundCancelButtonPress = FOUR_CHAR_CODE('cbtp')
kThemeSoundCancelButtonEnter = FOUR_CHAR_CODE('cbte')
kThemeSoundCancelButtonExit = FOUR_CHAR_CODE('cbtx')
kThemeSoundCancelButtonRelease = FOUR_CHAR_CODE('cbtr')
kThemeSoundCheckboxPress = FOUR_CHAR_CODE('chkp')
kThemeSoundCheckboxEnter = FOUR_CHAR_CODE('chke')
kThemeSoundCheckboxExit = FOUR_CHAR_CODE('chkx')
kThemeSoundCheckboxRelease = FOUR_CHAR_CODE('chkr')
kThemeSoundRadioPress = FOUR_CHAR_CODE('radp')
kThemeSoundRadioEnter = FOUR_CHAR_CODE('rade')
kThemeSoundRadioExit = FOUR_CHAR_CODE('radx')
kThemeSoundRadioRelease = FOUR_CHAR_CODE('radr')
kThemeSoundScrollArrowPress = FOUR_CHAR_CODE('sbap')
kThemeSoundScrollArrowEnter = FOUR_CHAR_CODE('sbae')
kThemeSoundScrollArrowExit = FOUR_CHAR_CODE('sbax')
kThemeSoundScrollArrowRelease = FOUR_CHAR_CODE('sbar')
kThemeSoundScrollEndOfTrack = FOUR_CHAR_CODE('sbte')
kThemeSoundScrollTrackPress = FOUR_CHAR_CODE('sbtp')
kThemeSoundSliderEndOfTrack = FOUR_CHAR_CODE('slte')
kThemeSoundSliderTrackPress = FOUR_CHAR_CODE('sltp')
kThemeSoundBalloonOpen = FOUR_CHAR_CODE('blno')
kThemeSoundBalloonClose = FOUR_CHAR_CODE('blnc')
kThemeSoundBevelPress = FOUR_CHAR_CODE('bevp')
kThemeSoundBevelEnter = FOUR_CHAR_CODE('beve')
kThemeSoundBevelExit = FOUR_CHAR_CODE('bevx')
kThemeSoundBevelRelease = FOUR_CHAR_CODE('bevr')
kThemeSoundLittleArrowUpPress = FOUR_CHAR_CODE('laup')
kThemeSoundLittleArrowDnPress = FOUR_CHAR_CODE('ladp')
kThemeSoundLittleArrowEnter = FOUR_CHAR_CODE('lare')
kThemeSoundLittleArrowExit = FOUR_CHAR_CODE('larx')
kThemeSoundLittleArrowUpRelease = FOUR_CHAR_CODE('laur')
kThemeSoundLittleArrowDnRelease = FOUR_CHAR_CODE('ladr')
kThemeSoundPopupPress = FOUR_CHAR_CODE('popp')
kThemeSoundPopupEnter = FOUR_CHAR_CODE('pope')
kThemeSoundPopupExit = FOUR_CHAR_CODE('popx')
kThemeSoundPopupRelease = FOUR_CHAR_CODE('popr')
kThemeSoundDisclosurePress = FOUR_CHAR_CODE('dscp')
kThemeSoundDisclosureEnter = FOUR_CHAR_CODE('dsce')
kThemeSoundDisclosureExit = FOUR_CHAR_CODE('dscx')
kThemeSoundDisclosureRelease = FOUR_CHAR_CODE('dscr')
kThemeSoundTabPressed = FOUR_CHAR_CODE('tabp')
kThemeSoundTabEnter = FOUR_CHAR_CODE('tabe')
kThemeSoundTabExit = FOUR_CHAR_CODE('tabx')
kThemeSoundTabRelease = FOUR_CHAR_CODE('tabr')
kThemeSoundDragTargetHilite = FOUR_CHAR_CODE('dthi')
kThemeSoundDragTargetUnhilite = FOUR_CHAR_CODE('dtuh')
kThemeSoundDragTargetDrop = FOUR_CHAR_CODE('dtdr')
kThemeSoundEmptyTrash = FOUR_CHAR_CODE('ftrs')
kThemeSoundSelectItem = FOUR_CHAR_CODE('fsel')
kThemeSoundNewItem = FOUR_CHAR_CODE('fnew')
kThemeSoundReceiveDrop = FOUR_CHAR_CODE('fdrp')
kThemeSoundCopyDone = FOUR_CHAR_CODE('fcpd')
kThemeSoundResolveAlias = FOUR_CHAR_CODE('fral')
kThemeSoundLaunchApp = FOUR_CHAR_CODE('flap')
kThemeSoundDiskInsert = FOUR_CHAR_CODE('dski')
kThemeSoundDiskEject = FOUR_CHAR_CODE('dske')
kThemeSoundFinderDragOnIcon = FOUR_CHAR_CODE('fdon')
kThemeSoundFinderDragOffIcon = FOUR_CHAR_CODE('fdof')
kThemePopupTabNormalPosition = 0
kThemePopupTabCenterOnWindow = 1
kThemePopupTabCenterOnOffset = 2
kThemeMetricScrollBarWidth = 0
kThemeMetricSmallScrollBarWidth = 1
kThemeMetricCheckBoxHeight = 2
kThemeMetricRadioButtonHeight = 3
kThemeMetricEditTextWhitespace = 4
kThemeMetricEditTextFrameOutset = 5
kThemeMetricListBoxFrameOutset = 6
kThemeMetricFocusRectOutset = 7
kThemeMetricImageWellThickness = 8
kThemeMetricScrollBarOverlap = 9
kThemeMetricLargeTabHeight = 10
kThemeMetricLargeTabCapsWidth = 11
kThemeMetricTabFrameOverlap = 12
kThemeMetricTabIndentOrStyle = 13
kThemeMetricTabOverlap = 14
kThemeMetricSmallTabHeight = 15
kThemeMetricSmallTabCapsWidth = 16
kThemeMetricDisclosureButtonHeight = 17
kThemeMetricRoundButtonSize = 18
kThemeMetricPushButtonHeight = 19
kThemeMetricListHeaderHeight = 20
kThemeMetricSmallCheckBoxHeight = 21
kThemeMetricDisclosureButtonWidth = 22
kThemeMetricSmallDisclosureButtonHeight = 23
kThemeMetricSmallDisclosureButtonWidth = 24
kThemeMetricDisclosureTriangleHeight = 25
kThemeMetricDisclosureTriangleWidth = 26
kThemeMetricLittleArrowsHeight = 27
kThemeMetricLittleArrowsWidth = 28
kThemeMetricPaneSplitterHeight = 29
kThemeMetricPopupButtonHeight = 30
kThemeMetricSmallPopupButtonHeight = 31
kThemeMetricLargeProgressBarThickness = 32
kThemeMetricPullDownHeight = 33
kThemeMetricSmallPullDownHeight = 34
kThemeMetricSmallPushButtonHeight = 35
kThemeMetricSmallRadioButtonHeight = 36
kThemeMetricRelevanceIndicatorHeight = 37
kThemeMetricResizeControlHeight = 38
kThemeMetricSmallResizeControlHeight = 39
kThemeMetricLargeRoundButtonSize = 40
kThemeMetricHSliderHeight = 41
kThemeMetricHSliderTickHeight = 42
kThemeMetricSmallHSliderHeight = 43
kThemeMetricSmallHSliderTickHeight = 44
kThemeMetricVSliderWidth = 45
kThemeMetricVSliderTickWidth = 46
kThemeMetricSmallVSliderWidth = 47
kThemeMetricSmallVSliderTickWidth = 48
kThemeMetricTitleBarControlsHeight = 49
kThemeMetricCheckBoxWidth = 50
kThemeMetricSmallCheckBoxWidth = 51
kThemeMetricRadioButtonWidth = 52
kThemeMetricSmallRadioButtonWidth = 53
kThemeMetricSmallHSliderMinThumbWidth = 54
kThemeMetricSmallVSliderMinThumbHeight = 55
kThemeMetricSmallHSliderTickOffset = 56
kThemeMetricSmallVSliderTickOffset = 57
kThemeMetricNormalProgressBarThickness = 58
kThemeMetricProgressBarShadowOutset = 59
kThemeMetricSmallProgressBarShadowOutset = 60
kThemeMetricPrimaryGroupBoxContentInset = 61
kThemeMetricSecondaryGroupBoxContentInset = 62
kThemeMetricMenuMarkColumnWidth = 63
kThemeMetricMenuExcludedMarkColumnWidth = 64
kThemeMetricMenuMarkIndent = 65
kThemeMetricMenuTextLeadingEdgeMargin = 66
kThemeMetricMenuTextTrailingEdgeMargin = 67
kThemeMetricMenuIndentWidth = 68
kThemeMetricMenuIconTrailingEdgeMargin = 69
# appearanceBadBrushIndexErr = themeInvalidBrushErr
# appearanceProcessRegisteredErr = themeProcessRegisteredErr
# appearanceProcessNotRegisteredErr = themeProcessNotRegisteredErr
# appearanceBadTextColorIndexErr = themeBadTextColorErr
# appearanceThemeHasNoAccents = themeHasNoAccentsErr
# appearanceBadCursorIndexErr = themeBadCursorIndexErr
kThemeActiveDialogBackgroundBrush = kThemeBrushDialogBackgroundActive
kThemeInactiveDialogBackgroundBrush = kThemeBrushDialogBackgroundInactive
kThemeActiveAlertBackgroundBrush = kThemeBrushAlertBackgroundActive
kThemeInactiveAlertBackgroundBrush = kThemeBrushAlertBackgroundInactive
kThemeActiveModelessDialogBackgroundBrush = kThemeBrushModelessDialogBackgroundActive
kThemeInactiveModelessDialogBackgroundBrush = kThemeBrushModelessDialogBackgroundInactive
kThemeActiveUtilityWindowBackgroundBrush = kThemeBrushUtilityWindowBackgroundActive
kThemeInactiveUtilityWindowBackgroundBrush = kThemeBrushUtilityWindowBackgroundInactive
kThemeListViewSortColumnBackgroundBrush = kThemeBrushListViewSortColumnBackground
kThemeListViewBackgroundBrush = kThemeBrushListViewBackground
kThemeIconLabelBackgroundBrush = kThemeBrushIconLabelBackground
kThemeListViewSeparatorBrush = kThemeBrushListViewSeparator
kThemeChasingArrowsBrush = kThemeBrushChasingArrows
kThemeDragHiliteBrush = kThemeBrushDragHilite
kThemeDocumentWindowBackgroundBrush = kThemeBrushDocumentWindowBackground
kThemeFinderWindowBackgroundBrush = kThemeBrushFinderWindowBackground
kThemeActiveScrollBarDelimiterBrush = kThemeBrushScrollBarDelimiterActive
kThemeInactiveScrollBarDelimiterBrush = kThemeBrushScrollBarDelimiterInactive
kThemeFocusHighlightBrush = kThemeBrushFocusHighlight
kThemeActivePopupArrowBrush = kThemeBrushPopupArrowActive
kThemePressedPopupArrowBrush = kThemeBrushPopupArrowPressed
kThemeInactivePopupArrowBrush = kThemeBrushPopupArrowInactive
kThemeAppleGuideCoachmarkBrush = kThemeBrushAppleGuideCoachmark
kThemeActiveDialogTextColor = kThemeTextColorDialogActive
kThemeInactiveDialogTextColor = kThemeTextColorDialogInactive
kThemeActiveAlertTextColor = kThemeTextColorAlertActive
kThemeInactiveAlertTextColor = kThemeTextColorAlertInactive
kThemeActiveModelessDialogTextColor = kThemeTextColorModelessDialogActive
kThemeInactiveModelessDialogTextColor = kThemeTextColorModelessDialogInactive
kThemeActiveWindowHeaderTextColor = kThemeTextColorWindowHeaderActive
kThemeInactiveWindowHeaderTextColor = kThemeTextColorWindowHeaderInactive
kThemeActivePlacardTextColor = kThemeTextColorPlacardActive
kThemeInactivePlacardTextColor = kThemeTextColorPlacardInactive
kThemePressedPlacardTextColor = kThemeTextColorPlacardPressed
kThemeActivePushButtonTextColor = kThemeTextColorPushButtonActive
kThemeInactivePushButtonTextColor = kThemeTextColorPushButtonInactive
kThemePressedPushButtonTextColor = kThemeTextColorPushButtonPressed
kThemeActiveBevelButtonTextColor = kThemeTextColorBevelButtonActive
kThemeInactiveBevelButtonTextColor = kThemeTextColorBevelButtonInactive
kThemePressedBevelButtonTextColor = kThemeTextColorBevelButtonPressed
kThemeActivePopupButtonTextColor = kThemeTextColorPopupButtonActive
kThemeInactivePopupButtonTextColor = kThemeTextColorPopupButtonInactive
kThemePressedPopupButtonTextColor = kThemeTextColorPopupButtonPressed
kThemeIconLabelTextColor = kThemeTextColorIconLabel
kThemeListViewTextColor = kThemeTextColorListView
kThemeActiveDocumentWindowTitleTextColor = kThemeTextColorDocumentWindowTitleActive
kThemeInactiveDocumentWindowTitleTextColor = kThemeTextColorDocumentWindowTitleInactive
kThemeActiveMovableModalWindowTitleTextColor = kThemeTextColorMovableModalWindowTitleActive
kThemeInactiveMovableModalWindowTitleTextColor = kThemeTextColorMovableModalWindowTitleInactive
kThemeActiveUtilityWindowTitleTextColor = kThemeTextColorUtilityWindowTitleActive
kThemeInactiveUtilityWindowTitleTextColor = kThemeTextColorUtilityWindowTitleInactive
kThemeActivePopupWindowTitleColor = kThemeTextColorPopupWindowTitleActive
kThemeInactivePopupWindowTitleColor = kThemeTextColorPopupWindowTitleInactive
kThemeActiveRootMenuTextColor = kThemeTextColorRootMenuActive
kThemeSelectedRootMenuTextColor = kThemeTextColorRootMenuSelected
kThemeDisabledRootMenuTextColor = kThemeTextColorRootMenuDisabled
kThemeActiveMenuItemTextColor = kThemeTextColorMenuItemActive
kThemeSelectedMenuItemTextColor = kThemeTextColorMenuItemSelected
kThemeDisabledMenuItemTextColor = kThemeTextColorMenuItemDisabled
kThemeActivePopupLabelTextColor = kThemeTextColorPopupLabelActive
kThemeInactivePopupLabelTextColor = kThemeTextColorPopupLabelInactive
kAEThemeSwitch = kAEAppearanceChanged
kThemeNoAdornment = kThemeAdornmentNone
kThemeDefaultAdornment = kThemeAdornmentDefault
kThemeFocusAdornment = kThemeAdornmentFocus
kThemeRightToLeftAdornment = kThemeAdornmentRightToLeft
kThemeDrawIndicatorOnly = kThemeAdornmentDrawIndicatorOnly
kThemeBrushPassiveAreaFill = kThemeBrushStaticAreaFill
kThemeMetricCheckBoxGlyphHeight = kThemeMetricCheckBoxHeight
kThemeMetricRadioButtonGlyphHeight = kThemeMetricRadioButtonHeight
kThemeMetricDisclosureButtonSize = kThemeMetricDisclosureButtonHeight
kThemeMetricBestListHeaderHeight = kThemeMetricListHeaderHeight
kThemeMetricSmallProgressBarThickness = kThemeMetricNormalProgressBarThickness
kThemeMetricProgressBarThickness = kThemeMetricLargeProgressBarThickness
kThemeScrollBar = kThemeMediumScrollBar
kThemeSlider = kThemeMediumSlider
kThemeProgressBar = kThemeMediumProgressBar
kThemeIndeterminateBar = kThemeMediumIndeterminateBar
| gpl-2.0 |
alihalabyah/flexx | flexx/pyscript/parser3.py | 21 | 22006 | """
Python Builtins
---------------
Most buildin functions (that make sense in JS) are automatically
translated to JavaScript: isinstance, issubclass, callable, hasattr,
getattr, setattr, delattr, print, len, max, min, chr, ord, dict, list,
tuple, range, pow, sum, round, int, float, str, bool, abs, divmod, all,
any, enumerate, zip, reversed, sorted, filter, map.
.. pyscript_example::
# "self" is replaced with "this"
self.foo
# Printing just works
print('some test')
print(a, b, c, sep='-')
# Getting the length of a string or array
len(foo)
# Rounding and abs
round(foo) # round to nearest integer
int(foo) # round towards 0 as in Python
abs(foo)
# min and max
min(foo)
min(a, b, c)
max(foo)
max(a, b, c)
# divmod
a, b = divmod(100, 7) # -> 14, 2
# Aggregation
sum(foo)
all(foo)
any(foo)
# Turning things into numbers, bools and strings
str(s)
float(x)
bool(y)
int(z) # this rounds towards zero like in Python
chr(65) # -> 'A'
ord('A') # -> 65
# Turning things into lists and dicts
dict([['foo', 1], ['bar', 2]]) # -> {'foo': 1, 'bar': 2}
list('abc') # -> ['a', 'b', 'c']
dict(other_dict) # make a copy
list(other_list) # make copy
The isinstance function (and friends)
-------------------------------------
The ``isinstance()`` function works for all JS primitive types, but also
for user-defined classes.
.. pyscript_example::
# Basic types
isinstance(3, float) # in JS there are no ints
isinstance('', str)
isinstance([], list)
isinstance({}, dict)
isinstance(foo, types.FunctionType)
# Can also use JS strings
isinstance(3, 'number')
isinstance('', 'string')
isinstance([], 'array')
isinstance({}, 'object')
isinstance(foo, 'function')
# You can use it on your own types too ...
isinstance(x, MyClass)
isinstance(x, 'MyClass') # equivalent
isinstance(x, 'Object') # also yields true (subclass of Object)
# issubclass works too
issubclass(Foo, Bar)
# As well as callable
callable(foo)
hasattr, getattr, setattr and delattr
-------------------------------------
.. pyscript_example::
a = {'foo': 1, 'bar': 2}
hasattr(a, 'foo') # -> True
hasattr(a, 'fooo') # -> False
hasattr(null, 'foo') # -> False
getattr(a, 'foo') # -> 1
getattr(a, 'fooo') # -> raise AttributeError
getattr(a, 'fooo', 3) # -> 3
getattr(null, 'foo', 3) # -> 3
setattr(a, 'foo', 2)
delattr(a, 'foo')
Creating sequences
------------------
.. pyscript_example::
range(10)
range(2, 10, 2)
range(100, 0, -1)
reversed(foo)
sorted(foo)
enumerate(foo)
zip(foo, bar)
filter(func, foo)
map(func, foo)
List methods
------------
.. pyscript_example::
# Call a.append() if it exists, otherwise a.push()
a.append(x)
# Similar for remove()
a.remove(x)
Dict methods
------------
.. pyscript_example::
a = {'foo': 3}
a['foo']
a.get('foo', 0)
a.get('foo')
a.keys()
Str methods
-----------
.. pyscript_example::
"foobar".startswith('foo')
Additional sugar
----------------
.. pyscript_example::
# Get time (number of seconds since epoch)
print(time.time())
# High resolution timer (as in time.perf_counter on Python 3)
t0 = time.perf_counter()
do_something()
t1 = time.perf_counter()
print('this took me', t1-t0, 'seconds')
"""
import ast
from .parser2 import Parser2, JSError, unify # noqa
# List of possibly relevant builtin functions:
#
# abs all any bin bool callable chr complex delattr dict dir divmod
# enumerate eval exec filter float format getattr globals hasattr hash
# hex id int isinstance issubclass iter len list locals map max min next
# object oct ord pow print property range repr reversed round set setattr
# slice sorted str sum super tuple type vars zip
#
# Further, all methods of: list, dict, str, set?
# todo: make these more robust by not applying the Python version if a JS version exists.
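# Rough illustration (not an exhaustive spec) of the kind of JS this parser
# emits for the Pythonic forms documented above:
#   print('x is', x)   -> console.log('x is' + " " + x)
#   len(foo)           -> foo.length
#   callable(foo)      -> (typeof foo === "function")
#   range(2, 10, 2)    -> (function (start, end, step) {...})(2, 10, 2)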
class Parser3(Parser2):
""" Parser to transcompile Python to JS, allowing more Pythonic
code, like ``self``, ``print()``, ``len()``, list methods, etc.
"""
NAME_MAP = {'self': 'this', }
NAME_MAP.update(Parser2.NAME_MAP)
## Hardcore functions (hide JS functions with the same name)
def function_isinstance(self, node):
if len(node.args) != 2:
raise JSError('isinstance() expects two arguments.')
ob = unify(self.parse(node.args[0]))
cls = unify(self.parse(node.args[1]))
if cls[0] in '"\'':
cls = cls[1:-1] # remove quotes
BASIC_TYPES = ('number', 'boolean', 'string', 'function', 'array',
'object', 'null', 'undefined')
MAP = {'[int, float]': 'number', '[float, int]': 'number', 'float': 'number',
'str': 'string', 'basestring': 'string', 'string_types': 'string',
'bool': 'boolean',
'FunctionType': 'function', 'types.FunctionType': 'function',
'list': 'array', 'tuple': 'array',
'[list, tuple]': 'array', '[tuple, list]': 'array',
'dict': 'object',
}
cmp = MAP.get(cls, cls)
if cmp.lower() in BASIC_TYPES:
# Basic type, use Object.prototype.toString
# http://stackoverflow.com/questions/11108877
return ["({}).toString.call(",
ob,
").match(/\s([a-zA-Z]+)/)[1].toLowerCase() === ",
repr(cmp.lower())
]
else:
# User defined type, use instanceof
# http://tobyho.com/2011/01/28/checking-types-in-javascript/
cmp = unify(cls)
if cmp[0] == '(':
raise JSError('isinstance() can only compare to simple types')
return ob, " instanceof ", cmp
def function_issubclass(self, node):
# issubclass only needs to work on custom classes
if len(node.args) != 2:
raise JSError('issubclass() expects two arguments.')
cls1 = unify(self.parse(node.args[0]))
cls2 = unify(self.parse(node.args[1]))
if cls2 == 'object':
cls2 = 'Object'
return '(%s.prototype instanceof %s)' % (cls1, cls2)
def function_hasattr(self, node):
if len(node.args) == 2:
ob = unify(self.parse(node.args[0]))
name = unify(self.parse(node.args[1]))
dummy1 = self.dummy()
t = "((%s=%s) !== undefined && %s !== null && %s[%s] !== undefined)"
return t % (dummy1, ob, dummy1, dummy1, name)
else:
raise JSError('hasattr() expects two arguments.')
def function_getattr(self, node):
is_ok = "(ob !== undefined && ob !== null && ob[name] !== undefined)"
if len(node.args) == 2:
ob = unify(self.parse(node.args[0]))
name = unify(self.parse(node.args[1]))
func = "(function (ob, name) {if %s {return ob[name];} " % is_ok
func += "else {var e = Error(name); e.name='AttributeError'; throw e;}})"
return func + '(%s, %s)' % (ob, name)
elif len(node.args) == 3:
ob = unify(self.parse(node.args[0]))
name = unify(self.parse(node.args[1]))
default = unify(self.parse(node.args[2]))
func = "(function (ob, name, dflt) {if %s {return ob[name];} " % is_ok
func += "else {return dflt;}})"
return func + '(%s, %s, %s)' % (ob, name, default)
else:
raise JSError('hasattr() expects two or three arguments.')
def function_setattr(self, node):
is_ok = "(ob !== undefined && ob !== null && ob[name] !== undefined)"
if len(node.args) == 3:
ob = unify(self.parse(node.args[0]))
name = unify(self.parse(node.args[1]))
value = unify(self.parse(node.args[2]))
return '%s[%s] = %s' % (ob, name, value)
else:
raise JSError('setattr() expects three arguments.')
def function_delattr(self, node):
if len(node.args) == 2:
ob = unify(self.parse(node.args[0]))
name = unify(self.parse(node.args[1]))
return 'delete %s[%s]' % (ob, name)
else:
raise JSError('delattr() expects two arguments.')
def function_print(self, node):
# Process keywords
sep, end = '" "', ''
for kw in node.keywords:
if kw.arg == 'sep':
sep = ''.join(self.parse(kw.value))
elif kw.arg == 'end':
end = ''.join(self.parse(kw.value))
elif kw.arg in ('file', 'flush'):
raise JSError('print() file and flush args not supported')
else:
raise JSError('Invalid argument for print(): %r' % kw.arg)
# Combine args
args = [unify(self.parse(arg)) for arg in node.args]
end = (" + %s" % end) if (args and end and end != '\n') else ''
combiner = ' + %s + ' % sep
args_concat = combiner.join(args)
return 'console.log(' + args_concat + end + ')'
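    # Hedged illustration (assuming the argument parser yields plain JS string
    # literals): print("a", "b", sep="-") would come out roughly as
    #   console.log("a" + "-" + "b")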
def function_len(self, node):
if len(node.args) == 1:
return unify(self.parse(node.args[0])), '.length'
else:
return None # don't apply this feature
def function_max(self, node):
if len(node.args) == 0:
raise JSError('max() needs at least one argument')
elif len(node.args) == 1:
arg = ''.join(self.parse(node.args[0]))
return 'Math.max.apply(null, ', arg, ')'
else:
args = ', '.join([unify(self.parse(arg)) for arg in node.args])
return 'Math.max(', args, ')'
def function_min(self, node):
if len(node.args) == 0:
raise JSError('min() needs at least one argument')
elif len(node.args) == 1:
arg = ''.join(self.parse(node.args[0]))
return 'Math.min.apply(null, ', arg, ')'
else:
args = ', '.join([unify(self.parse(arg)) for arg in node.args])
return 'Math.min(', args, ')'
def function_callable(self, node):
if len(node.args) == 1:
arg = unify(self.parse(node.args[0]))
return '(typeof %s === "function")' % arg
else:
            raise JSError('callable() needs exactly one argument')
def function_chr(self, node):
if len(node.args) == 1:
arg = ''.join(self.parse(node.args[0]))
return 'String.fromCharCode(%s)' % arg
else:
            raise JSError('chr() needs exactly one argument')
def function_ord(self, node):
if len(node.args) == 1:
arg = ''.join(self.parse(node.args[0]))
return '%s.charCodeAt(0)' % arg
else:
            raise JSError('ord() needs exactly one argument')
def function_dict(self, node):
if len(node.args) == 0:
return '{}'
if len(node.args) == 1:
code = '(function(x) {var t, i, keys, r={};'
code += 'if (Array.isArray(x)) {'
code += 'for (i=0; i<x.length; i++) {t=x[i]; r[t[0]] = t[1];} return r;'
code += '} else {'
code += 'keys = Object.keys(x); for (i=0; i<keys.length; i++) {t=keys[i]; r[t] = x[t];} return r;}})'
return code + '(%s)' % ''.join(self.parse(node.args[0]))
else:
            raise JSError('dict() needs at most one argument')
def function_list(self, node):
if len(node.args) == 0:
return '[]'
if len(node.args) == 1:
code = '(function(x) {var r=[];'
code += 'if (typeof x==="object" && !Array.isArray(x)) {x=Object.keys(x)}'
code += 'for (var i=0; i<x.length; i++) {r.push(x[i]);} return r;})'
return code + '(%s)' % ''.join(self.parse(node.args[0]))
else:
            raise JSError('list() needs at most one argument')
def function_tuple(self, node):
return self.function_list(node)
def function_range(self, node):
fun = 'function (start, end, step) {var i, res = []; for (i=start; i<end; i+=step) {res.push(i);} return res;}'
if len(node.args) == 1:
end = unify(self.parse(node.args[0]))
return '(%s)(0, %s, 1)' % (fun, end)
elif len(node.args) == 2:
start = unify(self.parse(node.args[0]))
end = unify(self.parse(node.args[1]))
return '(%s)(%s, %s, 1)' % (fun, start, end)
elif len(node.args) == 3:
start = unify(self.parse(node.args[0]))
end = unify(self.parse(node.args[1]))
step = ''.join(self.parse(node.args[2]))
if step.lstrip('+-').isnumeric() and float(step) < 0:
fun = fun.replace('<', '>')
return '(%s)(%s, %s, %s)' % (fun, start, end, step)
else:
raise JSError('range() needs 1, 2 or 3 arguments')
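    # Hedged illustration: range(2, 10, 2) compiles to an immediately invoked
    # helper along the lines of
    #   (function (start, end, step) {...})(2, 10, 2)   // -> [2, 4, 6, 8]
    # and a literal negative step flips the loop comparison as handled above.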
## Normal functions (can be overloaded)
def function_pow(self, node):
if len(node.args) == 2:
self.vars_for_functions['pow'] = 'Math.pow'
return None
else:
            raise JSError('pow() needs exactly two arguments')
def function_sum(self, node):
if len(node.args) == 1:
code = 'function (x) {return x.reduce(function(a, b) {return a + b;});}'
self.vars_for_functions['sum'] = code
return None
else:
raise JSError('sum() needs exactly one argument')
def function_round(self, node):
if len(node.args) == 1:
self.vars_for_functions['round'] = 'Math.round'
else:
            raise JSError('round() needs exactly one argument')
def function_int(self, node):
# No need to turn into number first
if len(node.args) == 1:
code = 'function (x) {return x<0 ? Math.ceil(x): Math.floor(x);}'
self.vars_for_functions['int'] = code
else:
raise JSError('int() needs one argument')
def function_float(self, node):
if len(node.args) == 1:
self.vars_for_functions['float'] = 'Number'
else:
raise JSError('float() needs one argument')
def function_str(self, node):
if len(node.args) in (0, 1):
self.vars_for_functions['str'] = 'String'
else:
raise JSError('str() needs zero or one argument')
def function_repr(self, node):
if len(node.args) == 1:
# code = 'function (x) {if (typeof x === "object") {return JSON.stringify(x);}'
# code += ' else if (typeof x === "string") {return "\'" + x + "\'";}'
# code += ' else {return x.toString();}}'
self.vars_for_functions['repr'] = 'JSON.stringify'
else:
raise JSError('repr() needs one argument')
def function_bool(self, node):
if len(node.args) == 1:
self._wrap_truthy(ast.Name('x', '')) # trigger _truthy function declaration
self.vars_for_functions['bool'] = 'function (x) {return Boolean(_truthy(x));}'
else:
raise JSError('bool() needs one argument')
def function_abs(self, node):
if len(node.args) == 1:
self.vars_for_functions['abs'] = 'Math.abs'
else:
raise JSError('abs() needs one argument')
def function_divmod(self, node):
if len(node.args) == 2:
code = 'function (x, y) {var m = x % y; return [(x-m)/y, m];}'
self.vars_for_functions['divmod'] = code
else:
raise JSError('divmod() needs two arguments')
def function_all(self, node):
if len(node.args) == 1:
self._wrap_truthy(ast.Name('x', '')) # trigger _truthy function declaration
code = 'function (x) {for (var i=0; i<x.length; i++) {if (!_truthy(x[i])){return false}} return true;}'
self.vars_for_functions['all'] = code
else:
raise JSError('all() needs one argument')
def function_any(self, node):
if len(node.args) == 1:
self._wrap_truthy(ast.Name('x', '')) # trigger _truthy function declaration
code = 'function (x) {for (var i=0; i<x.length; i++) {if (_truthy(x[i])){return true}} return false;}'
self.vars_for_functions['any'] = code
else:
raise JSError('any() needs one argument')
def function_enumerate(self, node):
if len(node.args) == 1:
code = 'function (iter) { var i, res=[];'
code += self._make_iterable('iter', 'iter', False)
code += 'for (i=0; i<iter.length; i++) {res.push([i, iter[i]]);}'
code += 'return res;}'
self.vars_for_functions['enumerate'] = code
else:
raise JSError('enumerate() needs one argument')
def function_zip(self, node):
if len(node.args) == 2:
code = 'function (iter1, iter2) { var i, res=[];'
code += self._make_iterable('iter1', 'iter1', False)
code += self._make_iterable('iter2', 'iter2', False)
code += 'var len = Math.min(iter1.length, iter2.length);'
code += 'for (i=0; i<len; i++) {res.push([iter1[i], iter2[i]]);}'
code += 'return res;}'
self.vars_for_functions['zip'] = code
else:
raise JSError('zip() needs two arguments')
def function_reversed(self, node):
if len(node.args) == 1:
code = 'function (iter) {'
code += self._make_iterable('iter', 'iter', False)
code += 'return iter.slice().reverse();}'
self.vars_for_functions['reversed'] = code
else:
raise JSError('reversed() needs one argument')
def function_sorted(self, node):
if len(node.args) == 1:
code = 'function (iter) {'
code += self._make_iterable('iter', 'iter', False)
code += 'return iter.slice().sort();}'
self.vars_for_functions['sorted'] = code
else:
raise JSError('sorted() needs one argument')
def function_filter(self, node):
if len(node.args) == 2:
code = 'function (func, iter) {'
code += 'if (typeof func === "undefined" || func === null) {func = function(x) {return x;}}'
code += 'return iter.filter(func);}'
self.vars_for_functions['filter'] = code
else:
raise JSError('filter() needs two arguments')
def function_map(self, node):
if len(node.args) == 2:
code = 'function (func, iter) {return iter.map(func);}'
self.vars_for_functions['map'] = code
else:
raise JSError('map() needs two arguments')
## List methods
def method_append(self, node, base):
if len(node.args) == 1:
code = []
code.append('(%s.append || %s.push).apply(%s, [' % (base, base, base))
code += self.parse(node.args[0])
code.append('])')
return code
def method_remove(self, node, base):
if len(node.args) == 1:
code = []
remove_func = 'function (x) {this.splice(this.indexOf(x), 1);}'
code.append('(%s.remove || %s).apply(%s, [' % (base, remove_func, base))
code += self.parse(node.args[0])
code.append('])')
return code
## Dict methods
def method_get(self, node, base):
if len(node.args) in (1, 2):
# Get name to call object - use simple name if we can
ob_name = base
ob_name1 = base
if not base.isalnum():
dummy = self.dummy()
ob_name = dummy
ob_name1 = '(%s=%s)' % (dummy, base)
# Get args
key = unify(self.parse(node.args[0]))
default = 'null'
normal_args = ''.join(self.parse(node.args[0]))
if len(node.args) == 2:
default = unify(self.parse(node.args[1]))
normal_args += ', ' + ''.join(self.parse(node.args[1]))
# Compose
dict_get = '(%s[%s] || %s)' % (ob_name, key, default)
normal_get = '%s.get(%s)' % (ob_name, normal_args)
return '(/*py-dict.get*/typeof %s.get==="function" ? %s : %s)' % (
ob_name1, normal_get, dict_get)
def method_keys(self, node, base):
if len(node.args) == 0:
return 'Object.keys(%s)' % base
## Str methods
def method_startswith(self, node, base):
if len(node.args) == 1:
arg = unify(self.parse(node.args[0]))
return unify(base), '.indexOf(', arg, ') == 0'
## Extra functions / methods
def method_time(self, node, base): # time.time()
if base == 'time':
if len(node.args) == 0:
return '((new Date()).getTime() / 1000)'
else:
raise JSError('time() needs no argument')
def method_perf_counter(self, node, base): # time.perf_counter()
if base == 'time':
if len(node.args) == 0:
# Work in nodejs and browser
dummy = self.dummy()
return '(typeof(process) === "undefined" ? performance.now()*1e-3 : ((%s=process.hrtime())[0] + %s[1]*1e-9))' % (dummy, dummy)
else:
raise JSError('perf_counter() needs no argument')
| bsd-2-clause |
jonycgn/scipy | scipy/stats/tests/test_binned_statistic.py | 50 | 8793 | from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_array_almost_equal, run_module_suite
from scipy.stats import (binned_statistic, binned_statistic_2d,
binned_statistic_dd)
from scipy._lib.six import u
from common_tests import check_named_results
class TestBinnedStatistic(object):
@classmethod
def setup_class(cls):
np.random.seed(9865)
cls.x = np.random.random(100)
cls.y = np.random.random(100)
cls.v = np.random.random(100)
cls.X = np.random.random((100, 3))
def test_1d_count(self):
x = self.x
v = self.v
count1, edges1, bc = binned_statistic(x, v, 'count', bins=10)
count2, edges2 = np.histogram(x, bins=10)
assert_array_almost_equal(count1, count2)
assert_array_almost_equal(edges1, edges2)
def test_1d_result_attributes(self):
x = self.x
v = self.v
res = binned_statistic(x, v, 'count', bins=10)
attributes = ('statistic', 'bin_edges', 'binnumber')
check_named_results(res, attributes)
def test_1d_sum(self):
x = self.x
v = self.v
sum1, edges1, bc = binned_statistic(x, v, 'sum', bins=10)
sum2, edges2 = np.histogram(x, bins=10, weights=v)
assert_array_almost_equal(sum1, sum2)
assert_array_almost_equal(edges1, edges2)
def test_1d_mean(self):
x = self.x
v = self.v
stat1, edges1, bc = binned_statistic(x, v, 'mean', bins=10)
stat2, edges2, bc = binned_statistic(x, v, np.mean, bins=10)
assert_array_almost_equal(stat1, stat2)
assert_array_almost_equal(edges1, edges2)
def test_1d_std(self):
x = self.x
v = self.v
stat1, edges1, bc = binned_statistic(x, v, 'std', bins=10)
stat2, edges2, bc = binned_statistic(x, v, np.std, bins=10)
assert_array_almost_equal(stat1, stat2)
assert_array_almost_equal(edges1, edges2)
def test_1d_median(self):
x = self.x
v = self.v
stat1, edges1, bc = binned_statistic(x, v, 'median', bins=10)
stat2, edges2, bc = binned_statistic(x, v, np.median, bins=10)
assert_array_almost_equal(stat1, stat2)
assert_array_almost_equal(edges1, edges2)
def test_1d_bincode(self):
x = self.x[:20]
v = self.v[:20]
count1, edges1, bc = binned_statistic(x, v, 'count', bins=3)
bc2 = np.array([3, 2, 1, 3, 2, 3, 3, 3, 3, 1, 1, 3, 3, 1, 2, 3, 1,
1, 2, 1])
bcount = [(bc == i).sum() for i in np.unique(bc)]
assert_array_almost_equal(bc, bc2)
assert_array_almost_equal(bcount, count1)
def test_1d_range_keyword(self):
# Regression test for gh-3063, range can be (min, max) or [(min, max)]
np.random.seed(9865)
x = np.arange(30)
data = np.random.random(30)
mean, bins, _ = binned_statistic(x[:15], data[:15])
mean_range, bins_range, _ = binned_statistic(x, data, range=[(0, 14)])
mean_range2, bins_range2, _ = binned_statistic(x, data, range=(0, 14))
assert_array_almost_equal(mean, mean_range)
assert_array_almost_equal(bins, bins_range)
assert_array_almost_equal(mean, mean_range2)
assert_array_almost_equal(bins, bins_range2)
def test_2d_count(self):
x = self.x
y = self.y
v = self.v
count1, binx1, biny1, bc = binned_statistic_2d(x, y, v, 'count', bins=5)
count2, binx2, biny2 = np.histogram2d(x, y, bins=5)
assert_array_almost_equal(count1, count2)
assert_array_almost_equal(binx1, binx2)
assert_array_almost_equal(biny1, biny2)
def test_2d_result_attributes(self):
x = self.x
y = self.y
v = self.v
res = binned_statistic_2d(x, y, v, 'count', bins=5)
attributes = ('statistic', 'x_edge', 'y_edge', 'binnumber')
check_named_results(res, attributes)
def test_2d_sum(self):
x = self.x
y = self.y
v = self.v
sum1, binx1, biny1, bc = binned_statistic_2d(x, y, v, 'sum', bins=5)
sum2, binx2, biny2 = np.histogram2d(x, y, bins=5, weights=v)
assert_array_almost_equal(sum1, sum2)
assert_array_almost_equal(binx1, binx2)
assert_array_almost_equal(biny1, biny2)
def test_2d_mean(self):
x = self.x
y = self.y
v = self.v
stat1, binx1, biny1, bc = binned_statistic_2d(x, y, v, 'mean', bins=5)
stat2, binx2, biny2, bc = binned_statistic_2d(x, y, v, np.mean, bins=5)
assert_array_almost_equal(stat1, stat2)
assert_array_almost_equal(binx1, binx2)
assert_array_almost_equal(biny1, biny2)
def test_2d_mean_unicode(self):
x = self.x
y = self.y
v = self.v
stat1, binx1, biny1, bc = binned_statistic_2d(x, y, v, u('mean'), bins=5)
stat2, binx2, biny2, bc = binned_statistic_2d(x, y, v, np.mean, bins=5)
assert_array_almost_equal(stat1, stat2)
assert_array_almost_equal(binx1, binx2)
assert_array_almost_equal(biny1, biny2)
def test_2d_std(self):
x = self.x
y = self.y
v = self.v
stat1, binx1, biny1, bc = binned_statistic_2d(x, y, v, 'std', bins=5)
stat2, binx2, biny2, bc = binned_statistic_2d(x, y, v, np.std, bins=5)
assert_array_almost_equal(stat1, stat2)
assert_array_almost_equal(binx1, binx2)
assert_array_almost_equal(biny1, biny2)
def test_2d_median(self):
x = self.x
y = self.y
v = self.v
stat1, binx1, biny1, bc = binned_statistic_2d(x, y, v, 'median', bins=5)
stat2, binx2, biny2, bc = binned_statistic_2d(x, y, v, np.median, bins=5)
assert_array_almost_equal(stat1, stat2)
assert_array_almost_equal(binx1, binx2)
assert_array_almost_equal(biny1, biny2)
def test_2d_bincode(self):
x = self.x[:20]
y = self.y[:20]
v = self.v[:20]
count1, binx1, biny1, bc = binned_statistic_2d(x, y, v, 'count', bins=3)
bc2 = np.array([17, 11, 6, 16, 11, 17, 18, 17, 17, 7, 6, 18, 16,
6, 11, 16, 6, 6, 11, 8])
bcount = [(bc == i).sum() for i in np.unique(bc)]
assert_array_almost_equal(bc, bc2)
count1adj = count1[count1.nonzero()]
assert_array_almost_equal(bcount, count1adj)
def test_dd_count(self):
X = self.X
v = self.v
count1, edges1, bc = binned_statistic_dd(X, v, 'count', bins=3)
count2, edges2 = np.histogramdd(X, bins=3)
assert_array_almost_equal(count1, count2)
assert_array_almost_equal(edges1, edges2)
def test_dd_result_attributes(self):
X = self.X
v = self.v
res = binned_statistic_dd(X, v, 'count', bins=3)
attributes = ('statistic', 'bin_edges', 'binnumber')
check_named_results(res, attributes)
def test_dd_sum(self):
X = self.X
v = self.v
sum1, edges1, bc = binned_statistic_dd(X, v, 'sum', bins=3)
sum2, edges2 = np.histogramdd(X, bins=3, weights=v)
assert_array_almost_equal(sum1, sum2)
assert_array_almost_equal(edges1, edges2)
def test_dd_mean(self):
X = self.X
v = self.v
stat1, edges1, bc = binned_statistic_dd(X, v, 'mean', bins=3)
stat2, edges2, bc = binned_statistic_dd(X, v, np.mean, bins=3)
assert_array_almost_equal(stat1, stat2)
assert_array_almost_equal(edges1, edges2)
def test_dd_std(self):
X = self.X
v = self.v
stat1, edges1, bc = binned_statistic_dd(X, v, 'std', bins=3)
stat2, edges2, bc = binned_statistic_dd(X, v, np.std, bins=3)
assert_array_almost_equal(stat1, stat2)
assert_array_almost_equal(edges1, edges2)
def test_dd_median(self):
X = self.X
v = self.v
stat1, edges1, bc = binned_statistic_dd(X, v, 'median', bins=3)
stat2, edges2, bc = binned_statistic_dd(X, v, np.median, bins=3)
assert_array_almost_equal(stat1, stat2)
assert_array_almost_equal(edges1, edges2)
def test_dd_bincode(self):
X = self.X[:20]
v = self.v[:20]
count1, edges1, bc = binned_statistic_dd(X, v, 'count', bins=3)
bc2 = np.array([63, 33, 86, 83, 88, 67, 57, 33, 42, 41, 82, 83, 92,
32, 36, 91, 43, 87, 81, 81])
bcount = [(bc == i).sum() for i in np.unique(bc)]
assert_array_almost_equal(bc, bc2)
count1adj = count1[count1.nonzero()]
assert_array_almost_equal(bcount, count1adj)
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause |
Shraddha512/servo | tests/wpt/run.py | 13 | 1745 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import sys, os, argparse
here = os.path.split(__file__)[0]
servo_root = os.path.abspath(os.path.join(here, "..", ".."))
def wptsubdir(*args):
return os.path.join(here, *args)
# Imports
sys.path.append(wptsubdir("web-platform-tests"))
sys.path.append(wptsubdir("web-platform-tests", "tools", "scripts"))
from wptrunner import wptrunner, wptcommandline
import manifest
def update_manifest():
manifest.update_manifest(wptsubdir("web-platform-tests"),
rebuild=False,
experimental_include_local_changes=True,
path=wptsubdir("metadata", "MANIFEST.json"))
return True
def run_tests(**kwargs):
if not os.path.isfile(wptsubdir("metadata", "MANIFEST.json")):
raise Exception("Manifest not found. Please use --update-manifest in WPTARGS to create one")
wptrunner.setup_logging(kwargs, {"raw": sys.stdout})
return wptrunner.run_tests(**kwargs)
def set_defaults(args):
args.include_manifest = args.include_manifest if args.include_manifest else wptsubdir("include.ini")
args.product = "servo"
rv = vars(args)
wptcommandline.check_args(rv)
return rv
def main():
parser = wptcommandline.create_parser()
parser.add_argument('--update-manifest', dest='update_manifest', action='store_true')
args = parser.parse_args()
if args.update_manifest:
return update_manifest()
kwargs = set_defaults(args)
return run_tests(**kwargs)
if __name__ == "__main__":
sys.exit(0 if main() else 1)
| mpl-2.0 |
mikedh/trimesh | trimesh/proximity.py | 1 | 19400 | """
proximity.py
---------------
Query mesh- point proximity.
"""
import numpy as np
from . import util
from .grouping import group_min
from .constants import tol, log_time
from .triangles import closest_point as closest_point_corresponding
from .triangles import points_to_barycentric
try:
from scipy.spatial import cKDTree
except BaseException as E:
from .exceptions import closure
cKDTree = closure(E)
def nearby_faces(mesh, points):
"""
For each point find nearby faces relatively quickly.
The closest point on the mesh to the queried point is guaranteed to be
on one of the faces listed.
Does this by finding the nearest vertex on the mesh to each point, and
then returns all the faces that intersect the axis aligned bounding box
centered at the queried point and extending to the nearest vertex.
Parameters
----------
mesh : trimesh.Trimesh
Mesh to query.
points : (n, 3) float
Points in space
Returns
-----------
candidates : (points,) int
Sequence of indexes for mesh.faces
"""
points = np.asanyarray(points, dtype=np.float64)
if not util.is_shape(points, (-1, 3)):
raise ValueError('points must be (n,3)!')
# an r-tree containing the axis aligned bounding box for every triangle
rtree = mesh.triangles_tree
# a kd-tree containing every vertex of the mesh
kdtree = cKDTree(mesh.vertices[mesh.referenced_vertices])
# query the distance to the nearest vertex to get AABB of a sphere
distance_vertex = kdtree.query(points)[0].reshape((-1, 1))
distance_vertex += tol.merge
# axis aligned bounds
bounds = np.column_stack((points - distance_vertex,
points + distance_vertex))
# faces that intersect axis aligned bounding box
candidates = [list(rtree.intersection(b)) for b in bounds]
return candidates
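# Usage sketch (hypothetical query point, assuming `mesh` is a trimesh.Trimesh):
#   candidates = nearby_faces(mesh, [[0.0, 0.0, 0.0]])
#   nearby = mesh.faces[candidates[0]]   # faces near the first query point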
def closest_point_naive(mesh, points):
"""
Given a mesh and a list of points find the closest point
on any triangle.
Does this by constructing a very large intermediate array and
comparing every point to every triangle.
Parameters
----------
mesh : Trimesh
Takes mesh to have same interfaces as `closest_point`
points : (m, 3) float
Points in space
Returns
----------
closest : (m, 3) float
Closest point on triangles for each point
distance : (m,) float
Distances between point and triangle
triangle_id : (m,) int
Index of triangle containing closest point
"""
# get triangles from mesh
triangles = mesh.triangles.view(np.ndarray)
# establish that input points are sane
points = np.asanyarray(points, dtype=np.float64)
if not util.is_shape(triangles, (-1, 3, 3)):
raise ValueError('triangles shape incorrect')
if not util.is_shape(points, (-1, 3)):
raise ValueError('points must be (n,3)')
# create a giant tiled array of each point tiled len(triangles) times
points_tiled = np.tile(points, (1, len(triangles)))
on_triangle = np.array([closest_point_corresponding(
triangles, i.reshape((-1, 3))) for i in points_tiled])
# distance squared
distance_2 = [((i - q)**2).sum(axis=1)
for i, q in zip(on_triangle, points)]
triangle_id = np.array([i.argmin() for i in distance_2])
# closest cartesian point
closest = np.array([g[i] for i, g in zip(triangle_id, on_triangle)])
distance = np.array([g[i] for i, g in zip(triangle_id, distance_2)]) ** .5
return closest, distance, triangle_id
def closest_point(mesh, points):
"""
Given a mesh and a list of points find the closest point
on any triangle.
Parameters
----------
mesh : trimesh.Trimesh
Mesh to query
points : (m, 3) float
Points in space
Returns
----------
closest : (m, 3) float
Closest point on triangles for each point
distance : (m,) float
Distance to mesh.
triangle_id : (m,) int
Index of triangle containing closest point
"""
points = np.asanyarray(points, dtype=np.float64)
if not util.is_shape(points, (-1, 3)):
raise ValueError('points must be (n,3)!')
    # do a tree-based query for faces near each point
candidates = nearby_faces(mesh, points)
# view triangles as an ndarray so we don't have to recompute
# the MD5 during all of the subsequent advanced indexing
triangles = mesh.triangles.view(np.ndarray)
# create the corresponding list of triangles
# and query points to send to the closest_point function
all_candidates = np.concatenate(candidates)
num_candidates = list(map(len, candidates))
tile_idxs = np.repeat(np.arange(len(points)), num_candidates)
query_point = points[tile_idxs, :]
query_tri = triangles[all_candidates]
# do the computation for closest point
query_close = closest_point_corresponding(query_tri, query_point)
query_group = np.cumsum(num_candidates)[:-1]
# vectors and distances for
# closest point to query point
query_vector = query_point - query_close
query_distance = util.diagonal_dot(query_vector, query_vector)
# get best two candidate indices by arg-sorting the per-query_distances
qds = np.array_split(query_distance, query_group)
idxs = np.int32([qd.argsort()[:2] if len(qd) > 1 else [0, 0] for qd in qds])
idxs[1:] += query_group.reshape(-1, 1)
# points, distances and triangle ids for best two candidates
two_points = query_close[idxs]
two_dists = query_distance[idxs]
two_candidates = all_candidates[idxs]
# the first candidate is the best result for unambiguous cases
result_close = query_close[idxs[:, 0]]
result_tid = two_candidates[:, 0]
result_distance = two_dists[:, 0]
# however: same closest point on two different faces
# find the best one and correct triangle ids if necessary
check_distance = two_dists.ptp(axis=1) < tol.merge
check_magnitude = np.all(np.abs(two_dists) > tol.merge, axis=1)
    # mask results where corrections may need to be applied
c_mask = np.bitwise_and(check_distance, check_magnitude)
# get two face normals for the candidate points
normals = mesh.face_normals[two_candidates[c_mask]]
# compute normalized surface-point to query-point vectors
vectors = (query_vector[idxs[c_mask]] /
two_dists[c_mask].reshape(-1, 2, 1) ** 0.5)
# compare enclosed angle for both face normals
dots = (normals * vectors).sum(axis=2)
# take the idx with the most positive angle
# allows for selecting the correct candidate triangle id
c_idxs = dots.argmax(axis=1)
# correct triangle ids where necessary
# closest point and distance remain valid
result_tid[c_mask] = two_candidates[c_mask, c_idxs]
result_distance[c_mask] = two_dists[c_mask, c_idxs]
result_close[c_mask] = two_points[c_mask, c_idxs]
# we were comparing the distance squared so
# now take the square root in one vectorized operation
result_distance **= .5
return result_close, result_distance, result_tid
def signed_distance(mesh, points):
"""
Find the signed distance from a mesh to a list of points.
* Points OUTSIDE the mesh will have NEGATIVE distance
* Points within tol.merge of the surface will have POSITIVE distance
* Points INSIDE the mesh will have POSITIVE distance
Parameters
-----------
mesh : trimesh.Trimesh
Mesh to query.
points : (n, 3) float
Points in space
Returns
----------
signed_distance : (n,) float
Signed distance from point to mesh
"""
# make sure we have a numpy array
points = np.asanyarray(points, dtype=np.float64)
# find the closest point on the mesh to the queried points
closest, distance, triangle_id = closest_point(mesh, points)
# we only care about nonzero distances
nonzero = distance > tol.merge
if not nonzero.any():
return distance
# For closest points that project directly in to the triangle, compute sign from
# triangle normal Project each point in to the closest triangle plane
nonzero = np.where(nonzero)[0]
normals = mesh.face_normals[triangle_id]
projection = (points[nonzero] -
(normals[nonzero].T * np.einsum(
"ij,ij->i",
points[nonzero] - closest[nonzero],
normals[nonzero])).T)
# Determine if the projection lies within the closest triangle
barycentric = points_to_barycentric(
mesh.triangles[triangle_id[nonzero]],
projection)
ontriangle = ~((
(barycentric < -tol.merge) | (barycentric > 1 + tol.merge)
).any(axis=1))
# Where projection does lie in the triangle, compare vector to projection to the
# triangle normal to compute sign
sign = np.sign(np.einsum(
"ij,ij->i",
normals[nonzero[ontriangle]],
points[nonzero[ontriangle]] - projection[ontriangle]))
distance[nonzero[ontriangle]] *= -1.0 * sign
# For all other triangles, resort to raycasting against the entire mesh
inside = mesh.ray.contains_points(points[nonzero[~ontriangle]])
sign = (inside.astype(int) * 2) - 1.0
# apply sign to previously computed distance
distance[nonzero[~ontriangle]] *= sign
return distance
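# Usage sketch (hypothetical points; signs follow the convention documented
# in the docstring above):
#   d = signed_distance(mesh, [inside_point, outside_point])
#   # d[0] > 0 for the interior point, d[1] < 0 for the exterior point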
class ProximityQuery(object):
"""
Proximity queries for the current mesh.
"""
def __init__(self, mesh):
self._mesh = mesh
@log_time
def on_surface(self, points):
"""
Given list of points, for each point find the closest point
on any triangle of the mesh.
Parameters
----------
points : (m,3) float, points in space
Returns
----------
closest : (m, 3) float
Closest point on triangles for each point
distance : (m,) float
Distance to surface
triangle_id : (m,) int
Index of closest triangle for each point.
"""
return closest_point(mesh=self._mesh,
points=points)
def vertex(self, points):
"""
Given a set of points, return the closest vertex index to each point
Parameters
----------
points : (n, 3) float
Points in space
Returns
----------
distance : (n,) float
Distance from source point to vertex.
vertex_id : (n,) int
Index of mesh.vertices for closest vertex.
"""
tree = self._mesh.kdtree
return tree.query(points)
def signed_distance(self, points):
"""
Find the signed distance from a mesh to a list of points.
* Points OUTSIDE the mesh will have NEGATIVE distance
* Points within tol.merge of the surface will have POSITIVE distance
* Points INSIDE the mesh will have POSITIVE distance
Parameters
-----------
points : (n, 3) float
Points in space
Returns
----------
signed_distance : (n,) float
Signed distance from point to mesh.
"""
return signed_distance(self._mesh, points)
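    # Usage sketch: a ProximityQuery is normally reached through `mesh.nearest`,
    # e.g. closest, distance, triangle_id = mesh.nearest.on_surface(points)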
def longest_ray(mesh, points, directions):
"""
Find the lengths of the longest rays which do not intersect the mesh
cast from a list of points in the provided directions.
Parameters
-----------
points : (n, 3) float
Points in space.
directions : (n, 3) float
Directions of rays.
Returns
----------
signed_distance : (n,) float
Length of rays.
"""
points = np.asanyarray(points, dtype=np.float64)
if not util.is_shape(points, (-1, 3)):
raise ValueError('points must be (n,3)!')
directions = np.asanyarray(directions, dtype=np.float64)
if not util.is_shape(directions, (-1, 3)):
raise ValueError('directions must be (n,3)!')
if len(points) != len(directions):
raise ValueError('number of points must equal number of directions!')
faces, rays, locations = mesh.ray.intersects_id(points, directions,
return_locations=True,
multiple_hits=True)
if len(rays) > 0:
distances = np.linalg.norm(locations - points[rays],
axis=1)
else:
distances = np.array([])
# Reject intersections at distance less than tol.planar
rays = rays[distances > tol.planar]
distances = distances[distances > tol.planar]
# Add infinite length for those with no valid intersection
no_intersections = np.setdiff1d(np.arange(len(points)), rays)
rays = np.concatenate((rays, no_intersections))
distances = np.concatenate((distances,
np.repeat(np.inf,
len(no_intersections))))
return group_min(rays, distances)
def max_tangent_sphere(mesh,
points,
inwards=True,
normals=None,
threshold=1e-6,
max_iter=100):
"""
Find the center and radius of the sphere which is tangent to
the mesh at the given point and at least one more point with no
non-tangential intersections with the mesh.
Masatomo Inui, Nobuyuki Umezu & Ryohei Shimane (2016)
Shrinking sphere:
A parallel algorithm for computing the thickness of 3D objects,
Computer-Aided Design and Applications, 13:2, 199-207,
DOI: 10.1080/16864360.2015.1084186
Parameters
----------
points : (n, 3) float
Points in space.
inwards : bool
Whether to have the sphere inside or outside the mesh.
normals : (n, 3) float or None
Normals of the mesh at the given points
if is None computed automatically.
Returns
----------
centers : (n,3) float
Centers of spheres
radii : (n,) float
Radii of spheres
"""
points = np.asanyarray(points, dtype=np.float64)
if not util.is_shape(points, (-1, 3)):
raise ValueError('points must be (n,3)!')
if normals is not None:
normals = np.asanyarray(normals, dtype=np.float64)
if not util.is_shape(normals, (-1, 3)):
raise ValueError('normals must be (n,3)!')
if len(points) != len(normals):
raise ValueError('number of points must equal number of normals!')
else:
normals = mesh.face_normals[closest_point(mesh, points)[2]]
if inwards:
normals = -normals
# Find initial tangent spheres
distances = longest_ray(mesh, points, normals)
radii = distances * 0.5
not_converged = np.ones(len(points), dtype=bool) # boolean mask
# If ray is infinite, find the vertex which is furthest from our point
# when projected onto the ray. I.e. find v which maximises
# (v-p).n = v.n - p.n.
    # We use a loop rather than a vectorised approach to reduce memory cost;
    # it also seems to run faster.
for i in np.where(np.isinf(distances))[0]:
projections = np.dot(mesh.vertices - points[i], normals[i])
# If no points lie outside the tangent plane, then the radius is infinite
# otherwise we have a point outside the tangent plane, take the one with maximal
# projection
if projections.max() < tol.planar:
radii[i] = np.inf
not_converged[i] = False
else:
vertex = mesh.vertices[projections.argmax()]
radii[i] = (np.dot(vertex - points[i], vertex - points[i]) /
(2 * np.dot(vertex - points[i], normals[i])))
# Compute centers
centers = points + normals * np.nan_to_num(radii.reshape(-1, 1))
centers[np.isinf(radii)] = [np.nan, np.nan, np.nan]
# Our iterative process terminates when the difference in sphere
# radius is less than threshold*D
D = np.linalg.norm(mesh.bounds[1] - mesh.bounds[0])
convergence_threshold = threshold * D
n_iter = 0
while not_converged.sum() > 0 and n_iter < max_iter:
n_iter += 1
n_points, n_dists, n_faces = mesh.nearest.on_surface(
centers[not_converged])
# If the distance to the nearest point is the same as the distance
# to the start point then we are done.
done = np.abs(
n_dists -
np.linalg.norm(
centers[not_converged] -
points[not_converged],
axis=1)) < tol.planar
not_converged[np.where(not_converged)[0][done]] = False
# Otherwise find the radius and center of the sphere tangent to the mesh
# at the point and the nearest point.
diff = n_points[~done] - points[not_converged]
old_radii = radii[not_converged].copy()
# np.einsum produces element wise dot product
radii[not_converged] = (np.einsum('ij, ij->i',
diff,
diff) /
(2 * np.einsum('ij, ij->i',
diff,
normals[not_converged])))
centers[not_converged] = points[not_converged] + \
normals[not_converged] * radii[not_converged].reshape(-1, 1)
# If change in radius is less than threshold we have converged
cvged = old_radii - radii[not_converged] < convergence_threshold
not_converged[np.where(not_converged)[0][cvged]] = False
return centers, radii
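# Usage sketch (hypothetical surface sample points):
#   centers, radii = max_tangent_sphere(mesh, points, inwards=True)
#   local_thickness = 2.0 * radii   # same relationship used by thickness() below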
def thickness(mesh,
points,
exterior=False,
normals=None,
method='max_sphere'):
"""
Find the thickness of the mesh at the given points.
Parameters
----------
points : (n, 3) float
Points in space
exterior : bool
Whether to compute the exterior thickness
(a.k.a. reach)
normals : (n, 3) float
Normals of the mesh at the given points
If is None computed automatically.
method : string
One of 'max_sphere' or 'ray'
Returns
----------
thickness : (n,) float
Thickness at given points.
"""
points = np.asanyarray(points, dtype=np.float64)
if not util.is_shape(points, (-1, 3)):
raise ValueError('points must be (n,3)!')
if normals is not None:
normals = np.asanyarray(normals, dtype=np.float64)
if not util.is_shape(normals, (-1, 3)):
raise ValueError('normals must be (n,3)!')
if len(points) != len(normals):
raise ValueError('number of points must equal number of normals!')
else:
normals = mesh.face_normals[closest_point(mesh, points)[2]]
if method == 'max_sphere':
centers, radius = max_tangent_sphere(mesh=mesh,
points=points,
inwards=not exterior,
normals=normals)
thickness = radius * 2
return thickness
elif method == 'ray':
if exterior:
return longest_ray(mesh, points, normals)
else:
return longest_ray(mesh, points, -normals)
else:
raise ValueError('Invalid method, use "max_sphere" or "ray"')
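# Usage sketch (hedged; assumes `samples` are points on a watertight mesh and
# `face_index` gives the face each sample came from):
#   t = thickness(mesh, samples, normals=mesh.face_normals[face_index],
#                 method='max_sphere')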
| mit |
MSOpenTech/edx-platform | lms/djangoapps/bulk_email/tests/test_err_handling.py | 12 | 17239 | # -*- coding: utf-8 -*-
"""
Unit tests for handling email sending errors
"""
from itertools import cycle
from celery.states import SUCCESS, RETRY
from django.conf import settings
from django.core.management import call_command
from django.core.urlresolvers import reverse
from django.db import DatabaseError
import json
from mock import patch, Mock
from smtplib import SMTPDataError, SMTPServerDisconnected, SMTPConnectError
from bulk_email.models import CourseEmail, SEND_TO_ALL
from bulk_email.tasks import perform_delegate_email_batches, send_course_email
from instructor_task.models import InstructorTask
from instructor_task.subtasks import (
initialize_subtask_info,
SubtaskStatus,
check_subtask_is_valid,
update_subtask_status,
DuplicateTaskException,
MAX_DATABASE_LOCK_RETRIES,
)
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from student.tests.factories import UserFactory, AdminFactory, CourseEnrollmentFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
class EmailTestException(Exception):
"""Mock exception for email testing."""
pass
@patch('bulk_email.models.html_to_text', Mock(return_value='Mocking CourseEmail.text_message'))
@patch.dict(settings.FEATURES, {'ENABLE_INSTRUCTOR_EMAIL': True, 'REQUIRE_COURSE_EMAIL_AUTH': False})
class TestEmailErrors(ModuleStoreTestCase):
"""
Test that errors from sending email are handled properly.
"""
def setUp(self):
super(TestEmailErrors, self).setUp()
course_title = u"ẗëṡẗ title イ乇丂イ ᄊ乇丂丂ムg乇 キo尺 ムレレ тэѕт мэѕѕаБэ"
self.course = CourseFactory.create(display_name=course_title)
self.instructor = AdminFactory.create()
self.client.login(username=self.instructor.username, password="test")
# load initial content (since we don't run migrations as part of tests):
call_command("loaddata", "course_email_template.json")
self.url = reverse('instructor_dashboard', kwargs={'course_id': self.course.id.to_deprecated_string()})
self.send_mail_url = reverse('send_email', kwargs={'course_id': self.course.id.to_deprecated_string()})
self.success_content = {
'course_id': self.course.id.to_deprecated_string(),
'success': True,
}
@patch('bulk_email.tasks.get_connection', autospec=True)
@patch('bulk_email.tasks.send_course_email.retry')
def test_data_err_retry(self, retry, get_conn):
"""
Test that celery handles transient SMTPDataErrors by retrying.
"""
get_conn.return_value.send_messages.side_effect = SMTPDataError(455, "Throttling: Sending rate exceeded")
test_email = {
'action': 'Send email',
'send_to': 'myself',
'subject': 'test subject for myself',
'message': 'test message for myself'
}
response = self.client.post(self.send_mail_url, test_email)
self.assertEquals(json.loads(response.content), self.success_content)
# Test that we retry upon hitting a 4xx error
self.assertTrue(retry.called)
(__, kwargs) = retry.call_args
exc = kwargs['exc']
self.assertIsInstance(exc, SMTPDataError)
@patch('bulk_email.tasks.get_connection', autospec=True)
@patch('bulk_email.tasks.update_subtask_status')
@patch('bulk_email.tasks.send_course_email.retry')
def test_data_err_fail(self, retry, result, get_conn):
"""
Test that celery handles permanent SMTPDataErrors by failing and not retrying.
"""
# have every fourth email fail due to blacklisting:
get_conn.return_value.send_messages.side_effect = cycle([SMTPDataError(554, "Email address is blacklisted"),
None, None, None])
students = [UserFactory() for _ in xrange(settings.BULK_EMAIL_EMAILS_PER_TASK)]
for student in students:
CourseEnrollmentFactory.create(user=student, course_id=self.course.id)
test_email = {
'action': 'Send email',
'send_to': 'all',
'subject': 'test subject for all',
'message': 'test message for all'
}
response = self.client.post(self.send_mail_url, test_email)
self.assertEquals(json.loads(response.content), self.success_content)
# We shouldn't retry when hitting a 5xx error
self.assertFalse(retry.called)
# Test that after the rejected email, the rest still successfully send
((_entry_id, _current_task_id, subtask_status), _kwargs) = result.call_args
self.assertEquals(subtask_status.skipped, 0)
expected_fails = int((settings.BULK_EMAIL_EMAILS_PER_TASK + 3) / 4.0)
self.assertEquals(subtask_status.failed, expected_fails)
self.assertEquals(subtask_status.succeeded, settings.BULK_EMAIL_EMAILS_PER_TASK - expected_fails)
@patch('bulk_email.tasks.get_connection', autospec=True)
@patch('bulk_email.tasks.send_course_email.retry')
def test_disconn_err_retry(self, retry, get_conn):
"""
Test that celery handles SMTPServerDisconnected by retrying.
"""
get_conn.return_value.open.side_effect = SMTPServerDisconnected(425, "Disconnecting")
test_email = {
'action': 'Send email',
'send_to': 'myself',
'subject': 'test subject for myself',
'message': 'test message for myself'
}
response = self.client.post(self.send_mail_url, test_email)
self.assertEquals(json.loads(response.content), self.success_content)
self.assertTrue(retry.called)
(__, kwargs) = retry.call_args
exc = kwargs['exc']
self.assertIsInstance(exc, SMTPServerDisconnected)
@patch('bulk_email.tasks.get_connection', autospec=True)
@patch('bulk_email.tasks.send_course_email.retry')
def test_conn_err_retry(self, retry, get_conn):
"""
Test that celery handles SMTPConnectError by retrying.
"""
get_conn.return_value.open.side_effect = SMTPConnectError(424, "Bad Connection")
test_email = {
'action': 'Send email',
'send_to': 'myself',
'subject': 'test subject for myself',
'message': 'test message for myself'
}
response = self.client.post(self.send_mail_url, test_email)
self.assertEquals(json.loads(response.content), self.success_content)
self.assertTrue(retry.called)
(__, kwargs) = retry.call_args
exc = kwargs['exc']
self.assertIsInstance(exc, SMTPConnectError)
@patch('bulk_email.tasks.SubtaskStatus.increment')
@patch('bulk_email.tasks.log')
def test_nonexistent_email(self, mock_log, result):
"""
Tests retries when the email doesn't exist
"""
# create an InstructorTask object to pass through
course_id = self.course.id
entry = InstructorTask.create(course_id, "task_type", "task_key", "task_input", self.instructor)
task_input = {"email_id": -1}
with self.assertRaises(CourseEmail.DoesNotExist):
perform_delegate_email_batches(entry.id, course_id, task_input, "action_name") # pylint: disable=no-member
((log_str, __, email_id), __) = mock_log.warning.call_args
self.assertTrue(mock_log.warning.called)
self.assertIn('Failed to get CourseEmail with id', log_str)
self.assertEqual(email_id, -1)
self.assertFalse(result.called)
def test_nonexistent_course(self):
"""
Tests exception when the course in the email doesn't exist
"""
course_id = SlashSeparatedCourseKey("I", "DONT", "EXIST")
email = CourseEmail(course_id=course_id)
email.save()
entry = InstructorTask.create(course_id, "task_type", "task_key", "task_input", self.instructor)
task_input = {"email_id": email.id} # pylint: disable=no-member
# (?i) is a regex for ignore case
with self.assertRaisesRegexp(ValueError, r"(?i)course not found"):
perform_delegate_email_batches(entry.id, course_id, task_input, "action_name") # pylint: disable=no-member
def test_nonexistent_to_option(self):
"""
Tests exception when the to_option in the email doesn't exist
"""
email = CourseEmail(course_id=self.course.id, to_option="IDONTEXIST")
email.save()
entry = InstructorTask.create(self.course.id, "task_type", "task_key", "task_input", self.instructor)
task_input = {"email_id": email.id} # pylint: disable=no-member
with self.assertRaisesRegexp(Exception, 'Unexpected bulk email TO_OPTION found: IDONTEXIST'):
perform_delegate_email_batches(entry.id, self.course.id, task_input, "action_name") # pylint: disable=no-member
def test_wrong_course_id_in_task(self):
"""
Tests exception when the course_id in task is not the same as one explicitly passed in.
"""
email = CourseEmail(course_id=self.course.id, to_option=SEND_TO_ALL)
email.save()
entry = InstructorTask.create("bogus/task/id", "task_type", "task_key", "task_input", self.instructor)
task_input = {"email_id": email.id} # pylint: disable=no-member
with self.assertRaisesRegexp(ValueError, 'does not match task value'):
perform_delegate_email_batches(entry.id, self.course.id, task_input, "action_name") # pylint: disable=no-member
def test_wrong_course_id_in_email(self):
"""
Tests exception when the course_id in CourseEmail is not the same as one explicitly passed in.
"""
email = CourseEmail(course_id=SlashSeparatedCourseKey("bogus", "course", "id"), to_option=SEND_TO_ALL)
email.save()
entry = InstructorTask.create(self.course.id, "task_type", "task_key", "task_input", self.instructor)
task_input = {"email_id": email.id} # pylint: disable=no-member
with self.assertRaisesRegexp(ValueError, 'does not match email value'):
perform_delegate_email_batches(entry.id, self.course.id, task_input, "action_name") # pylint: disable=no-member
def test_send_email_undefined_subtask(self):
# test at a lower level, to ensure that the course gets checked down below too.
entry = InstructorTask.create(self.course.id, "task_type", "task_key", "task_input", self.instructor)
entry_id = entry.id # pylint: disable=no-member
to_list = ['[email protected]']
global_email_context = {'course_title': 'dummy course'}
subtask_id = "subtask-id-value"
subtask_status = SubtaskStatus.create(subtask_id)
email_id = 1001
with self.assertRaisesRegexp(DuplicateTaskException, 'unable to find subtasks of instructor task'):
send_course_email(entry_id, email_id, to_list, global_email_context, subtask_status.to_dict())
def test_send_email_missing_subtask(self):
# test at a lower level, to ensure that the course gets checked down below too.
entry = InstructorTask.create(self.course.id, "task_type", "task_key", "task_input", self.instructor)
entry_id = entry.id # pylint: disable=no-member
to_list = ['[email protected]']
global_email_context = {'course_title': 'dummy course'}
subtask_id = "subtask-id-value"
initialize_subtask_info(entry, "emailed", 100, [subtask_id])
different_subtask_id = "bogus-subtask-id-value"
subtask_status = SubtaskStatus.create(different_subtask_id)
bogus_email_id = 1001
with self.assertRaisesRegexp(DuplicateTaskException, 'unable to find status for subtask of instructor task'):
send_course_email(entry_id, bogus_email_id, to_list, global_email_context, subtask_status.to_dict())
def test_send_email_completed_subtask(self):
# test at a lower level, to ensure that the course gets checked down below too.
entry = InstructorTask.create(self.course.id, "task_type", "task_key", "task_input", self.instructor)
entry_id = entry.id # pylint: disable=no-member
subtask_id = "subtask-id-value"
initialize_subtask_info(entry, "emailed", 100, [subtask_id])
subtask_status = SubtaskStatus.create(subtask_id, state=SUCCESS)
update_subtask_status(entry_id, subtask_id, subtask_status)
bogus_email_id = 1001
to_list = ['[email protected]']
global_email_context = {'course_title': 'dummy course'}
new_subtask_status = SubtaskStatus.create(subtask_id)
with self.assertRaisesRegexp(DuplicateTaskException, 'already completed'):
send_course_email(entry_id, bogus_email_id, to_list, global_email_context, new_subtask_status.to_dict())
def test_send_email_running_subtask(self):
# test at a lower level, to ensure that the course gets checked down below too.
entry = InstructorTask.create(self.course.id, "task_type", "task_key", "task_input", self.instructor)
entry_id = entry.id # pylint: disable=no-member
subtask_id = "subtask-id-value"
initialize_subtask_info(entry, "emailed", 100, [subtask_id])
subtask_status = SubtaskStatus.create(subtask_id)
update_subtask_status(entry_id, subtask_id, subtask_status)
check_subtask_is_valid(entry_id, subtask_id, subtask_status)
bogus_email_id = 1001
to_list = ['[email protected]']
global_email_context = {'course_title': 'dummy course'}
with self.assertRaisesRegexp(DuplicateTaskException, 'already being executed'):
send_course_email(entry_id, bogus_email_id, to_list, global_email_context, subtask_status.to_dict())
def test_send_email_retried_subtask(self):
# test at a lower level, to ensure that the course gets checked down below too.
entry = InstructorTask.create(self.course.id, "task_type", "task_key", "task_input", self.instructor)
entry_id = entry.id # pylint: disable=no-member
subtask_id = "subtask-id-value"
initialize_subtask_info(entry, "emailed", 100, [subtask_id])
subtask_status = SubtaskStatus.create(subtask_id, state=RETRY, retried_nomax=2)
update_subtask_status(entry_id, subtask_id, subtask_status)
bogus_email_id = 1001
to_list = ['[email protected]']
global_email_context = {'course_title': 'dummy course'}
# try running with a clean subtask:
new_subtask_status = SubtaskStatus.create(subtask_id)
with self.assertRaisesRegexp(DuplicateTaskException, 'already retried'):
send_course_email(entry_id, bogus_email_id, to_list, global_email_context, new_subtask_status.to_dict())
# try again, with a retried subtask with lower count:
new_subtask_status = SubtaskStatus.create(subtask_id, state=RETRY, retried_nomax=1)
with self.assertRaisesRegexp(DuplicateTaskException, 'already retried'):
send_course_email(entry_id, bogus_email_id, to_list, global_email_context, new_subtask_status.to_dict())
def test_send_email_with_locked_instructor_task(self):
# test at a lower level, to ensure that the course gets checked down below too.
entry = InstructorTask.create(self.course.id, "task_type", "task_key", "task_input", self.instructor)
entry_id = entry.id # pylint: disable=no-member
subtask_id = "subtask-id-locked-model"
initialize_subtask_info(entry, "emailed", 100, [subtask_id])
subtask_status = SubtaskStatus.create(subtask_id)
bogus_email_id = 1001
to_list = ['[email protected]']
global_email_context = {'course_title': 'dummy course'}
with patch('instructor_task.subtasks.InstructorTask.save') as mock_task_save:
mock_task_save.side_effect = DatabaseError
with self.assertRaises(DatabaseError):
send_course_email(entry_id, bogus_email_id, to_list, global_email_context, subtask_status.to_dict())
self.assertEquals(mock_task_save.call_count, MAX_DATABASE_LOCK_RETRIES)
def test_send_email_undefined_email(self):
# test at a lower level, to ensure that the course gets checked down below too.
entry = InstructorTask.create(self.course.id, "task_type", "task_key", "task_input", self.instructor)
entry_id = entry.id # pylint: disable=no-member
to_list = ['[email protected]']
global_email_context = {'course_title': 'dummy course'}
subtask_id = "subtask-id-undefined-email"
initialize_subtask_info(entry, "emailed", 100, [subtask_id])
subtask_status = SubtaskStatus.create(subtask_id)
bogus_email_id = 1001
with self.assertRaises(CourseEmail.DoesNotExist):
# we skip the call that updates subtask status, since we've not set up the InstructorTask
# for the subtask, and it's not important to the test.
with patch('bulk_email.tasks.update_subtask_status'):
send_course_email(entry_id, bogus_email_id, to_list, global_email_context, subtask_status.to_dict())
| agpl-3.0 |
kidaa/avmplus | test/performance/metricinfo.py | 8 | 2932 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# This file contains information about the different performance metrics
# It is a python file that is imported into runtests.py
# Only one variable, metric_info is to be defined in this file.
# metric_info is a dictionary with the primary key being the metric name whose value is another
# dictionary.
# This secondary dictionary MUST define the following (string) keys:
# best : when analyzing multiple iterations, how is the "best" value calculated
# valid values are one of [ min | max | mean | median ]
# note that these are NOT strings, but method names
# The following are optional (string) keys:
# desc : A string description of the metric
# name : Display this name instead of the metric name
# unit : Metric Unit
# largerIsFaster : Boolean indicating whether larger values are considered
# to be faster. Defaults to False
# If a test reports a metric not defined in the metric_info dictionary, min is used as the default
import sys
# add parent dir to python module search path
sys.path.append('..')
from util.runtestUtils import mean, median
metric_info = {
'time': {
'best':min,
'unit':'milliseconds',
},
'compile_time': {
'best':min,
'unit':'seconds',
},
'memory':{
'best':max,
'unit':'k',
},
'size':{
'best':min,
'unit':'bytes',
},
'v8': {
'best':max,
'desc': 'custom v8 normalized metric (hardcoded in the test)',
'largerIsFaster':True
},
'iterations/second':{
'best':max,
'largerIsFaster':True,
},
# steps is a metric output by the avm when compiled with --enable-count-steps
'steps':{
'best':mean,
'desc':'internal steps reported by vm composed of call_count+loop_count. See Bug 568933 for details'
},
# vprof / perfm metrics
'vprof-compile-time': {
'best':min,
'name':'vprof: compile (time)'
},
'vprof-code-size' : {
'best':min,
'name':'vprof: code size (bytes)'
},
'vprof-verify-time' : {
'best':min,
'name':'vprof: verify & IR gen (time)'
},
'vprof-ir-bytes': {
'best':min,
'name':'vprof: mir/lir bytes'
},
'vprof-ir-time': {
'best':min,
'name':'vprof: mir/lir (# of inst)'
},
'vprof-count': {
'best':min,
'name':'vprof: count'
}
}
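# Illustrative lookup (not part of the harness): a runner could aggregate the
# per-iteration values for a metric with the configured function, falling back
# to min for unknown metrics as described above:
#   best_func = metric_info.get(metric, {}).get('best', min)
#   best_value = best_func(iteration_values)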
| mpl-2.0 |
IptvBrasilGroup/Cleitonleonelcreton.repository | plugin.video.armagedompirata/mechanize/_firefox3cookiejar.py | 134 | 8345 | """Firefox 3 "cookies.sqlite" cookie persistence.
Copyright 2008 John J Lee <[email protected]>
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD or ZPL 2.1 licenses (see the file
COPYING.txt included with the distribution).
"""
import logging
import time
from _clientcookie import CookieJar, Cookie, MappingIterator
from _util import isstringlike, experimental
debug = logging.getLogger("mechanize.cookies").debug
class Firefox3CookieJar(CookieJar):
"""Firefox 3 cookie jar.
The cookies are stored in Firefox 3's "cookies.sqlite" format.
Constructor arguments:
filename: filename of cookies.sqlite (typically found at the top level
of a firefox profile directory)
autoconnect: as a convenience, connect to the SQLite cookies database at
Firefox3CookieJar construction time (default True)
policy: an object satisfying the mechanize.CookiePolicy interface
Note that this is NOT a FileCookieJar, and there are no .load(),
.save() or .restore() methods. The database is in sync with the
cookiejar object's state after each public method call.
Following Firefox's own behaviour, session cookies are never saved to
the database.
The file is created, and an sqlite database written to it, if it does
not already exist. The moz_cookies database table is created if it does
not already exist.
"""
# XXX
# handle DatabaseError exceptions
# add a FileCookieJar (explicit .save() / .revert() / .load() methods)
def __init__(self, filename, autoconnect=True, policy=None):
experimental("Firefox3CookieJar is experimental code")
CookieJar.__init__(self, policy)
if filename is not None and not isstringlike(filename):
raise ValueError("filename must be string-like")
self.filename = filename
self._conn = None
if autoconnect:
self.connect()
def connect(self):
import sqlite3 # not available in Python 2.4 stdlib
self._conn = sqlite3.connect(self.filename)
self._conn.isolation_level = "DEFERRED"
self._create_table_if_necessary()
def close(self):
self._conn.close()
def _transaction(self, func):
try:
cur = self._conn.cursor()
try:
result = func(cur)
finally:
cur.close()
except:
self._conn.rollback()
raise
else:
self._conn.commit()
return result
def _execute(self, query, params=()):
return self._transaction(lambda cur: cur.execute(query, params))
def _query(self, query, params=()):
# XXX should we bother with a transaction?
cur = self._conn.cursor()
try:
cur.execute(query, params)
return cur.fetchall()
finally:
cur.close()
def _create_table_if_necessary(self):
self._execute("""\
CREATE TABLE IF NOT EXISTS moz_cookies (id INTEGER PRIMARY KEY, name TEXT,
value TEXT, host TEXT, path TEXT,expiry INTEGER,
lastAccessed INTEGER, isSecure INTEGER, isHttpOnly INTEGER)""")
def _cookie_from_row(self, row):
(pk, name, value, domain, path, expires,
last_accessed, secure, http_only) = row
version = 0
domain = domain.encode("ascii", "ignore")
path = path.encode("ascii", "ignore")
name = name.encode("ascii", "ignore")
value = value.encode("ascii", "ignore")
secure = bool(secure)
# last_accessed isn't a cookie attribute, so isn't added to rest
rest = {}
if http_only:
rest["HttpOnly"] = None
if name == "":
name = value
value = None
initial_dot = domain.startswith(".")
domain_specified = initial_dot
discard = False
if expires == "":
expires = None
discard = True
return Cookie(version, name, value,
None, False,
domain, domain_specified, initial_dot,
path, False,
secure,
expires,
discard,
None,
None,
rest)
def clear(self, domain=None, path=None, name=None):
CookieJar.clear(self, domain, path, name)
where_parts = []
sql_params = []
if domain is not None:
where_parts.append("host = ?")
sql_params.append(domain)
if path is not None:
where_parts.append("path = ?")
sql_params.append(path)
if name is not None:
where_parts.append("name = ?")
sql_params.append(name)
where = " AND ".join(where_parts)
if where:
where = " WHERE " + where
def clear(cur):
cur.execute("DELETE FROM moz_cookies%s" % where,
tuple(sql_params))
self._transaction(clear)
def _row_from_cookie(self, cookie, cur):
expires = cookie.expires
if cookie.discard:
expires = ""
domain = unicode(cookie.domain)
path = unicode(cookie.path)
name = unicode(cookie.name)
value = unicode(cookie.value)
secure = bool(int(cookie.secure))
if value is None:
value = name
name = ""
last_accessed = int(time.time())
http_only = cookie.has_nonstandard_attr("HttpOnly")
query = cur.execute("""SELECT MAX(id) + 1 from moz_cookies""")
pk = query.fetchone()[0]
if pk is None:
pk = 1
return (pk, name, value, domain, path, expires,
last_accessed, secure, http_only)
def set_cookie(self, cookie):
if cookie.discard:
CookieJar.set_cookie(self, cookie)
return
def set_cookie(cur):
# XXX
# is this RFC 2965-correct?
# could this do an UPDATE instead?
row = self._row_from_cookie(cookie, cur)
name, unused, domain, path = row[1:5]
cur.execute("""\
DELETE FROM moz_cookies WHERE host = ? AND path = ? AND name = ?""",
(domain, path, name))
cur.execute("""\
INSERT INTO moz_cookies VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
""", row)
self._transaction(set_cookie)
def __iter__(self):
# session (non-persistent) cookies
for cookie in MappingIterator(self._cookies):
yield cookie
# persistent cookies
for row in self._query("""\
SELECT * FROM moz_cookies ORDER BY name, path, host"""):
yield self._cookie_from_row(row)
def _cookies_for_request(self, request):
session_cookies = CookieJar._cookies_for_request(self, request)
def get_cookies(cur):
query = cur.execute("SELECT host from moz_cookies")
domains = [row[0] for row in query.fetchall()]
cookies = []
for domain in domains:
cookies += self._persistent_cookies_for_domain(domain,
request, cur)
return cookies
        persistent_cookies = self._transaction(get_cookies)
        return session_cookies + persistent_cookies
def _persistent_cookies_for_domain(self, domain, request, cur):
cookies = []
if not self._policy.domain_return_ok(domain, request):
return []
debug("Checking %s for cookies to return", domain)
query = cur.execute("""\
SELECT * from moz_cookies WHERE host = ? ORDER BY path""",
(domain,))
cookies = [self._cookie_from_row(row) for row in query.fetchall()]
last_path = None
r = []
for cookie in cookies:
if (cookie.path != last_path and
not self._policy.path_return_ok(cookie.path, request)):
last_path = cookie.path
continue
if not self._policy.return_ok(cookie, request):
debug(" not returning cookie")
continue
debug(" it's a match")
r.append(cookie)
return r
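# Illustrative usage sketch added for clarity; it is not part of the original
# module. The filename is a placeholder -- point it at a real Firefox 3
# "cookies.sqlite" to try it.
if __name__ == "__main__":
    jar = Firefox3CookieJar("cookies.sqlite")  # autoconnect=True by default
    try:
        for cookie in jar:
            print("%s %s=%s" % (cookie.domain, cookie.name, cookie.value))
    finally:
        jar.close()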
| gpl-2.0 |
JonnyWong16/plexpy | lib/apscheduler/triggers/cron/expressions.py | 3 | 9184 | """This module contains the expressions applicable for CronTrigger's fields."""
from calendar import monthrange
import re
from apscheduler.util import asint
__all__ = ('AllExpression', 'RangeExpression', 'WeekdayRangeExpression',
'WeekdayPositionExpression', 'LastDayOfMonthExpression')
WEEKDAYS = ['sun', 'mon', 'tue', 'wed', 'thu', 'fri', 'sat']
MONTHS = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct', 'nov', 'dec']
class AllExpression(object):
value_re = re.compile(r'\*(?:/(?P<step>\d+))?$')
def __init__(self, step=None):
self.step = asint(step)
if self.step == 0:
raise ValueError('Increment must be higher than 0')
def validate_range(self, field_name):
from apscheduler.triggers.cron.fields import MIN_VALUES, MAX_VALUES
value_range = MAX_VALUES[field_name] - MIN_VALUES[field_name]
if self.step and self.step > value_range:
raise ValueError('the step value ({}) is higher than the total range of the '
'expression ({})'.format(self.step, value_range))
def get_next_value(self, date, field):
start = field.get_value(date)
minval = field.get_min(date)
maxval = field.get_max(date)
start = max(start, minval)
if not self.step:
next = start
else:
distance_to_next = (self.step - (start - minval)) % self.step
next = start + distance_to_next
if next <= maxval:
return next
def __eq__(self, other):
return isinstance(other, self.__class__) and self.step == other.step
def __str__(self):
if self.step:
return '*/%d' % self.step
return '*'
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, self.step)
class RangeExpression(AllExpression):
value_re = re.compile(
r'(?P<first>\d+)(?:-(?P<last>\d+))?(?:/(?P<step>\d+))?$')
def __init__(self, first, last=None, step=None):
super(RangeExpression, self).__init__(step)
first = asint(first)
last = asint(last)
if last is None and step is None:
last = first
if last is not None and first > last:
raise ValueError('The minimum value in a range must not be higher than the maximum')
self.first = first
self.last = last
def validate_range(self, field_name):
from apscheduler.triggers.cron.fields import MIN_VALUES, MAX_VALUES
super(RangeExpression, self).validate_range(field_name)
if self.first < MIN_VALUES[field_name]:
raise ValueError('the first value ({}) is lower than the minimum value ({})'
.format(self.first, MIN_VALUES[field_name]))
if self.last is not None and self.last > MAX_VALUES[field_name]:
raise ValueError('the last value ({}) is higher than the maximum value ({})'
.format(self.last, MAX_VALUES[field_name]))
value_range = (self.last or MAX_VALUES[field_name]) - self.first
if self.step and self.step > value_range:
raise ValueError('the step value ({}) is higher than the total range of the '
'expression ({})'.format(self.step, value_range))
def get_next_value(self, date, field):
startval = field.get_value(date)
minval = field.get_min(date)
maxval = field.get_max(date)
# Apply range limits
minval = max(minval, self.first)
maxval = min(maxval, self.last) if self.last is not None else maxval
nextval = max(minval, startval)
# Apply the step if defined
if self.step:
distance_to_next = (self.step - (nextval - minval)) % self.step
nextval += distance_to_next
return nextval if nextval <= maxval else None
def __eq__(self, other):
return (isinstance(other, self.__class__) and self.first == other.first and
self.last == other.last)
def __str__(self):
if self.last != self.first and self.last is not None:
range = '%d-%d' % (self.first, self.last)
else:
range = str(self.first)
if self.step:
return '%s/%d' % (range, self.step)
return range
def __repr__(self):
args = [str(self.first)]
if self.last != self.first and self.last is not None or self.step:
args.append(str(self.last))
if self.step:
args.append(str(self.step))
return "%s(%s)" % (self.__class__.__name__, ', '.join(args))
class MonthRangeExpression(RangeExpression):
value_re = re.compile(r'(?P<first>[a-z]+)(?:-(?P<last>[a-z]+))?', re.IGNORECASE)
def __init__(self, first, last=None):
try:
first_num = MONTHS.index(first.lower()) + 1
except ValueError:
raise ValueError('Invalid month name "%s"' % first)
if last:
try:
last_num = MONTHS.index(last.lower()) + 1
except ValueError:
raise ValueError('Invalid month name "%s"' % last)
else:
last_num = None
super(MonthRangeExpression, self).__init__(first_num, last_num)
def __str__(self):
if self.last != self.first and self.last is not None:
return '%s-%s' % (MONTHS[self.first - 1], MONTHS[self.last - 1])
return MONTHS[self.first - 1]
def __repr__(self):
        args = ["'%s'" % MONTHS[self.first - 1]]
if self.last != self.first and self.last is not None:
args.append("'%s'" % MONTHS[self.last - 1])
return "%s(%s)" % (self.__class__.__name__, ', '.join(args))
class WeekdayRangeExpression(RangeExpression):
value_re = re.compile(r'(?P<first>[a-z]+)(?:-(?P<last>[a-z]+))?', re.IGNORECASE)
def __init__(self, first, last=None):
try:
first_num = WEEKDAYS.index(first.lower())
except ValueError:
raise ValueError('Invalid weekday name "%s"' % first)
if last:
try:
last_num = WEEKDAYS.index(last.lower())
except ValueError:
raise ValueError('Invalid weekday name "%s"' % last)
else:
last_num = None
super(WeekdayRangeExpression, self).__init__(first_num, last_num)
def __str__(self):
if self.last != self.first and self.last is not None:
return '%s-%s' % (WEEKDAYS[self.first], WEEKDAYS[self.last])
return WEEKDAYS[self.first]
def __repr__(self):
args = ["'%s'" % WEEKDAYS[self.first]]
if self.last != self.first and self.last is not None:
args.append("'%s'" % WEEKDAYS[self.last])
return "%s(%s)" % (self.__class__.__name__, ', '.join(args))
class WeekdayPositionExpression(AllExpression):
options = ['1st', '2nd', '3rd', '4th', '5th', 'last']
value_re = re.compile(r'(?P<option_name>%s) +(?P<weekday_name>(?:\d+|\w+))' %
'|'.join(options), re.IGNORECASE)
def __init__(self, option_name, weekday_name):
super(WeekdayPositionExpression, self).__init__(None)
try:
self.option_num = self.options.index(option_name.lower())
except ValueError:
raise ValueError('Invalid weekday position "%s"' % option_name)
try:
self.weekday = WEEKDAYS.index(weekday_name.lower())
except ValueError:
raise ValueError('Invalid weekday name "%s"' % weekday_name)
def get_next_value(self, date, field):
# Figure out the weekday of the month's first day and the number of days in that month
first_day_wday, last_day = monthrange(date.year, date.month)
# Calculate which day of the month is the first of the target weekdays
first_hit_day = self.weekday - first_day_wday + 1
if first_hit_day <= 0:
first_hit_day += 7
# Calculate what day of the month the target weekday would be
if self.option_num < 5:
target_day = first_hit_day + self.option_num * 7
else:
target_day = first_hit_day + ((last_day - first_hit_day) // 7) * 7
if target_day <= last_day and target_day >= date.day:
return target_day
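        # Worked example (added note, not in the original): with a target
        # weekday index of 4 and a month whose first day has weekday index 6,
        # first_hit_day = 4 - 6 + 1 = -1, bumped by 7 to 6; the "2nd"
        # occurrence (option_num == 1) is then day 6 + 1 * 7 = 13.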
def __eq__(self, other):
return (super(WeekdayPositionExpression, self).__eq__(other) and
self.option_num == other.option_num and self.weekday == other.weekday)
def __str__(self):
return '%s %s' % (self.options[self.option_num], WEEKDAYS[self.weekday])
def __repr__(self):
return "%s('%s', '%s')" % (self.__class__.__name__, self.options[self.option_num],
WEEKDAYS[self.weekday])
class LastDayOfMonthExpression(AllExpression):
value_re = re.compile(r'last', re.IGNORECASE)
def __init__(self):
super(LastDayOfMonthExpression, self).__init__(None)
def get_next_value(self, date, field):
return monthrange(date.year, date.month)[1]
def __str__(self):
return 'last'
def __repr__(self):
return "%s()" % self.__class__.__name__
| gpl-3.0 |
aforalee/RRally | tests/unit/plugins/openstack/context/keystone/test_roles.py | 13 | 5273 | # Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from rally import exceptions
from rally.plugins.openstack.context.keystone import roles
from tests.unit import fakes
from tests.unit import test
CTX = "rally.plugins.openstack.context.keystone.roles"
class RoleGeneratorTestCase(test.TestCase):
def create_default_roles_and_patch_add_remove_functions(self, fc):
fc.keystone().roles.add_user_role = mock.MagicMock()
fc.keystone().roles.remove_user_role = mock.MagicMock()
fc.keystone().roles.create("r1", "test_role1")
fc.keystone().roles.create("r2", "test_role2")
self.assertEqual(2, len(fc.keystone().roles.list()))
@property
def context(self):
return {
"config": {
"roles": [
"test_role1",
"test_role2"
]
},
"admin": {"endpoint": mock.MagicMock()},
"task": mock.MagicMock()
}
@mock.patch("%s.osclients" % CTX)
def test_add_role(self, mock_osclients):
fc = fakes.FakeClients()
mock_osclients.Clients.return_value = fc
self.create_default_roles_and_patch_add_remove_functions(fc)
ctx = roles.RoleGenerator(self.context)
ctx.context["users"] = [{"id": "u1", "tenant_id": "t1"},
{"id": "u2", "tenant_id": "t2"}]
result = ctx._add_role(mock.MagicMock(),
self.context["config"]["roles"][0])
expected = {"id": "r1", "name": "test_role1"}
self.assertEqual(expected, result)
@mock.patch("%s.osclients" % CTX)
def test_add_role_which_does_not_exist(self, mock_osclients):
fc = fakes.FakeClients()
mock_osclients.Clients.return_value = fc
self.create_default_roles_and_patch_add_remove_functions(fc)
ctx = roles.RoleGenerator(self.context)
ctx.context["users"] = [{"id": "u1", "tenant_id": "t1"},
{"id": "u2", "tenant_id": "t2"}]
ex = self.assertRaises(exceptions.NoSuchRole, ctx._add_role,
mock.MagicMock(), "unknown_role")
expected = "There is no role with name `unknown_role`."
self.assertEqual(expected, str(ex))
@mock.patch("%s.osclients" % CTX)
def test_remove_role(self, mock_osclients):
role = mock.MagicMock()
fc = fakes.FakeClients()
mock_osclients.Clients.return_value = fc
self.create_default_roles_and_patch_add_remove_functions(fc)
ctx = roles.RoleGenerator(self.context)
ctx.context["users"] = [{"id": "u1", "tenant_id": "t1"},
{"id": "u2", "tenant_id": "t2"}]
ctx._remove_role(mock.MagicMock(), role)
calls = [
mock.call("u1", role["id"], tenant="t1"),
mock.call("u2", role["id"], tenant="t2"),
]
mock_keystone = mock_osclients.Clients().keystone()
mock_keystone.roles.remove_user_role.assert_has_calls(calls)
@mock.patch("%s.osclients" % CTX)
def test_setup_and_cleanup(self, mock_osclients):
fc = fakes.FakeClients()
mock_osclients.Clients.return_value = fc
self.create_default_roles_and_patch_add_remove_functions(fc)
with roles.RoleGenerator(self.context) as ctx:
ctx.context["users"] = [{"id": "u1", "tenant_id": "t1"},
{"id": "u2", "tenant_id": "t2"}]
ctx.setup()
calls = [
mock.call("u1", "r1", tenant="t1"),
mock.call("u2", "r1", tenant="t2"),
mock.call("u1", "r2", tenant="t1"),
mock.call("u2", "r2", tenant="t2")
]
fc.keystone().roles.add_user_role.assert_has_calls(calls)
self.assertEqual(
4, fc.keystone().roles.add_user_role.call_count)
self.assertEqual(
0, fc.keystone().roles.remove_user_role.call_count)
self.assertEqual(2, len(ctx.context["roles"]))
self.assertEqual(2, len(fc.keystone().roles.list()))
        # Cleanup (called by context manager)
self.assertEqual(2, len(fc.keystone().roles.list()))
self.assertEqual(4, fc.keystone().roles.add_user_role.call_count)
self.assertEqual(4, fc.keystone().roles.remove_user_role.call_count)
calls = [
mock.call("u1", "r1", tenant="t1"),
mock.call("u2", "r1", tenant="t2"),
mock.call("u1", "r2", tenant="t1"),
mock.call("u2", "r2", tenant="t2")
]
fc.keystone().roles.remove_user_role.assert_has_calls(calls)
| apache-2.0 |
gormanb/mongo-python-driver | bson/__init__.py | 14 | 32058 | # Copyright 2009-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BSON (Binary JSON) encoding and decoding.
"""
import calendar
import collections
import datetime
import itertools
import re
import struct
import sys
import uuid
from codecs import (utf_8_decode as _utf_8_decode,
utf_8_encode as _utf_8_encode)
from bson.binary import (Binary, OLD_UUID_SUBTYPE,
JAVA_LEGACY, CSHARP_LEGACY,
UUIDLegacy)
from bson.code import Code
from bson.codec_options import CodecOptions, DEFAULT_CODEC_OPTIONS
from bson.dbref import DBRef
from bson.errors import (InvalidBSON,
InvalidDocument,
InvalidStringData)
from bson.int64 import Int64
from bson.max_key import MaxKey
from bson.min_key import MinKey
from bson.objectid import ObjectId
from bson.py3compat import (b,
PY3,
iteritems,
text_type,
string_type,
reraise)
from bson.regex import Regex
from bson.son import SON, RE_TYPE
from bson.timestamp import Timestamp
from bson.tz_util import utc
try:
from bson import _cbson
_USE_C = True
except ImportError:
_USE_C = False
EPOCH_AWARE = datetime.datetime.fromtimestamp(0, utc)
EPOCH_NAIVE = datetime.datetime.utcfromtimestamp(0)
BSONNUM = b"\x01" # Floating point
BSONSTR = b"\x02" # UTF-8 string
BSONOBJ = b"\x03" # Embedded document
BSONARR = b"\x04" # Array
BSONBIN = b"\x05" # Binary
BSONUND = b"\x06" # Undefined
BSONOID = b"\x07" # ObjectId
BSONBOO = b"\x08" # Boolean
BSONDAT = b"\x09" # UTC Datetime
BSONNUL = b"\x0A" # Null
BSONRGX = b"\x0B" # Regex
BSONREF = b"\x0C" # DBRef
BSONCOD = b"\x0D" # Javascript code
BSONSYM = b"\x0E" # Symbol
BSONCWS = b"\x0F" # Javascript code with scope
BSONINT = b"\x10" # 32bit int
BSONTIM = b"\x11" # Timestamp
BSONLON = b"\x12" # 64bit int
BSONMIN = b"\xFF" # Min key
BSONMAX = b"\x7F" # Max key
_UNPACK_FLOAT = struct.Struct("<d").unpack
_UNPACK_INT = struct.Struct("<i").unpack
_UNPACK_LENGTH_SUBTYPE = struct.Struct("<iB").unpack
_UNPACK_LONG = struct.Struct("<q").unpack
_UNPACK_TIMESTAMP = struct.Struct("<II").unpack
def _get_int(data, position, dummy0, dummy1):
"""Decode a BSON int32 to python int."""
end = position + 4
return _UNPACK_INT(data[position:end])[0], end
def _get_c_string(data, position, opts):
"""Decode a BSON 'C' string to python unicode string."""
end = data.index(b"\x00", position)
return _utf_8_decode(data[position:end],
opts.unicode_decode_error_handler, True)[0], end + 1
def _get_float(data, position, dummy0, dummy1):
"""Decode a BSON double to python float."""
end = position + 8
return _UNPACK_FLOAT(data[position:end])[0], end
def _get_string(data, position, obj_end, opts):
"""Decode a BSON string to python unicode string."""
length = _UNPACK_INT(data[position:position + 4])[0]
position += 4
if length < 1 or obj_end - position < length:
raise InvalidBSON("invalid string length")
end = position + length - 1
if data[end:end + 1] != b"\x00":
raise InvalidBSON("invalid end of string")
return _utf_8_decode(data[position:end],
opts.unicode_decode_error_handler, True)[0], end + 1
def _get_object(data, position, obj_end, opts):
"""Decode a BSON subdocument to opts.document_class or bson.dbref.DBRef."""
obj_size = _UNPACK_INT(data[position:position + 4])[0]
end = position + obj_size - 1
if data[end:position + obj_size] != b"\x00":
raise InvalidBSON("bad eoo")
if end >= obj_end:
raise InvalidBSON("invalid object length")
obj = _elements_to_dict(data, position + 4, end, opts)
position += obj_size
if "$ref" in obj:
return (DBRef(obj.pop("$ref"), obj.pop("$id", None),
obj.pop("$db", None), obj), position)
return obj, position
def _get_array(data, position, obj_end, opts):
"""Decode a BSON array to python list."""
size = _UNPACK_INT(data[position:position + 4])[0]
end = position + size - 1
if data[end:end + 1] != b"\x00":
raise InvalidBSON("bad eoo")
position += 4
end -= 1
result = []
    # Avoid doing global and attribute lookups in the loop.
append = result.append
index = data.index
getter = _ELEMENT_GETTER
while position < end:
element_type = data[position:position + 1]
# Just skip the keys.
position = index(b'\x00', position) + 1
value, position = getter[element_type](data, position, obj_end, opts)
append(value)
return result, position + 1
def _get_binary(data, position, dummy, opts):
"""Decode a BSON binary to bson.binary.Binary or python UUID."""
length, subtype = _UNPACK_LENGTH_SUBTYPE(data[position:position + 5])
position += 5
if subtype == 2:
length2 = _UNPACK_INT(data[position:position + 4])[0]
position += 4
if length2 != length - 4:
raise InvalidBSON("invalid binary (st 2) - lengths don't match!")
length = length2
end = position + length
if subtype in (3, 4):
# Java Legacy
uuid_representation = opts.uuid_representation
if uuid_representation == JAVA_LEGACY:
java = data[position:end]
value = uuid.UUID(bytes=java[0:8][::-1] + java[8:16][::-1])
# C# legacy
elif uuid_representation == CSHARP_LEGACY:
value = uuid.UUID(bytes_le=data[position:end])
# Python
else:
value = uuid.UUID(bytes=data[position:end])
return value, end
# Python3 special case. Decode subtype 0 to 'bytes'.
if PY3 and subtype == 0:
value = data[position:end]
else:
value = Binary(data[position:end], subtype)
return value, end
def _get_oid(data, position, dummy0, dummy1):
"""Decode a BSON ObjectId to bson.objectid.ObjectId."""
end = position + 12
return ObjectId(data[position:end]), end
def _get_boolean(data, position, dummy0, dummy1):
"""Decode a BSON true/false to python True/False."""
end = position + 1
return data[position:end] == b"\x01", end
def _get_date(data, position, dummy, opts):
"""Decode a BSON datetime to python datetime.datetime."""
end = position + 8
millis = _UNPACK_LONG(data[position:end])[0]
diff = ((millis % 1000) + 1000) % 1000
seconds = (millis - diff) / 1000
micros = diff * 1000
if opts.tz_aware:
dt = EPOCH_AWARE + datetime.timedelta(
seconds=seconds, microseconds=micros)
if opts.tzinfo:
dt = dt.astimezone(opts.tzinfo)
else:
dt = EPOCH_NAIVE + datetime.timedelta(
seconds=seconds, microseconds=micros)
return dt, end
def _get_code(data, position, obj_end, opts):
"""Decode a BSON code to bson.code.Code."""
code, position = _get_string(data, position, obj_end, opts)
return Code(code), position
def _get_code_w_scope(data, position, obj_end, opts):
"""Decode a BSON code_w_scope to bson.code.Code."""
code, position = _get_string(data, position + 4, obj_end, opts)
scope, position = _get_object(data, position, obj_end, opts)
return Code(code, scope), position
def _get_regex(data, position, dummy0, opts):
"""Decode a BSON regex to bson.regex.Regex or a python pattern object."""
pattern, position = _get_c_string(data, position, opts)
bson_flags, position = _get_c_string(data, position, opts)
bson_re = Regex(pattern, bson_flags)
return bson_re, position
def _get_ref(data, position, obj_end, opts):
"""Decode (deprecated) BSON DBPointer to bson.dbref.DBRef."""
collection, position = _get_string(data, position, obj_end, opts)
oid, position = _get_oid(data, position, obj_end, opts)
return DBRef(collection, oid), position
def _get_timestamp(data, position, dummy0, dummy1):
"""Decode a BSON timestamp to bson.timestamp.Timestamp."""
end = position + 8
inc, timestamp = _UNPACK_TIMESTAMP(data[position:end])
return Timestamp(timestamp, inc), end
def _get_int64(data, position, dummy0, dummy1):
"""Decode a BSON int64 to bson.int64.Int64."""
end = position + 8
return Int64(_UNPACK_LONG(data[position:end])[0]), end
# Each decoder function's signature is:
# - data: bytes
# - position: int, beginning of object in 'data' to decode
# - obj_end: int, end of object to decode in 'data' if variable-length type
# - opts: a CodecOptions
_ELEMENT_GETTER = {
BSONNUM: _get_float,
BSONSTR: _get_string,
BSONOBJ: _get_object,
BSONARR: _get_array,
BSONBIN: _get_binary,
BSONUND: lambda w, x, y, z: (None, x), # Deprecated undefined
BSONOID: _get_oid,
BSONBOO: _get_boolean,
BSONDAT: _get_date,
BSONNUL: lambda w, x, y, z: (None, x),
BSONRGX: _get_regex,
BSONREF: _get_ref, # Deprecated DBPointer
BSONCOD: _get_code,
BSONSYM: _get_string, # Deprecated symbol
BSONCWS: _get_code_w_scope,
BSONINT: _get_int,
BSONTIM: _get_timestamp,
BSONLON: _get_int64,
BSONMIN: lambda w, x, y, z: (MinKey(), x),
BSONMAX: lambda w, x, y, z: (MaxKey(), x)}
def _element_to_dict(data, position, obj_end, opts):
"""Decode a single key, value pair."""
element_type = data[position:position + 1]
position += 1
element_name, position = _get_c_string(data, position, opts)
value, position = _ELEMENT_GETTER[element_type](data,
position, obj_end, opts)
return element_name, value, position
def _elements_to_dict(data, position, obj_end, opts):
"""Decode a BSON document."""
result = opts.document_class()
end = obj_end - 1
while position < end:
(key, value, position) = _element_to_dict(data, position, obj_end, opts)
result[key] = value
return result
def _bson_to_dict(data, opts):
"""Decode a BSON string to document_class."""
try:
obj_size = _UNPACK_INT(data[:4])[0]
except struct.error as exc:
raise InvalidBSON(str(exc))
if obj_size != len(data):
raise InvalidBSON("invalid object size")
if data[obj_size - 1:obj_size] != b"\x00":
raise InvalidBSON("bad eoo")
try:
return _elements_to_dict(data, 4, obj_size - 1, opts)
except InvalidBSON:
raise
except Exception:
# Change exception type to InvalidBSON but preserve traceback.
_, exc_value, exc_tb = sys.exc_info()
reraise(InvalidBSON, exc_value, exc_tb)
if _USE_C:
_bson_to_dict = _cbson._bson_to_dict
_PACK_FLOAT = struct.Struct("<d").pack
_PACK_INT = struct.Struct("<i").pack
_PACK_LENGTH_SUBTYPE = struct.Struct("<iB").pack
_PACK_LONG = struct.Struct("<q").pack
_PACK_TIMESTAMP = struct.Struct("<II").pack
_LIST_NAMES = tuple(b(str(i)) + b"\x00" for i in range(1000))
def gen_list_name():
"""Generate "keys" for encoded lists in the sequence
b"0\x00", b"1\x00", b"2\x00", ...
The first 1000 keys are returned from a pre-built cache. All
subsequent keys are generated on the fly.
"""
for name in _LIST_NAMES:
yield name
counter = itertools.count(1000)
while True:
yield b(str(next(counter))) + b"\x00"
def _make_c_string_check(string):
"""Make a 'C' string, checking for embedded NUL characters."""
if isinstance(string, bytes):
if b"\x00" in string:
raise InvalidDocument("BSON keys / regex patterns must not "
"contain a NUL character")
try:
_utf_8_decode(string, None, True)
return string + b"\x00"
except UnicodeError:
raise InvalidStringData("strings in documents must be valid "
"UTF-8: %r" % string)
else:
if "\x00" in string:
raise InvalidDocument("BSON keys / regex patterns must not "
"contain a NUL character")
return _utf_8_encode(string)[0] + b"\x00"
def _make_c_string(string):
"""Make a 'C' string."""
if isinstance(string, bytes):
try:
_utf_8_decode(string, None, True)
return string + b"\x00"
except UnicodeError:
raise InvalidStringData("strings in documents must be valid "
"UTF-8: %r" % string)
else:
return _utf_8_encode(string)[0] + b"\x00"
if PY3:
def _make_name(string):
"""Make a 'C' string suitable for a BSON key."""
# Keys can only be text in python 3.
if "\x00" in string:
raise InvalidDocument("BSON keys / regex patterns must not "
"contain a NUL character")
return _utf_8_encode(string)[0] + b"\x00"
else:
# Keys can be unicode or bytes in python 2.
_make_name = _make_c_string_check
def _encode_float(name, value, dummy0, dummy1):
"""Encode a float."""
return b"\x01" + name + _PACK_FLOAT(value)
if PY3:
def _encode_bytes(name, value, dummy0, dummy1):
"""Encode a python bytes."""
# Python3 special case. Store 'bytes' as BSON binary subtype 0.
return b"\x05" + name + _PACK_INT(len(value)) + b"\x00" + value
else:
def _encode_bytes(name, value, dummy0, dummy1):
"""Encode a python str (python 2.x)."""
try:
_utf_8_decode(value, None, True)
except UnicodeError:
raise InvalidStringData("strings in documents must be valid "
"UTF-8: %r" % (value,))
return b"\x02" + name + _PACK_INT(len(value) + 1) + value + b"\x00"
def _encode_mapping(name, value, check_keys, opts):
"""Encode a mapping type."""
data = b"".join([_element_to_bson(key, val, check_keys, opts)
for key, val in iteritems(value)])
return b"\x03" + name + _PACK_INT(len(data) + 5) + data + b"\x00"
def _encode_dbref(name, value, check_keys, opts):
"""Encode bson.dbref.DBRef."""
buf = bytearray(b"\x03" + name + b"\x00\x00\x00\x00")
begin = len(buf) - 4
buf += _name_value_to_bson(b"$ref\x00",
value.collection, check_keys, opts)
buf += _name_value_to_bson(b"$id\x00",
value.id, check_keys, opts)
if value.database is not None:
buf += _name_value_to_bson(
b"$db\x00", value.database, check_keys, opts)
for key, val in iteritems(value._DBRef__kwargs):
buf += _element_to_bson(key, val, check_keys, opts)
buf += b"\x00"
buf[begin:begin + 4] = _PACK_INT(len(buf) - begin)
return bytes(buf)
def _encode_list(name, value, check_keys, opts):
"""Encode a list/tuple."""
lname = gen_list_name()
data = b"".join([_name_value_to_bson(next(lname), item,
check_keys, opts)
for item in value])
return b"\x04" + name + _PACK_INT(len(data) + 5) + data + b"\x00"
def _encode_text(name, value, dummy0, dummy1):
"""Encode a python unicode (python 2.x) / str (python 3.x)."""
value = _utf_8_encode(value)[0]
return b"\x02" + name + _PACK_INT(len(value) + 1) + value + b"\x00"
def _encode_binary(name, value, dummy0, dummy1):
"""Encode bson.binary.Binary."""
subtype = value.subtype
if subtype == 2:
value = _PACK_INT(len(value)) + value
return b"\x05" + name + _PACK_LENGTH_SUBTYPE(len(value), subtype) + value
def _encode_uuid(name, value, dummy, opts):
"""Encode uuid.UUID."""
uuid_representation = opts.uuid_representation
# Python Legacy Common Case
if uuid_representation == OLD_UUID_SUBTYPE:
return b"\x05" + name + b'\x10\x00\x00\x00\x03' + value.bytes
# Java Legacy
elif uuid_representation == JAVA_LEGACY:
from_uuid = value.bytes
data = from_uuid[0:8][::-1] + from_uuid[8:16][::-1]
return b"\x05" + name + b'\x10\x00\x00\x00\x03' + data
# C# legacy
elif uuid_representation == CSHARP_LEGACY:
# Microsoft GUID representation.
return b"\x05" + name + b'\x10\x00\x00\x00\x03' + value.bytes_le
# New
else:
return b"\x05" + name + b'\x10\x00\x00\x00\x04' + value.bytes
def _encode_objectid(name, value, dummy0, dummy1):
"""Encode bson.objectid.ObjectId."""
return b"\x07" + name + value.binary
def _encode_bool(name, value, dummy0, dummy1):
"""Encode a python boolean (True/False)."""
return b"\x08" + name + (value and b"\x01" or b"\x00")
def _encode_datetime(name, value, dummy0, dummy1):
"""Encode datetime.datetime."""
if value.utcoffset() is not None:
value = value - value.utcoffset()
millis = int(calendar.timegm(value.timetuple()) * 1000 +
value.microsecond / 1000)
return b"\x09" + name + _PACK_LONG(millis)
def _encode_none(name, dummy0, dummy1, dummy2):
"""Encode python None."""
return b"\x0A" + name
def _encode_regex(name, value, dummy0, dummy1):
"""Encode a python regex or bson.regex.Regex."""
flags = value.flags
# Python 2 common case
if flags == 0:
return b"\x0B" + name + _make_c_string_check(value.pattern) + b"\x00"
# Python 3 common case
elif flags == re.UNICODE:
return b"\x0B" + name + _make_c_string_check(value.pattern) + b"u\x00"
else:
sflags = b""
if flags & re.IGNORECASE:
sflags += b"i"
if flags & re.LOCALE:
sflags += b"l"
if flags & re.MULTILINE:
sflags += b"m"
if flags & re.DOTALL:
sflags += b"s"
if flags & re.UNICODE:
sflags += b"u"
if flags & re.VERBOSE:
sflags += b"x"
sflags += b"\x00"
return b"\x0B" + name + _make_c_string_check(value.pattern) + sflags
def _encode_code(name, value, dummy, opts):
"""Encode bson.code.Code."""
cstring = _make_c_string(value)
cstrlen = len(cstring)
if not value.scope:
return b"\x0D" + name + _PACK_INT(cstrlen) + cstring
scope = _dict_to_bson(value.scope, False, opts, False)
full_length = _PACK_INT(8 + cstrlen + len(scope))
return b"\x0F" + name + full_length + _PACK_INT(cstrlen) + cstring + scope
def _encode_int(name, value, dummy0, dummy1):
"""Encode a python int."""
if -2147483648 <= value <= 2147483647:
return b"\x10" + name + _PACK_INT(value)
else:
try:
return b"\x12" + name + _PACK_LONG(value)
except struct.error:
raise OverflowError("BSON can only handle up to 8-byte ints")
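# Example (added note, not in the original source): 2147483647 encodes via the
# 4-byte int32 branch above (type byte 0x10), while 2147483648 falls through
# to the 8-byte int64 branch (type byte 0x12).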
def _encode_timestamp(name, value, dummy0, dummy1):
"""Encode bson.timestamp.Timestamp."""
return b"\x11" + name + _PACK_TIMESTAMP(value.inc, value.time)
def _encode_long(name, value, dummy0, dummy1):
"""Encode a python long (python 2.x)"""
try:
return b"\x12" + name + _PACK_LONG(value)
except struct.error:
raise OverflowError("BSON can only handle up to 8-byte ints")
def _encode_minkey(name, dummy0, dummy1, dummy2):
"""Encode bson.min_key.MinKey."""
return b"\xFF" + name
def _encode_maxkey(name, dummy0, dummy1, dummy2):
"""Encode bson.max_key.MaxKey."""
return b"\x7F" + name
# Each encoder function's signature is:
# - name: utf-8 bytes
# - value: a Python data type, e.g. a Python int for _encode_int
# - check_keys: bool, whether to check for invalid names
# - opts: a CodecOptions
_ENCODERS = {
bool: _encode_bool,
bytes: _encode_bytes,
datetime.datetime: _encode_datetime,
dict: _encode_mapping,
float: _encode_float,
int: _encode_int,
list: _encode_list,
# unicode in py2, str in py3
text_type: _encode_text,
tuple: _encode_list,
type(None): _encode_none,
uuid.UUID: _encode_uuid,
Binary: _encode_binary,
Int64: _encode_long,
Code: _encode_code,
DBRef: _encode_dbref,
MaxKey: _encode_maxkey,
MinKey: _encode_minkey,
ObjectId: _encode_objectid,
Regex: _encode_regex,
RE_TYPE: _encode_regex,
SON: _encode_mapping,
Timestamp: _encode_timestamp,
UUIDLegacy: _encode_binary,
# Special case. This will never be looked up directly.
collections.Mapping: _encode_mapping,
}
_MARKERS = {
5: _encode_binary,
7: _encode_objectid,
11: _encode_regex,
13: _encode_code,
17: _encode_timestamp,
18: _encode_long,
100: _encode_dbref,
127: _encode_maxkey,
255: _encode_minkey,
}
if not PY3:
_ENCODERS[long] = _encode_long
def _name_value_to_bson(name, value, check_keys, opts):
"""Encode a single name, value pair."""
# First see if the type is already cached. KeyError will only ever
# happen once per subtype.
try:
return _ENCODERS[type(value)](name, value, check_keys, opts)
except KeyError:
pass
# Second, fall back to trying _type_marker. This has to be done
# before the loop below since users could subclass one of our
# custom types that subclasses a python built-in (e.g. Binary)
marker = getattr(value, "_type_marker", None)
if isinstance(marker, int) and marker in _MARKERS:
func = _MARKERS[marker]
# Cache this type for faster subsequent lookup.
_ENCODERS[type(value)] = func
return func(name, value, check_keys, opts)
# If all else fails test each base type. This will only happen once for
# a subtype of a supported base type.
for base in _ENCODERS:
if isinstance(value, base):
func = _ENCODERS[base]
# Cache this type for faster subsequent lookup.
_ENCODERS[type(value)] = func
return func(name, value, check_keys, opts)
raise InvalidDocument("cannot convert value of type %s to bson" %
type(value))
def _element_to_bson(key, value, check_keys, opts):
"""Encode a single key, value pair."""
if not isinstance(key, string_type):
raise InvalidDocument("documents must have only string keys, "
"key was %r" % (key,))
if check_keys:
if key.startswith("$"):
raise InvalidDocument("key %r must not start with '$'" % (key,))
if "." in key:
raise InvalidDocument("key %r must not contain '.'" % (key,))
name = _make_name(key)
return _name_value_to_bson(name, value, check_keys, opts)
def _dict_to_bson(doc, check_keys, opts, top_level=True):
"""Encode a document to BSON."""
try:
elements = []
if top_level and "_id" in doc:
elements.append(_name_value_to_bson(b"_id\x00", doc["_id"],
check_keys, opts))
for (key, value) in iteritems(doc):
if not top_level or key != "_id":
elements.append(_element_to_bson(key, value,
check_keys, opts))
except AttributeError:
raise TypeError("encoder expected a mapping type but got: %r" % (doc,))
encoded = b"".join(elements)
return _PACK_INT(len(encoded) + 5) + encoded + b"\x00"
if _USE_C:
_dict_to_bson = _cbson._dict_to_bson
_CODEC_OPTIONS_TYPE_ERROR = TypeError(
"codec_options must be an instance of CodecOptions")
def decode_all(data, codec_options=DEFAULT_CODEC_OPTIONS):
"""Decode BSON data to multiple documents.
`data` must be a string of concatenated, valid, BSON-encoded
documents.
:Parameters:
- `data`: BSON data
- `codec_options` (optional): An instance of
:class:`~bson.codec_options.CodecOptions`.
.. versionchanged:: 3.0
Removed `compile_re` option: PyMongo now always represents BSON regular
expressions as :class:`~bson.regex.Regex` objects. Use
:meth:`~bson.regex.Regex.try_compile` to attempt to convert from a
BSON regular expression to a Python regular expression object.
Replaced `as_class`, `tz_aware`, and `uuid_subtype` options with
`codec_options`.
.. versionchanged:: 2.7
Added `compile_re` option. If set to False, PyMongo represented BSON
regular expressions as :class:`~bson.regex.Regex` objects instead of
attempting to compile BSON regular expressions as Python native
regular expressions, thus preventing errors for some incompatible
patterns, see `PYTHON-500`_.
.. _PYTHON-500: https://jira.mongodb.org/browse/PYTHON-500
"""
if not isinstance(codec_options, CodecOptions):
raise _CODEC_OPTIONS_TYPE_ERROR
docs = []
position = 0
end = len(data) - 1
try:
while position < end:
obj_size = _UNPACK_INT(data[position:position + 4])[0]
if len(data) - position < obj_size:
raise InvalidBSON("invalid object size")
obj_end = position + obj_size - 1
if data[obj_end:position + obj_size] != b"\x00":
raise InvalidBSON("bad eoo")
docs.append(_elements_to_dict(data,
position + 4,
obj_end,
codec_options))
position += obj_size
return docs
except InvalidBSON:
raise
except Exception:
# Change exception type to InvalidBSON but preserve traceback.
_, exc_value, exc_tb = sys.exc_info()
reraise(InvalidBSON, exc_value, exc_tb)
if _USE_C:
decode_all = _cbson.decode_all
def decode_iter(data, codec_options=DEFAULT_CODEC_OPTIONS):
"""Decode BSON data to multiple documents as a generator.
Works similarly to the decode_all function, but yields one document at a
time.
`data` must be a string of concatenated, valid, BSON-encoded
documents.
:Parameters:
- `data`: BSON data
- `codec_options` (optional): An instance of
:class:`~bson.codec_options.CodecOptions`.
.. versionchanged:: 3.0
Replaced `as_class`, `tz_aware`, and `uuid_subtype` options with
`codec_options`.
.. versionadded:: 2.8
"""
if not isinstance(codec_options, CodecOptions):
raise _CODEC_OPTIONS_TYPE_ERROR
position = 0
end = len(data) - 1
while position < end:
obj_size = _UNPACK_INT(data[position:position + 4])[0]
elements = data[position:position + obj_size]
position += obj_size
yield _bson_to_dict(elements, codec_options)
def decode_file_iter(file_obj, codec_options=DEFAULT_CODEC_OPTIONS):
"""Decode bson data from a file to multiple documents as a generator.
Works similarly to the decode_all function, but reads from the file object
in chunks and parses bson in chunks, yielding one document at a time.
:Parameters:
- `file_obj`: A file object containing BSON data.
- `codec_options` (optional): An instance of
:class:`~bson.codec_options.CodecOptions`.
.. versionchanged:: 3.0
Replaced `as_class`, `tz_aware`, and `uuid_subtype` options with
`codec_options`.
.. versionadded:: 2.8
"""
while True:
# Read size of next object.
size_data = file_obj.read(4)
if len(size_data) == 0:
            break  # Finished with file normally.
elif len(size_data) != 4:
raise InvalidBSON("cut off in middle of objsize")
obj_size = _UNPACK_INT(size_data)[0] - 4
elements = size_data + file_obj.read(obj_size)
yield _bson_to_dict(elements, codec_options)
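# Example added for illustration (not in the original source): stream documents
# from a ".bson" dump without loading the whole file into memory; the filename
# is a placeholder.
#
#     with open("dump.bson", "rb") as bson_file:
#         for doc in decode_file_iter(bson_file):
#             print(doc)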
def is_valid(bson):
"""Check that the given string represents valid :class:`BSON` data.
Raises :class:`TypeError` if `bson` is not an instance of
:class:`str` (:class:`bytes` in python 3). Returns ``True``
if `bson` is valid :class:`BSON`, ``False`` otherwise.
:Parameters:
- `bson`: the data to be validated
"""
if not isinstance(bson, bytes):
raise TypeError("BSON data must be an instance of a subclass of bytes")
try:
_bson_to_dict(bson, DEFAULT_CODEC_OPTIONS)
return True
except Exception:
return False
class BSON(bytes):
"""BSON (Binary JSON) data.
"""
@classmethod
def encode(cls, document, check_keys=False,
codec_options=DEFAULT_CODEC_OPTIONS):
"""Encode a document to a new :class:`BSON` instance.
A document can be any mapping type (like :class:`dict`).
Raises :class:`TypeError` if `document` is not a mapping type,
or contains keys that are not instances of
:class:`basestring` (:class:`str` in python 3). Raises
:class:`~bson.errors.InvalidDocument` if `document` cannot be
converted to :class:`BSON`.
:Parameters:
- `document`: mapping type representing a document
- `check_keys` (optional): check if keys start with '$' or
contain '.', raising :class:`~bson.errors.InvalidDocument` in
either case
- `codec_options` (optional): An instance of
:class:`~bson.codec_options.CodecOptions`.
.. versionchanged:: 3.0
Replaced `uuid_subtype` option with `codec_options`.
"""
if not isinstance(codec_options, CodecOptions):
raise _CODEC_OPTIONS_TYPE_ERROR
return cls(_dict_to_bson(document, check_keys, codec_options))
def decode(self, codec_options=DEFAULT_CODEC_OPTIONS):
"""Decode this BSON data.
By default, returns a BSON document represented as a Python
:class:`dict`. To use a different :class:`MutableMapping` class,
configure a :class:`~bson.codec_options.CodecOptions`::
>>> import collections # From Python standard library.
>>> import bson
>>> from bson.codec_options import CodecOptions
>>> data = bson.BSON.encode({'a': 1})
        >>> decoded_doc = bson.BSON.decode(data)
        >>> type(decoded_doc)
        <type 'dict'>
>>> options = CodecOptions(document_class=collections.OrderedDict)
>>> decoded_doc = bson.BSON.decode(data, codec_options=options)
>>> type(decoded_doc)
<class 'collections.OrderedDict'>
:Parameters:
- `codec_options` (optional): An instance of
:class:`~bson.codec_options.CodecOptions`.
.. versionchanged:: 3.0
Removed `compile_re` option: PyMongo now always represents BSON
regular expressions as :class:`~bson.regex.Regex` objects. Use
:meth:`~bson.regex.Regex.try_compile` to attempt to convert from a
BSON regular expression to a Python regular expression object.
Replaced `as_class`, `tz_aware`, and `uuid_subtype` options with
`codec_options`.
.. versionchanged:: 2.7
Added `compile_re` option. If set to False, PyMongo represented BSON
regular expressions as :class:`~bson.regex.Regex` objects instead of
attempting to compile BSON regular expressions as Python native
regular expressions, thus preventing errors for some incompatible
patterns, see `PYTHON-500`_.
.. _PYTHON-500: https://jira.mongodb.org/browse/PYTHON-500
"""
if not isinstance(codec_options, CodecOptions):
raise _CODEC_OPTIONS_TYPE_ERROR
return _bson_to_dict(self, codec_options)
def has_c():
"""Is the C extension installed?
"""
return _USE_C
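# Illustrative round-trip sketch added for clarity; it is not part of the
# original module.
if __name__ == "__main__":
    document = {"_id": ObjectId(), "name": u"example", "count": Int64(1)}
    encoded = BSON.encode(document)
    assert is_valid(encoded)
    decoded = encoded.decode()
    assert decoded["name"] == u"example" and decoded["count"] == 1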
| apache-2.0 |
kived/plyer | plyer/platforms/macosx/wifi.py | 2 | 4929 | from plyer.facades import Wifi
from pyobjus.dylib_manager import load_framework, INCLUDE
from pyobjus import autoclass
load_framework(INCLUDE.Foundation)
load_framework(INCLUDE.CoreWLAN)
CWInterface = autoclass('CWInterface')
CWNetwork = autoclass('CWNetwork')
CWWiFiClient = autoclass('CWWiFiClient')
NSArray = autoclass('NSArray')
NSDictionary = autoclass('NSDictionary')
NSString = autoclass('NSString')
class OSXWifi(Wifi):
names = {}
def _is_enabled(self):
'''
Returns `True` if the Wifi is enabled else returns `False`.
'''
return CWWiFiClient.sharedWiFiClient().interface().powerOn()
def _get_network_info(self, name):
'''
Returns all the network information.
'''
def ns(x):
            return NSString.alloc().initWithUTF8String_(x)
accessNetworkType = self.names[name].accessNetworkType
aggregateRSSI = self.names[name].aggregateRSSI
beaconInterval = self.names[name].beaconInterval
bssid = self.names[name].bssid.UTF8String()
countryCode = self.names[name].countryCode
hasInternet = self.names[name].hasInternet
hasInterworkingIE = self.names[name].hasInterworkingIE
hessid = self.names[name].hessid
ibss = self.names[name].ibss
isAdditionalStepRequiredForAccess = \
self.names[name].isAdditionalStepRequiredForAccess
isCarPlayNetwork = self.names[name].isCarPlayNetwork
isEmergencyServicesReachable = \
self.names[name].isEmergencyServicesReachable
isPasspoint = self.names[name].isPasspoint
isPersonalHotspot = self.names[name].isPersonalHotspot
isUnauthenticatedEmergencyServiceAccessible = \
self.names[name].isUnauthenticatedEmergencyServiceAccessible
noiseMeasurement = self.names[name].noiseMeasurement
physicalLayerMode = self.names[name].physicalLayerMode
rssiValue = self.names[name].rssiValue
securityType = self.names[name].securityType
ssid = self.names[name].ssid.UTF8String()
supportsEasyConnect = self.names[name].supportsEasyConnect
supportsWPS = self.names[name].supportsWPS
venueGroup = self.names[name].venueGroup
venueType = self.names[name].venueType
return {'accessNetworkType': accessNetworkType,
'aggregateRSSI': aggregateRSSI,
'beaconInterval': beaconInterval,
'bssid': bssid,
'countryCode': countryCode,
                'hasInternet': hasInternet,
'hasInterworkingIE': hasInterworkingIE,
'hessid': hessid,
'ibss': ibss,
'isAdditionalStepRequiredForAccess':
isAdditionalStepRequiredForAccess,
'isCarPlayNetwork': isCarPlayNetwork,
'isEmergencyServicesReachable': isEmergencyServicesReachable,
'isPasspoint': isPasspoint,
'isPersonalHotspot': isPersonalHotspot,
'isUnauthenticatedEmergencyServiceAccessible':
isUnauthenticatedEmergencyServiceAccessible,
'noiseMeasurement': noiseMeasurement,
'physicalLayerMode': physicalLayerMode,
'rssiValue': rssiValue,
'securityType': securityType,
'ssid': ssid,
'supportsEasyConnect': supportsEasyConnect,
'supportsWPS': supportsWPS,
'venueGroup': venueGroup,
'venueType': venueType}
def _start_scanning(self):
'''
Starts scanning for available Wi-Fi networks.
'''
if self._is_enabled():
self.names = {}
c = CWInterface.interface()
scan = c.scanForNetworksWithName_error_(None, None)
cnt = scan.allObjects().count()
for i in range(cnt):
self.names[
scan.allObjects().objectAtIndex_(i).ssid.UTF8String()] \
= scan.allObjects().objectAtIndex_(i)
else:
raise Exception("Wifi not enabled.")
def _get_available_wifi(self):
'''
        Returns the names of the available networks.
'''
return self.names.keys()
def _connect(self, network, parameters):
'''
Expects 2 parameters:
- name/ssid of the network.
- password: dict type
'''
password = parameters['password']
network_object = self.names[network]
CWInterface.interface().associateToNetwork_password_error_(
network_object,
password,
None)
return
def _disconnect(self):
'''
Disconnect from network.
'''
CWInterface.interface().disassociate()
return
def instance():
return OSXWifi()
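# Illustrative sketch added for clarity; not part of the original module. It
# needs macOS with pyobjus and an enabled Wi-Fi interface, and it calls the
# underscore methods defined above directly (the Wifi facade normally exposes
# public wrappers around them).
if __name__ == "__main__":
    wifi = instance()
    if wifi._is_enabled():
        wifi._start_scanning()
        print(list(wifi._get_available_wifi()))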
| mit |
evensonbryan/yocto-autobuilder | lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/test/test_sslverify.py | 5 | 21662 | # Copyright 2005 Divmod, Inc. See LICENSE file for details
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.internet._sslverify}.
"""
import itertools
try:
from OpenSSL import SSL
from OpenSSL.crypto import PKey, X509, X509Req
from OpenSSL.crypto import TYPE_RSA
from twisted.internet import _sslverify as sslverify
except ImportError:
pass
from twisted.trial import unittest
from twisted.internet import protocol, defer, reactor
from twisted.python.reflect import objgrep, isSame
from twisted.python import log
from twisted.internet.error import CertificateError, ConnectionLost
from twisted.internet import interfaces
# A couple of static PEM-format certificates to be used by various tests.
A_HOST_CERTIFICATE_PEM = """
-----BEGIN CERTIFICATE-----
MIIC2jCCAkMCAjA5MA0GCSqGSIb3DQEBBAUAMIG0MQswCQYDVQQGEwJVUzEiMCAG
A1UEAxMZZXhhbXBsZS50d2lzdGVkbWF0cml4LmNvbTEPMA0GA1UEBxMGQm9zdG9u
MRwwGgYDVQQKExNUd2lzdGVkIE1hdHJpeCBMYWJzMRYwFAYDVQQIEw1NYXNzYWNo
dXNldHRzMScwJQYJKoZIhvcNAQkBFhhub2JvZHlAdHdpc3RlZG1hdHJpeC5jb20x
ETAPBgNVBAsTCFNlY3VyaXR5MB4XDTA2MDgxNjAxMDEwOFoXDTA3MDgxNjAxMDEw
OFowgbQxCzAJBgNVBAYTAlVTMSIwIAYDVQQDExlleGFtcGxlLnR3aXN0ZWRtYXRy
aXguY29tMQ8wDQYDVQQHEwZCb3N0b24xHDAaBgNVBAoTE1R3aXN0ZWQgTWF0cml4
IExhYnMxFjAUBgNVBAgTDU1hc3NhY2h1c2V0dHMxJzAlBgkqhkiG9w0BCQEWGG5v
Ym9keUB0d2lzdGVkbWF0cml4LmNvbTERMA8GA1UECxMIU2VjdXJpdHkwgZ8wDQYJ
KoZIhvcNAQEBBQADgY0AMIGJAoGBAMzH8CDF/U91y/bdbdbJKnLgnyvQ9Ig9ZNZp
8hpsu4huil60zF03+Lexg2l1FIfURScjBuaJMR6HiMYTMjhzLuByRZ17KW4wYkGi
KXstz03VIKy4Tjc+v4aXFI4XdRw10gGMGQlGGscXF/RSoN84VoDKBfOMWdXeConJ
VyC4w3iJAgMBAAEwDQYJKoZIhvcNAQEEBQADgYEAviMT4lBoxOgQy32LIgZ4lVCj
JNOiZYg8GMQ6y0ugp86X80UjOvkGtNf/R7YgED/giKRN/q/XJiLJDEhzknkocwmO
S+4b2XpiaZYxRyKWwL221O7CGmtWYyZl2+92YYmmCiNzWQPfP6BOMlfax0AGLHls
fXzCWdG0O/3Lk2SRM0I=
-----END CERTIFICATE-----
"""
A_PEER_CERTIFICATE_PEM = """
-----BEGIN CERTIFICATE-----
MIIC3jCCAkcCAjA6MA0GCSqGSIb3DQEBBAUAMIG2MQswCQYDVQQGEwJVUzEiMCAG
A1UEAxMZZXhhbXBsZS50d2lzdGVkbWF0cml4LmNvbTEPMA0GA1UEBxMGQm9zdG9u
MRwwGgYDVQQKExNUd2lzdGVkIE1hdHJpeCBMYWJzMRYwFAYDVQQIEw1NYXNzYWNo
dXNldHRzMSkwJwYJKoZIhvcNAQkBFhpzb21lYm9keUB0d2lzdGVkbWF0cml4LmNv
bTERMA8GA1UECxMIU2VjdXJpdHkwHhcNMDYwODE2MDEwMTU2WhcNMDcwODE2MDEw
MTU2WjCBtjELMAkGA1UEBhMCVVMxIjAgBgNVBAMTGWV4YW1wbGUudHdpc3RlZG1h
dHJpeC5jb20xDzANBgNVBAcTBkJvc3RvbjEcMBoGA1UEChMTVHdpc3RlZCBNYXRy
aXggTGFiczEWMBQGA1UECBMNTWFzc2FjaHVzZXR0czEpMCcGCSqGSIb3DQEJARYa
c29tZWJvZHlAdHdpc3RlZG1hdHJpeC5jb20xETAPBgNVBAsTCFNlY3VyaXR5MIGf
MA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCnm+WBlgFNbMlHehib9ePGGDXF+Nz4
CjGuUmVBaXCRCiVjg3kSDecwqfb0fqTksBZ+oQ1UBjMcSh7OcvFXJZnUesBikGWE
JE4V8Bjh+RmbJ1ZAlUPZ40bAkww0OpyIRAGMvKG+4yLFTO4WDxKmfDcrOb6ID8WJ
e1u+i3XGkIf/5QIDAQABMA0GCSqGSIb3DQEBBAUAA4GBAD4Oukm3YYkhedUepBEA
vvXIQhVDqL7mk6OqYdXmNj6R7ZMC8WWvGZxrzDI1bZuB+4aIxxd1FXC3UOHiR/xg
i9cDl1y8P/qRp4aEBNF6rI0D4AxTbfnHQx4ERDAOShJdYZs/2zifPJ6va6YvrEyr
yqDtGhklsWW3ZwBzEh5VEOUp
-----END CERTIFICATE-----
"""
counter = itertools.count().next
def makeCertificate(**kw):
keypair = PKey()
keypair.generate_key(TYPE_RSA, 512)
certificate = X509()
certificate.gmtime_adj_notBefore(0)
certificate.gmtime_adj_notAfter(60 * 60 * 24 * 365) # One year
for xname in certificate.get_issuer(), certificate.get_subject():
for (k, v) in kw.items():
setattr(xname, k, v)
certificate.set_serial_number(counter())
certificate.set_pubkey(keypair)
certificate.sign(keypair, "md5")
return keypair, certificate
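# Note added for illustration: the tests below build throw-away self-signed
# certificates with this helper, e.g.
#     key, cert = makeCertificate(O="Server Test Certificate", CN="server")
# and wrap them in sslverify.OpenSSLCertificateOptions for loopback runs.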
class DataCallbackProtocol(protocol.Protocol):
def dataReceived(self, data):
d, self.factory.onData = self.factory.onData, None
if d is not None:
d.callback(data)
def connectionLost(self, reason):
d, self.factory.onLost = self.factory.onLost, None
if d is not None:
d.errback(reason)
class WritingProtocol(protocol.Protocol):
byte = 'x'
def connectionMade(self):
self.transport.write(self.byte)
def connectionLost(self, reason):
self.factory.onLost.errback(reason)
class OpenSSLOptions(unittest.TestCase):
serverPort = clientConn = None
onServerLost = onClientLost = None
sKey = None
sCert = None
cKey = None
cCert = None
def setUp(self):
"""
Create class variables of client and server certificates.
"""
self.sKey, self.sCert = makeCertificate(
O="Server Test Certificate",
CN="server")
self.cKey, self.cCert = makeCertificate(
O="Client Test Certificate",
CN="client")
def tearDown(self):
if self.serverPort is not None:
self.serverPort.stopListening()
if self.clientConn is not None:
self.clientConn.disconnect()
L = []
if self.onServerLost is not None:
L.append(self.onServerLost)
if self.onClientLost is not None:
L.append(self.onClientLost)
return defer.DeferredList(L, consumeErrors=True)
def loopback(self, serverCertOpts, clientCertOpts,
onServerLost=None, onClientLost=None, onData=None):
if onServerLost is None:
self.onServerLost = onServerLost = defer.Deferred()
if onClientLost is None:
self.onClientLost = onClientLost = defer.Deferred()
if onData is None:
onData = defer.Deferred()
serverFactory = protocol.ServerFactory()
serverFactory.protocol = DataCallbackProtocol
serverFactory.onLost = onServerLost
serverFactory.onData = onData
clientFactory = protocol.ClientFactory()
clientFactory.protocol = WritingProtocol
clientFactory.onLost = onClientLost
self.serverPort = reactor.listenSSL(0, serverFactory, serverCertOpts)
self.clientConn = reactor.connectSSL('127.0.0.1',
self.serverPort.getHost().port, clientFactory, clientCertOpts)
def test_abbreviatingDistinguishedNames(self):
"""
Check that abbreviations used in certificates correctly map to
complete names.
"""
self.assertEqual(
sslverify.DN(CN='a', OU='hello'),
sslverify.DistinguishedName(commonName='a',
organizationalUnitName='hello'))
self.assertNotEquals(
sslverify.DN(CN='a', OU='hello'),
sslverify.DN(CN='a', OU='hello', emailAddress='xxx'))
dn = sslverify.DN(CN='abcdefg')
self.assertRaises(AttributeError, setattr, dn, 'Cn', 'x')
self.assertEqual(dn.CN, dn.commonName)
dn.CN = 'bcdefga'
self.assertEqual(dn.CN, dn.commonName)
def testInspectDistinguishedName(self):
n = sslverify.DN(commonName='common name',
organizationName='organization name',
organizationalUnitName='organizational unit name',
localityName='locality name',
stateOrProvinceName='state or province name',
countryName='country name',
emailAddress='email address')
s = n.inspect()
for k in [
'common name',
'organization name',
'organizational unit name',
'locality name',
'state or province name',
'country name',
'email address']:
self.assertIn(k, s, "%r was not in inspect output." % (k,))
self.assertIn(k.title(), s, "%r was not in inspect output." % (k,))
def testInspectDistinguishedNameWithoutAllFields(self):
n = sslverify.DN(localityName='locality name')
s = n.inspect()
for k in [
'common name',
'organization name',
'organizational unit name',
'state or province name',
'country name',
'email address']:
self.assertNotIn(k, s, "%r was in inspect output." % (k,))
self.assertNotIn(k.title(), s, "%r was in inspect output." % (k,))
self.assertIn('locality name', s)
self.assertIn('Locality Name', s)
def test_inspectCertificate(self):
"""
Test that the C{inspect} method of L{sslverify.Certificate} returns
a human-readable string containing some basic information about the
certificate.
"""
c = sslverify.Certificate.loadPEM(A_HOST_CERTIFICATE_PEM)
self.assertEqual(
c.inspect().split('\n'),
["Certificate For Subject:",
" Common Name: example.twistedmatrix.com",
" Country Name: US",
" Email Address: [email protected]",
" Locality Name: Boston",
" Organization Name: Twisted Matrix Labs",
" Organizational Unit Name: Security",
" State Or Province Name: Massachusetts",
"",
"Issuer:",
" Common Name: example.twistedmatrix.com",
" Country Name: US",
" Email Address: [email protected]",
" Locality Name: Boston",
" Organization Name: Twisted Matrix Labs",
" Organizational Unit Name: Security",
" State Or Province Name: Massachusetts",
"",
"Serial Number: 12345",
"Digest: C4:96:11:00:30:C3:EC:EE:A3:55:AA:ED:8C:84:85:18",
"Public Key with Hash: ff33994c80812aa95a79cdb85362d054"])
def test_certificateOptionsSerialization(self):
"""
Test that __setstate__(__getstate__()) round-trips properly.
"""
firstOpts = sslverify.OpenSSLCertificateOptions(
privateKey=self.sKey,
certificate=self.sCert,
method=SSL.SSLv3_METHOD,
verify=True,
caCerts=[self.sCert],
verifyDepth=2,
requireCertificate=False,
verifyOnce=False,
enableSingleUseKeys=False,
enableSessions=False,
fixBrokenPeers=True,
enableSessionTickets=True)
context = firstOpts.getContext()
state = firstOpts.__getstate__()
# The context shouldn't be in the state to serialize
self.failIf(objgrep(state, context, isSame),
objgrep(state, context, isSame))
opts = sslverify.OpenSSLCertificateOptions()
opts.__setstate__(state)
self.assertEqual(opts.privateKey, self.sKey)
self.assertEqual(opts.certificate, self.sCert)
self.assertEqual(opts.method, SSL.SSLv3_METHOD)
self.assertEqual(opts.verify, True)
self.assertEqual(opts.caCerts, [self.sCert])
self.assertEqual(opts.verifyDepth, 2)
self.assertEqual(opts.requireCertificate, False)
self.assertEqual(opts.verifyOnce, False)
self.assertEqual(opts.enableSingleUseKeys, False)
self.assertEqual(opts.enableSessions, False)
self.assertEqual(opts.fixBrokenPeers, True)
self.assertEqual(opts.enableSessionTickets, True)
def test_certificateOptionsSessionTickets(self):
"""
Enabling session tickets should not set the OP_NO_TICKET option.
"""
opts = sslverify.OpenSSLCertificateOptions(enableSessionTickets=True)
ctx = opts.getContext()
self.assertEqual(0, ctx.set_options(0) & 0x00004000)
def test_certificateOptionsSessionTicketsDisabled(self):
"""
Enabling session tickets should set the OP_NO_TICKET option.
"""
opts = sslverify.OpenSSLCertificateOptions(enableSessionTickets=False)
ctx = opts.getContext()
self.assertEqual(0x00004000, ctx.set_options(0) & 0x00004000)
def test_allowedAnonymousClientConnection(self):
"""
Check that anonymous connections are allowed when certificates aren't
required on the server.
"""
onData = defer.Deferred()
self.loopback(sslverify.OpenSSLCertificateOptions(privateKey=self.sKey,
certificate=self.sCert, requireCertificate=False),
sslverify.OpenSSLCertificateOptions(
requireCertificate=False),
onData=onData)
return onData.addCallback(
lambda result: self.assertEqual(result, WritingProtocol.byte))
def test_refusedAnonymousClientConnection(self):
"""
Check that anonymous connections are refused when certificates are
required on the server.
"""
onServerLost = defer.Deferred()
onClientLost = defer.Deferred()
self.loopback(sslverify.OpenSSLCertificateOptions(privateKey=self.sKey,
certificate=self.sCert, verify=True,
caCerts=[self.sCert], requireCertificate=True),
sslverify.OpenSSLCertificateOptions(
requireCertificate=False),
onServerLost=onServerLost,
onClientLost=onClientLost)
d = defer.DeferredList([onClientLost, onServerLost],
consumeErrors=True)
def afterLost(((cSuccess, cResult), (sSuccess, sResult))):
self.failIf(cSuccess)
self.failIf(sSuccess)
            # Win32 fails to report the SSL Error, and reports a connection lost
# instead: there is a race condition so that's not totally
# surprising (see ticket #2877 in the tracker)
self.assertIsInstance(cResult.value, (SSL.Error, ConnectionLost))
self.assertIsInstance(sResult.value, SSL.Error)
return d.addCallback(afterLost)
def test_failedCertificateVerification(self):
"""
Check that connecting with a certificate not accepted by the server CA
fails.
"""
onServerLost = defer.Deferred()
onClientLost = defer.Deferred()
self.loopback(sslverify.OpenSSLCertificateOptions(privateKey=self.sKey,
certificate=self.sCert, verify=False,
requireCertificate=False),
sslverify.OpenSSLCertificateOptions(verify=True,
requireCertificate=False, caCerts=[self.cCert]),
onServerLost=onServerLost,
onClientLost=onClientLost)
d = defer.DeferredList([onClientLost, onServerLost],
consumeErrors=True)
def afterLost(((cSuccess, cResult), (sSuccess, sResult))):
self.failIf(cSuccess)
self.failIf(sSuccess)
return d.addCallback(afterLost)
def test_successfulCertificateVerification(self):
"""
Test a successful connection with client certificate validation on
server side.
"""
onData = defer.Deferred()
self.loopback(sslverify.OpenSSLCertificateOptions(privateKey=self.sKey,
certificate=self.sCert, verify=False,
requireCertificate=False),
sslverify.OpenSSLCertificateOptions(verify=True,
requireCertificate=True, caCerts=[self.sCert]),
onData=onData)
return onData.addCallback(
lambda result: self.assertEqual(result, WritingProtocol.byte))
def test_successfulSymmetricSelfSignedCertificateVerification(self):
"""
Test a successful connection with validation on both server and client
sides.
"""
onData = defer.Deferred()
self.loopback(sslverify.OpenSSLCertificateOptions(privateKey=self.sKey,
certificate=self.sCert, verify=True,
requireCertificate=True, caCerts=[self.cCert]),
sslverify.OpenSSLCertificateOptions(privateKey=self.cKey,
certificate=self.cCert, verify=True,
requireCertificate=True, caCerts=[self.sCert]),
onData=onData)
return onData.addCallback(
lambda result: self.assertEqual(result, WritingProtocol.byte))
def test_verification(self):
"""
Check certificates verification building custom certificates data.
"""
clientDN = sslverify.DistinguishedName(commonName='client')
clientKey = sslverify.KeyPair.generate()
clientCertReq = clientKey.certificateRequest(clientDN)
serverDN = sslverify.DistinguishedName(commonName='server')
serverKey = sslverify.KeyPair.generate()
serverCertReq = serverKey.certificateRequest(serverDN)
clientSelfCertReq = clientKey.certificateRequest(clientDN)
clientSelfCertData = clientKey.signCertificateRequest(
clientDN, clientSelfCertReq, lambda dn: True, 132)
clientSelfCert = clientKey.newCertificate(clientSelfCertData)
serverSelfCertReq = serverKey.certificateRequest(serverDN)
serverSelfCertData = serverKey.signCertificateRequest(
serverDN, serverSelfCertReq, lambda dn: True, 516)
serverSelfCert = serverKey.newCertificate(serverSelfCertData)
clientCertData = serverKey.signCertificateRequest(
serverDN, clientCertReq, lambda dn: True, 7)
clientCert = clientKey.newCertificate(clientCertData)
serverCertData = clientKey.signCertificateRequest(
clientDN, serverCertReq, lambda dn: True, 42)
serverCert = serverKey.newCertificate(serverCertData)
onData = defer.Deferred()
serverOpts = serverCert.options(serverSelfCert)
clientOpts = clientCert.options(clientSelfCert)
self.loopback(serverOpts,
clientOpts,
onData=onData)
return onData.addCallback(
lambda result: self.assertEqual(result, WritingProtocol.byte))
if interfaces.IReactorSSL(reactor, None) is None:
OpenSSLOptions.skip = "Reactor does not support SSL, cannot run SSL tests"
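# Fake transports used below to exercise Certificate.hostFromTransport/peerFromTransport.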
class _NotSSLTransport:
def getHandle(self):
return self
class _MaybeSSLTransport:
def getHandle(self):
return self
def get_peer_certificate(self):
return None
def get_host_certificate(self):
return None
class _ActualSSLTransport:
def getHandle(self):
return self
def get_host_certificate(self):
return sslverify.Certificate.loadPEM(A_HOST_CERTIFICATE_PEM).original
def get_peer_certificate(self):
return sslverify.Certificate.loadPEM(A_PEER_CERTIFICATE_PEM).original
class Constructors(unittest.TestCase):
def test_peerFromNonSSLTransport(self):
"""
Verify that peerFromTransport raises an exception if the transport
passed is not actually an SSL transport.
"""
x = self.assertRaises(CertificateError,
sslverify.Certificate.peerFromTransport,
_NotSSLTransport())
self.failUnless(str(x).startswith("non-TLS"))
def test_peerFromBlankSSLTransport(self):
"""
Verify that peerFromTransport raises an exception if the transport
passed is an SSL transport, but doesn't have a peer certificate.
"""
x = self.assertRaises(CertificateError,
sslverify.Certificate.peerFromTransport,
_MaybeSSLTransport())
self.failUnless(str(x).startswith("TLS"))
def test_hostFromNonSSLTransport(self):
"""
Verify that hostFromTransport raises an exception if the transport
passed is not actually an SSL transport.
"""
x = self.assertRaises(CertificateError,
sslverify.Certificate.hostFromTransport,
_NotSSLTransport())
self.failUnless(str(x).startswith("non-TLS"))
def test_hostFromBlankSSLTransport(self):
"""
Verify that hostFromTransport raises an exception if the transport
passed is an SSL transport, but doesn't have a host certificate.
"""
x = self.assertRaises(CertificateError,
sslverify.Certificate.hostFromTransport,
_MaybeSSLTransport())
self.failUnless(str(x).startswith("TLS"))
def test_hostFromSSLTransport(self):
"""
Verify that hostFromTransport successfully creates the correct
certificate if passed a valid SSL transport.
"""
self.assertEqual(
sslverify.Certificate.hostFromTransport(
_ActualSSLTransport()).serialNumber(),
12345)
def test_peerFromSSLTransport(self):
"""
Verify that peerFromTransport successfully creates the correct
certificate if passed a valid SSL transport.
"""
self.assertEqual(
sslverify.Certificate.peerFromTransport(
_ActualSSLTransport()).serialNumber(),
12346)
if interfaces.IReactorSSL(reactor, None) is None:
Constructors.skip = "Reactor does not support SSL, cannot run SSL tests"
| gpl-2.0 |
hcseob/py_spectre | py_spectre/psf.py | 1 | 50756 | # -*- coding: latin-1 -*-
"""
Copyright (c) 2008 Pycircuit Development Team
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
a. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
b. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
c. Neither the name of the Pycircuit nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.
"""
import unittest
import struct, os, re
import operator
import numpy
# import psfasc
from copy import copy
from struct import unpack, pack
class PSFInvalid(Exception):
pass
def warning(str):
print "Warning: "+str
def indent(str, n=2):
return "\n".join([' '*n+s for s in str.split("\n")])
class PSFData(object):
@classmethod
def fromFile(cls, file):
obj = cls()
obj.deSerializeFile(file)
return obj
size=None
def __init__(self, value=None, extarg=None):
self.value = value
self.extarg = extarg
def setValue(self, value):
self.value = value
def __eq__(self, a):
return self.value == a
def __cmp__(self, a):
return cmp(self.value, a)
def __hash__(self):
return hash(self.value)
def deSerializeFile(self, file):
pass
def getSize(self):
        return self.size
def getValue(self):
return self.value
def __str__(self):
return str(self.value)
def toPSFasc(self, prec=None):
return str(self)
def __repr__(self):
return self.value.__repr__()
class PSFNumber(PSFData):
def __int__(self):
return self.value
def __add__(self, a):
return UInt32(self.value+int(a))
def __mul__(self, a):
return UInt32(self.value*int(a))
def __radd__(self, a):
return UInt32(self.value+int(a))
def __sub__(self, a):
return UInt32(self.value-int(a))
def __rsub__(self, a):
return UInt32(int(a)-self.value)
def __div__(self, a):
return UInt32(self.value/int(a))
def __rdiv__(self, a):
return UInt32(int(a)/self.value)
def __floordiv__(self, a):
return UInt32(self.value//int(a))
def __rfloordiv__(self, a):
return UInt32(int(a)//self.value)
def __mod__(self, a):
return UInt32(self.value%int(a))
class Int8(PSFNumber):
size=4
def deSerializeFile(self, file, size=None):
data=file.read(self.size)
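        # 8-bit values are stored in a full 4-byte word; the value sits in the last byte.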
self.value = unpack("b",data[3])[0]
class UInt8(PSFNumber):
size=4
def deSerializeFile(self, file, size=None):
data=file.read(self.size)
self.value = unpack("B",data[3])[0]
class Int32(PSFNumber):
size=4
def deSerializeFile(self, file, size=None):
self.value = unpack(">i",file.read(self.size))[0]
class UInt32(PSFNumber):
size=4
def deSerializeFile(self, file, size=None):
self.value = unpack(">I",file.read(self.size))[0]
class Int64(PSFNumber):
size=8
def __int__(self):
return self.value
def deSerializeFile(self, file, size=None):
self.value = unpack(">q",file.read(self.size))[0]
class UInt64(PSFNumber):
size=8
def __int__(self):
return self.value
def deSerializeFile(self, file, size=None):
self.value = unpack(">Q",file.read(self.size))[0]
class Float64(PSFNumber):
size=8
def __float__(self):
return float(self.value)
def toPSFasc(self, prec=6):
if prec:
fmt=('%%#%dg'%prec)
else:
fmt='%#g'
return fmt%self.value
def deSerializeFile(self, file, size=None):
self.value = unpack(">d",file.read(self.size))[0]
class Float32(PSFNumber):
size=4
def __float__(self):
return float(self.value)
def deSerializeFile(self, file, size=None):
self.value = unpack(">f",file.read(self.size))[0]
class ComplexFloat64(PSFNumber):
size=16
def toPSFasc(self, prec=6):
if prec:
fmt=('%%#%dg'%prec)
else:
fmt='%#g'
return "(" + fmt%self.value.real + " " + fmt%self.value.imag + ")"
def deSerializeFile(self, file, size=None):
re,im = unpack(">dd",file.read(self.size))
self.value = complex(re,im)
class String(PSFData):
def __str__(self):
return self.value
def deSerializeFile(self, file, size=None):
self.len = unpack(">I",file.read(4))[0]
if self.len < 0x100:
self.value = file.read(self.len)
# Pad to 32-bit boundary
file.read((4-self.len)%4)
else:
raise Exception("String too long %d"%self.len)
def toPSFasc(self, prec=None):
return "\""+str(self.value)+"\""
class Struct(PSFData):
def __init__(self, structdef, value=None):
self.structdef = structdef
self.value = {}
if value:
self.setValue(value)
def __getitem__(self, key):
return self.value[key]
def getValue(self):
return dict([(k,v.getValue()) for k,v in self.value.items()])
def setValue(self, value):
assert(value != None and len(value) == len(self.structdef.children))
for element, val in zip(self.structdef.children, value):
valueobj = element.getDataObj()
valueobj.setValue(val)
self.value[element.name] = valueobj
def deSerializeFile(self, file):
for element in self.structdef.children:
value = element.getDataObj()
value.deSerializeFile(file)
self.value[element.name] = value
def toPSFasc(self, prec=None):
s="(\n"
for element in self.structdef.children:
s+=self.value[element.name].toPSFasc(prec)+"\n"
s+=")"
return s
def __repr__(self):
return "\n".join([indent(s) for s in map(repr,self.value.items())]) + "\n"
class Array(PSFData):
def setValue(self, value):
dataclass, length = self.extarg
if value != None:
self.children = [dataclass(value=val) for val in value]
else:
self.children = [dataclass(value=None) for val in range(length)]
def getValue(self):
return [v.getValue() for v in self.children]
def __iter__(self):
return self.children.__iter__()
def __tuple__(self):
return tuple(self.children)
def __repr__(self):
return "\n".join([indent(s) for s in map(str,self.children)]) + "\n"
class Chunk:
"""Base class for chunk"""
def __init__(self, psf=None, type=None):
self.psf = psf
self.fileoffset=None
if not hasattr(self.__class__, 'type'):
self.type = type
self.verbose = False
self.name = ""
def deSerializeFile(self, file):
self.fileoffset = file.tell()
type = UInt32.fromFile(file)
if (self.type != None) and self.type != type:
file.seek(-UInt32.size, 1)
raise IncorrectChunk(type, self.type)
def __repr__(self):
return self.__class__.__name__
class NextSectionType(Chunk):
type=1
class NextSectionSweep(Chunk):
type=2
class NextSectionTrace(Chunk):
type=3
class NextSectionValues(Chunk):
type=4
class EndOfStructDef(Chunk):
type=18
NextSectionClasses = [NextSectionType, NextSectionSweep, NextSectionTrace, NextSectionValues]
class Property(Chunk):
type=None
valueclass=None
def __init__(self, name=None, value=None):
Chunk.__init__(self)
self.name = String(name)
self.value = self.valueclass(value)
def deSerializeFile(self, file):
Chunk.deSerializeFile(self, file)
self.name = String.fromFile(file)
self.value = self.valueclass.fromFile(file)
def toPSFasc(self, prec=9):
return self.name.toPSFasc() + " " + self.value.toPSFasc(prec=prec)
def __repr__(self):
return self.__class__.__name__+"("+str(self.name)+","+str(self.value)+")"
class PropertyString(Property):
type=33
valueclass=String
class PropertyUInt(Property):
type=34
valueclass=UInt32
class PropertyFloat64(Property):
type=35
valueclass=Float64
PropertyClasses = [PropertyString, PropertyUInt, PropertyFloat64]
TYPEFLOATDOUBLE = 11
TYPEINTBYTE = 1
TYPECOMPLEXDOUBLE = 12
TYPESTRUCT = 16
TYPESTRING = 2 ## Incorrect number
TYPEARRAY = 3 ## Incorrect number
TYPEINTLONG = 5
class DataTypeDef(Chunk):
"""Class representing data type of waveform data"""
type=16
ClassDict = {
TYPEFLOATDOUBLE: Float64,
TYPEINTBYTE: Int8,
TYPECOMPLEXDOUBLE: ComplexFloat64,
TYPESTRING: String,
TYPEARRAY: Array,
TYPEINTLONG: Int32
}
PSFASCDict = {
TYPEFLOATDOUBLE: "FLOAT DOUBLE",
TYPEINTBYTE: "INT BYTE",
TYPECOMPLEXDOUBLE: "COMPLEX DOUBLE",
TYPESTRING: "STRING *",
TYPEINTLONG: "INT LONG"
}
def __init__(self, psf, id=0, name=None, datatypeid=0, structdef=None):
Chunk.__init__(self, psf, type)
self.id = id
self.name = name
self.datatypeid = datatypeid
self.structdef = structdef
self.properties = []
def getDataObj(self):
"""Get a data object described by the DataType"""
if self.datatypeid == TYPESTRUCT:
return self.structdef.getDataObj()
elif self.datatypeid == TYPEARRAY:
return Array(extarg=(self.ClassDict[self.structdef[0]], self.structdef[1]))
else:
return self.ClassDict[self.datatypeid](extarg=self.structdef)
def toPSFasc(self, prec=None):
r=self.name.toPSFasc(prec) + " "
if self.datatypeid == TYPESTRUCT:
r+=self.structdef.toPSFasc(prec)
elif self.datatypeid == TYPEARRAY:
r+="ARRAY ( %s ) "%str(self.structdef[1])+self.PSFASCDict[self.structdef[0]]
else:
r+= self.PSFASCDict[self.datatypeid]
if len(self.properties)>0:
r+=" PROP(\n"
r+="\n".join([prop.toPSFasc(prec) for prop in self.properties])
r+="\n)"
return r
def getDataSize(self):
if self.datatypeid == TYPESTRUCT:
return self.structdef.getDataSize()
else:
return self.ClassDict[self.datatypeid].size
def deSerializeFile(self, file):
start = file.tell()
Chunk.deSerializeFile(self, file)
self.id = UInt32.fromFile(file)
self.name = String.fromFile(file)
arraytype = UInt32.fromFile(file)
self.datatypeid = UInt32.fromFile(file)
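        # A non-zero array flag means this type is an array: the word already read into
        # datatypeid is the array length and the next word holds the element type id.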
if arraytype != 0:
self.datatypeid, self.structdef = TYPEARRAY, (UInt32.fromFile(file), self.datatypeid)
if self.datatypeid == 16:
self.structdef = StructDef.fromFile(file, self.psf)
# Read possible property objects that belongs to the type by peeking ahead
while True:
oldpos = file.tell()
try:
prop = readChunk(self.psf, file, expectedclasses=PropertyClasses)
self.properties.append(prop)
except ValueError:
file.seek(oldpos)
break
def __repr__(self):
return self.__class__.__name__+"("+str({"name":self.name,"id":"0x%x"%self.id, "datatypeid":self.datatypeid,
"properties":self.properties})+")"
class DataTypeRef(Chunk):
type=16
"""Class representing link to data type"""
def __init__(self, psf, type=None):
Chunk.__init__(self, psf, type)
self.id = None
self.name = None
self.datatypeid = 0
self.properties = []
def getDataObj(self):
"""Get a data object described by the DataType"""
return self.psf.types.idMap[self.datatypeid].getDataObj()
def toPSFasc(self, prec=None):
r=self.name.toPSFasc(prec) + " "
r+=self.psf.types.idMap[self.datatypeid].name.toPSFasc()
if len(self.properties)>0:
r+=" PROP(\n"
r+="\n".join([prop.toPSFasc(prec) for prop in self.properties])
r+="\n)"
return r
def getDataSize(self):
return self.psf.types.idMap[self.datatypeid].getDataSize()
def deSerializeFile(self, file):
start = file.tell()
Chunk.deSerializeFile(self, file)
self.id = UInt32.fromFile(file)
self.name = String.fromFile(file)
self.datatypeid = UInt32.fromFile(file)
assert(self.datatypeid != 0)
# Read possible property objects that belongs to the type by peeking ahead
while True:
oldpos = file.tell()
try:
prop = readChunk(self.psf, file, expectedclasses=PropertyClasses)
self.properties.append(prop)
except ValueError:
file.seek(oldpos)
break
def __repr__(self):
return self.__class__.__name__+"("+str({"name":self.name,"id":"0x%x"%self.id, "datatypeid":self.datatypeid,
"properties":self.properties})+")"
class StructDef(PSFData):
"""Class representing struct definition"""
@classmethod
def fromFile(cls, file, psf):
obj = cls()
obj.deSerializeFile(file, psf)
return obj
def __init__(self):
self.children = []
def getDataObj(self):
return Struct(self)
def getDataSize(self):
return sum([child.getDataSize() for child in self.children])
def toPSFasc(self, prec=None):
s="STRUCT(\n"
for child in self.children:
s+=child.toPSFasc(prec)+"\n"
s+=")"
return s
def deSerializeFile(self, file, psf):
while True:
chunk = readChunk(psf, file, expectedclasses=[DataTypeDef, EndOfStructDef])
if isinstance(chunk, EndOfStructDef):
break
else:
self.children.append(chunk)
def __repr__(self):
return self.__class__.__name__ + "(\n"+\
"\n".join(map(str,self.children))+\
")\n"
class SimpleContainer(Chunk):
type = 21
def __init__(self, psf, type=None, childrenclslist=None, childrenclsignore=None):
Chunk.__init__(self, psf, type)
self.section = None
self.children = []
self.childrenclslist = childrenclslist
self.childrenclsignore = childrenclsignore
self.endpos = None
def getChunks(self):
return self.children
def deSerializeFile(self, file):
Chunk.deSerializeFile(self, file)
self.endpos = UInt32.fromFile(file).value
self.children = []
while file.tell() < self.endpos:
chunk = readChunk(self.psf, file, expectedclasses=self.childrenclslist+self.childrenclsignore)
if chunk.__class__ in self.childrenclslist:
self.children.append(chunk)
# Read trailing bytes
if self.endpos-file.tell() != 0:
warning("%d trailing bytes in %s"%(self.endpos-file.tell(), self.__class__.__name__))
self.tail = file.read(self.endpos-file.tell())
file.seek(self.endpos)
def __repr__(self):
s=""
if self.fileoffset:
s+= "0x%x"%self.fileoffset+ ":"
s+= self.__class__.__name__ + "(" + str(self.type) +")"
if self.endpos and self.fileoffset:
s+= "size="+str(self.endpos-self.fileoffset)
s+= "\n" + "\n".join([indent(s) for s in map(str,self.children)]) + "\n"
return s
class Container22(Chunk):
type=22
def __init__(self, psf, type=None, n=None, childrenclslist=None):
Chunk.__init__(self, psf, 22)
self.section = None
self.children = []
self.childrenclslist = childrenclslist
self.endpos = None
def getChunks(self):
return self.children
def deSerializeFile(self, file):
Chunk.deSerializeFile(self, file)
self.endpos = UInt32.fromFile(file).value # Save end position of Container
self.children = []
while file.tell() < self.endpos:
chunk = readChunk(self.psf, file,
expectedclasses=self.childrenclslist)
self.children.append(chunk)
# Read trailing bytes
if self.endpos-file.tell() != 0:
warning("%d trailing bytes in %s"%(self.endpos-file.tell(), self.__class__.__name__))
self.tail = file.read(self.endpos-file.tell())
file.seek(self.endpos)
def __repr__(self):
return "0x%x"%self.fileoffset +":" + self.__class__.__name__ +\
"(" + str(self.type) +")" + "\n" + "\n".join([indent(s) for s in map(str,self.children)]) + "\n"
class ZeroPad(Chunk):
type = 20
def deSerializeFile(self, file):
Chunk.deSerializeFile(self, file)
size = UInt32.fromFile(file).value
self.endpos = file.tell() + size
file.seek(self.endpos)
class HashTable(Chunk):
type = 19
"""Class representing offset of trace data"""
def __init__(self, psf, n=None):
Chunk.__init__(self, psf, type)
self.children = []
self.extra=[]
def deSerializeFile(self, file):
Chunk.deSerializeFile(self, file)
startpos = file.tell()
size = UInt32.fromFile(file)
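        # The table body is a list of (id, offset) pairs, 8 bytes per entry.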
for i in range(0, size/8):
id = UInt32.fromFile(file)
offset = UInt32.fromFile(file)
self.children.append((id, offset))
def __repr__(self):
return self.__class__.__name__+"\n"+ "\n".join([" 0x%x: 0x%x"%(k,v.value) for k,v in self.children])+")"
class HashTableTrace(Chunk):
type = 19
"""Class representing offset of trace data"""
def __init__(self, psf):
Chunk.__init__(self, psf, type)
self.children = []
def deSerializeFile(self, file):
Chunk.deSerializeFile(self, file)
self.size = UInt32.fromFile(file)
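        # Each entry is 16 bytes: id, offset and two extra data words.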
for i in range(0, self.size.value/16):
id = UInt32.fromFile(file)
offset = UInt32.fromFile(file)
data1 = UInt32.fromFile(file).value
data2 = UInt32.fromFile(file).value
self.children.append((id,offset,data1,data2))
def __repr__(self):
return self.__class__.__name__+"\n"+ "\n".join([" %s: 0x%x 0x%x 0x%x"%(pack(">I",k.value),v.value,d1,d2) for k,v,d1,d2 in self.children])+")"
class HashContainer(Chunk):
type=21
hashclass = HashTable
def __init__(self, psf, childrenclslist=None, childrenclsignore=None):
Chunk.__init__(self, psf, type)
self.section = None
self.children = []
self.childrenclslist = childrenclslist
self.childrenclsignore = childrenclsignore
self.endpos = None
self.hashtable = None
def __len__(self):
return len(self.children)
def getChunks(self):
return self.children
def deSerializeFile(self, file):
Chunk.deSerializeFile(self, file)
self.endpos = UInt32.fromFile(file).value
self.children = []
self.data = Container22(self.psf,
childrenclslist=self.childrenclslist)
self.data.deSerializeFile(file)
self.hashtable = self.hashclass(self.psf)
self.hashtable.deSerializeFile(file)
# Copy children reference from data
self.children = self.data.children
self.section = UInt32.fromFile(file)
# Read trailing bytes
if self.endpos-file.tell() != 0:
warning("%d trailing bytes in %s"%(self.endpos-file.tell(), self.__class__.__name__))
self.tail = file.read(self.endpos-file.tell())
file.seek(self.endpos)
def __repr__(self):
s=""
if self.fileoffset:
s += "0x%x"%self.fileoffset +":"
s += self.__class__.__name__ + "(" + str(self.type) +")"
if self.endpos:
s+=" size="+str(self.endpos-self.fileoffset) + "\n"
s += "\n".join([indent(s) for s in map(str,(self.children, self.hashtable))]) + "\n"
return s
class HeaderSection(SimpleContainer):
type=21
def __init__(self, psf, n=None):
SimpleContainer.__init__(self,psf, childrenclslist=PropertyClasses,
childrenclsignore=NextSectionClasses)
self.properties = {}
def addProperty(self, prop):
"""Add property to header"""
self.children.append(prop)
self.properties[prop.name] = prop.value
def deSerializeFile(self, file):
SimpleContainer.deSerializeFile(self, file)
# Read header properties
self.properties = {}
for prop in self.children:
self.properties[prop.name] = prop.value
def toPSFasc(self, prec=None):
r="HEADER\n"
r+='"PSFversion" "1.00"\n'
r+="\n".join([child.toPSFasc(prec) for child in self.children \
if not child.name.value[0:3].upper() == 'PSF'])
return r
class SweepSection(SimpleContainer):
type=21
def __init__(self, psf):
SimpleContainer.__init__(self, psf, childrenclslist=[DataTypeRef],
childrenclsignore=NextSectionClasses)
def deSerializeFile(self, file):
SimpleContainer.deSerializeFile(self, file)
# Read header properties
self.idMap = {}
for chunk in self.children:
self.idMap[chunk.id] = chunk
def getSweep(self, id):
return self.idMap[id]
def getNames(self):
return tuple([str(child.name) for child in self.children])
def toPSFasc(self, prec=None):
r="SWEEP\n"
r+="\n".join([child.toPSFasc(prec) for child in self.children])
return r
class TypeSection(HashContainer):
def __init__(self, psf):
HashContainer.__init__(self, psf, childrenclslist=[DataTypeDef],
childrenclsignore=NextSectionClasses)
self.idMap = {}
self.nameMap = {}
def addType(self, type):
type.id = self.psf.allocId()
self.children.append(type)
self.idMap[type.id] = type
self.nameMap[type.name] = type
def getType(self, id):
return self.idMap[id]
def getTypeByName(self, name):
return self.nameMap[name]
def deSerializeFile(self, file):
HashContainer.deSerializeFile(self, file)
# Read header properties
self.idMap = {}
for chunk in self.children:
self.idMap[chunk.id] = chunk
            self.nameMap[chunk.name] = chunk
def toPSFasc(self, prec=None):
r="TYPE\n"
r+="\n".join([child.toPSFasc(prec) for child in self.children])
return r
class TraceSection(HashContainer):
hashclass = HashTableTrace
def __init__(self, psf):
HashContainer.__init__(self, psf, childrenclslist=[GroupDef, DataTypeRef])
self.idMap = {}
self.nameIndex = {}
def deSerializeFile(self, file):
HashContainer.deSerializeFile(self, file)
self.idMap = {}
for index, chunk in enumerate(self.children):
self.idMap[chunk.id] = chunk
if isinstance(chunk, GroupDef):
self.nameIndex.update(dict([(par, (index,)+value) for par,value in chunk.getNameIndex().items()]))
else:
self.nameIndex[chunk.name] = (index,)
def getNameIndex(self):
return self.nameIndex
def toPSFasc(self, prec=None):
r="TRACE\n"
r+="\n".join([child.toPSFasc(prec) for child in self.children])
return r
def getTraceNames(self):
result = []
for trace in self.children:
if isinstance(trace,GroupDef):
result += trace.getNames()
else:
result.append(trace.name)
return tuple(map(str, result))
def getTraceIndexByName(self, name):
"""Returns an index to the given trace name
        The index is hierarchical, so if the traces are divided into 2 groups the index (0,1) means
child 1 of group 0
>>> psf=PSFReader('./test/psf/srcSweep')
>>> psf.open()
>>> psf.traces.getTraceIndexByName("VIN")
(0, 1)
>>> psf=PSFReader('./test/resultdirs/parsweep2/C=1e-12,R=1e-12/psf/ac.ac')
>>> psf.open()
>>> psf.traces.getTraceIndexByName("net3")
(0,)
"""
return self.nameIndex[name]
class ValuesSectionNonSweep(HashContainer):
type=21
def __init__(self, psf):
HashContainer.__init__(self, psf, childrenclslist=[NonSweepValue])
self.idMap={}
self.nameMap={}
def addValue(self, value):
value.id = self.psf.allocId()
if not isinstance(value, NonSweepValue):
raise ValueError("Value should be a NonSweepValue")
self.idMap[value.id] = value
self.nameMap[value.name] = value
self.children.append(value)
def deSerializeFile(self, file):
HashContainer.deSerializeFile(self, file)
for child in self.children:
self.nameMap[child.name] = child
def getValuePropertiesByName(self, name):
return dict([(prop.name, prop.value) for prop in self.nameMap[name].properties])
def getValueByName(self, name):
return self.nameMap[name].getValue()
def getValueNames(self):
return tuple([child.name for child in self.children])
def toPSFasc(self, prec=None):
r="VALUE\n"
r+="\n".join([child.toPSFasc(prec) for child in self.children])
return r
class ValuesSectionSweep(SimpleContainer):
type=21
def deSerializeFile(self, file):
Chunk.deSerializeFile(self, file)
self.endpos = UInt32.fromFile(file).value
windowedsweep = self.psf.header.properties.has_key('PSF window size')
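        # Windowed sweeps start with a zero-padding chunk before the first window of values.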
if windowedsweep:
el = ZeroPad(self.psf)
el.deSerializeFile(file)
isweep=0
while isweep < self.psf.header.properties['PSF sweep points']:
if windowedsweep:
value = SweepValueWindowed(self.psf)
else:
value = SweepValueSimple(self.psf)
isweep += value.deSerializeFile(file, n=self.psf.header.properties['PSF sweep points']-isweep)
self.children.append(value)
self.section = UInt32.fromFile(file)
# Read trailing bytes
if self.endpos-file.tell() != 0:
warning("%d trailing bytes in %s"%(self.endpos-file.tell(), self.__class__.__name__))
self.tail = file.read(self.endpos-file.tell())
file.seek(self.endpos)
def getSweepParamValues(self):
return reduce(operator.__add__, [child.getSweepParamValues() for child in self.children])
def getValueNames(self):
return self.psf.traces.getTraceNames()
def __len__(self):
return len(self.psf.traces)
def getValueByName(self, name):
windowedsweep = self.psf.header.properties.has_key('PSF window size')
index = self.psf.traces.getTraceIndexByName(name)
result = []
for child in self.children:
obj=child
for i in index:
obj = obj.children[i]
# If windowed sweep, each child will be a list of values in the window
if windowedsweep:
result += [v.getValue() for v in obj]
else:
result.append(obj.getValue())
return numpy.array(result)
def toPSFasc(self, prec=None):
r="VALUE\n"
r+="\n".join([child.toPSFasc(prec) for child in self.children])
return r
class NonSweepValue(Chunk):
type=16
def __init__(self, psf, id=None, typeid=None, name=None, value=None):
Chunk.__init__(self, psf, type)
self.id = id
self.name = name
self.typeid = typeid
if typeid:
self.valuetype = self.psf.types.idMap[self.typeid]
else:
self.valuetype = None
if value:
self.value = value
elif self.valuetype:
self.value = self.valuetype.getDataObj()
else:
self.value = None
self.properties = []
def getValue(self):
return self.value.getValue()
def setValue(self, value):
self.value.setValue(value)
def deSerializeFile(self, file):
startpos = file.tell()
Chunk.deSerializeFile(self, file)
self.id = UInt32.fromFile(file)
self.name = String.fromFile(file)
self.typeid = UInt32.fromFile(file)
assert(self.typeid != 0)
self.valuetype = self.psf.types.idMap[self.typeid]
self.value = self.valuetype.getDataObj()
self.value.deSerializeFile(file)
# Read possible property objects that belongs to the type by peeking ahead
while True:
oldpos = file.tell()
try:
prop = readChunk(self.psf, file, expectedclasses=PropertyClasses)
self.properties.append(prop)
except ValueError:
file.seek(oldpos)
break
def toPSFasc(self, prec=None):
r = self.name.toPSFasc(prec) + " " + self.valuetype.name.toPSFasc(prec) + " " + self.value.toPSFasc(prec)
if len(self.properties)>0:
r+=" PROP(\n"
r+="\n".join([prop.toPSFasc(prec) for prop in self.properties])
r+="\n)"
return r
def __repr__(self):
return self.__class__.__name__+"("+str({"name":self.name, "id":"0x%x"%self.id, "typeid":"0x%x"%self.typeid,
"properties":self.properties,"value":self.value})+")"
class SweepValue(Chunk):
"""Class representing waveform data"""
type = 16
def __init__(self, psf, type=None):
Chunk.__init__(self, psf, type)
self.id = None
self.linktypeid = UInt32()
self.datatypeid = UInt32()
self.paramtype = None
self.paramvalue = None
self.children = []
self.properties = []
def deSerializeFile(self, file, n=None):
pass
def getSweepParamValues(self):
pass
def __len__(self):
return len(self.children)
def __repr__(self):
return self.__class__.__name__ + "(" + str(self.paramtype.name) + "=" + str(self.paramvalue) +","+ \
"children="+str(self.children) +")\n"
class SweepValueSimple(SweepValue):
def deSerializeFile(self, file, n=None):
Chunk.deSerializeFile(self, file)
self.paramtypeid = UInt32.fromFile(file)
self.paramtype = self.psf.sweeps.getSweep(self.paramtypeid)
self.paramvalue = self.paramtype.getDataObj()
self.paramvalue.deSerializeFile(file)
for datatype in self.psf.traces.children:
datatypeid = UInt32.fromFile(file)
if datatypeid in (17,16):
valuetypeid = UInt32.fromFile(file)
if valuetypeid != datatype.id:
## Unexpected value type id found
## This is probably because of missing trace values
## Undo read of datatypeid, valuetypeid and break out of loop and
file.seek(-2*UInt32.size, 1)
break
value = datatype.getDataObj()
value.deSerializeFile(file)
self.children.append(value)
elif datatypeid == 15:
## End of section
file.seek(-UInt32.size, 1)
break
else:
raise Exception("Datatypeid unknown 0x%x" % datatypeid)
return 1
def getSweepParamValues(self):
return [self.paramvalue.getValue()]
def toPSFasc(self, prec=None):
r=self.paramtype.name.toPSFasc(prec) + " " +self.paramvalue.toPSFasc(prec)+"\n"
r+="\n".join([valuetype.name.toPSFasc(prec) + " " + value.toPSFasc(prec) \
for valuetype, value in zip(self.psf.traces.children, self.children)])
return r
class SweepValueWindowed(SweepValue):
def deSerializeFile(self, file, n=None):
bufferstart = file.tell()
Chunk.deSerializeFile(self, file)
self.paramtypeid = UInt32.fromFile(file)
assert(len(self.psf.sweeps.children) == 1)
self.paramtype=self.psf.sweeps.children[0]
self.paramvalue = []
# Get sweep parameter values
paramvaluesize = self.paramtype.getDataSize()
windowsize = self.psf.header.properties['PSF window size'].value
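        # Values are stored in fixed-size windows; only the part of the current window
        # that is left can hold sweep parameter values, so clamp n to that count.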
leftinwindow = (file.tell()//windowsize + 1)*windowsize - file.tell()
        windowlen = leftinwindow//paramvaluesize
if n > windowlen:
n = windowlen
for j in xrange(n):
paramvalue = self.paramtype.getDataObj()
paramvalue.deSerializeFile(file)
if j < n:
self.paramvalue.append(paramvalue)
# Get trace values
for trace in self.psf.traces.children:
value = trace.getDataObj()
value.deSerializeFile(file, count=n,
windowsize=self.psf.header.properties['PSF window size'].value)
self.children.append(value)
# Skip trailing padding bytes
padsize = int((self.psf.header.properties['PSF buffer size'] - (file.tell()-bufferstart))% \
self.psf.header.properties['PSF buffer size'])
file.seek(padsize, 1)
return n
def getSweepParamValues(self):
return [v.getValue() for v in self.paramvalue]
def toPSFasc(self, prec=None):
r=''
for i, paramvalue in enumerate(self.paramvalue):
r+=self.paramtype.name.toPSFasc(prec) + " " + paramvalue.toPSFasc(prec) + "\n"
r+="\n".join([trace.name.toPSFasc(prec) + " " + value.toPSFasc(prec=prec, index=i) \
for trace,value in zip(self.psf.traces.children, self.children)])
if i < len(self.paramvalue)-1:
r+="\n"
return r
class GroupData(PSFData):
def __init__(self, groupdef):
PSFData.__init__(self)
self.groupdef = groupdef
self.children = []
def deSerializeFile(self, file, count=None, windowsize=None):
for element in self.groupdef.children:
if count==None:
value = element.getDataObj()
value.deSerializeFile(file)
self.children.append(value)
else:
valuearray=[]
# If a window is used in the PSF file, the entire window is stored
# and the data is aligned to the end of the window. So we need
# to skip window size - data size
file.seek(int(windowsize - count*element.getDataSize()), 1)
for i in xrange(0,count):
value = element.getDataObj()
value.deSerializeFile(file)
valuearray.append(value)
self.children.append(valuearray)
def toPSFasc(self, prec=None, index=None):
if index != None:
return "\n".join([v[index].toPSFasc(prec) for v in self.children])
else:
return "\n".join([v.toPSFasc(prec) for v in self.children])
def getSize(self):
return self.groupdef.getDataSize()
def __repr__(self):
return "GroupData" + "\n" + "\n".join([indent(s) for s in map(repr,self.children)]) + "\n"
class GroupDef(Chunk):
type=17
"""Class representing group of traces"""
def __init__(self, psf):
Chunk.__init__(self, psf)
self.children=[]
self.datasize=None
def getDataObj(self):
return GroupData(self)
def deSerializeFile(self, file):
Chunk.deSerializeFile(self, file)
self.id = UInt32.fromFile(file)
self.name = String.fromFile(file)
self.nchildren = UInt32.fromFile(file)
# Read children
self.children = []
self.datasize = 0
for i in range(0, self.nchildren):
child = DataTypeRef(self.psf)
child.deSerializeFile(file)
self.children.append(child)
self.datasize += child.getDataSize()
def getNameIndex(self):
return dict([(v.name, (i,)) for i,v in enumerate(self.children)])
def toPSFasc(self, prec=None):
s=self.name.toPSFasc(prec) + " GROUP %d\n"%len(self.children)
s+="\n".join([child.toPSFasc(prec) for child in self.children])
return s
def getDataSize(self):
return self.datasize
def getNames(self):
return [str(child.name) for child in self.children]
def __repr__(self):
return "0x%x"%self.fileoffset +":" + self.__class__.__name__+ "(id=0x%x"%self.id+", nchildren=%d"%self.nchildren+")\n" + "\n".join([indent(s) for s in map(str,self.children)]) + "\n"
class UnknownChunk(Exception):
def __init__(self, chunktype):
self.type = chunktype
def __str__(self):
return "Unknown chunk of type: %d"%self.type
class InvalidChunk(Exception):
def __init__(self, chunk):
self.chunk = chunk
def __str__(self):
return "Invalid %s"%(self.chunk.__class__.__name__)
class IncorrectChunk(Exception):
def __init__(self, type, expectedtype):
self.type = type
self.expectedtype = expectedtype
def __str__(self):
return "Incorrect chunk type %d (should be %d)"%(self.type, self.expectedtype)
class LastValue(Exception):
pass
def readChunk(psf, file, expectedclasses=None):
type = UInt32.fromFile(file)
file.seek(-4, 1) # Rewind one word since the type will be read again by the deSerializeFile function
if expectedclasses:
if not type in [cls.type for cls in expectedclasses]:
raise ValueError("Unexpected type %d, not in "%type + str([cls.type for cls in expectedclasses]))
for cls in expectedclasses:
if type == cls.type:
chunk = cls(psf)
else:
raise Exception("Use expectedclasses!")
if type == 21:
chunk = Section(psf)
elif type == 20:
chunk = ZeroPad(psf)
elif type == 22:
chunk = Container22(psf, type, n=n)
elif type == 33:
chunk = PropertyString(psf)
elif type == 34:
chunk = PropertyUInt(psf)
elif type == 35:
chunk = PropertyFloat64(psf)
elif type == 16:
chunk = DataTypeDef(psf,type)
elif type == 17:
chunk = GroupDef(psf)
elif type == 19:
chunk = HashTable(psf, n=n)
elif type in (1,2,3,4):
file.seek(4,1)
return None
else:
warning("Unknown chunk %d"%type)
raise UnknownChunk(type)
chunk.deSerializeFile(file)
return chunk
class PSFReader(object):
def __init__(self, filename=None, asc=None):
self.header = None
self.types = TypeSection(self)
self.sweeps = None
self.traces = None
self.lastid = 0x1000
self.verbose = False
self.filename = filename
self.file = None
self.values = None
self.asc = asc
def open(self):
"""Open a PSF file and read its headers.
Example:
Trying to open a valid psf file
>>> psf=PSFReader('./test/psf/srcSweep')
>>> psf.open()
"""
if self.asc == None:
self.asc = False
if not self.asc:
self.file = open(self.filename, "rb")
if self.validate():
self.deSerializeFile(self.file)
else:
raise PSFInvalid("Invalid PSF file")
else:
newpsfobj = psfasc.parse("psfasc", open(self.filename).read())
self.header = newpsfobj.header
self.types = newpsfobj.types
self.sweeps = newpsfobj.sweeps
self.traces = newpsfobj.traces
self.values = newpsfobj.values
self.lastid = newpsfobj.lastid
self.verbose = newpsfobj.verbose
def validate(self):
"""Check if the PSF file is valid.
Returns True if valid, False otherwise
>>> psf=PSFReader('./test/psf/srcSweep')
>>> psf.validate()
True
>>> psf=PSFReader('./test/psfasc/srcSweep.asc')
>>> psf.validate()
False
"""
if self.file == None:
file = open(self.filename, "rb")
else:
file = self.file
# Read Clarissa signature
file.seek(-4-8,2)
clarissa = file.read(8)
return clarissa == "Clarissa"
def getNSweepPoints(self):
"""Returns number of sweeps. 0 if not swept.
>>> psf=PSFReader('./test/psf/srcSweep')
>>> psf.open()
>>> psf.getNSweepPoints()
4
"""
if self.file == None:
ValueError("Please open the PSF file first")
return self.header.properties['PSF sweep points']
def getNSweeps(self):
"""Returns the number of nested sweeps
>>> psf=PSFReader('./test/psf/srcSweep')
>>> psf.open()
>>> psf.getNSweeps()
1
"""
if self.file == None:
ValueError("Please open the PSF file first")
return self.header.properties['PSF sweeps']
def __len__(self):
return len(self.values)
def getValueNames(self):
"""Returns a tuple of the names of the traces
>>> psf=PSFReader('./test/psf/srcSweep')
>>> psf.getValueNames()
>>> psf.open()
>>> psf.getValueNames()
('VOUT', 'VIN', 'R0')
>>> psf=PSFReader('./test/resultdirs/simple/opBegin')
>>> psf.open()
>>> psf.getValueNames()
('R0', 'V1', 'V0', 'E0', 'VIN', 'NET9', 'VOUT')
"""
if self.values:
return self.values.getValueNames()
def getSweepParamNames(self):
return self.sweeps.getNames()
def getSweepParamValues(self, dim=0):
"""Returns a numpy.array of sweep parameter values for sweep dimension dim.
>>> psf=PSFReader('./test/psf/srcSweep')
>>> psf.open()
>>> psf.getSweepParamValues(0)
array([ 1., 2., 3., 4.])
windowed result
>>> psf=PSFReader('./test/psf/timeSweep')
>>> psf.open()
>>> psf.getSweepParamValues(0)[:3]
array([ 0.00000000e+00, 2.00000000e-11, 5.33333333e-11])
"""
return numpy.array(self.values.getSweepParamValues())
def getValuePropertiesByName(self, name):
"""Returns the properties associated with value
>>> psf=PSFReader('./test/psf/opBegin')
>>> psf.open()
>>> psf.getValuePropertiesByName("XIRXRFMIXTRIM0.XM1PDAC1.XMN.MAIN")["Region"]
'subthreshold'
"""
return self.values.getValuePropertiesByName(name)
def getValuesByName(self, name):
"""Returns a numpy.array of trace values for swept results and a scalar for non swept.
Example:
swept psf file
>>> psf=PSFReader('./test/psf/srcSweep')
>>> psf.open()
>>> psf.getValuesByName("VOUT")
array([-6., -4., -2., 0.])
>>> psf.getValuesByName("VIN")
array([ 1., 2., 3., 4.])
swept psf with complex numbers
>>> psf=PSFReader('./test/psf/frequencySweep')
>>> psf.open()
>>> res = psf.getValuesByName("ANT_CM")
>>> len(res)
123
>>> res[:3]
array([ 0.6+0.j, 0. +0.j, 0. +0.j])
swept windowed psf file
>>> psf=PSFReader('./test/psf/timeSweep')
>>> psf.open()
>>> psf.getValuesByName("INP")[0:3]
array([ 0.6 , 0.62486899, 0.66211478])
non-swept psf file
>>> psf=PSFReader('./test/psf/dcOpInfo.info')
>>> psf.open()
>>> psf.getValuesByName("IREG21U_0.MP5.b1")['betadc']
4.7957014499434756
swept psf file withouth groups
>>> psf=PSFReader('./test/resultdirs/parsweep/C=1e-12,R=1e-12/psf/ac.ac')
>>> psf.open()
>>> psf.getValuesByName("net3")
array([ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j,
0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j,
0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j,
0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j,
0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j,
0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j,
0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j,
0.+0.j, 0.+0.j])
"""
return self.values.getValueByName(name)
def nTraces(self):
"""Returns number of traces
>>> psf=PSFReader('./test/psf/srcSweep')
>>> psf.open()
>>> psf.nTraces()
3
"""
if self.file == None:
ValueError("Please open the PSF file first")
return self.header.properties['PSF traces']
def allocId(self):
self.lastid+=1
return self.lastid-1
def info(self):
s="Number of sweeps: %d\n"%self.getNSweeps()
if self.getNSweeps() > 0:
s+="Number of sweep points: %d\n"%self.getNSweepPoints()
s+="Number of traces: %d"%self.nTraces()
return s
def updateHeader(self):
if self.sweeps:
sweeps = len(self.sweeps.children)
else:
sweeps=0
self.header.addProperty(PropertyUInt("PSF sweeps", sweeps))
def deSerializeFile(self, file):
# Find filesize
file.seek(0,2)
filesize = file.tell()
# Last word contains the size of the data
file.seek(-4,2)
datasize = UInt32.fromFile(file).value
if self.verbose:
print "Total data size: ",datasize
# Read Clarissa signature
file.seek(-4-8,2)
clarissa = file.read(8)
if not clarissa == "Clarissa":
raise ValueError("Clarissa signature not found")
# Read section index table
sectionoffsets = {}
file.seek(-4-8-8,2)
pos = file.tell()
sectionnums = []
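        # Walk backwards through (section number, offset) pairs until the data area is reached.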
while file.tell() >= datasize:
sectionnum = UInt32.fromFile(file)
sectionnums.insert(0,sectionnum.value)
offset = UInt32.fromFile(file)
sectionoffsets[sectionnum] = offset
pos -= 8
file.seek(pos)
offsets = [sectionoffsets[secnum] for secnum in sectionnums]
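        # A section's size is the next section's offset (or the end of data) minus its own offset.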
sizes = map(operator.sub, offsets[1:]+[datasize], offsets)
sectionsizes = dict(zip(sectionnums, sizes))
if self.verbose:
print sectionoffsets, sectionsizes
file.seek(0)
self.unk1 = UInt32.fromFile(file)
if self.verbose:
print "First word: 0x%x"%self.unk1
# Load headers
file.seek(int(sectionoffsets[0]))
self.header = HeaderSection(self)
self.header.deSerializeFile(file)
if self.verbose:
print "HEADER"
print self.header
if sectionoffsets.has_key(1):
file.seek(int(sectionoffsets[1]))
self.types.deSerializeFile(file)
if self.verbose:
print "TYPE"
print self.types
if sectionoffsets.has_key(2):
file.seek(int(sectionoffsets[2]))
self.sweeps = SweepSection(self)
self.sweeps.deSerializeFile(file)
if self.verbose:
print "SWEEPS"
print self.sweeps
if sectionoffsets.has_key(3):
file.seek(int(sectionoffsets[3]))
self.traces = TraceSection(self)
self.traces.deSerializeFile(file)
if sectionoffsets.has_key(4):
file.seek(int(sectionoffsets[4]))
# Load data
if self.sweeps:
self.values = ValuesSectionSweep(self)
else:
self.values = ValuesSectionNonSweep(self)
self.values.deSerializeFile(file)
def printme(self):
print "HEADER"
print self.header
print "TYPES"
print self.types
if self.sweeps:
print "SWEEP"
print self.sweeps
if self.traces:
print "TRACE"
print self.traces
print "VALUES"
print self.values
def toPSFasc(self, prec=None):
"""Export to PSF ascii"""
sections = [self.header.toPSFasc(prec), self.types.toPSFasc(prec)]
if self.sweeps:
sections.append(self.sweeps.toPSFasc(prec))
if self.traces:
sections.append(self.traces.toPSFasc(prec))
if self.values:
sections.append(self.values.toPSFasc(prec))
r="\n".join(sections) + "\n"
r+="END\n"
return r
def __repr__(self):
return "\n".join(map(str, (self.header, self.types, self.sweeps, self.traces, self.values)))
if __name__ == "__main__":
import doctest
doctest.testmod()
| mit |
jemmyw/ansible | v1/ansible/runner/lookup_plugins/inventory_hostnames.py | 173 | 1756 | # (c) 2012, Michael DeHaan <[email protected]>
# (c) 2013, Steven Dossett <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from ansible.utils import safe_eval
import ansible.utils as utils
import ansible.errors as errors
import ansible.inventory as inventory
def flatten(terms):
ret = []
for term in terms:
if isinstance(term, list):
ret.extend(term)
else:
ret.append(term)
return ret
class LookupModule(object):
def __init__(self, basedir=None, **kwargs):
self.basedir = basedir
if 'runner' in kwargs:
self.host_list = kwargs['runner'].inventory.host_list
else:
raise errors.AnsibleError("inventory_hostnames must be used as a loop. Example: \"with_inventory_hostnames: \'all\'\"")
def run(self, terms, inject=None, **kwargs):
terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
if not isinstance(terms, list):
raise errors.AnsibleError("with_inventory_hostnames expects a list")
return flatten(inventory.Inventory(self.host_list).list_hosts(terms))
| gpl-3.0 |
when30/namebench | nb_third_party/dns/rdtypes/IN/NAPTR.py | 248 | 4889 | # Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import struct
import dns.exception
import dns.name
import dns.rdata
def _write_string(file, s):
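    # DNS character-strings are length-prefixed: one length octet followed by at most 255 bytes.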
l = len(s)
assert l < 256
byte = chr(l)
file.write(byte)
file.write(s)
class NAPTR(dns.rdata.Rdata):
"""NAPTR record
@ivar order: order
@type order: int
@ivar preference: preference
@type preference: int
@ivar flags: flags
@type flags: string
@ivar service: service
@type service: string
@ivar regexp: regular expression
@type regexp: string
@ivar replacement: replacement name
@type replacement: dns.name.Name object
@see: RFC 3403"""
__slots__ = ['order', 'preference', 'flags', 'service', 'regexp',
'replacement']
def __init__(self, rdclass, rdtype, order, preference, flags, service,
regexp, replacement):
super(NAPTR, self).__init__(rdclass, rdtype)
self.order = order
self.preference = preference
self.flags = flags
self.service = service
self.regexp = regexp
self.replacement = replacement
def to_text(self, origin=None, relativize=True, **kw):
replacement = self.replacement.choose_relativity(origin, relativize)
return '%d %d "%s" "%s" "%s" %s' % \
(self.order, self.preference,
dns.rdata._escapify(self.flags),
dns.rdata._escapify(self.service),
dns.rdata._escapify(self.regexp),
self.replacement)
def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
order = tok.get_uint16()
preference = tok.get_uint16()
flags = tok.get_string()
service = tok.get_string()
regexp = tok.get_string()
replacement = tok.get_name()
replacement = replacement.choose_relativity(origin, relativize)
tok.get_eol()
return cls(rdclass, rdtype, order, preference, flags, service,
regexp, replacement)
from_text = classmethod(from_text)
def to_wire(self, file, compress = None, origin = None):
two_ints = struct.pack("!HH", self.order, self.preference)
file.write(two_ints)
_write_string(file, self.flags)
_write_string(file, self.service)
_write_string(file, self.regexp)
self.replacement.to_wire(file, compress, origin)
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
(order, preference) = struct.unpack('!HH', wire[current : current + 4])
current += 4
rdlen -= 4
strings = []
for i in xrange(3):
l = ord(wire[current])
current += 1
rdlen -= 1
if l > rdlen or rdlen < 0:
raise dns.exception.FormError
s = wire[current : current + l]
current += l
rdlen -= l
strings.append(s)
(replacement, cused) = dns.name.from_wire(wire[: current + rdlen],
current)
if cused != rdlen:
raise dns.exception.FormError
if not origin is None:
replacement = replacement.relativize(origin)
return cls(rdclass, rdtype, order, preference, strings[0], strings[1],
strings[2], replacement)
from_wire = classmethod(from_wire)
def choose_relativity(self, origin = None, relativize = True):
self.replacement = self.replacement.choose_relativity(origin,
relativize)
def _cmp(self, other):
sp = struct.pack("!HH", self.order, self.preference)
op = struct.pack("!HH", other.order, other.preference)
v = cmp(sp, op)
if v == 0:
v = cmp(self.flags, other.flags)
if v == 0:
v = cmp(self.service, other.service)
if v == 0:
v = cmp(self.regexp, other.regexp)
if v == 0:
v = cmp(self.replacement, other.replacement)
return v
| apache-2.0 |
rebstar6/servo | tests/wpt/css-tests/tools/pywebsocket/src/test/test_endtoend.py | 449 | 26811 | #!/usr/bin/env python
#
# Copyright 2012, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""End-to-end tests for pywebsocket. Tests standalone.py by default. You
can also test mod_pywebsocket hosted on an Apache server by setting
_use_external_server to True and modifying _external_server_port to point to
the port on which the Apache server is running.
"""
import logging
import os
import signal
import socket
import subprocess
import sys
import time
import unittest
import set_sys_path # Update sys.path to locate mod_pywebsocket module.
from test import client_for_testing
from test import mux_client_for_testing
# Special message that tells the echo server to start closing handshake
_GOODBYE_MESSAGE = 'Goodbye'
_SERVER_WARMUP_IN_SEC = 0.2
# If you want to use external server to run end to end tests, set following
# parameters correctly.
_use_external_server = False
_external_server_port = 0
# Test body functions
def _echo_check_procedure(client):
client.connect()
client.send_message('test')
client.assert_receive('test')
client.send_message('helloworld')
client.assert_receive('helloworld')
client.send_close()
client.assert_receive_close()
client.assert_connection_closed()
def _echo_check_procedure_with_binary(client):
client.connect()
client.send_message('binary', binary=True)
client.assert_receive('binary', binary=True)
client.send_message('\x00\x80\xfe\xff\x00\x80', binary=True)
client.assert_receive('\x00\x80\xfe\xff\x00\x80', binary=True)
client.send_close()
client.assert_receive_close()
client.assert_connection_closed()
def _echo_check_procedure_with_goodbye(client):
client.connect()
client.send_message('test')
client.assert_receive('test')
client.send_message(_GOODBYE_MESSAGE)
client.assert_receive(_GOODBYE_MESSAGE)
client.assert_receive_close()
client.send_close()
client.assert_connection_closed()
def _echo_check_procedure_with_code_and_reason(client, code, reason):
client.connect()
client.send_close(code, reason)
client.assert_receive_close(code, reason)
client.assert_connection_closed()
def _unmasked_frame_check_procedure(client):
client.connect()
client.send_message('test', mask=False)
client.assert_receive_close(client_for_testing.STATUS_PROTOCOL_ERROR, '')
client.assert_connection_closed()
def _mux_echo_check_procedure(mux_client):
mux_client.connect()
mux_client.send_flow_control(1, 1024)
logical_channel_options = client_for_testing.ClientOptions()
logical_channel_options.server_host = 'localhost'
logical_channel_options.server_port = 80
logical_channel_options.origin = 'http://localhost'
logical_channel_options.resource = '/echo'
mux_client.add_channel(2, logical_channel_options)
mux_client.send_flow_control(2, 1024)
mux_client.send_message(2, 'test')
mux_client.assert_receive(2, 'test')
mux_client.add_channel(3, logical_channel_options)
mux_client.send_flow_control(3, 1024)
mux_client.send_message(2, 'hello')
mux_client.send_message(3, 'world')
mux_client.assert_receive(2, 'hello')
mux_client.assert_receive(3, 'world')
# Don't send close message on channel id 1 so that server-initiated
# closing handshake won't occur.
mux_client.send_close(2)
mux_client.send_close(3)
mux_client.assert_receive_close(2)
mux_client.assert_receive_close(3)
mux_client.send_physical_connection_close()
mux_client.assert_physical_connection_receive_close()
class EndToEndTestBase(unittest.TestCase):
"""Base class for end-to-end tests that launch pywebsocket standalone
server as a separate process, connect to it using the client_for_testing
module, and check if the server behaves correctly by exchanging opening
handshake and frames over a TCP connection.
"""
def setUp(self):
self.server_stderr = None
self.top_dir = os.path.join(os.path.split(__file__)[0], '..')
os.putenv('PYTHONPATH', os.path.pathsep.join(sys.path))
self.standalone_command = os.path.join(
self.top_dir, 'mod_pywebsocket', 'standalone.py')
self.document_root = os.path.join(self.top_dir, 'example')
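        # Bind to port 0 so the OS picks a free port for the test server.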
s = socket.socket()
s.bind(('localhost', 0))
(_, self.test_port) = s.getsockname()
s.close()
self._options = client_for_testing.ClientOptions()
self._options.server_host = 'localhost'
self._options.origin = 'http://localhost'
self._options.resource = '/echo'
# TODO(toyoshim): Eliminate launching a standalone server on using
# external server.
if _use_external_server:
self._options.server_port = _external_server_port
else:
self._options.server_port = self.test_port
# TODO(tyoshino): Use tearDown to kill the server.
def _run_python_command(self, commandline, stdout=None, stderr=None):
return subprocess.Popen([sys.executable] + commandline, close_fds=True,
stdout=stdout, stderr=stderr)
def _run_server(self):
args = [self.standalone_command,
'-H', 'localhost',
'-V', 'localhost',
'-p', str(self.test_port),
'-P', str(self.test_port),
'-d', self.document_root]
# Inherit the level set to the root logger by test runner.
root_logger = logging.getLogger()
log_level = root_logger.getEffectiveLevel()
if log_level != logging.NOTSET:
args.append('--log-level')
args.append(logging.getLevelName(log_level).lower())
return self._run_python_command(args,
stderr=self.server_stderr)
def _kill_process(self, pid):
if sys.platform in ('win32', 'cygwin'):
subprocess.call(
('taskkill.exe', '/f', '/pid', str(pid)), close_fds=True)
else:
os.kill(pid, signal.SIGKILL)
class EndToEndHyBiTest(EndToEndTestBase):
def setUp(self):
EndToEndTestBase.setUp(self)
def _run_test_with_client_options(self, test_function, options):
server = self._run_server()
try:
# TODO(tyoshino): add some logic to poll the server until it
# becomes ready
time.sleep(_SERVER_WARMUP_IN_SEC)
client = client_for_testing.create_client(options)
try:
test_function(client)
finally:
client.close_socket()
finally:
self._kill_process(server.pid)
def _run_test(self, test_function):
self._run_test_with_client_options(test_function, self._options)
def _run_deflate_frame_test(self, test_function):
server = self._run_server()
try:
time.sleep(_SERVER_WARMUP_IN_SEC)
self._options.enable_deflate_frame()
client = client_for_testing.create_client(self._options)
try:
test_function(client)
finally:
client.close_socket()
finally:
self._kill_process(server.pid)
def _run_permessage_deflate_test(
self, offer, response_checker, test_function):
server = self._run_server()
try:
time.sleep(_SERVER_WARMUP_IN_SEC)
self._options.extensions += offer
self._options.check_permessage_deflate = response_checker
client = client_for_testing.create_client(self._options)
try:
client.connect()
if test_function is not None:
test_function(client)
client.assert_connection_closed()
finally:
client.close_socket()
finally:
self._kill_process(server.pid)
def _run_close_with_code_and_reason_test(self, test_function, code,
reason):
server = self._run_server()
try:
time.sleep(_SERVER_WARMUP_IN_SEC)
client = client_for_testing.create_client(self._options)
try:
test_function(client, code, reason)
finally:
client.close_socket()
finally:
self._kill_process(server.pid)
def _run_http_fallback_test(self, options, status):
server = self._run_server()
try:
time.sleep(_SERVER_WARMUP_IN_SEC)
client = client_for_testing.create_client(options)
try:
client.connect()
self.fail('Could not catch HttpStatusException')
except client_for_testing.HttpStatusException, e:
self.assertEqual(status, e.status)
except Exception, e:
                self.fail('Caught unexpected exception: %r' % e)
finally:
client.close_socket()
finally:
self._kill_process(server.pid)
def _run_mux_test(self, test_function):
server = self._run_server()
try:
time.sleep(_SERVER_WARMUP_IN_SEC)
client = mux_client_for_testing.MuxClient(self._options)
try:
test_function(client)
finally:
client.close_socket()
finally:
self._kill_process(server.pid)
def test_echo(self):
self._run_test(_echo_check_procedure)
def test_echo_binary(self):
self._run_test(_echo_check_procedure_with_binary)
def test_echo_server_close(self):
self._run_test(_echo_check_procedure_with_goodbye)
def test_unmasked_frame(self):
self._run_test(_unmasked_frame_check_procedure)
def test_echo_deflate_frame(self):
self._run_deflate_frame_test(_echo_check_procedure)
def test_echo_deflate_frame_server_close(self):
self._run_deflate_frame_test(
_echo_check_procedure_with_goodbye)
def test_echo_permessage_deflate(self):
def test_function(client):
# From the examples in the spec.
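            # compressed_hello is the DEFLATE-compressed form of 'Hello'.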
compressed_hello = '\xf2\x48\xcd\xc9\xc9\x07\x00'
client._stream.send_data(
compressed_hello,
client_for_testing.OPCODE_TEXT,
rsv1=1)
client._stream.assert_receive_binary(
compressed_hello,
opcode=client_for_testing.OPCODE_TEXT,
rsv1=1)
client.send_close()
client.assert_receive_close()
def response_checker(parameter):
self.assertEquals('permessage-deflate', parameter.name())
self.assertEquals([], parameter.get_parameters())
self._run_permessage_deflate_test(
['permessage-deflate'],
response_checker,
test_function)
def test_echo_permessage_deflate_two_frames(self):
def test_function(client):
# From the examples in the spec.
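            # The compressed 'Hello' payload is split across two fragments;
            # the echo is expected back as one complete compressed message.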
client._stream.send_data(
'\xf2\x48\xcd',
client_for_testing.OPCODE_TEXT,
end=False,
rsv1=1)
client._stream.send_data(
'\xc9\xc9\x07\x00',
client_for_testing.OPCODE_TEXT)
client._stream.assert_receive_binary(
'\xf2\x48\xcd\xc9\xc9\x07\x00',
opcode=client_for_testing.OPCODE_TEXT,
rsv1=1)
client.send_close()
client.assert_receive_close()
def response_checker(parameter):
self.assertEquals('permessage-deflate', parameter.name())
self.assertEquals([], parameter.get_parameters())
self._run_permessage_deflate_test(
['permessage-deflate'],
response_checker,
test_function)
def test_echo_permessage_deflate_two_messages(self):
def test_function(client):
# From the examples in the spec.
client._stream.send_data(
'\xf2\x48\xcd\xc9\xc9\x07\x00',
client_for_testing.OPCODE_TEXT,
rsv1=1)
client._stream.send_data(
'\xf2\x00\x11\x00\x00',
client_for_testing.OPCODE_TEXT,
rsv1=1)
client._stream.assert_receive_binary(
'\xf2\x48\xcd\xc9\xc9\x07\x00',
opcode=client_for_testing.OPCODE_TEXT,
rsv1=1)
client._stream.assert_receive_binary(
'\xf2\x00\x11\x00\x00',
opcode=client_for_testing.OPCODE_TEXT,
rsv1=1)
client.send_close()
client.assert_receive_close()
def response_checker(parameter):
self.assertEquals('permessage-deflate', parameter.name())
self.assertEquals([], parameter.get_parameters())
self._run_permessage_deflate_test(
['permessage-deflate'],
response_checker,
test_function)
def test_echo_permessage_deflate_two_msgs_server_no_context_takeover(self):
def test_function(client):
# From the examples in the spec.
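            # The client reuses its compression context, so its second 'Hello'
            # compresses to a shorter form; with server_no_context_takeover the
            # server is expected to echo both back as the independently
            # compressed 'Hello'.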
client._stream.send_data(
'\xf2\x48\xcd\xc9\xc9\x07\x00',
client_for_testing.OPCODE_TEXT,
rsv1=1)
client._stream.send_data(
'\xf2\x00\x11\x00\x00',
client_for_testing.OPCODE_TEXT,
rsv1=1)
client._stream.assert_receive_binary(
'\xf2\x48\xcd\xc9\xc9\x07\x00',
opcode=client_for_testing.OPCODE_TEXT,
rsv1=1)
client._stream.assert_receive_binary(
'\xf2\x48\xcd\xc9\xc9\x07\x00',
opcode=client_for_testing.OPCODE_TEXT,
rsv1=1)
client.send_close()
client.assert_receive_close()
def response_checker(parameter):
self.assertEquals('permessage-deflate', parameter.name())
self.assertEquals([('server_no_context_takeover', None)],
parameter.get_parameters())
self._run_permessage_deflate_test(
['permessage-deflate; server_no_context_takeover'],
response_checker,
test_function)
def test_echo_permessage_deflate_preference(self):
def test_function(client):
# From the examples in the spec.
compressed_hello = '\xf2\x48\xcd\xc9\xc9\x07\x00'
client._stream.send_data(
compressed_hello,
client_for_testing.OPCODE_TEXT,
rsv1=1)
client._stream.assert_receive_binary(
compressed_hello,
opcode=client_for_testing.OPCODE_TEXT,
rsv1=1)
client.send_close()
client.assert_receive_close()
def response_checker(parameter):
self.assertEquals('permessage-deflate', parameter.name())
self.assertEquals([], parameter.get_parameters())
self._run_permessage_deflate_test(
['permessage-deflate', 'deflate-frame'],
response_checker,
test_function)
def test_echo_permessage_deflate_with_parameters(self):
def test_function(client):
# From the examples in the spec.
compressed_hello = '\xf2\x48\xcd\xc9\xc9\x07\x00'
client._stream.send_data(
compressed_hello,
client_for_testing.OPCODE_TEXT,
rsv1=1)
client._stream.assert_receive_binary(
compressed_hello,
opcode=client_for_testing.OPCODE_TEXT,
rsv1=1)
client.send_close()
client.assert_receive_close()
def response_checker(parameter):
self.assertEquals('permessage-deflate', parameter.name())
self.assertEquals([('server_max_window_bits', '10'),
('server_no_context_takeover', None)],
parameter.get_parameters())
self._run_permessage_deflate_test(
['permessage-deflate; server_max_window_bits=10; '
'server_no_context_takeover'],
response_checker,
test_function)
def test_echo_permessage_deflate_with_bad_server_max_window_bits(self):
def test_function(client):
client.send_close()
client.assert_receive_close()
def response_checker(parameter):
raise Exception('Unexpected acceptance of permessage-deflate')
self._run_permessage_deflate_test(
['permessage-deflate; server_max_window_bits=3000000'],
response_checker,
test_function)
def test_echo_permessage_deflate_with_undefined_parameter(self):
def test_function(client):
client.send_close()
client.assert_receive_close()
def response_checker(parameter):
raise Exception('Unexpected acceptance of permessage-deflate')
self._run_permessage_deflate_test(
['permessage-deflate; foo=bar'],
response_checker,
test_function)
def test_echo_close_with_code_and_reason(self):
self._options.resource = '/close'
self._run_close_with_code_and_reason_test(
_echo_check_procedure_with_code_and_reason, 3333, 'sunsunsunsun')
def test_echo_close_with_empty_body(self):
self._options.resource = '/close'
self._run_close_with_code_and_reason_test(
_echo_check_procedure_with_code_and_reason, None, '')
def test_mux_echo(self):
self._run_mux_test(_mux_echo_check_procedure)
def test_close_on_protocol_error(self):
"""Tests that the server sends a close frame with protocol error status
code when the client sends data with some protocol error.
"""
def test_function(client):
client.connect()
# Intermediate frame without any preceding start of fragmentation
# frame.
client.send_frame_of_arbitrary_bytes('\x80\x80', '')
client.assert_receive_close(
client_for_testing.STATUS_PROTOCOL_ERROR)
self._run_test(test_function)
def test_close_on_unsupported_frame(self):
"""Tests that the server sends a close frame with unsupported operation
status code when the client sends data asking some operation that is
not supported by the server.
"""
def test_function(client):
client.connect()
# Text frame with RSV3 bit raised.
client.send_frame_of_arbitrary_bytes('\x91\x80', '')
client.assert_receive_close(
client_for_testing.STATUS_UNSUPPORTED_DATA)
self._run_test(test_function)
def test_close_on_invalid_frame(self):
"""Tests that the server sends a close frame with invalid frame payload
data status code when the client sends an invalid frame like containing
invalid UTF-8 character.
"""
def test_function(client):
client.connect()
# Text frame with invalid UTF-8 string.
client.send_message('\x80', raw=True)
client.assert_receive_close(
client_for_testing.STATUS_INVALID_FRAME_PAYLOAD_DATA)
self._run_test(test_function)
def test_close_on_internal_endpoint_error(self):
"""Tests that the server sends a close frame with internal endpoint
error status code when the handler does bad operation.
"""
self._options.resource = '/internal_error'
def test_function(client):
client.connect()
client.assert_receive_close(
client_for_testing.STATUS_INTERNAL_ENDPOINT_ERROR)
self._run_test(test_function)
    # TODO(toyoshim): Add tests to verify invalid absolute uri handling like
    # host mismatch, port mismatch and an invalid port description (':'
    # without a port number).
def test_absolute_uri(self):
"""Tests absolute uri request."""
options = self._options
options.resource = 'ws://localhost:%d/echo' % options.server_port
self._run_test_with_client_options(_echo_check_procedure, options)
def test_origin_check(self):
"""Tests http fallback on origin check fail."""
options = self._options
options.resource = '/origin_check'
        # The server logs a warning for the HTTP 403 fallback, which is
        # confusing in test output, so the pipe below swallows those messages.
self.server_stderr = subprocess.PIPE
self._run_http_fallback_test(options, 403)
def test_version_check(self):
"""Tests http fallback on version check fail."""
options = self._options
options.version = 99
self._run_http_fallback_test(options, 400)
class EndToEndHyBi00Test(EndToEndTestBase):
def setUp(self):
EndToEndTestBase.setUp(self)
def _run_test(self, test_function):
server = self._run_server()
try:
time.sleep(_SERVER_WARMUP_IN_SEC)
client = client_for_testing.create_client_hybi00(self._options)
try:
test_function(client)
finally:
client.close_socket()
finally:
self._kill_process(server.pid)
def test_echo(self):
self._run_test(_echo_check_procedure)
def test_echo_server_close(self):
self._run_test(_echo_check_procedure_with_goodbye)
class EndToEndTestWithEchoClient(EndToEndTestBase):
def setUp(self):
EndToEndTestBase.setUp(self)
def _check_example_echo_client_result(
self, expected, stdoutdata, stderrdata):
actual = stdoutdata.decode("utf-8")
if actual != expected:
raise Exception('Unexpected result on example echo client: '
'%r (expected) vs %r (actual)' %
(expected, actual))
if stderrdata is not None:
raise Exception('Unexpected error message on example echo '
'client: %r' % stderrdata)
def test_example_echo_client(self):
"""Tests that the echo_client.py example can talk with the server."""
server = self._run_server()
try:
time.sleep(_SERVER_WARMUP_IN_SEC)
client_command = os.path.join(
self.top_dir, 'example', 'echo_client.py')
# Expected output for the default messages.
default_expectation = ('Send: Hello\n' 'Recv: Hello\n'
u'Send: \u65e5\u672c\n' u'Recv: \u65e5\u672c\n'
'Send close\n' 'Recv ack\n')
args = [client_command,
'-p', str(self._options.server_port)]
client = self._run_python_command(args, stdout=subprocess.PIPE)
stdoutdata, stderrdata = client.communicate()
self._check_example_echo_client_result(
default_expectation, stdoutdata, stderrdata)
# Process a big message for which extended payload length is used.
# To handle extended payload length, ws_version attribute will be
# accessed. This test checks that ws_version is correctly set.
big_message = 'a' * 1024
args = [client_command,
'-p', str(self._options.server_port),
'-m', big_message]
client = self._run_python_command(args, stdout=subprocess.PIPE)
stdoutdata, stderrdata = client.communicate()
expected = ('Send: %s\nRecv: %s\nSend close\nRecv ack\n' %
(big_message, big_message))
self._check_example_echo_client_result(
expected, stdoutdata, stderrdata)
# Test the permessage-deflate extension.
args = [client_command,
'-p', str(self._options.server_port),
'--use_permessage_deflate']
client = self._run_python_command(args, stdout=subprocess.PIPE)
stdoutdata, stderrdata = client.communicate()
self._check_example_echo_client_result(
default_expectation, stdoutdata, stderrdata)
finally:
self._kill_process(server.pid)
if __name__ == '__main__':
unittest.main()
# vi:sts=4 sw=4 et
| mpl-2.0 |
jlegendary/orange | Orange/OrangeWidgets/Classify/OWKNN.py | 6 | 6326 | """
<name>k Nearest Neighbours</name>
<description>K-nearest neighbours learner/classifier.</description>
<icon>icons/kNearestNeighbours.svg</icon>
<contact>Janez Demsar (janez.demsar(@at@)fri.uni-lj.si)</contact>
<priority>25</priority>
"""
from OWWidget import *
import OWGUI
from exceptions import Exception
from orngWrap import PreprocessedLearner
NAME = "k Nearest Neighbours"
ID = "orange.widgets.classify.knn"
DESCRIPTION = "K-nearest neighbours learner/classifier."
ICON = "icons/kNearestNeighbours.svg"
AUTHOR = "Janez Demsar"
PRIORITY = 25
HELP_REF = "k-Nearest Neighbours"
KEYWORDS = ["knn"]
INPUTS = (
InputSignal(name="Data",
type=ExampleTable,
handler="setData",
doc="Training data set",
id="train-data"),
InputSignal(name="Preprocess",
type=PreprocessedLearner,
handler="setPreprocessor",
id="preprocessor")
)
OUTPUTS = (
OutputSignal(name="Learner",
type=orange.Learner,
doc="The kNN learner with settings as specified in "
"the dialog",
id="learner"),
OutputSignal(name="kNN Classifier",
type=orange.kNNClassifier,
doc="A kNN classifier trained on 'Data'.",
id="knn-classifier")
)
WIDGET_CLASS = "OWKNN"
class OWKNN(OWWidget):
settingsList = ["name", "k", "metrics", "ranks", "normalize", "ignoreUnknowns"]
def __init__(self, parent=None, signalManager = None, name='kNN'):
OWWidget.__init__(self, parent, signalManager, name, wantMainArea = 0, resizingEnabled = 0)
self.callbackDeposit = []
self.inputs = [("Data", ExampleTable, self.setData), ("Preprocess", PreprocessedLearner, self.setPreprocessor)]
self.outputs = [("Learner", orange.Learner),("kNN Classifier", orange.kNNClassifier)]
self.metricsList = [("Euclidean", orange.ExamplesDistanceConstructor_Euclidean),
("Hamming", orange.ExamplesDistanceConstructor_Hamming),
("Manhattan", orange.ExamplesDistanceConstructor_Manhattan),
("Maximal", orange.ExamplesDistanceConstructor_Maximal),
# ("Dynamic time warp", orange.ExamplesDistanceConstructor_DTW)
]
# Settings
self.name = 'kNN'
        self.k = 5
        self.metrics = 0
        self.ranks = 0
self.ignoreUnknowns = 0
self.normalize = self.oldNormalize = 1
self.loadSettings()
self.data = None # input data set
self.preprocessor = None # no preprocessing as default
self.setLearner() # this just sets the learner, no data
# has come to the input yet
OWGUI.lineEdit(self.controlArea, self, 'name', box='Learner/Classifier Name', \
tooltip='Name to be used by other widgets to identify your learner/classifier.')
OWGUI.separator(self.controlArea)
wbN = OWGUI.widgetBox(self.controlArea, "Neighbours")
OWGUI.spin(wbN, self, "k", 1, 100, 1, None, "Number of neighbours ", orientation="horizontal")
OWGUI.checkBox(wbN, self, "ranks", "Weighting by ranks, not distances")
OWGUI.separator(self.controlArea)
wbM = OWGUI.widgetBox(self.controlArea, "Metrics")
OWGUI.comboBox(wbM, self, "metrics", items = [x[0] for x in self.metricsList], valueType = int, callback = self.metricsChanged)
self.cbNormalize = OWGUI.checkBox(wbM, self, "normalize", "Normalize continuous attributes")
OWGUI.checkBox(wbM, self, "ignoreUnknowns", "Ignore unknown values")
self.metricsChanged()
OWGUI.separator(self.controlArea)
OWGUI.button(self.controlArea, self, "&Apply", callback=self.setLearner, disabled=0, default=True)
OWGUI.rubber(self.controlArea)
self.resize(100,250)
def sendReport(self):
self.reportSettings("Learning parameters",
[("Metrics", self.metricsList[self.metrics][0]),
not self.metrics and ("Continuous attributes", ["Raw", "Normalized"][self.normalize]),
("Unknown values ignored", OWGUI.YesNo[self.ignoreUnknowns]),
("Number of neighbours", self.k),
("Weighting", ["By distances", "By ranked distances"][self.ranks])])
self.reportData(self.data)
def metricsChanged(self):
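        # Normalization of continuous attributes only applies to the Euclidean
        # metric (index 0); for the other metrics the checkbox is disabled and
        # its previous state is kept in oldNormalize.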
if not self.metrics and not self.cbNormalize.isEnabled():
self.normalize = self.oldNormalize
self.cbNormalize.setEnabled(True)
elif self.metrics and self.cbNormalize.isEnabled():
self.oldNormalize = self.normalize
self.normalize = False
self.cbNormalize.setEnabled(False)
def setData(self,data):
self.data = self.isDataWithClass(data, orange.VarTypes.Discrete, checkMissing=True) and data or None
self.setLearner()
def setPreprocessor(self, pp):
self.preprocessor = pp
self.setLearner()
def setLearner(self):
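        # Build the distance constructor for the selected metric, wrap it in a
        # kNN learner (optionally preprocessed), send the learner, and retrain.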
distconst = self.metricsList[self.metrics][1]()
distconst.ignoreUnknowns = self.ignoreUnknowns
distconst.normalize = self.normalize
self.learner = orange.kNNLearner(k = self.k, rankWeight = self.ranks, distanceConstructor = distconst)
if self.preprocessor:
self.learner = self.preprocessor.wrapLearner(self.learner)
self.learner.name = self.name
self.send("Learner", self.learner)
self.learn()
def learn(self):
self.classifier = None
if self.data and self.learner:
try:
self.classifier = self.learner(self.data)
self.classifier.name = self.name
except Exception, (errValue):
self.classifier = None
self.error(str(errValue))
self.send("kNN Classifier", self.classifier)
if __name__ == "__main__":
a = QApplication(sys.argv)
ow = OWKNN()
dataset = orange.ExampleTable('adult_sample')
ow.setData(dataset)
ow.show()
a.exec_()
ow.saveSettings()
| gpl-3.0 |
TresysTechnology/setools | tests/nodeconquery.py | 1 | 10617 | # Copyright 2014, Tresys Technology, LLC
# Copyright 2017, Chris PeBenito <[email protected]>
#
# This file is part of SETools.
#
# SETools is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# SETools is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SETools. If not, see <http://www.gnu.org/licenses/>.
#
import sys
import unittest
from socket import AF_INET6
from ipaddress import IPv4Network, IPv6Network
from setools import SELinuxPolicy, NodeconQuery
class NodeconQueryTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.p = SELinuxPolicy("tests/nodeconquery.conf")
def test_000_unset(self):
"""Nodecon query with no criteria"""
# query with no parameters gets all nodecons.
nodecons = sorted(self.p.nodecons())
q = NodeconQuery(self.p)
q_nodecons = sorted(q.results())
self.assertListEqual(nodecons, q_nodecons)
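    # The remaining tests follow the same pattern: build a NodeconQuery with
    # one or two criteria, sort the matching networks, and compare against the
    # expected IPv4Network/IPv6Network list from tests/nodeconquery.conf.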
def test_001_ip_version(self):
"""Nodecon query with IP version match."""
q = NodeconQuery(self.p, ip_version=AF_INET6)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv6Network("1100::/16"), IPv6Network("1110::/16")], nodecons)
def test_020_user_exact(self):
"""Nodecon query with context user exact match"""
q = NodeconQuery(self.p, user="user20", user_regex=False)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv4Network("10.1.20.1/32")], nodecons)
def test_021_user_regex(self):
"""Nodecon query with context user regex match"""
q = NodeconQuery(self.p, user="user21(a|b)", user_regex=True)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv4Network("10.1.21.1/32"), IPv4Network("10.1.21.2/32")], nodecons)
def test_030_role_exact(self):
"""Nodecon query with context role exact match"""
q = NodeconQuery(self.p, role="role30_r", role_regex=False)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv4Network("10.1.30.1/32")], nodecons)
def test_031_role_regex(self):
"""Nodecon query with context role regex match"""
q = NodeconQuery(self.p, role="role31(a|c)_r", role_regex=True)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv4Network("10.1.31.1/32"), IPv4Network("10.1.31.3/32")], nodecons)
def test_040_type_exact(self):
"""Nodecon query with context type exact match"""
q = NodeconQuery(self.p, type_="type40", type_regex=False)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv4Network("10.1.40.1/32")], nodecons)
def test_041_type_regex(self):
"""Nodecon query with context type regex match"""
q = NodeconQuery(self.p, type_="type41(b|c)", type_regex=True)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv4Network("10.1.41.2/32"), IPv4Network("10.1.41.3/32")], nodecons)
def test_050_range_exact(self):
"""Nodecon query with context range exact match"""
q = NodeconQuery(self.p, range_="s0:c1 - s0:c0.c4")
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv4Network("10.1.50.1/32")], nodecons)
def test_051_range_overlap1(self):
"""Nodecon query with context range overlap match (equal)"""
q = NodeconQuery(self.p, range_="s1:c1 - s1:c0.c4", range_overlap=True)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv4Network("10.1.51.1/32")], nodecons)
def test_051_range_overlap2(self):
"""Nodecon query with context range overlap match (subset)"""
q = NodeconQuery(self.p, range_="s1:c1,c2 - s1:c0.c3", range_overlap=True)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv4Network("10.1.51.1/32")], nodecons)
def test_051_range_overlap3(self):
"""Nodecon query with context range overlap match (superset)"""
q = NodeconQuery(self.p, range_="s1 - s1:c0.c4", range_overlap=True)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv4Network("10.1.51.1/32")], nodecons)
def test_051_range_overlap4(self):
"""Nodecon query with context range overlap match (overlap low level)"""
q = NodeconQuery(self.p, range_="s1 - s1:c1,c2", range_overlap=True)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv4Network("10.1.51.1/32")], nodecons)
def test_051_range_overlap5(self):
"""Nodecon query with context range overlap match (overlap high level)"""
q = NodeconQuery(self.p, range_="s1:c1,c2 - s1:c0.c4", range_overlap=True)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv4Network("10.1.51.1/32")], nodecons)
def test_052_range_subset1(self):
"""Nodecon query with context range subset match"""
q = NodeconQuery(self.p, range_="s2:c1,c2 - s2:c0.c3", range_overlap=True)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv4Network("10.1.52.1/32")], nodecons)
def test_052_range_subset2(self):
"""Nodecon query with context range subset match (equal)"""
q = NodeconQuery(self.p, range_="s2:c1 - s2:c1.c3", range_overlap=True)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv4Network("10.1.52.1/32")], nodecons)
def test_053_range_superset1(self):
"""Nodecon query with context range superset match"""
q = NodeconQuery(self.p, range_="s3 - s3:c0.c4", range_superset=True)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv4Network("10.1.53.1/32")], nodecons)
def test_053_range_superset2(self):
"""Nodecon query with context range superset match (equal)"""
q = NodeconQuery(self.p, range_="s3:c1 - s3:c1.c3", range_superset=True)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv4Network("10.1.53.1/32")], nodecons)
def test_054_range_proper_subset1(self):
"""Nodecon query with context range proper subset match"""
q = NodeconQuery(self.p, range_="s4:c1,c2", range_subset=True, range_proper=True)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv4Network("10.1.54.1/32")], nodecons)
def test_054_range_proper_subset2(self):
"""Nodecon query with context range proper subset match (equal)"""
q = NodeconQuery(self.p, range_="s4:c1 - s4:c1.c3", range_subset=True, range_proper=True)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([], nodecons)
def test_054_range_proper_subset3(self):
"""Nodecon query with context range proper subset match (equal low only)"""
q = NodeconQuery(self.p, range_="s4:c1 - s4:c1.c2", range_subset=True, range_proper=True)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv4Network("10.1.54.1/32")], nodecons)
def test_054_range_proper_subset4(self):
"""Nodecon query with context range proper subset match (equal high only)"""
q = NodeconQuery(self.p, range_="s4:c1,c2 - s4:c1.c3", range_subset=True, range_proper=True)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv4Network("10.1.54.1/32")], nodecons)
def test_055_range_proper_superset1(self):
"""Nodecon query with context range proper superset match"""
q = NodeconQuery(self.p, range_="s5 - s5:c0.c4", range_superset=True, range_proper=True)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv4Network("10.1.55.1/32")], nodecons)
def test_055_range_proper_superset2(self):
"""Nodecon query with context range proper superset match (equal)"""
q = NodeconQuery(self.p, range_="s5:c1 - s5:c1.c3", range_superset=True, range_proper=True)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([], nodecons)
def test_055_range_proper_superset3(self):
"""Nodecon query with context range proper superset match (equal low)"""
q = NodeconQuery(self.p, range_="s5:c1 - s5:c1.c4", range_superset=True, range_proper=True)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv4Network("10.1.55.1/32")], nodecons)
def test_055_range_proper_superset4(self):
"""Nodecon query with context range proper superset match (equal high)"""
q = NodeconQuery(self.p, range_="s5 - s5:c1.c3", range_superset=True, range_proper=True)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv4Network("10.1.55.1/32")], nodecons)
def test_100_v4network_equal(self):
"""Nodecon query with IPv4 equal network"""
q = NodeconQuery(self.p, network="192.168.1.0/24", network_overlap=False)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv4Network("192.168.1.0/24")], nodecons)
def test_101_v4network_overlap(self):
"""Nodecon query with IPv4 network overlap"""
q = NodeconQuery(self.p, network="192.168.201.0/24", network_overlap=True)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv4Network("192.168.200.0/22")], nodecons)
def test_110_v6network_equal(self):
"""Nodecon query with IPv6 equal network"""
q = NodeconQuery(self.p, network="1100::/16", network_overlap=False)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv6Network("1100::/16")], nodecons)
def test_111_v6network_overlap(self):
"""Nodecon query with IPv6 network overlap"""
q = NodeconQuery(self.p, network="1110:8000::/17", network_overlap=True)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv6Network("1110::/16")], nodecons)
| lgpl-2.1 |
lcf258/openwrtcnwanzhen | target/linux/x86/image/mkimg_bifferboard.py | 561 | 1265 | #!/usr/bin/env python
"""
Create firmware for 4/8MB Bifferboards, suitable for uploading using
either bb_upload8.py or bb_eth_upload8.py
"""
import struct, sys
# Increase the kmax value if the script gives errors about the kernel being
# too large. You need to set the Biffboot kmax value to the same value you
# use here.
kmax = 0x10
# No need to change this for 4MB devices, it's only used to tell you if
# the firmware is too large!
flash_size = 0x800000
# This is always the same, for 1MB, 4MB and 8MB devices
config_extent = 0x6000
kernel_extent = kmax * 0x10000
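# With the defaults above the kernel partition holds kernel_extent -
# config_extent = 0x100000 - 0x6000 bytes, and the whole image must fit in
# flash_size - 0x10000 - config_extent bytes (see the size checks below).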
if __name__ == "__main__":
if len(sys.argv) != 4:
print "usage: mkimg_bifferboard.py <kernel> <rootfs> <output file>"
sys.exit(-1)
bzimage = sys.argv[1]
rootfs = sys.argv[2]
target = sys.argv[3]
# Kernel first
fw = file(bzimage).read()
if len(fw) > (kernel_extent - config_extent):
raise IOError("Kernel too large")
# Pad up to end of kernel partition
while len(fw) < (kernel_extent - config_extent):
fw += "\xff"
fw += file(rootfs).read()
# Check length of total
if len(fw) > (flash_size - 0x10000 - config_extent):
raise IOError("Rootfs too large")
file(target,"wb").write(fw)
print "Firmware written to '%s'" % target
| gpl-2.0 |
jeremiahmarks/sl4a | python/src/Lib/encodings/iso8859_3.py | 593 | 13345 | """ Python Character Mapping Codec iso8859_3 generated from 'MAPPINGS/ISO8859/8859-3.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='iso8859-3',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
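### Usage example
# Round trip through this codec (values taken from the tables below):
#
#   u'\u0126'.encode('iso8859-3')   # -> '\xa1'
#   '\xbb'.decode('iso8859-3')      # -> u'\u011f'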
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\x80' # 0x80 -> <control>
u'\x81' # 0x81 -> <control>
u'\x82' # 0x82 -> <control>
u'\x83' # 0x83 -> <control>
u'\x84' # 0x84 -> <control>
u'\x85' # 0x85 -> <control>
u'\x86' # 0x86 -> <control>
u'\x87' # 0x87 -> <control>
u'\x88' # 0x88 -> <control>
u'\x89' # 0x89 -> <control>
u'\x8a' # 0x8A -> <control>
u'\x8b' # 0x8B -> <control>
u'\x8c' # 0x8C -> <control>
u'\x8d' # 0x8D -> <control>
u'\x8e' # 0x8E -> <control>
u'\x8f' # 0x8F -> <control>
u'\x90' # 0x90 -> <control>
u'\x91' # 0x91 -> <control>
u'\x92' # 0x92 -> <control>
u'\x93' # 0x93 -> <control>
u'\x94' # 0x94 -> <control>
u'\x95' # 0x95 -> <control>
u'\x96' # 0x96 -> <control>
u'\x97' # 0x97 -> <control>
u'\x98' # 0x98 -> <control>
u'\x99' # 0x99 -> <control>
u'\x9a' # 0x9A -> <control>
u'\x9b' # 0x9B -> <control>
u'\x9c' # 0x9C -> <control>
u'\x9d' # 0x9D -> <control>
u'\x9e' # 0x9E -> <control>
u'\x9f' # 0x9F -> <control>
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\u0126' # 0xA1 -> LATIN CAPITAL LETTER H WITH STROKE
u'\u02d8' # 0xA2 -> BREVE
u'\xa3' # 0xA3 -> POUND SIGN
u'\xa4' # 0xA4 -> CURRENCY SIGN
u'\ufffe'
u'\u0124' # 0xA6 -> LATIN CAPITAL LETTER H WITH CIRCUMFLEX
u'\xa7' # 0xA7 -> SECTION SIGN
u'\xa8' # 0xA8 -> DIAERESIS
u'\u0130' # 0xA9 -> LATIN CAPITAL LETTER I WITH DOT ABOVE
u'\u015e' # 0xAA -> LATIN CAPITAL LETTER S WITH CEDILLA
u'\u011e' # 0xAB -> LATIN CAPITAL LETTER G WITH BREVE
u'\u0134' # 0xAC -> LATIN CAPITAL LETTER J WITH CIRCUMFLEX
u'\xad' # 0xAD -> SOFT HYPHEN
u'\ufffe'
u'\u017b' # 0xAF -> LATIN CAPITAL LETTER Z WITH DOT ABOVE
u'\xb0' # 0xB0 -> DEGREE SIGN
u'\u0127' # 0xB1 -> LATIN SMALL LETTER H WITH STROKE
u'\xb2' # 0xB2 -> SUPERSCRIPT TWO
u'\xb3' # 0xB3 -> SUPERSCRIPT THREE
u'\xb4' # 0xB4 -> ACUTE ACCENT
u'\xb5' # 0xB5 -> MICRO SIGN
u'\u0125' # 0xB6 -> LATIN SMALL LETTER H WITH CIRCUMFLEX
u'\xb7' # 0xB7 -> MIDDLE DOT
u'\xb8' # 0xB8 -> CEDILLA
u'\u0131' # 0xB9 -> LATIN SMALL LETTER DOTLESS I
u'\u015f' # 0xBA -> LATIN SMALL LETTER S WITH CEDILLA
u'\u011f' # 0xBB -> LATIN SMALL LETTER G WITH BREVE
u'\u0135' # 0xBC -> LATIN SMALL LETTER J WITH CIRCUMFLEX
u'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
u'\ufffe'
u'\u017c' # 0xBF -> LATIN SMALL LETTER Z WITH DOT ABOVE
u'\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\ufffe'
u'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\u010a' # 0xC5 -> LATIN CAPITAL LETTER C WITH DOT ABOVE
u'\u0108' # 0xC6 -> LATIN CAPITAL LETTER C WITH CIRCUMFLEX
u'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\xcc' # 0xCC -> LATIN CAPITAL LETTER I WITH GRAVE
u'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\ufffe'
u'\xd1' # 0xD1 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xd2' # 0xD2 -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\u0120' # 0xD5 -> LATIN CAPITAL LETTER G WITH DOT ABOVE
u'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xd7' # 0xD7 -> MULTIPLICATION SIGN
u'\u011c' # 0xD8 -> LATIN CAPITAL LETTER G WITH CIRCUMFLEX
u'\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE
u'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
u'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\u016c' # 0xDD -> LATIN CAPITAL LETTER U WITH BREVE
u'\u015c' # 0xDE -> LATIN CAPITAL LETTER S WITH CIRCUMFLEX
u'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S
u'\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\ufffe'
u'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\u010b' # 0xE5 -> LATIN SMALL LETTER C WITH DOT ABOVE
u'\u0109' # 0xE6 -> LATIN SMALL LETTER C WITH CIRCUMFLEX
u'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
u'\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE
u'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
u'\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xec' # 0xEC -> LATIN SMALL LETTER I WITH GRAVE
u'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
u'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
u'\ufffe'
u'\xf1' # 0xF1 -> LATIN SMALL LETTER N WITH TILDE
u'\xf2' # 0xF2 -> LATIN SMALL LETTER O WITH GRAVE
u'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
u'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\u0121' # 0xF5 -> LATIN SMALL LETTER G WITH DOT ABOVE
u'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf7' # 0xF7 -> DIVISION SIGN
u'\u011d' # 0xF8 -> LATIN SMALL LETTER G WITH CIRCUMFLEX
u'\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE
u'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
u'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
u'\u016d' # 0xFD -> LATIN SMALL LETTER U WITH BREVE
u'\u015d' # 0xFE -> LATIN SMALL LETTER S WITH CIRCUMFLEX
u'\u02d9' # 0xFF -> DOT ABOVE
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
| apache-2.0 |
coins4lunch/EvilCoin | share/qt/make_spinner.py | 4415 | 1035 | #!/usr/bin/env python
# W.J. van der Laan, 2011
# Make spinning .mng animation from a .png
# Requires imagemagick 6.7+
from __future__ import division
from os import path
from PIL import Image
from subprocess import Popen
SRC='img/reload_scaled.png'
DST='../../src/qt/res/movies/update_spinner.mng'
TMPDIR='/tmp'
TMPNAME='tmp-%03i.png'
NUMFRAMES=35
FRAMERATE=10.0
CONVERT='convert'
CLOCKWISE=True
DSIZE=(16,16)
im_src = Image.open(SRC)
if CLOCKWISE:
im_src = im_src.transpose(Image.FLIP_LEFT_RIGHT)
def frame_to_filename(frame):
return path.join(TMPDIR, TMPNAME % frame)
frame_files = []
for frame in xrange(NUMFRAMES):
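    # Rotate each frame to the midpoint of its 1/NUMFRAMES slice of a full
    # turn; the sign is flipped (and the source mirrored above) for clockwise.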
rotation = (frame + 0.5) / NUMFRAMES * 360.0
if CLOCKWISE:
rotation = -rotation
im_new = im_src.rotate(rotation, Image.BICUBIC)
im_new.thumbnail(DSIZE, Image.ANTIALIAS)
outfile = frame_to_filename(frame)
im_new.save(outfile, 'png')
frame_files.append(outfile)
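# ImageMagick's -delay is in ticks of 1/100 s, so 10 gives roughly 10 fps;
# -dispose 2 clears each frame to the background before drawing the next.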
p = Popen([CONVERT, "-delay", str(FRAMERATE), "-dispose", "2"] + frame_files + [DST])
p.communicate()
| mit |
zq317157782/Narukami | external/googletest/googlemock/scripts/generator/cpp/ast.py | 16 | 62772 | #!/usr/bin/env python
#
# Copyright 2007 Neal Norwitz
# Portions Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate an Abstract Syntax Tree (AST) for C++."""
__author__ = '[email protected] (Neal Norwitz)'
# TODO:
# * Tokens should never be exported, need to convert to Nodes
# (return types, parameters, etc.)
# * Handle static class data for templatized classes
# * Handle casts (both C++ and C-style)
# * Handle conditions and loops (if/else, switch, for, while/do)
#
# TODO much, much later:
# * Handle #define
# * exceptions
try:
# Python 3.x
import builtins
except ImportError:
# Python 2.x
import __builtin__ as builtins
import sys
import traceback
from cpp import keywords
from cpp import tokenize
from cpp import utils
if not hasattr(builtins, 'reversed'):
# Support Python 2.3 and earlier.
def reversed(seq):
for i in range(len(seq)-1, -1, -1):
yield seq[i]
if not hasattr(builtins, 'next'):
# Support Python 2.5 and earlier.
def next(obj):
return obj.next()
VISIBILITY_PUBLIC, VISIBILITY_PROTECTED, VISIBILITY_PRIVATE = range(3)
FUNCTION_NONE = 0x00
FUNCTION_CONST = 0x01
FUNCTION_VIRTUAL = 0x02
FUNCTION_PURE_VIRTUAL = 0x04
FUNCTION_CTOR = 0x08
FUNCTION_DTOR = 0x10
FUNCTION_ATTRIBUTE = 0x20
FUNCTION_UNKNOWN_ANNOTATION = 0x40
FUNCTION_THROW = 0x80
FUNCTION_OVERRIDE = 0x100
"""
These are currently unused. Should really handle these properly at some point.
TYPE_MODIFIER_INLINE = 0x010000
TYPE_MODIFIER_EXTERN = 0x020000
TYPE_MODIFIER_STATIC = 0x040000
TYPE_MODIFIER_CONST = 0x080000
TYPE_MODIFIER_REGISTER = 0x100000
TYPE_MODIFIER_VOLATILE = 0x200000
TYPE_MODIFIER_MUTABLE = 0x400000
TYPE_MODIFIER_MAP = {
'inline': TYPE_MODIFIER_INLINE,
'extern': TYPE_MODIFIER_EXTERN,
'static': TYPE_MODIFIER_STATIC,
'const': TYPE_MODIFIER_CONST,
'register': TYPE_MODIFIER_REGISTER,
'volatile': TYPE_MODIFIER_VOLATILE,
'mutable': TYPE_MODIFIER_MUTABLE,
}
"""
_INTERNAL_TOKEN = 'internal'
_NAMESPACE_POP = 'ns-pop'
# TODO(nnorwitz): use this as a singleton for templated_types, etc
# where we don't want to create a new empty dict each time. It is also const.
class _NullDict(object):
  __contains__ = lambda self, item: False
keys = values = items = iterkeys = itervalues = iteritems = lambda self: ()
# TODO(nnorwitz): move AST nodes into a separate module.
class Node(object):
"""Base AST node."""
def __init__(self, start, end):
self.start = start
self.end = end
def IsDeclaration(self):
"""Returns bool if this node is a declaration."""
return False
def IsDefinition(self):
"""Returns bool if this node is a definition."""
return False
def IsExportable(self):
"""Returns bool if this node exportable from a header file."""
return False
def Requires(self, node):
"""Does this AST node require the definition of the node passed in?"""
return False
def XXX__str__(self):
return self._StringHelper(self.__class__.__name__, '')
def _StringHelper(self, name, suffix):
if not utils.DEBUG:
return '%s(%s)' % (name, suffix)
return '%s(%d, %d, %s)' % (name, self.start, self.end, suffix)
def __repr__(self):
return str(self)
class Define(Node):
def __init__(self, start, end, name, definition):
Node.__init__(self, start, end)
self.name = name
self.definition = definition
def __str__(self):
value = '%s %s' % (self.name, self.definition)
return self._StringHelper(self.__class__.__name__, value)
class Include(Node):
def __init__(self, start, end, filename, system):
Node.__init__(self, start, end)
self.filename = filename
self.system = system
def __str__(self):
fmt = '"%s"'
if self.system:
fmt = '<%s>'
return self._StringHelper(self.__class__.__name__, fmt % self.filename)
class Goto(Node):
def __init__(self, start, end, label):
Node.__init__(self, start, end)
self.label = label
def __str__(self):
return self._StringHelper(self.__class__.__name__, str(self.label))
class Expr(Node):
def __init__(self, start, end, expr):
Node.__init__(self, start, end)
self.expr = expr
def Requires(self, node):
# TODO(nnorwitz): impl.
return False
def __str__(self):
return self._StringHelper(self.__class__.__name__, str(self.expr))
class Return(Expr):
pass
class Delete(Expr):
pass
class Friend(Expr):
def __init__(self, start, end, expr, namespace):
Expr.__init__(self, start, end, expr)
self.namespace = namespace[:]
class Using(Node):
def __init__(self, start, end, names):
Node.__init__(self, start, end)
self.names = names
def __str__(self):
return self._StringHelper(self.__class__.__name__, str(self.names))
class Parameter(Node):
def __init__(self, start, end, name, parameter_type, default):
Node.__init__(self, start, end)
self.name = name
self.type = parameter_type
self.default = default
def Requires(self, node):
# TODO(nnorwitz): handle namespaces, etc.
return self.type.name == node.name
def __str__(self):
name = str(self.type)
suffix = '%s %s' % (name, self.name)
if self.default:
suffix += ' = ' + ''.join([d.name for d in self.default])
return self._StringHelper(self.__class__.__name__, suffix)
class _GenericDeclaration(Node):
def __init__(self, start, end, name, namespace):
Node.__init__(self, start, end)
self.name = name
self.namespace = namespace[:]
def FullName(self):
prefix = ''
if self.namespace and self.namespace[-1]:
prefix = '::'.join(self.namespace) + '::'
return prefix + self.name
def _TypeStringHelper(self, suffix):
if self.namespace:
names = [n or '<anonymous>' for n in self.namespace]
suffix += ' in ' + '::'.join(names)
return self._StringHelper(self.__class__.__name__, suffix)
# TODO(nnorwitz): merge with Parameter in some way?
class VariableDeclaration(_GenericDeclaration):
def __init__(self, start, end, name, var_type, initial_value, namespace):
_GenericDeclaration.__init__(self, start, end, name, namespace)
self.type = var_type
self.initial_value = initial_value
def Requires(self, node):
# TODO(nnorwitz): handle namespaces, etc.
return self.type.name == node.name
def ToString(self):
"""Return a string that tries to reconstitute the variable decl."""
suffix = '%s %s' % (self.type, self.name)
if self.initial_value:
suffix += ' = ' + self.initial_value
return suffix
def __str__(self):
return self._StringHelper(self.__class__.__name__, self.ToString())
class Typedef(_GenericDeclaration):
def __init__(self, start, end, name, alias, namespace):
_GenericDeclaration.__init__(self, start, end, name, namespace)
self.alias = alias
def IsDefinition(self):
return True
def IsExportable(self):
return True
def Requires(self, node):
# TODO(nnorwitz): handle namespaces, etc.
name = node.name
for token in self.alias:
if token is not None and name == token.name:
return True
return False
def __str__(self):
suffix = '%s, %s' % (self.name, self.alias)
return self._TypeStringHelper(suffix)
class _NestedType(_GenericDeclaration):
def __init__(self, start, end, name, fields, namespace):
_GenericDeclaration.__init__(self, start, end, name, namespace)
self.fields = fields
def IsDefinition(self):
return True
def IsExportable(self):
return True
def __str__(self):
suffix = '%s, {%s}' % (self.name, self.fields)
return self._TypeStringHelper(suffix)
class Union(_NestedType):
pass
class Enum(_NestedType):
pass
class Class(_GenericDeclaration):
def __init__(self, start, end, name, bases, templated_types, body, namespace):
_GenericDeclaration.__init__(self, start, end, name, namespace)
self.bases = bases
self.body = body
self.templated_types = templated_types
def IsDeclaration(self):
return self.bases is None and self.body is None
def IsDefinition(self):
return not self.IsDeclaration()
def IsExportable(self):
return not self.IsDeclaration()
def Requires(self, node):
# TODO(nnorwitz): handle namespaces, etc.
if self.bases:
for token_list in self.bases:
# TODO(nnorwitz): bases are tokens, do name comparison.
for token in token_list:
if token.name == node.name:
return True
# TODO(nnorwitz): search in body too.
return False
def __str__(self):
name = self.name
if self.templated_types:
name += '<%s>' % self.templated_types
suffix = '%s, %s, %s' % (name, self.bases, self.body)
return self._TypeStringHelper(suffix)
class Struct(Class):
pass
class Function(_GenericDeclaration):
def __init__(self, start, end, name, return_type, parameters,
modifiers, templated_types, body, namespace):
_GenericDeclaration.__init__(self, start, end, name, namespace)
converter = TypeConverter(namespace)
self.return_type = converter.CreateReturnType(return_type)
self.parameters = converter.ToParameters(parameters)
self.modifiers = modifiers
self.body = body
self.templated_types = templated_types
def IsDeclaration(self):
return self.body is None
def IsDefinition(self):
return self.body is not None
def IsExportable(self):
if self.return_type and 'static' in self.return_type.modifiers:
return False
return None not in self.namespace
def Requires(self, node):
if self.parameters:
# TODO(nnorwitz): parameters are tokens, do name comparison.
for p in self.parameters:
if p.name == node.name:
return True
# TODO(nnorwitz): search in body too.
return False
def __str__(self):
# TODO(nnorwitz): add templated_types.
suffix = ('%s %s(%s), 0x%02x, %s' %
(self.return_type, self.name, self.parameters,
self.modifiers, self.body))
return self._TypeStringHelper(suffix)
class Method(Function):
def __init__(self, start, end, name, in_class, return_type, parameters,
modifiers, templated_types, body, namespace):
Function.__init__(self, start, end, name, return_type, parameters,
modifiers, templated_types, body, namespace)
# TODO(nnorwitz): in_class could also be a namespace which can
# mess up finding functions properly.
self.in_class = in_class
class Type(_GenericDeclaration):
"""Type used for any variable (eg class, primitive, struct, etc)."""
def __init__(self, start, end, name, templated_types, modifiers,
reference, pointer, array):
"""
Args:
name: str name of main type
templated_types: [Class (Type?)] template type info between <>
modifiers: [str] type modifiers (keywords) eg, const, mutable, etc.
reference, pointer, array: bools
"""
_GenericDeclaration.__init__(self, start, end, name, [])
self.templated_types = templated_types
if not name and modifiers:
self.name = modifiers.pop()
self.modifiers = modifiers
self.reference = reference
self.pointer = pointer
self.array = array
def __str__(self):
prefix = ''
if self.modifiers:
prefix = ' '.join(self.modifiers) + ' '
name = str(self.name)
if self.templated_types:
name += '<%s>' % self.templated_types
suffix = prefix + name
if self.reference:
suffix += '&'
if self.pointer:
suffix += '*'
if self.array:
suffix += '[]'
return self._TypeStringHelper(suffix)
# By definition, Is* are always False. A Type can only exist in
# some sort of variable declaration, parameter, or return value.
def IsDeclaration(self):
return False
def IsDefinition(self):
return False
def IsExportable(self):
return False
class TypeConverter(object):
def __init__(self, namespace_stack):
self.namespace_stack = namespace_stack
def _GetTemplateEnd(self, tokens, start):
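    # Scan forward from 'start', counting nested '<'/'>' pairs, and return
    # (tokens inside the template arguments, index just past the closing '>').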
count = 1
end = start
while 1:
token = tokens[end]
end += 1
if token.name == '<':
count += 1
elif token.name == '>':
count -= 1
if count == 0:
break
return tokens[start:end-1], end
def ToType(self, tokens):
"""Convert [Token,...] to [Class(...), ] useful for base classes.
For example, code like class Foo : public Bar<x, y> { ... };
the "Bar<x, y>" portion gets converted to an AST.
Returns:
[Class(...), ...]
"""
result = []
name_tokens = []
reference = pointer = array = False
def AddType(templated_types):
# Partition tokens into name and modifier tokens.
names = []
modifiers = []
for t in name_tokens:
if keywords.IsKeyword(t.name):
modifiers.append(t.name)
else:
names.append(t.name)
name = ''.join(names)
if name_tokens:
result.append(Type(name_tokens[0].start, name_tokens[-1].end,
name, templated_types, modifiers,
reference, pointer, array))
del name_tokens[:]
i = 0
end = len(tokens)
while i < end:
token = tokens[i]
if token.name == '<':
new_tokens, new_end = self._GetTemplateEnd(tokens, i+1)
AddType(self.ToType(new_tokens))
# If there is a comma after the template, we need to consume
# that here otherwise it becomes part of the name.
i = new_end
reference = pointer = array = False
elif token.name == ',':
AddType([])
reference = pointer = array = False
elif token.name == '*':
pointer = True
elif token.name == '&':
reference = True
elif token.name == '[':
pointer = True
elif token.name == ']':
pass
else:
name_tokens.append(token)
i += 1
if name_tokens:
# No '<' in the tokens, just a simple name and no template.
AddType([])
return result
def DeclarationToParts(self, parts, needs_name_removed):
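    # Split a declaration's tokens into (name, type name, templated types,
    # modifiers, default value, other tokens); when needs_name_removed is set,
    # the declared name and any '=' default are stripped from 'parts' first.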
name = None
default = []
if needs_name_removed:
# Handle default (initial) values properly.
for i, t in enumerate(parts):
if t.name == '=':
default = parts[i+1:]
name = parts[i-1].name
if name == ']' and parts[i-2].name == '[':
name = parts[i-3].name
i -= 1
parts = parts[:i-1]
break
else:
if parts[-1].token_type == tokenize.NAME:
name = parts.pop().name
else:
# TODO(nnorwitz): this is a hack that happens for code like
# Register(Foo<T>); where it thinks this is a function call
# but it's actually a declaration.
name = '???'
modifiers = []
type_name = []
other_tokens = []
templated_types = []
i = 0
end = len(parts)
while i < end:
p = parts[i]
if keywords.IsKeyword(p.name):
modifiers.append(p.name)
elif p.name == '<':
templated_tokens, new_end = self._GetTemplateEnd(parts, i+1)
templated_types = self.ToType(templated_tokens)
i = new_end - 1
# Don't add a spurious :: to data members being initialized.
next_index = i + 1
if next_index < end and parts[next_index].name == '::':
i += 1
elif p.name in ('[', ']', '='):
# These are handled elsewhere.
other_tokens.append(p)
elif p.name not in ('*', '&', '>'):
# Ensure that names have a space between them.
if (type_name and type_name[-1].token_type == tokenize.NAME and
p.token_type == tokenize.NAME):
type_name.append(tokenize.Token(tokenize.SYNTAX, ' ', 0, 0))
type_name.append(p)
else:
other_tokens.append(p)
i += 1
type_name = ''.join([t.name for t in type_name])
return name, type_name, templated_types, modifiers, default, other_tokens
def ToParameters(self, tokens):
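    # Walk a comma-separated parameter list, tracking '<'/'>' nesting so that
    # commas inside template arguments do not split parameters, and build a
    # Parameter node for each entry.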
if not tokens:
return []
result = []
name = type_name = ''
type_modifiers = []
pointer = reference = array = False
first_token = None
default = []
def AddParameter(end):
if default:
del default[0] # Remove flag.
parts = self.DeclarationToParts(type_modifiers, True)
(name, type_name, templated_types, modifiers,
unused_default, unused_other_tokens) = parts
parameter_type = Type(first_token.start, first_token.end,
type_name, templated_types, modifiers,
reference, pointer, array)
p = Parameter(first_token.start, end, name,
parameter_type, default)
result.append(p)
template_count = 0
for s in tokens:
if not first_token:
first_token = s
if s.name == '<':
template_count += 1
elif s.name == '>':
template_count -= 1
if template_count > 0:
type_modifiers.append(s)
continue
if s.name == ',':
AddParameter(s.start)
name = type_name = ''
type_modifiers = []
pointer = reference = array = False
first_token = None
default = []
elif s.name == '*':
pointer = True
elif s.name == '&':
reference = True
elif s.name == '[':
array = True
elif s.name == ']':
pass # Just don't add to type_modifiers.
elif s.name == '=':
# Got a default value. Add any value (None) as a flag.
default.append(None)
elif default:
default.append(s)
else:
type_modifiers.append(s)
AddParameter(tokens[-1].end)
return result
def CreateReturnType(self, return_type_seq):
if not return_type_seq:
return None
start = return_type_seq[0].start
end = return_type_seq[-1].end
_, name, templated_types, modifiers, default, other_tokens = \
self.DeclarationToParts(return_type_seq, False)
names = [n.name for n in other_tokens]
reference = '&' in names
pointer = '*' in names
array = '[' in names
return Type(start, end, name, templated_types, modifiers,
reference, pointer, array)
def GetTemplateIndices(self, names):
# names is a list of strings.
start = names.index('<')
end = len(names) - 1
while end > 0:
if names[end] == '>':
break
end -= 1
return start, end+1
class AstBuilder(object):
def __init__(self, token_stream, filename, in_class='', visibility=None,
namespace_stack=[]):
self.tokens = token_stream
self.filename = filename
# TODO(nnorwitz): use a better data structure (deque) for the queue.
# Switching directions of the "queue" improved perf by about 25%.
# Using a deque should be even better since we access from both sides.
self.token_queue = []
self.namespace_stack = namespace_stack[:]
self.in_class = in_class
if in_class is None:
self.in_class_name_only = None
else:
self.in_class_name_only = in_class.split('::')[-1]
self.visibility = visibility
self.in_function = False
self.current_token = None
# Keep the state whether we are currently handling a typedef or not.
self._handling_typedef = False
self.converter = TypeConverter(self.namespace_stack)
def HandleError(self, msg, token):
printable_queue = list(reversed(self.token_queue[-20:]))
sys.stderr.write('Got %s in %s @ %s %s\n' %
(msg, self.filename, token, printable_queue))
def Generate(self):
while 1:
token = self._GetNextToken()
if not token:
break
# Get the next token.
self.current_token = token
# Dispatch on the next token type.
if token.token_type == _INTERNAL_TOKEN:
if token.name == _NAMESPACE_POP:
self.namespace_stack.pop()
continue
try:
result = self._GenerateOne(token)
if result is not None:
yield result
except:
self.HandleError('exception', token)
raise
def _CreateVariable(self, pos_token, name, type_name, type_modifiers,
ref_pointer_name_seq, templated_types, value=None):
reference = '&' in ref_pointer_name_seq
pointer = '*' in ref_pointer_name_seq
array = '[' in ref_pointer_name_seq
var_type = Type(pos_token.start, pos_token.end, type_name,
templated_types, type_modifiers,
reference, pointer, array)
return VariableDeclaration(pos_token.start, pos_token.end,
name, var_type, value, self.namespace_stack)
def _GenerateOne(self, token):
if token.token_type == tokenize.NAME:
if (keywords.IsKeyword(token.name) and
not keywords.IsBuiltinType(token.name)):
method = getattr(self, 'handle_' + token.name)
return method()
elif token.name == self.in_class_name_only:
# The token name is the same as the class, must be a ctor if
# there is a paren. Otherwise, it's the return type.
# Peek ahead to get the next token to figure out which.
next = self._GetNextToken()
self._AddBackToken(next)
if next.token_type == tokenize.SYNTAX and next.name == '(':
return self._GetMethod([token], FUNCTION_CTOR, None, True)
# Fall through--handle like any other method.
# Handle data or function declaration/definition.
syntax = tokenize.SYNTAX
temp_tokens, last_token = \
self._GetVarTokensUpTo(syntax, '(', ';', '{', '[')
temp_tokens.insert(0, token)
if last_token.name == '(':
# If there is an assignment before the paren,
# this is an expression, not a method.
expr = bool([e for e in temp_tokens if e.name == '='])
if expr:
new_temp = self._GetTokensUpTo(tokenize.SYNTAX, ';')
temp_tokens.append(last_token)
temp_tokens.extend(new_temp)
last_token = tokenize.Token(tokenize.SYNTAX, ';', 0, 0)
if last_token.name == '[':
# Handle array, this isn't a method, unless it's an operator.
# TODO(nnorwitz): keep the size somewhere.
# unused_size = self._GetTokensUpTo(tokenize.SYNTAX, ']')
temp_tokens.append(last_token)
if temp_tokens[-2].name == 'operator':
temp_tokens.append(self._GetNextToken())
else:
temp_tokens2, last_token = \
self._GetVarTokensUpTo(tokenize.SYNTAX, ';')
temp_tokens.extend(temp_tokens2)
if last_token.name == ';':
# Handle data, this isn't a method.
parts = self.converter.DeclarationToParts(temp_tokens, True)
(name, type_name, templated_types, modifiers, default,
unused_other_tokens) = parts
t0 = temp_tokens[0]
names = [t.name for t in temp_tokens]
if templated_types:
start, end = self.converter.GetTemplateIndices(names)
names = names[:start] + names[end:]
default = ''.join([t.name for t in default])
return self._CreateVariable(t0, name, type_name, modifiers,
names, templated_types, default)
if last_token.name == '{':
self._AddBackTokens(temp_tokens[1:])
self._AddBackToken(last_token)
method_name = temp_tokens[0].name
method = getattr(self, 'handle_' + method_name, None)
if not method:
# Must be declaring a variable.
# TODO(nnorwitz): handle the declaration.
return None
return method()
return self._GetMethod(temp_tokens, 0, None, False)
elif token.token_type == tokenize.SYNTAX:
if token.name == '~' and self.in_class:
# Must be a dtor (probably not in method body).
token = self._GetNextToken()
# self.in_class can contain A::Name, but the dtor will only
# be Name. Make sure to compare against the right value.
if (token.token_type == tokenize.NAME and
token.name == self.in_class_name_only):
return self._GetMethod([token], FUNCTION_DTOR, None, True)
# TODO(nnorwitz): handle a lot more syntax.
elif token.token_type == tokenize.PREPROCESSOR:
# TODO(nnorwitz): handle more preprocessor directives.
# token starts with a #, so remove it and strip whitespace.
name = token.name[1:].lstrip()
if name.startswith('include'):
# Remove "include".
name = name[7:].strip()
assert name
# Handle #include \<newline> "header-on-second-line.h".
if name.startswith('\\'):
name = name[1:].strip()
assert name[0] in '<"', token
assert name[-1] in '>"', token
system = name[0] == '<'
filename = name[1:-1]
return Include(token.start, token.end, filename, system)
if name.startswith('define'):
# Remove "define".
name = name[6:].strip()
assert name
value = ''
for i, c in enumerate(name):
if c.isspace():
value = name[i:].lstrip()
name = name[:i]
break
return Define(token.start, token.end, name, value)
if name.startswith('if') and name[2:3].isspace():
condition = name[3:].strip()
if condition.startswith('0') or condition.startswith('(0)'):
self._SkipIf0Blocks()
return None
def _GetTokensUpTo(self, expected_token_type, expected_token):
return self._GetVarTokensUpTo(expected_token_type, expected_token)[0]
def _GetVarTokensUpTo(self, expected_token_type, *expected_tokens):
last_token = self._GetNextToken()
tokens = []
while (last_token.token_type != expected_token_type or
last_token.name not in expected_tokens):
tokens.append(last_token)
last_token = self._GetNextToken()
return tokens, last_token
# TODO(nnorwitz): remove _IgnoreUpTo() it shouldn't be necessary.
def _IgnoreUpTo(self, token_type, token):
unused_tokens = self._GetTokensUpTo(token_type, token)
def _SkipIf0Blocks(self):
count = 1
while 1:
token = self._GetNextToken()
if token.token_type != tokenize.PREPROCESSOR:
continue
name = token.name[1:].lstrip()
if name.startswith('endif'):
count -= 1
if count == 0:
break
elif name.startswith('if'):
count += 1
def _GetMatchingChar(self, open_paren, close_paren, GetNextToken=None):
if GetNextToken is None:
GetNextToken = self._GetNextToken
# Assumes the current token is open_paren and we will consume
# and return up to the close_paren.
count = 1
token = GetNextToken()
while 1:
if token.token_type == tokenize.SYNTAX:
if token.name == open_paren:
count += 1
elif token.name == close_paren:
count -= 1
if count == 0:
break
yield token
token = GetNextToken()
yield token
def _GetParameters(self):
return self._GetMatchingChar('(', ')')
def GetScope(self):
return self._GetMatchingChar('{', '}')
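  # Illustrative note (assumed behaviour): GetScope() yields every token of the
  # current block *including* the matching closing '}', which is why callers
  # below do `del body[-1]` to drop the trailing brace.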
def _GetNextToken(self):
if self.token_queue:
return self.token_queue.pop()
return next(self.tokens)
def _AddBackToken(self, token):
if token.whence == tokenize.WHENCE_STREAM:
token.whence = tokenize.WHENCE_QUEUE
self.token_queue.insert(0, token)
else:
assert token.whence == tokenize.WHENCE_QUEUE, token
self.token_queue.append(token)
def _AddBackTokens(self, tokens):
if tokens:
if tokens[-1].whence == tokenize.WHENCE_STREAM:
for token in tokens:
token.whence = tokenize.WHENCE_QUEUE
self.token_queue[:0] = reversed(tokens)
else:
assert tokens[-1].whence == tokenize.WHENCE_QUEUE, tokens
self.token_queue.extend(reversed(tokens))
def GetName(self, seq=None):
"""Returns ([tokens], next_token_info)."""
GetNextToken = self._GetNextToken
if seq is not None:
it = iter(seq)
GetNextToken = lambda: next(it)
next_token = GetNextToken()
tokens = []
last_token_was_name = False
while (next_token.token_type == tokenize.NAME or
(next_token.token_type == tokenize.SYNTAX and
next_token.name in ('::', '<'))):
# Two NAMEs in a row means the identifier should terminate.
# It's probably some sort of variable declaration.
if last_token_was_name and next_token.token_type == tokenize.NAME:
break
last_token_was_name = next_token.token_type == tokenize.NAME
tokens.append(next_token)
# Handle templated names.
if next_token.name == '<':
tokens.extend(self._GetMatchingChar('<', '>', GetNextToken))
last_token_was_name = True
next_token = GetNextToken()
return tokens, next_token
def GetMethod(self, modifiers, templated_types):
return_type_and_name = self._GetTokensUpTo(tokenize.SYNTAX, '(')
assert len(return_type_and_name) >= 1
return self._GetMethod(return_type_and_name, modifiers, templated_types,
False)
def _GetMethod(self, return_type_and_name, modifiers, templated_types,
get_paren):
template_portion = None
if get_paren:
token = self._GetNextToken()
assert token.token_type == tokenize.SYNTAX, token
if token.name == '<':
# Handle templatized dtors.
template_portion = [token]
template_portion.extend(self._GetMatchingChar('<', '>'))
token = self._GetNextToken()
assert token.token_type == tokenize.SYNTAX, token
assert token.name == '(', token
name = return_type_and_name.pop()
# Handle templatized ctors.
if name.name == '>':
index = 1
while return_type_and_name[index].name != '<':
index += 1
template_portion = return_type_and_name[index:] + [name]
del return_type_and_name[index:]
name = return_type_and_name.pop()
elif name.name == ']':
rt = return_type_and_name
assert rt[-1].name == '[', return_type_and_name
assert rt[-2].name == 'operator', return_type_and_name
name_seq = return_type_and_name[-2:]
del return_type_and_name[-2:]
name = tokenize.Token(tokenize.NAME, 'operator[]',
name_seq[0].start, name.end)
# Get the open paren so _GetParameters() below works.
unused_open_paren = self._GetNextToken()
# TODO(nnorwitz): store template_portion.
return_type = return_type_and_name
indices = name
if return_type:
indices = return_type[0]
# Force ctor for templatized ctors.
if name.name == self.in_class and not modifiers:
modifiers |= FUNCTION_CTOR
parameters = list(self._GetParameters())
del parameters[-1] # Remove trailing ')'.
# Handling operator() is especially weird.
if name.name == 'operator' and not parameters:
token = self._GetNextToken()
assert token.name == '(', token
parameters = list(self._GetParameters())
del parameters[-1] # Remove trailing ')'.
token = self._GetNextToken()
while token.token_type == tokenize.NAME:
modifier_token = token
token = self._GetNextToken()
if modifier_token.name == 'const':
modifiers |= FUNCTION_CONST
elif modifier_token.name == '__attribute__':
# TODO(nnorwitz): handle more __attribute__ details.
modifiers |= FUNCTION_ATTRIBUTE
assert token.name == '(', token
# Consume everything between the (parens).
unused_tokens = list(self._GetMatchingChar('(', ')'))
token = self._GetNextToken()
elif modifier_token.name == 'throw':
modifiers |= FUNCTION_THROW
assert token.name == '(', token
# Consume everything between the (parens).
unused_tokens = list(self._GetMatchingChar('(', ')'))
token = self._GetNextToken()
elif modifier_token.name == 'override':
modifiers |= FUNCTION_OVERRIDE
elif modifier_token.name == modifier_token.name.upper():
# HACK(nnorwitz): assume that all upper-case names
# are some macro we aren't expanding.
modifiers |= FUNCTION_UNKNOWN_ANNOTATION
else:
self.HandleError('unexpected token', modifier_token)
assert token.token_type == tokenize.SYNTAX, token
# Handle ctor initializers.
if token.name == ':':
# TODO(nnorwitz): anything else to handle for initializer list?
while token.name != ';' and token.name != '{':
token = self._GetNextToken()
# Handle pointer to functions that are really data but look
# like method declarations.
if token.name == '(':
if parameters[0].name == '*':
# name contains the return type.
name = parameters.pop()
# parameters contains the name of the data.
modifiers = [p.name for p in parameters]
# Already at the ( to open the parameter list.
function_parameters = list(self._GetMatchingChar('(', ')'))
del function_parameters[-1] # Remove trailing ')'.
# TODO(nnorwitz): store the function_parameters.
token = self._GetNextToken()
assert token.token_type == tokenize.SYNTAX, token
assert token.name == ';', token
return self._CreateVariable(indices, name.name, indices.name,
modifiers, '', None)
# At this point, we got something like:
# return_type (type::*name_)(params);
# This is a data member called name_ that is a function pointer.
# With this code: void (sq_type::*field_)(string&);
# We get: name=void return_type=[] parameters=sq_type ... field_
# TODO(nnorwitz): is return_type always empty?
# TODO(nnorwitz): this isn't even close to being correct.
# Just put in something so we don't crash and can move on.
real_name = parameters[-1]
modifiers = [p.name for p in self._GetParameters()]
del modifiers[-1] # Remove trailing ')'.
return self._CreateVariable(indices, real_name.name, indices.name,
modifiers, '', None)
if token.name == '{':
body = list(self.GetScope())
del body[-1] # Remove trailing '}'.
else:
body = None
if token.name == '=':
token = self._GetNextToken()
if token.name == 'default' or token.name == 'delete':
# Ignore explicitly defaulted and deleted special members
# in C++11.
token = self._GetNextToken()
else:
# Handle pure-virtual declarations.
assert token.token_type == tokenize.CONSTANT, token
assert token.name == '0', token
modifiers |= FUNCTION_PURE_VIRTUAL
token = self._GetNextToken()
if token.name == '[':
# TODO(nnorwitz): store tokens and improve parsing.
# template <typename T, size_t N> char (&ASH(T (&seq)[N]))[N];
tokens = list(self._GetMatchingChar('[', ']'))
token = self._GetNextToken()
assert token.name == ';', (token, return_type_and_name, parameters)
# Looks like we got a method, not a function.
if len(return_type) > 2 and return_type[-1].name == '::':
return_type, in_class = \
self._GetReturnTypeAndClassName(return_type)
return Method(indices.start, indices.end, name.name, in_class,
return_type, parameters, modifiers, templated_types,
body, self.namespace_stack)
return Function(indices.start, indices.end, name.name, return_type,
parameters, modifiers, templated_types, body,
self.namespace_stack)
def _GetReturnTypeAndClassName(self, token_seq):
# Splitting the return type from the class name in a method
# can be tricky. For example, Return::Type::Is::Hard::To::Find().
# Where is the return type and where is the class name?
# The heuristic used is to pull the last name as the class name.
# This includes all the templated type info.
# TODO(nnorwitz): if there is only One name like in the
# example above, punt and assume the last bit is the class name.
# Ignore a :: prefix, if exists so we can find the first real name.
i = 0
if token_seq[0].name == '::':
i = 1
# Ignore a :: suffix, if exists.
end = len(token_seq) - 1
if token_seq[end-1].name == '::':
end -= 1
    # Make a copy of the sequence so we can append a sentinel
    # value. This is required because GetName has to have some
    # terminating condition beyond the last name.
seq_copy = token_seq[i:end]
seq_copy.append(tokenize.Token(tokenize.SYNTAX, '', 0, 0))
names = []
while i < end:
# Iterate through the sequence parsing out each name.
new_name, next = self.GetName(seq_copy[i:])
assert new_name, 'Got empty new_name, next=%s' % next
# We got a pointer or ref. Add it to the name.
if next and next.token_type == tokenize.SYNTAX:
new_name.append(next)
names.append(new_name)
i += len(new_name)
# Now that we have the names, it's time to undo what we did.
# Remove the sentinel value.
names[-1].pop()
# Flatten the token sequence for the return type.
return_type = [e for seq in names[:-1] for e in seq]
# The class name is the last name.
class_name = names[-1]
return return_type, class_name
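  # Illustrative example (assumed behaviour of the heuristic above): for the
  # tokens of "Return::Type::Is::Hard::To::Find", the class name comes out as
  # the tokens for 'Find' and the return type as the flattened tokens for
  # 'Return::Type::Is::Hard::To::'.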
def handle_bool(self):
pass
def handle_char(self):
pass
def handle_int(self):
pass
def handle_long(self):
pass
def handle_short(self):
pass
def handle_double(self):
pass
def handle_float(self):
pass
def handle_void(self):
pass
def handle_wchar_t(self):
pass
def handle_unsigned(self):
pass
def handle_signed(self):
pass
def _GetNestedType(self, ctor):
name = None
name_tokens, token = self.GetName()
if name_tokens:
name = ''.join([t.name for t in name_tokens])
# Handle forward declarations.
if token.token_type == tokenize.SYNTAX and token.name == ';':
return ctor(token.start, token.end, name, None,
self.namespace_stack)
if token.token_type == tokenize.NAME and self._handling_typedef:
self._AddBackToken(token)
return ctor(token.start, token.end, name, None,
self.namespace_stack)
# Must be the type declaration.
fields = list(self._GetMatchingChar('{', '}'))
del fields[-1] # Remove trailing '}'.
if token.token_type == tokenize.SYNTAX and token.name == '{':
next = self._GetNextToken()
new_type = ctor(token.start, token.end, name, fields,
self.namespace_stack)
# A name means this is an anonymous type and the name
# is the variable declaration.
if next.token_type != tokenize.NAME:
return new_type
name = new_type
token = next
# Must be variable declaration using the type prefixed with keyword.
assert token.token_type == tokenize.NAME, token
return self._CreateVariable(token, token.name, name, [], '', None)
def handle_struct(self):
# Special case the handling typedef/aliasing of structs here.
# It would be a pain to handle in the class code.
name_tokens, var_token = self.GetName()
if name_tokens:
next_token = self._GetNextToken()
is_syntax = (var_token.token_type == tokenize.SYNTAX and
var_token.name[0] in '*&')
is_variable = (var_token.token_type == tokenize.NAME and
next_token.name == ';')
variable = var_token
if is_syntax and not is_variable:
variable = next_token
temp = self._GetNextToken()
if temp.token_type == tokenize.SYNTAX and temp.name == '(':
# Handle methods declared to return a struct.
t0 = name_tokens[0]
struct = tokenize.Token(tokenize.NAME, 'struct',
t0.start-7, t0.start-2)
type_and_name = [struct]
type_and_name.extend(name_tokens)
type_and_name.extend((var_token, next_token))
return self._GetMethod(type_and_name, 0, None, False)
assert temp.name == ';', (temp, name_tokens, var_token)
if is_syntax or (is_variable and not self._handling_typedef):
modifiers = ['struct']
type_name = ''.join([t.name for t in name_tokens])
position = name_tokens[0]
return self._CreateVariable(position, variable.name, type_name,
modifiers, var_token.name, None)
name_tokens.extend((var_token, next_token))
self._AddBackTokens(name_tokens)
else:
self._AddBackToken(var_token)
return self._GetClass(Struct, VISIBILITY_PUBLIC, None)
def handle_union(self):
return self._GetNestedType(Union)
def handle_enum(self):
return self._GetNestedType(Enum)
def handle_auto(self):
# TODO(nnorwitz): warn about using auto? Probably not since it
# will be reclaimed and useful for C++0x.
pass
def handle_register(self):
pass
def handle_const(self):
pass
def handle_inline(self):
pass
def handle_extern(self):
pass
def handle_static(self):
pass
def handle_virtual(self):
# What follows must be a method.
token = token2 = self._GetNextToken()
if token.name == 'inline':
# HACK(nnorwitz): handle inline dtors by ignoring 'inline'.
token2 = self._GetNextToken()
if token2.token_type == tokenize.SYNTAX and token2.name == '~':
return self.GetMethod(FUNCTION_VIRTUAL + FUNCTION_DTOR, None)
assert token.token_type == tokenize.NAME or token.name == '::', token
return_type_and_name = self._GetTokensUpTo(tokenize.SYNTAX, '(') # )
return_type_and_name.insert(0, token)
if token2 is not token:
return_type_and_name.insert(1, token2)
return self._GetMethod(return_type_and_name, FUNCTION_VIRTUAL,
None, False)
def handle_volatile(self):
pass
def handle_mutable(self):
pass
def handle_public(self):
assert self.in_class
self.visibility = VISIBILITY_PUBLIC
def handle_protected(self):
assert self.in_class
self.visibility = VISIBILITY_PROTECTED
def handle_private(self):
assert self.in_class
self.visibility = VISIBILITY_PRIVATE
def handle_friend(self):
tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';')
assert tokens
t0 = tokens[0]
return Friend(t0.start, t0.end, tokens, self.namespace_stack)
def handle_static_cast(self):
pass
def handle_const_cast(self):
pass
def handle_dynamic_cast(self):
pass
def handle_reinterpret_cast(self):
pass
def handle_new(self):
pass
def handle_delete(self):
tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';')
assert tokens
return Delete(tokens[0].start, tokens[0].end, tokens)
def handle_typedef(self):
token = self._GetNextToken()
if (token.token_type == tokenize.NAME and
keywords.IsKeyword(token.name)):
# Token must be struct/enum/union/class.
method = getattr(self, 'handle_' + token.name)
self._handling_typedef = True
tokens = [method()]
self._handling_typedef = False
else:
tokens = [token]
# Get the remainder of the typedef up to the semi-colon.
tokens.extend(self._GetTokensUpTo(tokenize.SYNTAX, ';'))
# TODO(nnorwitz): clean all this up.
assert tokens
name = tokens.pop()
indices = name
if tokens:
indices = tokens[0]
if not indices:
indices = token
if name.name == ')':
# HACK(nnorwitz): Handle pointers to functions "properly".
if (len(tokens) >= 4 and
tokens[1].name == '(' and tokens[2].name == '*'):
tokens.append(name)
name = tokens[3]
elif name.name == ']':
# HACK(nnorwitz): Handle arrays properly.
if len(tokens) >= 2:
tokens.append(name)
name = tokens[1]
new_type = tokens
if tokens and isinstance(tokens[0], tokenize.Token):
new_type = self.converter.ToType(tokens)[0]
return Typedef(indices.start, indices.end, name.name,
new_type, self.namespace_stack)
def handle_typeid(self):
pass # Not needed yet.
def handle_typename(self):
pass # Not needed yet.
def _GetTemplatedTypes(self):
result = {}
tokens = list(self._GetMatchingChar('<', '>'))
len_tokens = len(tokens) - 1 # Ignore trailing '>'.
i = 0
while i < len_tokens:
key = tokens[i].name
i += 1
if keywords.IsKeyword(key) or key == ',':
continue
type_name = default = None
if i < len_tokens:
i += 1
if tokens[i-1].name == '=':
assert i < len_tokens, '%s %s' % (i, tokens)
default, unused_next_token = self.GetName(tokens[i:])
i += len(default)
else:
if tokens[i-1].name != ',':
# We got something like: Type variable.
# Re-adjust the key (variable) and type_name (Type).
key = tokens[i-1].name
type_name = tokens[i-2]
result[key] = (type_name, default)
return result
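  # Illustrative example (assumed behaviour): for a declaration such as
  #   template <typename T, typename U = int>
  # the tokens between '<' and '>' map to roughly
  #   {'T': (None, None), 'U': (None, <tokens for 'int'>)}
  # i.e. parameter name -> (type, default), with keywords like 'typename' skipped.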
def handle_template(self):
token = self._GetNextToken()
assert token.token_type == tokenize.SYNTAX, token
assert token.name == '<', token
templated_types = self._GetTemplatedTypes()
# TODO(nnorwitz): for now, just ignore the template params.
token = self._GetNextToken()
if token.token_type == tokenize.NAME:
if token.name == 'class':
return self._GetClass(Class, VISIBILITY_PRIVATE, templated_types)
elif token.name == 'struct':
return self._GetClass(Struct, VISIBILITY_PUBLIC, templated_types)
elif token.name == 'friend':
return self.handle_friend()
self._AddBackToken(token)
tokens, last = self._GetVarTokensUpTo(tokenize.SYNTAX, '(', ';')
tokens.append(last)
self._AddBackTokens(tokens)
if last.name == '(':
return self.GetMethod(FUNCTION_NONE, templated_types)
# Must be a variable definition.
return None
def handle_true(self):
pass # Nothing to do.
def handle_false(self):
pass # Nothing to do.
def handle_asm(self):
pass # Not needed yet.
def handle_class(self):
return self._GetClass(Class, VISIBILITY_PRIVATE, None)
def _GetBases(self):
# Get base classes.
bases = []
while 1:
token = self._GetNextToken()
assert token.token_type == tokenize.NAME, token
# TODO(nnorwitz): store kind of inheritance...maybe.
if token.name not in ('public', 'protected', 'private'):
# If inheritance type is not specified, it is private.
# Just put the token back so we can form a name.
# TODO(nnorwitz): it would be good to warn about this.
self._AddBackToken(token)
else:
# Check for virtual inheritance.
token = self._GetNextToken()
if token.name != 'virtual':
self._AddBackToken(token)
else:
# TODO(nnorwitz): store that we got virtual for this base.
pass
base, next_token = self.GetName()
bases_ast = self.converter.ToType(base)
assert len(bases_ast) == 1, bases_ast
bases.append(bases_ast[0])
assert next_token.token_type == tokenize.SYNTAX, next_token
if next_token.name == '{':
token = next_token
break
# Support multiple inheritance.
assert next_token.name == ',', next_token
return bases, token
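  # Illustrative example (assumed behaviour): for
  #   class Foo : public Bar, protected Baz {
  # the loop above returns the Types for 'Bar' and 'Baz' as bases, together
  # with the opening '{' token.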
def _GetClass(self, class_type, visibility, templated_types):
class_name = None
class_token = self._GetNextToken()
if class_token.token_type != tokenize.NAME:
assert class_token.token_type == tokenize.SYNTAX, class_token
token = class_token
else:
# Skip any macro (e.g. storage class specifiers) after the
# 'class' keyword.
next_token = self._GetNextToken()
if next_token.token_type == tokenize.NAME:
self._AddBackToken(next_token)
else:
self._AddBackTokens([class_token, next_token])
name_tokens, token = self.GetName()
class_name = ''.join([t.name for t in name_tokens])
bases = None
if token.token_type == tokenize.SYNTAX:
if token.name == ';':
# Forward declaration.
return class_type(class_token.start, class_token.end,
class_name, None, templated_types, None,
self.namespace_stack)
if token.name in '*&':
# Inline forward declaration. Could be method or data.
name_token = self._GetNextToken()
next_token = self._GetNextToken()
if next_token.name == ';':
# Handle data
modifiers = ['class']
return self._CreateVariable(class_token, name_token.name,
class_name,
modifiers, token.name, None)
else:
# Assume this is a method.
tokens = (class_token, token, name_token, next_token)
self._AddBackTokens(tokens)
return self.GetMethod(FUNCTION_NONE, None)
if token.name == ':':
bases, token = self._GetBases()
body = None
if token.token_type == tokenize.SYNTAX and token.name == '{':
assert token.token_type == tokenize.SYNTAX, token
assert token.name == '{', token
ast = AstBuilder(self.GetScope(), self.filename, class_name,
visibility, self.namespace_stack)
body = list(ast.Generate())
if not self._handling_typedef:
token = self._GetNextToken()
if token.token_type != tokenize.NAME:
assert token.token_type == tokenize.SYNTAX, token
assert token.name == ';', token
else:
new_class = class_type(class_token.start, class_token.end,
class_name, bases, None,
body, self.namespace_stack)
modifiers = []
return self._CreateVariable(class_token,
token.name, new_class,
modifiers, token.name, None)
else:
if not self._handling_typedef:
self.HandleError('non-typedef token', token)
self._AddBackToken(token)
return class_type(class_token.start, class_token.end, class_name,
bases, templated_types, body, self.namespace_stack)
def handle_namespace(self):
token = self._GetNextToken()
# Support anonymous namespaces.
name = None
if token.token_type == tokenize.NAME:
name = token.name
token = self._GetNextToken()
self.namespace_stack.append(name)
assert token.token_type == tokenize.SYNTAX, token
# Create an internal token that denotes when the namespace is complete.
internal_token = tokenize.Token(_INTERNAL_TOKEN, _NAMESPACE_POP,
None, None)
internal_token.whence = token.whence
if token.name == '=':
# TODO(nnorwitz): handle aliasing namespaces.
name, next_token = self.GetName()
assert next_token.name == ';', next_token
self._AddBackToken(internal_token)
else:
assert token.name == '{', token
tokens = list(self.GetScope())
# Replace the trailing } with the internal namespace pop token.
tokens[-1] = internal_token
# Handle namespace with nothing in it.
self._AddBackTokens(tokens)
return None
def handle_using(self):
tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';')
assert tokens
return Using(tokens[0].start, tokens[0].end, tokens)
def handle_explicit(self):
assert self.in_class
# Nothing much to do.
# TODO(nnorwitz): maybe verify the method name == class name.
# This must be a ctor.
return self.GetMethod(FUNCTION_CTOR, None)
def handle_this(self):
pass # Nothing to do.
def handle_operator(self):
# Pull off the next token(s?) and make that part of the method name.
pass
def handle_sizeof(self):
pass
def handle_case(self):
pass
def handle_switch(self):
pass
def handle_default(self):
token = self._GetNextToken()
assert token.token_type == tokenize.SYNTAX
assert token.name == ':'
def handle_if(self):
pass
def handle_else(self):
pass
def handle_return(self):
tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';')
if not tokens:
return Return(self.current_token.start, self.current_token.end, None)
return Return(tokens[0].start, tokens[0].end, tokens)
def handle_goto(self):
tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';')
assert len(tokens) == 1, str(tokens)
return Goto(tokens[0].start, tokens[0].end, tokens[0].name)
def handle_try(self):
pass # Not needed yet.
def handle_catch(self):
pass # Not needed yet.
def handle_throw(self):
pass # Not needed yet.
def handle_while(self):
pass
def handle_do(self):
pass
def handle_for(self):
pass
def handle_break(self):
self._IgnoreUpTo(tokenize.SYNTAX, ';')
def handle_continue(self):
self._IgnoreUpTo(tokenize.SYNTAX, ';')
def BuilderFromSource(source, filename):
"""Utility method that returns an AstBuilder from source code.
Args:
source: 'C++ source code'
filename: 'file1'
Returns:
AstBuilder
"""
return AstBuilder(tokenize.GetTokens(source), filename)
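# Illustrative usage (a minimal sketch, assuming a C++ source string):
#   builder = BuilderFromSource('class Foo {};', 'foo.h')
#   for node in builder.Generate():
#       print(node)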
def PrintIndentifiers(filename, should_print):
"""Prints all identifiers for a C++ source file.
Args:
filename: 'file1'
should_print: predicate with signature: bool Function(token)
"""
source = utils.ReadFile(filename, False)
if source is None:
sys.stderr.write('Unable to find: %s\n' % filename)
return
#print('Processing %s' % actual_filename)
builder = BuilderFromSource(source, filename)
try:
for node in builder.Generate():
if should_print(node):
print(node.name)
except KeyboardInterrupt:
return
except:
pass
def PrintAllIndentifiers(filenames, should_print):
"""Prints all identifiers for each C++ source file in filenames.
Args:
filenames: ['file1', 'file2', ...]
should_print: predicate with signature: bool Function(token)
"""
for path in filenames:
PrintIndentifiers(path, should_print)
def main(argv):
for filename in argv[1:]:
source = utils.ReadFile(filename)
if source is None:
continue
print('Processing %s' % filename)
builder = BuilderFromSource(source, filename)
try:
entire_ast = filter(None, builder.Generate())
except KeyboardInterrupt:
return
except:
# Already printed a warning, print the traceback and continue.
traceback.print_exc()
else:
if utils.DEBUG:
for ast in entire_ast:
print(ast)
if __name__ == '__main__':
main(sys.argv)
| mit |
loopCM/chromium | build/gyp_helper.py | 57 | 1598 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This file helps gyp_chromium and landmines correctly set up the gyp
# environment from chromium.gyp_env on disk
import os
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
CHROME_SRC = os.path.dirname(SCRIPT_DIR)
def apply_gyp_environment_from_file(file_path):
"""Reads in a *.gyp_env file and applies the valid keys to os.environ."""
if not os.path.exists(file_path):
return
with open(file_path, 'rU') as f:
file_contents = f.read()
try:
file_data = eval(file_contents, {'__builtins__': None}, None)
except SyntaxError, e:
e.filename = os.path.abspath(file_path)
raise
supported_vars = (
'CC',
'CC_wrapper',
'CHROMIUM_GYP_FILE',
'CHROMIUM_GYP_SYNTAX_CHECK',
'CXX',
'CXX_wrapper',
'GYP_DEFINES',
'GYP_GENERATOR_FLAGS',
'GYP_CROSSCOMPILE',
'GYP_GENERATOR_OUTPUT',
'GYP_GENERATORS',
)
for var in supported_vars:
file_val = file_data.get(var)
if file_val:
if var in os.environ:
print 'INFO: Environment value for "%s" overrides value in %s.' % (
var, os.path.abspath(file_path)
)
else:
os.environ[var] = file_val
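# Illustrative example (assumed contents) of a chromium.gyp_env file that
# apply_gyp_environment_from_file() can consume -- a Python dict literal whose
# keys outside supported_vars are simply ignored:
#   { 'GYP_DEFINES': 'component=shared_library', 'GYP_GENERATORS': 'ninja' }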
def apply_chromium_gyp_env():
if 'SKIP_CHROMIUM_GYP_ENV' not in os.environ:
# Update the environment based on chromium.gyp_env
path = os.path.join(os.path.dirname(CHROME_SRC), 'chromium.gyp_env')
apply_gyp_environment_from_file(path)
| bsd-3-clause |
plamut/superdesk | server/apps/legal_archive/service.py | 4 | 6656 | # -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import logging
from eve.versioning import versioned_id_field
from flask import g, current_app as app
from eve.utils import config, ParsedRequest
from .resource import LEGAL_ARCHIVE_NAME
from superdesk import Service, get_resource_privileges
from superdesk.errors import SuperdeskApiError
from superdesk.metadata.item import ITEM_TYPE, GUID_FIELD, CONTENT_TYPE
from superdesk.metadata.packages import GROUPS, RESIDREF, REFS
from superdesk.utils import ListCursor
logger = logging.getLogger(__name__)
class LegalService(Service):
"""
Base Service Class for Legal Archive related services
"""
def on_create(self, docs):
"""
        Overriding to replace the location of each item in the package with the legal archive instead of the archive,
        if the doc is a package.
"""
super().on_create(docs)
for doc in docs:
if ITEM_TYPE in doc:
doc.setdefault(config.ID_FIELD, doc[GUID_FIELD])
if doc[ITEM_TYPE] == CONTENT_TYPE.COMPOSITE:
self._change_location_of_items_in_package(doc)
def on_replace(self, document, original):
"""
        Overriding to replace the location of each item in the package with the legal archive instead of the archive,
        if the doc is a package.
"""
super().on_replace(document, original)
if document.get(ITEM_TYPE) == CONTENT_TYPE.COMPOSITE:
self._change_location_of_items_in_package(document)
def get(self, req, lookup):
"""
Overriding to check if user is authorized to perform get operation on Legal Archive resources. If authorized
then request is forwarded otherwise throws forbidden error.
:return: list of docs matching query in req and lookup
:raises: SuperdeskApiError.forbiddenError() if user is unauthorized to access the Legal Archive resources.
"""
self.check_get_access_privilege()
return super().get(req, lookup)
def find_one(self, req, **lookup):
"""
Overriding to check if user is authorized to perform get operation on Legal Archive resources. If authorized
then request is forwarded otherwise throws forbidden error.
:return: doc if there is one matching the query in req and lookup
:raises: SuperdeskApiError.forbiddenError() if user is unauthorized to access the Legal Archive resources.
"""
self.check_get_access_privilege()
return super().find_one(req, **lookup)
def check_get_access_privilege(self):
"""
Checks if user is authorized to perform get operation on Legal Archive resources. If authorized then request is
forwarded otherwise throws forbidden error.
:raises: SuperdeskApiError.forbiddenError() if user is unauthorized to access the Legal Archive resources.
"""
if not hasattr(g, 'user'):
return
privileges = g.user.get('active_privileges', {})
resource_privileges = get_resource_privileges(self.datasource).get('GET', None)
if privileges.get(resource_privileges, 0) == 0:
raise SuperdeskApiError.forbiddenError()
def enhance(self, legal_archive_docs):
"""
Enhances the item in Legal Archive Service
:param legal_archive_docs:
"""
if isinstance(legal_archive_docs, list):
for legal_archive_doc in legal_archive_docs:
legal_archive_doc['_type'] = LEGAL_ARCHIVE_NAME
else:
legal_archive_docs['_type'] = LEGAL_ARCHIVE_NAME
def _change_location_of_items_in_package(self, package):
"""
Changes location of each item in the package to legal archive instead of archive.
"""
for group in package.get(GROUPS, []):
for ref in group.get(REFS, []):
if RESIDREF in ref:
ref['location'] = LEGAL_ARCHIVE_NAME
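        # Illustrative example (assumed package shape): a ref dict inside
        # package[GROUPS][n][REFS] that carries a RESIDREF key gets its
        # 'location' value rewritten to LEGAL_ARCHIVE_NAME by the loop above.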
class LegalArchiveService(LegalService):
def on_fetched(self, docs):
"""
Overriding this to enhance the published article with the one in archive collection
"""
self.enhance(docs[config.ITEMS])
def on_fetched_item(self, doc):
"""
Overriding this to enhance the published article with the one in archive collection
"""
self.enhance(doc)
class LegalPublishQueueService(LegalService):
def create(self, docs, **kwargs):
"""
        Overriding this to prevent inserting the transmission details again. This happens when an item in a package
        expires at a later point in time. In this case, the call to insert transmission details happens twice: once
        when the package expires and once when the item expires.
"""
ids = []
for doc in docs:
doc_if_exists = self.find_one(req=None, _id=doc['_id'])
if doc_if_exists is None:
ids.extend(super().create([doc]))
return ids
class LegalArchiveVersionsService(LegalService):
def create(self, docs, **kwargs):
"""
        Overriding this to prevent inserting the same version again. This happens when an item is published more than once.
"""
ids = []
for doc in docs:
doc_if_exists = None
if config.ID_FIELD in doc: # This happens when inserting docs from pre-populate command
doc_if_exists = self.find_one(req=None, _id=doc['_id'])
if doc_if_exists is None:
ids.extend(super().create([doc]))
return ids
def get(self, req, lookup):
"""
Version of an article in Legal Archive isn't maintained by Eve. Overriding this to fetch the version history.
"""
resource_def = app.config['DOMAIN'][LEGAL_ARCHIVE_NAME]
id_field = versioned_id_field(resource_def)
if req and req.args and req.args.get(config.ID_FIELD):
version_history = list(super().get_from_mongo(req=ParsedRequest(),
lookup={id_field: req.args.get(config.ID_FIELD)}))
else:
version_history = list(super().get_from_mongo(req=req, lookup=lookup))
for doc in version_history:
doc[config.ID_FIELD] = doc[id_field]
self.enhance(doc)
return ListCursor(version_history)
| agpl-3.0 |
dzz007/photivo | scons-local-2.2.0/SCons/Tool/mwld.py | 14 | 3666 | """SCons.Tool.mwld
Tool-specific initialization for the Metrowerks CodeWarrior linker.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/mwld.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo"
import SCons.Tool
def generate(env):
"""Add Builders and construction variables for lib to an Environment."""
SCons.Tool.createStaticLibBuilder(env)
SCons.Tool.createSharedLibBuilder(env)
SCons.Tool.createProgBuilder(env)
env['AR'] = 'mwld'
env['ARCOM'] = '$AR $ARFLAGS -library -o $TARGET $SOURCES'
env['LIBDIRPREFIX'] = '-L'
env['LIBDIRSUFFIX'] = ''
env['LIBLINKPREFIX'] = '-l'
env['LIBLINKSUFFIX'] = '.lib'
env['LINK'] = 'mwld'
env['LINKCOM'] = '$LINK $LINKFLAGS -o $TARGET $SOURCES $_LIBDIRFLAGS $_LIBFLAGS'
env['SHLINK'] = '$LINK'
env['SHLINKFLAGS'] = '$LINKFLAGS'
env['SHLINKCOM'] = shlib_action
env['SHLIBEMITTER']= shlib_emitter
def exists(env):
import SCons.Tool.mwcc
return SCons.Tool.mwcc.set_vars(env)
def shlib_generator(target, source, env, for_signature):
cmd = ['$SHLINK', '$SHLINKFLAGS', '-shared']
no_import_lib = env.get('no_import_lib', 0)
    if no_import_lib: cmd.extend(['-noimplib'])
dll = env.FindIxes(target, 'SHLIBPREFIX', 'SHLIBSUFFIX')
if dll: cmd.extend(['-o', dll])
implib = env.FindIxes(target, 'LIBPREFIX', 'LIBSUFFIX')
if implib: cmd.extend(['-implib', implib.get_string(for_signature)])
cmd.extend(['$SOURCES', '$_LIBDIRFLAGS', '$_LIBFLAGS'])
return [cmd]
def shlib_emitter(target, source, env):
dll = env.FindIxes(target, 'SHLIBPREFIX', 'SHLIBSUFFIX')
no_import_lib = env.get('no_import_lib', 0)
if not dll:
raise SCons.Errors.UserError("A shared library should have exactly one target with the suffix: %s" % env.subst("$SHLIBSUFFIX"))
if not no_import_lib and \
not env.FindIxes(target, 'LIBPREFIX', 'LIBSUFFIX'):
# Append an import library to the list of targets.
target.append(env.ReplaceIxes(dll,
'SHLIBPREFIX', 'SHLIBSUFFIX',
'LIBPREFIX', 'LIBSUFFIX'))
return target, source
shlib_action = SCons.Action.Action(shlib_generator, generator=1)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| gpl-3.0 |
Lektorium-LLC/edx-platform | openedx/core/lib/token_utils.py | 11 | 4236 | """Utilities for working with ID tokens."""
import json
from time import time
from Cryptodome.PublicKey import RSA
from django.conf import settings
from django.utils.functional import cached_property
from jwkest.jwk import KEYS, RSAKey
from jwkest.jws import JWS
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from student.models import UserProfile, anonymous_id_for_user
class JwtBuilder(object):
"""Utility for building JWTs.
Unifies diverse approaches to JWT creation in a single class. This utility defaults to using the system's
JWT configuration.
NOTE: This utility class will allow you to override the signing key and audience claim to support those
clients which still require this. This approach to JWT creation is DEPRECATED. Avoid doing this for new clients.
Arguments:
user (User): User for which to generate the JWT.
Keyword Arguments:
asymmetric (Boolean): Whether the JWT should be signed with this app's private key.
secret (string): Overrides configured JWT secret (signing) key. Unused if an asymmetric signature is requested.
"""
def __init__(self, user, asymmetric=False, secret=None):
self.user = user
self.asymmetric = asymmetric
self.secret = secret
self.jwt_auth = configuration_helpers.get_value('JWT_AUTH', settings.JWT_AUTH)
def build_token(self, scopes, expires_in=None, aud=None, additional_claims=None):
"""Returns a JWT access token.
Arguments:
scopes (list): Scopes controlling which optional claims are included in the token.
Keyword Arguments:
expires_in (int): Time to token expiry, specified in seconds.
aud (string): Overrides configured JWT audience claim.
additional_claims (dict): Additional claims to include in the token.
Returns:
str: Encoded JWT
"""
now = int(time())
expires_in = expires_in or self.jwt_auth['JWT_EXPIRATION']
payload = {
# TODO Consider getting rid of this claim since we don't use it.
'aud': aud if aud else self.jwt_auth['JWT_AUDIENCE'],
'exp': now + expires_in,
'iat': now,
'iss': self.jwt_auth['JWT_ISSUER'],
'preferred_username': self.user.username,
'scopes': scopes,
'sub': anonymous_id_for_user(self.user, None),
}
if additional_claims:
payload.update(additional_claims)
for scope in scopes:
handler = self.claim_handlers.get(scope)
if handler:
handler(payload)
return self.encode(payload)
@cached_property
def claim_handlers(self):
"""Returns a dictionary mapping scopes to methods that will add claims to the JWT payload."""
return {
'email': self.attach_email_claim,
'profile': self.attach_profile_claim
}
def attach_email_claim(self, payload):
"""Add the email claim details to the JWT payload."""
payload['email'] = self.user.email
def attach_profile_claim(self, payload):
"""Add the profile claim details to the JWT payload."""
try:
# Some users (e.g., service users) may not have user profiles.
name = UserProfile.objects.get(user=self.user).name
except UserProfile.DoesNotExist:
name = None
payload.update({
'name': name,
'family_name': self.user.last_name,
'given_name': self.user.first_name,
'administrator': self.user.is_staff,
})
def encode(self, payload):
"""Encode the provided payload."""
keys = KEYS()
if self.asymmetric:
keys.add(RSAKey(key=RSA.importKey(settings.JWT_PRIVATE_SIGNING_KEY)))
algorithm = 'RS512'
else:
key = self.secret if self.secret else self.jwt_auth['JWT_SECRET_KEY']
keys.add({'key': key, 'kty': 'oct'})
algorithm = self.jwt_auth['JWT_ALGORITHM']
data = json.dumps(payload)
jws = JWS(data, alg=algorithm)
return jws.sign_compact(keys=keys)
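# Illustrative usage (a minimal sketch, assuming a Django user and a configured
# JWT_AUTH setting):
#   token = JwtBuilder(user).build_token(['email', 'profile'], expires_in=3600)
# The result is a signed JWT whose payload carries the optional claims selected
# by the scopes in addition to the standard claims set in build_token().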
| agpl-3.0 |
8u1a/plaso | plaso/parsers/java_idx.py | 3 | 8539 | # -*- coding: utf-8 -*-
"""Parser for Java Cache IDX files."""
# TODO:
# * 6.02 files did not retain IP addresses. However, the
# deploy_resource_codebase header field may contain the host IP.
# This needs to be researched further, as that field may not always
# be present. 6.02 files will currently return 'Unknown'.
import os
import construct
from plaso.events import time_events
from plaso.lib import errors
from plaso.lib import eventdata
from plaso.lib import timelib
from plaso.parsers import interface
from plaso.parsers import manager
class JavaIDXEvent(time_events.TimestampEvent):
"""Convenience class for a Java IDX cache file download event."""
DATA_TYPE = u'java:download:idx'
def __init__(
self, timestamp, timestamp_description, idx_version, url, ip_address):
"""Initializes the event object.
Args:
timestamp: The timestamp value.
timestamp_description: The description of the usage of the time value.
idx_version: Version of IDX file.
url: URL of the downloaded file.
ip_address: IP address of the host in the URL.
"""
super(JavaIDXEvent, self).__init__(timestamp, timestamp_description)
self.idx_version = idx_version
self.url = url
self.ip_address = ip_address
class JavaIDXParser(interface.SingleFileBaseParser):
"""Parse Java WebStart Cache IDX files for download events.
There are five structures defined. 6.02 files had one generic section
that retained all data. From 6.03, the file went to a multi-section
format where later sections were optional and had variable-lengths.
6.03, 6.04, and 6.05 files all have their main data section (#2)
begin at offset 128. The short structure is because 6.05 files
deviate after the 8th byte. So, grab the first 8 bytes to ensure it's
valid, get the file version, then continue on with the correct
structures.
"""
_INITIAL_FILE_OFFSET = None
NAME = u'java_idx'
DESCRIPTION = u'Parser for Java WebStart Cache IDX files.'
IDX_SHORT_STRUCT = construct.Struct(
u'magic',
construct.UBInt8(u'busy'),
construct.UBInt8(u'incomplete'),
construct.UBInt32(u'idx_version'))
IDX_602_STRUCT = construct.Struct(
u'IDX_602_Full',
construct.UBInt16(u'null_space'),
construct.UBInt8(u'shortcut'),
construct.UBInt32(u'content_length'),
construct.UBInt64(u'last_modified_date'),
construct.UBInt64(u'expiration_date'),
construct.PascalString(
u'version_string', length_field=construct.UBInt16(u'length')),
construct.PascalString(
u'url', length_field=construct.UBInt16(u'length')),
construct.PascalString(
u'namespace', length_field=construct.UBInt16(u'length')),
construct.UBInt32(u'FieldCount'))
IDX_605_SECTION_ONE_STRUCT = construct.Struct(
u'IDX_605_Section1',
construct.UBInt8(u'shortcut'),
construct.UBInt32(u'content_length'),
construct.UBInt64(u'last_modified_date'),
construct.UBInt64(u'expiration_date'),
construct.UBInt64(u'validation_date'),
construct.UBInt8(u'signed'),
construct.UBInt32(u'sec2len'),
construct.UBInt32(u'sec3len'),
construct.UBInt32(u'sec4len'))
IDX_605_SECTION_TWO_STRUCT = construct.Struct(
u'IDX_605_Section2',
construct.PascalString(
u'version', length_field=construct.UBInt16(u'length')),
construct.PascalString(
u'url', length_field=construct.UBInt16(u'length')),
construct.PascalString(
u'namespec', length_field=construct.UBInt16(u'length')),
construct.PascalString(
u'ip_address', length_field=construct.UBInt16(u'length')),
construct.UBInt32(u'FieldCount'))
# Java uses Pascal-style strings, but with a 2-byte length field.
JAVA_READUTF_STRING = construct.Struct(
u'Java.ReadUTF',
construct.PascalString(
u'string', length_field=construct.UBInt16(u'length')))
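  # Illustrative example (assumed on-disk layout): a Java modified-UTF string
  # such as "date" is stored as a 2-byte big-endian length followed by the
  # bytes, e.g. 00 04 64 61 74 65, which is what the PascalString/UBInt16
  # construct above parses.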
def ParseFileObject(self, parser_mediator, file_object, **kwargs):
"""Parses a Java WebStart Cache IDX file-like object.
Args:
      parser_mediator: A parser mediator object (instance of ParserMediator).
file_object: A file-like object.
Raises:
UnableToParseFile: when the file cannot be parsed.
"""
file_object.seek(0, os.SEEK_SET)
try:
magic = self.IDX_SHORT_STRUCT.parse_stream(file_object)
except (IOError, construct.FieldError) as exception:
raise errors.UnableToParseFile(
u'Unable to parse Java IDX file with error: {0:s}.'.format(exception))
# Fields magic.busy and magic.incomplete are normally 0x00. They
# are set to 0x01 if the file is currently being downloaded. Logic
# checks for > 1 to avoid a race condition and still reject any
# file with other data.
# Field magic.idx_version is the file version, of which only
# certain versions are supported.
if magic.busy > 1 or magic.incomplete > 1:
raise errors.UnableToParseFile(u'Not a valid Java IDX file')
if not magic.idx_version in [602, 603, 604, 605]:
raise errors.UnableToParseFile(u'Not a valid Java IDX file')
# Obtain the relevant values from the file. The last modified date
# denotes when the file was last modified on the HOST. For example,
# when the file was uploaded to a web server.
if magic.idx_version == 602:
section_one = self.IDX_602_STRUCT.parse_stream(file_object)
last_modified_date = section_one.last_modified_date
url = section_one.url
ip_address = u'Unknown'
http_header_count = section_one.FieldCount
elif magic.idx_version in [603, 604, 605]:
# IDX 6.03 and 6.04 have two unused bytes before the structure.
if magic.idx_version in [603, 604]:
file_object.read(2)
# IDX 6.03, 6.04, and 6.05 files use the same structures for the
# remaining data.
section_one = self.IDX_605_SECTION_ONE_STRUCT.parse_stream(file_object)
last_modified_date = section_one.last_modified_date
if file_object.get_size() > 128:
file_object.seek(128, os.SEEK_SET) # Static offset for section 2.
section_two = self.IDX_605_SECTION_TWO_STRUCT.parse_stream(file_object)
url = section_two.url
ip_address = section_two.ip_address
http_header_count = section_two.FieldCount
else:
url = u'Unknown'
ip_address = u'Unknown'
http_header_count = 0
# File offset is now just prior to HTTP headers. Make sure there
# are headers, and then parse them to retrieve the download date.
download_date = None
for field in range(0, http_header_count):
field = self.JAVA_READUTF_STRING.parse_stream(file_object)
value = self.JAVA_READUTF_STRING.parse_stream(file_object)
if field.string == u'date':
# Time string "should" be in UTC or have an associated time zone
# information in the string itself. If that is not the case then
# there is no reliable method for plaso to determine the proper
# timezone, so the assumption is that it is UTC.
try:
download_date = timelib.Timestamp.FromTimeString(
value.string, gmt_as_timezone=False)
except errors.TimestampError:
download_date = None
parser_mediator.ProduceParseError(
u'Unable to parse time value: {0:s}'.format(value.string))
if not url or not ip_address:
raise errors.UnableToParseFile(
u'Unexpected Error: URL or IP address not found in file.')
last_modified_timestamp = timelib.Timestamp.FromJavaTime(
last_modified_date)
# TODO: Move the timestamp description fields into eventdata.
event_object = JavaIDXEvent(
last_modified_timestamp, u'File Hosted Date', magic.idx_version, url,
ip_address)
parser_mediator.ProduceEvent(event_object)
if section_one:
expiration_date = section_one.get(u'expiration_date', None)
if expiration_date:
expiration_timestamp = timelib.Timestamp.FromJavaTime(expiration_date)
event_object = JavaIDXEvent(
expiration_timestamp, u'File Expiration Date', magic.idx_version,
url, ip_address)
parser_mediator.ProduceEvent(event_object)
if download_date:
event_object = JavaIDXEvent(
download_date, eventdata.EventTimestamp.FILE_DOWNLOADED,
magic.idx_version, url, ip_address)
parser_mediator.ProduceEvent(event_object)
manager.ParsersManager.RegisterParser(JavaIDXParser)
| apache-2.0 |
snyaggarwal/pex | tests/test_environment.py | 5 | 3792 | # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import os
from contextlib import contextmanager
from twitter.common.contextutil import temporary_dir
from pex.compatibility import nested
from pex.environment import PEXEnvironment
from pex.pex_builder import PEXBuilder
from pex.pex_info import PexInfo
from pex.testing import make_bdist, temporary_filename
@contextmanager
def yield_pex_builder(zip_safe=True):
with nested(temporary_dir(), make_bdist('p1', zipped=True, zip_safe=zip_safe)) as (td, p1):
pb = PEXBuilder(path=td)
pb.add_egg(p1.location)
yield pb
def test_force_local():
with nested(yield_pex_builder(), temporary_dir(), temporary_filename()) as (
pb, pex_root, pex_file):
pb.info.pex_root = pex_root
pb.build(pex_file)
code_cache = PEXEnvironment.force_local(pex_file, pb.info)
assert os.path.exists(pb.info.zip_unsafe_cache)
assert len(os.listdir(pb.info.zip_unsafe_cache)) == 1
assert [os.path.basename(code_cache)] == os.listdir(pb.info.zip_unsafe_cache)
assert set(os.listdir(code_cache)) == set([PexInfo.PATH, '__main__.py', '__main__.pyc'])
# idempotence
assert PEXEnvironment.force_local(pex_file, pb.info) == code_cache
def normalize(path):
return os.path.normpath(os.path.realpath(path)).lower()
def test_write_zipped_internal_cache():
# zip_safe pex will not be written to install cache unless always_write_cache
with nested(yield_pex_builder(zip_safe=True), temporary_dir(), temporary_filename()) as (
pb, pex_root, pex_file):
pb.info.pex_root = pex_root
pb.build(pex_file)
existing, new, zip_safe = PEXEnvironment.write_zipped_internal_cache(pex_file, pb.info)
assert len(zip_safe) == 1
assert normalize(zip_safe[0].location).startswith(
normalize(os.path.join(pex_file, pb.info.internal_cache))), (
'loc: %s, cache: %s' % (
normalize(zip_safe[0].location),
normalize(os.path.join(pex_file, pb.info.internal_cache))))
pb.info.always_write_cache = True
existing, new, zip_safe = PEXEnvironment.write_zipped_internal_cache(pex_file, pb.info)
assert len(new) == 1
assert normalize(new[0].location).startswith(normalize(pb.info.install_cache))
# Check that we can read from the cache
existing, new, zip_safe = PEXEnvironment.write_zipped_internal_cache(pex_file, pb.info)
assert len(existing) == 1
assert normalize(existing[0].location).startswith(normalize(pb.info.install_cache))
# non-zip_safe pex will be written to install cache
with nested(yield_pex_builder(zip_safe=False), temporary_dir(), temporary_filename()) as (
pb, pex_root, pex_file):
pb.info.pex_root = pex_root
pb.build(pex_file)
existing, new, zip_safe = PEXEnvironment.write_zipped_internal_cache(pex_file, pb.info)
assert len(new) == 1
assert normalize(new[0].location).startswith(normalize(pb.info.install_cache))
original_location = normalize(new[0].location)
# do the second time to validate idempotence of caching
existing, new, zip_safe = PEXEnvironment.write_zipped_internal_cache(pex_file, pb.info)
assert len(existing) == 1
assert normalize(existing[0].location) == original_location
def test_load_internal_cache_unzipped():
# zip_safe pex will not be written to install cache unless always_write_cache
with nested(yield_pex_builder(zip_safe=True), temporary_dir()) as (pb, pex_root):
pb.info.pex_root = pex_root
pb.freeze()
dists = list(PEXEnvironment.load_internal_cache(pb.path(), pb.info))
assert len(dists) == 1
assert normalize(dists[0].location).startswith(
normalize(os.path.join(pb.path(), pb.info.internal_cache)))
| apache-2.0 |
MIPS/external-chromium_org-third_party-skia | tools/test_pdfs.py | 231 | 1801 | '''
Compares the renderings of serialized SkPictures to expected images.
Launch with --help to see more information.
Copyright 2012 Google Inc.
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file.
'''
# common Python modules
import os
import optparse
import sys
import shutil
import tempfile
import test_rendering
USAGE_STRING = 'Usage: %s input... expectedDir'
HELP_STRING = '''
Takes input SkPicture files and renders them as PDF files, and then compares
those resulting PDF files against PDF files found in expectedDir.
Each instance of "input" can be either a file (name must end in .skp), or a
directory (in which case this script will process all .skp files within the
directory).
'''
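# Illustrative invocation (not part of the original source):
#   python test_pdfs.py pictures/ extra.skp expected_pdfs/
# renders every .skp under pictures/ plus extra.skp to PDF and diffs the
# results against the files found in expected_pdfs/.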
def Main(args):
"""Allow other scripts to call this script with fake command-line args.
@param The commandline argument list
"""
parser = optparse.OptionParser(USAGE_STRING % '%prog' + HELP_STRING)
parser.add_option('--render_dir', dest='render_dir',
help = ('specify the location to output the rendered '
'files. Default is a temp directory.'))
parser.add_option('--diff_dir', dest='diff_dir',
help = ('specify the location to output the diff files. '
'Default is a temp directory.'))
options, arguments = parser.parse_args(args)
  if (len(arguments) < 3):
    print("Expected at least one input and one output folder.")
parser.print_help()
sys.exit(-1)
inputs = arguments[1:-1]
expected_dir = arguments[-1]
test_rendering.TestRenderSkps(inputs, expected_dir, options.render_dir,
options.diff_dir, 'render_pdfs', '')
if __name__ == '__main__':
Main(sys.argv)
| bsd-3-clause |
unix-beard/matasano | set1/detect_single_character_xor/detect_single_character_xor.py | 1 | 1514 | #!/usr/bin/env python3
################################################################################
# The matasano crypto challenges
# http://cryptopals.com/sets/1/challenges/4/
# Set 1 Challenge 4
# Detect single-character XOR
################################################################################
# One of the 60-character strings in the input file has been encrypted
# by single-character XOR. Find it.
# Key: int=53, char='5'
# Message: Now that the party is jumping
#
# NOTE: This implementation is strictly sequential
################################################################################
import sys
import string
def find_key(key, tuple_):
return chr(int(tuple_[0] + tuple_[1], base=16) ^ key)
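# Worked example (illustrative, not from the original source): with the
# recovered key 53, find_key(53, ('7', 'b')) returns chr(0x7b ^ 53) == 'N'.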
def decode_with_key(key, s):
decoded_msg = ''
for t in zip(s[0::2], s[1::2]):
decoded_msg += find_key(key, t)
if len([c for c in decoded_msg if c in string.ascii_letters + ' \n']) == len(decoded_msg):
print('[*] Trying the key: int: {0}, char: {1}'.format(key, chr(key)))
print('Decoded message: {0}'.format(decoded_msg))
def decode(s):
print('Decoding [{0}]'.format(s))
for key in range(0, 256):
decode_with_key(key, s)
def remove_eol(s):
"""Removes trailing '\n' if there is one"""
return s[0:len(s) - 1] if s[len(s) - 1] == '\n' else s
def main():
with open(sys.argv[1], 'r') as f:
for encoded_str in f:
decode(remove_eol(encoded_str))
if __name__ == '__main__':
main()
| mit |
igabr/Metis_Projects_Chicago_2017 | 03-Project-McNulty/web_app/src/flask-lesscss/docs/conf.py | 6 | 6500 | # -*- coding: utf-8 -*-
#
# flask-lesscss documentation build configuration file, created by
# sphinx-quickstart on Tue May 11 18:54:04 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('_themes'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.intersphinx']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'flask-lesscss'
copyright = u'2010, Steve Losh'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.9.1'
# The full version, including alpha/beta/rc tags.
release = '0.9.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
#pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'flask_small'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'flask-lesscssdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'flask-lesscss.tex', u'flask-lesscss Documentation',
u'Steve Losh', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
| mit |
fajoy/nova | nova/api/openstack/urlmap.py | 12 | 10628 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import paste.urlmap
import re
import urllib2
from nova.api.openstack import wsgi
from nova.openstack.common import log as logging
_quoted_string_re = r'"[^"\\]*(?:\\.[^"\\]*)*"'
_option_header_piece_re = re.compile(r';\s*([^\s;=]+|%s)\s*'
r'(?:=\s*([^;]+|%s))?\s*' %
(_quoted_string_re, _quoted_string_re))
LOG = logging.getLogger(__name__)
def unquote_header_value(value):
"""Unquotes a header value.
This does not use the real unquoting but what browsers are actually
using for quoting.
:param value: the header value to unquote.
"""
if value and value[0] == value[-1] == '"':
# this is not the real unquoting, but fixing this so that the
# RFC is met will result in bugs with internet explorer and
# probably some other browsers as well. IE for example is
# uploading files with "C:\foo\bar.txt" as filename
value = value[1:-1]
return value
def parse_list_header(value):
"""Parse lists as described by RFC 2068 Section 2.
In particular, parse comma-separated lists where the elements of
the list may include quoted-strings. A quoted-string could
contain a comma. A non-quoted string could have quotes in the
middle. Quotes are removed automatically after parsing.
The return value is a standard :class:`list`:
>>> parse_list_header('token, "quoted value"')
['token', 'quoted value']
:param value: a string with a list header.
:return: :class:`list`
"""
result = []
for item in urllib2.parse_http_list(value):
if item[:1] == item[-1:] == '"':
item = unquote_header_value(item[1:-1])
result.append(item)
return result
def parse_options_header(value):
"""Parse a ``Content-Type`` like header into a tuple with the content
type and the options:
>>> parse_options_header('Content-Type: text/html; mimetype=text/html')
('Content-Type:', {'mimetype': 'text/html'})
:param value: the header to parse.
:return: (str, options)
"""
def _tokenize(string):
for match in _option_header_piece_re.finditer(string):
key, value = match.groups()
key = unquote_header_value(key)
if value is not None:
value = unquote_header_value(value)
yield key, value
if not value:
return '', {}
parts = _tokenize(';' + value)
name = parts.next()[0]
extra = dict(parts)
return name, extra
class Accept(object):
def __init__(self, value):
self._content_types = [parse_options_header(v) for v in
parse_list_header(value)]
def best_match(self, supported_content_types):
# FIXME: Should we have a more sophisticated matching algorithm that
# takes into account the version as well?
best_quality = -1
best_content_type = None
best_params = {}
best_match = '*/*'
for content_type in supported_content_types:
for content_mask, params in self._content_types:
try:
quality = float(params.get('q', 1))
except ValueError:
continue
if quality < best_quality:
continue
elif best_quality == quality:
if best_match.count('*') <= content_mask.count('*'):
continue
if self._match_mask(content_mask, content_type):
best_quality = quality
best_content_type = content_type
best_params = params
best_match = content_mask
return best_content_type, best_params
def content_type_params(self, best_content_type):
"""Find parameters in Accept header for given content type."""
for content_type, params in self._content_types:
if best_content_type == content_type:
return params
return {}
def _match_mask(self, mask, content_type):
if '*' not in mask:
return content_type == mask
if mask == '*/*':
return True
mask_major = mask[:-2]
content_type_major = content_type.split('/', 1)[0]
return content_type_major == mask_major
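# Illustrative behaviour (not part of the original source): for the header
# "application/json;q=0.8, application/xml;q=0.2",
# Accept(value).best_match(['application/json', 'application/xml']) returns
# ('application/json', {'q': '0.8'}) -- the highest-quality matching mask wins.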
def urlmap_factory(loader, global_conf, **local_conf):
if 'not_found_app' in local_conf:
not_found_app = local_conf.pop('not_found_app')
else:
not_found_app = global_conf.get('not_found_app')
if not_found_app:
not_found_app = loader.get_app(not_found_app, global_conf=global_conf)
urlmap = URLMap(not_found_app=not_found_app)
for path, app_name in local_conf.items():
path = paste.urlmap.parse_path_expression(path)
app = loader.get_app(app_name, global_conf=global_conf)
urlmap[path] = app
return urlmap
class URLMap(paste.urlmap.URLMap):
def _match(self, host, port, path_info):
"""Find longest match for a given URL path."""
for (domain, app_url), app in self.applications:
if domain and domain != host and domain != host + ':' + port:
continue
if (path_info == app_url
or path_info.startswith(app_url + '/')):
return app, app_url
return None, None
def _set_script_name(self, app, app_url):
def wrap(environ, start_response):
environ['SCRIPT_NAME'] += app_url
return app(environ, start_response)
return wrap
def _munge_path(self, app, path_info, app_url):
def wrap(environ, start_response):
environ['SCRIPT_NAME'] += app_url
environ['PATH_INFO'] = path_info[len(app_url):]
return app(environ, start_response)
return wrap
def _path_strategy(self, host, port, path_info):
"""Check path suffix for MIME type and path prefix for API version."""
mime_type = app = app_url = None
parts = path_info.rsplit('.', 1)
if len(parts) > 1:
possible_type = 'application/' + parts[1]
if possible_type in wsgi.SUPPORTED_CONTENT_TYPES:
mime_type = possible_type
parts = path_info.split('/')
if len(parts) > 1:
possible_app, possible_app_url = self._match(host, port, path_info)
# Don't use prefix if it ends up matching default
if possible_app and possible_app_url:
app_url = possible_app_url
app = self._munge_path(possible_app, path_info, app_url)
return mime_type, app, app_url
def _content_type_strategy(self, host, port, environ):
"""Check Content-Type header for API version."""
app = None
params = parse_options_header(environ.get('CONTENT_TYPE', ''))[1]
if 'version' in params:
app, app_url = self._match(host, port, '/v' + params['version'])
if app:
app = self._set_script_name(app, app_url)
return app
def _accept_strategy(self, host, port, environ, supported_content_types):
"""Check Accept header for best matching MIME type and API version."""
accept = Accept(environ.get('HTTP_ACCEPT', ''))
app = None
# Find the best match in the Accept header
mime_type, params = accept.best_match(supported_content_types)
if 'version' in params:
app, app_url = self._match(host, port, '/v' + params['version'])
if app:
app = self._set_script_name(app, app_url)
return mime_type, app
def __call__(self, environ, start_response):
host = environ.get('HTTP_HOST', environ.get('SERVER_NAME')).lower()
if ':' in host:
host, port = host.split(':', 1)
else:
if environ['wsgi.url_scheme'] == 'http':
port = '80'
else:
port = '443'
path_info = environ['PATH_INFO']
path_info = self.normalize_url(path_info, False)[1]
# The MIME type for the response is determined in one of two ways:
# 1) URL path suffix (eg /servers/detail.json)
# 2) Accept header (eg application/json;q=0.8, application/xml;q=0.2)
# The API version is determined in one of three ways:
# 1) URL path prefix (eg /v1.1/tenant/servers/detail)
# 2) Content-Type header (eg application/json;version=1.1)
# 3) Accept header (eg application/json;q=0.8;version=1.1)
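        # Illustrative example (not from the original source): a request for
        # "/v1.1/tenant/servers/detail.json" gets its MIME type from the
        # ".json" suffix and its version from the "/v1.1" prefix via
        # _path_strategy (when an application is mounted at that prefix),
        # so the header-based strategies below are skipped.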
supported_content_types = list(wsgi.SUPPORTED_CONTENT_TYPES)
mime_type, app, app_url = self._path_strategy(host, port, path_info)
# Accept application/atom+xml for the index query of each API
# version mount point as well as the root index
if (app_url and app_url + '/' == path_info) or path_info == '/':
supported_content_types.append('application/atom+xml')
if not app:
app = self._content_type_strategy(host, port, environ)
if not mime_type or not app:
possible_mime_type, possible_app = self._accept_strategy(
host, port, environ, supported_content_types)
if possible_mime_type and not mime_type:
mime_type = possible_mime_type
if possible_app and not app:
app = possible_app
if not mime_type:
mime_type = 'application/json'
if not app:
# Didn't match a particular version, probably matches default
app, app_url = self._match(host, port, path_info)
if app:
app = self._munge_path(app, path_info, app_url)
if app:
environ['nova.best_content_type'] = mime_type
return app(environ, start_response)
environ['paste.urlmap_object'] = self
return self.not_found_application(environ, start_response)
| apache-2.0 |
RaphaelKimmig/django_helpful | django_helpful/__init__.py | 1 | 1416 | # Copyright (c) 2013, Raphael Kimmig
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer. Redistributions in binary
# form must reproduce the above copyright notice, this list of conditions and
# the following disclaimer in the documentation and/or other materials provided
# with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS
# AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
# BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER
# OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .utils import *
try:
from .test_runners import *
except ImportError:
pass
| bsd-2-clause |
looker/sdk-examples | python/soft_delete_dashboard.py | 1 | 1367 | import sys
from typing import Sequence
import exceptions
from looker_sdk import client, error, models
sdk = client.setup("../looker.ini")
def main():
"""Given a dashboard title, get the ids of all dashboards with matching titles
and move them to trash.
$ python soft_delete_dashboard.py "An Unused Dashboard"
"""
dashboard_title = sys.argv[1] if len(sys.argv) > 1 else ""
if not dashboard_title:
raise exceptions.ArgumentError("Please provide: <dashboardTitle>")
dashboards = get_dashboards(dashboard_title)
delete_dashboards(dashboards)
def get_dashboards(title: str) -> Sequence[models.Dashboard]:
"""Get dashboards with matching title"""
lc_title = title.lower()
results = sdk.search_dashboards(title=lc_title)
if not results:
raise exceptions.NotFoundError(f'dashboard "{title}" not found')
assert isinstance(results, Sequence)
return results
def delete_dashboards(dashboards: Sequence[models.Dashboard]):
"""Soft delete dashboards"""
for dashboard in dashboards:
try:
assert dashboard.id
sdk.delete_dashboard(dashboard.id)
except error.SDKError:
print(f"Failed to delete dashboard with id {dashboard.id}.")
else:
print(f'"{dashboard.title}" (id {dashboard.id}) has been moved to trash.')
main()
| mit |
cbanta/pjproject | tests/pjsua/mod_recvfrom.py | 39 | 2746 | # $Id$
import imp
import sys
import inc_sip as sip
import inc_const as const
import re
from inc_cfg import *
# Read configuration
cfg_file = imp.load_source("cfg_file", ARGS[1])
# Default server port (should we randomize?)
srv_port = 50070
def test_func(test):
pjsua = test.process[0]
dlg = sip.Dialog("127.0.0.1", pjsua.inst_param.sip_port,
local_port=srv_port,
tcp=cfg_file.recvfrom_cfg.tcp)
last_cseq = 0
last_method = ""
last_call_id = ""
for t in cfg_file.recvfrom_cfg.transaction:
# Print transaction title
if t.title != "":
dlg.trace(t.title)
# Run command and expect patterns
for c in t.cmds:
if c[0] and c[0] != "":
pjsua.send(c[0])
if len(c)>1 and c[1] and c[1] != "":
pjsua.expect(c[1])
# Wait for request
if t.check_cseq:
# Absorbs retransmissions
cseq = 0
method = last_method
call_id = last_call_id
while cseq <= last_cseq and method == last_method and call_id == last_call_id:
request, src_addr = dlg.wait_msg_from(30)
if request==None or request=="":
raise TestError("Timeout waiting for request")
method = request.split(" ", 1)[0]
cseq_hval = sip.get_header(request, "CSeq")
cseq_hval = cseq_hval.split(" ")[0]
cseq = int(cseq_hval)
call_id = sip.get_header(request, "Call-ID")
last_cseq = cseq
last_method = method
else:
request, src_addr = dlg.wait_msg_from(30)
if request==None or request=="":
raise TestError("Timeout waiting for request")
# Check for include patterns
for pat in t.include:
if re.search(pat, request, re.M | re.I)==None:
if t.title:
tname = " in " + t.title + " transaction"
else:
tname = ""
raise TestError("Pattern " + pat + " not found" + tname)
# Check for exclude patterns
for pat in t.exclude:
if re.search(pat, request, re.M | re.I)!=None:
if t.title:
tname = " in " + t.title + " transaction"
else:
tname = ""
raise TestError("Excluded pattern " + pat + " found" + tname)
# Create response
if t.resp_code!=0:
response = dlg.create_response(request, t.resp_code, "Status reason")
# Add headers to response
for h in t.resp_hdr:
response = response + h + "\r\n"
# Add message body if required
if t.body:
response = response + t.body
# Send response
dlg.send_msg(response, src_addr)
# Expect something to happen in pjsua
if t.expect != "":
pjsua.expect(t.expect)
# Sync
pjsua.sync_stdout()
# Replace "$PORT" with server port in pjsua args
cfg_file.recvfrom_cfg.inst_param.arg = cfg_file.recvfrom_cfg.inst_param.arg.replace("$PORT", str(srv_port))
# Here where it all comes together
test = TestParam(cfg_file.recvfrom_cfg.name,
[cfg_file.recvfrom_cfg.inst_param],
test_func)
| gpl-2.0 |
robmcmullen/peppy | peppy/major_modes/fortran_95.py | 1 | 1742 | # peppy Copyright (c) 2006-2009 Rob McMullen
# Licenced under the GPLv2; see http://peppy.flipturn.org for more info
"""Fortran 95 programming language editing support.
Major mode for editing Fortran 95 files.
Supporting actions and minor modes should go here only if they are uniquely
applicable to this major mode and can't be used in other major modes. If
actions can be used with multiple major modes, they should be put in a
separate plugin in the peppy/plugins directory.
"""
import os
import wx
import wx.stc
from peppy.lib.foldexplorer import *
from peppy.lib.autoindent import *
from peppy.yapsy.plugins import *
from peppy.major import *
from peppy.editra.style_specs import unique_keywords
from peppy.fundamental import FundamentalMode
class Fortran95Mode(FundamentalMode):
"""Stub major mode for editing Fortran 95 files.
This major mode has been automatically generated and is a boilerplate/
placeholder major mode. Enhancements to this mode are appreciated!
"""
keyword = 'Fortran 95'
editra_synonym = 'Fortran 95'
stc_lexer_id = wx.stc.STC_LEX_FORTRAN
start_line_comment = '!'
end_line_comment = ''
icon = 'icons/page_white.png'
default_classprefs = (
StrParam('extensions', 'f2k f90 f95 fpp', fullwidth=True),
StrParam('keyword_set_0', unique_keywords[38], hidden=False, fullwidth=True),
StrParam('keyword_set_1', unique_keywords[39], hidden=False, fullwidth=True),
StrParam('keyword_set_2', unique_keywords[40], hidden=False, fullwidth=True),
)
class Fortran95ModePlugin(IPeppyPlugin):
"""Plugin to register modes and user interface for Fortran 95
"""
def getMajorModes(self):
yield Fortran95Mode
| gpl-2.0 |
HH890612/MiliCloud | lib/requests/api.py | 92 | 5400 | # -*- coding: utf-8 -*-
"""
requests.api
~~~~~~~~~~~~
This module implements the Requests API.
:copyright: (c) 2012 by Kenneth Reitz.
:license: Apache2, see LICENSE for more details.
"""
from . import sessions
def request(method, url, **kwargs):
"""Constructs and sends a :class:`Request <Request>`.
:param method: method for the new :class:`Request` object.
:param url: URL for the new :class:`Request` object.
:param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param json: (optional) json data to send in the body of the :class:`Request`.
:param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
:param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`.
:param files: (optional) Dictionary of ``'name': file-like-objects`` (or ``{'name': ('filename', fileobj)}``) for multipart encoding upload.
:param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth.
:param timeout: (optional) How long to wait for the server to send data
before giving up, as a float, or a :ref:`(connect timeout, read
timeout) <timeouts>` tuple.
:type timeout: float or tuple
:param allow_redirects: (optional) Boolean. Set to True if POST/PUT/DELETE redirect following is allowed.
:type allow_redirects: bool
:param proxies: (optional) Dictionary mapping protocol to the URL of the proxy.
:param verify: (optional) if ``True``, the SSL cert will be verified. A CA_BUNDLE path can also be provided.
:param stream: (optional) if ``False``, the response content will be immediately downloaded.
:param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair.
:return: :class:`Response <Response>` object
:rtype: requests.Response
Usage::
>>> import requests
>>> req = requests.request('GET', 'http://httpbin.org/get')
<Response [200]>
"""
session = sessions.Session()
response = session.request(method=method, url=url, **kwargs)
# By explicitly closing the session, we avoid leaving sockets open which
# can trigger a ResourceWarning in some cases, and look like a memory leak
# in others.
session.close()
return response
def get(url, params=None, **kwargs):
"""Sends a GET request.
:param url: URL for the new :class:`Request` object.
:param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
kwargs.setdefault('allow_redirects', True)
return request('get', url, params=params, **kwargs)
def options(url, **kwargs):
    """Sends an OPTIONS request.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
kwargs.setdefault('allow_redirects', True)
return request('options', url, **kwargs)
def head(url, **kwargs):
"""Sends a HEAD request.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
kwargs.setdefault('allow_redirects', False)
return request('head', url, **kwargs)
def post(url, data=None, json=None, **kwargs):
"""Sends a POST request.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param json: (optional) json data to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
return request('post', url, data=data, json=json, **kwargs)
def put(url, data=None, **kwargs):
"""Sends a PUT request.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
return request('put', url, data=data, **kwargs)
def patch(url, data=None, **kwargs):
"""Sends a PATCH request.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
return request('patch', url, data=data, **kwargs)
def delete(url, **kwargs):
"""Sends a DELETE request.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
return request('delete', url, **kwargs)
| mit |
evernym/plenum | plenum/common/script_helper.py | 2 | 6795 | import os
from jsonpickle import json
from plenum.common.constants import CLIENT_STACK_SUFFIX
from plenum.common.roles import Roles
from plenum.common.transactions import PlenumTransactions
from storage.text_file_store import TextFileStore
NodeInfoFile = "node-info"
GenTxnFile = "genesis_txn"
ExportedTxnFile = "exported_genesis_txn"
def buildKeepDirIfNotExists(baseDir):
keepDir = os.path.expanduser(baseDir)
if not os.path.exists(keepDir):
os.makedirs(keepDir, exist_ok=True)
def isNodeType(baseDir, name):
filepath = os.path.join(os.path.expanduser(baseDir),
name + CLIENT_STACK_SUFFIX)
if os.path.exists(filepath):
return True
else:
return False
def getLedger(baseDir, dbName, storeHash=True, isLineNoKey: bool = False):
return TextFileStore(
dbDir=baseDir,
dbName=dbName,
storeContentHash=storeHash,
isLineNoKey=isLineNoKey)
def storeToFile(baseDir, dbName, value, key,
storeHash=True, isLineNoKey=False):
ledger = getLedger(baseDir, dbName, storeHash=storeHash,
isLineNoKey=isLineNoKey)
if key is None:
ledger.put(value)
else:
ledger.put(value, key)
ledger.close()
def getNodeInfo(baseDir, nodeName):
ledger = getLedger(baseDir, NodeInfoFile, storeHash=False,
isLineNoKey=False)
rec = ledger.get(nodeName)
ledger.close()
return json.loads(rec)
def storeNodeInfo(baseDir, nodeName, steward, nodeip, nodeport, clientip,
clientport):
data = {}
vnodeip, vnodeport, vclientip, vclientport = getAddGenesisHAs(nodeip,
nodeport,
clientip,
clientport)
nodeAddr = vnodeip + ":" + str(vnodeport)
clientAddr = vclientip + ":" + str(vclientport)
data['steward'] = steward
data['nodeAddr'] = nodeAddr
data['clientAddr'] = clientAddr
newJsonData = json.dumps(data)
ledger = getLedger(baseDir, NodeInfoFile, storeHash=False,
isLineNoKey=False)
storedJsonData = ledger.get(nodeName)
if not storedJsonData:
storeToFile(baseDir, NodeInfoFile, newJsonData, nodeName,
storeHash=False, isLineNoKey=False)
elif not storedJsonData == newJsonData:
newRec = []
for key, jsonValue in ledger.iterator(include_key=True,
include_value=True):
if key != nodeName:
newRec.append((key, jsonValue))
newRec.append((nodeName, newJsonData))
ledger.reset()
for key, value in newRec:
storeToFile(baseDir, NodeInfoFile, value, key, storeHash=False,
isLineNoKey=False)
ledger.close()
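# Illustrative record layout (not part of the original source): storeNodeInfo
# writes one JSON record per node, e.g.
# {"steward": "<verkey>", "nodeAddr": "127.0.0.1:9701", "clientAddr": "127.0.0.1:9702"}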
def storeExportedTxns(baseDir, txn):
storeToFile(baseDir, ExportedTxnFile, txn, None, storeHash=False,
isLineNoKey=True)
def storeGenTxns(baseDir, txn):
storeToFile(baseDir, GenTxnFile, txn, None, storeHash=False,
isLineNoKey=True)
def getAddGenesisHAs(nodeip, nodeport, clientip, clientport):
vnodeip = nodeip if nodeip else "127.0.0.1"
vnodeport = nodeport if nodeport else "9701"
vclientip = clientip if clientip else vnodeip
vclientport = clientport if clientport else str(int(vnodeport) + 1)
return vnodeip, vnodeport, vclientip, vclientport
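# Illustrative defaults (not part of the original source):
# getAddGenesisHAs(None, None, None, None) returns
# ('127.0.0.1', '9701', '127.0.0.1', '9702') -- the client port defaults to
# the node port plus one.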
def getAddNewGenNodeCommand(name, verkey, stewardkey, nodeip, nodeport,
clientip, clientport):
vnodeip, vnodeport, vclientip, vclientport = getAddGenesisHAs(nodeip,
nodeport,
clientip,
clientport)
nodeAddr = vnodeip + ":" + vnodeport
clientAddr = vclientip + ":" + vclientport
    return 'add genesis transaction {node} with data {"'.format(node=PlenumTransactions.NODE.name) + name + '": {' \
           '"verkey": ' + verkey + \
           '"node_address": "' + nodeAddr + '", "client_address": "' + \
           clientAddr + '"},' \
           '"by": "' + stewardkey + '"}'
def getOldAddNewGenNodeCommand(name, verkey, stewardverkey, nodeip, nodeport,
clientip, clientport):
vnodeip, vnodeport, vclientip, vclientport = getAddGenesisHAs(nodeip,
nodeport,
clientip,
clientport)
return 'add genesis transaction {node} for '.format(node=PlenumTransactions.NODE.name) + verkey + ' by ' + \
stewardverkey + ' with data {"node_ip": "' + \
vnodeip + '", "node_port": ' + vnodeport + ', "client_ip": "' + \
vclientip + '", "client_port": ' + \
vclientport + ', "alias": "' + name + '"}'
def generateNodeGenesisTxn(baseDir, displayTxn, name, verkey, stewardverkey,
nodeip, nodeport, clientip, clientport):
storeNodeInfo(baseDir, name, stewardverkey, nodeip, nodeport, clientip,
clientport)
txn = getOldAddNewGenNodeCommand(name, verkey, stewardverkey, nodeip,
nodeport, clientip, clientport)
storeGenTxns(baseDir, txn)
printGenTxn(txn, displayTxn)
def getAddNewGenStewardCommand(name, verkey):
    return 'add genesis transaction {nym} with data {"'.format(nym=PlenumTransactions.NYM.name) \
           + name + '": {"verkey": "' + verkey + \
           '"} role={role}'.format(role=Roles.STEWARD.name)
def getOldAddNewGenStewardCommand(name, verkey):
return 'add genesis transaction {nym} for '.format(nym=PlenumTransactions.NYM.name) + verkey + ' with data ' \
'{"alias": ' \
'"' + name + \
'"} role={role}'.format(role=Roles.STEWARD.name)
def generateStewardGenesisTxn(baseDir, displayTxn, name, verkey):
txn = getOldAddNewGenStewardCommand(name, verkey)
storeGenTxns(baseDir, txn)
printGenTxn(txn, displayTxn)
def printGenTxn(txn, displayTxn):
if displayTxn:
print('\n' + txn)
def _checkClientConnected(cli, ):
assert cli.hasSufficientConnections
| apache-2.0 |
clayz/crazy-quiz-web | lib/werkzeug/exceptions.py | 316 | 17799 | # -*- coding: utf-8 -*-
"""
werkzeug.exceptions
~~~~~~~~~~~~~~~~~~~
This module implements a number of Python exceptions you can raise from
within your views to trigger a standard non-200 response.
Usage Example
-------------
::
from werkzeug.wrappers import BaseRequest
from werkzeug.wsgi import responder
from werkzeug.exceptions import HTTPException, NotFound
def view(request):
raise NotFound()
@responder
def application(environ, start_response):
request = BaseRequest(environ)
try:
return view(request)
except HTTPException as e:
return e
As you can see from this example those exceptions are callable WSGI
applications. Because of Python 2.4 compatibility those do not extend
from the response objects but only from the python exception class.
As a matter of fact they are not Werkzeug response objects. However you
can get a response object by calling ``get_response()`` on a HTTP
exception.
Keep in mind that you have to pass an environment to ``get_response()``
because some errors fetch additional information from the WSGI
environment.
If you want to hook in a different exception page to say, a 404 status
code, you can add a second except for a specific subclass of an error::
@responder
def application(environ, start_response):
request = BaseRequest(environ)
try:
return view(request)
except NotFound, e:
return not_found(request)
except HTTPException, e:
return e
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import sys
# Because of bootstrapping reasons we need to manually patch ourselves
# onto our parent module.
import werkzeug
werkzeug.exceptions = sys.modules[__name__]
from werkzeug._internal import _get_environ
from werkzeug._compat import iteritems, integer_types, text_type, \
implements_to_string
from werkzeug.wrappers import Response
@implements_to_string
class HTTPException(Exception):
"""
Baseclass for all HTTP exceptions. This exception can be called as WSGI
application to render a default error page or you can catch the subclasses
of it independently and render nicer error messages.
"""
code = None
description = None
def __init__(self, description=None, response=None):
Exception.__init__(self)
if description is not None:
self.description = description
self.response = response
@classmethod
def wrap(cls, exception, name=None):
"""This method returns a new subclass of the exception provided that
also is a subclass of `BadRequest`.
"""
class newcls(cls, exception):
def __init__(self, arg=None, *args, **kwargs):
cls.__init__(self, *args, **kwargs)
exception.__init__(self, arg)
newcls.__module__ = sys._getframe(1).f_globals.get('__name__')
newcls.__name__ = name or cls.__name__ + exception.__name__
return newcls
@property
def name(self):
"""The status name."""
return HTTP_STATUS_CODES.get(self.code, 'Unknown Error')
def get_description(self, environ=None):
"""Get the description."""
        return u'<p>%s</p>' % escape(self.description)
def get_body(self, environ=None):
"""Get the HTML body."""
return text_type((
u'<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">\n'
u'<title>%(code)s %(name)s</title>\n'
u'<h1>%(name)s</h1>\n'
u'%(description)s\n'
) % {
'code': self.code,
'name': escape(self.name),
'description': self.get_description(environ)
})
def get_headers(self, environ=None):
"""Get a list of headers."""
return [('Content-Type', 'text/html')]
def get_response(self, environ=None):
"""Get a response object. If one was passed to the exception
it's returned directly.
:param environ: the optional environ for the request. This
can be used to modify the response depending
on how the request looked like.
:return: a :class:`Response` object or a subclass thereof.
"""
if self.response is not None:
return self.response
if environ is not None:
environ = _get_environ(environ)
headers = self.get_headers(environ)
return Response(self.get_body(environ), self.code, headers)
def __call__(self, environ, start_response):
"""Call the exception as WSGI application.
:param environ: the WSGI environment.
:param start_response: the response callable provided by the WSGI
server.
"""
response = self.get_response(environ)
return response(environ, start_response)
def __str__(self):
return '%d: %s' % (self.code, self.name)
def __repr__(self):
return '<%s \'%s\'>' % (self.__class__.__name__, self)
class BadRequest(HTTPException):
"""*400* `Bad Request`
Raise if the browser sends something to the application the application
or server cannot handle.
"""
code = 400
description = (
'The browser (or proxy) sent a request that this server could '
'not understand.'
)
class ClientDisconnected(BadRequest):
"""Internal exception that is raised if Werkzeug detects a disconnected
client. Since the client is already gone at that point attempting to
send the error message to the client might not work and might ultimately
result in another exception in the server. Mainly this is here so that
it is silenced by default as far as Werkzeug is concerned.
Since disconnections cannot be reliably detected and are unspecified
by WSGI to a large extend this might or might not be raised if a client
is gone.
.. versionadded:: 0.8
"""
class SecurityError(BadRequest):
"""Raised if something triggers a security error. This is otherwise
exactly like a bad request error.
.. versionadded:: 0.9
"""
class Unauthorized(HTTPException):
"""*401* `Unauthorized`
Raise if the user is not authorized. Also used if you want to use HTTP
basic auth.
"""
code = 401
description = (
'The server could not verify that you are authorized to access '
'the URL requested. You either supplied the wrong credentials (e.g. '
'a bad password), or your browser doesn\'t understand how to supply '
'the credentials required.'
)
class Forbidden(HTTPException):
"""*403* `Forbidden`
Raise if the user doesn't have the permission for the requested resource
but was authenticated.
"""
code = 403
description = (
'You don\'t have the permission to access the requested resource. '
'It is either read-protected or not readable by the server.'
)
class NotFound(HTTPException):
"""*404* `Not Found`
Raise if a resource does not exist and never existed.
"""
code = 404
description = (
'The requested URL was not found on the server. '
'If you entered the URL manually please check your spelling and '
'try again.'
)
class MethodNotAllowed(HTTPException):
"""*405* `Method Not Allowed`
Raise if the server used a method the resource does not handle. For
example `POST` if the resource is view only. Especially useful for REST.
The first argument for this exception should be a list of allowed methods.
Strictly speaking the response would be invalid if you don't provide valid
methods in the header which you can do with that list.
"""
code = 405
description = 'The method is not allowed for the requested URL.'
def __init__(self, valid_methods=None, description=None):
"""Takes an optional list of valid http methods
starting with werkzeug 0.3 the list will be mandatory."""
HTTPException.__init__(self, description)
self.valid_methods = valid_methods
def get_headers(self, environ):
headers = HTTPException.get_headers(self, environ)
if self.valid_methods:
headers.append(('Allow', ', '.join(self.valid_methods)))
return headers
class NotAcceptable(HTTPException):
"""*406* `Not Acceptable`
Raise if the server can't return any content conforming to the
`Accept` headers of the client.
"""
code = 406
description = (
'The resource identified by the request is only capable of '
'generating response entities which have content characteristics '
'not acceptable according to the accept headers sent in the '
'request.'
)
class RequestTimeout(HTTPException):
"""*408* `Request Timeout`
Raise to signalize a timeout.
"""
code = 408
description = (
'The server closed the network connection because the browser '
'didn\'t finish the request within the specified time.'
)
class Conflict(HTTPException):
"""*409* `Conflict`
Raise to signal that a request cannot be completed because it conflicts
with the current state on the server.
.. versionadded:: 0.7
"""
code = 409
description = (
'A conflict happened while processing the request. The resource '
'might have been modified while the request was being processed.'
)
class Gone(HTTPException):
"""*410* `Gone`
Raise if a resource existed previously and went away without new location.
"""
code = 410
description = (
'The requested URL is no longer available on this server and '
'there is no forwarding address.</p><p>If you followed a link '
'from a foreign page, please contact the author of this page.'
)
class LengthRequired(HTTPException):
"""*411* `Length Required`
Raise if the browser submitted data but no ``Content-Length`` header which
is required for the kind of processing the server does.
"""
code = 411
description = (
'A request with this method requires a valid <code>Content-'
'Length</code> header.'
)
class PreconditionFailed(HTTPException):
"""*412* `Precondition Failed`
Status code used in combination with ``If-Match``, ``If-None-Match``, or
``If-Unmodified-Since``.
"""
code = 412
description = (
'The precondition on the request for the URL failed positive '
'evaluation.'
)
class RequestEntityTooLarge(HTTPException):
"""*413* `Request Entity Too Large`
The status code one should return if the data submitted exceeded a given
limit.
"""
code = 413
description = (
'The data value transmitted exceeds the capacity limit.'
)
class RequestURITooLarge(HTTPException):
"""*414* `Request URI Too Large`
Like *413* but for too long URLs.
"""
code = 414
description = (
'The length of the requested URL exceeds the capacity limit '
'for this server. The request cannot be processed.'
)
class UnsupportedMediaType(HTTPException):
"""*415* `Unsupported Media Type`
The status code returned if the server is unable to handle the media type
the client transmitted.
"""
code = 415
description = (
'The server does not support the media type transmitted in '
'the request.'
)
class RequestedRangeNotSatisfiable(HTTPException):
"""*416* `Requested Range Not Satisfiable`
The client asked for a part of the file that lies beyond the end
of the file.
.. versionadded:: 0.7
"""
code = 416
description = (
'The server cannot provide the requested range.'
)
class ExpectationFailed(HTTPException):
"""*417* `Expectation Failed`
The server cannot meet the requirements of the Expect request-header.
.. versionadded:: 0.7
"""
code = 417
description = (
'The server could not meet the requirements of the Expect header'
)
class ImATeapot(HTTPException):
"""*418* `I'm a teapot`
The server should return this if it is a teapot and someone attempted
to brew coffee with it.
.. versionadded:: 0.7
"""
code = 418
description = (
'This server is a teapot, not a coffee machine'
)
class UnprocessableEntity(HTTPException):
"""*422* `Unprocessable Entity`
Used if the request is well formed, but the instructions are otherwise
incorrect.
"""
code = 422
description = (
'The request was well-formed but was unable to be followed '
'due to semantic errors.'
)
class PreconditionRequired(HTTPException):
"""*428* `Precondition Required`
The server requires this request to be conditional, typically to prevent
the lost update problem, which is a race condition between two or more
clients attempting to update a resource through PUT or DELETE. By requiring
each client to include a conditional header ("If-Match" or "If-Unmodified-
Since") with the proper value retained from a recent GET request, the
server ensures that each client has at least seen the previous revision of
the resource.
"""
code = 428
description = (
'This request is required to be conditional; try using "If-Match" '
'or "If-Unmodified-Since".'
)
class TooManyRequests(HTTPException):
"""*429* `Too Many Requests`
The server is limiting the rate at which this user receives responses, and
this request exceeds that rate. (The server may use any convenient method
to identify users and their request rates). The server may include a
"Retry-After" header to indicate how long the user should wait before
retrying.
"""
code = 429
description = (
'This user has exceeded an allotted request count. Try again later.'
)
class RequestHeaderFieldsTooLarge(HTTPException):
"""*431* `Request Header Fields Too Large`
The server refuses to process the request because the header fields are too
large. One or more individual fields may be too large, or the set of all
headers is too large.
"""
code = 431
description = (
'One or more header fields exceeds the maximum size.'
)
class InternalServerError(HTTPException):
"""*500* `Internal Server Error`
Raise if an internal server error occurred. This is a good fallback if an
unknown error occurred in the dispatcher.
"""
code = 500
description = (
'The server encountered an internal error and was unable to '
'complete your request. Either the server is overloaded or there '
'is an error in the application.'
)
class NotImplemented(HTTPException):
"""*501* `Not Implemented`
Raise if the application does not support the action requested by the
browser.
"""
code = 501
description = (
'The server does not support the action requested by the '
'browser.'
)
class BadGateway(HTTPException):
"""*502* `Bad Gateway`
If you do proxying in your application you should return this status code
if you received an invalid response from the upstream server it accessed
in attempting to fulfill the request.
"""
code = 502
description = (
'The proxy server received an invalid response from an upstream '
'server.'
)
class ServiceUnavailable(HTTPException):
"""*503* `Service Unavailable`
Status code you should return if a service is temporarily unavailable.
"""
code = 503
description = (
'The server is temporarily unable to service your request due to '
'maintenance downtime or capacity problems. Please try again '
'later.'
)
default_exceptions = {}
__all__ = ['HTTPException']
def _find_exceptions():
for name, obj in iteritems(globals()):
try:
if getattr(obj, 'code', None) is not None:
default_exceptions[obj.code] = obj
__all__.append(obj.__name__)
except TypeError: # pragma: no cover
continue
_find_exceptions()
del _find_exceptions
class Aborter(object):
"""
When passed a dict of code -> exception items it can be used as
callable that raises exceptions. If the first argument to the
callable is an integer it will be looked up in the mapping, if it's
a WSGI application it will be raised in a proxy exception.
The rest of the arguments are forwarded to the exception constructor.
"""
def __init__(self, mapping=None, extra=None):
if mapping is None:
mapping = default_exceptions
self.mapping = dict(mapping)
if extra is not None:
self.mapping.update(extra)
def __call__(self, code, *args, **kwargs):
if not args and not kwargs and not isinstance(code, integer_types):
raise HTTPException(response=code)
if code not in self.mapping:
raise LookupError('no exception for %r' % code)
raise self.mapping[code](*args, **kwargs)
abort = Aborter()
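# Illustrative usage (not part of the original source): ``abort(404)`` raises
# ``NotFound`` from the default mapping, while ``abort(some_response)`` raises
# a plain ``HTTPException`` wrapping that response object.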
#: an exception that is used internally to signal both a key error and a
#: bad request. Used by a lot of the datastructures.
BadRequestKeyError = BadRequest.wrap(KeyError)
# imported here because of circular dependencies of werkzeug.utils
from werkzeug.utils import escape
from werkzeug.http import HTTP_STATUS_CODES
| apache-2.0 |
bhavin04890/finaldashboard | static/scripts/tools/csv2xml.py | 12 | 3028 | # -*- coding: utf-8 -*-
#
# Debug/Helper script for CSV stylesheet development
#
# >>> python csv2xml <CSV File>
# ... converts the CSV file into XML
#
# >>> python csv2xml <CSV File> <XSLT Stylesheet>
# ... converts the CSV file into XML and transforms it using the stylesheet
#
import sys
import csv
from lxml import etree
from xml.sax.saxutils import escape, unescape
TABLE = "table"
ROW = "row"
COL = "col"
FIELD = "field"
def xml_encode(s):
if s:
        s = escape(s, {"'": "&apos;", '"': "&quot;"})
return s
def xml_decode(s):
if s:
        s = unescape(s, {"&apos;": "'", "&quot;": '"'})
return s
def parse(source):
parser = etree.XMLParser(no_network=False)
result = etree.parse(source, parser)
return result
def csv2tree(source, delimiter=",", quotechar='"'):
root = etree.Element(TABLE)
def utf_8_encode(source):
encodings = ["utf-8", "iso-8859-1"]
e = encodings[0]
for line in source:
if e:
try:
yield unicode(line, e, "strict").encode("utf-8")
except:
pass
else:
continue
for encoding in encodings:
try:
yield unicode(line, encoding, "strict").encode("utf-8")
except:
continue
else:
e = encoding
break
reader = csv.DictReader(utf_8_encode(source),
delimiter=delimiter,
quotechar=quotechar)
for r in reader:
row = etree.SubElement(root, ROW)
for k in r:
col = etree.SubElement(row, COL)
col.set(FIELD, str(k))
value = r[k]
if value:
text = str(value)
if text.lower() not in ("null", "<null>"):
text = xml_encode(unicode(text.decode("utf-8")))
col.text = text
else:
col.text = ""
return etree.ElementTree(root)
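# Illustrative output (not part of the original source): a CSV with the single
# header "name" and one row "value" becomes
# <table><row><col field="name">value</col></row></table>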
def transform(tree, stylesheet_path, **args):
if args:
_args = [(k, "'%s'" % args[k]) for k in args]
_args = dict(_args)
else:
_args = None
stylesheet = etree.parse(stylesheet_path)
ac = etree.XSLTAccessControl(read_file=True, read_network=True)
transformer = etree.XSLT(stylesheet, access_control=ac)
if _args:
result = transformer(tree, **_args)
else:
result = transformer(tree)
return result
def main(argv):
try:
csvpath = argv[0]
except:
print "Usage: python csv2xml <CSV File> [<XSLT Stylesheet>]"
return
try:
xslpath = argv[1]
except:
xslpath = None
csvfile = open(csvpath)
tree = csv2tree(csvfile)
if xslpath is not None:
tree = transform(tree, xslpath)
print etree.tostring(tree, pretty_print=True)
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
| mit |
erjac77/ansible-module-f5bigip | library/f5bigip_ltm_profile_diameter.py | 2 | 8266 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2016-2018, Eric Jacob <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: f5bigip_ltm_profile_diameter
short_description: BIG-IP ltm profile diameter module
description:
- Configures a profile to manage Diameter network traffic.
version_added: "2.4"
author:
- "Gabriel Fortin (@GabrielFortin)"
options:
app_service:
description:
- Specifies the name of the application service to which the profile belongs.
connection_prime:
description:
- When enabled, and the system receives a capabilities exchange request from the client, the system will
establish connections and perform handshaking with all the servers prior to sending the capabilities
exchange answer to the client.
default: disabled
choices: ['disabled', 'enabled']
defaults_from:
description:
- Specifies the profile that you want to use as the parent profile.
default: diameter
description:
description:
- User defined description.
destination_realm:
description:
- This attribute has been deprecated as of BIG-IP v11.
handshake_timeout:
description:
- Specifies the handshake timeout in seconds.
default: 10
choices: range(0,4294967296)
host_ip_rewrite:
description:
- When enabled and the message is a capabilities exchange request or capabilities exchange answer, rewrite
the host-ip-address attribute with the system's egress IP address.
default: enabled
choices: ['disabled', 'enabled']
max_retransmit_attempts:
description:
- Specifies the maximum number of retransmit attempts.
default: 1
choices: range(0,4294967296)
max_watchdog_failure:
description:
- Specifies the maximum number of device watchdog failures that the traffic management system can take
before it tears down the connection.
default: 10
choices: range(0,4294967296)
name:
description:
- Specifies a unique name for the component.
required: true
origin_host_to_client:
description:
- Specifies the origin host to client of BIG-IP.
origin_host_to_server:
description:
- Specifies the origin host to server of BIG-IP.
origin_realm_to_client:
description:
- Specifies the origin realm of BIG-IP.
origin_realm_to_server:
description:
- Specifies the origin realm to server of BIG-IP.
overwrite_destination_host:
description:
- This attribute has been deprecated as of BIG-IP v11.
default: enabled
choices: ['disabled', 'enabled']
parent_avp:
description:
- Specifies the name of the Diameter attribute that the system uses to indicate if the persist-avp option is
embedded in a grouped avp.
partition:
description:
- Displays the administrative partition within which the profile resides.
persist_avp:
description:
- Specifies the name of the Diameter attribute that the system persists on.
reset_on_timeout:
description:
- When it is enabled and the watchdog failures exceed the max watchdog failure, the system resets the
connection.
default: enabled
choices: ['disabled', 'enabled']
retransmit_timeout:
description:
- Specifies the retransmit timeout in seconds.
default: 10
choices: range(0, 4294967296)
subscriber_aware:
description:
- When you enable this option, the system extracts available subscriber information, such as phone number or
phone model, from diameter authentication and/or accounting packets.
default: disabled
choices: ['disabled', 'enabled']
state:
description:
- Specifies the state of the component on the BIG-IP system.
default: present
choices: ['absent', 'present']
watchdog_timeout:
description:
- Specifies the watchdog timeout in seconds.
default: 0
choices: range(0, 4294967296)
requirements:
- BIG-IP >= 12.0
- ansible-common-f5
- f5-sdk
'''
EXAMPLES = '''
- name: Create LTM Profile Diameter
f5bigip_ltm_profile_diameter:
f5_hostname: 172.16.227.35
f5_username: admin
f5_password: admin
f5_port: 443
name: my_diameter_profile
partition: Common
description: My diameter profile
state: present
delegate_to: localhost
'''
RETURN = ''' # '''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves import range
from ansible_common_f5.base import F5_ACTIVATION_CHOICES
from ansible_common_f5.base import F5_NAMED_OBJ_ARGS
from ansible_common_f5.base import F5_PROVIDER_ARGS
from ansible_common_f5.bigip import F5BigIpNamedObject
class ModuleParams(object):
@property
def argument_spec(self):
argument_spec = dict(
app_service=dict(type='str'),
connection_prime=dict(type='str', choices=F5_ACTIVATION_CHOICES),
defaults_from=dict(type='str'),
description=dict(type='str'),
destination_realm=dict(type='str'),
handshake_timeout=dict(type='int', choices=range(0, 4294967296)),
host_ip_rewrite=dict(type='str', choices=F5_ACTIVATION_CHOICES),
max_retransmit_attempts=dict(type='int', choices=range(0, 4294967296)),
max_watchdog_failure=dict(type='int', choices=range(0, 4294967296)),
origin_host_to_client=dict(type='str'),
origin_host_to_server=dict(type='str'),
origin_realm_to_client=dict(type='str'),
origin_realm_to_server=dict(type='str'),
overwrite_destination_host=dict(type='str', choices=F5_ACTIVATION_CHOICES),
parent_avp=dict(type='str'),
persist_avp=dict(type='str'),
reset_on_timeout=dict(type='str', choices=F5_ACTIVATION_CHOICES),
retransmit_timeout=dict(type='int', choices=range(0, 4294967296)),
subscriber_aware=dict(type='str', choices=F5_ACTIVATION_CHOICES),
watchdog_timeout=dict(type='int', choices=range(0, 4294967296))
)
argument_spec.update(F5_PROVIDER_ARGS)
argument_spec.update(F5_NAMED_OBJ_ARGS)
return argument_spec
@property
def supports_check_mode(self):
return True
class F5BigIpLtmProfileDiameter(F5BigIpNamedObject):
def _set_crud_methods(self):
self._methods = {
'create': self._api.tm.ltm.profile.diameters.diameter.create,
'read': self._api.tm.ltm.profile.diameters.diameter.load,
'update': self._api.tm.ltm.profile.diameters.diameter.update,
'delete': self._api.tm.ltm.profile.diameters.diameter.delete,
'exists': self._api.tm.ltm.profile.diameters.diameter.exists
}
def main():
params = ModuleParams()
module = AnsibleModule(argument_spec=params.argument_spec, supports_check_mode=params.supports_check_mode)
try:
obj = F5BigIpLtmProfileDiameter(check_mode=module.check_mode, **module.params)
result = obj.flush()
module.exit_json(**result)
except Exception as exc:
module.fail_json(msg=str(exc))
if __name__ == '__main__':
main()
| apache-2.0 |
mattrobenolt/django | django/contrib/staticfiles/handlers.py | 581 | 2328 | from django.conf import settings
from django.contrib.staticfiles import utils
from django.contrib.staticfiles.views import serve
from django.core.handlers.wsgi import WSGIHandler, get_path_info
from django.utils.six.moves.urllib.parse import urlparse
from django.utils.six.moves.urllib.request import url2pathname
class StaticFilesHandler(WSGIHandler):
"""
WSGI middleware that intercepts calls to the static files directory, as
defined by the STATIC_URL setting, and serves those files.
"""
# May be used to differentiate between handler types (e.g. in a
# request_finished signal)
handles_files = True
def __init__(self, application):
self.application = application
self.base_url = urlparse(self.get_base_url())
super(StaticFilesHandler, self).__init__()
def get_base_url(self):
utils.check_settings()
return settings.STATIC_URL
def _should_handle(self, path):
"""
Checks if the path should be handled. Ignores the path if:
* the host is provided as part of the base_url
* the request's path isn't under the media path (or equal)
"""
return path.startswith(self.base_url[2]) and not self.base_url[1]
def file_path(self, url):
"""
Returns the relative path to the media file on disk for the given URL.
"""
relative_url = url[len(self.base_url[2]):]
return url2pathname(relative_url)
def serve(self, request):
"""
Actually serves the request path.
"""
return serve(request, self.file_path(request.path), insecure=True)
def get_response(self, request):
from django.http import Http404
if self._should_handle(request.path):
try:
return self.serve(request)
except Http404 as e:
if settings.DEBUG:
from django.views import debug
return debug.technical_404_response(request, e)
return super(StaticFilesHandler, self).get_response(request)
def __call__(self, environ, start_response):
if not self._should_handle(get_path_info(environ)):
return self.application(environ, start_response)
return super(StaticFilesHandler, self).__call__(environ, start_response)
| bsd-3-clause |
aaxelb/osf.io | framework/postcommit_tasks/handlers.py | 21 | 3764 | # -*- coding: utf-8 -*-
import functools
import hashlib
import logging
import threading
import binascii
from collections import OrderedDict
import os
from celery import chain
from framework.celery_tasks import app
from celery.local import PromiseProxy
from gevent.pool import Pool
from website import settings
_local = threading.local()
logger = logging.getLogger(__name__)
def postcommit_queue():
if not hasattr(_local, 'postcommit_queue'):
_local.postcommit_queue = OrderedDict()
return _local.postcommit_queue
def postcommit_celery_queue():
if not hasattr(_local, 'postcommit_celery_queue'):
_local.postcommit_celery_queue = OrderedDict()
return _local.postcommit_celery_queue
def postcommit_before_request():
_local.postcommit_queue = OrderedDict()
_local.postcommit_celery_queue = OrderedDict()
@app.task(max_retries=5, default_retry_delay=60)
def postcommit_celery_task_wrapper(queue):
# chain.apply calls the tasks synchronously without re-enqueuing each one
# http://stackoverflow.com/questions/34177131/how-to-solve-python-celery-error-when-using-chain-encodeerrorruntimeerrormaxi?answertab=votes#tab-top
chain(*queue.values()).apply()
def postcommit_after_request(response, base_status_error_code=500):
if response.status_code >= base_status_error_code:
_local.postcommit_queue = OrderedDict()
_local.postcommit_celery_queue = OrderedDict()
return response
try:
if postcommit_queue():
number_of_threads = 30 # one db connection per greenlet, let's share
pool = Pool(number_of_threads)
for func in postcommit_queue().values():
pool.spawn(func)
pool.join(timeout=5.0, raise_error=True) # 5 second timeout and reraise exceptions
if postcommit_celery_queue():
if settings.USE_CELERY:
# delay pushes the wrapper task into celery
postcommit_celery_task_wrapper.delay(postcommit_celery_queue())
else:
for task in postcommit_celery_queue().values():
task()
except AttributeError as ex:
if not settings.DEBUG_MODE:
logger.error('Post commit task queue not initialized: {}'.format(ex))
return response
def enqueue_postcommit_task(fn, args, kwargs, celery=False, once_per_request=True):
# make a hash of the pertinent data
raw = [fn.__name__, fn.__module__, args, kwargs]
m = hashlib.md5()
m.update('-'.join([x.__repr__() for x in raw]))
key = m.hexdigest()
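    # this key deduplicates identical (fn, args, kwargs) calls so that, with
    # once_per_request=True, repeated enqueues collapse into a single task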
if not once_per_request:
# we want to run it once for every occurrence, add a random string
key = '{}:{}'.format(key, binascii.hexlify(os.urandom(8)))
if celery and isinstance(fn, PromiseProxy):
postcommit_celery_queue().update({key: fn.si(*args, **kwargs)})
else:
postcommit_queue().update({key: functools.partial(fn, *args, **kwargs)})
handlers = {
'before_request': postcommit_before_request,
'after_request': postcommit_after_request,
}
def run_postcommit(once_per_request=True, celery=False):
'''
Delays function execution until after the request's transaction has been committed.
If you set the celery kwarg to True args and kwargs must be JSON serializable
Tasks will only be run if the response's status code is < 500.
:return:
'''
def wrapper(func):
# if we're local dev or running unit tests, run without queueing
if settings.DEBUG_MODE:
return func
@functools.wraps(func)
def wrapped(*args, **kwargs):
enqueue_postcommit_task(func, args, kwargs, celery=celery, once_per_request=once_per_request)
return wrapped
return wrapper
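# Illustrative usage sketch (the task name below is hypothetical):
#
#     @run_postcommit(once_per_request=True, celery=True)
#     @app.task(max_retries=5, default_retry_delay=60)
#     def update_search_index(node_id):
#         ...
#
# The decorated call is queued during the request and only runs after the
# response is produced with a status code < 500 (see postcommit_after_request).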
| apache-2.0 |
ForkedReposBak/mxnet | python/mxnet/gluon/contrib/nn/basic_layers.py | 2 | 17216 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable= arguments-differ
"""Custom neural network layers in model_zoo."""
__all__ = ['Concurrent', 'HybridConcurrent', 'Identity', 'SparseEmbedding',
'SyncBatchNorm', 'PixelShuffle1D', 'PixelShuffle2D',
'PixelShuffle3D']
import warnings
from .... import ndarray as nd, context
from ...block import HybridBlock, Block
from ...nn import Sequential, HybridSequential, BatchNorm
class Concurrent(Sequential):
"""Lays `Block` s concurrently.
This block feeds its input to all children blocks, and
produce the output by concatenating all the children blocks' outputs
on the specified axis.
Example::
net = Concurrent()
# use net's name_scope to give children blocks appropriate names.
with net.name_scope():
net.add(nn.Dense(10, activation='relu'))
net.add(nn.Dense(20))
net.add(Identity())
Parameters
----------
axis : int, default -1
The axis on which to concatenate the outputs.
"""
def __init__(self, axis=-1, prefix=None, params=None):
super(Concurrent, self).__init__(prefix=prefix, params=params)
self.axis = axis
def forward(self, x):
out = []
for block in self._children.values():
out.append(block()(x))
out = nd.concat(*out, dim=self.axis)
return out
class HybridConcurrent(HybridSequential):
"""Lays `HybridBlock` s concurrently.
This block feeds its input to all children blocks, and
produce the output by concatenating all the children blocks' outputs
on the specified axis.
Example::
net = HybridConcurrent()
# use net's name_scope to give children blocks appropriate names.
with net.name_scope():
net.add(nn.Dense(10, activation='relu'))
net.add(nn.Dense(20))
net.add(Identity())
Parameters
----------
axis : int, default -1
The axis on which to concatenate the outputs.
"""
def __init__(self, axis=-1, prefix=None, params=None):
super(HybridConcurrent, self).__init__(prefix=prefix, params=params)
self.axis = axis
def hybrid_forward(self, F, x):
out = []
for block in self._children.values():
out.append(block()(x))
out = F.concat(*out, dim=self.axis)
return out
class Identity(HybridBlock):
"""Block that passes through the input directly.
This block can be used in conjunction with HybridConcurrent
block for residual connection.
Example::
net = HybridConcurrent()
# use net's name_scope to give child Blocks appropriate names.
with net.name_scope():
net.add(nn.Dense(10, activation='relu'))
net.add(nn.Dense(20))
net.add(Identity())
"""
def __init__(self, prefix=None, params=None):
super(Identity, self).__init__(prefix=prefix, params=params)
def hybrid_forward(self, F, x):
return x
class SparseEmbedding(Block):
r"""Turns non-negative integers (indexes/tokens) into dense vectors
of fixed size. eg. [4, 20] -> [[0.25, 0.1], [0.6, -0.2]]
This SparseBlock is designed for distributed training with extremely large
input dimension. Both weight and gradient w.r.t. weight are `RowSparseNDArray`.
Note: if `sparse_grad` is set to True, the gradient w.r.t weight will be
sparse. Only a subset of optimizers support sparse gradients, including SGD, AdaGrad
and Adam. By default lazy updates is turned on, which may perform differently
from standard updates. For more details, please check the Optimization API at:
https://mxnet.incubator.apache.org/api/python/optimization/optimization.html
Parameters
----------
input_dim : int
Size of the vocabulary, i.e. maximum integer index + 1.
output_dim : int
Dimension of the dense embedding.
dtype : str or np.dtype, default 'float32'
Data type of output embeddings.
weight_initializer : Initializer
Initializer for the `embeddings` matrix.
Inputs:
- **data**: (N-1)-D tensor with shape: `(x1, x2, ..., xN-1)`.
Output:
- **out**: N-D tensor with shape: `(x1, x2, ..., xN-1, output_dim)`.
"""
def __init__(self, input_dim, output_dim, dtype='float32',
weight_initializer=None, **kwargs):
super(SparseEmbedding, self).__init__(**kwargs)
self._kwargs = {'input_dim': input_dim, 'output_dim': output_dim,
'dtype': dtype, 'sparse_grad': True}
self.weight = self.params.get('weight', shape=(input_dim, output_dim),
init=weight_initializer, dtype=dtype,
grad_stype='row_sparse', stype='row_sparse')
def forward(self, x):
weight = self.weight.row_sparse_data(x)
return nd.Embedding(x, weight, name='fwd', **self._kwargs)
def __repr__(self):
s = '{block_name}({input_dim} -> {output_dim}, {dtype})'
return s.format(block_name=self.__class__.__name__,
**self._kwargs)
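# Illustrative SparseEmbedding usage (a sketch mirroring the docstring above):
#
#     embed = SparseEmbedding(input_dim=1000, output_dim=16)
#     embed.initialize()
#     out = embed(nd.array([4, 20]))   # out.shape == (2, 16)
#
# Gradients w.r.t. the weight are RowSparseNDArray, so pair this block with an
# optimizer that supports sparse updates (SGD, AdaGrad or Adam, as noted above).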
class SyncBatchNorm(BatchNorm):
"""Cross-GPU Synchronized Batch normalization (SyncBN)
Standard BN [1]_ implementation only normalize the data within each device.
SyncBN normalizes the input within the whole mini-batch.
We follow the implementation described in the paper [2]_.
Note: Current implementation of SyncBN does not support FP16 training.
For FP16 inference, use standard nn.BatchNorm instead of SyncBN.
Parameters
----------
in_channels : int, default 0
Number of channels (feature maps) in input data. If not specified,
initialization will be deferred to the first time `forward` is called
and `in_channels` will be inferred from the shape of input data.
num_devices : int, default number of visible GPUs
momentum: float, default 0.9
Momentum for the moving average.
epsilon: float, default 1e-5
Small float added to variance to avoid dividing by zero.
center: bool, default True
If True, add offset of `beta` to normalized tensor.
If False, `beta` is ignored.
scale: bool, default True
If True, multiply by `gamma`. If False, `gamma` is not used.
When the next layer is linear (also e.g. `nn.relu`),
this can be disabled since the scaling
will be done by the next layer.
use_global_stats: bool, default False
If True, use global moving statistics instead of local batch-norm. This will force
change batch-norm into a scale shift operator.
If False, use local batch-norm.
beta_initializer: str or `Initializer`, default 'zeros'
Initializer for the beta weight.
gamma_initializer: str or `Initializer`, default 'ones'
Initializer for the gamma weight.
running_mean_initializer: str or `Initializer`, default 'zeros'
Initializer for the running mean.
running_variance_initializer: str or `Initializer`, default 'ones'
Initializer for the running variance.
Inputs:
- **data**: input tensor with arbitrary shape.
Outputs:
- **out**: output tensor with the same shape as `data`.
Reference:
.. [1] Ioffe, Sergey, and Christian Szegedy. "Batch normalization: Accelerating \
deep network training by reducing internal covariate shift." *ICML 2015*
.. [2] Hang Zhang, Kristin Dana, Jianping Shi, Zhongyue Zhang, Xiaogang Wang, \
Ambrish Tyagi, and Amit Agrawal. "Context Encoding for Semantic Segmentation." *CVPR 2018*
"""
def __init__(self, in_channels=0, num_devices=None, momentum=0.9, epsilon=1e-5,
center=True, scale=True, use_global_stats=False, beta_initializer='zeros',
gamma_initializer='ones', running_mean_initializer='zeros',
running_variance_initializer='ones', **kwargs):
super(SyncBatchNorm, self).__init__(
axis=1, momentum=momentum, epsilon=epsilon,
center=center, scale=scale,
use_global_stats=use_global_stats,
beta_initializer=beta_initializer,
gamma_initializer=gamma_initializer,
running_mean_initializer=running_mean_initializer,
running_variance_initializer=running_variance_initializer,
in_channels=in_channels, **kwargs)
num_devices = self._get_num_devices() if num_devices is None else num_devices
self._kwargs = {'eps': epsilon, 'momentum': momentum,
'fix_gamma': not scale, 'use_global_stats': use_global_stats,
'ndev': num_devices, 'key': self.prefix}
def _get_num_devices(self):
warnings.warn("Caution using SyncBatchNorm: "
"if not using all the GPUs, please mannually set num_devices",
UserWarning)
num_devices = context.num_gpus()
num_devices = num_devices if num_devices > 0 else 1
return num_devices
def hybrid_forward(self, F, x, gamma, beta, running_mean, running_var):
return F.contrib.SyncBatchNorm(x, gamma, beta, running_mean, running_var,
name='fwd', **self._kwargs)
class PixelShuffle1D(HybridBlock):
r"""Pixel-shuffle layer for upsampling in 1 dimension.
Pixel-shuffling is the operation of taking groups of values along
the *channel* dimension and regrouping them into blocks of pixels
along the ``W`` dimension, thereby effectively multiplying that dimension
by a constant factor in size.
For example, a feature map of shape :math:`(fC, W)` is reshaped
into :math:`(C, fW)` by forming little value groups of size :math:`f`
and arranging them in a grid of size :math:`W`.
Parameters
----------
factor : int or 1-tuple of int
Upsampling factor, applied to the ``W`` dimension.
Inputs:
- **data**: Tensor of shape ``(N, f*C, W)``.
Outputs:
- **out**: Tensor of shape ``(N, C, W*f)``.
Examples
--------
>>> pxshuf = PixelShuffle1D(2)
>>> x = mx.nd.zeros((1, 8, 3))
>>> pxshuf(x).shape
(1, 4, 6)
"""
def __init__(self, factor):
super(PixelShuffle1D, self).__init__()
self._factor = int(factor)
def hybrid_forward(self, F, x):
"""Perform pixel-shuffling on the input."""
f = self._factor
# (N, C*f, W)
x = F.reshape(x, (0, -4, -1, f, 0)) # (N, C, f, W)
x = F.transpose(x, (0, 1, 3, 2)) # (N, C, W, f)
x = F.reshape(x, (0, 0, -3)) # (N, C, W*f)
return x
def __repr__(self):
return "{}({})".format(self.__class__.__name__, self._factor)
class PixelShuffle2D(HybridBlock):
r"""Pixel-shuffle layer for upsampling in 2 dimensions.
Pixel-shuffling is the operation of taking groups of values along
the *channel* dimension and regrouping them into blocks of pixels
along the ``H`` and ``W`` dimensions, thereby effectively multiplying
those dimensions by a constant factor in size.
For example, a feature map of shape :math:`(f^2 C, H, W)` is reshaped
into :math:`(C, fH, fW)` by forming little :math:`f \times f` blocks
of pixels and arranging them in an :math:`H \times W` grid.
Pixel-shuffling together with regular convolution is an alternative,
learnable way of upsampling an image by arbitrary factors. It is reported
to help overcome checkerboard artifacts that are common in upsampling with
transposed convolutions (also called deconvolutions). See the paper
`Real-Time Single Image and Video Super-Resolution Using an Efficient
Sub-Pixel Convolutional Neural Network <https://arxiv.org/abs/1609.05158>`_
for further details.
Parameters
----------
factor : int or 2-tuple of int
Upsampling factors, applied to the ``H`` and ``W`` dimensions,
in that order.
Inputs:
- **data**: Tensor of shape ``(N, f1*f2*C, H, W)``.
Outputs:
- **out**: Tensor of shape ``(N, C, H*f1, W*f2)``.
Examples
--------
>>> pxshuf = PixelShuffle2D((2, 3))
>>> x = mx.nd.zeros((1, 12, 3, 5))
>>> pxshuf(x).shape
(1, 2, 6, 15)
"""
def __init__(self, factor):
super(PixelShuffle2D, self).__init__()
try:
self._factors = (int(factor),) * 2
except TypeError:
self._factors = tuple(int(fac) for fac in factor)
assert len(self._factors) == 2, "wrong length {}".format(len(self._factors))
def hybrid_forward(self, F, x):
"""Perform pixel-shuffling on the input."""
f1, f2 = self._factors
# (N, f1*f2*C, H, W)
x = F.reshape(x, (0, -4, -1, f1 * f2, 0, 0)) # (N, C, f1*f2, H, W)
x = F.reshape(x, (0, 0, -4, f1, f2, 0, 0)) # (N, C, f1, f2, H, W)
x = F.transpose(x, (0, 1, 4, 2, 5, 3)) # (N, C, H, f1, W, f2)
x = F.reshape(x, (0, 0, -3, -3)) # (N, C, H*f1, W*f2)
return x
def __repr__(self):
return "{}({})".format(self.__class__.__name__, self._factors)
class PixelShuffle3D(HybridBlock):
r"""Pixel-shuffle layer for upsampling in 3 dimensions.
Pixel-shuffling (or voxel-shuffling in 3D) is the operation of taking
groups of values along the *channel* dimension and regrouping them into
blocks of voxels along the ``D``, ``H`` and ``W`` dimensions, thereby
effectively multiplying those dimensions by a constant factor in size.
For example, a feature map of shape :math:`(f^3 C, D, H, W)` is reshaped
into :math:`(C, fD, fH, fW)` by forming little :math:`f \times f \times f`
blocks of voxels and arranging them in a :math:`D \times H \times W` grid.
Pixel-shuffling together with regular convolution is an alternative,
learnable way of upsampling an image by arbitrary factors. It is reported
to help overcome checkerboard artifacts that are common in upsampling with
transposed convolutions (also called deconvolutions). See the paper
`Real-Time Single Image and Video Super-Resolution Using an Efficient
Sub-Pixel Convolutional Neural Network <https://arxiv.org/abs/1609.05158>`_
for further details.
Parameters
----------
factor : int or 3-tuple of int
Upsampling factors, applied to the ``D``, ``H`` and ``W``
dimensions, in that order.
Inputs:
- **data**: Tensor of shape ``(N, f1*f2*f3*C, D, H, W)``.
Outputs:
- **out**: Tensor of shape ``(N, C, D*f1, H*f2, W*f3)``.
Examples
--------
>>> pxshuf = PixelShuffle3D((2, 3, 4))
>>> x = mx.nd.zeros((1, 48, 3, 5, 7))
>>> pxshuf(x).shape
(1, 2, 6, 15, 28)
"""
def __init__(self, factor):
super(PixelShuffle3D, self).__init__()
try:
self._factors = (int(factor),) * 3
except TypeError:
self._factors = tuple(int(fac) for fac in factor)
assert len(self._factors) == 3, "wrong length {}".format(len(self._factors))
def hybrid_forward(self, F, x):
"""Perform pixel-shuffling on the input."""
# `transpose` doesn't support 8D, need other implementation
f1, f2, f3 = self._factors
# (N, C*f1*f2*f3, D, H, W)
x = F.reshape(x, (0, -4, -1, f1 * f2 * f3, 0, 0, 0)) # (N, C, f1*f2*f3, D, H, W)
x = F.swapaxes(x, 2, 3) # (N, C, D, f1*f2*f3, H, W)
x = F.reshape(x, (0, 0, 0, -4, f1, f2*f3, 0, 0)) # (N, C, D, f1, f2*f3, H, W)
x = F.reshape(x, (0, 0, -3, 0, 0, 0)) # (N, C, D*f1, f2*f3, H, W)
x = F.swapaxes(x, 3, 4) # (N, C, D*f1, H, f2*f3, W)
x = F.reshape(x, (0, 0, 0, 0, -4, f2, f3, 0)) # (N, C, D*f1, H, f2, f3, W)
x = F.reshape(x, (0, 0, 0, -3, 0, 0)) # (N, C, D*f1, H*f2, f3, W)
x = F.swapaxes(x, 4, 5) # (N, C, D*f1, H*f2, W, f3)
x = F.reshape(x, (0, 0, 0, 0, -3)) # (N, C, D*f1, H*f2, W*f3)
return x
def __repr__(self):
return "{}({})".format(self.__class__.__name__, self._factors)
| apache-2.0 |
jagg81/translate-toolkit | build/lib.linux-x86_64-2.6/translate/convert/prop2po.py | 3 | 9977 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2002-2006 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with translate; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""convert Java/Mozilla .properties files to Gettext PO localization files
See: http://translate.sourceforge.net/wiki/toolkit/prop2po for examples and
usage instructions
"""
import sys
from translate.storage import po
from translate.storage import properties
class prop2po:
"""convert a .properties file to a .po file for handling the
translation."""
def convertstore(self, thepropfile, personality="java",
duplicatestyle="msgctxt"):
"""converts a .properties file to a .po file..."""
self.personality = personality
thetargetfile = po.pofile()
if self.personality == "mozilla" or self.personality == "skype":
targetheader = thetargetfile.init_headers(charset="UTF-8",
encoding="8bit",
x_accelerator_marker="&")
else:
targetheader = thetargetfile.init_headers(charset="UTF-8",
encoding="8bit")
targetheader.addnote("extracted from %s" % thepropfile.filename,
"developer")
# we try and merge the header po with any comments at the start of the
# properties file
appendedheader = False
waitingcomments = []
for propunit in thepropfile.units:
pounit = self.convertunit(propunit, "developer")
if pounit is None:
waitingcomments.extend(propunit.comments)
# FIXME the storage class should not be creating blank units
if pounit is "discard":
continue
if not appendedheader:
if propunit.isblank():
targetheader.addnote("\n".join(waitingcomments).rstrip(),
"developer", position="prepend")
waitingcomments = []
pounit = None
appendedheader = True
if pounit is not None:
pounit.addnote("\n".join(waitingcomments).rstrip(),
"developer", position="prepend")
waitingcomments = []
thetargetfile.addunit(pounit)
thetargetfile.removeduplicates(duplicatestyle)
return thetargetfile
def mergestore(self, origpropfile, translatedpropfile, personality="java",
blankmsgstr=False, duplicatestyle="msgctxt"):
"""converts two .properties files to a .po file..."""
self.personality = personality
thetargetfile = po.pofile()
if self.personality == "mozilla" or self.personality == "skype":
targetheader = thetargetfile.init_headers(charset="UTF-8",
encoding="8bit",
x_accelerator_marker="&")
else:
targetheader = thetargetfile.init_headers(charset="UTF-8",
encoding="8bit")
targetheader.addnote("extracted from %s, %s" % (origpropfile.filename, translatedpropfile.filename),
"developer")
translatedpropfile.makeindex()
# we try and merge the header po with any comments at the start of
# the properties file
appendedheader = False
waitingcomments = []
# loop through the original file, looking at units one by one
for origprop in origpropfile.units:
origpo = self.convertunit(origprop, "developer")
if origpo is None:
waitingcomments.extend(origprop.comments)
# FIXME the storage class should not be creating blank units
if origpo is "discard":
continue
# handle the header case specially...
if not appendedheader:
if origprop.isblank():
targetheader.addnote(u"".join(waitingcomments).rstrip(),
"developer", position="prepend")
waitingcomments = []
origpo = None
appendedheader = True
# try and find a translation of the same name...
if origprop.name in translatedpropfile.locationindex:
translatedprop = translatedpropfile.locationindex[origprop.name]
# Need to check that this comment is not a copy of the
# developer comments
translatedpo = self.convertunit(translatedprop, "translator")
if translatedpo is "discard":
continue
else:
translatedpo = None
# if we have a valid po unit, get the translation and add it...
if origpo is not None:
if translatedpo is not None and not blankmsgstr:
origpo.target = translatedpo.source
origpo.addnote(u"".join(waitingcomments).rstrip(),
"developer", position="prepend")
waitingcomments = []
thetargetfile.addunit(origpo)
elif translatedpo is not None:
print >> sys.stderr, "error converting original properties definition %s" % origprop.name
thetargetfile.removeduplicates(duplicatestyle)
return thetargetfile
def convertunit(self, propunit, commenttype):
"""Converts a .properties unit to a .po unit. Returns None if empty
or not for translation."""
if propunit is None:
return None
# escape unicode
pounit = po.pounit(encoding="UTF-8")
if hasattr(propunit, "comments"):
for comment in propunit.comments:
if "DONT_TRANSLATE" in comment:
return "discard"
pounit.addnote(u"".join(propunit.getnotes()).rstrip(), commenttype)
# TODO: handle multiline msgid
if propunit.isblank():
return None
pounit.addlocation(propunit.name)
pounit.source = propunit.source
pounit.target = u""
return pounit
def convertstrings(inputfile, outputfile, templatefile, personality="strings",
pot=False, duplicatestyle="msgctxt", encoding=None):
""".strings specific convertor function"""
return convertprop(inputfile, outputfile, templatefile,
personality="strings", pot=pot,
duplicatestyle=duplicatestyle, encoding=encoding)
def convertmozillaprop(inputfile, outputfile, templatefile, pot=False,
duplicatestyle="msgctxt"):
"""Mozilla specific convertor function"""
return convertprop(inputfile, outputfile, templatefile,
personality="mozilla", pot=pot,
duplicatestyle=duplicatestyle)
def convertprop(inputfile, outputfile, templatefile, personality="java",
pot=False, duplicatestyle="msgctxt", encoding=None):
"""reads in inputfile using properties, converts using prop2po, writes
to outputfile"""
inputstore = properties.propfile(inputfile, personality, encoding)
convertor = prop2po()
if templatefile is None:
outputstore = convertor.convertstore(inputstore, personality,
duplicatestyle=duplicatestyle)
else:
templatestore = properties.propfile(templatefile, personality, encoding)
outputstore = convertor.mergestore(templatestore, inputstore,
personality, blankmsgstr=pot,
duplicatestyle=duplicatestyle)
if outputstore.isempty():
return 0
outputfile.write(str(outputstore))
return 1
formats = {
"properties": ("po", convertprop),
("properties", "properties"): ("po", convertprop),
"lang": ("po", convertprop),
("lang", "lang"): ("po", convertprop),
"strings": ("po", convertstrings),
("strings", "strings"): ("po", convertstrings),
}
def main(argv=None):
from translate.convert import convert
parser = convert.ConvertOptionParser(formats, usetemplates=True,
usepots=True,
description=__doc__)
parser.add_option("", "--personality", dest="personality",
default=properties.default_dialect,
type="choice",
choices=properties.dialects.keys(),
help="override the input file format: %s (for .properties files, default: %s)" %
(", ".join(properties.dialects.iterkeys()),
properties.default_dialect),
metavar="TYPE")
parser.add_option("", "--encoding", dest="encoding", default=None,
help="override the encoding set by the personality",
metavar="ENCODING")
parser.add_duplicates_option()
parser.passthrough.append("pot")
parser.passthrough.append("personality")
parser.passthrough.append("encoding")
parser.run(argv)
if __name__ == '__main__':
main()
| gpl-2.0 |
boumenot/azure-linux-extensions | OSPatching/azure/__init__.py | 46 | 33598 | #-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
import ast
import base64
import hashlib
import hmac
import sys
import types
import warnings
import inspect
if sys.version_info < (3,):
from urllib2 import quote as url_quote
from urllib2 import unquote as url_unquote
_strtype = basestring
else:
from urllib.parse import quote as url_quote
from urllib.parse import unquote as url_unquote
_strtype = str
from datetime import datetime
from xml.dom import minidom
from xml.sax.saxutils import escape as xml_escape
#--------------------------------------------------------------------------
# constants
__author__ = 'Microsoft Corp. <[email protected]>'
__version__ = '0.8.4'
# Live ServiceClient URLs
BLOB_SERVICE_HOST_BASE = '.blob.core.windows.net'
QUEUE_SERVICE_HOST_BASE = '.queue.core.windows.net'
TABLE_SERVICE_HOST_BASE = '.table.core.windows.net'
SERVICE_BUS_HOST_BASE = '.servicebus.windows.net'
MANAGEMENT_HOST = 'management.core.windows.net'
# Development ServiceClient URLs
DEV_BLOB_HOST = '127.0.0.1:10000'
DEV_QUEUE_HOST = '127.0.0.1:10001'
DEV_TABLE_HOST = '127.0.0.1:10002'
# Default credentials for Development Storage Service
DEV_ACCOUNT_NAME = 'devstoreaccount1'
DEV_ACCOUNT_KEY = 'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw=='
# All of our error messages
_ERROR_CANNOT_FIND_PARTITION_KEY = 'Cannot find partition key in request.'
_ERROR_CANNOT_FIND_ROW_KEY = 'Cannot find row key in request.'
_ERROR_INCORRECT_TABLE_IN_BATCH = \
'Table should be the same in a batch operations'
_ERROR_INCORRECT_PARTITION_KEY_IN_BATCH = \
'Partition Key should be the same in a batch operations'
_ERROR_DUPLICATE_ROW_KEY_IN_BATCH = \
'Row Keys should not be the same in a batch operations'
_ERROR_BATCH_COMMIT_FAIL = 'Batch Commit Fail'
_ERROR_MESSAGE_NOT_PEEK_LOCKED_ON_DELETE = \
'Message is not peek locked and cannot be deleted.'
_ERROR_MESSAGE_NOT_PEEK_LOCKED_ON_UNLOCK = \
'Message is not peek locked and cannot be unlocked.'
_ERROR_QUEUE_NOT_FOUND = 'Queue was not found'
_ERROR_TOPIC_NOT_FOUND = 'Topic was not found'
_ERROR_CONFLICT = 'Conflict ({0})'
_ERROR_NOT_FOUND = 'Not found ({0})'
_ERROR_UNKNOWN = 'Unknown error ({0})'
_ERROR_SERVICEBUS_MISSING_INFO = \
'You need to provide servicebus namespace, access key and Issuer'
_ERROR_STORAGE_MISSING_INFO = \
'You need to provide both account name and access key'
_ERROR_ACCESS_POLICY = \
'share_access_policy must be either SignedIdentifier or AccessPolicy ' + \
'instance'
_WARNING_VALUE_SHOULD_BE_BYTES = \
'Warning: {0} must be bytes data type. It will be converted ' + \
'automatically, with utf-8 text encoding.'
_ERROR_VALUE_SHOULD_BE_BYTES = '{0} should be of type bytes.'
_ERROR_VALUE_NONE = '{0} should not be None.'
_ERROR_VALUE_NEGATIVE = '{0} should not be negative.'
_ERROR_CANNOT_SERIALIZE_VALUE_TO_ENTITY = \
'Cannot serialize the specified value ({0}) to an entity. Please use ' + \
'an EntityProperty (which can specify custom types), int, str, bool, ' + \
'or datetime.'
_ERROR_PAGE_BLOB_SIZE_ALIGNMENT = \
'Invalid page blob size: {0}. ' + \
'The size must be aligned to a 512-byte boundary.'
_USER_AGENT_STRING = 'pyazure/' + __version__
METADATA_NS = 'http://schemas.microsoft.com/ado/2007/08/dataservices/metadata'
class WindowsAzureData(object):
''' This is the base of data class.
It is only used to check whether it is instance or not. '''
pass
class WindowsAzureError(Exception):
    ''' WindowsAzure Exception base class. '''
def __init__(self, message):
super(WindowsAzureError, self).__init__(message)
class WindowsAzureConflictError(WindowsAzureError):
'''Indicates that the resource could not be created because it already
exists'''
def __init__(self, message):
super(WindowsAzureConflictError, self).__init__(message)
class WindowsAzureMissingResourceError(WindowsAzureError):
'''Indicates that a request for a request for a resource (queue, table,
container, etc...) failed because the specified resource does not exist'''
def __init__(self, message):
super(WindowsAzureMissingResourceError, self).__init__(message)
class WindowsAzureBatchOperationError(WindowsAzureError):
'''Indicates that a batch operation failed'''
def __init__(self, message, code):
super(WindowsAzureBatchOperationError, self).__init__(message)
self.code = code
class Feed(object):
pass
class _Base64String(str):
pass
class HeaderDict(dict):
def __getitem__(self, index):
return super(HeaderDict, self).__getitem__(index.lower())
def _encode_base64(data):
if isinstance(data, _unicode_type):
data = data.encode('utf-8')
encoded = base64.b64encode(data)
return encoded.decode('utf-8')
def _decode_base64_to_bytes(data):
if isinstance(data, _unicode_type):
data = data.encode('utf-8')
return base64.b64decode(data)
def _decode_base64_to_text(data):
decoded_bytes = _decode_base64_to_bytes(data)
return decoded_bytes.decode('utf-8')
def _get_readable_id(id_name, id_prefix_to_skip):
"""simplified an id to be more friendly for us people"""
# id_name is in the form 'https://namespace.host.suffix/name'
# where name may contain a forward slash!
pos = id_name.find('//')
if pos != -1:
pos += 2
if id_prefix_to_skip:
pos = id_name.find(id_prefix_to_skip, pos)
if pos != -1:
pos += len(id_prefix_to_skip)
pos = id_name.find('/', pos)
if pos != -1:
return id_name[pos + 1:]
return id_name
def _get_entry_properties_from_node(entry, include_id, id_prefix_to_skip=None, use_title_as_id=False):
''' get properties from entry xml '''
properties = {}
etag = entry.getAttributeNS(METADATA_NS, 'etag')
if etag:
properties['etag'] = etag
for updated in _get_child_nodes(entry, 'updated'):
properties['updated'] = updated.firstChild.nodeValue
for name in _get_children_from_path(entry, 'author', 'name'):
if name.firstChild is not None:
properties['author'] = name.firstChild.nodeValue
if include_id:
if use_title_as_id:
for title in _get_child_nodes(entry, 'title'):
properties['name'] = title.firstChild.nodeValue
else:
for id in _get_child_nodes(entry, 'id'):
properties['name'] = _get_readable_id(
id.firstChild.nodeValue, id_prefix_to_skip)
return properties
def _get_entry_properties(xmlstr, include_id, id_prefix_to_skip=None):
''' get properties from entry xml '''
xmldoc = minidom.parseString(xmlstr)
properties = {}
for entry in _get_child_nodes(xmldoc, 'entry'):
properties.update(_get_entry_properties_from_node(entry, include_id, id_prefix_to_skip))
return properties
def _get_first_child_node_value(parent_node, node_name):
xml_attrs = _get_child_nodes(parent_node, node_name)
if xml_attrs:
xml_attr = xml_attrs[0]
if xml_attr.firstChild:
value = xml_attr.firstChild.nodeValue
return value
def _get_child_nodes(node, tagName):
return [childNode for childNode in node.getElementsByTagName(tagName)
if childNode.parentNode == node]
def _get_children_from_path(node, *path):
'''descends through a hierarchy of nodes returning the list of children
at the inner most level. Only returns children who share a common parent,
not cousins.'''
cur = node
for index, child in enumerate(path):
if isinstance(child, _strtype):
next = _get_child_nodes(cur, child)
else:
next = _get_child_nodesNS(cur, *child)
if index == len(path) - 1:
return next
elif not next:
break
cur = next[0]
return []
def _get_child_nodesNS(node, ns, tagName):
return [childNode for childNode in node.getElementsByTagNameNS(ns, tagName)
if childNode.parentNode == node]
def _create_entry(entry_body):
''' Adds common part of entry to a given entry body and return the whole
xml. '''
updated_str = datetime.utcnow().isoformat()
if datetime.utcnow().utcoffset() is None:
updated_str += '+00:00'
entry_start = '''<?xml version="1.0" encoding="utf-8" standalone="yes"?>
<entry xmlns:d="http://schemas.microsoft.com/ado/2007/08/dataservices" xmlns:m="http://schemas.microsoft.com/ado/2007/08/dataservices/metadata" xmlns="http://www.w3.org/2005/Atom" >
<title /><updated>{updated}</updated><author><name /></author><id />
<content type="application/xml">
{body}</content></entry>'''
return entry_start.format(updated=updated_str, body=entry_body)
def _to_datetime(strtime):
return datetime.strptime(strtime, "%Y-%m-%dT%H:%M:%S.%f")
_KNOWN_SERIALIZATION_XFORMS = {
'include_apis': 'IncludeAPIs',
'message_id': 'MessageId',
'content_md5': 'Content-MD5',
'last_modified': 'Last-Modified',
'cache_control': 'Cache-Control',
'account_admin_live_email_id': 'AccountAdminLiveEmailId',
'service_admin_live_email_id': 'ServiceAdminLiveEmailId',
'subscription_id': 'SubscriptionID',
'fqdn': 'FQDN',
'private_id': 'PrivateID',
'os_virtual_hard_disk': 'OSVirtualHardDisk',
'logical_disk_size_in_gb': 'LogicalDiskSizeInGB',
'logical_size_in_gb': 'LogicalSizeInGB',
'os': 'OS',
'persistent_vm_downtime_info': 'PersistentVMDowntimeInfo',
'copy_id': 'CopyId',
}
def _get_serialization_name(element_name):
"""converts a Python name into a serializable name"""
known = _KNOWN_SERIALIZATION_XFORMS.get(element_name)
if known is not None:
return known
if element_name.startswith('x_ms_'):
return element_name.replace('_', '-')
if element_name.endswith('_id'):
element_name = element_name.replace('_id', 'ID')
for name in ['content_', 'last_modified', 'if_', 'cache_control']:
if element_name.startswith(name):
element_name = element_name.replace('_', '-_')
return ''.join(name.capitalize() for name in element_name.split('_'))
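# e.g. _get_serialization_name('hosted_service_name') == 'HostedServiceName'
# and _get_serialization_name('x_ms_blob_type') == 'x-ms-blob-type'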
if sys.version_info < (3,):
_unicode_type = unicode
def _str(value):
if isinstance(value, unicode):
return value.encode('utf-8')
return str(value)
else:
_str = str
_unicode_type = str
def _str_or_none(value):
if value is None:
return None
return _str(value)
def _int_or_none(value):
if value is None:
return None
return str(int(value))
def _bool_or_none(value):
if value is None:
return None
if isinstance(value, bool):
if value:
return 'true'
else:
return 'false'
return str(value)
def _convert_class_to_xml(source, xml_prefix=True):
if source is None:
return ''
xmlstr = ''
if xml_prefix:
xmlstr = '<?xml version="1.0" encoding="utf-8"?>'
if isinstance(source, list):
for value in source:
xmlstr += _convert_class_to_xml(value, False)
elif isinstance(source, WindowsAzureData):
class_name = source.__class__.__name__
xmlstr += '<' + class_name + '>'
for name, value in vars(source).items():
if value is not None:
if isinstance(value, list) or \
isinstance(value, WindowsAzureData):
xmlstr += _convert_class_to_xml(value, False)
else:
xmlstr += ('<' + _get_serialization_name(name) + '>' +
xml_escape(str(value)) + '</' +
_get_serialization_name(name) + '>')
xmlstr += '</' + class_name + '>'
return xmlstr
def _find_namespaces_from_child(parent, child, namespaces):
"""Recursively searches from the parent to the child,
gathering all the applicable namespaces along the way"""
for cur_child in parent.childNodes:
if cur_child is child:
return True
if _find_namespaces_from_child(cur_child, child, namespaces):
# we are the parent node
for key in cur_child.attributes.keys():
if key.startswith('xmlns:') or key == 'xmlns':
namespaces[key] = cur_child.attributes[key]
break
return False
def _find_namespaces(parent, child):
res = {}
for key in parent.documentElement.attributes.keys():
if key.startswith('xmlns:') or key == 'xmlns':
res[key] = parent.documentElement.attributes[key]
_find_namespaces_from_child(parent, child, res)
return res
def _clone_node_with_namespaces(node_to_clone, original_doc):
clone = node_to_clone.cloneNode(True)
for key, value in _find_namespaces(original_doc, node_to_clone).items():
clone.attributes[key] = value
return clone
def _convert_response_to_feeds(response, convert_callback):
if response is None:
return None
feeds = _list_of(Feed)
x_ms_continuation = HeaderDict()
for name, value in response.headers:
if 'x-ms-continuation' in name:
x_ms_continuation[name[len('x-ms-continuation') + 1:]] = value
if x_ms_continuation:
setattr(feeds, 'x_ms_continuation', x_ms_continuation)
xmldoc = minidom.parseString(response.body)
xml_entries = _get_children_from_path(xmldoc, 'feed', 'entry')
if not xml_entries:
# in some cases, response contains only entry but no feed
xml_entries = _get_children_from_path(xmldoc, 'entry')
if inspect.isclass(convert_callback) and issubclass(convert_callback, WindowsAzureData):
for xml_entry in xml_entries:
return_obj = convert_callback()
for node in _get_children_from_path(xml_entry,
'content',
convert_callback.__name__):
_fill_data_to_return_object(node, return_obj)
for name, value in _get_entry_properties_from_node(xml_entry,
include_id=True,
use_title_as_id=True).items():
setattr(return_obj, name, value)
feeds.append(return_obj)
else:
for xml_entry in xml_entries:
new_node = _clone_node_with_namespaces(xml_entry, xmldoc)
feeds.append(convert_callback(new_node.toxml('utf-8')))
return feeds
def _validate_type_bytes(param_name, param):
if not isinstance(param, bytes):
raise TypeError(_ERROR_VALUE_SHOULD_BE_BYTES.format(param_name))
def _validate_not_none(param_name, param):
if param is None:
raise TypeError(_ERROR_VALUE_NONE.format(param_name))
def _fill_list_of(xmldoc, element_type, xml_element_name):
xmlelements = _get_child_nodes(xmldoc, xml_element_name)
return [_parse_response_body_from_xml_node(xmlelement, element_type) \
for xmlelement in xmlelements]
def _fill_scalar_list_of(xmldoc, element_type, parent_xml_element_name,
xml_element_name):
'''Converts an xml fragment into a list of scalar types. The parent xml
element contains a flat list of xml elements which are converted into the
specified scalar type and added to the list.
Example:
xmldoc=
<Endpoints>
<Endpoint>http://{storage-service-name}.blob.core.windows.net/</Endpoint>
<Endpoint>http://{storage-service-name}.queue.core.windows.net/</Endpoint>
<Endpoint>http://{storage-service-name}.table.core.windows.net/</Endpoint>
</Endpoints>
element_type=str
parent_xml_element_name='Endpoints'
xml_element_name='Endpoint'
'''
xmlelements = _get_child_nodes(xmldoc, parent_xml_element_name)
if xmlelements:
xmlelements = _get_child_nodes(xmlelements[0], xml_element_name)
return [_get_node_value(xmlelement, element_type) \
for xmlelement in xmlelements]
def _fill_dict(xmldoc, element_name):
xmlelements = _get_child_nodes(xmldoc, element_name)
if xmlelements:
return_obj = {}
for child in xmlelements[0].childNodes:
if child.firstChild:
return_obj[child.nodeName] = child.firstChild.nodeValue
return return_obj
def _fill_dict_of(xmldoc, parent_xml_element_name, pair_xml_element_name,
key_xml_element_name, value_xml_element_name):
'''Converts an xml fragment into a dictionary. The parent xml element
contains a list of xml elements where each element has a child element for
the key, and another for the value.
Example:
xmldoc=
<ExtendedProperties>
<ExtendedProperty>
<Name>Ext1</Name>
<Value>Val1</Value>
</ExtendedProperty>
<ExtendedProperty>
<Name>Ext2</Name>
<Value>Val2</Value>
</ExtendedProperty>
</ExtendedProperties>
element_type=str
parent_xml_element_name='ExtendedProperties'
pair_xml_element_name='ExtendedProperty'
key_xml_element_name='Name'
value_xml_element_name='Value'
'''
return_obj = {}
xmlelements = _get_child_nodes(xmldoc, parent_xml_element_name)
if xmlelements:
xmlelements = _get_child_nodes(xmlelements[0], pair_xml_element_name)
for pair in xmlelements:
keys = _get_child_nodes(pair, key_xml_element_name)
values = _get_child_nodes(pair, value_xml_element_name)
if keys and values:
key = keys[0].firstChild.nodeValue
value = values[0].firstChild.nodeValue
return_obj[key] = value
return return_obj
def _fill_instance_child(xmldoc, element_name, return_type):
'''Converts a child of the current dom element to the specified type.
'''
xmlelements = _get_child_nodes(
xmldoc, _get_serialization_name(element_name))
if not xmlelements:
return None
return_obj = return_type()
_fill_data_to_return_object(xmlelements[0], return_obj)
return return_obj
def _fill_instance_element(element, return_type):
"""Converts a DOM element into the specified object"""
return _parse_response_body_from_xml_node(element, return_type)
def _fill_data_minidom(xmldoc, element_name, data_member):
xmlelements = _get_child_nodes(
xmldoc, _get_serialization_name(element_name))
if not xmlelements or not xmlelements[0].childNodes:
return None
value = xmlelements[0].firstChild.nodeValue
if data_member is None:
return value
elif isinstance(data_member, datetime):
return _to_datetime(value)
elif type(data_member) is bool:
return value.lower() != 'false'
else:
return type(data_member)(value)
def _get_node_value(xmlelement, data_type):
value = xmlelement.firstChild.nodeValue
if data_type is datetime:
return _to_datetime(value)
elif data_type is bool:
return value.lower() != 'false'
else:
return data_type(value)
def _get_request_body_bytes_only(param_name, param_value):
'''Validates the request body passed in and converts it to bytes
if our policy allows it.'''
if param_value is None:
return b''
if isinstance(param_value, bytes):
return param_value
# Previous versions of the SDK allowed data types other than bytes to be
# passed in, and they would be auto-converted to bytes. We preserve this
# behavior when running under 2.7, but issue a warning.
# Python 3 support is new, so we reject anything that's not bytes.
if sys.version_info < (3,):
warnings.warn(_WARNING_VALUE_SHOULD_BE_BYTES.format(param_name))
return _get_request_body(param_value)
raise TypeError(_ERROR_VALUE_SHOULD_BE_BYTES.format(param_name))
def _get_request_body(request_body):
'''Converts an object into a request body. If it's None
we'll return an empty string, if it's one of our objects it'll
convert it to XML and return it. Otherwise we just use the object
directly'''
if request_body is None:
return b''
if isinstance(request_body, WindowsAzureData):
request_body = _convert_class_to_xml(request_body)
if isinstance(request_body, bytes):
return request_body
if isinstance(request_body, _unicode_type):
return request_body.encode('utf-8')
request_body = str(request_body)
if isinstance(request_body, _unicode_type):
return request_body.encode('utf-8')
return request_body
def _parse_enum_results_list(response, return_type, resp_type, item_type):
"""resp_body is the XML we received
resp_type is a string, such as Containers,
return_type is the type we're constructing, such as ContainerEnumResults
item_type is the type object of the item to be created, such as Container
This function then returns a ContainerEnumResults object with the
containers member populated with the results.
"""
# parsing something like:
# <EnumerationResults ... >
# <Queues>
# <Queue>
# <Something />
# <SomethingElse />
# </Queue>
# </Queues>
# </EnumerationResults>
respbody = response.body
return_obj = return_type()
doc = minidom.parseString(respbody)
items = []
for enum_results in _get_child_nodes(doc, 'EnumerationResults'):
# path is something like Queues, Queue
for child in _get_children_from_path(enum_results,
resp_type,
resp_type[:-1]):
items.append(_fill_instance_element(child, item_type))
for name, value in vars(return_obj).items():
# queues, Queues, this is the list its self which we populated
# above
if name == resp_type.lower():
# the list its self.
continue
value = _fill_data_minidom(enum_results, name, value)
if value is not None:
setattr(return_obj, name, value)
setattr(return_obj, resp_type.lower(), items)
return return_obj
def _parse_simple_list(response, type, item_type, list_name):
respbody = response.body
res = type()
res_items = []
doc = minidom.parseString(respbody)
type_name = type.__name__
item_name = item_type.__name__
for item in _get_children_from_path(doc, type_name, item_name):
res_items.append(_fill_instance_element(item, item_type))
setattr(res, list_name, res_items)
return res
def _parse_response(response, return_type):
'''
Parse the HTTPResponse's body and fill all the data into a class of
return_type.
'''
return _parse_response_body_from_xml_text(response.body, return_type)
def _parse_service_resources_response(response, return_type):
'''
Parse the HTTPResponse's body and fill all the data into a class of
return_type.
'''
return _parse_response_body_from_service_resources_xml_text(response.body, return_type)
def _fill_data_to_return_object(node, return_obj):
members = dict(vars(return_obj))
for name, value in members.items():
if isinstance(value, _list_of):
setattr(return_obj,
name,
_fill_list_of(node,
value.list_type,
value.xml_element_name))
elif isinstance(value, _scalar_list_of):
setattr(return_obj,
name,
_fill_scalar_list_of(node,
value.list_type,
_get_serialization_name(name),
value.xml_element_name))
elif isinstance(value, _dict_of):
setattr(return_obj,
name,
_fill_dict_of(node,
_get_serialization_name(name),
value.pair_xml_element_name,
value.key_xml_element_name,
value.value_xml_element_name))
elif isinstance(value, _xml_attribute):
real_value = None
if node.hasAttribute(value.xml_element_name):
real_value = node.getAttribute(value.xml_element_name)
if real_value is not None:
setattr(return_obj, name, real_value)
elif isinstance(value, WindowsAzureData):
setattr(return_obj,
name,
_fill_instance_child(node, name, value.__class__))
elif isinstance(value, dict):
setattr(return_obj,
name,
_fill_dict(node, _get_serialization_name(name)))
elif isinstance(value, _Base64String):
value = _fill_data_minidom(node, name, '')
if value is not None:
value = _decode_base64_to_text(value)
# always set the attribute, so we don't end up returning an object
# with type _Base64String
setattr(return_obj, name, value)
else:
value = _fill_data_minidom(node, name, value)
if value is not None:
setattr(return_obj, name, value)
def _parse_response_body_from_xml_node(node, return_type):
'''
parse the xml and fill all the data into a class of return_type
'''
return_obj = return_type()
_fill_data_to_return_object(node, return_obj)
return return_obj
def _parse_response_body_from_xml_text(respbody, return_type):
'''
parse the xml and fill all the data into a class of return_type
'''
doc = minidom.parseString(respbody)
return_obj = return_type()
xml_name = return_type._xml_name if hasattr(return_type, '_xml_name') else return_type.__name__
for node in _get_child_nodes(doc, xml_name):
_fill_data_to_return_object(node, return_obj)
return return_obj
def _parse_response_body_from_service_resources_xml_text(respbody, return_type):
'''
parse the xml and fill all the data into a class of return_type
'''
doc = minidom.parseString(respbody)
return_obj = _list_of(return_type)
for node in _get_children_from_path(doc, "ServiceResources", "ServiceResource"):
local_obj = return_type()
_fill_data_to_return_object(node, local_obj)
return_obj.append(local_obj)
return return_obj
class _dict_of(dict):
"""a dict which carries with it the xml element names for key,val.
    Used for deserialization and construction of the dict"""
def __init__(self, pair_xml_element_name, key_xml_element_name,
value_xml_element_name):
self.pair_xml_element_name = pair_xml_element_name
self.key_xml_element_name = key_xml_element_name
self.value_xml_element_name = value_xml_element_name
super(_dict_of, self).__init__()
class _list_of(list):
"""a list which carries with it the type that's expected to go in it.
    Used for deserialization and construction of the lists"""
def __init__(self, list_type, xml_element_name=None):
self.list_type = list_type
if xml_element_name is None:
self.xml_element_name = list_type.__name__
else:
self.xml_element_name = xml_element_name
super(_list_of, self).__init__()
class _scalar_list_of(list):
"""a list of scalar types which carries with it the type that's
expected to go in it along with its xml element name.
    Used for deserialization and construction of the lists"""
def __init__(self, list_type, xml_element_name):
self.list_type = list_type
self.xml_element_name = xml_element_name
super(_scalar_list_of, self).__init__()
class _xml_attribute:
"""a accessor to XML attributes
expected to go in it along with its xml element name.
Used for deserialization and construction"""
def __init__(self, xml_element_name):
self.xml_element_name = xml_element_name
def _update_request_uri_query_local_storage(request, use_local_storage):
''' create correct uri and query for the request '''
uri, query = _update_request_uri_query(request)
if use_local_storage:
return '/' + DEV_ACCOUNT_NAME + uri, query
return uri, query
def _update_request_uri_query(request):
'''pulls the query string out of the URI and moves it into
the query portion of the request object. If there are already
query parameters on the request the parameters in the URI will
appear after the existing parameters'''
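    # e.g. a request.path of '/container/blob?comp=metadata' becomes the path
    # '/container/blob' with ('comp', 'metadata') appended to request.query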
if '?' in request.path:
request.path, _, query_string = request.path.partition('?')
if query_string:
query_params = query_string.split('&')
for query in query_params:
if '=' in query:
name, _, value = query.partition('=')
request.query.append((name, value))
request.path = url_quote(request.path, '/()$=\',')
# add encoded queries to request.path.
if request.query:
request.path += '?'
for name, value in request.query:
if value is not None:
request.path += name + '=' + url_quote(value, '/()$=\',') + '&'
request.path = request.path[:-1]
return request.path, request.query
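# Illustrative sketch added for clarity (not part of the original module): how
# _update_request_uri_query splits a query string off request.path. _FakeRequest
# is a minimal stand-in for the SDK's HTTP request object.
def _example_update_request_uri_query():
    class _FakeRequest(object):
        def __init__(self):
            self.path = '/myqueue?timeout=30'
            self.query = []
    request = _FakeRequest()
    # returns ('/myqueue?timeout=30', [('timeout', '30')])
    return _update_request_uri_query(request)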
def _dont_fail_on_exist(error):
''' don't throw exception if the resource exists.
This is called by create_* APIs with fail_on_exist=False'''
if isinstance(error, WindowsAzureConflictError):
return False
else:
raise error
def _dont_fail_not_exist(error):
''' don't throw exception if the resource doesn't exist.
    This is called by delete_* APIs with fail_not_exist=False'''
if isinstance(error, WindowsAzureMissingResourceError):
return False
else:
raise error
def _general_error_handler(http_error):
''' Simple error handler for azure.'''
if http_error.status == 409:
raise WindowsAzureConflictError(
_ERROR_CONFLICT.format(str(http_error)))
elif http_error.status == 404:
raise WindowsAzureMissingResourceError(
_ERROR_NOT_FOUND.format(str(http_error)))
else:
if http_error.respbody is not None:
raise WindowsAzureError(
_ERROR_UNKNOWN.format(str(http_error)) + '\n' + \
http_error.respbody.decode('utf-8'))
else:
raise WindowsAzureError(_ERROR_UNKNOWN.format(str(http_error)))
def _parse_response_for_dict(response):
''' Extracts name-values from response header. Filter out the standard
http headers.'''
if response is None:
return None
http_headers = ['server', 'date', 'location', 'host',
'via', 'proxy-connection', 'connection']
return_dict = HeaderDict()
if response.headers:
for name, value in response.headers:
if not name.lower() in http_headers:
return_dict[name] = value
return return_dict
def _parse_response_for_dict_prefix(response, prefixes):
''' Extracts name-values for names starting with prefix from response
header. Filter out the standard http headers.'''
if response is None:
return None
return_dict = {}
orig_dict = _parse_response_for_dict(response)
if orig_dict:
for name, value in orig_dict.items():
for prefix_value in prefixes:
if name.lower().startswith(prefix_value.lower()):
return_dict[name] = value
break
return return_dict
else:
return None
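# Illustrative sketch added for clarity (not part of the original module):
# filtering response headers by prefix. _FakeResponse is a made-up stand-in;
# 'x-ms-meta' is the usual metadata prefix used by the storage services.
def _example_parse_response_for_dict_prefix():
    class _FakeResponse(object):
        headers = [('x-ms-meta-category', 'logs'), ('Server', 'nginx')]
    # returns a dict holding only the 'x-ms-meta-category' header
    return _parse_response_for_dict_prefix(_FakeResponse(), ['x-ms-meta'])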
def _parse_response_for_dict_filter(response, filter):
''' Extracts name-values for names in filter from response header. Filter
out the standard http headers.'''
if response is None:
return None
return_dict = {}
orig_dict = _parse_response_for_dict(response)
if orig_dict:
for name, value in orig_dict.items():
if name.lower() in filter:
return_dict[name] = value
return return_dict
else:
return None
def _sign_string(key, string_to_sign, key_is_base64=True):
if key_is_base64:
key = _decode_base64_to_bytes(key)
else:
if isinstance(key, _unicode_type):
key = key.encode('utf-8')
if isinstance(string_to_sign, _unicode_type):
string_to_sign = string_to_sign.encode('utf-8')
signed_hmac_sha256 = hmac.HMAC(key, string_to_sign, hashlib.sha256)
digest = signed_hmac_sha256.digest()
encoded_digest = _encode_base64(digest)
return encoded_digest
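# Illustrative sketch added for clarity (not part of the original module):
# producing a base64 HMAC-SHA256 signature with _sign_string. The key below is
# a made-up value, base64-encoded as the services normally supply it.
def _example_sign_string():
    example_key = _encode_base64(b'not-a-real-key')
    return _sign_string(example_key, 'string-to-sign')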
| apache-2.0 |
arenadata/ambari | ambari-server/src/test/resources/TestAmbaryServer.samples/dummy_extension/HIVE/package/scripts/status_params.py | 25 | 1062 | #!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management import *
config = Script.get_config()
hive_pid_dir = config['configurations']['global']['hive_pid_dir']
hive_pid = 'hive-server.pid'
hive_metastore_pid = 'hive.pid'
hcat_pid_dir = config['configurations']['global']['hcat_pid_dir'] #hcat_pid_dir
| apache-2.0 |
awacha/cct | cct/qtgui/devices/motor/movemotor/movemotor.py | 1 | 4527 | import logging
from PyQt5 import QtWidgets, QtGui
from .movemotor_ui import Ui_Form
from ....core.mixins import ToolWindow
from .....core.devices import Motor
from .....core.instrument.privileges import PRIV_MOVEMOTORS
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class MoveMotor(QtWidgets.QWidget, Ui_Form, ToolWindow):
required_privilege = PRIV_MOVEMOTORS
def __init__(self, *args, **kwargs):
credo = kwargs.pop('credo')
self.motorname = kwargs.pop('motorname')
QtWidgets.QWidget.__init__(self, *args, **kwargs)
self.setupToolWindow(credo, required_devices=['Motor_' + self.motorname])
self._start_requested = False
self.setupUi(self)
def setupUi(self, Form):
Ui_Form.setupUi(self, Form)
self.motorComboBox.addItems(sorted(self.credo.motors.keys()))
self.motorComboBox.currentTextChanged.connect(self.onMotorSelected)
self.movePushButton.clicked.connect(self.onMove)
self.motorComboBox.setCurrentIndex(self.motorComboBox.findText(self.motorname))
self.relativeCheckBox.toggled.connect(self.onRelativeChanged)
self.targetDoubleSpinBox.editingFinished.connect(self.onEditingFinished)
self.onMotorSelected()
self.adjustSize()
def onEditingFinished(self):
if self.targetDoubleSpinBox.hasFocus():
self.onMove()
def onRelativeChanged(self):
self.onMotorPositionChange(self.motor(), self.motor().where())
if self.relativeCheckBox.isChecked():
self.targetDoubleSpinBox.setValue(0)
else:
self.targetDoubleSpinBox.setValue(self.motor().where())
self.adjustSize()
def setIdle(self):
super().setIdle()
self.movePushButton.setText('Move')
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(":/icons/motor.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.movePushButton.setIcon(icon)
self.targetDoubleSpinBox.setEnabled(True)
self.motorComboBox.setEnabled(True)
self.relativeCheckBox.setEnabled(True)
self.movePushButton.setEnabled(True)
self._start_requested = False
def setBusy(self):
self.movePushButton.setText('Stop')
self.movePushButton.setIcon(QtGui.QIcon.fromTheme('process-stop'))
self.targetDoubleSpinBox.setEnabled(False)
self.motorComboBox.setEnabled(False)
self.relativeCheckBox.setEnabled(False)
self.movePushButton.setEnabled(True)
super().setBusy()
def motor(self) -> Motor:
return self.credo.motors[self.motorComboBox.currentText()]
def onMove(self):
if self.movePushButton.text() == 'Move':
self.movePushButton.setEnabled(False)
self._start_requested = True
if self.relativeCheckBox.isChecked():
self.motor().moverel(self.targetDoubleSpinBox.value())
else:
self.motor().moveto(self.targetDoubleSpinBox.value())
else:
self.movePushButton.setEnabled(False)
self.motor().stop()
def onMotorStart(self, motor: Motor):
if self._start_requested:
self.setBusy()
def onMotorSelected(self):
self.setWindowTitle('Move motor {}'.format(self.motorComboBox.currentText()))
for d in self.required_devices:
self.unrequireDevice(d)
self.required_devices = ['Motor_' + self.motorComboBox.currentText()]
self.requireDevice(self.required_devices[0])
motor = self.credo.motors[self.motorComboBox.currentText()]
self.onMotorPositionChange(motor, motor.where())
if self.relativeCheckBox.isChecked():
self.targetDoubleSpinBox.setValue(0.0)
else:
self.targetDoubleSpinBox.setValue(motor.where())
def onMotorPositionChange(self, motor: Motor, newposition: float):
self.positionLabel.setText('<b>{:.4f}</b>'.format(newposition))
left = motor.get_variable('softleft')
right = motor.get_variable('softright')
if self.relativeCheckBox.isChecked():
left -= newposition
right -= newposition
self.targetDoubleSpinBox.setMinimum(left)
self.targetDoubleSpinBox.setMaximum(right)
self.leftLimitLabel.setText('{:.4f}'.format(left))
self.rightLimitLabel.setText('{:.4f}'.format(right))
self.adjustSize()
def onMotorStop(self, motor: Motor, targetpositionreached: bool):
self.setIdle()
| bsd-3-clause |
RossBrunton/django | django/core/management/commands/check.py | 316 | 1892 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import apps
from django.core import checks
from django.core.checks.registry import registry
from django.core.management.base import BaseCommand, CommandError
class Command(BaseCommand):
help = "Checks the entire Django project for potential problems."
requires_system_checks = False
def add_arguments(self, parser):
parser.add_argument('args', metavar='app_label', nargs='*')
parser.add_argument('--tag', '-t', action='append', dest='tags',
help='Run only checks labeled with given tag.')
parser.add_argument('--list-tags', action='store_true', dest='list_tags',
help='List available tags.')
parser.add_argument('--deploy', action='store_true', dest='deploy',
help='Check deployment settings.')
def handle(self, *app_labels, **options):
include_deployment_checks = options['deploy']
if options.get('list_tags'):
self.stdout.write('\n'.join(sorted(registry.tags_available(include_deployment_checks))))
return
if app_labels:
app_configs = [apps.get_app_config(app_label) for app_label in app_labels]
else:
app_configs = None
tags = options.get('tags')
if tags:
try:
invalid_tag = next(
tag for tag in tags if not checks.tag_exists(tag, include_deployment_checks)
)
except StopIteration:
# no invalid tags
pass
else:
raise CommandError('There is no system check with the "%s" tag.' % invalid_tag)
self.check(
app_configs=app_configs,
tags=tags,
display_num_errors=True,
include_deployment_checks=include_deployment_checks,
)
| bsd-3-clause |
wfnex/openbras | src/VPP/test/test_vxlan.py | 2 | 9043 | #!/usr/bin/env python
import socket
from util import ip4n_range
import unittest
from framework import VppTestCase, VppTestRunner
from template_bd import BridgeDomain
from scapy.layers.l2 import Ether
from scapy.layers.inet import IP, UDP
from scapy.layers.vxlan import VXLAN
from scapy.utils import atol
class TestVxlan(BridgeDomain, VppTestCase):
""" VXLAN Test Case """
def __init__(self, *args):
BridgeDomain.__init__(self)
VppTestCase.__init__(self, *args)
def encapsulate(self, pkt, vni):
"""
Encapsulate the original payload frame by adding VXLAN header with its
UDP, IP and Ethernet fields
"""
return (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
IP(src=self.pg0.remote_ip4, dst=self.pg0.local_ip4) /
UDP(sport=self.dport, dport=self.dport, chksum=0) /
VXLAN(vni=vni, flags=self.flags) /
pkt)
def encap_mcast(self, pkt, src_ip, src_mac, vni):
"""
Encapsulate the original payload frame by adding VXLAN header with its
UDP, IP and Ethernet fields
"""
return (Ether(src=src_mac, dst=self.mcast_mac) /
IP(src=src_ip, dst=self.mcast_ip4) /
UDP(sport=self.dport, dport=self.dport, chksum=0) /
VXLAN(vni=vni, flags=self.flags) /
pkt)
def decapsulate(self, pkt):
"""
Decapsulate the original payload frame by removing VXLAN header
"""
# check if is set I flag
self.assertEqual(pkt[VXLAN].flags, int('0x8', 16))
return pkt[VXLAN].payload
# Method for checking VXLAN encapsulation.
#
def check_encapsulation(self, pkt, vni, local_only=False, mcast_pkt=False):
# TODO: add error messages
# Verify source MAC is VPP_MAC and destination MAC is MY_MAC resolved
# by VPP using ARP.
self.assertEqual(pkt[Ether].src, self.pg0.local_mac)
if not local_only:
if not mcast_pkt:
self.assertEqual(pkt[Ether].dst, self.pg0.remote_mac)
else:
self.assertEqual(pkt[Ether].dst, type(self).mcast_mac)
# Verify VXLAN tunnel source IP is VPP_IP and destination IP is MY_IP.
self.assertEqual(pkt[IP].src, self.pg0.local_ip4)
if not local_only:
if not mcast_pkt:
self.assertEqual(pkt[IP].dst, self.pg0.remote_ip4)
else:
self.assertEqual(pkt[IP].dst, type(self).mcast_ip4)
# Verify UDP destination port is VXLAN 4789, source UDP port could be
# arbitrary.
self.assertEqual(pkt[UDP].dport, type(self).dport)
# TODO: checksum check
# Verify VNI
self.assertEqual(pkt[VXLAN].vni, vni)
@classmethod
def create_vxlan_flood_test_bd(cls, vni, n_ucast_tunnels):
# Create 10 ucast vxlan tunnels under bd
ip_range_start = 10
ip_range_end = ip_range_start + n_ucast_tunnels
next_hop_address = cls.pg0.remote_ip4n
for dest_ip4n in ip4n_range(next_hop_address, ip_range_start,
ip_range_end):
# add host route so dest_ip4n will not be resolved
cls.vapi.ip_add_del_route(dest_ip4n, 32, next_hop_address)
r = cls.vapi.vxlan_add_del_tunnel(
src_addr=cls.pg0.local_ip4n,
dst_addr=dest_ip4n,
vni=vni)
cls.vapi.sw_interface_set_l2_bridge(r.sw_if_index, bd_id=vni)
@classmethod
def add_del_shared_mcast_dst_load(cls, is_add):
"""
add or del tunnels sharing the same mcast dst
to test vxlan ref_count mechanism
"""
n_shared_dst_tunnels = 2000
vni_start = 10000
vni_end = vni_start + n_shared_dst_tunnels
for vni in range(vni_start, vni_end):
r = cls.vapi.vxlan_add_del_tunnel(
src_addr=cls.pg0.local_ip4n,
dst_addr=cls.mcast_ip4n,
mcast_sw_if_index=1,
vni=vni,
is_add=is_add)
if r.sw_if_index == 0xffffffff:
raise "bad sw_if_index"
@classmethod
def add_shared_mcast_dst_load(cls):
cls.add_del_shared_mcast_dst_load(is_add=1)
@classmethod
def del_shared_mcast_dst_load(cls):
cls.add_del_shared_mcast_dst_load(is_add=0)
@classmethod
def add_del_mcast_tunnels_load(cls, is_add):
"""
add or del tunnels to test vxlan stability
"""
n_distinct_dst_tunnels = 200
ip_range_start = 10
ip_range_end = ip_range_start + n_distinct_dst_tunnels
for dest_ip4n in ip4n_range(cls.mcast_ip4n, ip_range_start,
ip_range_end):
vni = bytearray(dest_ip4n)[3]
cls.vapi.vxlan_add_del_tunnel(
src_addr=cls.pg0.local_ip4n,
dst_addr=dest_ip4n,
mcast_sw_if_index=1,
vni=vni,
is_add=is_add)
@classmethod
def add_mcast_tunnels_load(cls):
cls.add_del_mcast_tunnels_load(is_add=1)
@classmethod
def del_mcast_tunnels_load(cls):
cls.add_del_mcast_tunnels_load(is_add=0)
# Class method to start the VXLAN test case.
# Overrides setUpClass method in VppTestCase class.
# Python try..except statement is used to ensure that the tear down of
# the class will be executed even if exception is raised.
# @param cls The class pointer.
@classmethod
def setUpClass(cls):
super(TestVxlan, cls).setUpClass()
try:
cls.dport = 4789
cls.flags = 0x8
# Create 2 pg interfaces.
cls.create_pg_interfaces(range(4))
for pg in cls.pg_interfaces:
pg.admin_up()
# Configure IPv4 addresses on VPP pg0.
cls.pg0.config_ip4()
# Resolve MAC address for VPP's IP address on pg0.
cls.pg0.resolve_arp()
# Our Multicast address
cls.mcast_ip4 = '239.1.1.1'
cls.mcast_ip4n = socket.inet_pton(socket.AF_INET, cls.mcast_ip4)
iplong = atol(cls.mcast_ip4)
cls.mcast_mac = "01:00:5e:%02x:%02x:%02x" % (
(iplong >> 16) & 0x7F, (iplong >> 8) & 0xFF, iplong & 0xFF)
# Create VXLAN VTEP on VPP pg0, and put vxlan_tunnel0 and pg1
# into BD.
cls.single_tunnel_bd = 1
r = cls.vapi.vxlan_add_del_tunnel(
src_addr=cls.pg0.local_ip4n,
dst_addr=cls.pg0.remote_ip4n,
vni=cls.single_tunnel_bd)
cls.vapi.sw_interface_set_l2_bridge(r.sw_if_index,
bd_id=cls.single_tunnel_bd)
cls.vapi.sw_interface_set_l2_bridge(cls.pg1.sw_if_index,
bd_id=cls.single_tunnel_bd)
# Setup vni 2 to test multicast flooding
cls.n_ucast_tunnels = 10
cls.mcast_flood_bd = 2
cls.create_vxlan_flood_test_bd(cls.mcast_flood_bd,
cls.n_ucast_tunnels)
r = cls.vapi.vxlan_add_del_tunnel(
src_addr=cls.pg0.local_ip4n,
dst_addr=cls.mcast_ip4n,
mcast_sw_if_index=1,
vni=cls.mcast_flood_bd)
cls.vapi.sw_interface_set_l2_bridge(r.sw_if_index,
bd_id=cls.mcast_flood_bd)
cls.vapi.sw_interface_set_l2_bridge(cls.pg2.sw_if_index,
bd_id=cls.mcast_flood_bd)
# Add and delete mcast tunnels to check stability
cls.add_shared_mcast_dst_load()
cls.add_mcast_tunnels_load()
cls.del_shared_mcast_dst_load()
cls.del_mcast_tunnels_load()
# Setup vni 3 to test unicast flooding
cls.ucast_flood_bd = 3
cls.create_vxlan_flood_test_bd(cls.ucast_flood_bd,
cls.n_ucast_tunnels)
cls.vapi.sw_interface_set_l2_bridge(cls.pg3.sw_if_index,
bd_id=cls.ucast_flood_bd)
except Exception:
super(TestVxlan, cls).tearDownClass()
raise
# Method to define VPP actions before tear down of the test case.
# Overrides tearDown method in VppTestCase class.
# @param self The object pointer.
def tearDown(self):
super(TestVxlan, self).tearDown()
if not self.vpp_dead:
self.logger.info(self.vapi.cli("show bridge-domain 1 detail"))
self.logger.info(self.vapi.cli("show bridge-domain 2 detail"))
self.logger.info(self.vapi.cli("show bridge-domain 3 detail"))
self.logger.info(self.vapi.cli("show vxlan tunnel"))
if __name__ == '__main__':
unittest.main(testRunner=VppTestRunner)
| bsd-3-clause |
ncliam/serverpos | openerp/addons/l10n_be_intrastat/l10n_be_intrastat.py | 258 | 7828 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Odoo, Open Source Business Applications
# Copyright (C) 2014-2015 Odoo S.A. <http://www.odoo.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class account_invoice(osv.osv):
_inherit = "account.invoice"
_columns = {
'incoterm_id': fields.many2one(
'stock.incoterms', 'Incoterm',
help="International Commercial Terms are a series of predefined commercial terms "
"used in international transactions."),
'intrastat_transaction_id': fields.many2one(
'l10n_be_intrastat.transaction', 'Intrastat Transaction Type',
help="Intrastat nature of transaction"),
'transport_mode_id': fields.many2one(
'l10n_be_intrastat.transport_mode', 'Intrastat Transport Mode'),
'intrastat_country_id': fields.many2one(
'res.country', 'Intrastat Country',
help='Intrastat country, delivery for sales, origin for purchases',
domain=[('intrastat','=',True)]),
}
class intrastat_region(osv.osv):
_name = 'l10n_be_intrastat.region'
_columns = {
'code': fields.char('Code', required=True),
'country_id': fields.many2one('res.country', 'Country'),
'name': fields.char('Name', translate=True),
'description': fields.char('Description'),
}
_sql_constraints = [
('l10n_be_intrastat_regioncodeunique', 'UNIQUE (code)', 'Code must be unique.'),
]
class intrastat_transaction(osv.osv):
_name = 'l10n_be_intrastat.transaction'
_rec_name = 'code'
_columns = {
'code': fields.char('Code', required=True, readonly=True),
'description': fields.text('Description', readonly=True),
}
_sql_constraints = [
('l10n_be_intrastat_trcodeunique', 'UNIQUE (code)', 'Code must be unique.'),
]
class intrastat_transport_mode(osv.osv):
_name = 'l10n_be_intrastat.transport_mode'
_columns = {
'code': fields.char('Code', required=True, readonly=True),
'name': fields.char('Description', readonly=True),
}
_sql_constraints = [
('l10n_be_intrastat_trmodecodeunique', 'UNIQUE (code)', 'Code must be unique.'),
]
class product_category(osv.osv):
_name = "product.category"
_inherit = "product.category"
_columns = {
'intrastat_id': fields.many2one('report.intrastat.code', 'Intrastat Code'),
}
def get_intrastat_recursively(self, cr, uid, category, context=None):
""" Recursively search in categories to find an intrastat code id
:param category : Browse record of a category
"""
if category.intrastat_id:
res = category.intrastat_id.id
elif category.parent_id:
res = self.get_intrastat_recursively(cr, uid, category.parent_id, context=context)
else:
res = None
return res
class product_product(osv.osv):
_name = "product.product"
_inherit = "product.product"
def get_intrastat_recursively(self, cr, uid, id, context=None):
""" Recursively search in categories to find an intrastat code id
"""
product = self.browse(cr, uid, id, context=context)
if product.intrastat_id:
res = product.intrastat_id.id
elif product.categ_id:
res = self.pool['product.category'].get_intrastat_recursively(
cr, uid, product.categ_id, context=context)
else:
res = None
return res
class purchase_order(osv.osv):
_inherit = "purchase.order"
def _prepare_invoice(self, cr, uid, order, line_ids, context=None):
"""
copy incoterm from purchase order to invoice
"""
invoice = super(purchase_order, self)._prepare_invoice(
cr, uid, order, line_ids, context=context)
if order.incoterm_id:
invoice['incoterm_id'] = order.incoterm_id.id
#Try to determine products origin
if order.partner_id.country_id:
#It comes from supplier
invoice['intrastat_country_id'] = order.partner_id.country_id.id
return invoice
class report_intrastat_code(osv.osv):
_inherit = "report.intrastat.code"
_columns = {
'description': fields.text('Description', translate=True),
}
class res_company(osv.osv):
_inherit = "res.company"
_columns = {
'region_id': fields.many2one('l10n_be_intrastat.region', 'Intrastat region'),
'transport_mode_id': fields.many2one('l10n_be_intrastat.transport_mode',
'Default transport mode'),
'incoterm_id': fields.many2one('stock.incoterms', 'Default incoterm for Intrastat',
help="International Commercial Terms are a series of "
"predefined commercial terms used in international "
"transactions."),
}
class sale_order(osv.osv):
_inherit = "sale.order"
def _prepare_invoice(self, cr, uid, saleorder, lines, context=None):
"""
copy incoterm from sale order to invoice
"""
invoice = super(sale_order, self)._prepare_invoice(
cr, uid, saleorder, lines, context=context)
if saleorder.incoterm:
invoice['incoterm_id'] = saleorder.incoterm.id
# Guess products destination
if saleorder.partner_shipping_id.country_id:
invoice['intrastat_country_id'] = saleorder.partner_shipping_id.country_id.id
elif saleorder.partner_id.country_id:
invoice['intrastat_country_id'] = saleorder.partner_id.country_id.id
elif saleorder.partner_invoice_id.country_id:
invoice['intrastat_country_id'] = saleorder.partner_invoice_id.country_id.id
return invoice
class stock_warehouse(osv.osv):
_inherit = "stock.warehouse"
_columns = {
'region_id': fields.many2one('l10n_be_intrastat.region', 'Intrastat region'),
}
def get_regionid_from_locationid(self, cr, uid, location_id, context=None):
location_model = self.pool['stock.location']
location = location_model.browse(cr, uid, location_id, context=context)
location_ids = location_model.search(cr, uid,
[('parent_left', '<=', location.parent_left),
('parent_right', '>=', location.parent_right)],
context=context)
warehouse_ids = self.search(cr, uid,
[('lot_stock_id', 'in', location_ids),
('region_id', '!=', False)],
context=context)
warehouses = self.browse(cr, uid, warehouse_ids, context=context)
if warehouses and warehouses[0]:
return warehouses[0].region_id.id
return None
| agpl-3.0 |
davidbrazdil/nacl | tools/process_oprofile_x86_64.py | 12 | 17097 | #!/usr/bin/python
# Copyright (c) 2011 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Post-process Oprofile logs for x86-64 nexes running under sel_ldr.
Maps event counts in the "anon" region to the appropriate addresses
in the nexe assembly. "Anon" represents the untrusted sandbox.
This will become unnecessary once we get immutable files for our .nexe
so that sel_ldr can use mmap the .nexe instead of copying it in
(Oprofile should understand mmap).
Remember to look at the oprofile log for the time spent in the
trusted code / OS (this only lists time spent in the untrusted code).
"""
# TODO(jvoung): consider using addr2line to look up functions with
# the linenum / file info instead of the using the rangemap.
# Pro: less custom code and possibility of understanding Dwarf info.
# Con: lots of exec()s to cover all the samples...
import commands
import getopt
import math
import re
import sys
def Debug(mesg):
sys.stdout.flush() # Make stdout/stderr come out in order.
print >>sys.stderr, "# ", mesg
return
def DemangleFunc(fun_name):
# In case the disassembly was done without the objdump "-C" flag.
# Heuristic for detecting already demangled names
# (c++filt will hate you for giving it an already demangled name)
if ('(' in fun_name or
'*' in fun_name or
':' in fun_name or
'&' in fun_name):
return fun_name
return commands.getoutput("c++filt " + fun_name)
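# Illustrative sketch added for clarity (not part of the original script):
# demangling a mangled C++ symbol; requires c++filt on PATH, and the symbol
# below is a made-up example.
def ExampleDemangleFunc():
  return DemangleFunc('_ZN3foo3barEv')  # -> 'foo::bar()' if c++filt is present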
# Assume addresses in inputs (logs and assembly files) are all this base.
ADDRESS_BASE = 16
ADDRESS_DIGIT = '[a-f0-9]'
def GetUntrustedBase(sel_ldr_log_fd):
""" Parse the sel_ldr debug output to find the base of the untrusted memory
region.
Returns the base address. """
untrusted_base = None
for line in sel_ldr_log_fd:
# base is the mem start addr printed by sel_ldr
if line.find('mem start addr') != -1:
fields = line.split()
untrusted_base = int(fields[-1], ADDRESS_BASE)
break
assert untrusted_base is not None, "Couldn't parse untrusted base"
Debug("untrusted_base = %s" % hex(untrusted_base))
return untrusted_base
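# Illustrative sketch added for clarity (not part of the original script):
# GetUntrustedBase only needs an iterable of log lines; the line below is a
# made-up sample of the 'mem start addr' message sel_ldr prints.
def ExampleGetUntrustedBase():
  fake_log = ['[1234,5678:main] mem start addr 0xc00000000\n']
  return GetUntrustedBase(fake_log)  # -> 0xc00000000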
#--------------- Parse Oprofile Log ---------------
def CheckIfInSelLdrRegion(line, cur_range_base):
""" Checks if we are reading the part of the oprofile --details log
pertaining to the untrusted sandbox in sel_ldr's address space.
Returns the base of that memory region or None. """
fields = line.split()
# cur_range_base should be set if we are already parsing the
# untrusted sandbox section of the log.
if cur_range_base:
# Check if we are exiting the untrusted sandbox section of the log.
# The header of a new non-untrusted-sandbox section should look like:
# 00000000 samples pct foo.so foo.so /path-to/foo.so
if len(fields) >= 6:
Debug('Likely exiting sel_ldr section to a new section: %s' % fields[3])
# Check if the next section is also a sel_ldr region
return CheckIfInSelLdrRegion(line, None)
else:
return cur_range_base
else:
# Check if we are entering the untrusted-sandbox section of the log.
# The header of such a section should look like:
#
# 00000000 samples pct anon (tgid:22067 range:0xBASE-0xEND)
# (sel_ldr or chrome) anon (tgid:22067 range:...)
#
# I.e., 10 fields...
if (len(fields) == 10
and (fields[6] == 'sel_ldr'
or fields[6] == 'chrome'
or fields[6] == 'nacl_helper_bootstrap')
and ('anon' == fields[3])):
Debug('Likely starting sel_ldr section: %s %s' % (fields[3], fields[6]))
range_token = fields[9]
range_re = re.compile('range:0x(' + ADDRESS_DIGIT + '+)-0x')
match = range_re.search(range_token)
if match:
range_str = match.group(1)
range_base = int(range_str, ADDRESS_BASE)
Debug('Likely range base is %s' % hex(range_base))
return range_base
else:
Debug("Couldn't parse range base for: " + str(fields))
return None
else:
return None
def UpdateAddrEventMap(line, sel_ldr_range_base, untrusted_base, addr_to_event):
""" Add an event count to the addr_to_event map if the line of data looks
like an event count. Example:
vma samples %
0000028a 1 1.8e-04
"""
fields = line.split()
if len(fields) == 3:
# deal with numbers like fffffff484494ca5 which are actually negative
address = int(fields[0], ADDRESS_BASE)
if address > 0x8000000000000000:
address = -((0xffffffffffffffff - address) + 1)
address = address + sel_ldr_range_base - untrusted_base
sample_count = int(fields[1])
cur = addr_to_event.get(address, 0)
addr_to_event[address] = cur + sample_count
return
def CheckTrustedRecord(line, trusted_events, filter_events):
""" Checks if this line is a samplecount for a trusted function. Because
oprofile understands these, we just use its aggregate count.
Updates the trusted_events map."""
# oprofile function records have the following format:
# address sample_count percent image_name app_name symbol_name
# Some symbol names have spaces (function prototypes), so only split 6 words.
fields = line.split(None, 5)
if len(fields) < 6:
return False
image_name = fields[3]
symbol_name = fields[5].rstrip()
# 2 cases: we want only 'relevant' samples, or we want all of them.
# Either way, ignore the untrusted region.
if (image_name == "anon" and symbol_name.find('sel_ldr') != -1):
return False
try: # catch lines that aren't records (e.g. the CPU type)
sample_count = int(fields[1])
except ValueError:
return False
if (filter_events and not (image_name.endswith('sel_ldr')
or image_name.startswith('llc')
or image_name.endswith('.so')
or image_name == 'no-vmlinux'
or image_name == 'chrome'
or image_name == 'nacl_helper_bootstrap')):
trusted_events['FILTERED'] = trusted_events.get('FILTERED',0) + sample_count
return False
# If there are duplicate function names, just use the first instance.
# (Most likely they are from shared libraries in different processes, and
# because the opreport output is sorted, the top one is most likely to be
# our process of interest, and the rest are not.)
key = image_name + ':' + symbol_name
trusted_events[key] = trusted_events.get(key, sample_count)
return True
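# Illustrative sketch added for clarity (not part of the original script): a
# made-up opreport --details record line and how CheckTrustedRecord folds its
# sample count into the trusted_events map.
def ExampleCheckTrustedRecord():
  trusted_events = {}
  line = '00002cd0 25 0.4323 libc-2.11.1.so sel_ldr memcpy'
  CheckTrustedRecord(line, trusted_events, False)
  return trusted_events  # -> {'libc-2.11.1.so:memcpy': 25}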
def GetAddressToEventSelLdr(fd, filter_events, untrusted_base):
""" Returns 2 maps: addr_to_event: address (int) -> event count (int)
and trusted_events: func (str) - > event count (int)"""
addr_to_event = {}
trusted_events = {}
sel_ldr_range_base = None
for line in fd:
sel_ldr_range_base = CheckIfInSelLdrRegion(line, sel_ldr_range_base)
if sel_ldr_range_base:
# If we've parsed the header of the region and know the base of
# this range, start picking up event counts.
UpdateAddrEventMap(line,
sel_ldr_range_base,
untrusted_base,
addr_to_event)
else:
CheckTrustedRecord(line, trusted_events, filter_events)
fd.seek(0) # Reset for future use...
return addr_to_event, trusted_events
#--------------- Parse Assembly File ---------------
def CompareBounds((lb1, ub1), (lb2, ub2)):
# Shouldn't be overlapping, so both the upper and lower
# should be less than the other's lower bound
if (lb1 < lb2) and (ub1 < lb2):
return -1
elif (lb1 > ub2) and (ub1 > ub2):
return 1
else:
# Somewhere between, not necessarily equal.
return 0
class RangeMapSorted(object):
""" Simple range map using a sorted list of pairs
((lowerBound, upperBound), data). """
ranges = []
# Error indexes (< 0)
kGREATER = -2
kLESS = -1
def FindIndex(self, lb, ub):
length = len(self.ranges)
return self.FindIndexFrom(lb, ub,
int(math.ceil(length / 2.0)), 0, length)
def FindIndexFrom(self, lb, ub, CurGuess, CurL, CurH):
length = len(self.ranges)
# If it is greater than the last index, it is greater than all.
if CurGuess >= length:
return self.kGREATER
((lb2, ub2), _) = self.ranges[CurGuess]
comp = CompareBounds((lb, ub), (lb2, ub2))
if comp == 0:
return CurGuess
elif comp < 0:
# If it is less than index 0, it is less than all.
if CurGuess == 0:
return self.kLESS
NextL = CurL
NextH = CurGuess
NextGuess = CurGuess - int (math.ceil((NextH - NextL) / 2.0))
else:
# If it is greater than the last index, it is greater than all.
if CurGuess >= length - 1:
return self.kGREATER
NextL = CurGuess
NextH = CurH
NextGuess = CurGuess + int (math.ceil((NextH - NextL) / 2.0))
return self.FindIndexFrom(lb, ub, NextGuess, NextL, NextH)
def Add(self, lb, ub, data):
""" Add a mapping from [lb, ub] --> data """
index = self.FindIndex(lb, ub)
range_data = ((lb, ub), data)
if index == self.kLESS:
self.ranges.insert(0, range_data)
elif index == self.kGREATER:
self.ranges.append(range_data)
else:
self.ranges.insert(index, range_data)
def Lookup(self, key):
""" Get the data that falls within the range. """
index = self.FindIndex(key, key)
# Check if it is out of range.
if index < 0:
return None
((lb, ub), d) = self.ranges[index]
# Double check that the key actually falls in range.
if lb <= key and key <= ub:
return d
else:
return None
def GetRangeFromKey(self, key):
index = self.FindIndex(key, key)
# Check if it is out of range.
if index < 0:
return None
((lb, ub), _) = self.ranges[index]
# Double check that the key actually falls in range.
if lb <= key and key <= ub:
return (lb, ub)
else:
return None
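# Illustrative sketch added for clarity (not part of the original script): how
# the range map above maps address ranges to function names. Note that 'ranges'
# is a class-level list, so all RangeMapSorted instances share it.
def ExampleRangeMapUsage():
  rmap = RangeMapSorted()
  rmap.Add(0x20000, 0x2003f, 'foo')
  rmap.Add(0x20040, 0x20060, 'bar')
  return rmap.Lookup(0x20010)  # -> 'foo'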
ADDRESS_RE = re.compile('(' + ADDRESS_DIGIT + '+):')
FUNC_RE = re.compile('(' + ADDRESS_DIGIT + '+) <(.*)>:')
def GetAssemblyAddress(line):
""" Look for lines of assembly that look like
address: [byte] [byte]... [instruction in text]
"""
fields = line.split()
if len(fields) > 1:
match = ADDRESS_RE.search(fields[0])
if match:
return int(match.group(1), ADDRESS_BASE)
return None
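# Illustrative sketch added for clarity (not part of the original script):
# GetAssemblyAddress picks the leading hex address off an objdump-style line;
# the instruction text below is a made-up example.
def ExampleGetAssemblyAddress():
  line = '   20040:\t55                   \tpush   %rbp'
  return GetAssemblyAddress(line)  # -> 0x20040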
def GetAssemblyRanges(fd):
""" Return a RangeMap that tracks the boundaries of each function.
E.g., [0x20000, 0x2003f] --> "foo"
[0x20040, 0x20060] --> "bar"
"""
rmap = RangeMapSorted()
cur_start = None
cur_func = None
cur_end = None
for line in fd:
# If we are within a function body...
if cur_func:
# Check if it has ended (with a newline)
if line.strip() == '':
assert (cur_start and cur_end)
rmap.Add(cur_start, cur_end, cur_func)
cur_start = None
cur_end = None
cur_func = None
else:
maybe_addr = GetAssemblyAddress(line)
if maybe_addr:
cur_end = maybe_addr
else:
# Not yet within a function body. Check if we are entering.
# The header should look like:
# 0000000000020040 <foo>:
match = FUNC_RE.search(line)
if match:
cur_start = int(match.group(1), ADDRESS_BASE)
cur_func = match.group(2)
fd.seek(0) # reset for future use.
return rmap
#--------------- Summarize Data ---------------
def PrintTopFunctions(assembly_ranges, address_to_events, trusted_events):
""" Prints the N functions with the top event counts """
func_events = {}
some_addrs_not_found = False
for (addr, count) in address_to_events.iteritems():
func = assembly_ranges.Lookup(addr)
if (func):
# Function labels are mostly unique, except when we have ASM labels
# that we mistake for functions. E.g., "loop:" is a common ASM label.
# Thus, to get a unique value, we must append the unique key range
# to the function label.
(lb, ub) = assembly_ranges.GetRangeFromKey(addr)
key = (func, lb, ub)
cur_count = func_events.get(key, 0)
func_events[key] = cur_count + count
else:
Debug('No matching function for addr/count: %s %d'
% (hex(addr), count))
some_addrs_not_found = True
if some_addrs_not_found:
# Addresses < 0x20000 are likely trampoline addresses.
Debug('NOTE: sample addrs < 0x20000 are likely trampolines')
filtered_events = trusted_events.pop('FILTERED', 0)
# convert trusted functions (which are just functions and not ranges) into
# the same format and mix them with untrusted. Just use 0s for the ranges
for (func, count) in trusted_events.iteritems():
key = (func, 0, 0)
func_events[key] = count
flattened = func_events.items()
def CompareCounts ((k1, c1), (k2, c2)):
if c1 < c2:
return -1
elif c1 == c2:
return 0
else:
return 1
flattened.sort(cmp=CompareCounts, reverse=True)
top_30 = flattened[:30]
total_samples = (sum(address_to_events.itervalues())
+ sum(trusted_events.itervalues()))
print "============= Top 30 Functions ==============="
print "EVENTS\t\tPCT\tCUM\tFUNC [LOW_VMA, UPPER_VMA]"
cum_pct = 0.0
for ((func, lb, ub), count) in top_30:
pct = 100.0 * count / total_samples
cum_pct += pct
print "%d\t\t%.2f\t%.2f\t%s [%s, %s]" % (count, pct, cum_pct,
DemangleFunc(func), hex(lb), hex(ub))
print "%d samples filtered (%.2f%% of all samples)" % (filtered_events,
100.0 * filtered_events / (filtered_events + total_samples))
#--------------- Annotate Assembly ---------------
def PrintAnnotatedAssembly(fd_in, address_to_events, fd_out):
""" Writes to output, a version of assembly_file which has event
counts in the form #; EVENTS: N
This lets us know which instructions took the most time, etc.
"""
for line in fd_in:
line = line.strip()
maybe_addr = GetAssemblyAddress(line)
if maybe_addr in address_to_events:
event_count = address_to_events[maybe_addr]
print >>fd_out, "%s #; EVENTS: %d" % (line, event_count)
else:
print >>fd_out, line
fd_in.seek(0) # reset for future use.
#--------------- Main ---------------
def main(argv):
try:
opts, args = getopt.getopt(argv[1:],
                               'l:s:o:m:b:f',
['oprofilelog=',
'assembly=',
'output=',
'memmap=',
'untrusted_base=',
])
assembly_file = None
assembly_fd = None
oprof_log = None
oprof_fd = None
output = sys.stdout
out_name = None
filter_events = False
# Get the untrusted base address from either a sel_ldr log
# which prints out the mapping, or from the command line directly.
mapfile_name = None
mapfile_fd = None
untrusted_base = None
for o, a in opts:
if o in ('-l', '--oprofilelog'):
oprof_log = a
oprof_fd = open(oprof_log, 'r')
elif o in ('-s', '--assembly'):
assembly_file = a
assembly_fd = open(assembly_file, 'r')
elif o in ('-o', '--output'):
out_name = a
output = open(out_name, 'w')
elif o in ('-m', '--memmap'):
mapfile_name = a
try:
mapfile_fd = open(mapfile_name, 'r')
except IOError:
pass
elif o in ('-b', '--untrusted_base'):
untrusted_base = a
elif o == '-f':
filter_events = True
else:
assert False, 'unhandled option'
if untrusted_base:
if mapfile_fd:
print 'Error: Specified both untrusted_base directly and w/ memmap file'
sys.exit(1)
untrusted_base = int(untrusted_base, 16)
else:
if mapfile_fd:
Debug('Parsing sel_ldr output for untrusted memory base: %s' %
mapfile_name)
untrusted_base = GetUntrustedBase(mapfile_fd)
else:
print 'Error: Need sel_ldr log --memmap or --untrusted_base.'
sys.exit(1)
if assembly_file and oprof_log:
Debug('Parsing assembly file of nexe: %s' % assembly_file)
assembly_ranges = GetAssemblyRanges(assembly_fd)
Debug('Parsing oprofile log: %s' % oprof_log)
untrusted_events, trusted_events = \
GetAddressToEventSelLdr(oprof_fd, filter_events, untrusted_base)
Debug('Printing the top functions (most events)')
PrintTopFunctions(assembly_ranges, untrusted_events, trusted_events)
Debug('Printing annotated assembly to %s (or stdout)' % out_name)
PrintAnnotatedAssembly(assembly_fd, untrusted_events, output)
else:
print 'Need assembly file(%s) and oprofile log(%s)!' \
% (assembly_file, oprof_log)
sys.exit(1)
except getopt.GetoptError, err:
print str(err)
sys.exit(1)
if __name__ == '__main__':
main(sys.argv)
| bsd-3-clause |
ivanhorvath/openshift-tools | openshift/installer/vendored/openshift-ansible-3.9.14-1/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py | 24 | 6571 | """Ansible callback plugin to print a summary completion status of installation
phases.
"""
from datetime import datetime
from ansible.plugins.callback import CallbackBase
from ansible import constants as C
class CallbackModule(CallbackBase):
"""This callback summarizes installation phase status."""
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'aggregate'
CALLBACK_NAME = 'installer_checkpoint'
CALLBACK_NEEDS_WHITELIST = False
def __init__(self):
super(CallbackModule, self).__init__()
def v2_playbook_on_stats(self, stats):
# Set the order of the installer phases
installer_phases = [
'installer_phase_initialize',
'installer_phase_health',
'installer_phase_etcd',
'installer_phase_nfs',
'installer_phase_loadbalancer',
'installer_phase_master',
'installer_phase_master_additional',
'installer_phase_node',
'installer_phase_glusterfs',
'installer_phase_hosted',
'installer_phase_web_console',
'installer_phase_metrics',
'installer_phase_logging',
'installer_phase_prometheus',
'installer_phase_servicecatalog',
'installer_phase_management',
]
# Define the attributes of the installer phases
phase_attributes = {
'installer_phase_initialize': {
'title': 'Initialization',
'playbook': ''
},
'installer_phase_health': {
'title': 'Health Check',
'playbook': 'playbooks/openshift-checks/pre-install.yml'
},
'installer_phase_etcd': {
'title': 'etcd Install',
'playbook': 'playbooks/openshift-etcd/config.yml'
},
'installer_phase_nfs': {
'title': 'NFS Install',
'playbook': 'playbooks/openshift-nfs/config.yml'
},
'installer_phase_loadbalancer': {
'title': 'Load balancer Install',
'playbook': 'playbooks/openshift-loadbalancer/config.yml'
},
'installer_phase_master': {
'title': 'Master Install',
'playbook': 'playbooks/openshift-master/config.yml'
},
'installer_phase_master_additional': {
'title': 'Master Additional Install',
'playbook': 'playbooks/openshift-master/additional_config.yml'
},
'installer_phase_node': {
'title': 'Node Install',
'playbook': 'playbooks/openshift-node/config.yml'
},
'installer_phase_glusterfs': {
'title': 'GlusterFS Install',
'playbook': 'playbooks/openshift-glusterfs/config.yml'
},
'installer_phase_hosted': {
'title': 'Hosted Install',
'playbook': 'playbooks/openshift-hosted/config.yml'
},
'installer_phase_web_console': {
'title': 'Web Console Install',
'playbook': 'playbooks/openshift-web-console/config.yml'
},
'installer_phase_metrics': {
'title': 'Metrics Install',
'playbook': 'playbooks/openshift-metrics/config.yml'
},
'installer_phase_logging': {
'title': 'Logging Install',
'playbook': 'playbooks/openshift-logging/config.yml'
},
'installer_phase_prometheus': {
'title': 'Prometheus Install',
'playbook': 'playbooks/openshift-prometheus/config.yml'
},
'installer_phase_servicecatalog': {
'title': 'Service Catalog Install',
'playbook': 'playbooks/openshift-service-catalog/config.yml'
},
'installer_phase_management': {
'title': 'Management Install',
'playbook': 'playbooks/openshift-management/config.yml'
},
}
# Find the longest phase title
max_column = 0
for phase in phase_attributes:
max_column = max(max_column, len(phase_attributes[phase]['title']))
if '_run' in stats.custom:
self._display.banner('INSTALLER STATUS')
for phase in installer_phases:
phase_title = phase_attributes[phase]['title']
padding = max_column - len(phase_title) + 2
if phase in stats.custom['_run']:
phase_status = stats.custom['_run'][phase]['status']
phase_time = phase_time_delta(stats.custom['_run'][phase])
self._display.display(
'{}{}: {} ({})'.format(phase_title, ' ' * padding, phase_status, phase_time),
color=self.phase_color(phase_status))
if phase_status == 'In Progress' and phase != 'installer_phase_initialize':
self._display.display(
'\tThis phase can be restarted by running: {}'.format(
phase_attributes[phase]['playbook']))
if 'message' in stats.custom['_run'][phase]:
self._display.display(
'\t{}'.format(
stats.custom['_run'][phase]['message']))
self._display.display("", screen_only=True)
def phase_color(self, status):
""" Return color code for installer phase"""
valid_status = [
'In Progress',
'Complete',
]
if status not in valid_status:
self._display.warning('Invalid phase status defined: {}'.format(status))
if status == 'Complete':
phase_color = C.COLOR_OK
elif status == 'In Progress':
phase_color = C.COLOR_ERROR
else:
phase_color = C.COLOR_WARN
return phase_color
def phase_time_delta(phase):
""" Calculate the difference between phase start and end times """
time_format = '%Y%m%d%H%M%SZ'
phase_start = datetime.strptime(phase['start'], time_format)
if 'end' not in phase:
# The phase failed so set the end time to now
phase_end = datetime.now()
else:
phase_end = datetime.strptime(phase['end'], time_format)
delta = str(phase_end - phase_start).split(".")[0] # Trim microseconds
return delta
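# Illustrative sketch added for clarity (not part of the original plugin):
# phase_time_delta expects the '%Y%m%d%H%M%SZ' timestamps the installer stores
# in stats.custom; the values below are made up.
def _example_phase_time_delta():
    phase = {'start': '20180101120000Z', 'end': '20180101121530Z'}
    return phase_time_delta(phase)  # -> '0:15:30'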
| apache-2.0 |
philipz/PyCV-time | opencv-official-samples/2.4.9/demo.py | 7 | 5157 | #!/usr/bin/env python
'''
Sample-launcher application.
'''
import Tkinter as tk
from ScrolledText import ScrolledText
from glob import glob
from common import splitfn
import webbrowser
from subprocess import Popen
#from IPython.Shell import IPShellEmbed
#ipshell = IPShellEmbed()
exclude_list = ['demo', 'common']
class LinkManager:
def __init__(self, text, url_callback = None):
self.text = text
self.text.tag_config("link", foreground="blue", underline=1)
self.text.tag_bind("link", "<Enter>", self._enter)
self.text.tag_bind("link", "<Leave>", self._leave)
self.text.tag_bind("link", "<Button-1>", self._click)
self.url_callback = url_callback
self.reset()
def reset(self):
self.links = {}
def add(self, action):
# add an action to the manager. returns tags to use in
# associated text widget
tag = "link-%d" % len(self.links)
self.links[tag] = action
return "link", tag
def _enter(self, event):
self.text.config(cursor="hand2")
def _leave(self, event):
self.text.config(cursor="")
def _click(self, event):
for tag in self.text.tag_names(tk.CURRENT):
if tag.startswith("link-"):
proc = self.links[tag]
if callable(proc):
proc()
else:
if self.url_callback:
self.url_callback(proc)
class App:
def __init__(self):
root = tk.Tk()
root.title('OpenCV Demo')
self.win = win = tk.PanedWindow(root, orient=tk.HORIZONTAL, sashrelief=tk.RAISED, sashwidth=4)
self.win.pack(fill=tk.BOTH, expand=1)
left = tk.Frame(win)
right = tk.Frame(win)
win.add(left)
win.add(right)
scrollbar = tk.Scrollbar(left, orient=tk.VERTICAL)
self.demos_lb = demos_lb = tk.Listbox(left, yscrollcommand=scrollbar.set)
scrollbar.config(command=demos_lb.yview)
scrollbar.pack(side=tk.RIGHT, fill=tk.Y)
demos_lb.pack(side=tk.LEFT, fill=tk.BOTH, expand=1)
self.samples = {}
for fn in glob('*.py'):
name = splitfn(fn)[1]
if fn[0] != '_' and name not in exclude_list:
demos_lb.insert(tk.END, name)
self.samples[name] = fn
demos_lb.bind('<<ListboxSelect>>', self.on_demo_select)
self.cmd_entry = cmd_entry = tk.Entry(right)
cmd_entry.bind('<Return>', self.on_run)
run_btn = tk.Button(right, command=self.on_run, text='Run', width=8)
self.text = text = ScrolledText(right, font=('arial', 12, 'normal'), width = 30, wrap='word')
self.linker = linker = LinkManager(text, self.on_link)
self.text.tag_config("header1", font=('arial', 14, 'bold'))
self.text.tag_config("header2", font=('arial', 12, 'bold'))
text.config(state='disabled')
text.pack(fill='both', expand=1, side=tk.BOTTOM)
cmd_entry.pack(fill='x', side='left' , expand=1)
run_btn.pack()
def on_link(self, url):
print url
webbrowser.open(url)
def on_demo_select(self, evt):
name = self.demos_lb.get( self.demos_lb.curselection()[0] )
fn = self.samples[name]
loc = {}
execfile(fn, loc)
descr = loc.get('__doc__', 'no-description')
self.linker.reset()
self.text.config(state='normal')
self.text.delete(1.0, tk.END)
self.format_text(descr)
self.text.config(state='disabled')
self.cmd_entry.delete(0, tk.END)
self.cmd_entry.insert(0, fn)
def format_text(self, s):
text = self.text
lines = s.splitlines()
for i, s in enumerate(lines):
s = s.rstrip()
if i == 0 and not s:
continue
if s and s == '='*len(s):
text.tag_add('header1', 'end-2l', 'end-1l')
elif s and s == '-'*len(s):
text.tag_add('header2', 'end-2l', 'end-1l')
else:
text.insert('end', s+'\n')
def add_link(start, end, url):
for tag in self.linker.add(url):
text.tag_add(tag, start, end)
self.match_text(r'http://\S+', add_link)
def match_text(self, pattern, tag_proc, regexp=True):
text = self.text
text.mark_set('matchPos', '1.0')
count = tk.IntVar()
while True:
match_index = text.search(pattern, 'matchPos', count=count, regexp=regexp, stopindex='end')
if not match_index: break
end_index = text.index( "%s+%sc" % (match_index, count.get()) )
text.mark_set('matchPos', end_index)
if callable(tag_proc):
tag_proc(match_index, end_index, text.get(match_index, end_index))
else:
text.tag_add(tag_proc, match_index, end_index)
def on_run(self, *args):
cmd = self.cmd_entry.get()
print 'running:', cmd
Popen("python " + cmd, shell=True)
def run(self):
tk.mainloop()
if __name__ == '__main__':
App().run()
| mit |
biskett/mic | tests/test_archive.py | 5 | 26592 | """
It is used to test mic/archive.py
"""
import os
import shutil
import unittest
from mic import archive
class ArchiveTest(unittest.TestCase):
"""
test pulic methods in archive.py
"""
def setUp(self):
"""Create files and directories for later use"""
self.relative_file = './sdfb.gxdf.bzws.zzz'
abs_file = '/tmp/adsdfb.gxdf.bzws.zzz'
bare_file = 'abc.def.bz.zzz'
self.relative_dir = './sdf.zzz'
abs_dir = '/tmp/asdf.zzz'
bare_dir = 'abd.zzz'
self.wrong_format_file = './sdbs.werxdf.bz.zzz'
self.files = [self.relative_file, abs_file, bare_file]
self.dirs = [self.relative_dir, abs_dir, bare_dir]
for file_item in self.files:
os.system('touch %s' % file_item)
for dir_item in self.dirs:
self.create_dir(dir_item)
shutil.copy(self.relative_file, '%s/1.txt' % dir_item)
shutil.copy(self.relative_file, '%s/2.txt' % dir_item)
self.create_dir('%s/dir1' % dir_item)
self.create_dir('%s/dir2' % dir_item)
def tearDown(self):
"""Clean up unuseful file and directory """
try:
for file_item in self.files:
os.remove(file_item)
for dir_item in self.dirs:
shutil.rmtree(dir_item, ignore_errors=True)
except OSError:
pass
def create_dir(self, dir_name):
"""Create directories and ignore any erros """
try:
os.makedirs(dir_name)
except OSError:
pass
def test_get_compress_formats(self):
"""Test get compress format """
compress_list = archive.get_compress_formats()
compress_list.sort()
self.assertEqual(compress_list, ['bz2', 'gz', 'lzo'])
def test_compress_negtive_file_path_is_required(self):
"""Test if the first parameter: file path is empty"""
with self.assertRaises(OSError):
archive.compress('', 'bz2')
def test_compress_negtive_compress_format_is_required(self):
"""Test if the second parameter: compress format is empty"""
with self.assertRaises(ValueError):
archive.compress(self.relative_file, '')
def test_compress_negtive_parameters_are_all_required(self):
"""Test if two parameters are both empty"""
with self.assertRaises(OSError):
archive.compress('', '')
def test_compress_negtive_file_not_exist(self):
"""Test target file does not exist"""
with self.assertRaises(OSError):
archive.compress('a.py', 'bz2')
def test_compress_negtive_file_is_dir(self):
"""Test target is one direcoty, which is not supported"""
with self.assertRaises(OSError):
archive.compress(self.relative_dir, 'bz2')
def test_compress_negtive_wrong_compress_format(self):
"""Test wrong compress format"""
with self.assertRaises(ValueError):
archive.compress(self.relative_file, 'bzip2')
def _compress_negtive_gz_command_not_exists(self):
#TODO: test if command like 'pigz', 'gzip' does not exist
pass
def _compress_negtive_lzo_command_not_exists(self):
#TODO: test if command 'lzop' does not exist
pass
def _compress_negtive_bz2_command_not_exists(self):
#TODO: test if command like 'pbzip2', 'bzip2' does not exist
pass
def test_compress_gz(self):
"""Test compress format: gz"""
for file_item in self.files:
output_name = archive.compress(file_item, 'gz')
self.assertEqual('%s.gz' % file_item, output_name)
self.assertTrue(os.path.exists(output_name))
os.remove(output_name)
def test_compress_bz2(self):
"""Test compress format: bz2"""
for file_item in self.files:
output_name = archive.compress(file_item, 'bz2')
self.assertEqual('%s.bz2' % file_item, output_name)
self.assertTrue(os.path.exists(output_name))
os.remove(output_name)
def _test_compress_lzo(self):
"""Test compress format: lzo"""
for file_item in self.files:
output_name = archive.compress(file_item, 'lzo')
self.assertEqual('%s.lzo' % file_item, output_name)
self.assertTrue(os.path.exists(output_name))
os.remove(output_name)
def test_decompress_negtive_file_path_is_required(self):
"""Test if the first parameter: file to be uncompressed is empty"""
with self.assertRaises(OSError):
archive.decompress('', 'bz')
def test_decompress_compress_format_is_empty(self):
"""Test if the second parameter: compress format is empty string"""
output_name = archive.compress(self.relative_file, 'gz')
self.assertEqual('%s.gz' % self.relative_file, output_name)
self.assertTrue(os.path.exists(output_name))
self.assertFalse(os.path.exists(self.relative_file))
archive.decompress(output_name, '')
self.assertTrue(os.path.exists(self.relative_file))
def test_decompress_negtive_parameters_are_empty(self):
"""Test if two parameters are both empty string"""
with self.assertRaises(OSError):
archive.decompress('', '')
def test_decompress_negtive_file_not_exist(self):
"""Test decompress target does not exist"""
with self.assertRaises(OSError):
archive.decompress('tresa.py', 'bz2')
def test_decompress_negtive_path_is_dir(self):
"""Test decompress target is a directory"""
with self.assertRaises(OSError):
archive.decompress(self.relative_dir, 'bz2')
def _decompress_negtive_not_corresponding(self):
# TODO: test if path is .lzo, but given format is bz2
pass
def test_decompress_negtive_wrong_compress_format(self):
"""Test wrong decompress format"""
with self.assertRaises(ValueError):
archive.decompress(self.relative_file, 'bzip2')
def test_decompress_negtive_wrong_file_format(self):
"""Test wrong target format"""
with self.assertRaises(Exception):
archive.decompress(self.wrong_format_file, 'bz2')
def test_decompress_gz(self):
"""Test decompress
Format: gz
both two parameters are given, one is target file,
the other is corresponding compress format"""
for file_item in self.files:
output_name = archive.compress(file_item, 'gz')
self.assertEqual('%s.gz' % file_item, output_name)
self.assertTrue(os.path.exists(output_name))
self.assertFalse(os.path.exists(file_item))
archive.decompress(output_name, 'gz')
self.assertTrue(os.path.exists(file_item))
def test_decompress_gz_no_compress_format(self):
"""Test decompress
Format: gz
one parameters is given, only target file"""
for file_item in self.files:
output_name = archive.compress(file_item, 'gz')
self.assertEqual('%s.gz' % file_item, output_name)
self.assertTrue(os.path.exists(output_name))
self.assertFalse(os.path.exists(file_item))
archive.decompress(output_name)
self.assertTrue(os.path.exists(file_item))
def test_decompress_bz2(self):
"""Test decompress
Format: bz2
both two parameters are given, one is target file,
the other is corresponding compress format"""
for file_item in self.files:
output_name = archive.compress(file_item, 'bz2')
self.assertEqual('%s.bz2' % file_item, output_name)
self.assertTrue(os.path.exists(output_name))
self.assertFalse(os.path.exists(file_item))
archive.decompress(output_name, 'bz2')
self.assertTrue(os.path.exists(file_item))
def test_decompress_bz2_no_compress_format(self):
"""Test decompress
Format: bz2
one parameters is given, only target file"""
for file_item in self.files:
output_name = archive.compress(file_item, 'bz2')
self.assertEqual('%s.bz2' % file_item, output_name)
self.assertTrue(os.path.exists(output_name))
self.assertFalse(os.path.exists(file_item))
archive.decompress(output_name)
self.assertTrue(os.path.exists(file_item))
def _test_decompress_lzo(self):
"""Test decompress
Format: lzo
both two parameters are given, one is target file,
the other is corresponding compress format"""
for file_item in self.files:
output_name = archive.compress(file_item, 'lzo')
self.assertEqual('%s.lzo' % file_item, output_name)
self.assertTrue(os.path.exists(output_name))
self.assertFalse(os.path.exists(file_item))
archive.decompress(output_name, 'lzo')
self.assertTrue(os.path.exists(file_item))
def _test_decompress_lzo_no_compress_format(self):
"""Test decompress
Format: lzo
one parameters is given, only target file"""
for file_item in self.files:
output_name = archive.compress(file_item, 'lzo')
self.assertEqual('%s.lzo' % file_item, output_name)
self.assertTrue(os.path.exists(output_name))
self.assertFalse(os.path.exists(file_item))
archive.decompress(output_name)
self.assertTrue(os.path.exists(file_item))
def test_get_archive_formats(self):
"""Test get archive format"""
archive_formats = archive.get_archive_formats()
archive_formats.sort()
self.assertEqual(archive_formats,
["bztar", "gztar", "lzotar", "tar", 'zip'])
def test_get_archive_suffixes(self):
"""Test get archive suffixes"""
archive_suffixes = archive.get_archive_suffixes()
archive_suffixes.sort()
self.assertEqual(archive_suffixes,
['.tar', '.tar.bz', '.tar.bz2', '.tar.gz', '.tar.lzo',
'.taz', '.tbz', '.tbz2', '.tgz', '.tzo', '.zip'])
def test_make_archive_negtive_archive_name_is_required(self):
"""Test if first parameter: file path is empty"""
with self.assertRaises(Exception):
archive.make_archive('', self.relative_dir)
def test_extract_archive_negtive_archive_name_is_required(self):
"""Test if first parameter: file path is empty"""
with self.assertRaises(Exception):
archive.extract_archive('', self.relative_dir)
def test_make_archive_negtive_target_name_is_required(self):
"""Test if second parameter: target name is empty"""
with self.assertRaises(Exception):
archive.make_archive('a.zip', '')
def _extract_archive_negtive_target_name_is_required(self):
# Not sure if the current dir will be used ?
# TODO:
pass
def test_make_archive_negtive_parameters_are_empty(self):
"""Test if both parameters are empty"""
with self.assertRaises(Exception):
archive.make_archive('', '')
def test_extract_archive_negtive_parameters_are_empty(self):
"""Test if both parameters are empty"""
with self.assertRaises(Exception):
archive.extract_archive('', '')
def test_make_archive_negtive_target_path_not_exists(self):
"""Test if file path does not exist"""
fake_file = 'abcdfsdf'
with self.assertRaises(Exception):
archive.make_archive('a.tar', fake_file)
with self.assertRaises(Exception):
archive.make_archive('a.zip', fake_file)
def test_extract_archive_negtive_path_not_exists(self):
"""Test if file path does not exist"""
fake_file = 'abcdfsdf'
with self.assertRaises(Exception):
archive.extract_archive(fake_file, self.relative_dir)
def test_extract_archive_negtive_target_is_file(self):
"""Test if the extract target is file"""
out_file = '%s.tar' % self.relative_dir
self.assertTrue(archive.make_archive(out_file, self.relative_dir))
self.assertTrue(os.path.exists(out_file))
with self.assertRaises(Exception):
archive.extract_archive(out_file, self.relative_file)
os.remove(out_file)
def test_make_archive_wrong_format(self):
"""Test wrong make_archive format"""
with self.assertRaises(Exception):
archive.make_archive('a.sfsfrwe', self.relative_dir)
def test_make_archive_tar_with_different_name(self):
""" Test make_archive format: tar
It packs the source with another name"""
for item in self.files + self.dirs:
out_file = 'abcd.tar'
self.assertTrue(archive.make_archive(out_file, item))
self.assertTrue(os.path.exists(out_file))
os.remove(out_file)
def test_make_archive_tar(self):
""" Test make_archive format: tar"""
for item in self.files + self.dirs:
out_file = '%s.tar' % item
self.assertTrue(archive.make_archive(out_file, item))
self.assertTrue(os.path.exists(out_file))
os.remove(out_file)
def test_extract_archive_tar(self):
""" Test extract format: tar"""
for item in self.files:
out_file = '%s.tar' % item
self.assertTrue(archive.make_archive(out_file, item))
self.assertTrue(os.path.exists(out_file))
out_dir = 'un_tar_dir'
archive.extract_archive(out_file, out_dir)
self.assertTrue(os.path.exists(os.path.join(
out_dir,
os.path.basename(item))))
shutil.rmtree(out_dir)
for item in self.dirs:
out_file = '%s.tar' % item
self.assertTrue(archive.make_archive(out_file, item))
self.assertTrue(os.path.exists(out_file))
out_dir = 'un_tar_dir'
archive.extract_archive(out_file, out_dir)
self.assertTrue(os.path.exists(os.path.join(out_dir, '1.txt')))
self.assertTrue(os.path.exists(os.path.join(out_dir, '2.txt')))
self.assertTrue(os.path.exists(os.path.join(out_dir, 'dir1')))
self.assertTrue(os.path.exists(os.path.join(out_dir, 'dir2')))
shutil.rmtree(out_dir)
def test_make_archive_zip_with_different_name(self):
""" Test make_archive format: zip
It packs the source with another name"""
for item in self.files + self.dirs:
out_file = 'a.zip'
self.assertTrue(archive.make_archive(out_file, item))
self.assertTrue(os.path.exists(out_file))
os.remove(out_file)
def test_make_archive_zip(self):
""" Test make_archive format: zip"""
for item in self.files + self.dirs:
out_file = '%s.zip' % item
self.assertTrue(archive.make_archive(out_file, item))
self.assertTrue(os.path.exists(out_file))
os.remove(out_file)
def _extract_archive_zip(self):
""" Test extract archive format: zip"""
for item in self.files + self.dirs:
out_file = '%s.zip' % item
self.assertTrue(archive.make_archive(out_file, item))
self.assertTrue(os.path.exists(out_file))
out_dir = 'un_tar_dir'
archive.extract_archive(out_file, out_dir)
self.assertTrue(os.path.exists(os.path.join(out_dir, item)))
shutil.rmtree(out_dir)
def _test_make_archive_tzo_with_different_name(self):
""" Test make_archive format: tzo
It packs the source with another name"""
for item in self.files + self.dirs:
out_file = 'abc.tzo'
self.assertTrue(archive.make_archive(out_file, item))
self.assertTrue(os.path.exists(out_file))
os.remove(out_file)
def _test_make_archive_tzo(self):
""" Test make_archive format: tzo"""
for item in self.files + self.dirs:
out_file = '%s.tzo' % item
self.assertTrue(archive.make_archive(out_file, item))
self.assertTrue(os.path.exists(out_file))
os.remove(out_file)
def _extract_archive_tzo(self):
""" Test extract format: tzo"""
for item in self.files + self.dirs:
out_file = '%s.tzo' % item
self.assertTrue(archive.make_archive(out_file, item))
self.assertTrue(os.path.exists(out_file))
out_dir = 'un_tar_dir'
archive.extract_archive(out_file, out_dir)
self.assertTrue(os.path.exists(os.path.join(out_dir, item)))
shutil.rmtree(out_dir)
def _test_make_archive_tar_lzo_with_different_name(self):
""" Test make_archive format: lzo
It packs the source with another name"""
for item in self.files + self.dirs:
out_file = 'abc.tar.lzo'
self.assertTrue(archive.make_archive(out_file, item))
self.assertTrue(os.path.exists(out_file))
os.remove(out_file)
def _test_make_archive_tar_lzo(self):
""" Test make_archive format: lzo"""
for item in self.files + self.dirs:
out_file = '%s.tar.lzo' % item
self.assertTrue(archive.make_archive(out_file, item))
self.assertTrue(os.path.exists(out_file))
os.remove(out_file)
def _extract_archive_tar_lzo(self):
""" Test extract_archive format: lzo"""
for item in self.files + self.dirs:
out_file = '%s.tar.lzo' % item
self.assertTrue(archive.make_archive(out_file, item))
self.assertTrue(os.path.exists(out_file))
out_dir = 'un_tar_dir'
archive.extract_archive(out_file, out_dir)
self.assertTrue(os.path.exists(os.path.join(out_dir, item)))
shutil.rmtree(out_dir)
def test_make_archive_taz_with_different_name(self):
""" Test make_archive format: taz
It packs the source with another name"""
for item in self.files + self.dirs:
out_file = 'abcd.taz'
self.assertTrue(archive.make_archive(out_file, item))
self.assertTrue(os.path.exists(out_file))
os.remove(out_file)
def test_make_archive_taz(self):
""" Test make_archive format: taz"""
for item in self.files + self.dirs:
out_file = '%s.taz' % item
self.assertTrue(archive.make_archive(out_file, item))
self.assertTrue(os.path.exists(out_file))
os.remove(out_file)
def _extract_archive_taz(self):
""" Test extract archive format: taz"""
for item in self.files + self.dirs:
out_file = '%s.taz' % item
self.assertTrue(archive.make_archive(out_file, item))
self.assertTrue(os.path.exists(out_file))
out_dir = 'un_tar_dir'
archive.extract_archive(out_file, out_dir)
self.assertTrue(os.path.exists(os.path.join(out_dir, item)))
shutil.rmtree(out_dir)
def test_make_archive_tgz_with_different_name(self):
""" Test make_archive format: tgz
It packs the source with another name"""
for item in self.files + self.dirs:
out_file = 'abc.tgz'
self.assertTrue(archive.make_archive(out_file, item))
self.assertTrue(os.path.exists(out_file))
os.remove(out_file)
def test_make_archive_tgz(self):
""" Test make_archive format: tgz"""
for item in self.files + self.dirs:
out_file = '%s.tgz' % item
self.assertTrue(archive.make_archive(out_file, item))
self.assertTrue(os.path.exists(out_file))
os.remove(out_file)
def _extract_archive_tgz(self):
""" Test extract archive format: tgz"""
for item in self.files + self.dirs:
out_file = '%s.tgz' % item
self.assertTrue(archive.make_archive(out_file, item))
self.assertTrue(os.path.exists(out_file))
out_dir = 'un_tar_dir'
archive.extract_archive(out_file, out_dir)
self.assertTrue(os.path.exists(os.path.join(out_dir, item)))
shutil.rmtree(out_dir)
def test_make_archive_tar_gz_with_different_name(self):
""" Test make_archive format: tar.gz
It packs the source with another name"""
for item in self.files + self.dirs:
out_file = 'erwe.tar.gz'
self.assertTrue(archive.make_archive(out_file, item))
self.assertTrue(os.path.exists(out_file))
os.remove(out_file)
def test_make_archive_tar_gz(self):
""" Test make_archive format: tar.gz"""
for item in self.files + self.dirs:
out_file = '%s.tar.gz' % item
self.assertTrue(archive.make_archive(out_file, item))
self.assertTrue(os.path.exists(out_file))
os.remove(out_file)
def _extract_archive_tar_gz(self):
""" Test extract archive format: tar.gz"""
for item in self.files + self.dirs:
out_file = '%s.tar.gz' % item
self.assertTrue(archive.make_archive(out_file, item))
self.assertTrue(os.path.exists(out_file))
out_dir = 'un_tar_dir'
archive.extract_archive(out_file, out_dir)
self.assertTrue(os.path.exists(os.path.join(out_dir, item)))
shutil.rmtree(out_dir)
def test_make_archive_tbz_with_different_name(self):
""" Test make_archive format: tbz
It packs the source with another name"""
for item in self.files + self.dirs:
out_file = 'sdfsd.tbz'
self.assertTrue(archive.make_archive(out_file, item))
self.assertTrue(os.path.exists(out_file))
os.remove(out_file)
def test_make_archive_tbz(self):
""" Test make_archive format: tbz"""
for item in self.files + self.dirs:
out_file = '%s.tbz' % item
self.assertTrue(archive.make_archive(out_file, item))
self.assertTrue(os.path.exists(out_file))
os.remove(out_file)
def _extract_archive_tbz(self):
""" Test extract format: tbz"""
for item in self.files + self.dirs:
out_file = '%s.tbz' % item
self.assertTrue(archive.make_archive(out_file, item))
self.assertTrue(os.path.exists(out_file))
out_dir = 'un_tar_dir'
archive.extract_archive(out_file, out_dir)
self.assertTrue(os.path.exists(os.path.join(out_dir, item)))
shutil.rmtree(out_dir)
def test_make_archive_tbz2_with_different_name(self):
""" Test make_archive format: tbz2
It packs the source with another name"""
for item in self.files + self.dirs:
out_file = 'sfsfd.tbz2'
self.assertTrue(archive.make_archive(out_file, item))
self.assertTrue(os.path.exists(out_file))
os.remove(out_file)
def test_make_archive_tbz2(self):
""" Test make_archive format: tbz2"""
for item in self.files + self.dirs:
out_file = '%s.tbz2' % item
self.assertTrue(archive.make_archive(out_file, item))
self.assertTrue(os.path.exists(out_file))
os.remove(out_file)
def _extract_archive_tbz2(self):
""" Test extract format: tbz2"""
for item in self.files + self.dirs:
out_file = '%s.tbz2' % item
self.assertTrue(archive.make_archive(out_file, item))
self.assertTrue(os.path.exists(out_file))
out_dir = 'un_tar_dir'
archive.extract_archive(out_file, out_dir)
self.assertTrue(os.path.exists(os.path.join(out_dir, item)))
shutil.rmtree(out_dir)
def test_make_archive_tar_bz_with_different_name(self):
""" Test make_archive format: tar.bz
It packs the source with another name"""
for item in self.files + self.dirs:
out_file = 'sdf.tar.bz'
self.assertTrue(archive.make_archive(out_file, item))
self.assertTrue(os.path.exists(out_file))
os.remove(out_file)
def test_make_archive_tar_bz(self):
""" Test make_archive format: tar.bz"""
for item in self.files + self.dirs:
out_file = '%s.tar.bz' % item
self.assertTrue(archive.make_archive(out_file, item))
self.assertTrue(os.path.exists(out_file))
os.remove(out_file)
def _extract_archive_tar_bz(self):
""" Test extract format: tar.bz"""
for item in self.files + self.dirs:
out_file = '%s.tar.bz' % item
self.assertTrue(archive.make_archive(out_file, item))
self.assertTrue(os.path.exists(out_file))
out_dir = 'un_tar_dir'
archive.extract_archive(out_file, out_dir)
self.assertTrue(os.path.exists(os.path.join(out_dir, item)))
shutil.rmtree(out_dir)
def test_make_archive_tar_bz2_with_different_name(self):
""" Test make_archive format: tar.bz2
It packs the source with another name"""
for item in self.files + self.dirs:
out_file = 'df.tar.bz2'
self.assertTrue(archive.make_archive(out_file, item))
self.assertTrue(os.path.exists(out_file))
os.remove(out_file)
def test_make_archive_tar_bz2(self):
""" Test make_archive format: tar.bz2"""
for item in self.files + self.dirs:
out_file = '%s.tar.bz2' % item
self.assertTrue(archive.make_archive(out_file, item))
self.assertTrue(os.path.exists(out_file))
os.remove(out_file)
def _extract_archive_tar_bz2(self):
""" Test extract format: tar.bz2"""
for item in self.files + self.dirs:
out_file = '%s.tar.bz2' % item
self.assertTrue(archive.make_archive(out_file, item))
self.assertTrue(os.path.exists(out_file))
out_dir = 'un_tar_dir'
archive.extract_archive(out_file, out_dir)
self.assertTrue(os.path.exists(os.path.join(out_dir, item)))
shutil.rmtree(out_dir)
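# Illustrative usage sketch of the archive API exercised by the tests above,
# kept as comments so it does not run as part of the test module; the file and
# directory names are hypothetical.
#
#   compressed = archive.compress('report.txt', 'bz2')   # -> 'report.txt.bz2'; the source file is removed
#   archive.decompress(compressed)                        # format inferred from the '.bz2' suffix
#   archive.make_archive('backup.tar.gz', 'data_dir')     # pack a file or directory
#   archive.extract_archive('backup.tar.gz', 'restore')   # unpack into a target directory
#   sorted(archive.get_archive_formats())                 # -> ['bztar', 'gztar', 'lzotar', 'tar', 'zip']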
if __name__ == "__main__":
unittest.main()
| gpl-2.0 |
creeptonik/videojs-live-card | node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/generator/ninja.py | 1284 | 100329 | # Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import copy
import hashlib
import json
import multiprocessing
import os.path
import re
import signal
import subprocess
import sys
import gyp
import gyp.common
from gyp.common import OrderedSet
import gyp.msvs_emulation
import gyp.MSVSUtil as MSVSUtil
import gyp.xcode_emulation
from cStringIO import StringIO
from gyp.common import GetEnvironFallback
import gyp.ninja_syntax as ninja_syntax
generator_default_variables = {
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '',
'STATIC_LIB_PREFIX': 'lib',
'STATIC_LIB_SUFFIX': '.a',
'SHARED_LIB_PREFIX': 'lib',
# Gyp expects the following variables to be expandable by the build
# system to the appropriate locations. Ninja prefers paths to be
# known at gyp time. To resolve this, introduce special
# variables starting with $! and $| (which begin with a $ so gyp knows it
# should be treated specially, but is otherwise an invalid
# ninja/shell variable) that are passed to gyp here but expanded
# before writing out into the target .ninja files; see
# ExpandSpecial.
# $! is used for variables that represent a path and that can only appear at
# the start of a string, while $| is used for variables that can appear
# anywhere in a string.
'INTERMEDIATE_DIR': '$!INTERMEDIATE_DIR',
'SHARED_INTERMEDIATE_DIR': '$!PRODUCT_DIR/gen',
'PRODUCT_DIR': '$!PRODUCT_DIR',
'CONFIGURATION_NAME': '$|CONFIGURATION_NAME',
# Special variables that may be used by gyp 'rule' targets.
# We generate definitions for these variables on the fly when processing a
# rule.
'RULE_INPUT_ROOT': '${root}',
'RULE_INPUT_DIRNAME': '${dirname}',
'RULE_INPUT_PATH': '${source}',
'RULE_INPUT_EXT': '${ext}',
'RULE_INPUT_NAME': '${name}',
}
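# Illustrative expansions of the special variables above (performed by
# NinjaWriter.ExpandSpecial / ExpandRuleVariables below); the concrete values
# are hypothetical and depend on the configuration and rule being processed:
#   '$!PRODUCT_DIR/gen'    -> 'gen'   (output paths are written relative to the product dir)
#   '$|CONFIGURATION_NAME' -> e.g. 'Debug'
#   '${root}', '${ext}', '${name}' -> per-source pieces of a rule's input path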
# Placates pylint.
generator_additional_non_configuration_keys = []
generator_additional_path_sections = []
generator_extra_sources_for_rules = []
generator_filelist_paths = None
generator_supports_multiple_toolsets = gyp.common.CrossCompileRequested()
def StripPrefix(arg, prefix):
if arg.startswith(prefix):
return arg[len(prefix):]
return arg
def QuoteShellArgument(arg, flavor):
"""Quote a string such that it will be interpreted as a single argument
by the shell."""
# Rather than attempting to enumerate the bad shell characters, just
# whitelist common OK ones and quote anything else.
if re.match(r'^[a-zA-Z0-9_=.\\/-]+$', arg):
return arg # No quoting necessary.
if flavor == 'win':
return gyp.msvs_emulation.QuoteForRspFile(arg)
return "'" + arg.replace("'", "'" + '"\'"' + "'") + "'"
def Define(d, flavor):
"""Takes a preprocessor define and returns a -D parameter that's ninja- and
shell-escaped."""
if flavor == 'win':
# cl.exe replaces literal # characters with = in preprocessor definitions for
# some reason. Octal-encode to work around that.
d = d.replace('#', '\\%03o' % ord('#'))
return QuoteShellArgument(ninja_syntax.escape('-D' + d), flavor)
def AddArch(output, arch):
"""Adds an arch string to an output path."""
output, extension = os.path.splitext(output)
return '%s.%s%s' % (output, arch, extension)
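# Illustrative (hypothetical path):
#   AddArch('obj/foo/libbar.a', 'arm64') -> 'obj/foo/libbar.arm64.a'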
class Target(object):
"""Target represents the paths used within a single gyp target.
Conceptually, building a single target A is a series of steps:
1) actions/rules/copies generates source/resources/etc.
2) compiles generates .o files
3) link generates a binary (library/executable)
4) bundle merges the above in a mac bundle
(Any of these steps can be optional.)
From a build ordering perspective, a dependent target B could just
depend on the last output of this series of steps.
But some dependent commands sometimes need to reach inside the box.
For example, when linking B it needs to get the path to the static
library generated by A.
This object stores those paths. To keep things simple, member
variables only store concrete paths to single files, while methods
compute derived values like "the last output of the target".
"""
def __init__(self, type):
# Gyp type ("static_library", etc.) of this target.
self.type = type
# File representing whether any input dependencies necessary for
# dependent actions have completed.
self.preaction_stamp = None
# File representing whether any input dependencies necessary for
# dependent compiles have completed.
self.precompile_stamp = None
# File representing the completion of actions/rules/copies, if any.
self.actions_stamp = None
# Path to the output of the link step, if any.
self.binary = None
# Path to the file representing the completion of building the bundle,
# if any.
self.bundle = None
# On Windows, incremental linking requires linking against all the .objs
# that compose a .lib (rather than the .lib itself). That list is stored
# here. In this case, we also need to save the compile_deps for the target,
# so that the target that directly depends on the .objs can also depend
# on those.
self.component_objs = None
self.compile_deps = None
# Windows only. The import .lib is the output of a build step, but
# because dependents only link against the lib (not both the lib and the
# dll) we keep track of the import library here.
self.import_lib = None
def Linkable(self):
"""Return true if this is a target that can be linked against."""
return self.type in ('static_library', 'shared_library')
def UsesToc(self, flavor):
"""Return true if the target should produce a restat rule based on a TOC
file."""
# For bundles, the .TOC should be produced for the binary, not for
# FinalOutput(). But the naive approach would put the TOC file into the
# bundle, so don't do this for bundles for now.
if flavor == 'win' or self.bundle:
return False
return self.type in ('shared_library', 'loadable_module')
def PreActionInput(self, flavor):
"""Return the path, if any, that should be used as a dependency of
any dependent action step."""
if self.UsesToc(flavor):
return self.FinalOutput() + '.TOC'
return self.FinalOutput() or self.preaction_stamp
def PreCompileInput(self):
"""Return the path, if any, that should be used as a dependency of
any dependent compile step."""
return self.actions_stamp or self.precompile_stamp
def FinalOutput(self):
"""Return the last output of the target, which depends on all prior
steps."""
return self.bundle or self.binary or self.actions_stamp
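# Illustrative (hypothetical) values for a Linux shared_library target 'foo'
# that also has actions:
#   actions_stamp -> 'obj/foo/foo.actions_rules_copies.stamp'
#   binary -> the linked shared library inside the build directory
#   FinalOutput() -> the binary; PreActionInput(flavor) -> binary + '.TOC',
#   because non-win shared libraries are tracked through a table-of-contents file.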
# A small discourse on paths as used within the Ninja build:
# All files we produce (both at gyp and at build time) appear in the
# build directory (e.g. out/Debug).
#
# Paths within a given .gyp file are always relative to the directory
# containing the .gyp file. Call these "gyp paths". This includes
# sources as well as the starting directory a given gyp rule/action
# expects to be run from. We call the path from the source root to
# the gyp file the "base directory" within the per-.gyp-file
# NinjaWriter code.
#
# All paths as written into the .ninja files are relative to the build
# directory. Call these paths "ninja paths".
#
# We translate between these two notions of paths with two helper
# functions:
#
# - GypPathToNinja translates a gyp path (i.e. relative to the .gyp file)
# into the equivalent ninja path.
#
# - GypPathToUniqueOutput translates a gyp path into a ninja path to write
# an output file; the result can be namespaced such that it is unique
# to the input file name as well as the output target name.
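# Illustrative example of the two conversions (hypothetical layout: gyp file at
# foo/foo.gyp, build directory out/Debug, target name 'targ'):
#   GypPathToNinja('baz/quux.cc')       -> '../../foo/baz/quux.cc'  (relative to out/Debug)
#   GypPathToUniqueOutput('baz/quux.o') -> 'obj/foo/baz/targ.quux.o'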
class NinjaWriter(object):
def __init__(self, hash_for_rules, target_outputs, base_dir, build_dir,
output_file, toplevel_build, output_file_name, flavor,
toplevel_dir=None):
"""
base_dir: path from source root to directory containing this gyp file,
by gyp semantics, all input paths are relative to this
build_dir: path from source root to build output
toplevel_dir: path to the toplevel directory
"""
self.hash_for_rules = hash_for_rules
self.target_outputs = target_outputs
self.base_dir = base_dir
self.build_dir = build_dir
self.ninja = ninja_syntax.Writer(output_file)
self.toplevel_build = toplevel_build
self.output_file_name = output_file_name
self.flavor = flavor
self.abs_build_dir = None
if toplevel_dir is not None:
self.abs_build_dir = os.path.abspath(os.path.join(toplevel_dir,
build_dir))
self.obj_ext = '.obj' if flavor == 'win' else '.o'
if flavor == 'win':
# See docstring of msvs_emulation.GenerateEnvironmentFiles().
self.win_env = {}
for arch in ('x86', 'x64'):
self.win_env[arch] = 'environment.' + arch
# Relative path from build output dir to base dir.
build_to_top = gyp.common.InvertRelativePath(build_dir, toplevel_dir)
self.build_to_base = os.path.join(build_to_top, base_dir)
# Relative path from base dir to build dir.
base_to_top = gyp.common.InvertRelativePath(base_dir, toplevel_dir)
self.base_to_build = os.path.join(base_to_top, build_dir)
def ExpandSpecial(self, path, product_dir=None):
"""Expand specials like $!PRODUCT_DIR in |path|.
If |product_dir| is None, assumes the cwd is already the product
dir. Otherwise, |product_dir| is the relative path to the product
dir.
"""
PRODUCT_DIR = '$!PRODUCT_DIR'
if PRODUCT_DIR in path:
if product_dir:
path = path.replace(PRODUCT_DIR, product_dir)
else:
path = path.replace(PRODUCT_DIR + '/', '')
path = path.replace(PRODUCT_DIR + '\\', '')
path = path.replace(PRODUCT_DIR, '.')
INTERMEDIATE_DIR = '$!INTERMEDIATE_DIR'
if INTERMEDIATE_DIR in path:
int_dir = self.GypPathToUniqueOutput('gen')
# GypPathToUniqueOutput generates a path relative to the product dir,
# so insert product_dir in front if it is provided.
path = path.replace(INTERMEDIATE_DIR,
os.path.join(product_dir or '', int_dir))
CONFIGURATION_NAME = '$|CONFIGURATION_NAME'
path = path.replace(CONFIGURATION_NAME, self.config_name)
return path
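# Illustrative (assuming config_name == 'Debug'):
#   ExpandSpecial('$!PRODUCT_DIR/gen/foo.h')        -> 'gen/foo.h'      (cwd is the product dir)
#   ExpandSpecial('$!PRODUCT_DIR/gen/foo.h', '..')  -> '../gen/foo.h'
#   ExpandSpecial('-DCONF="$|CONFIGURATION_NAME"')  -> '-DCONF="Debug"'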
def ExpandRuleVariables(self, path, root, dirname, source, ext, name):
if self.flavor == 'win':
path = self.msvs_settings.ConvertVSMacros(
path, config=self.config_name)
path = path.replace(generator_default_variables['RULE_INPUT_ROOT'], root)
path = path.replace(generator_default_variables['RULE_INPUT_DIRNAME'],
dirname)
path = path.replace(generator_default_variables['RULE_INPUT_PATH'], source)
path = path.replace(generator_default_variables['RULE_INPUT_EXT'], ext)
path = path.replace(generator_default_variables['RULE_INPUT_NAME'], name)
return path
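# Illustrative, for a hypothetical rule source 'sub/bar.proto' on a non-win
# flavor (root='bar', dirname='sub', ext='.proto', name='bar.proto'):
#   ExpandRuleVariables('${dirname}/${root}.pb.cc',
#                       'bar', 'sub', 'sub/bar.proto', '.proto', 'bar.proto')
#   -> 'sub/bar.pb.cc'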
def GypPathToNinja(self, path, env=None):
"""Translate a gyp path to a ninja path, optionally expanding environment
variable references in |path| with |env|.
See the above discourse on path conversions."""
if env:
if self.flavor == 'mac':
path = gyp.xcode_emulation.ExpandEnvVars(path, env)
elif self.flavor == 'win':
path = gyp.msvs_emulation.ExpandMacros(path, env)
if path.startswith('$!'):
expanded = self.ExpandSpecial(path)
if self.flavor == 'win':
expanded = os.path.normpath(expanded)
return expanded
if '$|' in path:
path = self.ExpandSpecial(path)
assert '$' not in path, path
return os.path.normpath(os.path.join(self.build_to_base, path))
def GypPathToUniqueOutput(self, path, qualified=True):
"""Translate a gyp path to a ninja path for writing output.
If qualified is True, qualify the resulting filename with the name
of the target. This is necessary when e.g. compiling the same
path twice for two separate output targets.
See the above discourse on path conversions."""
path = self.ExpandSpecial(path)
assert not path.startswith('$'), path
# Translate the path following this scheme:
# Input: foo/bar.gyp, target targ, references baz/out.o
# Output: obj/foo/baz/targ.out.o (if qualified)
# obj/foo/baz/out.o (otherwise)
# (and obj.host instead of obj for cross-compiles)
#
# Why this scheme and not some other one?
# 1) for a given input, you can compute all derived outputs by matching
# its path, even if the input is brought via a gyp file with '..'.
# 2) simple files like libraries and stamps have a simple filename.
obj = 'obj'
if self.toolset != 'target':
obj += '.' + self.toolset
path_dir, path_basename = os.path.split(path)
assert not os.path.isabs(path_dir), (
"'%s' can not be absolute path (see crbug.com/462153)." % path_dir)
if qualified:
path_basename = self.name + '.' + path_basename
return os.path.normpath(os.path.join(obj, self.base_dir, path_dir,
path_basename))
def WriteCollapsedDependencies(self, name, targets, order_only=None):
"""Given a list of targets, return a path for a single file
representing the result of building all the targets or None.
Uses a stamp file if necessary."""
assert targets == filter(None, targets), targets
if len(targets) == 0:
assert not order_only
return None
if len(targets) > 1 or order_only:
stamp = self.GypPathToUniqueOutput(name + '.stamp')
targets = self.ninja.build(stamp, 'stamp', targets, order_only=order_only)
self.ninja.newline()
return targets[0]
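# Illustrative (hypothetical paths): a single target with no order_only deps is
# returned unchanged, while several targets are collapsed behind a stamp edge,
# roughly:
#   build obj/foo/targ.compile_depends.stamp: stamp obj/a.stamp obj/b.stamp
# and that stamp path is returned as the one collapsed dependency.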
def _SubninjaNameForArch(self, arch):
output_file_base = os.path.splitext(self.output_file_name)[0]
return '%s.%s.ninja' % (output_file_base, arch)
def WriteSpec(self, spec, config_name, generator_flags):
"""The main entry point for NinjaWriter: write the build rules for a spec.
Returns a Target object, which represents the output paths for this spec.
Returns None if there are no outputs (e.g. a settings-only 'none' type
target)."""
self.config_name = config_name
self.name = spec['target_name']
self.toolset = spec['toolset']
config = spec['configurations'][config_name]
self.target = Target(spec['type'])
self.is_standalone_static_library = bool(
spec.get('standalone_static_library', 0))
# Track if this target contains any C++ files, to decide if gcc or g++
# should be used for linking.
self.uses_cpp = False
self.is_mac_bundle = gyp.xcode_emulation.IsMacBundle(self.flavor, spec)
self.xcode_settings = self.msvs_settings = None
if self.flavor == 'mac':
self.xcode_settings = gyp.xcode_emulation.XcodeSettings(spec)
if self.flavor == 'win':
self.msvs_settings = gyp.msvs_emulation.MsvsSettings(spec,
generator_flags)
arch = self.msvs_settings.GetArch(config_name)
self.ninja.variable('arch', self.win_env[arch])
self.ninja.variable('cc', '$cl_' + arch)
self.ninja.variable('cxx', '$cl_' + arch)
self.ninja.variable('cc_host', '$cl_' + arch)
self.ninja.variable('cxx_host', '$cl_' + arch)
self.ninja.variable('asm', '$ml_' + arch)
if self.flavor == 'mac':
self.archs = self.xcode_settings.GetActiveArchs(config_name)
if len(self.archs) > 1:
self.arch_subninjas = dict(
(arch, ninja_syntax.Writer(
OpenOutput(os.path.join(self.toplevel_build,
self._SubninjaNameForArch(arch)),
'w')))
for arch in self.archs)
# Compute predepends for all rules.
# actions_depends is the dependencies this target depends on before running
# any of its action/rule/copy steps.
# compile_depends is the dependencies this target depends on before running
# any of its compile steps.
actions_depends = []
compile_depends = []
# TODO(evan): it is rather confusing which things are lists and which
# are strings. Fix these.
if 'dependencies' in spec:
for dep in spec['dependencies']:
if dep in self.target_outputs:
target = self.target_outputs[dep]
actions_depends.append(target.PreActionInput(self.flavor))
compile_depends.append(target.PreCompileInput())
actions_depends = filter(None, actions_depends)
compile_depends = filter(None, compile_depends)
actions_depends = self.WriteCollapsedDependencies('actions_depends',
actions_depends)
compile_depends = self.WriteCollapsedDependencies('compile_depends',
compile_depends)
self.target.preaction_stamp = actions_depends
self.target.precompile_stamp = compile_depends
# Write out actions, rules, and copies. These must happen before we
# compile any sources, so compute a list of predependencies for sources
# while we do it.
extra_sources = []
mac_bundle_depends = []
self.target.actions_stamp = self.WriteActionsRulesCopies(
spec, extra_sources, actions_depends, mac_bundle_depends)
# If we have actions/rules/copies, we depend directly on those, but
# otherwise we depend on dependent target's actions/rules/copies etc.
# We never need to explicitly depend on previous target's link steps,
# because no compile ever depends on them.
compile_depends_stamp = (self.target.actions_stamp or compile_depends)
# Write out the compilation steps, if any.
link_deps = []
sources = extra_sources + spec.get('sources', [])
if sources:
if self.flavor == 'mac' and len(self.archs) > 1:
# Write subninja file containing compile and link commands scoped to
# a single arch if a fat binary is being built.
for arch in self.archs:
self.ninja.subninja(self._SubninjaNameForArch(arch))
pch = None
if self.flavor == 'win':
gyp.msvs_emulation.VerifyMissingSources(
sources, self.abs_build_dir, generator_flags, self.GypPathToNinja)
pch = gyp.msvs_emulation.PrecompiledHeader(
self.msvs_settings, config_name, self.GypPathToNinja,
self.GypPathToUniqueOutput, self.obj_ext)
else:
pch = gyp.xcode_emulation.MacPrefixHeader(
self.xcode_settings, self.GypPathToNinja,
lambda path, lang: self.GypPathToUniqueOutput(path + '-' + lang))
link_deps = self.WriteSources(
self.ninja, config_name, config, sources, compile_depends_stamp, pch,
spec)
# Some actions/rules output 'sources' that are already object files.
obj_outputs = [f for f in sources if f.endswith(self.obj_ext)]
if obj_outputs:
if self.flavor != 'mac' or len(self.archs) == 1:
link_deps += [self.GypPathToNinja(o) for o in obj_outputs]
else:
print "Warning: Actions/rules writing object files don't work with " \
"multiarch targets, dropping. (target %s)" % spec['target_name']
elif self.flavor == 'mac' and len(self.archs) > 1:
link_deps = collections.defaultdict(list)
compile_deps = self.target.actions_stamp or actions_depends
if self.flavor == 'win' and self.target.type == 'static_library':
self.target.component_objs = link_deps
self.target.compile_deps = compile_deps
# Write out a link step, if needed.
output = None
is_empty_bundle = not link_deps and not mac_bundle_depends
if link_deps or self.target.actions_stamp or actions_depends:
output = self.WriteTarget(spec, config_name, config, link_deps,
compile_deps)
if self.is_mac_bundle:
mac_bundle_depends.append(output)
# Bundle all of the above together, if needed.
if self.is_mac_bundle:
output = self.WriteMacBundle(spec, mac_bundle_depends, is_empty_bundle)
if not output:
return None
assert self.target.FinalOutput(), output
return self.target
def _WinIdlRule(self, source, prebuild, outputs):
"""Handle the implicit VS .idl rule for one source file. Fills |outputs|
with files that are generated."""
outdir, output, vars, flags = self.msvs_settings.GetIdlBuildData(
source, self.config_name)
outdir = self.GypPathToNinja(outdir)
def fix_path(path, rel=None):
path = os.path.join(outdir, path)
dirname, basename = os.path.split(source)
root, ext = os.path.splitext(basename)
path = self.ExpandRuleVariables(
path, root, dirname, source, ext, basename)
if rel:
path = os.path.relpath(path, rel)
return path
vars = [(name, fix_path(value, outdir)) for name, value in vars]
output = [fix_path(p) for p in output]
vars.append(('outdir', outdir))
vars.append(('idlflags', flags))
input = self.GypPathToNinja(source)
self.ninja.build(output, 'idl', input,
variables=vars, order_only=prebuild)
outputs.extend(output)
def WriteWinIdlFiles(self, spec, prebuild):
"""Writes rules to match MSVS's implicit idl handling."""
assert self.flavor == 'win'
if self.msvs_settings.HasExplicitIdlRulesOrActions(spec):
return []
outputs = []
for source in filter(lambda x: x.endswith('.idl'), spec['sources']):
self._WinIdlRule(source, prebuild, outputs)
return outputs
def WriteActionsRulesCopies(self, spec, extra_sources, prebuild,
mac_bundle_depends):
"""Write out the Actions, Rules, and Copies steps. Return a path
representing the outputs of these steps."""
outputs = []
if self.is_mac_bundle:
mac_bundle_resources = spec.get('mac_bundle_resources', [])[:]
else:
mac_bundle_resources = []
extra_mac_bundle_resources = []
if 'actions' in spec:
outputs += self.WriteActions(spec['actions'], extra_sources, prebuild,
extra_mac_bundle_resources)
if 'rules' in spec:
outputs += self.WriteRules(spec['rules'], extra_sources, prebuild,
mac_bundle_resources,
extra_mac_bundle_resources)
if 'copies' in spec:
outputs += self.WriteCopies(spec['copies'], prebuild, mac_bundle_depends)
if 'sources' in spec and self.flavor == 'win':
outputs += self.WriteWinIdlFiles(spec, prebuild)
stamp = self.WriteCollapsedDependencies('actions_rules_copies', outputs)
if self.is_mac_bundle:
xcassets = self.WriteMacBundleResources(
extra_mac_bundle_resources + mac_bundle_resources, mac_bundle_depends)
partial_info_plist = self.WriteMacXCassets(xcassets, mac_bundle_depends)
self.WriteMacInfoPlist(partial_info_plist, mac_bundle_depends)
return stamp
def GenerateDescription(self, verb, message, fallback):
"""Generate and return a description of a build step.
|verb| is the short summary, e.g. ACTION or RULE.
|message| is a hand-written description, or None if not available.
|fallback| is the gyp-level name of the step, usable as a fallback.
"""
if self.toolset != 'target':
verb += '(%s)' % self.toolset
if message:
return '%s %s' % (verb, self.ExpandSpecial(message))
else:
return '%s %s: %s' % (verb, self.name, fallback)
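# Illustrative, for a target named 'foo' built with the default toolset:
#   GenerateDescription('ACTION', None, 'gen_version_abc123') -> 'ACTION foo: gen_version_abc123'
#   GenerateDescription('ACTION', 'Generating version.h', 'x') -> 'ACTION Generating version.h'
# A non-'target' toolset yields 'ACTION(host) ...' instead.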
def WriteActions(self, actions, extra_sources, prebuild,
extra_mac_bundle_resources):
# Actions cd into the base directory.
env = self.GetToolchainEnv()
all_outputs = []
for action in actions:
# First write out a rule for the action.
name = '%s_%s' % (action['action_name'], self.hash_for_rules)
description = self.GenerateDescription('ACTION',
action.get('message', None),
name)
is_cygwin = (self.msvs_settings.IsRuleRunUnderCygwin(action)
if self.flavor == 'win' else False)
args = action['action']
depfile = action.get('depfile', None)
if depfile:
depfile = self.ExpandSpecial(depfile, self.base_to_build)
pool = 'console' if int(action.get('ninja_use_console', 0)) else None
rule_name, _ = self.WriteNewNinjaRule(name, args, description,
is_cygwin, env, pool,
depfile=depfile)
inputs = [self.GypPathToNinja(i, env) for i in action['inputs']]
if int(action.get('process_outputs_as_sources', False)):
extra_sources += action['outputs']
if int(action.get('process_outputs_as_mac_bundle_resources', False)):
extra_mac_bundle_resources += action['outputs']
outputs = [self.GypPathToNinja(o, env) for o in action['outputs']]
# Then write out an edge using the rule.
self.ninja.build(outputs, rule_name, inputs,
order_only=prebuild)
all_outputs += outputs
self.ninja.newline()
return all_outputs
def WriteRules(self, rules, extra_sources, prebuild,
mac_bundle_resources, extra_mac_bundle_resources):
env = self.GetToolchainEnv()
all_outputs = []
for rule in rules:
# Skip a rule with no action and no inputs.
if 'action' not in rule and not rule.get('rule_sources', []):
continue
# First write out a rule for the rule action.
name = '%s_%s' % (rule['rule_name'], self.hash_for_rules)
args = rule['action']
description = self.GenerateDescription(
'RULE',
rule.get('message', None),
('%s ' + generator_default_variables['RULE_INPUT_PATH']) % name)
is_cygwin = (self.msvs_settings.IsRuleRunUnderCygwin(rule)
if self.flavor == 'win' else False)
pool = 'console' if int(rule.get('ninja_use_console', 0)) else None
rule_name, args = self.WriteNewNinjaRule(
name, args, description, is_cygwin, env, pool)
# TODO: if the command references the outputs directly, we should
# simplify it to just use $out.
# Rules can potentially make use of some special variables which
# must vary per source file.
# Compute the list of variables we'll need to provide.
special_locals = ('source', 'root', 'dirname', 'ext', 'name')
needed_variables = set(['source'])
for argument in args:
for var in special_locals:
if '${%s}' % var in argument:
needed_variables.add(var)
def cygwin_munge(path):
# pylint: disable=cell-var-from-loop
if is_cygwin:
return path.replace('\\', '/')
return path
inputs = [self.GypPathToNinja(i, env) for i in rule.get('inputs', [])]
# If there are n source files matching the rule, and m additional rule
# inputs, then adding 'inputs' to each build edge written below will
# write m * n inputs. Collapsing reduces this to m + n.
sources = rule.get('rule_sources', [])
num_inputs = len(inputs)
if prebuild:
num_inputs += 1
if num_inputs > 2 and len(sources) > 2:
inputs = [self.WriteCollapsedDependencies(
rule['rule_name'], inputs, order_only=prebuild)]
prebuild = []
# For each source file, write an edge that generates all the outputs.
for source in sources:
source = os.path.normpath(source)
dirname, basename = os.path.split(source)
root, ext = os.path.splitext(basename)
# Gather the list of inputs and outputs, expanding $vars if possible.
outputs = [self.ExpandRuleVariables(o, root, dirname,
source, ext, basename)
for o in rule['outputs']]
if int(rule.get('process_outputs_as_sources', False)):
extra_sources += outputs
was_mac_bundle_resource = source in mac_bundle_resources
if was_mac_bundle_resource or \
int(rule.get('process_outputs_as_mac_bundle_resources', False)):
extra_mac_bundle_resources += outputs
# Note: This is n_resources * n_outputs_in_rule. Put to-be-removed
# items in a set and remove them all in a single pass if this becomes
# a performance issue.
if was_mac_bundle_resource:
mac_bundle_resources.remove(source)
extra_bindings = []
for var in needed_variables:
if var == 'root':
extra_bindings.append(('root', cygwin_munge(root)))
elif var == 'dirname':
# '$dirname' is a parameter to the rule action, which means
# it shouldn't be converted to a Ninja path. But we don't
# want $!PRODUCT_DIR in there either.
dirname_expanded = self.ExpandSpecial(dirname, self.base_to_build)
extra_bindings.append(('dirname', cygwin_munge(dirname_expanded)))
elif var == 'source':
# '$source' is a parameter to the rule action, which means
# it shouldn't be converted to a Ninja path. But we don't
# want $!PRODUCT_DIR in there either.
source_expanded = self.ExpandSpecial(source, self.base_to_build)
extra_bindings.append(('source', cygwin_munge(source_expanded)))
elif var == 'ext':
extra_bindings.append(('ext', ext))
elif var == 'name':
extra_bindings.append(('name', cygwin_munge(basename)))
else:
assert var == None, repr(var)
outputs = [self.GypPathToNinja(o, env) for o in outputs]
if self.flavor == 'win':
# WriteNewNinjaRule uses unique_name for creating an rsp file on win.
extra_bindings.append(('unique_name',
hashlib.md5(outputs[0]).hexdigest()))
self.ninja.build(outputs, rule_name, self.GypPathToNinja(source),
implicit=inputs,
order_only=prebuild,
variables=extra_bindings)
all_outputs.extend(outputs)
return all_outputs
def WriteCopies(self, copies, prebuild, mac_bundle_depends):
outputs = []
env = self.GetToolchainEnv()
for copy in copies:
for path in copy['files']:
# Normalize the path so trailing slashes don't confuse us.
path = os.path.normpath(path)
basename = os.path.split(path)[1]
src = self.GypPathToNinja(path, env)
dst = self.GypPathToNinja(os.path.join(copy['destination'], basename),
env)
outputs += self.ninja.build(dst, 'copy', src, order_only=prebuild)
if self.is_mac_bundle:
# gyp has mac_bundle_resources to copy things into a bundle's
# Resources folder, but there's no built-in way to copy files to other
# places in the bundle. Hence, some targets use copies for this. Check
# if this file is copied into the current bundle, and if so add it to
# the bundle depends so that dependent targets get rebuilt if the copy
# input changes.
if dst.startswith(self.xcode_settings.GetBundleContentsFolderPath()):
mac_bundle_depends.append(dst)
return outputs
def WriteMacBundleResources(self, resources, bundle_depends):
"""Writes ninja edges for 'mac_bundle_resources'."""
xcassets = []
for output, res in gyp.xcode_emulation.GetMacBundleResources(
generator_default_variables['PRODUCT_DIR'],
self.xcode_settings, map(self.GypPathToNinja, resources)):
output = self.ExpandSpecial(output)
if os.path.splitext(output)[-1] != '.xcassets':
isBinary = self.xcode_settings.IsBinaryOutputFormat(self.config_name)
self.ninja.build(output, 'mac_tool', res,
variables=[('mactool_cmd', 'copy-bundle-resource'), \
('binary', isBinary)])
bundle_depends.append(output)
else:
xcassets.append(res)
return xcassets
def WriteMacXCassets(self, xcassets, bundle_depends):
"""Writes ninja edges for 'mac_bundle_resources' .xcassets files.
This adds an invocation of 'actool' via the 'mac_tool.py' helper script.
It assumes that the asset catalogs define at least one imageset and
thus an Assets.car file will be generated in the application resources
directory. If this is not the case, then the build will probably be done
at each invocation of ninja."""
if not xcassets:
return
extra_arguments = {}
settings_to_arg = {
'XCASSETS_APP_ICON': 'app-icon',
'XCASSETS_LAUNCH_IMAGE': 'launch-image',
}
settings = self.xcode_settings.xcode_settings[self.config_name]
for settings_key, arg_name in settings_to_arg.iteritems():
value = settings.get(settings_key)
if value:
extra_arguments[arg_name] = value
partial_info_plist = None
if extra_arguments:
partial_info_plist = self.GypPathToUniqueOutput(
'assetcatalog_generated_info.plist')
extra_arguments['output-partial-info-plist'] = partial_info_plist
outputs = []
outputs.append(
os.path.join(
self.xcode_settings.GetBundleResourceFolder(),
'Assets.car'))
if partial_info_plist:
outputs.append(partial_info_plist)
keys = QuoteShellArgument(json.dumps(extra_arguments), self.flavor)
extra_env = self.xcode_settings.GetPerTargetSettings()
env = self.GetSortedXcodeEnv(additional_settings=extra_env)
env = self.ComputeExportEnvString(env)
bundle_depends.extend(self.ninja.build(
outputs, 'compile_xcassets', xcassets,
variables=[('env', env), ('keys', keys)]))
return partial_info_plist
def WriteMacInfoPlist(self, partial_info_plist, bundle_depends):
"""Write build rules for bundle Info.plist files."""
info_plist, out, defines, extra_env = gyp.xcode_emulation.GetMacInfoPlist(
generator_default_variables['PRODUCT_DIR'],
self.xcode_settings, self.GypPathToNinja)
if not info_plist:
return
out = self.ExpandSpecial(out)
if defines:
# Create an intermediate file to store preprocessed results.
intermediate_plist = self.GypPathToUniqueOutput(
os.path.basename(info_plist))
defines = ' '.join([Define(d, self.flavor) for d in defines])
info_plist = self.ninja.build(
intermediate_plist, 'preprocess_infoplist', info_plist,
variables=[('defines',defines)])
env = self.GetSortedXcodeEnv(additional_settings=extra_env)
env = self.ComputeExportEnvString(env)
if partial_info_plist:
intermediate_plist = self.GypPathToUniqueOutput('merged_info.plist')
info_plist = self.ninja.build(
intermediate_plist, 'merge_infoplist',
[partial_info_plist, info_plist])
keys = self.xcode_settings.GetExtraPlistItems(self.config_name)
keys = QuoteShellArgument(json.dumps(keys), self.flavor)
isBinary = self.xcode_settings.IsBinaryOutputFormat(self.config_name)
self.ninja.build(out, 'copy_infoplist', info_plist,
variables=[('env', env), ('keys', keys),
('binary', isBinary)])
bundle_depends.append(out)
def WriteSources(self, ninja_file, config_name, config, sources, predepends,
precompiled_header, spec):
"""Write build rules to compile all of |sources|."""
if self.toolset == 'host':
self.ninja.variable('ar', '$ar_host')
self.ninja.variable('cc', '$cc_host')
self.ninja.variable('cxx', '$cxx_host')
self.ninja.variable('ld', '$ld_host')
self.ninja.variable('ldxx', '$ldxx_host')
self.ninja.variable('nm', '$nm_host')
self.ninja.variable('readelf', '$readelf_host')
if self.flavor != 'mac' or len(self.archs) == 1:
return self.WriteSourcesForArch(
self.ninja, config_name, config, sources, predepends,
precompiled_header, spec)
else:
return dict((arch, self.WriteSourcesForArch(
self.arch_subninjas[arch], config_name, config, sources, predepends,
precompiled_header, spec, arch=arch))
for arch in self.archs)
def WriteSourcesForArch(self, ninja_file, config_name, config, sources,
predepends, precompiled_header, spec, arch=None):
"""Write build rules to compile all of |sources|."""
extra_defines = []
if self.flavor == 'mac':
cflags = self.xcode_settings.GetCflags(config_name, arch=arch)
cflags_c = self.xcode_settings.GetCflagsC(config_name)
cflags_cc = self.xcode_settings.GetCflagsCC(config_name)
cflags_objc = ['$cflags_c'] + \
self.xcode_settings.GetCflagsObjC(config_name)
cflags_objcc = ['$cflags_cc'] + \
self.xcode_settings.GetCflagsObjCC(config_name)
elif self.flavor == 'win':
asmflags = self.msvs_settings.GetAsmflags(config_name)
cflags = self.msvs_settings.GetCflags(config_name)
cflags_c = self.msvs_settings.GetCflagsC(config_name)
cflags_cc = self.msvs_settings.GetCflagsCC(config_name)
extra_defines = self.msvs_settings.GetComputedDefines(config_name)
# See the comment at cc_command for why there are two .pdb files.
pdbpath_c = pdbpath_cc = self.msvs_settings.GetCompilerPdbName(
config_name, self.ExpandSpecial)
if not pdbpath_c:
obj = 'obj'
if self.toolset != 'target':
obj += '.' + self.toolset
pdbpath = os.path.normpath(os.path.join(obj, self.base_dir, self.name))
pdbpath_c = pdbpath + '.c.pdb'
pdbpath_cc = pdbpath + '.cc.pdb'
self.WriteVariableList(ninja_file, 'pdbname_c', [pdbpath_c])
self.WriteVariableList(ninja_file, 'pdbname_cc', [pdbpath_cc])
self.WriteVariableList(ninja_file, 'pchprefix', [self.name])
else:
cflags = config.get('cflags', [])
cflags_c = config.get('cflags_c', [])
cflags_cc = config.get('cflags_cc', [])
# Respect environment variables related to build, but target-specific
# flags can still override them.
if self.toolset == 'target':
cflags_c = (os.environ.get('CPPFLAGS', '').split() +
os.environ.get('CFLAGS', '').split() + cflags_c)
cflags_cc = (os.environ.get('CPPFLAGS', '').split() +
os.environ.get('CXXFLAGS', '').split() + cflags_cc)
elif self.toolset == 'host':
cflags_c = (os.environ.get('CPPFLAGS_host', '').split() +
os.environ.get('CFLAGS_host', '').split() + cflags_c)
cflags_cc = (os.environ.get('CPPFLAGS_host', '').split() +
os.environ.get('CXXFLAGS_host', '').split() + cflags_cc)
defines = config.get('defines', []) + extra_defines
self.WriteVariableList(ninja_file, 'defines',
[Define(d, self.flavor) for d in defines])
if self.flavor == 'win':
self.WriteVariableList(ninja_file, 'asmflags',
map(self.ExpandSpecial, asmflags))
self.WriteVariableList(ninja_file, 'rcflags',
[QuoteShellArgument(self.ExpandSpecial(f), self.flavor)
for f in self.msvs_settings.GetRcflags(config_name,
self.GypPathToNinja)])
include_dirs = config.get('include_dirs', [])
env = self.GetToolchainEnv()
if self.flavor == 'win':
include_dirs = self.msvs_settings.AdjustIncludeDirs(include_dirs,
config_name)
self.WriteVariableList(ninja_file, 'includes',
[QuoteShellArgument('-I' + self.GypPathToNinja(i, env), self.flavor)
for i in include_dirs])
if self.flavor == 'win':
midl_include_dirs = config.get('midl_include_dirs', [])
midl_include_dirs = self.msvs_settings.AdjustMidlIncludeDirs(
midl_include_dirs, config_name)
self.WriteVariableList(ninja_file, 'midl_includes',
[QuoteShellArgument('-I' + self.GypPathToNinja(i, env), self.flavor)
for i in midl_include_dirs])
pch_commands = precompiled_header.GetPchBuildCommands(arch)
if self.flavor == 'mac':
# Most targets use no precompiled headers, so only write these if needed.
for ext, var in [('c', 'cflags_pch_c'), ('cc', 'cflags_pch_cc'),
('m', 'cflags_pch_objc'), ('mm', 'cflags_pch_objcc')]:
include = precompiled_header.GetInclude(ext, arch)
if include: ninja_file.variable(var, include)
arflags = config.get('arflags', [])
self.WriteVariableList(ninja_file, 'cflags',
map(self.ExpandSpecial, cflags))
self.WriteVariableList(ninja_file, 'cflags_c',
map(self.ExpandSpecial, cflags_c))
self.WriteVariableList(ninja_file, 'cflags_cc',
map(self.ExpandSpecial, cflags_cc))
if self.flavor == 'mac':
self.WriteVariableList(ninja_file, 'cflags_objc',
map(self.ExpandSpecial, cflags_objc))
self.WriteVariableList(ninja_file, 'cflags_objcc',
map(self.ExpandSpecial, cflags_objcc))
self.WriteVariableList(ninja_file, 'arflags',
map(self.ExpandSpecial, arflags))
ninja_file.newline()
outputs = []
has_rc_source = False
for source in sources:
filename, ext = os.path.splitext(source)
ext = ext[1:]
obj_ext = self.obj_ext
if ext in ('cc', 'cpp', 'cxx'):
command = 'cxx'
self.uses_cpp = True
elif ext == 'c' or (ext == 'S' and self.flavor != 'win'):
command = 'cc'
elif ext == 's' and self.flavor != 'win': # Doesn't generate .o.d files.
command = 'cc_s'
elif (self.flavor == 'win' and ext == 'asm' and
not self.msvs_settings.HasExplicitAsmRules(spec)):
command = 'asm'
# Add the _asm suffix as msvs is capable of handling .cc and
# .asm files of the same name without collision.
obj_ext = '_asm.obj'
elif self.flavor == 'mac' and ext == 'm':
command = 'objc'
elif self.flavor == 'mac' and ext == 'mm':
command = 'objcxx'
self.uses_cpp = True
elif self.flavor == 'win' and ext == 'rc':
command = 'rc'
obj_ext = '.res'
has_rc_source = True
else:
# Ignore unhandled extensions.
continue
input = self.GypPathToNinja(source)
output = self.GypPathToUniqueOutput(filename + obj_ext)
if arch is not None:
output = AddArch(output, arch)
implicit = precompiled_header.GetObjDependencies([input], [output], arch)
variables = []
if self.flavor == 'win':
variables, output, implicit = precompiled_header.GetFlagsModifications(
input, output, implicit, command, cflags_c, cflags_cc,
self.ExpandSpecial)
ninja_file.build(output, command, input,
implicit=[gch for _, _, gch in implicit],
order_only=predepends, variables=variables)
outputs.append(output)
if has_rc_source:
resource_include_dirs = config.get('resource_include_dirs', include_dirs)
self.WriteVariableList(ninja_file, 'resource_includes',
[QuoteShellArgument('-I' + self.GypPathToNinja(i, env), self.flavor)
for i in resource_include_dirs])
self.WritePchTargets(ninja_file, pch_commands)
ninja_file.newline()
return outputs
def WritePchTargets(self, ninja_file, pch_commands):
"""Writes ninja rules to compile prefix headers."""
if not pch_commands:
return
for gch, lang_flag, lang, input in pch_commands:
var_name = {
'c': 'cflags_pch_c',
'cc': 'cflags_pch_cc',
'm': 'cflags_pch_objc',
'mm': 'cflags_pch_objcc',
}[lang]
map = { 'c': 'cc', 'cc': 'cxx', 'm': 'objc', 'mm': 'objcxx', }
cmd = map.get(lang)
ninja_file.build(gch, cmd, input, variables=[(var_name, lang_flag)])
def WriteLink(self, spec, config_name, config, link_deps):
"""Write out a link step. Fills out target.binary. """
if self.flavor != 'mac' or len(self.archs) == 1:
return self.WriteLinkForArch(
self.ninja, spec, config_name, config, link_deps)
else:
output = self.ComputeOutput(spec)
inputs = [self.WriteLinkForArch(self.arch_subninjas[arch], spec,
config_name, config, link_deps[arch],
arch=arch)
for arch in self.archs]
extra_bindings = []
build_output = output
if not self.is_mac_bundle:
self.AppendPostbuildVariable(extra_bindings, spec, output, output)
# TODO(yyanagisawa): more work needed to fix:
# https://code.google.com/p/gyp/issues/detail?id=411
if (spec['type'] in ('shared_library', 'loadable_module') and
not self.is_mac_bundle):
extra_bindings.append(('lib', output))
self.ninja.build([output, output + '.TOC'], 'solipo', inputs,
variables=extra_bindings)
else:
self.ninja.build(build_output, 'lipo', inputs, variables=extra_bindings)
return output
def WriteLinkForArch(self, ninja_file, spec, config_name, config,
link_deps, arch=None):
"""Write out a link step. Fills out target.binary. """
command = {
'executable': 'link',
'loadable_module': 'solink_module',
'shared_library': 'solink',
}[spec['type']]
command_suffix = ''
implicit_deps = set()
solibs = set()
order_deps = set()
if 'dependencies' in spec:
# Two kinds of dependencies:
# - Linkable dependencies (like a .a or a .so): add them to the link line.
# - Non-linkable dependencies (like a rule that generates a file
# and writes a stamp file): add them to implicit_deps
extra_link_deps = set()
for dep in spec['dependencies']:
target = self.target_outputs.get(dep)
if not target:
continue
linkable = target.Linkable()
if linkable:
new_deps = []
if (self.flavor == 'win' and
target.component_objs and
self.msvs_settings.IsUseLibraryDependencyInputs(config_name)):
new_deps = target.component_objs
if target.compile_deps:
order_deps.add(target.compile_deps)
elif self.flavor == 'win' and target.import_lib:
new_deps = [target.import_lib]
elif target.UsesToc(self.flavor):
solibs.add(target.binary)
implicit_deps.add(target.binary + '.TOC')
else:
new_deps = [target.binary]
for new_dep in new_deps:
if new_dep not in extra_link_deps:
extra_link_deps.add(new_dep)
link_deps.append(new_dep)
final_output = target.FinalOutput()
if not linkable or final_output != target.binary:
implicit_deps.add(final_output)
extra_bindings = []
if self.uses_cpp and self.flavor != 'win':
extra_bindings.append(('ld', '$ldxx'))
output = self.ComputeOutput(spec, arch)
if arch is None and not self.is_mac_bundle:
self.AppendPostbuildVariable(extra_bindings, spec, output, output)
is_executable = spec['type'] == 'executable'
# The ldflags config key is not used on mac or win. On those platforms
# linker flags are set via xcode_settings and msvs_settings, respectively.
env_ldflags = os.environ.get('LDFLAGS', '').split()
if self.flavor == 'mac':
ldflags = self.xcode_settings.GetLdflags(config_name,
self.ExpandSpecial(generator_default_variables['PRODUCT_DIR']),
self.GypPathToNinja, arch)
ldflags = env_ldflags + ldflags
elif self.flavor == 'win':
manifest_base_name = self.GypPathToUniqueOutput(
self.ComputeOutputFileName(spec))
ldflags, intermediate_manifest, manifest_files = \
self.msvs_settings.GetLdflags(config_name, self.GypPathToNinja,
self.ExpandSpecial, manifest_base_name,
output, is_executable,
self.toplevel_build)
ldflags = env_ldflags + ldflags
self.WriteVariableList(ninja_file, 'manifests', manifest_files)
implicit_deps = implicit_deps.union(manifest_files)
if intermediate_manifest:
self.WriteVariableList(
ninja_file, 'intermediatemanifest', [intermediate_manifest])
command_suffix = _GetWinLinkRuleNameSuffix(
self.msvs_settings.IsEmbedManifest(config_name))
def_file = self.msvs_settings.GetDefFile(self.GypPathToNinja)
if def_file:
implicit_deps.add(def_file)
else:
# Respect environment variables related to build, but target-specific
# flags can still override them.
ldflags = env_ldflags + config.get('ldflags', [])
if is_executable and len(solibs):
rpath = 'lib/'
if self.toolset != 'target':
rpath += self.toolset
ldflags.append(r'-Wl,-rpath=\$$ORIGIN/%s' % rpath)
ldflags.append('-Wl,-rpath-link=%s' % rpath)
self.WriteVariableList(ninja_file, 'ldflags',
map(self.ExpandSpecial, ldflags))
library_dirs = config.get('library_dirs', [])
if self.flavor == 'win':
library_dirs = [self.msvs_settings.ConvertVSMacros(l, config_name)
for l in library_dirs]
library_dirs = ['/LIBPATH:' + QuoteShellArgument(self.GypPathToNinja(l),
self.flavor)
for l in library_dirs]
else:
library_dirs = [QuoteShellArgument('-L' + self.GypPathToNinja(l),
self.flavor)
for l in library_dirs]
libraries = gyp.common.uniquer(map(self.ExpandSpecial,
spec.get('libraries', [])))
if self.flavor == 'mac':
libraries = self.xcode_settings.AdjustLibraries(libraries, config_name)
elif self.flavor == 'win':
libraries = self.msvs_settings.AdjustLibraries(libraries)
self.WriteVariableList(ninja_file, 'libs', library_dirs + libraries)
linked_binary = output
if command in ('solink', 'solink_module'):
extra_bindings.append(('soname', os.path.split(output)[1]))
extra_bindings.append(('lib',
gyp.common.EncodePOSIXShellArgument(output)))
if self.flavor != 'win':
link_file_list = output
if self.is_mac_bundle:
# 'Dependency Framework.framework/Versions/A/Dependency Framework' ->
# 'Dependency Framework.framework.rsp'
link_file_list = self.xcode_settings.GetWrapperName()
if arch:
link_file_list += '.' + arch
link_file_list += '.rsp'
# If an rspfile name contains spaces, ninja surrounds the filename with
# quotes and then passes it to open(), creating a file with
# quotes in its name (and when looking for the rsp file, the name
# makes it through bash which strips the quotes) :-/
link_file_list = link_file_list.replace(' ', '_')
extra_bindings.append(
('link_file_list',
gyp.common.EncodePOSIXShellArgument(link_file_list)))
if self.flavor == 'win':
extra_bindings.append(('binary', output))
if ('/NOENTRY' not in ldflags and
not self.msvs_settings.GetNoImportLibrary(config_name)):
self.target.import_lib = output + '.lib'
extra_bindings.append(('implibflag',
'/IMPLIB:%s' % self.target.import_lib))
pdbname = self.msvs_settings.GetPDBName(
config_name, self.ExpandSpecial, output + '.pdb')
output = [output, self.target.import_lib]
if pdbname:
output.append(pdbname)
elif not self.is_mac_bundle:
output = [output, output + '.TOC']
else:
command = command + '_notoc'
elif self.flavor == 'win':
extra_bindings.append(('binary', output))
pdbname = self.msvs_settings.GetPDBName(
config_name, self.ExpandSpecial, output + '.pdb')
if pdbname:
output = [output, pdbname]
if len(solibs):
extra_bindings.append(('solibs', gyp.common.EncodePOSIXShellList(solibs)))
ninja_file.build(output, command + command_suffix, link_deps,
implicit=list(implicit_deps),
order_only=list(order_deps),
variables=extra_bindings)
return linked_binary
def WriteTarget(self, spec, config_name, config, link_deps, compile_deps):
extra_link_deps = any(self.target_outputs.get(dep).Linkable()
for dep in spec.get('dependencies', [])
if dep in self.target_outputs)
if spec['type'] == 'none' or (not link_deps and not extra_link_deps):
# TODO(evan): don't call this function for 'none' target types, as
# it doesn't do anything, and we fake out a 'binary' with a stamp file.
self.target.binary = compile_deps
self.target.type = 'none'
elif spec['type'] == 'static_library':
self.target.binary = self.ComputeOutput(spec)
if (self.flavor not in ('mac', 'openbsd', 'netbsd', 'win') and not
self.is_standalone_static_library):
self.ninja.build(self.target.binary, 'alink_thin', link_deps,
order_only=compile_deps)
else:
variables = []
if self.xcode_settings:
libtool_flags = self.xcode_settings.GetLibtoolflags(config_name)
if libtool_flags:
variables.append(('libtool_flags', libtool_flags))
if self.msvs_settings:
libflags = self.msvs_settings.GetLibFlags(config_name,
self.GypPathToNinja)
variables.append(('libflags', libflags))
if self.flavor != 'mac' or len(self.archs) == 1:
self.AppendPostbuildVariable(variables, spec,
self.target.binary, self.target.binary)
self.ninja.build(self.target.binary, 'alink', link_deps,
order_only=compile_deps, variables=variables)
else:
inputs = []
for arch in self.archs:
output = self.ComputeOutput(spec, arch)
self.arch_subninjas[arch].build(output, 'alink', link_deps[arch],
order_only=compile_deps,
variables=variables)
inputs.append(output)
# TODO: It's not clear if libtool_flags should be passed to the alink
# call that combines single-arch .a files into a fat .a file.
self.AppendPostbuildVariable(variables, spec,
self.target.binary, self.target.binary)
self.ninja.build(self.target.binary, 'alink', inputs,
# FIXME: test proving order_only=compile_deps isn't
# needed.
variables=variables)
else:
self.target.binary = self.WriteLink(spec, config_name, config, link_deps)
return self.target.binary
def WriteMacBundle(self, spec, mac_bundle_depends, is_empty):
assert self.is_mac_bundle
package_framework = spec['type'] in ('shared_library', 'loadable_module')
output = self.ComputeMacBundleOutput()
if is_empty:
output += '.stamp'
variables = []
self.AppendPostbuildVariable(variables, spec, output, self.target.binary,
is_command_start=not package_framework)
if package_framework and not is_empty:
variables.append(('version', self.xcode_settings.GetFrameworkVersion()))
self.ninja.build(output, 'package_framework', mac_bundle_depends,
variables=variables)
else:
self.ninja.build(output, 'stamp', mac_bundle_depends,
variables=variables)
self.target.bundle = output
return output
def GetToolchainEnv(self, additional_settings=None):
"""Returns the variables toolchain would set for build steps."""
env = self.GetSortedXcodeEnv(additional_settings=additional_settings)
if self.flavor == 'win':
env = self.GetMsvsToolchainEnv(
additional_settings=additional_settings)
return env
def GetMsvsToolchainEnv(self, additional_settings=None):
"""Returns the variables Visual Studio would set for build steps."""
return self.msvs_settings.GetVSMacroEnv('$!PRODUCT_DIR',
config=self.config_name)
def GetSortedXcodeEnv(self, additional_settings=None):
"""Returns the variables Xcode would set for build steps."""
assert self.abs_build_dir
abs_build_dir = self.abs_build_dir
return gyp.xcode_emulation.GetSortedXcodeEnv(
self.xcode_settings, abs_build_dir,
os.path.join(abs_build_dir, self.build_to_base), self.config_name,
additional_settings)
def GetSortedXcodePostbuildEnv(self):
"""Returns the variables Xcode would set for postbuild steps."""
postbuild_settings = {}
# CHROMIUM_STRIP_SAVE_FILE is a chromium-specific hack.
# TODO(thakis): It would be nice to have some general mechanism instead.
strip_save_file = self.xcode_settings.GetPerTargetSetting(
'CHROMIUM_STRIP_SAVE_FILE')
if strip_save_file:
postbuild_settings['CHROMIUM_STRIP_SAVE_FILE'] = strip_save_file
return self.GetSortedXcodeEnv(additional_settings=postbuild_settings)
def AppendPostbuildVariable(self, variables, spec, output, binary,
is_command_start=False):
"""Adds a 'postbuild' variable if there is a postbuild for |output|."""
postbuild = self.GetPostbuildCommand(spec, output, binary, is_command_start)
if postbuild:
variables.append(('postbuilds', postbuild))
def GetPostbuildCommand(self, spec, output, output_binary, is_command_start):
"""Returns a shell command that runs all the postbuilds, and removes
|output| if any of them fails. If |is_command_start| is False, then the
returned string will start with ' && '."""
if not self.xcode_settings or spec['type'] == 'none' or not output:
return ''
output = QuoteShellArgument(output, self.flavor)
postbuilds = gyp.xcode_emulation.GetSpecPostbuildCommands(spec, quiet=True)
if output_binary is not None:
postbuilds = self.xcode_settings.AddImplicitPostbuilds(
self.config_name,
os.path.normpath(os.path.join(self.base_to_build, output)),
QuoteShellArgument(
os.path.normpath(os.path.join(self.base_to_build, output_binary)),
self.flavor),
postbuilds, quiet=True)
if not postbuilds:
return ''
# Postbuilds expect to be run in the gyp file's directory, so insert an
# implicit postbuild to cd to there.
postbuilds.insert(0, gyp.common.EncodePOSIXShellList(
['cd', self.build_to_base]))
env = self.ComputeExportEnvString(self.GetSortedXcodePostbuildEnv())
# G will be nonzero if any postbuild fails. Run all postbuilds in a
# subshell.
commands = env + ' (' + \
' && '.join([ninja_syntax.escape(command) for command in postbuilds])
command_string = (commands + '); G=$$?; '
# Remove the final output if any postbuild failed.
'((exit $$G) || rm -rf %s) ' % output + '&& exit $$G)')
if is_command_start:
return '(' + command_string + ' && '
else:
return '$ && (' + command_string
def ComputeExportEnvString(self, env):
"""Given an environment, returns a string looking like
'export FOO=foo; export BAR="${FOO} bar";'
that exports |env| to the shell."""
export_str = []
for k, v in env:
export_str.append('export %s=%s;' %
(k, ninja_syntax.escape(gyp.common.EncodePOSIXShellArgument(v))))
return ' '.join(export_str)
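# Illustrative note (not part of the original generator): for an env list such
# as [('FOO', 'foo'), ('BAR', '${FOO} bar')], the method above produces roughly
#   export FOO=foo; export BAR="$${FOO} bar";
# where each value is shell-quoted by EncodePOSIXShellArgument and any '$' is
# doubled by ninja_syntax.escape so that ninja passes it through literally.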
def ComputeMacBundleOutput(self):
"""Return the 'output' (full output path) to a bundle output directory."""
assert self.is_mac_bundle
path = generator_default_variables['PRODUCT_DIR']
return self.ExpandSpecial(
os.path.join(path, self.xcode_settings.GetWrapperName()))
def ComputeOutputFileName(self, spec, type=None):
"""Compute the filename of the final output for the current target."""
if not type:
type = spec['type']
default_variables = copy.copy(generator_default_variables)
CalculateVariables(default_variables, {'flavor': self.flavor})
# Compute filename prefix: the product prefix, or a default for
# the product type.
DEFAULT_PREFIX = {
'loadable_module': default_variables['SHARED_LIB_PREFIX'],
'shared_library': default_variables['SHARED_LIB_PREFIX'],
'static_library': default_variables['STATIC_LIB_PREFIX'],
'executable': default_variables['EXECUTABLE_PREFIX'],
}
prefix = spec.get('product_prefix', DEFAULT_PREFIX.get(type, ''))
# Compute filename extension: the product extension, or a default
# for the product type.
DEFAULT_EXTENSION = {
'loadable_module': default_variables['SHARED_LIB_SUFFIX'],
'shared_library': default_variables['SHARED_LIB_SUFFIX'],
'static_library': default_variables['STATIC_LIB_SUFFIX'],
'executable': default_variables['EXECUTABLE_SUFFIX'],
}
extension = spec.get('product_extension')
if extension:
extension = '.' + extension
else:
extension = DEFAULT_EXTENSION.get(type, '')
if 'product_name' in spec:
# If we were given an explicit name, use that.
target = spec['product_name']
else:
# Otherwise, derive a name from the target name.
target = spec['target_name']
if prefix == 'lib':
# Snip out an extra 'lib' from libs if appropriate.
target = StripPrefix(target, 'lib')
if type in ('static_library', 'loadable_module', 'shared_library',
'executable'):
return '%s%s%s' % (prefix, target, extension)
elif type == 'none':
return '%s.stamp' % target
else:
raise Exception('Unhandled output type %s' % type)
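# Illustrative note (not part of the original generator): with the default
# prefix/extension tables above, a 'shared_library' target named 'foo' would
# typically become 'libfoo.so' on Linux, 'foo.dll' on Windows and 'libfoo.dylib'
# on Mac, while a 'none' target maps to the stamp file 'foo.stamp'.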
def ComputeOutput(self, spec, arch=None):
"""Compute the path for the final output of the spec."""
type = spec['type']
if self.flavor == 'win':
override = self.msvs_settings.GetOutputName(self.config_name,
self.ExpandSpecial)
if override:
return override
if arch is None and self.flavor == 'mac' and type in (
'static_library', 'executable', 'shared_library', 'loadable_module'):
filename = self.xcode_settings.GetExecutablePath()
else:
filename = self.ComputeOutputFileName(spec, type)
if arch is None and 'product_dir' in spec:
path = os.path.join(spec['product_dir'], filename)
return self.ExpandSpecial(path)
# Some products go into the output root, libraries go into shared library
# dir, and everything else goes into the normal place.
type_in_output_root = ['executable', 'loadable_module']
if self.flavor == 'mac' and self.toolset == 'target':
type_in_output_root += ['shared_library', 'static_library']
elif self.flavor == 'win' and self.toolset == 'target':
type_in_output_root += ['shared_library']
if arch is not None:
# Make sure partial executables don't end up in a bundle or the regular
# output directory.
archdir = 'arch'
if self.toolset != 'target':
archdir = os.path.join('arch', '%s' % self.toolset)
return os.path.join(archdir, AddArch(filename, arch))
elif type in type_in_output_root or self.is_standalone_static_library:
return filename
elif type == 'shared_library':
libdir = 'lib'
if self.toolset != 'target':
libdir = os.path.join('lib', '%s' % self.toolset)
return os.path.join(libdir, filename)
else:
return self.GypPathToUniqueOutput(filename, qualified=False)
def WriteVariableList(self, ninja_file, var, values):
assert not isinstance(values, str)
if values is None:
values = []
ninja_file.variable(var, ' '.join(values))
def WriteNewNinjaRule(self, name, args, description, is_cygwin, env, pool,
depfile=None):
"""Write out a new ninja "rule" statement for a given command.
Returns the name of the new rule, and a copy of |args| with variables
expanded."""
if self.flavor == 'win':
args = [self.msvs_settings.ConvertVSMacros(
arg, self.base_to_build, config=self.config_name)
for arg in args]
description = self.msvs_settings.ConvertVSMacros(
description, config=self.config_name)
elif self.flavor == 'mac':
# |env| is an empty list on non-mac.
args = [gyp.xcode_emulation.ExpandEnvVars(arg, env) for arg in args]
description = gyp.xcode_emulation.ExpandEnvVars(description, env)
# TODO: we shouldn't need to qualify names; we do it because
# currently the ninja rule namespace is global, but it really
# should be scoped to the subninja.
rule_name = self.name
if self.toolset == 'target':
rule_name += '.' + self.toolset
rule_name += '.' + name
rule_name = re.sub('[^a-zA-Z0-9_]', '_', rule_name)
# Remove variable references, but not if they refer to the magic rule
# variables. This is not quite right, as it also protects these for
# actions, not just for rules where they are valid. Good enough.
protect = [ '${root}', '${dirname}', '${source}', '${ext}', '${name}' ]
protect = '(?!' + '|'.join(map(re.escape, protect)) + ')'
description = re.sub(protect + r'\$', '_', description)
# gyp dictates that commands are run from the base directory.
# cd into the directory before running, and adjust paths in
# the arguments to point to the proper locations.
rspfile = None
rspfile_content = None
args = [self.ExpandSpecial(arg, self.base_to_build) for arg in args]
if self.flavor == 'win':
rspfile = rule_name + '.$unique_name.rsp'
# The cygwin case handles this inside the bash sub-shell.
run_in = '' if is_cygwin else ' ' + self.build_to_base
if is_cygwin:
rspfile_content = self.msvs_settings.BuildCygwinBashCommandLine(
args, self.build_to_base)
else:
rspfile_content = gyp.msvs_emulation.EncodeRspFileList(args)
command = ('%s gyp-win-tool action-wrapper $arch ' % sys.executable +
rspfile + run_in)
else:
env = self.ComputeExportEnvString(env)
command = gyp.common.EncodePOSIXShellList(args)
command = 'cd %s; ' % self.build_to_base + env + command
# GYP rules/actions express being no-ops by not touching their outputs.
# Avoid executing downstream dependencies in this case by specifying
# restat=1 to ninja.
self.ninja.rule(rule_name, command, description, depfile=depfile,
restat=True, pool=pool,
rspfile=rspfile, rspfile_content=rspfile_content)
self.ninja.newline()
return rule_name, args
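# Illustrative note (not part of the original generator): WriteNewNinjaRule
# qualifies rule names with the target name (plus the toolset when it is
# 'target'), then rewrites every character outside [a-zA-Z0-9_] to '_'. For
# example, a rule 'my-action' in a target 'my_target' built with the 'target'
# toolset would be emitted as 'my_target_target_my_action'.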
def CalculateVariables(default_variables, params):
"""Calculate additional variables for use in the build (called by gyp)."""
global generator_additional_non_configuration_keys
global generator_additional_path_sections
flavor = gyp.common.GetFlavor(params)
if flavor == 'mac':
default_variables.setdefault('OS', 'mac')
default_variables.setdefault('SHARED_LIB_SUFFIX', '.dylib')
default_variables.setdefault('SHARED_LIB_DIR',
generator_default_variables['PRODUCT_DIR'])
default_variables.setdefault('LIB_DIR',
generator_default_variables['PRODUCT_DIR'])
# Copy additional generator configuration data from Xcode, which is shared
# by the Mac Ninja generator.
import gyp.generator.xcode as xcode_generator
generator_additional_non_configuration_keys = getattr(xcode_generator,
'generator_additional_non_configuration_keys', [])
generator_additional_path_sections = getattr(xcode_generator,
'generator_additional_path_sections', [])
global generator_extra_sources_for_rules
generator_extra_sources_for_rules = getattr(xcode_generator,
'generator_extra_sources_for_rules', [])
elif flavor == 'win':
exts = gyp.MSVSUtil.TARGET_TYPE_EXT
default_variables.setdefault('OS', 'win')
default_variables['EXECUTABLE_SUFFIX'] = '.' + exts['executable']
default_variables['STATIC_LIB_PREFIX'] = ''
default_variables['STATIC_LIB_SUFFIX'] = '.' + exts['static_library']
default_variables['SHARED_LIB_PREFIX'] = ''
default_variables['SHARED_LIB_SUFFIX'] = '.' + exts['shared_library']
# Copy additional generator configuration data from VS, which is shared
# by the Windows Ninja generator.
import gyp.generator.msvs as msvs_generator
generator_additional_non_configuration_keys = getattr(msvs_generator,
'generator_additional_non_configuration_keys', [])
generator_additional_path_sections = getattr(msvs_generator,
'generator_additional_path_sections', [])
gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)
else:
operating_system = flavor
if flavor == 'android':
operating_system = 'linux' # Keep this legacy behavior for now.
default_variables.setdefault('OS', operating_system)
default_variables.setdefault('SHARED_LIB_SUFFIX', '.so')
default_variables.setdefault('SHARED_LIB_DIR',
os.path.join('$!PRODUCT_DIR', 'lib'))
default_variables.setdefault('LIB_DIR',
os.path.join('$!PRODUCT_DIR', 'obj'))
def ComputeOutputDir(params):
"""Returns the path from the toplevel_dir to the build output directory."""
# generator_dir: relative path from pwd to where make puts build files.
# Makes migrating from make to ninja easier; ninja doesn't put anything here.
generator_dir = os.path.relpath(params['options'].generator_output or '.')
# output_dir: relative path from generator_dir to the build directory.
output_dir = params.get('generator_flags', {}).get('output_dir', 'out')
# Relative path from source root to our output files. e.g. "out"
return os.path.normpath(os.path.join(generator_dir, output_dir))
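# Illustrative note (not part of the original generator): with no
# --generator-output and the default generator flags, this resolves to 'out',
# and GenerateOutputForConfig below appends the configuration name, giving
# build directories such as 'out/Debug' and 'out/Release'.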
def CalculateGeneratorInputInfo(params):
"""Called by __init__ to initialize generator values based on params."""
# E.g. "out/gypfiles"
toplevel = params['options'].toplevel_dir
qualified_out_dir = os.path.normpath(os.path.join(
toplevel, ComputeOutputDir(params), 'gypfiles'))
global generator_filelist_paths
generator_filelist_paths = {
'toplevel': toplevel,
'qualified_out_dir': qualified_out_dir,
}
def OpenOutput(path, mode='w'):
"""Open |path| for writing, creating directories if necessary."""
gyp.common.EnsureDirExists(path)
return open(path, mode)
def CommandWithWrapper(cmd, wrappers, prog):
wrapper = wrappers.get(cmd, '')
if wrapper:
return wrapper + ' ' + prog
return prog
def GetDefaultConcurrentLinks():
"""Returns a best-guess for a number of concurrent links."""
pool_size = int(os.environ.get('GYP_LINK_CONCURRENCY', 0))
if pool_size:
return pool_size
if sys.platform in ('win32', 'cygwin'):
import ctypes
class MEMORYSTATUSEX(ctypes.Structure):
_fields_ = [
("dwLength", ctypes.c_ulong),
("dwMemoryLoad", ctypes.c_ulong),
("ullTotalPhys", ctypes.c_ulonglong),
("ullAvailPhys", ctypes.c_ulonglong),
("ullTotalPageFile", ctypes.c_ulonglong),
("ullAvailPageFile", ctypes.c_ulonglong),
("ullTotalVirtual", ctypes.c_ulonglong),
("ullAvailVirtual", ctypes.c_ulonglong),
("sullAvailExtendedVirtual", ctypes.c_ulonglong),
]
stat = MEMORYSTATUSEX()
stat.dwLength = ctypes.sizeof(stat)
ctypes.windll.kernel32.GlobalMemoryStatusEx(ctypes.byref(stat))
# VS 2015 uses 20% more working set than VS 2013 and can consume all RAM
# on a 64 GB machine.
mem_limit = max(1, stat.ullTotalPhys / (5 * (2 ** 30))) # total / 5GB
hard_cap = max(1, int(os.environ.get('GYP_LINK_CONCURRENCY_MAX', 2**32)))
return min(mem_limit, hard_cap)
elif sys.platform.startswith('linux'):
if os.path.exists("/proc/meminfo"):
with open("/proc/meminfo") as meminfo:
memtotal_re = re.compile(r'^MemTotal:\s*(\d*)\s*kB')
for line in meminfo:
match = memtotal_re.match(line)
if not match:
continue
# Allow 8 GB per link on Linux because Gold is quite memory hungry.
return max(1, int(match.group(1)) / (8 * (2 ** 20)))
return 1
elif sys.platform == 'darwin':
try:
avail_bytes = int(subprocess.check_output(['sysctl', '-n', 'hw.memsize']))
# A static library debug build of Chromium's unit_tests takes ~2.7GB, so
# 4GB per ld process allows for some more bloat.
return max(1, avail_bytes / (4 * (2 ** 30))) # total / 4GB
except:
return 1
else:
# TODO(scottmg): Implement this for other platforms.
return 1
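# Illustrative note (not part of the original generator): on a Linux machine
# whose /proc/meminfo reports 32 GB, the heuristic above allows roughly
# 32 / 8 = 4 concurrent links, while GYP_LINK_CONCURRENCY (and, on Windows,
# GYP_LINK_CONCURRENCY_MAX) can be used to override or cap that value.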
def _GetWinLinkRuleNameSuffix(embed_manifest):
"""Returns the suffix used to select an appropriate linking rule depending on
whether the manifest embedding is enabled."""
return '_embed' if embed_manifest else ''
def _AddWinLinkRules(master_ninja, embed_manifest):
"""Adds link rules for Windows platform to |master_ninja|."""
def FullLinkCommand(ldcmd, out, binary_type):
resource_name = {
'exe': '1',
'dll': '2',
}[binary_type]
return '%(python)s gyp-win-tool link-with-manifests $arch %(embed)s ' \
'%(out)s "%(ldcmd)s" %(resname)s $mt $rc "$intermediatemanifest" ' \
'$manifests' % {
'python': sys.executable,
'out': out,
'ldcmd': ldcmd,
'resname': resource_name,
'embed': embed_manifest }
rule_name_suffix = _GetWinLinkRuleNameSuffix(embed_manifest)
use_separate_mspdbsrv = (
int(os.environ.get('GYP_USE_SEPARATE_MSPDBSRV', '0')) != 0)
dlldesc = 'LINK%s(DLL) $binary' % rule_name_suffix.upper()
dllcmd = ('%s gyp-win-tool link-wrapper $arch %s '
'$ld /nologo $implibflag /DLL /OUT:$binary '
'@$binary.rsp' % (sys.executable, use_separate_mspdbsrv))
dllcmd = FullLinkCommand(dllcmd, '$binary', 'dll')
master_ninja.rule('solink' + rule_name_suffix,
description=dlldesc, command=dllcmd,
rspfile='$binary.rsp',
rspfile_content='$libs $in_newline $ldflags',
restat=True,
pool='link_pool')
master_ninja.rule('solink_module' + rule_name_suffix,
description=dlldesc, command=dllcmd,
rspfile='$binary.rsp',
rspfile_content='$libs $in_newline $ldflags',
restat=True,
pool='link_pool')
# Note that ldflags goes at the end so that it has the option of
# overriding default settings earlier in the command line.
exe_cmd = ('%s gyp-win-tool link-wrapper $arch %s '
'$ld /nologo /OUT:$binary @$binary.rsp' %
(sys.executable, use_separate_mspdbsrv))
exe_cmd = FullLinkCommand(exe_cmd, '$binary', 'exe')
master_ninja.rule('link' + rule_name_suffix,
description='LINK%s $binary' % rule_name_suffix.upper(),
command=exe_cmd,
rspfile='$binary.rsp',
rspfile_content='$in_newline $libs $ldflags',
pool='link_pool')
def GenerateOutputForConfig(target_list, target_dicts, data, params,
config_name):
options = params['options']
flavor = gyp.common.GetFlavor(params)
generator_flags = params.get('generator_flags', {})
# build_dir: relative path from source root to our output files.
# e.g. "out/Debug"
build_dir = os.path.normpath(
os.path.join(ComputeOutputDir(params), config_name))
toplevel_build = os.path.join(options.toplevel_dir, build_dir)
master_ninja_file = OpenOutput(os.path.join(toplevel_build, 'build.ninja'))
master_ninja = ninja_syntax.Writer(master_ninja_file, width=120)
# Put build-time support tools in out/{config_name}.
gyp.common.CopyTool(flavor, toplevel_build)
# Grab make settings for CC/CXX.
# The rules are
# - The priority from low to high is gcc/g++, the 'make_global_settings' in
# gyp, the environment variable.
# - If there is no 'make_global_settings' for CC.host/CXX.host or
# 'CC_host'/'CXX_host' environment variable, cc_host/cxx_host should be set
# to cc/cxx.
if flavor == 'win':
ar = 'lib.exe'
# cc and cxx must be set to the correct architecture by overriding with one
# of cl_x86 or cl_x64 below.
cc = 'UNSET'
cxx = 'UNSET'
ld = 'link.exe'
ld_host = '$ld'
else:
ar = 'ar'
cc = 'cc'
cxx = 'c++'
ld = '$cc'
ldxx = '$cxx'
ld_host = '$cc_host'
ldxx_host = '$cxx_host'
ar_host = 'ar'
cc_host = None
cxx_host = None
cc_host_global_setting = None
cxx_host_global_setting = None
clang_cl = None
nm = 'nm'
nm_host = 'nm'
readelf = 'readelf'
readelf_host = 'readelf'
build_file, _, _ = gyp.common.ParseQualifiedTarget(target_list[0])
make_global_settings = data[build_file].get('make_global_settings', [])
build_to_root = gyp.common.InvertRelativePath(build_dir,
options.toplevel_dir)
wrappers = {}
for key, value in make_global_settings:
if key == 'AR':
ar = os.path.join(build_to_root, value)
if key == 'AR.host':
ar_host = os.path.join(build_to_root, value)
if key == 'CC':
cc = os.path.join(build_to_root, value)
if cc.endswith('clang-cl'):
clang_cl = cc
if key == 'CXX':
cxx = os.path.join(build_to_root, value)
if key == 'CC.host':
cc_host = os.path.join(build_to_root, value)
cc_host_global_setting = value
if key == 'CXX.host':
cxx_host = os.path.join(build_to_root, value)
cxx_host_global_setting = value
if key == 'LD':
ld = os.path.join(build_to_root, value)
if key == 'LD.host':
ld_host = os.path.join(build_to_root, value)
if key == 'NM':
nm = os.path.join(build_to_root, value)
if key == 'NM.host':
nm_host = os.path.join(build_to_root, value)
if key == 'READELF':
readelf = os.path.join(build_to_root, value)
if key == 'READELF.host':
readelf_host = os.path.join(build_to_root, value)
if key.endswith('_wrapper'):
wrappers[key[:-len('_wrapper')]] = os.path.join(build_to_root, value)
# Support wrappers from environment variables too.
for key, value in os.environ.iteritems():
if key.lower().endswith('_wrapper'):
key_prefix = key[:-len('_wrapper')]
key_prefix = re.sub(r'\.HOST$', '.host', key_prefix)
wrappers[key_prefix] = os.path.join(build_to_root, value)
if flavor == 'win':
configs = [target_dicts[qualified_target]['configurations'][config_name]
for qualified_target in target_list]
shared_system_includes = None
if not generator_flags.get('ninja_use_custom_environment_files', 0):
shared_system_includes = \
gyp.msvs_emulation.ExtractSharedMSVSSystemIncludes(
configs, generator_flags)
cl_paths = gyp.msvs_emulation.GenerateEnvironmentFiles(
toplevel_build, generator_flags, shared_system_includes, OpenOutput)
for arch, path in cl_paths.iteritems():
if clang_cl:
# If we have selected clang-cl, use that instead.
path = clang_cl
command = CommandWithWrapper('CC', wrappers,
QuoteShellArgument(path, 'win'))
if clang_cl:
# Use clang-cl to cross-compile for x86 or x86_64.
command += (' -m32' if arch == 'x86' else ' -m64')
master_ninja.variable('cl_' + arch, command)
cc = GetEnvironFallback(['CC_target', 'CC'], cc)
master_ninja.variable('cc', CommandWithWrapper('CC', wrappers, cc))
cxx = GetEnvironFallback(['CXX_target', 'CXX'], cxx)
master_ninja.variable('cxx', CommandWithWrapper('CXX', wrappers, cxx))
if flavor == 'win':
master_ninja.variable('ld', ld)
master_ninja.variable('idl', 'midl.exe')
master_ninja.variable('ar', ar)
master_ninja.variable('rc', 'rc.exe')
master_ninja.variable('ml_x86', 'ml.exe')
master_ninja.variable('ml_x64', 'ml64.exe')
master_ninja.variable('mt', 'mt.exe')
else:
master_ninja.variable('ld', CommandWithWrapper('LINK', wrappers, ld))
master_ninja.variable('ldxx', CommandWithWrapper('LINK', wrappers, ldxx))
master_ninja.variable('ar', GetEnvironFallback(['AR_target', 'AR'], ar))
if flavor != 'mac':
# Mac does not use readelf/nm for .TOC generation, so avoid polluting
# the master ninja with extra unused variables.
master_ninja.variable(
'nm', GetEnvironFallback(['NM_target', 'NM'], nm))
master_ninja.variable(
'readelf', GetEnvironFallback(['READELF_target', 'READELF'], readelf))
if generator_supports_multiple_toolsets:
if not cc_host:
cc_host = cc
if not cxx_host:
cxx_host = cxx
master_ninja.variable('ar_host', GetEnvironFallback(['AR_host'], ar_host))
master_ninja.variable('nm_host', GetEnvironFallback(['NM_host'], nm_host))
master_ninja.variable('readelf_host',
GetEnvironFallback(['READELF_host'], readelf_host))
cc_host = GetEnvironFallback(['CC_host'], cc_host)
cxx_host = GetEnvironFallback(['CXX_host'], cxx_host)
# The environment variable could be used in 'make_global_settings', like
# ['CC.host', '$(CC)'] or ['CXX.host', '$(CXX)'], transform them here.
if '$(CC)' in cc_host and cc_host_global_setting:
cc_host = cc_host_global_setting.replace('$(CC)', cc)
if '$(CXX)' in cxx_host and cxx_host_global_setting:
cxx_host = cxx_host_global_setting.replace('$(CXX)', cxx)
master_ninja.variable('cc_host',
CommandWithWrapper('CC.host', wrappers, cc_host))
master_ninja.variable('cxx_host',
CommandWithWrapper('CXX.host', wrappers, cxx_host))
if flavor == 'win':
master_ninja.variable('ld_host', ld_host)
else:
master_ninja.variable('ld_host', CommandWithWrapper(
'LINK', wrappers, ld_host))
master_ninja.variable('ldxx_host', CommandWithWrapper(
'LINK', wrappers, ldxx_host))
master_ninja.newline()
master_ninja.pool('link_pool', depth=GetDefaultConcurrentLinks())
master_ninja.newline()
deps = 'msvc' if flavor == 'win' else 'gcc'
if flavor != 'win':
master_ninja.rule(
'cc',
description='CC $out',
command=('$cc -MMD -MF $out.d $defines $includes $cflags $cflags_c '
'$cflags_pch_c -c $in -o $out'),
depfile='$out.d',
deps=deps)
master_ninja.rule(
'cc_s',
description='CC $out',
command=('$cc $defines $includes $cflags $cflags_c '
'$cflags_pch_c -c $in -o $out'))
master_ninja.rule(
'cxx',
description='CXX $out',
command=('$cxx -MMD -MF $out.d $defines $includes $cflags $cflags_cc '
'$cflags_pch_cc -c $in -o $out'),
depfile='$out.d',
deps=deps)
else:
# TODO(scottmg) Separate pdb names is a test to see if it works around
# http://crbug.com/142362. It seems there's a race between the creation of
# the .pdb by the precompiled header step for .cc and the compilation of
# .c files. This should be handled by mspdbsrv, but rarely errors out with
# c1xx : fatal error C1033: cannot open program database
# By making the rules target separate pdb files this might be avoided.
cc_command = ('ninja -t msvc -e $arch ' +
'-- '
'$cc /nologo /showIncludes /FC '
'@$out.rsp /c $in /Fo$out /Fd$pdbname_c ')
cxx_command = ('ninja -t msvc -e $arch ' +
'-- '
'$cxx /nologo /showIncludes /FC '
'@$out.rsp /c $in /Fo$out /Fd$pdbname_cc ')
master_ninja.rule(
'cc',
description='CC $out',
command=cc_command,
rspfile='$out.rsp',
rspfile_content='$defines $includes $cflags $cflags_c',
deps=deps)
master_ninja.rule(
'cxx',
description='CXX $out',
command=cxx_command,
rspfile='$out.rsp',
rspfile_content='$defines $includes $cflags $cflags_cc',
deps=deps)
master_ninja.rule(
'idl',
description='IDL $in',
command=('%s gyp-win-tool midl-wrapper $arch $outdir '
'$tlb $h $dlldata $iid $proxy $in '
'$midl_includes $idlflags' % sys.executable))
master_ninja.rule(
'rc',
description='RC $in',
# Note: $in must be last otherwise rc.exe complains.
command=('%s gyp-win-tool rc-wrapper '
'$arch $rc $defines $resource_includes $rcflags /fo$out $in' %
sys.executable))
master_ninja.rule(
'asm',
description='ASM $out',
command=('%s gyp-win-tool asm-wrapper '
'$arch $asm $defines $includes $asmflags /c /Fo $out $in' %
sys.executable))
if flavor != 'mac' and flavor != 'win':
master_ninja.rule(
'alink',
description='AR $out',
command='rm -f $out && $ar rcs $arflags $out $in')
master_ninja.rule(
'alink_thin',
description='AR $out',
command='rm -f $out && $ar rcsT $arflags $out $in')
# This allows targets that only need to depend on $lib's API to declare an
# order-only dependency on $lib.TOC and avoid relinking such downstream
# dependencies when $lib changes only in non-public ways.
# The resulting string leaves an uninterpolated %(suffix)s which
# is used in the final substitution below.
mtime_preserving_solink_base = (
'if [ ! -e $lib -o ! -e $lib.TOC ]; then '
'%(solink)s && %(extract_toc)s > $lib.TOC; else '
'%(solink)s && %(extract_toc)s > $lib.tmp && '
'if ! cmp -s $lib.tmp $lib.TOC; then mv $lib.tmp $lib.TOC ; '
'fi; fi'
% { 'solink':
'$ld -shared $ldflags -o $lib -Wl,-soname=$soname %(suffix)s',
'extract_toc':
('{ $readelf -d $lib | grep SONAME ; '
'$nm -gD -f p $lib | cut -f1-2 -d\' \'; }')})
master_ninja.rule(
'solink',
description='SOLINK $lib',
restat=True,
command=mtime_preserving_solink_base % {'suffix': '@$link_file_list'},
rspfile='$link_file_list',
rspfile_content=
'-Wl,--whole-archive $in $solibs -Wl,--no-whole-archive $libs',
pool='link_pool')
master_ninja.rule(
'solink_module',
description='SOLINK(module) $lib',
restat=True,
command=mtime_preserving_solink_base % {'suffix': '@$link_file_list'},
rspfile='$link_file_list',
rspfile_content='-Wl,--start-group $in -Wl,--end-group $solibs $libs',
pool='link_pool')
master_ninja.rule(
'link',
description='LINK $out',
command=('$ld $ldflags -o $out '
'-Wl,--start-group $in -Wl,--end-group $solibs $libs'),
pool='link_pool')
elif flavor == 'win':
master_ninja.rule(
'alink',
description='LIB $out',
command=('%s gyp-win-tool link-wrapper $arch False '
'$ar /nologo /ignore:4221 /OUT:$out @$out.rsp' %
sys.executable),
rspfile='$out.rsp',
rspfile_content='$in_newline $libflags')
_AddWinLinkRules(master_ninja, embed_manifest=True)
_AddWinLinkRules(master_ninja, embed_manifest=False)
else:
master_ninja.rule(
'objc',
description='OBJC $out',
command=('$cc -MMD -MF $out.d $defines $includes $cflags $cflags_objc '
'$cflags_pch_objc -c $in -o $out'),
depfile='$out.d',
deps=deps)
master_ninja.rule(
'objcxx',
description='OBJCXX $out',
command=('$cxx -MMD -MF $out.d $defines $includes $cflags $cflags_objcc '
'$cflags_pch_objcc -c $in -o $out'),
depfile='$out.d',
deps=deps)
master_ninja.rule(
'alink',
description='LIBTOOL-STATIC $out, POSTBUILDS',
command='rm -f $out && '
'./gyp-mac-tool filter-libtool libtool $libtool_flags '
'-static -o $out $in'
'$postbuilds')
master_ninja.rule(
'lipo',
description='LIPO $out, POSTBUILDS',
command='rm -f $out && lipo -create $in -output $out$postbuilds')
master_ninja.rule(
'solipo',
description='SOLIPO $out, POSTBUILDS',
command=(
'rm -f $lib $lib.TOC && lipo -create $in -output $lib$postbuilds &&'
'%(extract_toc)s > $lib.TOC'
% { 'extract_toc':
'{ otool -l $lib | grep LC_ID_DYLIB -A 5; '
'nm -gP $lib | cut -f1-2 -d\' \' | grep -v U$$; true; }'}))
# Record the public interface of $lib in $lib.TOC. See the corresponding
# comment in the posix section above for details.
solink_base = '$ld %(type)s $ldflags -o $lib %(suffix)s'
mtime_preserving_solink_base = (
'if [ ! -e $lib -o ! -e $lib.TOC ] || '
# Always force dependent targets to relink if this library
# reexports something. Handling this correctly would require
# recursive TOC dumping but this is rare in practice, so punt.
'otool -l $lib | grep -q LC_REEXPORT_DYLIB ; then '
'%(solink)s && %(extract_toc)s > $lib.TOC; '
'else '
'%(solink)s && %(extract_toc)s > $lib.tmp && '
'if ! cmp -s $lib.tmp $lib.TOC; then '
'mv $lib.tmp $lib.TOC ; '
'fi; '
'fi'
% { 'solink': solink_base,
'extract_toc':
'{ otool -l $lib | grep LC_ID_DYLIB -A 5; '
'nm -gP $lib | cut -f1-2 -d\' \' | grep -v U$$; true; }'})
solink_suffix = '@$link_file_list$postbuilds'
master_ninja.rule(
'solink',
description='SOLINK $lib, POSTBUILDS',
restat=True,
command=mtime_preserving_solink_base % {'suffix': solink_suffix,
'type': '-shared'},
rspfile='$link_file_list',
rspfile_content='$in $solibs $libs',
pool='link_pool')
master_ninja.rule(
'solink_notoc',
description='SOLINK $lib, POSTBUILDS',
restat=True,
command=solink_base % {'suffix':solink_suffix, 'type': '-shared'},
rspfile='$link_file_list',
rspfile_content='$in $solibs $libs',
pool='link_pool')
master_ninja.rule(
'solink_module',
description='SOLINK(module) $lib, POSTBUILDS',
restat=True,
command=mtime_preserving_solink_base % {'suffix': solink_suffix,
'type': '-bundle'},
rspfile='$link_file_list',
rspfile_content='$in $solibs $libs',
pool='link_pool')
master_ninja.rule(
'solink_module_notoc',
description='SOLINK(module) $lib, POSTBUILDS',
restat=True,
command=solink_base % {'suffix': solink_suffix, 'type': '-bundle'},
rspfile='$link_file_list',
rspfile_content='$in $solibs $libs',
pool='link_pool')
master_ninja.rule(
'link',
description='LINK $out, POSTBUILDS',
command=('$ld $ldflags -o $out '
'$in $solibs $libs$postbuilds'),
pool='link_pool')
master_ninja.rule(
'preprocess_infoplist',
description='PREPROCESS INFOPLIST $out',
command=('$cc -E -P -Wno-trigraphs -x c $defines $in -o $out && '
'plutil -convert xml1 $out $out'))
master_ninja.rule(
'copy_infoplist',
description='COPY INFOPLIST $in',
command='$env ./gyp-mac-tool copy-info-plist $in $out $binary $keys')
master_ninja.rule(
'merge_infoplist',
description='MERGE INFOPLISTS $in',
command='$env ./gyp-mac-tool merge-info-plist $out $in')
master_ninja.rule(
'compile_xcassets',
description='COMPILE XCASSETS $in',
command='$env ./gyp-mac-tool compile-xcassets $keys $in')
master_ninja.rule(
'mac_tool',
description='MACTOOL $mactool_cmd $in',
command='$env ./gyp-mac-tool $mactool_cmd $in $out $binary')
master_ninja.rule(
'package_framework',
description='PACKAGE FRAMEWORK $out, POSTBUILDS',
command='./gyp-mac-tool package-framework $out $version$postbuilds '
'&& touch $out')
if flavor == 'win':
master_ninja.rule(
'stamp',
description='STAMP $out',
command='%s gyp-win-tool stamp $out' % sys.executable)
master_ninja.rule(
'copy',
description='COPY $in $out',
command='%s gyp-win-tool recursive-mirror $in $out' % sys.executable)
else:
master_ninja.rule(
'stamp',
description='STAMP $out',
command='${postbuilds}touch $out')
master_ninja.rule(
'copy',
description='COPY $in $out',
command='rm -rf $out && cp -af $in $out')
master_ninja.newline()
all_targets = set()
for build_file in params['build_files']:
for target in gyp.common.AllTargets(target_list,
target_dicts,
os.path.normpath(build_file)):
all_targets.add(target)
all_outputs = set()
# target_outputs is a map from qualified target name to a Target object.
target_outputs = {}
# target_short_names is a map from target short name to a list of Target
# objects.
target_short_names = {}
# short name of targets that were skipped because they didn't contain anything
# interesting.
# NOTE: there may be overlap between this and non_empty_target_names.
empty_target_names = set()
# Set of non-empty short target names.
# NOTE: there may be overlap between this and empty_target_names.
non_empty_target_names = set()
for qualified_target in target_list:
# qualified_target is like: third_party/icu/icu.gyp:icui18n#target
build_file, name, toolset = \
gyp.common.ParseQualifiedTarget(qualified_target)
this_make_global_settings = data[build_file].get('make_global_settings', [])
assert make_global_settings == this_make_global_settings, (
"make_global_settings needs to be the same for all targets. %s vs. %s" %
(this_make_global_settings, make_global_settings))
spec = target_dicts[qualified_target]
if flavor == 'mac':
gyp.xcode_emulation.MergeGlobalXcodeSettingsToSpec(data[build_file], spec)
# If build_file is a symlink, we must not follow it because there's a chance
# it could point to a path above toplevel_dir, and we cannot correctly deal
# with that case at the moment.
build_file = gyp.common.RelativePath(build_file, options.toplevel_dir,
False)
qualified_target_for_hash = gyp.common.QualifiedTarget(build_file, name,
toolset)
hash_for_rules = hashlib.md5(qualified_target_for_hash).hexdigest()
base_path = os.path.dirname(build_file)
obj = 'obj'
if toolset != 'target':
obj += '.' + toolset
output_file = os.path.join(obj, base_path, name + '.ninja')
ninja_output = StringIO()
writer = NinjaWriter(hash_for_rules, target_outputs, base_path, build_dir,
ninja_output,
toplevel_build, output_file,
flavor, toplevel_dir=options.toplevel_dir)
target = writer.WriteSpec(spec, config_name, generator_flags)
if ninja_output.tell() > 0:
# Only create ninja files that actually have contents.
with OpenOutput(os.path.join(toplevel_build, output_file)) as ninja_file:
ninja_file.write(ninja_output.getvalue())
ninja_output.close()
master_ninja.subninja(output_file)
if target:
if name != target.FinalOutput() and spec['toolset'] == 'target':
target_short_names.setdefault(name, []).append(target)
target_outputs[qualified_target] = target
if qualified_target in all_targets:
all_outputs.add(target.FinalOutput())
non_empty_target_names.add(name)
else:
empty_target_names.add(name)
if target_short_names:
# Write a short name to build this target. This benefits both the
# "build chrome" case and the gyp tests, which expect to be able to
# run actions and build libraries by their short name.
master_ninja.newline()
master_ninja.comment('Short names for targets.')
for short_name in target_short_names:
master_ninja.build(short_name, 'phony', [x.FinalOutput() for x in
target_short_names[short_name]])
# Write phony targets for any empty targets that weren't written yet. As
# short names are not necessarily unique, only do this for short names that
# haven't already been output for another target.
empty_target_names = empty_target_names - non_empty_target_names
if empty_target_names:
master_ninja.newline()
master_ninja.comment('Empty targets (output for completeness).')
for name in sorted(empty_target_names):
master_ninja.build(name, 'phony')
if all_outputs:
master_ninja.newline()
master_ninja.build('all', 'phony', list(all_outputs))
master_ninja.default(generator_flags.get('default_target', 'all'))
master_ninja_file.close()
def PerformBuild(data, configurations, params):
options = params['options']
for config in configurations:
builddir = os.path.join(options.toplevel_dir, 'out', config)
arguments = ['ninja', '-C', builddir]
print 'Building [%s]: %s' % (config, arguments)
subprocess.check_call(arguments)
def CallGenerateOutputForConfig(arglist):
# Ignore the interrupt signal so that the parent process catches it and
# kills all multiprocessing children.
signal.signal(signal.SIGINT, signal.SIG_IGN)
(target_list, target_dicts, data, params, config_name) = arglist
GenerateOutputForConfig(target_list, target_dicts, data, params, config_name)
def GenerateOutput(target_list, target_dicts, data, params):
# Update target_dicts for iOS device builds.
target_dicts = gyp.xcode_emulation.CloneConfigurationForDeviceAndEmulator(
target_dicts)
user_config = params.get('generator_flags', {}).get('config', None)
if gyp.common.GetFlavor(params) == 'win':
target_list, target_dicts = MSVSUtil.ShardTargets(target_list, target_dicts)
target_list, target_dicts = MSVSUtil.InsertLargePdbShims(
target_list, target_dicts, generator_default_variables)
if user_config:
GenerateOutputForConfig(target_list, target_dicts, data, params,
user_config)
else:
config_names = target_dicts[target_list[0]]['configurations'].keys()
if params['parallel']:
try:
pool = multiprocessing.Pool(len(config_names))
arglists = []
for config_name in config_names:
arglists.append(
(target_list, target_dicts, data, params, config_name))
pool.map(CallGenerateOutputForConfig, arglists)
except KeyboardInterrupt, e:
pool.terminate()
raise e
else:
for config_name in config_names:
GenerateOutputForConfig(target_list, target_dicts, data, params,
config_name)
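# Illustrative note (not part of the original generator): gyp selects this
# module when invoked with '-f ninja' (or GYP_GENERATORS=ninja) and then calls
# GenerateOutput() above, which writes one build.ninja per configuration under
# the directory computed by ComputeOutputDir().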
| mit |
Bashar/django | django/core/cache/backends/memcached.py | 11 | 7035 | "Memcached cache backend"
import time
import pickle
from django.core.cache.backends.base import BaseCache, DEFAULT_TIMEOUT
from django.utils import six
from django.utils.deprecation import RenameMethodsBase, RemovedInDjango19Warning
from django.utils.encoding import force_str
from django.utils.functional import cached_property
class BaseMemcachedCacheMethods(RenameMethodsBase):
renamed_methods = (
('_get_memcache_timeout', 'get_backend_timeout', RemovedInDjango19Warning),
)
class BaseMemcachedCache(six.with_metaclass(BaseMemcachedCacheMethods, BaseCache)):
def __init__(self, server, params, library, value_not_found_exception):
super(BaseMemcachedCache, self).__init__(params)
if isinstance(server, six.string_types):
self._servers = server.split(';')
else:
self._servers = server
# The exception type to catch from the underlying library for a key
# that was not found. This is a ValueError for python-memcache,
# pylibmc.NotFound for pylibmc, and cmemcache will return None without
# raising an exception.
self.LibraryValueNotFoundException = value_not_found_exception
self._lib = library
self._options = params.get('OPTIONS', None)
@property
def _cache(self):
"""
Implements transparent thread-safe access to a memcached client.
"""
if getattr(self, '_client', None) is None:
self._client = self._lib.Client(self._servers)
return self._client
def get_backend_timeout(self, timeout=DEFAULT_TIMEOUT):
"""
Memcached deals with long (> 30 days) timeouts in a special
way. Call this function to obtain a safe value for your timeout.
"""
if timeout == DEFAULT_TIMEOUT:
return self.default_timeout
if timeout is None:
# Using 0 in memcache sets a non-expiring timeout.
return 0
elif int(timeout) == 0:
# Other cache backends treat 0 as set-and-expire. To achieve this
# in memcache backends, a negative timeout must be passed.
timeout = -1
if timeout > 2592000: # 60*60*24*30, 30 days
# See http://code.google.com/p/memcached/wiki/FAQ
# "You can set expire times up to 30 days in the future. After that
# memcached interprets it as a date, and will expire the item after
# said date. This is a simple (but obscure) mechanic."
#
# This means that we have to switch to absolute timestamps.
timeout += int(time.time())
return int(timeout)
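# Illustrative note (not part of the original backend): a few sample
# translations of the rules above, assuming the call happens "now":
#   timeout=None        -> 0    (memcached never expires the key)
#   timeout=0           -> -1   (expire immediately)
#   timeout=300         -> 300  (relative seconds, under 30 days)
#   timeout=60*60*24*45 -> now + 45 days, as an absolute Unix timestamp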
def make_key(self, key, version=None):
# Python 2 memcache requires the key to be a byte string.
return force_str(super(BaseMemcachedCache, self).make_key(key, version))
def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
key = self.make_key(key, version=version)
return self._cache.add(key, value, self.get_backend_timeout(timeout))
def get(self, key, default=None, version=None):
key = self.make_key(key, version=version)
val = self._cache.get(key)
if val is None:
return default
return val
def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
key = self.make_key(key, version=version)
self._cache.set(key, value, self.get_backend_timeout(timeout))
def delete(self, key, version=None):
key = self.make_key(key, version=version)
self._cache.delete(key)
def get_many(self, keys, version=None):
new_keys = [self.make_key(x, version=version) for x in keys]
ret = self._cache.get_multi(new_keys)
if ret:
_ = {}
m = dict(zip(new_keys, keys))
for k, v in ret.items():
_[m[k]] = v
ret = _
return ret
def close(self, **kwargs):
self._cache.disconnect_all()
def incr(self, key, delta=1, version=None):
key = self.make_key(key, version=version)
# memcached doesn't support a negative delta
if delta < 0:
return self._cache.decr(key, -delta)
try:
val = self._cache.incr(key, delta)
# python-memcache responds to incr on non-existent keys by
# raising a ValueError, pylibmc by raising a pylibmc.NotFound
# and Cmemcache returns None. In all cases,
# we should raise a ValueError though.
except self.LibraryValueNotFoundException:
val = None
if val is None:
raise ValueError("Key '%s' not found" % key)
return val
def decr(self, key, delta=1, version=None):
key = self.make_key(key, version=version)
# memcached doesn't support a negative delta
if delta < 0:
return self._cache.incr(key, -delta)
try:
val = self._cache.decr(key, delta)
# python-memcache responds to decr on non-existent keys by
# raising a ValueError, pylibmc by raising a pylibmc.NotFound
# and Cmemcache returns None. In all cases,
# we should raise a ValueError though.
except self.LibraryValueNotFoundException:
val = None
if val is None:
raise ValueError("Key '%s' not found" % key)
return val
def set_many(self, data, timeout=DEFAULT_TIMEOUT, version=None):
safe_data = {}
for key, value in data.items():
key = self.make_key(key, version=version)
safe_data[key] = value
self._cache.set_multi(safe_data, self.get_backend_timeout(timeout))
def delete_many(self, keys, version=None):
l = lambda x: self.make_key(x, version=version)
self._cache.delete_multi(map(l, keys))
def clear(self):
self._cache.flush_all()
class MemcachedCache(BaseMemcachedCache):
"An implementation of a cache binding using python-memcached"
def __init__(self, server, params):
import memcache
super(MemcachedCache, self).__init__(server, params,
library=memcache,
value_not_found_exception=ValueError)
@property
def _cache(self):
if getattr(self, '_client', None) is None:
self._client = self._lib.Client(self._servers, pickleProtocol=pickle.HIGHEST_PROTOCOL)
return self._client
class PyLibMCCache(BaseMemcachedCache):
"An implementation of a cache binding using pylibmc"
def __init__(self, server, params):
import pylibmc
super(PyLibMCCache, self).__init__(server, params,
library=pylibmc,
value_not_found_exception=pylibmc.NotFound)
@cached_property
def _cache(self):
client = self._lib.Client(self._servers)
if self._options:
client.behaviors = self._options
return client
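# Illustrative configuration sketch (not part of this module): these backends
# are normally selected through Django's CACHES setting, e.g.
#
#   CACHES = {
#       'default': {
#           'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
#           'LOCATION': '127.0.0.1:11211',
#       }
#   }
#
# with PyLibMCCache used instead by pointing BACKEND at
# 'django.core.cache.backends.memcached.PyLibMCCache'.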
| bsd-3-clause |
abdhaleegit/avocado-misc-tests | io/disk/ioping.py | 4 | 2385 | #!/usr/bin/env python
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright: 2017 IBM
# Author:Praveen K Pandey <[email protected]>
#
import os
from avocado import Test
from avocado.utils import process, archive, build
from avocado.utils.software_manager import SoftwareManager
class Ioping(Test):
"""
Disk I/O latency monitoring tool
"""
def setUp(self):
'''
Build Ioping Test
'''
# Check for basic utilities
smm = SoftwareManager()
self.count = self.params.get('count', default='2')
self.mode = self.params.get('mode', default='-C')
self.deadline = self.params.get('deadline', default='10')
self.period = self.params.get('period', default='10')
self.interval = self.params.get('interval', default='1s')
self.size = self.params.get('size', default='4k')
self.wsize = self.params.get('wsize', default='10m')
self.disk = self.params.get('disk', default='/home')
for package in ['gcc', 'make']:
if not smm.check_installed(package) and not smm.install(package):
self.cancel(
"Fail to install %s required for this test." % package)
tarball = self.fetch_asset("ioping.zip", locations="https://github.com/"
"koct9i/ioping/archive/master.zip",
expire='1d')
archive.extract(tarball, self.workdir)
self.sourcedir = os.path.join(self.workdir, 'ioping-master')
build.make(self.sourcedir)
def test(self):
os.chdir(self.sourcedir)
cmd = '%s -c %s -w %s -p %s -i %s -s %s -S %s %s' % (
self.mode, self.count, self.deadline, self.period, self.interval,
self.size, self.wsize, self.disk)
if process.system('./ioping %s' % cmd, ignore_status=True, shell=True):
self.fail("test run fails of %s" % cmd)
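# Illustrative note (not part of the original test): with a checkout of
# avocado-misc-tests, this test is typically launched as
#   avocado run io/disk/ioping.py
# and the values read via self.params ('disk', 'count', 'mode', ...) can be
# overridden through avocado's YAML-based test parameter (varianter) support.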
| gpl-2.0 |
SivagnanamCiena/ciscoconfparse | ciscoconfparse/ccp_util.py | 3 | 26161 | from collections import MutableSequence
import itertools
import sys
import re
import os
from protocol_values import ASA_TCP_PORTS, ASA_UDP_PORTS
from dns.exception import DNSException
from dns.resolver import Resolver
from dns import reversename, query
if sys.version_info[0]<3:
from ipaddr import IPv4Network, IPv6Network, IPv4Address, IPv6Address
else:
from ipaddress import IPv4Network, IPv6Network, IPv4Address, IPv6Address
""" ccp_util.py - Parse, Query, Build, and Modify IOS-style configurations
Copyright (C) 2014-2015 David Michael Pennington
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
If you need to contact the author, you can do so by emailing:
mike [~at~] pennington [/dot\] net
"""
_IPV6_REGEX_STR = r"""(?!:::\S+?$) # Negative Lookahead for 3 colons
(?P<addr> # Begin a group named 'addr'
(?P<opt1>{0}(?::{0}){{7}}) # no double colons, option 1
|(?P<opt2>(?:{0}:){{1}}(?::{0}){{1,6}}) # match fe80::1
|(?P<opt3>(?:{0}:){{2}}(?::{0}){{1,5}}) # match fe80:a::1
|(?P<opt4>(?:{0}:){{3}}(?::{0}){{1,4}}) # match fe80:a:b::1
|(?P<opt5>(?:{0}:){{4}}(?::{0}){{1,3}}) # match fe80:a:b:c::1
|(?P<opt6>(?:{0}:){{5}}(?::{0}){{1,2}}) # match fe80:a:b:c:d::1
|(?P<opt7>(?:{0}:){{6}}(?::{0}){{1,1}}) # match fe80:a:b:c:d:e::1
|(?P<opt8>:(?::{0}){{1,7}}) # leading double colons
|(?P<opt9>(?:{0}:){{1,7}}:) # trailing double colons
|(?P<opt10>(?:::)) # bare double colons (default route)
) # End group named 'addr'
""".format(r'[0-9a-fA-F]{1,4}')
_IPV6_REGEX_STR_COMPRESSED1 = r"""(?!:::\S+?$)(?P<addr1>(?P<opt1_1>{0}(?::{0}){{7}})|(?P<opt1_2>(?:{0}:){{1}}(?::{0}){{1,6}})|(?P<opt1_3>(?:{0}:){{2}}(?::{0}){{1,5}})|(?P<opt1_4>(?:{0}:){{3}}(?::{0}){{1,4}})|(?P<opt1_5>(?:{0}:){{4}}(?::{0}){{1,3}})|(?P<opt1_6>(?:{0}:){{5}}(?::{0}){{1,2}})|(?P<opt1_7>(?:{0}:){{6}}(?::{0}){{1,1}})|(?P<opt1_8>:(?::{0}){{1,7}})|(?P<opt1_9>(?:{0}:){{1,7}}:)|(?P<opt1_10>(?:::)))""".format(r'[0-9a-fA-F]{1,4}')
_IPV6_REGEX_STR_COMPRESSED2 = r"""(?!:::\S+?$)(?P<addr2>(?P<opt2_1>{0}(?::{0}){{7}})|(?P<opt2_2>(?:{0}:){{1}}(?::{0}){{1,6}})|(?P<opt2_3>(?:{0}:){{2}}(?::{0}){{1,5}})|(?P<opt2_4>(?:{0}:){{3}}(?::{0}){{1,4}})|(?P<opt2_5>(?:{0}:){{4}}(?::{0}){{1,3}})|(?P<opt2_6>(?:{0}:){{5}}(?::{0}){{1,2}})|(?P<opt2_7>(?:{0}:){{6}}(?::{0}){{1,1}})|(?P<opt2_8>:(?::{0}){{1,7}})|(?P<opt2_9>(?:{0}:){{1,7}}:)|(?P<opt2_10>(?:::)))""".format(r'[0-9a-fA-F]{1,4}')
_IPV6_REGEX_STR_COMPRESSED3 = r"""(?!:::\S+?$)(?P<addr3>(?P<opt3_1>{0}(?::{0}){{7}})|(?P<opt3_2>(?:{0}:){{1}}(?::{0}){{1,6}})|(?P<opt3_3>(?:{0}:){{2}}(?::{0}){{1,5}})|(?P<opt3_4>(?:{0}:){{3}}(?::{0}){{1,4}})|(?P<opt3_5>(?:{0}:){{4}}(?::{0}){{1,3}})|(?P<opt3_6>(?:{0}:){{5}}(?::{0}){{1,2}})|(?P<opt3_7>(?:{0}:){{6}}(?::{0}){{1,1}})|(?P<opt3_8>:(?::{0}){{1,7}})|(?P<opt3_9>(?:{0}:){{1,7}}:)|(?P<opt3_10>(?:::)))""".format(r'[0-9a-fA-F]{1,4}')
_RGX_IPV6ADDR = re.compile(_IPV6_REGEX_STR, re.VERBOSE)
_RGX_IPV4ADDR = re.compile(r'^(?P<addr>\d+\.\d+\.\d+\.\d+)')
_RGX_IPV4ADDR_NETMASK = re.compile(
r"""
(?:
^(?P<addr0>\d+\.\d+\.\d+\.\d+)$
|(?:^
(?:(?P<addr1>\d+\.\d+\.\d+\.\d+))(?:\s+|\/)(?:(?P<netmask>\d+\.\d+\.\d+\.\d+))
$)
|^(?:\s*(?P<addr2>\d+\.\d+\.\d+\.\d+)(?:\/(?P<masklen>\d+))\s*)$
)
""",
re.VERBOSE)
## Emulate the old behavior of ipaddr.IPv4Network in Python2, which can use
## IPv4Network with a host address. Google removed that in Python3's
## ipaddress.py module
class IPv4Obj(object):
"""An object to represent IPv4 addresses and IPv4Networks. When :class:`~ccp_util.IPv4Obj` objects are compared or sorted, shorter masks are greater than longer masks. After comparing mask length, numerically higher IP addresses are greater than numerically lower IP addresses.
Kwargs:
- arg (str): A string containing an IPv4 address, and optionally a netmask or masklength. The following address/netmask formats are supported: "10.1.1.1/24", "10.1.1.1 255.255.255.0", "10.1.1.1/255.255.255.0"
Attributes:
- network_object : An IPv4Network object
- ip_object : An IPv4Address object
- ip : An IPv4Address object
- as_binary_tuple (tuple): The address as a tuple of zero-padded binary strings
- as_hex_tuple (tuple): The address as a tuple of zero-padded 8-bit hex strings
- as_decimal (int): The ip address as a decimal integer
- network (str): A string representing the network address
- netmask (str): A string representing the netmask
- prefixlen (int): An integer representing the length of the netmask
- broadcast (str): A string representing the broadcast address
- hostmask (str): A string representing the hostmask
- numhosts (int): An integer representing the number of hosts contained in the network
Returns:
- an instance of :class:`~ccp_util.IPv4Obj`.
"""
def __init__(self, arg='127.0.0.1/32', strict=False):
#RGX_IPV4ADDR = re.compile(r'^(\d+\.\d+\.\d+\.\d+)')
#RGX_IPV4ADDR_NETMASK = re.compile(r'(\d+\.\d+\.\d+\.\d+)\s+(\d+\.\d+\.\d+\.\d+)')
self.arg = arg
mm = _RGX_IPV4ADDR_NETMASK.search(arg)
ERROR = "IPv4Obj couldn't parse '{0}'".format(arg)
assert (not (mm is None)), ERROR
mm_result = mm.groupdict()
addr = mm_result['addr0'] or mm_result['addr1'] \
or mm_result['addr2'] or '127.0.0.1'
masklen = int(mm_result['masklen'] or 32)
netmask = mm_result['netmask']
if netmask:
## ALWAYS check for the netmask first
self.network_object = IPv4Network('{0}/{1}'.format(addr, netmask),
strict=strict)
self.ip_object = IPv4Address('{0}'.format(addr))
else:
self.network_object = IPv4Network('{0}/{1}'.format(addr, masklen),
strict=strict)
self.ip_object = IPv4Address('{0}'.format(addr))
def __repr__(self):
return """<IPv4Obj {0}/{1}>""".format(str(self.ip_object), self.prefixlen)
def __eq__(self, val):
try:
if self.network_object==val.network_object:
return True
return False
except (Exception) as e:
errmsg = "'{0}' cannot compare itself to '{1}': {2}".format(self.__repr__(), val, e)
raise ValueError(errmsg)
def __gt__(self, val):
try:
val_prefixlen = int(getattr(val, 'prefixlen'))
val_nobj = getattr(val, 'network_object')
self_nobj = self.network_object
if (self.network_object.prefixlen<val_prefixlen):
# Sort shorter masks as higher...
return True
elif (self.network_object.prefixlen>val_prefixlen):
return False
elif (self_nobj>val_nobj):
# If masks are equal, rely on Google's sorting...
return True
return False
except:
errmsg = "{0} cannot compare itself to '{1}'".format(self.__repr__(), val)
raise ValueError(errmsg)
def __lt__(self, val):
try:
val_prefixlen = int(getattr(val, 'prefixlen'))
val_nobj = getattr(val, 'network_object')
self_nobj = self.network_object
if (self.network_object.prefixlen>val_prefixlen):
# Sort shorter masks as lower...
return True
elif (self.network_object.prefixlen<val_prefixlen):
return False
elif (self_nobj<val_nobj):
# If masks are equal, rely on Google's sorting...
return True
return False
except:
errmsg = "{0} cannot compare itself to '{1}'".format(self.__repr__(), val)
raise ValueError(errmsg)
def __contains__(self, val):
# Used for "foo in bar"... python calls bar.__contains__(foo)
try:
if (self.network_object.prefixlen==0):
return True
elif self.network_object.prefixlen>val.network_object.prefixlen:
# obvious shortcut... if this object's mask is longer than
# val, this object cannot contain val
return False
else:
#return (val.network in self.network)
return (self.network<=val.network) and \
(self.broadcast>=val.broadcast)
except (Exception) as e:
raise ValueError("Could not check whether '{0}' is contained in '{1}': {2}".format(val, self, e))
def __hash__(self):
# Python3 needs __hash__()
return hash(str(self.ip_object))+hash(str(self.prefixlen))
def __iter__(self):
return self.network_object.__iter__()
def __next__(self):
## For Python3 iteration...
return self.network_object.__next__()
def next(self):
## For Python2 iteration...
return self.network_object.__next__()
@property
def ip(self):
"""Returns the address as an IPv4Address object."""
return self.ip_object
@property
def netmask(self):
"""Returns the network mask as an IPv4Address object."""
return self.network_object.netmask
@property
def prefixlen(self):
"""Returns the length of the network mask as an integer."""
return self.network_object.prefixlen
@property
def broadcast(self):
"""Returns the broadcast address as an IPv4Address object."""
if sys.version_info[0]<3:
return self.network_object.broadcast
else:
return self.network_object.broadcast_address
@property
def network(self):
"""Returns an IPv4Network object, which represents this network.
"""
if sys.version_info[0]<3:
return self.network_object.network
else:
## The ipaddress module returns an "IPAddress" object in Python3...
return IPv4Network('{0}'.format(self.network_object.compressed))
@property
def hostmask(self):
"""Returns the host mask as an IPv4Address object."""
return self.network_object.hostmask
@property
def version(self):
"""Returns the version of the object as an integer. i.e. 4"""
return 4
@property
def numhosts(self):
"""Returns the total number of IP addresses in this network, including broadcast and the "subnet zero" address"""
if sys.version_info[0]<3:
return self.network_object.numhosts
else:
return 2**(32-self.network_object.prefixlen)
@property
def as_decimal(self):
"""Returns the IP address as a decimal integer"""
num_strings = str(self.ip).split('.')
num_strings.reverse() # reverse the order
return sum([int(num)*(256**idx) for idx, num in enumerate(num_strings)])
@property
def as_binary_tuple(self):
"""Returns the IP address as a tuple of zero-padded binary strings"""
return tuple(['{0:08b}'.format(int(num)) for num in \
str(self.ip).split('.')])
@property
def as_hex_tuple(self):
"""Returns the IP address as a tuple of zero-padded hex strings"""
return tuple(['{0:02x}'.format(int(num)) for num in \
str(self.ip).split('.')])
@property
def is_multicast(self):
"""Returns a boolean for whether this is a multicast address"""
return self.network_object.is_multicast
@property
def is_private(self):
"""Returns a boolean for whether this is a private address"""
return self.network_object.is_private
@property
def is_reserved(self):
"""Returns a boolean for whether this is a reserved address"""
return self.network_object.is_reserved
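# Illustrative usage sketch (not part of the original module). It assumes the
# IPv4Obj constructor accepts "address/prefixlen" strings as its docstring
# describes; it is wrapped in a function so importing this module never runs it.
def _ipv4obj_usage_example():
    net = IPv4Obj('10.0.0.0/8')
    host = IPv4Obj('10.1.1.1/24')
    assert host in net    # containment compares network and broadcast bounds
    assert net > host     # shorter masks sort as greater than longer masks
    assert host.as_decimal == 167837953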
## Emulate the old behavior of ipaddr.IPv6Network in Python2, which can use
## IPv6Network with a host address. Google removed that in Python3's
## ipaddress.py module
class IPv6Obj(object):
"""An object to represent IPv6 addresses and IPv6Networks. When :class:`~ccp_util.IPv6Obj` objects are compared or sorted, shorter masks are greater than longer masks. After comparing mask length, numerically higher IP addresses are greater than numerically lower IP addresses.
Kwargs:
- arg (str): A string containing an IPv6 address, and optionally a netmask or masklength. The following address/netmask formats are supported: "2001::dead:beef", "2001::dead:beef/64",
Attributes:
- network_object : An IPv6Network object
- ip_object : An IPv6Address object
- ip : An IPv6Address object
- as_binary_tuple (tuple): The ipv6 address as a tuple of zero-padded binary strings
- as_decimal (int): The ipv6 address as a decimal integer
- as_hex_tuple (tuple): The ipv6 address as a tuple of zero-padded 8-bit hex strings
- network (str): A string representing the network address
- netmask (str): A string representing the netmask
- prefixlen (int): An integer representing the length of the netmask
- broadcast: raises `NotImplementedError`; IPv6 doesn't use broadcast
- hostmask (str): A string representing the hostmask
- numhosts (int): An integer representing the number of hosts contained in the network
Returns:
- an instance of :class:`~ccp_util.IPv6Obj`.
"""
def __init__(self, arg='::1/128', strict=False):
#arg= _RGX_IPV6ADDR_NETMASK.sub(r'\1/\2', arg) # mangle IOS: 'addr mask'
self.arg = arg
mm = _RGX_IPV6ADDR.search(arg)
assert (not (mm is None)), "IPv6Obj couldn't parse {0}".format(arg)
self.network_object = IPv6Network(arg, strict=strict)
self.ip_object = IPv6Address(mm.group(1))
# 'address_exclude', 'compare_networks', 'hostmask', 'ipv4_mapped', 'iter_subnets', 'iterhosts', 'masked', 'max_prefixlen', 'netmask', 'network', 'numhosts', 'overlaps', 'prefixlen', 'sixtofour', 'subnet', 'supernet', 'teredo', 'with_hostmask', 'with_netmask', 'with_prefixlen'
def __repr__(self):
return """<IPv6Obj {0}/{1}>""".format(str(self.ip_object), self.prefixlen)
def __eq__(self, val):
try:
if self.network_object==val.network_object:
return True
return False
except (Exception) as e:
errmsg = "'{0}' cannot compare itself to '{1}': {2}".format(self.__repr__(), val, e)
raise ValueError(errmsg)
def __gt__(self, val):
try:
val_prefixlen = int(getattr(val, 'prefixlen'))
val_nobj = getattr(val, 'network_object')
self_nobj = self.network_object
if (self.network_object.prefixlen<val_prefixlen):
# Sort shorter masks as higher...
return True
elif (self.network_object.prefixlen>val_prefixlen):
return False
elif (self_nobj>val_nobj):
# If masks are equal, rely on Google's sorting...
return True
return False
except:
errmsg = "{0} cannot compare itself to '{1}'".format(self.__repr__(), val)
raise ValueError(errmsg)
def __lt__(self, val):
try:
val_prefixlen = int(getattr(val, 'prefixlen'))
val_nobj = getattr(val, 'network_object')
self_nobj = self.network_object
if (self.network_object.prefixlen>val_prefixlen):
# Sort shorter masks as lower...
return True
elif (self.network_object.prefixlen<val_prefixlen):
return False
elif (self_nobj<val_nobj):
# If masks are equal, rely on Google's sorting...
return True
return False
except:
errmsg = "{0} cannot compare itself to '{1}'".format(self.__repr__(), val)
raise ValueError(errmsg)
def __contains__(self, val):
# Used for "foo in bar"... python calls bar.__contains__(foo)
try:
if (self.network_object.prefixlen==0):
return True
elif self.network_object.prefixlen>val.network_object.prefixlen:
# obvious shortcut... if this object's mask is longer than
# val, this object cannot contain val
return False
else:
#return (val.network in self.network)
return (self.network<=val.network) and \
(self.broadcast>=val.broadcast)
except (Exception) as e:
raise ValueError("Could not check whether '{0}' is contained in '{1}': {2}".format(val, self, e))
def __hash__(self):
# Python3 needs __hash__()
return hash(str(self.ip_object))+hash(str(self.prefixlen))
def __iter__(self):
return self.network_object.__iter__()
def __next__(self):
## For Python3 iteration...
return self.network_object.__next__()
def next(self):
## For Python2 iteration...
return self.network_object.__next__()
@property
def ip(self):
"""Returns the address as an IPv6Address object."""
return self.ip_object
@property
def netmask(self):
"""Returns the network mask as an IPv6Address object."""
return self.network_object.netmask
@property
def prefixlen(self):
"""Returns the length of the network mask as an integer."""
return self.network_object.prefixlen
@property
def compressed(self):
"""Returns the IPv6 object in compressed form"""
return self.network_object.compressed
@property
def exploded(self):
"""Returns the IPv6 object in exploded form"""
return self.network_object.exploded
@property
def packed(self):
"""Returns the IPv6 object in packed form"""
return self.network_object.packed
@property
def broadcast(self):
raise NotImplementedError("IPv6 does not have broadcasts")
@property
def network(self):
"""Returns an IPv6Network object, which represents this network.
"""
if sys.version_info[0]<3:
return self.network_object.network
else:
## The ipaddress module returns an "IPAddress" object in Python3...
return IPv6Network('{0}'.format(self.network_object.compressed))
@property
def hostmask(self):
"""Returns the host mask as an IPv6Address object."""
return self.network_object.hostmask
@property
def version(self):
"""Returns the version of the object as an integer. i.e. 4"""
return 6
@property
def numhosts(self):
"""Returns the total number of IP addresses in this network, including broadcast and the "subnet zero" address"""
if sys.version_info[0]<3:
return self.network_object.numhosts
else:
return 2**(128-self.network_object.prefixlen)
@property
def as_decimal(self):
"""Returns the IP address as a decimal integer"""
num_strings = str(self.ip.exploded).split(':')
num_strings.reverse() # reverse the order
        # each exploded group is 16 bits wide, so the place value is 65536
        return sum([int(num, 16)*(65536**idx) for idx, num in enumerate(num_strings)])
@property
def as_binary_tuple(self):
"""Returns the IPv6 address as a tuple of zero-padded 8-bit binary strings"""
nested_list = [
['{0:08b}'.format(int(ii, 16)) for ii in [num[0:2], num[2:4]]]
for num in str(self.ip.exploded).split(':')]
return tuple(itertools.chain(*nested_list))
@property
def as_hex_tuple(self):
"""Returns the IPv6 address as a tuple of zero-padded 8-bit hex strings"""
nested_list = [
['{0:02x}'.format(int(ii, 16)) for ii in [num[0:2], num[2:4]]]
for num in str(self.ip.exploded).split(':')]
return tuple(itertools.chain(*nested_list))
@property
def is_multicast(self):
"""Returns a boolean for whether this is a multicast address"""
return self.network_object.is_multicast
@property
def is_private(self):
"""Returns a boolean for whether this is a private address"""
return self.network_object.is_private
@property
def is_reserved(self):
"""Returns a boolean for whether this is a reserved address"""
return self.network_object.is_reserved
@property
def is_link_local(self):
"""Returns a boolean for whether this is an IPv6 link-local address"""
return self.network_object.is_link_local
@property
def is_site_local(self):
"""Returns a boolean for whether this is an IPv6 site-local address"""
return self.network_object.is_site_local
@property
def is_unspecified(self):
"""Returns a boolean for whether this address is not otherwise
classified"""
return self.network_object.is_unspecified
@property
def teredo(self):
return self.network_object.teredo
@property
def sixtofour(self):
return self.network_object.sixtofour
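# Illustrative usage sketch (not part of the original module). The comparison
# semantics mirror IPv4Obj: shorter masks compare as greater than longer masks.
# Wrapped in a function so importing this module never runs it.
def _ipv6obj_usage_example():
    net = IPv6Obj('2001:db8::/32')
    host = IPv6Obj('2001:db8::dead:beef/64')
    assert net > host            # /32 sorts above /64
    assert host.prefixlen == 64
    assert host.version == 6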
class L4Object(object):
"""Object for Transport-layer protocols; the object ensures that logical operators (such as le, gt, eq, and ne) are parsed correctly, as well as mapping service names to port numbers"""
def __init__(self, protocol='', port_spec='', syntax=''):
self.protocol = protocol
self.port_list = list()
self.syntax = syntax
try:
port_spec = port_spec.strip()
except:
port_spec = port_spec
if syntax=='asa':
if protocol=='tcp':
ports = ASA_TCP_PORTS
elif protocol=='udp':
ports = ASA_UDP_PORTS
else:
raise NotImplementedError("'{0}' is not supported: '{0}'".format(protocol))
else:
raise NotImplementedError("This syntax is unknown: '{0}'".format(syntax))
if 'eq ' in port_spec:
port_str = re.split('\s+', port_spec)[-1]
self.port_list = [int(ports.get(port_str, port_str))]
elif re.search(r'^\S+$', port_spec):
# Technically, 'eq ' is optional...
self.port_list = [int(ports.get(port_spec, port_spec))]
elif 'range ' in port_spec:
port_tmp = re.split('\s+', port_spec)[1:]
self.port_list = range(int(ports.get(port_tmp[0], port_tmp[0])),
int(ports.get(port_tmp[1], port_tmp[1])) + 1)
elif 'lt ' in port_spec:
port_str = re.split('\s+', port_spec)[-1]
self.port_list = range(1, int(ports.get(port_str, port_str)))
elif 'gt ' in port_spec:
port_str = re.split('\s+', port_spec)[-1]
            self.port_list = range(int(ports.get(port_str, port_str)) + 1, 65536)  # include port 65535
elif 'neq ' in port_spec:
port_str = re.split('\s+', port_spec)[-1]
            tmp = set(range(1, 65536))  # all valid ports, 1-65535
            tmp.remove(int(ports.get(port_str, port_str)))
self.port_list = sorted(tmp)
def __eq__(self, val):
if (self.protocol==val.protocol) and (self.port_list==val.port_list):
return True
return False
def __repr__(self):
return "<L4Object {0} {1}>".format(self.protocol, self.port_list)
def dns_lookup(input, timeout=3, server=''):
"""Perform a simple DNS lookup, return results in a dictionary"""
resolver = Resolver()
resolver.timeout = float(timeout)
resolver.lifetime = float(timeout)
if server:
resolver.nameservers = [server]
try:
records = resolver.query(input, 'A')
return {'addrs': [ii.address for ii in records],
'error': '',
'name': input,
}
except DNSException as e:
return {'addrs': [],
'error': repr(e),
'name': input,
}
def dns6_lookup(input, timeout=3, server=''):
"""Perform a simple DNS lookup, return results in a dictionary"""
resolver = Resolver()
resolver.timeout = float(timeout)
resolver.lifetime = float(timeout)
if server:
resolver.nameservers = [server]
try:
records = resolver.query(input, 'AAAA')
return {'addrs': [ii.address for ii in records],
'error': '',
'name': input,
}
except DNSException as e:
return {'addrs': [],
'error': repr(e),
'name': input,
}
_REVERSE_DNS_REGEX = re.compile(r'^\s*\d+\.\d+\.\d+\.\d+\s*$')
def reverse_dns_lookup(input, timeout=3, server=''):
"""Perform a simple reverse DNS lookup, return results in a dictionary"""
assert _REVERSE_DNS_REGEX.search(input), "Invalid address format: '{0}'".format(input)
resolver = Resolver()
resolver.timeout = float(timeout)
resolver.lifetime = float(timeout)
if server:
resolver.nameservers = [server]
try:
tmp = input.strip().split('.')
tmp.reverse()
inaddr = '.'.join(tmp) + ".in-addr.arpa"
records = resolver.query(inaddr, 'PTR')
return {'name': records[0].to_text(),
'lookup': inaddr,
'error': '',
'addr': input,
}
except DNSException as e:
return {'addrs': [],
'lookup': inaddr,
'error': repr(e),
'name': input,
}
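# Illustrative usage sketch (not part of the original module); these lookups
# need network access and a reachable DNS server, so the hostname below is only
# a placeholder and the code is wrapped in a function rather than run at import.
def _dns_lookup_usage_example():
    fwd = dns_lookup('example.com', timeout=5)
    if not fwd['error']:
        print(fwd['addrs'])
    rev = reverse_dns_lookup('8.8.8.8')
    if not rev['error']:
        print(rev['name'])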
| gpl-3.0 |
agentfog/qiime | qiime/filter.py | 15 | 26099 | #!/usr/bin/env python
# File created on 18 May 2010
from __future__ import division
__author__ = "Greg Caporaso"
__copyright__ = "Copyright 2011, The QIIME Project"
__credits__ = ["Greg Caporaso", "Will Van Treuren", "Daniel McDonald",
"Jai Ram Rideout", "Yoshiki Vazquez Baeza"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Greg Caporaso"
__email__ = "[email protected]"
from collections import defaultdict
from random import shuffle, sample
from numpy import array, inf
from skbio.parse.sequences import parse_fasta, parse_fastq
from skbio.format.sequences import format_fastq_record
from biom import load_table
from qiime.parse import (parse_distmat, parse_mapping_file,
parse_metadata_state_descriptions)
from qiime.format import format_distance_matrix, format_mapping_file
from qiime.util import MetadataMap
def get_otu_ids_from_taxonomy_f(positive_taxa=None,
negative_taxa=None,
metadata_field="taxonomy"):
""" return function to pass to Table.filter_observations for taxon-based filtering
positive_taxa : a list of strings that will be compared to each
taxonomy level in an observation's (i.e., OTU's) metadata_field. If
one of the levels matches exactly (except for case) to an item in
positive_taxa, that OTU will be marked for retention. Default: All
OTUs are retained.
negative_taxa : a list of strings that will be compared to each
taxonomy level in an observation's (i.e., OTU's) metadata_field. If
one of the levels matches exactly (except for case) to an item in
negative_taxa, that OTU will be marked for removal. Default: All
OTUs are retained.
metadata_field : the metadata field to look up in the
observation metadata
Note: string matches are case insensitive.
"""
# define a positive screening function - if the user doesn't pass
# positive_taxa, all OTUs will pass this filter
# (i.e., be marked for retention)
if positive_taxa is None:
positive_taxa = set()
def positive_screen(e):
return True
else:
positive_taxa = set([t.strip().lower() for t in positive_taxa])
def positive_screen(e):
return e in positive_taxa
# define a negative screening function - if the user doesn't pass
# negative_taxa, all OTUs will pass this filter
# (i.e., be marked for retention)
if negative_taxa is None:
negative_taxa = set()
def negative_screen(e):
return False
else:
negative_taxa = set([t.strip().lower() for t in negative_taxa])
def negative_screen(e):
return e in negative_taxa
# The positive_taxa and negative_taxa lists must be mutually exclusive.
if len(positive_taxa & negative_taxa) != 0:
raise ValueError("Your positive and negative taxa lists contain "
"overlapping values. These lists must be mutually "
"exclusive.\nOffending values are: %s" %
' '.join(positive_taxa & negative_taxa))
# Define the function that can be passed to Table.filter_observations
def result(v, oid, md):
positive_hit = False
negative_hit = False
for e in md[metadata_field]:
if positive_screen(e.strip().lower()):
# Note that we don't want to just do
# positive_hit = positive_screen(e.strip())
# we're checking whether any e hits the positive taxa
# and doing that be the same as
# positive_hit = md[metadata_field][-1]
positive_hit = True
if negative_screen(e.strip().lower()):
# see note in previous if statement for why we don't use
# negative_hit = negative_screen(e.strip())
negative_hit = True
return positive_hit and not negative_hit
return result
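# Illustrative sketch (not part of the original module): the returned function
# has the (values, observation_id, metadata) signature expected by
# biom Table.filter(..., axis='observation'). The taxa strings are hypothetical.
def _taxonomy_filter_example():
    keep_f = get_otu_ids_from_taxonomy_f(
        positive_taxa=['p__Proteobacteria'],
        negative_taxa=['f__mitochondria'])
    md = {'taxonomy': ['k__Bacteria', 'p__Proteobacteria']}
    assert keep_f(None, 'OTU1', md)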
def sample_ids_from_metadata_description(mapping_f, valid_states_str):
""" Given a description of metadata, return the corresponding sample ids
"""
map_data, map_header, map_comments = parse_mapping_file(mapping_f)
valid_states = parse_metadata_state_descriptions(valid_states_str)
sample_ids = get_sample_ids(map_data, map_header, valid_states)
if len(sample_ids) < 1:
raise ValueError("All samples have been filtered out for the criteria"
" described in the valid states")
return sample_ids
def get_sample_ids(map_data, map_header, states):
"""Takes col states in {col:[vals]} format.
If val starts with !, exclude rather than include.
Combines cols with and, states with or.
For example, Study:Dog,Hand will return rows where Study is Dog or Hand;
Study:Dog,Hand;BodySite:Palm,Stool will return rows where Study is Dog
or Hand _and_ BodySite is Palm or Stool; Study:*,!Dog;BodySite:*,!Stool
will return all rows except the ones where the Study is Dog or the BodySite
is Stool.
"""
name_to_col = dict([(s, map_header.index(s)) for s in states])
good_ids = []
for row in map_data: # remember to exclude header
include = True
for s, vals in states.items():
curr_state = row[name_to_col[s]]
include = include and (curr_state in vals or '*' in vals) \
and not '!' + curr_state in vals
if include:
good_ids.append(row[0])
return good_ids
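# Hypothetical usage sketch following the state-description syntax documented
# above: keep samples whose Study is Dog or Hand and whose BodySite is anything
# except Stool. The category and state names are illustrative only.
def _metadata_description_example(mapping_f):
    return sample_ids_from_metadata_description(
        mapping_f, "Study:Dog,Hand;BodySite:*,!Stool")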
def sample_ids_from_category_state_coverage(mapping_f,
coverage_category,
subject_category,
min_num_states=None,
required_states=None,
considered_states=None,
splitter_category=None):
"""Filter sample IDs based on subject's coverage of a category.
Given a category that groups samples by subject (subject_category), samples
are filtered by how well a subject covers (i.e. has at least one sample
for) the category states in coverage_category.
Two filtering criteria are provided (min_num_states and required_states).
At least one must be provided. If both are provided, the subject must meet
both criteria to pass the filter (i.e. providing both filters is an AND,
not an OR, operation).
A common use case is to provide a 'time' category for coverage_category and
an 'individual' category for subject_category in order to filter out
individuals from a study that do not have samples for some minimum number
of timepoints (min_num_states) and that do not have samples for certain
timepoints (required_states). For example, this could be the first and last
timepoints in the study.
Returns a set of sample IDs to keep, the number of subjects that were
kept, and a set of the unique category states in coverage_category that
were kept. The set of sample IDs is not guaranteed to be in any specific
order relative to the order of sample IDs or subjects in the mapping file.
Arguments:
mapping_f - metadata mapping file (file-like object)
coverage_category - category to test subjects' coverage (string)
subject_category - category to group samples by subject (string)
min_num_states - minimum number of category states in coverage_category
that a subject must cover (i.e. have at least one sample for) to be
included in results (integer)
required_states - category states in coverage_category that must be
covered by a subject's samples in order to be included in results
(list of strings or items that can be converted to strings)
considered_states - category states that are counted toward the
min_num_states (list of strings or items that can be converted to
strings)
splitter_category - category to split input mapping file on prior to
processing. If not supplied, the mapping file will not be split. If
supplied, a dictionary mapping splitter_category state to results
will be returned instead of the three-element tuple. The supplied
filtering criteria will apply to each split piece of the mapping
file independently (e.g. if an individual passes the filters for
the tongue samples, his/her tongue samples will be included for
the tongue results, even if he/she doesn't pass the filters for the
palm samples)
"""
metadata_map = MetadataMap.parseMetadataMap(mapping_f)
# Make sure our input looks sane.
categories_to_test = [coverage_category, subject_category]
if splitter_category is not None:
categories_to_test.append(splitter_category)
if 'SampleID' in categories_to_test:
raise ValueError("The 'SampleID' category is not suitable for use in "
"this function. Please choose a different category "
"from the metadata mapping file.")
for category in categories_to_test:
if category not in metadata_map.CategoryNames:
raise ValueError("The category '%s' is not in the metadata "
"mapping file." % category)
if len(set(categories_to_test)) < len(categories_to_test):
raise ValueError("The coverage, subject, and (optional) splitter "
"categories must all be unique.")
if required_states is not None:
# required_states must be in coverage_category's states in the mapping
# file.
required_states = set(map(str, required_states))
valid_coverage_states = set(metadata_map.getCategoryValues(
metadata_map.sample_ids, coverage_category))
invalid_coverage_states = required_states - valid_coverage_states
if invalid_coverage_states:
raise ValueError("The category state(s) '%s' are not in the '%s' "
"category in the metadata mapping file." %
(', '.join(invalid_coverage_states),
coverage_category))
if considered_states is not None:
# considered_states is not as restrictive as required_states - we don't
# require that these are present, so it's OK if some of the states
# listed here don't actually show up in the mapping file (allowing
# the user to pass something like range(100) to consider only states
# that fall in some range)
considered_states = set(map(str, considered_states))
# define a function to determine if a state should be considered
consider_state = lambda s: s in considered_states
else:
# define a dummy function to consider all states (the default
# if the user does not provide a list of considered_states)
consider_state = lambda s: True
if min_num_states is None and required_states is None:
raise ValueError("You must specify either the minimum number of "
"category states the subject must have samples for "
"(min_num_states), or the minimal category states "
"the subject must have samples for "
"(required_states), or both. Supplying neither "
"filtering criteria is not supported.")
if splitter_category is None:
results = _filter_sample_ids_from_category_state_coverage(
metadata_map, metadata_map.sample_ids, coverage_category,
subject_category, consider_state, min_num_states,
required_states)
else:
# "Split" the metadata mapping file by extracting only sample IDs that
# match the current splitter category state and using those for the
# actual filtering.
splitter_category_states = defaultdict(list)
for samp_id in metadata_map.sample_ids:
splitter_category_state = \
metadata_map.getCategoryValue(samp_id, splitter_category)
splitter_category_states[splitter_category_state].append(samp_id)
results = {}
for splitter_category_state, sample_ids in \
splitter_category_states.items():
results[splitter_category_state] = \
_filter_sample_ids_from_category_state_coverage(
metadata_map, sample_ids, coverage_category,
subject_category, consider_state, min_num_states,
required_states)
return results
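# Hypothetical usage sketch: keep only subjects sampled at three or more
# timepoints, including the first and last weeks of a study. The category
# names and states are illustrative, not taken from a real mapping file.
def _coverage_filter_example(mapping_f):
    return sample_ids_from_category_state_coverage(
        mapping_f,
        coverage_category='Week',
        subject_category='Individual',
        min_num_states=3,
        required_states=['0', '10'])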
def _filter_sample_ids_from_category_state_coverage(metadata_map,
sample_ids,
coverage_category,
subject_category,
consider_state_fn,
min_num_states=None,
required_states=None):
"""Helper function to perform filtering based on category state coverage.
Not explicitly unit-tested because it is implicitly tested by
sample_ids_from_category_state_coverage's unit tests.
"""
# Build mapping from subject to sample IDs.
subjects = defaultdict(list)
for samp_id in sample_ids:
subject = metadata_map.getCategoryValue(samp_id, subject_category)
subjects[subject].append(samp_id)
# Perform filtering.
samp_ids_to_keep = []
num_subjects_kept = 0
states_kept = []
for subject, samp_ids in subjects.items():
subject_covered_states = set(
metadata_map.getCategoryValues(samp_ids, coverage_category))
# Short-circuit evaluation of ANDing filters.
keep_subject = True
if min_num_states is not None:
# note: when summing a list of boolean values, True == 1 and
# False == 0
if sum([consider_state_fn(s) for s in subject_covered_states]) < \
min_num_states:
keep_subject = False
if keep_subject and required_states is not None:
if len(subject_covered_states & required_states) != \
len(required_states):
keep_subject = False
if keep_subject:
samp_ids_to_keep.extend(samp_ids)
states_kept.extend(subject_covered_states)
num_subjects_kept += 1
return set(samp_ids_to_keep), num_subjects_kept, set(states_kept)
def filter_fasta(input_seqs_f, output_seqs_f, seqs_to_keep, negate=False,
seqid_f=None):
""" Write filtered input_seqs to output_seqs_f which contains only seqs_to_keep
input_seqs can be the output of parse_fasta or parse_fastq
"""
if seqid_f is None:
seqs_to_keep_lookup = {}.fromkeys([seq_id.split()[0]
for seq_id in seqs_to_keep])
# Define a function based on the value of negate
if not negate:
def keep_seq(seq_id):
return seq_id.split()[0] in seqs_to_keep_lookup
else:
def keep_seq(seq_id):
return seq_id.split()[0] not in seqs_to_keep_lookup
else:
if not negate:
keep_seq = seqid_f
else:
keep_seq = lambda x: not seqid_f(x)
for seq_id, seq in parse_fasta(input_seqs_f):
if keep_seq(seq_id):
output_seqs_f.write('>%s\n%s\n' % (seq_id, seq))
output_seqs_f.close()
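# Hypothetical usage sketch: keep only the sequences whose ids appear in an OTU
# table. File paths are placeholders; note that filter_fasta closes the output
# file handle itself.
def _filter_fasta_example():
    ids_to_keep = get_seqs_to_keep_lookup_from_biom('otu_table.biom')
    input_f = open('seqs.fna', 'U')
    output_f = open('seqs_filtered.fna', 'w')
    filter_fasta(input_f, output_f, ids_to_keep)
    input_f.close()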
def filter_fastq(input_seqs_f, output_seqs_f, seqs_to_keep, negate=False,
seqid_f=None):
""" Write filtered input_seqs to output_seqs_f which contains only seqs_to_keep
input_seqs can be the output of parse_fasta or parse_fastq
"""
if seqid_f is None:
seqs_to_keep_lookup = {}.fromkeys([seq_id.split()[0]
for seq_id in seqs_to_keep])
# Define a function based on the value of negate
if not negate:
def keep_seq(seq_id):
return seq_id.split()[0] in seqs_to_keep_lookup
else:
def keep_seq(seq_id):
return seq_id.split()[0] not in seqs_to_keep_lookup
else:
if not negate:
keep_seq = seqid_f
else:
keep_seq = lambda x: not seqid_f(x)
for seq_id, seq, qual in parse_fastq(input_seqs_f,
enforce_qual_range=False):
if keep_seq(seq_id):
output_seqs_f.write(format_fastq_record(seq_id, seq, qual))
output_seqs_f.close()
def filter_mapping_file(map_data, map_header, good_sample_ids,
include_repeat_cols=False, column_rename_ids=None):
"""Filters map according to several criteria.
- keep only sample ids in good_sample_ids
- drop cols that are different in every sample (except id)
- drop cols that are the same in every sample
"""
# keeping samples
to_keep = []
to_keep.extend([i for i in map_data if i[0] in good_sample_ids])
# keeping columns
headers = []
to_keep = zip(*to_keep)
headers.append(map_header[0])
result = [to_keep[0]]
if column_rename_ids:
        # reduce by 1 as we are not using the first column (SampleID)
column_rename_ids = column_rename_ids - 1
for i, l in enumerate(to_keep[1:-1]):
if i == column_rename_ids:
if len(set(l)) != len(result[0]):
raise ValueError(
"The column to rename the samples is not unique.")
result.append(result[0])
result[0] = l
headers.append('SampleID_was_' + map_header[i + 1])
elif include_repeat_cols or len(set(l)) > 1:
headers.append(map_header[i + 1])
result.append(l)
else:
for i, l in enumerate(to_keep[1:-1]):
if include_repeat_cols or len(set(l)) > 1:
headers.append(map_header[i + 1])
result.append(l)
headers.append(map_header[-1])
result.append(to_keep[-1])
result = map(list, zip(*result))
return headers, result
def filter_mapping_file_from_mapping_f(
mapping_f, sample_ids_to_keep, negate=False):
""" Filter rows from a metadata mapping file """
mapping_data, header, comments = parse_mapping_file(mapping_f)
filtered_mapping_data = []
sample_ids_to_keep = {}.fromkeys(sample_ids_to_keep)
for mapping_datum in mapping_data:
hit = mapping_datum[0] in sample_ids_to_keep
if hit and not negate:
filtered_mapping_data.append(mapping_datum)
elif not hit and negate:
filtered_mapping_data.append(mapping_datum)
else:
pass
return format_mapping_file(header, filtered_mapping_data)
def filter_mapping_file_by_metadata_states(mapping_f, valid_states_str):
sample_ids_to_keep = sample_ids_from_metadata_description(
mapping_f,
valid_states_str)
mapping_f.seek(0)
return filter_mapping_file_from_mapping_f(mapping_f, sample_ids_to_keep)
def filter_samples_from_distance_matrix(dm, samples_to_discard, negate=False):
""" Remove specified samples from distance matrix
dm: (sample_ids, dm_data) tuple, as returned from
qiime.parse.parse_distmat; or a file handle that can be passed
to qiime.parse.parse_distmat
"""
try:
sample_ids, dm_data = dm
except ValueError:
# input was provide as a file handle
sample_ids, dm_data = parse_distmat(dm)
sample_lookup = {}.fromkeys([e.split()[0] for e in samples_to_discard])
temp_dm_data = []
new_dm_data = []
new_sample_ids = []
if negate:
def keep_sample(s):
return s in sample_lookup
else:
def keep_sample(s):
return s not in sample_lookup
for row, sample_id in zip(dm_data, sample_ids):
if keep_sample(sample_id):
temp_dm_data.append(row)
new_sample_ids.append(sample_id)
temp_dm_data = array(temp_dm_data).transpose()
for col, sample_id in zip(temp_dm_data, sample_ids):
if keep_sample(sample_id):
new_dm_data.append(col)
new_dm_data = array(new_dm_data).transpose()
return format_distance_matrix(new_sample_ids, new_dm_data)
def negate_tips_to_keep(tips_to_keep, tree):
""" Return the list of tips in the tree that are not in tips_to_keep"""
tips_to_keep = set(tips_to_keep)
# trees can return node names in ways that have multiple quotes, e.g.
# '"node_1"' or ''node_1''. remove them or it can cause problems with
# tips_to_keep not matching
tmp_tips = set([tip.Name for tip in tree.tips()])
tips = set([t.strip('\'').strip('\"') for t in tmp_tips])
return tips - tips_to_keep
def get_seqs_to_keep_lookup_from_biom(biom_f):
otu_table = load_table(biom_f)
return set(otu_table.ids(axis='observation'))
def get_seqs_to_keep_lookup_from_seq_id_file(id_to_keep_f):
"""generate a lookup dict of chimeras in chimera file."""
return (
set([l.split()[0].strip()
for l in id_to_keep_f if l.strip() and not l.startswith('#')])
)
get_seq_ids_from_seq_id_file = get_seqs_to_keep_lookup_from_seq_id_file
def get_seqs_to_keep_lookup_from_fasta_file(fasta_f):
"""return the sequence ids within the fasta file"""
return (
set([seq_id.split()[0] for seq_id, seq in parse_fasta(fasta_f)])
)
get_seq_ids_from_fasta_file = get_seqs_to_keep_lookup_from_fasta_file
# start functions used by filter_samples_from_otu_table.py and
# filter_otus_from_otu_table.py
def get_filter_function(ids_to_keep, min_count, max_count,
min_nonzero, max_nonzero, negate_ids_to_keep=False):
if negate_ids_to_keep:
def f(data_vector, id_, metadata):
return (id_ not in ids_to_keep) and \
(min_count <= data_vector.sum() <= max_count) and \
(min_nonzero <= (data_vector > 0).sum() <= max_nonzero)
else:
def f(data_vector, id_, metadata):
return (id_ in ids_to_keep) and \
(min_count <= data_vector.sum() <= max_count) and \
(min_nonzero <= (data_vector > 0).sum() <= max_nonzero)
return f
def filter_samples_from_otu_table(otu_table, ids_to_keep, min_count, max_count,
negate_ids_to_keep=False):
filter_f = get_filter_function({}.fromkeys(ids_to_keep),
min_count,
max_count,
0, inf, negate_ids_to_keep)
return otu_table.filter(filter_f, axis='sample', inplace=False)
def filter_otus_from_otu_table(otu_table, ids_to_keep, min_count, max_count,
min_samples, max_samples,
negate_ids_to_keep=False):
filter_f = get_filter_function({}.fromkeys(ids_to_keep),
min_count,
max_count,
min_samples, max_samples,
negate_ids_to_keep)
return otu_table.filter(filter_f, axis='observation', inplace=False)
# end functions used by filter_samples_from_otu_table.py and
# filter_otus_from_otu_table.py
def filter_otu_table_to_n_samples(otu_table, n):
""" Filter OTU table to n random samples.
If n is greater than the number of samples or less than zero a
ValueError will be raised.
"""
if not (0 < n <= len(otu_table.ids())):
raise ValueError("Number of samples to filter must be between 0 and "
"the number of samples.")
return otu_table.subsample(n, axis='sample', by_id=True)
def filter_otus_from_otu_map(input_otu_map_fp,
output_otu_map_fp,
min_count,
min_sample_count=1):
""" Filter otus with fewer than min_count sequences from input_otu_map_fp
With very large data sets the number of singletons can be very large,
        and it becomes more efficient to filter them at the otu map stage than
the otu table stage.
There are two outputs from this function: the output file (which is the
filtered otu map) and the list of retained otu ids as a set. Since I
need to return the retained ids for pick_open_reference_otus, this
takes filepaths instead of file handles (since it can't be a generator
and return something).
"""
results = set()
output_otu_map_f = open(output_otu_map_fp, 'w')
for line in open(input_otu_map_fp, 'U'):
fields = line.strip().split('\t')
sample_ids = set([e.split('_')[0] for e in fields[1:]])
# only write this line if the otu has more than n sequences (so
# greater than n tab-separated fields including the otu identifier)
if (len(fields) > min_count) and (len(sample_ids) >= min_sample_count):
output_otu_map_f.write(line)
results.add(fields[0].split('\t')[0])
output_otu_map_f.close()
return results
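# Hypothetical usage sketch: discard singleton OTUs (fewer than two sequences)
# before building the OTU table. File paths are placeholders; the function
# returns the set of retained OTU ids as described in its docstring.
def _filter_otu_map_example():
    return filter_otus_from_otu_map('seqs_otus.txt',
                                    'seqs_otus_no_singletons.txt',
                                    min_count=2)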
def filter_tree(tree, tips_to_keep):
result = tree.copy()
# don't use this, it doesn't eliminate tips!
# result = tree.getSubTree(tips_to_keep,ignore_missing=True)
def f(node):
if node.istip() and\
node.Name is not None and\
node.Name not in tips_to_keep and\
node.Name.strip().strip('"').strip("'") not in tips_to_keep:
return True
return False
result.removeDeleted(f)
result.prune()
return result
| gpl-2.0 |
viaregio/cartridge | cartridge/shop/tests.py | 2 | 20578 |
from datetime import timedelta
from decimal import Decimal
from operator import mul
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.test.client import RequestFactory
from django.utils.timezone import now
from django.utils.unittest import skipUnless
from mezzanine.conf import settings
from mezzanine.core.models import CONTENT_STATUS_PUBLISHED
from mezzanine.utils.tests import run_pyflakes_for_package
from mezzanine.utils.tests import run_pep8_for_package
from cartridge.shop.models import Product, ProductOption, ProductVariation
from cartridge.shop.models import Category, Cart, Order, DiscountCode
from cartridge.shop.models import Sale
from cartridge.shop.forms import OrderForm
from cartridge.shop.checkout import CHECKOUT_STEPS
TEST_STOCK = 5
TEST_PRICE = Decimal("20")
class ShopTests(TestCase):
def setUp(self):
"""
Set up test data - category, product and options.
"""
self._published = {"status": CONTENT_STATUS_PUBLISHED}
self._category = Category.objects.create(**self._published)
self._product = Product.objects.create(**self._published)
for option_type in settings.SHOP_OPTION_TYPE_CHOICES:
for i in range(10):
name = "test%s" % i
ProductOption.objects.create(type=option_type[0], name=name)
self._options = ProductOption.objects.as_fields()
def test_views(self):
"""
Test the main shop views for errors.
"""
# Category.
response = self.client.get(self._category.get_absolute_url())
self.assertEqual(response.status_code, 200)
# Product.
response = self.client.get(self._product.get_absolute_url())
self.assertEqual(response.status_code, 200)
# Cart.
response = self.client.get(reverse("shop_cart"))
self.assertEqual(response.status_code, 200)
# Checkout.
response = self.client.get(reverse("shop_checkout"))
self.assertEqual(response.status_code, 200 if not
settings.SHOP_CHECKOUT_ACCOUNT_REQUIRED else 302)
def test_variations(self):
"""
Test creation of variations from options, and management of empty
variations.
"""
total = reduce(mul, [len(v) for v in self._options.values()])
# Clear variations.
self._product.variations.all().delete()
self.assertEqual(self._product.variations.count(), 0)
# Create single empty variation.
self._product.variations.manage_empty()
self.assertEqual(self._product.variations.count(), 1)
# Create variations from all options.
self._product.variations.create_from_options(self._options)
# Should do nothing.
self._product.variations.create_from_options(self._options)
# All options plus empty.
self.assertEqual(self._product.variations.count(), total + 1)
# Remove empty.
self._product.variations.manage_empty()
self.assertEqual(self._product.variations.count(), total)
def test_stock(self):
"""
Test stock checking on product variations.
"""
self._product.variations.all().delete()
self._product.variations.manage_empty()
variation = self._product.variations.all()[0]
variation.num_in_stock = TEST_STOCK
# Check stock field not in use.
self.assertTrue(variation.has_stock())
# Check available and unavailable quantities.
self.assertTrue(variation.has_stock(TEST_STOCK))
self.assertFalse(variation.has_stock(TEST_STOCK + 1))
# Check sold out.
variation = self._product.variations.all()[0]
variation.num_in_stock = 0
self.assertFalse(variation.has_stock())
def assertCategoryFilteredProducts(self, num_products):
"""
Tests the number of products returned by the category's
current filters.
"""
products = Product.objects.filter(self._category.filters())
self.assertEqual(products.distinct().count(), num_products)
def test_category_filters(self):
"""
Test the category filters returns expected results.
"""
self._product.variations.all().delete()
self.assertCategoryFilteredProducts(0)
# Test option filters - add a variation with one option, and
# assign another option as a category filter. Check that no
# products match the filters, then add the first option as a
# category filter and check that the product is matched.
option_field, options = self._options.items()[0]
option1, option2 = options[:2]
# Variation with the first option.
self._product.variations.create_from_options({option_field: [option1]})
# Filter with the second option
option = ProductOption.objects.get(type=option_field[-1], name=option2)
self.assertCategoryFilteredProducts(0)
# First option as a filter.
option = ProductOption.objects.get(type=option_field[-1], name=option1)
self._category.options.add(option)
self.assertCategoryFilteredProducts(1)
# Test price filters - add a price filter that when combined
# with previously created filters, should match no products.
# Update the variations to match the filter for a unit price,
# then with sale prices, checking correct matches based on sale
# dates.
self._category.combined = True
self._category.price_min = TEST_PRICE
self.assertCategoryFilteredProducts(0)
self._product.variations.all().update(unit_price=TEST_PRICE)
self.assertCategoryFilteredProducts(1)
n, d = now(), timedelta(days=1)
tomorrow, yesterday = n + d, n - d
self._product.variations.all().update(unit_price=0,
sale_price=TEST_PRICE,
sale_from=tomorrow)
self.assertCategoryFilteredProducts(0)
self._product.variations.all().update(sale_from=yesterday)
self.assertCategoryFilteredProducts(1)
# Clean up previously added filters and check that explicitly
# assigned products match.
for option in self._category.options.all():
self._category.options.remove(option)
self._category.price_min = None
self.assertCategoryFilteredProducts(0)
self._category.products.add(self._product)
self.assertCategoryFilteredProducts(1)
# Test the ``combined`` field - create a variation which
# matches a price filter, and a separate variation which
# matches an option filter, and check that the filters
# have no results when ``combined`` is set, and that the
# product matches when ``combined`` is disabled.
self._product.variations.all().delete()
self._product.variations.create_from_options({option_field:
[option1, option2]})
# Price variation and filter.
variation = self._product.variations.get(**{option_field: option1})
variation.unit_price = TEST_PRICE
variation.save()
self._category.price_min = TEST_PRICE
# Option variation and filter.
option = ProductOption.objects.get(type=option_field[-1], name=option2)
self._category.options.add(option)
# Check ``combined``.
self._category.combined = True
self.assertCategoryFilteredProducts(0)
self._category.combined = False
self.assertCategoryFilteredProducts(1)
def _add_to_cart(self, variation, quantity):
"""
Given a variation, creates the dict for posting to the cart
form to add the variation, and posts it.
"""
field_names = [f.name for f in ProductVariation.option_fields()]
data = dict(zip(field_names, variation.options()))
data["quantity"] = quantity
self.client.post(variation.product.get_absolute_url(), data)
def _empty_cart(self, cart):
"""
Given a cart, creates the dict for posting to the cart form
to remove all items from the cart, and posts it.
"""
data = {"items-INITIAL_FORMS": 0, "items-TOTAL_FORMS": 0,
"update_cart": 1}
for i, item in enumerate(cart):
data["items-INITIAL_FORMS"] += 1
data["items-TOTAL_FORMS"] += 1
data["items-%s-id" % i] = item.id
data["items-%s-DELETE" % i] = "on"
self.client.post(reverse("shop_cart"), data)
def _reset_variations(self):
"""
Recreates variations and sets up the first.
"""
self._product.variations.all().delete()
self._product.variations.create_from_options(self._options)
variation = self._product.variations.all()[0]
variation.unit_price = TEST_PRICE
variation.num_in_stock = TEST_STOCK * 2
variation.save()
def test_cart(self):
"""
Test the cart object and cart add/remove forms.
"""
# Test initial cart.
cart = Cart.objects.from_request(self.client)
self.assertFalse(cart.has_items())
self.assertEqual(cart.total_quantity(), 0)
self.assertEqual(cart.total_price(), Decimal("0"))
# Add quantity and check stock levels / cart totals.
self._reset_variations()
variation = self._product.variations.all()[0]
self._add_to_cart(variation, TEST_STOCK)
cart = Cart.objects.from_request(self.client)
variation = self._product.variations.all()[0]
self.assertTrue(variation.has_stock(TEST_STOCK))
self.assertFalse(variation.has_stock(TEST_STOCK * 2))
self.assertTrue(cart.has_items())
self.assertEqual(cart.total_quantity(), TEST_STOCK)
self.assertEqual(cart.total_price(), TEST_PRICE * TEST_STOCK)
# Add remaining quantity and check again.
self._add_to_cart(variation, TEST_STOCK)
cart = Cart.objects.from_request(self.client)
variation = self._product.variations.all()[0]
self.assertFalse(variation.has_stock())
self.assertTrue(cart.has_items())
self.assertEqual(cart.total_quantity(), TEST_STOCK * 2)
self.assertEqual(cart.total_price(), TEST_PRICE * TEST_STOCK * 2)
# Remove from cart.
self._empty_cart(cart)
cart = Cart.objects.from_request(self.client)
variation = self._product.variations.all()[0]
self.assertTrue(variation.has_stock(TEST_STOCK * 2))
self.assertFalse(cart.has_items())
self.assertEqual(cart.total_quantity(), 0)
self.assertEqual(cart.total_price(), Decimal("0"))
def test_discount_codes(self):
"""
Test that all types of discount codes are applied.
"""
self._reset_variations()
variation = self._product.variations.all()[0]
invalid_product = Product.objects.create(**self._published)
invalid_product.variations.create_from_options(self._options)
invalid_variation = invalid_product.variations.all()[0]
invalid_variation.unit_price = TEST_PRICE
invalid_variation.num_in_stock = TEST_STOCK * 2
invalid_variation.save()
discount_value = TEST_PRICE / 2
# Set up discounts with and without a specific product, for
# each type of discount.
for discount_target in ("cart", "item"):
for discount_type in ("percent", "deduct"):
code = "%s_%s" % (discount_target, discount_type)
kwargs = {
"code": code,
"discount_%s" % discount_type: discount_value,
"active": True,
}
cart = Cart.objects.from_request(self.client)
self._empty_cart(cart)
self._add_to_cart(variation, 1)
self._add_to_cart(invalid_variation, 1)
discount = DiscountCode.objects.create(**kwargs)
if discount_target == "item":
discount.products.add(variation.product)
post_data = {"discount_code": code}
self.client.post(reverse("shop_cart"), post_data)
discount_total = self.client.session["discount_total"]
if discount_type == "percent":
expected = TEST_PRICE / Decimal("100") * discount_value
if discount_target == "cart":
                        # Expected amount applies to entire cart.
cart = Cart.objects.from_request(self.client)
expected *= cart.items.count()
elif discount_type == "deduct":
expected = discount_value
self.assertEqual(discount_total, expected)
if discount_target == "item":
# Test discount isn't applied for an invalid product.
cart = Cart.objects.from_request(self.client)
self._empty_cart(cart)
self._add_to_cart(invalid_variation, 1)
self.client.post(reverse("shop_cart"), post_data)
discount_total = self.client.session.get("discount_total")
self.assertEqual(discount_total, None)
def test_order(self):
"""
Test that a completed order contains cart items and that
they're removed from stock.
"""
# Add to cart.
self._reset_variations()
variation = self._product.variations.all()[0]
self._add_to_cart(variation, TEST_STOCK)
cart = Cart.objects.from_request(self.client)
# Post order.
data = {
"step": len(CHECKOUT_STEPS),
"billing_detail_email": "[email protected]",
"discount_code": "",
}
for field_name, field in OrderForm(None, None).fields.items():
value = field.choices[-1][1] if hasattr(field, "choices") else "1"
data.setdefault(field_name, value)
self.client.post(reverse("shop_checkout"), data)
try:
order = Order.objects.from_request(self.client)
except Order.DoesNotExist:
self.fail("Couldn't create an order")
items = order.items.all()
variation = self._product.variations.all()[0]
self.assertEqual(cart.total_quantity(), 0)
self.assertEqual(len(items), 1)
self.assertEqual(items[0].sku, variation.sku)
self.assertEqual(items[0].quantity, TEST_STOCK)
self.assertEqual(variation.num_in_stock, TEST_STOCK)
self.assertEqual(order.item_total, TEST_PRICE * TEST_STOCK)
def test_syntax(self):
"""
Run pyflakes/pep8 across the code base to check for potential errors.
"""
extra_ignore = (
"redefinition of unused 'digest'",
"redefinition of unused 'OperationalError'",
"'from mezzanine.project_template.settings import *' used",
)
warnings = []
warnings.extend(run_pyflakes_for_package("cartridge",
extra_ignore=extra_ignore))
warnings.extend(run_pep8_for_package("cartridge"))
if warnings:
self.fail("Syntax warnings!\n\n%s" % "\n".join(warnings))
class SaleTests(TestCase):
def setUp(self):
product1 = Product(unit_price="1.27")
product1.save()
ProductVariation(unit_price="1.27", product_id=product1.id).save()
ProductVariation(unit_price="1.27", product_id=product1.id).save()
product2 = Product(unit_price="1.27")
product2.save()
ProductVariation(unit_price="1.27", product_id=product2.id).save()
ProductVariation(unit_price="1.27", product_id=product2.id).save()
sale = Sale(
title="30% OFF - Ken Bruce has gone mad!",
discount_percent="30"
)
sale.save()
sale.products.add(product1)
sale.products.add(product2)
sale.save()
def test_sale_save(self):
"""
        Regression test for GitHub issue #24. Incorrect exception handling meant
        that in some cases (usually percentage discount) sale_prices were not
        being applied to all products and their variations.
        Note: This issue was only relevant when using MySQL and with exceptions
turned on (which is the default when DEBUG=True).
"""
# Initially no sale prices will be set.
for product in Product.objects.all():
self.assertFalse(product.sale_price)
for variation in ProductVariation.objects.all():
self.assertFalse(variation.sale_price)
# Activate the sale and verify the prices.
sale = Sale.objects.all()[0]
sale.active = True
sale.save()
# Afterward ensure that all the sale prices have been updated.
for product in Product.objects.all():
self.assertTrue(product.sale_price)
for variation in ProductVariation.objects.all():
self.assertTrue(variation.sale_price)
try:
__import__("stripe")
import mock
except ImportError:
stripe_used = False
else:
stripe_handler = "cartridge.shop.payment.stripe_api.process"
stripe_used = settings.SHOP_HANDLER_PAYMENT == stripe_handler
if stripe_used:
settings.STRIPE_API_KEY = "dummy"
from cartridge.shop.payment import stripe_api
class StripeTests(TestCase):
"""Test the Stripe payment backend"""
def setUp(self):
# Every test needs access to the request factory.
self.factory = RequestFactory()
def test_charge(self, mock_charge):
# Create a fake request object with the test data
request = self.factory.post("/shop/checkout/")
request.POST["card_number"] = "4242424242424242"
request.POST["card_expiry_month"] = "06"
request.POST["card_expiry_year"] = "2014"
request.POST["billing_detail_street"] = "123 Evergreen Terrace"
request.POST["billing_detail_city"] = "Springfield"
request.POST["billing_detail_state"] = "WA"
request.POST["billing_detail_postcode"] = "01234"
request.POST["billing_detail_country"] = "USA"
# Order form isn't used by stripe backend
order_form = None
# Create an order
order = Order.objects.create(total=Decimal("22.37"))
# Code under test
stripe_api.process(request, order_form, order)
# Assertion
mock_charge.create.assert_called_with(
amount=2237,
currency="usd",
card={'number': "4242424242424242",
'exp_month': "06",
'exp_year': "14",
'address_line1': "123 Evergreen Terrace",
'address_city': "Springfield",
'address_state': "WA",
'address_zip': "01234",
'country': "USA"})
StripeTests = skipUnless(stripe_used, "Stripe not used")(StripeTests)
if stripe_used:
charge = "stripe.Charge"
StripeTests.test_charge = mock.patch(charge)(StripeTests.test_charge)
class TaxationTests(TestCase):
def test_default_handler_exists(self):
'''
Ensure that the handler specified in default settings exists as well as
the default setting itself.
'''
from mezzanine.utils.importing import import_dotted_path
settings.use_editable()
assert hasattr(settings, 'SHOP_HANDLER_TAX'), \
'Setting SHOP_HANDLER_TAX not found.'
handler = lambda s: import_dotted_path(s) if s else lambda *args: None
tax_handler = handler(settings.SHOP_HANDLER_TAX)
assert tax_handler is not None, \
'Could not find default SHOP_HANDLER_TAX function.'
def test_set_tax(self):
'''
Regression test to ensure that set_tax still sets the appropriate
session variables.
'''
from cartridge.shop.utils import set_tax
tax_type = 'Tax for Testing'
tax_total = 56.65
class request:
session = {}
set_tax(request, tax_type, tax_total)
assert request.session.get('tax_type') == tax_type, \
'tax_type not set with set_tax'
assert request.session.get('tax_total') == tax_total, \
'tax_total not set with set_tax'
| bsd-2-clause |
venkatant/msproject | flow_statistics.py | 1 | 7329 | __author__ = 'venkat'
from header import *
from json_http_handler import *
class FlowWindow:
bottom_frame = 0
bottom_row = 0
class FlowTable:
def __init__(self):
self.dest_ip = None
self.dest_mask = None
self.dest_mac = None
self.dest_port = None
self.dest_node = None
return
def updateflowtable(self, destIp, destMask, destMac, destPort, destNode):
self.dest_ip = destIp
self.dest_mask = destMask
self.dest_mac = destMac
self.dest_port = destPort
self.dest_node = destNode
return
def displayflowtable(self):
print(self.dest_ip,
self.dest_mask,
self.dest_mac,
self.dest_port,
self.dest_node)
return
class FlowStatistics:
def __init__(self):
self.listbox = None
self.toplevel = None
self.no_of_flows = 0
def CurSelet(self):
print("Hello")
        # the listbox widget is stored as self.listbox in __init__
        switch = str(self.listbox.get(self.listbox.curselection()))
print(switch)
def fillListWithNodesInfo(self):
'''
Create an object of Http JSON Handler Class to receive
resp from respective Rest URL's
'''
http_obj = HttpJsonHandler()
json_nodes = http_obj.getnodeinfo()
for node in json_nodes['nodeProperties']:
self.listbox.insert(END, node['node']['id'])
def displayFlowTableTitle(self, bottom_frame, bottom_row):
for column in range(5):
if column == 0:
label = Label(bottom_frame, text="Destination IP", borderwidth=0, width=15, fg="red")
elif column == 1:
label = Label(bottom_frame, text="Destination Mask", borderwidth=0, width=15, fg="red")
elif column == 2:
label = Label(bottom_frame, text="Output Mac", borderwidth=0, width=15, fg="red")
elif column == 3:
label = Label(bottom_frame, text="Output Port", borderwidth=0, width=15, fg="red")
elif column == 4:
label = Label(bottom_frame, text="Output Node", borderwidth=0, width=25, fg="red")
label.configure(bg="white")
label.grid(row=bottom_row, column=column, sticky="nsew", padx=1, pady=1)
return
def displayFlowTableContent(self, flow_list, flow_window_obj):
bottom_frame = flow_window_obj.bottom_frame
bottom_row = flow_window_obj.bottom_row
#for row in range(4):
for row in flow_list:
current_row = []
for column in range(5):
if column == 0:
label = Label(bottom_frame, text="%s" % row.dest_ip, borderwidth=0, width=15)
elif column == 1:
label = Label(bottom_frame, text="%s" % row.dest_mask, borderwidth=0, width=15)
elif column == 2:
label = Label(bottom_frame, text="%s" % row.dest_mac, borderwidth=0, width=15)
elif column == 3:
label = Label(bottom_frame, text="%s" % row.dest_port, borderwidth=0, width=15)
elif column == 4:
label = Label(bottom_frame, text="%s" % row.dest_node, borderwidth=0, width=25)
label.configure(bg="white")
label.grid(row=bottom_row, column=column, sticky="nsew", padx=1, pady=1)
current_row.append(label)
bottom_row += 1
for column in range(5):
bottom_frame.grid_columnconfigure(column, weight=1)
return
def CurListSelet(self, evt, flow_window_obj):
#mylistbox = evt.widget
switch=str((self.listbox.get(self.listbox.curselection())))
print(switch)
'''
Create an object of Http JSON Handler Class to receive
resp from respective Rest URL's
'''
http_obj = HttpJsonHandler()
json_flows = http_obj.getflowinfo(switch)
no_of_flows = 0
flow_list = []
for flowCount in json_flows['flowStatistic']:
destIp = json_flows['flowStatistic'][no_of_flows]['flow']['match']['matchField'][0]['value']
destMask = json_flows['flowStatistic'][no_of_flows]['flow']['match']['matchField'][0]['mask']
destPort = 0
destnode = '00:00:00:00:00:00:00:00'
try:
destMac = json_flows['flowStatistic'][no_of_flows]['flow']['actions'][0]['address']
try:
destPort = json_flows['flowStatistic'][no_of_flows]['flow']['actions'][1]['port']['id']
destnode = json_flows['flowStatistic'][no_of_flows]['flow']['actions'][1]['port']['node']['id']
except:
print('')
except KeyError:
destPort = json_flows['flowStatistic'][no_of_flows]['flow']['actions'][0]['port']['id']
destnode = json_flows['flowStatistic'][no_of_flows]['flow']['actions'][0]['port']['node']['id']
destMac = '000000000000'
# destIp, destMask, destMac, destPort, destNode
# Create an instance of FlowTable class
flow_table_entry = FlowTable()
flow_table_entry.updateflowtable(destIp, destMask, destMac, destPort, destnode)
flow_list.append(flow_table_entry)
no_of_flows += 1
flow_table_entry.displayflowtable()
        # sort the list with dest_ip as key
        flow_list.sort(key=lambda host: host.dest_ip)
self.displayFlowTableContent(flow_list, flow_window_obj)
def flowstatistics():
# Create an instance of FlowTable class
#flow_table_entry = FlowTable()
# Create an instance of FlowStatistics class
obj = FlowStatistics()
'''
scrollbar.config(command=obj.mylistbox.yview)
submit = Button(obj.toplevel, text="Submit", command=obj.CurSelet)
submit.pack()
'''
toplevel = Toplevel()
toplevel.title("Flow Monitoring")
toplevel.geometry("750x250")
top_row = 0
bottom_row = 0
top_frame = Frame(toplevel)
top_frame.pack(side=TOP)
top_label = Label(top_frame, text=" SELECT SWITCH TO GET FLOW ENTRIES", fg="red", borderwidth=0, width=40)
top_label.grid(row=top_row, rowspan=1)
top_row += 1
bottom_frame = Frame(toplevel)
bottom_frame.pack(side=TOP)
bottom_label = Label(bottom_frame, fg="green")
bottom_label.grid(row=bottom_row)
bottom_row += 1
scrollbar = Scrollbar(top_frame)
obj.listbox = Listbox(top_frame, yscrollcommand=scrollbar.set)
obj.listbox.config(height=4)
# Fills the list of nodes in the List Box
obj.fillListWithNodesInfo()
obj.listbox.grid(row=top_row, column=0, sticky="nsew", padx=1, pady=1)
scrollbar.grid(row=top_row, column=1, sticky="nsew", padx=1, pady=1)
scrollbar.config(command=obj.listbox.yview)
obj.displayFlowTableTitle(bottom_frame, bottom_row)
bottom_row += 1
flow_window_obj = FlowWindow()
flow_window_obj.bottom_row = bottom_row
flow_window_obj.bottom_frame = bottom_frame
# Below code to activate on selection of items in List Box
obj.listbox.bind('<<ListboxSelect>>', lambda event, arg=flow_window_obj: obj.CurListSelet(event, flow_window_obj))
return | gpl-2.0 |
ninotoshi/tensorflow | tensorflow/contrib/learn/python/learn/tests/test_custom_decay.py | 7 | 2270 | # Copyright 2015-present The Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import random
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn import datasets
from tensorflow.contrib.learn.python.learn.estimators._sklearn import accuracy_score
from tensorflow.contrib.learn.python.learn.estimators._sklearn import train_test_split
class CustomDecayTest(tf.test.TestCase):
def testIrisExponentialDecay(self):
random.seed(42)
iris = datasets.load_iris()
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
test_size=0.2,
random_state=42)
# setup exponential decay function
def exp_decay(global_step):
return tf.train.exponential_decay(learning_rate=0.1,
global_step=global_step,
decay_steps=100,
decay_rate=0.001)
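    # The classifier below is handed exp_decay itself rather than a constant,
    # so the learning rate is recomputed from the global step during training.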
classifier = learn.TensorFlowDNNClassifier(hidden_units=[10, 20, 10],
n_classes=3,
steps=500,
learning_rate=exp_decay)
classifier.fit(X_train, y_train)
score = accuracy_score(y_test, classifier.predict(X_test))
self.assertGreater(score, 0.65, "Failed with score = {0}".format(score))
if __name__ == "__main__":
tf.test.main()
| apache-2.0 |
rosenvladimirov/odoo-fixes | stock_account/wizard/stock_change_standard_price.py | 315 | 3824 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
class change_standard_price(osv.osv_memory):
_name = "stock.change.standard.price"
_description = "Change Standard Price"
_columns = {
'new_price': fields.float('Price', required=True, digits_compute=dp.get_precision('Product Price'),
help="If cost price is increased, stock variation account will be debited "
"and stock output account will be credited with the value = (difference of amount * quantity available).\n"
"If cost price is decreased, stock variation account will be creadited and stock input account will be debited."),
}
def default_get(self, cr, uid, fields, context=None):
""" To get default values for the object.
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param fields: List of fields for which we want default values
@param context: A standard dictionary
@return: A dictionary which of fields with values.
"""
if context is None:
context = {}
if context.get("active_model") == 'product.product':
product_pool = self.pool.get('product.product')
else:
product_pool = self.pool.get('product.template')
product_obj = product_pool.browse(cr, uid, context.get('active_id', False))
res = super(change_standard_price, self).default_get(cr, uid, fields, context=context)
price = product_obj.standard_price
if 'new_price' in fields:
res.update({'new_price': price})
return res
def change_price(self, cr, uid, ids, context=None):
""" Changes the Standard Price of Product.
And creates an account move accordingly.
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param ids: List of IDs selected
@param context: A standard dictionary
@return:
"""
if context is None:
context = {}
rec_id = context.get('active_id', False)
assert rec_id, _('Active ID is not set in Context.')
if context.get("active_model") == 'product.product':
prod_obj = self.pool.get('product.product')
rec_id = prod_obj.browse(cr, uid, rec_id, context=context).product_tmpl_id.id
prod_obj = self.pool.get('product.template')
res = self.browse(cr, uid, ids, context=context)
prod_obj.do_change_standard_price(cr, uid, [rec_id], res[0].new_price, context)
return {'type': 'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
tracyjacks/PyMetWeather | pymetweather/pymetweather.py | 1 | 13941 | import curses
from datetime import date, timedelta
import locale
from textwrap import fill
from pymetweather.forecasts import WeatherForecast
from pymetweather.get_args import get_command_line_args, get_config_args
locale.setlocale(locale.LC_ALL, '')
class WeatherPrinter(object):
def __init__(self, forecast, screen_width):
self.fcs = forecast
self.cols = [
(['Time'], 5, '{$:02}:00'),
(['Conditions'], 22, '{W}'),
(['Precipitation', 'probability'], 15, '{Pp:>3} %'),
(['Temperature', '(Feels Like)'], 14, '{T:>2} {F} °C'),
(['Wind Speed', '(Gust)'], 16, '{S:>2} {G} mph'),
(['Wind', 'Direction'], 12, '{D:>3}'),
(['Relative', 'Humidity'], 10, '{H} %'),
(['Visibility'], 12, '{V}'),
(['UV', 'Index'], 7, '{U}')]
self.daily_cols = [
(['Day'], 13, '{$}', '{$}'),
(['Conditions'], 22, '{W}', '{W}'),
(['Precipitation', 'probability'], 15,
'{PPd:>3} %', '{PPn:>3} %'),
(['Max day/', 'Min night', 'Temperature', '(Feels like)'], 14,
'{Dm:>2} {FDm} °C', '{Nm:>2} {FNm} °C'),
(['Wind Speed', '(Gust)'], 16,
'{S:>2} {Gn} mph', '{S:>2} {Gm} mph'),
(['Wind', 'Direction'], 12, '{D:>3}', '{D:>3}'),
(['Relative', 'Humidity'], 10, '{Hn} %', '{Hm} %'),
(['Visibility'], 12, '{V}', '{V}')]
self.top_pad = curses.newpad(2000, 500)
self.tab_pad = curses.newpad(2000, 500)
self.bottom_bar = curses.newpad(1, 500)
self.help_screen_pad = curses.newpad(500, 500)
self.top_maxy = 0
self.tab_maxy = 0
self.tab_maxx = 0
self.screen_width = screen_width
self.print_bottom_bar()
self.setup_help()
@staticmethod
def addustr(win, text, *args):
win.addstr(text.encode('utf-8'), *args)
def print_help_screen(self, top_only):
if not top_only:
self.addustr(self.tab_pad, self.help_string)
self.tab_maxy = self.help_maxy
self.tab_maxx = self.help_maxx
def setup_help(self):
help = [
('q', 'Quit'),
('?', 'Show this help'),
('t', "Today's weather"),
('d', 'Five day summary'),
('0', "Today's weather"),
('1', "Tomorrow's weather"),
('2', 'Weather for 2 days later'),
('3', 'Weather for 3 days later'),
('4', 'Weather for 4 days later'),
            ('5–8', 'UK outlook for the next month'),
('l', 'UK outlook for the next month'),
('left arrow', 'scroll left'),
            ('right arrow', 'scroll right'),
('up arrow', 'scroll up'),
('down arrow', 'scroll down'),
]
c1width = max([len(k[0]) for k in help])
c2width = max([len(k[1]) for k in help])
self.help_string = ''
for h in help:
self.help_string += h[0].ljust(c1width + 1) + ' : ' + h[1] + '\n'
self.help_string = self.help_string.strip('\n')
self.help_maxy = len(help) - 1
self.help_maxx = c1width + c2width - 1
def print_bottom_bar(self):
self.addustr(
self.bottom_bar, '?: help q: quit t: today '
'd: 5 day summary 1–4: days 1 to 4 '
'l: longterm'.ljust(499),
curses.A_REVERSE | curses.A_BOLD)
def print_longer_term_weather(self):
regf1 = self.fcs.reg_fcs[2]['Paragraph']
regf2 = self.fcs.reg_fcs[3]['Paragraph']
self.addustr(
self.top_pad, self.wrap_text(regf1['title']), curses.A_BOLD)
self.addustr(self.top_pad, '\n' + self.wrap_text(regf1['$']) + '\n\n')
self.addustr(
self.top_pad, self.wrap_text(regf2['title']), curses.A_BOLD)
self.addustr(self.top_pad, '\n' + self.wrap_text(regf2['$']))
self.top_maxy = self.top_pad.getyx()[0] + 1
def wrap_text(self, text):
return fill(text, self.screen_width)
def print_hourly_top(self, n_day, day):
title = 'Weather for {}, {}'.format(
self.fcs.site_name, day.strftime('%A %d %B %Y'))
self.addustr(self.top_pad, self.wrap_text(title) + '\n', curses.A_BOLD)
regfindex = 0
regf = self.fcs.reg_fcs[0]['Paragraph']
if n_day == 0:
if 'Headline' in regf[regfindex]['title']:
self.addustr(self.top_pad, self.wrap_text(regf[regfindex]['$'])
+ '\n\n')
regfindex += 1
if 'Today' in regf[regfindex]['title']:
today_text = self.wrap_text('Today: ' + regf[regfindex]['$'])
self.addustr(self.top_pad, today_text[:7], curses.A_BOLD)
self.addustr(self.top_pad, today_text[7:] + '\n\n')
regfindex += 1
if 'Tonight' in regf[regfindex]['title']:
tonight_text = self.wrap_text(regf[regfindex]['title'] + ' ' +
regf[regfindex]['$'])
lent = len(regf[regfindex]['title'])
self.addustr(self.top_pad, tonight_text[:lent], curses.A_BOLD)
self.addustr(self.top_pad, tonight_text[lent:] + '\n\n')
regfindex += 1
elif n_day == 1:
for regfindex in range(len(regf)):
if day.strftime('%A') in regf[regfindex]['title']:
self.addustr(
self.top_pad,
self.wrap_text(regf[regfindex]['$']) + '\n\n')
break
else:
regf = self.fcs.reg_fcs[1]['Paragraph']
outlook = self.wrap_text(regf['title'] + ' ' + regf['$'])
lent = len(regf['title']) + 1
self.addustr(self.top_pad, '\n' + outlook[:lent], curses.A_BOLD)
self.addustr(self.top_pad, outlook[lent:] + '\n\n')
self.top_maxy = self.top_pad.getyx()[0] + 1
def print_hourly_tab(self, n_day, period):
width_counter = 0
for c in self.cols:
for i, head in enumerate(c[0]):
head_text = '{:^{}}'.format(head, c[1])
self.tab_pad.move(i, width_counter)
self.addustr(self.tab_pad, head_text, curses.A_BOLD)
width_counter += c[1]
top_row = (
self.tab_pad.getyx()[0] + max([len(c[0]) for c in self.cols]) - 1)
for i, rep in enumerate(period['Rep']):
width_counter = 0
for c in self.cols:
cell_text = '{:^{}}'.format(c[2].format(**rep), c[1])
self.tab_pad.move(top_row + i, width_counter)
self.addustr(self.tab_pad, cell_text)
width_counter += c[1]
self.tab_maxy = self.tab_pad.getyx()[0]
self.tab_maxx = sum([c[1] for c in self.cols]) - 2
def print_hourly_weather(self, n_day, top_only=False):
day = date.today() + timedelta(n_day)
period = self.fcs.hourly_fcs['Period'][n_day]
assert period['value'] == day.strftime('%Y-%m-%dZ')
self.print_hourly_top(n_day, day)
if not top_only:
self.print_hourly_tab(n_day, period)
def print_weather_brief(self, top_only=False):
period = self.fcs.daily_fcs['Period']
width_counter = 0
for c in self.daily_cols:
for i, head in enumerate(c[0]):
head_text = '{:^{}}'.format(head, c[1])
self.tab_pad.move(i, width_counter)
self.addustr(self.tab_pad, head_text, curses.A_BOLD)
width_counter += c[1]
top_row = (
self.tab_pad.getyx()[0] +
max([len(c[0]) for c in self.daily_cols]))
c = self.daily_cols[0]
for i, rep in enumerate(period):
cell_text = '{:<{}} '.format(rep['value'], c[1] - 3)
self.tab_pad.move(top_row + i * 4, 0)
self.addustr(self.tab_pad, cell_text)
cell_text = '{:>{}} '.format(
c[2].format(**rep['Rep'][0]), c[1] - 3)
self.tab_pad.move(top_row + i * 4 + 1, 0)
self.addustr(self.tab_pad, cell_text)
cell_text = '{:>{}} '.format(
c[3].format(**rep['Rep'][1]), c[1] - 3)
self.tab_pad.move(top_row + i * 4 + 2, 0)
self.addustr(self.tab_pad, cell_text)
for i, rep in enumerate(period):
rep = rep['Rep']
width_counter = self.daily_cols[0][1]
for c in self.daily_cols[1:]:
cell_text = '{:^{}}'.format(c[2].format(**rep[0]), c[1])
self.tab_pad.move(top_row + i * 4 + 1, width_counter)
self.addustr(self.tab_pad, cell_text)
cell_text = '{:^{}}'.format(c[3].format(**rep[1]), c[1])
self.tab_pad.move(top_row + i * 4 + 2, width_counter)
self.addustr(self.tab_pad, cell_text)
width_counter += c[1]
self.tab_maxy = self.tab_pad.getyx()[0]
self.tab_maxx = sum([c[1] for c in self.daily_cols]) - 2
def print_screen(self, screen, screen_width=None, top_only=False):
if screen_width is not None:
self.screen_width = screen_width
self.top_pad.clear()
self.top_maxy = 0
if not top_only:
self.tab_maxy = 0
self.tab_maxx = 0
self.tab_pad.clear()
if screen in range(0, 5):
self.print_hourly_weather(screen, top_only)
elif screen == 8:
self.print_longer_term_weather()
elif screen == 7:
self.print_weather_brief(top_only)
elif screen == 9:
self.print_help_screen(top_only)
class WeatherApp(object):
key_map = {
'0': 0, '1': 1, '2': 2, '3': 3, '4': 4,
'5': 8, '6': 8, '7': 8, '8': 8, '9': 9,
't': 0,
'l': 8,
'd': 7,
'b': 7,
'?': 9}
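    # Screen indices used by key_map (cf. WeatherPrinter.print_screen):
    # 0-4 show hourly weather for today up to four days ahead, 7 the five
    # day summary, 8 the longer term outlook and 9 the help screen.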
def __init__(self, stdscr, fcs, start_screen=0):
self.stdscr = stdscr
curses.curs_set(0)
curses.use_default_colors()
self.fcs = fcs
self.scrolly = 0
self.scrollx = 0
self.maxy = 0
self.maxx = 0
self.y = self.stdscr.getmaxyx()[0] - 1
self.x = self.stdscr.getmaxyx()[1] - 1
self.printer = WeatherPrinter(self.fcs, self.x + 1)
self.print_screen(start_screen)
def print_resize(self):
self.y = self.stdscr.getmaxyx()[0] - 1
self.x = self.stdscr.getmaxyx()[1] - 1
self.printer.print_screen(self.screen_showing, self.x + 1, True)
self.maxx = max(self.printer.tab_maxx, self.x - 1)
self.maxy = self.printer.tab_maxy + self.printer.top_maxy
if self.y > (self.maxy - self.scrolly):
self.scrolly = max(self.maxy - (self.y - 1), 0)
if self.x > (self.maxx - self.scrollx):
self.scrollx = max(self.maxx - (self.x - 1), 0)
self.draw_screen()
def print_screen(self, screen):
self.screen_showing = screen
self.scrolly = 0
self.scrollx = 0
self.printer.print_screen(self.screen_showing)
self.maxy = self.printer.tab_maxy + self.printer.top_maxy
self.maxx = max(self.printer.tab_maxx, self.x - 1)
self.draw_screen()
def draw_screen(self):
self.stdscr.clear()
self.stdscr.refresh()
top_y = self.printer.top_maxy
try:
assert self.y == self.stdscr.getmaxyx()[0] - 1
assert self.x == self.stdscr.getmaxyx()[1] - 1
except AssertionError:
self.print_resize()
return
self.printer.top_pad.noutrefresh(
self.scrolly, 0, 0, 0, min(top_y, self.y), self.x)
if self.y - (top_y - self.scrolly) > 1:
self.printer.tab_pad.noutrefresh(
max(0, self.scrolly - top_y), self.scrollx,
top_y - self.scrolly, 0,
self.y, self.x)
self.printer.bottom_bar.noutrefresh(
0, 0, self.y, 0, self.y, self.x)
try:
assert self.y == self.stdscr.getmaxyx()[0] - 1
assert self.x == self.stdscr.getmaxyx()[1] - 1
except AssertionError:
self.print_resize()
return
with open('/tmp/log', 'a') as f:
f.write('{}\t{}\t{}\t{}\t{}\t{}\n'.format(
self.maxy, self.y, self.scrolly,
self.maxx, self.x, self.scrollx))
curses.doupdate()
def main_loop(self):
while True:
c = self.stdscr.getkey()
if c == 'q':
return
elif c in self.key_map and self.screen_showing != self.key_map[c]:
self.print_screen(self.key_map[c])
elif c == 'KEY_RESIZE':
self.print_resize()
elif c == 'KEY_DOWN':
if self.scrolly + self.y - 1 < self.maxy:
self.scrolly += 1
self.draw_screen()
elif c == 'KEY_UP' and self.scrolly != 0:
self.scrolly -= 1
self.draw_screen()
elif c == 'KEY_LEFT' and self.scrollx != 0:
self.scrollx -= 1
self.draw_screen()
elif c == 'KEY_RIGHT':
if self.scrollx + self.x - 1 < self.maxx:
self.scrollx += 1
self.draw_screen()
def run_curses_app(screen, fcs):
wap = WeatherApp(screen, fcs)
wap.main_loop()
def run_app(args):
fcs = WeatherForecast(args['api_key'], args['location'], args['datadir'])
if args['quiet_update']:
fcs.load(True)
return
fcs.load(args['dont_update'])
curses.wrapper(run_curses_app, fcs)
def main():
args = get_config_args()
args.update(get_command_line_args())
run_app(args)
| gpl-2.0 |
ktnyt/chainer | chainer/testing/distribution_test.py | 2 | 12804 | import functools
import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer.testing import array
from chainer.testing import attr
from chainer import utils
def skip_not_in_test_target(test_target):
def decorator(f):
@functools.wraps(f)
def new_f(self, *args, **kwargs):
if test_target not in self.test_targets:
self.skipTest(
"\'%s\' is not exist in test_targets." % test_target)
else:
f(self, *args, **kwargs)
return new_f
return decorator
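# Concrete test cases are expected to subclass distribution_unittest and
# implement setUp_configure(), filling in the attributes this class uses
# below, e.g. self.dist, self.scipy_dist, self.params, self.scipy_params,
# self.shape, self.sample_shape, self.is_variable, self.test_targets and a
# sample_for_test() helper (list inferred from this file, not exhaustive).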
class distribution_unittest(unittest.TestCase):
scipy_onebyone = False
def setUp(self):
self.support = 'real'
if not hasattr(self, 'event_shape'):
self.event_shape = ()
self.continuous = True
self.test_targets = set()
self.options = {}
self.setUp_configure()
targets_not_found = self.test_targets - {
"batch_shape", "cdf", "entropy", "event_shape", "icdf", "log_cdf",
"log_prob", "log_survival", "mean", "prob", "sample", "stddev",
"support", "survival", "variance"}
if targets_not_found:
raise ValueError(
"invalid target(s): {}".format(targets_not_found))
if self.is_variable:
self.params = {k: chainer.Variable(v)
for k, v in self.params.items()}
def scipy_onebyone_params_iter(self):
for index in numpy.ndindex(self.shape):
yield {k: v[index] for k, v in self.scipy_params.items()}
@property
def cpu_dist(self):
params = self.params
params.update(self.options)
return self.dist(**params)
@property
def gpu_dist(self):
if self.is_variable:
gpu_params = {k: cuda.to_gpu(v.data)
for k, v in self.params.items()}
gpu_params = {k: chainer.Variable(v)
for k, v in gpu_params.items()}
else:
gpu_params = {k: cuda.to_gpu(v)
for k, v in self.params.items()}
gpu_params.update(self.options)
return self.dist(**gpu_params)
@skip_not_in_test_target('batch_shape')
def test_batch_shape_cpu(self):
self.assertEqual(self.cpu_dist.batch_shape, self.shape)
@attr.gpu
@skip_not_in_test_target('batch_shape')
def test_batch_shape_gpu(self):
self.assertEqual(self.gpu_dist.batch_shape, self.shape)
def check_cdf(self, is_gpu):
smp = self.sample_for_test()
if is_gpu:
cdf1 = self.gpu_dist.cdf(cuda.to_gpu(smp)).data
else:
cdf1 = self.cpu_dist.cdf(smp).data
cdf2 = self.scipy_dist.cdf(smp, **self.scipy_params)
array.assert_allclose(cdf1, cdf2)
@skip_not_in_test_target('cdf')
def test_cdf_cpu(self):
self.check_cdf(False)
@attr.gpu
@skip_not_in_test_target('cdf')
def test_cdf_gpu(self):
self.check_cdf(True)
def check_entropy(self, is_gpu):
if is_gpu:
ent1 = self.gpu_dist.entropy.data
else:
ent1 = self.cpu_dist.entropy.data
if self.scipy_onebyone:
ent2 = []
for one_params in self.scipy_onebyone_params_iter():
ent2.append(self.scipy_dist.entropy(**one_params))
ent2 = numpy.vstack(ent2).reshape(self.shape)
else:
ent2 = self.scipy_dist.entropy(**self.scipy_params)
array.assert_allclose(ent1, ent2)
@skip_not_in_test_target('entropy')
def test_entropy_cpu(self):
self.check_entropy(False)
@attr.gpu
@skip_not_in_test_target('entropy')
def test_entropy_gpu(self):
self.check_entropy(True)
@skip_not_in_test_target('event_shape')
def test_event_shape_cpu(self):
self.assertEqual(self.cpu_dist.event_shape, self.event_shape)
@attr.gpu
@skip_not_in_test_target('event_shape')
def test_event_shape_gpu(self):
self.assertEqual(self.gpu_dist.event_shape, self.event_shape)
def check_icdf(self, is_gpu):
smp = numpy.random.uniform(
1e-5, 1 - 1e-5, self.sample_shape + self.shape
).astype(numpy.float32)
if is_gpu:
icdf1 = self.gpu_dist.icdf(cuda.to_gpu(smp)).data
else:
icdf1 = self.cpu_dist.icdf(smp).data
icdf2 = self.scipy_dist.ppf(smp, **self.scipy_params)
array.assert_allclose(icdf1, icdf2)
@skip_not_in_test_target('icdf')
def test_icdf_cpu(self):
self.check_icdf(False)
@attr.gpu
@skip_not_in_test_target('icdf')
def test_icdf_gpu(self):
self.check_icdf(True)
def check_log_cdf(self, is_gpu):
smp = self.sample_for_test()
if is_gpu:
log_cdf1 = self.gpu_dist.log_cdf(cuda.to_gpu(smp)).data
else:
log_cdf1 = self.cpu_dist.log_cdf(smp).data
log_cdf2 = self.scipy_dist.logcdf(smp, **self.scipy_params)
array.assert_allclose(log_cdf1, log_cdf2)
@skip_not_in_test_target('log_cdf')
def test_log_cdf_cpu(self):
self.check_log_cdf(False)
@attr.gpu
@skip_not_in_test_target('log_cdf')
def test_log_cdf_gpu(self):
self.check_log_cdf(True)
def check_log_prob(self, is_gpu):
smp = self.sample_for_test()
if is_gpu:
log_prob1 = self.gpu_dist.log_prob(cuda.to_gpu(smp)).data
else:
log_prob1 = self.cpu_dist.log_prob(smp).data
if self.continuous:
scipy_prob = self.scipy_dist.logpdf
else:
scipy_prob = self.scipy_dist.logpmf
if self.scipy_onebyone:
onebyone_smp = smp.reshape(*[
utils.size_of_shape(sh)
for sh in [self.sample_shape, self.shape, self.event_shape]])
onebyone_smp = numpy.swapaxes(onebyone_smp, 0, 1)
onebyone_smp = onebyone_smp.reshape((-1,) + self.sample_shape
+ self.event_shape)
log_prob2 = []
for one_params, one_smp in zip(
self.scipy_onebyone_params_iter(), onebyone_smp):
log_prob2.append(scipy_prob(one_smp, **one_params))
log_prob2 = numpy.vstack(log_prob2)
log_prob2 = log_prob2.reshape(
utils.size_of_shape(self.shape), -1).T
log_prob2 = log_prob2.reshape(self.sample_shape + self.shape)
else:
log_prob2 = scipy_prob(smp, **self.scipy_params)
array.assert_allclose(log_prob1, log_prob2)
@skip_not_in_test_target('log_prob')
def test_log_prob_cpu(self):
self.check_log_prob(False)
@attr.gpu
@skip_not_in_test_target('log_prob')
def test_log_prob_gpu(self):
self.check_log_prob(True)
def check_log_survival(self, is_gpu):
smp = self.sample_for_test()
if is_gpu:
log_survival1 = \
self.gpu_dist.log_survival_function(cuda.to_gpu(smp)).data
else:
log_survival1 = self.cpu_dist.log_survival_function(smp).data
log_survival2 = self.scipy_dist.logsf(smp, **self.scipy_params)
array.assert_allclose(log_survival1, log_survival2)
@skip_not_in_test_target('log_survival')
def test_log_survival_cpu(self):
self.check_log_survival(False)
@attr.gpu
@skip_not_in_test_target('log_survival')
def test_log_survival_gpu(self):
self.check_log_survival(True)
def check_mean(self, is_gpu):
if is_gpu:
mean1 = self.gpu_dist.mean.data
else:
mean1 = self.cpu_dist.mean.data
if self.scipy_onebyone:
mean2 = []
for one_params in self.scipy_onebyone_params_iter():
mean2.append(self.scipy_dist.mean(**one_params))
mean2 = numpy.vstack(mean2).reshape(
self.shape + self.cpu_dist.event_shape)
else:
mean2 = self.scipy_dist.mean(**self.scipy_params)
array.assert_allclose(mean1, mean2)
@skip_not_in_test_target('mean')
def test_mean_cpu(self):
self.check_mean(False)
@attr.gpu
@skip_not_in_test_target('mean')
def test_mean_gpu(self):
self.check_mean(True)
def check_prob(self, is_gpu):
smp = self.sample_for_test()
if is_gpu:
prob1 = self.gpu_dist.prob(cuda.to_gpu(smp)).data
else:
prob1 = self.cpu_dist.prob(smp).data
if self.continuous:
prob2 = self.scipy_dist.pdf(smp, **self.scipy_params)
else:
prob2 = self.scipy_dist.pmf(smp, **self.scipy_params)
array.assert_allclose(prob1, prob2)
@skip_not_in_test_target('prob')
def test_prob_cpu(self):
self.check_prob(False)
@attr.gpu
@skip_not_in_test_target('prob')
def test_prob_gpu(self):
self.check_prob(True)
def check_sample(self, is_gpu):
if is_gpu:
smp1 = self.gpu_dist.sample(
sample_shape=(100000,)+self.sample_shape).data
else:
smp1 = self.cpu_dist.sample(
sample_shape=(100000,)+self.sample_shape).data
if self.scipy_onebyone:
smp2 = []
for one_params in self.scipy_onebyone_params_iter():
smp2.append(self.scipy_dist.rvs(
size=(100000,)+self.sample_shape, **one_params))
smp2 = numpy.vstack(smp2)
smp2 = smp2.reshape((utils.size_of_shape(self.shape), 100000)
+ self.sample_shape
+ self.cpu_dist.event_shape)
smp2 = numpy.rollaxis(
smp2, 0, smp2.ndim-len(self.cpu_dist.event_shape))
smp2 = smp2.reshape((100000,) + self.sample_shape + self.shape
+ self.cpu_dist.event_shape)
else:
smp2 = self.scipy_dist.rvs(
size=(100000,) + self.sample_shape + self.shape,
**self.scipy_params)
array.assert_allclose(smp1.mean(axis=0), smp2.mean(axis=0),
atol=3e-2, rtol=3e-2)
array.assert_allclose(smp1.std(axis=0), smp2.std(axis=0),
atol=3e-2, rtol=3e-2)
@skip_not_in_test_target('sample')
def test_sample_cpu(self):
self.check_sample(False)
@attr.gpu
@skip_not_in_test_target('sample')
def test_sample_gpu(self):
self.check_sample(True)
def check_stddev(self, is_gpu):
if is_gpu:
stddev1 = self.gpu_dist.stddev.data
else:
stddev1 = self.cpu_dist.stddev.data
stddev2 = self.scipy_dist.std(**self.scipy_params)
array.assert_allclose(stddev1, stddev2)
@skip_not_in_test_target('stddev')
def test_stddev_cpu(self):
self.check_stddev(False)
@attr.gpu
@skip_not_in_test_target('stddev')
def test_stddev_gpu(self):
self.check_stddev(True)
@skip_not_in_test_target('support')
def test_support_cpu(self):
self.assertEqual(self.cpu_dist.support, self.support)
@attr.gpu
@skip_not_in_test_target('support')
def test_support_gpu(self):
self.assertEqual(self.gpu_dist.support, self.support)
def check_survival(self, is_gpu):
smp = self.sample_for_test()
if is_gpu:
survival1 = self.gpu_dist.survival_function(
cuda.to_gpu(smp)).data
else:
survival1 = self.cpu_dist.survival_function(smp).data
survival2 = self.scipy_dist.sf(smp, **self.scipy_params)
array.assert_allclose(survival1, survival2)
@skip_not_in_test_target('survival')
def test_survival_cpu(self):
self.check_survival(False)
@attr.gpu
@skip_not_in_test_target('survival')
def test_survival_gpu(self):
self.check_survival(True)
def check_variance(self, is_gpu):
if is_gpu:
variance1 = self.gpu_dist.variance.data
else:
variance1 = self.cpu_dist.variance.data
if self.scipy_onebyone:
variance2 = []
for one_params in self.scipy_onebyone_params_iter():
variance2.append(self.scipy_dist.var(**one_params))
variance2 = numpy.vstack(variance2).reshape(
self.shape + self.cpu_dist.event_shape)
else:
variance2 = self.scipy_dist.var(**self.scipy_params)
array.assert_allclose(variance1, variance2)
@skip_not_in_test_target('variance')
def test_variance_cpu(self):
self.check_variance(False)
@attr.gpu
@skip_not_in_test_target('variance')
def test_variance_gpu(self):
self.check_variance(True)
| mit |
jcarva/digital_image_processing_assignments | spatial_domain/python/task1_6.py | 1 | 1722 | # coding=UTF-8
# 1.6. Thresholding applied to Y, with threshold m and two options: a) m
# chosen by the user; b) m = mean of the values in the Y band;
import numpy as np
import utils
import color
def main():
image = utils.load_image('lenna.png')
yiq_image = color.rgb2yiq(image)
grayscale_image = yiq_image[:, :, 2] # Y
threshold_value = 255 * 0.2
mean_value = np.mean(grayscale_image)
threshold_user_image = _segment(grayscale_image, threshold_value)
original_threshold_user_image = np.copy(yiq_image)
original_threshold_user_image[:, :, 2] = threshold_user_image
original_threshold_user_image = color.yiq2rgb(original_threshold_user_image)
threshold_mean_image = _segment(grayscale_image, mean_value)
original_threshold_mean_image = np.copy(yiq_image)
original_threshold_mean_image[:, :, 2] = threshold_mean_image
original_threshold_mean_image = color.yiq2rgb(original_threshold_mean_image)
utils.display_single_image('Original Image', image)
utils.display_single_image('YIQ Image', yiq_image)
utils.display_single_image('Y Channel', grayscale_image)
utils.display_single_image('Y Threshold (User ' + str(threshold_value) + ')', threshold_user_image)
utils.display_single_image('Back to Original (User ' + str(threshold_value) + ')', original_threshold_user_image)
utils.display_single_image('Y Threshold (Mean ' + str(mean_value) + ')', threshold_mean_image)
utils.display_single_image('Back to Original (Mean ' + str(mean_value) + ')', original_threshold_mean_image)
utils.wait_key_and_destroy_windows()
def _segment(image, m):
output = (image >= m) * 255
return output
if __name__ == "__main__":
main() | gpl-3.0 |
PXke/invenio | invenio/legacy/websubmit/functions/Create_Modify_Interface.py | 1 | 12922 | ## This file is part of Invenio.
## Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
This is the Create_Modify_Interface function (along with its helpers).
It is used by WebSubmit for the "Modify Bibliographic Information" action.
"""
__revision__ = "$Id$"
import os
import re
import time
import pprint
from invenio.legacy.dbquery import run_sql
from invenio.legacy.websubmit.config import InvenioWebSubmitFunctionError
from invenio.legacy.websubmit.functions.Retrieve_Data import Get_Field
from invenio.ext.logging import register_exception
def Create_Modify_Interface_getfieldval_fromfile(cur_dir, fld=""):
"""Read a field's value from its corresponding text file in 'cur_dir' (if it exists) into memory.
Delete the text file after having read-in its value.
This function is called on the reload of the modify-record page. This way, the field in question
can be populated with the value last entered by the user (before reload), instead of always being
populated with the value still found in the DB.
"""
fld_val = ""
if len(fld) > 0 and os.access("%s/%s" % (cur_dir, fld), os.R_OK|os.W_OK):
fp = open( "%s/%s" % (cur_dir, fld), "r" )
fld_val = fp.read()
fp.close()
try:
os.unlink("%s/%s"%(cur_dir, fld))
except OSError:
# Cannot unlink file - ignore, let WebSubmit main handle this
pass
fld_val = fld_val.strip()
return fld_val
def Create_Modify_Interface_getfieldval_fromDBrec(fieldcode, recid):
"""Read a field's value from the record stored in the DB.
This function is called when the Create_Modify_Interface function is called for the first time
when modifying a given record, and field values must be retrieved from the database.
"""
fld_val = ""
if fieldcode != "":
for next_field_code in [x.strip() for x in fieldcode.split(",")]:
fld_val += "%s\n" % Get_Field(next_field_code, recid)
fld_val = fld_val.rstrip('\n')
return fld_val
def Create_Modify_Interface_transform_date(fld_val):
"""Accept a field's value as a string. If the value is a date in one of the following formats:
DD Mon YYYY (e.g. 23 Apr 2005)
YYYY-MM-DD (e.g. 2005-04-23)
...transform this date value into "DD/MM/YYYY" (e.g. 23/04/2005).
"""
if re.search("^[0-9]{2} [a-z]{3} [0-9]{4}$", fld_val, re.IGNORECASE) is not None:
try:
fld_val = time.strftime("%d/%m/%Y", time.strptime(fld_val, "%d %b %Y"))
except (ValueError, TypeError):
# bad date format:
pass
elif re.search("^[0-9]{4}-[0-9]{2}-[0-9]{2}$", fld_val, re.IGNORECASE) is not None:
try:
fld_val = time.strftime("%d/%m/%Y", time.strptime(fld_val, "%Y-%m-%d"))
except (ValueError,TypeError):
# bad date format:
pass
return fld_val
def Create_Modify_Interface(parameters, curdir, form, user_info=None):
"""
Create an interface for the modification of a document, based on
the fields that the user has chosen to modify. This avoids having
to redefine a submission page for the modifications, but rely on
the elements already defined for the initial submission i.e. SBI
action (The only page that needs to be built for the modification
is the page letting the user specify a document to modify).
This function should be added at step 1 of your modification
workflow, after the functions that retrieves report number and
record id (Get_Report_Number, Get_Recid). Functions at step 2 are
the one executed upon successful submission of the form.
Create_Modify_Interface expects the following parameters:
* "fieldnameMBI" - the name of a text file in the submission
working directory that contains a list of the names of the
WebSubmit fields to include in the Modification interface.
      These field names are separated by "\n" or "+".
Given the list of WebSubmit fields to be included in the
modification interface, the values for each field are retrieved
for the given record (by way of each WebSubmit field being
configured with a MARC Code in the WebSubmit database). An HTML
FORM is then created. This form allows a user to modify certain
field values for a record.
The file referenced by 'fieldnameMBI' is usually generated from a
    multiple select form field: users can then select one or several
fields to modify
Note that the function will display WebSubmit Response elements,
but will not be able to set an initial value: this must be done by
the Response element iteself.
Additionally the function creates an internal field named
'Create_Modify_Interface_DONE' on the interface, that can be
retrieved in curdir after the form has been submitted.
This flag is an indicator for the function that displayed values
should not be retrieved from the database, but from the submitted
values (in case the page is reloaded). You can also rely on this
value when building your WebSubmit Response element in order to
retrieve value either from the record, or from the submission
directory.
"""
global sysno,rn
t = ""
# variables declaration
fieldname = parameters['fieldnameMBI']
# Path of file containing fields to modify
the_globals = {
'doctype' : doctype,
'action' : action,
'act' : action, ## for backward compatibility
'step' : step,
'access' : access,
'ln' : ln,
'curdir' : curdir,
'uid' : user_info['uid'],
'uid_email' : user_info['email'],
'rn' : rn,
'last_step' : last_step,
'action_score' : action_score,
'__websubmit_in_jail__' : True,
'form': form,
'sysno': sysno,
'user_info' : user_info,
'__builtins__' : globals()['__builtins__'],
'Request_Print': Request_Print
}
if os.path.exists("%s/%s" % (curdir, fieldname)):
fp = open( "%s/%s" % (curdir, fieldname), "r" )
fieldstext = fp.read()
fp.close()
fieldstext = re.sub("\+","\n", fieldstext)
fields = fieldstext.split("\n")
else:
res = run_sql("SELECT fidesc FROM sbmFIELDDESC WHERE name=%s", (fieldname,))
if len(res) == 1:
fields = res[0][0].replace(" ", "")
fields = re.findall("<optionvalue=.*>", fields)
regexp = re.compile("""<optionvalue=(?P<quote>['|"]?)(?P<value>.*?)(?P=quote)""")
fields = [regexp.search(x) for x in fields]
fields = [x.group("value") for x in fields if x is not None]
fields = [x for x in fields if x not in ("Select", "select")]
else:
raise InvenioWebSubmitFunctionError("cannot find fields to modify")
#output some text
t = t+"<CENTER bgcolor=\"white\">The document <B>%s</B> has been found in the database.</CENTER><br />Please modify the following fields:<br />Then press the 'END' button at the bottom of the page<br />\n" % rn
for field in fields:
subfield = ""
value = ""
marccode = ""
text = ""
# retrieve and display the modification text
t = t + "<FONT color=\"darkblue\">\n"
res = run_sql("SELECT modifytext FROM sbmFIELDDESC WHERE name=%s", (field,))
if len(res)>0:
t = t + "<small>%s</small> </FONT>\n" % res[0][0]
# retrieve the marc code associated with the field
res = run_sql("SELECT marccode FROM sbmFIELDDESC WHERE name=%s", (field,))
if len(res) > 0:
marccode = res[0][0]
# then retrieve the previous value of the field
if os.path.exists("%s/%s" % (curdir, "Create_Modify_Interface_DONE")):
# Page has been reloaded - get field value from text file on server, not from DB record
value = Create_Modify_Interface_getfieldval_fromfile(curdir, field)
else:
# First call to page - get field value from DB record
value = Create_Modify_Interface_getfieldval_fromDBrec(marccode, sysno)
# If field is a date value, transform date into format DD/MM/YYYY:
value = Create_Modify_Interface_transform_date(value)
res = run_sql("SELECT * FROM sbmFIELDDESC WHERE name=%s", (field,))
if len(res) > 0:
element_type = res[0][3]
numcols = res[0][6]
numrows = res[0][5]
size = res[0][4]
maxlength = res[0][7]
val = res[0][8]
fidesc = res[0][9]
if element_type == "T":
text = "<TEXTAREA name=\"%s\" rows=%s cols=%s wrap>%s</TEXTAREA>" % (field, numrows, numcols, value)
elif element_type == "F":
text = "<INPUT TYPE=\"file\" name=\"%s\" size=%s maxlength=\"%s\">" % (field, size, maxlength)
elif element_type == "I":
value = re.sub("[\n\r\t]+", "", value)
text = "<INPUT name=\"%s\" size=%s value=\"%s\"> " % (field, size, val)
text = text + "<SCRIPT>document.forms[0].%s.value=\"%s\";</SCRIPT>" % (field, value)
elif element_type == "H":
text = "<INPUT type=\"hidden\" name=\"%s\" value=\"%s\">" % (field, val)
text = text + "<SCRIPT>document.forms[0].%s.value=\"%s\";</SCRIPT>" % (field, value)
elif element_type == "S":
values = re.split("[\n\r]+", value)
text = fidesc
if re.search("%s\[\]" % field, fidesc):
multipletext = "[]"
else:
multipletext = ""
if len(values) > 0 and not(len(values) == 1 and values[0] == ""):
text += "<SCRIPT>\n"
text += "var i = 0;\n"
text += "el = document.forms[0].elements['%s%s'];\n" % (field, multipletext)
text += "max = el.length;\n"
for val in values:
text += "var found = 0;\n"
text += "var i=0;\n"
text += "while (i != max) {\n"
text += " if (el.options[i].value == \"%s\" || el.options[i].text == \"%s\") {\n" % (val, val)
text += " el.options[i].selected = true;\n"
text += " found = 1;\n"
text += " }\n"
text += " i=i+1;\n"
text += "}\n"
#text += "if (found == 0) {\n"
#text += " el[el.length] = new Option(\"%s\", \"%s\", 1,1);\n"
#text += "}\n"
text += "</SCRIPT>\n"
elif element_type == "D":
text = fidesc
elif element_type == "R":
try:
co = compile(fidesc.replace("\r\n", "\n"), "<string>", "exec")
## Note this exec is safe WRT global variable because the
## Create_Modify_Interface has already been parsed by
## execfile within a protected environment.
the_globals['text'] = ''
exec co in the_globals
text = the_globals['text']
except:
msg = "Error in evaluating response element %s with globals %s" % (pprint.pformat(field), pprint.pformat(globals()))
register_exception(req=None, alert_admin=True, prefix=msg)
raise InvenioWebSubmitFunctionError(msg)
else:
text = "%s: unknown field type" % field
t = t + "<small>%s</small>" % text
# output our flag field
t += '<input type="hidden" name="Create_Modify_Interface_DONE" value="DONE\n" />'
# output some more text
t = t + "<br /><br /><CENTER><small><INPUT type=\"button\" width=400 height=50 name=\"End\" value=\"END\" onClick=\"document.forms[0].step.value = 2;user_must_confirm_before_leaving_page = false;document.forms[0].submit();\"></small></CENTER></H4>"
return t
| gpl-2.0 |
quonb/atom-generator | atom_generator/video.py | 1 | 2028 | import re
class YouTube(object):
def __init__(self, url=None):
self._video_id = self._extract_id(url)
def __call__(self, url=False):
if url is None or url:
self._video_id = self._extract_id(url)
return self._video_id
def _extract_id(self, url=None):
"""Extract youtube video ID
Based on `youtube_dl` code
"""
if not url:
return None
YOUTUBE_URL = r"""^
(?:
(?:https?://)? # http(s):// (optional)
(?:(?:(?:
(?:\w+\.)?youtube(?:-nocookie)?\.com/|
tube\.majestyc\.net/|
youtube\.googleapis\.com/) # the various hostnames, with wildcard subdomains
(?:.*?\#/)? # handle anchor (#/) redirect urls
(?: # the various things that can precede the ID:
(?:(?:v|embed|e)/)| # v/ or embed/ or e/
(?: # or the v= param in all its forms
(?:
(?:watch|movie)(?:_popup)?(?:\.php)?
)? # preceding watch(_popup|.php) or nothing (like /?v=xxxx)
(?:\?|\#!?) # the params delimiter ? or # or #!
(?:.*?&)? # any other preceding param (like /?s=tuff&v=xxxx)
v=
)
))|
youtu\.be/ # just youtu.be/xxxx
)
)? # all until now is optional -> you can pass the naked ID
([0-9A-Za-z_-]{11}) # here is it! the YouTube video ID
(?(1).+)? # if we found the ID, everything can follow
$"""
video_id = re.match(YOUTUBE_URL, str(url), re.VERBOSE)
return video_id and video_id.group(1)
def thumbnail(self):
return self._video_id and "http://i.ytimg.com/vi/%s/0.jpg" % self._video_id
def video(self):
return self._video_id and "http://www.youtube.com/watch?v=%s" % self._video_id
| apache-2.0 |
RockchipOpensourceCommunity/popmetal-kernel-3.14 | tools/perf/scripts/python/futex-contention.py | 11261 | 1486 | # futex contention
# (c) 2010, Arnaldo Carvalho de Melo <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Translation of:
#
# http://sourceware.org/systemtap/wiki/WSFutexContention
#
# to perf python scripting.
#
# Measures futex contention
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Util import *
process_names = {}
thread_thislock = {}
thread_blocktime = {}
lock_waits = {} # long-lived stats on (tid,lock) blockage elapsed time
process_names = {} # long-lived pid-to-execname mapping
def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm,
nr, uaddr, op, val, utime, uaddr2, val3):
cmd = op & FUTEX_CMD_MASK
if cmd != FUTEX_WAIT:
return # we don't care about originators of WAKE events
process_names[tid] = comm
thread_thislock[tid] = uaddr
thread_blocktime[tid] = nsecs(s, ns)
def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm,
nr, ret):
if thread_blocktime.has_key(tid):
elapsed = nsecs(s, ns) - thread_blocktime[tid]
add_stats(lock_waits, (tid, thread_thislock[tid]), elapsed)
del thread_blocktime[tid]
del thread_thislock[tid]
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
for (tid, lock) in lock_waits:
min, max, avg, count = lock_waits[tid, lock]
print "%s[%d] lock %x contended %d times, %d avg ns" % \
(process_names[tid], tid, lock, count, avg)
| gpl-2.0 |
orlov-vo/mtasa | vendor/google-breakpad/src/tools/gyp/test/ninja/solibs_avoid_relinking/gyptest-solibs-avoid-relinking.py | 216 | 1427 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verify that relinking a solib doesn't relink a dependent executable if the
solib's public API hasn't changed.
"""
import os
import sys
import TestCommon
import TestGyp
# NOTE(fischman): This test will not work with other generators because the
# API-hash-based-mtime-preservation optimization is only implemented in
# ninja.py. It could be extended to the make.py generator as well pretty
# easily, probably.
# (also, it tests ninja-specific out paths, which would have to be generalized
# if this was extended to other generators).
test = TestGyp.TestGyp(formats=['ninja'])
test.run_gyp('solibs_avoid_relinking.gyp')
# Build the executable, grab its timestamp, touch the solib's source, rebuild
# executable, ensure timestamp hasn't changed.
test.build('solibs_avoid_relinking.gyp', 'b')
test.built_file_must_exist('b' + TestCommon.exe_suffix)
pre_stat = os.stat(test.built_file_path('b' + TestCommon.exe_suffix))
os.utime(os.path.join(test.workdir, 'solib.cc'),
(pre_stat.st_atime, pre_stat.st_mtime + 100))
test.sleep()
test.build('solibs_avoid_relinking.gyp', 'b')
post_stat = os.stat(test.built_file_path('b' + TestCommon.exe_suffix))
if pre_stat.st_mtime != post_stat.st_mtime:
test.fail_test()
else:
test.pass_test()
| gpl-3.0 |
readevalprint/mezzanine | mezzanine/utils/cache.py | 5 | 3908 | from __future__ import unicode_literals
from hashlib import md5
from time import time
from django.core.cache import cache
from django.utils.lru_cache import lru_cache
from django.utils.cache import _i18n_cache_key_suffix
from mezzanine.conf import settings
from mezzanine.utils.sites import current_site_id
from mezzanine.utils.conf import middlewares_or_subclasses_installed
def _hashed_key(key):
"""
Hash keys when talking directly to the cache API, to avoid
keys longer than the backend supports (eg memcache limit is 255)
"""
return md5(key.encode("utf-8")).hexdigest()
def cache_set(key, value, timeout=None, refreshed=False):
"""
Wrapper for ``cache.set``. Stores the cache entry packed with
the desired cache expiry time. When the entry is retrieved from
cache, the packed expiry time is also checked, and if past,
the stale cache entry is stored again with an expiry that has
``CACHE_SET_DELAY_SECONDS`` added to it. In this case the entry
is not returned, so that a cache miss occurs and the entry
should be set by the caller, but all other callers will still get
the stale entry, so no real cache misses ever occur.
"""
if timeout is None:
timeout = settings.CACHE_MIDDLEWARE_SECONDS
refresh_time = timeout + time()
real_timeout = timeout + settings.CACHE_SET_DELAY_SECONDS
packed = (value, refresh_time, refreshed)
return cache.set(_hashed_key(key), packed, real_timeout)
def cache_get(key):
"""
Wrapper for ``cache.get``. The expiry time for the cache entry
is stored with the entry. If the expiry time has past, put the
stale entry back into cache, and don't return it to trigger a
fake cache miss.
"""
packed = cache.get(_hashed_key(key))
if packed is None:
return None
value, refresh_time, refreshed = packed
if (time() > refresh_time) and not refreshed:
cache_set(key, value, settings.CACHE_SET_DELAY_SECONDS, True)
return None
return value
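# Caller pattern implied by the two helpers above (sketch, not part of the
# original module): a stale entry produces exactly one fake miss, so a single
# caller recomputes while other callers keep receiving the stale value.
#
#   value = cache_get(key)
#   if value is None:
#       value = compute_value()  # hypothetical expensive call
#       cache_set(key, value)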
@lru_cache(maxsize=None)
def cache_installed():
"""
Returns ``True`` if a cache backend is configured, and the
cache middleware classes or subclasses thereof are present.
This will be evaluated once per run, and then cached.
"""
has_key = bool(getattr(settings, "NEVERCACHE_KEY", ""))
return (has_key and settings.CACHES and not settings.TESTING and
middlewares_or_subclasses_installed([
"mezzanine.core.middleware.UpdateCacheMiddleware",
"mezzanine.core.middleware.FetchFromCacheMiddleware",
]))
def cache_key_prefix(request):
"""
Cache key for Mezzanine's cache middleware. Adds the current
site ID.
"""
cache_key = "%s.%s.%s" % (
settings.CACHE_MIDDLEWARE_KEY_PREFIX,
current_site_id(),
# This last part used to indicate the device type for the request,
# but device detection was removed in Mezzanine 4.3.
# The "default" value was kept to maintain existing cache keys.
# See: https://github.com/stephenmcd/mezzanine/pull/1783
"default",
)
return _i18n_cache_key_suffix(request, cache_key)
def nevercache_token():
"""
Returns the secret token that delimits content wrapped in
the ``nevercache`` template tag.
"""
return "nevercache." + settings.NEVERCACHE_KEY
def add_cache_bypass(url):
"""
Adds the current time to the querystring of the URL to force a
cache reload. Used for when a form post redirects back to a
page that should display updated content, such as new comments or
ratings.
"""
if not cache_installed():
return url
hash_str = ""
if "#" in url:
url, hash_str = url.split("#", 1)
hash_str = "#" + hash_str
url += "?" if "?" not in url else "&"
return url + "t=" + str(time()).replace(".", "") + hash_str
| bsd-2-clause |
kc-lab/dms2dfe | dms2dfe/lib/io_data_files.py | 2 | 14758 | #!usr/bin/python
# Copyright 2016, Rohan Dandage <[email protected],[email protected]>
# This program is distributed under General Public License v. 3.
"""
================================
``io_data_files``
================================
"""
import sys
import pandas as pd
from os.path import exists,basename,abspath,dirname,expanduser
import logging
from glob import glob
import numpy as np
from dms2dfe.lib.io_seq_files import get_fsta_feats
logging.basicConfig(format='[%(asctime)s] %(levelname)s\tfrom %(filename)s in %(funcName)s(..): %(message)s',level=logging.DEBUG) # filename=cfg_xls_fh+'.log'
import pickle
## DEFS
def is_cfg_ok(cfg_dh,cfgs) :
"""
Checks if the required files are present in given directory.
:param cfg_dh: path to directory.
:param cfgs: list of names of files.
"""
cfg_dh_cfgs=glob(cfg_dh+"/*")
cfg_dh_cfgs=[basename(cfg_dh_cfg) for cfg_dh_cfg in cfg_dh_cfgs]
for cfg in cfgs : # check if required sheets are present
if not cfg in cfg_dh_cfgs :
logging.error("%s does not exist" % cfg)
return False
break
return True
def auto_find_missing_paths(prj_dh):
"""
Finds the missing paths in the configuration given in cfg/ directory
:param prj_dh: path to the project directory
"""
info=pd.read_csv(prj_dh+"/cfg/info")
info_path_vars=[varn for varn in info['varname'] if ("_fh" in varn) or ("_dh" in varn)]
info=info.set_index("varname")
#find pdb_fh and fsta_fh in prj_dh
if pd.isnull(info.loc["pdb_fh","input"]):
try:
info.loc["pdb_fh","input"]=glob("%s/*.pdb" % prj_dh)[0]
except:
logging.error("can not find .pdb file")
if pd.isnull(info.loc["fsta_fh","input"]):
try:
fsta_fhs=glob("%s/*.fasta" % prj_dh)
for fsta_fh in fsta_fhs:
if not (('prt' in fsta_fh) or ('_cctmr1.' in fsta_fh)):
info.loc["fsta_fh","input"]=fsta_fh
break
except:
logging.error("could not find .fasta file")
info_paths=[info.loc[info_path_var,"input"] for info_path_var in info_path_vars]
info.reset_index().to_csv(prj_dh+"/cfg/info",index=False)
# if any(pd.isnull(info_paths)):
info_paths_missing=[v for v in info_path_vars if (pd.isnull(info.loc[v,"input"]) and info.loc[v,"default"])]
if len(info_paths_missing)>0:
logging.error("Values for following variables are missing in 'project_dir/cfg/info' file.")
# print [p for p in info_paths if pd.isnull(p)]
print info_paths_missing
sys.exit()
def get_raw_input(info,var):
"""
    Get interactive inputs from user
:param info: dict, with information about experiment
:param var: variable whose value is obtained from interactive shell
"""
# from dms2dfe.lib.io_dfs import set_index
# info=set_index(info,'var')
val=raw_input("%s: %s (default: %s) =" % (var,info.loc[var, "description"],info.loc[var, "default"]))
return val
from dms2dfe.lib.io_seq_files import cctmr_fasta2ref_fasta
def info2src(prj_dh):
"""
This converts `.csv` configuration file to `.py` source file saved in `/tmp/`.
:param prj_dh: path to project directory
"""
import subprocess
from dms2dfe.lib.io_seq_files import fasta_nts2prt
csv2src("%s/../cfg/info" % abspath(dirname(__file__)),"%s/../tmp/info.py" % (abspath(dirname(__file__))))
auto_find_missing_paths(prj_dh)
info=pd.read_csv(prj_dh+"/cfg/info")
# info=auto_find_missing_paths(prj_dh)
info_path_vars=[varn for varn in info['varname'] if ("_fh" in varn) or ("_dh" in varn)]
info=info.set_index("varname")
# find still missing paths ones
info_paths=[info.loc[info_path_var,"input"] for info_path_var in info_path_vars]
for info_path_var,info_path in zip(info_path_vars,info_paths):
# if not exists(info_path):
if not ('bowtie' in info_path):
if not exists(info_path):
if info_path_var=='rscript_fh':
info_path = subprocess.check_output(["which", "Rscript"]).replace('\n','')
# print info_path
while not exists(info_path):
logging.error('Path to files do not exist. Include correct path in cfg/info. %s : %s' % (info_path_var,info_path))
info_path=get_raw_input(info,info_path_var)
info.loc[info_path_var,'input']=info_path
if not pd.isnull(info.loc['cctmr','input']):
cctmr=info.loc['cctmr','input']
cctmr=[int("%s" % i) for i in cctmr.split(" ")]
fsta_fh=cctmr_fasta2ref_fasta(info.loc['fsta_fh','input'],cctmr)
else:
fsta_fh=info.loc['fsta_fh','input']
info.loc['prj_dh','input']=abspath(prj_dh)
info.loc['fsta_id','input'],info.loc['fsta_seq','input'],info.loc['fsta_len','input']=get_fsta_feats(fsta_fh)
host=info.loc['host','input']
if pd.isnull(host):
host=info.loc['host','default']
info.loc['prt_seq','input']=fasta_nts2prt(fsta_fh,host=host).replace('*','X')
info.reset_index().to_csv(prj_dh+"/cfg/info",index=False)
csv2src(prj_dh+"/cfg/info","%s/../tmp/info.py" % (abspath(dirname(__file__))))
csv2src(prj_dh+"/cfg/info",prj_dh+"/cfg/info.py")
logging.info("configuration compiled: %s/cfg/info" % prj_dh)
def csv2src(csv_fh,src_fh):
"""
This writes `.csv` to `.py` source file.
:param csv_fh: path to input `.csv` file.
:param src_fh: path to output `.py` source file.
"""
info=pd.read_csv(csv_fh)
info=info.set_index('varname')
src_f=open(src_fh,'w')
src_f.write("#!usr/bin/python\n")
src_f.write("\n")
src_f.write("# source file for dms2dfe's configuration \n")
src_f.write("\n")
for var in info.iterrows() :
val=info['input'][var[0]]
if pd.isnull(val):
val=info['default'][var[0]]
src_f.write("%s='%s' #%s\n" % (var[0],val,info["description"][var[0]]))
src_f.close()
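# Sketch of the expected cfg csv layout (column names taken from the code
# above; the example row itself is hypothetical):
#   varname,input,default,description
#   fsta_fh,/path/to/ref.fasta,,path to the reference fasta file
# which csv2src would write out as:
#   fsta_fh='/path/to/ref.fasta' #path to the reference fasta file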
def raw_input2info(prj_dh,inputORdefault):
"""
This writes configuration `.csv` file from `raw_input` from prompt.
:param prj_dh: path to project directory.
:param inputORdefault: column name "input" or "default".
"""
info=pd.read_csv(prj_dh+"/cfg/info")
info=info.set_index("varname",drop=True)
for var in info.index.values:
val=raw_input("%s (default: %s) =" % (info.loc[var, "description"],info.loc[var, "default"]))
if not val=='':
info.loc[var, inputORdefault]=val
info.reset_index().to_csv("%s/cfg/info" % prj_dh, index=False)
def is_xls_ok(cfg_xls,cfg_xls_sheetnames_required) :
"""
Checks if the required sheets are present in the configuration excel file.
:param cfg_xls: path to configuration excel file
"""
cfg_xls_sheetnames=cfg_xls.sheet_names
cfg_xls_sheetnames= [str(x) for x in cfg_xls_sheetnames]# unicode to str
for qry_sheet_namei in cfg_xls_sheetnames_required : # check if required sheets are present
#qry_sheet_namei=str(qry_sheet_namei)
if not qry_sheet_namei in cfg_xls_sheetnames :
logging.error("pipeline : sheetname '%s' does not exist" % qry_sheet_namei)
return False
break
return True
def is_info_ok(xls_fh):
"""
This checks the sanity of info sheet in the configuration excel file.
For example if the files exists or not.
:param cfg_xls: path to configuration excel file
"""
info=pd.read_excel(xls_fh,'info')
info_path_vars=[varn for varn in info['varname'] if ("_fh" in varn) or ("_dh" in varn)]
info=info.set_index("varname")
info_paths=[info.loc[info_path_var,"input"] for info_path_var in info_path_vars]
for info_path in info_paths:
if not pd.isnull(info_path):
if not exists(info_path):
return False #(info_path_vars[info_paths.index(info_path)],info_path)
break
return True
def xls2h5(cfg_xls,cfg_h5,cfg_xls_sheetnames_required) :
"""
Converts configuration excel file to HDF5(h5) file.
Here sheets in excel files are converted to groups in HDF5 file.
:param cfg_xls: path to configuration excel file
"""
for qry_sheet_namei in cfg_xls_sheetnames_required:
qry_sheet_df=cfg_xls.parse(qry_sheet_namei)
qry_sheet_df=qry_sheet_df.astype(str) # suppress unicode error
qry_sheet_df.columns=[col.replace(" ","_") for col in qry_sheet_df.columns]
cfg_h5.put("cfg/"+qry_sheet_namei,convert2h5form(qry_sheet_df), format='table', data_columns=True)
return cfg_h5
def xls2csvs(cfg_xls,cfg_xls_sheetnames_required,output_dh):
"""
    Converts each sheet of the configuration excel file to a separate csv file.
    The csv files are written to the given output directory (output_dh).
:param cfg_xls: path to configuration excel file
"""
for qry_sheet_namei in cfg_xls_sheetnames_required:
qry_sheet_df=cfg_xls.parse(qry_sheet_namei)
qry_sheet_df=qry_sheet_df.astype(str) # suppress unicode error
qry_sheet_df.to_csv("%s/%s" % (output_dh,qry_sheet_namei))
# print "%s/%s" % (output_dh,qry_sheet_namei)
def convert2h5form(df):
"""
    Convert a dataframe into a form compatible with the HDF5 format
:param df: pandas dataframe
"""
from dms2dfe.lib.io_strs import convertstr2format
df.columns=[convertstr2format(col,"^[a-zA-Z0-9_]*$") for col in df.columns.tolist()]
return df
def csvs2h5(dh,sub_dh_list,fn_list,output_dh,cfg_h5):
"""
This converts the csv files to tables in HDF5.
:param dh: path to the directory with csv files
:param fn_list: list of filenames of the csv files
"""
for fn in fn_list:
for sub_dh in sub_dh_list : # get aas or cds
fh=output_dh+"/"+dh+"/"+sub_dh+"/"+fn+""
df=pd.read_csv(fh) # get mat to df
df=df.loc[:,[col.replace(" ","_") for col in list(df.columns) if not (('index' in col) or ('Unnamed' in col)) ]]
exec("cfg_h5.put('%s/%s/%s',df, format='table', data_columns=True)" % (dh,sub_dh,str(fn)),locals(), globals()) # store the otpts in h5 eg. cds/N/lbl
# print("cfg_h5.put('%s/%s/%s',df.convert_objects(), format='table', data_columns=True)" % (dh,sub_dh,str(fn))) # store the otpts in h5 eg. cds/N/lbl
def csvs2h5(dh,sub_dh_list,fn_list):
"""
This converts csvs into HDF5 tables.
:param dh: path to the directory with csv files
:param fn_list: list of filenames of the csv files
"""
for fn in fn_list:
for sub_dh in sub_dh_list : # get aas or cds
fh=output_dh+"/"+dh+"/"+sub_dh+"/"+fn+""
key=dh+"/"+sub_dh+"/"+fn
if (exists(fh)) and (key in cfg_h5):
df=pd.read_csv(fh) # get mat to df
key=key+"2"
cfg_h5.put(key,df.convert_objects(), format='table', data_columns=True) # store the otpts in h5 eg. cds/N/lbl
#mut_lbl_fit_comparison
def getusable_lbls_list(prj_dh):
"""
This detects the samples that can be processed.
:param prj_dh: path to project directory.
:returns lbls_list: list of names of samples that can be processed.
"""
lbls=pd.read_csv(prj_dh+'/cfg/lbls')
lbls=lbls.set_index('varname')
lbls_list=[]
#data_lbl cols: NiA mutids NiS NiN NiNcut NiNcutlog NiScut NiScutlog NiAcut NiAcutlog
for lbli,lbl in lbls.iterrows() :
# print "%s/data_lbl/%s/%s" % (prj_dh,'aas',str(lbli))
if (not exists("%s/data_lbl/%s/%s" % (prj_dh,'aas',str(lbli)))):
fh_1=expanduser(str(lbl['fhs_1']))
lbl_mat_mut_cds_fh=[fh for fh in glob(fh_1+"*") if '.mat_mut_cds' in fh]
if len(lbl_mat_mut_cds_fh)!=0:
lbl_mat_mut_cds_fh=lbl_mat_mut_cds_fh[0]
lbls_list.append([lbli,lbl_mat_mut_cds_fh])
else :
fh_1="%s/data_mutmat/%s" % (prj_dh,basename(fh_1))
# print fh_1
lbl_mat_mut_cds_fh=[fh for fh in glob(fh_1+"*") if '.mat_mut_cds' in fh]
if len(lbl_mat_mut_cds_fh)!=0:
lbl_mat_mut_cds_fh=lbl_mat_mut_cds_fh[0]
lbls_list.append([lbli,lbl_mat_mut_cds_fh])
else:
logging.warning("can not find: %s" % fh_1)
# else:
# logging.info("already processed: %s" % (str(lbli)))
return lbls_list
def getusable_fits_list(prj_dh,data_fit_dh='data_fit'):
"""
This gets the list of samples that can be processed for fitness estimations.
:param prj_dh: path to project directory.
:returns fits_pairs_list: list of tuples with names of input and selected samples.
"""
if exists('%s/cfg/fit'% (prj_dh)):
fits=pd.read_csv(prj_dh+'/cfg/fit')
if "Unnamed: 0" in fits.columns:
fits=fits.drop("Unnamed: 0", axis=1)
fits_pairs_list=[]
sel_cols=[col for col in fits.columns.tolist() if "sel_" in col]
for pairi in fits.index.values :
unsel_lbl=fits.loc[pairi,"unsel"]
sels=list(fits.loc[pairi,sel_cols])
# print sels
for sel_lbl in sels :
if not pd.isnull(sel_lbl):
fit_lbl=sel_lbl+"_WRT_"+unsel_lbl
if (not exists("%s/%s/%s/%s" % (prj_dh,data_fit_dh,'aas',fit_lbl))):
fits_pairs_list.append([unsel_lbl,sel_lbl])
else :
logging.info("already processed: %s" % (fit_lbl))
return fits_pairs_list
else:
logging.warning("ana3_mutmat2fit : getusable_fits_list : not fits in cfg/fit")
return []
def getusable_comparison_list(prj_dh):
"""
This converts the table of tests and controls in configuration file into tuples of test and control.
:param prj_dh: path to project directory.
"""
comparisons=pd.read_csv(prj_dh+'/cfg/comparison')
comparisons=comparisons.set_index('ctrl')
comparison_list=[]
for ctrl,row in comparisons.iterrows() :
row=row[~row.isnull()]
for test in row[0:] :
comparison_list.append([ctrl,test])
return comparison_list
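# Illustrative sketch (hypothetical cfg/comparison contents): a table with a
# 'ctrl' column plus any number of test columns, e.g. a row with
# ctrl='ctrlA' and tests 'testX' and 'testY', would make
# getusable_comparison_list return [['ctrlA', 'testX'], ['ctrlA', 'testY']].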
def to_pkl(data,fh):
"""
Saves a dict in pkl format
:param data: dict, containing data
:param fh: path to the output pkl file
"""
if not fh is None:
with open(fh, 'wb') as f:
pickle.dump(data, f, -1)
def read_pkl(fh):
"""
Reads a file in pkl format
:param fh: path to the pkl file
:returns data: dict, containing data
"""
with open(fh,'rb') as f:
return pickle.load(f)
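# Minimal usage sketch for to_pkl/read_pkl (hypothetical path and payload):
#   to_pkl({'sample': 'hello_20', 'fit': 1.5}, '/tmp/example.pkl')
#   data = read_pkl('/tmp/example.pkl')  # data == {'sample': 'hello_20', 'fit': 1.5}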
| gpl-3.0 |
citrix-openstack-build/neutron-vpnaas | tools/install_venv.py | 102 | 2304 | #!/usr/bin/env python
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2010 OpenStack Foundation.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Installation script for Neutron's development virtualenv
"""
from __future__ import print_function
import os
import sys
import install_venv_common as install_venv
def print_help():
help = """
Neutron development environment setup is complete.
Neutron development uses virtualenv to track and manage Python dependencies
while in development and testing.
To activate the Neutron virtualenv for the extent of your current shell
session you can run:
$ source .venv/bin/activate
Or, if you prefer, you can run commands in the virtualenv on a case by case
basis by running:
$ tools/with_venv.sh <your command>
Also, make test will automatically use the virtualenv.
"""
print(help)
def main(argv):
root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
venv = os.path.join(root, '.venv')
pip_requires = os.path.join(root, 'requirements.txt')
test_requires = os.path.join(root, 'test-requirements.txt')
py_version = "python%s.%s" % (sys.version_info[0], sys.version_info[1])
project = 'Neutron'
install = install_venv.InstallVenv(root, venv, pip_requires, test_requires,
py_version, project)
options = install.parse_args(argv)
install.check_python_version()
install.check_dependencies()
install.create_virtualenv(no_site_packages=options.no_site_packages)
install.install_dependencies()
print_help()
if __name__ == '__main__':
main(sys.argv)
| apache-2.0 |
jsjohnst/tornado | tornado/httpserver.py | 96 | 11915 | #!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""A non-blocking, single-threaded HTTP server.
Typical applications have little direct interaction with the `HTTPServer`
class except to start a server at the beginning of the process
(and even that is often done indirectly via `tornado.web.Application.listen`).
.. versionchanged:: 4.0
The ``HTTPRequest`` class that used to live in this module has been moved
to `tornado.httputil.HTTPServerRequest`. The old name remains as an alias.
"""
from __future__ import absolute_import, division, print_function, with_statement
import socket
from tornado.escape import native_str
from tornado.http1connection import HTTP1ServerConnection, HTTP1ConnectionParameters
from tornado import gen
from tornado import httputil
from tornado import iostream
from tornado import netutil
from tornado.tcpserver import TCPServer
from tornado.util import Configurable
class HTTPServer(TCPServer, Configurable,
httputil.HTTPServerConnectionDelegate):
r"""A non-blocking, single-threaded HTTP server.
A server is defined by a subclass of `.HTTPServerConnectionDelegate`,
or, for backwards compatibility, a callback that takes an
`.HTTPServerRequest` as an argument. The delegate is usually a
`tornado.web.Application`.
`HTTPServer` supports keep-alive connections by default
(automatically for HTTP/1.1, or for HTTP/1.0 when the client
requests ``Connection: keep-alive``).
If ``xheaders`` is ``True``, we support the
``X-Real-Ip``/``X-Forwarded-For`` and
``X-Scheme``/``X-Forwarded-Proto`` headers, which override the
remote IP and URI scheme/protocol for all requests. These headers
are useful when running Tornado behind a reverse proxy or load
balancer. The ``protocol`` argument can also be set to ``https``
if Tornado is run behind an SSL-decoding proxy that does not set one of
the supported ``xheaders``.
To make this server serve SSL traffic, send the ``ssl_options`` keyword
argument with an `ssl.SSLContext` object. For compatibility with older
versions of Python ``ssl_options`` may also be a dictionary of keyword
arguments for the `ssl.wrap_socket` method.::
ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
ssl_ctx.load_cert_chain(os.path.join(data_dir, "mydomain.crt"),
os.path.join(data_dir, "mydomain.key"))
       HTTPServer(application, ssl_options=ssl_ctx)
`HTTPServer` initialization follows one of three patterns (the
initialization methods are defined on `tornado.tcpserver.TCPServer`):
1. `~tornado.tcpserver.TCPServer.listen`: simple single-process::
server = HTTPServer(app)
server.listen(8888)
IOLoop.current().start()
In many cases, `tornado.web.Application.listen` can be used to avoid
the need to explicitly create the `HTTPServer`.
2. `~tornado.tcpserver.TCPServer.bind`/`~tornado.tcpserver.TCPServer.start`:
simple multi-process::
server = HTTPServer(app)
server.bind(8888)
server.start(0) # Forks multiple sub-processes
IOLoop.current().start()
When using this interface, an `.IOLoop` must *not* be passed
to the `HTTPServer` constructor. `~.TCPServer.start` will always start
the server on the default singleton `.IOLoop`.
3. `~tornado.tcpserver.TCPServer.add_sockets`: advanced multi-process::
sockets = tornado.netutil.bind_sockets(8888)
tornado.process.fork_processes(0)
server = HTTPServer(app)
server.add_sockets(sockets)
IOLoop.current().start()
The `~.TCPServer.add_sockets` interface is more complicated,
but it can be used with `tornado.process.fork_processes` to
give you more flexibility in when the fork happens.
`~.TCPServer.add_sockets` can also be used in single-process
servers if you want to create your listening sockets in some
way other than `tornado.netutil.bind_sockets`.
.. versionchanged:: 4.0
Added ``decompress_request``, ``chunk_size``, ``max_header_size``,
``idle_connection_timeout``, ``body_timeout``, ``max_body_size``
arguments. Added support for `.HTTPServerConnectionDelegate`
instances as ``request_callback``.
.. versionchanged:: 4.1
`.HTTPServerConnectionDelegate.start_request` is now called with
two arguments ``(server_conn, request_conn)`` (in accordance with the
documentation) instead of one ``(request_conn)``.
.. versionchanged:: 4.2
`HTTPServer` is now a subclass of `tornado.util.Configurable`.
"""
def __init__(self, *args, **kwargs):
# Ignore args to __init__; real initialization belongs in
# initialize since we're Configurable. (there's something
# weird in initialization order between this class,
# Configurable, and TCPServer so we can't leave __init__ out
# completely)
pass
def initialize(self, request_callback, no_keep_alive=False, io_loop=None,
xheaders=False, ssl_options=None, protocol=None,
decompress_request=False,
chunk_size=None, max_header_size=None,
idle_connection_timeout=None, body_timeout=None,
max_body_size=None, max_buffer_size=None):
self.request_callback = request_callback
self.no_keep_alive = no_keep_alive
self.xheaders = xheaders
self.protocol = protocol
self.conn_params = HTTP1ConnectionParameters(
decompress=decompress_request,
chunk_size=chunk_size,
max_header_size=max_header_size,
header_timeout=idle_connection_timeout or 3600,
max_body_size=max_body_size,
body_timeout=body_timeout)
TCPServer.__init__(self, io_loop=io_loop, ssl_options=ssl_options,
max_buffer_size=max_buffer_size,
read_chunk_size=chunk_size)
self._connections = set()
@classmethod
def configurable_base(cls):
return HTTPServer
@classmethod
def configurable_default(cls):
return HTTPServer
@gen.coroutine
def close_all_connections(self):
while self._connections:
# Peek at an arbitrary element of the set
conn = next(iter(self._connections))
yield conn.close()
def handle_stream(self, stream, address):
context = _HTTPRequestContext(stream, address,
self.protocol)
conn = HTTP1ServerConnection(
stream, self.conn_params, context)
self._connections.add(conn)
conn.start_serving(self)
def start_request(self, server_conn, request_conn):
return _ServerRequestAdapter(self, server_conn, request_conn)
def on_close(self, server_conn):
self._connections.remove(server_conn)
class _HTTPRequestContext(object):
def __init__(self, stream, address, protocol):
self.address = address
# Save the socket's address family now so we know how to
# interpret self.address even after the stream is closed
# and its socket attribute replaced with None.
if stream.socket is not None:
self.address_family = stream.socket.family
else:
self.address_family = None
# In HTTPServerRequest we want an IP, not a full socket address.
if (self.address_family in (socket.AF_INET, socket.AF_INET6) and
address is not None):
self.remote_ip = address[0]
else:
# Unix (or other) socket; fake the remote address.
self.remote_ip = '0.0.0.0'
if protocol:
self.protocol = protocol
elif isinstance(stream, iostream.SSLIOStream):
self.protocol = "https"
else:
self.protocol = "http"
self._orig_remote_ip = self.remote_ip
self._orig_protocol = self.protocol
def __str__(self):
if self.address_family in (socket.AF_INET, socket.AF_INET6):
return self.remote_ip
elif isinstance(self.address, bytes):
# Python 3 with the -bb option warns about str(bytes),
# so convert it explicitly.
# Unix socket addresses are str on mac but bytes on linux.
return native_str(self.address)
else:
return str(self.address)
def _apply_xheaders(self, headers):
"""Rewrite the ``remote_ip`` and ``protocol`` fields."""
# Squid uses X-Forwarded-For, others use X-Real-Ip
ip = headers.get("X-Forwarded-For", self.remote_ip)
ip = ip.split(',')[-1].strip()
ip = headers.get("X-Real-Ip", ip)
if netutil.is_valid_ip(ip):
self.remote_ip = ip
# AWS uses X-Forwarded-Proto
proto_header = headers.get(
"X-Scheme", headers.get("X-Forwarded-Proto",
self.protocol))
if proto_header in ("http", "https"):
self.protocol = proto_header
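    # Example of how _apply_xheaders above resolves the headers (hypothetical
    # values): with {"X-Forwarded-For": "10.0.0.1, 203.0.113.7",
    # "X-Forwarded-Proto": "https"} and no X-Real-Ip/X-Scheme headers,
    # remote_ip becomes "203.0.113.7" (the last hop in the list) and
    # protocol becomes "https".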
def _unapply_xheaders(self):
"""Undo changes from `_apply_xheaders`.
Xheaders are per-request so they should not leak to the next
request on the same connection.
"""
self.remote_ip = self._orig_remote_ip
self.protocol = self._orig_protocol
class _ServerRequestAdapter(httputil.HTTPMessageDelegate):
"""Adapts the `HTTPMessageDelegate` interface to the interface expected
by our clients.
"""
def __init__(self, server, server_conn, request_conn):
self.server = server
self.connection = request_conn
self.request = None
if isinstance(server.request_callback,
httputil.HTTPServerConnectionDelegate):
self.delegate = server.request_callback.start_request(
server_conn, request_conn)
self._chunks = None
else:
self.delegate = None
self._chunks = []
def headers_received(self, start_line, headers):
if self.server.xheaders:
self.connection.context._apply_xheaders(headers)
if self.delegate is None:
self.request = httputil.HTTPServerRequest(
connection=self.connection, start_line=start_line,
headers=headers)
else:
return self.delegate.headers_received(start_line, headers)
def data_received(self, chunk):
if self.delegate is None:
self._chunks.append(chunk)
else:
return self.delegate.data_received(chunk)
def finish(self):
if self.delegate is None:
self.request.body = b''.join(self._chunks)
self.request._parse_body()
self.server.request_callback(self.request)
else:
self.delegate.finish()
self._cleanup()
def on_connection_close(self):
if self.delegate is None:
self._chunks = None
else:
self.delegate.on_connection_close()
self._cleanup()
def _cleanup(self):
if self.server.xheaders:
self.connection.context._unapply_xheaders()
HTTPRequest = httputil.HTTPServerRequest
| apache-2.0 |
helifu/kudu | python/kudu/tests/test_scanner.py | 2 | 14089 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import division
from kudu.compat import unittest
from kudu.tests.util import TestScanBase
from kudu.tests.common import KuduTestBase, TimeoutError
import kudu
import datetime
import time
import pytest
class TestScanner(TestScanBase):
@classmethod
def setUpClass(self):
super(TestScanner, self).setUpClass()
def setUp(self):
pass
def test_scan_rows_basic(self):
# Let's scan with no predicates
scanner = self.table.scanner().open()
tuples = scanner.read_all_tuples()
self.assertEqual(sorted(tuples), self.tuples)
def test_scan_rows_simple_predicate(self):
key = self.table['key']
preds = [key > 19, key < 50]
def _read_predicates(preds):
scanner = self.table.scanner()
scanner.add_predicates(preds)
scanner.open()
return scanner.read_all_tuples()
tuples = _read_predicates(preds)
self.assertEqual(sorted(tuples), self.tuples[20:50])
# verify predicates reusable
tuples = _read_predicates(preds)
self.assertEqual(sorted(tuples), self.tuples[20:50])
def test_scan_limit(self):
# Set limits both below and above the max number of rows.
limits = [self.nrows - 1, self.nrows, self.nrows + 1]
for limit in limits:
scanner = self.table.scanner()
scanner.set_limit(limit)
tuples = scanner.read_all_tuples()
self.assertEqual(len(tuples), min(limit, self.nrows))
def test_scan_rows_string_predicate_and_projection(self):
scanner = self.table.scanner()
scanner.set_projected_column_names(['key', 'string_val'])
sv = self.table['string_val']
scanner.add_predicates([sv >= 'hello_20',
sv <= 'hello_22'])
scanner.set_fault_tolerant()
scanner.open()
tuples = scanner.read_all_tuples()
self.assertEqual(sorted(tuples), [(20, 'hello_20'), (22, 'hello_22')])
def test_scan_rows_in_list_predicate(self):
"""
Test scanner with an InList predicate and
a string comparison predicate
"""
key_list = [2, 98]
scanner = self.table.scanner()
scanner.set_fault_tolerant()\
.add_predicates([
self.table[0].in_list(key_list),
self.table['string_val'] >= 'hello_9'
])
scanner.open()
tuples = scanner.read_all_tuples()
self.assertEqual(tuples, [self.tuples[98]])
def test_scan_rows_is_not_null_predicate(self):
"""
Test scanner with an IsNotNull predicate on string_val column
"""
pred = self.table['string_val'].is_not_null()
scanner = self.table.scanner()
scanner.add_predicate(pred)
scanner.open()
tuples = scanner.read_all_tuples()
rows = [i for i in range(100) if i % 2 == 0]
self.assertEqual(sorted(tuples), [self.tuples[i] for i in rows])
def test_scan_rows_is_null_predicate(self):
"""
Test scanner with an IsNull predicate on string_val column
"""
pred = self.table['string_val'].is_null()
scanner = self.table.scanner()
scanner.add_predicate(pred)
scanner.open()
tuples = scanner.read_all_tuples()
rows = [i for i in range(100) if i % 2 != 0]
self.assertEqual(sorted(tuples), [self.tuples[i] for i in rows])
def test_index_projection_with_schema(self):
scanner = self.table.scanner()
scanner.set_projected_column_indexes([0, 1])
scanner.set_fault_tolerant()
scanner.open()
tuples = scanner.read_all_tuples()
# Build schema to check against
builder = kudu.schema_builder()
builder.add_column('key', kudu.int32, nullable=False)
builder.add_column('int_val', kudu.int32)
builder.set_primary_keys(['key'])
expected_schema = builder.build()
# Build new schema from projection schema
builder = kudu.schema_builder()
for col in scanner.get_projection_schema():
builder.copy_column(col)
builder.set_primary_keys(['key'])
new_schema = builder.build()
self.assertEqual(tuples, [t[0:2] for t in self.tuples])
self.assertTrue(expected_schema.equals(new_schema))
def test_scan_with_bounds(self):
scanner = self.table.scanner()
scanner.set_fault_tolerant()\
.add_lower_bound({'key': 50})\
.add_exclusive_upper_bound({'key': 55})
scanner.open()
tuples = scanner.read_all_tuples()
self.assertEqual(sorted(tuples), self.tuples[50:55])
def test_scan_invalid_predicates(self):
scanner = self.table.scanner()
sv = self.table['string_val']
with self.assertRaises(TypeError):
scanner.add_predicates([sv >= None])
with self.assertRaises(TypeError):
scanner.add_predicates([sv >= 1])
with self.assertRaises(TypeError):
scanner.add_predicates([sv.in_list(['testing',
datetime.datetime.utcnow()])])
with self.assertRaises(TypeError):
scanner.add_predicates([sv.in_list([
'hello_20',
120
])])
def test_scan_batch_by_batch(self):
scanner = self.table.scanner()
scanner.set_fault_tolerant()
lower_bound = scanner.new_bound()
lower_bound['key'] = 10
scanner.add_lower_bound(lower_bound)
upper_bound = scanner.new_bound()
upper_bound['key'] = 90
scanner.add_exclusive_upper_bound(upper_bound)
scanner.open()
tuples = []
while scanner.has_more_rows():
batch = scanner.next_batch()
tuples.extend(batch.as_tuples())
self.assertEqual(sorted(tuples), self.tuples[10:90])
def test_unixtime_micros(self):
"""
Test setting and getting unixtime_micros fields
"""
# Insert new rows
self.insert_new_unixtime_micros_rows()
# Validate results
scanner = self.table.scanner()
scanner.set_fault_tolerant().open()
self.assertEqual(sorted(self.tuples), scanner.read_all_tuples())
def test_read_mode(self):
"""
Test scanning in latest, snapshot and read_your_writes read modes.
"""
# Delete row
self.delete_insert_row_for_read_test()
# Check scanner results prior to delete
scanner = self.table.scanner()
scanner.set_read_mode('snapshot')\
.set_snapshot(self.snapshot_timestamp)\
.open()
self.assertEqual(sorted(self.tuples[1:]), sorted(scanner.read_all_tuples()))
# Check scanner results after delete with latest mode
timeout = time.time() + 10
check_tuples = []
while check_tuples != sorted(self.tuples):
if time.time() > timeout:
raise TimeoutError("Could not validate results in allocated" +
"time.")
scanner = self.table.scanner()
scanner.set_read_mode(kudu.READ_LATEST)\
.open()
check_tuples = sorted(scanner.read_all_tuples())
# Avoid tight looping
time.sleep(0.05)
# Check scanner results after delete with read_your_writes mode
scanner = self.table.scanner()
scanner.set_read_mode('read_your_writes')\
.open()
self.assertEqual(sorted(self.tuples), sorted(scanner.read_all_tuples()))
def test_resource_metrics_and_cache_blocks(self):
"""
Test getting the resource metrics after scanning and
setting the scanner to not cache blocks.
"""
# Build scanner and read through all batches and retrieve metrics.
scanner = self.table.scanner()
scanner.set_fault_tolerant().set_cache_blocks(False).open()
scanner.read_all_tuples()
metrics = scanner.get_resource_metrics()
# Confirm that the scanner returned cache hit and miss values.
self.assertTrue('cfile_cache_hit_bytes' in metrics)
self.assertTrue('cfile_cache_miss_bytes' in metrics)
def verify_pred_type_scans(self, preds, row_indexes, count_only=False):
# Using the incoming list of predicates, verify that the row returned
# matches the inserted tuple at the row indexes specified in a
# slice object
scanner = self.type_table.scanner()
scanner.set_fault_tolerant()
scanner.add_predicates(preds)
scanner.set_projected_column_names(self.projected_names_w_o_float)
tuples = scanner.open().read_all_tuples()
# verify rows
if count_only:
self.assertEqual(len(self.type_test_rows[row_indexes]), len(tuples))
else:
self.assertEqual(sorted(self.type_test_rows[row_indexes]), tuples)
def test_unixtime_micros_pred(self):
# Test unixtime_micros value predicate
self._test_unixtime_micros_pred()
def test_bool_pred(self):
# Test a boolean value predicate
self._test_bool_pred()
def test_double_pred(self):
# Test a double precision float predicate
self._test_double_pred()
def test_float_pred(self):
# Test a single precision float predicate
# Does a row check count only
self._test_float_pred()
def test_decimal_pred(self):
if kudu.CLIENT_SUPPORTS_DECIMAL:
# Test a decimal predicate
self._test_decimal_pred()
def test_binary_pred(self):
# Test a binary predicate
self._test_binary_pred()
def test_scan_selection(self):
"""
This test confirms that setting the scan selection policy on the
scanner does not cause any errors. There is no way to confirm
that the policy was actually set. This functionality is
tested in the C++ test:
ClientTest.TestReplicatedMultiTabletTableFailover.
"""
for policy in ['leader', kudu.CLOSEST_REPLICA, 2]:
scanner = self.table.scanner()
scanner.set_selection(policy)
scanner.open()
self.assertEqual(sorted(scanner.read_all_tuples()),
sorted(self.tuples))
@pytest.mark.skipif(not (kudu.CLIENT_SUPPORTS_PANDAS),
reason="Pandas required to run this test.")
def test_scanner_to_pandas_types(self):
"""
This test confirms that data types are converted as expected to Pandas.
"""
import numpy as np
scanner = self.type_table.scanner()
df = scanner.to_pandas()
types = df.dtypes
if kudu.CLIENT_SUPPORTS_DECIMAL:
self.assertEqual(types[0], np.int64)
self.assertEqual(types[1], 'datetime64[ns, UTC]')
self.assertEqual(types[2], np.object)
self.assertEqual(types[3], np.object)
self.assertEqual(types[4], np.bool)
self.assertEqual(types[5], np.float64)
self.assertEqual(types[6], np.int8)
self.assertEqual(types[7], np.object)
self.assertEqual(types[8], np.float32)
else:
self.assertEqual(types[0], np.int64)
self.assertEqual(types[1], 'datetime64[ns, UTC]')
self.assertEqual(types[2], np.object)
self.assertEqual(types[3], np.bool)
self.assertEqual(types[4], np.float64)
self.assertEqual(types[5], np.int8)
self.assertEqual(types[6], np.object)
self.assertEqual(types[7], np.float32)
@pytest.mark.skipif(not (kudu.CLIENT_SUPPORTS_PANDAS),
reason="Pandas required to run this test.")
def test_scanner_to_pandas_row_count(self):
"""
This test confirms that the record counts match between Pandas and the scanner.
"""
scanner = self.type_table.scanner()
scanner_count = len(scanner.read_all_tuples())
scanner = self.type_table.scanner()
df = scanner.to_pandas()
self.assertEqual(scanner_count, df.shape[0])
@pytest.mark.skipif(not (kudu.CLIENT_SUPPORTS_PANDAS),
reason="Pandas required to run this test.")
def test_scanner_to_pandas_index(self):
"""
This test confirms that an index is correctly applied.
"""
scanner = self.type_table.scanner()
df = scanner.to_pandas(index='key')
self.assertEqual(df.index.name, 'key')
self.assertEqual(list(df.index), [1, 2])
@pytest.mark.skipif((not(kudu.CLIENT_SUPPORTS_PANDAS) or
(not(kudu.CLIENT_SUPPORTS_DECIMAL))),
reason="Pandas and Decimal support required to run this test.")
    def test_scanner_to_pandas_coerce_float(self):
"""
This test confirms that a decimal column is coerced to a double when specified.
"""
import numpy as np
scanner = self.type_table.scanner()
df = scanner.to_pandas(coerce_float=True)
types = df.dtypes
self.assertEqual(types[2], np.float64)
| apache-2.0 |
nikolhm/Pokus | knownpaths.py | 1 | 9583 | import ctypes, sys
from ctypes import windll, wintypes
from uuid import UUID
class GUID(ctypes.Structure): # [1]
_fields_ = [
("Data1", wintypes.DWORD),
("Data2", wintypes.WORD),
("Data3", wintypes.WORD),
("Data4", wintypes.BYTE * 8)
]
def __init__(self, uuid_):
ctypes.Structure.__init__(self)
self.Data1, self.Data2, self.Data3, self.Data4[0], self.Data4[1], rest = uuid_.fields
for i in range(2, 8):
self.Data4[i] = rest>>(8 - i - 1)*8 & 0xff
class FOLDERID: # [2]
AccountPictures = UUID('{008ca0b1-55b4-4c56-b8a8-4de4b299d3be}')
AdminTools = UUID('{724EF170-A42D-4FEF-9F26-B60E846FBA4F}')
ApplicationShortcuts = UUID('{A3918781-E5F2-4890-B3D9-A7E54332328C}')
CameraRoll = UUID('{AB5FB87B-7CE2-4F83-915D-550846C9537B}')
CDBurning = UUID('{9E52AB10-F80D-49DF-ACB8-4330F5687855}')
CommonAdminTools = UUID('{D0384E7D-BAC3-4797-8F14-CBA229B392B5}')
CommonOEMLinks = UUID('{C1BAE2D0-10DF-4334-BEDD-7AA20B227A9D}')
CommonPrograms = UUID('{0139D44E-6AFE-49F2-8690-3DAFCAE6FFB8}')
CommonStartMenu = UUID('{A4115719-D62E-491D-AA7C-E74B8BE3B067}')
CommonStartup = UUID('{82A5EA35-D9CD-47C5-9629-E15D2F714E6E}')
CommonTemplates = UUID('{B94237E7-57AC-4347-9151-B08C6C32D1F7}')
Contacts = UUID('{56784854-C6CB-462b-8169-88E350ACB882}')
Cookies = UUID('{2B0F765D-C0E9-4171-908E-08A611B84FF6}')
Desktop = UUID('{B4BFCC3A-DB2C-424C-B029-7FE99A87C641}')
DeviceMetadataStore = UUID('{5CE4A5E9-E4EB-479D-B89F-130C02886155}')
Documents = UUID('{FDD39AD0-238F-46AF-ADB4-6C85480369C7}')
DocumentsLibrary = UUID('{7B0DB17D-9CD2-4A93-9733-46CC89022E7C}')
Downloads = UUID('{374DE290-123F-4565-9164-39C4925E467B}')
Favorites = UUID('{1777F761-68AD-4D8A-87BD-30B759FA33DD}')
Fonts = UUID('{FD228CB7-AE11-4AE3-864C-16F3910AB8FE}')
GameTasks = UUID('{054FAE61-4DD8-4787-80B6-090220C4B700}')
History = UUID('{D9DC8A3B-B784-432E-A781-5A1130A75963}')
ImplicitAppShortcuts = UUID('{BCB5256F-79F6-4CEE-B725-DC34E402FD46}')
InternetCache = UUID('{352481E8-33BE-4251-BA85-6007CAEDCF9D}')
Libraries = UUID('{1B3EA5DC-B587-4786-B4EF-BD1DC332AEAE}')
Links = UUID('{bfb9d5e0-c6a9-404c-b2b2-ae6db6af4968}')
LocalAppData = UUID('{F1B32785-6FBA-4FCF-9D55-7B8E7F157091}')
LocalAppDataLow = UUID('{A520A1A4-1780-4FF6-BD18-167343C5AF16}')
LocalizedResourcesDir = UUID('{2A00375E-224C-49DE-B8D1-440DF7EF3DDC}')
Music = UUID('{4BD8D571-6D19-48D3-BE97-422220080E43}')
MusicLibrary = UUID('{2112AB0A-C86A-4FFE-A368-0DE96E47012E}')
NetHood = UUID('{C5ABBF53-E17F-4121-8900-86626FC2C973}')
OriginalImages = UUID('{2C36C0AA-5812-4b87-BFD0-4CD0DFB19B39}')
PhotoAlbums = UUID('{69D2CF90-FC33-4FB7-9A0C-EBB0F0FCB43C}')
PicturesLibrary = UUID('{A990AE9F-A03B-4E80-94BC-9912D7504104}')
Pictures = UUID('{33E28130-4E1E-4676-835A-98395C3BC3BB}')
Playlists = UUID('{DE92C1C7-837F-4F69-A3BB-86E631204A23}')
PrintHood = UUID('{9274BD8D-CFD1-41C3-B35E-B13F55A758F4}')
Profile = UUID('{5E6C858F-0E22-4760-9AFE-EA3317B67173}')
ProgramData = UUID('{62AB5D82-FDC1-4DC3-A9DD-070D1D495D97}')
ProgramFiles = UUID('{905e63b6-c1bf-494e-b29c-65b732d3d21a}')
ProgramFilesX64 = UUID('{6D809377-6AF0-444b-8957-A3773F02200E}')
ProgramFilesX86 = UUID('{7C5A40EF-A0FB-4BFC-874A-C0F2E0B9FA8E}')
ProgramFilesCommon = UUID('{F7F1ED05-9F6D-47A2-AAAE-29D317C6F066}')
ProgramFilesCommonX64 = UUID('{6365D5A7-0F0D-45E5-87F6-0DA56B6A4F7D}')
ProgramFilesCommonX86 = UUID('{DE974D24-D9C6-4D3E-BF91-F4455120B917}')
Programs = UUID('{A77F5D77-2E2B-44C3-A6A2-ABA601054A51}')
Public = UUID('{DFDF76A2-C82A-4D63-906A-5644AC457385}')
PublicDesktop = UUID('{C4AA340D-F20F-4863-AFEF-F87EF2E6BA25}')
PublicDocuments = UUID('{ED4824AF-DCE4-45A8-81E2-FC7965083634}')
PublicDownloads = UUID('{3D644C9B-1FB8-4f30-9B45-F670235F79C0}')
PublicGameTasks = UUID('{DEBF2536-E1A8-4c59-B6A2-414586476AEA}')
PublicLibraries = UUID('{48DAF80B-E6CF-4F4E-B800-0E69D84EE384}')
PublicMusic = UUID('{3214FAB5-9757-4298-BB61-92A9DEAA44FF}')
PublicPictures = UUID('{B6EBFB86-6907-413C-9AF7-4FC2ABF07CC5}')
PublicRingtones = UUID('{E555AB60-153B-4D17-9F04-A5FE99FC15EC}')
PublicUserTiles = UUID('{0482af6c-08f1-4c34-8c90-e17ec98b1e17}')
PublicVideos = UUID('{2400183A-6185-49FB-A2D8-4A392A602BA3}')
QuickLaunch = UUID('{52a4f021-7b75-48a9-9f6b-4b87a210bc8f}')
Recent = UUID('{AE50C081-EBD2-438A-8655-8A092E34987A}')
RecordedTVLibrary = UUID('{1A6FDBA2-F42D-4358-A798-B74D745926C5}')
ResourceDir = UUID('{8AD10C31-2ADB-4296-A8F7-E4701232C972}')
Ringtones = UUID('{C870044B-F49E-4126-A9C3-B52A1FF411E8}')
RoamingAppData = UUID('{3EB685DB-65F9-4CF6-A03A-E3EF65729F3D}')
RoamedTileImages = UUID('{AAA8D5A5-F1D6-4259-BAA8-78E7EF60835E}')
RoamingTiles = UUID('{00BCFC5A-ED94-4e48-96A1-3F6217F21990}')
SampleMusic = UUID('{B250C668-F57D-4EE1-A63C-290EE7D1AA1F}')
SamplePictures = UUID('{C4900540-2379-4C75-844B-64E6FAF8716B}')
SamplePlaylists = UUID('{15CA69B3-30EE-49C1-ACE1-6B5EC372AFB5}')
SampleVideos = UUID('{859EAD94-2E85-48AD-A71A-0969CB56A6CD}')
SavedGames = UUID('{4C5C32FF-BB9D-43b0-B5B4-2D72E54EAAA4}')
SavedSearches = UUID('{7d1d3a04-debb-4115-95cf-2f29da2920da}')
Screenshots = UUID('{b7bede81-df94-4682-a7d8-57a52620b86f}')
SearchHistory = UUID('{0D4C3DB6-03A3-462F-A0E6-08924C41B5D4}')
SearchTemplates = UUID('{7E636BFE-DFA9-4D5E-B456-D7B39851D8A9}')
SendTo = UUID('{8983036C-27C0-404B-8F08-102D10DCFD74}')
SidebarDefaultParts = UUID('{7B396E54-9EC5-4300-BE0A-2482EBAE1A26}')
SidebarParts = UUID('{A75D362E-50FC-4fb7-AC2C-A8BEAA314493}')
SkyDrive = UUID('{A52BBA46-E9E1-435f-B3D9-28DAA648C0F6}')
SkyDriveCameraRoll = UUID('{767E6811-49CB-4273-87C2-20F355E1085B}')
SkyDriveDocuments = UUID('{24D89E24-2F19-4534-9DDE-6A6671FBB8FE}')
SkyDrivePictures = UUID('{339719B5-8C47-4894-94C2-D8F77ADD44A6}')
StartMenu = UUID('{625B53C3-AB48-4EC1-BA1F-A1EF4146FC19}')
Startup = UUID('{B97D20BB-F46A-4C97-BA10-5E3608430854}')
System = UUID('{1AC14E77-02E7-4E5D-B744-2EB1AE5198B7}')
SystemX86 = UUID('{D65231B0-B2F1-4857-A4CE-A8E7C6EA7D27}')
Templates = UUID('{A63293E8-664E-48DB-A079-DF759E0509F7}')
UserPinned = UUID('{9E3995AB-1F9C-4F13-B827-48B24B6C7174}')
UserProfiles = UUID('{0762D272-C50A-4BB0-A382-697DCD729B80}')
UserProgramFiles = UUID('{5CD7AEE2-2219-4A67-B85D-6C9CE15660CB}')
UserProgramFilesCommon = UUID('{BCBD3057-CA5C-4622-B42D-BC56DB0AE516}')
Videos = UUID('{18989B1D-99B5-455B-841C-AB7C74E4DDFC}')
VideosLibrary = UUID('{491E922F-5643-4AF4-A7EB-4E7A138D8174}')
Windows = UUID('{F38BF404-1D43-42F2-9305-67DE0B28FC23}')
class UserHandle: # [3]
current = wintypes.HANDLE(0)
common = wintypes.HANDLE(-1)
_CoTaskMemFree = windll.ole32.CoTaskMemFree # [4]
_CoTaskMemFree.restype= None
_CoTaskMemFree.argtypes = [ctypes.c_void_p]
_SHGetKnownFolderPath = windll.shell32.SHGetKnownFolderPath # [5] [3]
_SHGetKnownFolderPath.argtypes = [
ctypes.POINTER(GUID), wintypes.DWORD, wintypes.HANDLE, ctypes.POINTER(ctypes.c_wchar_p)
]
class PathNotFoundException(Exception): pass
def get_path(folderid, user_handle=UserHandle.common):
fid = GUID(folderid)
pPath = ctypes.c_wchar_p()
S_OK = 0
if _SHGetKnownFolderPath(ctypes.byref(fid), 0, user_handle, ctypes.byref(pPath)) != S_OK:
raise PathNotFoundException()
path = pPath.value
_CoTaskMemFree(pPath)
return path
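# Illustrative usage sketch (assumes a Windows host, since SHGetKnownFolderPath
# comes from shell32):
#   docs = get_path(FOLDERID.Documents, UserHandle.current)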
if __name__ == '__main__':
if len(sys.argv) < 2 or sys.argv[1] in ['-?', '/?']:
print('python knownpaths.py FOLDERID {current|common}')
sys.exit(0)
try:
folderid = getattr(FOLDERID, sys.argv[1])
except AttributeError:
print('Unknown folder id "%s"' % sys.argv[1], file=sys.stderr)
sys.exit(1)
try:
if len(sys.argv) == 2:
print(get_path(folderid))
else:
print(get_path(folderid, getattr(UserHandle, sys.argv[2])))
except PathNotFoundException:
print('Folder not found "%s"' % ' '.join(sys.argv[1:]), file=sys.stderr)
sys.exit(1)
# [1] http://msdn.microsoft.com/en-us/library/windows/desktop/aa373931.aspx
# [2] http://msdn.microsoft.com/en-us/library/windows/desktop/dd378457.aspx
# [3] http://msdn.microsoft.com/en-us/library/windows/desktop/bb762188.aspx
# [4] http://msdn.microsoft.com/en-us/library/windows/desktop/ms680722.aspx
# [5] http://www.themacaque.com/?p=954
| mit |
Mustard-Systems-Ltd/pyzmq | perf/perf.py | 6 | 5316 | #!/usr/bin/env python
# coding: utf-8
# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.
#
# Some original test code Copyright (c) 2007-2010 iMatix Corporation,
# Used under LGPLv3
import argparse
import time
from multiprocessing import Process
import zmq
def parse_args(argv=None):
parser = argparse.ArgumentParser(description='Run a zmq performance test')
parser.add_argument('-p', '--poll', action='store_true',
help='use a zmq Poller instead of raw send/recv')
parser.add_argument('-c', '--copy', action='store_true',
help='copy messages instead of using zero-copy')
parser.add_argument('-s', '--size', type=int, default=10240,
help='size (in bytes) of the test message')
parser.add_argument('-n', '--count', type=int, default=10240,
help='number of test messages to send')
parser.add_argument('--url', dest='url', type=str, default='tcp://127.0.0.1:5555',
help='the zmq URL on which to run the test')
parser.add_argument(dest='test', type=str, default='lat', choices=['lat', 'thr'],
help='which test to run')
return parser.parse_args(argv)
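# Example invocations (illustrative only; defaults come from parse_args above):
#   python perf.py lat                      # latency test with default size/count
#   python perf.py -n 100000 -s 1024 thr    # throughput test with custom count and size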
def latency_echo(url, count, poll, copy):
"""echo messages on a REP socket
Should be started before `latency`
"""
ctx = zmq.Context()
s = ctx.socket(zmq.REP)
if poll:
p = zmq.Poller()
p.register(s)
s.bind(url)
block = zmq.NOBLOCK if poll else 0
for i in range(count):
if poll:
res = p.poll()
msg = s.recv(block, copy=copy)
if poll:
res = p.poll()
s.send(msg, block, copy=copy)
msg = s.recv()
assert msg == b'done'
s.close()
ctx.term()
def latency(url, count, size, poll, copy):
"""Perform a latency test"""
ctx = zmq.Context()
s = ctx.socket(zmq.REQ)
s.setsockopt(zmq.LINGER, -1)
s.connect(url)
if poll:
p = zmq.Poller()
p.register(s)
msg = b' ' * size
watch = zmq.Stopwatch()
block = zmq.NOBLOCK if poll else 0
time.sleep(1)
watch.start()
for i in range (0, count):
if poll:
res = p.poll()
assert(res[0][1] & zmq.POLLOUT)
s.send(msg, block, copy=copy)
if poll:
res = p.poll()
assert(res[0][1] & zmq.POLLIN)
msg = s.recv(block, copy=copy)
assert len(msg) == size
elapsed = watch.stop()
s.send(b'done')
latency = elapsed / (count * 2.)
print ("message size : %8i [B]" % (size, ))
print ("roundtrip count: %8i [msgs]" % (count, ))
print ("mean latency : %12.3f [µs]" % (latency, ))
print ("test time : %12.3f [s]" % (elapsed * 1e-6, ))
def pusher(url, count, size, copy, poll):
"""send a bunch of messages on a PUSH socket"""
ctx = zmq.Context()
s = ctx.socket(zmq.PUSH)
# Add your socket options here.
# For example ZMQ_RATE, ZMQ_RECOVERY_IVL and ZMQ_MCAST_LOOP for PGM.
if poll:
p = zmq.Poller()
p.register(s)
s.connect(url)
msg = zmq.Message(b' ' * size)
block = zmq.NOBLOCK if poll else 0
for i in range(count):
if poll:
res = p.poll()
assert(res[0][1] & zmq.POLLOUT)
s.send(msg, block, copy=copy)
s.close()
ctx.term()
def throughput(url, count, size, poll, copy):
"""recv a bunch of messages on a PULL socket
Should be started before `pusher`
"""
ctx = zmq.Context()
s = ctx.socket(zmq.PULL)
# Add your socket options here.
# For example ZMQ_RATE, ZMQ_RECOVERY_IVL and ZMQ_MCAST_LOOP for PGM.
if poll:
p = zmq.Poller()
p.register(s)
s.bind(url)
watch = zmq.Stopwatch()
block = zmq.NOBLOCK if poll else 0
# Wait for the other side to connect.
msg = s.recv()
assert len (msg) == size
watch.start()
for i in range (count-1):
if poll:
res = p.poll()
msg = s.recv(block, copy=copy)
elapsed = watch.stop()
if elapsed == 0:
elapsed = 1
throughput = (1e6 * float(count)) / float(elapsed)
megabits = float(throughput * size * 8) / 1e6
print ("message size : %8i [B]" % (size, ))
print ("message count : %8i [msgs]" % (count, ))
print ("mean throughput: %8.0f [msg/s]" % (throughput, ))
print ("mean throughput: %12.3f [Mb/s]" % (megabits, ))
print ("test time : %12.3f [s]" % (elapsed * 1e-6, ))
def main():
args = parse_args()
tic = time.time()
if args.test == 'lat':
bg = Process(target=latency_echo, args=(args.url, args.count, args.poll, args.copy))
bg.start()
latency(args.url, args.count, args.size, args.poll, args.copy)
elif args.test == 'thr':
bg = Process(target=throughput, args=(args.url, args.count, args.size, args.poll, args.copy))
bg.start()
pusher(args.url, args.count, args.size, args.poll, args.copy)
bg.join()
toc = time.time()
if (toc - tic) < 3:
print ("For best results, tests should take at least a few seconds.")
if __name__ == '__main__':
main()
| bsd-3-clause |
martinbuc/missionplanner | Lib/lib2to3/pgen2/parse.py | 68 | 8254 | # Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Parser engine for the grammar tables generated by pgen.
The grammar table must be loaded first.
See Parser/parser.c in the Python distribution for additional info on
how this parsing engine works.
"""
# Local imports
from . import token
class ParseError(Exception):
"""Exception to signal the parser is stuck."""
def __init__(self, msg, type, value, context):
Exception.__init__(self, "%s: type=%r, value=%r, context=%r" %
(msg, type, value, context))
self.msg = msg
self.type = type
self.value = value
self.context = context
class Parser(object):
"""Parser engine.
The proper usage sequence is:
p = Parser(grammar, [converter]) # create instance
p.setup([start]) # prepare for parsing
<for each input token>:
if p.addtoken(...): # parse a token; may raise ParseError
break
root = p.rootnode # root of abstract syntax tree
A Parser instance may be reused by calling setup() repeatedly.
A Parser instance contains state pertaining to the current token
sequence, and should not be used concurrently by different threads
to parse separate token sequences.
See driver.py for how to get input tokens by tokenizing a file or
string.
Parsing is complete when addtoken() returns True; the root of the
abstract syntax tree can then be retrieved from the rootnode
instance variable. When a syntax error occurs, addtoken() raises
the ParseError exception. There is no error recovery; the parser
cannot be used after a syntax error was reported (but it can be
reinitialized by calling setup()).
"""
def __init__(self, grammar, convert=None):
"""Constructor.
The grammar argument is a grammar.Grammar instance; see the
grammar module for more information.
The parser is not ready yet for parsing; you must call the
setup() method to get it started.
The optional convert argument is a function mapping concrete
syntax tree nodes to abstract syntax tree nodes. If not
given, no conversion is done and the syntax tree produced is
the concrete syntax tree. If given, it must be a function of
two arguments, the first being the grammar (a grammar.Grammar
instance), and the second being the concrete syntax tree node
to be converted. The syntax tree is converted from the bottom
up.
A concrete syntax tree node is a (type, value, context, nodes)
tuple, where type is the node type (a token or symbol number),
value is None for symbols and a string for tokens, context is
None or an opaque value used for error reporting (typically a
(lineno, offset) pair), and nodes is a list of children for
symbols, and None for tokens.
An abstract syntax tree node may be anything; this is entirely
up to the converter function.
"""
self.grammar = grammar
self.convert = convert or (lambda grammar, node: node)
def setup(self, start=None):
"""Prepare for parsing.
This *must* be called before starting to parse.
The optional argument is an alternative start symbol; it
defaults to the grammar's start symbol.
You can use a Parser instance to parse any number of programs;
each time you call setup() the parser is reset to an initial
state determined by the (implicit or explicit) start symbol.
"""
if start is None:
start = self.grammar.start
# Each stack entry is a tuple: (dfa, state, node).
# A node is a tuple: (type, value, context, children),
# where children is a list of nodes or None, and context may be None.
newnode = (start, None, None, [])
stackentry = (self.grammar.dfas[start], 0, newnode)
self.stack = [stackentry]
self.rootnode = None
self.used_names = set() # Aliased to self.rootnode.used_names in pop()
def addtoken(self, type, value, context):
"""Add a token; return True iff this is the end of the program."""
# Map from token to label
ilabel = self.classify(type, value, context)
# Loop until the token is shifted; may raise exceptions
while True:
dfa, state, node = self.stack[-1]
states, first = dfa
arcs = states[state]
# Look for a state with this label
for i, newstate in arcs:
t, v = self.grammar.labels[i]
if ilabel == i:
# Look it up in the list of labels
assert t < 256
# Shift a token; we're done with it
self.shift(type, value, newstate, context)
# Pop while we are in an accept-only state
state = newstate
while states[state] == [(0, state)]:
self.pop()
if not self.stack:
# Done parsing!
return True
dfa, state, node = self.stack[-1]
states, first = dfa
# Done with this token
return False
elif t >= 256:
# See if it's a symbol and if we're in its first set
itsdfa = self.grammar.dfas[t]
itsstates, itsfirst = itsdfa
if ilabel in itsfirst:
# Push a symbol
self.push(t, self.grammar.dfas[t], newstate, context)
break # To continue the outer while loop
else:
if (0, state) in arcs:
# An accepting state, pop it and try something else
self.pop()
if not self.stack:
# Done parsing, but another token is input
raise ParseError("too much input",
type, value, context)
else:
# No success finding a transition
raise ParseError("bad input", type, value, context)
def classify(self, type, value, context):
"""Turn a token into a label. (Internal)"""
if type == token.NAME:
# Keep a listing of all used names
self.used_names.add(value)
# Check for reserved words
ilabel = self.grammar.keywords.get(value)
if ilabel is not None:
return ilabel
ilabel = self.grammar.tokens.get(type)
if ilabel is None:
raise ParseError("bad token", type, value, context)
return ilabel
def shift(self, type, value, newstate, context):
"""Shift a token. (Internal)"""
dfa, state, node = self.stack[-1]
newnode = (type, value, context, None)
newnode = self.convert(self.grammar, newnode)
if newnode is not None:
node[-1].append(newnode)
self.stack[-1] = (dfa, newstate, node)
def push(self, type, newdfa, newstate, context):
"""Push a nonterminal. (Internal)"""
dfa, state, node = self.stack[-1]
newnode = (type, None, context, [])
self.stack[-1] = (dfa, newstate, node)
self.stack.append((newdfa, 0, newnode))
def pop(self):
"""Pop a nonterminal. (Internal)"""
popdfa, popstate, popnode = self.stack.pop()
newnode = self.convert(self.grammar, popnode)
if newnode is not None:
if self.stack:
dfa, state, node = self.stack[-1]
node[-1].append(newnode)
else:
self.rootnode = newnode
self.rootnode.used_names = self.used_names
| gpl-3.0 |
AndreaCrotti/offlineimap | docs/dev-doc-src/conf.py | 11 | 6621 | # -*- coding: utf-8 -*-
#
# pyDNS documentation build configuration file, created by
# sphinx-quickstart on Tue Feb 2 10:00:47 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0,os.path.abspath('../..'))
from offlineimap import __version__,__author__
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.todo']
autoclass_content = "both"
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'OfflineImap'
copyright = u'2002-2010, ' + __author__
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['html']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_use_modindex = False
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'dev-doc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'offlineimap.tex', u'OfflineImap Documentation',
u'OfflineImap contributors', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
| gpl-2.0 |