repo_name | path | copies | size | content | license
---|---|---|---|---|---|
srluge/SickRage | lib/sqlalchemy/dialects/mysql/cymysql.py | 78 | 2352 | # mysql/cymysql.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: mysql+cymysql
:name: CyMySQL
:dbapi: cymysql
:connectstring: mysql+cymysql://<username>:<password>@<host>/<dbname>[?<options>]
:url: https://github.com/nakagami/CyMySQL
"""
import re
from .mysqldb import MySQLDialect_mysqldb
from .base import (BIT, MySQLDialect)
from ... import util
class _cymysqlBIT(BIT):
def result_processor(self, dialect, coltype):
"""Convert a MySQL's 64 bit, variable length binary string to a long.
"""
def process(value):
if value is not None:
v = 0
for i in util.iterbytes(value):
v = v << 8 | i
return v
return value
return process
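# Worked example (illustrative): for a two-byte BIT value b'\x02\x01' the
# loop above computes ((0 << 8) | 0x02) << 8 | 0x01 == 513.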
class MySQLDialect_cymysql(MySQLDialect_mysqldb):
driver = 'cymysql'
description_encoding = None
supports_sane_rowcount = True
supports_sane_multi_rowcount = False
supports_unicode_statements = True
colspecs = util.update_copy(
MySQLDialect.colspecs,
{
BIT: _cymysqlBIT,
}
)
@classmethod
def dbapi(cls):
return __import__('cymysql')
def _get_server_version_info(self, connection):
dbapi_con = connection.connection
version = []
r = re.compile(r'[.\-]')
for n in r.split(dbapi_con.server_version):
try:
version.append(int(n))
except ValueError:
version.append(n)
return tuple(version)
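# Worked example (illustrative): a server_version string of '5.6.28-log'
# splits on '.' and '-' into ['5', '6', '28', 'log'] and is returned as
# (5, 6, 28, 'log').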
def _detect_charset(self, connection):
return connection.connection.charset
def _extract_error_code(self, exception):
return exception.errno
def is_disconnect(self, e, connection, cursor):
if isinstance(e, self.dbapi.OperationalError):
return self._extract_error_code(e) in \
(2006, 2013, 2014, 2045, 2055)
elif isinstance(e, self.dbapi.InterfaceError):
# if underlying connection is closed,
# this is the error you get
return True
else:
return False
dialect = MySQLDialect_cymysql
| gpl-3.0 |
NCBI-Hackathons/Pharmacogenomics_Prediction_Pipeline_P3 | doc/source/conf.py | 4 | 9581 | # -*- coding: utf-8 -*-
#
# Pharmacogenomics Prediction Pipeline (P3) documentation build configuration file, created by
# sphinx-quickstart on Thu Aug 13 09:37:04 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.todo',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Pharmacogenomics Prediction Pipeline (P3)'
copyright = u'2015, various'
author = u'various'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
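# With sphinx.ext.todo enabled above, reST sources can emit todo notes;
# an illustrative (hypothetical) directive would look like:
#
#   .. todo:: document the expected input file formats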
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'PharmacogenomicsPredictionPipelineP3doc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'PharmacogenomicsPredictionPipelineP3.tex', u'Pharmacogenomics Prediction Pipeline (P3) Documentation',
u'various', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pharmacogenomicspredictionpipelinep3', u'Pharmacogenomics Prediction Pipeline (P3) Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'PharmacogenomicsPredictionPipelineP3', u'Pharmacogenomics Prediction Pipeline (P3) Documentation',
author, 'PharmacogenomicsPredictionPipelineP3', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| cc0-1.0 |
xu6148152/Binea_Python_Project | automate the boring stuff with python/automate_online-materials/phoneAndEmail.py | 4 | 1235 | #! python3
# phoneAndEmail.py - Finds phone numbers and email addresses on the clipboard.
import pyperclip, re
phoneRegex = re.compile(r'''(
(\d{3}|\(\d{3}\))? # area code
(\s|-|\.)? # separator
(\d{3}) # first 3 digits
(\s|-|\.) # separator
(\d{4}) # last 4 digits
(\s*(ext|x|ext\.)\s*(\d{2,5}))? # extension
)''', re.VERBOSE)
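# Worked example (illustrative): '415-555-4242 x99' matches with area code
# '415', first 3 digits '555', last 4 digits '4242', and extension '99',
# so the loop below builds '415-555-4242 x99'.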
# Create email regex.
emailRegex = re.compile(r'''(
[a-zA-Z0-9._%+-]+ # username
@ # @ symbol
[a-zA-Z0-9.-]+ # domain name
(\.[a-zA-Z]{2,4}){1,2} # dot-something
)''', re.VERBOSE)
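# Worked example (illustrative): '[email protected]' matches with username
# 'info', domain name 'example', and dot-something '.com'.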
# Find matches in clipboard text.
text = str(pyperclip.paste())
matches = []
for groups in phoneRegex.findall(text):
phoneNum = '-'.join([groups[1], groups[3], groups[5]])
if groups[8] != '':
phoneNum += ' x' + groups[8]
matches.append(phoneNum)
for groups in emailRegex.findall(text):
matches.append(groups[0])
# Copy results to the clipboard.
if len(matches) > 0:
pyperclip.copy('\n'.join(matches))
print('Copied to clipboard:')
print('\n'.join(matches))
else:
print('No phone numbers or email addresses found.')
| mit |
sdgathman/pymilter | testsample.py | 1 | 5060 | import unittest
import Milter
import sample
import template
import mime
import zipfile
from Milter.test import TestBase
from Milter.testctx import TestCtx
class TestMilter(TestBase,sample.sampleMilter):
def __init__(self):
TestBase.__init__(self)
sample.sampleMilter.__init__(self)
class BMSMilterTestCase(unittest.TestCase):
def setUp(self):
self.zf = zipfile.ZipFile('test/virus.zip','r')
self.zf.setpassword(b'denatured')
def tearDown(self):
self.zf.close()
self.zf = None
def testTemplate(self,fname='test2'):
ctx = TestCtx()
Milter.factory = template.myMilter
ctx._setsymval('{auth_authen}','batman')
ctx._setsymval('{auth_type}','batcomputer')
ctx._setsymval('j','mailhost')
count = 10
while count > 0:
rc = ctx._connect(helo='milter-template.example.org')
self.assertEquals(rc,Milter.CONTINUE)
with open('test/'+fname,'rb') as fp:
rc = ctx._feedFile(fp)
milter = ctx.getpriv()
self.assertFalse(ctx._bodyreplaced,"Message body replaced")
ctx._close()
count -= 1
def testHeader(self,fname='utf8'):
ctx = TestCtx()
Milter.factory = sample.sampleMilter
ctx._setsymval('{auth_authen}','batman')
ctx._setsymval('{auth_type}','batcomputer')
ctx._setsymval('j','mailhost')
rc = ctx._connect()
self.assertEquals(rc,Milter.CONTINUE)
with open('test/'+fname,'rb') as fp:
rc = ctx._feedFile(fp)
milter = ctx.getpriv()
self.assertFalse(ctx._bodyreplaced,"Message body replaced")
fp = ctx._body
with open('test/'+fname+".tstout","wb") as ofp:
ofp.write(fp.getvalue())
ctx._close()
def testCtx(self,fname='virus1'):
ctx = TestCtx()
Milter.factory = sample.sampleMilter
ctx._setsymval('{auth_authen}','batman')
ctx._setsymval('{auth_type}','batcomputer')
ctx._setsymval('j','mailhost')
rc = ctx._connect()
self.assertTrue(rc == Milter.CONTINUE)
with self.zf.open(fname) as fp:
rc = ctx._feedFile(fp)
milter = ctx.getpriv()
# self.assertTrue(milter.user == 'batman',"getsymval failed: "+
# "%s != %s"%(milter.user,'batman'))
self.assertEquals(milter.user,'batman')
self.assertTrue(milter.auth_type != 'batcomputer',"setsymlist failed")
self.assertTrue(rc == Milter.ACCEPT)
self.assertTrue(ctx._bodyreplaced,"Message body not replaced")
fp = ctx._body
with open('test/'+fname+".tstout","wb") as f:
f.write(fp.getvalue())
#self.assertTrue(fp.getvalue() == open("test/virus1.out","r").read())
fp.seek(0)
msg = mime.message_from_file(fp)
s = msg.get_payload(1).get_payload()
milter.log(s)
ctx._close()
def testDefang(self,fname='virus1'):
milter = TestMilter()
milter.setsymval('{auth_authen}','batman')
milter.setsymval('{auth_type}','batcomputer')
milter.setsymval('j','mailhost')
rc = milter.connect()
self.assertTrue(rc == Milter.CONTINUE)
with self.zf.open(fname) as fp:
rc = milter.feedFile(fp)
self.assertTrue(milter.user == 'batman',"getsymval failed")
# setsymlist not working in TestBase
#self.assertTrue(milter.auth_type != 'batcomputer',"setsymlist failed")
self.assertTrue(rc == Milter.ACCEPT)
self.assertTrue(milter._bodyreplaced,"Message body not replaced")
fp = milter._body
with open('test/'+fname+".tstout","wb") as f:
f.write(fp.getvalue())
#self.assertTrue(fp.getvalue() == open("test/virus1.out","r").read())
fp.seek(0)
msg = mime.message_from_file(fp)
s = msg.get_payload(1).get_payload()
milter.log(s)
milter.close()
def testParse(self,fname='spam7'):
milter = TestMilter()
milter.connect('somehost')
rc = milter.feedMsg(fname)
self.assertTrue(rc == Milter.ACCEPT)
self.assertFalse(milter._bodyreplaced,"Milter needlessly replaced body.")
fp = milter._body
with open('test/'+fname+".tstout","wb") as f:
f.write(fp.getvalue())
milter.close()
def testDefang2(self):
milter = TestMilter()
milter.connect('somehost')
rc = milter.feedMsg('samp1')
self.assertTrue(rc == Milter.ACCEPT)
self.assertFalse(milter._bodyreplaced,"Milter needlessly replaced body.")
with self.zf.open("virus3") as fp:
rc = milter.feedFile(fp)
self.assertTrue(rc == Milter.ACCEPT)
self.assertTrue(milter._bodyreplaced,"Message body not replaced")
fp = milter._body
with open("test/virus3.tstout","wb") as f:
f.write(fp.getvalue())
#self.assertTrue(fp.getvalue() == open("test/virus3.out","r").read())
with self.zf.open("virus6") as fp:
rc = milter.feedFile(fp)
self.assertTrue(rc == Milter.ACCEPT)
self.assertTrue(milter._bodyreplaced,"Message body not replaced")
self.assertTrue(milter._headerschanged,"Message headers not adjusted")
fp = milter._body
with open("test/virus6.tstout","wb") as f:
f.write(fp.getvalue())
milter.close()
def suite(): return unittest.makeSuite(BMSMilterTestCase,'test')
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
Gagnavarslan/djangosaml2 | tests/testprofiles/tests.py | 9 | 2048 | # Copyright (C) 2012 Sam Bull ([email protected])
# Copyright (C) 2011-2012 Yaco Sistemas (http://www.yaco.es)
# Copyright (C) 2010 Lorenzo Gil Sanchez <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.contrib.auth.models import User
from django.test import TestCase
from djangosaml2.backends import Saml2Backend
from testprofiles.models import TestProfile
class Saml2BackendTests(TestCase):
def test_update_user(self):
# we need a user
user = User.objects.create(username='john')
backend = Saml2Backend()
attribute_mapping = {
'uid': ('username', ),
'mail': ('email', ),
'cn': ('first_name', ),
'sn': ('last_name', ),
}
attributes = {
'uid': ('john', ),
'mail': ('[email protected]', ),
'cn': ('John', ),
'sn': ('Doe', ),
}
backend.update_user(user, attributes, attribute_mapping)
self.assertEquals(user.email, '[email protected]')
self.assertEquals(user.first_name, 'John')
self.assertEquals(user.last_name, 'Doe')
# now we create a user profile and link it to the user
profile = TestProfile.objects.create(user=user)
self.assertNotEquals(profile, None)
attribute_mapping['saml_age'] = ('age', )
attributes['saml_age'] = ('22', )
backend.update_user(user, attributes, attribute_mapping)
self.assertEquals(user.get_profile().age, '22')
| apache-2.0 |
bobobox/ansible | lib/ansible/plugins/action/vyos.py | 5 | 4465 | #
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import sys
import copy
from ansible.plugins.action.normal import ActionModule as _ActionModule
from ansible.utils.path import unfrackpath
from ansible.plugins import connection_loader
from ansible.compat.six import iteritems
from ansible.module_utils.vyos import vyos_argument_spec
from ansible.module_utils.basic import AnsibleFallbackNotFound
from ansible.module_utils._text import to_bytes
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class ActionModule(_ActionModule):
def run(self, tmp=None, task_vars=None):
if self._play_context.connection != 'local':
return dict(
failed=True,
msg='invalid connection specified, expected connection=local, '
'got %s' % self._play_context.connection
)
provider = self.load_provider()
pc = copy.deepcopy(self._play_context)
pc.connection = 'network_cli'
pc.network_os = 'vyos'
pc.port = provider['port'] or self._play_context.port or 22
pc.remote_user = provider['username'] or self._play_context.connection_user
pc.password = provider['password'] or self._play_context.password
pc.private_key_file = provider['ssh_keyfile'] or self._play_context.private_key_file
pc.timeout = provider['timeout'] or self._play_context.timeout
connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin)
socket_path = self._get_socket_path(pc)
display.vvvv('socket_path: %s' % socket_path, pc.remote_addr)
if not os.path.exists(socket_path):
# start the connection if it isn't started
rc, out, err = connection.exec_command('open_shell()')
if rc != 0:
return {'failed': True, 'msg': 'unable to connect to control socket'}
else:
# make sure we are in the right cli context which should be
# enable mode and not config module
rc, out, err = connection.exec_command('prompt()')
while str(out).strip().endswith('#'):
display.debug('wrong context, sending exit to device', self._play_context.remote_addr)
connection.exec_command('exit')
rc, out, err = connection.exec_command('prompt()')
task_vars['ansible_socket'] = socket_path
return super(ActionModule, self).run(tmp, task_vars)
def _get_socket_path(self, play_context):
ssh = connection_loader.get('ssh', class_only=True)
cp = ssh._create_control_path(play_context.remote_addr, play_context.port, play_context.remote_user)
path = unfrackpath("$HOME/.ansible/pc")
return cp % dict(directory=path)
def load_provider(self):
provider = self._task.args.get('provider', {})
for key, value in iteritems(vyos_argument_spec):
if key != 'provider' and key not in provider:
if key in self._task.args:
provider[key] = self._task.args[key]
elif 'fallback' in value:
provider[key] = self._fallback(value['fallback'])
elif key not in provider:
provider[key] = None
return provider
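# Illustrative example: with task args {'provider': {'username': 'vyos'},
# 'port': 2222} (hypothetical values), load_provider() copies 'port' from
# the task args into the provider dict and sets any remaining unset keys
# to their declared fallback or None.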
def _fallback(self, fallback):
strategy = fallback[0]
args = []
kwargs = {}
for item in fallback[1:]:
if isinstance(item, dict):
kwargs = item
else:
args = item
try:
return strategy(*args, **kwargs)
except AnsibleFallbackNotFound:
pass
| gpl-3.0 |
damonkohler/sl4a | python/src/Lib/lib-tk/Tkconstants.py | 375 | 1493 | # Symbolic constants for Tk
# Booleans
NO=FALSE=OFF=0
YES=TRUE=ON=1
# -anchor and -sticky
N='n'
S='s'
W='w'
E='e'
NW='nw'
SW='sw'
NE='ne'
SE='se'
NS='ns'
EW='ew'
NSEW='nsew'
CENTER='center'
# -fill
NONE='none'
X='x'
Y='y'
BOTH='both'
# -side
LEFT='left'
TOP='top'
RIGHT='right'
BOTTOM='bottom'
# -relief
RAISED='raised'
SUNKEN='sunken'
FLAT='flat'
RIDGE='ridge'
GROOVE='groove'
SOLID = 'solid'
# -orient
HORIZONTAL='horizontal'
VERTICAL='vertical'
# -tabs
NUMERIC='numeric'
# -wrap
CHAR='char'
WORD='word'
# -align
BASELINE='baseline'
# -bordermode
INSIDE='inside'
OUTSIDE='outside'
# Special tags, marks and insert positions
SEL='sel'
SEL_FIRST='sel.first'
SEL_LAST='sel.last'
END='end'
INSERT='insert'
CURRENT='current'
ANCHOR='anchor'
ALL='all' # e.g. Canvas.delete(ALL)
# Text widget and button states
NORMAL='normal'
DISABLED='disabled'
ACTIVE='active'
# Canvas state
HIDDEN='hidden'
# Menu item types
CASCADE='cascade'
CHECKBUTTON='checkbutton'
COMMAND='command'
RADIOBUTTON='radiobutton'
SEPARATOR='separator'
# Selection modes for list boxes
SINGLE='single'
BROWSE='browse'
MULTIPLE='multiple'
EXTENDED='extended'
# Activestyle for list boxes
# NONE='none' is also valid
DOTBOX='dotbox'
UNDERLINE='underline'
# Various canvas styles
PIESLICE='pieslice'
CHORD='chord'
ARC='arc'
FIRST='first'
LAST='last'
BUTT='butt'
PROJECTING='projecting'
ROUND='round'
BEVEL='bevel'
MITER='miter'
# Arguments to xview/yview
MOVETO='moveto'
SCROLL='scroll'
UNITS='units'
PAGES='pages'
| apache-2.0 |
UManPychron/pychron | pychron/hardware/arduino/arduino_valve_actuator.py | 2 | 1597 | # ===============================================================================
# Copyright 2011 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
# from traits.api import HasTraits, on_trait_change,Str,Int,Float,Button
# from traitsui.api import View,Item,Group,HGroup,VGroup
# ============= standard library imports ========================
# ============= local library imports ==========================
from __future__ import absolute_import
from .arduino_gp_actuator import ArduinoGPActuator
class ArduinoValveActuator(ArduinoGPActuator):
def get_open_indicator_state(self, obj):
'''
'''
pass
def get_closed_indicator_state(self, obj):
'''
'''
pass
# def get_hard_lock_indicator_state(self, obj):
# '''
# '''
# cmd = 'A{}'.format(obj.name)
# return self.ask(cmd, verbose=False) == '1'
# ============= EOF ====================================
| apache-2.0 |
ryanahall/django | django/db/migrations/executor.py | 12 | 8846 | from __future__ import unicode_literals
from django.apps.registry import apps as global_apps
from django.db import migrations
from .loader import MigrationLoader
from .recorder import MigrationRecorder
from .state import ProjectState
class MigrationExecutor(object):
"""
End-to-end migration execution - loads migrations, and runs them
up or down to a specified set of targets.
"""
def __init__(self, connection, progress_callback=None):
self.connection = connection
self.loader = MigrationLoader(self.connection)
self.recorder = MigrationRecorder(self.connection)
self.progress_callback = progress_callback
def migration_plan(self, targets, clean_start=False):
"""
Given a set of targets, returns a list of (Migration instance, backwards?).
"""
plan = []
if clean_start:
applied = set()
else:
applied = set(self.loader.applied_migrations)
for target in targets:
# If the target is (app_label, None), that means unmigrate everything
if target[1] is None:
for root in self.loader.graph.root_nodes():
if root[0] == target[0]:
for migration in self.loader.graph.backwards_plan(root):
if migration in applied:
plan.append((self.loader.graph.nodes[migration], True))
applied.remove(migration)
# If the migration is already applied, do backwards mode,
# otherwise do forwards mode.
elif target in applied:
# Don't migrate backwards all the way to the target node (that
# may roll back dependencies in other apps that don't need to
# be rolled back); instead roll back through target's immediate
# child(ren) in the same app, and no further.
next_in_app = sorted(
n for n in
self.loader.graph.node_map[target].children
if n[0] == target[0]
)
for node in next_in_app:
for migration in self.loader.graph.backwards_plan(node):
if migration in applied:
plan.append((self.loader.graph.nodes[migration], True))
applied.remove(migration)
else:
for migration in self.loader.graph.forwards_plan(target):
if migration not in applied:
plan.append((self.loader.graph.nodes[migration], False))
applied.add(migration)
return plan
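# Illustrative example: a target of ('myapp', None) schedules every
# applied 'myapp' migration backwards, while an unapplied target such as
# ('myapp', '0002_foo') (hypothetical name) yields the forwards plan of
# unapplied migrations up to and including that node.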
def migrate(self, targets, plan=None, fake=False, fake_initial=False):
"""
Migrates the database up to the given targets.
Django first needs to create all project states before a migration is
(un)applied and in a second step run all the database operations.
"""
if plan is None:
plan = self.migration_plan(targets)
migrations_to_run = {m[0] for m in plan}
# Create the forwards plan Django would follow on an empty database
full_plan = self.migration_plan(self.loader.graph.leaf_nodes(), clean_start=True)
# Holds all states right before and right after a migration is applied
# if the migration is being run.
states = {}
state = ProjectState(real_apps=list(self.loader.unmigrated_apps))
if self.progress_callback:
self.progress_callback("render_start")
state.apps # Render all real_apps -- performance critical
if self.progress_callback:
self.progress_callback("render_success")
# Phase 1 -- Store all required states
for migration, _ in full_plan:
if migration in migrations_to_run:
states[migration] = state.clone()
state = migration.mutate_state(state) # state is cloned inside
# Phase 2 -- Run the migrations
for migration, backwards in plan:
if not backwards:
self.apply_migration(states[migration], migration, fake=fake, fake_initial=fake_initial)
else:
self.unapply_migration(states[migration], migration, fake=fake)
def collect_sql(self, plan):
"""
Takes a migration plan and returns a list of collected SQL
statements that represent the best-efforts version of that plan.
"""
statements = []
state = None
for migration, backwards in plan:
with self.connection.schema_editor(collect_sql=True) as schema_editor:
if state is None:
state = self.loader.project_state((migration.app_label, migration.name), at_end=False)
if not backwards:
state = migration.apply(state, schema_editor, collect_sql=True)
else:
state = migration.unapply(state, schema_editor, collect_sql=True)
statements.extend(schema_editor.collected_sql)
return statements
def apply_migration(self, state, migration, fake=False, fake_initial=False):
"""
Runs a migration forwards.
"""
if self.progress_callback:
self.progress_callback("apply_start", migration, fake)
if not fake:
if fake_initial:
# Test to see if this is an already-applied initial migration
applied, state = self.detect_soft_applied(state, migration)
if applied:
fake = True
if not fake:
# Alright, do it normally
with self.connection.schema_editor() as schema_editor:
state = migration.apply(state, schema_editor)
# For replacement migrations, record individual statuses
if migration.replaces:
for app_label, name in migration.replaces:
self.recorder.record_applied(app_label, name)
else:
self.recorder.record_applied(migration.app_label, migration.name)
# Report progress
if self.progress_callback:
self.progress_callback("apply_success", migration, fake)
return state
def unapply_migration(self, state, migration, fake=False):
"""
Runs a migration backwards.
"""
if self.progress_callback:
self.progress_callback("unapply_start", migration, fake)
if not fake:
with self.connection.schema_editor() as schema_editor:
state = migration.unapply(state, schema_editor)
# For replacement migrations, record individual statuses
if migration.replaces:
for app_label, name in migration.replaces:
self.recorder.record_unapplied(app_label, name)
else:
self.recorder.record_unapplied(migration.app_label, migration.name)
# Report progress
if self.progress_callback:
self.progress_callback("unapply_success", migration, fake)
return state
def detect_soft_applied(self, project_state, migration):
"""
Tests whether a migration has been implicitly applied - that the
tables it would create exist. This is intended only for use
on initial migrations (as it only looks for CreateModel).
"""
# Bail if the migration isn't the first one in its app
if [name for app, name in migration.dependencies if app == migration.app_label]:
return False, project_state
if project_state is None:
after_state = self.loader.project_state((migration.app_label, migration.name), at_end=True)
else:
after_state = migration.mutate_state(project_state)
apps = after_state.apps
found_create_migration = False
# Make sure all create model are done
for operation in migration.operations:
if isinstance(operation, migrations.CreateModel):
model = apps.get_model(migration.app_label, operation.name)
if model._meta.swapped:
# We have to fetch the model to test with from the
# main app cache, as it's not a direct dependency.
model = global_apps.get_model(model._meta.swapped)
if model._meta.db_table not in self.connection.introspection.table_names(self.connection.cursor()):
return False, project_state
found_create_migration = True
# If we get this far and we found at least one CreateModel migration,
# the migration is considered implicitly applied.
return found_create_migration, after_state
| bsd-3-clause |
Inspq/ansible | lib/ansible/modules/storage/netapp/sf_volume_manager.py | 49 | 11480 | #!/usr/bin/python
# (c) 2017, NetApp, Inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: sf_volume_manager
short_description: Manage SolidFire volumes
extends_documentation_fragment:
- netapp.solidfire
version_added: '2.3'
author: Sumit Kumar ([email protected])
description:
- Create, destroy, or update volumes on SolidFire
options:
state:
description:
- Whether the specified volume should exist or not.
required: true
choices: ['present', 'absent']
name:
description:
- The name of the volume to manage.
required: true
account_id:
description:
- Account ID for the owner of this volume.
required: true
512emulation:
description:
- Should the volume provide 512-byte sector emulation?
- Required when C(state=present)
required: false
qos:
description: Initial quality of service settings for this volume.
required: false
default: None
attributes:
description: A YAML dictionary of attributes that you would like to apply on this volume.
required: false
default: None
volume_id:
description:
- The ID of the volume to manage or update.
- In order to create multiple volumes with the same name, but different volume_ids, please declare the I(volume_id)
parameter with an arbitrary value. However, the specified volume_id will not be assigned to the newly created
volume (since it's an auto-generated property).
required: false
default: None
size:
description:
- The size of the volume in (size_unit).
- Required when C(state = present).
required: false
size_unit:
description:
- The unit used to interpret the size parameter.
required: false
choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb']
default: 'gb'
access:
required: false
choices: ['readOnly', 'readWrite', 'locked', 'replicationTarget']
description:
- "Access allowed for the volume."
- "readOnly: Only read operations are allowed."
- "readWrite: Reads and writes are allowed."
- "locked: No reads or writes are allowed."
- "replicationTarget: Identify a volume as the target volume for a paired set of volumes. If the volume is not paired, the access status is locked."
- "If unspecified, the access settings of the clone will be the same as the source."
default: None
'''
EXAMPLES = """
- name: Create Volume
sf_volume_manager:
hostname: "{{ solidfire_hostname }}"
username: "{{ solidfire_username }}"
password: "{{ solidfire_password }}"
state: present
name: AnsibleVol
account_id: 3
enable512e: False
size: 1
size_unit: gb
- name: Update Volume
sf_volume_manager:
hostname: "{{ solidfire_hostname }}"
username: "{{ solidfire_username }}"
password: "{{ solidfire_password }}"
state: present
name: AnsibleVol
account_id: 3
access: readWrite
- name: Delete Volume
sf_volume_manager:
hostname: "{{ solidfire_hostname }}"
username: "{{ solidfire_username }}"
password: "{{ solidfire_password }}"
state: absent
name: AnsibleVol
account_id: 2
"""
RETURN = """
msg:
description: Success message
returned: success
type: string
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
import ansible.module_utils.netapp as netapp_utils
HAS_SF_SDK = netapp_utils.has_sf_sdk()
class SolidFireVolume(object):
def __init__(self):
self._size_unit_map = netapp_utils.SF_BYTE_MAP
self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
self.argument_spec.update(dict(
state=dict(required=True, choices=['present', 'absent']),
name=dict(required=True, type='str'),
account_id=dict(required=True, type='int'),
enable512e=dict(type='bool', aliases=['512emulation']),
qos=dict(required=False, type='str', default=None),
attributes=dict(required=False, type='dict', default=None),
volume_id=dict(type='int', default=None),
size=dict(type='int'),
size_unit=dict(default='gb',
choices=['bytes', 'b', 'kb', 'mb', 'gb', 'tb',
'pb', 'eb', 'zb', 'yb'], type='str'),
access=dict(required=False, type='str', default=None, choices=['readOnly', 'readWrite',
'locked', 'replicationTarget']),
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
required_if=[
('state', 'present', ['size', 'enable512e'])
],
supports_check_mode=True
)
p = self.module.params
# set up state variables
self.state = p['state']
self.name = p['name']
self.account_id = p['account_id']
self.enable512e = p['enable512e']
self.qos = p['qos']
self.attributes = p['attributes']
self.volume_id = p['volume_id']
self.size_unit = p['size_unit']
if p['size'] is not None:
self.size = p['size'] * self._size_unit_map[self.size_unit]
else:
self.size = None
self.access = p['access']
if HAS_SF_SDK is False:
self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
else:
self.sfe = netapp_utils.create_sf_connection(module=self.module)
def get_volume(self):
"""
Return volume object if found
:return: Details about the volume. None if not found.
:rtype: dict
"""
volume_list = self.sfe.list_volumes_for_account(account_id=self.account_id)
for volume in volume_list.volumes:
if volume.name == self.name:
# Update self.volume_id
if self.volume_id is not None:
if volume.volume_id == self.volume_id and str(volume.delete_time) == "":
return volume
else:
if str(volume.delete_time) == "":
self.volume_id = volume.volume_id
return volume
return None
def create_volume(self):
try:
self.sfe.create_volume(name=self.name,
account_id=self.account_id,
total_size=self.size,
enable512e=self.enable512e,
qos=self.qos,
attributes=self.attributes)
except:
err = get_exception()
self.module.fail_json(msg="Error provisioning volume %s of size %s" % (self.name, self.size),
exception=str(err))
def delete_volume(self):
try:
self.sfe.delete_volume(volume_id=self.volume_id)
except:
err = get_exception()
self.module.fail_json(msg="Error deleting volume %s" % self.volume_id,
exception=str(err))
def update_volume(self):
try:
self.sfe.modify_volume(self.volume_id,
account_id=self.account_id,
access=self.access,
qos=self.qos,
total_size=self.size,
attributes=self.attributes)
except:
err = get_exception()
self.module.fail_json(msg="Error updating volume %s" % self.name,
exception=str(err))
def apply(self):
changed = False
volume_exists = False
update_volume = False
volume_detail = self.get_volume()
if volume_detail:
volume_exists = True
if self.state == 'absent':
# Checking for state change(s) here, and applying it later in the code allows us to support
# check_mode
changed = True
elif self.state == 'present':
if volume_detail.access is not None and self.access is not None and volume_detail.access != self.access:
update_volume = True
changed = True
elif volume_detail.account_id is not None and self.account_id is not None \
and volume_detail.account_id != self.account_id:
update_volume = True
changed = True
elif volume_detail.qos is not None and self.qos is not None and volume_detail.qos != self.qos:
update_volume = True
changed = True
elif volume_detail.total_size is not None and volume_detail.total_size != self.size:
size_difference = abs(float(volume_detail.total_size - self.size))
# Change size only if the relative difference is bigger than 0.001 (0.1%)
if size_difference/self.size > 0.001:
update_volume = True
changed = True
elif volume_detail.attributes is not None and self.attributes is not None and \
volume_detail.attributes != self.attributes:
update_volume = True
changed = True
else:
if self.state == 'present':
changed = True
result_message = ""
if changed:
if self.module.check_mode:
result_message = "Check mode, skipping changes"
pass
else:
if self.state == 'present':
if not volume_exists:
self.create_volume()
result_message = "Volume created"
elif update_volume:
self.update_volume()
result_message = "Volume updated"
elif self.state == 'absent':
self.delete_volume()
result_message = "Volume deleted"
self.module.exit_json(changed=changed, msg=result_message)
def main():
v = SolidFireVolume()
v.apply()
if __name__ == '__main__':
main()
| gpl-3.0 |
gauribhoite/personfinder | env/site-packages/babel/support.py | 137 | 21812 | # -*- coding: utf-8 -*-
"""
babel.support
~~~~~~~~~~~~~
Several classes and functions that help with integrating and using Babel
in applications.
.. note: the code in this module is not used by Babel itself
:copyright: (c) 2013 by the Babel Team.
:license: BSD, see LICENSE for more details.
"""
import gettext
import locale
from babel.core import Locale
from babel.dates import format_date, format_datetime, format_time, \
format_timedelta
from babel.numbers import format_number, format_decimal, format_currency, \
format_percent, format_scientific
from babel._compat import PY2, text_type, text_to_native
class Format(object):
"""Wrapper class providing the various date and number formatting functions
bound to a specific locale and time-zone.
>>> from babel.util import UTC
>>> from datetime import date
>>> fmt = Format('en_US', UTC)
>>> fmt.date(date(2007, 4, 1))
u'Apr 1, 2007'
>>> fmt.decimal(1.2345)
u'1.234'
"""
def __init__(self, locale, tzinfo=None):
"""Initialize the formatter.
:param locale: the locale identifier or `Locale` instance
:param tzinfo: the time-zone info (a `tzinfo` instance or `None`)
"""
self.locale = Locale.parse(locale)
self.tzinfo = tzinfo
def date(self, date=None, format='medium'):
"""Return a date formatted according to the given pattern.
>>> from datetime import date
>>> fmt = Format('en_US')
>>> fmt.date(date(2007, 4, 1))
u'Apr 1, 2007'
"""
return format_date(date, format, locale=self.locale)
def datetime(self, datetime=None, format='medium'):
"""Return a date and time formatted according to the given pattern.
>>> from datetime import datetime
>>> from pytz import timezone
>>> fmt = Format('en_US', tzinfo=timezone('US/Eastern'))
>>> fmt.datetime(datetime(2007, 4, 1, 15, 30))
u'Apr 1, 2007, 11:30:00 AM'
"""
return format_datetime(datetime, format, tzinfo=self.tzinfo,
locale=self.locale)
def time(self, time=None, format='medium'):
"""Return a time formatted according to the given pattern.
>>> from datetime import datetime
>>> from pytz import timezone
>>> fmt = Format('en_US', tzinfo=timezone('US/Eastern'))
>>> fmt.time(datetime(2007, 4, 1, 15, 30))
u'11:30:00 AM'
"""
return format_time(time, format, tzinfo=self.tzinfo, locale=self.locale)
def timedelta(self, delta, granularity='second', threshold=.85,
format='medium', add_direction=False):
"""Return a time delta according to the rules of the given locale.
>>> from datetime import timedelta
>>> fmt = Format('en_US')
>>> fmt.timedelta(timedelta(weeks=11))
u'3 months'
"""
return format_timedelta(delta, granularity=granularity,
threshold=threshold,
format=format, add_direction=add_direction,
locale=self.locale)
def number(self, number):
"""Return an integer number formatted for the locale.
>>> fmt = Format('en_US')
>>> fmt.number(1099)
u'1,099'
"""
return format_number(number, locale=self.locale)
def decimal(self, number, format=None):
"""Return a decimal number formatted for the locale.
>>> fmt = Format('en_US')
>>> fmt.decimal(1.2345)
u'1.234'
"""
return format_decimal(number, format, locale=self.locale)
def currency(self, number, currency):
"""Return a number in the given currency formatted for the locale.
"""
return format_currency(number, currency, locale=self.locale)
def percent(self, number, format=None):
"""Return a number formatted as percentage for the locale.
>>> fmt = Format('en_US')
>>> fmt.percent(0.34)
u'34%'
"""
return format_percent(number, format, locale=self.locale)
def scientific(self, number):
"""Return a number formatted using scientific notation for the locale.
"""
return format_scientific(number, locale=self.locale)
class LazyProxy(object):
"""Class for proxy objects that delegate to a specified function to evaluate
the actual object.
>>> def greeting(name='world'):
... return 'Hello, %s!' % name
>>> lazy_greeting = LazyProxy(greeting, name='Joe')
>>> print lazy_greeting
Hello, Joe!
>>> u' ' + lazy_greeting
u' Hello, Joe!'
>>> u'(%s)' % lazy_greeting
u'(Hello, Joe!)'
This can be used, for example, to implement lazy translation functions that
delay the actual translation until the string is actually used. The
rationale for such behavior is that the locale of the user may not always
be available. In web applications, you only know the locale when processing
a request.
The proxy implementation attempts to be as complete as possible, so that
the lazy objects should mostly work as expected, for example for sorting:
>>> greetings = [
... LazyProxy(greeting, 'world'),
... LazyProxy(greeting, 'Joe'),
... LazyProxy(greeting, 'universe'),
... ]
>>> greetings.sort()
>>> for greeting in greetings:
... print greeting
Hello, Joe!
Hello, universe!
Hello, world!
"""
__slots__ = ['_func', '_args', '_kwargs', '_value', '_is_cache_enabled']
def __init__(self, func, *args, **kwargs):
is_cache_enabled = kwargs.pop('enable_cache', True)
# Avoid triggering our own __setattr__ implementation
object.__setattr__(self, '_func', func)
object.__setattr__(self, '_args', args)
object.__setattr__(self, '_kwargs', kwargs)
object.__setattr__(self, '_is_cache_enabled', is_cache_enabled)
object.__setattr__(self, '_value', None)
@property
def value(self):
if self._value is None:
value = self._func(*self._args, **self._kwargs)
if not self._is_cache_enabled:
return value
object.__setattr__(self, '_value', value)
return self._value
def __contains__(self, key):
return key in self.value
def __nonzero__(self):
return bool(self.value)
def __dir__(self):
return dir(self.value)
def __iter__(self):
return iter(self.value)
def __len__(self):
return len(self.value)
def __str__(self):
return str(self.value)
def __unicode__(self):
return unicode(self.value)
def __add__(self, other):
return self.value + other
def __radd__(self, other):
return other + self.value
def __mod__(self, other):
return self.value % other
def __rmod__(self, other):
return other % self.value
def __mul__(self, other):
return self.value * other
def __rmul__(self, other):
return other * self.value
def __call__(self, *args, **kwargs):
return self.value(*args, **kwargs)
def __lt__(self, other):
return self.value < other
def __le__(self, other):
return self.value <= other
def __eq__(self, other):
return self.value == other
def __ne__(self, other):
return self.value != other
def __gt__(self, other):
return self.value > other
def __ge__(self, other):
return self.value >= other
def __delattr__(self, name):
delattr(self.value, name)
def __getattr__(self, name):
return getattr(self.value, name)
def __setattr__(self, name, value):
setattr(self.value, name, value)
def __delitem__(self, key):
del self.value[key]
def __getitem__(self, key):
return self.value[key]
def __setitem__(self, key, value):
self.value[key] = value
class NullTranslations(gettext.NullTranslations, object):
DEFAULT_DOMAIN = None
def __init__(self, fp=None):
"""Initialize a simple translations class which is not backed by a
real catalog. Behaves similar to gettext.NullTranslations but also
offers Babel's on *gettext methods (e.g. 'dgettext()').
:param fp: a file-like object (ignored in this class)
"""
# These attributes are set by gettext.NullTranslations when a catalog
# is parsed (fp != None). Ensure that they are always present because
# some *gettext methods (including '.gettext()') rely on the attributes.
self._catalog = {}
self.plural = lambda n: int(n != 1)
super(NullTranslations, self).__init__(fp=fp)
self.files = list(filter(None, [getattr(fp, 'name', None)]))
self.domain = self.DEFAULT_DOMAIN
self._domains = {}
def dgettext(self, domain, message):
"""Like ``gettext()``, but look the message up in the specified
domain.
"""
return self._domains.get(domain, self).gettext(message)
def ldgettext(self, domain, message):
"""Like ``lgettext()``, but look the message up in the specified
domain.
"""
return self._domains.get(domain, self).lgettext(message)
def udgettext(self, domain, message):
"""Like ``ugettext()``, but look the message up in the specified
domain.
"""
return self._domains.get(domain, self).ugettext(message)
# backward compatibility with 0.9
dugettext = udgettext
def dngettext(self, domain, singular, plural, num):
"""Like ``ngettext()``, but look the message up in the specified
domain.
"""
return self._domains.get(domain, self).ngettext(singular, plural, num)
def ldngettext(self, domain, singular, plural, num):
"""Like ``lngettext()``, but look the message up in the specified
domain.
"""
return self._domains.get(domain, self).lngettext(singular, plural, num)
def udngettext(self, domain, singular, plural, num):
"""Like ``ungettext()`` but look the message up in the specified
domain.
"""
return self._domains.get(domain, self).ungettext(singular, plural, num)
# backward compatibility with 0.9
dungettext = udngettext
# Most of the code below, until it gets included in the stdlib, is from:
# http://bugs.python.org/file10036/gettext-pgettext.patch
#
# The encoding of a msgctxt and a msgid in a .mo file is
# msgctxt + "\x04" + msgid (gettext version >= 0.15)
CONTEXT_ENCODING = '%s\x04%s'
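# Illustrative example: a catalog entry for context 'month' and message id
# 'May' is therefore looked up under the key 'month\x04May'.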
def pgettext(self, context, message):
"""Look up the `context` and `message` id in the catalog and return the
corresponding message string, as an 8-bit string encoded with the
catalog's charset encoding, if known. If there is no entry in the
catalog for the `message` id and `context` , and a fallback has been
set, the look up is forwarded to the fallback's ``pgettext()``
method. Otherwise, the `message` id is returned.
"""
ctxt_msg_id = self.CONTEXT_ENCODING % (context, message)
missing = object()
tmsg = self._catalog.get(ctxt_msg_id, missing)
if tmsg is missing:
if self._fallback:
return self._fallback.pgettext(context, message)
return message
# Encode the Unicode tmsg back to an 8-bit string, if possible
if self._output_charset:
return text_to_native(tmsg, self._output_charset)
elif self._charset:
return text_to_native(tmsg, self._charset)
return tmsg
def lpgettext(self, context, message):
"""Equivalent to ``pgettext()``, but the translation is returned in the
preferred system encoding, if no other encoding was explicitly set with
``bind_textdomain_codeset()``.
"""
ctxt_msg_id = self.CONTEXT_ENCODING % (context, message)
missing = object()
tmsg = self._catalog.get(ctxt_msg_id, missing)
if tmsg is missing:
if self._fallback:
return self._fallback.lpgettext(context, message)
return message
if self._output_charset:
return tmsg.encode(self._output_charset)
return tmsg.encode(locale.getpreferredencoding())
def npgettext(self, context, singular, plural, num):
"""Do a plural-forms lookup of a message id. `singular` is used as the
message id for purposes of lookup in the catalog, while `num` is used to
determine which plural form to use. The returned message string is an
8-bit string encoded with the catalog's charset encoding, if known.
If the message id for `context` is not found in the catalog, and a
fallback is specified, the request is forwarded to the fallback's
``npgettext()`` method. Otherwise, when ``num`` is 1 ``singular`` is
returned, and ``plural`` is returned in all other cases.
"""
ctxt_msg_id = self.CONTEXT_ENCODING % (context, singular)
try:
tmsg = self._catalog[(ctxt_msg_id, self.plural(num))]
if self._output_charset:
return text_to_native(tmsg, self._output_charset)
elif self._charset:
return text_to_native(tmsg, self._charset)
return tmsg
except KeyError:
if self._fallback:
return self._fallback.npgettext(context, singular, plural, num)
if num == 1:
return singular
else:
return plural
def lnpgettext(self, context, singular, plural, num):
"""Equivalent to ``npgettext()``, but the translation is returned in the
preferred system encoding, if no other encoding was explicitly set with
``bind_textdomain_codeset()``.
"""
ctxt_msg_id = self.CONTEXT_ENCODING % (context, singular)
try:
tmsg = self._catalog[(ctxt_msg_id, self.plural(num))]
if self._output_charset:
return tmsg.encode(self._output_charset)
return tmsg.encode(locale.getpreferredencoding())
except KeyError:
if self._fallback:
return self._fallback.lnpgettext(context, singular, plural, num)
if num == 1:
return singular
else:
return plural
def upgettext(self, context, message):
"""Look up the `context` and `message` id in the catalog and return the
corresponding message string, as a Unicode string. If there is no entry
in the catalog for the `message` id and `context`, and a fallback has
been set, the look up is forwarded to the fallback's ``upgettext()``
method. Otherwise, the `message` id is returned.
"""
ctxt_message_id = self.CONTEXT_ENCODING % (context, message)
missing = object()
tmsg = self._catalog.get(ctxt_message_id, missing)
if tmsg is missing:
if self._fallback:
return self._fallback.upgettext(context, message)
return text_type(message)
return tmsg
def unpgettext(self, context, singular, plural, num):
"""Do a plural-forms lookup of a message id. `singular` is used as the
message id for purposes of lookup in the catalog, while `num` is used to
determine which plural form to use. The returned message string is a
Unicode string.
If the message id for `context` is not found in the catalog, and a
fallback is specified, the request is forwarded to the fallback's
``unpgettext()`` method. Otherwise, when `num` is 1 `singular` is
returned, and `plural` is returned in all other cases.
"""
ctxt_message_id = self.CONTEXT_ENCODING % (context, singular)
try:
tmsg = self._catalog[(ctxt_message_id, self.plural(num))]
except KeyError:
if self._fallback:
return self._fallback.unpgettext(context, singular, plural, num)
if num == 1:
tmsg = text_type(singular)
else:
tmsg = text_type(plural)
return tmsg
def dpgettext(self, domain, context, message):
"""Like `pgettext()`, but look the message up in the specified
`domain`.
"""
return self._domains.get(domain, self).pgettext(context, message)
def udpgettext(self, domain, context, message):
"""Like `upgettext()`, but look the message up in the specified
`domain`.
"""
return self._domains.get(domain, self).upgettext(context, message)
# backward compatibility with 0.9
dupgettext = udpgettext
def ldpgettext(self, domain, context, message):
"""Equivalent to ``dpgettext()``, but the translation is returned in the
preferred system encoding, if no other encoding was explicitly set with
``bind_textdomain_codeset()``.
"""
return self._domains.get(domain, self).lpgettext(context, message)
def dnpgettext(self, domain, context, singular, plural, num):
"""Like ``npgettext``, but look the message up in the specified
`domain`.
"""
return self._domains.get(domain, self).npgettext(context, singular,
plural, num)
def udnpgettext(self, domain, context, singular, plural, num):
"""Like ``unpgettext``, but look the message up in the specified
`domain`.
"""
return self._domains.get(domain, self).unpgettext(context, singular,
plural, num)
# backward compatibility with 0.9
dunpgettext = udnpgettext
def ldnpgettext(self, domain, context, singular, plural, num):
"""Equivalent to ``dnpgettext()``, but the translation is returned in
the preferred system encoding, if no other encoding was explicitly set
with ``bind_textdomain_codeset()``.
"""
return self._domains.get(domain, self).lnpgettext(context, singular,
plural, num)
if not PY2:
ugettext = gettext.NullTranslations.gettext
ungettext = gettext.NullTranslations.ngettext
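# --- Illustrative sketch (added; not part of Babel) --------------------------
# Shows how the context-aware lookups above key the catalog: the message id is
# the context joined to the msgid via CONTEXT_ENCODING. The catalog contents
# are invented, and we assume a NullTranslations can be built without an MO
# file; the _catalog dict is assigned by hand for the same reason.
def _demo_upgettext():
    t = NullTranslations()
    ctxt_id = t.CONTEXT_ENCODING % ('menu', 'Open')
    t._catalog = {ctxt_id: u'Open (menu entry)'}
    # A known (context, msgid) pair resolves to the catalog entry:
    assert t.upgettext('menu', 'Open') == u'Open (menu entry)'
    # An unknown pair falls back to the msgid itself:
    assert t.upgettext('toolbar', 'Open') == u'Open'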
class Translations(NullTranslations, gettext.GNUTranslations):
"""An extended translation catalog class."""
DEFAULT_DOMAIN = 'messages'
def __init__(self, fp=None, domain=None):
"""Initialize the translations catalog.
:param fp: the file-like object the translation should be read from
:param domain: the message domain (default: 'messages')
"""
super(Translations, self).__init__(fp=fp)
self.domain = domain or self.DEFAULT_DOMAIN
if not PY2:
ugettext = gettext.GNUTranslations.gettext
ungettext = gettext.GNUTranslations.ngettext
@classmethod
def load(cls, dirname=None, locales=None, domain=None):
"""Load translations from the given directory.
:param dirname: the directory containing the ``MO`` files
:param locales: the list of locales in order of preference (items in
this list can be either `Locale` objects or locale
strings)
:param domain: the message domain (default: 'messages')
"""
if locales is not None:
if not isinstance(locales, (list, tuple)):
locales = [locales]
locales = [str(locale) for locale in locales]
if not domain:
domain = cls.DEFAULT_DOMAIN
filename = gettext.find(domain, dirname, locales)
if not filename:
return NullTranslations()
with open(filename, 'rb') as fp:
return cls(fp=fp, domain=domain)
def __repr__(self):
return '<%s: "%s">' % (type(self).__name__,
self._info.get('project-id-version'))
def add(self, translations, merge=True):
"""Add the given translations to the catalog.
If the domain of the translations is different than that of the
current catalog, they are added as a catalog that is only accessible
by the various ``d*gettext`` functions.
:param translations: the `Translations` instance with the messages to
add
:param merge: whether translations for message domains that have
already been added should be merged with the existing
translations
"""
domain = getattr(translations, 'domain', self.DEFAULT_DOMAIN)
if merge and domain == self.domain:
return self.merge(translations)
existing = self._domains.get(domain)
if merge and existing is not None:
existing.merge(translations)
else:
translations.add_fallback(self)
self._domains[domain] = translations
return self
def merge(self, translations):
"""Merge the given translations into the catalog.
Message translations in the specified catalog override any messages
with the same identifier in the existing catalog.
:param translations: the `Translations` instance with the messages to
merge
"""
if isinstance(translations, gettext.GNUTranslations):
self._catalog.update(translations._catalog)
if isinstance(translations, Translations):
self.files.extend(translations.files)
return self
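# Illustrative sketch (added; not part of Babel): merge() gives
# last-catalog-wins semantics. The catalogs and file lists are assigned by
# hand so no MO files are needed -- this assumes hand-built instances are
# acceptable, which the real code never does.
def _demo_merge():
    base, extra = Translations(), Translations()
    base._catalog = {'hello': u'hallo'}
    base.files = []
    extra._catalog = {'hello': u'salut', 'bye': u'tschau'}
    extra.files = []
    base.merge(extra)
    # The entry from the catalog merged in last wins:
    assert base._catalog == {'hello': u'salut', 'bye': u'tschau'}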
| apache-2.0 |
arvinquilao/android_kernel_cyanogen_msm8916 | tools/perf/scripts/python/net_dropmonitor.py | 2669 | 1738 | # Monitor the system for dropped packets and produce a report of drop locations and counts
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
drop_log = {}
kallsyms = []
def get_kallsyms_table():
global kallsyms
try:
f = open("/proc/kallsyms", "r")
except:
return
for line in f:
loc = int(line.split()[0], 16)
name = line.split()[2]
kallsyms.append((loc, name))
kallsyms.sort()
def get_sym(sloc):
loc = int(sloc)
# Invariant: kallsyms[i][0] <= loc for all 0 <= i <= start
# kallsyms[i][0] > loc for all end <= i < len(kallsyms)
start, end = -1, len(kallsyms)
while end != start + 1:
pivot = (start + end) // 2
if loc < kallsyms[pivot][0]:
end = pivot
else:
start = pivot
# Now (start == -1 or kallsyms[start][0] <= loc)
# and (start == len(kallsyms) - 1 or loc < kallsyms[start + 1][0])
if start >= 0:
symloc, name = kallsyms[start]
return (name, loc - symloc)
else:
return (None, 0)
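# Added for illustration (not in the original perf script): the hand-rolled
# binary search in get_sym() computes the same answer as bisect_right over the
# sorted symbol addresses. Rebuilding the address list per call is wasteful
# but keeps the sketch self-contained.
import bisect
def get_sym_bisect(sloc):
    loc = int(sloc)
    addrs = [addr for addr, _name in kallsyms]
    i = bisect.bisect_right(addrs, loc) - 1
    if i >= 0:
        return (kallsyms[i][1], loc - kallsyms[i][0])
    return (None, 0)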
def print_drop_table():
print "%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT")
for i in drop_log.keys():
(sym, off) = get_sym(i)
if sym == None:
sym = i
print "%25s %25s %25s" % (sym, off, drop_log[i])
def trace_begin():
print "Starting trace (Ctrl-C to dump results)"
def trace_end():
print "Gathering kallsyms data"
get_kallsyms_table()
print_drop_table()
# called from perf, when it finds a corresponding event
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
skbaddr, location, protocol):
slocation = str(location)
try:
drop_log[slocation] = drop_log[slocation] + 1
except:
drop_log[slocation] = 1
| gpl-2.0 |
fengzhyuan/scikit-learn | sklearn/tests/test_metaestimators.py | 226 | 4954 | """Common tests for metaestimators"""
import functools
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.externals.six import iterkeys
from sklearn.datasets import make_classification
from sklearn.utils.testing import assert_true, assert_false, assert_raises
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV, RandomizedSearchCV
from sklearn.feature_selection import RFE, RFECV
from sklearn.ensemble import BaggingClassifier
class DelegatorData(object):
def __init__(self, name, construct, skip_methods=(),
fit_args=make_classification()):
self.name = name
self.construct = construct
self.fit_args = fit_args
self.skip_methods = skip_methods
DELEGATING_METAESTIMATORS = [
DelegatorData('Pipeline', lambda est: Pipeline([('est', est)])),
DelegatorData('GridSearchCV',
lambda est: GridSearchCV(
est, param_grid={'param': [5]}, cv=2),
skip_methods=['score']),
DelegatorData('RandomizedSearchCV',
lambda est: RandomizedSearchCV(
est, param_distributions={'param': [5]}, cv=2, n_iter=1),
skip_methods=['score']),
DelegatorData('RFE', RFE,
skip_methods=['transform', 'inverse_transform', 'score']),
DelegatorData('RFECV', RFECV,
skip_methods=['transform', 'inverse_transform', 'score']),
DelegatorData('BaggingClassifier', BaggingClassifier,
skip_methods=['transform', 'inverse_transform', 'score',
'predict_proba', 'predict_log_proba', 'predict'])
]
def test_metaestimator_delegation():
# Ensures specified metaestimators have methods iff subestimator does
def hides(method):
@property
def wrapper(obj):
if obj.hidden_method == method.__name__:
raise AttributeError('%r is hidden' % obj.hidden_method)
return functools.partial(method, obj)
return wrapper
class SubEstimator(BaseEstimator):
def __init__(self, param=1, hidden_method=None):
self.param = param
self.hidden_method = hidden_method
def fit(self, X, y=None, *args, **kwargs):
self.coef_ = np.arange(X.shape[1])
return True
def _check_fit(self):
if not hasattr(self, 'coef_'):
raise RuntimeError('Estimator is not fit')
@hides
def inverse_transform(self, X, *args, **kwargs):
self._check_fit()
return X
@hides
def transform(self, X, *args, **kwargs):
self._check_fit()
return X
@hides
def predict(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def predict_proba(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def predict_log_proba(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def decision_function(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def score(self, X, *args, **kwargs):
self._check_fit()
return 1.0
methods = [k for k in iterkeys(SubEstimator.__dict__)
if not k.startswith('_') and not k.startswith('fit')]
methods.sort()
for delegator_data in DELEGATING_METAESTIMATORS:
delegate = SubEstimator()
delegator = delegator_data.construct(delegate)
for method in methods:
if method in delegator_data.skip_methods:
continue
assert_true(hasattr(delegate, method))
assert_true(hasattr(delegator, method),
msg="%s does not have method %r when its delegate does"
% (delegator_data.name, method))
# delegation before fit raises an exception
assert_raises(Exception, getattr(delegator, method),
delegator_data.fit_args[0])
delegator.fit(*delegator_data.fit_args)
for method in methods:
if method in delegator_data.skip_methods:
continue
# smoke test delegation
getattr(delegator, method)(delegator_data.fit_args[0])
for method in methods:
if method in delegator_data.skip_methods:
continue
delegate = SubEstimator(hidden_method=method)
delegator = delegator_data.construct(delegate)
assert_false(hasattr(delegate, method))
assert_false(hasattr(delegator, method),
msg="%s has method %r when its delegate does not"
% (delegator_data.name, method))
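# Standalone illustration (added; not part of scikit-learn): the core trick
# used by ``hides`` above -- a property that raises AttributeError makes
# ``hasattr`` report the method as missing on instances, which is exactly what
# duck-typing metaestimator checks rely on.
class _HidingExample(object):
    @property
    def transform(self):
        raise AttributeError('transform is hidden')
assert not hasattr(_HidingExample(), 'transform')
assert hasattr(_HidingExample, 'transform')  # the descriptor itself remains on the class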
| bsd-3-clause |
pombredanne/Rusthon | regtests/bench/fannkuch.py | 2 | 1085 | # The Computer Language Benchmarks Game
# http://shootout.alioth.debian.org/
#
# contributed by Sokolov Yura
# modified by Tupteq
# modified by hartsantler 2014
from time import clock
from runtime import *
DEFAULT_ARG = 9
def main():
times = []
for i in range(4):
t0 = clock()
res = fannkuch(DEFAULT_ARG)
tk = clock()
times.append(tk - t0)
avg = sum(times) / len(times)
print(avg)
def fannkuch(n):
count = list(range(1, n+1))
perm1 = list(range(n))
perm = list(range(n))
max_flips = 0
m = n-1
r = n
check = 0
while True:
if check < 30:
check += 1
while r != 1:
count[r-1] = r
r -= 1
if perm1[0] != 0 and perm1[m] != m:
perm = perm1[:]
flips_count = 0
k = perm[0]
#while k: ## TODO fix for dart
while k != 0:
perm[:k+1] = perm[k::-1]
flips_count += 1
k = perm[0]
if flips_count > max_flips:
max_flips = flips_count
do_return = True
while r != n:
perm1.insert(r, perm1.pop(0))
count[r] -= 1
if count[r] > 0:
do_return = False
break
r += 1
if do_return:
return max_flips
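# Added for illustration: a non-executing sanity probe. The expected maximum
# flip count for n == 7 (16) is taken from the Benchmarks Game reference
# output and is an assumption here, not part of the original benchmark.
def _sanity_check():
    return fannkuch(7) == 16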
main()
| bsd-3-clause |
Mixser/django | tests/template_tests/filter_tests/test_wordwrap.py | 324 | 1666 | from django.template.defaultfilters import wordwrap
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe
from ..utils import setup
class WordwrapTests(SimpleTestCase):
@setup({'wordwrap01':
'{% autoescape off %}{{ a|wordwrap:"3" }} {{ b|wordwrap:"3" }}{% endautoescape %}'})
def test_wordwrap01(self):
output = self.engine.render_to_string('wordwrap01', {'a': 'a & b', 'b': mark_safe('a & b')})
self.assertEqual(output, 'a &\nb a &\nb')
@setup({'wordwrap02': '{{ a|wordwrap:"3" }} {{ b|wordwrap:"3" }}'})
def test_wordwrap02(self):
output = self.engine.render_to_string('wordwrap02', {'a': 'a & b', 'b': mark_safe('a & b')})
self.assertEqual(output, 'a &\nb a &\nb')
class FunctionTests(SimpleTestCase):
def test_wrap(self):
self.assertEqual(
wordwrap('this is a long paragraph of text that really needs to be wrapped I\'m afraid', 14),
'this is a long\nparagraph of\ntext that\nreally needs\nto be wrapped\nI\'m afraid',
)
def test_indent(self):
self.assertEqual(
wordwrap('this is a short paragraph of text.\n But this line should be indented', 14),
'this is a\nshort\nparagraph of\ntext.\n But this\nline should be\nindented',
)
def test_indent2(self):
self.assertEqual(
wordwrap('this is a short paragraph of text.\n But this line should be indented', 15),
'this is a short\nparagraph of\ntext.\n But this line\nshould be\nindented',
)
def test_non_string_input(self):
self.assertEqual(wordwrap(123, 2), '123')
| bsd-3-clause |
StuartGordonReid/Comp-Finance | Optimizers/Solution.py | 1 | 1512 | __author__ = 'Stuart Gordon Reid'
__email__ = '[email protected]'
__website__ = 'http://www.stuartreid.co.za'
"""
File description
"""
class Solution(object):
solution = []
def __init__(self, solution, problem):
"""
Abstract initialization method for a solution to some optimization function
:param solution: a numpy array (much faster than lists)
"""
self.solution = solution
self.problem = problem
return
def __len__(self):
"""
Overload of the len operator for the Solution class
:rtype : Sized?
"""
return len(self.solution)
def update(self, solution):
"""
This method is used for updating a solution
"""
self.solution = solution
def get(self):
"""
This method is used to retrieve the numpy array for direct manipulation
"""
return self.solution
def evaluate(self):
return self.problem.evaluate(self.solution)
def __gt__(self, other):
assert isinstance(other, Solution)
        if self.problem.optimization == "min":
            return self.evaluate() < other.evaluate()
        elif self.problem.optimization == "max":
            return self.evaluate() > other.evaluate()
def deep_copy(self):
copy = Solution(None, self.problem)
copy.solution = []
for i in range(len(self.solution)):
copy.solution.append(self.solution[i])
return copy
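# Usage sketch (added for illustration; ``_SphereProblem`` is invented and not
# part of this code base). For a "min" problem, __gt__ reads as "is fitter",
# i.e. has the *lower* objective value.
class _SphereProblem(object):
    optimization = "min"
    def evaluate(self, solution):
        return sum(x * x for x in solution)
def _demo_solution_compare():
    problem = _SphereProblem()
    better = Solution([0.5, 0.5], problem)   # objective 0.5
    worse = Solution([1.0, 2.0], problem)    # objective 5.0
    assert better > worse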
| lgpl-3.0 |
defaultnamehere/grr | lib/flows/general/endtoend_test.py | 3 | 8099 | #!/usr/bin/env python
"""Tests for grr.lib.flows.general.endtoend."""
# pylint: disable=unused-import, g-bad-import-order
from grr.lib import server_plugins
# pylint: enable=unused-import, g-bad-import-order
from grr.endtoend_tests import base
from grr.lib import action_mocks
from grr.lib import aff4
from grr.lib import flags
from grr.lib import flow
from grr.lib import rdfvalue
from grr.lib import test_lib
from grr.lib import utils
class MockEndToEndTest(base.AutomatedTest):
platforms = ["Linux", "Darwin"]
flow = "ListDirectory"
args = {"pathspec": rdfvalue.PathSpec(
path="/bin",
pathtype=rdfvalue.PathSpec.PathType.OS)}
output_path = "/fs/os/bin"
file_to_find = "ls"
def setUp(self):
pass
def CheckFlow(self):
pass
def tearDown(self):
pass
class MockEndToEndTestBadFlow(MockEndToEndTest):
flow = "RaiseOnStart"
args = {}
class TestBadSetUp(MockEndToEndTest):
def setUp(self):
raise RuntimeError
class TestBadTearDown(MockEndToEndTest):
def tearDown(self):
raise RuntimeError
class TestFailure(MockEndToEndTest):
def CheckFlow(self):
raise RuntimeError("This should be logged")
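# Sketch (added for illustration; not part of GRR): the mock tests above are
# configured entirely through class attributes, so the harness can introspect
# them without starting a flow.
def _demo_mock_attributes():
  assert MockEndToEndTest.flow == "ListDirectory"
  assert MockEndToEndTestBadFlow.flow == "RaiseOnStart"
  assert MockEndToEndTestBadFlow.args == {}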
class TestEndToEndTestFlow(test_lib.FlowTestsBaseclass):
def setUp(self):
super(TestEndToEndTestFlow, self).setUp()
install_time = rdfvalue.RDFDatetime().Now()
user = "testuser"
userobj = rdfvalue.User(username=user)
interface = rdfvalue.Interface(ifname="eth0")
self.client = aff4.FACTORY.Create(self.client_id, "VFSGRRClient", mode="rw",
token=self.token, age=aff4.ALL_TIMES)
self.client.Set(self.client.Schema.HOSTNAME("hostname"))
self.client.Set(self.client.Schema.SYSTEM("Linux"))
self.client.Set(self.client.Schema.OS_RELEASE("debian"))
self.client.Set(self.client.Schema.OS_VERSION("14.04"))
self.client.Set(self.client.Schema.KERNEL("3.15-rc2"))
self.client.Set(self.client.Schema.FQDN("hostname.example.com"))
self.client.Set(self.client.Schema.ARCH("x86_64"))
self.client.Set(self.client.Schema.INSTALL_DATE(install_time))
self.client.Set(self.client.Schema.USER([userobj]))
self.client.Set(self.client.Schema.USERNAMES([user]))
self.client.Set(self.client.Schema.LAST_INTERFACES([interface]))
self.client.Flush()
self.client_mock = action_mocks.ActionMock("ListDirectory", "StatFile")
def testRunSuccess(self):
args = rdfvalue.EndToEndTestFlowArgs(
test_names=["TestListDirectoryOSLinuxDarwin",
"MockEndToEndTest",
"TestListDirectoryOSLinuxDarwin"])
with test_lib.Instrument(flow.GRRFlow, "SendReply") as send_reply:
for _ in test_lib.TestFlowHelper(
"EndToEndTestFlow", self.client_mock, client_id=self.client_id,
token=self.token, args=args):
pass
results = []
for _, reply in send_reply.args:
if isinstance(reply, rdfvalue.EndToEndTestResult):
results.append(reply)
self.assertTrue(reply.success)
self.assertTrue(reply.test_class_name in [
"TestListDirectoryOSLinuxDarwin", "MockEndToEndTest"])
self.assertFalse(reply.log)
# We only expect 2 results because we dedup test names
self.assertEqual(len(results), 2)
def testNoApplicableTests(self):
"""Try to run linux tests on windows."""
install_time = rdfvalue.RDFDatetime().Now()
user = "testuser"
userobj = rdfvalue.User(username=user)
interface = rdfvalue.Interface(ifname="eth0")
self.client = aff4.FACTORY.Create(self.client_id, "VFSGRRClient", mode="rw",
token=self.token, age=aff4.ALL_TIMES)
self.client.Set(self.client.Schema.HOSTNAME("hostname"))
self.client.Set(self.client.Schema.SYSTEM("Windows"))
self.client.Set(self.client.Schema.OS_RELEASE("7"))
self.client.Set(self.client.Schema.OS_VERSION("6.1.7601SP1"))
self.client.Set(self.client.Schema.KERNEL("6.1.7601"))
self.client.Set(self.client.Schema.FQDN("hostname.example.com"))
self.client.Set(self.client.Schema.ARCH("AMD64"))
self.client.Set(self.client.Schema.INSTALL_DATE(install_time))
self.client.Set(self.client.Schema.USER([userobj]))
self.client.Set(self.client.Schema.USERNAMES([user]))
self.client.Set(self.client.Schema.LAST_INTERFACES([interface]))
self.client.Flush()
args = rdfvalue.EndToEndTestFlowArgs(
test_names=["TestListDirectoryOSLinuxDarwin",
"MockEndToEndTest",
"TestListDirectoryOSLinuxDarwin"])
self.assertRaises(flow.FlowError, list, test_lib.TestFlowHelper(
"EndToEndTestFlow", self.client_mock, client_id=self.client_id,
token=self.token, args=args))
def testRunSuccessAndFail(self):
args = rdfvalue.EndToEndTestFlowArgs()
with utils.Stubber(base.AutomatedTest, "classes",
{"MockEndToEndTest": MockEndToEndTest,
"TestFailure": TestFailure}):
with test_lib.Instrument(flow.GRRFlow, "SendReply") as send_reply:
for _ in test_lib.TestFlowHelper(
"EndToEndTestFlow", self.client_mock, client_id=self.client_id,
token=self.token, args=args):
pass
results = []
for _, reply in send_reply.args:
if isinstance(reply, rdfvalue.EndToEndTestResult):
results.append(reply)
if reply.test_class_name == "MockEndToEndTest":
self.assertTrue(reply.success)
self.assertFalse(reply.log)
elif reply.test_class_name == "TestFailure":
self.assertFalse(reply.success)
self.assertTrue("This should be logged" in reply.log)
self.assertItemsEqual([x.test_class_name for x in results],
["MockEndToEndTest", "TestFailure"])
self.assertEqual(len(results), 2)
def testRunBadSetUp(self):
args = rdfvalue.EndToEndTestFlowArgs(
test_names=["TestBadSetUp"])
self.assertRaises(RuntimeError, list, test_lib.TestFlowHelper(
"EndToEndTestFlow", self.client_mock, client_id=self.client_id,
token=self.token, args=args))
def testRunBadTearDown(self):
args = rdfvalue.EndToEndTestFlowArgs(
test_names=["TestBadTearDown"])
self.assertRaises(RuntimeError, list, test_lib.TestFlowHelper(
"EndToEndTestFlow", self.client_mock, client_id=self.client_id,
token=self.token, args=args))
def testRunBadFlow(self):
"""Test behaviour when test flow raises in Start.
A flow that raises in its Start method will kill the EndToEndTest run.
Protecting and reporting on this significantly complicates this code, and a
flow raising in Start is really broken, so we allow this behaviour.
"""
args = rdfvalue.EndToEndTestFlowArgs(
test_names=["MockEndToEndTestBadFlow", "MockEndToEndTest"])
self.assertRaises(RuntimeError, list, test_lib.TestFlowHelper(
"EndToEndTestFlow", self.client_mock, client_id=self.client_id,
token=self.token, args=args))
def testEndToEndTestFailure(self):
args = rdfvalue.EndToEndTestFlowArgs(
test_names=["TestFailure"])
with test_lib.Instrument(flow.GRRFlow, "SendReply") as send_reply:
for _ in test_lib.TestFlowHelper(
"EndToEndTestFlow", self.client_mock, client_id=self.client_id,
token=self.token, args=args):
pass
results = []
for _, reply in send_reply.args:
if isinstance(reply, rdfvalue.EndToEndTestResult):
results.append(reply)
self.assertFalse(reply.success)
self.assertEqual(reply.test_class_name,
"TestFailure")
self.assertTrue("This should be logged" in reply.log)
self.assertEqual(len(results), 1)
class FlowTestLoader(test_lib.GRRTestLoader):
base_class = TestEndToEndTestFlow
def main(argv):
# Run the full test suite
test_lib.GrrTestProgram(argv=argv, testLoader=FlowTestLoader())
if __name__ == "__main__":
flags.StartMain(main)
| apache-2.0 |
kytvi2p/tahoe-lafs | src/allmydata/scripts/tahoe_backup.py | 2 | 12614 |
import os.path
import time
import urllib
import simplejson
import datetime
from allmydata.scripts.common import get_alias, escape_path, DEFAULT_ALIAS, \
UnknownAliasError
from allmydata.scripts.common_http import do_http, HTTPError, format_http_error
from allmydata.util import time_format
from allmydata.scripts import backupdb
from allmydata.util.encodingutil import listdir_unicode, quote_output, \
to_str, FilenameEncodingError, unicode_to_url
from allmydata.util.assertutil import precondition
from allmydata.util.fileutil import abspath_expanduser_unicode
def get_local_metadata(path):
metadata = {}
# posix stat(2) metadata, depends on the platform
os.stat_float_times(True)
s = os.stat(path)
metadata["ctime"] = s.st_ctime
metadata["mtime"] = s.st_mtime
misc_fields = ("st_mode", "st_ino", "st_dev", "st_uid", "st_gid")
macos_misc_fields = ("st_rsize", "st_creator", "st_type")
for field in misc_fields + macos_misc_fields:
if hasattr(s, field):
metadata[field] = getattr(s, field)
# TODO: extended attributes, like on OS-X's HFS+
return metadata
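# Illustrative self-check (added; not part of Tahoe-LAFS): on any stat()able
# path the dict above carries at least the timestamp fields, plus whatever
# platform-specific st_* fields exist. The default path is an assumption.
def _demo_local_metadata(path=u"."):
    md = get_local_metadata(path)
    assert "ctime" in md and "mtime" in md
    return md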
def mkdir(contents, options):
kids = dict([ (childname, (contents[childname][0],
{"ro_uri": contents[childname][1],
"metadata": contents[childname][2],
}))
for childname in contents
])
body = simplejson.dumps(kids).encode("utf-8")
url = options['node-url'] + "uri?t=mkdir-immutable"
resp = do_http("POST", url, body)
if resp.status < 200 or resp.status >= 300:
raise HTTPError("Error during mkdir", resp)
dircap = to_str(resp.read().strip())
return dircap
def put_child(dirurl, childname, childcap):
assert dirurl[-1] == "/"
url = dirurl + urllib.quote(unicode_to_url(childname)) + "?t=uri"
resp = do_http("PUT", url, childcap)
if resp.status not in (200, 201):
raise HTTPError("Error during put_child", resp)
class BackupProcessingError(Exception):
pass
class BackerUpper:
def __init__(self, options):
self.options = options
self.files_uploaded = 0
self.files_reused = 0
self.files_checked = 0
self.files_skipped = 0
self.directories_created = 0
self.directories_reused = 0
self.directories_checked = 0
self.directories_skipped = 0
def run(self):
options = self.options
nodeurl = options['node-url']
self.verbosity = 1
if options['quiet']:
self.verbosity = 0
if options['verbose']:
self.verbosity = 2
stdout = options.stdout
stderr = options.stderr
start_timestamp = datetime.datetime.now()
self.backupdb = None
bdbfile = os.path.join(options["node-directory"],
"private", "backupdb.sqlite")
bdbfile = abspath_expanduser_unicode(bdbfile)
self.backupdb = backupdb.get_backupdb(bdbfile, stderr)
if not self.backupdb:
print >>stderr, "ERROR: Unable to load backup db."
return 1
try:
rootcap, path = get_alias(options.aliases, options.to_dir, DEFAULT_ALIAS)
except UnknownAliasError, e:
e.display(stderr)
return 1
to_url = nodeurl + "uri/%s/" % urllib.quote(rootcap)
if path:
to_url += escape_path(path)
if not to_url.endswith("/"):
to_url += "/"
archives_url = to_url + "Archives/"
# first step: make sure the target directory exists, as well as the
# Archives/ subdirectory.
resp = do_http("GET", archives_url + "?t=json")
if resp.status == 404:
resp = do_http("POST", archives_url + "?t=mkdir")
if resp.status != 200:
print >>stderr, format_http_error("Unable to create target directory", resp)
return 1
# second step: process the tree
new_backup_dircap = self.process(options.from_dir)
# third: attach the new backup to the list
now = time_format.iso_utc(int(time.time()), sep="_") + "Z"
put_child(archives_url, now, new_backup_dircap)
put_child(to_url, "Latest", new_backup_dircap)
end_timestamp = datetime.datetime.now()
# calc elapsed time, omitting microseconds
elapsed_time = str(end_timestamp - start_timestamp).split('.')[0]
if self.verbosity >= 1:
print >>stdout, (" %d files uploaded (%d reused), "
"%d files skipped, "
"%d directories created (%d reused), "
"%d directories skipped"
% (self.files_uploaded,
self.files_reused,
self.files_skipped,
self.directories_created,
self.directories_reused,
self.directories_skipped))
if self.verbosity >= 2:
print >>stdout, (" %d files checked, %d directories checked"
% (self.files_checked,
self.directories_checked))
print >>stdout, " backup done, elapsed time: %s" % elapsed_time
# The command exits with code 2 if files or directories were skipped
if self.files_skipped or self.directories_skipped:
return 2
# done!
return 0
def verboseprint(self, msg):
precondition(isinstance(msg, str), msg)
if self.verbosity >= 2:
print >>self.options.stdout, msg
def warn(self, msg):
precondition(isinstance(msg, str), msg)
print >>self.options.stderr, msg
def process(self, localpath):
precondition(isinstance(localpath, unicode), localpath)
# returns newdircap
self.verboseprint("processing %s" % quote_output(localpath))
create_contents = {} # childname -> (type, rocap, metadata)
compare_contents = {} # childname -> rocap
try:
children = listdir_unicode(localpath)
except EnvironmentError:
self.directories_skipped += 1
self.warn("WARNING: permission denied on directory %s" % quote_output(localpath))
children = []
except FilenameEncodingError:
self.directories_skipped += 1
self.warn("WARNING: could not list directory %s due to a filename encoding error" % quote_output(localpath))
children = []
for child in self.options.filter_listdir(children):
assert isinstance(child, unicode), child
childpath = os.path.join(localpath, child)
# note: symlinks to directories are both islink() and isdir()
if os.path.isdir(childpath) and not os.path.islink(childpath):
metadata = get_local_metadata(childpath)
# recurse on the child directory
childcap = self.process(childpath)
assert isinstance(childcap, str)
create_contents[child] = ("dirnode", childcap, metadata)
compare_contents[child] = childcap
elif os.path.isfile(childpath) and not os.path.islink(childpath):
try:
childcap, metadata = self.upload(childpath)
assert isinstance(childcap, str)
create_contents[child] = ("filenode", childcap, metadata)
compare_contents[child] = childcap
except EnvironmentError:
self.files_skipped += 1
self.warn("WARNING: permission denied on file %s" % quote_output(childpath))
else:
self.files_skipped += 1
if os.path.islink(childpath):
self.warn("WARNING: cannot backup symlink %s" % quote_output(childpath))
else:
self.warn("WARNING: cannot backup special file %s" % quote_output(childpath))
must_create, r = self.check_backupdb_directory(compare_contents)
if must_create:
self.verboseprint(" creating directory for %s" % quote_output(localpath))
newdircap = mkdir(create_contents, self.options)
assert isinstance(newdircap, str)
if r:
r.did_create(newdircap)
self.directories_created += 1
return newdircap
else:
self.verboseprint(" re-using old directory for %s" % quote_output(localpath))
self.directories_reused += 1
return r.was_created()
def check_backupdb_file(self, childpath):
if not self.backupdb:
return True, None
use_timestamps = not self.options["ignore-timestamps"]
r = self.backupdb.check_file(childpath, use_timestamps)
if not r.was_uploaded():
return True, r
if not r.should_check():
# the file was uploaded or checked recently, so we can just use
# it
return False, r
# we must check the file before using the results
filecap = r.was_uploaded()
self.verboseprint("checking %s" % quote_output(filecap))
nodeurl = self.options['node-url']
checkurl = nodeurl + "uri/%s?t=check&output=JSON" % urllib.quote(filecap)
self.files_checked += 1
resp = do_http("POST", checkurl)
if resp.status != 200:
# can't check, so we must assume it's bad
return True, r
cr = simplejson.loads(resp.read())
healthy = cr["results"]["healthy"]
if not healthy:
# must upload
return True, r
# file is healthy, no need to upload
r.did_check_healthy(cr)
return False, r
def check_backupdb_directory(self, compare_contents):
if not self.backupdb:
return True, None
r = self.backupdb.check_directory(compare_contents)
if not r.was_created():
return True, r
if not r.should_check():
# the file was uploaded or checked recently, so we can just use
# it
return False, r
# we must check the directory before re-using it
dircap = r.was_created()
self.verboseprint("checking %s" % quote_output(dircap))
nodeurl = self.options['node-url']
checkurl = nodeurl + "uri/%s?t=check&output=JSON" % urllib.quote(dircap)
self.directories_checked += 1
resp = do_http("POST", checkurl)
if resp.status != 200:
# can't check, so we must assume it's bad
return True, r
cr = simplejson.loads(resp.read())
healthy = cr["results"]["healthy"]
if not healthy:
# must create
return True, r
# directory is healthy, no need to upload
r.did_check_healthy(cr)
return False, r
# This function will raise an IOError exception when called on an unreadable file
def upload(self, childpath):
precondition(isinstance(childpath, unicode), childpath)
#self.verboseprint("uploading %s.." % quote_output(childpath))
metadata = get_local_metadata(childpath)
# we can use the backupdb here
must_upload, bdb_results = self.check_backupdb_file(childpath)
if must_upload:
self.verboseprint("uploading %s.." % quote_output(childpath))
infileobj = open(childpath, "rb")
url = self.options['node-url'] + "uri"
resp = do_http("PUT", url, infileobj)
if resp.status not in (200, 201):
raise HTTPError("Error during file PUT", resp)
filecap = resp.read().strip()
self.verboseprint(" %s -> %s" % (quote_output(childpath, quotemarks=False),
quote_output(filecap, quotemarks=False)))
#self.verboseprint(" metadata: %s" % (quote_output(metadata, quotemarks=False),))
if bdb_results:
bdb_results.did_upload(filecap)
self.files_uploaded += 1
return filecap, metadata
else:
self.verboseprint("skipping %s.." % quote_output(childpath))
self.files_reused += 1
return bdb_results.was_uploaded(), metadata
def backup(options):
bu = BackerUpper(options)
return bu.run()
| gpl-2.0 |
mozilla/lumbergh | careers/careers/feeds.py | 2 | 1413 | # -*- coding: utf-8 -*-
from datetime import date
from django.contrib.syndication.views import Feed
from django.urls import reverse
from django.utils import feedgenerator
from careers.careers.models import Position
class LatestPositionsFeed(Feed):
feed_type = feedgenerator.Rss201rev2Feed
title = 'Current Mozilla job openings'
description = ('The current list of job openings, available internships '
'and contract opportunities at Mozilla.')
feed_copyright = ('Portions of this content are ©1998–%s by individual '
'mozilla.org contributors. Content available under a '
'Creative Commons license.' % date.today().year)
def link(self):
return reverse('careers.listings')
def feed_url(self):
return reverse('careers.feed')
def categories(self):
return Position.categories()
def items(self):
return Position.objects.all()
def item_title(self, item):
return item.title
def item_description(self, item):
return item.description
def item_pubdate(self, item):
return item.updated_at
def item_categories(self, item):
categories = []
categories.append(item.department)
categories += item.location_list
if 'Remote' in item.location_list:
categories.append('Worldwide')
return categories
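# Extracted sketch (added for illustration; ``_FakePosition`` is invented):
# the same per-item category rule as item_categories() above, exercisable
# without a Django setup.
class _FakePosition(object):
    department = 'Engineering'
    location_list = ['Remote', 'Berlin']
def _item_categories(item):
    categories = [item.department] + list(item.location_list)
    if 'Remote' in item.location_list:
        categories.append('Worldwide')
    return categories
assert _item_categories(_FakePosition()) == [
    'Engineering', 'Remote', 'Berlin', 'Worldwide']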
| mpl-2.0 |
Fl0rianFischer/sme_odoo | addons/website_sale_digital/controllers/main.py | 20 | 4262 | # -*- coding: utf-8 -*-
import base64
from openerp.addons.web import http
from openerp.addons.web.http import request
from openerp.addons.website_portal.controllers.main import website_account
from openerp.addons.website_sale.controllers.main import website_sale
from cStringIO import StringIO
from werkzeug.utils import redirect
class website_sale_digital_confirmation(website_sale):
@http.route([
'/shop/confirmation',
], type='http', auth="public", website=True)
def payment_confirmation(self, **post):
response = super(website_sale_digital_confirmation, self).payment_confirmation(**post)
order_lines = response.qcontext['order'].order_line
digital_content = map(lambda x: x.product_id.type == 'digital', order_lines)
response.qcontext.update(digital=any(digital_content))
return response
class website_sale_digital(website_account):
orders_page = '/my/orders'
@http.route([
'/my/orders/<int:order>',
], type='http', auth='user', website=True)
def orders_followup(self, order=None, **post):
response = super(website_sale_digital, self).orders_followup(order=order, **post)
order_products_attachments = {}
order = response.qcontext['order']
invoiced_lines = request.env['account.invoice.line'].sudo().search([('invoice_id', 'in', order.invoice_ids.ids), ('invoice_id.state', '=', 'paid')])
purchased_products_attachments = {}
for il in invoiced_lines:
p_obj = il.product_id
# Ignore products that do not have digital content
if not p_obj.product_tmpl_id.type == 'digital':
continue
# Search for product attachments
A = request.env['ir.attachment']
p_id = p_obj.id
template = p_obj.product_tmpl_id
att = A.search_read(
domain=['|', '&', ('res_model', '=', p_obj._name), ('res_id', '=', p_id), '&', ('res_model', '=', template._name), ('res_id', '=', template.id)],
fields=['name', 'write_date'],
order='write_date desc',
)
# Ignore products with no attachments
if not att:
continue
purchased_products_attachments[p_id] = att
response.qcontext.update({
'digital_attachments': purchased_products_attachments,
})
return response
@http.route([
'/my/download',
], type='http', auth='public')
def download_attachment(self, attachment_id):
# Check if this is a valid attachment id
attachment = request.env['ir.attachment'].sudo().search_read(
[('id', '=', int(attachment_id))],
["name", "datas", "file_type", "res_model", "res_id", "type", "url"]
)
if attachment:
attachment = attachment[0]
else:
return redirect(self.orders_page)
# Check if the user has bought the associated product
res_model = attachment['res_model']
res_id = attachment['res_id']
purchased_products = request.env['account.invoice.line'].get_digital_purchases(request.uid)
if res_model == 'product.product':
if res_id not in purchased_products:
return redirect(self.orders_page)
# Also check for attachments in the product templates
elif res_model == 'product.template':
P = request.env['product.product']
template_ids = map(lambda x: P.browse(x).product_tmpl_id.id, purchased_products)
if res_id not in template_ids:
return redirect(self.orders_page)
else:
return redirect(self.orders_page)
# The client has bought the product, otherwise it would have been blocked by now
if attachment["type"] == "url":
if attachment["url"]:
return redirect(attachment["url"])
else:
return request.not_found()
elif attachment["datas"]:
data = StringIO(base64.standard_b64decode(attachment["datas"]))
return http.send_file(data, filename=attachment['name'], as_attachment=True)
else:
return request.not_found()
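# Reduced sketch (added for illustration; every name here is invented): the
# purchase check enforced by download_attachment() above, expressed over plain
# data instead of ORM records.
def _may_download(res_model, res_id, purchased_ids, template_id_by_product):
    if res_model == 'product.product':
        return res_id in purchased_ids
    if res_model == 'product.template':
        return res_id in [template_id_by_product[p] for p in purchased_ids]
    return False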
| gpl-3.0 |
yesudeep/cmc | app/jinja2/docs/jinjaext.py | 9 | 6833 | # -*- coding: utf-8 -*-
"""
Jinja Documentation Extensions
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Support for automatically documenting filters and tests.
:copyright: Copyright 2008 by Armin Ronacher.
:license: BSD.
"""
import os
import re
import inspect
import jinja2
from itertools import islice
from types import BuiltinFunctionType
from docutils import nodes
from docutils.statemachine import ViewList
from sphinx.ext.autodoc import prepare_docstring
from sphinx.application import TemplateBridge
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic
from jinja2 import Environment, FileSystemLoader
def parse_rst(state, content_offset, doc):
node = nodes.section()
# hack around title style bookkeeping
surrounding_title_styles = state.memo.title_styles
surrounding_section_level = state.memo.section_level
state.memo.title_styles = []
state.memo.section_level = 0
state.nested_parse(doc, content_offset, node, match_titles=1)
state.memo.title_styles = surrounding_title_styles
state.memo.section_level = surrounding_section_level
return node.children
class JinjaStyle(Style):
title = 'Jinja Style'
default_style = ""
styles = {
Comment: 'italic #aaaaaa',
Comment.Preproc: 'noitalic #B11414',
Comment.Special: 'italic #505050',
Keyword: 'bold #B80000',
Keyword.Type: '#808080',
Operator.Word: 'bold #B80000',
Name.Builtin: '#333333',
Name.Function: '#333333',
Name.Class: 'bold #333333',
Name.Namespace: 'bold #333333',
Name.Entity: 'bold #363636',
Name.Attribute: '#686868',
Name.Tag: 'bold #686868',
Name.Decorator: '#686868',
String: '#AA891C',
Number: '#444444',
Generic.Heading: 'bold #000080',
Generic.Subheading: 'bold #800080',
Generic.Deleted: '#aa0000',
Generic.Inserted: '#00aa00',
Generic.Error: '#aa0000',
Generic.Emph: 'italic',
Generic.Strong: 'bold',
Generic.Prompt: '#555555',
Generic.Output: '#888888',
Generic.Traceback: '#aa0000',
Error: '#F00 bg:#FAA'
}
_sig_re = re.compile(r'^[a-zA-Z_][a-zA-Z0-9_]*(\(.*?\))')
def format_function(name, aliases, func):
lines = inspect.getdoc(func).splitlines()
signature = '()'
if isinstance(func, BuiltinFunctionType):
match = _sig_re.match(lines[0])
if match is not None:
del lines[:1 + bool(lines and not lines[0])]
signature = match.group(1)
else:
try:
argspec = inspect.getargspec(func)
if getattr(func, 'environmentfilter', False) or \
getattr(func, 'contextfilter', False):
del argspec[0][0]
signature = inspect.formatargspec(*argspec)
except:
pass
result = ['.. function:: %s%s' % (name, signature), '']
result.extend(' ' + line for line in lines)
if aliases:
result.extend(('', ' :aliases: %s' % ', '.join(
'``%s``' % x for x in sorted(aliases))))
return result
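# Quick illustration (added; not part of the Jinja docs build): a plain Python
# filter is rendered as a Sphinx ``.. function::`` block with its argspec and
# aliases. The filter and alias names are invented.
def _demo_format_function():
    def upper(s):
        """Uppercase a string."""
        return s.upper()
    lines = format_function('upper', ['uc'], upper)
    assert lines[0] == '.. function:: upper(s)'
    assert lines[-1].endswith('``uc``')
    return lines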
def dump_functions(mapping):
def directive(dirname, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
reverse_mapping = {}
for name, func in mapping.iteritems():
reverse_mapping.setdefault(func, []).append(name)
filters = []
for func, names in reverse_mapping.iteritems():
aliases = sorted(names, key=lambda x: len(x))
name = aliases.pop()
filters.append((name, aliases, func))
filters.sort()
result = ViewList()
for name, aliases, func in filters:
for item in format_function(name, aliases, func):
result.append(item, '<jinjaext>')
node = nodes.paragraph()
state.nested_parse(result, content_offset, node)
return node.children
return directive
from jinja2.defaults import DEFAULT_FILTERS, DEFAULT_TESTS
jinja_filters = dump_functions(DEFAULT_FILTERS)
jinja_tests = dump_functions(DEFAULT_TESTS)
def jinja_nodes(dirname, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
from jinja2.nodes import Node
doc = ViewList()
def walk(node, indent):
p = ' ' * indent
sig = ', '.join(node.fields)
doc.append(p + '.. autoclass:: %s(%s)' % (node.__name__, sig), '')
if node.abstract:
members = []
for key, name in node.__dict__.iteritems():
if not key.startswith('_') and \
not hasattr(node.__base__, key) and callable(name):
members.append(key)
if members:
members.sort()
doc.append('%s :members: %s' % (p, ', '.join(members)), '')
if node.__base__ != object:
doc.append('', '')
doc.append('%s :Node type: :class:`%s`' %
(p, node.__base__.__name__), '')
doc.append('', '')
children = node.__subclasses__()
children.sort(key=lambda x: x.__name__.lower())
for child in children:
walk(child, indent)
walk(Node, 0)
return parse_rst(state, content_offset, doc)
def inject_toc(app, doctree, docname):
titleiter = iter(doctree.traverse(nodes.title))
try:
# skip first title, we are not interested in that one
titleiter.next()
title = titleiter.next()
# and check if there is at least another title
titleiter.next()
except StopIteration:
return
tocnode = nodes.section('')
tocnode['classes'].append('toc')
toctitle = nodes.section('')
toctitle['classes'].append('toctitle')
toctitle.append(nodes.title(text='Table Of Contents'))
tocnode.append(toctitle)
tocnode += doctree.document.settings.env.get_toc_for(docname)[0][1]
title.parent.insert(title.parent.children.index(title), tocnode)
def setup(app):
app.add_directive('jinjafilters', jinja_filters, 0, (0, 0, 0))
app.add_directive('jinjatests', jinja_tests, 0, (0, 0, 0))
app.add_directive('jinjanodes', jinja_nodes, 0, (0, 0, 0))
# uncomment for inline toc. links are broken unfortunately
##app.connect('doctree-resolved', inject_toc)
| mit |
fhaoquan/kbengine | kbe/src/lib/python/Lib/test/test_parser.py | 113 | 26114 | import parser
import unittest
import sys
import operator
import struct
from test import support
from test.script_helper import assert_python_failure
#
# First, we test that we can generate trees from valid source fragments,
# and that these valid trees are indeed allowed by the tree-loading side
# of the parser module.
#
class RoundtripLegalSyntaxTestCase(unittest.TestCase):
def roundtrip(self, f, s):
st1 = f(s)
t = st1.totuple()
try:
st2 = parser.sequence2st(t)
except parser.ParserError as why:
self.fail("could not roundtrip %r: %s" % (s, why))
self.assertEqual(t, st2.totuple(),
"could not re-generate syntax tree")
def check_expr(self, s):
self.roundtrip(parser.expr, s)
def test_flags_passed(self):
        # The unicode_literals flag has to be passed from the parser to AST
        # generation.
suite = parser.suite("from __future__ import unicode_literals; x = ''")
code = suite.compile()
scope = {}
exec(code, {}, scope)
self.assertIsInstance(scope["x"], str)
def check_suite(self, s):
self.roundtrip(parser.suite, s)
def test_yield_statement(self):
self.check_suite("def f(): yield 1")
self.check_suite("def f(): yield")
self.check_suite("def f(): x += yield")
self.check_suite("def f(): x = yield 1")
self.check_suite("def f(): x = y = yield 1")
self.check_suite("def f(): x = yield")
self.check_suite("def f(): x = y = yield")
self.check_suite("def f(): 1 + (yield)*2")
self.check_suite("def f(): (yield 1)*2")
self.check_suite("def f(): return; yield 1")
self.check_suite("def f(): yield 1; return")
self.check_suite("def f(): yield from 1")
self.check_suite("def f(): x = yield from 1")
self.check_suite("def f(): f((yield from 1))")
self.check_suite("def f(): yield 1; return 1")
self.check_suite("def f():\n"
" for x in range(30):\n"
" yield x\n")
self.check_suite("def f():\n"
" if (yield):\n"
" yield x\n")
def test_nonlocal_statement(self):
self.check_suite("def f():\n"
" x = 0\n"
" def g():\n"
" nonlocal x\n")
self.check_suite("def f():\n"
" x = y = 0\n"
" def g():\n"
" nonlocal x, y\n")
def test_expressions(self):
self.check_expr("foo(1)")
self.check_expr("[1, 2, 3]")
self.check_expr("[x**3 for x in range(20)]")
self.check_expr("[x**3 for x in range(20) if x % 3]")
self.check_expr("[x**3 for x in range(20) if x % 2 if x % 3]")
self.check_expr("list(x**3 for x in range(20))")
self.check_expr("list(x**3 for x in range(20) if x % 3)")
self.check_expr("list(x**3 for x in range(20) if x % 2 if x % 3)")
self.check_expr("foo(*args)")
self.check_expr("foo(*args, **kw)")
self.check_expr("foo(**kw)")
self.check_expr("foo(key=value)")
self.check_expr("foo(key=value, *args)")
self.check_expr("foo(key=value, *args, **kw)")
self.check_expr("foo(key=value, **kw)")
self.check_expr("foo(a, b, c, *args)")
self.check_expr("foo(a, b, c, *args, **kw)")
self.check_expr("foo(a, b, c, **kw)")
self.check_expr("foo(a, *args, keyword=23)")
self.check_expr("foo + bar")
self.check_expr("foo - bar")
self.check_expr("foo * bar")
self.check_expr("foo / bar")
self.check_expr("foo // bar")
self.check_expr("lambda: 0")
self.check_expr("lambda x: 0")
self.check_expr("lambda *y: 0")
self.check_expr("lambda *y, **z: 0")
self.check_expr("lambda **z: 0")
self.check_expr("lambda x, y: 0")
self.check_expr("lambda foo=bar: 0")
self.check_expr("lambda foo=bar, spaz=nifty+spit: 0")
self.check_expr("lambda foo=bar, **z: 0")
self.check_expr("lambda foo=bar, blaz=blat+2, **z: 0")
self.check_expr("lambda foo=bar, blaz=blat+2, *y, **z: 0")
self.check_expr("lambda x, *y, **z: 0")
self.check_expr("(x for x in range(10))")
self.check_expr("foo(x for x in range(10))")
self.check_expr("...")
self.check_expr("a[...]")
def test_simple_expression(self):
# expr_stmt
self.check_suite("a")
def test_simple_assignments(self):
self.check_suite("a = b")
self.check_suite("a = b = c = d = e")
def test_simple_augmented_assignments(self):
self.check_suite("a += b")
self.check_suite("a -= b")
self.check_suite("a *= b")
self.check_suite("a /= b")
self.check_suite("a //= b")
self.check_suite("a %= b")
self.check_suite("a &= b")
self.check_suite("a |= b")
self.check_suite("a ^= b")
self.check_suite("a <<= b")
self.check_suite("a >>= b")
self.check_suite("a **= b")
def test_function_defs(self):
self.check_suite("def f(): pass")
self.check_suite("def f(*args): pass")
self.check_suite("def f(*args, **kw): pass")
self.check_suite("def f(**kw): pass")
self.check_suite("def f(foo=bar): pass")
self.check_suite("def f(foo=bar, *args): pass")
self.check_suite("def f(foo=bar, *args, **kw): pass")
self.check_suite("def f(foo=bar, **kw): pass")
self.check_suite("def f(a, b): pass")
self.check_suite("def f(a, b, *args): pass")
self.check_suite("def f(a, b, *args, **kw): pass")
self.check_suite("def f(a, b, **kw): pass")
self.check_suite("def f(a, b, foo=bar): pass")
self.check_suite("def f(a, b, foo=bar, *args): pass")
self.check_suite("def f(a, b, foo=bar, *args, **kw): pass")
self.check_suite("def f(a, b, foo=bar, **kw): pass")
self.check_suite("@staticmethod\n"
"def f(): pass")
self.check_suite("@staticmethod\n"
"@funcattrs(x, y)\n"
"def f(): pass")
self.check_suite("@funcattrs()\n"
"def f(): pass")
# keyword-only arguments
self.check_suite("def f(*, a): pass")
self.check_suite("def f(*, a = 5): pass")
self.check_suite("def f(*, a = 5, b): pass")
self.check_suite("def f(*, a, b = 5): pass")
self.check_suite("def f(*, a, b = 5, **kwds): pass")
self.check_suite("def f(*args, a): pass")
self.check_suite("def f(*args, a = 5): pass")
self.check_suite("def f(*args, a = 5, b): pass")
self.check_suite("def f(*args, a, b = 5): pass")
self.check_suite("def f(*args, a, b = 5, **kwds): pass")
# function annotations
self.check_suite("def f(a: int): pass")
self.check_suite("def f(a: int = 5): pass")
self.check_suite("def f(*args: list): pass")
self.check_suite("def f(**kwds: dict): pass")
self.check_suite("def f(*, a: int): pass")
self.check_suite("def f(*, a: int = 5): pass")
self.check_suite("def f() -> int: pass")
def test_class_defs(self):
self.check_suite("class foo():pass")
self.check_suite("class foo(object):pass")
self.check_suite("@class_decorator\n"
"class foo():pass")
self.check_suite("@class_decorator(arg)\n"
"class foo():pass")
self.check_suite("@decorator1\n"
"@decorator2\n"
"class foo():pass")
def test_import_from_statement(self):
self.check_suite("from sys.path import *")
self.check_suite("from sys.path import dirname")
self.check_suite("from sys.path import (dirname)")
self.check_suite("from sys.path import (dirname,)")
self.check_suite("from sys.path import dirname as my_dirname")
self.check_suite("from sys.path import (dirname as my_dirname)")
self.check_suite("from sys.path import (dirname as my_dirname,)")
self.check_suite("from sys.path import dirname, basename")
self.check_suite("from sys.path import (dirname, basename)")
self.check_suite("from sys.path import (dirname, basename,)")
self.check_suite(
"from sys.path import dirname as my_dirname, basename")
self.check_suite(
"from sys.path import (dirname as my_dirname, basename)")
self.check_suite(
"from sys.path import (dirname as my_dirname, basename,)")
self.check_suite(
"from sys.path import dirname, basename as my_basename")
self.check_suite(
"from sys.path import (dirname, basename as my_basename)")
self.check_suite(
"from sys.path import (dirname, basename as my_basename,)")
self.check_suite("from .bogus import x")
def test_basic_import_statement(self):
self.check_suite("import sys")
self.check_suite("import sys as system")
self.check_suite("import sys, math")
self.check_suite("import sys as system, math")
self.check_suite("import sys, math as my_math")
def test_relative_imports(self):
self.check_suite("from . import name")
self.check_suite("from .. import name")
# check all the way up to '....', since '...' is tokenized
# differently from '.' (it's an ellipsis token).
self.check_suite("from ... import name")
self.check_suite("from .... import name")
self.check_suite("from .pkg import name")
self.check_suite("from ..pkg import name")
self.check_suite("from ...pkg import name")
self.check_suite("from ....pkg import name")
def test_pep263(self):
self.check_suite("# -*- coding: iso-8859-1 -*-\n"
"pass\n")
def test_assert(self):
self.check_suite("assert alo < ahi and blo < bhi\n")
def test_with(self):
self.check_suite("with open('x'): pass\n")
self.check_suite("with open('x') as f: pass\n")
self.check_suite("with open('x') as f, open('y') as g: pass\n")
def test_try_stmt(self):
self.check_suite("try: pass\nexcept: pass\n")
self.check_suite("try: pass\nfinally: pass\n")
self.check_suite("try: pass\nexcept A: pass\nfinally: pass\n")
self.check_suite("try: pass\nexcept A: pass\nexcept: pass\n"
"finally: pass\n")
self.check_suite("try: pass\nexcept: pass\nelse: pass\n")
self.check_suite("try: pass\nexcept: pass\nelse: pass\n"
"finally: pass\n")
def test_position(self):
# An absolutely minimal test of position information. Better
# tests would be a big project.
code = "def f(x):\n return x + 1"
st1 = parser.suite(code)
st2 = st1.totuple(line_info=1, col_info=1)
def walk(tree):
node_type = tree[0]
next = tree[1]
if isinstance(next, tuple):
for elt in tree[1:]:
for x in walk(elt):
yield x
else:
yield tree
terminals = list(walk(st2))
self.assertEqual([
(1, 'def', 1, 0),
(1, 'f', 1, 4),
(7, '(', 1, 5),
(1, 'x', 1, 6),
(8, ')', 1, 7),
(11, ':', 1, 8),
(4, '', 1, 9),
(5, '', 2, -1),
(1, 'return', 2, 4),
(1, 'x', 2, 11),
(14, '+', 2, 13),
(2, '1', 2, 15),
(4, '', 2, 16),
(6, '', 2, -1),
(4, '', 2, -1),
(0, '', 2, -1)],
terminals)
def test_extended_unpacking(self):
self.check_suite("*a = y")
self.check_suite("x, *b, = m")
self.check_suite("[*a, *b] = y")
self.check_suite("for [*x, b] in x: pass")
def test_raise_statement(self):
self.check_suite("raise\n")
self.check_suite("raise e\n")
self.check_suite("try:\n"
" suite\n"
"except Exception as e:\n"
" raise ValueError from e\n")
def test_set_displays(self):
self.check_expr('{2}')
self.check_expr('{2,}')
self.check_expr('{2, 3}')
self.check_expr('{2, 3,}')
def test_dict_displays(self):
self.check_expr('{}')
self.check_expr('{a:b}')
self.check_expr('{a:b,}')
self.check_expr('{a:b, c:d}')
self.check_expr('{a:b, c:d,}')
def test_set_comprehensions(self):
self.check_expr('{x for x in seq}')
self.check_expr('{f(x) for x in seq}')
self.check_expr('{f(x) for x in seq if condition(x)}')
def test_dict_comprehensions(self):
self.check_expr('{x:x for x in seq}')
self.check_expr('{x**2:x[3] for x in seq if condition(x)}')
self.check_expr('{x:x for x in seq1 for y in seq2 if condition(x, y)}')
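# Minimal standalone illustration (added; mirrors the roundtrip() helper
# above). Assumes the legacy ``parser`` module is importable, as it is for
# this test suite.
def _demo_roundtrip():
    st = parser.suite('x = 1\n')
    t = st.totuple()
    assert parser.sequence2st(t).totuple() == t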
#
# Second, we take *invalid* trees and make sure we get ParserError
# rejections for them.
#
class IllegalSyntaxTestCase(unittest.TestCase):
def check_bad_tree(self, tree, label):
try:
parser.sequence2st(tree)
except parser.ParserError:
pass
else:
self.fail("did not detect invalid tree for %r" % label)
def test_junk(self):
# not even remotely valid:
self.check_bad_tree((1, 2, 3), "<junk>")
def test_illegal_yield_1(self):
# Illegal yield statement: def f(): return 1; yield 1
tree = \
(257,
(264,
(285,
(259,
(1, 'def'),
(1, 'f'),
(260, (7, '('), (8, ')')),
(11, ':'),
(291,
(4, ''),
(5, ''),
(264,
(265,
(266,
(272,
(275,
(1, 'return'),
(313,
(292,
(293,
(294,
(295,
(297,
(298,
(299,
(300,
(301,
(302, (303, (304, (305, (2, '1')))))))))))))))))),
(264,
(265,
(266,
(272,
(276,
(1, 'yield'),
(313,
(292,
(293,
(294,
(295,
(297,
(298,
(299,
(300,
(301,
(302,
(303, (304, (305, (2, '1')))))))))))))))))),
(4, ''))),
(6, ''))))),
(4, ''),
(0, ''))))
self.check_bad_tree(tree, "def f():\n return 1\n yield 1")
def test_illegal_yield_2(self):
# Illegal return in generator: def f(): return 1; yield 1
tree = \
(257,
(264,
(265,
(266,
(278,
(1, 'from'),
(281, (1, '__future__')),
(1, 'import'),
(279, (1, 'generators')))),
(4, ''))),
(264,
(285,
(259,
(1, 'def'),
(1, 'f'),
(260, (7, '('), (8, ')')),
(11, ':'),
(291,
(4, ''),
(5, ''),
(264,
(265,
(266,
(272,
(275,
(1, 'return'),
(313,
(292,
(293,
(294,
(295,
(297,
(298,
(299,
(300,
(301,
(302, (303, (304, (305, (2, '1')))))))))))))))))),
(264,
(265,
(266,
(272,
(276,
(1, 'yield'),
(313,
(292,
(293,
(294,
(295,
(297,
(298,
(299,
(300,
(301,
(302,
(303, (304, (305, (2, '1')))))))))))))))))),
(4, ''))),
(6, ''))))),
(4, ''),
(0, ''))))
self.check_bad_tree(tree, "def f():\n return 1\n yield 1")
def test_a_comma_comma_c(self):
# Illegal input: a,,c
tree = \
(258,
(311,
(290,
(291,
(292,
(293,
(295,
(296,
(297,
(298, (299, (300, (301, (302, (303, (1, 'a')))))))))))))),
(12, ','),
(12, ','),
(290,
(291,
(292,
(293,
(295,
(296,
(297,
(298, (299, (300, (301, (302, (303, (1, 'c'))))))))))))))),
(4, ''),
(0, ''))
self.check_bad_tree(tree, "a,,c")
def test_illegal_operator(self):
# Illegal input: a $= b
tree = \
(257,
(264,
(265,
(266,
(267,
(312,
(291,
(292,
(293,
(294,
(296,
(297,
(298,
(299,
(300, (301, (302, (303, (304, (1, 'a'))))))))))))))),
(268, (37, '$=')),
(312,
(291,
(292,
(293,
(294,
(296,
(297,
(298,
(299,
(300, (301, (302, (303, (304, (1, 'b'))))))))))))))))),
(4, ''))),
(0, ''))
self.check_bad_tree(tree, "a $= b")
def test_malformed_global(self):
        # doesn't have the global keyword in the AST
tree = (257,
(264,
(265,
(266,
(282, (1, 'foo'))), (4, ''))),
(4, ''),
(0, ''))
self.check_bad_tree(tree, "malformed global ast")
def test_missing_import_source(self):
# from import fred
tree = \
(257,
(268,
(269,
(270,
(282,
(284, (1, 'from'), (1, 'import'),
(287, (285, (1, 'fred')))))),
(4, ''))),
(4, ''), (0, ''))
self.check_bad_tree(tree, "from import fred")
class CompileTestCase(unittest.TestCase):
# These tests are very minimal. :-(
def test_compile_expr(self):
st = parser.expr('2 + 3')
code = parser.compilest(st)
self.assertEqual(eval(code), 5)
def test_compile_suite(self):
st = parser.suite('x = 2; y = x + 3')
code = parser.compilest(st)
globs = {}
exec(code, globs)
self.assertEqual(globs['y'], 5)
def test_compile_error(self):
st = parser.suite('1 = 3 + 4')
self.assertRaises(SyntaxError, parser.compilest, st)
def test_compile_badunicode(self):
st = parser.suite('a = "\\U12345678"')
self.assertRaises(SyntaxError, parser.compilest, st)
st = parser.suite('a = "\\u1"')
self.assertRaises(SyntaxError, parser.compilest, st)
def test_issue_9011(self):
# Issue 9011: compilation of an unary minus expression changed
# the meaning of the ST, so that a second compilation produced
# incorrect results.
st = parser.expr('-3')
code1 = parser.compilest(st)
self.assertEqual(eval(code1), -3)
code2 = parser.compilest(st)
self.assertEqual(eval(code2), -3)
class ParserStackLimitTestCase(unittest.TestCase):
"""try to push the parser to/over its limits.
see http://bugs.python.org/issue1881 for a discussion
"""
def _nested_expression(self, level):
return "["*level+"]"*level
def test_deeply_nested_list(self):
# XXX used to be 99 levels in 2.x
e = self._nested_expression(93)
st = parser.expr(e)
st.compile()
def test_trigger_memory_error(self):
e = self._nested_expression(100)
rc, out, err = assert_python_failure('-c', e)
# parsing the expression will result in an error message
# followed by a MemoryError (see #11963)
self.assertIn(b's_push: parser stack overflow', err)
self.assertIn(b'MemoryError', err)
class STObjectTestCase(unittest.TestCase):
"""Test operations on ST objects themselves"""
def test_comparisons(self):
# ST objects should support order and equality comparisons
st1 = parser.expr('2 + 3')
st2 = parser.suite('x = 2; y = x + 3')
st3 = parser.expr('list(x**3 for x in range(20))')
st1_copy = parser.expr('2 + 3')
st2_copy = parser.suite('x = 2; y = x + 3')
st3_copy = parser.expr('list(x**3 for x in range(20))')
# exercise fast path for object identity
self.assertEqual(st1 == st1, True)
self.assertEqual(st2 == st2, True)
self.assertEqual(st3 == st3, True)
# slow path equality
self.assertEqual(st1, st1_copy)
self.assertEqual(st2, st2_copy)
self.assertEqual(st3, st3_copy)
self.assertEqual(st1 == st2, False)
self.assertEqual(st1 == st3, False)
self.assertEqual(st2 == st3, False)
self.assertEqual(st1 != st1, False)
self.assertEqual(st2 != st2, False)
self.assertEqual(st3 != st3, False)
self.assertEqual(st1 != st1_copy, False)
self.assertEqual(st2 != st2_copy, False)
self.assertEqual(st3 != st3_copy, False)
self.assertEqual(st2 != st1, True)
self.assertEqual(st1 != st3, True)
self.assertEqual(st3 != st2, True)
# we don't particularly care what the ordering is; just that
# it's usable and self-consistent
self.assertEqual(st1 < st2, not (st2 <= st1))
self.assertEqual(st1 < st3, not (st3 <= st1))
self.assertEqual(st2 < st3, not (st3 <= st2))
self.assertEqual(st1 < st2, st2 > st1)
self.assertEqual(st1 < st3, st3 > st1)
self.assertEqual(st2 < st3, st3 > st2)
self.assertEqual(st1 <= st2, st2 >= st1)
self.assertEqual(st3 <= st1, st1 >= st3)
self.assertEqual(st2 <= st3, st3 >= st2)
# transitivity
bottom = min(st1, st2, st3)
top = max(st1, st2, st3)
mid = sorted([st1, st2, st3])[1]
self.assertTrue(bottom < mid)
self.assertTrue(bottom < top)
self.assertTrue(mid < top)
self.assertTrue(bottom <= mid)
self.assertTrue(bottom <= top)
self.assertTrue(mid <= top)
self.assertTrue(bottom <= bottom)
self.assertTrue(mid <= mid)
self.assertTrue(top <= top)
# interaction with other types
self.assertEqual(st1 == 1588.602459, False)
self.assertEqual('spanish armada' != st2, True)
self.assertRaises(TypeError, operator.ge, st3, None)
self.assertRaises(TypeError, operator.le, False, st1)
self.assertRaises(TypeError, operator.lt, st1, 1815)
self.assertRaises(TypeError, operator.gt, b'waterloo', st2)
check_sizeof = support.check_sizeof
@support.cpython_only
def test_sizeof(self):
def XXXROUNDUP(n):
if n <= 1:
return n
if n <= 128:
return (n + 3) & ~3
return 1 << (n - 1).bit_length()
basesize = support.calcobjsize('Pii')
nodesize = struct.calcsize('hP3iP0h')
def sizeofchildren(node):
if node is None:
return 0
res = 0
hasstr = len(node) > 1 and isinstance(node[-1], str)
if hasstr:
res += len(node[-1]) + 1
children = node[1:-1] if hasstr else node[1:]
if children:
res += XXXROUNDUP(len(children)) * nodesize
for child in children:
res += sizeofchildren(child)
return res
def check_st_sizeof(st):
self.check_sizeof(st, basesize + nodesize +
sizeofchildren(st.totuple()))
check_st_sizeof(parser.expr('2 + 3'))
check_st_sizeof(parser.expr('2 + 3 + 4'))
check_st_sizeof(parser.suite('x = 2 + 3'))
check_st_sizeof(parser.suite(''))
check_st_sizeof(parser.suite('# -*- coding: utf-8 -*-'))
check_st_sizeof(parser.expr('[' + '2,' * 1000 + ']'))
# XXX tests for pickling and unpickling of ST objects should go here
class OtherParserCase(unittest.TestCase):
def test_two_args_to_expr(self):
# See bug #12264
with self.assertRaises(TypeError):
parser.expr("a", "b")
def test_main():
support.run_unittest(
RoundtripLegalSyntaxTestCase,
IllegalSyntaxTestCase,
CompileTestCase,
ParserStackLimitTestCase,
STObjectTestCase,
OtherParserCase,
)
if __name__ == "__main__":
test_main()
| lgpl-3.0 |
manhong2112/CodeColle | Python/Pygame/pygame~/compat.py | 5 | 3445 | # coding: ascii
"""Python 2.x/3.x compatibility tools"""
import sys
__all__ = ['geterror', 'long_', 'xrange_', 'ord_', 'unichr_',
'unicode_', 'raw_input_', 'as_bytes', 'as_unicode',
'bytes_', 'next_', 'imap_', 'PY_MAJOR_VERSION', 'PY_MINOR_VERSION']
PY_MAJOR_VERSION, PY_MINOR_VERSION = sys.version_info[0:2]
def geterror():
return sys.exc_info()[1]
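# Usage sketch (added illustration): geterror() is meant to be called inside
# an "except" block, where it returns the active exception instance:
#     try:
#         1 / 0
#     except ZeroDivisionError:
#         err = geterror()  # the ZeroDivisionError instance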
# Python 3
if PY_MAJOR_VERSION >= 3:
long_ = int
xrange_ = range
from io import StringIO
from io import BytesIO
unichr_ = chr
unicode_ = str
bytes_ = bytes
raw_input_ = input
imap_ = map
# Represent escaped bytes and strings in a portable way.
#
# as_bytes: Allow a Python 3.x string to represent a bytes object.
    #   e.g.: as_bytes("a\x01b") == b"a\x01b" # Python 3.x
    #         as_bytes("a\x01b") == "a\x01b" # Python 2.x
# as_unicode: Allow a Python "r" string to represent a unicode string.
# e.g.: as_unicode(r"Bo\u00F6tes") == u"Bo\u00F6tes" # Python 2.x
# as_unicode(r"Bo\u00F6tes") == "Bo\u00F6tes" # Python 3.x
def as_bytes(string):
""" '<binary literal>' => b'<binary literal>' """
return string.encode('latin-1', 'strict')
def as_unicode(rstring):
""" r'<Unicode literal>' => '<Unicode literal>' """
return rstring.encode('ascii', 'strict').decode('unicode_escape',
'strict')
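    # Quick sanity examples (added; derived from the definitions above):
    #   as_bytes("abc") == b"abc"
    #   as_unicode(r"Bo\u00F6tes") == "Boötes"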
# Python 2
else:
long_ = long
xrange_ = xrange
from cStringIO import StringIO
BytesIO = StringIO
unichr_ = unichr
unicode_ = unicode
bytes_ = str
raw_input_ = raw_input
from itertools import imap as imap_
# Represent escaped bytes and strings in a portable way.
#
# as_bytes: Allow a Python 3.x string to represent a bytes object.
    #   e.g.: as_bytes("a\x01b") == b"a\x01b" # Python 3.x
    #         as_bytes("a\x01b") == "a\x01b" # Python 2.x
# as_unicode: Allow a Python "r" string to represent a unicode string.
# e.g.: as_unicode(r"Bo\u00F6tes") == u"Bo\u00F6tes" # Python 2.x
# as_unicode(r"Bo\u00F6tes") == "Bo\u00F6tes" # Python 3.x
def as_bytes(string):
""" '<binary literal>' => '<binary literal>' """
return string
def as_unicode(rstring):
""" r'<Unicode literal>' => u'<Unicode literal>' """
return rstring.decode('unicode_escape', 'strict')
def get_BytesIO():
return BytesIO
def get_StringIO():
return StringIO
def ord_(o):
try:
return ord(o)
except TypeError:
return o
if sys.platform == 'win32':
filesystem_errors = "replace"
elif PY_MAJOR_VERSION >= 3:
filesystem_errors = "surrogateescape"
else:
filesystem_errors = "strict"
def filesystem_encode(u):
fsencoding = sys.getfilesystemencoding()
if fsencoding.lower() == 'ascii' and sys.platform.startswith('linux'):
# Don't believe Linux systems claiming ASCII-only filesystems. In
# practice, arbitrary bytes are allowed, and most things expect UTF-8.
fsencoding = 'utf-8'
return u.encode(fsencoding, filesystem_errors)
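# Example (added for illustration): on a UTF-8 filesystem,
# filesystem_encode(u"Boötes") returns b'Bo\xc3\xb6tes'.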
# Include a next compatible function for Python versions < 2.6
if (PY_MAJOR_VERSION, PY_MINOR_VERSION) >= (2, 6):
next_ = next
else:
def next_(i, *args):
try:
return i.next()
except StopIteration:
if args:
return args[0]
raise
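# Behavioral note (added): like the built-in next(), the shim accepts an
# optional default, e.g. next_(iter([]), "fallback") == "fallback".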
| mit |
nzavagli/UnrealPy | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/django-1.8.2/tests/view_tests/tests/test_i18n.py | 13 | 11317 | # -*- coding:utf-8 -*-
import gettext
import json
import os
import unittest
from os import path
from django.conf import settings
from django.core.urlresolvers import reverse
from django.test import (
LiveServerTestCase, TestCase, modify_settings, override_settings,
)
from django.utils import six
from django.utils._os import upath
from django.utils.module_loading import import_string
from django.utils.translation import LANGUAGE_SESSION_KEY, override
from ..urls import locale_dir
@override_settings(ROOT_URLCONF='view_tests.urls')
class I18NTests(TestCase):
""" Tests django views in django/views/i18n.py """
def test_setlang(self):
"""
The set_language view can be used to change the session language.
The user is redirected to the 'next' argument if provided.
"""
for lang_code, lang_name in settings.LANGUAGES:
post_data = dict(language=lang_code, next='/')
response = self.client.post('/i18n/setlang/', data=post_data)
self.assertRedirects(response, 'http://testserver/')
self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], lang_code)
def test_setlang_unsafe_next(self):
"""
The set_language view only redirects to the 'next' argument if it is
"safe".
"""
lang_code, lang_name = settings.LANGUAGES[0]
post_data = dict(language=lang_code, next='//unsafe/redirection/')
response = self.client.post('/i18n/setlang/', data=post_data)
self.assertEqual(response.url, 'http://testserver/')
self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], lang_code)
def test_setlang_reversal(self):
self.assertEqual(reverse('set_language'), '/i18n/setlang/')
def test_setlang_cookie(self):
# we force saving language to a cookie rather than a session
# by excluding session middleware and those which do require it
test_settings = dict(
MIDDLEWARE_CLASSES=('django.middleware.common.CommonMiddleware',),
LANGUAGE_COOKIE_NAME='mylanguage',
LANGUAGE_COOKIE_AGE=3600 * 7 * 2,
LANGUAGE_COOKIE_DOMAIN='.example.com',
LANGUAGE_COOKIE_PATH='/test/',
)
with self.settings(**test_settings):
post_data = dict(language='pl', next='/views/')
response = self.client.post('/i18n/setlang/', data=post_data)
language_cookie = response.cookies.get('mylanguage')
self.assertEqual(language_cookie.value, 'pl')
self.assertEqual(language_cookie['domain'], '.example.com')
self.assertEqual(language_cookie['path'], '/test/')
self.assertEqual(language_cookie['max-age'], 3600 * 7 * 2)
def test_jsi18n(self):
"""The javascript_catalog can be deployed with language settings"""
for lang_code in ['es', 'fr', 'ru']:
with override(lang_code):
catalog = gettext.translation('djangojs', locale_dir, [lang_code])
if six.PY3:
trans_txt = catalog.gettext('this is to be translated')
else:
trans_txt = catalog.ugettext('this is to be translated')
response = self.client.get('/jsi18n/')
# response content must include a line like:
# "this is to be translated": <value of trans_txt Python variable>
# json.dumps() is used to be able to check unicode strings
self.assertContains(response, json.dumps(trans_txt), 1)
if lang_code == 'fr':
# Message with context (msgctxt)
self.assertContains(response, r'"month name\u0004May": "mai"', 1)
@override_settings(ROOT_URLCONF='view_tests.urls')
class JsI18NTests(TestCase):
"""
Tests django views in django/views/i18n.py that need to change
settings.LANGUAGE_CODE.
"""
def test_jsi18n_with_missing_en_files(self):
"""
The javascript_catalog shouldn't load the fallback language in the
case that the current selected language is actually the one translated
from, and hence missing translation files completely.
This happens easily when you're translating from English to other
languages and you've set settings.LANGUAGE_CODE to some other language
than English.
"""
with self.settings(LANGUAGE_CODE='es'), override('en-us'):
response = self.client.get('/jsi18n/')
self.assertNotContains(response, 'esto tiene que ser traducido')
def test_jsi18n_fallback_language(self):
"""
Let's make sure that the fallback language is still working properly
in cases where the selected language cannot be found.
"""
with self.settings(LANGUAGE_CODE='fr'), override('fi'):
response = self.client.get('/jsi18n/')
self.assertContains(response, 'il faut le traduire')
def test_i18n_language_non_english_default(self):
"""
Check if the Javascript i18n view returns an empty language catalog
if the default language is non-English, the selected language
is English and there is not 'en' translation available. See #13388,
#3594 and #13726 for more details.
"""
with self.settings(LANGUAGE_CODE='fr'), override('en-us'):
response = self.client.get('/jsi18n/')
self.assertNotContains(response, 'Choisir une heure')
@modify_settings(INSTALLED_APPS={'append': 'view_tests.app0'})
def test_non_english_default_english_userpref(self):
"""
Same as above with the difference that there IS an 'en' translation
available. The Javascript i18n view must return a NON empty language catalog
with the proper English translations. See #13726 for more details.
"""
with self.settings(LANGUAGE_CODE='fr'), override('en-us'):
response = self.client.get('/jsi18n_english_translation/')
self.assertContains(response, 'this app0 string is to be translated')
def test_i18n_language_non_english_fallback(self):
"""
Makes sure that the fallback language is still working properly
in cases where the selected language cannot be found.
"""
with self.settings(LANGUAGE_CODE='fr'), override('none'):
response = self.client.get('/jsi18n/')
self.assertContains(response, 'Choisir une heure')
def test_escaping(self):
# Force a language via GET otherwise the gettext functions are a noop!
response = self.client.get('/jsi18n_admin/?language=de')
self.assertContains(response, '\\x04')
@modify_settings(INSTALLED_APPS={'append': ['view_tests.app5']})
def test_non_BMP_char(self):
"""
Non-BMP characters should not break the javascript_catalog (#21725).
"""
with self.settings(LANGUAGE_CODE='en-us'), override('fr'):
response = self.client.get('/jsi18n/app5/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'emoji')
self.assertContains(response, '\\ud83d\\udca9')
@override_settings(ROOT_URLCONF='view_tests.urls')
class JsI18NTestsMultiPackage(TestCase):
"""
Tests for django views in django/views/i18n.py that need to change
settings.LANGUAGE_CODE and merge JS translation from several packages.
"""
@modify_settings(INSTALLED_APPS={'append': ['view_tests.app1', 'view_tests.app2']})
def test_i18n_language_english_default(self):
"""
Check if the JavaScript i18n view returns a complete language catalog
if the default language is en-us, the selected language has a
translation available and a catalog composed by djangojs domain
translations of multiple Python packages is requested. See #13388,
#3594 and #13514 for more details.
"""
with self.settings(LANGUAGE_CODE='en-us'), override('fr'):
response = self.client.get('/jsi18n_multi_packages1/')
self.assertContains(response, 'il faut traduire cette cha\\u00eene de caract\\u00e8res de app1')
@modify_settings(INSTALLED_APPS={'append': ['view_tests.app3', 'view_tests.app4']})
def test_i18n_different_non_english_languages(self):
"""
Similar to above but with neither default or requested language being
English.
"""
with self.settings(LANGUAGE_CODE='fr'), override('es-ar'):
response = self.client.get('/jsi18n_multi_packages2/')
self.assertContains(response, 'este texto de app3 debe ser traducido')
def test_i18n_with_locale_paths(self):
extended_locale_paths = settings.LOCALE_PATHS + (
path.join(path.dirname(
path.dirname(path.abspath(upath(__file__)))), 'app3', 'locale'),)
with self.settings(LANGUAGE_CODE='es-ar', LOCALE_PATHS=extended_locale_paths):
with override('es-ar'):
response = self.client.get('/jsi18n/')
self.assertContains(response,
'este texto de app3 debe ser traducido')
skip_selenium = not os.environ.get('DJANGO_SELENIUM_TESTS', False)
@unittest.skipIf(skip_selenium, 'Selenium tests not requested')
@override_settings(ROOT_URLCONF='view_tests.urls')
class JavascriptI18nTests(LiveServerTestCase):
# The test cases use fixtures & translations from these apps.
available_apps = [
'django.contrib.admin', 'django.contrib.auth',
'django.contrib.contenttypes', 'view_tests',
]
webdriver_class = 'selenium.webdriver.firefox.webdriver.WebDriver'
@classmethod
def setUpClass(cls):
try:
cls.selenium = import_string(cls.webdriver_class)()
except Exception as e:
raise unittest.SkipTest('Selenium webdriver "%s" not installed or '
'not operational: %s' % (cls.webdriver_class, str(e)))
super(JavascriptI18nTests, cls).setUpClass()
@classmethod
def tearDownClass(cls):
cls.selenium.quit()
super(JavascriptI18nTests, cls).tearDownClass()
@override_settings(LANGUAGE_CODE='de')
def test_javascript_gettext(self):
self.selenium.get('%s%s' % (self.live_server_url, '/jsi18n_template/'))
elem = self.selenium.find_element_by_id("gettext")
self.assertEqual(elem.text, "Entfernen")
elem = self.selenium.find_element_by_id("ngettext_sing")
self.assertEqual(elem.text, "1 Element")
elem = self.selenium.find_element_by_id("ngettext_plur")
self.assertEqual(elem.text, "455 Elemente")
elem = self.selenium.find_element_by_id("pgettext")
self.assertEqual(elem.text, "Kann")
elem = self.selenium.find_element_by_id("npgettext_sing")
self.assertEqual(elem.text, "1 Resultat")
elem = self.selenium.find_element_by_id("npgettext_plur")
self.assertEqual(elem.text, "455 Resultate")
class JavascriptI18nChromeTests(JavascriptI18nTests):
webdriver_class = 'selenium.webdriver.chrome.webdriver.WebDriver'
class JavascriptI18nIETests(JavascriptI18nTests):
webdriver_class = 'selenium.webdriver.ie.webdriver.WebDriver'
| mit |
ModoUnreal/PyWeather | setup.py | 1 | 99122 | '''
_______
| \ \ / @@@;
| \ \ / `#....@
| | \ / ,;@.....;,;
| | \ / @..@........@` PyWeather Setup
| | \ / .............@ version 0.6.3 beta
| / \ / .............@ (c) 2017-2018 - o355
|_______/ | @...........#`
| | .+@@++++@#;
| | @ ; ,
| | : ' .
| | @ # .`
| | @ # .`
'''
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
if sys.version_info < (3, 0, 0):
print("You'll need Python 3 to run PyWeather.",
"Press enter to exit.")
input()
sys.exit()
elif (sys.version_info > (3, 0, 0)
and sys.version_info < (3, 5, 0)):
print("You have a Python version between 3.0 and 3.4.",
"While PyWeather will work, you may experience a few quirks.",
"Try updating to Python 3.6, as it works more reliably.",
"Please take note of this in PyWeather.","", sep="\n")
elif sys.version_info >= (3, 7, 0):
print("You have a Python version of 3.7 and greater.",
"Please note that PyWeather 0.6.2 beta is NOT certified to work with",
"Python 3.7. Python 3.6 and below should work just fine.", sep="\n")
import configparser
import traceback
import subprocess
import logging
import os
import urllib.request
# Now force the writing of the versioninfo file during setup, this should prevent issues
# in the event I forget to gitignore the file.
try:
open('updater//versioninfo.txt', 'w').close()
with open("updater//versioninfo.txt", 'a') as out:
out.write("0.6.3 beta")
out.close()
except:
print("Couldn't write the versioninfo file. This may cause issues with PyWeather down the road.")
config = configparser.ConfigParser()
config.read('storage//config.ini')
def configprovision():
try:
config.add_section("GEOCODER API")
except configparser.DuplicateSectionError:
print("Failed to add the Geocoder API section.")
try:
config.add_section("FAVORITE LOCATIONS")
except configparser.DuplicateSectionError:
print("Failed to add the favorite locations section.")
try:
config.add_section("PREVIOUS LOCATIONS")
except configparser.DuplicateSectionError:
print("Failed to add the previous locations section")
try:
config.add_section("HURRICANE")
except configparser.DuplicateSectionError:
print("Failed to add the hurricane section.")
try:
config.add_section("FIRSTINPUT")
except configparser.DuplicateSectionError:
print("Failed to add the firstinput section.")
try:
config.add_section('SUMMARY')
except configparser.DuplicateSectionError:
print("Failed to add the summary section.")
try:
config.add_section('VERBOSITY')
except configparser.DuplicateSectionError:
print("Failed to add the verbosity section.")
try:
config.add_section('TRACEBACK')
except configparser.DuplicateSectionError:
print("Failed to add the traceback section.")
try:
config.add_section('UI')
except configparser.DuplicateSectionError:
print("Failed to add the UI section.")
try:
config.add_section('PREFETCH')
except configparser.DuplicateSectionError:
print("Failed to add the prefetch section.")
try:
config.add_section('UPDATER')
except configparser.DuplicateSectionError:
print("Failed to add the updater section.")
try:
config.add_section('KEYBACKUP')
except configparser.DuplicateSectionError:
print("Failed to add the keybackup section.")
try:
config.add_section('PYWEATHER BOOT')
except configparser.DuplicateSectionError:
print("Failed to add the PyWeather Boot section.")
try:
config.add_section('USER')
except configparser.DuplicateSectionError:
print("Failed to add the user section.")
try:
config.add_section('CACHE')
except configparser.DuplicateSectionError:
print("Failed to add the cache section.")
try:
config.add_section('RADAR GUI')
except configparser.DuplicateSectionError:
print("Failed to add the Radar GUI section.")
try:
config.add_section('GEOCODER')
except configparser.DuplicateSectionError:
print("Failed to add the Geocoder section.")
config['SUMMARY']['sundata_summary'] = 'False'
config['SUMMARY']['almanac_summary'] = 'False'
config['SUMMARY']['showalertsonsummary'] = 'True'
config['SUMMARY']['showtideonsummary'] = 'False'
config['SUMMARY']['showyesterdayonsummary'] = 'False'
config['VERBOSITY']['verbosity'] = 'False'
config['VERBOSITY']['json_verbosity'] = 'False'
config['VERBOSITY']['setup_verbosity'] = 'False'
config['VERBOSITY']['setup_jsonverbosity'] = 'False'
config['VERBOSITY']['updater_verbosity'] = 'False'
config['VERBOSITY']['updater_jsonverbosity'] = 'False'
config['VERBOSITY']['keybackup_verbosity'] = 'False'
config['VERBOSITY']['configdefault_verbosity'] = 'False'
config['TRACEBACK']['tracebacks'] = 'False'
config['TRACEBACK']['setup_tracebacks'] = 'False'
config['TRACEBACK']['updater_tracebacks'] = 'False'
config['TRACEBACK']['configdefault_tracebacks'] = 'False'
config['UI']['show_entertocontinue'] = 'True'
config['UI']['detailedinfoloops'] = '6'
config['UI']['forecast_detailedinfoloops'] = '5'
config['UI']['show_completediterations'] = 'False'
config['UI']['alerts_usiterations'] = '1'
config['UI']['alerts_euiterations'] = '2'
config['UI']['extratools_enabled'] = 'False'
config['PREFETCH']['10dayfetch_atboot'] = 'False'
config['PREFETCH']['yesterdaydata_atboot'] = 'False'
config['UPDATER']['autocheckforupdates'] = 'False'
config['UPDATER']['show_updaterreleasetag'] = 'False'
config['KEYBACKUP']['savedirectory'] = 'backup//'
config['PYWEATHER BOOT']['validateapikey'] = 'True'
config['UPDATER']['showReleaseNotes'] = 'True'
config['UPDATER']['showReleaseNotes_uptodate'] = 'False'
config['UPDATER']['showNewVersionReleaseDate'] = 'True'
config['USER']['configprovisioned'] = 'True'
config['CACHE']['enabled'] = 'True'
config['CACHE']['alerts_cachedtime'] = '5'
config['CACHE']['current_cachedtime'] = '10'
config['CACHE']['threedayhourly_cachedtime'] = '60'
config['CACHE']['tendayhourly_cachedtime'] = '60'
config['CACHE']['forecast_cachedtime'] = '60'
config['CACHE']['almanac_cachedtime'] = '240'
config['CACHE']['sundata_cachedtime'] = '480'
config['CACHE']['tide_cachedtime'] = '480'
config['CACHE']['hurricane_cachedtime'] = '180'
config['CACHE']['yesterday_cachedtime'] = '720'
config['RADAR GUI']['radar_imagesize'] = 'normal'
config['RADAR GUI']['bypassconfirmation'] = 'False'
config['GEOCODER']['scheme'] = 'https'
config['GEOCODER API']['customkey_enabled'] = 'False'
config['GEOCODER API']['customkey'] = 'None'
config['PREFETCH']['hurricanedata_atboot'] = 'False'
config['FIRSTINPUT']['geoipservice_enabled'] = 'False'
config['FIRSTINPUT']['allow_pwsqueries'] = 'True'
config['HURRICANE']['enablenearestcity'] = 'False'
config['HURRICANE']['enablenearestcity_forecast'] = 'False'
config['HURRICANE']['api_username'] = 'pyweather_proj'
config['HURRICANE']['nearestcitysize'] = 'medium'
config['FAVORITE LOCATIONS']['enabled'] = 'True'
config['FAVORITE LOCATIONS']['favloc1'] = 'None'
config['FAVORITE LOCATIONS']['favloc2'] = 'None'
config['FAVORITE LOCATIONS']['favloc3'] = 'None'
config['FAVORITE LOCATIONS']['favloc4'] = 'None'
config['FAVORITE LOCATIONS']['favloc5'] = 'None'
config['FAVORITE LOCATIONS']['favloc1_data'] = 'None'
config['FAVORITE LOCATIONS']['favloc2_data'] = 'None'
config['FAVORITE LOCATIONS']['favloc3_data'] = 'None'
config['FAVORITE LOCATIONS']['favloc4_data'] = 'None'
config['FAVORITE LOCATIONS']['favloc5_data'] = 'None'
config['PREVIOUS LOCATIONS']['enabled'] = 'True'
config['PREVIOUS LOCATIONS']['prevloc1'] = 'None'
config['PREVIOUS LOCATIONS']['prevloc2'] = 'None'
config['PREVIOUS LOCATIONS']['prevloc3'] = 'None'
config['PREVIOUS LOCATIONS']['prevloc4'] = 'None'
config['PREVIOUS LOCATIONS']['prevloc5'] = 'None'
config['PREVIOUS LOCATIONS']['prevloc1_data'] = 'None'
config['PREVIOUS LOCATIONS']['prevloc2_data'] = 'None'
config['PREVIOUS LOCATIONS']['prevloc3_data'] = 'None'
config['PREVIOUS LOCATIONS']['prevloc4_data'] = 'None'
config['PREVIOUS LOCATIONS']['prevloc5_data'] = 'None'
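    # For reference (added note; these mirror the defaults assigned above),
    # the written config.ini ends up with sections along the lines of:
    #     [SUMMARY]
    #     sundata_summary = False
    #     almanac_summary = False
    # (configparser lowercases option names when writing them out.)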
try:
with open('storage//config.ini', 'w') as configfile:
config.write(configfile)
except:
print("Hmmf...an odd error occurred. A full traceback will be",
"printed below. Please report this issue on GitHub",
"(github.com/o355/pyweather), as that would be greatly appreciated",
"for trying to fix the bug that you just encountered!", sep="\n")
traceback.print_exc()
# Giving users choice, unlike Microsoft.
print("Would you like to continue using PyWeather with an unprovisioned config?",
"It's highly recommended you don't continue, as you may encounter",
"unexpected errors and issues with using PyWeather. Yes or No.", sep="\n")
provisionfailed_continue = input("Input here: ").lower()
if provisionfailed_continue == "yes":
print("Continuing with PyWeather Setup. Please remember, you may encounter",
"unexpected errors and issues. You can always retry provisioning your config",
"by using the configsetup.py script in the storage folder.", sep="\n")
elif provisionfailed_continue == "no":
print("Stopping PyWeather Setup. You can retry to provision your config by using",
"the configsetup.py script in the storage folder.",
"Press enter to exit.", sep="\n")
input()
sys.exit()
else:
print("Couldn't understand your input. By default, PyWeather Setup is stopping.",
"You can retry to provision your config by using the configsetup.py script",
"in the storage folder. Press enter to exit.", sep="\n")
input()
sys.exit()
# See if the config is "provisioned". If it isn't, a KeyError will occur,
# because it's not created. Here, we set up the config to defaults if it's not
# provisioned.
try:
configprovisioned = config.getboolean('USER', 'configprovisioned')
except:
print("Your config likely isn't provisioned. Would you like to provision your config?",
"It's highly recommended you provision your config. If you decide not to,",
"you may run into issues using PyWeather.",
"Yes or No.", sep="\n")
provisionconfig = input("Input here: ").lower()
if provisionconfig == "yes":
print("Provisioning your config.")
configprovision()
print("Config file provisioned successfully! Moving on with PyWeather setup...")
elif provisionconfig == "no":
print("Not provisioning your config. You may encounter unexpected errors",
"and issues when using PyWeather, however.", sep="\n")
else:
print("Couldn't understand your input. By default, I'm going to provision",
"your config. Beginning now...", sep="\n")
configprovision()
print("Config file provisioned successfully! Moving on with PyWeather setup...")
try:
verbosity = config.getboolean('VERBOSITY', 'setup_verbosity')
jsonVerbosity = config.getboolean('VERBOSITY', 'setup_jsonverbosity')
tracebacksEnabled = config.getboolean('TRACEBACK', 'setup_tracebacks')
except:
print("Couldn't load your config file. Make sure there aren't any typos",
"in the config, and that the config file is accessible.",
"Setting config variables to their defaults.",
"Here's the full traceback, in case you need it.", sep="\n")
traceback.print_exc()
verbosity = False
jsonVerbosity = False
tracebacksEnabled = False
def printException():
if tracebacksEnabled == True:
print("Here's the full traceback (for error reporting):")
traceback.print_exc()
def printException_loggerwarn():
if verbosity == True:
logger.warning("Oh snap! We ran into a non-critical error. Here's the traceback.")
traceback.print_exc()
logger = logging.getLogger(name='pyweather_setup_0.6.3beta')
logger.setLevel(logging.DEBUG)
logformat = '%(asctime)s | %(levelname)s | %(message)s'
logging.basicConfig(format=logformat)
if verbosity == True:
logger.setLevel(logging.DEBUG)
elif tracebacksEnabled == True:
logger.setLevel(logging.ERROR)
else:
logger.setLevel(logging.CRITICAL)
logger.debug("Listing configuration options:")
logger.debug("verbosity: %s ; jsonVerbosity: %s" %
(verbosity, jsonVerbosity))
logger.debug("tracebacksEnabled: %s" %
tracebacksEnabled)
print("Hi! Welcome to PyWeather 0.6.3 beta! Glad that you're here.",
"I'm here to help set up PyWeather, and let you configure it to your liking.",
"Let's begin!", sep="\n")
import shutil
import time
import json
import codecs
buildnumber = 63
buildversion = "0.6.3 beta"
logger.debug("buildnumber: %s ; buildversion: %s" %
(buildnumber, buildversion))
print("","Before we get started, I want to confirm some permissions from you.",
"Is it okay if I use 1-5 MB of data (downloading libraries), save a small",
"text file called apikey.txt (under 2 KB), and automatically install Python",
"libraries?",
"Please input yes or no below:", sep="\n")
confirmPermissions = input("Input here: ").lower()
logger.debug("confirmPermissions: %s" % confirmPermissions)
if confirmPermissions == "no":
logger.debug("User denied permissions. Closing...")
print("Okay! Closing now.",
"Press enter to exit.", sep="\n")
input()
sys.exit()
elif confirmPermissions != "yes":
logger.debug("Couldn't understand. Closing...")
print("I couldn't understand what you said.",
"As a precaution, I won't proceed any further.",
"Press enter to exit.", sep="\n")
input()
sys.exit()
print("","Cool! Let's start.",
"I'm going to start by checking for necessary libraries (to run PyWeather).",
"This can take a moment, so please hold tight while I check!", sep="\n")
try:
import pip
except ImportError:
logger.warn("pip is NOT installed! Asking user for automated install...")
printException_loggerwarn()
print("","Shucks! PIP couldn't be imported, and I need PIP to install",
"libraries for you. Would you like me to install PIP for you?",
"Yes or No.", sep="\n")
pipConfirm = input("Input here: ").lower()
logger.debug("pipConfirm: %s" % pipConfirm)
if pipConfirm == "no":
logger.info("User denied PIP install, closing...")
print("","Okay! I'm closing setup, as I need PIP to continue.",
"Press enter to continue.", sep="\n")
input()
sys.exit()
elif pipConfirm == "yes":
logger.info("User allowed PIP install. Starting...")
print("","Okay!",
"I'll download PIP's installer, and run it.",
"Doing such uses about 2-4 MB of data, and will quit PW setup.",
"When the setup script finishes, you'll need to run the setup script again."
"I'll start in a few seconds.", sep="\n")
time.sleep(3)
print("Downloading the installer...")
# We use the built-in urllib library, as some Python installs don't include requests.
try:
with urllib.request.urlopen('https://bootstrap.pypa.io/get-pip.py') as update_response, open('get-pip.py',
'wb') as update_out_file:
logger.debug("update_response: %s ; update_out_file: %s"
% (update_response, update_out_file))
shutil.copyfileobj(update_response, update_out_file)
except:
print("Couldn't download the PIP installer, either due to no internet connection, or the library that fetches",
"files has failed. As an alternative, you can download the installer yourself.",
"Please download this file: 'https://bootstrap.pypa.io/get-pip.py', and place it in PyWeather's base directory.",
"Afterwards, press enter to execute the installer. Press Control + C to exit.", sep="\n")
printException()
input()
print("Running the installer...")
logger.debug("Executing get-pip.py. If this script exits, please restart the setup script.")
exec(open("get-pip.py").read())
else:
logger.warn("Couldn't understand the input. Closing...")
print("","I didn't understand what you said.",
"As a precaution, I'm closing setup, as I need PIP to continue.",
"Press enter to exit.", sep="\n")
input()
sys.exit()
except PermissionError:
traceback.print_exc()
print("PIP has incorrect permissions on your machine. Please attempt to fix",
"permissions on the folder that is listed in the traceback.",
"Linux users: Use sudo chown -R <yourusername> <folder>, this should fix the issue.",
"Press enter to exit.", sep="\n")
input()
sys.exit()
print("Deleting the PIP installer file (if it exists)")
try:
os.remove("get-pip.py")
except:
printException_loggerwarn()
print("The file get-pip.py didn't exist, or we had wrong permissions.")
neededLibraries = 0
try:
import colorama
coloramaInstalled = True
logger.info("Colorama is installed.")
logger.debug("coloramaInstalled: %s" % coloramaInstalled)
except ImportError:
coloramaInstalled = False
neededLibraries = neededLibraries + 1
logger.warn("Colorama is not installed.")
printException_loggerwarn()
logger.debug("coloramaInstalled: %s ; neededLibraries: %s"
% (coloramaInstalled, neededLibraries))
try:
import geopy
geopyInstalled = True
logger.info("geopy is installed.")
logger.debug("geopyInstalled: %s" % geopyInstalled)
except ImportError:
geopyInstalled = False
neededLibraries = neededLibraries + 1
logger.info("geopy is NOT installed.")
printException_loggerwarn()
logger.debug("geopyInstalled: %s ; neededLibraries: %s"
% (geopyInstalled, neededLibraries))
try:
from appJar import gui
appjarInstalled = True
logger.info("appjar is installed.")
logger.debug("appjarInstalled: %s" % appjarInstalled)
except ImportError as e:
    if str(e) == "No module named '_tkinter', please install the python3-tk package":
print("appJar cannot run on this platform. Skipping installation...")
appjarInstalled = True
logger.debug("appjarInstalled: %s" % appjarInstalled)
else:
appjarInstalled = False
neededLibraries = neededLibraries + 1
logger.debug("appJar is NOT installed.")
printException_loggerwarn()
logger.debug("appjarInstalled: %s ; neededLibraries: %s" %
(appjarInstalled, neededLibraries))
try:
import requests
requestsInstalled = True
logger.debug("requests is installed.")
logger.debug("requestsInstalled: %s" % requestsInstalled)
except:
requestsInstalled = False
neededLibraries = neededLibraries + 1
logger.debug("requests is NOT installed.")
printException_loggerwarn()
logger.debug("requestsInstalled: %s ; neededLibraries: %s" %
(requestsInstalled, neededLibraries))
try:
import halo
haloInstalled = True
logger.debug("halo is installed.")
logger.debug("haloInstalled: %s" % haloInstalled)
except:
haloInstalled = False
neededLibraries += 1
logger.debug("halo is NOT installed.")
printException_loggerwarn()
logger.debug("haloInstalled: %s ; neededLibraries: %s" %
(haloInstalled, neededLibraries))
print("All done!")
if neededLibraries == 0:
logger.debug("All libraries are installed.")
print("All necessary libraries have been installed!")
else:
logger.debug("Libraries need to be installed.")
print("Shucks. Not all necessary libraries are installed. Here's what needs to be installed:")
if coloramaInstalled is False:
print("- Colorama")
if geopyInstalled is False:
print("- Geopy")
if appjarInstalled is False:
print("- appJar")
if requestsInstalled is False:
print("- Requests")
if haloInstalled is False:
print("- Halo")
print("If you want me to, I can automatically install these libraries.",
"Would you like me to do such? Yes or No.", sep="\n")
neededLibrariesConfirm = input("Input here: ").lower()
logger.debug("neededLibrariesConfirm: %s" % neededLibrariesConfirm)
if neededLibrariesConfirm == "no":
logger.warning("Not installing necessary libraries. Now exiting...")
print("Okay. I needed to install necessary libraries to continue.",
"Now quitting...",
"Press enter to exit.", sep="\n")
input()
sys.exit()
elif neededLibrariesConfirm == "yes":
print("Now installing necessary libraries...")
if coloramaInstalled is False:
print("Installing Colorama...")
pip.main(['install', 'colorama'])
if geopyInstalled is False:
print("Installing geopy...")
pip.main(['install', 'geopy'])
if appjarInstalled is False:
print("Installing appJar...")
pip.main(['install', 'appJar'])
if requestsInstalled is False:
print("Installing requests...")
pip.main(['install', 'requests'])
if haloInstalled is False:
print("Installing halo...")
pip.main(['install', 'halo'])
logger.info("Running the double check on libraries...")
print("Sweet! All libraries should be installed.",
"Just to confirm, I'm double checking if needed libraries are installed.", sep="\n")
try:
import colorama
logger.info("Colorama installed successfully.")
except ImportError:
logger.warn("colorama was not installed successfully.")
print("Hmm...Colorama didn't install properly.")
printException()
print("As a last resort, we can use sudo -H to install packages.",
"Do you want to use the shell option to install colorama?",
"WARNING: Using the last-resort method may screw up PIP, and",
"may require you to reinstall PIP on your machine."
"Yes or No.", sep="\n")
colorama_lastresort = input("Input here: ").lower()
logger.debug("colorama_lastresort: %s" % colorama_lastresort)
if colorama_lastresort == "yes":
try:
print("Now executing `sudo -H pip3 install colorama`.",
"Please enter the password for sudo when the prompt",
"comes up. Press Control + C to cancel.",
"Starting in 5 seconds...", sep="\n")
time.sleep(5)
try:
subprocess.call(["sudo -H pip3 install colorama"], shell=True)
try:
print("Attempting to reimport colorama.")
import colorama
print("Colorama is FINALLY installed!")
except:
print("Colorama still wasn't successfully installed.",
"Cannot continue without Colorama.",
"Try doing a manual install of Colorama with PIP.", sep="\n")
printException()
print("Press enter to exit.")
input()
sys.exit()
except:
print("When running the command, an error occurred",
"Try doing a manual install of Colorama with PIP.", sep="\n")
printException()
print("Press enter to exit.")
input()
sys.exit()
except KeyboardInterrupt:
print("Command execution aborted.",
"Cannot continue without Colorama.",
"Try and do a manual install of Colorama with PIP",
"in a command line.", sep="\n")
printException()
print("Press enter to exit.")
input()
sys.exit()
elif colorama_lastresort == "no":
print("Not installing Colorama with a shell command.",
"Cannot continue without Colorama.",
"Press enter to exit.", sep="\n")
input()
sys.exit()
else:
print("Did not understand your input. Defaulting to not installing",
"via the shell. Cannot continue without Colorama.",
"Try installing Colorama with PIP.",
"Press enter to exit.")
input()
sys.exit()
try:
import geopy
logger.info("geopy installed successfully.")
except ImportError:
logger.warn("geopy was not installed successfully.")
print("Hmm...geopy didn't install properly.")
printException()
print("As a last resort, we can use sudo -H to install packages.",
"Do you want to use the shell option to install geopy?",
"WARNING: Using the last-resort method may screw up PIP, and",
"may require you to reinstall PIP on your machine."
"Yes or No.", sep="\n")
geopy_lastresort = input("Input here: ").lower()
logger.debug("geopy_lastresort: %s" % geopy_lastresort)
if geopy_lastresort == "yes":
try:
print("Now executing `sudo -H pip3 install geopy`.",
"Please enter the password for sudo when the prompt",
"comes up. Press Control + C to cancel.",
"Starting in 5 seconds...", sep="\n")
time.sleep(5)
try:
subprocess.call(["sudo -H pip3 install geopy"], shell=True)
try:
print("Attempting to reimport geopy.")
import geopy
print("Geopy is FINALLY installed!")
except:
print("Geopy still wasn't successfully installed.",
"Cannot continue without geopy.",
"Try doing a manual install of geopy with PIP.", sep="\n")
printException()
print("Press enter to exit.")
input()
sys.exit()
except:
print("When running the command, an error occurred",
"Try doing a manual install of geopy with PIP.", sep="\n")
printException()
print("Press enter to exit.")
input()
sys.exit()
except KeyboardInterrupt:
print("Command execution aborted.",
"Cannot continue without geopy.",
"Try and do a manual install of geopy with PIP",
"in a command line.", sep="\n")
printException()
print("Press enter to exit.")
input()
sys.exit()
elif geopy_lastresort == "no":
print("Not installing geopy with a shell command.",
"Cannot continue without geopy.",
"Press enter to exit.", sep="\n")
input()
sys.exit()
else:
print("Did not understand your input. Defaulting to not installing",
"via the shell. Cannot continue without geopy.",
"Try installing geopy with PIP.",
"Press enter to exit.")
input()
sys.exit()
# Why is appJar not here? When appJar is straight up imported in a non-GUI environment, it'll throw an error
# even when it's installed. I don't check for an install because of this reason.
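# A minimal sketch (an added assumption, not original PyWeather behavior):
# appJar's presence could still be verified without importing it, which
# avoids the Tkinter error on headless systems:
#     import importlib.util
#     if importlib.util.find_spec("appJar") is None:
#         print("appJar doesn't appear to be installed.")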
try:
import requests
logger.info("requests installed successfully.")
except ImportError:
logger.warning("Requests was not installed successfully.")
print("Hmm...requests didn't install properly.")
printException()
print("As a last resort, we can use sudo -H to install packages.",
"Do you want to use the shell option to install requests?",
"WARNING: Using the last-resort method may screw up PIP, and",
"may require you to reinstall PIP on your machine."
"Yes or No.", sep="\n")
requests_lastresort = input("Input here: ").lower()
logger.debug("requests_lastresort: %s" % requests_lastresort)
if requests_lastresort == "yes":
try:
print("Now executing `sudo -H pip3 install requests`.",
"Please enter the password for sudo when the prompt",
"comes up. Press Control + C to cancel.",
"Starting in 5 seconds...", sep="\n")
time.sleep(5)
try:
subprocess.call(["sudo -H pip3 install requests"], shell=True)
try:
# Fun fact: This is inside THREE try/except things.
print("Attempting to reimport requests.")
import requests
print("requests is FINALLY installed!")
except:
print("requests still wasn't successfully installed.",
"Cannot continue without requests.",
"Try doing a manual install of requests with PIP.", sep="\n")
printException()
print("Press enter to exit.")
input()
sys.exit()
except:
print("When running the command, an error occurred",
"Try doing a manual install of requests with PIP.", sep="\n")
printException()
print("Press enter to exit.")
input()
sys.exit()
except KeyboardInterrupt:
print("Command execution aborted.",
"Cannot continue without appJar.",
"Try and do a manual install of requests with PIP",
"in a command line.", sep="\n")
printException()
print("Press enter to exit.")
input()
sys.exit()
elif requests_lastresort == "no":
print("Not installing appJar with a shell command.",
"Cannot continue without requests.",
"Press enter to exit.", sep="\n")
input()
sys.exit()
else:
print("Did not understand your input. Defaulting to not installing",
"via the shell. Cannot continue without requests.",
"Try installing requests with PIP.",
"Press enter to exit.")
input()
sys.exit()
try:
import halo
logger.info("Halo installed successfully.")
except ImportError:
logger.warn("halo was not installed successfully.")
print("Hmm...Halo didn't install properly.")
printException()
print("As a last resort, we can use sudo -H to install packages.",
"Do you want to use the shell option to install halo?",
"WARNING: Using the last-resort method may screw up PIP, and",
"may require you to reinstall PIP on your machine."
"Yes or No.", sep="\n")
halo_lastresort = input("Input here: ").lower()
logger.debug("halo_lastresort: %s" % halo_lastresort)
if halo_lastresort == "yes":
try:
print("Now executing `sudo -H pip3 install halo`.",
"Please enter the password for sudo when the prompt",
"comes up. Press Control + C to cancel.",
"Starting in 5 seconds...", sep="\n")
time.sleep(5)
try:
subprocess.call(["sudo -H pip3 install halo"], shell=True)
try:
print("Attempting to reimport halo.")
                import halo
print("Halo is now installed!")
except:
print("Halo still wasn't successfully installed.",
"Cannot continue without Halo.",
"Try doing a manual install of Halo with PIP.", sep="\n")
printException()
print("Press enter to exit.")
input()
sys.exit()
except:
print("When running the command, an error occurred",
"Try doing a manual install of Halo with PIP.", sep="\n")
printException()
print("Press enter to exit.")
input()
sys.exit()
except KeyboardInterrupt:
print("Command execution aborted.",
"Cannot continue without Halo.",
"Try and do a manual install of Halo with PIP",
"in a command line.", sep="\n")
printException()
print("Press enter to exit.")
input()
sys.exit()
elif halo_lastresort == "no":
print("Not installing Halo with a shell command.",
"Cannot continue without Halo.",
"Press enter to exit.", sep="\n")
input()
sys.exit()
else:
print("Did not understand your input. Defaulting to not installing",
"via the shell. Cannot continue without Halo.",
"Try installing Halo with PIP.",
"Press enter to exit.")
input()
sys.exit()
print("","All libraries are installed!", sep="\n")
else:
logger.warn("Input was not understood. Closing...")
print("Your input wasn't understood for if you wanted to automatically import libraries.",
"As a precaution PyWeather Setup needs to now close. Press enter to exit.", sep="\n")
input()
sys.exit()
# Previously this updated all your pip packages. I then did this on my NAS (on FreeNAS 11).
# It broke my NAS! Woo hoo!
print("", "Would you like PyWeather to automatically update it's required packages?",
"Doing this is generally recommended, and will have benefits down the line when",
"some libraries fix known issues that occur in PyWeather. Yes or No.", sep="\n")
confirm_updatepip = input("Input here: ").lower()
logger.debug("confirm_updatepip: %s" % confirm_updatepip)
if confirm_updatepip == "yes":
print("")
print("Updating PIP packages.")
totalpackages = 5
updatecount = 1
pip_requiredlibraries = ['requests', 'halo', 'appjar', 'colorama', 'geopy']
for pkgname in pip_requiredlibraries:
print("Now updating package: %s (Update %s/%s)" %
(pkgname, updatecount, totalpackages))
pip.main(['install', '--upgrade', '%s' % pkgname])
updatecount = updatecount + 1
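    # Hedged note (added): pip.main() was removed in pip 10. If this loop fails
    # on a newer pip, an equivalent, supported approach is to shell out:
    #     subprocess.check_call([sys.executable, "-m", "pip", "install",
    #                            "--upgrade", pkgname])
    # (subprocess and sys are already imported at the top of this script.)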
elif confirm_updatepip == "no":
print("Not updating PIP packages. You may run into issues with non-updated",
"packages in future versions of PyWeather.")
else:
print("Input not understood, not updating PIP packages. You may run into",
"issues with non-updated packages in future versions of PyWeather.")
# Verbosity is not needed here.
print("I'm now going to guide you through obtaining an API key.",
"Please carefully read my detailed instructions, so you don't mess anything up.", sep="\n")
print("","If you know how to acquire a Wunderground API key, or are resetting PyWeather,",
"hit enter 14 times to get to the API key entry.", sep="\n")
print("Let's begin.",
"Start by opening a web browser, and going to https://www.wunderground.com/weather/api/.",
"Press any key when you are done.", sep="\n")
input()
print("Next, click the 'Explore my options' button.",
"Press any key when you are done.", sep="\n")
input()
print("Next, click the small button next to 'ANVIL PLAN'.",
"After that, confirm that the total underneath the 'Purchase Key' button says",
"'$0 USD per month'.",
"If the total underneath the 'Purchase Key' button doesn't",
"say '$0 USD per month, please ensure that the small button next to 'Developer'",
"on the table in the middle of the screen is selected, and the total",
"says '$0 USD per month'",
"Press any key when you are done.", sep="\n")
input()
print("Next, click the 'Purchase Key' button.",
"Press any key when you are done.", sep="\n")
input()
print("Next, input your email, and a password to sign up for a Weather",
"Underground account.",
"Be sure to select the checkbox next to 'I agree to the Terms of Service'",
"It's best if you leave the checkbox next to 'I would like to receive WU",
"updates via email' unchecked.",
"Press any key when you are done and ready.", sep="\n")
input()
print("Next, press the 'Sign up for free' button.",
"When the welcome window pops up, be sure to click the X button at the top right of the popup.",
"When clicking the X, you should be redirected to wunderground.com.",
"Press any key when you are done and ready.", sep="\n")
input()
print("Next, click 'My Profile' at the top right corner of the homepage.",
"In the dropdown, click 'My Email & Text Alerts'",
"Press any key when you are done and ready.", sep="\n")
input()
print("Next, next to your email listed on the page, click the 'Edit / Verify' button.",
"After you click the button, click the 'Verify Email' button.",
"Press any key when you are done and ready.", sep="\n")
input()
print("Next, check your email in which you signed up with.",
"If you got a letter from Weather Underground, titled 'Daily Forecast",
"Email Verification', open that letter, and click the link.",
"If you didn't get the letter, wait a few minutes, and be sure to check your spam folder.",
"Hint: If you followed this guide exactly, WU will not be sending you daily forecasts to your email.",
"Press any key when you are done and ready.", sep="\n")
input()
print("Your email should be verified.",
"Next, in your web browser, head back to https://www.wunderground.com/weather/api/.",
"Then, click the 'Explore my Options' button, again.",
"Press any key when you are done and ready.", sep="\n")
input()
print("Next, at the top of the page, make sure the button next to 'ANVIL PLAN'",
"is selected.",
"After that, confirm that the total underneath the 'Purchase Key' button says",
"'$0 USD per month'",
"If the total doesn't say that, in the pricing table, make sure the button",
"next to 'Developer' is selected.",
"Press any key when you are done and ready.", sep="\n")
input()
print("Next, click the 'Purchase Key' button, on top of your total (which",
"should be $0 USD per month)",
"Next, fill out the form, considering these tips:",
"For the contact name/email, it's recommended you use your real name",
"(first name last initial is fine).",
"It's also recommended that you use your real email.",
"For the project name, put in something generic, like 'to use a script that",
"uses WU's API', or 'WU API test'. It's up to you.",
"For the project website, put in something generic, like 'google.com', or",
"some other site you feel like having as the project site.",
"For the question 'Where will the API be used', answer Other.",
"For the question 'Will the API be used for commercial use?', answer No.",
"For the question 'Will the API be used for manufacturing mobile chip",
"processing?', answer No.",
"Answer yes if you somehow are manufacturing mobile chip processing. I doubt",
"you are, however.",
"For the country that you are based in, put your location.",
"Before we move on, fill out these forms, and press any key when you are done "
"and ready.", sep="\n")
input()
print("Next, for the brief description, put something like 'using an API key",
"to use a script using Wunderground'.",
"After that, check both boxes at the bottom of the page. Read the ToS if you",
"feel like it.",
"Finally, click 'Purchase Key'.",
"You should land on a page that says 'Edit API Key'.",
"Press any key when you are done and ready.", sep="\n")
input()
print("In the table to the left of the page, copy the text that's under Key ID.",
"(Ctrl+C, right click)",
"I'm now going to ask you to input the API key into the text entry below.",
"The API key will be saved to storage/apikey.txt, so PyWeather can easily",
"pull it up.",
"Press any key when you are done and ready.", sep="\n")
input()
print("Please input your API key below.")
apikey_input = input("Input here: ")
logger.debug("apikey_input: %s" % apikey_input)
print("", "Just to confirm, the API key you gave me was: " + apikey_input
+ ".", sep="\n")
print("Please double check your input, and confirm in the dialogue below.")
apikey_confirm = input("Is the API key right? Yes or no: ").lower()
logger.debug("apikey_confirm: %s" % apikey_confirm)
if apikey_confirm == "no":
while True:
logger.debug("User now re-entering key...")
print("","Please input your API key below.", sep="\n")
apikey_input = input("Input here: ")
logger.debug("apikey_input: %s" % apikey_input)
print("Just to confirm, the API key you gave me was: " + apikey_input
+ ".")
apikey_confirm = input("Is the API key right? Yes or no: ").lower()
if apikey_confirm == "yes":
break
elif apikey_confirm == "no":
continue
else:
print("Couldn't understand your input.",
"I'll assume the API key is correct, moving on.", sep="\n")
print("Now saving your API key...")
open('storage//apikey.txt', 'w').close()
with open("storage//apikey.txt", 'a') as out:
logger.debug("out: %s" % out)
out.write(apikey_input)
out.close()
logger.debug("Performed ops: overwrite apikey.txt, out.write(apikey_input), out.close()")
print("", "I can also back up your API key, in case you do something wrong.",
sep="\n")
# A future release should bring customization as to the storage location.
print("Would you like me to save a backup? Yes or no.")
backup_APIkey = input("Input here: ").lower()
if backup_APIkey == "yes":
print("","Where would you want me to backup the key to?",
"This is a directory. If I wanted my key at directory/backkey.txt,",
"You would enter 'directory'. The default directory is 'backup'.", sep="\n")
# Doing a .lower() here to prevent case insensitiveness.
backup_APIkeydirectory = input("Input here: ").lower()
folder_argument = backup_APIkeydirectory + "//backkey.txt"
backup_APIkeydirectory2 = backup_APIkeydirectory + "//"
logger.debug("backup_APIkeydirectory: %s ; backup_APIkeydirectory2: %s" %
(backup_APIkeydirectory, backup_APIkeydirectory2))
logger.debug("folder_argument: %s" % folder_argument)
# These two variables will get reset if the directory is backup, or empty.
if backup_APIkeydirectory == "backup" or backup_APIkeydirectory == "":
print("Using the default directory of //backup.")
folder_argument = "backup//backkey.txt"
backup_APIkeydirectory2 = "backup//"
logger.debug("folder_argument: %s ; backup_APIkeydirectory2: %s" %
(folder_argument, backup_APIkeydirectory2))
elif backup_APIkeydirectory != "backup":
try:
os.mkdir(backup_APIkeydirectory2)
except:
printException_loggerwarn()
print("Couldn't make the directory, does it exist?")
# Overwrite the file, if it exists.
    with open(folder_argument, 'w') as backupfile:
        backupfile.write(apikey_input)
    config['KEYBACKUP']['savedirectory'] = backup_APIkeydirectory2
    print("The API key was backed up successfully!")
    logger.debug("Performed ops: overwrote " + folder_argument +
                 ", wrote the API key to the backup file, and closed it.")
print("", "Before we configure PyWeather, I'll now validate your API key.", sep="\n")
# Do an infinite loop of validation of the API key, so the user can reenter the API key
# if it was wrong.
while True:
apitest_URL = 'http://api.wunderground.com/api/' + apikey_input + '/conditions/q/NY/New_York.json'
testreader = codecs.getreader("utf-8")
logger.debug("apitest_URL: %s ; testreader: %s" %
(apitest_URL, testreader))
try:
testJSON = requests.get(apitest_URL)
logger.debug("testJSON: %s" % testJSON)
except:
logger.warn("Couldn't connect to Wunderground's API! No internet?")
print("When PyWeather Setup attempted to fetch the .json to validate your API key,",
"it ran into an error. If you're on a network with a filter, make sure that",
"'api.wunderground.com' is unblocked. Otherwise, make sure you have an internet",
"connection.", sep="\n")
printException()
print("Press enter to exit.")
input()
sys.exit()
test_json = json.loads(testJSON.text)
if jsonVerbosity == True:
logger.debug("test_json: %s" % test_json)
try:
test_conditions = str(test_json['current_observation']['temp_f'])
logger.debug("test_conditions: %s" % test_conditions)
print("Hurray! Your API key is valid and works.")
break
except:
logger.warn("Error! Is the API key invalid?")
print("When attempting to validate the API key that you entered/confirmed,",
"PyWeather ran into an error. Would you like to reenter your API key to revalidate it?",
"Please note, that this error might be caused by WU's API being down, or another cause.",
"However, 90% of the time, this is due to a bad API key.",
"Yes or No.", sep='\n')
revalidateAPIkey = input("Input here: ").lower()
if revalidateAPIkey == "yes":
print("Enter in your API key below.")
apikey_input = input("Input here: ")
logger.debug("apikey_input: %s")
print("Revalidating your API key...")
continue
elif revalidateAPIkey == "no":
print("Not revalidating your API key. You'll need a valid API key to continue.",
"Press enter to exit.", sep="\n")
input()
sys.exit()
printException()
print("Press enter to exit.")
input()
sys.exit()
print("Let's configure PyWeather to your liking.")
logger.debug("config: %s" % config)
print("", "(1/42)","On the summary screen, would you like to show sunrise/sunset times?",
"By default, this is disabled.",
"Yes or No.", sep="\n")
sundata_Summary = input("Input here: ").lower()
logger.debug("sundata_Summary: %s" % sundata_Summary)
if sundata_Summary == "yes":
config['SUMMARY']['sundata_summary'] = 'True'
print("Changes saved.")
logger.debug("Sundata on the summary is now ENABLED.")
elif sundata_Summary == "no":
config['SUMMARY']['sundata_summary'] = 'False'
print("Changes saved.")
logger.debug("Sundata on the summary is now DISABLED.")
else:
print("Could not understand what you inputted.",
"Defaulting to 'False'", sep="\n")
config['SUMMARY']['sundata_summary'] = 'False'
print("Changes saved.")
logger.debug("Could not recognize input. Defaulting to DISABLED.")
print("", "(2/42)","On the summary screen, would you like to show almanac data?",
"By default, this is disabled.",
"Yes or no:", sep="\n")
almanacdata_Summary = input("Input here: ").lower()
logger.debug("almanacdata_Summary: %s" % almanacdata_Summary)
if almanacdata_Summary == "yes":
config['SUMMARY']['almanac_summary'] = 'True'
print("Changes saved.")
logger.debug("Almanac on the summary is now ENABLED.")
elif almanacdata_Summary == "no":
config['SUMMARY']['almanac_summary'] = 'False'
print("Changes saved.")
logger.debug("Almanac on the summary is now DISABLED.")
else:
print("Could not understand what you inputted.",
"Defaulting to 'False'", sep="\n")
config['SUMMARY']['almanac_summary'] = 'False'
print("Changes saved.")
logger.debug("Could not recognize input. Defaulting to DISABLED.")
print("", "(3/42)", "On the summary screen, would you like to show alerts data?",
"By default, this is enabled. Please note, Wunderground",
"only supports alert data in the US and EU at this time.",
"Yes or No.", sep="\n")
alertsdata_Summary = input("Input here: ").lower()
logger.debug("alertsdata_Summary: %s" % alertsdata_Summary)
if alertsdata_Summary == "yes":
config['SUMMARY']['showalertsonsummary'] = 'True'
print("Changes saved.")
logger.debug("Alerts on the summary is now ENABLED.")
elif alertsdata_Summary == "no":
config['SUMMARY']['showalertsonsummary'] = 'False'
print("Changes saved.")
logger.debug("Alerts on the summary is now DISABLED.")
else:
print("Could not understand what you inputted.",
"Defaulting to 'True'", sep="\n")
config['SUMMARY']['showAlertsOnSummary'] = 'True'
print("", "(4/42)","On boot, would you like PyWeather to check for updates?",
"By default, this is disabled, due to a load time increase of ~2-5 seconds.",
"Yes or No.", sep="\n")
checkForUpdates = input("Input here: ").lower()
logger.debug("checkForUpdates: %s" % checkForUpdates)
if checkForUpdates == "yes":
config['UPDATER']['autoCheckForUpdates'] = 'True'
print("Changes saved.")
logger.debug("Checking for updates on startup is ENABLED.")
elif checkForUpdates == "no":
config['UPDATER']['autoCheckForUpdates'] = 'False'
print("Changes saved.")
logger.debug("Checking for updates on startup is DISABLED.")
else:
print("Could not understand what you inputted.",
"Defaulting to 'False'", sep="\n")
config['UPDATER']['autoCheckForUpdates'] = 'False'
print("Changes saved.")
logger.debug("Could not recognize input. Defaulting to DISABLED.")
print("", "(5/42)","When an error occurs, would you like PyWeather to show the full error?",
"When enabled, you'll have easier access to the full error for reporting",
"the bug on GitHub.",
"By default, this is disabled, as errors look less pretty when enabled.",
"Yes or no.", sep="\n")
displayTracebacks = input("Input here: ").lower()
logger.debug("displayTracebacks: %s" % displayTracebacks)
if displayTracebacks == "yes":
config['TRACEBACK']['tracebacks'] = 'True'
config['TRACEBACK']['setup_tracebacks'] = 'True'
config['TRACEBACK']['updater_tracebacks'] = 'True'
config['TRACEBACK']['keybackup_tracebacks'] = 'True'
config['TRACEBACK']['configdefault_tracebacks'] = 'True'
print("Changes saved.")
logger.debug("Printing tracebacks is ENABLED.")
elif displayTracebacks == "no":
config['TRACEBACK']['tracebacks'] = 'False'
config['TRACEBACK']['setup_tracebacks'] = 'False'
config['TRACEBACK']['updater_tracebacks'] = 'False'
config['TRACEBACK']['keybackup_tracebacks'] = 'False'
config['TRACEBACK']['configdefault_tracebacks'] = 'False'
print("Changes saved.")
logger.debug("Printing tracebacks is DISABLED.")
else:
print("Couldn't understand what you inputted.",
"Defaulting to 'False'", sep="\n")
config['TRACEBACK']['tracebacks'] = 'False'
config['TRACEBACK']['setup_tracebacks'] = 'False'
config['TRACEBACK']['updater_tracebacks'] = 'False'
config['TRACEBACK']['keybackup_tracebacks'] = 'False'
print("Changes saved.")
logger.debug("Could not understand input. Defaulting to DISABLED.")
print("", "(6/42)", "When booting PyWeather up initially, would you like PyWeather to",
"fetch the 10-day hourly forecast, instead of the 3-day forecast?",
"This is disabled by default. When enabled, initial loading times are",
"increased. However, when you view the 10-day hourly forecast, you won't",
"have to wait for it to load, and use another API call.",
"Yes or No.", sep="\n")
tenday_onboot = input("Input here: ").lower()
if tenday_onboot == "yes":
config['PREFETCH']['10dayfetch_atboot'] = 'True'
print("Changes saved.")
logger.debug("Fetching 10 day JSON at boot is ENABLED.")
elif tenday_onboot == "no":
config['PREFETCH']['10dayfetch_atboot'] = 'False'
print("Changes saved.")
logger.debug("Fetching 10 day JSON at boot is DISABLED.")
else:
print("Couldn't understand what you inputted.",
"Defaulting to the default value 'False'", sep="\n")
config['PREFETCH']['10dayfetch_atboot'] = 'False'
print("Changes saved.")
logger.debug("Could not understand input. Defaulting to DISABLED.")
print("", "(7/42)", "When viewing detailed hourly, 10-day hourly, and historical hourly,",
"detailed information, how many iterations should PyWeather go through",
"before asking you to continue?",
"By default, this is 6. An input above 10",
"is not recommended.", sep="\n")
detailedloops = input("Input here: ")
try:
detailedloops = int(detailedloops)
detailedloops = str(detailedloops)
config['UI']['detailedinfoloops'] = detailedloops
print("Changes saved.")
logger.debug("Detailed info iterations now %s." % detailedloops)
except:
print("Couldn't convert input into a number. Defaulting to '6'.")
printException_loggerwarn()
config['UI']['detailedinfoloops'] = '6'
print("Changes saved.")
logger.debug("Detailed info loops now 6.")
print("", "(8/42)", "When viewing detailed 10-day forecast information, how many",
"iterations should PyWeather go through, before asking you to",
"continue?",
"By default, this is 5. An input above 10 will not prompt",
"the enter to continue prompt", sep="\n")
detailedForecastLoops = input("Input here: ")
try:
detailedForecastLoops = int(detailedForecastLoops)
detailedForecastLoops = str(detailedForecastLoops)
config['UI']['forecast_detailedinfoloops'] = detailedForecastLoops
print("Changes saved.")
logger.debug("Detailed forecast info iterations now %s" % detailedForecastLoops)
except:
print("Couldn't convert input into a number. Defaulting to '5'.")
printException_loggerwarn()
config['UI']['forecast_detailedinfoloops'] = '5'
print("Changes saved.")
logger.debug("Detailed forecast info loops now 5.")
print("", "(9/42)", "PyWeather has a caching system, in which if you're gone for some time",
"data will automatically refresh. Would you like to turn this on?",
"This is enabled by default. Yes or No.", sep="\n")
enablecache = input("Input here: ").lower()
if enablecache == "no":
print("Cache will be disabled.")
config['CACHE']['enabled'] = 'False'
print("Changes saved.")
else:
config['CACHE']['enabled'] = 'True'
print("You entered yes, or your input wasn't understood (yes is the default.)",
"In the next few inputs, enter the time in minutes that PyWeather should keep",
"certain types of data, before a data refresh is automatically requested.",
"If you want to leave cache values to their defaults, press enter at any prompt.", sep="\n")
print("", "(10/42)", "Please enter the cache time for alerts data in minutes (default = 5)", sep="\n")
alertscachetime = input("Input here: ").lower()
try:
alertscachetime = float(alertscachetime)
alertscachetime = str(alertscachetime)
config['CACHE']['alerts_cachedtime'] = alertscachetime
print("Changes saved.")
logger.debug("Alerts cache time now %s minutes." % alertscachetime)
except:
print("", "Your input couldn't be converted into a number. Setting alerts",
"cache time to it's default value of '5'.", sep="\n")
config['CACHE']['alerts_cachedtime'] = '5'
logger.debug("Alerts cache time now 5 minutes.")
print("", "(11/42)", "Please enter the cache time for current data in minutes (default = 10)", sep="\n")
currentcachetime = input("Input here: ").lower()
try:
currentcachetime = float(currentcachetime)
currentcachetime = str(currentcachetime)
config['CACHE']['current_cachedtime'] = currentcachetime
print("Changes saved.")
logger.debug("Current cache time now %s minutes." % alertscachetime)
except:
print("", "Your input couldn't be converted into a number. Setting current",
"cache time to it's default value of '10'.", sep="\n")
config['CACHE']['current_cachedtime'] = '10'
logger.debug("Current cache time now 10 minutes.")
print("", "(12/42)", "Please enter the cache time for forecast data in minutes (default = 60)", sep="\n")
forecastcachetime = input("Input here: ").lower()
try:
forecastcachetime = float(forecastcachetime)
forecastcachetime = str(forecastcachetime)
config['CACHE']['forecast_cachedtime'] = forecastcachetime
print("Changes saved.")
logger.debug("Forecast cache time now %s minutes." % forecastcachetime)
except:
print("", "Your input couldn't be converted into a number. Setting forecast",
"cache time to it's default value of '60'.", sep="\n")
config['CACHE']['forecast_cachedtime'] = '60'
logger.debug("Forecast cache time now 60 minutes.")
print("", "(13/42)", "Please enter the cache time for almanac data in minutes (default = 240)", sep="\n")
almanaccachetime = input("Input here: ").lower()
try:
almanaccachetime = float(almanaccachetime)
almanaccachetime = str(almanaccachetime)
config['CACHE']['almanac_cachedtime'] = almanaccachetime
print("Changes saved.")
logger.debug("Almanac cache time now %s minutes." % almanaccachetime)
except:
print("", "Your input couldn't be converted into a number. Setting almanac",
"cache time to it's default value of '240'.", sep="\n")
config['CACHE']['almanac_cachedtime'] = '240'
logger.debug("Almanac cache time now 240 minutes.")
print("", "(14/42)", "Please enter the cache time for 1.5 day hourly data in minutes (default = 60)", sep="\n")
threedayhourly_cachedtime = input("Input here: ").lower()
try:
threedayhourly_cachedtime = float(threedayhourly_cachedtime)
threedayhourly_cachedtime = str(threedayhourly_cachedtime)
config['CACHE']['threedayhourly_cachedtime'] = threedayhourly_cachedtime
print("Changes saved.")
logger.debug("3 day hourly cache time now %s minutes." % threedayhourly_cachedtime)
except:
print("", "Your input couldn't be converted into a number. Setting three day hourly",
"cache time to it's default value of '60'.", sep="\n")
config['CACHE']['threedayhourly_cachedtime'] = "60"
logger.debug("3 day hourly cache time now 60 minutes")
print("", "(15/42)", "Please enter the cache time for the ten day hourly data in minutes (default = 60)", sep="\n")
tendayhourly_cachedtime = input("Input here: ").lower()
try:
tendayhourly_cachedtime = float(tendayhourly_cachedtime)
tendayhourly_cachedtime = str(tendayhourly_cachedtime)
config['CACHE']['tendayhourly_cachedtime'] = tendayhourly_cachedtime
print("Changes saved.")
logger.debug("10 day hourly cache time now %s minutes." % tendayhourly_cachedtime)
except:
print("", "Your input couldn't be converted into a number. Setting ten day hourly",
"cache time to it's default value of '60'.", sep="\n")
config['CACHE']['tendayhourly_cachedtime'] = "60"
logger.debug("10 day hourly cache time now 60 minutes")
print("", "(16/42)", "Please enter the cache time for sun data in minutes (default = 480)", sep="\n")
sundatacachetime = input("Input here: ").lower()
try:
sundatacachetime = float(sundatacachetime)
sundatacachetime = str(sundatacachetime)
config['CACHE']['sundata_cachedtime'] = sundatacachetime
print("Changes saved.")
logger.debug("Sun data cache time now %s minutes." % sundatacachetime)
except:
print("", "Your input couldn't be converted into a number. Setting sun data",
"cache time to it's default value of '480'.", sep="\n")
config['CACHE']['sundata_cachedtime'] = '480'
logger.debug("Sun data cache time now 480 minutes.")
print("", "(17/42)", "Please enter the cache time for tide data in minutes (default = 480)", sep="\n")
tidecachetime = input("Input here: ").lower()
try:
tidecachetime = float(tidecachetime)
tidecachetime = str(tidecachetime)
config['CACHE']['tide_cachedtime'] = tidecachetime
print("Changes saved.")
logger.debug("Tide cache time now %s minutes." % tidecachetime)
except:
print("", "Your input couldn't be converted into a number. Setting tide data",
"cache time to it's default value of '480'.", sep="\n")
config['CACHE']['tide_cachedtime'] = '480'
logger.debug("Tide data cache time now 480 minutes.")
print("", "(18/42)", "Please enter the cache time for hurricane data in minutes (default = 480)", sep="\n")
hurricanecachetime = input("Input here: ").lower()
try:
hurricanecachetime = float(hurricanecachetime)
hurricanecachetime = str(hurricanecachetime)
config['CACHE']['hurricane_cachedtime'] = hurricanecachetime
print("Changes saved.")
logger.debug("Hurricane cache time now %s minutes" % hurricanecachetime)
except:
print("", "Your input couldn't be converted into a number. Setting hurricane data",
"cache time to it's default value of '180'.", sep="\n")
config['CACHE']['hurricane_cachedtime'] = '180'
logger.debug("Hurricane data cache time now 180 minutes.")
print("", "(19/42)", "Please enter the cache time for yesterday's weather data in minutes (default = 720)", sep="\n")
yesterdaycachedtime = input("Input here: ").lower()
try:
yesterdaycachedtime = float(yesterdaycachedtime)
yesterdaycachedtime = str(yesterdaycachedtime)
config['CACHE']['yesterday_cachedtime'] = yesterdaycachedtime
print("Changes saved.")
logger.debug("Yesterday cache time now %s minutess" % yesterdaycachedtime)
except:
print("", "Your input couldn't be converted into a number. Setting yesterday's weather data",
"cache time to it's default value of 720.", sep="\n")
config['CACHE']['yesterday_cachedtime'] = '720'
logger.debug("Yesterday data cache time now 720 minutes.")
print("", "(20/42)", "When viewing detailed EU alerts information, how many",
"iterations should PyWeather go through, before asking you to",
"continue?",
"By default, this is 2.", sep="\n")
EUalertsloops = input("Input here: ")
try:
EUalertsloops = int(EUalertsloops)
EUalertsloops = str(EUalertsloops)
config['UI']['alerts_EUiterations'] = EUalertsloops
print("Changes saved.")
logger.debug("Detailed EU alert iterations now %s" % EUalertsloops)
except:
print("Couldn't convert input into a number. Defaulting to '2'.")
printException_loggerwarn()
config['UI']['alerts_EUiterations'] = '2'
print("Changes saved.")
logger.debug("Detailed EU alert iterations now 2.")
print("", "(21/42)", "When viewing detailed US alerts information, how many",
"iterations should PyWeather go through, before asking you to",
"continue?",
"By default, this is 1.", sep="\n")
USalertsloops = input("Input here: ")
try:
USalertsloops = int(USalertsloops)
USalertsloops = str(USalertsloops)
config['UI']['alerts_USiterations'] = USalertsloops
print("Changes saved.")
logger.debug("Detailed US alert iterations now %s" % USalertsloops)
except:
print("Couldn't convert input to a number. Defaulting to '1'.")
printException_loggerwarn()
config['UI']['alerts_USiterations'] = '1'
print("Changes saved.")
logger.debug("Detailed US alert iterations now 1.")
print("", "(22/42)","When PyWeather is going through detailed information, it can show",
"how many iterations are completed.",
"By default, this is disabled.",
"Yes or No.", sep="\n")
showIterations = input("Input here: ").lower()
if showIterations == "yes":
config['UI']['show_completediterations'] = 'True'
print("Changes saved.")
logger.debug("Showing completed iterations is ENABLED.")
elif showIterations == "no":
config['UI']['show_completediterations'] = 'False'
print("Changes saved.")
logger.debug("Showing completed iterations is DISABLED.")
else:
print("Couldn't understand what you inputted.",
"Defaulting to 'FALSE'.", sep="\n")
config['UI']['show_completediterations'] = 'False'
print("Changes saved.")
logger.debug("Could not understand input. Defaulting to DISABLED.")
print("", "(23/42)", "When PyWeather is going through detailed information, would",
"you like the 'Enter to Continue' prompts to pop up?",
"By default, this is enabled.",
"Yes or No.", sep="\n")
showEnterToContinue = input("Input here: ").lower()
if showEnterToContinue == "yes":
config['UI']['show_entertocontinue'] = 'True'
print("Changes saved.")
logger.debug("Showing enter to continue prompts is ENABLED.")
elif showEnterToContinue == "no":
config['UI']['show_entertocontinue'] = 'False'
print("Changes saved.")
logger.debug("Showing enter to continue prompts is DISABLED.")
else:
print("Could not understand what you inputted.",
"Defaulting to 'True'.", sep="\n")
config['UI']['show_entertocontinue'] = 'True'
print("Changes saved.")
logger.debug("Could not understand input. Defaulting to ENABLED.")
print("", "(24/42)", "In the PyWeather Updater, the updater can show the release tag",
"associated with the latest release. Helpful for those using Git to",
"update PyWeather. By default, this is disabled.",
"Yes or No.", sep="\n")
showReleaseTag = input("Input here: ").lower()
if showReleaseTag == "yes":
config['UPDATER']['show_updaterreleasetag'] = 'True'
print("Changes saved.")
logger.debug("Showing release tag in updater is ENABLED.")
elif showReleaseTag == "no":
config['UPDATER']['show_updaterreleasetag'] = 'False'
print("Changes saved.")
logger.debug("Showing release tag in updater is DISABLED.")
else:
print("Could not understand what you inputted.",
"Defaulting to 'False'.", sep="\n")
config['UPDATER']['show_updaterreleasetag'] = 'False'
print("Changes saved.")
logger.debug("Could not understand input. Defaulting to DISABLED.")
print("", "(25/42)", "When PyWeather boots, it can validate your API key. If PyWeather",
"finds your primary API key is invalid, it'll attempt to validate your",
"backup key, and load that if it's validated successfully.",
"By default, this is enabled, as it's well worth the 1 API call to make",
"sure your key is valid. However, if you said 'Yes' to almanac/sun data",
"on the summary screen, you might not want to enable this.",
"Yes or No.", sep="\n")
validateKeyOnBoot = input("Input here: ").lower()
if validateKeyOnBoot == "yes":
config['PYWEATHER BOOT']['validateAPIKey'] = 'True'
print("Changes saved.")
logger.debug("Validating API key on boot is ENABLED.")
elif validateKeyOnBoot == "no":
config['PYWEATHER BOOT']['validateAPIKey'] = 'False'
print("Changes saved.")
logger.debug("Validating API key on boot is DISABLED.")
else:
print("Could not understand what you inputted.",
"Defaulting to 'True'.", sep="\n")
config['PYWEATHER BOOT']['validateAPIKey'] = 'True'
print("Changes saved.")
logger.debug("Could not understand input. Defaulting to ENABLED.")
print("", "(26/42)", "PyWeather now has a radar feature, which opens up a GUI on supported",
"platforms. Depending on your screen resolution, you'll have to set how large",
"the radar picture is when rendered. In the prompt below, enter one of five sizes.",
"extrasmall - 320x240 window",
"small - 480x320 window",
"normal - 640x480 window",
"large - 960x720 window",
"extralarge - 1280x960 window",
"By default, the resolution is normal. Adapt your choice to the screen resolution",
"of the machine you're using.", sep="\n")
radar_resolutions = ["extrasmall", "small", "normal", "large", "extralarge"]
logger.debug("radar_resolutions: %s" % radar_resolutions)
radar_resolutioninput = input("Input here: ").lower()
for x in range(0, 5):
if radar_resolutioninput == radar_resolutions[x]:
logger.debug("Resolution input matched, end result: %s" % radar_resolutions[x])
config['RADAR GUI']['radar_imagesize'] = radar_resolutions[x]
print("Changes saved.")
break
# If nothing matched by the last iteration (x == 4), fall back to the default.
# (If the input was "extralarge", the if above catches it first and breaks.)
elif x == 4:
print("Could not understand what you inputted. Defaulting to 'normal'.")
config['RADAR GUI']['radar_imagesize'] = 'normal'
print("Changes saved.")
print("", "(27/42)", "PyWeather's radar feature is unfortunately experimental as of PyWeather 0.6.3 beta.",
"By default, a confirmation message will always appear when attempting to launch the radar.",
"However, this can be turned off, if you plan to use the experimental radar on a regular basis.",
"By default, bypassing the confirmation message is disabled. Yes or No.", sep="\n")
radar_bypassconfinput = input("Input here: ").lower()
logger.debug("radar_bypassconfinput: %s" % radar_bypassconfinput)
if radar_bypassconfinput == "yes":
config['RADAR GUI']['bypassconfirmation'] = 'True'
logger.debug("RADAR GUI/bypassconfirmation is now TRUE")
print("Changes saved.")
elif radar_bypassconfinput == "no":
config['RADAR GUI']['bypassconfirmation'] = 'False'
logger.debug("RADAR GUI/bypassconfirmation is now FALSE")
print("Changes saved.")
else:
print("Could not understand what you inputted. Defaulting to 'False'.")
config['RADAR GUI']['bypassconfirmation'] = 'False'
logger.debug("RADAR GUI/bypassconfirmation is now FALSE")
print("Changes saved.")
print("", "(28/42)", "On the summary screen, would you like tide data to be shown?",
"This uses an extra API call when enabled. By default, this is disabled.",
"Yes or No.", sep="\n")
tideonsummary = input("Input here: ").lower()
logger.debug("tideonsummary: %s" % tideonsummary)
if tideonsummary == "yes":
config['SUMMARY']['showtideonsummary'] = "True"
logger.debug("SUMMARY/showtideonsummary is now TRUE")
print("Changes saved.")
elif tideonsummary == "no":
config['SUMMARY']['showtideonsummary'] = "False"
logger.debug("SUMMARY/showtideonsummary is now FALSE")
print("Changes saved.")
else:
print("Could not understand what you inputted. Defaulting to 'False'.")
config['SUMMARY']['showtideonsummary'] = "False"
logger.debug("SUMMARY/showtideonsummary is now FALSE")
print("Changes saved.")
print("", "(29/42)", "When PyWeather boots, would you like hurricane data to be fetched?",
"Initial loading times will increase when this is on, but hurricane data will load faster.",
"This can use an extra API call, especially when you fetch hurricane data but don't check it",
"in PyWeather. By default, this is disabled.",
"Yes or No.", sep="\n")
hurricaneprefetch = input("Input here: ").lower()
logger.debug("hurricaneprefetch: %s" % hurricaneprefetch)
if hurricaneprefetch == "yes":
config['PREFETCH']['hurricanedata_atboot'] = 'True'
logger.debug("PREFETCH/hurricanedata_atbooot is now TRUE.")
print("Changes saved.")
elif hurricaneprefetch == "no":
config['PREFETCH']['hurricanedata_atboot'] = 'False'
logger.debug("PREFETCH/hurricanedata_atboot is now FALSE.")
print("Changes saved.")
else:
print("Could not understand what you inputted. Defaulting to 'False'.")
config['PREFETCH']['hurricanedata_atboot'] = 'False'
logger.debug("PREFETCH/hurricanedata_atboot is now FALSE.")
print("Changes saved.")
print("", "(30/42)", "PyWeather has a new feature where you can now easily call your current location at boot.",
"The current location feature allows you to enter 'currentlocation' at boot, and view the weather for your",
"approximate location. However, GeoIP lookups might be inaccurate, especially for mobile users. The GeoIP service",
"uses freegeoip.net. Would you like to enable this service? By default, this is disabled. Yes or No.", sep="\n")
allowgeoipservice = input("Input here: ").lower()
logger.debug("allowgeoipservice: %s" % allowgeoipservice)
if allowgeoipservice == "yes":
config['FIRSTINPUT']['geoipservice_enabled'] = 'True'
logger.debug("FIRSTINPUT/geoipservice_enabled is now TRUE.")
print("Changes saved.")
elif allowgeoipservice == "no":
config['FIRSTINPUT']['geoipservice_enabled'] = 'False'
logger.debug("FIRSTINPUT/geoipservice_enabled is now FALSE.")
else:
print("Could not understand what you inputted. Defaulting to 'False'.")
config['FIRSTINPUT']['geoipservice_enabled'] = 'False'
logger.debug("FIRSTINPUT/geoipservice_enabled is now FALSE.")
print("Changes saved.")
print("", "(31/42)", "PyWeather has a new feature where you can query indivdiual Wunderground PWS stations.",
"You can query any PWS globally by entering pws:<pws ID> when enabled, and where <pws ID> is the ID of the",
"PWS you want to query. However, this can be turned off if you don't want to have extra lines of text at boot,",
"or don't want the ability to query PWSes. By default, this is enabled. Yes or No.", sep="\n")
allowpwsqueries = input("Input here: ").lower()
logger.debug("allowpwsqueries: %s" % allowpwsqueries)
if allowpwsqueries == "yes":
config['FIRSTINPUT']['allow_pwsqueries'] = 'True'
logger.debug("FIRSTINPUT/allow_pwsqueries is now TRUE.")
print("Changes saved.")
elif allowpwsqueries == "no":
config['FIRSTINPUT']['allow_pwsqueries'] = 'False'
logger.debug("FIRSTINPUT/allow_pwsqueries is now FALSE.")
print("Changes saved.")
else:
print("Could not understand what you inputted. Defaulting to 'True'.")
config['FIRSTINPUT']['allow_pwsqueries'] = 'True'
logger.debug("FIRSTINPUT/allow_pwsqueries is now TRUE.")
print("Changes saved.")
print("", "(32/42)", "PyWeather has a new feature where in hurricane data, you can see the nearest city that a hurricane is to.",
"However, this feature uses a separate API (geonames.org), can only work when the hurricane is within 300km of a city,",
"and will drastically increase loading times. You may also run into issues with the default API key hitting rate limits.",
"Despite all of this, would you like to enable the nearest city features for non-forecast hurricane data?",
"Yes or No. By default, this is disabled.", sep="\n")
allownearestcities = input("Input here: ").lower()
logger.debug("allownearestcities: %s" % allownearestcities)
if allownearestcities == "yes":
additional_ncoptions = True
logger.debug("additional_ncoptions: %s" % additional_ncoptions)
config['HURRICANE']['enablenearestcity'] = 'True'
logger.debug("HURRICANE/enablenearestcity is now TRUE.")
print("Changes saved.")
elif allownearestcities == "no":
additional_ncoptions = False
logger.debug("additional_ncoptions: %s" % additional_ncoptions)
config['HURRICANE']['enablenearestcity'] = 'False'
logger.debug("HURRICANE/enablenearestcity is now FALSE.")
print("Changes saved.")
else:
additional_ncoptions = False
logger.debug("additional_ncoptions: %s" % additional_ncoptions)
print("Could not understand what you inputted. Defaulting to 'False'.")
config['HURRICANE']['enablenearestcity'] = 'False'
logger.debug("HURRICANE/enablenearestcity is now FALSE.")
print("Changes saved.")
# <--- Additional options for nearest city feature --->
if additional_ncoptions is True:
print("", "(33/42)", "By default, the nearest city feature is only enabled on the current data screen of hurricane data.",
"You can enable the nearest city feature to be enabled on forecast data. However, loading hurricane data becomes much",
"slower. By default, this is disabled. Yes or No.", sep="\n")
enable_ncforecast = input("Input here: ").lower()
if enable_ncforecast == "yes":
config['HURRICANE']['enablenearestcity_forecast'] = 'True'
logger.debug("HURRICANE/enablenearestcity_forecast is now TRUE.")
print("Changes saved.")
elif enable_ncforecast == "no":
config['HURRICANE']['enablenearestcity_forecast'] = 'False'
logger.debug("HURRICANE/enablenearestcity_forecast is now FALSE.")
print("Changes saved.")
else:
print("Could not understand your input. Defaulting to 'False'.")
config['HURRICANE']['enablenearestcity_forecast'] = 'False'
logger.debug("HURRICANE/enablenearestcity_forecast is now FALSE.")
print("Changes saved.")
print("", "(34/42)", "By default, PyWeather uses it's own API username for the nearest city features, which should be able to",
"handle PyWeather's user demands just fine. However, if you'd like to use your own account for the API, you may.",
"You can sign up at geonames.org, and follow all the steps. The confirmation letter may take some time to hit your inbox.",
"Would you like to define your own API username? Yes or No. By default, this is no.", sep="\n")
definegeonamesusername = input("Input here: ").lower()
logger.debug("definegeonamesusername: %s" % definegeonamesusername)
if definegeonamesusername == "yes":
# Enter into confirmation loop
while True:
print("Please enter the username that you'll use to access the geonames API.")
geonamesusername = input("Input here: ").lower()
logger.debug("geonamesusername: %s" % geonamesusername)
print("The API username you gave me was: %s" % geonamesusername,
"Is this the username that you'd like to use? Yes or No.",
"Please note that your username will not be validated.", sep="\n")
geonamesconfirmation = input("Input here: ").lower()
confirmurl = 'http://api.geonames.org/findNearbyPlaceNameJSON?lat=19.3&lng=102.2&username=' + geonamesusername + '&radius=300&maxRows=1&cities=cities5000'
logger.debug("geonamesconfirmation: %s ; confirmurl: %s" %
(geonamesconfirmation, confirmurl))
if geonamesconfirmation == "yes":
config['HURRICANE']['api_username'] = geonamesusername
logger.debug("HURRICANE/api_username is now %s" % geonamesusername)
print("Changes saved.")
elif geonamesconfirmation == "no":
continue
else:
print("Input not understood. Will not validate username. If the username is",
"invalid, please change the HURRICANE/api_username option in the config.", sep="\n")
config['HURRICANE']['api_username'] = geonamesusername
logger.debug("HURRICANE/api_username is now %s" % geonamesusername)
print("Changes saved.")
elif definegeonamesusername == "no":
print("Defaulting to the default username for the geonames API.")
else:
print("Input not understood.",
"Defaulting to the default username for the geonames API.", sep="\n")
print("", "(35/42)", "For the nearest city feature, you can define how large a city has to be to show up as a nearest city.",
"You have three options for this. 'small' will set the threshold to cities with a 1,000 population and greater, but this",
"tends to include cities with very few or no people. 'medium' will set the threshold to cities with a 5,000 population",
"and greater, and 'large' for cities that have a population of 10,000 or greater. Please enter either 'small', 'medium'",
"or 'large' below. Default is 'medium'.", sep="\n")
nearestcitysize = input("Input here: ").lower()
logger.debug("nearestcitysize: %s" % nearestcitysize)
if nearestcitysize == "small":
config['HURRICANE']['nearestcitysize'] = 'small'
logger.debug("HURRICANE/nearestcitysize is now 'small'.")
print("Changes saved.")
elif nearestcitysize == "medium":
config['HURRICANE']['nearestcitysize'] = 'medium'
logger.debug("HURRICANE/nearestcitysize is now 'medium'")
print("Changes saved.")
else:
print("Could not understand your input. Defaulting to 'medium'.")
config['HURRICANE']['nearestcitysize'] = 'medium'
logger.debug("HURRICANE/nearestcitysize is now 'medium'.")
print("Changes saved.")
print("", "(36/42)", "PyWeather will now let you enable a favorite locations feature, which allows",
"you to quickly call up to 5 locations in PyWeather. You have the ability to configure your",
"favorite locations in a menu option in PyWeather. By default, this feature is enabled.",
"Yes or No.", sep="\n")
enable_favoritelocations = input("Input here: ").lower()
logger.debug("enable_favoritelocations: %s" % enable_favoritelocations)
if enable_favoritelocations == "yes":
config['FAVORITE LOCATIONS']['enabled'] = 'True'
logger.debug("FAVORITE LOCATIONS/enabled is now 'True'.")
print("Changes saved!")
elif enable_favoritelocations == "no":
config['FAVORITE LOCATIONS']['enabled'] = 'False'
logger.debug("FAVORITE LOCATIONS/enabled is now 'False'.")
print("Changes saved!")
else:
print("Could not understand your input. Defaulting to 'True'.")
config['FAVORITE LOCATIONS']['enabled'] = 'True'
logger.debug("FAVORITE LOCATIONS/enabled is now 'True'.")
print("Changes saved!")
print("", "(37/43)", "PyWeather can now store your previously searched locations.",
"You have the ability to configure your previous locations in a menu option",
"in PyWeather. By default this feature is enabled.",
"Yes or No.", sep="\n")
enable_previouslocations = input("Input here: ").lower()
logger.debug("enable_previouslocations: %s" % enable_previouslocations)
if enable_previouslocations == "yes":
config['PREVIOUS LOCATIONS']['enabled'] = 'True'
logger.debug("PREVIOUS LOCATIONS/enabled is now 'True'.")
print("Changes saved!")
elif enable_previouslocations == "no":
config['PREVIOUS LOCATIONS']['enabled'] = 'False'
logger.debug("PREVIOUS LOCATIONS/enabled is now 'False'.")
print("Changes saved.")
else:
print("Could not understand your input. Defaulting to 'True'.")
config['PREVIOUS LOCATIONS']['enabled'] = 'True'
logger.debug("PREVIOUS LOCATIONS/enabled is now 'True'.")
print("", "(37/42)", "PyWeather by default uses Google's geocoder, which can occasionally have rate limiting issues.",
"To get around this, you can manually use your own API key that you sign up for with Google. This is completely",
"optional, and you can continue past this step and not impede PyWeather's functionality. However, would you like",
"to enable the use of a custom API key for the geocoder? Yes or No.", sep="\n")
enablecustomgeocoderkey = input("Input here: ").lower()
logger.debug("enablecustomgeocoderkey: %s" % enablecustomgeocoderkey)
if enablecustomgeocoderkey == "yes":
print("", "(38/42)", "To sign up for a Google Maps API key, please visit this link: ",
"https://developers.google.com/maps/documentation/javascript/get-api-key",
"Press the button 'Get Key', and wait a minute. Copy and paste the key into the input",
"below. Your API key will NOT be validated. Enter 'exit' to exit this process, and to disable",
"a custom API key.", sep="\n")
customgeocoderkey = input("Input here: ")
logger.debug("customgeocoderkey: %s" % customgeocoderkey)
while True:
print("", "The API key you entered is: %s" % customgeocoderkey,
"Is this the API key you want to use? Yes or No.", sep="\n")
confirmcustomgeocoderkey = input("Input here: ").lower()
logger.debug("confirmcustomgeocoderkey: %s" % confirmcustomgeocoderkey)
if confirmcustomgeocoderkey == "yes":
break
else:
if confirmcustomgeocoderkey != "no":
print("Couldn't understand your input. Please input your API key again.")
print("Please enter the API key you want to use below.")
customgeocoderkey = input("Input here: ")
logger.debug("customgeocoderkey: %s" % customgeocoderkey)
if customgeocoderkey == "exit":
print("Exiting the custom geocoder key process, and disabling a custom geocoder key.")
config['GEOCODER API']['customkey_enabled'] = 'False'
logger.debug("GEOCODER API/customkey_enabled is now FALSE.")
print("Changes saved.")
else:
config['GEOCODER API']['customkey_enabled'] = 'True'
config['GEOCODER API']['customkey'] = str(customgeocoderkey)
logger.debug("GEOCODER API/customkey_enabled is now TRUE.")
print("Changes saved.")
elif enablecustomgeocoderkey == "no":
config['GEOCODER API']['customkey_enabled'] = 'False'
logger.debug("GEOCODER API/customkey_enabled is now FALSE.")
print("Changes saved.")
else:
print("Your input could not be understood. Defaulting to 'False'.")
config['GEOCODER API']['customkey_enabled'] = 'False'
logger.debug("GEOCODER API/customkey_enabled is now FALSE.")
print("Changes saved.")
print("", "(39/42)", "On the summary screen, you can now view a summary of the weather that occurred yesterday.",
"Enabling this will also enable the option to prefetch yesterday's weather at boot in the config file.",
"Please note that enabling this uses 1 extra API call at boot, and will increase PyWeather's loading time.",
"Would you like to turn on showing yesterday's weather on the summary screen? Yes or No. By default, this is",
"disabled.", sep="\n")
showyesterdayonsummary = input("Input here: ").lower()
logger.debug("showyesterdayonsummary: %s" % showyesterdayonsummary)
if showyesterdayonsummary == "yes":
config['SUMMARY']['showyesterdayonsummary'] = 'True'
logger.info("SUMMARY/showyesterdayonsummary is now 'True'.")
config['PREFETCH']['yesterdaydata_atboot'] = 'True'
logger.info("PREFETCH/yesterdaydata_atboot is now 'True'.")
showyesterdayonsummary = True
logger.debug("showyesterdayonsummary: %s" % showyesterdayonsummary)
print("Changes saved.")
elif showyesterdayonsummary == "no":
config['SUMMARY']['showyesterdayonsummary'] = 'False'
logger.info("SUMMARY/showyesterdayonsummary is now 'False'.")
showyesterdayonsummary = False
logger.debug("showyesterdayonsummary: %s" % showyesterdayonsummary)
print("Changes saved.")
else:
print("Your input could not be understood. Defaulting to 'False'.")
config['SUMMARY']['showyesterdayonsummary'] = 'False'
logger.info("SUMMARY/showyesterdayonsumary is now 'False'.")
showyesterdayonsummary = False
logger.debug("showyesterdayonsummary: %s" % showyesterdayonsummary)
print("Changes saved.")
if showyesterdayonsummary is False:
print("", "(40/42)", "When PyWeather boots up, you can have the option to have yesterday's weather data",
"prefetched during bootup. Enabling this will use 1 extra API call at boot, and will increase PyWeather's",
"loading time. Would you like to enable prefetching yesterday's weather data on boot? Yes or No.",
"By default, this is disabled.", sep="\n")
prefetchyesterdayatboot = input("Input here: ").lower()
logger.debug("prefetchyesterdayatboot: %s" % prefetchyesterdayatboot)
if prefetchyesterdayatboot == "yes":
config['PREFETCH']['yesterdaydata_atboot'] = 'True'
logger.info("PREFETCH/yesterdaydata_atboot is now 'True'.")
print("Changes saved.")
elif prefetchyesterdayatboot == "no":
config['PREFETCH']['yesterdaydata_atboot'] = 'False'
logger.info("PREFETCH/yesterdaydata_atboot is now 'False'.")
print("Changes saved.")
else:
print("Your input could not be understood. Defaulting to 'False'.")
config['PREFETCH']['yesterdaydata_atboot'] = 'False'
logger.info("PREFETCH/yesterdaydata_atboot is now 'False'.")
print("Changes saved.")
print("", "(41/42)", "In 0.6.3 beta and newer, you have the option to enable extra tools for PyWeather.",
"Extra tools are diagnostic tools, and so far you can see cache timings in PyWeather, and more extra tools",
"will be added as time goes on. Would you like to enable the ability to use extra tools? Yes or No. By default",
"this is disabled.", sep="\n")
enableextratools = input("Input here: ").lower()
logger.debug("enableextratools: %s" % enableextratools)
if enableextratools == "yes":
config['UI']['extratools_enabled'] = 'True'
logger.info("UI/extratools_enabled is now 'True'.")
print("Changes saved.")
elif enableextratools == "no":
config['UI']['extratools_enabled'] = 'False'
logger.info("UI/extratools_enabled is now 'False'.")
print("Changes saved.")
else:
print("Could not understand your input. Defaulting to 'False'.")
config['UI']['extratools_enabled'] = 'False'
logger.info("UI/extratools_enabled is now 'False'.")
print("Changes saved.")
print("", "(42/42)", "PyWeather's geocoder usually uses https, but issues have been discovered",
"on some platforms, where the geocoder cannot operate in the https mode. If you press enter",
"PyWeather will automatically detect which scheme to use. If you are an advanced user, and want",
"to configure the scheme yourself, enter advancedconfig at the prompt below.", sep="\n")
configuregeopyscheme = input("Input here: ").lower()
logger.debug("configuregeopyscheme: %s" % configuregeopyscheme)
if configuregeopyscheme == "advancedconfig":
print("Which geopy scheme would you like to use? 'https' works on most platforms",
"but 'http' is needed on some platforms (OS X, as an example). Please input",
"'https' or 'http' below.")
geopyschemetype = input("Input here: ").lower()
logger.debug("geopyschemetype: %s" % geopyschemetype)
if geopyschemetype == "https":
config['GEOCODER']['scheme'] = 'https'
logger.debug("GEOCODER/scheme is now 'https'")
print("Changes saved. Geocoder settings will not be validated.")
elif geopyschemetype == "http":
config['GEOCODER']['scheme'] = 'http'
logger.debug("GEOCODER/scheme is now 'http'")
print("Changes saved. Geocoder settings will not be validated.")
else:
print("Your input could not be understood. Defaulting to 'https'.")
logger.debug("GEOCODER/scheme is now 'https'")
print("Changes saved. Geocoder settings will not be validated.")
else:
print("Now automatically configuring your geopy scheme.")
# HTTPS validation
import geopy.exc  # needed for the GeocoderServiceError handlers below
from geopy import GoogleV3
geocoder = GoogleV3(scheme='https')
# I've found that one "warm up request", and then waiting ~15 seconds somehow helps determine if a platform is HTTP/HTTPS compatible.
try:
geocoder.geocode("123 5th Avenue, New York, NY")
except:
logger.debug("Warm up geocode failed.")
print("I've just completed a warm-up geocode. However, sometimes a rate limit will",
"occur after this geocode. I've paused the setup process for 10 seconds. This",
"should help with figuring out what scheme works on your OS.", sep="\n")
time.sleep(10)
try:
geocoder.geocode("123 5th Avenue, New York, NY")
print("The geocoder can operate with HTTPS enabled on your OS. Saving these changes...")
config['GEOCODER']['scheme'] = 'https'
logger.debug("GEOCODER/scheme is now 'https'")
print("Changes saved.")
except geopy.exc.GeocoderServiceError:
print("Geopy probably can't run without HTTPS (or your internet went down). Trying HTTP as the scheme...")
geocoder = GoogleV3(scheme='http')
print("Waiting 10 seconds to avoid rate limiting after the previous geocode...")
time.sleep(10)
try:
geocoder.geocode("123 5th Avenue, New York, NY")
print("The geocoder can operate, but without HTTPS enabled on your OS. Saving these changes...")
config['GEOCODER']['scheme'] = 'http'
logger.debug("GEOCODER/scheme is now 'http'")
print("Changes saved.")
except geopy.exc.GeocoderServiceError:
print("You probably don't have an internet connection, as HTTPS and HTTP validation both failed.",
"Defaulting to HTTP as the geopy scheme...", sep="\n")
config['GEOCODER']['scheme'] = 'http'
logger.debug("GEOCODER/scheme is now 'http'")
print("Changes saved.")
# Note: the prefetch-yesterday prompt above only appears when showing yesterday's
# weather on the summary is disabled; enabling the summary option already enables
# the prefetch, so both paths end with the same config result.
print("","That's it! Now commiting config changes...", sep="\n")
try:
with open('storage/config.ini', 'w') as configfile:
logger.debug("configfile: %s" % configfile)
config.write(configfile)
print("Changes committed!")
logger.info("Performed operation: config.write(configfile)")
except:
print("The config file couldn't be written to.",
"Make sure the config file can be written to.", sep="\n")
printException()
print("Press enter to exit.")
input()
sys.exit()
print("","Everything is set up and ready to rumble!",
"Enjoy using PyWeather! If you have any issues, please report them on GitHub!",
"Press enter to continue.", sep="\n")
input()
sys.exit()
| gpl-3.0 |
jkarnows/scikit-learn | sklearn/neighbors/tests/test_dist_metrics.py | 230 | 5234 | import itertools
import pickle
import numpy as np
from numpy.testing import assert_array_almost_equal
import scipy
from scipy.spatial.distance import cdist
from sklearn.neighbors.dist_metrics import DistanceMetric
from nose import SkipTest
def dist_func(x1, x2, p):
return np.sum((x1 - x2) ** p) ** (1. / p)
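# Illustrative check: dist_func(np.array([0., 0.]), np.array([3., 4.]), 2)
# returns 5.0, the ordinary Euclidean (Minkowski p=2) distance.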
def cmp_version(version1, version2):
version1 = tuple(map(int, version1.split('.')[:2]))
version2 = tuple(map(int, version2.split('.')[:2]))
if version1 < version2:
return -1
elif version1 > version2:
return 1
else:
return 0
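# Illustrative behavior: only the first two dot-separated fields are compared,
# so cmp_version('0.9.1', '0.10') == -1 and cmp_version('1.0', '1.0.5') == 0.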
class TestMetrics:
def __init__(self, n1=20, n2=25, d=4, zero_frac=0.5,
rseed=0, dtype=np.float64):
np.random.seed(rseed)
self.X1 = np.random.random((n1, d)).astype(dtype)
self.X2 = np.random.random((n2, d)).astype(dtype)
# make boolean arrays: ones and zeros
self.X1_bool = self.X1.round(0)
self.X2_bool = self.X2.round(0)
V = np.random.random((d, d))
VI = np.dot(V, V.T)
self.metrics = {'euclidean': {},
'cityblock': {},
'minkowski': dict(p=(1, 1.5, 2, 3)),
'chebyshev': {},
'seuclidean': dict(V=(np.random.random(d),)),
'wminkowski': dict(p=(1, 1.5, 3),
w=(np.random.random(d),)),
'mahalanobis': dict(VI=(VI,)),
'hamming': {},
'canberra': {},
'braycurtis': {}}
self.bool_metrics = ['matching', 'jaccard', 'dice',
'kulsinski', 'rogerstanimoto', 'russellrao',
'sokalmichener', 'sokalsneath']
def test_cdist(self):
for metric, argdict in self.metrics.items():
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
D_true = cdist(self.X1, self.X2, metric, **kwargs)
yield self.check_cdist, metric, kwargs, D_true
for metric in self.bool_metrics:
D_true = cdist(self.X1_bool, self.X2_bool, metric)
yield self.check_cdist_bool, metric, D_true
def check_cdist(self, metric, kwargs, D_true):
if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0:
raise SkipTest("Canberra distance incorrect in scipy < 0.9")
dm = DistanceMetric.get_metric(metric, **kwargs)
D12 = dm.pairwise(self.X1, self.X2)
assert_array_almost_equal(D12, D_true)
def check_cdist_bool(self, metric, D_true):
dm = DistanceMetric.get_metric(metric)
D12 = dm.pairwise(self.X1_bool, self.X2_bool)
assert_array_almost_equal(D12, D_true)
def test_pdist(self):
for metric, argdict in self.metrics.items():
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
D_true = cdist(self.X1, self.X1, metric, **kwargs)
yield self.check_pdist, metric, kwargs, D_true
for metric in self.bool_metrics:
D_true = cdist(self.X1_bool, self.X1_bool, metric)
yield self.check_pdist_bool, metric, D_true
def check_pdist(self, metric, kwargs, D_true):
if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0:
raise SkipTest("Canberra distance incorrect in scipy < 0.9")
dm = DistanceMetric.get_metric(metric, **kwargs)
D12 = dm.pairwise(self.X1)
assert_array_almost_equal(D12, D_true)
def check_pdist_bool(self, metric, D_true):
dm = DistanceMetric.get_metric(metric)
D12 = dm.pairwise(self.X1_bool)
assert_array_almost_equal(D12, D_true)
def test_haversine_metric():
def haversine_slow(x1, x2):
return 2 * np.arcsin(np.sqrt(np.sin(0.5 * (x1[0] - x2[0])) ** 2
+ np.cos(x1[0]) * np.cos(x2[0]) *
np.sin(0.5 * (x1[1] - x2[1])) ** 2))
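# haversine_slow spells out the haversine great-circle formula for two points
# given as (lat, lon) in radians:
#   d = 2 * arcsin(sqrt(sin((lat1 - lat2) / 2) ** 2
#                       + cos(lat1) * cos(lat2) * sin((lon1 - lon2) / 2) ** 2))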
X = np.random.random((10, 2))
haversine = DistanceMetric.get_metric("haversine")
D1 = haversine.pairwise(X)
D2 = np.zeros_like(D1)
for i, x1 in enumerate(X):
for j, x2 in enumerate(X):
D2[i, j] = haversine_slow(x1, x2)
assert_array_almost_equal(D1, D2)
assert_array_almost_equal(haversine.dist_to_rdist(D1),
np.sin(0.5 * D2) ** 2)
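# Note: the assertion above relies on the haversine metric's internal "reduced"
# distance being rdist = sin(dist / 2) ** 2, which increases monotonically with
# the true distance and is cheaper to compute.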
def test_pyfunc_metric():
X = np.random.random((10, 3))
euclidean = DistanceMetric.get_metric("euclidean")
pyfunc = DistanceMetric.get_metric("pyfunc", func=dist_func, p=2)
# Check if both callable metric and predefined metric initialized
# DistanceMetric object is picklable
euclidean_pkl = pickle.loads(pickle.dumps(euclidean))
pyfunc_pkl = pickle.loads(pickle.dumps(pyfunc))
D1 = euclidean.pairwise(X)
D2 = pyfunc.pairwise(X)
D1_pkl = euclidean_pkl.pairwise(X)
D2_pkl = pyfunc_pkl.pairwise(X)
assert_array_almost_equal(D1, D2)
assert_array_almost_equal(D1_pkl, D2_pkl)
| bsd-3-clause |
hsharsha/depot_tools | third_party/boto/pyami/helloworld.py | 120 | 1247 | # Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.pyami.scriptbase import ScriptBase
class HelloWorld(ScriptBase):
def main(self):
self.log('Hello World!!!')
| bsd-3-clause |
Infusion-OS/android_external_skia | tools/skpdiff/skpdiff_server.py | 161 | 24230 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import BaseHTTPServer
import json
import os
import os.path
import re
import subprocess
import sys
import tempfile
import urllib2
# Grab the script path because that is where all the static assets are
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
# Find the tools directory for python imports
TOOLS_DIR = os.path.dirname(SCRIPT_DIR)
# Find the root of the skia trunk for finding skpdiff binary
SKIA_ROOT_DIR = os.path.dirname(TOOLS_DIR)
# Find the default location of gm expectations
DEFAULT_GM_EXPECTATIONS_DIR = os.path.join(SKIA_ROOT_DIR, 'expectations', 'gm')
# Imports from within Skia
if TOOLS_DIR not in sys.path:
sys.path.append(TOOLS_DIR)
GM_DIR = os.path.join(SKIA_ROOT_DIR, 'gm')
if GM_DIR not in sys.path:
sys.path.append(GM_DIR)
import gm_json
import jsondiff
# A simple dictionary of file name extensions to MIME types. The empty string
# entry is used as the default when no extension was given or if the extension
# has no entry in this dictionary.
MIME_TYPE_MAP = {'': 'application/octet-stream',
'html': 'text/html',
'css': 'text/css',
'png': 'image/png',
'js': 'application/javascript',
'json': 'application/json'
}
IMAGE_FILENAME_RE = re.compile(gm_json.IMAGE_FILENAME_PATTERN)
SKPDIFF_INVOKE_FORMAT = '{} --jsonp=false -o {} -f {} {}'
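# For illustration (hypothetical paths), the formatted command looks like:
#   /path/to/skpdiff --jsonp=false -o /tmp/skpdiff_output.json -f expected/ actual/
# matching the argument order used in ExpectationsManager below.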
def get_skpdiff_path(user_path=None):
"""Find the skpdiff binary.
@param user_path If none, searches in Release and Debug out directories of
the skia root. If set, checks that the path is a real file and
returns it.
"""
skpdiff_path = None
possible_paths = []
# Use the user given path, or try out some good default paths.
if user_path:
possible_paths.append(user_path)
else:
possible_paths.append(os.path.join(SKIA_ROOT_DIR, 'out',
'Release', 'skpdiff'))
possible_paths.append(os.path.join(SKIA_ROOT_DIR, 'out',
'Release', 'skpdiff.exe'))
possible_paths.append(os.path.join(SKIA_ROOT_DIR, 'out',
'Debug', 'skpdiff'))
possible_paths.append(os.path.join(SKIA_ROOT_DIR, 'out',
'Debug', 'skpdiff.exe'))
# Use the first path that actually points to the binary
for possible_path in possible_paths:
if os.path.isfile(possible_path):
skpdiff_path = possible_path
break
# If skpdiff was not found, print out diagnostic info for the user.
if skpdiff_path is None:
print('Could not find skpdiff binary. Either build it into the ' +
'default directory, or specify the path on the command line.')
print('skpdiff paths tried:')
for possible_path in possible_paths:
print(' ', possible_path)
return skpdiff_path
def download_file(url, output_path):
"""Download the file at url and place it in output_path"""
reader = urllib2.urlopen(url)
with open(output_path, 'wb') as writer:
writer.write(reader.read())
def download_gm_image(image_name, image_path, hash_val):
"""Download the gm result into the given path.
@param image_name The GM file name, for example imageblur_gpu.png.
@param image_path Path to place the image.
@param hash_val The hash value of the image.
"""
if hash_val is None:
return
# Separate the test name from a image name
image_match = IMAGE_FILENAME_RE.match(image_name)
test_name = image_match.group(1)
# Calculate the URL of the requested image
image_url = gm_json.CreateGmActualUrl(
test_name, gm_json.JSONKEY_HASHTYPE_BITMAP_64BITMD5, hash_val)
# Download the image as requested
download_file(image_url, image_path)
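# Illustrative example (hypothetical name): for image_name 'imageblur_gpu.png',
# IMAGE_FILENAME_RE yields test_name 'imageblur'; the download URL is then built
# via gm_json.CreateGmActualUrl(test_name, JSONKEY_HASHTYPE_BITMAP_64BITMD5, hash_val).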
def get_image_set_from_skpdiff(skpdiff_records):
"""Get the set of all images references in the given records.
@param skpdiff_records An array of records, which are dictionary objects.
"""
expected_set = frozenset([r['baselinePath'] for r in skpdiff_records])
actual_set = frozenset([r['testPath'] for r in skpdiff_records])
return expected_set | actual_set
def set_expected_hash_in_json(expected_results_json, image_name, hash_value):
"""Set the expected hash for the object extracted from
expected-results.json. Note that this only work with bitmap-64bitMD5 hash
types.
@param expected_results_json The Python dictionary with the results to
modify.
@param image_name The name of the image to set the hash of.
@param hash_value The hash to set for the image.
"""
expected_results = expected_results_json[gm_json.JSONKEY_EXPECTEDRESULTS]
if image_name in expected_results:
expected_results[image_name][gm_json.JSONKEY_EXPECTEDRESULTS_ALLOWEDDIGESTS][0][1] = hash_value
else:
expected_results[image_name] = {
gm_json.JSONKEY_EXPECTEDRESULTS_ALLOWEDDIGESTS:
[
[
gm_json.JSONKEY_HASHTYPE_BITMAP_64BITMD5,
hash_value
]
]
}
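# Sketch of the structure this function writes, with key constants shown
# symbolically (their literal string values live in gm_json):
#   { JSONKEY_EXPECTEDRESULTS: {
#       'imageblur_gpu.png': {
#           JSONKEY_EXPECTEDRESULTS_ALLOWEDDIGESTS: [
#               [JSONKEY_HASHTYPE_BITMAP_64BITMD5, <hash_value>]]}}}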
def get_head_version(path):
"""Get the version of the file at the given path stored inside the HEAD of
the git repository. It is returned as a string.
@param path The path of the file whose HEAD is returned. It is assumed the
path is inside a git repo rooted at SKIA_ROOT_DIR.
"""
# git-show will not work with absolute paths. This ensures we give it a path
# relative to the skia root. This path also has to use forward slashes, even
# on windows.
git_path = os.path.relpath(path, SKIA_ROOT_DIR).replace('\\', '/')
git_show_proc = subprocess.Popen(['git', 'show', 'HEAD:' + git_path],
stdout=subprocess.PIPE)
# When invoked outside a shell, git will output the last committed version
# of the file directly to stdout.
git_version_content, _ = git_show_proc.communicate()
return git_version_content
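# Equivalent shell invocation (hypothetical repo-relative path):
#   git show HEAD:expectations/gm/some-builder/expected-results.json
# which prints the last committed version of that file to stdout.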
class GMInstance:
"""Information about a GM test result on a specific device:
- device_name = the name of the device that rendered it
- image_name = the GM test name and config
- expected_hash = the current expected hash value
- actual_hash = the actual hash value
- is_rebaselined = True if actual_hash is what is currently in the expected
results file, False otherwise.
"""
def __init__(self,
device_name, image_name,
expected_hash, actual_hash,
is_rebaselined):
self.device_name = device_name
self.image_name = image_name
self.expected_hash = expected_hash
self.actual_hash = actual_hash
self.is_rebaselined = is_rebaselined
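# Illustrative construction (all values hypothetical):
#   GMInstance('base-android-nexus-10', 'imageblur_gpu.png',
#              '11111111111111111111', '22222222222222222222', False)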
class ExpectationsManager:
def __init__(self, expectations_dir, expected_name, updated_name,
skpdiff_path):
"""
@param expectations_dir The directory to traverse for results files.
This should resemble expectations/gm in the Skia trunk.
@param expected_name The name of the expected result files. These
are in the format of expected-results.json.
@param updated_name The name of the updated expected result files.
Normally this matches --expectations-filename-output for the
rebaseline.py tool.
@param skpdiff_path The path used to execute the skpdiff command.
"""
self._expectations_dir = expectations_dir
self._expected_name = expected_name
self._updated_name = updated_name
self._skpdiff_path = skpdiff_path
self._generate_gm_comparison()
def _generate_gm_comparison(self):
"""Generate all the data needed to compare GMs:
- determine which GMs changed
- download the changed images
- compare them with skpdiff
"""
# Get the expectations and compare them with actual hashes
self._get_expectations()
# Create a temporary file tree that makes sense for skpdiff to operate
# on. We take the realpath of the new temp directory because some OSs
# (*cough* osx) put the temp directory behind a symlink that gets
# resolved later down the pipeline and breaks the image map.
image_output_dir = os.path.realpath(tempfile.mkdtemp('skpdiff'))
expected_image_dir = os.path.join(image_output_dir, 'expected')
actual_image_dir = os.path.join(image_output_dir, 'actual')
os.mkdir(expected_image_dir)
os.mkdir(actual_image_dir)
# Download expected and actual images that differed into the temporary
# file tree.
self._download_expectation_images(expected_image_dir, actual_image_dir)
# Invoke skpdiff with our downloaded images and place its results in the
# temporary directory.
self._skpdiff_output_path = os.path.join(image_output_dir,
'skpdiff_output.json')
skpdiff_cmd = SKPDIFF_INVOKE_FORMAT.format(self._skpdiff_path,
self._skpdiff_output_path,
expected_image_dir,
actual_image_dir)
os.system(skpdiff_cmd)
self._load_skpdiff_output()
def _get_expectations(self):
"""Fills self._expectations with GMInstance objects for each test whose
expectation is different between the following two files:
- the local filesystem's updated results file
- git's head version of the expected results file
"""
differ = jsondiff.GMDiffer()
self._expectations = []
for root, dirs, files in os.walk(self._expectations_dir):
for expectation_file in files:
# There are many files in the expectations directory. We only
# care about expected results.
if expectation_file != self._expected_name:
continue
# Get the name of the results file, and be sure there is an
# updated result to compare against. If there is not, there is
# no point in diffing this device.
expected_file_path = os.path.join(root, self._expected_name)
updated_file_path = os.path.join(root, self._updated_name)
if not os.path.isfile(updated_file_path):
continue
# Always get the expected results from git because we may have
# changed them in a previous instance of the server.
expected_contents = get_head_version(expected_file_path)
updated_contents = None
with open(updated_file_path, 'rb') as updated_file:
updated_contents = updated_file.read()
# Read the expected results on disk to determine what we've
# already rebaselined.
committed_contents = None
with open(expected_file_path, 'rb') as expected_file:
committed_contents = expected_file.read()
# Find all expectations that did not match.
expected_diff = differ.GenerateDiffDictFromStrings(
expected_contents,
updated_contents)
# Generate a set of images that have already been rebaselined
# onto disk.
rebaselined_diff = differ.GenerateDiffDictFromStrings(
expected_contents,
committed_contents)
rebaselined_set = set(rebaselined_diff.keys())
# The name of the device corresponds to the name of the folder
# we are in.
device_name = os.path.basename(root)
# Store old and new versions of the expectation for each GM
for image_name, hashes in expected_diff.iteritems():
self._expectations.append(
GMInstance(device_name, image_name,
hashes['old'], hashes['new'],
image_name in rebaselined_set))
def _load_skpdiff_output(self):
"""Loads the results of skpdiff and annotates them with whether they
have already been rebaselined or not. The resulting data is store in
self.skpdiff_records."""
self.skpdiff_records = None
with open(self._skpdiff_output_path, 'rb') as skpdiff_output_file:
self.skpdiff_records = json.load(skpdiff_output_file)['records']
for record in self.skpdiff_records:
record['isRebaselined'] = self.image_map[record['baselinePath']][1].is_rebaselined
def _download_expectation_images(self, expected_image_dir, actual_image_dir):
"""Download the expected and actual images for the _expectations array.
@param expected_image_dir The directory to download expected images
into.
@param actual_image_dir The directory to download actual images into.
"""
image_map = {}
# Look through expectations and download their images.
for expectation in self._expectations:
# Build appropriate paths to download the images into.
expected_image_path = os.path.join(expected_image_dir,
expectation.device_name + '-' +
expectation.image_name)
actual_image_path = os.path.join(actual_image_dir,
expectation.device_name + '-' +
expectation.image_name)
print('Downloading %s for device %s' % (
expectation.image_name, expectation.device_name))
# Download images
download_gm_image(expectation.image_name,
expected_image_path,
expectation.expected_hash)
download_gm_image(expectation.image_name,
actual_image_path,
expectation.actual_hash)
# Annotate the expectations with where the images were downloaded
# to.
expectation.expected_image_path = expected_image_path
expectation.actual_image_path = actual_image_path
# Map the image paths back to the expectations.
image_map[expected_image_path] = (False, expectation)
image_map[actual_image_path] = (True, expectation)
self.image_map = image_map
def _set_expected_hash(self, device_name, image_name, hash_value):
"""Set the expected hash for the image of the given device. This always
writes directly to the expected results file of the given device
@param device_name The name of the device to write the hash to.
@param image_name The name of the image whose hash to set.
@param hash_value The value of the hash to set.
"""
# Retrieve the expected results file as it is in the working tree
json_path = os.path.join(self._expectations_dir, device_name,
self._expected_name)
expectations = gm_json.LoadFromFile(json_path)
# Set the specified hash.
set_expected_hash_in_json(expectations, image_name, hash_value)
# Write it out to disk using gm_json to keep the formatting consistent.
gm_json.WriteToFile(expectations, json_path)
def commit_rebaselines(self, rebaselines):
"""Sets the expected results file to use the hashes of the images in
        the rebaselines list. If an expected result image is not in rebaselines
at all, the old hash will be used.
@param rebaselines A list of image paths to use the hash of.
"""
# Reset all expectations to their old hashes because some of them may
# have been set to the new hash by a previous call to this function.
for expectation in self._expectations:
expectation.is_rebaselined = False
self._set_expected_hash(expectation.device_name,
expectation.image_name,
expectation.expected_hash)
# Take all the images to rebaseline
for image_path in rebaselines:
# Get the metadata about the image at the path.
is_actual, expectation = self.image_map[image_path]
expectation.is_rebaselined = is_actual
expectation_hash = expectation.actual_hash if is_actual else\
expectation.expected_hash
# Write out that image's hash directly to the expected results file.
self._set_expected_hash(expectation.device_name,
expectation.image_name,
expectation_hash)
self._load_skpdiff_output()
class SkPDiffHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def send_file(self, file_path):
# Grab the extension if there is one
extension = os.path.splitext(file_path)[1]
if len(extension) >= 1:
extension = extension[1:]
# Determine the MIME type of the file from its extension
mime_type = MIME_TYPE_MAP.get(extension, MIME_TYPE_MAP[''])
# Open the file and send it over HTTP
if os.path.isfile(file_path):
with open(file_path, 'rb') as sending_file:
self.send_response(200)
self.send_header('Content-type', mime_type)
self.end_headers()
self.wfile.write(sending_file.read())
else:
self.send_error(404)
def serve_if_in_dir(self, dir_path, file_path):
        # Determine if the file exists relative to the given dir_path AND exists
        # under the dir_path. This prevents accidentally serving files outside
        # the intended directory via symlinks or '../'.
real_path = os.path.normpath(os.path.join(dir_path, file_path))
if os.path.commonprefix([real_path, dir_path]) == dir_path:
if os.path.isfile(real_path):
self.send_file(real_path)
return True
return False
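    # For example, a request for '../secret' normalizes to a path outside
    # dir_path, so the commonprefix test above fails and nothing is served:
    #
    #   >>> real_path = os.path.normpath(os.path.join('/srv/www', '../secret'))
    #   >>> real_path
    #   '/srv/secret'
    #   >>> os.path.commonprefix([real_path, '/srv/www']) == '/srv/www'
    #   False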
def do_GET(self):
# Simple rewrite rule of the root path to 'viewer.html'
if self.path == '' or self.path == '/':
self.path = '/viewer.html'
# The [1:] chops off the leading '/'
file_path = self.path[1:]
        # Handle skpdiff_output.json manually because it was processed by the
        # server when it was started and does not exist as a file.
if file_path == 'skpdiff_output.json':
self.send_response(200)
self.send_header('Content-type', MIME_TYPE_MAP['json'])
self.end_headers()
# Add JSONP padding to the JSON because the web page expects it. It
# expects it because it was designed to run with or without a web
# server. Without a web server, the only way to load JSON is with
# JSONP.
skpdiff_records = self.server.expectations_manager.skpdiff_records
self.wfile.write('var SkPDiffRecords = ')
json.dump({'records': skpdiff_records}, self.wfile)
self.wfile.write(';')
return
# Attempt to send static asset files first.
if self.serve_if_in_dir(SCRIPT_DIR, file_path):
return
# WARNING: Serving any file the user wants is incredibly insecure. Its
# redeeming quality is that we only serve gm files on a white list.
if self.path in self.server.image_set:
self.send_file(self.path)
return
# If no file to send was found, just give the standard 404
self.send_error(404)
def do_POST(self):
if self.path == '/commit_rebaselines':
content_length = int(self.headers['Content-length'])
request_data = json.loads(self.rfile.read(content_length))
rebaselines = request_data['rebaselines']
self.server.expectations_manager.commit_rebaselines(rebaselines)
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
self.wfile.write('{"success":true}')
return
        # If we have no handler for this path, give 'em the 404
self.send_error(404)
def run_server(expectations_manager, port=8080):
# It's important to parse the results file so that we can make a set of
# images that the web page might request.
skpdiff_records = expectations_manager.skpdiff_records
image_set = get_image_set_from_skpdiff(skpdiff_records)
# Do not bind to interfaces other than localhost because the server will
# attempt to serve files relative to the root directory as a last resort
# before 404ing. This means all of your files can be accessed from this
# server, so DO NOT let this server listen to anything but localhost.
server_address = ('127.0.0.1', port)
http_server = BaseHTTPServer.HTTPServer(server_address, SkPDiffHandler)
http_server.image_set = image_set
http_server.expectations_manager = expectations_manager
print('Navigate thine browser to: http://{}:{}/'.format(*server_address))
http_server.serve_forever()
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--port', '-p', metavar='PORT',
type=int,
default=8080,
help='port to bind the server to; ' +
'defaults to %(default)s',
)
parser.add_argument('--expectations-dir', metavar='EXPECTATIONS_DIR',
default=DEFAULT_GM_EXPECTATIONS_DIR,
help='path to the gm expectations; ' +
'defaults to %(default)s'
)
parser.add_argument('--expected',
metavar='EXPECTATIONS_FILE_NAME',
default='expected-results.json',
help='the file name of the expectations JSON; ' +
'defaults to %(default)s'
)
parser.add_argument('--updated',
metavar='UPDATED_FILE_NAME',
default='updated-results.json',
help='the file name of the updated expectations JSON;' +
' defaults to %(default)s'
)
parser.add_argument('--skpdiff-path', metavar='SKPDIFF_PATH',
default=None,
help='the path to the skpdiff binary to use; ' +
'defaults to out/Release/skpdiff or out/Default/skpdiff'
)
args = vars(parser.parse_args()) # Convert args into a python dict
# Make sure we have access to an skpdiff binary
skpdiff_path = get_skpdiff_path(args['skpdiff_path'])
if skpdiff_path is None:
sys.exit(1)
# Print out the paths of things for easier debugging
print('script dir :', SCRIPT_DIR)
print('tools dir :', TOOLS_DIR)
print('root dir :', SKIA_ROOT_DIR)
print('expectations dir :', args['expectations_dir'])
print('skpdiff path :', skpdiff_path)
expectations_manager = ExpectationsManager(args['expectations_dir'],
args['expected'],
args['updated'],
skpdiff_path)
run_server(expectations_manager, port=args['port'])
if __name__ == '__main__':
main()
| bsd-3-clause |
vabs22/zulip | zerver/migrations/0070_userhotspot.py | 9 | 1032 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-28 00:22
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('zerver', '0069_realmauditlog_extra_data'),
]
operations = [
migrations.CreateModel(
name='UserHotspot',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('hotspot', models.CharField(max_length=30)),
('timestamp', models.DateTimeField(default=django.utils.timezone.now)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.AlterUniqueTogether(
name='userhotspot',
unique_together=set([('user', 'hotspot')]),
),
]
| apache-2.0 |
mehtapgundogan/Tellal | env/lib/python2.7/site-packages/pip/vcs/bazaar.py | 35 | 4455 | from __future__ import absolute_import
import logging
import os
import tempfile
import re
# TODO: Get this into six.moves.urllib.parse
try:
from urllib import parse as urllib_parse
except ImportError:
import urlparse as urllib_parse
from pip.utils import rmtree, display_path
from pip.vcs import vcs, VersionControl
from pip.download import path_to_url
logger = logging.getLogger(__name__)
class Bazaar(VersionControl):
name = 'bzr'
dirname = '.bzr'
repo_name = 'branch'
schemes = (
'bzr', 'bzr+http', 'bzr+https', 'bzr+ssh', 'bzr+sftp', 'bzr+ftp',
'bzr+lp',
)
def __init__(self, url=None, *args, **kwargs):
super(Bazaar, self).__init__(url, *args, **kwargs)
        # Python >= 2.7.4 and >= 3.3 don't have uses_fragment or non_hierarchical
# Register lp but do not expose as a scheme to support bzr+lp.
if getattr(urllib_parse, 'uses_fragment', None):
urllib_parse.uses_fragment.extend(['lp'])
urllib_parse.non_hierarchical.extend(['lp'])
def export(self, location):
"""
Export the Bazaar repository at the url to the destination location
"""
temp_dir = tempfile.mkdtemp('-export', 'pip-')
self.unpack(temp_dir)
if os.path.exists(location):
# Remove the location to make sure Bazaar can export it correctly
rmtree(location)
try:
self.run_command(['export', location], cwd=temp_dir,
filter_stdout=self._filter, show_stdout=False)
finally:
rmtree(temp_dir)
def switch(self, dest, url, rev_options):
self.run_command(['switch', url], cwd=dest)
def update(self, dest, rev_options):
self.run_command(['pull', '-q'] + rev_options, cwd=dest)
def obtain(self, dest):
url, rev = self.get_url_rev()
if rev:
rev_options = ['-r', rev]
rev_display = ' (to revision %s)' % rev
else:
rev_options = []
rev_display = ''
if self.check_destination(dest, url, rev_options, rev_display):
logger.info(
'Checking out %s%s to %s',
url,
rev_display,
display_path(dest),
)
self.run_command(['branch', '-q'] + rev_options + [url, dest])
def get_url_rev(self):
        # hotfix the URL scheme after removing bzr+ from bzr+ssh://; re-add it
url, rev = super(Bazaar, self).get_url_rev()
if url.startswith('ssh://'):
url = 'bzr+' + url
return url, rev
def get_url(self, location):
urls = self.run_command(['info'], show_stdout=False, cwd=location)
for line in urls.splitlines():
line = line.strip()
for x in ('checkout of branch: ',
'parent branch: '):
if line.startswith(x):
repo = line.split(x)[1]
if self._is_local_repository(repo):
return path_to_url(repo)
return repo
return None
def get_revision(self, location):
revision = self.run_command(
['revno'], show_stdout=False, cwd=location)
return revision.splitlines()[-1]
def get_tag_revs(self, location):
tags = self.run_command(
['tags'], show_stdout=False, cwd=location)
tag_revs = []
for line in tags.splitlines():
tags_match = re.search(r'([.\w-]+)\s*(.*)$', line)
if tags_match:
tag = tags_match.group(1)
rev = tags_match.group(2)
tag_revs.append((rev.strip(), tag.strip()))
return dict(tag_revs)
def get_src_requirement(self, dist, location, find_tags):
repo = self.get_url(location)
if not repo:
return None
if not repo.lower().startswith('bzr:'):
repo = 'bzr+' + repo
egg_project_name = dist.egg_name().split('-', 1)[0]
current_rev = self.get_revision(location)
tag_revs = self.get_tag_revs(location)
if current_rev in tag_revs:
# It's a tag
full_egg_name = '%s-%s' % (egg_project_name, tag_revs[current_rev])
else:
full_egg_name = '%s-dev_r%s' % (dist.egg_name(), current_rev)
return '%s@%s#egg=%s' % (repo, current_rev, full_egg_name)
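    # As a rough illustration (names are placeholders): a checkout of
    # 'http://example.com/repo' at untagged revision 42 would yield a
    # requirement such as
    # 'bzr+http://example.com/repo@42#egg=Project-1.0-dev_r42'.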
vcs.register(Bazaar)
| gpl-2.0 |
lambdaq/pytr | core.py | 1 | 7912 | #!/usr/bin/env python
# coding: utf8
# from gevent import monkey
# monkey.patch_all()
import socket
import os, sys
import random, struct
import logging
from collections import deque, Counter, defaultdict
logger = logging.getLogger(__file__)
logger.addHandler(logging.StreamHandler(sys.stderr))
logger.setLevel(logging.ERROR)
class UdpIpParser(object):
"""parse IP+UDP"""
def __init__(self, data):
self.data = data
        self.ip_hdrl = ip_hdrl = (data[0] & 0x0F) * 4
self.udp_payload_len = struct.unpack(
'!H',
data[ip_hdrl + 4:ip_hdrl + 6])[0]
@property
def payload(self):
udp_hdrl = 8
return self.data[self.ip_hdrl + udp_hdrl:self.ip_hdrl + self.udp_payload_len]
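# For a typical IPv4 header whose first byte is 0x45, the IHL nibble is 5, so
# ip_hdrl == 5 * 4 == 20 and, with the fixed 8-byte UDP header, the payload
# starts at offset 28. A minimal sanity check (the packet bytes are made up):
#
#   >>> pkt = UdpIpParser(bytearray(b'\x45' + b'\x00' * 31))
#   >>> pkt.ip_hdrl
#   20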
class IpPacket(object):
def __init__(self, data):
self.data = data
self.hdrl = (0x0F & (data[0])) * 4
self.payload = self.data[self.hdrl:]
self.ttl = self.data[8]
@property
def src_ip(self):
return socket.inet_ntoa(str(self.data[12:16]))
@property
def dst_ip(self):
return socket.inet_ntoa(str(self.data[16:20]))
class IcmpParser(object):
hdrl = 8
def __init__(self, data):
self.data = data
@property
def type(self):
return self.data[0]
@property
def payload(self):
return self.data[8:14]
@property
def id(self):
return struct.unpack('>H', self.data[4:6])[0]
def checksum(msg):
# simplest rfc1071. msg is bytearray
s = 0
for i in range(0, len(msg), 2):
w = msg[i] + (msg[i + 1] << 8)
c = s + w
s = (c & 0xffff) + (c >> 16)
return ~s & 0xffff
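# A useful property of this RFC 1071 checksum: once the computed value is
# packed back into the message (little-endian, matching the byte order summed
# above), checksumming the whole message again yields 0:
#
#   >>> msg = bytearray(b'\x08\x00\x00\x00\x12\x34')
#   >>> struct.pack_into('<H', msg, 2, checksum(msg))
#   >>> checksum(msg)
#   0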
def create_ping(id=None):
id = id or random.randint(30000, 65500)
icmp_type = 8
icmp_code = 0
icmp_checksum = 0
icmp_seq = 1
icmp_timestamp = 0
data = '%06d' % id
s = struct.Struct('!bbHHhQ%ss' % len(data))
msg = bytearray(s.size)
s.pack_into(
msg, 0,
icmp_type, icmp_code, icmp_checksum, id,
icmp_seq, icmp_timestamp, data)
# calculate ICMP checksum, which can not be offloaded
cs = checksum(msg)
struct.pack_into('<H', msg, 2, cs)
return msg
def guess_hop(ttl):
if not ttl:
return
if ttl >= 128:
return 256 - ttl
elif 64 < ttl < 128:
return 128 - ttl
else:
return 64 - ttl
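# The heuristic assumes the common initial TTL values of 64, 128 and 256:
#
#   >>> guess_hop(57)   # e.g. Linux-style initial TTL of 64
#   7
#   >>> guess_hop(120)  # e.g. Windows-style initial TTL of 128
#   8
#   >>> guess_hop(250)
#   6
#   >>> guess_hop(0) is None
#   True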
MAX_RETRY = 5
class Tracer(object):
MAX_TTL = 32
def __init__(self):
"""
packet send rate = self.batch_size/self.timeout
        - hosts (passed to run()) is an iterable of target IPs
"""
self.batch_size = 100
self.max_retry = 10
self.timeout = 1
self.running = self.timeout * self.max_retry
self.max_ttl = defaultdict(lambda: self.MAX_TTL)
self.echo_map = {}
self.in_flight = deque(maxlen=self.batch_size) # a list of ip-ttl tuples
self.retries = Counter() # remaining retries
        self.result = defaultdict(dict)  # {ip: {ttl: hop_ip}}
self.sock = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_ICMP)
self.sock.bind(('', 0))
self.sock.settimeout(self.timeout)
def _iter_ip_and_ttl(self, hosts):
"""generate all IPs and their hops need to ping
Need consider retries.
"""
for ip in hosts:
for ttl in xrange(1, self.MAX_TTL + 1):
if ttl >= self.max_ttl[ip]:
break
resp = (ip.strip(), ttl)
self.in_flight.append(resp)
yield resp
def run(self, hosts):
"""would block"""
self.ip_and_ttl = self._iter_ip_and_ttl(hosts)
self.tick()
while self.running > 0:
data = bytearray(1024)
try:
nbytes, addr = self.sock.recvfrom_into(data)
self.on_data(data, addr[0])
except socket.timeout:
self.tick()
return self.result
def _iter_retry(self):
i = 0
while self.in_flight and self.retries:
if not i < len(self.in_flight):
return
key = self.in_flight[i]
if self.retries[key] > 0:
self.retries[key] -= 1
yield key
i += 1
if self.retries[key] <= 0:
self.on_retry_fail(*key)
i -= 1
def on_retry_fail(self, ip, ttl):
self.retries.pop((ip, ttl), None)
self.in_flight.remove((ip, ttl))
if ttl <= self.max_ttl[ip]:
self.result[ip][ttl] = '?'
@property
def on_tick(self):
return getattr(self, '_on_tick', None) or (lambda *args: None)
@on_tick.setter
def on_tick(self, func):
self._on_tick = func
@property
def on_pong(self):
return getattr(self, '_on_pong', None) or (lambda *args: None)
@on_pong.setter
def on_pong(self, func):
self._on_pong = func
def tick(self):
logger.debug('in_flight=%s, retries=%s', len(self.in_flight), self.retries.most_common(4))
self.on_tick(self)
sent = 0
for ip, ttl in self._iter_retry():
self.ping(ip, ttl)
sent += 1
if sent >= self.batch_size:
break
while sent < self.batch_size:
try:
ip, ttl = self.ip_and_ttl.next()
except StopIteration:
self.running -= self.timeout
return
self.ping(ip, ttl)
self.retries[(ip, ttl)] = self.max_retry
sent += 1
def ping(self, ip, ttl):
logger.debug("Ping %s, ttl=%s", ip, ttl)
key = (ip, ttl)
sock = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_ICMP)
sock.bind(('', 0))
sock.setsockopt(socket.SOL_IP, socket.IP_TTL, ttl)
icmp_id = random.randint(30000, 60000)
self.echo_map[icmp_id] = (ip, ttl)
packet = create_ping(icmp_id)
sock.sendto(packet, (ip, 0))
sock.close()
return icmp_id
    def pong(self, ping_ip, pong_ip, ttl):
        # @ToDo: handle multi-path trace-route
        if ping_ip == pong_ip:
            # The echo reply came from the target itself: clamp the recorded
            # path length and drop stale hops beyond (or equal to) the target.
            ttl = min(ttl, self.max_ttl[ping_ip])
            self.max_ttl[ping_ip] = ttl
            for k in xrange(1, self.MAX_TTL):
                ip = self.result[ping_ip].get(k)
                if k > ttl or ip == ping_ip:
                    self.result[ping_ip].pop(k, None)
        key = ping_ip, ttl
        try:
            self.in_flight.remove(key)
        except ValueError:
            pass
        self.retries.pop(key, None)
        self.result[ping_ip][ttl] = pong_ip
        self.on_pong(self, ping_ip, pong_ip, ttl)
def on_data(self, data, addr):
# get IP packet inside returned IP
outer_ip = IpPacket(data)
inner_ip = IpPacket(outer_ip.payload[IcmpParser.hdrl:])
# the raw structure is: IP(ICMP(IP(ICMP)))
icmp = IcmpParser(inner_ip.payload)
icmp_id = None
if icmp.payload.isdigit():
icmp_id = int(icmp.payload)
if not icmp_id:
icmp_id = icmp.id
if icmp_id in self.echo_map:
ip, ttl = self.echo_map[icmp_id]
logger.debug('Pong %s, ip=%s, hop=%s', ip, addr, ttl)
# f.write('%s\t%s\t%s\n' % (ip, ttl, addr))
self.pong(ip, addr, ttl)
else:
logger.debug('Pong unknown %s -> %s type %s' % (
inner_ip.src_ip, inner_ip.dst_ip, icmp.type))
def get_hops(res):
return [res.get(i) or '?' for i in xrange(max(res.keys()), 0, -1)]
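# Minimal usage sketch (not part of the module): raw ICMP sockets need
# root/administrator privileges, and the target address is a placeholder.
#
#   tracer = Tracer()
#   result = tracer.run(['8.8.8.8'])
#   for ip, hops in result.items():
#       # get_hops() lists hops from the farthest recorded TTL down to 1
#       print ip, ' -> '.join(get_hops(hops))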
| bsd-2-clause |
SteveHNH/ansible | lib/ansible/modules/windows/win_group_membership.py | 47 | 3143 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Andrew Saraceni <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_group_membership
version_added: "2.4"
short_description: Manage Windows local group membership
description:
- Allows the addition and removal of local, service and domain users,
and domain groups from a local group.
options:
name:
description:
- Name of the local group to manage membership on.
required: true
members:
description:
- A list of members to ensure are present/absent from the group.
- Accepts local users as username, .\username, and SERVERNAME\username.
- Accepts domain users and groups as DOMAIN\username and username@DOMAIN.
- Accepts service users as NT AUTHORITY\username.
required: true
state:
description:
- Desired state of the members in the group.
choices:
- present
- absent
default: present
author:
- Andrew Saraceni (@andrewsaraceni)
'''
EXAMPLES = r'''
- name: Add a local and domain user to a local group
win_group_membership:
name: Remote Desktop Users
members:
- NewLocalAdmin
- DOMAIN\TestUser
state: present
- name: Remove a domain group and service user from a local group
win_group_membership:
name: Backup Operators
members:
- DOMAIN\TestGroup
- NT AUTHORITY\SYSTEM
state: absent
'''
RETURN = r'''
name:
description: The name of the target local group.
returned: always
type: string
sample: Administrators
added:
description: A list of members added when C(state) is C(present); this is
empty if no members are added.
returned: success and C(state) is C(present)
type: list
sample: ["NewLocalAdmin", "DOMAIN\\TestUser"]
removed:
description: A list of members removed when C(state) is C(absent); this is
empty if no members are removed.
returned: success and C(state) is C(absent)
type: list
sample: ["DOMAIN\\TestGroup", "NT AUTHORITY\\SYSTEM"]
members:
description: A list of all local group members at completion; this is empty
if the group contains no members.
returned: success
type: list
sample: ["DOMAIN\\TestUser", "NewLocalAdmin"]
'''
| gpl-3.0 |
nwalters512/the-blue-alliance | tests/test_validation_helper.py | 3 | 1494 | import unittest2
from helpers.validation_helper import ValidationHelper
class TestValidationHelper(unittest2.TestCase):
def testTeamValidation(self):
errors = ValidationHelper.validate([("team_id_validator", "frc01")])
self.assertEqual(errors, {"Errors": [{"team_id": "frc01 is not a valid team id"}]})
def testEventValidation(self):
errors = ValidationHelper.validate([("event_id_validator", "1cmp")])
self.assertEqual(errors, {"Errors": [{"event_id": "1cmp is not a valid event id"}]})
def testMatchValidation(self):
errors = ValidationHelper.validate([("match_id_validator", "0010c1_0m2")])
self.assertEqual(errors, {"Errors": [{"match_id": "0010c1_0m2 is not a valid match id"}]})
def testMichiganEigthFinalsValidValidation(self):
errors = ValidationHelper.validate([("match_id_validator", "2015micmp_ef3m1")])
self.assertEqual(None, errors)
def testComboValidation(self):
errors = ValidationHelper.validate([("match_id_validator", "0010c1_0m2"),
("team_id_validator", "frc01"),
("event_id_validator", "1cmp")])
self.assertEqual(errors, {"Errors": [{"match_id": "0010c1_0m2 is not a valid match id"}, {"team_id": "frc01 is not a valid team id"},{"event_id": "1cmp is not a valid event id"}]})
def testValidValidation(self):
errors = ValidationHelper.validate([("team_id_validator", "frc101")])
self.assertEqual(None, errors)
| mit |
gangadharkadam/office_erp | erpnext/hr/report/monthly_attendance_sheet/monthly_attendance_sheet.py | 16 | 2966 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import cstr, cint
from frappe import msgprint, _
def execute(filters=None):
if not filters: filters = {}
conditions, filters = get_conditions(filters)
columns = get_columns(filters)
att_map = get_attendance_list(conditions, filters)
emp_map = get_employee_details()
data = []
for emp in sorted(att_map):
emp_det = emp_map.get(emp)
if not emp_det:
continue
row = [emp, emp_det.employee_name, emp_det.branch, emp_det.department, emp_det.designation,
emp_det.company]
total_p = total_a = 0.0
for day in range(filters["total_days_in_month"]):
status = att_map.get(emp).get(day + 1, "Absent")
status_map = {"Present": "P", "Absent": "A", "Half Day": "HD"}
row.append(status_map[status])
if status == "Present":
total_p += 1
elif status == "Absent":
total_a += 1
elif status == "Half Day":
total_p += 0.5
total_a += 0.5
row += [total_p, total_a]
data.append(row)
return columns, data
def get_columns(filters):
columns = [
"Employee:Link/Employee:120", "Employee Name::140", "Branch:Link/Branch:120",
"Department:Link/Department:120", "Designation:Link/Designation:120",
"Company:Link/Company:120"
]
for day in range(filters["total_days_in_month"]):
columns.append(cstr(day+1) +"::20")
columns += ["Total Present:Float:80", "Total Absent:Float:80"]
return columns
def get_attendance_list(conditions, filters):
attendance_list = frappe.db.sql("""select employee, day(att_date) as day_of_month,
status from tabAttendance where docstatus = 1 %s order by employee, att_date""" %
conditions, filters, as_dict=1)
att_map = {}
for d in attendance_list:
att_map.setdefault(d.employee, frappe._dict()).setdefault(d.day_of_month, "")
att_map[d.employee][d.day_of_month] = d.status
return att_map
def get_conditions(filters):
if not (filters.get("month") and filters.get("fiscal_year")):
msgprint(_("Please select month and year"), raise_exception=1)
filters["month"] = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov",
"Dec"].index(filters["month"]) + 1
from calendar import monthrange
filters["total_days_in_month"] = monthrange(cint(filters["fiscal_year"].split("-")[-1]),
filters["month"])[1]
conditions = " and month(att_date) = %(month)s and fiscal_year = %(fiscal_year)s"
if filters.get("company"): conditions += " and company = %(company)s"
if filters.get("employee"): conditions += " and employee = %(employee)s"
return conditions, filters
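# For example, with month="Feb" and fiscal_year="2013-2014" the month index
# becomes 2 and monthrange(2014, 2)[1] == 28, so the report renders 28 day
# columns.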
def get_employee_details():
emp_map = frappe._dict()
for d in frappe.db.sql("""select name, employee_name, designation,
department, branch, company
from tabEmployee where docstatus < 2
and status = 'Active'""", as_dict=1):
emp_map.setdefault(d.name, d)
return emp_map
| agpl-3.0 |
lifeofguenter/google-appengine-wx-launcher | launcher/text_frame_unittest.py | 28 | 2392 | #!/usr/bin/env python
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unittests for text_frame.py"""
import unittest
import wx
import launcher
class TextFrameTest(unittest.TestCase):
def setUp(self):
# Must always create a wx.App first
self.app = wx.PySimpleApp()
def CreateConsole(self):
"""Create and return a generic console."""
lc = launcher.TextFrame('title')
return lc
def testBasicTile(self):
"""Test to make sure new windows don't overlap."""
pos = (0,0)
launcher.TextFrame._ResetTiling()
for i in range(3):
lc = launcher.TextFrame('big bad window title')
newpos = lc.GetPositionTuple()
self.assertTrue(newpos[0] > pos[0])
self.assertTrue(newpos[1] > pos[1])
pos = newpos
def testMuchTiling(self):
"""Make sure the top/left of our tile is always on-screen."""
launcher.TextFrame._ResetTiling()
area = wx.Display().GetClientArea()
lc = launcher.TextFrame('super dooper tytle 4 roolerz and doodz')
# Needs to be real big in case you have a large monitor. ~1000
# iterations needed for a (1440,874) laptop before a full reset
# happens.
for i in range(3000):
lc._ShiftTilePosition()
self.assertTrue(launcher.TextFrame._tile_position[0] > area[0])
self.assertTrue(launcher.TextFrame._tile_position[1] > area[1])
self.assertTrue(launcher.TextFrame._tile_position[0] < area[2])
self.assertTrue(launcher.TextFrame._tile_position[1] < area[3])
def testText(self):
"""Test adding text to the console."""
lc = self.CreateConsole()
contents = ""
self.assertEqual(contents, lc.GetText())
for str in ('a', 'foo', '\n\n\n', 'bar\nbaz\n choke choke zapf'):
contents += str
lc.AppendText(str)
self.assertEqual(contents, lc.GetText())
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
iamjakob/oauth2lib | oauth2lib/provider.py | 8 | 21633 | import json
import logging
from requests import Response
from cStringIO import StringIO
try:
from werkzeug.exceptions import Unauthorized
except ImportError:
Unauthorized = Exception
from . import utils
class Provider(object):
"""Base provider class for different types of OAuth 2.0 providers."""
def _handle_exception(self, exc):
"""Handle an internal exception that was caught and suppressed.
:param exc: Exception to process.
:type exc: Exception
"""
logger = logging.getLogger(__name__)
logger.exception(exc)
def _make_response(self, body='', headers=None, status_code=200):
"""Return a response object from the given parameters.
:param body: Buffer/string containing the response body.
:type body: str
:param headers: Dict of headers to include in the requests.
:type headers: dict
:param status_code: HTTP status code.
:type status_code: int
:rtype: requests.Response
"""
res = Response()
res.status_code = status_code
if headers is not None:
res.headers.update(headers)
res.raw = StringIO(body)
return res
def _make_redirect_error_response(self, redirect_uri, err):
"""Return a HTTP 302 redirect response object containing the error.
:param redirect_uri: Client redirect URI.
:type redirect_uri: str
:param err: OAuth error message.
:type err: str
:rtype: requests.Response
"""
params = {
'error': err,
'response_type': None,
'client_id': None,
'redirect_uri': None
}
redirect = utils.build_url(redirect_uri, params)
return self._make_response(headers={'Location': redirect},
status_code=302)
def _make_json_response(self, data, headers=None, status_code=200):
"""Return a response object from the given JSON data.
:param data: Data to JSON-encode.
:type data: mixed
:param headers: Dict of headers to include in the requests.
:type headers: dict
:param status_code: HTTP status code.
:type status_code: int
:rtype: requests.Response
"""
response_headers = {}
if headers is not None:
response_headers.update(headers)
response_headers['Content-Type'] = 'application/json;charset=UTF-8'
response_headers['Cache-Control'] = 'no-store'
response_headers['Pragma'] = 'no-cache'
return self._make_response(json.dumps(data),
response_headers,
status_code)
def _make_json_error_response(self, err):
"""Return a JSON-encoded response object representing the error.
:param err: OAuth error message.
:type err: str
:rtype: requests.Response
"""
return self._make_json_response({'error': err}, status_code=400)
def _invalid_redirect_uri_response(self):
"""What to return when the redirect_uri parameter is missing.
:rtype: requests.Response
"""
return self._make_json_error_response('invalid_request')
class AuthorizationProvider(Provider):
"""OAuth 2.0 authorization provider. This class manages authorization
codes and access tokens. Certain methods MUST be overridden in a
subclass, thus this class cannot be directly used as a provider.
These are the methods that must be implemented in a subclass:
validate_client_id(self, client_id)
# Return True or False
validate_client_secret(self, client_id, client_secret)
# Return True or False
validate_scope(self, client_id, scope)
# Return True or False
validate_redirect_uri(self, client_id, redirect_uri)
# Return True or False
validate_access(self) # Use this to validate your app session user
# Return True or False
from_authorization_code(self, client_id, code, scope)
# Return mixed data or None on invalid
from_refresh_token(self, client_id, refresh_token, scope)
# Return mixed data or None on invalid
persist_authorization_code(self, client_id, code, scope)
# Return value ignored
persist_token_information(self, client_id, scope, access_token,
token_type, expires_in, refresh_token,
data)
# Return value ignored
discard_authorization_code(self, client_id, code)
# Return value ignored
discard_refresh_token(self, client_id, refresh_token)
# Return value ignored
    Optionally, the following may be overridden to achieve desired behavior:
@property
token_length(self)
@property
token_type(self)
@property
token_expires_in(self)
generate_authorization_code(self)
generate_access_token(self)
generate_refresh_token(self)
"""
@property
def token_length(self):
"""Property method to get the length used to generate tokens.
:rtype: int
"""
return 40
@property
def token_type(self):
"""Property method to get the access token type.
:rtype: str
"""
return 'Bearer'
@property
def token_expires_in(self):
"""Property method to get the token expiration time in seconds.
:rtype: int
"""
return 3600
def generate_authorization_code(self):
"""Generate a random authorization code.
:rtype: str
"""
return utils.random_ascii_string(self.token_length)
def generate_access_token(self):
"""Generate a random access token.
:rtype: str
"""
return utils.random_ascii_string(self.token_length)
def generate_refresh_token(self):
"""Generate a random refresh token.
:rtype: str
"""
return utils.random_ascii_string(self.token_length)
def get_authorization_code(self,
response_type,
client_id,
redirect_uri,
**params):
"""Generate authorization code HTTP response.
:param response_type: Desired response type. Must be exactly "code".
:type response_type: str
:param client_id: Client ID.
:type client_id: str
:param redirect_uri: Client redirect URI.
:type redirect_uri: str
:rtype: requests.Response
"""
# Ensure proper response_type
if response_type != 'code':
err = 'unsupported_response_type'
return self._make_redirect_error_response(redirect_uri, err)
# Check redirect URI
is_valid_redirect_uri = self.validate_redirect_uri(client_id,
redirect_uri)
if not is_valid_redirect_uri:
return self._invalid_redirect_uri_response()
# Check conditions
is_valid_client_id = self.validate_client_id(client_id)
is_valid_access = self.validate_access()
scope = params.get('scope', '')
is_valid_scope = self.validate_scope(client_id, scope)
# Return proper error responses on invalid conditions
if not is_valid_client_id:
err = 'unauthorized_client'
return self._make_redirect_error_response(redirect_uri, err)
if not is_valid_access:
err = 'access_denied'
return self._make_redirect_error_response(redirect_uri, err)
if not is_valid_scope:
err = 'invalid_scope'
return self._make_redirect_error_response(redirect_uri, err)
# Generate authorization code
code = self.generate_authorization_code()
# Save information to be used to validate later requests
self.persist_authorization_code(client_id=client_id,
code=code,
scope=scope)
# Return redirection response
params.update({
'code': code,
'response_type': None,
'client_id': None,
'redirect_uri': None
})
redirect = utils.build_url(redirect_uri, params)
return self._make_response(headers={'Location': redirect},
status_code=302)
def refresh_token(self,
grant_type,
client_id,
client_secret,
refresh_token,
**params):
"""Generate access token HTTP response from a refresh token.
:param grant_type: Desired grant type. Must be "refresh_token".
:type grant_type: str
:param client_id: Client ID.
:type client_id: str
:param client_secret: Client secret.
:type client_secret: str
:param refresh_token: Refresh token.
:type refresh_token: str
:rtype: requests.Response
"""
# Ensure proper grant_type
if grant_type != 'refresh_token':
return self._make_json_error_response('unsupported_grant_type')
# Check conditions
is_valid_client_id = self.validate_client_id(client_id)
is_valid_client_secret = self.validate_client_secret(client_id,
client_secret)
scope = params.get('scope', '')
is_valid_scope = self.validate_scope(client_id, scope)
data = self.from_refresh_token(client_id, refresh_token, scope)
is_valid_refresh_token = data is not None
# Return proper error responses on invalid conditions
if not (is_valid_client_id and is_valid_client_secret):
return self._make_json_error_response('invalid_client')
if not is_valid_scope:
return self._make_json_error_response('invalid_scope')
if not is_valid_refresh_token:
return self._make_json_error_response('invalid_grant')
# Discard original refresh token
self.discard_refresh_token(client_id, refresh_token)
# Generate access tokens once all conditions have been met
access_token = self.generate_access_token()
token_type = self.token_type
expires_in = self.token_expires_in
refresh_token = self.generate_refresh_token()
# Save information to be used to validate later requests
self.persist_token_information(client_id=client_id,
scope=scope,
access_token=access_token,
token_type=token_type,
expires_in=expires_in,
refresh_token=refresh_token,
data=data)
# Return json response
return self._make_json_response({
'access_token': access_token,
'token_type': token_type,
'expires_in': expires_in,
'refresh_token': refresh_token
})
def get_token(self,
grant_type,
client_id,
client_secret,
redirect_uri,
code,
**params):
"""Generate access token HTTP response.
:param grant_type: Desired grant type. Must be "authorization_code".
:type grant_type: str
:param client_id: Client ID.
:type client_id: str
:param client_secret: Client secret.
:type client_secret: str
:param redirect_uri: Client redirect URI.
:type redirect_uri: str
:param code: Authorization code.
:type code: str
:rtype: requests.Response
"""
# Ensure proper grant_type
if grant_type != 'authorization_code':
return self._make_json_error_response('unsupported_grant_type')
# Check conditions
is_valid_client_id = self.validate_client_id(client_id)
is_valid_client_secret = self.validate_client_secret(client_id,
client_secret)
is_valid_redirect_uri = self.validate_redirect_uri(client_id,
redirect_uri)
scope = params.get('scope', '')
is_valid_scope = self.validate_scope(client_id, scope)
data = self.from_authorization_code(client_id, code, scope)
is_valid_grant = data is not None
# Return proper error responses on invalid conditions
if not (is_valid_client_id and is_valid_client_secret):
return self._make_json_error_response('invalid_client')
if not is_valid_grant or not is_valid_redirect_uri:
return self._make_json_error_response('invalid_grant')
if not is_valid_scope:
return self._make_json_error_response('invalid_scope')
# Discard original authorization code
self.discard_authorization_code(client_id, code)
# Generate access tokens once all conditions have been met
access_token = self.generate_access_token()
token_type = self.token_type
expires_in = self.token_expires_in
refresh_token = self.generate_refresh_token()
# Save information to be used to validate later requests
self.persist_token_information(client_id=client_id,
scope=scope,
access_token=access_token,
token_type=token_type,
expires_in=expires_in,
refresh_token=refresh_token,
data=data)
# Return json response
return self._make_json_response({
'access_token': access_token,
'token_type': token_type,
'expires_in': expires_in,
'refresh_token': refresh_token
})
def get_authorization_code_from_uri(self, uri):
"""Get authorization code response from a URI. This method will
ignore the domain and path of the request, instead
automatically parsing the query string parameters.
:param uri: URI to parse for authorization information.
:type uri: str
:rtype: requests.Response
"""
params = utils.url_query_params(uri)
try:
if 'response_type' not in params:
raise TypeError('Missing parameter response_type in URL query')
if 'client_id' not in params:
raise TypeError('Missing parameter client_id in URL query')
if 'redirect_uri' not in params:
raise TypeError('Missing parameter redirect_uri in URL query')
return self.get_authorization_code(**params)
except TypeError as exc:
self._handle_exception(exc)
# Catch missing parameters in request
err = 'invalid_request'
if 'redirect_uri' in params:
u = params['redirect_uri']
return self._make_redirect_error_response(u, err)
else:
return self._invalid_redirect_uri_response()
except StandardError as exc:
self._handle_exception(exc)
# Catch all other server errors
err = 'server_error'
u = params['redirect_uri']
return self._make_redirect_error_response(u, err)
def get_token_from_post_data(self, data):
"""Get a token response from POST data.
:param data: POST data containing authorization information.
:type data: dict
:rtype: requests.Response
"""
try:
# Verify OAuth 2.0 Parameters
for x in ['grant_type', 'client_id', 'client_secret']:
if not data.get(x):
raise TypeError("Missing required OAuth 2.0 POST param: {0}".format(x))
# Handle get token from refresh_token
if 'refresh_token' in data:
return self.refresh_token(**data)
# Handle get token from authorization code
for x in ['redirect_uri', 'code']:
if not data.get(x):
raise TypeError("Missing required OAuth 2.0 POST param: {0}".format(x))
return self.get_token(**data)
except TypeError as exc:
self._handle_exception(exc)
# Catch missing parameters in request
return self._make_json_error_response('invalid_request')
except StandardError as exc:
self._handle_exception(exc)
# Catch all other server errors
return self._make_json_error_response('server_error')
def validate_client_id(self, client_id):
raise NotImplementedError('Subclasses must implement ' \
'validate_client_id.')
def validate_client_secret(self, client_id, client_secret):
raise NotImplementedError('Subclasses must implement ' \
'validate_client_secret.')
def validate_redirect_uri(self, client_id, redirect_uri):
raise NotImplementedError('Subclasses must implement ' \
'validate_redirect_uri.')
def validate_scope(self, client_id, scope):
raise NotImplementedError('Subclasses must implement ' \
'validate_scope.')
def validate_access(self):
raise NotImplementedError('Subclasses must implement ' \
'validate_access.')
def from_authorization_code(self, client_id, code, scope):
raise NotImplementedError('Subclasses must implement ' \
'from_authorization_code.')
def from_refresh_token(self, client_id, refresh_token, scope):
raise NotImplementedError('Subclasses must implement ' \
'from_refresh_token.')
def persist_authorization_code(self, client_id, code, scope):
raise NotImplementedError('Subclasses must implement ' \
'persist_authorization_code.')
def persist_token_information(self, client_id, scope, access_token,
token_type, expires_in, refresh_token,
data):
raise NotImplementedError('Subclasses must implement ' \
'persist_token_information.')
def discard_authorization_code(self, client_id, code):
raise NotImplementedError('Subclasses must implement ' \
'discard_authorization_code.')
def discard_refresh_token(self, client_id, refresh_token):
raise NotImplementedError('Subclasses must implement ' \
'discard_refresh_token.')
class OAuthError(Unauthorized):
"""OAuth error, including the OAuth error reason."""
def __init__(self, reason, *args, **kwargs):
self.reason = reason
super(OAuthError, self).__init__(*args, **kwargs)
class ResourceAuthorization(object):
"""A class containing an OAuth 2.0 authorization."""
is_oauth = False
is_valid = None
token = None
client_id = None
expires_in = None
error = None
def raise_error_if_invalid(self):
if not self.is_valid:
raise OAuthError(self.error, 'OAuth authorization error')
class ResourceProvider(Provider):
"""OAuth 2.0 resource provider. This class provides an interface
to validate an incoming request and authenticate resource access.
Certain methods MUST be overridden in a subclass, thus this
class cannot be directly used as a resource provider.
These are the methods that must be implemented in a subclass:
get_authorization_header(self)
# Return header string for key "Authorization" or None
validate_access_token(self, access_token, authorization)
# Set is_valid=True, client_id, and expires_in attributes
# on authorization if authorization was successful.
# Return value is ignored
"""
@property
def authorization_class(self):
return ResourceAuthorization
def get_authorization(self):
"""Get authorization object representing status of authentication."""
auth = self.authorization_class()
header = self.get_authorization_header()
        if not header:
            return auth
header = header.split()
if len(header) > 1 and header[0] == 'Bearer':
auth.is_oauth = True
access_token = header[1]
self.validate_access_token(access_token, auth)
if not auth.is_valid:
auth.error = 'access_denied'
return auth
def get_authorization_header(self):
raise NotImplementedError('Subclasses must implement ' \
'get_authorization_header.')
def validate_access_token(self, access_token, authorization):
raise NotImplementedError('Subclasses must implement ' \
'validate_token.')
| mit |
jagg81/translate-toolkit | translate/storage/statsdb.py | 2 | 27596 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2007-2010 Zuza Software Foundation
#
# This file is part of the Translate Toolkit.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""Module to provide a cache of statistics in a database.
@organization: Zuza Software Foundation
@copyright: 2007 Zuza Software Foundation
@license: U{GPL <http://www.fsf.org/licensing/licenses/gpl.html>}
"""
try:
from sqlite3 import dbapi2
except ImportError:
from pysqlite2 import dbapi2
import os.path
import re
import sys
import stat
import thread
from UserDict import UserDict
from translate import __version__ as toolkitversion
from translate.lang.common import Common
from translate.misc.multistring import multistring
from translate.storage import factory
from translate.storage.workflow import StateEnum
#kdepluralre = re.compile("^_n: ") #Restore this if you really need support for old kdeplurals
brtagre = re.compile("<br\s*?/?>")
xmltagre = re.compile("<[^>]+>")
numberre = re.compile("\\D\\.\\D")
extended_state_strings = {
StateEnum.EMPTY: "empty",
StateEnum.NEEDS_WORK: "needs-work",
StateEnum.REJECTED: "rejected",
StateEnum.NEEDS_REVIEW: "needs-review",
StateEnum.UNREVIEWED: "unreviewed",
StateEnum.FINAL: "final",
}
UNTRANSLATED = StateEnum.EMPTY
FUZZY = StateEnum.NEEDS_WORK
TRANSLATED = StateEnum.UNREVIEWED
state_strings = {
UNTRANSLATED: "untranslated",
FUZZY: "fuzzy",
TRANSLATED: "translated",
}
def wordcount(string):
# TODO: po class should understand KDE style plurals ##
#string = kdepluralre.sub("", string) #Restore this if you really need support for old kdeplurals
string = brtagre.sub("\n", string)
string = xmltagre.sub("", string)
string = numberre.sub(" ", string)
#TODO: This should still use the correct language to count in the target
#language
return len(Common.words(string))
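# As a rough illustration (the exact count depends on how
# translate.lang.common.Common.words tokenises), markup is stripped before
# counting, so wordcount("Hello <b>world</b><br/>again") counts the words of
# "Hello world\nagain", i.e. 3.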
def wordsinunit(unit):
"""Counts the words in the unit's source and target, taking plurals into
account. The target words are only counted if the unit is translated."""
(sourcewords, targetwords) = (0, 0)
if isinstance(unit.source, multistring):
sourcestrings = unit.source.strings
else:
sourcestrings = [unit.source or ""]
for s in sourcestrings:
sourcewords += wordcount(s)
if not unit.istranslated():
return sourcewords, targetwords
if isinstance(unit.target, multistring):
targetstrings = unit.target.strings
else:
targetstrings = [unit.target or ""]
for s in targetstrings:
targetwords += wordcount(s)
return sourcewords, targetwords
class Record(UserDict):
def __init__(self, record_keys, record_values=None, compute_derived_values=lambda x: x):
        if record_values is None:
record_values = (0 for _i in record_keys)
self.record_keys = record_keys
self.data = dict(zip(record_keys, record_values))
self._compute_derived_values = compute_derived_values
self._compute_derived_values(self)
def to_tuple(self):
return tuple(self[key] for key in self.record_keys)
    def __add__(self, other):
        result = Record(self.record_keys,
                        compute_derived_values=self._compute_derived_values)
        for key in self.record_keys:
            result[key] = self[key] + other[key]
        # Recompute the derived values on the result; self is left untouched.
        result._compute_derived_values(result)
        return result
    def __sub__(self, other):
        result = Record(self.record_keys,
                        compute_derived_values=self._compute_derived_values)
        for key in self.record_keys:
            result[key] = self[key] - other[key]
        result._compute_derived_values(result)
        return result
def as_string_for_db(self):
return ",".join([repr(x) for x in self.to_tuple()])
def transaction(f):
"""Modifies f to commit database changes if it executes without exceptions.
Otherwise it rolls back the database.
ALL publicly accessible methods in StatsCache MUST be decorated with this
decorator.
"""
def decorated_f(self, *args, **kwargs):
try:
result = f(self, *args, **kwargs)
self.con.commit()
return result
except:
# If ANY exception is raised, we're left in an
# uncertain state and we MUST roll back any changes to avoid getting
# stuck in an inconsistent state.
if self.con:
self.con.rollback()
raise
return decorated_f
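# Usage sketch: decorating a public StatsCache method makes its database work
# atomic, e.g.
#
#   class StatsCache(object):
#       @transaction
#       def some_update(self, unit):
#           self.cur.execute(...)  # committed on success, rolled back on error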
def statefordb(unit):
"""Returns the numeric database state for the unit."""
if unit.istranslated():
return TRANSLATED
if unit.isfuzzy() and unit.target:
return FUZZY
return UNTRANSLATED
class FileTotals(object):
keys = ['translatedsourcewords',
'fuzzysourcewords',
'untranslatedsourcewords',
'translated',
'fuzzy',
'untranslated',
'translatedtargetwords']
def db_keys(self):
return ",".join(self.keys)
def __init__(self, cur):
self.cur = cur
self.cur.execute("""
CREATE TABLE IF NOT EXISTS filetotals(
fileid INTEGER PRIMARY KEY AUTOINCREMENT,
translatedsourcewords INTEGER NOT NULL,
fuzzysourcewords INTEGER NOT NULL,
untranslatedsourcewords INTEGER NOT NULL,
translated INTEGER NOT NULL,
fuzzy INTEGER NOT NULL,
untranslated INTEGER NOT NULL,
translatedtargetwords INTEGER NOT NULL);""")
def new_record(cls, state_for_db=None, sourcewords=None, targetwords=None):
record = Record(cls.keys, compute_derived_values=cls._compute_derived_values)
if state_for_db is not None:
            if state_for_db == UNTRANSLATED:
                record['untranslated'] = 1
                record['untranslatedsourcewords'] = sourcewords
            elif state_for_db == TRANSLATED:
                record['translated'] = 1
                record['translatedsourcewords'] = sourcewords
                record['translatedtargetwords'] = targetwords
            elif state_for_db == FUZZY:
                record['fuzzy'] = 1
                record['fuzzysourcewords'] = sourcewords
return record
new_record = classmethod(new_record)
def _compute_derived_values(cls, record):
record["total"] = record["untranslated"] + \
record["translated"] + \
record["fuzzy"]
record["totalsourcewords"] = record["untranslatedsourcewords"] + \
record["translatedsourcewords"] + \
record["fuzzysourcewords"]
record["review"] = 0
_compute_derived_values = classmethod(_compute_derived_values)
def __getitem__(self, fileid):
result = self.cur.execute("""
SELECT %(keys)s
FROM filetotals
WHERE fileid=?;""" % {'keys': self.db_keys()}, (fileid,))
return Record(FileTotals.keys, result.fetchone(), self._compute_derived_values)
def __setitem__(self, fileid, record):
self.cur.execute("""
INSERT OR REPLACE into filetotals
VALUES (%(fileid)d, %(vals)s);
""" % {'fileid': fileid, 'vals': record.as_string_for_db()})
def __delitem__(self, fileid):
self.cur.execute("""
DELETE FROM filetotals
WHERE fileid=?;
""", (fileid,))
def emptyfiletotals():
"""Returns a dictionary with all statistics initalised to 0."""
return FileTotals.new_record()
def emptyfilechecks():
return {}
def emptyfilestats():
return {"total": [], "translated": [], "fuzzy": [], "untranslated": []}
def emptyunitstats():
return {"sourcewordcount": [], "targetwordcount": []}
# We allow the caller to specify which value to return when errors_return_empty
# is True. We do this since Pootle wants None to be returned when it calls
# get_mod_info directly, whereas we want an integer to be returned for
# uses of get_mod_info within this module.
# TODO: Get rid of empty_return when Pootle code is improved to not require
# this.
def get_mod_info(file_path):
file_stat = os.stat(file_path)
assert not stat.S_ISDIR(file_stat.st_mode)
return file_stat.st_mtime, file_stat.st_size
def suggestion_extension():
return os.path.extsep + 'pending'
def suggestion_filename(filename):
return filename + suggestion_extension()
# ALL PUBLICLY ACCESSIBLE METHODS MUST BE DECORATED WITH THE transaction DECORATOR.
class StatsCache(object):
"""An object instantiated as a singleton for each statsfile that provides
access to the database cache from a pool of StatsCache objects."""
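    # Within a single thread, constructing StatsCache twice for the same
    # statsfile hands back the same cached instance (see __new__ below), e.g.:
    #
    #   c1 = StatsCache('/tmp/stats.db')
    #   c2 = StatsCache('/tmp/stats.db')
    #   assert c1 is c2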
_caches = {}
defaultfile = None
con = None
"""This cache's connection"""
cur = None
"""The current cursor"""
def __new__(cls, statsfile=None):
current_thread = thread.get_ident()
def make_database(statsfile):
def connect(cache):
cache.con = dbapi2.connect(statsfile)
cache.cur = cache.con.cursor()
def clear_old_data(cache):
try:
cache.cur.execute("""SELECT min(toolkitbuild) FROM files""")
val = cache.cur.fetchone()
# If the database is empty, we have no idea whether its layout
# is correct, so we might as well delete it.
if val is None or val[0] < toolkitversion.build:
cache.con.close()
del cache
os.unlink(statsfile)
return True
return False
except dbapi2.OperationalError:
return False
cache = cls._caches.setdefault(current_thread, {})[statsfile] = object.__new__(cls)
connect(cache)
if clear_old_data(cache):
connect(cache)
cache.create()
return cache
if not statsfile:
if not cls.defaultfile:
userdir = os.path.expanduser("~")
cachedir = None
if os.name == "nt":
cachedir = os.path.join(userdir, "Translate Toolkit")
else:
cachedir = os.path.join(userdir, ".translate_toolkit")
if not os.path.exists(cachedir):
os.mkdir(cachedir)
cls.defaultfile = os.path.realpath(os.path.join(cachedir, "stats.db"))
statsfile = cls.defaultfile
else:
statsfile = os.path.realpath(statsfile)
# First see if a cache for this file already exists:
if current_thread in cls._caches and statsfile in cls._caches[current_thread]:
return cls._caches[current_thread][statsfile]
# No existing cache. Let's build a new one and keep a copy
return make_database(statsfile)
@transaction
def create(self):
"""Create all tables and indexes."""
self.file_totals = FileTotals(self.cur)
self.cur.execute("""CREATE TABLE IF NOT EXISTS files(
fileid INTEGER PRIMARY KEY AUTOINCREMENT,
path VARCHAR NOT NULL UNIQUE,
st_mtime INTEGER NOT NULL,
st_size INTEGER NOT NULL,
toolkitbuild INTEGER NOT NULL);""")
self.cur.execute("""CREATE UNIQUE INDEX IF NOT EXISTS filepathindex
ON files (path);""")
self.cur.execute("""CREATE TABLE IF NOT EXISTS units(
id INTEGER PRIMARY KEY AUTOINCREMENT,
unitid VARCHAR NOT NULL,
fileid INTEGER NOT NULL,
unitindex INTEGER NOT NULL,
source VARCHAR NOT NULL,
target VARCHAR,
state INTEGER,
e_state INTEGER,
sourcewords INTEGER,
targetwords INTEGER);""")
self.cur.execute("""CREATE INDEX IF NOT EXISTS fileidindex
ON units(fileid);""")
self.cur.execute("""CREATE TABLE IF NOT EXISTS checkerconfigs(
configid INTEGER PRIMARY KEY AUTOINCREMENT,
config VARCHAR);""")
self.cur.execute("""CREATE INDEX IF NOT EXISTS configindex
ON checkerconfigs(config);""")
self.cur.execute("""CREATE TABLE IF NOT EXISTS uniterrors(
errorid INTEGER PRIMARY KEY AUTOINCREMENT,
unitindex INTEGER NOT NULL,
fileid INTEGER NOT NULL,
configid INTEGER NOT NULL,
name VARCHAR NOT NULL,
message VARCHAR);""")
self.cur.execute("""CREATE INDEX IF NOT EXISTS uniterrorindex
ON uniterrors(fileid, configid);""")
@transaction
def _getfileid(self, filename, check_mod_info=True, store=None):
"""return fileid representing the given file in the statscache.
if file not in cache or has been updated since last record
update, recalculate stats.
optional argument store can be used to avoid unnessecary
reparsing of already loaded translation files.
store can be a TranslationFile object or a callback that returns one.
"""
if isinstance(filename, str):
filename = unicode(filename, sys.getfilesystemencoding())
realpath = os.path.realpath(filename)
self.cur.execute("""SELECT fileid, st_mtime, st_size FROM files
WHERE path=?;""", (realpath,))
filerow = self.cur.fetchone()
mod_info = get_mod_info(realpath)
if filerow:
fileid = filerow[0]
if not check_mod_info:
# Update the mod_info of the file
self.cur.execute("""UPDATE files
SET st_mtime=?, st_size=?
WHERE fileid=?;""", (mod_info[0], mod_info[1], fileid))
return fileid
if (filerow[1], filerow[2]) == mod_info:
return fileid
# file wasn't in db at all, lets recache it
if callable(store):
store = store()
else:
store = store or factory.getobject(realpath)
return self._cachestore(store, realpath, mod_info)
def _getstoredcheckerconfig(self, checker):
"""See if this checker configuration has been used before."""
config = str(checker.config.__dict__)
self.cur.execute("""SELECT configid, config FROM checkerconfigs WHERE
config=?;""", (config,))
configrow = self.cur.fetchone()
if not configrow or configrow[1] != config:
return None
else:
return configrow[0]
@transaction
def _cacheunitstats(self, units, fileid, unitindex=None, file_totals_record=FileTotals.new_record()):
"""Cache the statistics for the supplied unit(s)."""
unitvalues = []
for index, unit in enumerate(units):
if unit.istranslatable():
sourcewords, targetwords = wordsinunit(unit)
if unitindex:
index = unitindex
# what about plurals in .source and .target?
unit_state_for_db = statefordb(unit)
unitvalues.append((unit.getid(), fileid, index, \
unit.source, unit.target, \
sourcewords, targetwords, \
unit_state_for_db,
unit.get_state_id()))
file_totals_record = file_totals_record + FileTotals.new_record(unit_state_for_db, sourcewords, targetwords)
# XXX: executemany is non-standard
self.cur.executemany("""INSERT INTO units
(unitid, fileid, unitindex, source, target, sourcewords, targetwords, state, e_state)
values (?, ?, ?, ?, ?, ?, ?, ?, ?);""",
unitvalues)
self.file_totals[fileid] = file_totals_record
if unitindex:
return state_strings[statefordb(units[0])]
return ""
@transaction
def _cachestore(self, store, realpath, mod_info):
"""Calculates and caches the statistics of the given store
unconditionally."""
self.cur.execute("""DELETE FROM files WHERE
path=?;""", (realpath,))
self.cur.execute("""INSERT INTO files
(fileid, path, st_mtime, st_size, toolkitbuild) values (NULL, ?, ?, ?, ?);""",
(realpath, mod_info[0], mod_info[1], toolkitversion.build))
fileid = self.cur.lastrowid
self.cur.execute("""DELETE FROM units WHERE
fileid=?""", (fileid,))
self._cacheunitstats(store.units, fileid)
return fileid
def file_extended_totals(self, filename, store=None):
stats = {}
fileid = self._getfileid(filename, store=store)
self.cur.execute("""SELECT e_state, COUNT(id), SUM(sourcewords), SUM(targetwords)
FROM units WHERE fileid=? GROUP BY e_state""", (fileid,))
values = self.cur.fetchall()
for value in values:
stats[extended_state_strings[value[0]]] = {
"units": value[1],
"sourcewords": value[2],
"targetwords": value[3],
}
return stats
def filetotals(self, filename, store=None, extended=False):
"""Retrieves the statistics for the given file if possible, otherwise
delegates to _cachestore()."""
stats = self.file_totals[self._getfileid(filename, store=store)]
if extended:
stats["extended"] = self.file_extended_totals(filename, store=store)
return stats
@transaction
def _cacheunitschecks(self, units, fileid, configid, checker, unitindex=None):
"""Helper method for cachestorechecks() and recacheunit()"""
# We always want to store one dummy error to know that we have actually
# run the checks on this file with the current checker configuration
dummy = (-1, fileid, configid, "noerror", "")
unitvalues = [dummy]
# if we are doing a single unit, we want to return the checknames
errornames = []
for index, unit in enumerate(units):
if unit.istranslatable():
# Correctly assign the unitindex
if unitindex:
index = unitindex
failures = checker.run_filters(unit)
for checkname, checkmessage in failures.iteritems():
unitvalues.append((index, fileid, configid, checkname, checkmessage))
errornames.append("check-" + checkname)
checker.setsuggestionstore(None)
if unitindex:
# We are only updating a single unit, so we don't want to add an
# extra noerror-entry
unitvalues.remove(dummy)
errornames.append("total")
# XXX: executemany is non-standard
self.cur.executemany("""INSERT INTO uniterrors
(unitindex, fileid, configid, name, message)
values (?, ?, ?, ?, ?);""",
unitvalues)
return errornames
@transaction
def _cachestorechecks(self, fileid, store, checker, configid):
"""Calculates and caches the error statistics of the given store
unconditionally."""
# Let's purge all previous failures because they will probably just
# fill up the database without much use.
self.cur.execute("""DELETE FROM uniterrors WHERE
fileid=?;""", (fileid,))
self._cacheunitschecks(store.units, fileid, configid, checker)
return fileid
def get_unit_stats(self, fileid, unitid):
values = self.cur.execute("""
SELECT state, sourcewords, targetwords
FROM units
WHERE fileid=? AND unitid=?
""", (fileid, unitid))
result = values.fetchone()
if result is not None:
return result
else:
print >> sys.stderr, """WARNING: Database in inconsistent state.
fileid %d and unitid %s have no entries in the table units.""" % (fileid, unitid)
# If values.fetchone() is None, then we return an empty list,
# to make FileTotals.new_record(*self.get_unit_stats(fileid, unitid))
# do the right thing.
return []
@transaction
def recacheunit(self, filename, checker, unit):
"""Recalculate all information for a specific unit. This is necessary
for updating all statistics when a translation of a unit took place,
for example.
This method assumes that everything was up to date before (file totals,
checks, checker config, etc.)."""
fileid = self._getfileid(filename, check_mod_info=False)
configid = self._get_config_id(fileid, checker)
unitid = unit.getid()
# get the unit index
totals_without_unit = self.file_totals[fileid] - \
FileTotals.new_record(*self.get_unit_stats(fileid, unitid))
self.cur.execute("""SELECT unitindex FROM units WHERE
fileid=? AND unitid=?;""", (fileid, unitid))
unitindex = self.cur.fetchone()[0]
self.cur.execute("""DELETE FROM units WHERE
fileid=? AND unitid=?;""", (fileid, unitid))
state = [self._cacheunitstats([unit], fileid, unitindex, totals_without_unit)]
# remove the current errors
self.cur.execute("""DELETE FROM uniterrors WHERE
fileid=? AND unitindex=?;""", (fileid, unitindex))
if os.path.exists(suggestion_filename(filename)):
checker.setsuggestionstore(factory.getobject(suggestion_filename(filename), ignore=suggestion_extension()))
state.extend(self._cacheunitschecks([unit], fileid, configid, checker, unitindex))
return state
def _checkerrors(self, filename, fileid, configid, checker, store):
def geterrors():
self.cur.execute("""SELECT
name,
unitindex
FROM uniterrors WHERE fileid=? and configid=?
ORDER BY unitindex;""", (fileid, configid))
return self.cur.fetchone(), self.cur
first, cur = geterrors()
if first is not None:
return first, cur
# This could happen if we haven't done the checks before, or the
# file changed, or we are using a different configuration
if callable(store):
store = store()
else:
store = store or factory.getobject(filename)
if os.path.exists(suggestion_filename(filename)):
checker.setsuggestionstore(factory.getobject(suggestion_filename(filename), ignore=suggestion_extension()))
self._cachestorechecks(fileid, store, checker, configid)
return geterrors()
def _geterrors(self, filename, fileid, configid, checker, store):
result = []
first, cur = self._checkerrors(filename, fileid, configid, checker, store)
result.append(first)
result.extend(cur.fetchall())
return result
@transaction
def _get_config_id(self, fileid, checker):
configid = self._getstoredcheckerconfig(checker)
if configid:
return configid
self.cur.execute("""INSERT INTO checkerconfigs
(configid, config) values (NULL, ?);""",
(str(checker.config.__dict__),))
return self.cur.lastrowid
def filechecks(self, filename, checker, store=None):
"""Retrieves the error statistics for the given file if possible,
otherwise delegates to _cachestorechecks()."""
fileid = self._getfileid(filename, store=store)
configid = self._get_config_id(fileid, checker)
values = self._geterrors(filename, fileid, configid, checker, store)
errors = emptyfilechecks()
for value in values:
if value[1] == -1:
continue
checkkey = 'check-' + value[0] #value[0] is the error name
if not checkkey in errors:
errors[checkkey] = []
errors[checkkey].append(value[1]) #value[1] is the unitindex
return errors
def file_fails_test(self, filename, checker, name):
fileid = self._getfileid(filename)
configid = self._get_config_id(fileid, checker)
self._checkerrors(filename, fileid, configid, checker, None)
self.cur.execute("""SELECT
name,
unitindex
FROM uniterrors
WHERE fileid=? and configid=? and name=?;""", (fileid, configid, name))
return self.cur.fetchone() is not None
def filestatestats(self, filename, store=None, extended=False):
"""Return a dictionary of unit stats mapping sets of unit
indices with those states"""
stats = emptyfilestats()
if extended:
stats["extended"] = {}
fileid = self._getfileid(filename, store=store)
self.cur.execute("""SELECT state, e_state, unitindex
FROM units WHERE fileid=? ORDER BY unitindex;""", (fileid,))
values = self.cur.fetchall()
for value in values:
stats[state_strings[value[0]]].append(value[2])
if extended:
if value[1] not in stats["extended"]:
stats["extended"][value[1]] = []
stats["extended"][value[1]].append(value[2])
stats["total"].append(value[2])
return stats
def filestats(self, filename, checker, store=None, extended=False):
"""Return a dictionary of property names mapping sets of unit
indices with those properties."""
stats = emptyfilestats()
stats.update(self.filechecks(filename, checker, store))
stats.update(self.filestatestats(filename, store, extended=extended))
return stats
def unitstats(self, filename, _lang=None, store=None):
# For now, lang and store are unused. lang will allow the user to
# base stats information on the given language. See the commented
# line containing stats.update below.
"""Return a dictionary of property names mapping to arrays which
map unit indices to property values.
Please note that this is different from filestats, since filestats
supplies sets of unit indices with a given property, whereas this
method supplies arrays which map unit indices to given values."""
stats = emptyunitstats()
#stats.update(self.unitchecks(filename, lang, store))
fileid = self._getfileid(filename, store=store)
self.cur.execute("""SELECT
sourcewords, targetwords
FROM units WHERE fileid=?
ORDER BY unitindex;""", (fileid,))
for sourcecount, targetcount in self.cur.fetchall():
stats["sourcewordcount"].append(sourcecount)
stats["targetwordcount"].append(targetcount)
return stats
| gpl-2.0 |
agry/NGECore2 | scripts/mobiles/rori/shallow_torton.py | 2 | 1728 | import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from java.util import Vector
def addTemplate(core):
mobileTemplate = MobileTemplate()
mobileTemplate.setCreatureName('shallow_torton')
mobileTemplate.setLevel(47)
mobileTemplate.setDifficulty(Difficulty.NORMAL)
mobileTemplate.setMinSpawnDistance(4)
mobileTemplate.setMaxSpawnDistance(8)
mobileTemplate.setDeathblow(False)
mobileTemplate.setScale(.5)
mobileTemplate.setMeatType("Carnivore Meat")
mobileTemplate.setMeatAmount(650)
mobileTemplate.setHideType("Wooly Hide")
mobileTemplate.setHideAmount(575)
mobileTemplate.setBoneType("Animal Bones")
mobileTemplate.setBoneAmount(650)
mobileTemplate.setSocialGroup("torton")
mobileTemplate.setAssistRange(4)
mobileTemplate.setStalker(False)
mobileTemplate.setOptionsBitmask(Options.AGGRESSIVE | Options.ATTACKABLE)
templates = Vector()
templates.add('object/mobile/shared_shallow_torton.iff')
mobileTemplate.setTemplates(templates)
weaponTemplates = Vector()
weapontemplate = WeaponTemplate('object/weapon/melee/unarmed/shared_unarmed_default.iff', WeaponType.UNARMED, 1.0, 6, 'kinetic')
weaponTemplates.add(weapontemplate)
mobileTemplate.setWeaponTemplateVector(weaponTemplates)
attacks = Vector()
attacks.add('bm_dampen_pain_3')
attacks.add('bm_deflective_hide')
attacks.add('bm_puncture_1')
attacks.add('shaken_3')
attacks.add('bm_stomp_3')
mobileTemplate.setDefaultAttack('creatureMeleeAttack')
mobileTemplate.setAttacks(attacks)
core.spawnService.addMobileTemplate('shallow_torton', mobileTemplate)
return | lgpl-3.0 |
brownharryb/erpnext | erpnext/patches/v5_1/fix_against_account.py | 107 | 1271 | from __future__ import unicode_literals
import frappe
from erpnext.accounts.doctype.gl_entry.gl_entry import update_against_account
def execute():
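# Repopulate the 'against' account fields on Journal Entries and on GL
# Entries of Sales/Purchase Invoices submitted after 2015-05-01.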
from_date = "2015-05-01"
for doc in frappe.get_all("Journal Entry",
filters={"creation": (">", from_date), "docstatus": "1"}):
# update in gl_entry
update_against_account("Journal Entry", doc.name)
# update in jv
doc = frappe.get_doc("Journal Entry", doc.name)
doc.set_against_account()
doc.db_update()
for doc in frappe.get_all("Sales Invoice",
filters={"creation": (">", from_date), "docstatus": "1"},
fields=["name", "customer"]):
frappe.db.sql("""update `tabGL Entry` set against=%s
where voucher_type='Sales Invoice' and voucher_no=%s
and credit > 0 and ifnull(party, '')=''""",
(doc.customer, doc.name))
for doc in frappe.get_all("Purchase Invoice",
filters={"creation": (">", from_date), "docstatus": "1"},
fields=["name", "supplier"]):
frappe.db.sql("""update `tabGL Entry` set against=%s
where voucher_type='Purchase Invoice' and voucher_no=%s
and debit > 0 and ifnull(party, '')=''""",
(doc.supplier, doc.name))
| gpl-3.0 |
zaventh/android_kernel_lge_hammerhead | tools/perf/scripts/python/net_dropmonitor.py | 4235 | 1554 | # Monitor the system for dropped packets and produce a report of drop locations and counts
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
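# Typical invocation (an assumption, based on other perf python scripts):
# perf record -e skb:kfree_skb -a -- sleep 10
# perf script -s net_dropmonitor.py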
drop_log = {}
kallsyms = []
def get_kallsyms_table():
global kallsyms
try:
f = open("/proc/kallsyms", "r")
linecount = 0
for line in f:
linecount = linecount+1
f.seek(0)
except:
return
j = 0
for line in f:
loc = int(line.split()[0], 16)
name = line.split()[2]
j = j +1
if ((j % 100) == 0):
print "\r" + str(j) + "/" + str(linecount),
kallsyms.append({ 'loc': loc, 'name' : name})
print "\r" + str(j) + "/" + str(linecount)
kallsyms.sort(key=lambda sym: sym['loc'])
return
def get_sym(sloc):
loc = int(sloc)
# kallsyms is sorted by address; the symbol containing loc is the last
# entry whose start address is at or below loc.
for i in reversed(kallsyms):
if loc >= i['loc']:
return (i['name'], loc - i['loc'])
return (None, 0)
def print_drop_table():
print "%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT")
for i in drop_log.keys():
(sym, off) = get_sym(i)
if sym == None:
sym = i
print "%25s %25s %25s" % (sym, off, drop_log[i])
def trace_begin():
print "Starting trace (Ctrl-C to dump results)"
def trace_end():
print "Gathering kallsyms data"
get_kallsyms_table()
print_drop_table()
# called from perf, when it finds a corresponding event
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
skbaddr, protocol, location):
slocation = str(location)
try:
drop_log[slocation] = drop_log[slocation] + 1
except:
drop_log[slocation] = 1
| gpl-2.0 |
baylee/django | tests/forms_tests/widget_tests/test_selectdatewidget.py | 35 | 20646 | from datetime import date
from django.forms import DateField, Form, SelectDateWidget
from django.test import override_settings
from django.utils import translation
from django.utils.dates import MONTHS_AP
from .base import WidgetTest
class SelectDateWidgetTest(WidgetTest):
maxDiff = None
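# Show full assertion diffs for the long HTML comparisons below.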
widget = SelectDateWidget(
years=('2007', '2008', '2009', '2010', '2011', '2012', '2013', '2014', '2015', '2016'),
)
def test_render_empty(self):
self.check_html(self.widget, 'mydate', '', html=(
"""
<select name="mydate_month" id="id_mydate_month">
<option value="0">---</option>
<option value="1">January</option>
<option value="2">February</option>
<option value="3">March</option>
<option value="4">April</option>
<option value="5">May</option>
<option value="6">June</option>
<option value="7">July</option>
<option value="8">August</option>
<option value="9">September</option>
<option value="10">October</option>
<option value="11">November</option>
<option value="12">December</option>
</select>
<select name="mydate_day" id="id_mydate_day">
<option value="0">---</option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
<option value="6">6</option>
<option value="7">7</option>
<option value="8">8</option>
<option value="9">9</option>
<option value="10">10</option>
<option value="11">11</option>
<option value="12">12</option>
<option value="13">13</option>
<option value="14">14</option>
<option value="15">15</option>
<option value="16">16</option>
<option value="17">17</option>
<option value="18">18</option>
<option value="19">19</option>
<option value="20">20</option>
<option value="21">21</option>
<option value="22">22</option>
<option value="23">23</option>
<option value="24">24</option>
<option value="25">25</option>
<option value="26">26</option>
<option value="27">27</option>
<option value="28">28</option>
<option value="29">29</option>
<option value="30">30</option>
<option value="31">31</option>
</select>
<select name="mydate_year" id="id_mydate_year">
<option value="0">---</option>
<option value="2007">2007</option>
<option value="2008">2008</option>
<option value="2009">2009</option>
<option value="2010">2010</option>
<option value="2011">2011</option>
<option value="2012">2012</option>
<option value="2013">2013</option>
<option value="2014">2014</option>
<option value="2015">2015</option>
<option value="2016">2016</option>
</select>
"""
))
def test_render_none(self):
"""
Rendering the None or '' values should yield the same output.
"""
self.assertHTMLEqual(
self.widget.render('mydate', None),
self.widget.render('mydate', ''),
)
def test_render_string(self):
self.check_html(self.widget, 'mydate', '2010-04-15', html=(
"""
<select name="mydate_month" id="id_mydate_month">
<option value="0">---</option>
<option value="1">January</option>
<option value="2">February</option>
<option value="3">March</option>
<option value="4" selected="selected">April</option>
<option value="5">May</option>
<option value="6">June</option>
<option value="7">July</option>
<option value="8">August</option>
<option value="9">September</option>
<option value="10">October</option>
<option value="11">November</option>
<option value="12">December</option>
</select>
<select name="mydate_day" id="id_mydate_day">
<option value="0">---</option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
<option value="6">6</option>
<option value="7">7</option>
<option value="8">8</option>
<option value="9">9</option>
<option value="10">10</option>
<option value="11">11</option>
<option value="12">12</option>
<option value="13">13</option>
<option value="14">14</option>
<option value="15" selected="selected">15</option>
<option value="16">16</option>
<option value="17">17</option>
<option value="18">18</option>
<option value="19">19</option>
<option value="20">20</option>
<option value="21">21</option>
<option value="22">22</option>
<option value="23">23</option>
<option value="24">24</option>
<option value="25">25</option>
<option value="26">26</option>
<option value="27">27</option>
<option value="28">28</option>
<option value="29">29</option>
<option value="30">30</option>
<option value="31">31</option>
</select>
<select name="mydate_year" id="id_mydate_year">
<option value="0">---</option>
<option value="2007">2007</option>
<option value="2008">2008</option>
<option value="2009">2009</option>
<option value="2010" selected="selected">2010</option>
<option value="2011">2011</option>
<option value="2012">2012</option>
<option value="2013">2013</option>
<option value="2014">2014</option>
<option value="2015">2015</option>
<option value="2016">2016</option>
</select>
"""
))
def test_render_datetime(self):
self.assertHTMLEqual(
self.widget.render('mydate', date(2010, 4, 15)),
self.widget.render('mydate', '2010-04-15'),
)
def test_render_invalid_date(self):
"""
Invalid dates should still render the failed date.
"""
self.check_html(self.widget, 'mydate', '2010-02-31', html=(
"""
<select name="mydate_month" id="id_mydate_month">
<option value="0">---</option>
<option value="1">January</option>
<option value="2" selected="selected">February</option>
<option value="3">March</option>
<option value="4">April</option>
<option value="5">May</option>
<option value="6">June</option>
<option value="7">July</option>
<option value="8">August</option>
<option value="9">September</option>
<option value="10">October</option>
<option value="11">November</option>
<option value="12">December</option>
</select>
<select name="mydate_day" id="id_mydate_day">
<option value="0">---</option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
<option value="6">6</option>
<option value="7">7</option>
<option value="8">8</option>
<option value="9">9</option>
<option value="10">10</option>
<option value="11">11</option>
<option value="12">12</option>
<option value="13">13</option>
<option value="14">14</option>
<option value="15">15</option>
<option value="16">16</option>
<option value="17">17</option>
<option value="18">18</option>
<option value="19">19</option>
<option value="20">20</option>
<option value="21">21</option>
<option value="22">22</option>
<option value="23">23</option>
<option value="24">24</option>
<option value="25">25</option>
<option value="26">26</option>
<option value="27">27</option>
<option value="28">28</option>
<option value="29">29</option>
<option value="30">30</option>
<option value="31" selected="selected">31</option>
</select>
<select name="mydate_year" id="id_mydate_year">
<option value="0">---</option>
<option value="2007">2007</option>
<option value="2008">2008</option>
<option value="2009">2009</option>
<option value="2010" selected="selected">2010</option>
<option value="2011">2011</option>
<option value="2012">2012</option>
<option value="2013">2013</option>
<option value="2014">2014</option>
<option value="2015">2015</option>
<option value="2016">2016</option>
</select>
"""
))
def test_custom_months(self):
widget = SelectDateWidget(months=MONTHS_AP, years=('2013',))
self.check_html(widget, 'mydate', '', html=(
"""
<select name="mydate_month" id="id_mydate_month">
<option value="0">---</option>
<option value="1">Jan.</option>
<option value="2">Feb.</option>
<option value="3">March</option>
<option value="4">April</option>
<option value="5">May</option>
<option value="6">June</option>
<option value="7">July</option>
<option value="8">Aug.</option>
<option value="9">Sept.</option>
<option value="10">Oct.</option>
<option value="11">Nov.</option>
<option value="12">Dec.</option>
</select>
<select name="mydate_day" id="id_mydate_day">
<option value="0">---</option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
<option value="6">6</option>
<option value="7">7</option>
<option value="8">8</option>
<option value="9">9</option>
<option value="10">10</option>
<option value="11">11</option>
<option value="12">12</option>
<option value="13">13</option>
<option value="14">14</option>
<option value="15">15</option>
<option value="16">16</option>
<option value="17">17</option>
<option value="18">18</option>
<option value="19">19</option>
<option value="20">20</option>
<option value="21">21</option>
<option value="22">22</option>
<option value="23">23</option>
<option value="24">24</option>
<option value="25">25</option>
<option value="26">26</option>
<option value="27">27</option>
<option value="28">28</option>
<option value="29">29</option>
<option value="30">30</option>
<option value="31">31</option>
</select>
<select name="mydate_year" id="id_mydate_year">
<option value="0">---</option>
<option value="2013">2013</option>
</select>
"""
))
def test_selectdate_required(self):
class GetNotRequiredDate(Form):
mydate = DateField(widget=SelectDateWidget, required=False)
class GetRequiredDate(Form):
mydate = DateField(widget=SelectDateWidget, required=True)
self.assertFalse(GetNotRequiredDate().fields['mydate'].widget.is_required)
self.assertTrue(GetRequiredDate().fields['mydate'].widget.is_required)
def test_selectdate_empty_label(self):
w = SelectDateWidget(years=('2014',), empty_label='empty_label')
# Rendering the default state with empty_label set as a string.
self.assertInHTML('<option value="0">empty_label</option>', w.render('mydate', ''), count=3)
w = SelectDateWidget(years=('2014',), empty_label=('empty_year', 'empty_month', 'empty_day'))
# Rendering the default state with an empty_label tuple.
self.assertHTMLEqual(
w.render('mydate', ''),
"""
<select name="mydate_month" id="id_mydate_month">
<option value="0">empty_month</option>
<option value="1">January</option>
<option value="2">February</option>
<option value="3">March</option>
<option value="4">April</option>
<option value="5">May</option>
<option value="6">June</option>
<option value="7">July</option>
<option value="8">August</option>
<option value="9">September</option>
<option value="10">October</option>
<option value="11">November</option>
<option value="12">December</option>
</select>
<select name="mydate_day" id="id_mydate_day">
<option value="0">empty_day</option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
<option value="6">6</option>
<option value="7">7</option>
<option value="8">8</option>
<option value="9">9</option>
<option value="10">10</option>
<option value="11">11</option>
<option value="12">12</option>
<option value="13">13</option>
<option value="14">14</option>
<option value="15">15</option>
<option value="16">16</option>
<option value="17">17</option>
<option value="18">18</option>
<option value="19">19</option>
<option value="20">20</option>
<option value="21">21</option>
<option value="22">22</option>
<option value="23">23</option>
<option value="24">24</option>
<option value="25">25</option>
<option value="26">26</option>
<option value="27">27</option>
<option value="28">28</option>
<option value="29">29</option>
<option value="30">30</option>
<option value="31">31</option>
</select>
<select name="mydate_year" id="id_mydate_year">
<option value="0">empty_year</option>
<option value="2014">2014</option>
</select>
""",
)
with self.assertRaisesMessage(ValueError, 'empty_label list/tuple must have 3 elements.'):
SelectDateWidget(years=('2014',), empty_label=('not enough', 'values'))
@override_settings(USE_L10N=True)
@translation.override('nl')
def test_l10n(self):
w = SelectDateWidget(
years=('2007', '2008', '2009', '2010', '2011', '2012', '2013', '2014', '2015', '2016')
)
self.assertEqual(
w.value_from_datadict({'date_year': '2010', 'date_month': '8', 'date_day': '13'}, {}, 'date'),
'13-08-2010',
)
self.assertHTMLEqual(
w.render('date', '13-08-2010'),
"""
<select name="date_day" id="id_date_day">
<option value="0">---</option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
<option value="6">6</option>
<option value="7">7</option>
<option value="8">8</option>
<option value="9">9</option>
<option value="10">10</option>
<option value="11">11</option>
<option value="12">12</option>
<option value="13" selected="selected">13</option>
<option value="14">14</option>
<option value="15">15</option>
<option value="16">16</option>
<option value="17">17</option>
<option value="18">18</option>
<option value="19">19</option>
<option value="20">20</option>
<option value="21">21</option>
<option value="22">22</option>
<option value="23">23</option>
<option value="24">24</option>
<option value="25">25</option>
<option value="26">26</option>
<option value="27">27</option>
<option value="28">28</option>
<option value="29">29</option>
<option value="30">30</option>
<option value="31">31</option>
</select>
<select name="date_month" id="id_date_month">
<option value="0">---</option>
<option value="1">januari</option>
<option value="2">februari</option>
<option value="3">maart</option>
<option value="4">april</option>
<option value="5">mei</option>
<option value="6">juni</option>
<option value="7">juli</option>
<option value="8" selected="selected">augustus</option>
<option value="9">september</option>
<option value="10">oktober</option>
<option value="11">november</option>
<option value="12">december</option>
</select>
<select name="date_year" id="id_date_year">
<option value="0">---</option>
<option value="2007">2007</option>
<option value="2008">2008</option>
<option value="2009">2009</option>
<option value="2010" selected="selected">2010</option>
<option value="2011">2011</option>
<option value="2012">2012</option>
<option value="2013">2013</option>
<option value="2014">2014</option>
<option value="2015">2015</option>
<option value="2016">2016</option>
</select>
""",
)
# Even with an invalid date, the widget should reflect the entered value (#17401).
self.assertEqual(w.render('mydate', '2010-02-30').count('selected="selected"'), 3)
# Years before 1900 should work.
w = SelectDateWidget(years=('1899',))
self.assertEqual(
w.value_from_datadict({'date_year': '1899', 'date_month': '8', 'date_day': '13'}, {}, 'date'),
'13-08-1899',
)
| bsd-3-clause |
drexly/tonginBlobStore | lib/django/contrib/admin/helpers.py | 79 | 14890 | from __future__ import unicode_literals
import warnings
from django import forms
from django.conf import settings
from django.contrib.admin.templatetags.admin_static import static
from django.contrib.admin.utils import (
display_for_field, flatten_fieldsets, help_text_for_field, label_for_field,
lookup_field,
)
from django.core.exceptions import ObjectDoesNotExist
from django.db.models.fields.related import ManyToManyRel
from django.forms.utils import flatatt
from django.template.defaultfilters import capfirst, linebreaksbr
from django.utils import six
from django.utils.deprecation import (
RemovedInDjango20Warning, RemovedInDjango110Warning,
)
from django.utils.encoding import force_text, smart_text
from django.utils.functional import cached_property
from django.utils.html import conditional_escape, format_html
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
ACTION_CHECKBOX_NAME = '_selected_action'
class ActionForm(forms.Form):
action = forms.ChoiceField(label=_('Action:'))
select_across = forms.BooleanField(label='', required=False, initial=0,
widget=forms.HiddenInput({'class': 'select-across'}))
checkbox = forms.CheckboxInput({'class': 'action-select'}, lambda value: False)
class AdminForm(object):
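"""Wrapper that ties a form to its fieldsets, prepopulated fields, and
readonly fields for rendering in the admin."""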
def __init__(self, form, fieldsets, prepopulated_fields, readonly_fields=None, model_admin=None):
self.form, self.fieldsets = form, fieldsets
self.prepopulated_fields = [{
'field': form[field_name],
'dependencies': [form[f] for f in dependencies]
} for field_name, dependencies in prepopulated_fields.items()]
self.model_admin = model_admin
if readonly_fields is None:
readonly_fields = ()
self.readonly_fields = readonly_fields
def __iter__(self):
for name, options in self.fieldsets:
yield Fieldset(
self.form, name,
readonly_fields=self.readonly_fields,
model_admin=self.model_admin,
**options
)
def _media(self):
media = self.form.media
for fs in self:
media = media + fs.media
return media
media = property(_media)
class Fieldset(object):
def __init__(self, form, name=None, readonly_fields=(), fields=(), classes=(),
description=None, model_admin=None):
self.form = form
self.name, self.fields = name, fields
self.classes = ' '.join(classes)
self.description = description
self.model_admin = model_admin
self.readonly_fields = readonly_fields
def _media(self):
if 'collapse' in self.classes:
extra = '' if settings.DEBUG else '.min'
js = ['vendor/jquery/jquery%s.js' % extra,
'jquery.init.js',
'collapse%s.js' % extra]
return forms.Media(js=[static('admin/js/%s' % url) for url in js])
return forms.Media()
media = property(_media)
def __iter__(self):
for field in self.fields:
yield Fieldline(self.form, field, self.readonly_fields, model_admin=self.model_admin)
class Fieldline(object):
def __init__(self, form, field, readonly_fields=None, model_admin=None):
self.form = form # A django.forms.Form instance
if not hasattr(field, "__iter__") or isinstance(field, six.text_type):
self.fields = [field]
else:
self.fields = field
self.has_visible_field = not all(field in self.form.fields and
self.form.fields[field].widget.is_hidden
for field in self.fields)
self.model_admin = model_admin
if readonly_fields is None:
readonly_fields = ()
self.readonly_fields = readonly_fields
def __iter__(self):
for i, field in enumerate(self.fields):
if field in self.readonly_fields:
yield AdminReadonlyField(self.form, field, is_first=(i == 0),
model_admin=self.model_admin)
else:
yield AdminField(self.form, field, is_first=(i == 0))
def errors(self):
return mark_safe(
'\n'.join(self.form[f].errors.as_ul()
for f in self.fields if f not in self.readonly_fields).strip('\n')
)
class AdminField(object):
def __init__(self, form, field, is_first):
self.field = form[field] # A django.forms.BoundField instance
self.is_first = is_first # Whether this field is first on the line
self.is_checkbox = isinstance(self.field.field.widget, forms.CheckboxInput)
self.is_readonly = False
def label_tag(self):
classes = []
contents = conditional_escape(force_text(self.field.label))
if self.is_checkbox:
classes.append('vCheckboxLabel')
if self.field.field.required:
classes.append('required')
if not self.is_first:
classes.append('inline')
attrs = {'class': ' '.join(classes)} if classes else {}
# checkboxes should not have a label suffix as the checkbox appears
# to the left of the label.
return self.field.label_tag(contents=mark_safe(contents), attrs=attrs,
label_suffix='' if self.is_checkbox else None)
def errors(self):
return mark_safe(self.field.errors.as_ul())
class AdminReadonlyField(object):
def __init__(self, form, field, is_first, model_admin=None):
# Make self.field look a little bit like a field. This means that
# {{ field.name }} must be a useful class name to identify the field.
# For convenience, store other field-related data here too.
if callable(field):
class_name = field.__name__ if field.__name__ != '<lambda>' else ''
else:
class_name = field
if form._meta.labels and class_name in form._meta.labels:
label = form._meta.labels[class_name]
else:
label = label_for_field(field, form._meta.model, model_admin)
if form._meta.help_texts and class_name in form._meta.help_texts:
help_text = form._meta.help_texts[class_name]
else:
help_text = help_text_for_field(class_name, form._meta.model)
self.field = {
'name': class_name,
'label': label,
'help_text': help_text,
'field': field,
}
self.form = form
self.model_admin = model_admin
self.is_first = is_first
self.is_checkbox = False
self.is_readonly = True
self.empty_value_display = model_admin.get_empty_value_display()
def label_tag(self):
attrs = {}
if not self.is_first:
attrs["class"] = "inline"
label = self.field['label']
return format_html('<label{}>{}:</label>',
flatatt(attrs),
capfirst(force_text(label)))
def contents(self):
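# Resolve the readonly field to a display value: booleans render as
# icons, many-to-many values are joined, everything else goes through
# display_for_field(); failures fall back to empty_value_display.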
from django.contrib.admin.templatetags.admin_list import _boolean_icon
field, obj, model_admin = self.field['field'], self.form.instance, self.model_admin
try:
f, attr, value = lookup_field(field, obj, model_admin)
except (AttributeError, ValueError, ObjectDoesNotExist):
result_repr = self.empty_value_display
else:
if f is None:
boolean = getattr(attr, "boolean", False)
if boolean:
result_repr = _boolean_icon(value)
else:
if hasattr(value, "__html__"):
result_repr = value
else:
result_repr = smart_text(value)
if getattr(attr, "allow_tags", False):
warnings.warn(
"Deprecated allow_tags attribute used on %s. "
"Use django.utils.safestring.format_html(), "
"format_html_join(), or mark_safe() instead." % attr,
RemovedInDjango20Warning
)
result_repr = mark_safe(value)
else:
result_repr = linebreaksbr(result_repr)
else:
if isinstance(f.remote_field, ManyToManyRel) and value is not None:
result_repr = ", ".join(map(six.text_type, value.all()))
else:
result_repr = display_for_field(value, f, self.empty_value_display)
return conditional_escape(result_repr)
class InlineAdminFormSet(object):
"""
A wrapper around an inline formset for use in the admin system.
"""
def __init__(self, inline, formset, fieldsets, prepopulated_fields=None,
readonly_fields=None, model_admin=None):
self.opts = inline
self.formset = formset
self.fieldsets = fieldsets
self.model_admin = model_admin
if readonly_fields is None:
readonly_fields = ()
self.readonly_fields = readonly_fields
if prepopulated_fields is None:
prepopulated_fields = {}
self.prepopulated_fields = prepopulated_fields
def __iter__(self):
for form, original in zip(self.formset.initial_forms, self.formset.get_queryset()):
view_on_site_url = self.opts.get_view_on_site_url(original)
yield InlineAdminForm(self.formset, form, self.fieldsets,
self.prepopulated_fields, original, self.readonly_fields,
model_admin=self.opts, view_on_site_url=view_on_site_url)
for form in self.formset.extra_forms:
yield InlineAdminForm(self.formset, form, self.fieldsets,
self.prepopulated_fields, None, self.readonly_fields,
model_admin=self.opts)
yield InlineAdminForm(self.formset, self.formset.empty_form,
self.fieldsets, self.prepopulated_fields, None,
self.readonly_fields, model_admin=self.opts)
def fields(self):
fk = getattr(self.formset, "fk", None)
for i, field_name in enumerate(flatten_fieldsets(self.fieldsets)):
if fk and fk.name == field_name:
continue
if field_name in self.readonly_fields:
yield {
'label': label_for_field(field_name, self.opts.model, self.opts),
'widget': {
'is_hidden': False
},
'required': False,
'help_text': help_text_for_field(field_name, self.opts.model),
}
else:
yield self.formset.form.base_fields[field_name]
def _media(self):
media = self.opts.media + self.formset.media
for fs in self:
media = media + fs.media
return media
media = property(_media)
class InlineAdminForm(AdminForm):
"""
A wrapper around an inline form for use in the admin system.
"""
def __init__(self, formset, form, fieldsets, prepopulated_fields, original,
readonly_fields=None, model_admin=None, view_on_site_url=None):
self.formset = formset
self.model_admin = model_admin
self.original = original
self.show_url = original and view_on_site_url is not None
self.absolute_url = view_on_site_url
super(InlineAdminForm, self).__init__(form, fieldsets, prepopulated_fields,
readonly_fields, model_admin)
@cached_property
def original_content_type_id(self):
warnings.warn(
'InlineAdminForm.original_content_type_id is deprecated and will be '
'removed in Django 1.10. If you were using this attribute to construct '
'the "view on site" URL, use the `absolute_url` attribute instead.',
RemovedInDjango110Warning, stacklevel=2
)
if self.original is not None:
# Since this module gets imported in the application's root package,
# it cannot import models from other applications at the module level.
from django.contrib.contenttypes.models import ContentType
return ContentType.objects.get_for_model(self.original).pk
raise AttributeError
def __iter__(self):
for name, options in self.fieldsets:
yield InlineFieldset(self.formset, self.form, name,
self.readonly_fields, model_admin=self.model_admin, **options)
def needs_explicit_pk_field(self):
# Auto fields are editable (oddly), so need to check for auto or non-editable pk
if self.form._meta.model._meta.has_auto_field or not self.form._meta.model._meta.pk.editable:
return True
# Also search any parents for an auto field. (The pk info is propagated to child
# models so that does not need to be checked in parents.)
for parent in self.form._meta.model._meta.get_parent_list():
if parent._meta.has_auto_field:
return True
return False
def pk_field(self):
return AdminField(self.form, self.formset._pk_field.name, False)
def fk_field(self):
fk = getattr(self.formset, "fk", None)
if fk:
return AdminField(self.form, fk.name, False)
else:
return ""
def deletion_field(self):
from django.forms.formsets import DELETION_FIELD_NAME
return AdminField(self.form, DELETION_FIELD_NAME, False)
def ordering_field(self):
from django.forms.formsets import ORDERING_FIELD_NAME
return AdminField(self.form, ORDERING_FIELD_NAME, False)
class InlineFieldset(Fieldset):
def __init__(self, formset, *args, **kwargs):
self.formset = formset
super(InlineFieldset, self).__init__(*args, **kwargs)
def __iter__(self):
fk = getattr(self.formset, "fk", None)
for field in self.fields:
if fk and fk.name == field:
continue
yield Fieldline(self.form, field, self.readonly_fields,
model_admin=self.model_admin)
class AdminErrorList(forms.utils.ErrorList):
"""
Stores all errors for the form/formsets in an add/change stage view.
"""
def __init__(self, form, inline_formsets):
super(AdminErrorList, self).__init__()
if form.is_bound:
self.extend(form.errors.values())
for inline_formset in inline_formsets:
self.extend(inline_formset.non_form_errors())
for errors_in_inline_form in inline_formset.errors:
self.extend(errors_in_inline_form.values())
| bsd-3-clause |
adit-chandra/tensorflow | tensorflow/tools/docs/parser.py | 3 | 58781 | # Lint as: python2, python3
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Turn Python docstrings into Markdown for TensorFlow documentation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ast
import collections
import functools
import itertools
import json
import os
import re
import astor
import six
from six.moves import zip
from google.protobuf.message import Message as ProtoMessage
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import tf_inspect
from tensorflow.tools.docs import doc_controls
def is_free_function(py_object, full_name, index):
"""Check if input is a free function (and not a class- or static method).
Args:
py_object: The object in question.
full_name: The full name of the object, like `tf.module.symbol`.
index: The {full_name:py_object} dictionary for the public API.
Returns:
True if the object is a stand-alone function, and not part of a class
definition.
"""
if not tf_inspect.isfunction(py_object):
return False
parent_name = six.ensure_str(full_name).rsplit('.', 1)[0]
if tf_inspect.isclass(index[parent_name]):
return False
return True
# A regular expression capturing a python identifier.
IDENTIFIER_RE = r'[a-zA-Z_]\w*'
class TFDocsError(Exception):
pass
class _Errors(object):
"""A collection of errors."""
def __init__(self):
self._errors = []
def log_all(self):
"""Log all the collected errors to the standard error."""
template = 'ERROR:\n output file name: %s\n %s\n\n'
for full_name, message in self._errors:
logging.warn(template, full_name, message)
def append(self, full_name, message):
"""Add an error to the collection.
Args:
full_name: The path to the file in which the error occurred.
message: The message to display with the error.
"""
self._errors.append((full_name, message))
def __len__(self):
return len(self._errors)
def __eq__(self, other):
if not isinstance(other, _Errors):
return False
return self._errors == other._errors # pylint: disable=protected-access
def documentation_path(full_name, is_fragment=False):
"""Returns the file path for the documentation for the given API symbol.
Given the fully qualified name of a library symbol, compute the path to which
to write the documentation for that symbol (relative to a base directory).
Documentation files are organized into directories that mirror the python
module/class structure.
Args:
full_name: Fully qualified name of a library symbol.
is_fragment: If `False` produce a direct markdown link (`tf.a.b.c` -->
`tf/a/b/c.md`). If `True` produce fragment link, `tf.a.b.c` -->
`tf/a/b.md#c`
Returns:
The file path to which to write the documentation for `full_name`.
"""
parts = six.ensure_str(full_name).split('.')
if is_fragment:
parts, fragment = parts[:-1], parts[-1]
result = six.ensure_str(os.path.join(*parts)) + '.md'
if is_fragment:
result = six.ensure_str(result) + '#' + six.ensure_str(fragment)
return result
def _get_raw_docstring(py_object):
"""Get the docs for a given python object.
Args:
py_object: A python object to retrieve the docs for (class, function/method,
or module).
Returns:
The docstring, or the empty string if no docstring was found.
"""
# For object instances, tf_inspect.getdoc does give us the docstring of their
# type, which is not what we want. Only return the docstring if it is useful.
if (tf_inspect.isclass(py_object) or tf_inspect.ismethod(py_object) or
tf_inspect.isfunction(py_object) or tf_inspect.ismodule(py_object) or
isinstance(py_object, property)):
return tf_inspect.getdoc(py_object) or ''
else:
return ''
# A regular expression for capturing a @{symbol} reference.
SYMBOL_REFERENCE_RE = re.compile(
r"""
# Start with a literal "@{".
@\{
# Group at least 1 symbol, not "}".
([^}]+)
# Followed by a closing "}"
\}
""",
flags=re.VERBOSE)
AUTO_REFERENCE_RE = re.compile(r'`([a-zA-Z0-9_.]+?)`')
class ReferenceResolver(object):
"""Class for replacing @{...} references with Markdown links.
Attributes:
current_doc_full_name: A string (or None) indicating the name of the
document currently being processed, so errors can reference the broken
doc.
"""
def __init__(self, duplicate_of, doc_index, is_fragment, py_module_names):
"""Initializes a Reference Resolver.
Args:
duplicate_of: A map from duplicate names to preferred names of API
symbols.
doc_index: A `dict` mapping symbol name strings to objects with `url`
and `title` fields. Used to resolve @{$doc} references in docstrings.
is_fragment: A map from full names to bool for each symbol. If True the
object lives at a page fragment `tf.a.b.c` --> `tf/a/b#c`. If False
object has a page to itself: `tf.a.b.c` --> `tf/a/b/c`.
py_module_names: A list of string names of Python modules.
"""
self._duplicate_of = duplicate_of
self._doc_index = doc_index
self._is_fragment = is_fragment
self._all_names = set(is_fragment.keys())
self._py_module_names = py_module_names
self.current_doc_full_name = None
self._errors = _Errors()
def add_error(self, message):
self._errors.append(self.current_doc_full_name, message)
def log_errors(self):
self._errors.log_all()
def num_errors(self):
return len(self._errors)
@classmethod
def from_visitor(cls, visitor, doc_index, **kwargs):
"""A factory function for building a ReferenceResolver from a visitor.
Args:
visitor: an instance of `DocGeneratorVisitor`
doc_index: a dictionary mapping document names to reference objects with
"title" and "url" fields
**kwargs: all remaining args are passed to the constructor
Returns:
an instance of `ReferenceResolver`
"""
is_fragment = {}
for name, obj in visitor.index.items():
has_page = (
tf_inspect.isclass(obj) or tf_inspect.ismodule(obj) or
is_free_function(obj, name, visitor.index))
is_fragment[name] = not has_page
return cls(
duplicate_of=visitor.duplicate_of,
doc_index=doc_index,
is_fragment=is_fragment,
**kwargs)
@classmethod
def from_json_file(cls, filepath, doc_index):
with open(filepath) as f:
json_dict = json.load(f)
return cls(doc_index=doc_index, **json_dict)
def to_json_file(self, filepath):
"""Converts the RefenceResolver to json and writes it to the specified file.
Args:
filepath: The file path to write the json to.
"""
try:
os.makedirs(os.path.dirname(filepath))
except OSError:
pass
json_dict = {}
for key, value in self.__dict__.items():
# Drop these two fields. `_doc_index` is not serializable. `_all_names` is
# generated by the constructor.
if key in ('_doc_index', '_all_names',
'_errors', 'current_doc_full_name'):
continue
# Strip off any leading underscores on field names as these are not
# recognized by the constructor.
json_dict[key.lstrip('_')] = value
with open(filepath, 'w') as f:
json.dump(json_dict, f, indent=2, sort_keys=True)
def replace_references(self, string, relative_path_to_root):
"""Replace "@{symbol}" references with links to symbol's documentation page.
This functions finds all occurrences of "@{symbol}" in `string`
and replaces them with markdown links to the documentation page
for "symbol".
`relative_path_to_root` is the relative path from the document
that contains the "@{symbol}" reference to the root of the API
documentation that is linked to. If the containing page is part of
the same API docset, `relative_path_to_root` can be set to
`os.path.dirname(documentation_path(name))`, where `name` is the
python name of the object whose documentation page the reference
lives on.
Args:
string: A string in which "@{symbol}" references should be replaced.
relative_path_to_root: The relative path from the containing document to
the root of the API documentation that is being linked to.
Returns:
`string`, with "@{symbol}" references replaced by Markdown links.
"""
def strict_one_ref(match):
try:
return self._one_ref(match, relative_path_to_root)
except TFDocsError as e:
self.add_error(e.message)
return 'BAD_LINK'
string = re.sub(SYMBOL_REFERENCE_RE, strict_one_ref, six.ensure_str(string))
def sloppy_one_ref(match):
try:
return self._one_ref(match, relative_path_to_root)
except TFDocsError:
return match.group(0)
string = re.sub(AUTO_REFERENCE_RE, sloppy_one_ref, string)
return string
def python_link(self, link_text, ref_full_name, relative_path_to_root,
code_ref=True):
"""Resolve a "@{python symbol}" reference to a Markdown link.
This will pick the canonical location for duplicate symbols. The
input to this function should already be stripped of the '@' and
'{}'. This function returns a Markdown link. If `code_ref` is
true, it is assumed that this is a code reference, so the link
text will be rendered as code (using backticks).
`link_text` should refer to a library symbol, starting with 'tf.'.
Args:
link_text: The text of the Markdown link.
ref_full_name: The fully qualified name of the symbol to link to.
relative_path_to_root: The relative path from the location of the current
document to the root of the API documentation.
code_ref: If true (the default), put `link_text` in `...`.
Returns:
A markdown link to the documentation page of `ref_full_name`.
"""
url = self.reference_to_url(ref_full_name, relative_path_to_root)
if code_ref:
link_text = link_text.join(['<code>', '</code>'])
else:
link_text = self._link_text_to_html(link_text)
return '<a href="{}">{}</a>'.format(url, link_text)
@staticmethod
def _link_text_to_html(link_text):
code_re = '`(.*?)`'
return re.sub(code_re, r'<code>\1</code>', six.ensure_str(link_text))
def py_master_name(self, full_name):
"""Return the master name for a Python symbol name."""
return self._duplicate_of.get(full_name, full_name)
def reference_to_url(self, ref_full_name, relative_path_to_root):
"""Resolve a "@{python symbol}" reference to a relative path.
The input to this function should already be stripped of the '@'
and '{}', and its output is only the link, not the full Markdown.
If `ref_full_name` is the name of a class member, method, or property, the
link will point to the page of the containing class, and it will include the
method name as an anchor. For example, `tf.module.MyClass.my_method` will be
translated into a link to
`os.join.path(relative_path_to_root, 'tf/module/MyClass.md#my_method')`.
Args:
ref_full_name: The fully qualified name of the symbol to link to.
relative_path_to_root: The relative path from the location of the current
document to the root of the API documentation.
Returns:
A relative path that links from the documentation page of `from_full_name`
to the documentation page of `ref_full_name`.
Raises:
RuntimeError: If `ref_full_name` is not documented.
TFDocsError: If the @{} syntax cannot be decoded.
"""
master_name = self._duplicate_of.get(ref_full_name, ref_full_name)
# Check whether this link exists
if master_name not in self._all_names:
raise TFDocsError(
'Cannot make link to "%s": Not in index.' % master_name)
ref_path = documentation_path(master_name, self._is_fragment[master_name])
return os.path.join(relative_path_to_root, ref_path)
def _one_ref(self, match, relative_path_to_root):
"""Return a link for a single "@{symbol}" reference."""
string = match.group(1)
# Look for link text after $.
dollar = string.rfind('$')
if dollar > 0: # Ignore $ in first character
link_text = string[dollar + 1:]
string = string[:dollar]
manual_link_text = True
else:
link_text = string
manual_link_text = False
# Handle different types of references.
if six.ensure_str(string).startswith('$'): # Doc reference
return self._doc_link(string, link_text, manual_link_text,
relative_path_to_root)
elif six.ensure_str(string).startswith('tensorflow::'):
# C++ symbol
return self._cc_link(string, link_text, manual_link_text,
relative_path_to_root)
else:
is_python = False
for py_module_name in self._py_module_names:
if string == py_module_name or string.startswith(
six.ensure_str(py_module_name) + '.'):
is_python = True
break
if is_python: # Python symbol
return self.python_link(
link_text,
string,
relative_path_to_root,
code_ref=not manual_link_text)
# Error!
raise TFDocsError('Did not understand "%s"' % match.group(0),
'BROKEN_LINK')
def _doc_link(self, string, link_text, manual_link_text,
relative_path_to_root):
"""Generate a link for a @{$...} reference."""
string = string[1:] # remove leading $
# If string has a #, split that part into `hash_tag`
hash_pos = six.ensure_str(string).find('#')
if hash_pos > -1:
hash_tag = string[hash_pos:]
string = string[:hash_pos]
else:
hash_tag = ''
if string in self._doc_index:
if not manual_link_text: link_text = self._doc_index[string].title
url = os.path.normpath(os.path.join(
relative_path_to_root, '../..', self._doc_index[string].url))
link_text = self._link_text_to_html(link_text)
return '<a href="{}{}">{}</a>'.format(url, hash_tag, link_text)
return self._doc_missing(string, hash_tag, link_text, manual_link_text,
relative_path_to_root)
def _doc_missing(self, string, unused_hash_tag, unused_link_text,
unused_manual_link_text, unused_relative_path_to_root):
"""Generate an error for unrecognized @{$...} references."""
raise TFDocsError('Unknown Document "%s"' % string)
def _cc_link(self, string, link_text, unused_manual_link_text,
relative_path_to_root):
"""Generate a link for a @{tensorflow::...} reference."""
# TODO(josh11b): Fix this hard-coding of paths.
if string == 'tensorflow::ClientSession':
ret = 'class/tensorflow/client-session.md'
elif string == 'tensorflow::Scope':
ret = 'class/tensorflow/scope.md'
elif string == 'tensorflow::Status':
ret = 'class/tensorflow/status.md'
elif string == 'tensorflow::Tensor':
ret = 'class/tensorflow/tensor.md'
elif string == 'tensorflow::ops::Const':
ret = 'namespace/tensorflow/ops.md#const'
else:
raise TFDocsError('C++ reference not understood: "%s"' % string)
# relative_path_to_root gets you to api_docs/python, we go from there
# to api_docs/cc, and then add ret.
cc_relative_path = os.path.normpath(os.path.join(
relative_path_to_root, '../cc', ret))
return '<a href="{}"><code>{}</code></a>'.format(cc_relative_path,
link_text)
# TODO(aselle): Collect these into a big list for all modules and functions
# and make a rosetta stone page.
def _handle_compatibility(doc):
"""Parse and remove compatibility blocks from the main docstring.
Args:
doc: The docstring that contains compatibility notes"
Returns:
a tuple of the modified doc string and a hash that maps from compatibility
note type to the text of the note.
"""
compatibility_notes = {}
match_compatibility = re.compile(r'[ \t]*@compatibility\((\w+)\)\s*\n'
r'((?:[^@\n]*\n)+)'
r'\s*@end_compatibility')
for f in match_compatibility.finditer(doc):
compatibility_notes[f.group(1)] = f.group(2)
return match_compatibility.subn(r'', doc)[0], compatibility_notes
def _gen_pairs(items):
"""Given an list of items [a,b,a,b...], generate pairs [(a,b),(a,b)...].
Args:
items: A list of items (length must be even)
Yields:
The original items, in pairs
"""
assert len(items) % 2 == 0
items = iter(items)
while True:
try:
yield next(items), next(items)
except StopIteration:
return
class _FunctionDetail(
collections.namedtuple('_FunctionDetail', ['keyword', 'header', 'items'])):
"""A simple class to contain function details.
Composed of a "keyword", a possibly empty "header" string, and a possibly
empty
list of key-value pair "items".
"""
__slots__ = []
def __str__(self):
"""Return the original string that represents the function detail."""
parts = [six.ensure_str(self.keyword) + ':\n']
parts.append(self.header)
for key, value in self.items:
parts.append(' ' + six.ensure_str(key) + ': ')
parts.append(value)
return ''.join(parts)
def _parse_function_details(docstring):
r"""Given a docstring, split off the header and parse the function details.
For example the docstring of tf.nn.relu:
'''Computes rectified linear: `max(features, 0)`.
Args:
features: A `Tensor`. Must be one of the following types: `float32`,
`float64`, `int32`, `int64`, `uint8`, `int16`, `int8`, `uint16`,
`half`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `features`.
'''
This is parsed, and returned as:
```
('Computes rectified linear: `max(features, 0)`.\n\n', [
_FunctionDetail(
keyword='Args',
header='',
items=[
('features', ' A `Tensor`. Must be ...'),
('name', ' A name for the operation (optional).\n\n')]),
_FunctionDetail(
keyword='Returns',
header=' A `Tensor`. Has the same type as `features`.',
items=[])
])
```
Args:
docstring: The docstring to parse
Returns:
A (header, function_details) pair, where header is a string and
function_details is a (possibly empty) list of `_FunctionDetail` objects.
"""
detail_keywords = '|'.join([
'Args', 'Arguments', 'Fields', 'Returns', 'Yields', 'Raises', 'Attributes'
])
tag_re = re.compile('(?<=\n)(' + detail_keywords + '):\n', re.MULTILINE)
parts = tag_re.split(docstring)
# The first part is the main docstring
docstring = parts[0]
# Everything else alternates keyword-content
pairs = list(_gen_pairs(parts[1:]))
function_details = []
item_re = re.compile(r'^ ? ?(\*?\*?\w[\w.]*?\s*):\s', re.MULTILINE)
for keyword, content in pairs:
content = item_re.split(six.ensure_str(content))
header = content[0]
items = list(_gen_pairs(content[1:]))
function_details.append(_FunctionDetail(keyword, header, items))
return docstring, function_details
_DocstringInfo = collections.namedtuple('_DocstringInfo', [
'brief', 'docstring', 'function_details', 'compatibility'
])
def _parse_md_docstring(py_object, relative_path_to_root, reference_resolver):
"""Parse the object's docstring and return a `_DocstringInfo`.
This function clears @@'s from the docstring, and replaces @{} references
with markdown links.
For links within the same set of docs, the `relative_path_to_root` for a
docstring on the page for `full_name` can be set to:
```python
relative_path_to_root = os.path.relpath(
path='.', start=os.path.dirname(documentation_path(full_name)) or '.')
```
Args:
py_object: A python object to retrieve the docs for (class, function/method,
or module).
relative_path_to_root: The relative path from the location of the current
document to the root of the Python API documentation. This is used to
compute links for "@{symbol}" references.
reference_resolver: An instance of ReferenceResolver.
Returns:
A _DocstringInfo object, all fields will be empty if no docstring was found.
"""
# TODO(wicke): If this is a partial, use the .func docstring and add a note.
raw_docstring = _get_raw_docstring(py_object)
raw_docstring = reference_resolver.replace_references(
raw_docstring, relative_path_to_root)
atat_re = re.compile(r' *@@[a-zA-Z_.0-9]+ *$')
raw_docstring = '\n'.join(
line for line in six.ensure_str(raw_docstring).split('\n')
if not atat_re.match(six.ensure_str(line)))
docstring, compatibility = _handle_compatibility(raw_docstring)
docstring, function_details = _parse_function_details(docstring)
if 'Generated by: tensorflow/tools/api/generator' in docstring:
docstring = ''
return _DocstringInfo(
docstring.split('\n')[0], docstring, function_details, compatibility)
def _get_arg_spec(func):
"""Extracts signature information from a function or functools.partial object.
For functions, uses `tf_inspect.getfullargspec`. For `functools.partial`
objects, corrects the signature of the underlying function to take into
account the removed arguments.
Args:
func: A function whose signature to extract.
Returns:
An `FullArgSpec` namedtuple `(args, varargs, varkw, defaults, etc.)`,
as returned by `tf_inspect.getfullargspec`.
"""
# getfullargspec does not work for functools.partial objects directly.
if isinstance(func, functools.partial):
argspec = tf_inspect.getfullargspec(func.func)
# Remove the args from the original function that have been used up.
first_default_arg = (
len(argspec.args or []) - len(argspec.defaults or []))
partial_args = len(func.args)
argspec_args = []
if argspec.args:
argspec_args = list(argspec.args[partial_args:])
argspec_defaults = list(argspec.defaults or ())
if argspec.defaults and partial_args > first_default_arg:
argspec_defaults = list(argspec.defaults[partial_args-first_default_arg:])
first_default_arg = max(0, first_default_arg - partial_args)
for kwarg in (func.keywords or []):
if kwarg in (argspec.args or []):
i = argspec_args.index(kwarg)
argspec_args.pop(i)
if i >= first_default_arg:
argspec_defaults.pop(i-first_default_arg)
else:
first_default_arg -= 1
return tf_inspect.FullArgSpec(
args=argspec_args,
varargs=argspec.varargs,
varkw=argspec.varkw,
defaults=tuple(argspec_defaults),
kwonlyargs=[],
kwonlydefaults=None,
annotations={})
else: # Regular function or method, getargspec will work fine.
return tf_inspect.getfullargspec(func)
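# Example (illustrative; `f` is a hypothetical function): a partial consumes
# leading positional args and keyword defaults, so the reported spec shrinks.
#
#   def f(a, b, c=1, d=2):
#     pass
#   spec = _get_arg_spec(functools.partial(f, 0, c=3))
#   # spec.args == ['b', 'd'] and spec.defaults == (2,)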
def _remove_first_line_indent(string):
indent = len(re.match(r'^\s*', six.ensure_str(string)).group(0))
return '\n'.join(
[line[indent:] for line in six.ensure_str(string).split('\n')])
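# Example (illustrative):
#
#   _remove_first_line_indent('  def f():\n    pass\n')
#   # -> 'def f():\n  pass\n'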
PAREN_NUMBER_RE = re.compile(r'^\(([0-9.e-]+)\)')
def _generate_signature(func, reverse_index):
"""Given a function, returns a list of strings representing its args.
This function produces a list of strings representing the arguments to a
python function. It uses tf_inspect.getfullargspec, which
does not generalize well to Python 3.x, which is more flexible in how *args
and **kwargs are handled. This is not a problem in TF, since we have to remain
  compatible with Python 2.7 anyway.
This function uses `__name__` for callables if it is available. This can lead
to poor results for functools.partial and other callable objects.
The returned string is Python code, so if it is included in a Markdown
document, it should be typeset as code (using backticks), or escaped.
Args:
func: A function, method, or functools.partial to extract the signature for.
reverse_index: A map from object ids to canonical full names to use.
Returns:
A list of strings representing the argument signature of `func` as python
code.
"""
args_list = []
argspec = _get_arg_spec(func)
first_arg_with_default = (
len(argspec.args or []) - len(argspec.defaults or []))
# Python documentation skips `self` when printing method signatures.
# Note we cannot test for ismethod here since unbound methods do not register
# as methods (in Python 3).
first_arg = 1 if 'self' in argspec.args[:1] else 0
# Add all args without defaults.
for arg in argspec.args[first_arg:first_arg_with_default]:
args_list.append(arg)
# Add all args with defaults.
if argspec.defaults:
try:
source = _remove_first_line_indent(tf_inspect.getsource(func))
func_ast = ast.parse(source)
ast_defaults = func_ast.body[0].args.defaults
except IOError: # If this is a builtin, getsource fails with IOError
# If we cannot get the source, assume the AST would be equal to the repr
# of the defaults.
ast_defaults = [None] * len(argspec.defaults)
for arg, default, ast_default in zip(
argspec.args[first_arg_with_default:], argspec.defaults, ast_defaults):
if id(default) in reverse_index:
default_text = reverse_index[id(default)]
elif ast_default is not None:
default_text = (
six.ensure_str(astor.to_source(ast_default)).rstrip('\n').replace(
'\t', '\\t').replace('\n', '\\n').replace('"""', "'"))
default_text = PAREN_NUMBER_RE.sub('\\1', six.ensure_str(default_text))
if default_text != repr(default):
# This may be an internal name. If so, handle the ones we know about.
# TODO(wicke): This should be replaced with a lookup in the index.
# TODO(wicke): (replace first ident with tf., check if in index)
internal_names = {
'ops.GraphKeys': 'tf.GraphKeys',
'_ops.GraphKeys': 'tf.GraphKeys',
'init_ops.zeros_initializer': 'tf.zeros_initializer',
'init_ops.ones_initializer': 'tf.ones_initializer',
'saver_pb2.SaverDef': 'tf.train.SaverDef',
}
          full_name_re = r'^%s(\.%s)+' % (IDENTIFIER_RE, IDENTIFIER_RE)
match = re.match(full_name_re, default_text)
if match:
lookup_text = default_text
for internal_name, public_name in six.iteritems(internal_names):
if match.group(0).startswith(internal_name):
lookup_text = public_name + default_text[len(internal_name):]
break
if default_text is lookup_text:
logging.warn(
'WARNING: Using default arg, failed lookup: %s, repr: %r',
default_text, default)
else:
default_text = lookup_text
else:
default_text = repr(default)
args_list.append('%s=%s' % (arg, default_text))
# Add *args and *kwargs.
if argspec.varargs:
args_list.append('*' + six.ensure_str(argspec.varargs))
if argspec.varkw:
args_list.append('**' + six.ensure_str(argspec.varkw))
return args_list
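# Example (illustrative; `call` is a hypothetical function): for
#
#   def call(inputs, training=False, name=None):
#     ...
#
# _generate_signature(call, reverse_index={}) returns something like
# ['inputs', 'training=False', 'name=None'], which callers join with commas
# to render the signature as code.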
def _get_guides_markdown(duplicate_names, guide_index, relative_path):
all_guides = []
for name in duplicate_names:
all_guides.extend(guide_index.get(name, []))
  if not all_guides:
    return ''
prefix = '../' * (relative_path.count('/') + 3)
links = sorted(set([guide_ref.make_md_link(prefix)
for guide_ref in all_guides]))
return 'See the guide%s: %s\n\n' % (
's' if len(links) > 1 else '', ', '.join(links))
def _get_defining_class(py_class, name):
for cls in tf_inspect.getmro(py_class):
if name in cls.__dict__:
return cls
return None
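# Example (illustrative): walking the MRO to find where a member is defined.
#
#   class A(object):
#     def f(self): pass
#   class B(A):
#     pass
#   # _get_defining_class(B, 'f') is A; _get_defining_class(B, 'g') is None.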
class _LinkInfo(
collections.namedtuple(
'_LinkInfo', ['short_name', 'full_name', 'obj', 'doc', 'url'])):
__slots__ = []
def is_link(self):
return True
class _OtherMemberInfo(
collections.namedtuple('_OtherMemberInfo',
['short_name', 'full_name', 'obj', 'doc'])):
__slots__ = []
def is_link(self):
return False
_PropertyInfo = collections.namedtuple(
'_PropertyInfo', ['short_name', 'full_name', 'obj', 'doc'])
_MethodInfo = collections.namedtuple('_MethodInfo', [
'short_name', 'full_name', 'obj', 'doc', 'signature', 'decorators'
])
class _FunctionPageInfo(object):
"""Collects docs For a function Page."""
def __init__(self, full_name):
self._full_name = full_name
self._defined_in = None
self._aliases = None
self._doc = None
self._guides = None
self._signature = None
self._decorators = []
def for_function(self):
return True
def for_class(self):
return False
def for_module(self):
return False
@property
def full_name(self):
return self._full_name
@property
def short_name(self):
return six.ensure_str(self._full_name).split('.')[-1]
@property
def defined_in(self):
return self._defined_in
def set_defined_in(self, defined_in):
assert self.defined_in is None
self._defined_in = defined_in
@property
def aliases(self):
return self._aliases
def set_aliases(self, aliases):
assert self.aliases is None
self._aliases = aliases
@property
def doc(self):
return self._doc
def set_doc(self, doc):
assert self.doc is None
self._doc = doc
@property
def guides(self):
return self._guides
def set_guides(self, guides):
assert self.guides is None
self._guides = guides
@property
def signature(self):
return self._signature
def set_signature(self, function, reverse_index):
"""Attach the function's signature.
Args:
function: The python function being documented.
reverse_index: A map from object ids in the index to full names.
"""
assert self.signature is None
self._signature = _generate_signature(function, reverse_index)
@property
def decorators(self):
return list(self._decorators)
def add_decorator(self, dec):
self._decorators.append(dec)
def get_metadata_html(self):
return _Metadata(self.full_name).build_html()
class _ClassPageInfo(object):
"""Collects docs for a class page.
Attributes:
full_name: The fully qualified name of the object at the master
location. Aka `master_name`. For example: `tf.nn.sigmoid`.
short_name: The last component of the `full_name`. For example: `sigmoid`.
defined_in: The path to the file where this object is defined.
aliases: The list of all fully qualified names for the locations where the
object is visible in the public api. This includes the master location.
doc: A `_DocstringInfo` object representing the object's docstring (can be
created with `_parse_md_docstring`).
guides: A markdown string, of back links pointing to the api_guides that
reference this object.
bases: A list of `_LinkInfo` objects pointing to the docs for the parent
classes.
properties: A list of `_PropertyInfo` objects documenting the class'
properties (attributes that use `@property`).
methods: A list of `_MethodInfo` objects documenting the class' methods.
classes: A list of `_LinkInfo` objects pointing to docs for any nested
classes.
other_members: A list of `_OtherMemberInfo` objects documenting any other
      objects defined inside the class object (mostly enum-style fields).
"""
def __init__(self, full_name):
self._full_name = full_name
self._defined_in = None
self._aliases = None
self._doc = None
self._guides = None
self._namedtuplefields = None
self._bases = None
self._properties = []
self._methods = []
self._classes = []
self._other_members = []
def for_function(self):
"""Returns true if this object documents a function."""
return False
def for_class(self):
"""Returns true if this object documents a class."""
return True
def for_module(self):
"""Returns true if this object documents a module."""
return False
@property
def full_name(self):
"""Returns the documented object's fully qualified name."""
return self._full_name
@property
def short_name(self):
"""Returns the documented object's short name."""
return six.ensure_str(self._full_name).split('.')[-1]
@property
def defined_in(self):
"""Returns the path to the file where the documented object is defined."""
return self._defined_in
def set_defined_in(self, defined_in):
"""Sets the `defined_in` path."""
assert self.defined_in is None
self._defined_in = defined_in
@property
def aliases(self):
"""Returns a list of all full names for the documented object."""
return self._aliases
def set_aliases(self, aliases):
"""Sets the `aliases` list.
Args:
aliases: A list of strings. Containing all the object's full names.
"""
assert self.aliases is None
self._aliases = aliases
@property
def doc(self):
"""Returns a `_DocstringInfo` created from the object's docstring."""
return self._doc
def set_doc(self, doc):
"""Sets the `doc` field.
Args:
doc: An instance of `_DocstringInfo`.
"""
assert self.doc is None
self._doc = doc
@property
def guides(self):
"""Returns a markdown string containing backlinks to relevant api_guides."""
return self._guides
def set_guides(self, guides):
"""Sets the `guides` field.
Args:
guides: A markdown string containing backlinks to all the api_guides that
link to the documented object.
"""
assert self.guides is None
self._guides = guides
@property
def namedtuplefields(self):
return self._namedtuplefields
def set_namedtuplefields(self, py_class):
if issubclass(py_class, tuple):
if all(
hasattr(py_class, attr)
for attr in ('_asdict', '_fields', '_make', '_replace')):
self._namedtuplefields = py_class._fields
@property
def bases(self):
"""Returns a list of `_LinkInfo` objects pointing to the class' parents."""
return self._bases
def _set_bases(self, relative_path, parser_config):
"""Builds the `bases` attribute, to document this class' parent-classes.
    This method sets `bases` to a list of `_LinkInfo` objects pointing to the
doc pages for the class' parents.
Args:
relative_path: The relative path from the doc this object describes to
the documentation root.
parser_config: An instance of `ParserConfig`.
"""
bases = []
obj = parser_config.py_name_to_object(self.full_name)
for base in obj.__bases__:
base_full_name = parser_config.reverse_index.get(id(base), None)
if base_full_name is None:
continue
base_doc = _parse_md_docstring(base, relative_path,
parser_config.reference_resolver)
base_url = parser_config.reference_resolver.reference_to_url(
base_full_name, relative_path)
link_info = _LinkInfo(
short_name=six.ensure_str(base_full_name).split('.')[-1],
full_name=base_full_name,
obj=base,
doc=base_doc,
url=base_url)
bases.append(link_info)
self._bases = bases
@property
def properties(self):
"""Returns a list of `_PropertyInfo` describing the class' properties."""
props_dict = {prop.short_name: prop for prop in self._properties}
props = []
if self.namedtuplefields:
for field in self.namedtuplefields:
props.append(props_dict.pop(field))
props.extend(sorted(props_dict.values()))
return props
def _add_property(self, short_name, full_name, obj, doc):
"""Adds a `_PropertyInfo` entry to the `properties` list.
Args:
short_name: The property's short name.
full_name: The property's fully qualified name.
obj: The property object itself
doc: The property's parsed docstring, a `_DocstringInfo`.
"""
    # Hide useless namedtuple docstrings.
if re.match('Alias for field number [0-9]+', six.ensure_str(doc.docstring)):
doc = doc._replace(docstring='', brief='')
property_info = _PropertyInfo(short_name, full_name, obj, doc)
self._properties.append(property_info)
@property
def methods(self):
"""Returns a list of `_MethodInfo` describing the class' methods."""
return self._methods
def _add_method(self, short_name, full_name, obj, doc, signature, decorators):
"""Adds a `_MethodInfo` entry to the `methods` list.
Args:
short_name: The method's short name.
full_name: The method's fully qualified name.
obj: The method object itself
doc: The method's parsed docstring, a `_DocstringInfo`
signature: The method's parsed signature (see: `_generate_signature`)
decorators: A list of strings describing the decorators that should be
mentioned on the object's docs page.
"""
method_info = _MethodInfo(short_name, full_name, obj, doc, signature,
decorators)
self._methods.append(method_info)
@property
def classes(self):
"""Returns a list of `_LinkInfo` pointing to any nested classes."""
return self._classes
def get_metadata_html(self):
meta_data = _Metadata(self.full_name)
for item in itertools.chain(self.classes, self.properties, self.methods,
self.other_members):
meta_data.append(item)
return meta_data.build_html()
def _add_class(self, short_name, full_name, obj, doc, url):
"""Adds a `_LinkInfo` for a nested class to `classes` list.
Args:
short_name: The class' short name.
full_name: The class' fully qualified name.
obj: The class object itself
doc: The class' parsed docstring, a `_DocstringInfo`
url: A url pointing to where the nested class is documented.
"""
page_info = _LinkInfo(short_name, full_name, obj, doc, url)
self._classes.append(page_info)
@property
def other_members(self):
"""Returns a list of `_OtherMemberInfo` describing any other contents."""
return self._other_members
def _add_other_member(self, short_name, full_name, obj, doc):
"""Adds an `_OtherMemberInfo` entry to the `other_members` list.
Args:
short_name: The class' short name.
full_name: The class' fully qualified name.
obj: The class object itself
doc: The class' parsed docstring, a `_DocstringInfo`
"""
other_member_info = _OtherMemberInfo(short_name, full_name, obj, doc)
self._other_members.append(other_member_info)
def collect_docs_for_class(self, py_class, parser_config):
"""Collects information necessary specifically for a class's doc page.
Mainly, this is details about the class's members.
Args:
py_class: The class object being documented
parser_config: An instance of ParserConfig.
"""
self.set_namedtuplefields(py_class)
doc_path = documentation_path(self.full_name)
relative_path = os.path.relpath(
path='.', start=os.path.dirname(doc_path) or '.')
self._set_bases(relative_path, parser_config)
for short_name in parser_config.tree[self.full_name]:
# Remove builtin members that we never want to document.
if short_name in [
'__class__', '__base__', '__weakref__', '__doc__', '__module__',
'__dict__', '__abstractmethods__', '__slots__', '__getnewargs__',
'__str__', '__repr__', '__hash__', '__reduce__'
]:
continue
child_name = '.'.join([self.full_name, short_name])
child = parser_config.py_name_to_object(child_name)
# Don't document anything that is defined in object or by protobuf.
defining_class = _get_defining_class(py_class, short_name)
if defining_class in [object, type, tuple, BaseException, Exception]:
continue
# The following condition excludes most protobuf-defined symbols.
if (defining_class and
defining_class.__name__ in ['CMessage', 'Message', 'MessageMeta']):
continue
# TODO(markdaoust): Add a note in child docs showing the defining class.
if doc_controls.should_skip_class_attr(py_class, short_name):
continue
child_doc = _parse_md_docstring(child, relative_path,
parser_config.reference_resolver)
if isinstance(child, property):
self._add_property(short_name, child_name, child, child_doc)
elif tf_inspect.isclass(child):
if defining_class is None:
continue
url = parser_config.reference_resolver.reference_to_url(
child_name, relative_path)
self._add_class(short_name, child_name, child, child_doc, url)
elif (tf_inspect.ismethod(child) or tf_inspect.isfunction(child) or
tf_inspect.isroutine(child)):
if defining_class is None:
continue
# Omit methods defined by namedtuple.
original_method = defining_class.__dict__[short_name]
if (hasattr(original_method, '__module__') and six.ensure_str(
(original_method.__module__ or '')).startswith('namedtuple')):
continue
# Some methods are often overridden without documentation. Because it's
# obvious what they do, don't include them in the docs if there's no
# docstring.
if not child_doc.brief.strip() and short_name in [
'__del__', '__copy__'
]:
continue
try:
child_signature = _generate_signature(child,
parser_config.reverse_index)
except TypeError:
# If this is a (dynamically created) slot wrapper, tf_inspect will
# raise typeerror when trying to get to the code. Ignore such
# functions.
continue
child_decorators = []
try:
if isinstance(py_class.__dict__[short_name], classmethod):
child_decorators.append('classmethod')
except KeyError:
pass
try:
if isinstance(py_class.__dict__[short_name], staticmethod):
child_decorators.append('staticmethod')
except KeyError:
pass
self._add_method(short_name, child_name, child, child_doc,
child_signature, child_decorators)
else:
# Exclude members defined by protobuf that are useless
if issubclass(py_class, ProtoMessage):
if (six.ensure_str(short_name).endswith('_FIELD_NUMBER') or
short_name in ['__slots__', 'DESCRIPTOR']):
continue
# TODO(wicke): We may want to also remember the object itself.
self._add_other_member(short_name, child_name, child, child_doc)
class _ModulePageInfo(object):
"""Collects docs for a module page."""
def __init__(self, full_name):
self._full_name = full_name
self._defined_in = None
self._aliases = None
self._doc = None
self._guides = None
self._modules = []
self._classes = []
self._functions = []
self._other_members = []
def for_function(self):
return False
def for_class(self):
return False
def for_module(self):
return True
@property
def full_name(self):
return self._full_name
@property
def short_name(self):
return six.ensure_str(self._full_name).split('.')[-1]
@property
def defined_in(self):
return self._defined_in
def set_defined_in(self, defined_in):
assert self.defined_in is None
self._defined_in = defined_in
@property
def aliases(self):
return self._aliases
def set_aliases(self, aliases):
assert self.aliases is None
self._aliases = aliases
@property
def doc(self):
return self._doc
def set_doc(self, doc):
assert self.doc is None
self._doc = doc
@property
def guides(self):
return self._guides
def set_guides(self, guides):
assert self.guides is None
self._guides = guides
@property
def modules(self):
return self._modules
def _add_module(self, short_name, full_name, obj, doc, url):
self._modules.append(_LinkInfo(short_name, full_name, obj, doc, url))
@property
def classes(self):
return self._classes
def _add_class(self, short_name, full_name, obj, doc, url):
self._classes.append(_LinkInfo(short_name, full_name, obj, doc, url))
@property
def functions(self):
return self._functions
def _add_function(self, short_name, full_name, obj, doc, url):
self._functions.append(_LinkInfo(short_name, full_name, obj, doc, url))
@property
def other_members(self):
return self._other_members
def _add_other_member(self, short_name, full_name, obj, doc):
self._other_members.append(
_OtherMemberInfo(short_name, full_name, obj, doc))
def get_metadata_html(self):
meta_data = _Metadata(self.full_name)
    # Objects with their own pages are not added to the metadata list for the
    # module; the module only has a link to the object page. No docs.
for item in self.other_members:
meta_data.append(item)
return meta_data.build_html()
def collect_docs_for_module(self, parser_config):
"""Collect information necessary specifically for a module's doc page.
Mainly this is information about the members of the module.
Args:
parser_config: An instance of ParserConfig.
"""
relative_path = os.path.relpath(
path='.',
start=os.path.dirname(documentation_path(self.full_name)) or '.')
member_names = parser_config.tree.get(self.full_name, [])
for name in member_names:
if name in ['__builtins__', '__doc__', '__file__',
'__name__', '__path__', '__package__',
'__cached__', '__loader__', '__spec__']:
continue
member_full_name = six.ensure_str(self.full_name) + '.' + six.ensure_str(
name) if self.full_name else name
member = parser_config.py_name_to_object(member_full_name)
member_doc = _parse_md_docstring(member, relative_path,
parser_config.reference_resolver)
url = parser_config.reference_resolver.reference_to_url(
member_full_name, relative_path)
if tf_inspect.ismodule(member):
self._add_module(name, member_full_name, member, member_doc, url)
elif tf_inspect.isclass(member):
self._add_class(name, member_full_name, member, member_doc, url)
elif tf_inspect.isfunction(member):
self._add_function(name, member_full_name, member, member_doc, url)
else:
self._add_other_member(name, member_full_name, member, member_doc)
class ParserConfig(object):
"""Stores all indexes required to parse the docs."""
def __init__(self, reference_resolver, duplicates, duplicate_of, tree, index,
reverse_index, guide_index, base_dir):
"""Object with the common config for docs_for_object() calls.
Args:
reference_resolver: An instance of ReferenceResolver.
duplicates: A `dict` mapping fully qualified names to a set of all
aliases of this name. This is used to automatically generate a list of
all aliases for each name.
duplicate_of: A map from duplicate names to preferred names of API
symbols.
tree: A `dict` mapping a fully qualified name to the names of all its
members. Used to populate the members section of a class or module page.
index: A `dict` mapping full names to objects.
reverse_index: A `dict` mapping object ids to full names.
guide_index: A `dict` mapping symbol name strings to objects with a
`make_md_link()` method.
base_dir: A base path that is stripped from file locations written to the
docs.
"""
self.reference_resolver = reference_resolver
self.duplicates = duplicates
self.duplicate_of = duplicate_of
self.tree = tree
self.reverse_index = reverse_index
self.index = index
self.guide_index = guide_index
self.base_dir = base_dir
self.defined_in_prefix = 'tensorflow/'
self.code_url_prefix = (
'/code/stable/tensorflow/') # pylint: disable=line-too-long
def py_name_to_object(self, full_name):
"""Return the Python object for a Python symbol name."""
return self.index[full_name]
def docs_for_object(full_name, py_object, parser_config):
"""Return a PageInfo object describing a given object from the TF API.
This function uses _parse_md_docstring to parse the docs pertaining to
`object`.
This function resolves '@{symbol}' references in the docstrings into links to
the appropriate location. It also adds a list of alternative names for the
symbol automatically.
It assumes that the docs for each object live in a file given by
`documentation_path`, and that relative links to files within the
documentation are resolvable.
Args:
full_name: The fully qualified name of the symbol to be
documented.
py_object: The Python object to be documented. Its documentation is sourced
from `py_object`'s docstring.
parser_config: A ParserConfig object.
Returns:
Either a `_FunctionPageInfo`, `_ClassPageInfo`, or a `_ModulePageInfo`
depending on the type of the python object being documented.
Raises:
RuntimeError: If an object is encountered for which we don't know how
to make docs.
"""
# Which other aliases exist for the object referenced by full_name?
master_name = parser_config.reference_resolver.py_master_name(full_name)
duplicate_names = parser_config.duplicates.get(master_name, [full_name])
# TODO(wicke): Once other pieces are ready, enable this also for partials.
if (tf_inspect.ismethod(py_object) or tf_inspect.isfunction(py_object) or
# Some methods in classes from extensions come in as routines.
tf_inspect.isroutine(py_object)):
page_info = _FunctionPageInfo(master_name)
page_info.set_signature(py_object, parser_config.reverse_index)
elif tf_inspect.isclass(py_object):
page_info = _ClassPageInfo(master_name)
page_info.collect_docs_for_class(py_object, parser_config)
elif tf_inspect.ismodule(py_object):
page_info = _ModulePageInfo(master_name)
page_info.collect_docs_for_module(parser_config)
else:
raise RuntimeError('Cannot make docs for object %s: %r' % (full_name,
py_object))
relative_path = os.path.relpath(
path='.', start=os.path.dirname(documentation_path(full_name)) or '.')
page_info.set_doc(_parse_md_docstring(
py_object, relative_path, parser_config.reference_resolver))
page_info.set_aliases(duplicate_names)
page_info.set_guides(_get_guides_markdown(
duplicate_names, parser_config.guide_index, relative_path))
page_info.set_defined_in(_get_defined_in(py_object, parser_config))
return page_info
class _PythonBuiltin(object):
"""This class indicated that the object in question is a python builtin.
This can be used for the `defined_in` slot of the `PageInfo` objects.
"""
def is_builtin(self):
return True
def is_python_file(self):
return False
def is_generated_file(self):
return False
def __str__(self):
return 'This is an alias for a Python built-in.\n\n'
class _PythonFile(object):
"""This class indicates that the object is defined in a regular python file.
This can be used for the `defined_in` slot of the `PageInfo` objects.
"""
def __init__(self, path, parser_config):
self.path = path
self.path_prefix = parser_config.defined_in_prefix
self.code_url_prefix = parser_config.code_url_prefix
def is_builtin(self):
return False
def is_python_file(self):
return True
def is_generated_file(self):
return False
def __str__(self):
return 'Defined in [`{prefix}{path}`]({code_prefix}{path}).\n\n'.format(
path=self.path, prefix=self.path_prefix,
code_prefix=self.code_url_prefix)
class _ProtoFile(object):
"""This class indicates that the object is defined in a .proto file.
This can be used for the `defined_in` slot of the `PageInfo` objects.
"""
def __init__(self, path, parser_config):
self.path = path
self.path_prefix = parser_config.defined_in_prefix
self.code_url_prefix = parser_config.code_url_prefix
def is_builtin(self):
return False
def is_python_file(self):
return False
def is_generated_file(self):
return False
def __str__(self):
return 'Defined in [`{prefix}{path}`]({code_prefix}{path}).\n\n'.format(
path=self.path, prefix=self.path_prefix,
code_prefix=self.code_url_prefix)
class _GeneratedFile(object):
"""This class indicates that the object is defined in a generated python file.
Generated files should not be linked to directly.
This can be used for the `defined_in` slot of the `PageInfo` objects.
"""
def __init__(self, path, parser_config):
self.path = path
self.path_prefix = parser_config.defined_in_prefix
def is_builtin(self):
return False
def is_python_file(self):
return False
def is_generated_file(self):
return True
def __str__(self):
return 'Defined in generated file: `%s%s`.\n\n' % (self.path_prefix,
self.path)
def _get_defined_in(py_object, parser_config):
"""Returns a description of where the passed in python object was defined.
Args:
py_object: The Python object.
parser_config: A ParserConfig object.
Returns:
Either a `_PythonBuiltin`, `_PythonFile`, or a `_GeneratedFile`
"""
# Every page gets a note about where this object is defined
# TODO(wicke): If py_object is decorated, get the decorated object instead.
# TODO(wicke): Only use decorators that support this in TF.
try:
path = os.path.relpath(path=tf_inspect.getfile(py_object),
start=parser_config.base_dir)
except TypeError: # getfile throws TypeError if py_object is a builtin.
return _PythonBuiltin()
# TODO(wicke): If this is a generated file, link to the source instead.
# TODO(wicke): Move all generated files to a generated/ directory.
# TODO(wicke): And make their source file predictable from the file name.
# In case this is compiled, point to the original
if six.ensure_str(path).endswith('.pyc'):
path = path[:-1]
# Never include links outside this code base.
if six.ensure_str(path).startswith('..') or re.search(r'\b_api\b',
six.ensure_str(path)):
return None
if re.match(r'.*/gen_[^/]*\.py$', six.ensure_str(path)):
return _GeneratedFile(path, parser_config)
if 'genfiles' in path or 'tools/api/generator' in path:
return _GeneratedFile(path, parser_config)
elif re.match(r'.*_pb2\.py$', six.ensure_str(path)):
# The _pb2.py files all appear right next to their defining .proto file.
return _ProtoFile(six.ensure_str(path[:-7]) + '.proto', parser_config)
else:
return _PythonFile(path, parser_config)
# TODO(markdaoust): This should just parse, pretty_docs should generate the md.
def generate_global_index(library_name, index, reference_resolver):
"""Given a dict of full names to python objects, generate an index page.
The index page generated contains a list of links for all symbols in `index`
that have their own documentation page.
Args:
library_name: The name for the documented library to use in the title.
index: A dict mapping full names to python objects.
reference_resolver: An instance of ReferenceResolver.
Returns:
A string containing an index page as Markdown.
"""
symbol_links = []
for full_name, py_object in six.iteritems(index):
if (tf_inspect.ismodule(py_object) or tf_inspect.isfunction(py_object) or
tf_inspect.isclass(py_object)):
# In Python 3, unbound methods are functions, so eliminate those.
if tf_inspect.isfunction(py_object):
if full_name.count('.') == 0:
parent_name = ''
else:
parent_name = full_name[:full_name.rfind('.')]
if parent_name in index and tf_inspect.isclass(index[parent_name]):
# Skip methods (=functions with class parents).
continue
symbol_links.append((
full_name, reference_resolver.python_link(full_name, full_name, '.')))
lines = ['# All symbols in %s' % library_name, '']
for _, link in sorted(symbol_links, key=lambda x: x[0]):
lines.append('* %s' % link)
  # TODO(markdaoust): use a _ModulePageInfo -> pretty_docs.build_md_page()
return '\n'.join(lines)
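# Example (illustrative; names are hypothetical): for
# index = {'tf': tf_module, 'tf.foo': foo_function}, the generated page looks
# roughly like:
#
#   # All symbols in TensorFlow
#
#   * <a href="./tf.md"><code>tf</code></a>
#   * <a href="./tf/foo.md"><code>tf.foo</code></a>
#
# (the exact anchor markup comes from ReferenceResolver.python_link).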
class _Metadata(object):
"""A class for building a page's Metadata block.
Attributes:
name: The name of the page being described by the Metadata block.
version: The source version.
"""
def __init__(self, name, version='Stable'):
"""Creates a Metadata builder.
Args:
name: The name of the page being described by the Metadata block.
version: The source version.
"""
self.name = name
self.version = version
self._content = []
def append(self, item):
"""Adds an item from the page to the Metadata block.
Args:
item: The parsed page section to add.
"""
self._content.append(item.short_name)
def build_html(self):
"""Returns the Metadata block as an Html string."""
schema = 'http://developers.google.com/ReferenceObject'
parts = ['<div itemscope itemtype="%s">' % schema]
parts.append('<meta itemprop="name" content="%s" />' % self.name)
parts.append('<meta itemprop="path" content="%s" />' % self.version)
for item in self._content:
parts.append('<meta itemprop="property" content="%s"/>' % item)
parts.extend(['</div>', ''])
return '\n'.join(parts)
| apache-2.0 |
franosincic/edx-platform | common/djangoapps/student/admin.py | 22 | 6073 | """ Django admin pages for student app """
from django import forms
from django.contrib.auth.models import User
from ratelimitbackend import admin
from xmodule.modulestore.django import modulestore
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from config_models.admin import ConfigurationModelAdmin
from student.models import (
UserProfile, UserTestGroup, CourseEnrollmentAllowed, DashboardConfiguration, CourseEnrollment, Registration,
PendingNameChange, CourseAccessRole, LinkedInAddToProfileConfiguration
)
from student.roles import REGISTERED_ACCESS_ROLES
class CourseAccessRoleForm(forms.ModelForm):
"""Form for adding new Course Access Roles view the Django Admin Panel."""
class Meta(object):
model = CourseAccessRole
fields = '__all__'
email = forms.EmailField(required=True)
COURSE_ACCESS_ROLES = [(role_name, role_name) for role_name in REGISTERED_ACCESS_ROLES.keys()]
role = forms.ChoiceField(choices=COURSE_ACCESS_ROLES)
def clean_course_id(self):
"""
        Check the course-id format and that the course exists in the modulestore.
This field can be null.
"""
if self.cleaned_data['course_id']:
course_id = self.cleaned_data['course_id']
try:
course_key = CourseKey.from_string(course_id)
except InvalidKeyError:
raise forms.ValidationError(u"Invalid CourseID. Please check the format and re-try.")
if not modulestore().has_course(course_key):
raise forms.ValidationError(u"Cannot find course with id {} in the modulestore".format(course_id))
return course_key
return None
def clean_org(self):
"""If org and course-id exists then Check organization name
against the given course.
"""
if self.cleaned_data.get('course_id') and self.cleaned_data['org']:
org = self.cleaned_data['org']
org_name = self.cleaned_data.get('course_id').org
if org.lower() != org_name.lower():
raise forms.ValidationError(
u"Org name {} is not valid. Valid name is {}.".format(
org, org_name
)
)
return self.cleaned_data['org']
def clean_email(self):
"""
        Look up the user object for the given email address.
"""
email = self.cleaned_data['email']
try:
user = User.objects.get(email=email)
except Exception:
raise forms.ValidationError(
u"Email does not exist. Could not find {email}. Please re-enter email address".format(
email=email
)
)
return user
def clean(self):
"""
        Check whether an identical access-role record already exists in the db.
"""
cleaned_data = super(CourseAccessRoleForm, self).clean()
if not self.errors:
if CourseAccessRole.objects.filter(
user=cleaned_data.get("email"),
org=cleaned_data.get("org"),
course_id=cleaned_data.get("course_id"),
role=cleaned_data.get("role")
).exists():
raise forms.ValidationError("Duplicate Record.")
return cleaned_data
def __init__(self, *args, **kwargs):
super(CourseAccessRoleForm, self).__init__(*args, **kwargs)
if self.instance.user_id:
self.fields['email'].initial = self.instance.user.email
class CourseAccessRoleAdmin(admin.ModelAdmin):
"""Admin panel for the Course Access Role. """
form = CourseAccessRoleForm
raw_id_fields = ("user",)
exclude = ("user",)
fieldsets = (
(None, {
'fields': ('email', 'course_id', 'org', 'role',)
}),
)
list_display = (
'id', 'user', 'org', 'course_id', 'role',
)
search_fields = (
'id', 'user__username', 'user__email', 'org', 'course_id', 'role',
)
def save_model(self, request, obj, form, change):
obj.user = form.cleaned_data['email']
super(CourseAccessRoleAdmin, self).save_model(request, obj, form, change)
class LinkedInAddToProfileConfigurationAdmin(admin.ModelAdmin):
"""Admin interface for the LinkedIn Add to Profile configuration. """
class Meta(object):
model = LinkedInAddToProfileConfiguration
# Exclude deprecated fields
exclude = ('dashboard_tracking_code',)
class CourseEnrollmentAdmin(admin.ModelAdmin):
""" Admin interface for the CourseEnrollment model. """
list_display = ('id', 'course_id', 'mode', 'user', 'is_active',)
list_filter = ('mode', 'is_active',)
raw_id_fields = ('user',)
search_fields = ('course_id', 'mode', 'user__username',)
def queryset(self, request):
return super(CourseEnrollmentAdmin, self).queryset(request).select_related('user')
class Meta(object):
model = CourseEnrollment
class UserProfileAdmin(admin.ModelAdmin):
""" Admin interface for UserProfile model. """
list_display = ('user', 'name',)
raw_id_fields = ('user',)
search_fields = ('user__username', 'user__first_name', 'user__last_name', 'user__email', 'name',)
def get_readonly_fields(self, request, obj=None):
# The user field should not be editable for an existing user profile.
if obj:
return self.readonly_fields + ('user',)
return self.readonly_fields
class Meta(object):
model = UserProfile
admin.site.register(UserTestGroup)
admin.site.register(CourseEnrollmentAllowed)
admin.site.register(Registration)
admin.site.register(PendingNameChange)
admin.site.register(CourseAccessRole, CourseAccessRoleAdmin)
admin.site.register(DashboardConfiguration, ConfigurationModelAdmin)
admin.site.register(LinkedInAddToProfileConfiguration, LinkedInAddToProfileConfigurationAdmin)
admin.site.register(CourseEnrollment, CourseEnrollmentAdmin)
admin.site.register(UserProfile, UserProfileAdmin)
| agpl-3.0 |
waxkinetic/fabcloudkit | fabcloudkit/build_tools/python_build.py | 1 | 7221 | """
fabcloudkit
:copyright: (c) 2013 by Rick Bohrer.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
# pypi
from fabric.context_managers import cd, prefix, settings
from fabric.operations import run, sudo
from fabric.state import env
# package
from fabcloudkit import ctx
from ..build import build_repo, BuildInfo
from ..internal import *
from ..toolbase import Tool
from ..tool.virtualenv import VirtualEnvTool
from ..util import copy_file_from
class PythonBuildTool(Tool):
def build(self, repos, reference_repo=None, post_build=None, interpreter=None, tarball=False, unittest=None):
"""Performs a 'python' build.
Performs a python build by running setup.py in each identified repo. If desired, repos can
be refreshed first (e.g., via git pull).
:param repos:
specifies the list of repos in which to run setup.py.
:param reference_repo:
optional; the reference repo from which to retrieve the head commit id.
this id used as a component of the build name. if not specified, the
first repo in the context is used.
:param post_build:
a list of post-build commands. a list of dictionaries. each dict must
contain the key "command" that specifies the command to execute. optionally,
it may include a "sudo" value of [True|False], and an "ignore_fail" value
of [True|False].
:param interpreter:
specifies the Python interpreter to use in the build's virtualenv. if
not specified, the operating system default interpreter is used. note
that the interpreter must already exist on the system.
:param tarball:
True to create a tarball of the build; this is required if any other
instance will use "copy_from".
:param unittest:
TBD
:return:
            self; the new build name is stored in the role env as "build_result".
"""
start_msg('Executing build for instance in role "{0}":'.format(env.role_name))
# increment the build name and create a new virtualenv for the build.
build_name = self._increment_name(reference_repo)
build_env_dir = ctx().build_path(build_name)
VirtualEnvTool().ensure(build_env_dir, interpreter)
# run "setup.py install" in each repo.
for repo_name in ([repos] if isinstance(repos, basestring) else repos):
build_repo(build_env_dir, ctx().get_repo(repo_name))
# run tests.
self._unittest(unittest, build_name)
# save the last known good build-name.
BuildInfo.set_last_good(build_name)
if tarball:
self._tarball(build_name)
# execute any post-build commands.
if post_build:
self._execute_post_build(post_build, build_name)
# make the build_name available to the caller; it'll be set as an instance-tag.
succeed_msg('Build completed successfully for role "{0}".'.format(env.role_name))
env.role.set_env(build_result=build_name)
return self
def copy_from(self, role_name, post_build=None, delete_tar=True):
"""Copies an existing build from an instance in the specified role.
Instead of building itself, a build is copied from another instance to the current
instance.
:param role_name: the role of the instance to copy the build tarball from.
:param post_build: list of post-build commands to execute.
:param delete_tar: True to delete the tarball, False otherwise.
:return: the name of the copied build.
"""
# get the last known good build from the source machine.
# note: we could alternatively get this from an instance tag.
message('Copying build from instance in role: "{0}"'.format(role_name))
inst, role = ctx().get_host_in_role(role_name)
with settings(host_string=inst.public_dns_name, user=role.user):
message('Getting last good build-name from: "{0}"'.format(role_name))
src_build_name = BuildInfo().get_last_good()
# copy it from the source machine. note that all machines must have been provisioned
# properly to allow the current machine access to the source machine.
tarball = self._tarball_name(src_build_name)
path = ctx().build_path(tarball)
copy_file_from(role.user, inst.private_dns_name, path, path)
with cd(ctx().builds_root()):
# untar it.
command = 'tar -x --file={tarball}'.format(**locals())
result = run(command)
if result.failed:
raise HaltError('Failed to untar: "{0}"'.format(path))
# delete the tar.
if delete_tar:
run('rm {tarball}'.format(**locals()))
# update the build information.
BuildInfo().set_last_good(src_build_name)
# execute any post-build commands.
if post_build:
self._execute_post_build(post_build, src_build_name)
succeed_msg('Successfully copied build: "{0}"'.format(src_build_name))
return src_build_name
def _execute_post_build(self, cmd_lst, build_name):
message('Running post-build commands:')
with prefix(VirtualEnvTool.activate_prefix(ctx().build_path(build_name))):
for desc in cmd_lst:
f = sudo if desc.get('sudo', False) else run
result = f(desc['command'])
if result.failed and not desc.get('ignore_fail', False):
raise HaltError('Post-build command failed: "{0}"'.format(desc['command']))
message('Completed post-build commands.')
return self
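    # Example (illustrative): a post_build list as accepted by build() and
    # copy_from(); the commands shown are hypothetical.
    #
    #   post_build = [
    #       {'command': 'pip install -r requirements.txt'},
    #       {'command': 'service nginx restart', 'sudo': True,
    #        'ignore_fail': True},
    #   ]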
def _increment_name(self, ref_repo_name):
# some projects have more than one repo. in this case one is designated as the "reference".
# the reference repo gives it's most recent commit ID that's used in the new build name.
# if no reference is given, just use the first (hopefully, the only) repo in the Context.
if ref_repo_name:
ref_repo = ctx().get_repo(ref_repo_name)
else:
ref_repo = ctx().repos()[0]
name = BuildInfo.next(ref_repo.dir)
succeed_msg('Created new build name: "{0}"'.format(name))
return name
def _tarball(self, build_name):
tarball = self._tarball_name(build_name)
dir_to_tar = ctx().build_path(build_name)
with cd(ctx().builds_root()):
options = '--create --gzip --format=ustar --owner=0 --group=0'
command = 'tar {options} --file={tarball} {build_name}'.format(**locals())
result = run(command)
if result.failed:
raise HaltError('Failed to create tarball for: "{0}"'.format(dir_to_tar))
succeed_msg('Created build tarball: "{0}"'.format(tarball))
return self
def _tarball_name(self, build_name):
return '{build_name}.tar.gz'.format(**locals())
def _unittest(self, plan, build_name):
failed_msg('The action "unittest" is not implemented (yet).')
return self
# register.
Tool.__tools__['python_build'] = PythonBuildTool
| bsd-3-clause |
huchoi/edx-platform | common/djangoapps/django_future/csrf.py | 104 | 2882 | # Taken from Django 1.4
import warnings
from django.middleware.csrf import CsrfViewMiddleware, get_token
from django.utils.decorators import decorator_from_middleware, available_attrs
from functools import wraps
csrf_protect = decorator_from_middleware(CsrfViewMiddleware)
csrf_protect.__name__ = "csrf_protect"
csrf_protect.__doc__ = """
This decorator adds CSRF protection in exactly the same way as
CsrfViewMiddleware, but it can be used on a per view basis. Using both, or
using the decorator multiple times, is harmless and efficient.
"""
class _EnsureCsrfToken(CsrfViewMiddleware):
# We need this to behave just like the CsrfViewMiddleware, but not reject
# requests.
def _reject(self, request, reason):
return None
requires_csrf_token = decorator_from_middleware(_EnsureCsrfToken)
requires_csrf_token.__name__ = 'requires_csrf_token'
requires_csrf_token.__doc__ = """
Use this decorator on views that need a correct csrf_token available to
RequestContext, but without the CSRF protection that csrf_protect
enforces.
"""
class _EnsureCsrfCookie(CsrfViewMiddleware):
def _reject(self, request, reason):
return None
def process_view(self, request, callback, callback_args, callback_kwargs):
retval = super(_EnsureCsrfCookie, self).process_view(request, callback, callback_args, callback_kwargs)
# Forces process_response to send the cookie
get_token(request)
return retval
ensure_csrf_cookie = decorator_from_middleware(_EnsureCsrfCookie)
ensure_csrf_cookie.__name__ = 'ensure_csrf_cookie'
ensure_csrf_cookie.__doc__ = """
Use this decorator to ensure that a view sets a CSRF cookie, whether or not it
uses the csrf_token template tag, or the CsrfViewMiddleware is used.
"""
def csrf_response_exempt(view_func):
"""
Modifies a view function so that its response is exempt
from the post-processing of the CSRF middleware.
"""
warnings.warn("csrf_response_exempt is deprecated. It no longer performs a "
"function, and calls to it can be removed.",
PendingDeprecationWarning)
return view_func
def csrf_view_exempt(view_func):
"""
Marks a view function as being exempt from CSRF view protection.
"""
warnings.warn("csrf_view_exempt is deprecated. Use csrf_exempt instead.",
PendingDeprecationWarning)
return csrf_exempt(view_func)
def csrf_exempt(view_func):
"""
Marks a view function as being exempt from the CSRF view protection.
"""
# We could just do view_func.csrf_exempt = True, but decorators
# are nicer if they don't have side-effects, so we return a new
# function.
def wrapped_view(*args, **kwargs):
return view_func(*args, **kwargs)
wrapped_view.csrf_exempt = True
return wraps(view_func, assigned=available_attrs(view_func))(wrapped_view)
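# Example usage (illustrative sketch; ``my_view`` is hypothetical):
#
#   @csrf_exempt
#   def my_view(request):
#       ...
#
# CsrfViewMiddleware checks the ``csrf_exempt`` attribute set above and skips
# its protection for that view.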
| agpl-3.0 |
cloudera/hue | desktop/core/ext-py/ply-3.11/ply/ctokens.py | 17 | 3155 | # ----------------------------------------------------------------------
# ctokens.py
#
# Token specifications for symbols in ANSI C and C++. This file is
# meant to be used as a library in other tokenizers.
# ----------------------------------------------------------------------
# Token names
tokens = [
# Literals (identifier, integer constant, float constant, string constant, char const)
'ID', 'TYPEID', 'INTEGER', 'FLOAT', 'STRING', 'CHARACTER',
# Operators (+,-,*,/,%,|,&,~,^,<<,>>, ||, &&, !, <, <=, >, >=, ==, !=)
'PLUS', 'MINUS', 'TIMES', 'DIVIDE', 'MODULO',
'OR', 'AND', 'NOT', 'XOR', 'LSHIFT', 'RSHIFT',
'LOR', 'LAND', 'LNOT',
'LT', 'LE', 'GT', 'GE', 'EQ', 'NE',
# Assignment (=, *=, /=, %=, +=, -=, <<=, >>=, &=, ^=, |=)
'EQUALS', 'TIMESEQUAL', 'DIVEQUAL', 'MODEQUAL', 'PLUSEQUAL', 'MINUSEQUAL',
'LSHIFTEQUAL','RSHIFTEQUAL', 'ANDEQUAL', 'XOREQUAL', 'OREQUAL',
# Increment/decrement (++,--)
'INCREMENT', 'DECREMENT',
# Structure dereference (->)
'ARROW',
# Ternary operator (?)
'TERNARY',
    # Delimiters ( ) [ ] { } , . ; :
'LPAREN', 'RPAREN',
'LBRACKET', 'RBRACKET',
'LBRACE', 'RBRACE',
'COMMA', 'PERIOD', 'SEMI', 'COLON',
# Ellipsis (...)
'ELLIPSIS',
]
# Operators
t_PLUS = r'\+'
t_MINUS = r'-'
t_TIMES = r'\*'
t_DIVIDE = r'/'
t_MODULO = r'%'
t_OR = r'\|'
t_AND = r'&'
t_NOT = r'~'
t_XOR = r'\^'
t_LSHIFT = r'<<'
t_RSHIFT = r'>>'
t_LOR = r'\|\|'
t_LAND = r'&&'
t_LNOT = r'!'
t_LT = r'<'
t_GT = r'>'
t_LE = r'<='
t_GE = r'>='
t_EQ = r'=='
t_NE = r'!='
# Assignment operators
t_EQUALS = r'='
t_TIMESEQUAL = r'\*='
t_DIVEQUAL = r'/='
t_MODEQUAL = r'%='
t_PLUSEQUAL = r'\+='
t_MINUSEQUAL = r'-='
t_LSHIFTEQUAL = r'<<='
t_RSHIFTEQUAL = r'>>='
t_ANDEQUAL = r'&='
t_OREQUAL = r'\|='
t_XOREQUAL = r'\^='
# Increment/decrement
t_INCREMENT = r'\+\+'
t_DECREMENT = r'--'
# ->
t_ARROW = r'->'
# ?
t_TERNARY = r'\?'
# Delimiters
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_LBRACKET = r'\['
t_RBRACKET = r'\]'
t_LBRACE = r'\{'
t_RBRACE = r'\}'
t_COMMA = r','
t_PERIOD = r'\.'
t_SEMI = r';'
t_COLON = r':'
t_ELLIPSIS = r'\.\.\.'
# Identifiers
t_ID = r'[A-Za-z_][A-Za-z0-9_]*'
# Integer literal
t_INTEGER = r'\d+([uU]|[lL]|[uU][lL]|[lL][uU])?'
# Floating literal
t_FLOAT = r'((\d+)(\.\d+)(e(\+|-)?(\d+))?|(\d+)e(\+|-)?(\d+))([lL]|[fF])?'
# String literal
t_STRING = r'\"([^\\\n]|(\\.))*?\"'
# Character constant 'c' or L'c'
t_CHARACTER = r'(L)?\'([^\\\n]|(\\.))*?\''
# Comment (C-Style)
def t_COMMENT(t):
r'/\*(.|\n)*?\*/'
t.lexer.lineno += t.value.count('\n')
return t
# Comment (C++-Style)
def t_CPPCOMMENT(t):
r'//.*\n'
t.lexer.lineno += 1
return t
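# Example (illustrative sketch): ply.lex builds a lexer from the module-level
# ``tokens`` list and ``t_*`` rules, so a tokenizer module can do roughly:
#
#   import ply.lex as lex
#   from ply.ctokens import *  # tokens, t_PLUS, t_COMMENT, ...
#   t_ignore = ' \t'           # whitespace rule, assumed; not defined here
#   def t_error(t):            # error rule, assumed; not defined here
#       t.lexer.skip(1)
#   lexer = lex.lex()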
| apache-2.0 |
t0mm0/youtube-dl | youtube_dl/extractor/kaltura.py | 63 | 4867 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_urllib_parse
from ..utils import (
ExtractorError,
int_or_none,
)
class KalturaIE(InfoExtractor):
_VALID_URL = r'''(?x)
(?:kaltura:|
           https?://(?:(?:www|cdnapisec)\.)?kaltura\.com/index\.php/kwidget/(?:[^/]+/)*?wid/_
)(?P<partner_id>\d+)
(?::|
/(?:[^/]+/)*?entry_id/
)(?P<id>[0-9a-z_]+)'''
_API_BASE = 'http://cdnapi.kaltura.com/api_v3/index.php?'
_TESTS = [
{
'url': 'kaltura:269692:1_1jc2y3e4',
'md5': '3adcbdb3dcc02d647539e53f284ba171',
'info_dict': {
'id': '1_1jc2y3e4',
'ext': 'mp4',
'title': 'Track 4',
'upload_date': '20131219',
'uploader_id': '[email protected]',
'description': 'The Allman Brothers Band, 12/16/1981',
'thumbnail': 're:^https?://.*/thumbnail/.*',
'timestamp': int,
},
},
{
'url': 'http://www.kaltura.com/index.php/kwidget/cache_st/1300318621/wid/_269692/uiconf_id/3873291/entry_id/1_1jc2y3e4',
'only_matching': True,
},
{
'url': 'https://cdnapisec.kaltura.com/index.php/kwidget/wid/_557781/uiconf_id/22845202/entry_id/1_plr1syf3',
'only_matching': True,
},
]
def _kaltura_api_call(self, video_id, actions, *args, **kwargs):
params = actions[0]
if len(actions) > 1:
for i, a in enumerate(actions[1:], start=1):
for k, v in a.items():
params['%d:%s' % (i, k)] = v
query = compat_urllib_parse.urlencode(params)
url = self._API_BASE + query
data = self._download_json(url, video_id, *args, **kwargs)
status = data if len(actions) == 1 else data[0]
if status.get('objectType') == 'KalturaAPIException':
raise ExtractorError(
'%s said: %s' % (self.IE_NAME, status['message']))
return data
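    # Example (illustrative; values are hypothetical): two actions are
    # flattened into a single multirequest query, with the second action's
    # params prefixed by its 1-based index:
    #
    #   actions = [{'service': 'multirequest', 'ks': 'XYZ'},
    #              {'action': 'get', 'entryId': '1_abc'}]
    #   # -> query params: service=multirequest, ks=XYZ,
    #   #    1:action=get, 1:entryId=1_abc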
def _get_kaltura_signature(self, video_id, partner_id):
actions = [{
'apiVersion': '3.1',
'expiry': 86400,
'format': 1,
'service': 'session',
'action': 'startWidgetSession',
'widgetId': '_%s' % partner_id,
}]
return self._kaltura_api_call(
video_id, actions, note='Downloading Kaltura signature')['ks']
def _get_video_info(self, video_id, partner_id):
signature = self._get_kaltura_signature(video_id, partner_id)
actions = [
{
'action': 'null',
'apiVersion': '3.1.5',
'clientTag': 'kdp:v3.8.5',
'format': 1, # JSON, 2 = XML, 3 = PHP
'service': 'multirequest',
'ks': signature,
},
{
'action': 'get',
'entryId': video_id,
'service': 'baseentry',
'version': '-1',
},
{
'action': 'getContextData',
'contextDataParams:objectType': 'KalturaEntryContextDataParams',
'contextDataParams:referrer': 'http://www.kaltura.com/',
'contextDataParams:streamerType': 'http',
'entryId': video_id,
'service': 'baseentry',
},
]
return self._kaltura_api_call(
video_id, actions, note='Downloading video info JSON')
def _real_extract(self, url):
video_id = self._match_id(url)
mobj = re.match(self._VALID_URL, url)
partner_id, entry_id = mobj.group('partner_id'), mobj.group('id')
info, source_data = self._get_video_info(entry_id, partner_id)
formats = [{
'format_id': '%(fileExt)s-%(bitrate)s' % f,
'ext': f['fileExt'],
'tbr': f['bitrate'],
'fps': f.get('frameRate'),
'filesize_approx': int_or_none(f.get('size'), invscale=1024),
'container': f.get('containerFormat'),
'vcodec': f.get('videoCodecId'),
'height': f.get('height'),
'width': f.get('width'),
'url': '%s/flavorId/%s' % (info['dataUrl'], f['id']),
} for f in source_data['flavorAssets']]
self._sort_formats(formats)
return {
'id': video_id,
'title': info['name'],
'formats': formats,
'description': info.get('description'),
'thumbnail': info.get('thumbnailUrl'),
'duration': info.get('duration'),
'timestamp': info.get('createdAt'),
'uploader_id': info.get('userId'),
'view_count': info.get('plays'),
}
| unlicense |
koniiiik/django | tests/string_lookup/models.py | 281 | 1533 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Foo(models.Model):
name = models.CharField(max_length=50)
friend = models.CharField(max_length=50, blank=True)
def __str__(self):
return "Foo %s" % self.name
@python_2_unicode_compatible
class Bar(models.Model):
name = models.CharField(max_length=50)
normal = models.ForeignKey(Foo, models.CASCADE, related_name='normal_foo')
fwd = models.ForeignKey("Whiz", models.CASCADE)
back = models.ForeignKey("Foo", models.CASCADE)
def __str__(self):
return "Bar %s" % self.place.name
@python_2_unicode_compatible
class Whiz(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return "Whiz %s" % self.name
@python_2_unicode_compatible
class Child(models.Model):
parent = models.OneToOneField('Base', models.CASCADE)
name = models.CharField(max_length=50)
def __str__(self):
return "Child %s" % self.name
@python_2_unicode_compatible
class Base(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return "Base %s" % self.name
@python_2_unicode_compatible
class Article(models.Model):
name = models.CharField(max_length=50)
text = models.TextField()
submitted_from = models.GenericIPAddressField(blank=True, null=True)
def __str__(self):
return "Article %s" % self.name
| bsd-3-clause |
autosub-team/autosub | src/plugins/vels_ob/swagger_client/models/task.py | 2 | 6371 | # coding: utf-8
"""
HDL Testing Platform
REST API for HDL TP # noqa: E501
OpenAPI spec version: 1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class Task(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'user_id': 'str',
'hdl_file': 'str',
'design': 'str',
'pblock': 'int',
'peripherals': 'list[str]',
'pins': 'list[Pin]'
}
attribute_map = {
'user_id': 'user_id',
'hdl_file': 'hdl_file',
'design': 'design',
'pblock': 'pblock',
'peripherals': 'peripherals',
'pins': 'pins'
}
def __init__(self, user_id=None, hdl_file=None, design=None, pblock=None, peripherals=None, pins=None): # noqa: E501
"""Task - a model defined in Swagger""" # noqa: E501
self._user_id = None
self._hdl_file = None
self._design = None
self._pblock = None
self._peripherals = None
self._pins = None
self.discriminator = None
self.user_id = user_id
if hdl_file is not None:
self.hdl_file = hdl_file
if design is not None:
self.design = design
if pblock is not None:
self.pblock = pblock
if peripherals is not None:
self.peripherals = peripherals
if pins is not None:
self.pins = pins
@property
def user_id(self):
"""Gets the user_id of this Task. # noqa: E501
user Identifier # noqa: E501
:return: The user_id of this Task. # noqa: E501
:rtype: str
"""
return self._user_id
@user_id.setter
def user_id(self, user_id):
"""Sets the user_id of this Task.
user Identifier # noqa: E501
:param user_id: The user_id of this Task. # noqa: E501
:type: str
"""
if user_id is None:
raise ValueError("Invalid value for `user_id`, must not be `None`") # noqa: E501
self._user_id = user_id
@property
def hdl_file(self):
"""Gets the hdl_file of this Task. # noqa: E501
uploaded hdl file name # noqa: E501
:return: The hdl_file of this Task. # noqa: E501
:rtype: str
"""
return self._hdl_file
@hdl_file.setter
def hdl_file(self, hdl_file):
"""Sets the hdl_file of this Task.
uploaded hdl file name # noqa: E501
:param hdl_file: The hdl_file of this Task. # noqa: E501
:type: str
"""
self._hdl_file = hdl_file
@property
def design(self):
"""Gets the design of this Task. # noqa: E501
design # noqa: E501
:return: The design of this Task. # noqa: E501
:rtype: str
"""
return self._design
@design.setter
def design(self, design):
"""Sets the design of this Task.
design # noqa: E501
:param design: The design of this Task. # noqa: E501
:type: str
"""
self._design = design
@property
def pblock(self):
"""Gets the pblock of this Task. # noqa: E501
pblock # noqa: E501
:return: The pblock of this Task. # noqa: E501
:rtype: int
"""
return self._pblock
@pblock.setter
def pblock(self, pblock):
"""Sets the pblock of this Task.
pblock # noqa: E501
:param pblock: The pblock of this Task. # noqa: E501
:type: int
"""
self._pblock = pblock
@property
def peripherals(self):
"""Gets the peripherals of this Task. # noqa: E501
:return: The peripherals of this Task. # noqa: E501
:rtype: list[str]
"""
return self._peripherals
@peripherals.setter
def peripherals(self, peripherals):
"""Sets the peripherals of this Task.
:param peripherals: The peripherals of this Task. # noqa: E501
:type: list[str]
"""
self._peripherals = peripherals
@property
def pins(self):
"""Gets the pins of this Task. # noqa: E501
:return: The pins of this Task. # noqa: E501
:rtype: list[Pin]
"""
return self._pins
@pins.setter
def pins(self, pins):
"""Sets the pins of this Task.
:param pins: The pins of this Task. # noqa: E501
:type: list[Pin]
"""
self._pins = pins
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Task, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Task):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
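# Minimal usage sketch (illustrative; not part of the generated client code,
# and the field values below are hypothetical):
def _task_example():
    t = Task(user_id='u1', hdl_file='top.vhd')
    assert t.to_dict()['user_id'] == 'u1'
    assert Task(user_id='u1') == Task(user_id='u1')  # __eq__ compares __dict__
    return t.to_str()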
| gpl-2.0 |
ajstarna/RicochetRobots | Brobot/model.py | 1 | 9336 | import itertools
import random
# Directions
NORTH = 'N'
EAST = 'E'
SOUTH = 'S'
WEST = 'W'
DIRECTIONS = [NORTH, EAST, SOUTH, WEST]
REVERSE = {
NORTH: SOUTH,
EAST: WEST,
SOUTH: NORTH,
WEST: EAST,
}
OFFSET = {
NORTH: -16,
EAST: 1,
SOUTH: 16,
WEST: -1,
}
# Masks
M_NORTH = 0x01
M_EAST = 0x02
M_SOUTH = 0x04
M_WEST = 0x08
M_ROBOT = 0x10
M_LOOKUP = {
NORTH: M_NORTH,
EAST: M_EAST,
SOUTH: M_SOUTH,
WEST: M_WEST,
}
# Colors
RED = 'R'
GREEN = 'G'
BLUE = 'B'
YELLOW = 'Y'
COLORS = [RED, GREEN, BLUE, YELLOW]
# Shapes
CIRCLE = 'C'
TRIANGLE = 'T'
SQUARE = 'Q'
HEXAGON = 'H'
SHAPES = [CIRCLE, TRIANGLE, SQUARE, HEXAGON]
# Tokens
TOKENS = [''.join(token) for token in itertools.product(COLORS, SHAPES)]
# Quadrants
QUAD_1A = (
'NW,N,N,N,NE,NW,N,N,'
'W,S,X,X,X,X,SEYH,W,'
'WE,NWGT,X,X,X,X,N,X,'
'W,X,X,X,X,X,X,X,'
'W,X,X,X,X,X,S,X,'
'SW,X,X,X,X,X,NEBQ,W,'
'NW,X,E,SWRC,X,X,X,S,'
'W,X,X,N,X,X,E,NW'
)
QUAD_1B = (
'NW,NE,NW,N,NS,N,N,N,'
'W,S,X,E,NWRC,X,X,X,'
'W,NEGT,W,X,X,X,X,X,'
'W,X,X,X,X,X,SEYH,W,'
'W,X,X,X,X,X,N,X,'
'SW,X,X,X,X,X,X,X,'
'NW,X,E,SWBQ,X,X,X,S,'
'W,X,X,N,X,X,E,NW'
)
QUAD_2A = (
'NW,N,N,NE,NW,N,N,N,'
'W,X,X,X,X,E,SWBC,X,'
'W,S,X,X,X,X,N,X,'
'W,NEYT,W,X,X,S,X,X,'
'W,X,X,X,E,NWGQ,X,X,'
'W,X,SERH,W,X,X,X,X,'
'SW,X,N,X,X,X,X,S,'
'NW,X,X,X,X,X,E,NW'
)
QUAD_2B = (
'NW,N,N,N,NE,NW,N,N,'
'W,X,SERH,W,X,X,X,X,'
'W,X,N,X,X,X,X,X,'
'WE,SWGQ,X,X,X,X,S,X,'
'SW,N,X,X,X,E,NWYT,X,'
'NW,X,X,X,X,S,X,X,'
'W,X,X,X,X,NEBC,W,S,'
'W,X,X,X,X,X,E,NW'
)
QUAD_3A = (
'NW,N,N,NE,NW,N,N,N,'
'W,X,X,X,X,SEGH,W,X,'
'WE,SWRQ,X,X,X,N,X,X,'
'SW,N,X,X,X,X,S,X,'
'NW,X,X,X,X,E,NWYC,X,'
'W,X,S,X,X,X,X,X,'
'W,X,NEBT,W,X,X,X,S,'
'W,X,X,X,X,X,E,NW'
)
QUAD_3B = (
'NW,N,NS,N,NE,NW,N,N,'
'W,E,NWYC,X,X,X,X,X,'
'W,X,X,X,X,X,X,X,'
'W,X,X,X,X,E,SWBT,X,'
'SW,X,X,X,S,X,N,X,'
'NW,X,X,X,NERQ,W,X,X,'
'W,SEGH,W,X,X,X,X,S,'
'W,N,X,X,X,X,E,NW'
)
QUAD_4A = (
'NW,N,N,NE,NW,N,N,N,'
'W,X,X,X,X,X,X,X,'
'W,X,X,X,X,SEBH,W,X,'
'W,X,S,X,X,N,X,X,'
'SW,X,NEGC,W,X,X,X,X,'
'NW,S,X,X,X,X,E,SWRT,'
'WE,NWYQ,X,X,X,X,X,NS,'
'W,X,X,X,X,X,E,NW'
)
QUAD_4B = (
'NW,N,N,NE,NW,N,N,N,'
'WE,SWRT,X,X,X,X,S,X,'
'W,N,X,X,X,X,NEGC,W,'
'W,X,X,X,X,X,X,X,'
'W,X,SEBH,W,X,X,X,S,'
'SW,X,N,X,X,X,E,NWYQ,'
'NW,X,X,X,X,X,X,S,'
'W,X,X,X,X,X,E,NW'
)
QUADS = [
(QUAD_1A, QUAD_1B),
(QUAD_2A, QUAD_2B),
(QUAD_3A, QUAD_3B),
(QUAD_4A, QUAD_4B),
]
# Rotation
ROTATE_QUAD = [
56, 48, 40, 32, 24, 16, 8, 0,
57, 49, 41, 33, 25, 17, 9, 1,
58, 50, 42, 34, 26, 18, 10, 2,
59, 51, 43, 35, 27, 19, 11, 3,
60, 52, 44, 36, 28, 20, 12, 4,
61, 53, 45, 37, 29, 21, 13, 5,
62, 54, 46, 38, 30, 22, 14, 6,
63, 55, 47, 39, 31, 23, 15, 7,
]
ROTATE_WALL = {
NORTH: EAST,
EAST: SOUTH,
SOUTH: WEST,
WEST: NORTH,
}
# Helper Functions
def idx(x, y, size=16):
return y * size + x
def xy(index, size=16):
x = index % size
y = index / size
return (x, y)
def rotate_quad(data, times=1):
for i in range(times):
result = [data[index] for index in ROTATE_QUAD]
result = [''.join(ROTATE_WALL.get(c, c) for c in x) for x in result]
data = result
return data
def create_grid(quads=None):
if quads is None:
quads = [random.choice(pair) for pair in QUADS]
random.shuffle(quads)
quads = [quad.split(',') for quad in quads]
quads = [rotate_quad(quads[i], i) for i in [0, 1, 3, 2]]
result = [None for i in range(16 * 16)]
for i, quad in enumerate(quads):
dx, dy = xy(i, 2)
for j, data in enumerate(quad):
x, y = xy(j, 8)
x += dx * 8
y += dy * 8
index = idx(x, y)
result[index] = data
return result
def to_mask(cell):
result = 0
for letter, mask in M_LOOKUP.items():
if letter in cell:
result |= mask
return result
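# Minimal sketch (not part of the original module) of how the helpers
# compose: cell strings mix wall letters and token codes, and to_mask()
# collapses only the wall letters into a bitmask. Assumes Python 2 integer
# division, matching the rest of this module.
def _helpers_example():
    assert idx(3, 2) == 35                        # y * 16 + x
    assert xy(35) == (3, 2)
    assert to_mask('NW') == (M_NORTH | M_WEST)
    assert to_mask('SEYH') == (M_SOUTH | M_EAST)  # token letters are ignored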
# Game
class Game(object):
@staticmethod
def hardest():
quads = [QUAD_2B, QUAD_4B, QUAD_3B, QUAD_1B]
robots = [226, 48, 43, 18]
token = 'BT'
return Game(quads=quads, robots=robots, token=token)
def __init__(self, seed=None, quads=None, robots=None, token=None):
if seed:
random.seed(seed)
self.grid = create_grid(quads)
if robots is None:
self.robots = self.place_robots()
else:
self.robots = dict(zip(COLORS, robots))
self.token = token or random.choice(TOKENS)
self.moves = 0
self.last = None
def place_robots(self):
result = {}
used = set()
for color in COLORS:
while True:
index = random.randint(0, 255)
if index in (119, 120, 135, 136):
continue
if self.grid[index][-2:] in TOKENS:
continue
if index in used:
continue
result[color] = index
used.add(index)
break
return result
def get_robot(self, index):
for color, position in self.robots.iteritems():
if position == index:
return color
return None
def can_move(self, color, direction):
if self.last == (color, REVERSE[direction]):
return False
index = self.robots[color]
if direction in self.grid[index]:
return False
new_index = index + OFFSET[direction]
if new_index in self.robots.itervalues():
return False
return True
def compute_move(self, color, direction):
index = self.robots[color]
robots = self.robots.values()
while True:
if direction in self.grid[index]:
break
new_index = index + OFFSET[direction]
if new_index in robots:
break
index = new_index
return index
def do_move(self, color, direction):
start = self.robots[color]
last = self.last
if last == (color, REVERSE[direction]):
print 'reverse'
#raise Exception
end = self.compute_move(color, direction)
if start == end:
print 'wall move'
#raise Exception
self.moves += 1
self.robots[color] = end
self.last = (color, direction)
return (color, start, last)
def undo_move(self, data):
color, start, last = data
self.moves -= 1
self.robots[color] = start
self.last = last
def get_moves(self, colors=None):
result = []
colors = colors or COLORS
for color in colors:
for direction in DIRECTIONS:
if self.can_move(color, direction):
result.append((color, direction))
return result
def over(self):
color = self.token[0]
return self.token in self.grid[self.robots[color]]
def key(self):
return tuple(self.robots.itervalues())
def search(self):
max_depth = 1
while True:
#print 'Searching to depth:', max_depth
result = self._search([], set(), 0, max_depth)
if result is not None:
return result
max_depth += 1
def _search(self, path, memo, depth, max_depth):
if self.over():
return list(path)
if depth == max_depth:
return None
key = (depth, self.key())
if key in memo:
return None
memo.add(key)
if depth == max_depth - 1:
colors = [self.token[0]]
else:
colors = None
moves = self.get_moves(colors)
for move in moves:
data = self.do_move(*move)
path.append(move)
result = self._search(path, memo, depth + 1, max_depth)
path.pop(-1)
self.undo_move(data)
if result:
return result
return None
def export(self):
grid = []
token = None
robots = [self.robots[color] for color in COLORS]
for index, cell in enumerate(self.grid):
mask = to_mask(cell)
if index in robots:
mask |= M_ROBOT
grid.append(mask)
if self.token in cell:
token = index
robot = COLORS.index(self.token[0])
return {
'grid': grid,
'robot': robot,
'token': token,
'robots': robots,
}
def export2(self):
grid = []
token = None
robots = [self.robots[color] for color in COLORS]
for index, cell in enumerate(self.grid):
mask = to_mask(cell)
grid.append(mask)
if self.token in cell:
token = index
robot = COLORS.index(self.token[0])
return {
'grid': grid,
'robot': robot,
'token': token,
'robots': robots,
}
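# Minimal usage sketch (illustrative, not part of the original module):
# solve a randomly generated board, then replay the returned move list.
# search() is iterative-deepening DFS, so hard boards can take a while.
def _game_example():
    game = Game(seed=42)
    path = game.search()
    for color, direction in path:
        game.do_move(color, direction)
    assert game.over()
    return path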
| bsd-2-clause |
tongxindao/Flask-micblog | PyFlk-Framework/pyflk/session/__init__.py | 2 | 5139 | import os
import json
import time
import base64
def create_session_id():
    '''
    Create a new Session ID
    '''
    # Base64-encode the current timestamp, decode the bytes back to a string,
    # strip the '=' padding, drop the last two characters (which include the
    # trailing newline), and reverse the result.
return base64.encodebytes(str(time.time()).encode()).decode().replace("=", "")[:-2][::-1]
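def _create_session_id_example():
    # Illustrative only (not part of the original module): a generated id is
    # plain text with the Base64 padding and trailing newline stripped off.
    sid = create_session_id()
    assert '=' not in sid and '\n' not in sid
    return sid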
def get_session_id(request):
'''
    Get the Session ID from the request cookies
'''
return request.cookies.get('session_id', '')
class Session:
'''
Session
'''
# instance object
__instance = None
# init method
def __init__(self):
# session mapping table
self.__session_map__ = {}
        # local session storage folder
self.__storage_path__ = None
def set_storage_path(self, path):
''' set session storage path '''
self.__storage_path__ = path
def storage(self, session_id):
        ''' Persist a session record to local storage '''
        # Only cache to disk if a storage path has been configured
        if self.__storage_path__ is not None:
            # Build the local file path; the file name is the Session ID
            session_path = os.path.join(self.__storage_path__, session_id)
            with open(session_path, 'wb') as f:
                # Serialize the session record to a JSON string
                content = json.dumps(self.__session_map__[session_id])
                # Base64-encode before writing so content that is not safe to
                # write as-is is still stored correctly
                f.write(base64.encodebytes(content.encode()))
def __new__(cls, *args, **kwargs):
        ''' Singleton: expose one globally shared Session instance '''
if cls.__instance is None:
cls.__instance = super(Session, cls).__new__(cls, *args, **kwargs)
return cls.__instance
def push(self, request, item, value):
        ''' Add or update a session record '''
        # Get the client's Session ID from the request
        session_id = get_session_id(request)
        # If this Session ID already exists in the mapping table, add the new
        # key/value pair to it; otherwise initialise an empty dict first
        if session_id in self.__session_map__:
            # Add the data item to the existing session
            self.__session_map__[session_id][item] = value
        else:
            # Initialise the current session
            self.__session_map__[session_id] = {}
            # Add the data item to the new session
            self.__session_map__[session_id][item] = value
        # The session changed; refresh the local cache
        self.storage(session_id)
def pop(self, request, item, value=True):
        ''' Remove an item from the current session '''
        # Get the current session
        session_id = get_session_id(request)
        current_session = self.__session_map__.get(session_id, {})
        # Delete the data item if its key exists in the current session
        if item in current_session:
            current_session.pop(item, value)
            # The session changed; refresh the local cache
self.storage(session_id)
def load_local_session(self):
        ''' Load cached sessions from local storage '''
        # Only load from disk if a storage path has been configured
        if self.__storage_path__ is not None:
            # List all session record files; each file name is a Session ID
            session_path_list = os.listdir(self.__storage_path__)
            # Iterate over the session record files
            for session_id in session_path_list:
                # Build the path of this session record file
                path = os.path.join(self.__storage_path__, session_id)
                # Read the file content
                with open(path, 'rb') as f:
                    content = f.read()
                # Base64-decode the file content
                content = base64.decodebytes(content)
                # Bind the Session ID to its content in the mapping table
self.__session_map__[session_id] = json.loads(content.decode())
def map(self, request):
        ''' Get the current session record '''
return self.__session_map__.get(get_session_id(request), {})
def get(self, request, item):
        ''' Get a single item from the current session '''
return self.__session_map__.get(get_session_id(request), {}).get(item, None)
class AuthSession:
''' Session verification decorator '''
@classmethod
def auth_session(cls, f, *args, **options):
def decorator(obj, request):
return f(obj, request) if cls.auth_logic(request, *args, **options) else cls.auth_fail_callback(request, *args, **options)
return decorator
@staticmethod
def auth_logic(request, *args, **options):
        ''' Verification logic interface; must return a boolean '''
raise NotImplementedError
@staticmethod
def auth_fail_callback(request, *args, **options):
''' verification fail callback interface '''
raise NotImplementedError
# singleton global object
session = Session()
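def _session_usage_example(tmp_dir):
    # Minimal usage sketch (illustrative, not part of the original module).
    # ``tmp_dir`` is a hypothetical existing directory; a request only needs
    # a ``cookies`` mapping holding a 'session_id' entry.
    class FakeRequest(object):
        cookies = {'session_id': create_session_id()}
    req = FakeRequest()
    session.set_storage_path(tmp_dir)
    session.push(req, 'user', 'alice')
    assert session.get(req, 'user') == 'alice'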
| apache-2.0 |
dhanzhang/shadowsocks-1 | shadowsocks/crypto/util.py | 1032 | 4287 | #!/usr/bin/env python
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import os
import logging
def find_library_nt(name):
# modified from ctypes.util
    # ctypes.util.find_library just returns the first result it finds
# but we want to try them all
# because on Windows, users may have both 32bit and 64bit version installed
results = []
for directory in os.environ['PATH'].split(os.pathsep):
fname = os.path.join(directory, name)
if os.path.isfile(fname):
results.append(fname)
if fname.lower().endswith(".dll"):
continue
fname = fname + ".dll"
if os.path.isfile(fname):
results.append(fname)
return results
def find_library(possible_lib_names, search_symbol, library_name):
import ctypes.util
from ctypes import CDLL
paths = []
if type(possible_lib_names) not in (list, tuple):
possible_lib_names = [possible_lib_names]
lib_names = []
for lib_name in possible_lib_names:
lib_names.append(lib_name)
lib_names.append('lib' + lib_name)
for name in lib_names:
if os.name == "nt":
paths.extend(find_library_nt(name))
else:
path = ctypes.util.find_library(name)
if path:
paths.append(path)
if not paths:
# We may get here when find_library fails because, for example,
# the user does not have sufficient privileges to access those
# tools underlying find_library on linux.
import glob
for name in lib_names:
patterns = [
'/usr/local/lib*/lib%s.*' % name,
'/usr/lib*/lib%s.*' % name,
'lib%s.*' % name,
'%s.dll' % name]
for pat in patterns:
files = glob.glob(pat)
if files:
paths.extend(files)
for path in paths:
try:
lib = CDLL(path)
if hasattr(lib, search_symbol):
logging.info('loading %s from %s', library_name, path)
return lib
else:
logging.warn('can\'t find symbol %s in %s', search_symbol,
path)
except Exception:
pass
return None
def run_cipher(cipher, decipher):
from os import urandom
import random
import time
BLOCK_SIZE = 16384
rounds = 1 * 1024
plain = urandom(BLOCK_SIZE * rounds)
results = []
pos = 0
print('test start')
start = time.time()
while pos < len(plain):
l = random.randint(100, 32768)
c = cipher.update(plain[pos:pos + l])
results.append(c)
pos += l
pos = 0
c = b''.join(results)
results = []
while pos < len(plain):
l = random.randint(100, 32768)
results.append(decipher.update(c[pos:pos + l]))
pos += l
end = time.time()
print('speed: %d bytes/s' % (BLOCK_SIZE * rounds / (end - start)))
assert b''.join(results) == plain
def test_find_library():
assert find_library('c', 'strcpy', 'libc') is not None
assert find_library(['c'], 'strcpy', 'libc') is not None
assert find_library(('c',), 'strcpy', 'libc') is not None
assert find_library(('crypto', 'eay32'), 'EVP_CipherUpdate',
'libcrypto') is not None
assert find_library('notexist', 'strcpy', 'libnotexist') is None
assert find_library('c', 'symbol_not_exist', 'c') is None
assert find_library(('notexist', 'c', 'crypto', 'eay32'),
'EVP_CipherUpdate', 'libc') is not None
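class _IdentityCipher(object):
    # Illustrative helper (not part of the original module): run_cipher()
    # only needs objects exposing update(bytes) -> bytes, such as the EVP
    # cipher wrappers; an identity "cipher" is enough to exercise the harness.
    def update(self, data):
        return data
def test_run_cipher_identity():
    run_cipher(_IdentityCipher(), _IdentityCipher())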
if __name__ == '__main__':
test_find_library()
| apache-2.0 |
hkchenhongyi/django | tests/generic_views/test_detail.py | 284 | 8387 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
from django.core.exceptions import ImproperlyConfigured, ObjectDoesNotExist
from django.test import TestCase, override_settings
from django.test.client import RequestFactory
from django.views.generic.base import View
from django.views.generic.detail import SingleObjectTemplateResponseMixin
from django.views.generic.edit import ModelFormMixin
from .models import Artist, Author, Book, Page
@override_settings(ROOT_URLCONF='generic_views.urls')
class DetailViewTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.artist1 = Artist.objects.create(name='Rene Magritte')
cls.author1 = Author.objects.create(name='Roberto Bolaño', slug='roberto-bolano')
cls.author2 = Author.objects.create(name='Scott Rosenberg', slug='scott-rosenberg')
cls.book1 = Book.objects.create(name='2066', slug='2066', pages=800, pubdate=datetime.date(2008, 10, 1))
cls.book1.authors.add(cls.author1)
cls.book2 = Book.objects.create(
name='Dreaming in Code', slug='dreaming-in-code', pages=300, pubdate=datetime.date(2006, 5, 1)
)
cls.page1 = Page.objects.create(
content='I was once bitten by a moose.', template='generic_views/page_template.html'
)
def test_simple_object(self):
res = self.client.get('/detail/obj/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], {'foo': 'bar'})
self.assertIsInstance(res.context['view'], View)
self.assertTemplateUsed(res, 'generic_views/detail.html')
def test_detail_by_pk(self):
res = self.client.get('/detail/author/%s/' % self.author1.pk)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], self.author1)
self.assertEqual(res.context['author'], self.author1)
self.assertTemplateUsed(res, 'generic_views/author_detail.html')
def test_detail_missing_object(self):
res = self.client.get('/detail/author/500/')
self.assertEqual(res.status_code, 404)
def test_detail_object_does_not_exist(self):
self.assertRaises(ObjectDoesNotExist, self.client.get, '/detail/doesnotexist/1/')
def test_detail_by_custom_pk(self):
res = self.client.get('/detail/author/bycustompk/%s/' % self.author1.pk)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], self.author1)
self.assertEqual(res.context['author'], self.author1)
self.assertTemplateUsed(res, 'generic_views/author_detail.html')
def test_detail_by_slug(self):
res = self.client.get('/detail/author/byslug/scott-rosenberg/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], Author.objects.get(slug='scott-rosenberg'))
self.assertEqual(res.context['author'], Author.objects.get(slug='scott-rosenberg'))
self.assertTemplateUsed(res, 'generic_views/author_detail.html')
def test_detail_by_custom_slug(self):
res = self.client.get('/detail/author/bycustomslug/scott-rosenberg/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], Author.objects.get(slug='scott-rosenberg'))
self.assertEqual(res.context['author'], Author.objects.get(slug='scott-rosenberg'))
self.assertTemplateUsed(res, 'generic_views/author_detail.html')
def test_detail_by_pk_ignore_slug(self):
res = self.client.get('/detail/author/bypkignoreslug/%s-roberto-bolano/' % self.author1.pk)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], self.author1)
self.assertEqual(res.context['author'], self.author1)
self.assertTemplateUsed(res, 'generic_views/author_detail.html')
def test_detail_by_pk_ignore_slug_mismatch(self):
res = self.client.get('/detail/author/bypkignoreslug/%s-scott-rosenberg/' % self.author1.pk)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], self.author1)
self.assertEqual(res.context['author'], self.author1)
self.assertTemplateUsed(res, 'generic_views/author_detail.html')
def test_detail_by_pk_and_slug(self):
res = self.client.get('/detail/author/bypkandslug/%s-roberto-bolano/' % self.author1.pk)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], self.author1)
self.assertEqual(res.context['author'], self.author1)
self.assertTemplateUsed(res, 'generic_views/author_detail.html')
def test_detail_by_pk_and_slug_mismatch_404(self):
res = self.client.get('/detail/author/bypkandslug/%s-scott-rosenberg/' % self.author1.pk)
self.assertEqual(res.status_code, 404)
def test_verbose_name(self):
res = self.client.get('/detail/artist/%s/' % self.artist1.pk)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], self.artist1)
self.assertEqual(res.context['artist'], self.artist1)
self.assertTemplateUsed(res, 'generic_views/artist_detail.html')
def test_template_name(self):
res = self.client.get('/detail/author/%s/template_name/' % self.author1.pk)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], self.author1)
self.assertEqual(res.context['author'], self.author1)
self.assertTemplateUsed(res, 'generic_views/about.html')
def test_template_name_suffix(self):
res = self.client.get('/detail/author/%s/template_name_suffix/' % self.author1.pk)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], self.author1)
self.assertEqual(res.context['author'], self.author1)
self.assertTemplateUsed(res, 'generic_views/author_view.html')
def test_template_name_field(self):
res = self.client.get('/detail/page/%s/field/' % self.page1.pk)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], self.page1)
self.assertEqual(res.context['page'], self.page1)
self.assertTemplateUsed(res, 'generic_views/page_template.html')
def test_context_object_name(self):
res = self.client.get('/detail/author/%s/context_object_name/' % self.author1.pk)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], self.author1)
self.assertEqual(res.context['thingy'], self.author1)
self.assertNotIn('author', res.context)
self.assertTemplateUsed(res, 'generic_views/author_detail.html')
def test_duplicated_context_object_name(self):
res = self.client.get('/detail/author/%s/dupe_context_object_name/' % self.author1.pk)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], self.author1)
self.assertNotIn('author', res.context)
self.assertTemplateUsed(res, 'generic_views/author_detail.html')
def test_deferred_queryset_template_name(self):
class FormContext(SingleObjectTemplateResponseMixin):
request = RequestFactory().get('/')
model = Author
object = Author.objects.defer('name').get(pk=self.author1.pk)
self.assertEqual(FormContext().get_template_names()[0], 'generic_views/author_detail.html')
def test_deferred_queryset_context_object_name(self):
class FormContext(ModelFormMixin):
request = RequestFactory().get('/')
model = Author
object = Author.objects.defer('name').get(pk=self.author1.pk)
fields = ('name',)
form_context_data = FormContext().get_context_data()
self.assertEqual(form_context_data['object'], self.author1)
self.assertEqual(form_context_data['author'], self.author1)
def test_invalid_url(self):
self.assertRaises(AttributeError, self.client.get, '/detail/author/invalid/url/')
def test_invalid_queryset(self):
self.assertRaises(ImproperlyConfigured, self.client.get, '/detail/author/invalid/qs/')
def test_non_model_object_with_meta(self):
res = self.client.get('/detail/nonmodel/1/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'].id, "non_model_1")
| bsd-3-clause |
jonashagstedt/django-jsx | demo/demo/settings.py | 2 | 3162 | """
Django settings for demo project.
Generated by 'django-admin startproject' using Django 1.8.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '86mvm#x9^9dakhk^cu#laf$-_cr-9k$cv3@&mmqbfzf#=0($rn'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_jsx',
'demo',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'demo.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
{
'BACKEND': 'django_jsx.template.backend.JsTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'static/js')
]
}
]
# DJANGO_ISOMORPHIC_RENDERER = os.path.join(BASE_DIR, 'custom_renderer/custom-transpiled.js')
WSGI_APPLICATION = 'demo.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static_root')
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# Additional locations of static files
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static'),
]
| bsd-3-clause |
zzzeek/sqlalchemy | lib/sqlalchemy/event/api.py | 3 | 6794 | # event/api.py
# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
"""Public API functions for the event system.
"""
from __future__ import absolute_import
from .base import _registrars
from .registry import _EventKey
from .. import exc
from .. import util
CANCEL = util.symbol("CANCEL")
NO_RETVAL = util.symbol("NO_RETVAL")
def _event_key(target, identifier, fn):
for evt_cls in _registrars[identifier]:
tgt = evt_cls._accept_with(target)
if tgt is not None:
return _EventKey(target, identifier, fn, tgt)
else:
raise exc.InvalidRequestError(
"No such event '%s' for target '%s'" % (identifier, target)
)
def listen(target, identifier, fn, *args, **kw):
"""Register a listener function for the given target.
The :func:`.listen` function is part of the primary interface for the
SQLAlchemy event system, documented at :ref:`event_toplevel`.
e.g.::
from sqlalchemy import event
from sqlalchemy.schema import UniqueConstraint
def unique_constraint_name(const, table):
const.name = "uq_%s_%s" % (
table.name,
list(const.columns)[0].name
)
event.listen(
UniqueConstraint,
"after_parent_attach",
unique_constraint_name)
A given function can also be invoked for only the first invocation
of the event using the ``once`` argument::
def on_config():
do_config()
event.listen(Mapper, "before_configure", on_config, once=True)
.. warning:: The ``once`` argument does not imply automatic de-registration
of the listener function after it has been invoked a first time; a
listener entry will remain associated with the target object.
Associating an arbitrarily high number of listeners without explicitly
removing them will cause memory to grow unbounded even if ``once=True``
is specified.
.. note::
The :func:`.listen` function cannot be called at the same time
that the target event is being run. This has implications
for thread safety, and also means an event cannot be added
from inside the listener function for itself. The list of
events to be run are present inside of a mutable collection
that can't be changed during iteration.
Event registration and removal is not intended to be a "high
velocity" operation; it is a configurational operation. For
systems that need to quickly associate and deassociate with
events at high scale, use a mutable structure that is handled
from inside of a single listener.
.. versionchanged:: 1.0.0 - a ``collections.deque()`` object is now
used as the container for the list of events, which explicitly
disallows collection mutation while the collection is being
iterated.
.. seealso::
:func:`.listens_for`
:func:`.remove`
"""
_event_key(target, identifier, fn).listen(*args, **kw)
def listens_for(target, identifier, *args, **kw):
"""Decorate a function as a listener for the given target + identifier.
The :func:`.listens_for` decorator is part of the primary interface for the
SQLAlchemy event system, documented at :ref:`event_toplevel`.
e.g.::
from sqlalchemy import event
from sqlalchemy.schema import UniqueConstraint
@event.listens_for(UniqueConstraint, "after_parent_attach")
def unique_constraint_name(const, table):
const.name = "uq_%s_%s" % (
table.name,
list(const.columns)[0].name
)
A given function can also be invoked for only the first invocation
of the event using the ``once`` argument::
@event.listens_for(Mapper, "before_configure", once=True)
def on_config():
do_config()
.. warning:: The ``once`` argument does not imply automatic de-registration
of the listener function after it has been invoked a first time; a
listener entry will remain associated with the target object.
Associating an arbitrarily high number of listeners without explicitly
removing them will cause memory to grow unbounded even if ``once=True``
is specified.
.. seealso::
:func:`.listen` - general description of event listening
"""
def decorate(fn):
listen(target, identifier, fn, *args, **kw)
return fn
return decorate
def remove(target, identifier, fn):
"""Remove an event listener.
The arguments here should match exactly those which were sent to
:func:`.listen`; all the event registration which proceeded as a result
of this call will be reverted by calling :func:`.remove` with the same
arguments.
e.g.::
# if a function was registered like this...
@event.listens_for(SomeMappedClass, "before_insert", propagate=True)
def my_listener_function(*arg):
pass
# ... it's removed like this
event.remove(SomeMappedClass, "before_insert", my_listener_function)
Above, the listener function associated with ``SomeMappedClass`` was also
propagated to subclasses of ``SomeMappedClass``; the :func:`.remove`
function will revert all of these operations.
.. note::
The :func:`.remove` function cannot be called at the same time
that the target event is being run. This has implications
for thread safety, and also means an event cannot be removed
from inside the listener function for itself. The list of
events to be run are present inside of a mutable collection
that can't be changed during iteration.
Event registration and removal is not intended to be a "high
velocity" operation; it is a configurational operation. For
systems that need to quickly associate and deassociate with
events at high scale, use a mutable structure that is handled
from inside of a single listener.
.. versionchanged:: 1.0.0 - a ``collections.deque()`` object is now
used as the container for the list of events, which explicitly
disallows collection mutation while the collection is being
iterated.
.. seealso::
:func:`.listen`
"""
_event_key(target, identifier, fn).remove()
def contains(target, identifier, fn):
"""Return True if the given target/ident/fn is set up to listen."""
return _event_key(target, identifier, fn).contains()
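# Illustrative sketch (not part of the original module): :func:`.contains`
# pairs naturally with :func:`.listen` for idempotent registration, e.g.::
#
#     if not contains(SomeEngine, "connect", my_listener):
#         listen(SomeEngine, "connect", my_listener)
#
# ``SomeEngine`` and ``my_listener`` are hypothetical placeholders.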
| mit |
wolfgangmauerer/prosoda | prosoda/interactive.py | 1 | 1232 | # Commands that are useful after adist.yp has been
# run in ipython
# This file is part of prosoda. prosoda is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Copyright 2010, 2011, 2012 by Wolfgang Mauerer <[email protected]>
# All Rights Reserved.
initialiseR()
git = shelve.open("/home/wolfgang/linux-14-33")["git"]
res = createSeries(git, "__main__", ["v2.6.24", "v2.6.25"])
writeToFile(res, "/home/wolfgang/raw.dat")
runR('raw = as.xts(read.zoo(file="/home/wolfgang/raw.dat", FUN=tstamp_to_date))')
runR('reg = to.regts(raw[,1], 250)')
reg = RtoPython(runR('reg'))
raw = RtoPython(runR('raw'))
# ... and then commence with the analysis as desired
| gpl-2.0 |
c-o-m-m-a-n-d-e-r/CouchPotatoServer | libs/pyasn1/type/constraint.py | 382 | 7279 | #
# ASN.1 subtype constraints classes.
#
# Constraints are relatively rare, but every ASN1 object
# is doing checks all the time for whether they have any
# constraints and whether they are applicable to the object.
#
# What we're going to do is define objects/functions that
# can be called unconditionally if they are present, and that
# are simply not present if there are no constraints.
#
# Original concept and code by Mike C. Fletcher.
#
import sys
from pyasn1.type import error
class AbstractConstraint:
"""Abstract base-class for constraint objects
Constraints should be stored in a simple sequence in the
namespace of their client Asn1Item sub-classes.
"""
def __init__(self, *values):
self._valueMap = {}
self._setValues(values)
self.__hashedValues = None
def __call__(self, value, idx=None):
try:
self._testValue(value, idx)
except error.ValueConstraintError:
raise error.ValueConstraintError(
'%s failed at: \"%s\"' % (self, sys.exc_info()[1])
)
def __repr__(self):
return '%s(%s)' % (
self.__class__.__name__,
', '.join([repr(x) for x in self._values])
)
def __eq__(self, other):
return self is other and True or self._values == other
def __ne__(self, other): return self._values != other
def __lt__(self, other): return self._values < other
def __le__(self, other): return self._values <= other
def __gt__(self, other): return self._values > other
def __ge__(self, other): return self._values >= other
if sys.version_info[0] <= 2:
def __nonzero__(self): return bool(self._values)
else:
def __bool__(self): return bool(self._values)
def __hash__(self):
if self.__hashedValues is None:
self.__hashedValues = hash((self.__class__.__name__, self._values))
return self.__hashedValues
def _setValues(self, values): self._values = values
def _testValue(self, value, idx):
raise error.ValueConstraintError(value)
# Constraints derivation logic
def getValueMap(self): return self._valueMap
def isSuperTypeOf(self, otherConstraint):
return self in otherConstraint.getValueMap() or \
otherConstraint is self or otherConstraint == self
def isSubTypeOf(self, otherConstraint):
return otherConstraint in self._valueMap or \
otherConstraint is self or otherConstraint == self
class SingleValueConstraint(AbstractConstraint):
"""Value must be part of defined values constraint"""
def _testValue(self, value, idx):
# XXX index vals for performance?
if value not in self._values:
raise error.ValueConstraintError(value)
class ContainedSubtypeConstraint(AbstractConstraint):
"""Value must satisfy all of defined set of constraints"""
def _testValue(self, value, idx):
for c in self._values:
c(value, idx)
class ValueRangeConstraint(AbstractConstraint):
"""Value must be within start and stop values (inclusive)"""
def _testValue(self, value, idx):
if value < self.start or value > self.stop:
raise error.ValueConstraintError(value)
def _setValues(self, values):
if len(values) != 2:
raise error.PyAsn1Error(
'%s: bad constraint values' % (self.__class__.__name__,)
)
self.start, self.stop = values
if self.start > self.stop:
raise error.PyAsn1Error(
'%s: screwed constraint values (start > stop): %s > %s' % (
self.__class__.__name__,
self.start, self.stop
)
)
AbstractConstraint._setValues(self, values)
class ValueSizeConstraint(ValueRangeConstraint):
"""len(value) must be within start and stop values (inclusive)"""
def _testValue(self, value, idx):
l = len(value)
if l < self.start or l > self.stop:
raise error.ValueConstraintError(value)
class PermittedAlphabetConstraint(SingleValueConstraint):
def _setValues(self, values):
self._values = ()
for v in values:
self._values = self._values + tuple(v)
def _testValue(self, value, idx):
for v in value:
if v not in self._values:
raise error.ValueConstraintError(value)
# This is a bit kludgy, meaning two op modes within a single constraint
class InnerTypeConstraint(AbstractConstraint):
"""Value must satisfy type and presense constraints"""
def _testValue(self, value, idx):
if self.__singleTypeConstraint:
self.__singleTypeConstraint(value)
elif self.__multipleTypeConstraint:
if idx not in self.__multipleTypeConstraint:
raise error.ValueConstraintError(value)
constraint, status = self.__multipleTypeConstraint[idx]
            if status == 'ABSENT': # XXX presence is not checked!
raise error.ValueConstraintError(value)
constraint(value)
def _setValues(self, values):
self.__multipleTypeConstraint = {}
self.__singleTypeConstraint = None
for v in values:
if isinstance(v, tuple):
self.__multipleTypeConstraint[v[0]] = v[1], v[2]
else:
self.__singleTypeConstraint = v
AbstractConstraint._setValues(self, values)
# Boolean ops on constraints
class ConstraintsExclusion(AbstractConstraint):
"""Value must not fit the single constraint"""
def _testValue(self, value, idx):
try:
self._values[0](value, idx)
except error.ValueConstraintError:
return
else:
raise error.ValueConstraintError(value)
def _setValues(self, values):
if len(values) != 1:
raise error.PyAsn1Error('Single constraint expected')
AbstractConstraint._setValues(self, values)
class AbstractConstraintSet(AbstractConstraint):
"""Value must not satisfy the single constraint"""
def __getitem__(self, idx): return self._values[idx]
def __add__(self, value): return self.__class__(self, value)
def __radd__(self, value): return self.__class__(self, value)
def __len__(self): return len(self._values)
# Constraints inclusion in sets
def _setValues(self, values):
self._values = values
for v in values:
self._valueMap[v] = 1
self._valueMap.update(v.getValueMap())
class ConstraintsIntersection(AbstractConstraintSet):
"""Value must satisfy all constraints"""
def _testValue(self, value, idx):
for v in self._values:
v(value, idx)
class ConstraintsUnion(AbstractConstraintSet):
"""Value must satisfy at least one constraint"""
def _testValue(self, value, idx):
for v in self._values:
try:
v(value, idx)
except error.ValueConstraintError:
pass
else:
return
raise error.ValueConstraintError(
'all of %s failed for \"%s\"' % (self._values, value)
)
# XXX
# add tests for type check
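# Minimal usage sketch (illustrative, not part of the original module):
# constraints are callables that raise error.ValueConstraintError on failure.
def _constraint_usage_example():
    vr = ValueRangeConstraint(1, 10)
    vr(5)  # passes silently
    try:
        vr(11)  # out of range
    except error.ValueConstraintError:
        pass
    both = ConstraintsIntersection(
        ValueRangeConstraint(1, 10), SingleValueConstraint(2, 4, 8))
    both(4)  # satisfies every member constraint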
| gpl-3.0 |
kdebrab/pandas | pandas/core/indexes/category.py | 1 | 30548 | import operator
import numpy as np
from pandas._libs import index as libindex
from pandas import compat
from pandas.compat.numpy import function as nv
from pandas.core.dtypes.generic import ABCCategorical, ABCSeries
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.common import (
is_categorical_dtype,
ensure_platform_int,
is_list_like,
is_interval_dtype,
is_scalar)
from pandas.core.dtypes.missing import array_equivalent, isna
from pandas.core.algorithms import take_1d
from pandas.util._decorators import Appender, cache_readonly
from pandas.core.config import get_option
from pandas.core.indexes.base import Index, _index_shared_docs
from pandas.core import accessor
import pandas.core.common as com
import pandas.core.missing as missing
import pandas.core.indexes.base as ibase
from pandas.core.arrays.categorical import Categorical, contains
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
_index_doc_kwargs.update(dict(target_klass='CategoricalIndex'))
class CategoricalIndex(Index, accessor.PandasDelegate):
"""
Immutable Index implementing an ordered, sliceable set. CategoricalIndex
represents a sparsely populated Index with an underlying Categorical.
Parameters
----------
data : array-like or Categorical, (1-dimensional)
categories : optional, array-like
categories for the CategoricalIndex
ordered : boolean,
designating if the categories are ordered
copy : bool
Make a copy of input ndarray
name : object
Name to be stored in the index
Attributes
----------
codes
categories
ordered
Methods
-------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
as_ordered
as_unordered
map
See Also
--------
Categorical, Index
"""
_typ = 'categoricalindex'
_engine_type = libindex.Int64Engine
_attributes = ['name']
def __new__(cls, data=None, categories=None, ordered=None, dtype=None,
copy=False, name=None, fastpath=False):
if fastpath:
return cls._simple_new(data, name=name, dtype=dtype)
if name is None and hasattr(data, 'name'):
name = data.name
if isinstance(data, ABCCategorical):
data = cls._create_categorical(data, categories, ordered,
dtype)
elif isinstance(data, CategoricalIndex):
data = data._data
data = cls._create_categorical(data, categories, ordered,
dtype)
else:
# don't allow scalars
# if data is None, then categories must be provided
if is_scalar(data):
if data is not None or categories is None:
cls._scalar_data_error(data)
data = []
data = cls._create_categorical(data, categories, ordered,
dtype)
if copy:
data = data.copy()
return cls._simple_new(data, name=name)
def _create_from_codes(self, codes, categories=None, ordered=None,
name=None):
"""
*this is an internal non-public method*
create the correct categorical from codes
Parameters
----------
codes : new codes
categories : optional categories, defaults to existing
ordered : optional ordered attribute, defaults to existing
name : optional name attribute, defaults to existing
Returns
-------
CategoricalIndex
"""
if categories is None:
categories = self.categories
if ordered is None:
ordered = self.ordered
if name is None:
name = self.name
cat = Categorical.from_codes(codes, categories=categories,
ordered=self.ordered)
return CategoricalIndex(cat, name=name)
@classmethod
def _create_categorical(cls, data, categories=None, ordered=None,
dtype=None):
"""
*this is an internal non-public method*
create the correct categorical from data and the properties
Parameters
----------
data : data for new Categorical
categories : optional categories, defaults to existing
ordered : optional ordered attribute, defaults to existing
dtype : CategoricalDtype, defaults to existing
Returns
-------
Categorical
"""
if (isinstance(data, (cls, ABCSeries)) and
is_categorical_dtype(data)):
data = data.values
if not isinstance(data, ABCCategorical):
if ordered is None and dtype is None:
ordered = False
data = Categorical(data, categories=categories, ordered=ordered,
dtype=dtype)
else:
if categories is not None:
data = data.set_categories(categories, ordered=ordered)
elif ordered is not None and ordered != data.ordered:
data = data.set_ordered(ordered)
if isinstance(dtype, CategoricalDtype) and dtype != data.dtype:
# we want to silently ignore dtype='category'
data = data._set_dtype(dtype)
return data
@classmethod
def _simple_new(cls, values, name=None, categories=None, ordered=None,
dtype=None, **kwargs):
result = object.__new__(cls)
values = cls._create_categorical(values, categories, ordered,
dtype=dtype)
result._data = values
result.name = name
for k, v in compat.iteritems(kwargs):
setattr(result, k, v)
result._reset_identity()
return result
@Appender(_index_shared_docs['_shallow_copy'])
def _shallow_copy(self, values=None, categories=None, ordered=None,
dtype=None, **kwargs):
# categories and ordered can't be part of attributes,
# as these are properties
# we want to reuse self.dtype if possible, i.e. neither are
# overridden.
if dtype is not None and (categories is not None or
ordered is not None):
raise TypeError("Cannot specify both `dtype` and `categories` "
"or `ordered`")
if categories is None and ordered is None:
dtype = self.dtype if dtype is None else dtype
return super(CategoricalIndex, self)._shallow_copy(
values=values, dtype=dtype, **kwargs)
if categories is None:
categories = self.categories
if ordered is None:
ordered = self.ordered
return super(CategoricalIndex, self)._shallow_copy(
values=values, categories=categories,
ordered=ordered, **kwargs)
def _is_dtype_compat(self, other):
"""
*this is an internal non-public method*
provide a comparison between the dtype of self and other (coercing if
needed)
Raises
------
TypeError if the dtypes are not compatible
"""
if is_categorical_dtype(other):
if isinstance(other, CategoricalIndex):
other = other._values
if not other.is_dtype_equal(self):
raise TypeError("categories must match existing categories "
"when appending")
else:
values = other
if not is_list_like(values):
values = [values]
other = CategoricalIndex(self._create_categorical(
other, dtype=self.dtype))
if not other.isin(values).all():
raise TypeError("cannot append a non-category item to a "
"CategoricalIndex")
return other
def equals(self, other):
"""
        Determines if two CategoricalIndex objects contain the same elements.
"""
if self.is_(other):
return True
if not isinstance(other, Index):
return False
try:
other = self._is_dtype_compat(other)
return array_equivalent(self._data, other)
except (TypeError, ValueError):
pass
return False
@property
def _formatter_func(self):
return self.categories._formatter_func
def _format_attrs(self):
"""
Return a list of tuples of the (attr,formatted_value)
"""
max_categories = (10 if get_option("display.max_categories") == 0 else
get_option("display.max_categories"))
attrs = [
('categories',
ibase.default_pprint(self.categories,
max_seq_items=max_categories)),
('ordered', self.ordered)]
if self.name is not None:
attrs.append(('name', ibase.default_pprint(self.name)))
attrs.append(('dtype', "'%s'" % self.dtype.name))
max_seq_items = get_option('display.max_seq_items') or len(self)
if len(self) > max_seq_items:
attrs.append(('length', len(self)))
return attrs
@property
def inferred_type(self):
return 'categorical'
@property
def values(self):
""" return the underlying data, which is a Categorical """
return self._data
@property
def itemsize(self):
# Size of the items in categories, not codes.
return self.values.itemsize
def get_values(self):
""" return the underlying data as an ndarray """
return self._data.get_values()
def tolist(self):
return self._data.tolist()
@property
def codes(self):
return self._data.codes
@property
def categories(self):
return self._data.categories
@property
def ordered(self):
return self._data.ordered
def _reverse_indexer(self):
return self._data._reverse_indexer()
@Appender(_index_shared_docs['__contains__'] % _index_doc_kwargs)
def __contains__(self, key):
# if key is a NaN, check if any NaN is in self.
if isna(key):
return self.hasnans
return contains(self, key, container=self._engine)
@Appender(_index_shared_docs['contains'] % _index_doc_kwargs)
def contains(self, key):
return key in self
def __array__(self, dtype=None):
""" the array interface, return my values """
return np.array(self._data, dtype=dtype)
@Appender(_index_shared_docs['astype'])
def astype(self, dtype, copy=True):
if is_interval_dtype(dtype):
from pandas import IntervalIndex
return IntervalIndex(np.array(self))
elif is_categorical_dtype(dtype):
# GH 18630
dtype = self.dtype.update_dtype(dtype)
if dtype == self.dtype:
return self.copy() if copy else self
return super(CategoricalIndex, self).astype(dtype=dtype, copy=copy)
@cache_readonly
def _isnan(self):
""" return if each value is nan"""
return self._data.codes == -1
@Appender(ibase._index_shared_docs['fillna'])
def fillna(self, value, downcast=None):
self._assert_can_do_op(value)
return CategoricalIndex(self._data.fillna(value), name=self.name)
def argsort(self, *args, **kwargs):
return self.values.argsort(*args, **kwargs)
@cache_readonly
def _engine(self):
# we are going to look things up with the codes themselves
return self._engine_type(lambda: self.codes.astype('i8'), len(self))
# introspection
@cache_readonly
def is_unique(self):
return self._engine.is_unique
@property
def is_monotonic_increasing(self):
return self._engine.is_monotonic_increasing
@property
def is_monotonic_decreasing(self):
return self._engine.is_monotonic_decreasing
@Appender(_index_shared_docs['index_unique'] % _index_doc_kwargs)
def unique(self, level=None):
if level is not None:
self._validate_index_level(level)
result = self.values.unique()
# CategoricalIndex._shallow_copy keeps original categories
# and ordered if not otherwise specified
return self._shallow_copy(result, categories=result.categories,
ordered=result.ordered)
@Appender(Index.duplicated.__doc__)
def duplicated(self, keep='first'):
from pandas._libs.hashtable import duplicated_int64
codes = self.codes.astype('i8')
return duplicated_int64(codes, keep)
def _to_safe_for_reshape(self):
""" convert to object if we are a categorical """
return self.astype('object')
def get_loc(self, key, method=None):
"""
Get integer location, slice or boolean mask for requested label.
Parameters
----------
key : label
method : {None}
* default: exact matches only.
Returns
-------
loc : int if unique index, slice if monotonic index, else mask
Examples
        --------
>>> unique_index = pd.CategoricalIndex(list('abc'))
>>> unique_index.get_loc('b')
1
>>> monotonic_index = pd.CategoricalIndex(list('abbc'))
>>> monotonic_index.get_loc('b')
slice(1, 3, None)
>>> non_monotonic_index = pd.CategoricalIndex(list('abcb'))
>>> non_monotonic_index.get_loc('b')
array([False, True, False, True], dtype=bool)
"""
codes = self.categories.get_loc(key)
if (codes == -1):
raise KeyError(key)
return self._engine.get_loc(codes)
def get_value(self, series, key):
"""
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing
"""
try:
k = com._values_from_object(key)
k = self._convert_scalar_indexer(k, kind='getitem')
indexer = self.get_loc(k)
return series.iloc[indexer]
except (KeyError, TypeError):
pass
        # we might be a positional indexer
return super(CategoricalIndex, self).get_value(series, key)
def _can_reindex(self, indexer):
""" always allow reindexing """
pass
@Appender(_index_shared_docs['where'])
def where(self, cond, other=None):
if other is None:
other = self._na_value
values = np.where(cond, self.values, other)
cat = Categorical(values,
categories=self.categories,
ordered=self.ordered)
return self._shallow_copy(cat, **self._get_attributes_dict())
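    # Illustrative sketch (not part of the original source): ``where`` keeps
    # the categorical dtype, so ``other`` should be NaN or an existing
    # category; anything else is coerced to NaN by the Categorical
    # constructor. For example::
    #
    #     ci = pd.CategoricalIndex(['a', 'b', 'c'])
    #     ci.where(ci != 'b')   # -> ['a', nan, 'c'] with the same categories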
def reindex(self, target, method=None, level=None, limit=None,
tolerance=None):
"""
Create index with target's values (move/add/delete values as necessary)
Returns
-------
new_index : pd.Index
Resulting index
indexer : np.ndarray or None
Indices of output values in original index
"""
if method is not None:
raise NotImplementedError("argument method is not implemented for "
"CategoricalIndex.reindex")
if level is not None:
raise NotImplementedError("argument level is not implemented for "
"CategoricalIndex.reindex")
if limit is not None:
raise NotImplementedError("argument limit is not implemented for "
"CategoricalIndex.reindex")
target = ibase.ensure_index(target)
if not is_categorical_dtype(target) and not target.is_unique:
raise ValueError("cannot reindex with a non-unique indexer")
indexer, missing = self.get_indexer_non_unique(np.array(target))
if len(self.codes):
new_target = self.take(indexer)
else:
new_target = target
# filling in missing if needed
if len(missing):
cats = self.categories.get_indexer(target)
if (cats == -1).any():
# coerce to a regular index here!
result = Index(np.array(self), name=self.name)
new_target, indexer, _ = result._reindex_non_unique(
np.array(target))
else:
codes = new_target.codes.copy()
codes[indexer == -1] = cats[missing]
new_target = self._create_from_codes(codes)
# we always want to return an Index type here
# to be consistent with .reindex for other index types (e.g. they don't
# coerce based on the actual values, only on the dtype)
# unless we had an initial Categorical to begin with
# in which case we are going to conform to the passed Categorical
new_target = np.asarray(new_target)
if is_categorical_dtype(target):
new_target = target._shallow_copy(new_target, name=self.name)
else:
new_target = Index(new_target, name=self.name)
return new_target, indexer
def _reindex_non_unique(self, target):
""" reindex from a non-unique; which CategoricalIndex's are almost
always
"""
new_target, indexer = self.reindex(target)
new_indexer = None
check = indexer == -1
if check.any():
new_indexer = np.arange(len(self.take(indexer)))
new_indexer[check] = -1
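            # e.g. (illustrative): indexer == [5, -1, 2] yields
            # new_indexer == [0, -1, 2], i.e. positions into the taken
            # values, with -1 kept for targets that stayed missing.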
cats = self.categories.get_indexer(target)
if not (cats == -1).any():
# .reindex returns normal Index. Revert to CategoricalIndex if
# all targets are included in my categories
new_target = self._shallow_copy(new_target)
return new_target, indexer, new_indexer
@Appender(_index_shared_docs['get_indexer'] % _index_doc_kwargs)
def get_indexer(self, target, method=None, limit=None, tolerance=None):
from pandas.core.arrays.categorical import _recode_for_categories
method = missing.clean_reindex_fill_method(method)
target = ibase.ensure_index(target)
if self.is_unique and self.equals(target):
return np.arange(len(self), dtype='intp')
if method == 'pad' or method == 'backfill':
raise NotImplementedError("method='pad' and method='backfill' not "
"implemented yet for CategoricalIndex")
elif method == 'nearest':
raise NotImplementedError("method='nearest' not implemented yet "
                                      "for CategoricalIndex")
if (isinstance(target, CategoricalIndex) and
self.values.is_dtype_equal(target)):
if self.values.equals(target.values):
# we have the same codes
codes = target.codes
else:
codes = _recode_for_categories(target.codes,
target.categories,
self.values.categories)
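                # e.g. (illustrative): if target's categories are ['b', 'a']
                # and ours are ['a', 'b'], a target code 0 (meaning 'b') is
                # recoded to 1 so it indexes our engine correctly.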
else:
if isinstance(target, CategoricalIndex):
code_indexer = self.categories.get_indexer(target.categories)
codes = take_1d(code_indexer, target.codes, fill_value=-1)
else:
codes = self.categories.get_indexer(target)
indexer, _ = self._engine.get_indexer_non_unique(codes)
return ensure_platform_int(indexer)
@Appender(_index_shared_docs['get_indexer_non_unique'] % _index_doc_kwargs)
def get_indexer_non_unique(self, target):
target = ibase.ensure_index(target)
if isinstance(target, CategoricalIndex):
# Indexing on codes is more efficient if categories are the same:
if target.categories is self.categories:
target = target.codes
indexer, missing = self._engine.get_indexer_non_unique(target)
return ensure_platform_int(indexer), missing
target = target.values
codes = self.categories.get_indexer(target)
indexer, missing = self._engine.get_indexer_non_unique(codes)
return ensure_platform_int(indexer), missing
@Appender(_index_shared_docs['_convert_scalar_indexer'])
def _convert_scalar_indexer(self, key, kind=None):
if self.categories._defer_to_indexing:
return self.categories._convert_scalar_indexer(key, kind=kind)
return super(CategoricalIndex, self)._convert_scalar_indexer(
key, kind=kind)
@Appender(_index_shared_docs['_convert_list_indexer'])
def _convert_list_indexer(self, keyarr, kind=None):
# Return our indexer or raise if all of the values are not included in
# the categories
if self.categories._defer_to_indexing:
indexer = self.categories._convert_list_indexer(keyarr, kind=kind)
return Index(self.codes).get_indexer_for(indexer)
indexer = self.categories.get_indexer(np.asarray(keyarr))
if (indexer == -1).any():
raise KeyError(
"a list-indexer must only "
"include values that are "
"in the categories")
return self.get_indexer(keyarr)
@Appender(_index_shared_docs['_convert_arr_indexer'])
def _convert_arr_indexer(self, keyarr):
keyarr = com._asarray_tuplesafe(keyarr)
if self.categories._defer_to_indexing:
return keyarr
return self._shallow_copy(keyarr)
@Appender(_index_shared_docs['_convert_index_indexer'])
def _convert_index_indexer(self, keyarr):
return self._shallow_copy(keyarr)
@Appender(_index_shared_docs['take'] % _index_doc_kwargs)
def take(self, indices, axis=0, allow_fill=True,
fill_value=None, **kwargs):
nv.validate_take(tuple(), kwargs)
indices = ensure_platform_int(indices)
taken = self._assert_take_fillable(self.codes, indices,
allow_fill=allow_fill,
fill_value=fill_value,
na_value=-1)
return self._create_from_codes(taken)
def is_dtype_equal(self, other):
return self._data.is_dtype_equal(other)
take_nd = take
def map(self, mapper):
"""
Map values using input correspondence (a dict, Series, or function).
Maps the values (their categories, not the codes) of the index to new
categories. If the mapping correspondence is one-to-one the result is a
:class:`~pandas.CategoricalIndex` which has the same order property as
the original, otherwise an :class:`~pandas.Index` is returned.
        If a `dict` or :class:`~pandas.Series` is used, any unmapped category is
mapped to `NaN`. Note that if this happens an :class:`~pandas.Index`
will be returned.
Parameters
----------
mapper : function, dict, or Series
Mapping correspondence.
Returns
-------
pandas.CategoricalIndex or pandas.Index
Mapped index.
See Also
--------
Index.map : Apply a mapping correspondence on an
:class:`~pandas.Index`.
Series.map : Apply a mapping correspondence on a
:class:`~pandas.Series`.
Series.apply : Apply more complex functions on a
:class:`~pandas.Series`.
Examples
--------
>>> idx = pd.CategoricalIndex(['a', 'b', 'c'])
>>> idx
CategoricalIndex(['a', 'b', 'c'], categories=['a', 'b', 'c'],
ordered=False, dtype='category')
>>> idx.map(lambda x: x.upper())
CategoricalIndex(['A', 'B', 'C'], categories=['A', 'B', 'C'],
ordered=False, dtype='category')
>>> idx.map({'a': 'first', 'b': 'second', 'c': 'third'})
CategoricalIndex(['first', 'second', 'third'], categories=['first',
'second', 'third'], ordered=False, dtype='category')
If the mapping is one-to-one the ordering of the categories is
preserved:
>>> idx = pd.CategoricalIndex(['a', 'b', 'c'], ordered=True)
>>> idx
CategoricalIndex(['a', 'b', 'c'], categories=['a', 'b', 'c'],
ordered=True, dtype='category')
>>> idx.map({'a': 3, 'b': 2, 'c': 1})
CategoricalIndex([3, 2, 1], categories=[3, 2, 1], ordered=True,
dtype='category')
If the mapping is not one-to-one an :class:`~pandas.Index` is returned:
>>> idx.map({'a': 'first', 'b': 'second', 'c': 'first'})
Index(['first', 'second', 'first'], dtype='object')
If a `dict` is used, all unmapped categories are mapped to `NaN` and
the result is an :class:`~pandas.Index`:
>>> idx.map({'a': 'first', 'b': 'second'})
Index(['first', 'second', nan], dtype='object')
"""
return self._shallow_copy_with_infer(self.values.map(mapper))
def delete(self, loc):
"""
Make new Index with passed location(-s) deleted
Returns
-------
new_index : Index
"""
return self._create_from_codes(np.delete(self.codes, loc))
def insert(self, loc, item):
"""
        Make new Index inserting new item at location. Follows
        Python list.insert semantics for negative values.
Parameters
----------
loc : int
item : object
Returns
-------
new_index : Index
Raises
------
        TypeError
            If the item is not an existing category (and is not NA).
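        Examples
        --------
        A minimal sketch (repr layout assumed, so the doctest is skipped):
        >>> ci = pd.CategoricalIndex(['a', 'b', 'c'])
        >>> ci.insert(1, 'a')  # doctest: +SKIP
        CategoricalIndex(['a', 'a', 'b', 'c'], categories=['a', 'b', 'c'],
                         ordered=False, dtype='category')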
"""
code = self.categories.get_indexer([item])
if (code == -1) and not (is_scalar(item) and isna(item)):
raise TypeError("cannot insert an item into a CategoricalIndex "
"that is not already an existing category")
codes = self.codes
codes = np.concatenate((codes[:loc], code, codes[loc:]))
return self._create_from_codes(codes)
def _concat(self, to_concat, name):
# if calling index is category, don't check dtype of others
return CategoricalIndex._concat_same_dtype(self, to_concat, name)
def _concat_same_dtype(self, to_concat, name):
"""
        Concatenate to_concat, whose elements must all be of the same class.
        Raises if an element's categories are not compatible with this
        index's categories (see _is_dtype_compat).
"""
to_concat = [self._is_dtype_compat(c) for c in to_concat]
codes = np.concatenate([c.codes for c in to_concat])
result = self._create_from_codes(codes, name=name)
# if name is None, _create_from_codes sets self.name
result.name = name
return result
def _codes_for_groupby(self, sort, observed):
""" Return a Categorical adjusted for groupby """
return self.values._codes_for_groupby(sort, observed)
@classmethod
def _add_comparison_methods(cls):
""" add in comparison methods """
def _make_compare(op):
opname = '__{op}__'.format(op=op.__name__)
def _evaluate_compare(self, other):
# if we have a Categorical type, then must have the same
# categories
if isinstance(other, CategoricalIndex):
other = other._values
elif isinstance(other, Index):
other = self._create_categorical(
other._values, dtype=self.dtype)
if isinstance(other, (ABCCategorical, np.ndarray,
ABCSeries)):
if len(self.values) != len(other):
raise ValueError("Lengths must match to compare")
if isinstance(other, ABCCategorical):
if not self.values.is_dtype_equal(other):
raise TypeError("categorical index comparisons must "
"have the same categories and ordered "
"attributes")
result = op(self.values, other)
if isinstance(result, ABCSeries):
# Dispatch to pd.Categorical returned NotImplemented
# and we got a Series back; down-cast to ndarray
result = result.values
return result
return compat.set_function_name(_evaluate_compare, opname, cls)
cls.__eq__ = _make_compare(operator.eq)
cls.__ne__ = _make_compare(operator.ne)
cls.__lt__ = _make_compare(operator.lt)
cls.__gt__ = _make_compare(operator.gt)
cls.__le__ = _make_compare(operator.le)
cls.__ge__ = _make_compare(operator.ge)
def _delegate_method(self, name, *args, **kwargs):
""" method delegation to the ._values """
method = getattr(self._values, name)
if 'inplace' in kwargs:
raise ValueError("cannot use inplace with CategoricalIndex")
res = method(*args, **kwargs)
if is_scalar(res):
return res
return CategoricalIndex(res, name=self.name)
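    # Illustrative routing sketch (hypothetical call, not from this file):
    # ci.rename_categories(['x', 'y']) reaches Categorical.rename_categories
    # via _delegate_method and the result is re-wrapped as a CategoricalIndex.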
@classmethod
def _add_accessors(cls):
""" add in Categorical accessor methods """
CategoricalIndex._add_delegate_accessors(
delegate=Categorical, accessors=["rename_categories",
"reorder_categories",
"add_categories",
"remove_categories",
"remove_unused_categories",
"set_categories",
"as_ordered", "as_unordered",
"min", "max"],
typ='method', overwrite=True)
CategoricalIndex._add_numeric_methods_add_sub_disabled()
CategoricalIndex._add_numeric_methods_disabled()
CategoricalIndex._add_logical_methods_disabled()
CategoricalIndex._add_comparison_methods()
CategoricalIndex._add_accessors()
| bsd-3-clause |
floresconlimon/qutebrowser | qutebrowser/mainwindow/statusbar/prompter.py | 5 | 14437 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2015 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Manager for questions to be shown in the statusbar."""
import sip
import collections
from PyQt5.QtCore import pyqtSlot, pyqtSignal, QTimer, QObject
from PyQt5.QtWidgets import QLineEdit
from qutebrowser.keyinput import modeman
from qutebrowser.commands import cmdutils
from qutebrowser.utils import usertypes, log, qtutils, objreg, utils
PromptContext = collections.namedtuple('PromptContext',
['question', 'text', 'input_text',
'echo_mode', 'input_visible'])
AuthTuple = collections.namedtuple('AuthTuple', ['user', 'password'])
class Prompter(QObject):
"""Manager for questions to be shown in the statusbar.
The way in which multiple questions are handled deserves some explanation.
If a question is blocking, we *need* to ask it immediately, and can't wait
for previous questions to finish. We could theoretically ask a blocking
question inside of another blocking one, so in ask_question we simply save
the current prompt state on the stack, let the user answer the *most
recent* question, and then restore the previous state.
With a non-blocking question, things are a bit easier. We simply add it to
self._queue if we're still busy handling another question, since it can be
answered at any time.
In either case, as soon as we finished handling a question, we call
_pop_later() which schedules a _pop to ask the next question in _queue. We
schedule it rather than doing it immediately because then the order of how
things happen is clear, e.g. on_mode_left can't happen after we already set
up the *new* question.
Class Attributes:
KEY_MODES: A mapping of PromptModes to KeyModes.
Attributes:
_shutting_down: Whether we're currently shutting down the prompter and
should ignore future questions to avoid segfaults.
_question: A Question object with the question to be asked to the user.
_loops: A list of local EventLoops to spin in when blocking.
_queue: A deque of waiting questions.
_busy: If we're currently busy with asking a question.
_win_id: The window ID this object is associated with.
Signals:
show_prompt: Emitted when the prompt widget should be shown.
hide_prompt: Emitted when the prompt widget should be hidden.
"""
KEY_MODES = {
usertypes.PromptMode.yesno: usertypes.KeyMode.yesno,
usertypes.PromptMode.text: usertypes.KeyMode.prompt,
usertypes.PromptMode.user_pwd: usertypes.KeyMode.prompt,
usertypes.PromptMode.alert: usertypes.KeyMode.prompt,
}
show_prompt = pyqtSignal()
hide_prompt = pyqtSignal()
def __init__(self, win_id, parent=None):
super().__init__(parent)
self._shutting_down = False
self._question = None
self._loops = []
self._queue = collections.deque()
self._busy = False
self._win_id = win_id
def __repr__(self):
return utils.get_repr(self, loops=len(self._loops),
question=self._question, queue=len(self._queue),
busy=self._busy)
def _pop_later(self):
"""Helper to call self._pop as soon as everything else is done."""
QTimer.singleShot(0, self._pop)
def _pop(self):
"""Pop a question from the queue and ask it, if there are any."""
log.statusbar.debug("Popping from queue {}".format(self._queue))
if self._queue:
question = self._queue.popleft()
if not sip.isdeleted(question):
# the question could already be deleted, e.g. by a cancelled
# download. See
# https://github.com/The-Compiler/qutebrowser/issues/415
self.ask_question(question, blocking=False)
def _get_ctx(self):
"""Get a PromptContext based on the current state."""
if not self._busy:
return None
prompt = objreg.get('prompt', scope='window', window=self._win_id)
ctx = PromptContext(question=self._question,
text=prompt.txt.text(),
input_text=prompt.lineedit.text(),
echo_mode=prompt.lineedit.echoMode(),
input_visible=prompt.lineedit.isVisible())
return ctx
def _restore_ctx(self, ctx):
"""Restore state from a PromptContext.
Args:
ctx: A PromptContext previously saved by _get_ctx, or None.
Return: True if a context was restored, False otherwise.
"""
log.statusbar.debug("Restoring context {}".format(ctx))
if ctx is None:
self.hide_prompt.emit()
self._busy = False
return False
self._question = ctx.question
prompt = objreg.get('prompt', scope='window', window=self._win_id)
prompt.txt.setText(ctx.text)
prompt.lineedit.setText(ctx.input_text)
prompt.lineedit.setEchoMode(ctx.echo_mode)
prompt.lineedit.setVisible(ctx.input_visible)
self.show_prompt.emit()
mode = self.KEY_MODES[ctx.question.mode]
ctx.question.aborted.connect(
lambda: modeman.maybe_leave(self._win_id, mode, 'aborted'))
modeman.enter(self._win_id, mode, 'question asked')
return True
def _display_question(self):
"""Display the question saved in self._question."""
prompt = objreg.get('prompt', scope='window', window=self._win_id)
if self._question.mode == usertypes.PromptMode.yesno:
if self._question.default is None:
suffix = ""
elif self._question.default:
suffix = " (yes)"
else:
suffix = " (no)"
prompt.txt.setText(self._question.text + suffix)
prompt.lineedit.hide()
elif self._question.mode == usertypes.PromptMode.text:
prompt.txt.setText(self._question.text)
if self._question.default:
prompt.lineedit.setText(self._question.default)
prompt.lineedit.show()
elif self._question.mode == usertypes.PromptMode.user_pwd:
prompt.txt.setText(self._question.text)
if self._question.default:
prompt.lineedit.setText(self._question.default)
prompt.lineedit.show()
elif self._question.mode == usertypes.PromptMode.alert:
prompt.txt.setText(self._question.text + ' (ok)')
prompt.lineedit.hide()
else:
raise ValueError("Invalid prompt mode!")
log.modes.debug("Question asked, focusing {!r}".format(
prompt.lineedit))
prompt.lineedit.setFocus()
self.show_prompt.emit()
self._busy = True
def shutdown(self):
"""Cancel all blocking questions.
Quits and removes all running event loops.
Return:
True if loops needed to be aborted,
False otherwise.
"""
self._shutting_down = True
if self._loops:
for loop in self._loops:
loop.quit()
loop.deleteLater()
return True
else:
return False
@pyqtSlot(usertypes.KeyMode)
def on_mode_left(self, mode):
"""Clear and reset input when the mode was left."""
prompt = objreg.get('prompt', scope='window', window=self._win_id)
if mode in (usertypes.KeyMode.prompt, usertypes.KeyMode.yesno):
prompt.txt.setText('')
prompt.lineedit.clear()
prompt.lineedit.setEchoMode(QLineEdit.Normal)
self.hide_prompt.emit()
self._busy = False
if self._question.answer is None and not self._question.is_aborted:
self._question.cancel()
@cmdutils.register(instance='prompter', hide=True, scope='window',
modes=[usertypes.KeyMode.prompt,
usertypes.KeyMode.yesno])
def prompt_accept(self):
"""Accept the current prompt.
//
This executes the next action depending on the question mode, e.g. asks
for the password or leaves the mode.
"""
prompt = objreg.get('prompt', scope='window', window=self._win_id)
if (self._question.mode == usertypes.PromptMode.user_pwd and
self._question.user is None):
# User just entered a username
self._question.user = prompt.lineedit.text()
prompt.txt.setText("Password:")
prompt.lineedit.clear()
prompt.lineedit.setEchoMode(QLineEdit.Password)
elif self._question.mode == usertypes.PromptMode.user_pwd:
# User just entered a password
password = prompt.lineedit.text()
self._question.answer = AuthTuple(self._question.user, password)
modeman.maybe_leave(self._win_id, usertypes.KeyMode.prompt,
'prompt accept')
self._question.done()
elif self._question.mode == usertypes.PromptMode.text:
# User just entered text.
self._question.answer = prompt.lineedit.text()
modeman.maybe_leave(self._win_id, usertypes.KeyMode.prompt,
'prompt accept')
self._question.done()
elif self._question.mode == usertypes.PromptMode.yesno:
# User wants to accept the default of a yes/no question.
self._question.answer = self._question.default
modeman.maybe_leave(self._win_id, usertypes.KeyMode.yesno,
'yesno accept')
self._question.done()
elif self._question.mode == usertypes.PromptMode.alert:
# User acknowledged an alert
self._question.answer = None
modeman.maybe_leave(self._win_id, usertypes.KeyMode.prompt,
'alert accept')
self._question.done()
else:
raise ValueError("Invalid question mode!")
@cmdutils.register(instance='prompter', hide=True, scope='window',
modes=[usertypes.KeyMode.yesno])
def prompt_yes(self):
"""Answer yes to a yes/no prompt."""
if self._question.mode != usertypes.PromptMode.yesno:
# We just ignore this if we don't have a yes/no question.
return
self._question.answer = True
modeman.maybe_leave(self._win_id, usertypes.KeyMode.yesno,
'yesno accept')
self._question.done()
@cmdutils.register(instance='prompter', hide=True, scope='window',
modes=[usertypes.KeyMode.yesno])
def prompt_no(self):
"""Answer no to a yes/no prompt."""
if self._question.mode != usertypes.PromptMode.yesno:
# We just ignore this if we don't have a yes/no question.
return
self._question.answer = False
modeman.maybe_leave(self._win_id, usertypes.KeyMode.yesno,
                            'yesno accept')
self._question.done()
@pyqtSlot(usertypes.Question, bool)
def ask_question(self, question, blocking):
"""Dispkay a question in the statusbar.
Args:
question: The Question object to ask.
blocking: If True, this function blocks and returns the result.
Return:
The answer of the user when blocking=True.
None if blocking=False.
"""
log.statusbar.debug("Asking question {}, blocking {}, loops {}, queue "
"{}".format(question, blocking, self._loops,
self._queue))
if self._shutting_down:
# If we're currently shutting down we have to ignore this question
# to avoid segfaults - see
# https://github.com/The-Compiler/qutebrowser/issues/95
log.statusbar.debug("Ignoring question because we're shutting "
"down.")
question.abort()
return None
if self._busy and not blocking:
# We got an async question, but we're already busy with one, so we
# just queue it up for later.
log.statusbar.debug("Adding {} to queue.".format(question))
self._queue.append(question)
return
if blocking:
# If we're blocking we save the old state on the stack, so we can
# restore it after exec, if exec gets called multiple times.
context = self._get_ctx()
self._question = question
self._display_question()
mode = self.KEY_MODES[self._question.mode]
question.aborted.connect(
lambda: modeman.maybe_leave(self._win_id, mode, 'aborted'))
modeman.enter(self._win_id, mode, 'question asked')
if blocking:
loop = qtutils.EventLoop()
self._loops.append(loop)
loop.destroyed.connect(lambda: self._loops.remove(loop))
question.completed.connect(loop.quit)
question.completed.connect(loop.deleteLater)
loop.exec_()
if not self._restore_ctx(context):
# Nothing left to restore, so we can go back to popping async
# questions.
if self._queue:
self._pop_later()
return self._question.answer
else:
question.completed.connect(self._pop_later)
| gpl-3.0 |
csgrad/ns-3-9-ngwmn | bindings/python/apidefs/gcc-ILP32/ns3_module_udp_echo.py | 10 | 7949 | from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers
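# Illustrative sketch of how these hooks are typically driven (module name
# and call order assumed from pybindgen / ns-3 binding conventions, not
# taken from this file):
#   root = Module('ns3', cpp_namespace='::ns3')
#   register_types(root)
#   register_methods(root)
#   register_functions(root)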
def register_types(module):
root_module = module.get_root()
## udp-echo-client.h: ns3::UdpEchoClient [class]
module.add_class('UdpEchoClient', parent=root_module['ns3::Application'])
## udp-echo-server.h: ns3::UdpEchoServer [class]
module.add_class('UdpEchoServer', parent=root_module['ns3::Application'])
## Register a nested module for the namespace Config
nested_module = module.add_cpp_namespace('Config')
register_types_ns3_Config(nested_module)
## Register a nested module for the namespace FatalImpl
nested_module = module.add_cpp_namespace('FatalImpl')
register_types_ns3_FatalImpl(nested_module)
## Register a nested module for the namespace addressUtils
nested_module = module.add_cpp_namespace('addressUtils')
register_types_ns3_addressUtils(nested_module)
## Register a nested module for the namespace aodv
nested_module = module.add_cpp_namespace('aodv')
register_types_ns3_aodv(nested_module)
## Register a nested module for the namespace dot11s
nested_module = module.add_cpp_namespace('dot11s')
register_types_ns3_dot11s(nested_module)
## Register a nested module for the namespace flame
nested_module = module.add_cpp_namespace('flame')
register_types_ns3_flame(nested_module)
## Register a nested module for the namespace internal
nested_module = module.add_cpp_namespace('internal')
register_types_ns3_internal(nested_module)
## Register a nested module for the namespace olsr
nested_module = module.add_cpp_namespace('olsr')
register_types_ns3_olsr(nested_module)
def register_types_ns3_Config(module):
root_module = module.get_root()
def register_types_ns3_FatalImpl(module):
root_module = module.get_root()
def register_types_ns3_addressUtils(module):
root_module = module.get_root()
def register_types_ns3_aodv(module):
root_module = module.get_root()
def register_types_ns3_dot11s(module):
root_module = module.get_root()
def register_types_ns3_flame(module):
root_module = module.get_root()
def register_types_ns3_internal(module):
root_module = module.get_root()
def register_types_ns3_olsr(module):
root_module = module.get_root()
def register_methods(root_module):
register_Ns3UdpEchoClient_methods(root_module, root_module['ns3::UdpEchoClient'])
register_Ns3UdpEchoServer_methods(root_module, root_module['ns3::UdpEchoServer'])
return
def register_Ns3UdpEchoClient_methods(root_module, cls):
## udp-echo-client.h: ns3::UdpEchoClient::UdpEchoClient(ns3::UdpEchoClient const & arg0) [copy constructor]
cls.add_constructor([param('ns3::UdpEchoClient const &', 'arg0')])
## udp-echo-client.h: ns3::UdpEchoClient::UdpEchoClient() [constructor]
cls.add_constructor([])
## udp-echo-client.h: uint32_t ns3::UdpEchoClient::GetDataSize() const [member function]
cls.add_method('GetDataSize',
'uint32_t',
[],
is_const=True)
## udp-echo-client.h: static ns3::TypeId ns3::UdpEchoClient::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## udp-echo-client.h: void ns3::UdpEchoClient::SetDataSize(uint32_t dataSize) [member function]
cls.add_method('SetDataSize',
'void',
[param('uint32_t', 'dataSize')])
## udp-echo-client.h: void ns3::UdpEchoClient::SetFill(std::string fill) [member function]
cls.add_method('SetFill',
'void',
[param('std::string', 'fill')])
## udp-echo-client.h: void ns3::UdpEchoClient::SetFill(uint8_t fill, uint32_t dataSize) [member function]
cls.add_method('SetFill',
'void',
[param('uint8_t', 'fill'), param('uint32_t', 'dataSize')])
## udp-echo-client.h: void ns3::UdpEchoClient::SetFill(uint8_t * fill, uint32_t fillSize, uint32_t dataSize) [member function]
cls.add_method('SetFill',
'void',
[param('uint8_t *', 'fill'), param('uint32_t', 'fillSize'), param('uint32_t', 'dataSize')])
## udp-echo-client.h: void ns3::UdpEchoClient::SetRemote(ns3::Ipv4Address ip, uint16_t port) [member function]
cls.add_method('SetRemote',
'void',
[param('ns3::Ipv4Address', 'ip'), param('uint16_t', 'port')])
## udp-echo-client.h: void ns3::UdpEchoClient::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='protected', is_virtual=True)
## udp-echo-client.h: void ns3::UdpEchoClient::StartApplication() [member function]
cls.add_method('StartApplication',
'void',
[],
visibility='private', is_virtual=True)
## udp-echo-client.h: void ns3::UdpEchoClient::StopApplication() [member function]
cls.add_method('StopApplication',
'void',
[],
visibility='private', is_virtual=True)
return
def register_Ns3UdpEchoServer_methods(root_module, cls):
## udp-echo-server.h: ns3::UdpEchoServer::UdpEchoServer(ns3::UdpEchoServer const & arg0) [copy constructor]
cls.add_constructor([param('ns3::UdpEchoServer const &', 'arg0')])
## udp-echo-server.h: ns3::UdpEchoServer::UdpEchoServer() [constructor]
cls.add_constructor([])
## udp-echo-server.h: static ns3::TypeId ns3::UdpEchoServer::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## udp-echo-server.h: void ns3::UdpEchoServer::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='protected', is_virtual=True)
## udp-echo-server.h: void ns3::UdpEchoServer::StartApplication() [member function]
cls.add_method('StartApplication',
'void',
[],
visibility='private', is_virtual=True)
## udp-echo-server.h: void ns3::UdpEchoServer::StopApplication() [member function]
cls.add_method('StopApplication',
'void',
[],
visibility='private', is_virtual=True)
return
def register_functions(root_module):
module = root_module
register_functions_ns3_Config(module.get_submodule('Config'), root_module)
register_functions_ns3_FatalImpl(module.get_submodule('FatalImpl'), root_module)
register_functions_ns3_addressUtils(module.get_submodule('addressUtils'), root_module)
register_functions_ns3_aodv(module.get_submodule('aodv'), root_module)
register_functions_ns3_dot11s(module.get_submodule('dot11s'), root_module)
register_functions_ns3_flame(module.get_submodule('flame'), root_module)
register_functions_ns3_internal(module.get_submodule('internal'), root_module)
register_functions_ns3_olsr(module.get_submodule('olsr'), root_module)
return
def register_functions_ns3_Config(module, root_module):
return
def register_functions_ns3_FatalImpl(module, root_module):
return
def register_functions_ns3_addressUtils(module, root_module):
return
def register_functions_ns3_aodv(module, root_module):
return
def register_functions_ns3_dot11s(module, root_module):
return
def register_functions_ns3_flame(module, root_module):
return
def register_functions_ns3_internal(module, root_module):
return
def register_functions_ns3_olsr(module, root_module):
return
| gpl-2.0 |
jicruz/heroku-bot | lib/youtube_dl/extractor/movingimage.py | 64 | 1774 | from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
unescapeHTML,
parse_duration,
)
class MovingImageIE(InfoExtractor):
_VALID_URL = r'https?://movingimage\.nls\.uk/film/(?P<id>\d+)'
_TEST = {
'url': 'http://movingimage.nls.uk/film/3561',
'md5': '4caa05c2b38453e6f862197571a7be2f',
'info_dict': {
'id': '3561',
'ext': 'mp4',
'title': 'SHETLAND WOOL',
'description': 'md5:c5afca6871ad59b4271e7704fe50ab04',
'duration': 900,
'thumbnail': r're:^https?://.*\.jpg$',
},
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
formats = self._extract_m3u8_formats(
self._html_search_regex(r'file\s*:\s*"([^"]+)"', webpage, 'm3u8 manifest URL'),
video_id, ext='mp4', entry_protocol='m3u8_native')
def search_field(field_name, fatal=False):
return self._search_regex(
r'<span\s+class="field_title">%s:</span>\s*<span\s+class="field_content">([^<]+)</span>' % field_name,
                webpage, field_name, fatal=fatal)
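        # Illustrative markup matched by search_field (page layout inferred
        # from the regex above, not verified against the live site):
        #   <span class="field_title">Title:</span>
        #   <span class="field_content">SHETLAND WOOL</span>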
title = unescapeHTML(search_field('Title', fatal=True)).strip('()[]')
description = unescapeHTML(search_field('Description'))
duration = parse_duration(search_field('Running time'))
thumbnail = self._search_regex(
r"image\s*:\s*'([^']+)'", webpage, 'thumbnail', fatal=False)
return {
'id': video_id,
'formats': formats,
'title': title,
'description': description,
'duration': duration,
'thumbnail': thumbnail,
}
| gpl-3.0 |
joakim-hove/django | tests/fixtures/tests.py | 113 | 35712 | from __future__ import unicode_literals
import os
import sys
import tempfile
import unittest
import warnings
from django.apps import apps
from django.contrib.sites.models import Site
from django.core import management
from django.core.serializers.base import ProgressBar
from django.db import IntegrityError, connection
from django.test import (
TestCase, TransactionTestCase, ignore_warnings, skipUnlessDBFeature,
)
from django.utils import six
from django.utils.encoding import force_text
from .models import Article, Spy, Tag, Visa
class TestCaseFixtureLoadingTests(TestCase):
fixtures = ['fixture1.json', 'fixture2.json']
def testClassFixtures(self):
"Check that test case has installed 3 fixture objects"
self.assertEqual(Article.objects.count(), 3)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: Django conquers world!>',
'<Article: Copyright is fine the way it is>',
'<Article: Poker has no place on ESPN>',
])
class SubclassTestCaseFixtureLoadingTests(TestCaseFixtureLoadingTests):
"""
Make sure that subclasses can remove fixtures from parent class (#21089).
"""
fixtures = []
def testClassFixtures(self):
"Check that there were no fixture objects installed"
self.assertEqual(Article.objects.count(), 0)
class DumpDataAssertMixin(object):
def _dumpdata_assert(self, args, output, format='json', filename=None,
natural_foreign_keys=False, natural_primary_keys=False,
use_base_manager=False, exclude_list=[], primary_keys=''):
new_io = six.StringIO()
if filename:
filename = os.path.join(tempfile.gettempdir(), filename)
management.call_command('dumpdata', *args, **{'format': format,
'stdout': new_io,
'stderr': new_io,
'output': filename,
'use_natural_foreign_keys': natural_foreign_keys,
'use_natural_primary_keys': natural_primary_keys,
'use_base_manager': use_base_manager,
'exclude': exclude_list,
'primary_keys': primary_keys})
if filename:
with open(filename, "r") as f:
command_output = f.read()
os.remove(filename)
else:
command_output = new_io.getvalue().strip()
if format == "json":
self.assertJSONEqual(command_output, output)
elif format == "xml":
self.assertXMLEqual(command_output, output)
else:
self.assertEqual(command_output, output)
class FixtureLoadingTests(DumpDataAssertMixin, TestCase):
def test_loading_and_dumping(self):
apps.clear_cache()
Site.objects.all().delete()
# Load fixture 1. Single JSON file, with two objects.
management.call_command('loaddata', 'fixture1.json', verbosity=0)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: Time to reform copyright>',
'<Article: Poker has no place on ESPN>',
])
# Dump the current contents of the database as a JSON fixture
self._dumpdata_assert(['fixtures'], '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": "News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]')
# Try just dumping the contents of fixtures.Category
self._dumpdata_assert(['fixtures.Category'], '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": "News Stories"}}]')
# ...and just fixtures.Article
self._dumpdata_assert(['fixtures.Article'], '[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]')
# ...and both
self._dumpdata_assert(['fixtures.Category', 'fixtures.Article'], '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": "News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]')
# Specify a specific model twice
self._dumpdata_assert(['fixtures.Article', 'fixtures.Article'], '[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]')
# Specify a dump that specifies Article both explicitly and implicitly
self._dumpdata_assert(['fixtures.Article', 'fixtures'], '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": "News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]')
# Specify a dump that specifies Article both explicitly and implicitly,
# but lists the app first (#22025).
self._dumpdata_assert(['fixtures', 'fixtures.Article'], '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": "News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]')
# Same again, but specify in the reverse order
self._dumpdata_assert(['fixtures'], '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": "News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]')
# Specify one model from one application, and an entire other application.
self._dumpdata_assert(['fixtures.Category', 'sites'], '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": "News Stories"}}, {"pk": 1, "model": "sites.site", "fields": {"domain": "example.com", "name": "example.com"}}]')
# Load fixture 2. JSON file imported by default. Overwrites some existing objects
management.call_command('loaddata', 'fixture2.json', verbosity=0)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: Django conquers world!>',
'<Article: Copyright is fine the way it is>',
'<Article: Poker has no place on ESPN>',
])
# Load fixture 3, XML format.
management.call_command('loaddata', 'fixture3.xml', verbosity=0)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: XML identified as leading cause of cancer>',
'<Article: Django conquers world!>',
'<Article: Copyright is fine the way it is>',
'<Article: Poker on TV is great!>',
])
# Load fixture 6, JSON file with dynamic ContentType fields. Testing ManyToOne.
management.call_command('loaddata', 'fixture6.json', verbosity=0)
self.assertQuerysetEqual(Tag.objects.all(), [
'<Tag: <Article: Copyright is fine the way it is> tagged "copyright">',
'<Tag: <Article: Copyright is fine the way it is> tagged "law">',
], ordered=False)
# Load fixture 7, XML file with dynamic ContentType fields. Testing ManyToOne.
management.call_command('loaddata', 'fixture7.xml', verbosity=0)
self.assertQuerysetEqual(Tag.objects.all(), [
'<Tag: <Article: Copyright is fine the way it is> tagged "copyright">',
'<Tag: <Article: Copyright is fine the way it is> tagged "legal">',
'<Tag: <Article: Django conquers world!> tagged "django">',
'<Tag: <Article: Django conquers world!> tagged "world domination">',
], ordered=False)
# Load fixture 8, JSON file with dynamic Permission fields. Testing ManyToMany.
management.call_command('loaddata', 'fixture8.json', verbosity=0)
self.assertQuerysetEqual(Visa.objects.all(), [
'<Visa: Django Reinhardt Can add user, Can change user, Can delete user>',
'<Visa: Stephane Grappelli Can add user>',
'<Visa: Prince >'
], ordered=False)
# Load fixture 9, XML file with dynamic Permission fields. Testing ManyToMany.
management.call_command('loaddata', 'fixture9.xml', verbosity=0)
self.assertQuerysetEqual(Visa.objects.all(), [
'<Visa: Django Reinhardt Can add user, Can change user, Can delete user>',
'<Visa: Stephane Grappelli Can add user, Can delete user>',
'<Visa: Artist formerly known as "Prince" Can change user>'
], ordered=False)
# object list is unaffected
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: XML identified as leading cause of cancer>',
'<Article: Django conquers world!>',
'<Article: Copyright is fine the way it is>',
'<Article: Poker on TV is great!>',
])
# By default, you get raw keys on dumpdata
self._dumpdata_assert(['fixtures.book'], '[{"pk": 1, "model": "fixtures.book", "fields": {"name": "Music for all ages", "authors": [3, 1]}}]')
# But you can get natural keys if you ask for them and they are available
self._dumpdata_assert(['fixtures.book'], '[{"pk": 1, "model": "fixtures.book", "fields": {"name": "Music for all ages", "authors": [["Artist formerly known as \\"Prince\\""], ["Django Reinhardt"]]}}]', natural_foreign_keys=True)
# You can also omit the primary keys for models that we can get later with natural keys.
self._dumpdata_assert(['fixtures.person'], '[{"fields": {"name": "Django Reinhardt"}, "model": "fixtures.person"}, {"fields": {"name": "Stephane Grappelli"}, "model": "fixtures.person"}, {"fields": {"name": "Artist formerly known as \\"Prince\\""}, "model": "fixtures.person"}]', natural_primary_keys=True)
# Dump the current contents of the database as a JSON fixture
self._dumpdata_assert(['fixtures'], '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": "News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker on TV is great!", "pub_date": "2006-06-16T11:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": "Copyright is fine the way it is", "pub_date": "2006-06-16T14:00:00"}}, {"pk": 4, "model": "fixtures.article", "fields": {"headline": "Django conquers world!", "pub_date": "2006-06-16T15:00:00"}}, {"pk": 5, "model": "fixtures.article", "fields": {"headline": "XML identified as leading cause of cancer", "pub_date": "2006-06-16T16:00:00"}}, {"pk": 1, "model": "fixtures.tag", "fields": {"tagged_type": ["fixtures", "article"], "name": "copyright", "tagged_id": 3}}, {"pk": 2, "model": "fixtures.tag", "fields": {"tagged_type": ["fixtures", "article"], "name": "legal", "tagged_id": 3}}, {"pk": 3, "model": "fixtures.tag", "fields": {"tagged_type": ["fixtures", "article"], "name": "django", "tagged_id": 4}}, {"pk": 4, "model": "fixtures.tag", "fields": {"tagged_type": ["fixtures", "article"], "name": "world domination", "tagged_id": 4}}, {"pk": 1, "model": "fixtures.person", "fields": {"name": "Django Reinhardt"}}, {"pk": 2, "model": "fixtures.person", "fields": {"name": "Stephane Grappelli"}}, {"pk": 3, "model": "fixtures.person", "fields": {"name": "Artist formerly known as \\"Prince\\""}}, {"pk": 1, "model": "fixtures.visa", "fields": {"person": ["Django Reinhardt"], "permissions": [["add_user", "auth", "user"], ["change_user", "auth", "user"], ["delete_user", "auth", "user"]]}}, {"pk": 2, "model": "fixtures.visa", "fields": {"person": ["Stephane Grappelli"], "permissions": [["add_user", "auth", "user"], ["delete_user", "auth", "user"]]}}, {"pk": 3, "model": "fixtures.visa", "fields": {"person": ["Artist formerly known as \\"Prince\\""], "permissions": [["change_user", "auth", "user"]]}}, {"pk": 1, "model": "fixtures.book", "fields": {"name": "Music for all ages", "authors": [["Artist formerly known as \\"Prince\\""], ["Django Reinhardt"]]}}]', natural_foreign_keys=True)
# Dump the current contents of the database as an XML fixture
self._dumpdata_assert(['fixtures'], """<?xml version="1.0" encoding="utf-8"?>
<django-objects version="1.0"><object pk="1" model="fixtures.category"><field type="CharField" name="title">News Stories</field><field type="TextField" name="description">Latest news stories</field></object><object pk="2" model="fixtures.article"><field type="CharField" name="headline">Poker on TV is great!</field><field type="DateTimeField" name="pub_date">2006-06-16T11:00:00</field></object><object pk="3" model="fixtures.article"><field type="CharField" name="headline">Copyright is fine the way it is</field><field type="DateTimeField" name="pub_date">2006-06-16T14:00:00</field></object><object pk="4" model="fixtures.article"><field type="CharField" name="headline">Django conquers world!</field><field type="DateTimeField" name="pub_date">2006-06-16T15:00:00</field></object><object pk="5" model="fixtures.article"><field type="CharField" name="headline">XML identified as leading cause of cancer</field><field type="DateTimeField" name="pub_date">2006-06-16T16:00:00</field></object><object pk="1" model="fixtures.tag"><field type="CharField" name="name">copyright</field><field to="contenttypes.contenttype" name="tagged_type" rel="ManyToOneRel"><natural>fixtures</natural><natural>article</natural></field><field type="PositiveIntegerField" name="tagged_id">3</field></object><object pk="2" model="fixtures.tag"><field type="CharField" name="name">legal</field><field to="contenttypes.contenttype" name="tagged_type" rel="ManyToOneRel"><natural>fixtures</natural><natural>article</natural></field><field type="PositiveIntegerField" name="tagged_id">3</field></object><object pk="3" model="fixtures.tag"><field type="CharField" name="name">django</field><field to="contenttypes.contenttype" name="tagged_type" rel="ManyToOneRel"><natural>fixtures</natural><natural>article</natural></field><field type="PositiveIntegerField" name="tagged_id">4</field></object><object pk="4" model="fixtures.tag"><field type="CharField" name="name">world domination</field><field to="contenttypes.contenttype" name="tagged_type" rel="ManyToOneRel"><natural>fixtures</natural><natural>article</natural></field><field type="PositiveIntegerField" name="tagged_id">4</field></object><object pk="1" model="fixtures.person"><field type="CharField" name="name">Django Reinhardt</field></object><object pk="2" model="fixtures.person"><field type="CharField" name="name">Stephane Grappelli</field></object><object pk="3" model="fixtures.person"><field type="CharField" name="name">Artist formerly known as "Prince"</field></object><object pk="1" model="fixtures.visa"><field to="fixtures.person" name="person" rel="ManyToOneRel"><natural>Django Reinhardt</natural></field><field to="auth.permission" name="permissions" rel="ManyToManyRel"><object><natural>add_user</natural><natural>auth</natural><natural>user</natural></object><object><natural>change_user</natural><natural>auth</natural><natural>user</natural></object><object><natural>delete_user</natural><natural>auth</natural><natural>user</natural></object></field></object><object pk="2" model="fixtures.visa"><field to="fixtures.person" name="person" rel="ManyToOneRel"><natural>Stephane Grappelli</natural></field><field to="auth.permission" name="permissions" rel="ManyToManyRel"><object><natural>add_user</natural><natural>auth</natural><natural>user</natural></object><object><natural>delete_user</natural><natural>auth</natural><natural>user</natural></object></field></object><object pk="3" model="fixtures.visa"><field to="fixtures.person" name="person" rel="ManyToOneRel"><natural>Artist formerly 
known as "Prince"</natural></field><field to="auth.permission" name="permissions" rel="ManyToManyRel"><object><natural>change_user</natural><natural>auth</natural><natural>user</natural></object></field></object><object pk="1" model="fixtures.book"><field type="CharField" name="name">Music for all ages</field><field to="fixtures.person" name="authors" rel="ManyToManyRel"><object><natural>Artist formerly known as "Prince"</natural></object><object><natural>Django Reinhardt</natural></object></field></object></django-objects>""", format='xml', natural_foreign_keys=True)
def test_dumpdata_with_excludes(self):
# Load fixture1 which has a site, two articles, and a category
Site.objects.all().delete()
management.call_command('loaddata', 'fixture1.json', verbosity=0)
# Excluding fixtures app should only leave sites
self._dumpdata_assert(
['sites', 'fixtures'],
'[{"pk": 1, "model": "sites.site", "fields": {"domain": "example.com", "name": "example.com"}}]',
exclude_list=['fixtures'])
# Excluding fixtures.Article/Book should leave fixtures.Category
self._dumpdata_assert(
['sites', 'fixtures'],
'[{"pk": 1, "model": "sites.site", "fields": {"domain": "example.com", "name": "example.com"}}, {"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": "News Stories"}}]',
exclude_list=['fixtures.Article', 'fixtures.Book'])
# Excluding fixtures and fixtures.Article/Book should be a no-op
self._dumpdata_assert(
['sites', 'fixtures'],
'[{"pk": 1, "model": "sites.site", "fields": {"domain": "example.com", "name": "example.com"}}, {"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": "News Stories"}}]',
exclude_list=['fixtures.Article', 'fixtures.Book'])
# Excluding sites and fixtures.Article/Book should only leave fixtures.Category
self._dumpdata_assert(
['sites', 'fixtures'],
'[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": "News Stories"}}]',
exclude_list=['fixtures.Article', 'fixtures.Book', 'sites'])
# Excluding a bogus app should throw an error
with six.assertRaisesRegex(self, management.CommandError,
"No installed app with label 'foo_app'."):
self._dumpdata_assert(['fixtures', 'sites'], '', exclude_list=['foo_app'])
# Excluding a bogus model should throw an error
with six.assertRaisesRegex(self, management.CommandError,
"Unknown model in excludes: fixtures.FooModel"):
self._dumpdata_assert(['fixtures', 'sites'], '', exclude_list=['fixtures.FooModel'])
@unittest.skipIf(sys.platform.startswith('win'), "Windows doesn't support '?' in filenames.")
def test_load_fixture_with_special_characters(self):
management.call_command('loaddata', 'fixture_with[special]chars', verbosity=0)
self.assertQuerysetEqual(Article.objects.all(), ['<Article: How To Deal With Special Characters>'])
def test_dumpdata_with_filtering_manager(self):
spy1 = Spy.objects.create(name='Paul')
spy2 = Spy.objects.create(name='Alex', cover_blown=True)
self.assertQuerysetEqual(Spy.objects.all(),
['<Spy: Paul>'])
# Use the default manager
self._dumpdata_assert(['fixtures.Spy'], '[{"pk": %d, "model": "fixtures.spy", "fields": {"cover_blown": false}}]' % spy1.pk)
# Dump using Django's base manager. Should return all objects,
# even those normally filtered by the manager
self._dumpdata_assert(['fixtures.Spy'], '[{"pk": %d, "model": "fixtures.spy", "fields": {"cover_blown": true}}, {"pk": %d, "model": "fixtures.spy", "fields": {"cover_blown": false}}]' % (spy2.pk, spy1.pk), use_base_manager=True)
def test_dumpdata_with_pks(self):
management.call_command('loaddata', 'fixture1.json', verbosity=0)
management.call_command('loaddata', 'fixture2.json', verbosity=0)
self._dumpdata_assert(
['fixtures.Article'],
'[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": "Copyright is fine the way it is", "pub_date": "2006-06-16T14:00:00"}}]',
primary_keys='2,3'
)
self._dumpdata_assert(
['fixtures.Article'],
'[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", "pub_date": "2006-06-16T12:00:00"}}]',
primary_keys='2'
)
with six.assertRaisesRegex(self, management.CommandError,
"You can only use --pks option with one model"):
self._dumpdata_assert(
['fixtures'],
'[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": "Copyright is fine the way it is", "pub_date": "2006-06-16T14:00:00"}}]',
primary_keys='2,3'
)
with six.assertRaisesRegex(self, management.CommandError,
"You can only use --pks option with one model"):
self._dumpdata_assert(
'',
'[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": "Copyright is fine the way it is", "pub_date": "2006-06-16T14:00:00"}}]',
primary_keys='2,3'
)
with six.assertRaisesRegex(self, management.CommandError,
"You can only use --pks option with one model"):
self._dumpdata_assert(
['fixtures.Article', 'fixtures.category'],
'[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": "Copyright is fine the way it is", "pub_date": "2006-06-16T14:00:00"}}]',
primary_keys='2,3'
)
def test_dumpdata_with_file_output(self):
management.call_command('loaddata', 'fixture1.json', verbosity=0)
self._dumpdata_assert(['fixtures'], '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": "News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]',
filename='dumpdata.json')
def test_dumpdata_progressbar(self):
"""
Dumpdata shows a progress bar on the command line when --output is set,
stdout is a tty, and verbosity > 0.
"""
management.call_command('loaddata', 'fixture1.json', verbosity=0)
new_io = six.StringIO()
new_io.isatty = lambda: True
_, filename = tempfile.mkstemp()
options = {
'format': 'json',
'stdout': new_io,
'stderr': new_io,
'output': filename,
}
management.call_command('dumpdata', 'fixtures', **options)
self.assertTrue(new_io.getvalue().endswith('[' + '.' * ProgressBar.progress_width + ']\n'))
# Test no progress bar when verbosity = 0
options['verbosity'] = 0
new_io = six.StringIO()
new_io.isatty = lambda: True
management.call_command('dumpdata', 'fixtures', **options)
self.assertEqual(new_io.getvalue(), '')
def test_compress_format_loading(self):
# Load fixture 4 (compressed), using format specification
management.call_command('loaddata', 'fixture4.json', verbosity=0)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: Django pets kitten>',
])
def test_compressed_specified_loading(self):
# Load fixture 5 (compressed), using format *and* compression specification
management.call_command('loaddata', 'fixture5.json.zip', verbosity=0)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: WoW subscribers now outnumber readers>',
])
def test_compressed_loading(self):
# Load fixture 5 (compressed), only compression specification
management.call_command('loaddata', 'fixture5.zip', verbosity=0)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: WoW subscribers now outnumber readers>',
])
def test_ambiguous_compressed_fixture(self):
# The name "fixture5" is ambiguous, so loading it will raise an error
with self.assertRaises(management.CommandError) as cm:
management.call_command('loaddata', 'fixture5', verbosity=0)
self.assertIn("Multiple fixtures named 'fixture5'", cm.exception.args[0])
def test_db_loading(self):
# Load db fixtures 1 and 2. These will load using the 'default' database identifier implicitly
management.call_command('loaddata', 'db_fixture_1', verbosity=0)
management.call_command('loaddata', 'db_fixture_2', verbosity=0)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: Who needs more than one database?>',
'<Article: Who needs to use compressed data?>',
])
def test_loaddata_error_message(self):
"""
Verifies that loading a fixture which contains an invalid object
outputs an error message which contains the pk of the object
that triggered the error.
"""
# MySQL needs a little prodding to reject invalid data.
# This won't affect other tests because the database connection
# is closed at the end of each test.
if connection.vendor == 'mysql':
connection.cursor().execute("SET sql_mode = 'TRADITIONAL'")
with self.assertRaises(IntegrityError) as cm:
management.call_command('loaddata', 'invalid.json', verbosity=0)
self.assertIn("Could not load fixtures.Article(pk=1):", cm.exception.args[0])
@ignore_warnings(category=UserWarning, message="No fixture named")
def test_loaddata_app_option(self):
"""
Verifies that the --app option works.
"""
management.call_command('loaddata', 'db_fixture_1', verbosity=0, app_label="someotherapp")
self.assertQuerysetEqual(Article.objects.all(), [])
management.call_command('loaddata', 'db_fixture_1', verbosity=0, app_label="fixtures")
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: Who needs more than one database?>',
])
def test_loaddata_verbosity_three(self):
output = six.StringIO()
management.call_command('loaddata', 'fixture1.json', verbosity=3, stdout=output, stderr=output)
command_output = force_text(output.getvalue())
self.assertIn(
"\rProcessed 1 object(s).\rProcessed 2 object(s)."
"\rProcessed 3 object(s).\rProcessed 4 object(s).\n",
command_output
)
def test_loading_using(self):
# Load db fixtures 1 and 2. These will load using the 'default' database identifier explicitly
management.call_command('loaddata', 'db_fixture_1', verbosity=0, using='default')
management.call_command('loaddata', 'db_fixture_2', verbosity=0, using='default')
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: Who needs more than one database?>',
'<Article: Who needs to use compressed data?>',
])
@ignore_warnings(category=UserWarning, message="No fixture named")
def test_unmatched_identifier_loading(self):
# Try to load db fixture 3. This won't load because the database identifier doesn't match
management.call_command('loaddata', 'db_fixture_3', verbosity=0)
management.call_command('loaddata', 'db_fixture_3', verbosity=0, using='default')
self.assertQuerysetEqual(Article.objects.all(), [])
def test_output_formats(self):
# Load back in fixture 1, we need the articles from it
management.call_command('loaddata', 'fixture1', verbosity=0)
# Try to load fixture 6 using format discovery
management.call_command('loaddata', 'fixture6', verbosity=0)
self.assertQuerysetEqual(Tag.objects.all(), [
'<Tag: <Article: Time to reform copyright> tagged "copyright">',
'<Tag: <Article: Time to reform copyright> tagged "law">'
], ordered=False)
# Dump the current contents of the database as a JSON fixture
self._dumpdata_assert(['fixtures'], '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": "News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}, {"pk": 1, "model": "fixtures.tag", "fields": {"tagged_type": ["fixtures", "article"], "name": "copyright", "tagged_id": 3}}, {"pk": 2, "model": "fixtures.tag", "fields": {"tagged_type": ["fixtures", "article"], "name": "law", "tagged_id": 3}}, {"pk": 1, "model": "fixtures.person", "fields": {"name": "Django Reinhardt"}}, {"pk": 2, "model": "fixtures.person", "fields": {"name": "Stephane Grappelli"}}, {"pk": 3, "model": "fixtures.person", "fields": {"name": "Prince"}}]', natural_foreign_keys=True)
# Dump the current contents of the database as an XML fixture
self._dumpdata_assert(['fixtures'], """<?xml version="1.0" encoding="utf-8"?>
<django-objects version="1.0"><object pk="1" model="fixtures.category"><field type="CharField" name="title">News Stories</field><field type="TextField" name="description">Latest news stories</field></object><object pk="2" model="fixtures.article"><field type="CharField" name="headline">Poker has no place on ESPN</field><field type="DateTimeField" name="pub_date">2006-06-16T12:00:00</field></object><object pk="3" model="fixtures.article"><field type="CharField" name="headline">Time to reform copyright</field><field type="DateTimeField" name="pub_date">2006-06-16T13:00:00</field></object><object pk="1" model="fixtures.tag"><field type="CharField" name="name">copyright</field><field to="contenttypes.contenttype" name="tagged_type" rel="ManyToOneRel"><natural>fixtures</natural><natural>article</natural></field><field type="PositiveIntegerField" name="tagged_id">3</field></object><object pk="2" model="fixtures.tag"><field type="CharField" name="name">law</field><field to="contenttypes.contenttype" name="tagged_type" rel="ManyToOneRel"><natural>fixtures</natural><natural>article</natural></field><field type="PositiveIntegerField" name="tagged_id">3</field></object><object pk="1" model="fixtures.person"><field type="CharField" name="name">Django Reinhardt</field></object><object pk="2" model="fixtures.person"><field type="CharField" name="name">Stephane Grappelli</field></object><object pk="3" model="fixtures.person"><field type="CharField" name="name">Prince</field></object></django-objects>""", format='xml', natural_foreign_keys=True)
class NonExistentFixtureTests(TestCase):
"""
Custom class to limit fixture dirs.
"""
available_apps = ['django.contrib.auth', 'django.contrib.contenttypes']
def test_loaddata_not_existent_fixture_file(self):
stdout_output = six.StringIO()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
# With verbosity=2, we get both stdout output and a warning
management.call_command(
'loaddata',
'this_fixture_doesnt_exist',
verbosity=2,
stdout=stdout_output,
)
self.assertIn("No fixture 'this_fixture_doesnt_exist' in",
force_text(stdout_output.getvalue()))
self.assertEqual(len(w), 1)
self.assertEqual(force_text(w[0].message),
"No fixture named 'this_fixture_doesnt_exist' found.")
class FixtureTransactionTests(DumpDataAssertMixin, TransactionTestCase):
available_apps = [
'fixtures',
'django.contrib.contenttypes',
'django.contrib.auth',
'django.contrib.sites',
]
@skipUnlessDBFeature('supports_forward_references')
def test_format_discovery(self):
# Load fixture 1 again, using format discovery
management.call_command('loaddata', 'fixture1', verbosity=0)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: Time to reform copyright>',
'<Article: Poker has no place on ESPN>',
])
# Try to load fixture 2 using format discovery; this will fail
# because there are two fixture2's in the fixtures directory
with self.assertRaises(management.CommandError) as cm:
management.call_command('loaddata', 'fixture2', verbosity=0)
self.assertIn("Multiple fixtures named 'fixture2'", cm.exception.args[0])
# object list is unaffected
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: Time to reform copyright>',
'<Article: Poker has no place on ESPN>',
])
# Dump the current contents of the database as a JSON fixture
self._dumpdata_assert(['fixtures'], '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": "News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]')
# Load fixture 4 (compressed), using format discovery
management.call_command('loaddata', 'fixture4', verbosity=0)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: Django pets kitten>',
'<Article: Time to reform copyright>',
'<Article: Poker has no place on ESPN>',
])
| bsd-3-clause |
chop-dbhi/varify-data-warehouse | vdw/genes/models.py | 1 | 4984 | from django.db import models
from django.contrib.auth.models import User
from objectset.models import ObjectSet, SetObject
from vdw.literature.models import PubMed
from vdw.genome.models import Chromosome
from vdw.phenotypes.models import Phenotype, PhenotypeThrough
from .managers import GeneManager
class GeneFamily(models.Model):
"Gene family tags and descriptions."
tag = models.CharField(max_length=30, null=True)
description = models.CharField(max_length=200, null=True)
class Meta(object):
db_table = 'gene_family'
class Synonym(models.Model):
"""Model which contains known alternate gene names and symbols for
the canonical genes. This can be used as an index for search-related
queries.
"""
# Call it a label since this may be a symbol, a name or something else
label = models.CharField(max_length=255, db_index=True)
class Meta(object):
db_table = 'synonym'
class Gene(models.Model):
"""Unified gene model. This includes data from multiple sources with
the appropriate `id` defined to which references the source. If multiple
sources contain have overlap, the respective `id`s will be filled in.
The canonical source is HGNC, which approves gene names and symbols, the
`approved` flag should be set if this is the approved gene name and
symbol by HGNC.
"""
chr = models.ForeignKey(Chromosome)
symbol = models.CharField(max_length=255, db_index=True)
name = models.TextField('full name', blank=True)
hgnc_id = models.IntegerField('HGNC ID', null=True, blank=True)
# Via the HGNC documentation: "Families/groups may be either structural or
# functional, therefore a gene may belong to more than one family/group"
families = models.ManyToManyField(GeneFamily, blank=True)
# Literature
articles = models.ManyToManyField(PubMed, db_table='gene_pubmed')
# Synonyms
synonyms = models.ManyToManyField(Synonym, db_table='gene_synonym')
# Phenotypes
phenotypes = models.ManyToManyField(Phenotype, through='GenePhenotype')
objects = GeneManager()
class Meta(object):
db_table = 'gene'
def __unicode__(self):
return self.symbol
def approved(self):
return self.hgnc_id is not None
def hgnc_url(self):
if self.hgnc_id:
return 'http://www.genenames.org/data/hgnc_data.php?hgnc_id=' + \
str(self.hgnc_id)
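# Illustrative usage sketch (added comment; the gene symbol and query are
# assumptions, not taken from this project): because `symbol` is indexed and
# synonyms are modelled separately, a typical lookup resolves either the
# approved symbol or one of its aliases:
#
#   gene = Gene.objects.filter(symbol__iexact='TP53').first()
#   aliases = gene.synonyms.values_list('label', flat=True) if gene else []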
class GenePhenotype(PhenotypeThrough):
gene = models.ForeignKey(Gene)
class Meta(object):
db_table = 'gene_phenotype'
class Exon(models.Model):
"Gene-specific exon region"
gene = models.ForeignKey(Gene)
index = models.IntegerField('exon index')
start = models.IntegerField('exon start position')
end = models.IntegerField('exon end position')
class Meta(object):
db_table = 'exon'
class Transcript(models.Model):
"Gene transcripts"
refseq_id = models.CharField(max_length=100, unique=True)
strand = models.CharField(max_length=1, null=True, blank=True,
help_text='+ or - for strand')
start = models.IntegerField('transcript start position', null=True,
blank=True)
end = models.IntegerField('transcript end position', null=True, blank=True)
coding_start = models.IntegerField('coding region start position',
null=True, blank=True)
coding_end = models.IntegerField('coding region end position', null=True,
blank=True)
coding_start_status = models.CharField('coding region start status',
max_length=20, null=True,
blank=True)
coding_end_status = models.CharField('coding region end status',
max_length=20, null=True, blank=True)
exon_count = models.IntegerField('number of exons', null=True, blank=True)
gene = models.ForeignKey(Gene, null=True, blank=True)
exons = models.ManyToManyField(Exon, db_table='transcript_exon')
class Meta(object):
db_table = 'transcript'
def ncbi_url(self):
return 'http://www.ncbi.nlm.nih.gov/nuccore/' + self.refseq_id
class GeneSet(ObjectSet):
user = models.ForeignKey(User, null=True, blank=True)
name = models.CharField(max_length=100, null=True, blank=True)
genes = models.ManyToManyField(Gene, through='GeneSetObject')
published = models.BooleanField(default=True)
set_object_rel = 'genes'
label_field = 'name'
def __unicode__(self):
return unicode(self.name)
class Meta(object):
db_table = 'geneset'
ordering = ('user', 'name',)
class GeneSetObject(SetObject):
object_set = models.ForeignKey(GeneSet, db_column='set_id')
set_object = models.ForeignKey(Gene, db_column='object_id')
class Meta(object):
db_table = 'geneset_setobject'
| bsd-2-clause |
mattjmorrison/ReportLab | src/reportlab/lib/colors.py | 10 | 35800 | #Copyright ReportLab Europe Ltd. 2000-2010
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/lib/colors.py
__version__=''' $Id: colors.py 3780 2010-09-17 13:40:59Z rgbecker $ '''
__doc__='''Defines standard colour-handling classes and colour names.
We define standard classes to hold colours in two models: RGB and CMYK.
These can be constructed from several popular formats. We also include
- pre-built colour objects for the HTML standard colours
- pre-built colours used in ReportLab's branding
- various conversion and construction functions
'''
import math
from reportlab.lib.utils import fp_str
class Color:
"""This class is used to represent color. Components red, green, blue
are in the range 0 (dark) to 1 (full intensity)."""
def __init__(self, red=0, green=0, blue=0, alpha=1):
"Initialize with red, green, blue in range [0-1]."
self.red = red
self.green = green
self.blue = blue
self.alpha = alpha
def __repr__(self):
return "Color(%s)" % fp_str(*(self.red, self.green, self.blue,self.alpha)).replace(' ',',')
def __hash__(self):
return hash((self.red, self.green, self.blue, self.alpha))
def __cmp__(self,other):
'''simple comparison by component; cmyk != color ever
>>> cmp(Color(0,0,0),None)
-1
>>> cmp(Color(0,0,0),black)
0
>>> cmp(Color(0,0,0),CMYKColor(0,0,0,1)),Color(0,0,0).rgba()==CMYKColor(0,0,0,1).rgba()
(-1, True)
'''
if isinstance(other,CMYKColor) or not isinstance(other,Color): return -1
try:
return cmp((self.red, self.green, self.blue, self.alpha),
(other.red, other.green, other.blue, other.alpha))
except:
return -1
return 0
def rgb(self):
"Returns a three-tuple of components"
return (self.red, self.green, self.blue)
def rgba(self):
"Returns a four-tuple of components"
return (self.red, self.green, self.blue, self.alpha)
def bitmap_rgb(self):
return tuple(map(lambda x: int(x*255)&255, self.rgb()))
def bitmap_rgba(self):
return tuple(map(lambda x: int(x*255)&255, self.rgba()))
def hexval(self):
return '0x%02x%02x%02x' % self.bitmap_rgb()
def hexvala(self):
return '0x%02x%02x%02x%02x' % self.bitmap_rgba()
_cKwds='red green blue alpha'.split()
def cKwds(self):
for k in self._cKwds:
yield k,getattr(self,k)
cKwds=property(cKwds)
def clone(self,**kwds):
'''copy then change values in kwds'''
D = dict([kv for kv in self.cKwds])
D.update(kwds)
return self.__class__(**D)
def _lookupName(self,D={}):
if not D:
for n,v in getAllNamedColors().iteritems():
if not isinstance(v,CMYKColor):
t = v.red,v.green,v.blue
if t in D:
n = n+'/'+D[t]
D[t] = n
t = self.red,self.green,self.blue
return t in D and D[t] or None
class CMYKColor(Color):
"""This represents colors using the CMYK (cyan, magenta, yellow, black)
model commonly used in professional printing. This is implemented
as a derived class so that renderers which only know about RGB "see it"
as an RGB color through its 'red','green' and 'blue' attributes, according
to an approximate function.
The RGB approximation is worked out when the object is constructed, so
the color attributes should not be changed afterwards.
Extra attributes may be attached to the class to support specific ink models,
and renderers may look for these."""
_scale = 1.0
def __init__(self, cyan=0, magenta=0, yellow=0, black=0,
spotName=None, density=1, knockout=None, alpha=1):
"""
Initialize with four colors in range [0-1]. The optional
spotName, density & knockout may be of use to specific renderers.
spotName is intended for use as an identifier to the renderer, not client programs.
density is used to modify the overall amount of ink.
knockout is a renderer-dependent option that determines whether the applied colour
knocks out (removes) existing colour; None means use the global default.
"""
self.cyan = cyan
self.magenta = magenta
self.yellow = yellow
self.black = black
self.spotName = spotName
self.density = max(min(density,1),0) # force into right range
self.knockout = knockout
self.alpha = alpha
# now work out the RGB approximation. override
self.red, self.green, self.blue = cmyk2rgb( (cyan, magenta, yellow, black) )
if density<1:
#density adjustment of rgb approximants, effectively mix with white
r, g, b = self.red, self.green, self.blue
r = density*(r-1)+1
g = density*(g-1)+1
b = density*(b-1)+1
self.red, self.green, self.blue = (r,g,b)
def __repr__(self):
return "%s(%s%s%s%s%s)" % (self.__class__.__name__,
fp_str(self.cyan, self.magenta, self.yellow, self.black).replace(' ',','),
(self.spotName and (',spotName='+repr(self.spotName)) or ''),
(self.density!=1 and (',density='+fp_str(self.density)) or ''),
(self.knockout is not None and (',knockout=%d' % self.knockout) or ''),
(self.alpha is not None and (',alpha=%s' % self.alpha) or ''),
)
def fader(self, n, reverse=False):
'''return n colors based on density fade
*NB* note this doesn't reach density zero'''
scale = self._scale
dd = scale/float(n)
L = [self.clone(density=scale - i*dd) for i in xrange(n)]
if reverse: L.reverse()
return L
def __hash__(self):
return hash( (self.cyan, self.magenta, self.yellow, self.black, self.density, self.spotName, self.alpha) )
def __cmp__(self,other):
"""obvious way to compare colours
Comparing across the two color models is of limited use.
>>> cmp(CMYKColor(0,0,0,1),None)
-1
>>> cmp(CMYKColor(0,0,0,1),_CMYK_black)
0
>>> cmp(PCMYKColor(0,0,0,100),_CMYK_black)
0
>>> cmp(CMYKColor(0,0,0,1),Color(0,0,1)),Color(0,0,0).rgba()==CMYKColor(0,0,0,1).rgba()
(-1, True)
"""
if not isinstance(other, CMYKColor): return -1
try:
return cmp(
(self.cyan, self.magenta, self.yellow, self.black, self.density, self.alpha, self.spotName),
(other.cyan, other.magenta, other.yellow, other.black, other.density, other.alpha, other.spotName))
except: # or just return 'not equal' if not a color
return -1
return 0
def cmyk(self):
"Returns a tuple of four color components - syntactic sugar"
return (self.cyan, self.magenta, self.yellow, self.black)
def cmyka(self):
"Returns a tuple of five color components - syntactic sugar"
return (self.cyan, self.magenta, self.yellow, self.black, self.alpha)
def _density_str(self):
return fp_str(self.density)
_cKwds='cyan magenta yellow black density alpha spotName knockout'.split()
def _lookupName(self,D={}):
if not D:
for n,v in getAllNamedColors().iteritems():
if isinstance(v,CMYKColor):
t = v.cyan,v.magenta,v.yellow,v.black
if t in D:
n = n+'/'+D[t]
D[t] = n
t = self.cyan,self.magenta,self.yellow,self.black
return t in D and D[t] or None
class PCMYKColor(CMYKColor):
'''100 based CMYKColor with density and a spotName; just like Rimas uses'''
_scale = 100.
def __init__(self,cyan,magenta,yellow,black,density=100,spotName=None,knockout=None,alpha=100):
CMYKColor.__init__(self,cyan/100.,magenta/100.,yellow/100.,black/100.,spotName,density/100.,knockout=knockout,alpha=alpha/100.)
def __repr__(self):
return "%s(%s%s%s%s%s)" % (self.__class__.__name__,
fp_str(self.cyan*100, self.magenta*100, self.yellow*100, self.black*100).replace(' ',','),
(self.spotName and (',spotName='+repr(self.spotName)) or ''),
(self.density!=1 and (',density='+fp_str(self.density*100)) or ''),
(self.knockout is not None and (',knockout=%d' % self.knockout) or ''),
(self.alpha is not None and (',alpha=%s' % (fp_str(self.alpha*100))) or ''),
)
def cKwds(self):
K=self._cKwds
S=K[:6]
for k in self._cKwds:
v=getattr(self,k)
if k in S: v*=100
yield k,v
cKwds=property(cKwds)
class CMYKColorSep(CMYKColor):
'''special case color for making separating pdfs'''
_scale = 1.
def __init__(self, cyan=0, magenta=0, yellow=0, black=0,
spotName=None, density=1,alpha=1):
CMYKColor.__init__(self,cyan,magenta,yellow,black,spotName,density,knockout=None,alpha=alpha)
_cKwds='cyan magenta yellow black density alpha spotName'.split()
class PCMYKColorSep(PCMYKColor,CMYKColorSep):
'''special case color for making separating pdfs'''
_scale = 100.
def __init__(self, cyan=0, magenta=0, yellow=0, black=0,
spotName=None, density=100, alpha=100):
PCMYKColor.__init__(self,cyan,magenta,yellow,black,density,spotName,knockout=None,alpha=alpha)
_cKwds='cyan magenta yellow black density alpha spotName'.split()
def cmyk2rgb(cmyk,density=1):
"Convert from a CMYK color tuple to an RGB color tuple"
c,m,y,k = cmyk
# From the Adobe Postscript Ref. Manual 2nd ed.
r = 1.0 - min(1.0, c + k)
g = 1.0 - min(1.0, m + k)
b = 1.0 - min(1.0, y + k)
return (r,g,b)
def rgb2cmyk(r,g,b):
'''one way to get cmyk from rgb'''
c = 1 - r
m = 1 - g
y = 1 - b
k = min(c,m,y)
c = min(1,max(0,c-k))
m = min(1,max(0,m-k))
y = min(1,max(0,y-k))
k = min(1,max(0,k))
return (c,m,y,k)
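# Illustrative check (comment added for clarity; values are chosen for the
# example, not from the original module): the two converters above are
# approximate inverses, with rgb2cmyk performing grey-component replacement.
#
#   >>> rgb2cmyk(0.5, 0.5, 0.5)   # pure grey becomes black ink only
#   (0, 0, 0, 0.5)
#   >>> cmyk2rgb((0, 0, 0, 0.5))  # and converts back to the same grey
#   (0.5, 0.5, 0.5)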
def color2bw(colorRGB):
"Transform an RGB color to a black and white equivalent."
col = colorRGB
r, g, b, a = col.red, col.green, col.blue, col.alpha
n = (r + g + b) / 3.0
bwColorRGB = Color(n, n, n, a)
return bwColorRGB
def HexColor(val, htmlOnly=False, alpha=False):
"""This function converts a hex string, or an actual integer number,
into the corresponding color. E.g., in "#AABBCC" or 0xAABBCC,
AA is the red, BB is the green, and CC is the blue (00-FF).
An alpha value can also be given in the form #AABBCCDD or 0xAABBCCDD where
DD is the alpha value.
For completeness I assume that #aabbcc or 0xaabbcc are hex numbers;
otherwise a pure integer is converted as decimal rgb. If htmlOnly is true,
only the #aabbcc form is allowed.
>>> HexColor('#ffffff')
Color(1,1,1,1)
>>> HexColor('#FFFFFF')
Color(1,1,1,1)
>>> HexColor('0xffffff')
Color(1,1,1,1)
>>> HexColor('16777215')
Color(1,1,1,1)
An '0x' or '#' prefix is required for hex (as opposed to decimal):
>>> HexColor('ffffff')
Traceback (most recent call last):
ValueError: invalid literal for int() with base 10: 'ffffff'
>>> HexColor('#FFFFFF', htmlOnly=True)
Color(1,1,1,1)
>>> HexColor('0xffffff', htmlOnly=True)
Traceback (most recent call last):
ValueError: not a hex string
>>> HexColor('16777215', htmlOnly=True)
Traceback (most recent call last):
ValueError: not a hex string
""" #" for emacs
if isinstance(val,basestring):
b = 10
if val[:1] == '#':
val = val[1:]
b = 16
if len(val) == 8:
alpha = True
else:
if htmlOnly:
raise ValueError('not a hex string')
if val[:2].lower() == '0x':
b = 16
val = val[2:]
if len(val) == 8:
alpha = True
val = int(val,b)
if alpha:
return Color(((val>>24)&0xFF)/255.0,((val>>16)&0xFF)/255.0,((val>>8)&0xFF)/255.0,(val&0xFF)/255.0)
return Color(((val>>16)&0xFF)/255.0,((val>>8)&0xFF)/255.0,(val&0xFF)/255.0)
def linearlyInterpolatedColor(c0, c1, x0, x1, x):
"""
Linearly interpolates colors. Can handle RGB, CMYK and PCMYK
colors - raises ValueError if the colour classes aren't the same.
Doesn't currently handle 'Spot Color Interpolation'.
"""
if c0.__class__ != c1.__class__:
raise ValueError("Color classes must be the same for interpolation!\nGot %r and %r'"%(c0,c1))
if x1<x0:
x0,x1,c0,c1 = x1,x0,c1,c0 # normalized so x1>x0
if x<x0-1e-8 or x>x1+1e-8: # fudge factor for numerical problems
raise ValueError, "Can't interpolate: x=%f is not between %f and %f!" % (x,x0,x1)
if x<=x0:
return c0
elif x>=x1:
return c1
cname = c0.__class__.__name__
dx = float(x1-x0)
x = x-x0
if cname == 'Color': # RGB
r = c0.red+x*(c1.red - c0.red)/dx
g = c0.green+x*(c1.green- c0.green)/dx
b = c0.blue+x*(c1.blue - c0.blue)/dx
a = c0.alpha+x*(c1.alpha - c0.alpha)/dx
return Color(r,g,b,alpha=a)
elif cname == 'CMYKColor':
if cmykDistance(c0,c1)<1e-8:
#colors same do density and preserve spotName if any
assert c0.spotName == c1.spotName, "Identical cmyk, but different spotName"
c = c0.cyan
m = c0.magenta
y = c0.yellow
k = c0.black
d = c0.density+x*(c1.density - c0.density)/dx
a = c0.alpha+x*(c1.alpha - c0.alpha)/dx
return CMYKColor(c,m,y,k, density=d, spotName=c0.spotName, alpha=a)
elif cmykDistance(c0,_CMYK_white)<1e-8:
#special c0 is white
c = c1.cyan
m = c1.magenta
y = c1.yellow
k = c1.black
d = x*c1.density/dx
a = x*c1.alpha/dx
return CMYKColor(c,m,y,k, density=d, spotName=c1.spotName, alpha=a)
elif cmykDistance(c1,_CMYK_white)<1e-8:
#special c1 is white
c = c0.cyan
m = c0.magenta
y = c0.yellow
k = c0.black
d = c0.density*(1-x/dx)
a = c0.alpha*(1-x/dx)
return CMYKColor(c,m,y,k, density=d, spotName=c0.spotName, alpha=a)
else:
c = c0.cyan+x*(c1.cyan - c0.cyan)/dx
m = c0.magenta+x*(c1.magenta - c0.magenta)/dx
y = c0.yellow+x*(c1.yellow - c0.yellow)/dx
k = c0.black+x*(c1.black - c0.black)/dx
d = c0.density+x*(c1.density - c0.density)/dx
a = c0.alpha+x*(c1.alpha - c0.alpha)/dx
return CMYKColor(c,m,y,k, density=d, alpha=a)
elif cname == 'PCMYKColor':
if cmykDistance(c0,c1)<1e-8:
#colors same do density and preserve spotName if any
assert c0.spotName == c1.spotName, "Identical cmyk, but different spotName"
c = c0.cyan
m = c0.magenta
y = c0.yellow
k = c0.black
d = c0.density+x*(c1.density - c0.density)/dx
a = c0.alpha+x*(c1.alpha - c0.alpha)/dx
return PCMYKColor(c*100,m*100,y*100,k*100, density=d*100,
spotName=c0.spotName, alpha=100*a)
elif cmykDistance(c0,_CMYK_white)<1e-8:
#special c0 is white
c = c1.cyan
m = c1.magenta
y = c1.yellow
k = c1.black
d = x*c1.density/dx
a = x*c1.alpha/dx
return PCMYKColor(c*100,m*100,y*100,k*100, density=d*100,
spotName=c1.spotName, alpha=a*100)
elif cmykDistance(c1,_CMYK_white)<1e-8:
#special c1 is white
c = c0.cyan
m = c0.magenta
y = c0.yellow
k = c0.black
d = c0.density*(1-x/dx)
a = c0.alpha*(1-x/dx)
return PCMYKColor(c*100,m*100,y*100,k*100, density=d*100,
spotName=c0.spotName, alpha=a*100)
else:
c = c0.cyan+x*(c1.cyan - c0.cyan)/dx
m = c0.magenta+x*(c1.magenta - c0.magenta)/dx
y = c0.yellow+x*(c1.yellow - c0.yellow)/dx
k = c0.black+x*(c1.black - c0.black)/dx
d = c0.density+x*(c1.density - c0.density)/dx
a = c0.alpha+x*(c1.alpha - c0.alpha)/dx
return PCMYKColor(c*100,m*100,y*100,k*100, density=d*100, alpha=a*100)
else:
raise ValueError, "Can't interpolate: Unknown color class %s!" % cname
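# Hedged usage sketch (added for illustration; the endpoints are example
# values): interpolating halfway between black and white blends each
# component independently, giving the mid grey.
#
#   linearlyInterpolatedColor(Color(0, 0, 0), Color(1, 1, 1), 0, 10, 5)
#   # -> Color(0.5, 0.5, 0.5, alpha=1), the component-wise midpoint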
def obj_R_G_B(c):
'''attempt to convert an object to (red,green,blue)'''
if isinstance(c,Color):
return c.red,c.green,c.blue
elif isinstance(c,(tuple,list)):
if len(c)==3:
return tuple(c)
elif len(c)==4:
return toColor(c).rgb()
else:
raise ValueError('obj_R_G_B(%r) bad argument' % (c))
# special case -- indicates no drawing should be done
# this is a hangover from PIDDLE - suggest we ditch it since it is not used anywhere
transparent = Color(0,0,0,alpha=0)
_CMYK_white=CMYKColor(0,0,0,0)
_PCMYK_white=PCMYKColor(0,0,0,0)
_CMYK_black=CMYKColor(0,0,0,1)
_PCMYK_black=PCMYKColor(0,0,0,100)
# Special colors
ReportLabBlueOLD = HexColor(0x4e5688)
ReportLabBlue = HexColor(0x00337f)
ReportLabBluePCMYK = PCMYKColor(100,65,0,30,spotName='Pantone 288U')
ReportLabLightBlue = HexColor(0xb7b9d3)
ReportLabFidBlue=HexColor(0x3366cc)
ReportLabFidRed=HexColor(0xcc0033)
ReportLabGreen = HexColor(0x336600)
ReportLabLightGreen = HexColor(0x339933)
# color constants -- mostly from HTML standard
aliceblue = HexColor(0xF0F8FF)
antiquewhite = HexColor(0xFAEBD7)
aqua = HexColor(0x00FFFF)
aquamarine = HexColor(0x7FFFD4)
azure = HexColor(0xF0FFFF)
beige = HexColor(0xF5F5DC)
bisque = HexColor(0xFFE4C4)
black = HexColor(0x000000)
blanchedalmond = HexColor(0xFFEBCD)
blue = HexColor(0x0000FF)
blueviolet = HexColor(0x8A2BE2)
brown = HexColor(0xA52A2A)
burlywood = HexColor(0xDEB887)
cadetblue = HexColor(0x5F9EA0)
chartreuse = HexColor(0x7FFF00)
chocolate = HexColor(0xD2691E)
coral = HexColor(0xFF7F50)
cornflowerblue = cornflower = HexColor(0x6495ED)
cornsilk = HexColor(0xFFF8DC)
crimson = HexColor(0xDC143C)
cyan = HexColor(0x00FFFF)
darkblue = HexColor(0x00008B)
darkcyan = HexColor(0x008B8B)
darkgoldenrod = HexColor(0xB8860B)
darkgray = HexColor(0xA9A9A9)
darkgrey = darkgray
darkgreen = HexColor(0x006400)
darkkhaki = HexColor(0xBDB76B)
darkmagenta = HexColor(0x8B008B)
darkolivegreen = HexColor(0x556B2F)
darkorange = HexColor(0xFF8C00)
darkorchid = HexColor(0x9932CC)
darkred = HexColor(0x8B0000)
darksalmon = HexColor(0xE9967A)
darkseagreen = HexColor(0x8FBC8B)
darkslateblue = HexColor(0x483D8B)
darkslategray = HexColor(0x2F4F4F)
darkslategrey = darkslategray
darkturquoise = HexColor(0x00CED1)
darkviolet = HexColor(0x9400D3)
deeppink = HexColor(0xFF1493)
deepskyblue = HexColor(0x00BFFF)
dimgray = HexColor(0x696969)
dimgrey = dimgray
dodgerblue = HexColor(0x1E90FF)
firebrick = HexColor(0xB22222)
floralwhite = HexColor(0xFFFAF0)
forestgreen = HexColor(0x228B22)
fuchsia = HexColor(0xFF00FF)
gainsboro = HexColor(0xDCDCDC)
ghostwhite = HexColor(0xF8F8FF)
gold = HexColor(0xFFD700)
goldenrod = HexColor(0xDAA520)
gray = HexColor(0x808080)
grey = gray
green = HexColor(0x008000)
greenyellow = HexColor(0xADFF2F)
honeydew = HexColor(0xF0FFF0)
hotpink = HexColor(0xFF69B4)
indianred = HexColor(0xCD5C5C)
indigo = HexColor(0x4B0082)
ivory = HexColor(0xFFFFF0)
khaki = HexColor(0xF0E68C)
lavender = HexColor(0xE6E6FA)
lavenderblush = HexColor(0xFFF0F5)
lawngreen = HexColor(0x7CFC00)
lemonchiffon = HexColor(0xFFFACD)
lightblue = HexColor(0xADD8E6)
lightcoral = HexColor(0xF08080)
lightcyan = HexColor(0xE0FFFF)
lightgoldenrodyellow = HexColor(0xFAFAD2)
lightgreen = HexColor(0x90EE90)
lightgrey = HexColor(0xD3D3D3)
lightpink = HexColor(0xFFB6C1)
lightsalmon = HexColor(0xFFA07A)
lightseagreen = HexColor(0x20B2AA)
lightskyblue = HexColor(0x87CEFA)
lightslategray = HexColor(0x778899)
lightslategrey = lightslategray
lightsteelblue = HexColor(0xB0C4DE)
lightyellow = HexColor(0xFFFFE0)
lime = HexColor(0x00FF00)
limegreen = HexColor(0x32CD32)
linen = HexColor(0xFAF0E6)
magenta = HexColor(0xFF00FF)
maroon = HexColor(0x800000)
mediumaquamarine = HexColor(0x66CDAA)
mediumblue = HexColor(0x0000CD)
mediumorchid = HexColor(0xBA55D3)
mediumpurple = HexColor(0x9370DB)
mediumseagreen = HexColor(0x3CB371)
mediumslateblue = HexColor(0x7B68EE)
mediumspringgreen = HexColor(0x00FA9A)
mediumturquoise = HexColor(0x48D1CC)
mediumvioletred = HexColor(0xC71585)
midnightblue = HexColor(0x191970)
mintcream = HexColor(0xF5FFFA)
mistyrose = HexColor(0xFFE4E1)
moccasin = HexColor(0xFFE4B5)
navajowhite = HexColor(0xFFDEAD)
navy = HexColor(0x000080)
oldlace = HexColor(0xFDF5E6)
olive = HexColor(0x808000)
olivedrab = HexColor(0x6B8E23)
orange = HexColor(0xFFA500)
orangered = HexColor(0xFF4500)
orchid = HexColor(0xDA70D6)
palegoldenrod = HexColor(0xEEE8AA)
palegreen = HexColor(0x98FB98)
paleturquoise = HexColor(0xAFEEEE)
palevioletred = HexColor(0xDB7093)
papayawhip = HexColor(0xFFEFD5)
peachpuff = HexColor(0xFFDAB9)
peru = HexColor(0xCD853F)
pink = HexColor(0xFFC0CB)
plum = HexColor(0xDDA0DD)
powderblue = HexColor(0xB0E0E6)
purple = HexColor(0x800080)
red = HexColor(0xFF0000)
rosybrown = HexColor(0xBC8F8F)
royalblue = HexColor(0x4169E1)
saddlebrown = HexColor(0x8B4513)
salmon = HexColor(0xFA8072)
sandybrown = HexColor(0xF4A460)
seagreen = HexColor(0x2E8B57)
seashell = HexColor(0xFFF5EE)
sienna = HexColor(0xA0522D)
silver = HexColor(0xC0C0C0)
skyblue = HexColor(0x87CEEB)
slateblue = HexColor(0x6A5ACD)
slategray = HexColor(0x708090)
slategrey = slategray
snow = HexColor(0xFFFAFA)
springgreen = HexColor(0x00FF7F)
steelblue = HexColor(0x4682B4)
tan = HexColor(0xD2B48C)
teal = HexColor(0x008080)
thistle = HexColor(0xD8BFD8)
tomato = HexColor(0xFF6347)
turquoise = HexColor(0x40E0D0)
violet = HexColor(0xEE82EE)
wheat = HexColor(0xF5DEB3)
white = HexColor(0xFFFFFF)
whitesmoke = HexColor(0xF5F5F5)
yellow = HexColor(0xFFFF00)
yellowgreen = HexColor(0x9ACD32)
fidblue=HexColor(0x3366cc)
fidred=HexColor(0xcc0033)
fidlightblue=HexColor("#d6e0f5")
ColorType=type(black)
################################################################
#
# Helper functions for dealing with colors. These tell you
# which are predefined, so you can print color charts;
# and can give the nearest match to an arbitrary color object
#
#################################################################
def colorDistance(col1, col2):
"""Returns a number between 0 and root(3) stating how similar
two colours are - distance in r,g,b space. Only used to find
names for things."""
return math.sqrt(
(col1.red - col2.red)**2 +
(col1.green - col2.green)**2 +
(col1.blue - col2.blue)**2
)
def cmykDistance(col1, col2):
"""Returns a number between 0 and root(4) stating how similar
two colours are - distance in c,m,y,k space. Only used to find
names for things."""
return math.sqrt(
(col1.cyan - col2.cyan)**2 +
(col1.magenta - col2.magenta)**2 +
(col1.yellow - col2.yellow)**2 +
(col1.black - col2.black)**2
)
_namedColors = None
def getAllNamedColors():
#returns a dictionary of all the named ones in the module
# uses a singleton for efficiency
global _namedColors
if _namedColors is not None: return _namedColors
import colors
_namedColors = {}
for (name, value) in colors.__dict__.items():
if isinstance(value, Color):
_namedColors[name] = value
return _namedColors
def describe(aColor,mode=0):
'''finds nearest colour match to aColor.
mode=0 print a string description
mode=1 return a string description
mode=2 return (colorName, distance)
'''
namedColors = getAllNamedColors()
closest = (10, None, None) #big number, name, color
for (name, color) in namedColors.items():
distance = colorDistance(aColor, color)
if distance < closest[0]:
closest = (distance, name, color)
if mode<=1:
s = 'best match is %s, distance %0.4f' % (closest[1], closest[0])
if mode==0: print s
else: return s
elif mode==2:
return (closest[1], closest[0])
else:
raise ValueError, "Illegal value for mode "+str(mode)
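# Example (illustrative comment only): describe(HexColor(0xFF0001), mode=2)
# returns ('red', distance) with a near-zero distance, since 0xFF0001 is one
# blue step away from the named colour red defined below.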
def hue2rgb(m1, m2, h):
if h<0: h += 1
if h>1: h -= 1
if h*6<1: return m1+(m2-m1)*h*6
if h*2<1: return m2
if h*3<2: return m1+(m2-m1)*(4-6*h)
return m1
def hsl2rgb(h, s, l):
if l<=0.5:
m2 = l*(s+1)
else:
m2 = l+s-l*s
m1 = l*2-m2
return hue2rgb(m1, m2, h+1./3),hue2rgb(m1, m2, h),hue2rgb(m1, m2, h-1./3)
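# Worked example (comment added for clarity): pure red in HSL is h=0, s=1,
# l=0.5, so m2 = 0.5*(1+1) = 1 and m1 = 2*0.5 - 1 = 0; hue2rgb then yields
# (1, 0, 0). cssParse below relies on this mapping for 'hsl(0,100%,50%)'.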
class cssParse:
def pcVal(self,v):
v = v.strip()
try:
c=eval(v[:-1])
if not isinstance(c,(float,int)): raise ValueError
c=min(100,max(0,c))/100.
except:
raise ValueError('bad percentage argument value %r in css color %r' % (v,self.s))
return c
def rgbPcVal(self,v):
return int(self.pcVal(v)*255+0.5)/255.
def rgbVal(self,v):
v = v.strip()
try:
c=eval(v[:])
if not isinstance(c,int): raise ValueError
return int(min(255,max(0,c)))/255.
except:
raise ValueError('bad argument value %r in css color %r' % (v,self.s))
def hueVal(self,v):
v = v.strip()
try:
c=eval(v[:])
if not isinstance(c,(int,float)): raise ValueError
return ((c%360+360)%360)/360.
except:
raise ValueError('bad hue argument value %r in css color %r' % (v,self.s))
def alphaVal(self,v,c=1,n='alpha'):
try:
a = eval(v.strip())
if not isinstance(a,(int,float)): raise ValueError
return min(c,max(0,a))
except:
raise ValueError('bad %s argument value %r in css color %r' % (n,v,self.s))
def __call__(self,s):
s = s.strip()
hsl = s.startswith('hsl')
rgb = s.startswith('rgb')
cmyk = s.startswith('cmyk')
c = 1
if hsl: n = 3
if rgb: n = 3
if cmyk:
n = 4
else:
cmyk = s.startswith('pcmyk')
if cmyk:
n = 5
c = 100
if not (rgb or hsl or cmyk): return None
self.s = s
n = s[n:]
ha = n.startswith('a')
n = n[(ha and 1 or 0):].strip()
if not n.startswith('(') or not n.endswith(')'):
raise ValueError('improperly formatted css style color %r' % s)
n = n[1:-1].split(',') #strip parens and split on comma
a = len(n)
b = cmyk and 4 or 3
if ha and a!=(b+1) or not ha and a!=b:
raise ValueError('css color %r has wrong number of components' % s)
if ha:
n,a = n[:b],self.alphaVal(n[b],c)
else:
a = c
if cmyk:
C = self.alphaVal(n[0],c,'cyan')
M = self.alphaVal(n[1],c,'magenta')
Y = self.alphaVal(n[2],c,'yellow')
K = self.alphaVal(n[3],c,'black')
return (c>1 and PCMYKColor or CMYKColor)(C,M,Y,K,alpha=a)
else:
if hsl:
R,G,B= hsl2rgb(self.hueVal(n[0]),self.pcVal(n[1]),self.pcVal(n[2]))
else:
R,G,B = map('%' in n[0] and self.rgbPcVal or self.rgbVal,n)
return Color(R,G,B,a)
cssParse=cssParse()
class toColor:
def __init__(self):
self.extraColorsNS = {} #used for overriding/adding to existing color names
#make case insensitive if that's your wish
def setExtraColorsNameSpace(self,NS):
self.extraColorsNS = NS
def __call__(self,arg,default=None):
'''try to map an arbitrary arg to a color instance
>>> toColor('rgb(128,0,0)')==toColor('rgb(50%,0%,0%)')
True
>>> toColor('rgb(50%,0%,0%)')!=Color(0.5,0,0,1)
True
>>> toColor('hsl(0,100%,50%)')==toColor('rgb(255,0,0)')
True
>>> toColor('hsl(-120,100%,50%)')==toColor('rgb(0,0,255)')
True
>>> toColor('hsl(120,100%,50%)')==toColor('rgb(0,255,0)')
True
>>> toColor('rgba(255,0,0,0.5)')==Color(1,0,0,0.5)
True
>>> toColor('cmyk(1,0,0,0)')==CMYKColor(1,0,0,0)
True
>>> toColor('pcmyk(100,0,0,0)')==PCMYKColor(100,0,0,0)
True
>>> toColor('cmyka(1,0,0,0,0.5)')==CMYKColor(1,0,0,0,alpha=0.5)
True
>>> toColor('pcmyka(100,0,0,0,0.5)')==PCMYKColor(100,0,0,0,alpha=0.5)
True
'''
if isinstance(arg,Color): return arg
if isinstance(arg,(tuple,list)):
assert 3<=len(arg)<=4, 'Can only convert 3 and 4 sequences to color'
assert 0<=min(arg) and max(arg)<=1
return len(arg)==3 and Color(arg[0],arg[1],arg[2]) or CMYKColor(arg[0],arg[1],arg[2],arg[3])
elif isinstance(arg,basestring):
C = cssParse(arg)
if C: return C
if arg in self.extraColorsNS: return self.extraColorsNS[arg]
C = getAllNamedColors()
s = arg.lower()
if s in C: return C[s]
try:
return toColor(eval(arg))
except:
pass
try:
return HexColor(arg)
except:
if default is None:
raise ValueError('Invalid color value %r' % arg)
return default
toColor = toColor()
def toColorOrNone(arg,default=None):
'''as above but allows None as a legal value'''
if arg is None:
return None
else:
return toColor(arg, default)
def setColors(**kw):
UNDEF = []
progress = 1
assigned = {}
while kw and progress:
progress = 0
for k, v in kw.items():
if isinstance(v,(tuple,list)):
c = map(lambda x,UNDEF=UNDEF: toColor(x,UNDEF),v)
if isinstance(v,tuple): c = tuple(c)
ok = UNDEF not in c
else:
c = toColor(v,UNDEF)
ok = c is not UNDEF
if ok:
assigned[k] = c
del kw[k]
progress = 1
if kw: raise ValueError("Can't convert\n%s" % str(kw))
getAllNamedColors()
for k, c in assigned.items():
globals()[k] = c
if isinstance(c,Color): _namedColors[k] = c
def Whiter(c,f):
'''given a color combine with white as c*f + w*(1-f), 0<=f<=1'''
c = toColor(c)
if isinstance(c,CMYKColorSep):
c = c.clone()
if isinstance(c,PCMYKColorSep):
c.__class__ = PCMYKColor
else:
c.__class__ = CMYKColor
if isinstance(c,PCMYKColor):
w = _PCMYK_white
elif isinstance(c,CMYKColor): w = _CMYK_white
else: w = white
return linearlyInterpolatedColor(w, c, 0, 1, f)
def Blacker(c,f):
'''given a color combine with black as c*f+b*(1-f) 0<=f<=1'''
c = toColor(c)
if isinstance(c,CMYKColorSep):
c = c.clone()
if isinstance(c,PCMYKColorSep):
c.__class__ = PCMYKColor
else:
c.__class__ = CMYKColor
if isinstance(c,PCMYKColor):
b = _PCMYK_black
elif isinstance(c,CMYKColor): b = _CMYK_black
else: b = black
return linearlyInterpolatedColor(b, c, 0, 1, f)
def fade(aSpotColor, percentages):
"""Waters down spot colors and returns a list of new ones
e.g. fade(myColor, [100,80,60,40,20]) returns a list of five colors
"""
out = []
for percent in percentages:
frac = percent * 0.01 #assume they give us numbers from 0 to 100
newCyan = frac * aSpotColor.cyan
newMagenta = frac * aSpotColor.magenta
newYellow = frac * aSpotColor.yellow
newBlack = frac * aSpotColor.black
newDensity = frac * aSpotColor.density
newSpot = CMYKColor( newCyan, newMagenta, newYellow, newBlack,
spotName = aSpotColor.spotName,
density = newDensity)
out.append(newSpot)
return out
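# Illustrative sketch (added comment; the percentages are example values):
# fading the Pantone spot colour defined above through three strengths scales
# every ink component and the density by the same fraction.
#
#   swatches = fade(ReportLabBluePCMYK, [100, 50, 25])
#   # -> three CMYKColor objects sharing spotName 'Pantone 288U', with all
#   #    components and densities scaled by 1.0, 0.5 and 0.25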
def _enforceError(kind,c,tc):
if isinstance(tc,Color):
xtra = tc._lookupName()
xtra = xtra and '(%s)'%xtra or ''
else:
xtra = ''
raise ValueError('Non %s color %r%s' % (kind,c,xtra))
def _enforceSEP(c):
'''pure separating colors only, this makes black a problem'''
tc = toColor(c)
if not isinstance(tc,CMYKColorSep):
_enforceError('separating',c,tc)
return tc
def _enforceSEP_BLACK(c):
'''separating + blacks only'''
tc = toColor(c)
if not isinstance(tc,CMYKColorSep):
if isinstance(tc,Color) and tc.red==tc.blue==tc.green: #ahahahah it's a grey
tc = _CMYK_black.clone(density=1-tc.red)
elif not (isinstance(tc,CMYKColor) and tc.cyan==tc.magenta==tc.yellow==0): #ie some shade of grey
_enforceError('separating or black',c,tc)
return tc
def _enforceSEP_CMYK(c):
'''separating or cmyk only'''
tc = toColor(c)
if not isinstance(tc,CMYKColorSep):
if isinstance(tc,Color) and tc.red==tc.blue==tc.green: #ahahahah it's a grey
tc = _CMYK_black.clone(density=1-tc.red)
elif not isinstance(tc,CMYKColor):
_enforceError('separating or CMYK',c,tc)
return tc
def _enforceCMYK(c):
'''cmyk outputs only (rgb greys converted)'''
tc = toColor(c)
if not isinstance(tc,CMYKColor):
if isinstance(tc,Color) and tc.red==tc.blue==tc.green: #ahahahah it's a grey
tc = _CMYK_black.clone(black=1-tc.red,alpha=tc.alpha)
else:
_enforceError('CMYK',c,tc)
elif isinstance(tc,CMYKColorSep):
tc = tc.clone()
tc.__class__ = CMYKColor
return tc
def _enforceRGB(c):
tc = toColor(c)
if isinstance(tc,CMYKColor):
if tc.cyan==tc.magenta==tc.yellow==0: #ahahahah it's grey
v = 1-tc.black*tc.density
tc = Color(v,v,v,alpha=tc.alpha)
else:
_enforceError('RGB',c,tc)
return tc
def _chooseEnforceColorSpace(enforceColorSpace):
if enforceColorSpace is not None and not callable(enforceColorSpace):
if isinstance(enforceColorSpace,basestring): enforceColorSpace=enforceColorSpace.upper()
if enforceColorSpace=='CMYK':
enforceColorSpace = _enforceCMYK
elif enforceColorSpace=='RGB':
enforceColorSpace = _enforceRGB
elif enforceColorSpace=='SEP':
enforceColorSpace = _enforceSEP
elif enforceColorSpace=='SEP_BLACK':
enforceColorSpace = _enforceSEP_BLACK
elif enforceColorSpace=='SEP_CMYK':
enforceColorSpace = _enforceSEP_CMYK
else:
raise ValueError('Invalid value for Canvas argument enforceColorSpace=%r' % enforceColorSpace)
return enforceColorSpace
if __name__ == "__main__":
import doctest
doctest.testmod()
| bsd-3-clause |
adaur/SickRage | lib/guessit/transfo/guess_movie_title_from_position.py | 28 | 8579 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2013 Nicolas Wack <[email protected]>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function, unicode_literals
from guessit.plugins.transformers import Transformer
from guessit.matcher import found_property
from guessit import u
from guessit.patterns.list import all_separators
from guessit.language import all_lang_prefixes_suffixes
class GuessMovieTitleFromPosition(Transformer):
def __init__(self):
Transformer.__init__(self, -200)
def supported_properties(self):
return ['title']
def should_process(self, mtree, options=None):
options = options or {}
return not options.get('skip_title') and not mtree.guess.get('type', '').startswith('episode')
@staticmethod
def excluded_word(*values):
for value in values:
if value.clean_value.lower() in all_separators + all_lang_prefixes_suffixes:
return True
return False
def process(self, mtree, options=None):
"""
try to identify the remaining unknown groups by looking at their
position relative to other known elements
"""
if 'title' in mtree.info:
return
path_nodes = list(filter(lambda x: x.category == 'path', mtree.nodes()))
basename = path_nodes[-2]
all_valid = lambda leaf: len(leaf.clean_value) > 0
basename_leftover = list(basename.unidentified_leaves(valid=all_valid))
try:
folder = path_nodes[-3]
folder_leftover = list(folder.unidentified_leaves())
except IndexError:
folder = None
folder_leftover = []
self.log.debug('folder: %s' % u(folder_leftover))
self.log.debug('basename: %s' % u(basename_leftover))
# specific cases:
# if we find the same group both in the folder name and the filename,
# it's a good candidate for title
if (folder_leftover and basename_leftover and
folder_leftover[0].clean_value == basename_leftover[0].clean_value and
not GuessMovieTitleFromPosition.excluded_word(folder_leftover[0])):
found_property(folder_leftover[0], 'title', confidence=0.8)
return
# specific cases:
# if the basename contains a number first followed by an unidentified
# group, and the folder only contains 1 unidentified one, then we have
# a series
# ex: Millenium Trilogy (2009)/(1)The Girl With The Dragon Tattoo(2009).mkv
if len(folder_leftover) > 0 and len(basename_leftover) > 1:
series = folder_leftover[0]
film_number = basename_leftover[0]
title = basename_leftover[1]
basename_leaves = list(basename.leaves())
num = None
try:
num = int(film_number.clean_value)
except ValueError:
pass
if num:
self.log.debug('series: %s' % series.clean_value)
self.log.debug('title: %s' % title.clean_value)
if (series.clean_value != title.clean_value and
series.clean_value != film_number.clean_value and
basename_leaves.index(film_number) == 0 and
basename_leaves.index(title) == 1 and
not GuessMovieTitleFromPosition.excluded_word(title, series)):
found_property(title, 'title', confidence=0.6)
found_property(series, 'filmSeries', confidence=0.6)
found_property(film_number, 'filmNumber', num, confidence=0.6)
return
if folder:
year_group = folder.first_leaf_containing('year')
if year_group:
groups_before = folder.previous_unidentified_leaves(year_group)
if groups_before:
try:
node = next(groups_before)
if not GuessMovieTitleFromPosition.excluded_word(node):
found_property(node, 'title', confidence=0.8)
return
except StopIteration:
pass
# if we have either format or videoCodec in the folder containing the
# file or one of its parents, then we should probably look for the title
# in there rather than in the basename
try:
props = list(mtree.previous_leaves_containing(mtree.children[-2],
['videoCodec',
'format',
'language']))
except IndexError:
props = []
if props:
group_idx = props[0].node_idx[0]
if all(g.node_idx[0] == group_idx for g in props):
# if they're all in the same group, take leftover info from there
leftover = mtree.node_at((group_idx,)).unidentified_leaves()
try:
node = next(leftover)
if not GuessMovieTitleFromPosition.excluded_word(node):
found_property(node, 'title', confidence=0.7)
return
except StopIteration:
pass
# look for title in basename if there are some remaining unidentified
# groups there
if basename_leftover:
# if basename is only one word and the containing folder has at least
# 3 words in it, we should take the title from the folder name
# ex: Movies/Alice in Wonderland DVDRip.XviD-DiAMOND/dmd-aw.avi
# ex: Movies/Somewhere.2010.DVDRip.XviD-iLG/i-smwhr.avi <-- TODO: gets caught here?
if (basename_leftover[0].clean_value.count(' ') == 0 and
folder_leftover and folder_leftover[0].clean_value.count(' ') >= 2 and
not GuessMovieTitleFromPosition.excluded_word(folder_leftover[0])):
found_property(folder_leftover[0], 'title', confidence=0.7)
return
# if there are only many unidentified groups, take the first of which is
# not inside brackets or parentheses.
# ex: Movies/[阿维达].Avida.2006.FRENCH.DVDRiP.XViD-PROD.avi
if basename_leftover[0].is_explicit():
for basename_leftover_elt in basename_leftover:
if not basename_leftover_elt.is_explicit() and not GuessMovieTitleFromPosition.excluded_word(basename_leftover_elt):
found_property(basename_leftover_elt, 'title', confidence=0.8)
return
# if all else fails, take the first remaining unidentified group in the
# basename as title
if not GuessMovieTitleFromPosition.excluded_word(basename_leftover[0]):
found_property(basename_leftover[0], 'title', confidence=0.6)
return
# if there are no leftover groups in the basename, look in the folder name
if folder_leftover and not GuessMovieTitleFromPosition.excluded_word(folder_leftover[0]):
found_property(folder_leftover[0], 'title', confidence=0.5)
return
# if nothing worked, look if we have a very small group at the beginning
# of the basename
basename_leftover = basename.unidentified_leaves(valid=lambda leaf: True)
try:
node = next(basename_leftover)
if not GuessMovieTitleFromPosition.excluded_word(node):
found_property(node, 'title', confidence=0.4)
return
except StopIteration:
pass
| gpl-3.0 |
numericube/twistranet | twistranet/twistapp/forms/fields.py | 1 | 7275 | """
The twistranet Fields
"""
import os
import urlparse
from django import forms
from django.core.validators import URL_VALIDATOR_USER_AGENT
from django.db import models
from django.core.validators import EMPTY_VALUES
from django.core.exceptions import ValidationError
from django.utils.encoding import smart_unicode
from django.utils.translation import ugettext as _
from twistranet.twistapp.lib.log import log
import widgets
from validators import URLValidator, ViewPathValidator
class PermissionFormField(forms.ChoiceField):
"""
This overrides the regular ChoiceField to add additional rendering.
"""
widget = widgets.PermissionsWidget
def __init__(
self, choices = (), required=True, widget=None, max_length = None,
label=None, initial=None, help_text=None, to_field_name=None,
*args, **kwargs
):
super(PermissionFormField, self).__init__(choices, required, widget, label, initial, help_text, *args, **kwargs)
# We put this here to avoid import errors
self.default_error_messages = {
'invalid_choice': _(u'Select a valid choice. That choice is not one of'
u' the available choices.'),
}
class PermissionsFormField(forms.ChoiceField):
"""
This overrides the regular ChoiceField to validate against (id, name, description) choice triples.
"""
def valid_value(self, value):
"Check to see if the provided value is a valid choice"
for id, name, description in self.choices:
if value == smart_unicode(id):
return True
return False
class ModelInputField(forms.Field):
"""
This is a field used to enter a foreign key value inside a classic Input widget.
This is used when there are a lot of values to check against (and ModelChoiceField is not
efficient anymore), plus the value is checked against the QuerySet very late in the process.
"""
def __init__(
self, model, filter = None, required=True, widget=None,
label=None, initial=None, help_text=None, to_field_name=None,
*args, **kwargs
):
super(ModelInputField, self).__init__(required, widget, label, initial, help_text,
*args, **kwargs)
self.model = model
self.filter = filter
self.to_field_name = to_field_name
# We put this here to avoid import errors
self.default_error_messages = {
'invalid_choice': _(u'Select a valid choice. That choice is not one of'
u' the available choices.'),
}
def to_python(self, value):
"""
'Resolve' the query set at validation time.
This way, we're sure to have the freshest version of the QS.
"""
if value in EMPTY_VALUES:
return None
try:
key = self.to_field_name or 'pk'
qs = self.model.objects.get_query_set()
if self.filter:
qs = qs.filter(self.filter)
value = qs.get(**{key: value})
except self.model.DoesNotExist:
raise ValidationError(self.error_messages['invalid_choice'])
return value
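# Illustrative sketch (added comment; the model name and filter are
# assumptions, not from this project, and Q comes from django.db.models):
# ModelInputField defers queryset resolution until validation, so it stays
# cheap even for very large tables.
#
#   owner = ModelInputField(model=Account, filter=Q(is_active=True))
#   # form.cleaned_data['owner'] is an Account instance, or None when empty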
class ResourceFormField(forms.MultiValueField):
"""
The ResourceFormField is a resource browser.
You can pass it a few parameters:
- model which is the subclass you want to read your resources from (default: twistranet.Resource).
Useful if you want to display only images for example.
- filter which will be passed to model.objects.filter() call before rendering the widget.
These model / filter params are the only solution to handle choices WITH the security model.
- allow_upload (upload is ok)
- allow_select (can select an existing resource from the given filter)
"""
widget = widgets.ResourceWidget
field = ModelInputField
model = None
filter = None
def __init__(self, *args, **kwargs):
# Initial values
from twistranet.twistapp.models import Resource
self.model = kwargs.pop("model", Resource)
self.filter = kwargs.pop("filter", None)
self.allow_upload = kwargs.pop("allow_upload", True)
self.allow_select = kwargs.pop("allow_select", True)
self.display_renderer = kwargs.pop("display_renderer", True)
self.media_type = kwargs.pop("media_type", 'file')
self.widget = kwargs.pop("widget", self.widget(
model = self.model, filter = self.filter,
allow_upload = self.allow_upload,
allow_select = self.allow_select,
display_renderer = self.display_renderer,
media_type = self.media_type
))
self.required = kwargs.pop("required", True)
# The fields we'll use:
# - A ModelInputField used to handle the ForeignKey.
# - A FileField used to handle data upload.
fields = []
field0 = self.field(model = self.model, filter = self.filter, required = self.required)
# no more used
# field1 = forms.FileField(required = False)
dummy = forms.CharField(required = False)
if self.allow_select or self.allow_upload:
fields.append(field0)
else:
fields.append(dummy)
# # Compatibility with form_for_instance
# if kwargs.get('initial'):
# initial = kwargs['initial']
# else:
# initial = None
# self.widget = self.widget(initial=initial)
super(ResourceFormField, self).__init__(fields, label = kwargs.pop('label'), required = False) #self.required)
def prepare_value(self, value):
"""
Pass the query_set to the underlying widget, so that it's computed as late as possible.
"""
qs = self.model.objects.get_query_set()
if self.filter:
qs = qs.filter(self.filter)
self.widget.query_set = qs
return super(ResourceFormField, self).prepare_value(value)
def compress(self, data_list):
return data_list
# URLField which also accept relative urls
class LargeURLField(forms.CharField):
"""
A URL field which accepts internal links
and intranet links (without a standard domain)
"""
def __init__(self, max_length=None, min_length=None, verify_exists=False,
validator_user_agent=URL_VALIDATOR_USER_AGENT, *args, **kwargs):
super(LargeURLField, self).__init__(max_length, min_length, *args,
**kwargs)
self.validators.append(URLValidator(verify_exists=verify_exists, validator_user_agent=validator_user_agent))
def to_python(self, value):
if value:
value = urlparse.urlunparse(urlparse.urlparse(value))
return super(LargeURLField, self).to_python(value)
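# Example behaviour (illustrative comment; the paths are assumptions): unlike
# the stock Django URLField, this field is meant to validate scheme-less
# internal links as well, so both of these would be accepted:
#
#   LargeURLField().clean('http://example.com/page')
#   LargeURLField().clean('/content/42')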
class ViewPathField(forms.CharField):
"""
View Path field (could be improved)
"""
def __init__(self, max_length=None, min_length=None, *args, **kwargs):
super(ViewPathField, self).__init__(max_length, min_length, *args,
**kwargs)
self.validators.append(ViewPathValidator())
self.default_error_messages = { 'invalid': _(u'Enter a valid Path.'),}
| agpl-3.0 |
SravanthiSinha/edx-platform | pavelib/utils/test/suites/bokchoy_suite.py | 57 | 6408 | """
Class used for defining and running Bok Choy acceptance test suite
"""
from time import sleep
from paver.easy import sh
from pavelib.utils.test.suites.suite import TestSuite
from pavelib.utils.envs import Env
from pavelib.utils.test import bokchoy_utils
from pavelib.utils.test import utils as test_utils
try:
from pygments.console import colorize
except ImportError:
colorize = lambda color, text: text # pylint: disable=invalid-name
__test__ = False # do not collect
class BokChoyTestSuite(TestSuite):
"""
TestSuite for running Bok Choy tests
Properties (below is a subset):
test_dir - parent directory for tests
log_dir - directory for test output
report_dir - directory for reports (e.g., coverage) related to test execution
xunit_report - file path for xunit-style output (xml)
fasttest - when set, skip various set-up tasks (e.g., collectstatic)
serversonly - prepare and run the necessary servers, only stopping when interrupted with Ctrl-C
testsonly - assume servers are running (as per above) and run tests with no setup or cleaning of environment
test_spec - when set, specifies test files, classes, cases, etc. See platform doc.
default_store - modulestore to use when running tests (split or draft)
"""
def __init__(self, *args, **kwargs):
super(BokChoyTestSuite, self).__init__(*args, **kwargs)
self.test_dir = Env.BOK_CHOY_DIR / kwargs.get('test_dir', 'tests')
self.log_dir = Env.BOK_CHOY_LOG_DIR
self.report_dir = Env.BOK_CHOY_REPORT_DIR
self.xunit_report = self.report_dir / "xunit.xml"
self.cache = Env.BOK_CHOY_CACHE
self.fasttest = kwargs.get('fasttest', False)
self.serversonly = kwargs.get('serversonly', False)
self.testsonly = kwargs.get('testsonly', False)
self.test_spec = kwargs.get('test_spec', None)
self.default_store = kwargs.get('default_store', None)
self.verbosity = kwargs.get('verbosity', 2)
self.extra_args = kwargs.get('extra_args', '')
self.har_dir = self.log_dir / 'hars'
self.imports_dir = kwargs.get('imports_dir', None)
def __enter__(self):
super(BokChoyTestSuite, self).__enter__()
# Ensure that we have a directory to put logs and reports
self.log_dir.makedirs_p()
self.har_dir.makedirs_p()
self.report_dir.makedirs_p()
test_utils.clean_reports_dir()
if not (self.fasttest or self.skip_clean):
test_utils.clean_test_files()
msg = colorize('green', "Checking for mongo, memcache, and mysql...")
print msg
bokchoy_utils.check_services()
if not self.testsonly:
self.prepare_bokchoy_run()
msg = colorize('green', "Confirming servers have started...")
print msg
bokchoy_utils.wait_for_test_servers()
if self.serversonly:
self.run_servers_continuously()
def __exit__(self, exc_type, exc_value, traceback):
super(BokChoyTestSuite, self).__exit__(exc_type, exc_value, traceback)
msg = colorize('green', "Cleaning up databases...")
print msg
# Clean up data we created in the databases
sh("./manage.py lms --settings bok_choy flush --traceback --noinput")
bokchoy_utils.clear_mongo()
def prepare_bokchoy_run(self):
"""
Sets up and starts servers for a Bok Choy run. If --fasttest is not
specified then static assets are collected
"""
sh("{}/scripts/reset-test-db.sh".format(Env.REPO_ROOT))
if not self.fasttest:
self.generate_optimized_static_assets()
# Clear any test data already in Mongo or MySQL and invalidate
# the cache
bokchoy_utils.clear_mongo()
self.cache.flush_all()
sh(
"DEFAULT_STORE={default_store}"
" ./manage.py lms --settings bok_choy loaddata --traceback"
" common/test/db_fixtures/*.json".format(
default_store=self.default_store,
)
)
if self.imports_dir:
sh(
"DEFAULT_STORE={default_store}"
" ./manage.py cms --settings=bok_choy import {import_dir}".format(
default_store=self.default_store,
import_dir=self.imports_dir
)
)
# Ensure the test servers are available
msg = colorize('green', "Confirming servers are running...")
print msg
bokchoy_utils.start_servers(self.default_store)
def run_servers_continuously(self):
"""
Infinite loop. Servers will continue to run in the current session unless interrupted.
"""
print 'Bok-choy servers running. Press Ctrl-C to exit...\n'
print 'Note: pressing Ctrl-C multiple times can corrupt noseid files and system state. Just press it once.\n'
while True:
try:
sleep(10000)
except KeyboardInterrupt:
print "Stopping bok-choy servers.\n"
break
@property
def cmd(self):
"""
This method composes the nosetests command to send to the terminal. If nosetests aren't being run,
the command returns an empty string.
"""
# Default to running all tests if no specific test is specified
if not self.test_spec:
test_spec = self.test_dir
else:
test_spec = self.test_dir / self.test_spec
# Skip any additional commands (such as nosetests) if running in
# servers only mode
if self.serversonly:
return ""
# Construct the nosetests command, specifying where to save
# screenshots and XUnit XML reports
cmd = [
"DEFAULT_STORE={}".format(self.default_store),
"SCREENSHOT_DIR='{}'".format(self.log_dir),
"BOK_CHOY_HAR_DIR='{}'".format(self.har_dir),
"SELENIUM_DRIVER_LOG_DIR='{}'".format(self.log_dir),
"nosetests",
test_spec,
"--with-xunit",
"--xunit-file={}".format(self.xunit_report),
"--verbosity={}".format(self.verbosity),
]
if self.pdb:
cmd.append("--pdb")
cmd.append(self.extra_args)
cmd = (" ").join(cmd)
return cmd
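# For reference (illustrative comment; the concrete paths are assumptions),
# with default_store='split' and test_spec='test_lms.py' the composed command
# looks roughly like:
#
#   DEFAULT_STORE=split SCREENSHOT_DIR='<log_dir>' BOK_CHOY_HAR_DIR='<hars>'
#   SELENIUM_DRIVER_LOG_DIR='<log_dir>' nosetests <test_dir>/test_lms.py
#   --with-xunit --xunit-file=<report_dir>/xunit.xml --verbosity=2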
| agpl-3.0 |
QijunPan/ansible | lib/ansible/modules/cloud/rackspace/rax_cbs.py | 25 | 7272 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: rax_cbs
short_description: Manipulate Rackspace Cloud Block Storage Volumes
description:
- Manipulate Rackspace Cloud Block Storage Volumes
version_added: 1.6
options:
description:
description:
- Description to give the volume being created
default: null
image:
description:
- image to use for bootable volumes. Can be a C(id), C(human_id) or
C(name). This option requires C(pyrax>=1.9.3)
default: null
version_added: 1.9
meta:
description:
- A hash of metadata to associate with the volume
default: null
name:
description:
- Name to give the volume being created
default: null
required: true
size:
description:
- Size of the volume to create in Gigabytes
default: 100
required: true
snapshot_id:
description:
- The id of the snapshot to create the volume from
default: null
state:
description:
- Indicate desired state of the resource
choices:
- present
- absent
default: present
required: true
volume_type:
description:
- Type of the volume being created
choices:
- SATA
- SSD
default: SATA
required: true
wait:
description:
- wait for the volume to be in state 'available' before returning
default: "no"
choices:
- "yes"
- "no"
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 300
author:
- "Christopher H. Laco (@claco)"
- "Matt Martz (@sivel)"
extends_documentation_fragment: rackspace.openstack
'''
EXAMPLES = '''
- name: Build a Block Storage Volume
gather_facts: False
hosts: local
connection: local
tasks:
- name: Storage volume create request
local_action:
module: rax_cbs
credentials: ~/.raxpub
name: my-volume
description: My Volume
volume_type: SSD
size: 150
region: DFW
wait: yes
state: present
meta:
app: my-cool-app
register: my_volume
'''
from distutils.version import LooseVersion
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
def cloud_block_storage(module, state, name, description, meta, size,
snapshot_id, volume_type, wait, wait_timeout,
image):
changed = False
volume = None
instance = {}
cbs = pyrax.cloud_blockstorage
if cbs is None:
module.fail_json(msg='Failed to instantiate client. This '
'typically indicates an invalid region or an '
'incorrectly capitalized region name.')
if image:
# pyrax<1.9.3 did not have support for specifying an image when
# creating a volume which is required for bootable volumes
if LooseVersion(pyrax.version.version) < LooseVersion('1.9.3'):
module.fail_json(msg='Creating a bootable volume requires '
'pyrax>=1.9.3')
image = rax_find_image(module, pyrax, image)
volume = rax_find_volume(module, pyrax, name)
if state == 'present':
if not volume:
kwargs = dict()
if image:
kwargs['image'] = image
try:
volume = cbs.create(name, size=size, volume_type=volume_type,
description=description,
metadata=meta,
snapshot_id=snapshot_id, **kwargs)
changed = True
except Exception as e:
module.fail_json(msg='%s' % e.message)
else:
if wait:
attempts = wait_timeout / 5
pyrax.utils.wait_for_build(volume, interval=5,
attempts=attempts)
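# e.g. the default wait_timeout of 300 seconds yields 300 / 5 = 60
# polling attempts at 5-second intervals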
volume.get()
instance = rax_to_dict(volume)
result = dict(changed=changed, volume=instance)
if volume.status == 'error':
result['msg'] = '%s failed to build' % volume.id
elif wait and volume.status not in VOLUME_STATUS:
result['msg'] = 'Timeout waiting on %s' % volume.id
if 'msg' in result:
module.fail_json(**result)
else:
module.exit_json(**result)
elif state == 'absent':
if volume:
instance = rax_to_dict(volume)
try:
volume.delete()
changed = True
except Exception as e:
module.fail_json(msg='%s' % e.message)
module.exit_json(changed=changed, volume=instance)
def main():
argument_spec = rax_argument_spec()
argument_spec.update(
dict(
description=dict(type='str'),
image=dict(type='str'),
meta=dict(type='dict', default={}),
name=dict(required=True),
size=dict(type='int', default=100),
snapshot_id=dict(),
state=dict(default='present', choices=['present', 'absent']),
volume_type=dict(choices=['SSD', 'SATA'], default='SATA'),
wait=dict(type='bool', default=False),
wait_timeout=dict(type='int', default=300)
)
)
module = AnsibleModule(
argument_spec=argument_spec,
required_together=rax_required_together()
)
if not HAS_PYRAX:
module.fail_json(msg='pyrax is required for this module')
description = module.params.get('description')
image = module.params.get('image')
meta = module.params.get('meta')
name = module.params.get('name')
size = module.params.get('size')
snapshot_id = module.params.get('snapshot_id')
state = module.params.get('state')
volume_type = module.params.get('volume_type')
wait = module.params.get('wait')
wait_timeout = module.params.get('wait_timeout')
setup_rax_module(module, pyrax)
cloud_block_storage(module, state, name, description, meta, size,
snapshot_id, volume_type, wait, wait_timeout,
image)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *
# invoke the module
if __name__ == '__main__':
main()
| gpl-3.0 |
boundarydevices/android_external_chromium_org | build/landmine_utils.py | 59 | 2787 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import functools
import logging
import os
import shlex
import sys
def memoize(default=None):
"""This decorator caches the return value of a parameterless pure function"""
def memoizer(func):
val = []
@functools.wraps(func)
def inner():
if not val:
ret = func()
val.append(ret if ret is not None else default)
if logging.getLogger().isEnabledFor(logging.INFO):
print '%s -> %r' % (func.__name__, val[0])
return val[0]
return inner
return memoizer
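# A minimal usage sketch (the decorated function below is hypothetical and
# not part of this module): the body runs once, later calls hit the cache.
#
#   @memoize(default='unknown')
#   def host_arch():
#       return os.uname()[4]   # evaluated only on the first call
#
#   host_arch()  # computes and caches the value
#   host_arch()  # returns the cached value without re-running the body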
@memoize()
def IsWindows():
return sys.platform in ['win32', 'cygwin']
@memoize()
def IsLinux():
return sys.platform.startswith(('linux', 'freebsd'))
@memoize()
def IsMac():
return sys.platform == 'darwin'
@memoize()
def gyp_defines():
"""Parses and returns GYP_DEFINES env var as a dictionary."""
return dict(arg.split('=', 1)
for arg in shlex.split(os.environ.get('GYP_DEFINES', '')))
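# Example (illustrative): with GYP_DEFINES="OS=android goma=1" in the
# environment, gyp_defines() returns {'OS': 'android', 'goma': '1'};
# note that every value comes back as a string.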
@memoize()
def gyp_msvs_version():
return os.environ.get('GYP_MSVS_VERSION', '')
@memoize()
def distributor():
"""
Returns a string which is the distributed build engine in use (if any).
Possible values: 'goma', 'ib', ''
"""
if 'goma' in gyp_defines():
return 'goma'
elif IsWindows():
if 'CHROME_HEADLESS' in os.environ:
return 'ib' # use (win and !goma and headless) as approximation of ib
@memoize()
def platform():
"""
Returns a string representing the platform this build is targeted for.
Possible values: 'win', 'mac', 'linux', 'ios', 'android'
"""
if 'OS' in gyp_defines():
if 'android' in gyp_defines()['OS']:
return 'android'
else:
return gyp_defines()['OS']
elif IsWindows():
return 'win'
elif IsLinux():
return 'linux'
else:
return 'mac'
@memoize()
def builder():
"""
Returns a string representing the build engine (not compiler) to use.
Possible values: 'make', 'ninja', 'xcode', 'msvs', 'scons'
"""
if 'GYP_GENERATORS' in os.environ:
# for simplicity, only support the first explicit generator
generator = os.environ['GYP_GENERATORS'].split(',')[0]
if generator.endswith('-android'):
return generator.split('-')[0]
elif generator.endswith('-ninja'):
return 'ninja'
else:
return generator
else:
if platform() == 'android':
# Good enough for now? Do any android bots use make?
return 'ninja'
elif platform() == 'ios':
return 'xcode'
elif IsWindows():
return 'ninja'
elif IsLinux():
return 'ninja'
elif IsMac():
return 'ninja'
else:
assert False, 'Don\'t know what builder we\'re using!'
| bsd-3-clause |
eleonrk/SickRage | lib/github/tests/Github_.py | 9 | 17814 | # -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <[email protected]> #
# Copyright 2012 Zearin <[email protected]> #
# Copyright 2013 Vincent Jacques <[email protected]> #
# #
# This file is part of PyGithub. #
# http://pygithub.github.io/PyGithub/v1/index.html #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import datetime
import Framework
import github
class Github(Framework.TestCase):
def testGetGists(self):
self.assertListKeyBegin(self.g.get_gists(), lambda g: g.id, ["2729695", "2729656", "2729597", "2729584", "2729569", "2729554", "2729543", "2729537", "2729536", "2729533", "2729525", "2729522", "2729519", "2729515", "2729506", "2729487", "2729484", "2729482", "2729441", "2729432", "2729420", "2729398", "2729372", "2729371", "2729351", "2729346", "2729316", "2729304", "2729296", "2729276", "2729272", "2729265", "2729195", "2729160", "2729143", "2729127", "2729119", "2729113", "2729103", "2729069", "2729059", "2729051", "2729029", "2729027", "2729026", "2729022", "2729002", "2728985", "2728979", "2728964", "2728937", "2728933", "2728884", "2728869", "2728866", "2728855", "2728854", "2728853", "2728846", "2728825", "2728814", "2728813", "2728812", "2728805", "2728802", "2728800", "2728798", "2728797", "2728796", "2728793", "2728758", "2728754", "2728751", "2728748", "2728721", "2728716", "2728715", "2728705", "2728701", "2728699", "2728697", "2728688", "2728683", "2728677", "2728649", "2728640", "2728625", "2728620", "2728615", "2728614", "2728565", "2728564", "2728554", "2728523", "2728519", "2728511", "2728497", "2728496", "2728495", "2728487"])
def testLegacySearchRepos(self):
repos = self.g.legacy_search_repos("github api v3")
self.assertListKeyBegin(repos, lambda r: r.name, ["github", "octonode", "PyGithub"])
self.assertEqual(repos[0].full_name, "peter-murach/github")
# Attributes retrieved from legacy API without lazy completion call
self.assertEqual(repos[2].created_at, datetime.datetime(2012, 2, 25, 12, 53, 47))
self.assertEqual(repos[2].name, "PyGithub")
self.assertEqual(repos[2].watchers, 365)
self.assertTrue(repos[2].has_downloads)
self.assertEqual(repos[2].homepage, "http://jacquev6.github.io/PyGithub")
self.assertEqual(repos[2].url, "/repos/jacquev6/PyGithub")
self.assertFalse(repos[2].fork)
self.assertTrue(repos[2].has_issues)
self.assertFalse(repos[2].has_wiki)
self.assertEqual(repos[2].forks, 102)
self.assertEqual(repos[2].size, 11373)
self.assertFalse(repos[2].private)
self.assertEqual(repos[2].open_issues, 14)
self.assertEqual(repos[2].pushed_at, datetime.datetime(2014, 3, 16, 17, 1, 56))
self.assertEqual(repos[2].description, "Python library implementing the full Github API v3")
self.assertEqual(repos[2].language, "Python")
self.assertEqual(repos[2].owner.login, "jacquev6")
self.assertEqual(repos[2].owner.url, "/users/jacquev6")
def testLegacySearchReposPagination(self):
repos = self.g.legacy_search_repos("document")
self.assertListKeyBegin(repos, lambda r: r.name, ["git", "nimbus", "kss", "sstoolkit", "lawnchair", "appledoc", "jQ.Mobi", "ipython", "mongoengine", "ravendb", "substance", "symfony-docs", "JavaScript-Garden", "DocSets-for-iOS", "yard", "phpDocumentor2", "phpsh", "Tangle", "Ingredients", "documentjs", "xhp", "couchdb-lucene", "dox", "magento2", "javascriptmvc", "FastPdfKit", "roar", "DocumentUp", "NoRM", "jsdoc", "tagger", "mongodb-csharp", "php-github-api", "beautiful-docs", "mongodb-odm", "iodocs", "seesaw", "bcx-api", "developer.github.com", "amqp", "docsplit", "pycco", "standards-and-practices", "tidy-html5", "redis-doc", "tomdoc", "docs", "flourish", "userguide", "swagger-ui", "rfc", "Weasel-Diesel", "yuidoc", "apigen", "document-viewer", "develop.github.com", "Shanty-Mongo", "PTShowcaseViewController", "gravatar_image_tag", "api-wow-docs", "mongoid-tree", "safari-json-formatter", "mayan", "orm-documentation", "jsfiddle-docs-alpha", "core", "documentcloud", "flexible-nav", "writeCapture", "readium", "xmldocument", "Documentation-Examples", "grails-doc", "stdeb", "aws-autoscaling", "voteable_mongo", "review", "spreadsheet_on_rails", "UKSyntaxColoredTextDocument", "mandango", "bdoc", "Documentation", "documents.com", "rghost", "ticket_mule", "vendo", "khan-api", "spring-data-document-examples", "rspec_api_documentation", "axlsx", "phpdox", "documentation", "Sami", "innershiv", "doxyclean", "documents", "rvm-site", "jqapi", "documentation", "hadoopy", "VichUploaderBundle", "pdoc", "documentation", "wii-js", "oss-docs", "scala-maven-plugin", "Documents", "documenter", "behemoth", "documentation", "documentation", "propelorm.github.com", "Kobold2D", "AutoObjectDocumentation", "php-mongodb-admin", "django-mongokit", "puppet-docs", "docs", "Document", "vendorer", "symfony1-docs", "shocco", "documentation", "jog", "docs", "documentation", "documentation", "documentation", "documentation", "Documentation", "documentation", "documentation", "phpunit-documentation", "ADCtheme", "NelmioApiDocBundle", "iCloud-Singleton-CloudMe", "Documentation", "document", "document_mapper", "heroku-docs", "couchdb-odm", "documentation", "documentation", "document", "documentation", "NanoStore", "documentation", "Documentation", "documentation", "Documentation", "documentation", "document", "documentation", "documentation", "Documentation", "Documentation", "grendel", "ceylon-compiler", "mbtiles-spec", "documentation", "documents", "documents", "Documents", "Documentation", "documentation", "Documentation", "documentation", "documents", "Documentation", "documentation", "documentation", "documents", "Documentation", "documentation", "documenter", "documentation", "documents", "Documents", "documents", "documents", "documentation", "Document", "document", "rdoc", "mongoid_token", "travis-ci.github.com", "Documents", "Documents", "documents", "Document", "Documentation", "documents", "Documents", "Documentation", "documents", "documents", "documents", "documentation", "Documents", "Document", "documents", "documents", "Documentation", "Documentation", "Document", "documents", "Documents", "Documents", "Documentation", "Documents", "documents", "Documents", "document", "documents", "Documentation", "Documents", "documents", "documents", "Documents", "documents", "Documentation", "documentation", "Document", "Documents", "documents", "documents", "documents", "Documentation", "Documentation", "Documents", "Documents", "Documents", "Documenter", "document", "Documentation", "Documents", "Documents", "documentation", 
"documentation", "Document", "Documents", "Documentation", "Documentation", "Documents", "documents", "Documents", "document", "documentation", "Documents", "documentation", "documentation", "documentation", "Documentation", "Documents", "Documents", "documentation", "Documents", "Documents", "documentation", "documentation", "documents", "Documentation", "documents", "documentation", "Documentation", "Documents", "documentation", "documentation", "documents", "documentation", "Umbraco5Docs", "documents", "Documents", "Documentation", "documents", "document", "documents", "document", "documents", "documentation", "Documents", "documents", "document", "Documents", "Documentation", "Documentation", "documentation", "Documentation", "document", "documentation", "documents", "documents", "Documentations", "document", "documentation", "Documentation", "Document", "Documents", "Documents", "Document"])
def testLegacySearchReposExplicitPagination(self):
repos = self.g.legacy_search_repos("python")
self.assertEqual([r.name for r in repos.get_page(4)], ["assetic", "cartodb", "cuisine", "gae-sessions", "geoalchemy2", "Multicorn", "wmfr-timeline", "redis-rdb-tools", "applet-workflows", "TweetBuff", "groovy-core", "StarTrekGame", "Nuevo", "Cupid", "node-sqlserver", "Magnet2Torrent", "GroundControl", "mock-django", "4bit", "mock-django", "Fabulous", "SFML", "pydicas", "flixel", "up", "mongrel2", "SimpleHTTPServerJs", "ultimos", "Archipel", "JSbooks", "nova", "nodebox", "simplehttp", "dablooms", "solarized", "landslide", "jQuery-File-Upload", "jQuery-File-Upload", "jQuery-File-Upload", "password-manager", "electrum", "twitter_nlp", "djangbone", "pyxfst", "node-gyp", "flare", "www.gittip.com", "wymeditor", "Kokobox", "MyCQ", "runwalk", "git-sweep", "HPCPythonSC2012", "sundown", "node2dm", "statirator", "fantastic-futures", "chainsaw", "itcursos-gerenciador-tarefas", "TideSDK", "genmaybot", "melpa", "ConnectedWire", "tarantool", "anserindicus_sn", "luvit", "Minecraft-Overviewer", "Iconic", "pyist.net", "wikibok", "mejorenvo-scraper", "NewsBlur", "SocketRocket", "spf13-vim", "IWantToWorkAtGloboCom", "ruby-style-guide", "aery32-refguide", "fafsite", "compsense_demo", "enaml", "mpi4py", "fi.pycon.org", "scikits-image", "scikits-image", "uni", "mako.vim", "mako.vim", "slumber", "de-composer", "nvm", "helloshopply", "Alianza", "vimfiles", "socorro-crashstats", "menu", "analytics", "elFinder", "riak_wiki", "livestreamer", "git-goggles"])
def testLegacySearchReposWithLanguage(self):
repos = self.g.legacy_search_repos("document", language="Python")
self.assertListKeyBegin(repos, lambda r: r.name, ["ipython", "mongoengine", "tagger"])
self.assertEqual(repos[0].full_name, "ipython/ipython")
def testLegacySearchUsers(self):
users = self.g.legacy_search_users("vincent")
self.assertListKeyBegin(users, lambda u: u.login, ["nvie", "obra", "lusis"])
# Attributes retrieved from legacy API without lazy completion call
self.assertEqual(users[0].gravatar_id, "c5a7f21b46df698f3db31c37ed0cf55a")
self.assertEqual(users[0].name, "Vincent Driessen")
self.assertEqual(users[0].created_at, datetime.datetime(2009, 5, 12, 21, 19, 38))
self.assertEqual(users[0].location, "Netherlands")
self.assertEqual(users[0].followers, 310)
self.assertEqual(users[0].public_repos, 63)
self.assertEqual(users[0].login, "nvie")
def testLegacySearchUsersPagination(self):
self.assertEqual(len(list(self.g.legacy_search_users("Lucy"))), 146)
def testLegacySearchUsersExplicitPagination(self):
users = self.g.legacy_search_users("Lucy")
self.assertEqual([u.login for u in users.get_page(1)], ["lucievh", "lucyim", "Lucief", "RevolverUpstairs", "seriousprogramming", "reicul", "davincidubai", "LucianaNascimentodoPrado", "lucia-huenchunao", "kraji20", "Lucywolo", "Luciel", "sunnysummer", "elush", "oprealuci", "Flika", "lsher", "datadrivenjournalism", "nill2020", "doobi", "lucilu", "deldeldel", "lucianacocca", "lucyli-sfdc", "lucysatchell", "UBM", "kolousek", "lucyzhang", "lmegia", "luisolivo", "Lucyzhen", "Luhzinha", "beautifly", "lucybm96", "BuonocoreL", "lucywilliams", "ZxOxZ", "Motwinb", "johnlucy", "Aquanimation", "alaltaieri", "lucylin", "lucychambers", "JuanSesma", "cdwwebware", "ZachWills"])
def testLegacySearchUserByEmail(self):
user = self.g.legacy_search_user_by_email("[email protected]")
self.assertEqual(user.login, "jacquev6")
self.assertEqual(user.followers, 13)
def testGetHooks(self):
hooks = self.g.get_hooks()
hook = hooks[0]
self.assertEqual(hook.name, "activecollab")
self.assertEqual(hook.supported_events, ["push"])
self.assertEqual(hook.events, ["push"])
self.assertEqual(hook.schema, [["string", "url"], ["string", "token"], ["string", "project_id"], ["string", "milestone_id"], ["string", "category_id"]])
def testGetEmojis(self):
emojis = self.g.get_emojis()
first = emojis.get("+1")
self.assertEqual(first, "https://github.global.ssl.fastly.net/images/icons/emoji/+1.png?v5")
def testGetHook(self):
hook = self.g.get_hook("activecollab")
self.assertEqual(hook.name, "activecollab")
self.assertEqual(hook.supported_events, ["push"])
self.assertEqual(hook.events, ["push"])
self.assertEqual(hook.schema, [["string", "url"], ["string", "token"], ["string", "project_id"], ["string", "milestone_id"], ["string", "category_id"]])
def testGetRepoFromFullName(self):
self.assertEqual(self.g.get_repo("jacquev6/PyGithub").description, "Python library implementing the full Github API v3")
def testGetRepoFromId(self):
self.assertEqual(self.g.get_repo(3544490).description, "Python library implementing the full Github API v3")
def testGetGitignoreTemplates(self):
self.assertEqual(self.g.get_gitignore_templates(), ["Actionscript", "Android", "AppceleratorTitanium", "Autotools", "Bancha", "C", "C++", "CFWheels", "CMake", "CSharp", "CakePHP", "Clojure", "CodeIgniter", "Compass", "Concrete5", "Coq", "Delphi", "Django", "Drupal", "Erlang", "ExpressionEngine", "Finale", "ForceDotCom", "FuelPHP", "GWT", "Go", "Grails", "Haskell", "Java", "Jboss", "Jekyll", "Joomla", "Jython", "Kohana", "LaTeX", "Leiningen", "LemonStand", "Lilypond", "Lithium", "Magento", "Maven", "Node", "OCaml", "Objective-C", "Opa", "OracleForms", "Perl", "PlayFramework", "Python", "Qooxdoo", "Qt", "R", "Rails", "RhodesRhomobile", "Ruby", "Scala", "Sdcc", "SeamGen", "SketchUp", "SugarCRM", "Symfony", "Symfony2", "SymphonyCMS", "Target3001", "Tasm", "Textpattern", "TurboGears2", "Unity", "VB.Net", "Waf", "Wordpress", "Yii", "ZendFramework", "gcov", "nanoc", "opencart"])
def testGetGitignoreTemplate(self):
t = self.g.get_gitignore_template("Python")
self.assertEqual(t.name, "Python")
self.assertEqual(t.source, "*.py[cod]\n\n# C extensions\n*.so\n\n# Packages\n*.egg\n*.egg-info\ndist\nbuild\neggs\nparts\nbin\nvar\nsdist\ndevelop-eggs\n.installed.cfg\nlib\nlib64\n\n# Installer logs\npip-log.txt\n\n# Unit test / coverage reports\n.coverage\n.tox\nnosetests.xml\n\n# Translations\n*.mo\n\n# Mr Developer\n.mr.developer.cfg\n.project\n.pydevproject\n")
t = self.g.get_gitignore_template("C++")
self.assertEqual(t.name, "C++")
self.assertEqual(t.source, "# Compiled Object files\n*.slo\n*.lo\n*.o\n\n# Compiled Dynamic libraries\n*.so\n*.dylib\n\n# Compiled Static libraries\n*.lai\n*.la\n*.a\n")
def testStringOfNotSet(self):
self.assertEqual(str(github.GithubObject.NotSet), "NotSet")
def testGetUsers(self):
self.assertListKeyBegin(self.g.get_users(), lambda u: u.login, ["mojombo", "defunkt", "pjhyett", "wycats", "ezmobius", "ivey", "evanphx", "vanpelt", "wayneeseguin", "brynary", "kevinclark", "technoweenie", "macournoyer", "takeo", "Caged", "topfunky", "anotherjesse", "roland", "lukas", "fanvsfan", "tomtt", "railsjitsu", "nitay", "kevwil", "KirinDave", "jamesgolick", "atmos", "errfree", "mojodna", "bmizerany", "jnewland", "joshknowles", "hornbeck", "jwhitmire", "elbowdonkey", "reinh", "timocratic", "bs", "rsanheim", "schacon", "uggedal", "bruce", "sam", "mmower", "abhay", "rabble", "benburkert", "indirect", "fearoffish", "ry", "engineyard", "jsierles", "tweibley", "peimei", "brixen", "tmornini", "outerim", "daksis", "sr", "lifo", "rsl", "imownbey", "dylanegan", "jm", "willcodeforfoo", "jvantuyl", "BrianTheCoder", "freeformz", "hassox", "automatthew", "queso", "lancecarlson", "drnic", "lukesutton", "danwrong", "hcatlin", "jfrost", "mattetti", "ctennis", "lawrencepit", "marcjeanson", "grempe", "peterc", "ministrycentered", "afarnham", "up_the_irons", "evilchelu", "heavysixer", "brosner", "danielmorrison", "danielharan", "kvnsmth", "collectiveidea", "canadaduane", "nate", "dstrelau", "sunny", "dkubb", "jnicklas", "richcollins", "simonjefford"])
def testGetUsersSince(self):
self.assertListKeyBegin(self.g.get_users(since=1000), lambda u: u.login, ["sbecker"])
def testGetRepos(self):
self.assertListKeyBegin(self.g.get_repos(), lambda r: r.name, ["grit", "merb-core", "rubinius", "god", "jsawesome", "jspec", "exception_logger", "ambition"])
def testGetReposSince(self):
self.assertListKeyBegin(self.g.get_repos(since=1000), lambda r: r.name, ["jquery-humanize-messages-plugin", "4slicer", "fixture-scenarios", "mongrel_proctitle", "rails-plugins"])
| gpl-3.0 |
jeremygibbs/microhh | cases/lasso/lasso_init.py | 5 | 6596 | import netCDF4 as nc
import numpy as np
import f90nml
import shutil
import os
from datetime import datetime
#import os.path
#import os
#import sys
#import glob
float_type = "f8"
largescale = True
# Define some constants
cp = 1004.
rd = 287.04
grav = 9.8
rho = 1.225
p0 = 1e5
Lv = 2.5e6
tau = 21600.
# Get number of vertical levels and size from .ini file
shutil.copy2('testbed.ini', 'testbed.tmp')
with open('testbed.tmp') as f:
for line in f:
if(line.split('=')[0] == 'ktot'):
kmax = int(line.split('=')[1])
if(line.split('=')[0] == 'zsize'):
zsize = float(line.split('=')[1])
dz = zsize / kmax
zstretch = 5800.
stretch = 1.04
# Read WRF Namelist
fnml = "config/namelist.input"
nml = f90nml.read(fnml)
runtime_in = nml["time_control"]["run_days"] * 86400 + nml["time_control"]["run_hours"] * \
3600 + nml["time_control"]["run_minutes"] * 60 + nml["time_control"]["run_seconds"]
# Read Surface pressure
fname_in = "config/wrfinput_d01.nc"
f = nc.Dataset(fname_in, 'r+')
ps_in = f.variables['PB'][:][0, 0, 0, 0]
#wrfstattime = f.variables['XTIME'][:]*60.
f.close()
# Read WRF Surface Forcing
fname_in = "config/input_sfc_forcing.nc"
f = nc.Dataset(fname_in, 'r+')
H_in = f.variables['PRE_SH_FLX'][:]
LE_in = f.variables['PRE_LH_FLX'][:]
f.close()
# Read WRF LS Forcing & Nudging
fname_in = "config/input_ls_forcing.nc"
f = nc.Dataset(fname_in, 'r+')
timestr = f.variables['Times'][:]
z_in = f.variables['Z_LS'][:]
u_in = f.variables['U_LS'][:]
v_in = f.variables['V_LS'][:]
thl_in = f.variables['TH_RLX'][:]
qt_in = f.variables['QV_RLX'][:]
thlls_in = f.variables['TH_ADV'][:]
qtls_in = f.variables['QV_ADV'][:]
wls_in = f.variables['W_LS'][:]
f.close()
str = np.chararray(timestr.shape)
dt = np.empty(timestr.shape[0], dtype=datetime)
tnudge = np.zeros(timestr.shape[0])
for i in range(timestr.shape[0]):
dt[i] = datetime.strptime(
timestr[i].tostring().decode(),
'%Y-%m-%d_%H:%M:%S')
tnudge[i] = (dt[i] - dt[0]).total_seconds()
ntnudge = tnudge.size
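# Illustrative example (timestamps hypothetical): an entry such as
# '2016-05-18_12:30:00' paired with a first record of '2016-05-18_12:00:00'
# yields tnudge[i] = 1800.0, i.e. seconds elapsed since the first record.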
#
# interpolate onto Microhh grid
ntnudge = timestr.shape[0]
# non-equidistant grid
z = np.zeros(kmax)
z[0] = 0.5 * dz
for k in range(1, kmax):
z[k] = z[k - 1] + 0.5 * dz
if z[k] > zstretch:
dz *= stretch
z[k] += 0.5 * dz
zh = np.zeros(kmax)
zh[:-1] = (z[1:] + z[:-1]) / 2
zh[-1] = 2 * z[-1] - zh[-2]
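# Sketch of the stretching rule above (actual numbers depend on the .ini
# file): below zstretch the spacing stays at the uniform dz = zsize / kmax;
# once a level passes zstretch, each subsequent dz grows by the factor
# `stretch`, i.e. dz, 1.04*dz, 1.04**2*dz, ..., so the grid coarsens aloft.
# A quick monotonicity check could be:
#   assert np.all(np.diff(z) > 0) and np.all(np.diff(zh) > 0)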
u = np.zeros((ntnudge, z.size))
v = np.zeros(np.shape(u))
thl = np.zeros(np.shape(u))
qt = np.zeros(np.shape(u))
thlls = np.zeros(np.shape(u))
qtls = np.zeros(np.shape(u))
wls = np.zeros(np.shape(u))
nudge_factor = np.zeros(np.shape(u))
p_sbot = np.zeros((ntnudge))
#p_sbot = np.interp(tnudge, wrfstattime,ps_in)
p_sbot[:] = ps_in
for t in range(tnudge.size):
u[t, :] = np.interp(z, z_in[t, :], u_in[t, :])
v[t, :] = np.interp(z, z_in[t, :], v_in[t, :])
thl[t, :] = np.interp(z, z_in[t, :], thl_in[t, :])
qt[t, :] = np.interp(z, z_in[t, :], qt_in[t, :])
thlls[t, :] = np.interp(z, z_in[t, :], thlls_in[t, :])
qtls[t, :] = np.interp(z, z_in[t, :], qtls_in[t, :])
wls[t, :] = np.interp(zh, z_in[t, :], wls_in[t, :])
ug = u
vg = v
nudge_factor[:, :] = 1. / tau
# Surface fluxes
rhosurf = p_sbot / (rd * thl[:, 0] * (1. + 0.61 * qt[:, 0]))
sbotthl = H_in / (rhosurf * cp)
sbotqt = LE_in / (rhosurf * Lv)
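# The conversions above follow the bulk relations H = rho*cp*(w'thl') and
# LE = rho*Lv*(w'qt'). As a rough worked example (numbers illustrative):
# H = 100 W m-2 with rho ~ 1.1 kg m-3 gives
#   sbotthl = 100. / (1.1 * 1004.) ~ 0.091 K m s-1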
# Modify .ini file: Add comments for case description; Alter lines where
# necessary.
inifile = open('testbed.ini', 'w')
inifile.write("#Converted from LASSO WRF" + "\n")
# inifile.write("#Start Date = " + timestr[0]+ "\n")
# inifile.write("#Stop Date = " + timestr[-1]+"\n")
with open('testbed.tmp') as f:
for line_in in f:
if(line_in.split('=')[0] == 'zsize'):
line = "zsize={0:f}\n".format(zh[-1])
elif(line_in.split('=')[0] == 'pbot'):
line = "pbot={0:f}\n".format(p_sbot[0])
else:
line = line_in
inifile.write(line)
inifile.close()
os.remove('testbed.tmp')
# Save all the input data to NetCDF
nc_file = nc.Dataset(
"testbed_input.nc",
mode="w",
datamodel="NETCDF4",
clobber=False)
nc_file.createDimension("z", kmax)
nc_z = nc_file.createVariable("z", float_type, ("z"))
nc_z[:] = z[:]
nc_file.createDimension("zh", kmax)
nc_zh = nc_file.createVariable("zh", float_type, ("zh"))
nc_zh[:] = zh[:]
# Create a group called "init" for the initial profiles.
nc_group_init = nc_file.createGroup("init")
nc_thl = nc_group_init.createVariable("thl", float_type, ("z"))
nc_qt = nc_group_init.createVariable("qt", float_type, ("z"))
nc_u = nc_group_init.createVariable("u", float_type, ("z"))
nc_v = nc_group_init.createVariable("v", float_type, ("z"))
nc_ug = nc_group_init.createVariable("u_geo", float_type, ("z"))
nc_vg = nc_group_init.createVariable("v_geo", float_type, ("z"))
nc_nudge_factor = nc_group_init.createVariable("nudgefac", float_type, ("z"))
nc_thl[:] = thl[0, :]
nc_qt[:] = qt[0, :]
nc_u[:] = u[0, :]
nc_v[:] = v[0, :]
nc_ug[:] = ug[0, :]
nc_vg[:] = vg[0, :]
nc_nudge_factor[:] = nudge_factor[0, :]
# Create a group called "timedep" for the timedep.
nc_group_timedep = nc_file.createGroup("timedep")
nc_group_timedep.createDimension("time", tnudge.size)
nc_time = nc_group_timedep.createVariable("time", float_type, ("time"))
nc_time[:] = tnudge[:]
nc_thl_sbot = nc_group_timedep.createVariable("thl_sbot", float_type, ("time"))
nc_qt_sbot = nc_group_timedep.createVariable("qt_sbot", float_type, ("time"))
nc_p_sbot = nc_group_timedep.createVariable("p_sbot", float_type, ("time"))
nc_thl_sbot[:] = sbotthl[:]
nc_qt_sbot[:] = sbotqt[:]
nc_p_sbot[:] = p_sbot[:]
nc_thl_ls = nc_group_timedep.createVariable(
"thl_ls", float_type, ("time", "z"))
nc_qt_ls = nc_group_timedep.createVariable("qt_ls", float_type, ("time", "z"))
nc_w_ls = nc_group_timedep.createVariable("w_ls", float_type, ("time", "zh"))
nc_u_g = nc_group_timedep.createVariable("u_geo", float_type, ("time", "z"))
nc_v_g = nc_group_timedep.createVariable("v_geo", float_type, ("time", "z"))
nc_u_nudge = nc_group_timedep.createVariable(
"u_nudge", float_type, ("time", "z"))
nc_v_nudge = nc_group_timedep.createVariable(
"v_nudge", float_type, ("time", "z"))
nc_thl_nudge = nc_group_timedep.createVariable(
"thl_nudge", float_type, ("time", "z"))
nc_qt_nudge = nc_group_timedep.createVariable(
"qt_nudge", float_type, ("time", "z"))
nc_thl_ls[:, :] = thlls[:, :]
nc_qt_ls[:, :] = qtls[:, :]
nc_w_ls[:, :] = wls[:, :]
nc_u_g[:, :] = ug[:, :]
nc_v_g[:, :] = vg[:, :]
nc_u_nudge[:, :] = u[:, :]
nc_v_nudge[:, :] = v[:, :]
nc_thl_nudge[:, :] = thl[:, :]
nc_qt_nudge[:, :] = qt[:, :]
nc_file.close()
print("done")
| gpl-3.0 |
melvon22/osmc | package/mediacenter-skin-osmc/files/usr/share/kodi/addons/script.module.unidecode/lib/unidecode/x0be.py | 253 | 4849 | data = (
'byum', # 0x00
'byub', # 0x01
'byubs', # 0x02
'byus', # 0x03
'byuss', # 0x04
'byung', # 0x05
'byuj', # 0x06
'byuc', # 0x07
'byuk', # 0x08
'byut', # 0x09
'byup', # 0x0a
'byuh', # 0x0b
'beu', # 0x0c
'beug', # 0x0d
'beugg', # 0x0e
'beugs', # 0x0f
'beun', # 0x10
'beunj', # 0x11
'beunh', # 0x12
'beud', # 0x13
'beul', # 0x14
'beulg', # 0x15
'beulm', # 0x16
'beulb', # 0x17
'beuls', # 0x18
'beult', # 0x19
'beulp', # 0x1a
'beulh', # 0x1b
'beum', # 0x1c
'beub', # 0x1d
'beubs', # 0x1e
'beus', # 0x1f
'beuss', # 0x20
'beung', # 0x21
'beuj', # 0x22
'beuc', # 0x23
'beuk', # 0x24
'beut', # 0x25
'beup', # 0x26
'beuh', # 0x27
'byi', # 0x28
'byig', # 0x29
'byigg', # 0x2a
'byigs', # 0x2b
'byin', # 0x2c
'byinj', # 0x2d
'byinh', # 0x2e
'byid', # 0x2f
'byil', # 0x30
'byilg', # 0x31
'byilm', # 0x32
'byilb', # 0x33
'byils', # 0x34
'byilt', # 0x35
'byilp', # 0x36
'byilh', # 0x37
'byim', # 0x38
'byib', # 0x39
'byibs', # 0x3a
'byis', # 0x3b
'byiss', # 0x3c
'bying', # 0x3d
'byij', # 0x3e
'byic', # 0x3f
'byik', # 0x40
'byit', # 0x41
'byip', # 0x42
'byih', # 0x43
'bi', # 0x44
'big', # 0x45
'bigg', # 0x46
'bigs', # 0x47
'bin', # 0x48
'binj', # 0x49
'binh', # 0x4a
'bid', # 0x4b
'bil', # 0x4c
'bilg', # 0x4d
'bilm', # 0x4e
'bilb', # 0x4f
'bils', # 0x50
'bilt', # 0x51
'bilp', # 0x52
'bilh', # 0x53
'bim', # 0x54
'bib', # 0x55
'bibs', # 0x56
'bis', # 0x57
'biss', # 0x58
'bing', # 0x59
'bij', # 0x5a
'bic', # 0x5b
'bik', # 0x5c
'bit', # 0x5d
'bip', # 0x5e
'bih', # 0x5f
'bba', # 0x60
'bbag', # 0x61
'bbagg', # 0x62
'bbags', # 0x63
'bban', # 0x64
'bbanj', # 0x65
'bbanh', # 0x66
'bbad', # 0x67
'bbal', # 0x68
'bbalg', # 0x69
'bbalm', # 0x6a
'bbalb', # 0x6b
'bbals', # 0x6c
'bbalt', # 0x6d
'bbalp', # 0x6e
'bbalh', # 0x6f
'bbam', # 0x70
'bbab', # 0x71
'bbabs', # 0x72
'bbas', # 0x73
'bbass', # 0x74
'bbang', # 0x75
'bbaj', # 0x76
'bbac', # 0x77
'bbak', # 0x78
'bbat', # 0x79
'bbap', # 0x7a
'bbah', # 0x7b
'bbae', # 0x7c
'bbaeg', # 0x7d
'bbaegg', # 0x7e
'bbaegs', # 0x7f
'bbaen', # 0x80
'bbaenj', # 0x81
'bbaenh', # 0x82
'bbaed', # 0x83
'bbael', # 0x84
'bbaelg', # 0x85
'bbaelm', # 0x86
'bbaelb', # 0x87
'bbaels', # 0x88
'bbaelt', # 0x89
'bbaelp', # 0x8a
'bbaelh', # 0x8b
'bbaem', # 0x8c
'bbaeb', # 0x8d
'bbaebs', # 0x8e
'bbaes', # 0x8f
'bbaess', # 0x90
'bbaeng', # 0x91
'bbaej', # 0x92
'bbaec', # 0x93
'bbaek', # 0x94
'bbaet', # 0x95
'bbaep', # 0x96
'bbaeh', # 0x97
'bbya', # 0x98
'bbyag', # 0x99
'bbyagg', # 0x9a
'bbyags', # 0x9b
'bbyan', # 0x9c
'bbyanj', # 0x9d
'bbyanh', # 0x9e
'bbyad', # 0x9f
'bbyal', # 0xa0
'bbyalg', # 0xa1
'bbyalm', # 0xa2
'bbyalb', # 0xa3
'bbyals', # 0xa4
'bbyalt', # 0xa5
'bbyalp', # 0xa6
'bbyalh', # 0xa7
'bbyam', # 0xa8
'bbyab', # 0xa9
'bbyabs', # 0xaa
'bbyas', # 0xab
'bbyass', # 0xac
'bbyang', # 0xad
'bbyaj', # 0xae
'bbyac', # 0xaf
'bbyak', # 0xb0
'bbyat', # 0xb1
'bbyap', # 0xb2
'bbyah', # 0xb3
'bbyae', # 0xb4
'bbyaeg', # 0xb5
'bbyaegg', # 0xb6
'bbyaegs', # 0xb7
'bbyaen', # 0xb8
'bbyaenj', # 0xb9
'bbyaenh', # 0xba
'bbyaed', # 0xbb
'bbyael', # 0xbc
'bbyaelg', # 0xbd
'bbyaelm', # 0xbe
'bbyaelb', # 0xbf
'bbyaels', # 0xc0
'bbyaelt', # 0xc1
'bbyaelp', # 0xc2
'bbyaelh', # 0xc3
'bbyaem', # 0xc4
'bbyaeb', # 0xc5
'bbyaebs', # 0xc6
'bbyaes', # 0xc7
'bbyaess', # 0xc8
'bbyaeng', # 0xc9
'bbyaej', # 0xca
'bbyaec', # 0xcb
'bbyaek', # 0xcc
'bbyaet', # 0xcd
'bbyaep', # 0xce
'bbyaeh', # 0xcf
'bbeo', # 0xd0
'bbeog', # 0xd1
'bbeogg', # 0xd2
'bbeogs', # 0xd3
'bbeon', # 0xd4
'bbeonj', # 0xd5
'bbeonh', # 0xd6
'bbeod', # 0xd7
'bbeol', # 0xd8
'bbeolg', # 0xd9
'bbeolm', # 0xda
'bbeolb', # 0xdb
'bbeols', # 0xdc
'bbeolt', # 0xdd
'bbeolp', # 0xde
'bbeolh', # 0xdf
'bbeom', # 0xe0
'bbeob', # 0xe1
'bbeobs', # 0xe2
'bbeos', # 0xe3
'bbeoss', # 0xe4
'bbeong', # 0xe5
'bbeoj', # 0xe6
'bbeoc', # 0xe7
'bbeok', # 0xe8
'bbeot', # 0xe9
'bbeop', # 0xea
'bbeoh', # 0xeb
'bbe', # 0xec
'bbeg', # 0xed
'bbegg', # 0xee
'bbegs', # 0xef
'bben', # 0xf0
'bbenj', # 0xf1
'bbenh', # 0xf2
'bbed', # 0xf3
'bbel', # 0xf4
'bbelg', # 0xf5
'bbelm', # 0xf6
'bbelb', # 0xf7
'bbels', # 0xf8
'bbelt', # 0xf9
'bbelp', # 0xfa
'bbelh', # 0xfb
'bbem', # 0xfc
'bbeb', # 0xfd
'bbebs', # 0xfe
'bbes', # 0xff
)
| gpl-2.0 |
TeachAtTUM/edx-platform | common/djangoapps/terrain/browser.py | 15 | 10334 | """
Browser set up for acceptance tests.
"""
# pylint: disable=no-member
# pylint: disable=unused-argument
from base64 import encodestring
from json import dumps
from logging import getLogger
import requests
from django.conf import settings
from django.core.management import call_command
from lettuce import after, before, world
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from splinter.browser import Browser
from xmodule.contentstore.django import _CONTENTSTORE
LOGGER = getLogger(__name__)
LOGGER.info("Loading the lettuce acceptance testing terrain file...")
MAX_VALID_BROWSER_ATTEMPTS = 20
GLOBAL_SCRIPT_TIMEOUT = 60
def get_saucelabs_username_and_key():
"""
Returns the Sauce Labs username and access ID as set by environment variables
"""
return {"username": settings.SAUCE.get('USERNAME'), "access-key": settings.SAUCE.get('ACCESS_ID')}
def set_saucelabs_job_status(jobid, passed=True):
"""
Sets the job status on sauce labs
"""
config = get_saucelabs_username_and_key()
url = 'http://saucelabs.com/rest/v1/{}/jobs/{}'.format(config['username'], world.jobid)
body_content = dumps({"passed": passed})
base64string = encodestring('{}:{}'.format(config['username'], config['access-key']))[:-1]
headers = {"Authorization": "Basic {}".format(base64string)}
result = requests.put(url, data=body_content, headers=headers)
return result.status_code == 200
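# Illustrative usage only (assumes world.jobid has been absorbed during
# initial_setup): mark the current Sauce Labs job as failed with
#   set_saucelabs_job_status(world.jobid, passed=False)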
def make_saucelabs_desired_capabilities():
"""
Returns a DesiredCapabilities object corresponding to the environment sauce parameters
"""
desired_capabilities = settings.SAUCE.get('BROWSER', DesiredCapabilities.CHROME)
desired_capabilities['platform'] = settings.SAUCE.get('PLATFORM')
desired_capabilities['version'] = settings.SAUCE.get('VERSION')
desired_capabilities['device-type'] = settings.SAUCE.get('DEVICE')
desired_capabilities['name'] = settings.SAUCE.get('SESSION')
desired_capabilities['build'] = settings.SAUCE.get('BUILD')
desired_capabilities['video-upload-on-pass'] = False
desired_capabilities['sauce-advisor'] = False
desired_capabilities['capture-html'] = True
desired_capabilities['record-screenshots'] = True
desired_capabilities['selenium-version'] = "2.34.0"
desired_capabilities['max-duration'] = 3600
desired_capabilities['public'] = 'public restricted'
return desired_capabilities
@before.harvest
def initial_setup(server):
"""
Launch the browser once before executing the tests.
"""
world.absorb(settings.LETTUCE_SELENIUM_CLIENT, 'LETTUCE_SELENIUM_CLIENT')
if world.LETTUCE_SELENIUM_CLIENT == 'local':
browser_driver = getattr(settings, 'LETTUCE_BROWSER', 'chrome')
if browser_driver == 'chrome':
desired_capabilities = DesiredCapabilities.CHROME
desired_capabilities['loggingPrefs'] = {
'browser': 'ALL',
}
else:
desired_capabilities = {}
# There is an issue with ChromeDriver2 r195627 on Ubuntu
# in which we sometimes get an invalid browser session.
# This is a work-around to ensure that we get a valid session.
success = False
num_attempts = 0
while (not success) and num_attempts < MAX_VALID_BROWSER_ATTEMPTS:
# Load the browser and try to visit the main page
# If the browser couldn't be reached or
# the browser session is invalid, this will
# raise a WebDriverException
try:
if browser_driver == 'firefox':
# Lettuce initializes differently for firefox, and sending
# desired_capabilities will not work. So initialize without
# sending desired_capabilities.
world.browser = Browser(browser_driver)
else:
world.browser = Browser(browser_driver, desired_capabilities=desired_capabilities)
world.browser.driver.set_script_timeout(GLOBAL_SCRIPT_TIMEOUT)
world.visit('/')
except WebDriverException:
LOGGER.warn("Error acquiring %s browser, retrying", browser_driver, exc_info=True)
if hasattr(world, 'browser'):
world.browser.quit()
num_attempts += 1
else:
success = True
# If we were unable to get a valid session within the limit of attempts,
# then we cannot run the tests.
if not success:
raise IOError("Could not acquire valid {driver} browser session.".format(driver=browser_driver))
world.absorb(0, 'IMPLICIT_WAIT')
world.browser.driver.set_window_size(1280, 1024)
elif world.LETTUCE_SELENIUM_CLIENT == 'saucelabs':
config = get_saucelabs_username_and_key()
world.browser = Browser(
'remote',
url="http://{}:{}@ondemand.saucelabs.com:80/wd/hub".format(config['username'], config['access-key']),
**make_saucelabs_desired_capabilities()
)
world.absorb(30, 'IMPLICIT_WAIT')
world.browser.set_script_timeout(GLOBAL_SCRIPT_TIMEOUT)
elif world.LETTUCE_SELENIUM_CLIENT == 'grid':
world.browser = Browser(
'remote',
url=settings.SELENIUM_GRID.get('URL'),
browser=settings.SELENIUM_GRID.get('BROWSER'),
)
world.absorb(30, 'IMPLICIT_WAIT')
world.browser.driver.set_script_timeout(GLOBAL_SCRIPT_TIMEOUT)
else:
raise Exception("Unknown selenium client '{}'".format(world.LETTUCE_SELENIUM_CLIENT))
world.browser.driver.implicitly_wait(world.IMPLICIT_WAIT)
world.absorb(world.browser.driver.session_id, 'jobid')
@before.each_scenario
def reset_data(scenario):
"""
Clean out the django test database defined in the
envs/acceptance.py file: edx-platform/db/test_edx.db
"""
LOGGER.debug("Flushing the test database...")
call_command('flush', interactive=False, verbosity=0)
world.absorb({}, 'scenario_dict')
@before.each_scenario
def configure_screenshots(scenario):
"""
Before each scenario, turn off automatic screenshots.
Args: scenario (str): name of the current scenario.
"""
world.auto_capture_screenshots = False
@after.each_scenario
def clear_data(scenario):
world.spew('scenario_dict')
@after.each_scenario
def reset_databases(scenario):
"""
After each scenario, all databases are cleared/dropped. Contentstore data are stored in unique databases
whereas modulestore data is in unique collection names. This data is created implicitly during the scenarios.
If no data is created during the test, these lines effectively do nothing.
"""
import xmodule.modulestore.django
xmodule.modulestore.django.modulestore()._drop_database() # pylint: disable=protected-access
xmodule.modulestore.django.clear_existing_modulestores()
_CONTENTSTORE.clear()
@world.absorb
def capture_screenshot(image_name):
"""
Capture a screenshot outputting it to a defined directory.
This function expects only the name of the file. It will generate
the full path of the output screenshot.
If the name contains spaces, they will be converted to underscores.
"""
output_dir = '{}/log/auto_screenshots'.format(settings.TEST_ROOT)
image_name = '{}/{}.png'.format(output_dir, image_name.replace(' ', '_'))
try:
world.browser.driver.save_screenshot(image_name)
except WebDriverException:
LOGGER.error("Could not capture a screenshot '{}'".format(image_name))
@after.each_scenario
def screenshot_on_error(scenario):
"""
Save a screenshot to help with debugging.
"""
if scenario.failed:
try:
output_dir = '{}/log'.format(settings.TEST_ROOT)
image_name = '{}/{}.png'.format(output_dir, scenario.name.replace(' ', '_'))
world.browser.driver.save_screenshot(image_name)
except WebDriverException:
LOGGER.error('Could not capture a screenshot')
@after.each_scenario
def capture_console_log(scenario):
"""
Save the console log to help with debugging.
"""
if scenario.failed:
log = world.browser.driver.get_log('browser')
try:
output_dir = '{}/log'.format(settings.TEST_ROOT)
file_name = '{}/{}.log'.format(output_dir, scenario.name.replace(' ', '_'))
with open(file_name, 'w') as output_file:
for line in log:
output_file.write("{}{}".format(dumps(line), '\n'))
except WebDriverException:
LOGGER.error('Could not capture the console log')
def capture_screenshot_for_step(step, when):
"""
Useful method for debugging acceptance tests that are run in Vagrant.
This method runs automatically before and after each step of an acceptance
test scenario. The variable:
world.auto_capture_screenshots
either enables or disables the taking of screenshots. To change the
variable there is a convenient step defined:
I (enable|disable) auto screenshots
If you just want to capture a single screenshot at a desired point in code,
you should use the method:
world.capture_screenshot("image_name")
"""
if world.auto_capture_screenshots:
scenario_num = step.scenario.feature.scenarios.index(step.scenario) + 1
step_num = step.scenario.steps.index(step) + 1
step_func_name = step.defined_at.function.func_name
image_name = "{prefix:03d}__{num:03d}__{name}__{postfix}".format(
prefix=scenario_num,
num=step_num,
name=step_func_name,
postfix=when
)
world.capture_screenshot(image_name)
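# Example of a generated image name (scenario and step numbers are
# illustrative): scenario 3, step 12, step function 'i_visit_the_dashboard',
# captured before the step runs, produces
#   "003__012__i_visit_the_dashboard__1_before"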
@before.each_step
def before_each_step(step):
capture_screenshot_for_step(step, '1_before')
@after.each_step
def after_each_step(step):
capture_screenshot_for_step(step, '2_after')
@after.harvest
def saucelabs_status(total):
"""
Collect data for saucelabs.
"""
if world.LETTUCE_SELENIUM_CLIENT == 'saucelabs':
set_saucelabs_job_status(world.jobid, total.scenarios_ran == total.scenarios_passed)
| agpl-3.0 |
sebgoa/client-python | kubernetes/client/models/v1_node_system_info.py | 2 | 12434 | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.7.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1NodeSystemInfo(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, architecture=None, boot_id=None, container_runtime_version=None, kernel_version=None, kube_proxy_version=None, kubelet_version=None, machine_id=None, operating_system=None, os_image=None, system_uuid=None):
"""
V1NodeSystemInfo - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'architecture': 'str',
'boot_id': 'str',
'container_runtime_version': 'str',
'kernel_version': 'str',
'kube_proxy_version': 'str',
'kubelet_version': 'str',
'machine_id': 'str',
'operating_system': 'str',
'os_image': 'str',
'system_uuid': 'str'
}
self.attribute_map = {
'architecture': 'architecture',
'boot_id': 'bootID',
'container_runtime_version': 'containerRuntimeVersion',
'kernel_version': 'kernelVersion',
'kube_proxy_version': 'kubeProxyVersion',
'kubelet_version': 'kubeletVersion',
'machine_id': 'machineID',
'operating_system': 'operatingSystem',
'os_image': 'osImage',
'system_uuid': 'systemUUID'
}
self._architecture = architecture
self._boot_id = boot_id
self._container_runtime_version = container_runtime_version
self._kernel_version = kernel_version
self._kube_proxy_version = kube_proxy_version
self._kubelet_version = kubelet_version
self._machine_id = machine_id
self._operating_system = operating_system
self._os_image = os_image
self._system_uuid = system_uuid
@property
def architecture(self):
"""
Gets the architecture of this V1NodeSystemInfo.
The Architecture reported by the node
:return: The architecture of this V1NodeSystemInfo.
:rtype: str
"""
return self._architecture
@architecture.setter
def architecture(self, architecture):
"""
Sets the architecture of this V1NodeSystemInfo.
The Architecture reported by the node
:param architecture: The architecture of this V1NodeSystemInfo.
:type: str
"""
if architecture is None:
raise ValueError("Invalid value for `architecture`, must not be `None`")
self._architecture = architecture
@property
def boot_id(self):
"""
Gets the boot_id of this V1NodeSystemInfo.
Boot ID reported by the node.
:return: The boot_id of this V1NodeSystemInfo.
:rtype: str
"""
return self._boot_id
@boot_id.setter
def boot_id(self, boot_id):
"""
Sets the boot_id of this V1NodeSystemInfo.
Boot ID reported by the node.
:param boot_id: The boot_id of this V1NodeSystemInfo.
:type: str
"""
if boot_id is None:
raise ValueError("Invalid value for `boot_id`, must not be `None`")
self._boot_id = boot_id
@property
def container_runtime_version(self):
"""
Gets the container_runtime_version of this V1NodeSystemInfo.
ContainerRuntime Version reported by the node through runtime remote API (e.g. docker://1.5.0).
:return: The container_runtime_version of this V1NodeSystemInfo.
:rtype: str
"""
return self._container_runtime_version
@container_runtime_version.setter
def container_runtime_version(self, container_runtime_version):
"""
Sets the container_runtime_version of this V1NodeSystemInfo.
ContainerRuntime Version reported by the node through runtime remote API (e.g. docker://1.5.0).
:param container_runtime_version: The container_runtime_version of this V1NodeSystemInfo.
:type: str
"""
if container_runtime_version is None:
raise ValueError("Invalid value for `container_runtime_version`, must not be `None`")
self._container_runtime_version = container_runtime_version
@property
def kernel_version(self):
"""
Gets the kernel_version of this V1NodeSystemInfo.
Kernel Version reported by the node from 'uname -r' (e.g. 3.16.0-0.bpo.4-amd64).
:return: The kernel_version of this V1NodeSystemInfo.
:rtype: str
"""
return self._kernel_version
@kernel_version.setter
def kernel_version(self, kernel_version):
"""
Sets the kernel_version of this V1NodeSystemInfo.
Kernel Version reported by the node from 'uname -r' (e.g. 3.16.0-0.bpo.4-amd64).
:param kernel_version: The kernel_version of this V1NodeSystemInfo.
:type: str
"""
if kernel_version is None:
raise ValueError("Invalid value for `kernel_version`, must not be `None`")
self._kernel_version = kernel_version
@property
def kube_proxy_version(self):
"""
Gets the kube_proxy_version of this V1NodeSystemInfo.
KubeProxy Version reported by the node.
:return: The kube_proxy_version of this V1NodeSystemInfo.
:rtype: str
"""
return self._kube_proxy_version
@kube_proxy_version.setter
def kube_proxy_version(self, kube_proxy_version):
"""
Sets the kube_proxy_version of this V1NodeSystemInfo.
KubeProxy Version reported by the node.
:param kube_proxy_version: The kube_proxy_version of this V1NodeSystemInfo.
:type: str
"""
if kube_proxy_version is None:
raise ValueError("Invalid value for `kube_proxy_version`, must not be `None`")
self._kube_proxy_version = kube_proxy_version
@property
def kubelet_version(self):
"""
Gets the kubelet_version of this V1NodeSystemInfo.
Kubelet Version reported by the node.
:return: The kubelet_version of this V1NodeSystemInfo.
:rtype: str
"""
return self._kubelet_version
@kubelet_version.setter
def kubelet_version(self, kubelet_version):
"""
Sets the kubelet_version of this V1NodeSystemInfo.
Kubelet Version reported by the node.
:param kubelet_version: The kubelet_version of this V1NodeSystemInfo.
:type: str
"""
if kubelet_version is None:
raise ValueError("Invalid value for `kubelet_version`, must not be `None`")
self._kubelet_version = kubelet_version
@property
def machine_id(self):
"""
Gets the machine_id of this V1NodeSystemInfo.
MachineID reported by the node. For unique machine identification in the cluster this field is preferred. Learn more from man(5) machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html
:return: The machine_id of this V1NodeSystemInfo.
:rtype: str
"""
return self._machine_id
@machine_id.setter
def machine_id(self, machine_id):
"""
Sets the machine_id of this V1NodeSystemInfo.
MachineID reported by the node. For unique machine identification in the cluster this field is preferred. Learn more from man(5) machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html
:param machine_id: The machine_id of this V1NodeSystemInfo.
:type: str
"""
if machine_id is None:
raise ValueError("Invalid value for `machine_id`, must not be `None`")
self._machine_id = machine_id
@property
def operating_system(self):
"""
Gets the operating_system of this V1NodeSystemInfo.
The Operating System reported by the node
:return: The operating_system of this V1NodeSystemInfo.
:rtype: str
"""
return self._operating_system
@operating_system.setter
def operating_system(self, operating_system):
"""
Sets the operating_system of this V1NodeSystemInfo.
The Operating System reported by the node
:param operating_system: The operating_system of this V1NodeSystemInfo.
:type: str
"""
if operating_system is None:
raise ValueError("Invalid value for `operating_system`, must not be `None`")
self._operating_system = operating_system
@property
def os_image(self):
"""
Gets the os_image of this V1NodeSystemInfo.
OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)).
:return: The os_image of this V1NodeSystemInfo.
:rtype: str
"""
return self._os_image
@os_image.setter
def os_image(self, os_image):
"""
Sets the os_image of this V1NodeSystemInfo.
OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)).
:param os_image: The os_image of this V1NodeSystemInfo.
:type: str
"""
if os_image is None:
raise ValueError("Invalid value for `os_image`, must not be `None`")
self._os_image = os_image
@property
def system_uuid(self):
"""
Gets the system_uuid of this V1NodeSystemInfo.
SystemUUID reported by the node. For unique machine identification MachineID is preferred. This field is specific to Red Hat hosts https://access.redhat.com/documentation/en-US/Red_Hat_Subscription_Management/1/html/RHSM/getting-system-uuid.html
:return: The system_uuid of this V1NodeSystemInfo.
:rtype: str
"""
return self._system_uuid
@system_uuid.setter
def system_uuid(self, system_uuid):
"""
Sets the system_uuid of this V1NodeSystemInfo.
SystemUUID reported by the node. For unique machine identification MachineID is preferred. This field is specific to Red Hat hosts https://access.redhat.com/documentation/en-US/Red_Hat_Subscription_Management/1/html/RHSM/getting-system-uuid.html
:param system_uuid: The system_uuid of this V1NodeSystemInfo.
:type: str
"""
if system_uuid is None:
raise ValueError("Invalid value for `system_uuid`, must not be `None`")
self._system_uuid = system_uuid
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
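# Minimal usage sketch (field values are illustrative, not real node data):
#   info = V1NodeSystemInfo(architecture='amd64', boot_id='b1',
#                           container_runtime_version='docker://1.13.1',
#                           kernel_version='4.4.0', kube_proxy_version='v1.7.4',
#                           kubelet_version='v1.7.4', machine_id='m1',
#                           operating_system='linux', os_image='Ubuntu 16.04',
#                           system_uuid='u1')
#   info.to_dict()['kubelet_version']  # -> 'v1.7.4'; keys here are the
#   python attribute names, not the camelCase JSON keys in attribute_map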
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1NodeSystemInfo):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| apache-2.0 |
touchpro/android_kernel_lge_msm8226 | tools/perf/scripts/python/netdev-times.py | 11271 | 15048 | # Display a process of packets and processed time.
# It helps us to investigate networking or network devices.
#
# options
# tx: show only tx chart
# rx: show only rx chart
# dev=: show only things related to the specified device
# debug: work with debug mode. It shows buffer status.
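# Example invocation (device name illustrative; assumes the usual perf
# record-then-report workflow for trace scripts):
#   perf script -s netdev-times.py tx dev=eth0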
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
all_event_list = []; # all tracepoint events related to this script are inserted here
irq_dic = {}; # key is cpu and value is a list which stacks irqs
# which raise NET_RX softirq
net_rx_dic = {}; # key is cpu and value includes time of NET_RX softirq-entry
# and a list which stacks receive
receive_hunk_list = []; # a list which includes a sequence of receive events
rx_skb_list = []; # received packet list for matching
# skb_copy_datagram_iovec
buffer_budget = 65536; # the budget of rx_skb_list, tx_queue_list and
# tx_xmit_list
of_count_rx_skb_list = 0; # overflow count
tx_queue_list = []; # list of packets which pass through dev_queue_xmit
of_count_tx_queue_list = 0; # overflow count
tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit
of_count_tx_xmit_list = 0; # overflow count
tx_free_list = []; # list of packets which are freed
# options
show_tx = 0;
show_rx = 0;
dev = 0; # store a name of device specified by option "dev="
debug = 0;
# indices of event_info tuple
EINFO_IDX_NAME= 0
EINFO_IDX_CONTEXT=1
EINFO_IDX_CPU= 2
EINFO_IDX_TIME= 3
EINFO_IDX_PID= 4
EINFO_IDX_COMM= 5
# Calculate a time interval (msec) from src (nsec) to dst (nsec)
def diff_msec(src, dst):
return (dst - src) / 1000000.0
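# e.g. diff_msec(1000000, 2500000) returns 1.5, i.e. the nanosecond
# difference expressed in milliseconds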
# Display a process of transmitting a packet
def print_transmit(hunk):
if dev != 0 and hunk['dev'].find(dev) < 0:
return
print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \
(hunk['dev'], hunk['len'],
nsecs_secs(hunk['queue_t']),
nsecs_nsecs(hunk['queue_t'])/1000,
diff_msec(hunk['queue_t'], hunk['xmit_t']),
diff_msec(hunk['xmit_t'], hunk['free_t']))
# Format for displaying rx packet processing
PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)"
PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)"
PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)"
PF_JOINT= " |"
PF_WJOINT= " | |"
PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)"
PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)"
PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)"
PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)"
PF_CONS_SKB= " | consume_skb(+%.3fmsec)"
# Display a process of received packets and interrupts associated with
# a NET_RX softirq
def print_receive(hunk):
show_hunk = 0
irq_list = hunk['irq_list']
cpu = irq_list[0]['cpu']
base_t = irq_list[0]['irq_ent_t']
# check if this hunk should be showed
if dev != 0:
for i in range(len(irq_list)):
if irq_list[i]['name'].find(dev) >= 0:
show_hunk = 1
break
else:
show_hunk = 1
if show_hunk == 0:
return
print "%d.%06dsec cpu=%d" % \
(nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
for i in range(len(irq_list)):
print PF_IRQ_ENTRY % \
(diff_msec(base_t, irq_list[i]['irq_ent_t']),
irq_list[i]['irq'], irq_list[i]['name'])
print PF_JOINT
irq_event_list = irq_list[i]['event_list']
for j in range(len(irq_event_list)):
irq_event = irq_event_list[j]
if irq_event['event'] == 'netif_rx':
print PF_NET_RX % \
(diff_msec(base_t, irq_event['time']),
irq_event['skbaddr'])
print PF_JOINT
print PF_SOFT_ENTRY % \
diff_msec(base_t, hunk['sirq_ent_t'])
print PF_JOINT
event_list = hunk['event_list']
for i in range(len(event_list)):
event = event_list[i]
if event['event_name'] == 'napi_poll':
print PF_NAPI_POLL % \
(diff_msec(base_t, event['event_t']), event['dev'])
if i == len(event_list) - 1:
print ""
else:
print PF_JOINT
else:
print PF_NET_RECV % \
(diff_msec(base_t, event['event_t']), event['skbaddr'],
event['len'])
if 'comm' in event.keys():
print PF_WJOINT
print PF_CPY_DGRAM % \
(diff_msec(base_t, event['comm_t']),
event['pid'], event['comm'])
elif 'handle' in event.keys():
print PF_WJOINT
if event['handle'] == "kfree_skb":
print PF_KFREE_SKB % \
(diff_msec(base_t,
event['comm_t']),
event['location'])
elif event['handle'] == "consume_skb":
print PF_CONS_SKB % \
diff_msec(base_t,
event['comm_t'])
print PF_JOINT
def trace_begin():
global show_tx
global show_rx
global dev
global debug
for i in range(len(sys.argv)):
if i == 0:
continue
arg = sys.argv[i]
if arg == 'tx':
show_tx = 1
elif arg =='rx':
show_rx = 1
elif arg.find('dev=',0, 4) >= 0:
dev = arg[4:]
elif arg == 'debug':
debug = 1
if show_tx == 0 and show_rx == 0:
show_tx = 1
show_rx = 1
def trace_end():
# order all events in time
all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME],
b[EINFO_IDX_TIME]))
# process all events
for i in range(len(all_event_list)):
event_info = all_event_list[i]
name = event_info[EINFO_IDX_NAME]
if name == 'irq__softirq_exit':
handle_irq_softirq_exit(event_info)
elif name == 'irq__softirq_entry':
handle_irq_softirq_entry(event_info)
elif name == 'irq__softirq_raise':
handle_irq_softirq_raise(event_info)
elif name == 'irq__irq_handler_entry':
handle_irq_handler_entry(event_info)
elif name == 'irq__irq_handler_exit':
handle_irq_handler_exit(event_info)
elif name == 'napi__napi_poll':
handle_napi_poll(event_info)
elif name == 'net__netif_receive_skb':
handle_netif_receive_skb(event_info)
elif name == 'net__netif_rx':
handle_netif_rx(event_info)
elif name == 'skb__skb_copy_datagram_iovec':
handle_skb_copy_datagram_iovec(event_info)
elif name == 'net__net_dev_queue':
handle_net_dev_queue(event_info)
elif name == 'net__net_dev_xmit':
handle_net_dev_xmit(event_info)
elif name == 'skb__kfree_skb':
handle_kfree_skb(event_info)
elif name == 'skb__consume_skb':
handle_consume_skb(event_info)
# display receive hunks
if show_rx:
for i in range(len(receive_hunk_list)):
print_receive(receive_hunk_list[i])
# display transmit hunks
if show_tx:
print " dev len Qdisc " \
" netdevice free"
for i in range(len(tx_free_list)):
print_transmit(tx_free_list[i])
if debug:
print "debug buffer status"
print "----------------------------"
print "xmit Qdisc:remain:%d overflow:%d" % \
(len(tx_queue_list), of_count_tx_queue_list)
print "xmit netdevice:remain:%d overflow:%d" % \
(len(tx_xmit_list), of_count_tx_xmit_list)
print "receive:remain:%d overflow:%d" % \
(len(rx_skb_list), of_count_rx_skb_list)
# called from perf when it finds a corresponding event
def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
irq, irq_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
irq, irq_name)
all_event_list.append(event_info)
def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, irq, ret):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret)
all_event_list.append(event_info)
def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, napi, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
napi, dev_name)
all_event_list.append(event_info)
def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen, rc, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, rc ,dev_name)
all_event_list.append(event_info)
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
skbaddr, protocol, location):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, protocol, location)
all_event_list.append(event_info)
def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr)
all_event_list.append(event_info)
def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen)
all_event_list.append(event_info)
def handle_irq_handler_entry(event_info):
(name, context, cpu, time, pid, comm, irq, irq_name) = event_info
if cpu not in irq_dic.keys():
irq_dic[cpu] = []
irq_record = {'irq':irq, 'name':irq_name, 'cpu':cpu, 'irq_ent_t':time}
irq_dic[cpu].append(irq_record)
def handle_irq_handler_exit(event_info):
(name, context, cpu, time, pid, comm, irq, ret) = event_info
if cpu not in irq_dic.keys():
return
irq_record = irq_dic[cpu].pop()
if irq != irq_record['irq']:
return
irq_record.update({'irq_ext_t':time})
# if an irq doesn't include NET_RX softirq, drop.
if 'event_list' in irq_record.keys():
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_raise(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'sirq_raise'})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_entry(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]}
def handle_irq_softirq_exit(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
irq_list = []
event_list = 0
if cpu in irq_dic.keys():
irq_list = irq_dic[cpu]
del irq_dic[cpu]
if cpu in net_rx_dic.keys():
sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t']
event_list = net_rx_dic[cpu]['event_list']
del net_rx_dic[cpu]
if irq_list == [] or event_list == 0:
return
rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time,
'irq_list':irq_list, 'event_list':event_list}
	# merge information related to a NET_RX softirq
receive_hunk_list.append(rec_data)
def handle_napi_poll(event_info):
(name, context, cpu, time, pid, comm, napi, dev_name) = event_info
if cpu in net_rx_dic.keys():
event_list = net_rx_dic[cpu]['event_list']
rec_data = {'event_name':'napi_poll',
'dev':dev_name, 'event_t':time}
event_list.append(rec_data)
def handle_netif_rx(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'netif_rx',
'skbaddr':skbaddr, 'skblen':skblen, 'dev_name':dev_name})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_netif_receive_skb(event_info):
global of_count_rx_skb_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu in net_rx_dic.keys():
rec_data = {'event_name':'netif_receive_skb',
'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
event_list = net_rx_dic[cpu]['event_list']
event_list.append(rec_data)
rx_skb_list.insert(0, rec_data)
if len(rx_skb_list) > buffer_budget:
rx_skb_list.pop()
of_count_rx_skb_list += 1
def handle_net_dev_queue(event_info):
global of_count_tx_queue_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
skb = {'dev':dev_name, 'skbaddr':skbaddr, 'len':skblen, 'queue_t':time}
tx_queue_list.insert(0, skb)
if len(tx_queue_list) > buffer_budget:
tx_queue_list.pop()
of_count_tx_queue_list += 1
def handle_net_dev_xmit(event_info):
global of_count_tx_xmit_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, rc, dev_name) = event_info
if rc == 0: # NETDEV_TX_OK
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
skb['xmit_t'] = time
tx_xmit_list.insert(0, skb)
del tx_queue_list[i]
if len(tx_xmit_list) > buffer_budget:
tx_xmit_list.pop()
of_count_tx_xmit_list += 1
return
def handle_kfree_skb(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, protocol, location) = event_info
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
del tx_queue_list[i]
return
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if rec_data['skbaddr'] == skbaddr:
rec_data.update({'handle':"kfree_skb",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
def handle_consume_skb(event_info):
(name, context, cpu, time, pid, comm, skbaddr) = event_info
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
def handle_skb_copy_datagram_iovec(event_info):
(name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if skbaddr == rec_data['skbaddr']:
rec_data.update({'handle':"skb_copy_datagram_iovec",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
| gpl-2.0 |
PhyloStar/PyBayes | params_moves.py | 1 | 1027 | import numpy as np
from scipy.stats import dirichlet
import random, math
dir_alpha = 100.0
scaler_alpha = 1.25
epsilon = 1e-10
def mvDirichlet(pi):
pi_new = dirichlet.rvs(dir_alpha*pi)[0]
#print(pi, pi_new)
hastings_ratio = dirichlet.logpdf(pi, pi_new) - dirichlet.logpdf(pi_new, pi)
return pi_new, hastings_ratio
def mvDualSlider(pi):
i, j = random.sample(range(pi.shape[0]),2 )
sum_ij = pi[i]+pi[j]
x = random.uniform(epsilon, sum_ij)
y = sum_ij -x
pi[i], pi[j] = x, y
return pi, 0.0
def mvScaler(x):
log_c = scaler_alpha*(np.random.uniform()-0.5)
c = math.exp(log_c)
x_new = x*c
return x_new, log_c
def mvVecScaler(X):
log_c = scaler_alpha*(np.random.uniform()-0.5)
c = math.exp(log_c)
X_new = X*c
return X_new, log_c
def mvSlider(x, a, b):
""" a and b are bounds
"""
x_hat = np.random.uniform(x-0.5, x+0.5)
if x_hat < a:
return 2.0*a -x_hat
    elif x_hat > b:
return 2.0*b -x_hat
else:
return x_hat
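# Minimal usage sketch (not part of the original module; values illustrative):
if __name__ == "__main__":
    pi0 = np.array([0.2, 0.3, 0.5])
    pi1, log_hr = mvDirichlet(pi0)      # Dirichlet proposal + log Hastings ratio
    print(pi1, log_hr)
    x1, log_c = mvScaler(2.0)           # multiplicative scaler proposal
    print(x1, log_c)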
| gpl-2.0 |
gangadhar-kadam/verve_test_erp | erpnext/patches/v4_0/apply_user_permissions.py | 87 | 1999 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from erpnext.hr.doctype.employee.employee import EmployeeUserDisabledError
def execute():
update_hr_permissions()
update_permissions()
remove_duplicate_user_permissions()
frappe.clear_cache()
def update_hr_permissions():
from frappe.core.page.user_permissions import user_permissions
# add set user permissions rights to HR Manager
frappe.db.sql("""update `tabDocPerm` set `set_user_permissions`=1 where parent in ('Employee', 'Leave Application')
and role='HR Manager' and permlevel=0 and `read`=1""")
# apply user permissions on Employee and Leave Application
frappe.db.sql("""update `tabDocPerm` set `apply_user_permissions`=1 where parent in ('Employee', 'Leave Application')
and role in ('Employee', 'Leave Approver') and permlevel=0 and `read`=1""")
frappe.clear_cache()
# save employees to run on_update events
for employee in frappe.db.sql_list("""select name from `tabEmployee` where docstatus < 2"""):
try:
emp = frappe.get_doc("Employee", employee)
emp.flags.ignore_mandatory = True
emp.save()
except EmployeeUserDisabledError:
pass
def update_permissions():
# clear match conditions other than owner
frappe.db.sql("""update tabDocPerm set `match`=''
where ifnull(`match`,'') not in ('', 'owner')""")
def remove_duplicate_user_permissions():
# remove duplicate user_permissions (if they exist)
for d in frappe.db.sql("""select parent, defkey, defvalue,
count(*) as cnt from tabDefaultValue
where parent not in ('__global', '__default')
group by parent, defkey, defvalue""", as_dict=1):
if d.cnt > 1:
# order by parenttype so that user permission does not get removed!
frappe.db.sql("""delete from tabDefaultValue where `parent`=%s and `defkey`=%s and
`defvalue`=%s order by parenttype limit %s""", (d.parent, d.defkey, d.defvalue, d.cnt-1))
| agpl-3.0 |
tino1b2be/LARMAS | suit/widgets.py | 10 | 4988 | from django.contrib.admin.widgets import AdminTimeWidget, AdminDateWidget
from django.forms import TextInput, Select, Textarea
from django.utils.safestring import mark_safe
from django import forms
from django.utils.translation import ugettext as _
from django.contrib.admin.templatetags.admin_static import static
class NumberInput(TextInput):
"""
HTML5 Number input
Left for backwards compatibility
"""
input_type = 'number'
class HTML5Input(TextInput):
"""
Supports any HTML5 input
http://www.w3schools.com/html/html5_form_input_types.asp
"""
def __init__(self, attrs=None, input_type=None):
self.input_type = input_type
super(HTML5Input, self).__init__(attrs)
#
class LinkedSelect(Select):
"""
Linked select - Adds link to foreign item, when used with foreign key field
"""
def __init__(self, attrs=None, choices=()):
attrs = _make_attrs(attrs, classes="linked-select")
super(LinkedSelect, self).__init__(attrs, choices)
class EnclosedInput(TextInput):
"""
Widget for bootstrap appended/prepended inputs
"""
def __init__(self, attrs=None, prepend=None, append=None):
"""
For prepend, append parameters use string like %, $ or html
"""
self.prepend = prepend
self.append = append
super(EnclosedInput, self).__init__(attrs=attrs)
def enclose_value(self, value):
"""
        If value doesn't start with an HTML open sign "<", enclose it in an add-on tag
"""
if value.startswith("<"):
return value
if value.startswith("icon-"):
value = '<i class="%s"></i>' % value
return '<span class="add-on">%s</span>' % value
def render(self, name, value, attrs=None):
output = super(EnclosedInput, self).render(name, value, attrs)
div_classes = []
if self.prepend:
div_classes.append('input-prepend')
self.prepend = self.enclose_value(self.prepend)
output = ''.join((self.prepend, output))
if self.append:
div_classes.append('input-append')
self.append = self.enclose_value(self.append)
output = ''.join((output, self.append))
return mark_safe(
'<div class="%s">%s</div>' % (' '.join(div_classes), output))
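# Usage sketch (values illustrative): render an input with a prepended
# currency symbol and an appended icon add-on:
#   price_widget = EnclosedInput(prepend='$', append='icon-tag')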
class AutosizedTextarea(Textarea):
"""
Autosized Textarea - textarea height dynamically grows based on user input
"""
def __init__(self, attrs=None):
new_attrs = _make_attrs(attrs, {"rows": 2}, "autosize")
super(AutosizedTextarea, self).__init__(new_attrs)
@property
def media(self):
return forms.Media(js=[static("suit/js/jquery.autosize-min.js")])
def render(self, name, value, attrs=None):
output = super(AutosizedTextarea, self).render(name, value, attrs)
output += mark_safe(
"<script type=\"text/javascript\">Suit.$('#id_%s').autosize();</script>"
% name)
return output
#
# Original date widgets with addition html
#
class SuitDateWidget(AdminDateWidget):
def __init__(self, attrs=None, format=None):
defaults = {'placeholder': _('Date:')[:-1]}
new_attrs = _make_attrs(attrs, defaults, "vDateField input-small")
super(SuitDateWidget, self).__init__(attrs=new_attrs, format=format)
def render(self, name, value, attrs=None):
output = super(SuitDateWidget, self).render(name, value, attrs)
return mark_safe(
'<div class="input-append suit-date">%s<span '
'class="add-on"><i class="icon-calendar"></i></span></div>' %
output)
class SuitTimeWidget(AdminTimeWidget):
def __init__(self, attrs=None, format=None):
defaults = {'placeholder': _('Time:')[:-1]}
new_attrs = _make_attrs(attrs, defaults, "vTimeField input-small")
super(SuitTimeWidget, self).__init__(attrs=new_attrs, format=format)
def render(self, name, value, attrs=None):
output = super(SuitTimeWidget, self).render(name, value, attrs)
return mark_safe(
'<div class="input-append suit-date suit-time">%s<span '
'class="add-on"><i class="icon-time"></i></span></div>' %
output)
class SuitSplitDateTimeWidget(forms.SplitDateTimeWidget):
"""
A SplitDateTime Widget that has some admin-specific styling.
"""
def __init__(self, attrs=None):
widgets = [SuitDateWidget, SuitTimeWidget]
forms.MultiWidget.__init__(self, widgets, attrs)
def format_output(self, rendered_widgets):
out_tpl = '<div class="datetime">%s %s</div>'
return mark_safe(out_tpl % (rendered_widgets[0], rendered_widgets[1]))
def _make_attrs(attrs, defaults=None, classes=None):
result = defaults.copy() if defaults else {}
if attrs:
result.update(attrs)
if classes:
result["class"] = " ".join((classes, result.get("class", "")))
return result
| agpl-3.0 |
offbye/paparazzi | sw/tools/calibration/calibrate_gyro.py | 87 | 4686 | #! /usr/bin/env python
# Copyright (C) 2010 Antoine Drouin
#
# This file is part of Paparazzi.
#
# Paparazzi is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# Paparazzi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Paparazzi; see the file COPYING. If not, write to
# the Free Software Foundation, 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
#
#
# calibrate gyrometers using turntable measurements
#
from __future__ import print_function, division
from optparse import OptionParser
import os
import sys
from scipy import linspace, polyval, stats
import matplotlib.pyplot as plt
import calibration_utils
#
# lisa 3
# p : a=-4511.16 b=31948.34, std error= 0.603
# q : a=-4598.46 b=31834.48, std error= 0.734
# r : a=-4525.63 b=32687.95, std error= 0.624
#
# lisa 4
# p : a=-4492.05 b=32684.94, std error= 0.600
# q : a=-4369.63 b=33260.96, std error= 0.710
# r : a=-4577.13 b=32707.72, std error= 0.730
#
# crista
# p : a= 3864.82 b=31288.09, std error= 0.866
# q : a= 3793.71 b=32593.89, std error= 3.070
# r : a= 3817.11 b=32709.70, std error= 3.296
#
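# Example invocation (ids and log file are placeholders):
#   ./calibrate_gyro.py --id 42 --tt_id 43 --axis p flight_log.data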
def main():
usage = "usage: %prog --id <ac_id> --tt_id <tt_id> --axis <axis> [options] log_filename.data" + "\n" + "Run %prog --help to list the options."
parser = OptionParser(usage)
parser.add_option("-i", "--id", dest="ac_id",
action="store", type=int, default=-1,
help="aircraft id to use")
parser.add_option("-t", "--tt_id", dest="tt_id",
action="store", type=int, default=-1,
help="turntable id to use")
parser.add_option("-a", "--axis", dest="axis",
type="choice", choices=['p', 'q', 'r'],
help="axis to calibrate (p, q, r)",
action="store")
parser.add_option("-v", "--verbose",
action="store_true", dest="verbose")
(options, args) = parser.parse_args()
if len(args) != 1:
parser.error("incorrect number of arguments")
else:
if os.path.isfile(args[0]):
filename = args[0]
else:
print(args[0] + " not found")
sys.exit(1)
if not filename.endswith(".data"):
parser.error("Please specify a *.data log file")
if options.ac_id < 0 or options.ac_id > 255:
parser.error("Specify a valid aircraft id number!")
if options.tt_id < 0 or options.tt_id > 255:
parser.error("Specify a valid turntable id number!")
if options.verbose:
print("reading file "+filename+" for aircraft "+str(options.ac_id)+" and turntable "+str(options.tt_id))
samples = calibration_utils.read_turntable_log(options.ac_id, options.tt_id, filename, 1, 7)
if len(samples) == 0:
print("Error: found zero matching messages in log file!")
print("Was looking for IMU_TURNTABLE from id: "+str(options.tt_id)+" and IMU_GYRO_RAW from id: "+str(options.ac_id)+" in file "+filename)
sys.exit(1)
if options.verbose:
print("found "+str(len(samples))+" records")
if options.axis == 'p':
axis_idx = 1
elif options.axis == 'q':
axis_idx = 2
elif options.axis == 'r':
axis_idx = 3
else:
parser.error("Specify a valid axis!")
#Linear regression using stats.linregress
t = samples[:, 0]
xn = samples[:, axis_idx]
(a_s, b_s, r, tt, stderr) = stats.linregress(t, xn)
print('Linear regression using stats.linregress')
print(('regression: a=%.2f b=%.2f, std error= %.3f' % (a_s, b_s, stderr)))
print(('<define name="GYRO_X_NEUTRAL" value="%d"/>' % (b_s)))
print(('<define name="GYRO_X_SENS" value="%f" integer="16"/>' % (pow(2, 12)/a_s)))
#
# overlay fited value
#
ovl_omega = linspace(1, 7.5, 10)
ovl_adc = polyval([a_s, b_s], ovl_omega)
plt.title('Linear Regression Example')
plt.subplot(3, 1, 1)
plt.plot(samples[:, 1])
plt.plot(samples[:, 2])
plt.plot(samples[:, 3])
plt.legend(['p', 'q', 'r'])
plt.subplot(3, 1, 2)
plt.plot(samples[:, 0])
plt.subplot(3, 1, 3)
plt.plot(samples[:, 0], samples[:, axis_idx], 'b.')
plt.plot(ovl_omega, ovl_adc, 'r')
plt.show()
if __name__ == "__main__":
main()
| gpl-2.0 |
spektom/incubator-airflow | tests/dags/subdir2/test_dont_ignore_this.py | 5 | 1147 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from datetime import datetime
from airflow.models import DAG
from airflow.operators.bash import BashOperator
DEFAULT_DATE = datetime(2019, 12, 1)
dag = DAG(dag_id='test_dag_under_subdir2', start_date=DEFAULT_DATE, schedule_interval=None)
task = BashOperator(
task_id='task1',
bash_command='echo "test dag under sub directory subdir2"',
dag=dag)
| apache-2.0 |
hakuna-m/wubiuefi | src/pypack/altgraph/Dot.py | 9 | 8425 | '''
Interface to the dot language
=============================
The B{Dot} module provides a simple interface to the
file format used in the U{graphviz<http://www.research.att.com/sw/tools/graphviz/>}
program. The module is intended to offload the most tedious part of the process
(the B{dot} file generation) while transparently exposing most of its features.
To display the graphs or to generate image files the U{graphviz<http://www.research.att.com/sw/tools/graphviz/>}
package needs to be installed on the system; moreover, the C{dot} and C{dotty} programs must
be accessible in the program path so that they can be run from processes spawned
within the module. See the L{Dot} documentation for further information on the setup.
Example usage
-------------
Here is a typical usage::
from altgraph import Graph, Dot
# create a graph
edges = [ (1,2), (1,3), (3,4), (3,5), (4,5), (5,4) ]
graph = Graph.Graph(edges)
# create a dot representation of the graph
dot = Dot.Dot(graph)
# display the graph
dot.display()
# save the dot representation into the mydot.dot file
dot.save_dot(file_name='mydot.dot')
# save dot file as gif image into the graph.gif file
dot.save_img(file_name='graph', file_type='gif')
Customizing the output
----------------------
The graph drawing process may be customized by passing
valid B{dot} parameters for the nodes and edges. For a list of all
parameters see the U{graphviz<http://www.research.att.com/sw/tools/graphviz/>}
documentation.
Example::
# customizing the way the overall graph is drawn
dot.style(size='10,10', rankdir='RL', page='5, 5' , ranksep=0.75)
# customizing node drawing
dot.node_style(1, label='BASE_NODE',shape='box', color='blue' )
dot.node_style(2, style='filled', fillcolor='red')
# customizing edge drawing
dot.edge_style(1, 2, style='dotted')
dot.edge_style(3, 5, arrowhead='dot', label='binds', labelangle='90')
dot.edge_style(4, 5, arrowsize=2, style='bold')
B{Observation}: dotty (invoked via L{Dot.display}) may not be able to
display all graphics styles. To verify the output save it to an image file
and look at it that way.
Valid attributes
----------------
- dot styles, passed via the L{Dot.style} method::
rankdir = 'LR' (draws the graph horizontally, left to right)
ranksep = number (rank separation in inches)
- node attributes, passed via the L{Dot.node_style} method::
style = 'filled' | 'invisible' | 'diagonals' | 'rounded'
shape = 'box' | 'ellipse' | 'circle' | 'point' | 'triangle'
- edge attributes, passed via the L{Dot.edge_style} method::
style = 'dashed' | 'dotted' | 'solid' | 'invis' | 'bold'
arrowhead = 'box' | 'crow' | 'diamond' | 'dot' | 'inv' | 'none' | 'tee' | 'vee'
weight = number (the larger the number the closer the nodes will be)
- valid U{graphviz colors<http://www.research.att.com/~erg/graphviz/info/colors.html>}
- for more details on how to control the graph drawing process see the
U{graphviz reference <http://www.research.att.com/sw/tools/graphviz/refs.html>}.
'''
import os
from altgraph import GraphError
from altgraph.compat import *
class Dot(object):
'''
A class providing a B{graphviz} (dot language) representation
allowing a fine grained control over how the graph is being
displayed.
If the C{dot} and C{dotty} programs are not in the current system path
their location needs to be specified in the L{constructor<__init__>}.
For detailed example usage see the L{Dot} module documentation.
'''
def __init__(self, graph=None, nodes=None, edgefn=None, nodevisitor=None, edgevisitor=None, name="G", dot='dot', dotty='dotty', neato='neato'):
'''
Initialization.
'''
self.name, self.attr = name, {}
self.temp_dot = "tmp_dot.dot"
self.temp_neo = "tmp_neo.dot"
self.dot, self.dotty, self.neato = dot, dotty, neato
self.nodes, self.edges = {}, {}
if graph is not None and nodes is None:
nodes = graph
if graph is not None and edgefn is None:
def edgefn(node, graph=graph):
return imap(graph.tail, graph.out_edges(node))
if nodes is None:
nodes = ()
seen = set()
for node in nodes:
if nodevisitor is None:
style = {}
else:
style = nodevisitor(node)
if style is not None:
self.node_style(node, **style)
seen.add(node)
if edgefn is not None:
for head in seen:
for tail in ifilter(seen.__contains__, edgefn(head)):
if edgevisitor is None:
edgestyle = {}
else:
edgestyle = edgevisitor(head, tail)
if edgestyle is not None:
self.edge_style(head, tail, **edgestyle)
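        # Construction sketch without a Graph object (values illustrative):
        #   dot = Dot(nodes=[1, 2, 3],
        #             edgefn=lambda n: [n + 1] if n < 3 else [])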
def style(self, **attr):
'''
Changes the overall style
'''
self.attr = attr
def display(self, mode='dot'):
'''
Displays the current graph via dotty
'''
if mode == 'neato':
self.save_dot(self.temp_neo)
neato_cmd = "%s -o %s %s" % (self.neato, self.temp_dot, self.temp_neo)
os.system(neato_cmd)
else:
self.save_dot(self.temp_dot)
plot_cmd = "%s %s" % (self.dotty, self.temp_dot)
os.system(plot_cmd)
def node_style(self, node, **kwargs):
'''
Modifies a node style to the dot representation.
'''
if node not in self.edges:
self.edges[node] = {}
self.nodes[node] = kwargs
def all_node_style(self, **kwargs):
'''
Modifies all node styles
'''
for node in self.nodes:
self.node_style(node, **kwargs)
def edge_style(self, head, tail, **kwargs):
'''
Modifies an edge style to the dot representation.
'''
try:
            self.edges[head][tail] = kwargs
except KeyError:
raise GraphError("invalid edge %s -> %s " % (head, tail) )
def iterdot(self):
# write graph title
yield 'digraph %s {\n' % (self.name,)
# write overall graph attributes
for attr_name, attr_value in self.attr.iteritems():
yield '%s="%s";' % (attr_name, attr_value)
yield '\n'
# some reusable patterns
cpatt = '%s="%s",' # to separate attributes
epatt = '];\n' # to end attributes
# write node attributes
for node_name, node_attr in self.nodes.iteritems():
yield '\t"%s" [' % (node_name,)
for attr_name, attr_value in node_attr.iteritems():
yield cpatt % (attr_name, attr_value)
yield epatt
# write edge attributes
for head in self.edges:
for tail in self.edges[head]:
yield '\t"%s" -> "%s" [' % (head, tail)
for attr_name, attr_value in self.edges[head][tail].iteritems():
yield cpatt % (attr_name, attr_value)
yield epatt
# finish file
yield '}\n'
def __iter__(self):
return self.iterdot()
def save_dot(self, file_name=None):
'''
Saves the current graph representation into a file
'''
if not file_name:
file_name = self.temp_dot
fp = open(file_name, "w")
write = fp.write
for chunk in self.iterdot():
write(chunk)
fp.close()
def save_img(self, file_name="out", file_type="gif", mode='dot'):
'''
Saves the dot file as an image file
'''
if mode == 'neato':
self.save_dot(self.temp_neo)
neato_cmd = "%s -o %s %s" % (self.neato, self.temp_dot, self.temp_neo)
os.system(neato_cmd)
plot_cmd = self.neato
else:
self.save_dot(self.temp_dot)
plot_cmd = self.dot
file_name = "%s.%s" % (file_name, file_type)
create_cmd = "%s -T%s %s -o %s" % (plot_cmd, file_type, self.temp_dot, file_name)
os.system(create_cmd)
| gpl-2.0 |
propublica/Capitol-Words | capitolweb/scraper/management/commands/run_crec_scraper.py | 2 | 1705 | import logging
from datetime import datetime
from datetime import timedelta
from django.conf import settings
from django.core.management.base import BaseCommand
from scraper.models import CRECScraperResult
from scraper.crec_scraper import CRECScraper
class Command(BaseCommand):
help = 'Scrapes CREC from gpo.gov for the given date range and stores in s3.'
def add_arguments(self, parser):
parser.add_argument(
'--debug',
help='If true, will not upload to es and instead print to stdout.',
default=False,
)
parser.add_argument(
'--start_date',
help='Start of date range to pull data for, inclusive.',
type=lambda d: datetime.strptime(d, '%Y-%m-%d'),
)
parser.add_argument(
'--end_date',
help='End of date range to pull data for, exclusive.',
type=lambda d: datetime.strptime(d, '%Y-%m-%d'),
default=None,
)
parser.add_argument(
'--s3_bucket',
help='Location of crec data.',
default=settings.CREC_STAGING_S3_BUCKET
)
def handle(self, *args, **options):
start_date = options['start_date']
if options['end_date'] is None:
end_date = datetime.utcnow()
else:
end_date = options['end_date']
        start_date = start_date.replace(hour=0, minute=0, second=0, microsecond=0)
        end_date = end_date.replace(hour=0, minute=0, second=0, microsecond=0)
scraper = CRECScraper(options['s3_bucket'])
while start_date < end_date:
result = scraper.scrape_files_for_date(start_date)
start_date += timedelta(days=1)
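# Example invocation (dates are placeholders):
#   python manage.py run_crec_scraper --start_date 2017-01-01 --end_date 2017-01-08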
| bsd-3-clause |
Vixionar/django | django/contrib/gis/db/backends/oracle/introspection.py | 539 | 1977 | import sys
import cx_Oracle
from django.db.backends.oracle.introspection import DatabaseIntrospection
from django.utils import six
class OracleIntrospection(DatabaseIntrospection):
# Associating any OBJECTVAR instances with GeometryField. Of course,
# this won't work right on Oracle objects that aren't MDSYS.SDO_GEOMETRY,
    # but it is the only object type supported within Django anyway.
data_types_reverse = DatabaseIntrospection.data_types_reverse.copy()
data_types_reverse[cx_Oracle.OBJECT] = 'GeometryField'
def get_geometry_type(self, table_name, geo_col):
cursor = self.connection.cursor()
try:
# Querying USER_SDO_GEOM_METADATA to get the SRID and dimension information.
try:
cursor.execute(
'SELECT "DIMINFO", "SRID" FROM "USER_SDO_GEOM_METADATA" '
'WHERE "TABLE_NAME"=%s AND "COLUMN_NAME"=%s',
(table_name.upper(), geo_col.upper())
)
row = cursor.fetchone()
except Exception as msg:
new_msg = (
'Could not find entry in USER_SDO_GEOM_METADATA '
'corresponding to "%s"."%s"\n'
'Error message: %s.') % (table_name, geo_col, msg)
six.reraise(Exception, Exception(new_msg), sys.exc_info()[2])
# TODO: Research way to find a more specific geometry field type for
# the column's contents.
field_type = 'GeometryField'
# Getting the field parameters.
field_params = {}
dim, srid = row
if srid != 4326:
field_params['srid'] = srid
# Length of object array ( SDO_DIM_ARRAY ) is number of dimensions.
dim = len(dim)
if dim != 2:
field_params['dim'] = dim
finally:
cursor.close()
return field_type, field_params
| bsd-3-clause |
The-WebOps-Club/odia-forum | pybbm_tag/views.py | 1 | 2818 | from django.shortcuts import render
from pybb.models import *
from pybbm_tag.models import Tag
from pybb.views import ForumView,AddPostView,EditPostView,TopicView
def add_tag(request,**kwargs):
# check permissions before calling this function
# in kwargs we expect the LABEL of the tag to add(not object) and the TOPIC object(not name).
topic = kwargs['topic']
tagname = kwargs['tag']
lst = Tag.objects.filter(label = tagname)
if not lst.count() == 0:
lst[0].topics.add(topic)
lst[0].save()
else:
tag = Tag(label = tagname,desc="Empty")
tag.save()
tag.topics.add(topic)
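# Usage sketch (objects are illustrative): callers pass the tag LABEL and the
# Topic instance as keyword arguments, e.g.
#   add_tag(request, topic=some_topic, tag='announcements')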
def remove_all_tags(request,**kwargs):
topic = kwargs['topic']
for i in Tag.objects.filter(topics__in = [topic]):
i.topics.remove(topic)
def remove_tag(request,**kwargs):
# check permissions before calling this function.
topic = kwargs['topic']
tagname = kwargs['tag']
    lst = Tag.objects.filter(label = tagname)
    if lst.count() > 0:
        lst[0].topics.remove(topic)
# tag additions to the views that are affected by tags.
class AddPostViewWrapper(AddPostView):
def post(self, request, *args, **kwargs):
        ret = super(AddPostViewWrapper, self).post(request, *args, **kwargs)
        try:
            taglist = request.POST['taglist'].split('+')
            for i in taglist:
                add_tag(request, topic=self.object.topic, tag=i)
        except KeyError:
            pass
        return ret
def get_context_data(self,**kwargs):
ctx = super(AddPostViewWrapper, self).get_context_data(**kwargs)
if ctx['forum']:
ctx['taglist_input'] = 1
return ctx
class ForumViewWrapper(ForumView):
def get_context_data(self):
ctx = super(ForumViewWrapper, self).get_context_data()
topic_list = ctx['topic_list']
tags = []
for i in topic_list:
tags.append(Tag.objects.filter(topics__in = [i]))
ctx['tags'] = Tag.objects.all()
return ctx
class TopicViewWrapper(TopicView):
def get_context_data(self):
ctx = super(TopicViewWrapper, self).get_context_data()
ctx['tags'] = Tag.objects.all()
return ctx
class EditPostViewWrapper(EditPostView):
def post(self, request, *args, **kwargs):
ret = super(EditPostViewWrapper, self).post(request, *args, **kwargs)
try:
taglist = request.POST['taglist'].split('+')
remove_all_tags(request, topic=self.object.topic)
for i in taglist:
add_tag(request, topic=self.object.topic, tag=i)
except KeyError:
pass
return ret
    def make_tag_string(self, topic):
        # Join tag labels with '+' (same result as the original accumulate-and-trim loop)
        return "+".join(t.label for t in Tag.objects.filter(topics__in=[topic]))
def get_context_data(self, **kwargs):
ctx = super(EditPostViewWrapper, self).get_context_data(**kwargs)
post = ctx['post']
if post.topic.user == self.request.user:
ctx['taglist_input'] = 1
ctx['taglist_initial'] = self.make_tag_string(post.topic)
return ctx | gpl-2.0 |
Gaia3D/QGIS | python/ext-libs/owslib/wms.py | 28 | 23814 | # -*- coding: ISO-8859-15 -*-
# =============================================================================
# Copyright (c) 2004, 2006 Sean C. Gillies
# Copyright (c) 2005 Nuxeo SARL <http://nuxeo.com>
#
# Authors : Sean Gillies <[email protected]>
# Julien Anguenot <[email protected]>
#
# Contact email: [email protected]
# =============================================================================
"""
API for Web Map Service (WMS) methods and metadata.
Currently supports only version 1.1.1 of the WMS protocol.
"""
import cgi
import urllib2
from urllib import urlencode
import warnings
from etree import etree
from .util import openURL, testXMLValue, extract_xml_list, xmltag_split
from fgdc import Metadata
from iso import MD_Metadata
class ServiceException(Exception):
"""WMS ServiceException
Attributes:
message -- short error message
xml -- full xml error message from server
"""
def __init__(self, message, xml):
self.message = message
self.xml = xml
def __str__(self):
return repr(self.message)
class CapabilitiesError(Exception):
pass
class WebMapService(object):
"""Abstraction for OGC Web Map Service (WMS).
Implements IWebMapService.
"""
def __getitem__(self,name):
''' check contents dictionary to allow dict like access to service layers'''
if name in self.__getattribute__('contents').keys():
return self.__getattribute__('contents')[name]
else:
raise KeyError, "No content named %s" % name
def __init__(self, url, version='1.1.1', xml=None,
username=None, password=None, parse_remote_metadata=False
):
"""Initialize."""
self.url = url
self.username = username
self.password = password
self.version = version
self._capabilities = None
# Authentication handled by Reader
reader = WMSCapabilitiesReader(
self.version, url=self.url, un=self.username, pw=self.password
)
if xml: # read from stored xml
self._capabilities = reader.readString(xml)
else: # read from server
self._capabilities = reader.read(self.url)
# avoid building capabilities metadata if the response is a ServiceExceptionReport
se = self._capabilities.find('ServiceException')
if se is not None:
err_message = str(se.text).strip()
raise ServiceException(err_message, xml)
# build metadata objects
self._buildMetadata(parse_remote_metadata)
def _getcapproperty(self):
if not self._capabilities:
reader = WMSCapabilitiesReader(
self.version, url=self.url, un=self.username, pw=self.password
)
self._capabilities = ServiceMetadata(reader.read(self.url))
return self._capabilities
def _buildMetadata(self, parse_remote_metadata=False):
''' set up capabilities metadata objects '''
#serviceIdentification metadata
serviceelem=self._capabilities.find('Service')
self.identification=ServiceIdentification(serviceelem, self.version)
#serviceProvider metadata
self.provider=ServiceProvider(serviceelem)
#serviceOperations metadata
self.operations=[]
for elem in self._capabilities.find('Capability/Request')[:]:
self.operations.append(OperationMetadata(elem))
#serviceContents metadata: our assumption is that services use a top-level
#layer as a metadata organizer, nothing more.
self.contents={}
caps = self._capabilities.find('Capability')
#recursively gather content metadata for all layer elements.
#To the WebMapService.contents store only metadata of named layers.
def gather_layers(parent_elem, parent_metadata):
for index, elem in enumerate(parent_elem.findall('Layer')):
cm = ContentMetadata(elem, parent=parent_metadata, index=index+1, parse_remote_metadata=parse_remote_metadata)
if cm.id:
if cm.id in self.contents:
warnings.warn('Content metadata for layer "%s" already exists. Using child layer' % cm.id)
self.contents[cm.id] = cm
gather_layers(elem, cm)
gather_layers(caps, None)
#exceptions
self.exceptions = [f.text for f \
in self._capabilities.findall('Capability/Exception/Format')]
def items(self):
'''supports dict-like items() access'''
items=[]
for item in self.contents:
items.append((item,self.contents[item]))
return items
def getcapabilities(self):
"""Request and return capabilities document from the WMS as a
file-like object.
NOTE: this is effectively redundant now"""
reader = WMSCapabilitiesReader(
self.version, url=self.url, un=self.username, pw=self.password
)
u = self._open(reader.capabilities_url(self.url))
# check for service exceptions, and return
if u.info().gettype() == 'application/vnd.ogc.se_xml':
se_xml = u.read()
se_tree = etree.fromstring(se_xml)
err_message = str(se_tree.find('ServiceException').text).strip()
raise ServiceException(err_message, se_xml)
return u
def getmap(self, layers=None, styles=None, srs=None, bbox=None,
format=None, size=None, time=None, transparent=False,
bgcolor='#FFFFFF',
exceptions='application/vnd.ogc.se_xml',
method='Get',
**kwargs
):
"""Request and return an image from the WMS as a file-like object.
Parameters
----------
layers : list
List of content layer names.
styles : list
Optional list of named styles, must be the same length as the
layers list.
srs : string
A spatial reference system identifier.
bbox : tuple
(left, bottom, right, top) in srs units.
format : string
Output image format such as 'image/jpeg'.
size : tuple
(width, height) in pixels.
transparent : bool
Optional. Transparent background if True.
bgcolor : string
Optional. Image background color.
method : string
Optional. HTTP DCP method name: Get or Post.
**kwargs : extra arguments
anything else e.g. vendor specific parameters
Example
-------
>>> wms = WebMapService('http://giswebservices.massgis.state.ma.us/geoserver/wms', version='1.1.1')
>>> img = wms.getmap(layers=['massgis:GISDATA.SHORELINES_ARC'],\
styles=[''],\
srs='EPSG:4326',\
bbox=(-70.8, 42, -70, 42.8),\
size=(300, 300),\
format='image/jpeg',\
transparent=True)
        >>> out = open('example.jpg', 'wb')
>>> out.write(img.read())
>>> out.close()
"""
try:
base_url = next((m.get('url') for m in self.getOperationByName('GetMap').methods if m.get('type').lower() == method.lower()))
except StopIteration:
base_url = self.url
request = {'version': self.version, 'request': 'GetMap'}
# check layers and styles
assert len(layers) > 0
request['layers'] = ','.join(layers)
if styles:
assert len(styles) == len(layers)
request['styles'] = ','.join(styles)
else:
request['styles'] = ''
# size
request['width'] = str(size[0])
request['height'] = str(size[1])
request['srs'] = str(srs)
request['bbox'] = ','.join([repr(x) for x in bbox])
request['format'] = str(format)
request['transparent'] = str(transparent).upper()
request['bgcolor'] = '0x' + bgcolor[1:7]
request['exceptions'] = str(exceptions)
if time is not None:
request['time'] = str(time)
if kwargs:
for kw in kwargs:
request[kw]=kwargs[kw]
data = urlencode(request)
u = openURL(base_url, data, method, username = self.username, password = self.password)
# check for service exceptions, and return
if u.info()['Content-Type'] == 'application/vnd.ogc.se_xml':
se_xml = u.read()
se_tree = etree.fromstring(se_xml)
err_message = unicode(se_tree.find('ServiceException').text).strip()
raise ServiceException(err_message, se_xml)
return u
def getServiceXML(self):
xml = None
if self._capabilities is not None:
xml = etree.tostring(self._capabilities)
return xml
def getfeatureinfo(self):
raise NotImplementedError
def getOperationByName(self, name):
"""Return a named content item."""
for item in self.operations:
if item.name == name:
return item
raise KeyError, "No operation named %s" % name
class ServiceIdentification(object):
''' Implements IServiceIdentificationMetadata '''
def __init__(self, infoset, version):
self._root=infoset
self.type = testXMLValue(self._root.find('Name'))
self.version = version
self.title = testXMLValue(self._root.find('Title'))
self.abstract = testXMLValue(self._root.find('Abstract'))
self.keywords = extract_xml_list(self._root.findall('KeywordList/Keyword'))
self.accessconstraints = testXMLValue(self._root.find('AccessConstraints'))
self.fees = testXMLValue(self._root.find('Fees'))
class ServiceProvider(object):
''' Implements IServiceProviderMetatdata '''
def __init__(self, infoset):
self._root=infoset
name=self._root.find('ContactInformation/ContactPersonPrimary/ContactOrganization')
if name is not None:
self.name=name.text
else:
self.name=None
self.url=self._root.find('OnlineResource').attrib.get('{http://www.w3.org/1999/xlink}href', '')
#contact metadata
contact = self._root.find('ContactInformation')
## sometimes there is a contact block that is empty, so make
## sure there are children to parse
if contact is not None and contact[:] != []:
self.contact = ContactMetadata(contact)
else:
self.contact = None
def getContentByName(self, name):
"""Return a named content item."""
for item in self.contents:
if item.name == name:
return item
raise KeyError, "No content named %s" % name
def getOperationByName(self, name):
"""Return a named content item."""
for item in self.operations:
if item.name == name:
return item
raise KeyError, "No operation named %s" % name
class ContentMetadata:
"""
Abstraction for WMS layer metadata.
Implements IContentMetadata.
"""
def __init__(self, elem, parent=None, index=0, parse_remote_metadata=False, timeout=30):
if elem.tag != 'Layer':
raise ValueError('%s should be a Layer' % (elem,))
self.parent = parent
if parent:
self.index = "%s.%d" % (parent.index, index)
else:
self.index = str(index)
self.id = self.name = testXMLValue(elem.find('Name'))
# layer attributes
self.queryable = int(elem.attrib.get('queryable', 0))
self.cascaded = int(elem.attrib.get('cascaded', 0))
self.opaque = int(elem.attrib.get('opaque', 0))
self.noSubsets = int(elem.attrib.get('noSubsets', 0))
self.fixedWidth = int(elem.attrib.get('fixedWidth', 0))
self.fixedHeight = int(elem.attrib.get('fixedHeight', 0))
# title is mandatory property
self.title = None
title = testXMLValue(elem.find('Title'))
if title is not None:
self.title = title.strip()
self.abstract = testXMLValue(elem.find('Abstract'))
# bboxes
b = elem.find('BoundingBox')
self.boundingBox = None
if b is not None:
try: #sometimes the SRS attribute is (wrongly) not provided
srs=b.attrib['SRS']
except KeyError:
srs=None
self.boundingBox = (
float(b.attrib['minx']),
float(b.attrib['miny']),
float(b.attrib['maxx']),
float(b.attrib['maxy']),
srs,
)
elif self.parent:
if hasattr(self.parent, 'boundingBox'):
self.boundingBox = self.parent.boundingBox
# ScaleHint
sh = elem.find('ScaleHint')
self.scaleHint = None
if sh is not None:
self.scaleHint = {'min': sh.attrib['min'], 'max': sh.attrib['max']}
attribution = elem.find('Attribution')
if attribution is not None:
self.attribution = dict()
title = attribution.find('Title')
url = attribution.find('OnlineResource')
logo = attribution.find('LogoURL')
if title is not None:
self.attribution['title'] = title.text
if url is not None:
self.attribution['url'] = url.attrib['{http://www.w3.org/1999/xlink}href']
if logo is not None:
self.attribution['logo_size'] = (int(logo.attrib['width']), int(logo.attrib['height']))
self.attribution['logo_url'] = logo.find('OnlineResource').attrib['{http://www.w3.org/1999/xlink}href']
b = elem.find('LatLonBoundingBox')
if b is not None:
self.boundingBoxWGS84 = (
float(b.attrib['minx']),
float(b.attrib['miny']),
float(b.attrib['maxx']),
float(b.attrib['maxy']),
)
elif self.parent:
self.boundingBoxWGS84 = self.parent.boundingBoxWGS84
else:
self.boundingBoxWGS84 = None
#SRS options
self.crsOptions = []
#Copy any parent SRS options (they are inheritable properties)
if self.parent:
self.crsOptions = list(self.parent.crsOptions)
#Look for SRS option attached to this layer
if elem.find('SRS') is not None:
## some servers found in the wild use a single SRS
## tag containing a whitespace separated list of SRIDs
## instead of several SRS tags. hence the inner loop
for srslist in map(lambda x: x.text, elem.findall('SRS')):
if srslist:
for srs in srslist.split():
self.crsOptions.append(srs)
#Get rid of duplicate entries
self.crsOptions = list(set(self.crsOptions))
#Set self.crsOptions to None if the layer (and parents) had no SRS options
if len(self.crsOptions) == 0:
#raise ValueError('%s no SRS available!?' % (elem,))
#Comment by D Lowe.
#Do not raise ValueError as it is possible that a layer is purely a parent layer and does not have SRS specified. Instead set crsOptions to None
# Comment by Jachym:
# Do not set it to None, but to [], which will make the code
# work further. Fixed by anthonybaxter
self.crsOptions=[]
#Styles
self.styles = {}
#Copy any parent styles (they are inheritable properties)
if self.parent:
self.styles = self.parent.styles.copy()
#Get the styles for this layer (items with the same name are replaced)
for s in elem.findall('Style'):
name = s.find('Name')
title = s.find('Title')
if name is None or title is None:
raise ValueError('%s missing name or title' % (s,))
style = { 'title' : title.text }
# legend url
legend = s.find('LegendURL/OnlineResource')
if legend is not None:
style['legend'] = legend.attrib['{http://www.w3.org/1999/xlink}href']
self.styles[name.text] = style
# keywords
self.keywords = [f.text for f in elem.findall('KeywordList/Keyword')]
# timepositions - times for which data is available.
self.timepositions=None
self.defaulttimeposition = None
for extent in elem.findall('Extent'):
if extent.attrib.get("name").lower() =='time':
if extent.text:
self.timepositions=extent.text.split(',')
self.defaulttimeposition = extent.attrib.get("default")
break
# Elevations - available vertical levels
self.elevations=None
for extent in elem.findall('Extent'):
if extent.attrib.get("name").lower() =='elevation':
if extent.text:
self.elevations=extent.text.split(',')
break
# MetadataURLs
self.metadataUrls = []
for m in elem.findall('MetadataURL'):
metadataUrl = {
'type': testXMLValue(m.attrib['type'], attrib=True),
'format': testXMLValue(m.find('Format')),
'url': testXMLValue(m.find('OnlineResource').attrib['{http://www.w3.org/1999/xlink}href'], attrib=True)
}
if metadataUrl['url'] is not None and parse_remote_metadata: # download URL
try:
content = urllib2.urlopen(metadataUrl['url'], timeout=timeout)
doc = etree.parse(content)
if metadataUrl['type'] is not None:
if metadataUrl['type'] == 'FGDC':
metadataUrl['metadata'] = Metadata(doc)
if metadataUrl['type'] == 'TC211':
metadataUrl['metadata'] = MD_Metadata(doc)
except Exception, err:
metadataUrl['metadata'] = None
self.metadataUrls.append(metadataUrl)
# DataURLs
self.dataUrls = []
for m in elem.findall('DataURL'):
dataUrl = {
'format': m.find('Format').text.strip(),
'url': m.find('OnlineResource').attrib['{http://www.w3.org/1999/xlink}href']
}
self.dataUrls.append(dataUrl)
self.layers = []
for child in elem.findall('Layer'):
self.layers.append(ContentMetadata(child, self))
def __str__(self):
return 'Layer Name: %s Title: %s' % (self.name, self.title)
class OperationMetadata:
"""Abstraction for WMS OperationMetadata.
Implements IOperationMetadata.
"""
def __init__(self, elem):
"""."""
self.name = xmltag_split(elem.tag)
# formatOptions
self.formatOptions = [f.text for f in elem.findall('Format')]
self.methods = []
for verb in elem.findall('DCPType/HTTP/*'):
url = verb.find('OnlineResource').attrib['{http://www.w3.org/1999/xlink}href']
self.methods.append({'type' : xmltag_split(verb.tag), 'url': url})
class ContactMetadata:
"""Abstraction for contact details advertised in GetCapabilities.
"""
def __init__(self, elem):
name = elem.find('ContactPersonPrimary/ContactPerson')
if name is not None:
self.name=name.text
else:
self.name=None
email = elem.find('ContactElectronicMailAddress')
if email is not None:
self.email=email.text
else:
self.email=None
self.address = self.city = self.region = None
self.postcode = self.country = None
address = elem.find('ContactAddress')
if address is not None:
street = address.find('Address')
if street is not None: self.address = street.text
city = address.find('City')
if city is not None: self.city = city.text
region = address.find('StateOrProvince')
if region is not None: self.region = region.text
postcode = address.find('PostCode')
if postcode is not None: self.postcode = postcode.text
country = address.find('Country')
if country is not None: self.country = country.text
organization = elem.find('ContactPersonPrimary/ContactOrganization')
if organization is not None: self.organization = organization.text
else:self.organization = None
position = elem.find('ContactPosition')
if position is not None: self.position = position.text
else: self.position = None
class WMSCapabilitiesReader:
"""Read and parse capabilities document into a lxml.etree infoset
"""
def __init__(self, version='1.1.1', url=None, un=None, pw=None):
"""Initialize"""
self.version = version
self._infoset = None
self.url = url
self.username = un
self.password = pw
#if self.username and self.password:
## Provide login information in order to use the WMS server
## Create an OpenerDirector with support for Basic HTTP
## Authentication...
#passman = HTTPPasswordMgrWithDefaultRealm()
#passman.add_password(None, self.url, self.username, self.password)
#auth_handler = HTTPBasicAuthHandler(passman)
#opener = build_opener(auth_handler)
#self._open = opener.open
def capabilities_url(self, service_url):
"""Return a capabilities url
"""
qs = []
if service_url.find('?') != -1:
qs = cgi.parse_qsl(service_url.split('?')[1])
params = [x[0] for x in qs]
if 'service' not in params:
qs.append(('service', 'WMS'))
if 'request' not in params:
qs.append(('request', 'GetCapabilities'))
if 'version' not in params:
qs.append(('version', self.version))
urlqs = urlencode(tuple(qs))
return service_url.split('?')[0] + '?' + urlqs
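        # Illustrative behavior (hypothetical URL):
        #   capabilities_url('http://example.com/wms?map=foo') returns
        #   'http://example.com/wms?map=foo&service=WMS&request=GetCapabilities&version=1.1.1'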
def read(self, service_url):
"""Get and parse a WMS capabilities document, returning an
elementtree instance
service_url is the base url, to which is appended the service,
version, and request parameters
"""
getcaprequest = self.capabilities_url(service_url)
#now split it up again to use the generic openURL function...
spliturl=getcaprequest.split('?')
u = openURL(spliturl[0], spliturl[1], method='Get', username = self.username, password = self.password)
return etree.fromstring(u.read())
def readString(self, st):
"""Parse a WMS capabilities document, returning an elementtree instance
string should be an XML capabilities document
"""
if not isinstance(st, str):
raise ValueError("String must be of type string, not %s" % type(st))
return etree.fromstring(st)
| gpl-2.0 |
mammadori/pyglet | tools/wraptypes/wrap.py | 29 | 9533 | #!/usr/bin/env python
'''Generate a Python ctypes wrapper file for a header file.
Usage example::
wrap.py -lGL -oGL.py /usr/include/GL/gl.h
>>> from GL import *
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
from ctypesparser import *
import textwrap
import sys
class CtypesWrapper(CtypesParser, CtypesTypeVisitor):
file=None
def begin_output(self, output_file, library, link_modules=(),
emit_filenames=(), all_headers=False):
self.library = library
self.file = output_file
self.all_names = []
self.known_types = {}
self.structs = set()
self.enums = set()
self.emit_filenames = emit_filenames
self.all_headers = all_headers
self.linked_symbols = {}
for name in link_modules:
module = __import__(name, globals(), locals(), ['foo'])
for symbol in dir(module):
if symbol not in self.linked_symbols:
self.linked_symbols[symbol] = '%s.%s' % (name, symbol)
self.link_modules = link_modules
self.print_preamble()
self.print_link_modules_imports()
def wrap(self, filename, source=None):
assert self.file, 'Call begin_output first'
self.parse(filename, source)
def end_output(self):
self.print_epilogue()
self.file = None
def does_emit(self, symbol, filename):
return self.all_headers or filename in self.emit_filenames
def print_preamble(self):
import textwrap
import time
print >> self.file, textwrap.dedent("""
'''Wrapper for %(library)s
Generated with:
%(argv)s
Do not modify this file.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import ctypes
from ctypes import *
import pyglet.lib
_lib = pyglet.lib.load_library(%(library)r)
_int_types = (c_int16, c_int32)
if hasattr(ctypes, 'c_int64'):
# Some builds of ctypes apparently do not have c_int64
# defined; it's a pretty good bet that these builds do not
# have 64-bit pointers.
_int_types += (ctypes.c_int64,)
for t in _int_types:
if sizeof(t) == sizeof(c_size_t):
c_ptrdiff_t = t
class c_void(Structure):
# c_void_p is a buggy return type, converting to int, so
# POINTER(None) == c_void_p is actually written as
# POINTER(c_void), so it can be treated as a real pointer.
_fields_ = [('dummy', c_int)]
""" % {
'library': self.library,
'date': time.ctime(),
'class': self.__class__.__name__,
'argv': ' '.join(sys.argv),
}).lstrip()
def print_link_modules_imports(self):
for name in self.link_modules:
print >> self.file, 'import %s' % name
print >> self.file
def print_epilogue(self):
print >> self.file
print >> self.file, '\n'.join(textwrap.wrap(
'__all__ = [%s]' % ', '.join([repr(n) for n in self.all_names]),
width=78,
break_long_words=False))
def handle_ctypes_constant(self, name, value, filename, lineno):
if self.does_emit(name, filename):
print >> self.file, '%s = %r' % (name, value),
print >> self.file, '\t# %s:%d' % (filename, lineno)
self.all_names.append(name)
def handle_ctypes_type_definition(self, name, ctype, filename, lineno):
if self.does_emit(name, filename):
self.all_names.append(name)
if name in self.linked_symbols:
print >> self.file, '%s = %s' % \
(name, self.linked_symbols[name])
else:
ctype.visit(self)
self.emit_type(ctype)
print >> self.file, '%s = %s' % (name, str(ctype)),
print >> self.file, '\t# %s:%d' % (filename, lineno)
else:
self.known_types[name] = (ctype, filename, lineno)
def emit_type(self, t):
t.visit(self)
for s in t.get_required_type_names():
if s in self.known_types:
if s in self.linked_symbols:
print >> self.file, '%s = %s' % (s, self.linked_symbols[s])
else:
s_ctype, s_filename, s_lineno = self.known_types[s]
s_ctype.visit(self)
self.emit_type(s_ctype)
print >> self.file, '%s = %s' % (s, str(s_ctype)),
print >> self.file, '\t# %s:%d' % (s_filename, s_lineno)
del self.known_types[s]
def visit_struct(self, struct):
if struct.tag in self.structs:
return
self.structs.add(struct.tag)
base = {True: 'Union', False: 'Structure'}[struct.is_union]
print >> self.file, 'class struct_%s(%s):' % (struct.tag, base)
print >> self.file, ' __slots__ = ['
if not struct.opaque:
for m in struct.members:
print >> self.file, " '%s'," % m[0]
print >> self.file, ' ]'
# Set fields after completing class, so incomplete structs can be
# referenced within struct.
for name, typ in struct.members:
self.emit_type(typ)
print >> self.file, 'struct_%s._fields_ = [' % struct.tag
if struct.opaque:
print >> self.file, " ('_opaque_struct', c_int)"
self.structs.remove(struct.tag)
else:
for m in struct.members:
print >> self.file, " ('%s', %s)," % (m[0], m[1])
print >> self.file, ']'
print >> self.file
def visit_enum(self, enum):
if enum.tag in self.enums:
return
self.enums.add(enum.tag)
print >> self.file, 'enum_%s = c_int' % enum.tag
for name, value in enum.enumerators:
self.all_names.append(name)
print >> self.file, '%s = %d' % (name, value)
def handle_ctypes_function(self, name, restype, argtypes, filename, lineno):
if self.does_emit(name, filename):
# Also emit any types this func requires that haven't yet been
# written.
self.emit_type(restype)
for a in argtypes:
self.emit_type(a)
self.all_names.append(name)
print >> self.file, '# %s:%d' % (filename, lineno)
print >> self.file, '%s = _lib.%s' % (name, name)
print >> self.file, '%s.restype = %s' % (name, str(restype))
print >> self.file, '%s.argtypes = [%s]' % \
(name, ', '.join([str(a) for a in argtypes]))
print >> self.file
def handle_ctypes_variable(self, name, ctype, filename, lineno):
# This doesn't work.
#self.all_names.append(name)
#print >> self.file, '%s = %s.indll(_lib, %r)' % \
# (name, str(ctype), name)
pass
def main(*argv):
import optparse
import sys
import os.path
usage = 'usage: %prog [options] <header.h>'
op = optparse.OptionParser(usage=usage)
op.add_option('-o', '--output', dest='output',
help='write wrapper to FILE', metavar='FILE')
op.add_option('-l', '--library', dest='library',
help='link to LIBRARY', metavar='LIBRARY')
op.add_option('-D', '--define', dest='defines', default=[],
help='define token NAME=VALUE', action='append')
op.add_option('-i', '--include-file', action='append', dest='include_files',
                  help='assume FILE is included', metavar='FILE',
default=[])
op.add_option('-I', '--include-dir', action='append', dest='include_dirs',
help='add DIR to include search path', metavar='DIR',
default=[])
op.add_option('-m', '--link-module', action='append', dest='link_modules',
help='use symbols from MODULE', metavar='MODULE',
default=[])
op.add_option('-a', '--all-headers', action='store_true',
dest='all_headers',
help='include symbols from all headers', default=False)
(options, args) = op.parse_args(list(argv[1:]))
if len(args) < 1:
print >> sys.stderr, 'No header files specified.'
sys.exit(1)
headers = args
if options.library is None:
options.library = os.path.splitext(headers[0])[0]
if options.output is None:
options.output = '%s.py' % options.library
wrapper = CtypesWrapper()
wrapper.begin_output(open(options.output, 'w'),
library=options.library,
emit_filenames=headers,
link_modules=options.link_modules,
all_headers=options.all_headers)
wrapper.preprocessor_parser.include_path += options.include_dirs
for define in options.defines:
name, value = define.split('=')
wrapper.preprocessor_parser.define(name, value)
for file in options.include_files:
wrapper.wrap(file)
for header in headers:
wrapper.wrap(header)
wrapper.end_output()
print 'Wrapped to %s' % options.output
if __name__ == '__main__':
main(*sys.argv)
| bsd-3-clause |
rlkelly/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers | sandbox/Chapter10_/github_datapull.py | 108 | 2852 |
try:
import numpy as np
from requests import get
from bs4 import BeautifulSoup
stars_to_explore = ( 2**np.arange( -1, 16 ) ).astype("int")
forks_to_explore = ( 2**np.arange( -1, 16 ) ).astype("int")
repo_with_stars = np.ones_like( stars_to_explore )
repo_with_forks = np.ones_like( forks_to_explore )
URL = "https://github.com/search"
print "Scrapping data from Github. Sorry Github..."
print "The data is contained in variables `foo_to_explore` and `repo_with_foo`"
print
print "stars first..."
payload = {"q":""}
for i, _star in enumerate(stars_to_explore):
payload["q"] = "stars:>=%d"%_star
r = get( URL, params = payload )
soup = BeautifulSoup( r.text )
try:
h3 = soup.find( class_="sort-bar").find( "h3" ).text #hopefully the github search results page plays nicely.
value = int( h3.split(" ")[2].replace(",", "" ) )
except AttributeError as e:
#there might be less than 10 repos, so I'll count the number of display results
value = len( soup.findAll(class_= "mega-icon-public-repo" ) )
repo_with_stars[i] = value
print "number of repos with greater than or equal to %d stars: %d"%(_star, value )
#repo_with_stars = repo_with_stars.astype("float")/repo_with_stars[0]
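    # A hedged note: each request above is equivalent to fetching, e.g.,
    #     https://github.com/search?q=stars%3A%3E%3D16
    # ('stars:>=16' is URL-encoded by requests when building the query).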
print
print "forks second..."
payload = {"q":""}
    for i, _fork in enumerate(forks_to_explore):
payload["q"] = "forks:>=%d"%_fork
r = get( URL, params = payload )
soup = BeautifulSoup( r.text )
try:
h3 = soup.find( class_="sort-bar").find( "h3" ).text #hopefully the github search results page plays nicely.
value = int( h3.split(" ")[2].replace(",", "" ) )
except AttributeError as e:
#there might be less than 10 repos, so I'll count the number of display results
value = len( soup.findAll(class_= "mega-icon-public-repo" ) )
repo_with_forks[i] = value
print "number of repos with greater than or equal to %d forks: %d"%(_fork, value )
#repo_with_forks = repo_with_forks.astype("float")/repo_with_forks[0]
np.savetxt( "data/gh_forks.csv", np.concatenate( [forks_to_explore, repo_with_forks], axis=1) )
np.savetxt( "data/gh_stars.csv", np.concatenate( [stars_to_explore, repo_with_stars], axis=1) )
except ImportError as e:
print e
print "requests / BeautifulSoup not found. Using data pulled on Feburary 11, 2013"
_data = np.genfromtxt( "data/gh_forks.csv", delimiter = "," ) #cehck this.
forks_to_explore = _data[:,0]
repo_with_forks = _data[:,1]
_data = np.genfromtxt( "data/gh_stars.csv", delimiter = "," ) #cehck this.
stars_to_explore = _data[:,0]
repo_with_stars = _data[:,1]
| mit |
Titan-C/sympy | sympy/crypto/crypto.py | 17 | 56885 | # -*- coding: utf-8 -*-
"""
This file contains some classical ciphers and routines
implementing a linear-feedback shift register (LFSR)
and the Diffie-Hellman key exchange.
"""
from __future__ import print_function
from string import whitespace, ascii_uppercase as uppercase, printable
from sympy import nextprime
from sympy.core import Rational, Symbol
from sympy.core.numbers import igcdex, mod_inverse
from sympy.core.compatibility import range
from sympy.matrices import Matrix
from sympy.ntheory import isprime, totient, primitive_root
from sympy.polys.domains import FF
from sympy.polys.polytools import gcd, Poly
from sympy.utilities.misc import filldedent, translate
from sympy.utilities.iterables import uniq
from sympy.utilities.randtest import _randrange
def AZ(s=None):
"""Return the letters of ``s`` in uppercase. In case more than
one string is passed, each of them will be processed and a list
of upper case strings will be returned.
Examples
========
>>> from sympy.crypto.crypto import AZ
>>> AZ('Hello, world!')
'HELLOWORLD'
>>> AZ('Hello, world!'.split())
['HELLO', 'WORLD']
See Also
========
check_and_join
"""
if not s:
return uppercase
t = type(s) is str
if t:
s = [s]
rv = [check_and_join(i.upper().split(), uppercase, filter=True)
for i in s]
if t:
return rv[0]
return rv
bifid5 = AZ().replace('J', '')
bifid6 = AZ() + '0123456789'
bifid10 = printable
def padded_key(key, symbols, filter=True):
"""Return a string of the distinct characters of ``symbols`` with
those of ``key`` appearing first, omitting characters in ``key``
that are not in ``symbols``. A ValueError is raised if a) there are
duplicate characters in ``symbols`` or b) there are characters
in ``key`` that are not in ``symbols``.
Examples
========
>>> from sympy.crypto.crypto import padded_key
>>> padded_key('PUPPY', 'OPQRSTUVWXY')
'PUYOQRSTVWX'
>>> padded_key('RSA', 'ARTIST')
Traceback (most recent call last):
...
ValueError: duplicate characters in symbols: T
"""
syms = list(uniq(symbols))
if len(syms) != len(symbols):
extra = ''.join(sorted(set(
[i for i in symbols if symbols.count(i) > 1])))
raise ValueError('duplicate characters in symbols: %s' % extra)
extra = set(key) - set(syms)
if extra:
raise ValueError(
'characters in key but not symbols: %s' % ''.join(
sorted(extra)))
key0 = ''.join(list(uniq(key)))
return key0 + ''.join([i for i in syms if i not in key0])
def check_and_join(phrase, symbols=None, filter=None):
"""
Joins characters of `phrase` and if ``symbols`` is given, raises
an error if any character in ``phrase`` is not in ``symbols``.
Parameters
==========
phrase: string or list of strings to be returned as a string
symbols: iterable of characters allowed in ``phrase``;
if ``symbols`` is None, no checking is performed
Examples
========
>>> from sympy.crypto.crypto import check_and_join
>>> check_and_join('a phrase')
'a phrase'
>>> check_and_join('a phrase'.upper().split())
'APHRASE'
>>> check_and_join('a phrase!'.upper().split(), 'ARE', filter=True)
'ARAE'
>>> check_and_join('a phrase!'.upper().split(), 'ARE')
Traceback (most recent call last):
...
ValueError: characters in phrase but not symbols: "!HPS"
"""
rv = ''.join(''.join(phrase))
if symbols is not None:
symbols = check_and_join(symbols)
missing = ''.join(list(sorted(set(rv) - set(symbols))))
if missing:
if not filter:
raise ValueError(
'characters in phrase but not symbols: "%s"' % missing)
rv = translate(rv, None, missing)
return rv
def _prep(msg, key, alp, default=None):
if not alp:
if not default:
alp = AZ()
msg = AZ(msg)
key = AZ(key)
else:
alp = default
else:
alp = ''.join(alp)
key = check_and_join(key, alp, filter=True)
msg = check_and_join(msg, alp, filter=True)
return msg, key, alp
def cycle_list(k, n):
"""
Returns the elements of the list ``range(n)`` shifted to the
left by ``k`` (so the list starts with ``k`` (mod ``n``)).
Examples
========
>>> from sympy.crypto.crypto import cycle_list
>>> cycle_list(3, 10)
[3, 4, 5, 6, 7, 8, 9, 0, 1, 2]
"""
k = k % n
return list(range(k, n)) + list(range(k))
######## shift cipher examples ############
def encipher_shift(msg, key, symbols=None):
"""
Performs shift cipher encryption on plaintext msg, and returns the
ciphertext.
Notes
=====
The shift cipher is also called the Caesar cipher, after
Julius Caesar, who, according to Suetonius, used it with a
shift of three to protect messages of military significance.
Caesar's nephew Augustus reportedly used a similar cipher, but
with a right shift of 1.
ALGORITHM:
INPUT:
``key``: an integer (the secret key)
``msg``: plaintext of upper-case letters
OUTPUT:
``ct``: ciphertext of upper-case letters
STEPS:
            0. Number the letters of the alphabet from 0, ..., N - 1
            1. Compute from the string ``msg`` a list ``L1`` of
               corresponding integers.
            2. Compute from the list ``L1`` a new list ``L2``, given by
               adding ``(key mod 26)`` to each element in ``L1``.
3. Compute from the list ``L2`` a string ``ct`` of
corresponding letters.
Examples
========
>>> from sympy.crypto.crypto import encipher_shift, decipher_shift
>>> msg = "GONAVYBEATARMY"
>>> ct = encipher_shift(msg, 1); ct
'HPOBWZCFBUBSNZ'
To decipher the shifted text, change the sign of the key:
>>> encipher_shift(ct, -1)
'GONAVYBEATARMY'
There is also a convenience function that does this with the
original key:
>>> decipher_shift(ct, 1)
'GONAVYBEATARMY'
"""
msg, _, A = _prep(msg, '', symbols)
shift = len(A) - key % len(A)
key = A[shift:] + A[:shift]
return translate(msg, key, A)
def decipher_shift(msg, key, symbols=None):
"""
Return the text by shifting the characters of ``msg`` to the
left by the amount given by ``key``.
Examples
========
>>> from sympy.crypto.crypto import encipher_shift, decipher_shift
>>> msg = "GONAVYBEATARMY"
>>> ct = encipher_shift(msg, 1); ct
'HPOBWZCFBUBSNZ'
To decipher the shifted text, change the sign of the key:
>>> encipher_shift(ct, -1)
'GONAVYBEATARMY'
Or use this function with the original key:
>>> decipher_shift(ct, 1)
'GONAVYBEATARMY'
"""
return encipher_shift(msg, -key, symbols)
######## affine cipher examples ############
def encipher_affine(msg, key, symbols=None, _inverse=False):
r"""
Performs the affine cipher encryption on plaintext ``msg``, and
returns the ciphertext.
Encryption is based on the map `x \rightarrow ax+b` (mod `N`)
where ``N`` is the number of characters in the alphabet.
Decryption is based on the map `x \rightarrow cx+d` (mod `N`),
where `c = a^{-1}` (mod `N`) and `d = -a^{-1}b` (mod `N`).
In particular, for the map to be invertible, we need
`\mathrm{gcd}(a, N) = 1` and an error will be raised if this is
not true.
Notes
=====
This is a straightforward generalization of the shift cipher with
the added complexity of requiring 2 characters to be deciphered in
order to recover the key.
ALGORITHM:
INPUT:
``msg``: string of characters that appear in ``symbols``
            ``a, b``: a pair of integers, with ``gcd(a, N) = 1``
(the secret key)
``symbols``: string of characters (default = uppercase
letters). When no symbols are given, ``msg`` is converted
            to upper case letters and all other characters are ignored.
OUTPUT:
``ct``: string of characters (the ciphertext message)
STEPS:
            0. Number the letters of the alphabet from 0, ..., N - 1
1. Compute from the string ``msg`` a list ``L1`` of
corresponding integers.
2. Compute from the list ``L1`` a new list ``L2``, given by
replacing ``x`` by ``a*x + b (mod N)``, for each element
``x`` in ``L1``.
3. Compute from the list ``L2`` a string ``ct`` of
corresponding letters.
See Also
========
decipher_affine
"""
msg, _, A = _prep(msg, '', symbols)
N = len(A)
a, b = key
assert gcd(a, N) == 1
if _inverse:
c = mod_inverse(a, N)
d = -b*c
a, b = c, d
B = ''.join([A[(a*i + b) % N] for i in range(N)])
return translate(msg, A, B)
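# A hedged round-trip sketch (key (3, 1) is the one used in the
# decipher_affine doctest below; gcd(3, 26) == 1 as required):
#     encipher_affine("GO NAVY BEAT ARMY", (3, 1))   # -> 'TROBMVENBGBALV'
#     decipher_affine('TROBMVENBGBALV', (3, 1))      # -> 'GONAVYBEATARMY'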
def decipher_affine(msg, key, symbols=None):
r"""
Return the deciphered text that was made from the mapping,
`x \rightarrow ax+b` (mod `N`), where ``N`` is the
number of characters in the alphabet. Deciphering is done by
reciphering with a new key: `x \rightarrow cx+d` (mod `N`),
where `c = a^{-1}` (mod `N`) and `d = -a^{-1}b` (mod `N`).
Examples
========
>>> from sympy.crypto.crypto import encipher_affine, decipher_affine
>>> msg = "GO NAVY BEAT ARMY"
>>> key = (3, 1)
>>> encipher_affine(msg, key)
'TROBMVENBGBALV'
>>> decipher_affine(_, key)
'GONAVYBEATARMY'
"""
return encipher_affine(msg, key, symbols, _inverse=True)
#################### substitution cipher ###########################
def encipher_substitution(msg, old, new=None):
"""
Returns the ciphertext obtained by replacing each character that
appears in ``old`` with the corresponding character in ``new``.
If ``old`` is a mapping, then new is ignored and the replacements
defined by ``old`` are used.
Notes
=====
    This is more general than the affine cipher in that the key can
only be recovered by determining the mapping for each symbol.
Though in practice, once a few symbols are recognized the mappings
for other characters can be quickly guessed.
Examples
========
>>> from sympy.crypto.crypto import encipher_substitution, AZ
>>> old = 'OEYAG'
>>> new = '034^6'
>>> msg = AZ("go navy! beat army!")
>>> ct = encipher_substitution(msg, old, new); ct
'60N^V4B3^T^RM4'
To decrypt a substitution, reverse the last two arguments:
>>> encipher_substitution(ct, new, old)
'GONAVYBEATARMY'
    In the special case where ``old`` and ``new`` are a permutation of
order 2 (representing a transposition of characters) their order
is immaterial:
>>> old = 'NAVY'
>>> new = 'ANYV'
>>> encipher = lambda x: encipher_substitution(x, old, new)
>>> encipher('NAVY')
'ANYV'
>>> encipher(_)
'NAVY'
The substitution cipher, in general, is a method
whereby "units" (not necessarily single characters) of plaintext
are replaced with ciphertext according to a regular system.
>>> ords = dict(zip('abc', ['\\%i' % ord(i) for i in 'abc']))
>>> print(encipher_substitution('abc', ords))
\97\98\99
"""
return translate(msg, old, new)
######################################################################
#################### Vigenère cipher examples ########################
######################################################################
def encipher_vigenere(msg, key, symbols=None):
"""
Performs the Vigenère cipher encryption on plaintext ``msg``, and
returns the ciphertext.
Examples
========
>>> from sympy.crypto.crypto import encipher_vigenere, AZ
>>> key = "encrypt"
>>> msg = "meet me on monday"
>>> encipher_vigenere(msg, key)
'QRGKKTHRZQEBPR'
Section 1 of the Kryptos sculpture at the CIA headquarters
    uses this cipher and also changes the order of the
alphabet [2]_. Here is the first line of that section of
the sculpture:
>>> from sympy.crypto.crypto import decipher_vigenere, padded_key
>>> alp = padded_key('KRYPTOS', AZ())
>>> key = 'PALIMPSEST'
>>> msg = 'EMUFPHZLRFAXYUSDJKZLDKRNSHGNFIVJ'
>>> decipher_vigenere(msg, key, alp)
'BETWEENSUBTLESHADINGANDTHEABSENC'
Notes
=====
The Vigenère cipher is named after Blaise de Vigenère, a sixteenth
century diplomat and cryptographer, by a historical accident.
Vigenère actually invented a different and more complicated cipher.
The so-called *Vigenère cipher* was actually invented
by Giovan Batista Belaso in 1553.
This cipher was used in the 1800's, for example, during the American
Civil War. The Confederacy used a brass cipher disk to implement the
Vigenère cipher (now on display in the NSA Museum in Fort
Meade) [1]_.
The Vigenère cipher is a generalization of the shift cipher.
Whereas the shift cipher shifts each letter by the same amount
(that amount being the key of the shift cipher) the Vigenère
cipher shifts a letter by an amount determined by the key (which is
a word or phrase known only to the sender and receiver).
For example, if the key was a single letter, such as "C", then the
    so-called Vigenère cipher is actually a shift cipher with a
shift of `2` (since "C" is the 2nd letter of the alphabet, if
you start counting at `0`). If the key was a word with two
letters, such as "CA", then the so-called Vigenère cipher will
shift letters in even positions by `2` and letters in odd positions
are left alone (shifted by `0`, since "A" is the 0th letter, if
you start counting at `0`).
ALGORITHM:
INPUT:
``msg``: string of characters that appear in ``symbols``
(the plaintext)
``key``: a string of characters that appear in ``symbols``
(the secret key)
``symbols``: a string of letters defining the alphabet
OUTPUT:
``ct``: string of characters (the ciphertext message)
STEPS:
            0. Number the letters of the alphabet from 0, ..., N - 1
1. Compute from the string ``key`` a list ``L1`` of
corresponding integers. Let ``n1 = len(L1)``.
2. Compute from the string ``msg`` a list ``L2`` of
corresponding integers. Let ``n2 = len(L2)``.
            3. Break ``L2`` up sequentially into sublists of size
``n1``; the last sublist may be smaller than ``n1``
            4. For each of these sublists ``L`` of ``L2``, compute a
               new list ``C`` given by ``C[i] = L[i] + L1[i] (mod N)``,
               for each ``i``.
5. Assemble these lists ``C`` by concatenation into a new
list of length ``n2``.
6. Compute from the new list a string ``ct`` of
corresponding letters.
Once it is known that the key is, say, `n` characters long,
frequency analysis can be applied to every `n`-th letter of
the ciphertext to determine the plaintext. This method is
called *Kasiski examination* (although it was first discovered
    by Babbage). If the key is as long as the message and is
    composed of randomly selected characters -- a one-time pad -- the
message is theoretically unbreakable.
The cipher Vigenère actually discovered is an "auto-key" cipher
described as follows.
ALGORITHM:
INPUT:
``key``: a string of letters (the secret key)
``msg``: string of letters (the plaintext message)
OUTPUT:
``ct``: string of upper-case letters (the ciphertext message)
STEPS:
            0. Number the letters of the alphabet from 0, ..., N - 1
1. Compute from the string ``msg`` a list ``L2`` of
corresponding integers. Let ``n2 = len(L2)``.
2. Let ``n1`` be the length of the key. Append to the
string ``key`` the first ``n2 - n1`` characters of
the plaintext message. Compute from this string (also of
length ``n2``) a list ``L1`` of integers corresponding
to the letter numbers in the first step.
3. Compute a new list ``C`` given by
``C[i] = L1[i] + L2[i] (mod N)``.
4. Compute from the new list a string ``ct`` of letters
corresponding to the new integers.
To decipher the auto-key ciphertext, the key is used to decipher
the first ``n1`` characters and then those characters become the
key to decipher the next ``n1`` characters, etc...:
>>> m = AZ('go navy, beat army! yes you can'); m
'GONAVYBEATARMYYESYOUCAN'
>>> key = AZ('gold bug'); n1 = len(key); n2 = len(m)
>>> auto_key = key + m[:n2 - n1]; auto_key
'GOLDBUGGONAVYBEATARMYYE'
>>> ct = encipher_vigenere(m, auto_key); ct
'MCYDWSHKOGAMKZCELYFGAYR'
>>> n1 = len(key)
>>> pt = []
>>> while ct:
... part, ct = ct[:n1], ct[n1:]
... pt.append(decipher_vigenere(part, key))
... key = pt[-1]
...
>>> ''.join(pt) == m
True
References
==========
.. [1] http://en.wikipedia.org/wiki/Vigenere_cipher
.. [2] http://web.archive.org/web/20071116100808/
http://filebox.vt.edu/users/batman/kryptos.html
"""
msg, key, A = _prep(msg, key, symbols)
map = {c: i for i, c in enumerate(A)}
key = [map[c] for c in key]
N = len(map)
k = len(key)
rv = []
for i, m in enumerate(msg):
rv.append(A[(map[m] + key[i % k]) % N])
rv = ''.join(rv)
return rv
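# Working the doctest's first letter by hand: 'M' (12) shifted by the
# key letter 'E' (4) gives (12 + 4) % 26 == 16, i.e. 'Q' -- the first
# letter of the expected ciphertext 'QRGKKTHRZQEBPR'.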
def decipher_vigenere(msg, key, symbols=None):
"""
Decode using the Vigenère cipher.
Examples
========
>>> from sympy.crypto.crypto import decipher_vigenere
>>> key = "encrypt"
>>> ct = "QRGK kt HRZQE BPR"
>>> decipher_vigenere(ct, key)
'MEETMEONMONDAY'
"""
msg, key, A = _prep(msg, key, symbols)
map = {c: i for i, c in enumerate(A)}
N = len(A) # normally, 26
K = [map[c] for c in key]
n = len(K)
C = [map[c] for c in msg]
rv = ''.join([A[(-K[i % n] + c) % N] for i, c in enumerate(C)])
return rv
#################### Hill cipher ########################
def encipher_hill(msg, key, symbols=None, pad="Q"):
r"""
Return the Hill cipher encryption of ``msg``.
Notes
=====
The Hill cipher [1]_, invented by Lester S. Hill in the 1920's [2]_,
was the first polygraphic cipher in which it was practical
(though barely) to operate on more than three symbols at once.
The following discussion assumes an elementary knowledge of
matrices.
    First, each letter is encoded as a number starting with 0.
    Suppose your message `msg` consists of `n` capital letters, with no
    spaces. This may be regarded as an `n`-tuple `M` of elements of
    `Z_{26}` (if the letters are those of the English alphabet). A key
    in the Hill cipher is a `k \times k` matrix `K`, all of whose entries
are in `Z_{26}`, such that the matrix `K` is invertible (i.e., the
linear transformation `K: Z_{N}^k \rightarrow Z_{N}^k`
is one-to-one).
ALGORITHM:
INPUT:
``msg``: plaintext message of `n` upper-case letters
            ``key``: a `k \times k` invertible matrix `K`, all of whose
entries are in `Z_{26}` (or whatever number of symbols
are being used).
``pad``: character (default "Q") to use to make length
of text be a multiple of ``k``
OUTPUT:
``ct``: ciphertext of upper-case letters
STEPS:
            0. Number the letters of the alphabet from 0, ..., N - 1
1. Compute from the string ``msg`` a list ``L`` of
corresponding integers. Let ``n = len(L)``.
2. Break the list ``L`` up into ``t = ceiling(n/k)``
sublists ``L_1``, ..., ``L_t`` of size ``k`` (with
the last list "padded" to ensure its size is
``k``).
3. Compute new list ``C_1``, ..., ``C_t`` given by
``C[i] = K*L_i`` (arithmetic is done mod N), for each
``i``.
4. Concatenate these into a list ``C = C_1 + ... + C_t``.
5. Compute from ``C`` a string ``ct`` of corresponding
letters. This has length ``k*t``.
References
==========
.. [1] en.wikipedia.org/wiki/Hill_cipher
.. [2] Lester S. Hill, Cryptography in an Algebraic Alphabet,
The American Mathematical Monthly Vol.36, June-July 1929,
pp.306-312.
See Also
========
decipher_hill
"""
assert key.is_square
assert len(pad) == 1
msg, pad, A = _prep(msg, pad, symbols)
map = {c: i for i, c in enumerate(A)}
P = [map[c] for c in msg]
N = len(A)
k = key.cols
n = len(P)
m, r = divmod(n, k)
if r:
P = P + [map[pad]]*(k - r)
m += 1
rv = ''.join([A[c % N] for j in range(m) for c in
list(key*Matrix(k, 1, [P[i]
for i in range(k*j, k*(j + 1))]))])
return rv
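# A hedged sketch (the key is the one from the decipher_hill doctest
# below; det(K) == 1*5 - 2*3 == -1, which is invertible mod 26):
#     encipher_hill("meet me on monday", Matrix([[1, 2], [3, 5]]))
#     # -> 'UEQDUEODOCTCWQ'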
def decipher_hill(msg, key, symbols=None):
"""
Deciphering is the same as enciphering but using the inverse of the
key matrix.
Examples
========
>>> from sympy.crypto.crypto import encipher_hill, decipher_hill
>>> from sympy import Matrix
>>> key = Matrix([[1, 2], [3, 5]])
>>> encipher_hill("meet me on monday", key)
'UEQDUEODOCTCWQ'
>>> decipher_hill(_, key)
'MEETMEONMONDAY'
When the length of the plaintext (stripped of invalid characters)
is not a multiple of the key dimension, extra characters will
appear at the end of the enciphered and deciphered text. In order to
decipher the text, those characters must be included in the text to
be deciphered. In the following, the key has a dimension of 4 but
the text is 2 short of being a multiple of 4 so two characters will
be added.
>>> key = Matrix([[1, 1, 1, 2], [0, 1, 1, 0],
... [2, 2, 3, 4], [1, 1, 0, 1]])
>>> msg = "ST"
>>> encipher_hill(msg, key)
'HJEB'
>>> decipher_hill(_, key)
'STQQ'
>>> encipher_hill(msg, key, pad="Z")
'ISPK'
>>> decipher_hill(_, key)
'STZZ'
If the last two characters of the ciphertext were ignored in
either case, the wrong plaintext would be recovered:
>>> decipher_hill("HD", key)
'ORMV'
>>> decipher_hill("IS", key)
'UIKY'
"""
assert key.is_square
msg, _, A = _prep(msg, '', symbols)
map = {c: i for i, c in enumerate(A)}
C = [map[c] for c in msg]
N = len(A)
k = key.cols
n = len(C)
m, r = divmod(n, k)
if r:
C = C + [0]*(k - r)
m += 1
key_inv = key.inv_mod(N)
rv = ''.join([A[p % N] for j in range(m) for p in
list(key_inv*Matrix(
k, 1, [C[i] for i in range(k*j, k*(j + 1))]))])
return rv
#################### Bifid cipher ########################
def encipher_bifid(msg, key, symbols=None):
r"""
Performs the Bifid cipher encryption on plaintext ``msg``, and
returns the ciphertext.
This is the version of the Bifid cipher that uses an `n \times n`
Polybius square.
INPUT:
``msg``: plaintext string
``key``: short string for key; duplicate characters are
ignored and then it is padded with the characters in
``symbols`` that were not in the short key
``symbols``: `n \times n` characters defining the alphabet
(default is string.printable)
OUTPUT:
ciphertext (using Bifid5 cipher without spaces)
See Also
========
decipher_bifid, encipher_bifid5, encipher_bifid6
"""
msg, key, A = _prep(msg, key, symbols, bifid10)
long_key = ''.join(uniq(key)) or A
n = len(A)**.5
if n != int(n):
raise ValueError(
'Length of alphabet (%s) is not a square number.' % len(A))
N = int(n)
if len(long_key) < N**2:
long_key = list(long_key) + [x for x in A if x not in long_key]
# the fractionalization
row_col = dict([(ch, divmod(i, N))
for i, ch in enumerate(long_key)])
r, c = zip(*[row_col[x] for x in msg])
rc = r + c
ch = {i: ch for ch, i in row_col.items()}
rv = ''.join((ch[i] for i in zip(rc[::2], rc[1::2])))
return rv
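# A hedged sketch using the values from the decipher_bifid doctest below
# (a 5x5 alphabet obtained by dropping 'J'):
#     alp = AZ().replace('J', '')
#     encipher_bifid(AZ("meet me on monday!"), AZ("gold bug"), alp)
#     # -> 'IEILHHFSTSFQYE'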
def decipher_bifid(msg, key, symbols=None):
r"""
Performs the Bifid cipher decryption on ciphertext ``msg``, and
returns the plaintext.
This is the version of the Bifid cipher that uses the `n \times n`
Polybius square.
INPUT:
``msg``: ciphertext string
``key``: short string for key; duplicate characters are
ignored and then it is padded with the characters in
``symbols`` that were not in the short key
``symbols``: `n \times n` characters defining the alphabet
(default=string.printable, a `10 \times 10` matrix)
OUTPUT:
deciphered text
Examples
========
>>> from sympy.crypto.crypto import (
... encipher_bifid, decipher_bifid, AZ)
Do an encryption using the bifid5 alphabet:
>>> alp = AZ().replace('J', '')
>>> ct = AZ("meet me on monday!")
>>> key = AZ("gold bug")
>>> encipher_bifid(ct, key, alp)
'IEILHHFSTSFQYE'
When entering the text or ciphertext, spaces are ignored so it
can be formatted as desired. Re-entering the ciphertext from the
preceding, putting 4 characters per line and padding with an extra
J, does not cause problems for the deciphering:
>>> decipher_bifid('''
... IEILH
... HFSTS
... FQYEJ''', key, alp)
'MEETMEONMONDAY'
When no alphabet is given, all 100 printable characters will be
used:
>>> key = ''
>>> encipher_bifid('hello world!', key)
'bmtwmg-bIo*w'
>>> decipher_bifid(_, key)
'hello world!'
If the key is changed, a different encryption is obtained:
>>> key = 'gold bug'
>>> encipher_bifid('hello world!', 'gold_bug')
'hg2sfuei7t}w'
And if the key used to decrypt the message is not exact, the
original text will not be perfectly obtained:
>>> decipher_bifid(_, 'gold pug')
'heldo~wor6d!'
"""
msg, _, A = _prep(msg, '', symbols, bifid10)
long_key = ''.join(uniq(key)) or A
n = len(A)**.5
if n != int(n):
raise ValueError(
'Length of alphabet (%s) is not a square number.' % len(A))
N = int(n)
if len(long_key) < N**2:
long_key = list(long_key) + [x for x in A if x not in long_key]
# the reverse fractionalization
row_col = dict(
[(ch, divmod(i, N)) for i, ch in enumerate(long_key)])
rc = [i for c in msg for i in row_col[c]]
n = len(msg)
rc = zip(*(rc[:n], rc[n:]))
ch = {i: ch for ch, i in row_col.items()}
rv = ''.join((ch[i] for i in rc))
return rv
def bifid_square(key):
"""Return characters of ``key`` arranged in a square.
Examples
========
>>> from sympy.crypto.crypto import (
... bifid_square, AZ, padded_key, bifid5)
>>> bifid_square(AZ().replace('J', ''))
Matrix([
[A, B, C, D, E],
[F, G, H, I, K],
[L, M, N, O, P],
[Q, R, S, T, U],
[V, W, X, Y, Z]])
>>> bifid_square(padded_key(AZ('gold bug!'), bifid5))
Matrix([
[G, O, L, D, B],
[U, A, C, E, F],
[H, I, K, M, N],
[P, Q, R, S, T],
[V, W, X, Y, Z]])
See Also
========
padded_key
"""
A = ''.join(uniq(''.join(key)))
n = len(A)**.5
if n != int(n):
raise ValueError(
'Length of alphabet (%s) is not a square number.' % len(A))
n = int(n)
f = lambda i, j: Symbol(A[n*i + j])
rv = Matrix(n, n, f)
return rv
def encipher_bifid5(msg, key):
r"""
Performs the Bifid cipher encryption on plaintext ``msg``, and
returns the ciphertext.
This is the version of the Bifid cipher that uses the `5 \times 5`
Polybius square. The letter "J" is ignored so it must be replaced
with something else (traditionally an "I") before encryption.
Notes
=====
The Bifid cipher was invented around 1901 by Felix Delastelle.
It is a *fractional substitution* cipher, where letters are
replaced by pairs of symbols from a smaller alphabet. The
cipher uses a `5 \times 5` square filled with some ordering of the
alphabet, except that "J" is replaced with "I" (this is a so-called
    Polybius square; there is a `6 \times 6` analog if you add back in
    "J" and also append the digits 0, 1, ..., 9 onto the usual
    26-letter alphabet).
According to Helen Gaines' book *Cryptanalysis*, this type of cipher
was used in the field by the German Army during World War I.
ALGORITHM: (5x5 case)
INPUT:
``msg``: plaintext string; converted to upper case and
filtered of anything but all letters except J.
``key``: short string for key; non-alphabetic letters, J
and duplicated characters are ignored and then, if the
length is less than 25 characters, it is padded with other
letters of the alphabet (in alphabetical order).
OUTPUT:
ciphertext (all caps, no spaces)
STEPS:
0. Create the `5 \times 5` Polybius square ``S`` associated
to ``key`` as follows:
a) moving from left-to-right, top-to-bottom,
place the letters of the key into a `5 \times 5`
matrix,
b) if the key has less than 25 letters, add the
letters of the alphabet not in the key until the
`5 \times 5` square is filled.
1. Create a list ``P`` of pairs of numbers which are the
coordinates in the Polybius square of the letters in
``msg``.
2. Let ``L1`` be the list of all first coordinates of ``P``
(length of ``L1 = n``), let ``L2`` be the list of all
second coordinates of ``P`` (so the length of ``L2``
is also ``n``).
3. Let ``L`` be the concatenation of ``L1`` and ``L2``
(length ``L = 2*n``), except that consecutive numbers
are paired ``(L[2*i], L[2*i + 1])``. You can regard
``L`` as a list of pairs of length ``n``.
4. Let ``C`` be the list of all letters which are of the
form ``S[i, j]``, for all ``(i, j)`` in ``L``. As a
string, this is the ciphertext of ``msg``.
Examples
========
>>> from sympy.crypto.crypto import (
... encipher_bifid5, decipher_bifid5)
"J" will be omitted unless it is replaced with somthing else:
>>> round_trip = lambda m, k: \
... decipher_bifid5(encipher_bifid5(m, k), k)
>>> key = 'a'
>>> msg = "JOSIE"
>>> round_trip(msg, key)
'OSIE'
>>> round_trip(msg.replace("J", "I"), key)
'IOSIE'
>>> j = "QIQ"
>>> round_trip(msg.replace("J", j), key).replace(j, "J")
'JOSIE'
See Also
========
decipher_bifid5, encipher_bifid
"""
msg, key, _ = _prep(msg.upper(), key.upper(), None, bifid5)
key = padded_key(key, bifid5)
return encipher_bifid(msg, '', key)
def decipher_bifid5(msg, key):
r"""
Return the Bifid cipher decryption of ``msg``.
This is the version of the Bifid cipher that uses the `5 \times 5`
Polybius square; the letter "J" is ignored unless a ``key`` of
length 25 is used.
INPUT:
``msg``: ciphertext string
``key``: short string for key; duplicated characters are
            ignored and if the length is less than 25 characters, it
will be padded with other letters from the alphabet omitting
"J". Non-alphabetic characters are ignored.
OUTPUT:
plaintext from Bifid5 cipher (all caps, no spaces)
Examples
========
>>> from sympy.crypto.crypto import encipher_bifid5, decipher_bifid5
>>> key = "gold bug"
>>> encipher_bifid5('meet me on friday', key)
'IEILEHFSTSFXEE'
>>> encipher_bifid5('meet me on monday', key)
'IEILHHFSTSFQYE'
>>> decipher_bifid5(_, key)
'MEETMEONMONDAY'
"""
msg, key, _ = _prep(msg.upper(), key.upper(), None, bifid5)
key = padded_key(key, bifid5)
return decipher_bifid(msg, '', key)
def bifid5_square(key=None):
r"""
5x5 Polybius square.
Produce the Polybius square for the `5 \times 5` Bifid cipher.
Examples
========
>>> from sympy.crypto.crypto import bifid5_square
>>> bifid5_square("gold bug")
Matrix([
[G, O, L, D, B],
[U, A, C, E, F],
[H, I, K, M, N],
[P, Q, R, S, T],
[V, W, X, Y, Z]])
"""
if not key:
key = bifid5
else:
_, key, _ = _prep('', key.upper(), None, bifid5)
key = padded_key(key, bifid5)
return bifid_square(key)
def encipher_bifid6(msg, key):
r"""
Performs the Bifid cipher encryption on plaintext ``msg``, and
returns the ciphertext.
This is the version of the Bifid cipher that uses the `6 \times 6`
Polybius square.
INPUT:
``msg``: plaintext string (digits okay)
``key``: short string for key (digits okay). If ``key`` is
less than 36 characters long, the square will be filled with
letters A through Z and digits 0 through 9.
OUTPUT:
ciphertext from Bifid cipher (all caps, no spaces)
See Also
========
decipher_bifid6, encipher_bifid
"""
msg, key, _ = _prep(msg.upper(), key.upper(), None, bifid6)
key = padded_key(key, bifid6)
return encipher_bifid(msg, '', key)
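# A hedged sketch (values from the decipher_bifid6 doctest below; note
# that the digit '8' survives because the 6x6 square includes 0-9):
#     encipher_bifid6('meet me on monday at 8am', 'gold bug')
#     # -> 'KFKLJJHF5MMMKTFRGPL'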
def decipher_bifid6(msg, key):
r"""
Performs the Bifid cipher decryption on ciphertext ``msg``, and
returns the plaintext.
This is the version of the Bifid cipher that uses the `6 \times 6`
Polybius square.
INPUT:
``msg``: ciphertext string (digits okay); converted to upper case
``key``: short string for key (digits okay). If ``key`` is
less than 36 characters long, the square will be filled with
letters A through Z and digits 0 through 9. All letters are
converted to uppercase.
OUTPUT:
plaintext from Bifid cipher (all caps, no spaces)
Examples
========
>>> from sympy.crypto.crypto import encipher_bifid6, decipher_bifid6
>>> key = "gold bug"
>>> encipher_bifid6('meet me on monday at 8am', key)
'KFKLJJHF5MMMKTFRGPL'
>>> decipher_bifid6(_, key)
'MEETMEONMONDAYAT8AM'
"""
msg, key, _ = _prep(msg.upper(), key.upper(), None, bifid6)
key = padded_key(key, bifid6)
return decipher_bifid(msg, '', key)
def bifid6_square(key=None):
r"""
6x6 Polybius square.
Produces the Polybius square for the `6 \times 6` Bifid cipher.
Assumes alphabet of symbols is "A", ..., "Z", "0", ..., "9".
Examples
========
>>> from sympy.crypto.crypto import bifid6_square
>>> key = "gold bug"
>>> bifid6_square(key)
Matrix([
[G, O, L, D, B, U],
[A, C, E, F, H, I],
[J, K, M, N, P, Q],
[R, S, T, V, W, X],
[Y, Z, 0, 1, 2, 3],
[4, 5, 6, 7, 8, 9]])
"""
if not key:
key = bifid6
else:
_, key, _ = _prep('', key.upper(), None, bifid6)
key = padded_key(key, bifid6)
return bifid_square(key)
#################### RSA #############################
def rsa_public_key(p, q, e):
r"""
Return the RSA *public key* pair, `(n, e)`, where `n`
is a product of two primes and `e` is relatively
prime (coprime) to the Euler totient `\phi(n)`. False
is returned if any assumption is violated.
Examples
========
>>> from sympy.crypto.crypto import rsa_public_key
>>> p, q, e = 3, 5, 7
>>> rsa_public_key(p, q, e)
(15, 7)
>>> rsa_public_key(p, q, 30)
False
"""
n = p*q
if isprime(p) and isprime(q):
phi = totient(n)
if gcd(e, phi) == 1:
return n, e
return False
def rsa_private_key(p, q, e):
r"""
Return the RSA *private key*, `(n,d)`, where `n`
is a product of two primes and `d` is the inverse of
`e` (mod `\phi(n)`). False is returned if any assumption
is violated.
Examples
========
>>> from sympy.crypto.crypto import rsa_private_key
>>> p, q, e = 3, 5, 7
>>> rsa_private_key(p, q, e)
(15, 7)
>>> rsa_private_key(p, q, 30)
False
"""
n = p*q
if isprime(p) and isprime(q):
phi = totient(n)
if gcd(e, phi) == 1:
d = mod_inverse(e, phi)
return n, d
return False
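# Working the doctest above by hand: n = 3*5 = 15 and phi(15) = 8;
# 7*7 = 49 and 49 % 8 == 1, so d = mod_inverse(7, 8) = 7 -- for this
# tiny example the private key (15, 7) happens to equal the public key.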
def encipher_rsa(i, key):
"""
Return encryption of ``i`` by computing `i^e` (mod `n`),
where ``key`` is the public key `(n, e)`.
Examples
========
>>> from sympy.crypto.crypto import encipher_rsa, rsa_public_key
>>> p, q, e = 3, 5, 7
>>> puk = rsa_public_key(p, q, e)
>>> msg = 12
>>> encipher_rsa(msg, puk)
3
"""
n, e = key
return pow(i, e, n)
def decipher_rsa(i, key):
"""
    Return decryption of ``i`` by computing `i^d` (mod `n`),
where ``key`` is the private key `(n, d)`.
Examples
========
>>> from sympy.crypto.crypto import decipher_rsa, rsa_private_key
>>> p, q, e = 3, 5, 7
>>> prk = rsa_private_key(p, q, e)
>>> msg = 3
>>> decipher_rsa(msg, prk)
12
"""
n, d = key
return pow(i, d, n)
#################### kid krypto (kid RSA) #############################
def kid_rsa_public_key(a, b, A, B):
r"""
Kid RSA is a version of RSA useful to teach grade school children
since it does not involve exponentiation.
Alice wants to talk to Bob. Bob generates keys as follows.
Key generation:
* Select positive integers `a, b, A, B` at random.
* Compute `M = a b - 1`, `e = A M + a`, `d = B M + b`,
`n = (e d - 1)//M`.
* The *public key* is `(n, e)`. Bob sends these to Alice.
* The *private key* is `(n, d)`, which Bob keeps secret.
Encryption: If `p` is the plaintext message then the
ciphertext is `c = p e \pmod n`.
Decryption: If `c` is the ciphertext message then the
plaintext is `p = c d \pmod n`.
Examples
========
>>> from sympy.crypto.crypto import kid_rsa_public_key
>>> a, b, A, B = 3, 4, 5, 6
>>> kid_rsa_public_key(a, b, A, B)
(369, 58)
"""
M = a*b - 1
e = A*M + a
d = B*M + b
n = (e*d - 1)//M
return n, e
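# Working the doctest above by hand (a, b, A, B = 3, 4, 5, 6):
#     M = 3*4 - 1 = 11
#     e = 5*11 + 3 = 58
#     d = 6*11 + 4 = 70
#     n = (58*70 - 1)//11 = 4059//11 = 369
# so the public key is (369, 58) and the private key is (369, 70).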
def kid_rsa_private_key(a, b, A, B):
"""
Compute `M = a b - 1`, `e = A M + a`, `d = B M + b`,
`n = (e d - 1) / M`. The *private key* is `d`, which Bob
keeps secret.
Examples
========
>>> from sympy.crypto.crypto import kid_rsa_private_key
>>> a, b, A, B = 3, 4, 5, 6
>>> kid_rsa_private_key(a, b, A, B)
(369, 70)
"""
M = a*b - 1
e = A*M + a
d = B*M + b
n = (e*d - 1)//M
return n, d
def encipher_kid_rsa(msg, key):
"""
Here ``msg`` is the plaintext and ``key`` is the public key.
Examples
========
>>> from sympy.crypto.crypto import (
... encipher_kid_rsa, kid_rsa_public_key)
>>> msg = 200
>>> a, b, A, B = 3, 4, 5, 6
>>> key = kid_rsa_public_key(a, b, A, B)
>>> encipher_kid_rsa(msg, key)
161
"""
n, e = key
return (msg*e) % n
def decipher_kid_rsa(msg, key):
"""
Here ``msg`` is the plaintext and ``key`` is the private key.
Examples
========
>>> from sympy.crypto.crypto import (
... kid_rsa_public_key, kid_rsa_private_key,
... decipher_kid_rsa, encipher_kid_rsa)
>>> a, b, A, B = 3, 4, 5, 6
>>> d = kid_rsa_private_key(a, b, A, B)
>>> msg = 200
>>> pub = kid_rsa_public_key(a, b, A, B)
>>> pri = kid_rsa_private_key(a, b, A, B)
>>> ct = encipher_kid_rsa(msg, pub)
>>> decipher_kid_rsa(ct, pri)
200
"""
n, d = key
return (msg*d) % n
#################### Morse Code ######################################
morse_char = {
".-": "A", "-...": "B",
"-.-.": "C", "-..": "D",
".": "E", "..-.": "F",
"--.": "G", "....": "H",
"..": "I", ".---": "J",
"-.-": "K", ".-..": "L",
"--": "M", "-.": "N",
"---": "O", ".--.": "P",
"--.-": "Q", ".-.": "R",
"...": "S", "-": "T",
"..-": "U", "...-": "V",
".--": "W", "-..-": "X",
"-.--": "Y", "--..": "Z",
"-----": "0", "----": "1",
"..---": "2", "...--": "3",
"....-": "4", ".....": "5",
"-....": "6", "--...": "7",
"---..": "8", "----.": "9",
".-.-.-": ".", "--..--": ",",
"---...": ":", "-.-.-.": ";",
"..--..": "?", "-...-": "-",
"..--.-": "_", "-.--.": "(",
"-.--.-": ")", ".----.": "'",
"-...-": "=", ".-.-.": "+",
"-..-.": "/", ".--.-.": "@",
"...-..-": "$", "-.-.--": "!"}
char_morse = {v: k for k, v in morse_char.items()}
def encode_morse(msg, sep='|', mapping=None):
"""
    Encodes a plaintext message into Morse code, with letters
    separated by ``sep`` and words by a double ``sep``.
References
==========
.. [1] http://en.wikipedia.org/wiki/Morse_code
Examples
========
>>> from sympy.crypto.crypto import encode_morse
>>> msg = 'ATTACK RIGHT FLANK'
>>> encode_morse(msg)
'.-|-|-|.-|-.-.|-.-||.-.|..|--.|....|-||..-.|.-..|.-|-.|-.-'
"""
mapping = mapping or char_morse
assert sep not in mapping
word_sep = 2*sep
mapping[" "] = word_sep
suffix = msg and msg[-1] in whitespace
# normalize whitespace
msg = (' ' if word_sep else '').join(msg.split())
# omit unmapped chars
chars = set(''.join(msg.split()))
ok = set(mapping.keys())
msg = translate(msg, None, ''.join(chars - ok))
morsestring = []
words = msg.split()
for word in words:
morseword = []
for letter in word:
morseletter = mapping[letter]
morseword.append(morseletter)
word = sep.join(morseword)
morsestring.append(word)
return word_sep.join(morsestring) + (word_sep if suffix else '')
def decode_morse(msg, sep='|', mapping=None):
"""
    Decodes Morse code with letters separated by ``sep`` (default
    is '|') and words by a double ``sep`` (default is '||') into
    plaintext.
References
==========
.. [1] http://en.wikipedia.org/wiki/Morse_code
Examples
========
>>> from sympy.crypto.crypto import decode_morse
>>> mc = '--|---|...-|.||.|.-|...|-'
>>> decode_morse(mc)
'MOVE EAST'
"""
mapping = mapping or morse_char
word_sep = 2*sep
characterstring = []
words = msg.strip(word_sep).split(word_sep)
for word in words:
letters = word.split(sep)
chars = [mapping[c] for c in letters]
word = ''.join(chars)
characterstring.append(word)
rv = " ".join(characterstring)
return rv
#################### LFSRs ##########################################
def lfsr_sequence(key, fill, n):
r"""
This function creates an lfsr sequence.
INPUT:
``key``: a list of finite field elements,
`[c_0, c_1, \ldots, c_k].`
``fill``: the list of the initial terms of the lfsr
sequence, `[x_0, x_1, \ldots, x_k].`
``n``: number of terms of the sequence that the
function returns.
OUTPUT:
The lfsr sequence defined by
`x_{n+1} = c_k x_n + \ldots + c_0 x_{n-k}`, for
        `n \geq k`.
Notes
=====
S. Golomb [G]_ gives a list of three statistical properties a
sequence of numbers `a = \{a_n\}_{n=1}^\infty`,
`a_n \in \{0,1\}`, should display to be considered
"random". Define the autocorrelation of `a` to be
.. math::
C(k) = C(k,a) = \lim_{N\rightarrow \infty} {1\over N}\sum_{n=1}^N (-1)^{a_n + a_{n+k}}.
In the case where `a` is periodic with period
`P` then this reduces to
.. math::
C(k) = {1\over P}\sum_{n=1}^P (-1)^{a_n + a_{n+k}}.
Assume `a` is periodic with period `P`.
- balance:
.. math::
\left|\sum_{n=1}^P(-1)^{a_n}\right| \leq 1.
- low autocorrelation:
.. math::
C(k) = \left\{ \begin{array}{cc} 1,& k = 0,\\ \epsilon, & k \ne 0. \end{array} \right.
(For sequences satisfying these first two properties, it is known
that `\epsilon = -1/P` must hold.)
- proportional runs property: In each period, half the runs have
length `1`, one-fourth have length `2`, etc.
Moreover, there are as many runs of `1`'s as there are of
`0`'s.
References
==========
.. [G] Solomon Golomb, Shift register sequences, Aegean Park Press,
Laguna Hills, Ca, 1967
Examples
========
>>> from sympy.crypto.crypto import lfsr_sequence
>>> from sympy.polys.domains import FF
>>> F = FF(2)
>>> fill = [F(1), F(1), F(0), F(1)]
>>> key = [F(1), F(0), F(0), F(1)]
>>> lfsr_sequence(key, fill, 10)
[1 mod 2, 1 mod 2, 0 mod 2, 1 mod 2, 0 mod 2,
1 mod 2, 1 mod 2, 0 mod 2, 0 mod 2, 1 mod 2]
"""
if not isinstance(key, list):
raise TypeError("key must be a list")
if not isinstance(fill, list):
raise TypeError("fill must be a list")
p = key[0].mod
F = FF(p)
s = fill
k = len(fill)
L = []
for i in range(n):
s0 = s[:]
L.append(s[0])
s = s[1:k]
x = sum([int(key[i]*s0[i]) for i in range(k)])
s.append(F(x))
return L # use [x.to_int() for x in L] for int version
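# Working the doctest recurrence by hand for the first generated term
# (key = [1, 0, 0, 1], fill = [1, 1, 0, 1] over GF(2)):
#     x_4 = 1*1 + 0*1 + 0*0 + 1*1 = 2 = 0 (mod 2)
# which matches the fifth element of the doctest output.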
def lfsr_autocorrelation(L, P, k):
"""
This function computes the LFSR autocorrelation function.
INPUT:
        ``L``: a periodic sequence of elements of `GF(2)`.
``L`` must have length larger than ``P``.
``P``: the period of ``L``
        ``k``: an integer (`0 < k < P`)
OUTPUT:
the ``k``-th value of the autocorrelation of the LFSR ``L``
Examples
========
>>> from sympy.crypto.crypto import (
... lfsr_sequence, lfsr_autocorrelation)
>>> from sympy.polys.domains import FF
>>> F = FF(2)
>>> fill = [F(1), F(1), F(0), F(1)]
>>> key = [F(1), F(0), F(0), F(1)]
>>> s = lfsr_sequence(key, fill, 20)
>>> lfsr_autocorrelation(s, 15, 7)
-1/15
>>> lfsr_autocorrelation(s, 15, 0)
1
"""
if not isinstance(L, list):
raise TypeError("L (=%s) must be a list" % L)
P = int(P)
k = int(k)
L0 = L[:P] # slices makes a copy
L1 = L0 + L0[:k]
L2 = [(-1)**(L1[i].to_int() + L1[i + k].to_int()) for i in range(P)]
tot = sum(L2)
return Rational(tot, P)
def lfsr_connection_polynomial(s):
"""
This function computes the LFSR connection polynomial.
INPUT:
``s``: a sequence of elements of even length, with entries in
a finite field
OUTPUT:
``C(x)``: the connection polynomial of a minimal LFSR yielding
``s``.
This implements the algorithm in section 3 of J. L. Massey's
article [M]_.
References
==========
.. [M] James L. Massey, "Shift-Register Synthesis and BCH Decoding."
IEEE Trans. on Information Theory, vol. 15(1), pp. 122-127,
Jan 1969.
Examples
========
>>> from sympy.crypto.crypto import (
... lfsr_sequence, lfsr_connection_polynomial)
>>> from sympy.polys.domains import FF
>>> F = FF(2)
>>> fill = [F(1), F(1), F(0), F(1)]
>>> key = [F(1), F(0), F(0), F(1)]
>>> s = lfsr_sequence(key, fill, 20)
>>> lfsr_connection_polynomial(s)
x**4 + x + 1
>>> fill = [F(1), F(0), F(0), F(1)]
>>> key = [F(1), F(1), F(0), F(1)]
>>> s = lfsr_sequence(key, fill, 20)
>>> lfsr_connection_polynomial(s)
x**3 + 1
>>> fill = [F(1), F(0), F(1)]
>>> key = [F(1), F(1), F(0)]
>>> s = lfsr_sequence(key, fill, 20)
>>> lfsr_connection_polynomial(s)
x**3 + x**2 + 1
>>> fill = [F(1), F(0), F(1)]
>>> key = [F(1), F(0), F(1)]
>>> s = lfsr_sequence(key, fill, 20)
>>> lfsr_connection_polynomial(s)
x**3 + x + 1
"""
# Initialization:
p = s[0].mod
F = FF(p)
x = Symbol("x")
C = 1*x**0
B = 1*x**0
m = 1
b = 1*x**0
L = 0
N = 0
while N < len(s):
if L > 0:
dC = Poly(C).degree()
r = min(L + 1, dC + 1)
coeffsC = [C.subs(x, 0)] + [C.coeff(x**i)
for i in range(1, dC + 1)]
d = (s[N].to_int() + sum([coeffsC[i]*s[N - i].to_int()
for i in range(1, r)])) % p
if L == 0:
d = s[N].to_int()*x**0
if d == 0:
m += 1
N += 1
if d > 0:
if 2*L > N:
C = (C - d*((b**(p - 2)) % p)*x**m*B).expand()
m += 1
N += 1
else:
T = C
C = (C - d*((b**(p - 2)) % p)*x**m*B).expand()
L = N + 1 - L
m = 1
b = d
B = T
N += 1
dC = Poly(C).degree()
coeffsC = [C.subs(x, 0)] + [C.coeff(x**i) for i in range(1, dC + 1)]
return sum([coeffsC[i] % p*x**i for i in range(dC + 1)
if coeffsC[i] is not None])
#################### ElGamal #############################
def elgamal_private_key(digit=10, seed=None):
"""
Return three number tuple as private key.
Elgamal encryption is based on the mathmatical problem
called the Discrete Logarithm Problem (DLP). For example,
`a^{b} \equiv c \pmod p`
    In general, if ``a`` and ``b`` are known, ``c`` is easily
    calculated. If ``b`` is unknown, it is hard to use
    ``a`` and ``c`` to get ``b``.
Parameters
==========
digit : minimum number of binary digits for key
Returns
=======
(p, r, d) : p = prime number, r = primitive root, d = random number
Notes
=====
For testing purposes, the ``seed`` parameter may be set to control
the output of this routine. See sympy.utilities.randtest._randrange.
Examples
========
>>> from sympy.crypto.crypto import elgamal_private_key
>>> from sympy.ntheory import is_primitive_root, isprime
>>> a, b, _ = elgamal_private_key()
>>> isprime(a)
True
>>> is_primitive_root(b, a)
True
"""
randrange = _randrange(seed)
p = nextprime(2**digit)
return p, primitive_root(p), randrange(2, p)
def elgamal_public_key(key):
"""
    Return a three-number tuple as the public key.
Parameters
==========
    key : Tuple (p, r, d) generated by ``elgamal_private_key``
Returns
=======
    (p, r, e = r**d mod p) : d is the random number from the private key.
Examples
========
>>> from sympy.crypto.crypto import elgamal_public_key
>>> elgamal_public_key((1031, 14, 636))
(1031, 14, 212)
"""
p, r, e = key
return p, r, pow(r, e, p)
def encipher_elgamal(i, key, seed=None):
"""
Encrypt message with public key
``i`` is a plaintext message expressed as an integer.
``key`` is public key (p, r, e). In order to encrypt
a message, a random number ``a`` in ``range(2, p)``
    is generated and the encrypted message is returned as
`c_{1}` and `c_{2}` where:
`c_{1} \equiv r^{a} \pmod p`
`c_{2} \equiv m e^{a} \pmod p`
Parameters
==========
    i : the plaintext message expressed as an integer
key : public key
Returns
=======
    (c1, c2) : the ciphertext, as a pair of integers
Notes
=====
For testing purposes, the ``seed`` parameter may be set to control
the output of this routine. See sympy.utilities.randtest._randrange.
Examples
========
>>> from sympy.crypto.crypto import encipher_elgamal, elgamal_private_key, elgamal_public_key
>>> pri = elgamal_private_key(5, seed=[3]); pri
(37, 2, 3)
>>> pub = elgamal_public_key(pri); pub
(37, 2, 8)
>>> msg = 36
>>> encipher_elgamal(msg, pub, seed=[3])
(8, 6)
"""
p, r, e = key
if i < 0 or i >= p:
raise ValueError(
'Message (%s) should be in range(%s)' % (i, p))
randrange = _randrange(seed)
a = randrange(2, p)
return pow(r, a, p), i*pow(e, a, p) % p
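# Working the doctest above by hand (seed=[3] makes a == 3):
#     c1 = pow(2, 3, 37)             # 8
#     c2 = 36 * pow(8, 3, 37) % 37   # 36 * 31 % 37 == 6
# matching the doctest output (8, 6).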
def decipher_elgamal(msg, key):
r"""
    Decrypt a message with the private key.
`msg = (c_{1}, c_{2})`
`key = (p, r, d)`
    By the extended Euclidean algorithm, there exist `u` and `n` with
    `u c_{1}^{d} + p n = 1`, so that
    `u \equiv 1/{{c_{1}}^d} \pmod p`
`u c_{2} \equiv \frac{1}{c_{1}^d} c_{2} \equiv \frac{1}{r^{ad}} c_{2} \pmod p`
`\frac{1}{r^{ad}} m e^a \equiv \frac{1}{r^{ad}} m {r^{d a}} \equiv m \pmod p`
Examples
========
>>> from sympy.crypto.crypto import decipher_elgamal
>>> from sympy.crypto.crypto import encipher_elgamal
>>> from sympy.crypto.crypto import elgamal_private_key
>>> from sympy.crypto.crypto import elgamal_public_key
>>> pri = elgamal_private_key(5, seed=[3])
>>> pub = elgamal_public_key(pri); pub
(37, 2, 8)
>>> msg = 17
>>> decipher_elgamal(encipher_elgamal(msg, pub), pri) == msg
True
"""
p, r, d = key
c1, c2 = msg
u = igcdex(c1**d, p)[0]
return u * c2 % p
################ Diffie-Hellman Key Exchange #########################
def dh_private_key(digit=10, seed=None):
"""
Return three integer tuple as private key.
Diffie-Hellman key exchange is based on the mathematical problem
called the Discrete Logarithm Problem (see ElGamal).
Diffie-Hellman key exchange is divided into the following steps:
    * Alice and Bob agree on a base that consists of a prime ``p``
and a primitive root of ``p`` called ``g``
    * Alice chooses a number ``a`` and Bob chooses a number ``b`` where
``a`` and ``b`` are random numbers in range `[2, p)`. These are
their private keys.
* Alice then publicly sends Bob `g^{a} \pmod p` while Bob sends
Alice `g^{b} \pmod p`
* They both raise the received value to their secretly chosen
number (``a`` or ``b``) and now have both as their shared key
`g^{ab} \pmod p`
Parameters
==========
digit: minimum number of binary digits required in key
Returns
=======
(p, g, a) : p = prime number, g = primitive root of p,
        a = random number from 2 through p - 1
Notes
=====
For testing purposes, the ``seed`` parameter may be set to control
the output of this routine. See sympy.utilities.randtest._randrange.
Examples
========
>>> from sympy.crypto.crypto import dh_private_key
>>> from sympy.ntheory import isprime, is_primitive_root
>>> p, g, _ = dh_private_key()
>>> isprime(p)
True
>>> is_primitive_root(g, p)
True
>>> p, g, _ = dh_private_key(5)
>>> isprime(p)
True
>>> is_primitive_root(g, p)
True
"""
p = nextprime(2**digit)
g = primitive_root(p)
randrange = _randrange(seed)
a = randrange(2, p)
return p, g, a
def dh_public_key(key):
"""
    Return a three-number tuple as the public key.
This is the tuple that Alice sends to Bob.
Parameters
==========
key: Tuple (p, g, a) generated by ``dh_private_key``
Returns
=======
(p, g, g^a mod p) : p, g and a as in Parameters
Examples
========
>>> from sympy.crypto.crypto import dh_private_key, dh_public_key
    >>> p, g, a = dh_private_key()
>>> _p, _g, x = dh_public_key((p, g, a))
>>> p == _p and g == _g
True
>>> x == pow(g, a, p)
True
"""
p, g, a = key
return p, g, pow(g, a, p)
def dh_shared_key(key, b):
"""
Return an integer that is the shared key.
This is what Bob and Alice can both calculate using the public
keys they received from each other and their private keys.
Parameters
==========
key: Tuple (p, g, x) generated by ``dh_public_key``
b: Random number in the range of 2 to p - 1
(Chosen by second key exchange member (Bob))
Returns
=======
shared key (int)
Examples
========
>>> from sympy.crypto.crypto import (
... dh_private_key, dh_public_key, dh_shared_key)
    >>> prk = dh_private_key()
    >>> p, g, x = dh_public_key(prk)
>>> sk = dh_shared_key((p, g, x), 1000)
>>> sk == pow(x, 1000, p)
True
"""
p, _, x = key
if 1 >= b or b >= p:
raise ValueError(filldedent('''
            Value of b should be greater than 1 and less
than prime %s.''' % p))
return pow(x, b, p)
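# A hedged end-to-end sketch of the exchange (numbers are illustrative;
# 5 is a primitive root of 23):
#     p, g, a = 23, 5, 6                 # Alice's private key
#     b = 15                             # Bob's private key
#     pub = dh_public_key((p, g, a))     # (23, 5, pow(5, 6, 23)) == (23, 5, 8)
#     dh_shared_key(pub, b)              # pow(8, 15, 23) == 2
# Bob's public value is pow(5, 15, 23) == 19 and pow(19, 6, 23) == 2,
# so both sides arrive at the same shared key.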
| bsd-3-clause |
jkorell/PTVS | Python/Product/Analyzer/BuiltinScraper.py | 18 | 21219 | # ############################################################################
#
# Copyright (c) Microsoft Corporation.
#
# This source code is subject to terms and conditions of the Apache License, Version 2.0. A
# copy of the license can be found in the License.html file at the root of this distribution. If
# you cannot locate the Apache License, Version 2.0, please send an email to
# [email protected]. By using this source code in any fashion, you are agreeing to be bound
# by the terms of the Apache License, Version 2.0.
#
# You must not remove this notice, or any other, from this software.
#
# ###########################################################################
import re
import sys
import types
import PythonScraper
try:
import thread
except:
import _thread as thread
try:
import __builtin__ as __builtins__
except ImportError:
import builtins as __builtins__
def safe_dir(obj):
try:
return frozenset(obj.__dict__) | frozenset(dir(obj))
except:
# Some types crash when we access __dict__ and/or dir()
pass
try:
return frozenset(dir(obj))
except:
pass
try:
return frozenset(obj.__dict__)
except:
pass
return frozenset()
def builtins_keys():
if isinstance(__builtins__, dict):
return __builtins__.keys()
return dir(__builtins__)
def get_builtin(name):
if isinstance(__builtins__, dict):
return __builtins__[name]
return getattr(__builtins__, name)
safe_getattr = PythonScraper.safe_getattr
BUILTIN_TYPES = [type_name for type_name in builtins_keys() if type(get_builtin(type_name)) is type]
if sys.version_info[0] >= 3:
BUILTIN = 'builtins'
unicode = str
else:
BUILTIN = '__builtin__'
TYPE_OVERRIDES = {
'string': PythonScraper.type_to_typeref(types.CodeType),
's': PythonScraper.type_to_typeref(str),
'integer': PythonScraper.type_to_typeref(int),
'boolean': PythonScraper.type_to_typeref(bool),
'number': PythonScraper.type_to_typeref(int),
'pid': PythonScraper.type_to_typeref(int),
'ppid': PythonScraper.type_to_typeref(int),
'fd': PythonScraper.type_to_typeref(int),
'handle': PythonScraper.type_to_typeref(int),
'Exit': PythonScraper.type_to_typeref(int),
'fd2': PythonScraper.type_to_typeref(int),
'Integral': PythonScraper.type_to_typeref(int),
'exit_status':PythonScraper.type_to_typeref(int),
'old_mask': PythonScraper.type_to_typeref(int),
'source': PythonScraper.type_to_typeref(str),
'newpos': PythonScraper.type_to_typeref(int),
'key': PythonScraper.type_to_typeref(str),
'dictionary': PythonScraper.type_to_typeref(dict),
'None': PythonScraper.type_to_typeref(type(None)),
'floating': PythonScraper.type_to_typeref(float),
'filename': PythonScraper.type_to_typeref(str),
'path': PythonScraper.type_to_typeref(str),
'byteswritten': PythonScraper.type_to_typeref(int),
'unicode': PythonScraper.type_to_typeref(unicode),
'Unicode': PythonScraper.type_to_typeref(unicode),
'True': PythonScraper.type_to_typeref(bool),
'False': PythonScraper.type_to_typeref(bool),
'lock': PythonScraper.type_to_typeref(thread.LockType),
'code': PythonScraper.type_to_typeref(types.CodeType),
'module': PythonScraper.type_to_typeref(types.ModuleType),
'size': PythonScraper.type_to_typeref(int),
'INT': PythonScraper.type_to_typeref(int),
'STRING': PythonScraper.type_to_typeref(str),
'TUPLE': PythonScraper.type_to_typeref(tuple),
'OBJECT': PythonScraper.type_to_typeref(object),
'LIST': PythonScraper.type_to_typeref(list),
'DICT': PythonScraper.type_to_typeref(dict),
'char *': PythonScraper.type_to_typeref(str),
'wchar_t *': PythonScraper.type_to_typeref(unicode),
'CHAR *': PythonScraper.type_to_typeref(str),
'TCHAR *': PythonScraper.type_to_typeref(str),
'WCHAR *': PythonScraper.type_to_typeref(unicode),
'LPSTR': PythonScraper.type_to_typeref(str),
'LPCSTR': PythonScraper.type_to_typeref(str),
'LPTSTR': PythonScraper.type_to_typeref(str),
'LPCTSTR': PythonScraper.type_to_typeref(str),
'LPWSTR': PythonScraper.type_to_typeref(unicode),
'LPCWSTR': PythonScraper.type_to_typeref(unicode),
}
try:
TYPE_OVERRIDES['file object'] = PythonScraper.type_to_typeref(file)
except NameError:
try:
import _io
TYPE_OVERRIDES['file object'] = PythonScraper.type_to_typeref(_io._IOBase)
except (NameError, ImportError):
pass
RETURN_TYPE_OVERRIDES = dict(TYPE_OVERRIDES)
RETURN_TYPE_OVERRIDES.update({'string': PythonScraper.type_to_typeref(str)})
def type_name_to_typeref(name, mod, type_overrides = TYPE_OVERRIDES):
arg_type = type_overrides.get(name, None)
if arg_type is None:
if name in BUILTIN_TYPES:
arg_type = PythonScraper.type_to_typeref(get_builtin(name))
elif mod is not None and name in mod.__dict__:
arg_type = PythonScraper.typename_to_typeref(mod.__name__, name)
elif name.startswith('list'):
arg_type = PythonScraper.type_to_typeref(list)
else:
# see if we can find it in any module we've imported...
for mod_name, mod in sys.modules.items():
if mod is not None and name in mod.__dict__ and isinstance(mod.__dict__[name], type):
arg_type = PythonScraper.typename_to_typeref(mod_name, name)
break
else:
first_space = name.find(' ')
if first_space != -1:
return type_name_to_typeref(name[:first_space], mod, type_overrides)
arg_type = PythonScraper.typename_to_typeref(name)
return arg_type
OBJECT_TYPE = PythonScraper.type_to_typeref(object)
TOKENS_REGEX = '(' + '|'.join([
r'(?:[a-zA-Z_][0-9a-zA-Z_-]*)', # identifier
r'(?:-?[0-9]+[lL]?(?!\.))', # integer value
r'(?:-?[0-9]*\.[0-9]+)', # floating point value
r'(?:-?[0-9]+\.[0-9]*)', # floating point value
r'(?:\s*\'.*?(?<!\\)\')', # single-quote string
r'(?:\s*".*?(?<!\\)")', # double-quote string
r'(?:\.\.\.)', # ellipsis
r'(?:\.)', # dot
r'(?:\()', # open paren
r'(?:\))', # close paren
r'(?:\:)', # colon
r'(?:-->)', # return value
r'(?:->)', # return value
r'(?:=>)', # return value
r'(?:,)', # comma
r'(?:=)', # assignment (default value)
r'(?:\[)',
r'(?:\])',
r'(?:\*\*)',
r'(?:\*)',
]) + ')'
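# A minimal sketch (the helper name is hypothetical) of why the pattern above
# is wrapped in one big capturing group: re.split then returns the recognized
# tokens at the odd indices, leaving unrecognized text at the even ones.
def _tokens_demo():
    parts = re.split(TOKENS_REGEX, 'fob(oar) -> int')
    assert parts[1::2] == ['fob', '(', 'oar', ')', '->', 'int']
    return parts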
def get_ret_type(ret_type, obj_class, mod):
if ret_type is not None:
if ret_type == 'copy' and obj_class is not None:
# returns a copy of self
return PythonScraper.type_to_typelist(obj_class)
else:
return [type_name_to_typeref(ret_type, mod, RETURN_TYPE_OVERRIDES)]
RETURNS_REGEX = [r'^\s*returns?[\s\-]*[a-z_]\w*\s*:\s*([a-z_]\w*)']
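# For illustration: a doc string line such as 'Returns output : str' matches
# the pattern above and captures 'str' as the return type name.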
def update_overload_from_doc_str(overload, doc_str, obj_class, mod):
# see if we can get additional information from the doc string
if 'ret_type' not in overload:
for ret_regex in RETURNS_REGEX:
match = re.search(ret_regex, doc_str, re.MULTILINE | re.IGNORECASE)
if match:
ret_type = match.groups(0)[0]
overload['ret_type'] = get_ret_type(ret_type, obj_class, mod)
break
def parse_doc_str(input_str, module_name, mod, func_name, extra_args = [], obj_class = None):
# we split, so as long as we have all tokens every other item is a token, and the
# rest are empty space. If we have unrecognized tokens (for example during the description
# of the function) those will show up in the even locations. We do join's and bring the
# entire range together in that case.
tokens = re.split(TOKENS_REGEX, input_str)
start_token = 0
last_identifier = None
cur_token = 1
overloads = []
while cur_token < len(tokens):
token = tokens[cur_token]
# see if we have modname.funcname(
if (cur_token + 10 < len(tokens) and
token == module_name and
tokens[cur_token + 2] == '.' and
tokens[cur_token + 4] == func_name and
tokens[cur_token + 6] == '('):
sig_start = cur_token
args, ret_type, cur_token = parse_args(tokens, cur_token + 8, mod)
doc_str = ''.join(tokens[start_token:sig_start])
if doc_str.find(' ') == -1:
doc_str = ''
if (not args or doc_str) and overloads:
# if we already parsed an overload, and are now getting an argless
# overload we're likely just seeing a reference to the function in
# a doc string, let's ignore that. This is betting on the idea that
# people list overloads first, then doc strings, and that people tend
# to list overloads from simplest to more complex. an example of this
# is the future_builtins.ascii doc string
# We also skip it if we have a doc string, this comes up in overloads
# like isinstance which has example calls embedded in the doc string
continue
start_token = cur_token
overload = {'args': tuple(extra_args + args), 'doc': doc_str}
ret_types = get_ret_type(ret_type, obj_class, mod)
if ret_types is not None:
overload['ret_type'] = ret_types
update_overload_from_doc_str(overload, doc_str, obj_class, mod)
overloads.append(overload)
# see if we have funcname(
elif (cur_token + 4 < len(tokens) and
token == func_name and
tokens[cur_token + 2] == '('):
sig_start = cur_token
args, ret_type, cur_token = parse_args(tokens, cur_token + 4, mod)
doc_str = ''.join(tokens[start_token:sig_start])
if doc_str.find(' ') == -1:
doc_str = ''
if (not args or doc_str) and overloads:
# if we already parsed an overload, and are now getting an argless
# overload we're likely just seeing a reference to the function in
# a doc string, let's ignore that. This is betting on the idea that
# people list overloads first, then doc strings, and that people tend
# to list overloads from simplest to more complex. an example of this
# is the future_builtins.ascii doc string
# We also skip it if we have a doc string, this comes up in overloads
# like isinstance which has example calls embedded in the doc string
continue
start_token = cur_token
overload = {'args': tuple(extra_args + args), 'doc': doc_str}
ret_types = get_ret_type(ret_type, obj_class, mod)
if ret_types is not None:
overload['ret_type'] = ret_types
update_overload_from_doc_str(overload, doc_str, obj_class, mod)
overloads.append(overload)
else:
# append to doc string
cur_token += 2
finish_doc = ''.join(tokens[start_token:cur_token])
if finish_doc:
if not overloads:
# This occurs when the docstring does not include a function spec
overloads.append({
'args': ({'name': 'args', 'arg_format': '*'}, {'name': 'kwargs', 'arg_format': '**'}),
'doc': ''
})
for overload in overloads:
overload['doc'] += finish_doc
update_overload_from_doc_str(overload, overload['doc'], obj_class, mod)
return overloads
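# For illustration: parse_doc_str('fob(oar) -> list\n\nMake a list.',
# '__builtin__', None, 'fob') yields, roughly, one overload of the form
#   {'args': ({'name': 'oar'},), 'doc': 'Make a list.',
#    'ret_type': [PythonScraper.type_to_typeref(list)]}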
IDENTIFIER_REGEX = re.compile('^[a-zA-Z_][a-zA-Z_0-9-]*$')
def is_identifier(token):
if IDENTIFIER_REGEX.match(token):
return True
return False
RETURN_TOKENS = set(['-->', '->', '=>', 'return'])
def parse_args(tokens, cur_token, module):
args = []
arg = []
annotation = None
default_value = None
ignore = False
arg_tokens = []
next_is_optional = False
is_optional = False
paren_nesting = 0
while cur_token < len(tokens):
token = tokens[cur_token]
cur_token += 1
if token in (',', ')') and paren_nesting == 0:
arg_tokens.append((arg, annotation, default_value, is_optional))
is_optional = False
arg = []
annotation = None
default_value = None
if token == ')':
cur_token += 1
break
elif ignore:
continue
elif token == '=':
if default_value is None:
default_value = []
else:
ignore = True
elif token == ':':
if annotation is None and default_value is None:
annotation = []
else:
ignore = True
elif default_value is not None:
default_value.append(token)
elif annotation is not None:
annotation.append(token)
elif token == '[':
next_is_optional = True
elif token in (']', ' ', ''):
pass
else:
arg.append(token)
if next_is_optional:
is_optional, next_is_optional = True, False
if token == '(':
paren_nesting += 1
elif token == ')':
paren_nesting -= 1
#from pprint import pprint; pprint(arg_tokens)
for arg, annotation, default_value, is_optional in arg_tokens:
if not arg or arg[0] == '/':
continue
arg_name = None
star_args = None
if arg[0] == '(':
names = [arg.pop(0)]
while names[-1] != ')' and arg:
names.append(arg.pop(0))
if names[-1] == ')':
names.pop()
arg_name = ', '.join(n for n in names[1:] if is_identifier(n))
elif is_identifier(arg[-1]):
arg_name = arg.pop()
elif arg[-1] == '...':
arg_name = 'args'
star_args = '*'
if not annotation and arg:
if len(arg) > 1 and arg[-1] == '*':
# C style prototype
annotation = [' '.join(a for a in arg if a != 'const')]
elif is_identifier(arg[-1]):
annotation = arg[-1:]
elif arg[-1] == ')':
annotation = [arg.pop()]
while annotation[0] != '(':
annotation.insert(0, arg.pop())
if arg and arg[0] in ('*', '**'):
star_args = arg[0]
data = { }
if arg_name:
data['name'] = arg_name
elif star_args == '*':
data['name'] = 'args'
elif star_args == '**':
data['name'] = 'kwargs'
else:
data['name'] = 'arg'
if annotation and len(annotation) == 1:
data['type'] = [type_name_to_typeref(annotation[0], module)]
if default_value:
default_value = [d for d in default_value if d]
if is_optional and default_value[-1] == ']':
default_value.pop()
data['default_value'] = ''.join(default_value).strip()
elif is_optional:
data['default_value'] = 'None'
if star_args:
data['arg_format'] = star_args
args.append(data)
# end of params, check for ret value
ret_type = None
if cur_token + 2 < len(tokens) and tokens[cur_token] in RETURN_TOKENS:
ret_type_start = cur_token + 2
# we might have a descriptive return value, 'list of fob'
while ret_type_start < len(tokens) and is_identifier(tokens[ret_type_start]):
if tokens[ret_type_start - 1].find('\n') != -1:
break
ret_type_start += 2
if ret_type_start < len(tokens) and ',' in tokens[ret_type_start]:
# fob(oar, baz) -> some info about the return, and more info, and more info.
# "some info" is unlikely to be a return type
ret_type = ''
cur_token += 2
else:
ret_type = ''.join(tokens[cur_token + 2:ret_type_start]).strip()
cur_token = ret_type_start
elif (cur_token + 4 < len(tokens) and
tokens[cur_token] == ':' and tokens[cur_token + 2] in RETURN_TOKENS):
ret_type_start = cur_token + 4
# we might have a descriptive return value, 'list of fob'
while ret_type_start < len(tokens) and is_identifier(tokens[ret_type_start]):
if tokens[ret_type_start - 1].find('\n') != -1:
break
ret_type_start += 2
if ret_type_start < len(tokens) and ',' in tokens[ret_type_start]:
# fob(oar, baz) -> some info about the return, and more info, and more info.
# "some info" is unlikely to be a return type
ret_type = ''
cur_token += 4
else:
ret_type = ''.join(tokens[cur_token + 4:ret_type_start]).strip()
cur_token = ret_type_start
return args, ret_type, cur_token
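# A small sketch (the helper name is hypothetical) of parse_args at work; the
# start index 5 is the token position just past the opening paren of 'fob('.
def _parse_args_demo():
    tokens = re.split(TOKENS_REGEX, 'fob(x, y) -> int')
    args, ret_type, cur_token = parse_args(tokens, 5, None)
    assert [a['name'] for a in args] == ['x', 'y']
    assert ret_type == 'int'
    return args, ret_type, cur_token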
if sys.version_info[0] >= 3:
str_types = (str, bytes)
else:
str_types = (str, unicode)
def get_overloads_from_doc_string(doc_str, mod, obj_class, func_name, extra_args = []):
if isinstance(doc_str, str_types):
decl_mod = None
if isinstance(mod, types.ModuleType):
decl_mod = mod
mod = decl_mod.__name__
elif mod is not None:
decl_mod = sys.modules.get(mod, None)
res = parse_doc_str(doc_str, mod, decl_mod, func_name, extra_args, obj_class)
if res:
for i, v in enumerate(res):
if 'ret_type' not in v or (not v['ret_type'] or v['ret_type'] == ('', '')):
alt_ret_type = v['doc'].find('returned as a ')
if alt_ret_type != -1:
last_space = v['doc'].find(' ', alt_ret_type + 14)
last_new_line = v['doc'].find('\n', alt_ret_type + 14)
if last_space == -1:
if last_new_line == -1:
last_space = None
else:
last_space = last_new_line
elif last_new_line == -1:
last_space = None
else:
last_space = last_new_line
ret_type_str = v['doc'][alt_ret_type+14:last_space]
if ret_type_str.endswith('.') or ret_type_str.endswith(','):
ret_type_str = ret_type_str[:-1]
new_ret_type = get_ret_type(ret_type_str, obj_class, decl_mod)
res[i]['ret_type'] = new_ret_type
return res
return None
def get_overloads(func, is_method = False):
if is_method:
extra_args = [{'type': PythonScraper.type_to_typelist(object), 'name': 'self'}]
else:
extra_args = []
func_doc = safe_getattr(func, '__doc__', None)
if not func_doc:
return None
return get_overloads_from_doc_string(
func_doc,
safe_getattr(func, '__module__', None),
safe_getattr(func, '__objclass__', None),
safe_getattr(func, '__name__', None),
extra_args,
)
def get_descriptor_type(descriptor):
return object
def get_new_overloads(type_obj, obj):
try:
type_doc = safe_getattr(type_obj, '__doc__', None)
type_type = type(type_obj)
except:
return None
res = get_overloads_from_doc_string(
type_doc,
safe_getattr(type_obj, '__module__', None),
type_type,
safe_getattr(type_obj, '__name__', None),
[{'type': PythonScraper.type_to_typelist(type), 'name': 'cls'}],
)
if not res:
obj_doc = safe_getattr(obj, '__doc__', None)
if not obj_doc:
return None
res = get_overloads_from_doc_string(
obj_doc,
safe_getattr(type_obj, '__module__', None),
type_type,
safe_getattr(type_obj, '__name__', None),
)
return res
def should_include_module(name):
return True
| apache-2.0 |
orwell-int/agent-server-game-python | setup.py | 1 | 1239 | #!/usr/bin/env python
import setuptools
# Hack to prevent a spurious "TypeError: 'NoneType' object is not callable"
# error in multiprocessing/util.py's _exit_function on exit of
# `python setup.py test` (see
# http://www.eby-sarna.com/pipermail/peak/2010-May/003357.html)
try:
import multiprocessing
assert multiprocessing
except ImportError:
pass
setuptools.setup(
name='orwell.agent',
version='0.0.1',
description='Agent connecting to the game server.',
author='',
author_email='',
packages=setuptools.find_packages(exclude="test"),
test_suite='nose.collector',
install_requires=['pyzmq', 'cliff'],
tests_require=['nose', 'coverage', 'mock'],
entry_points={
'console_scripts': [
'thought_police = orwell.agent.main:main',
]
},
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: POSIX :: Linux',
'Topic :: Utilities',
'Programming Language :: Python',
'Programming Language :: Python :: 3.6'],
python_requires='>=3.6.0',
)
| bsd-3-clause |
Turpial/Turpial | turpial/ui/gtk/tray.py | 3 | 2115 | # -*- coding: utf-8 -*-
# GTK3 tray icon for Turpial
from gi.repository import Gtk
from turpial import DESC
from turpial.ui.lang import i18n
class TrayIcon(Gtk.StatusIcon):
def __init__(self, base):
Gtk.StatusIcon.__init__(self)
self.base = base
self.set_from_pixbuf(self.base.load_image('turpial-tray.png', True))
self.set_tooltip_text(DESC)
self.menu = Gtk.Menu()
def __build_common_menu(self):
accounts = Gtk.MenuItem(i18n.get('accounts'))
preferences = Gtk.MenuItem(i18n.get('preferences'))
sounds = Gtk.CheckMenuItem(i18n.get('enable_sounds'))
#sound_.set_active(not self.sound._disable)
exit_ = Gtk.MenuItem(i18n.get('exit'))
self.menu.append(accounts)
self.menu.append(preferences)
self.menu.append(sounds)
self.menu.append(Gtk.SeparatorMenuItem())
self.menu.append(exit_)
accounts.connect('activate', self.base.show_accounts_dialog)
preferences.connect('activate', self.base.show_preferences_dialog)
sounds.connect('toggled', self.base.disable_sound)
exit_.connect('activate', self.base.main_quit)
def empty(self):
self.menu = Gtk.Menu()
self.__build_common_menu()
def normal(self):
self.menu = Gtk.Menu()
tweet = Gtk.MenuItem(i18n.get('new_tweet'))
tweet.connect('activate', self.base.show_update_box)
direct = Gtk.MenuItem(i18n.get('direct_message'))
direct.connect('activate', self.base.show_update_box, True)
self.menu.append(tweet)
self.menu.append(direct)
self.__build_common_menu()
def popup(self, button, activate_time):
self.menu.show_all()
self.menu.popup(None, None, None, None, button, activate_time)
return True
# Change the tray icon image to indicate updates
def notify(self):
self.set_from_pixbuf(self.base.load_image('turpial-tray-update.png', True))
# Clear the tray icon image
def clear(self):
self.set_from_pixbuf(self.base.load_image('turpial-tray.png', True))
| gpl-3.0 |
sidmitra/django_nonrel_testapp | django/forms/util.py | 311 | 1983 | from django.utils.html import conditional_escape
from django.utils.encoding import StrAndUnicode, force_unicode
from django.utils.safestring import mark_safe
# Import ValidationError so that it can be imported from this
# module to maintain backwards compatibility.
from django.core.exceptions import ValidationError
def flatatt(attrs):
"""
Convert a dictionary of attributes to a single string.
The returned string will contain a leading space followed by key="value",
XML-style pairs. It is assumed that the keys do not need to be XML-escaped.
If the passed dictionary is empty, then return an empty string.
"""
return u''.join([u' %s="%s"' % (k, conditional_escape(v)) for k, v in attrs.items()])
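# For illustration: flatatt({'id': 'header'}) returns u' id="header"' -- note
# the leading space; with several keys the pair order follows dict iteration
# order.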
class ErrorDict(dict, StrAndUnicode):
"""
A collection of errors that knows how to display itself in various formats.
The dictionary keys are the field names, and the values are the errors.
"""
def __unicode__(self):
return self.as_ul()
def as_ul(self):
if not self: return u''
return mark_safe(u'<ul class="errorlist">%s</ul>'
% ''.join([u'<li>%s%s</li>' % (k, force_unicode(v))
for k, v in self.items()]))
def as_text(self):
return u'\n'.join([u'* %s\n%s' % (k, u'\n'.join([u' * %s' % force_unicode(i) for i in v])) for k, v in self.items()])
class ErrorList(list, StrAndUnicode):
"""
A collection of errors that knows how to display itself in various formats.
"""
def __unicode__(self):
return self.as_ul()
def as_ul(self):
if not self: return u''
return mark_safe(u'<ul class="errorlist">%s</ul>'
% ''.join([u'<li>%s</li>' % conditional_escape(force_unicode(e)) for e in self]))
def as_text(self):
if not self: return u''
return u'\n'.join([u'* %s' % force_unicode(e) for e in self])
def __repr__(self):
return repr([force_unicode(e) for e in self])
| bsd-3-clause |
robclark/chromium | third_party/tlslite/tlslite/TLSConnection.py | 6 | 71226 | """
MAIN CLASS FOR TLS LITE (START HERE!).
"""
from __future__ import generators
import socket
from utils.compat import formatExceptionTrace
from TLSRecordLayer import TLSRecordLayer
from Session import Session
from constants import *
from utils.cryptomath import getRandomBytes
from errors import *
from messages import *
from mathtls import *
from HandshakeSettings import HandshakeSettings
class TLSConnection(TLSRecordLayer):
"""
This class wraps a socket and provides TLS handshaking and data
transfer.
To use this class, create a new instance, passing a connected
socket into the constructor. Then call some handshake function.
If the handshake completes without raising an exception, then a TLS
connection has been negotiated. You can transfer data over this
connection as if it were a socket.
This class provides both synchronous and asynchronous versions of
its key functions. The synchronous versions should be used when
writing single-or multi-threaded code using blocking sockets. The
asynchronous versions should be used when performing asynchronous,
event-based I/O with non-blocking sockets.
Asynchronous I/O is a complicated subject; typically, you should
not use the asynchronous functions directly, but should use some
framework like asyncore or Twisted which TLS Lite integrates with
(see
L{tlslite.integration.TLSAsyncDispatcherMixIn.TLSAsyncDispatcherMixIn} or
L{tlslite.integration.TLSTwistedProtocolWrapper.TLSTwistedProtocolWrapper}).
"""
def __init__(self, sock):
"""Create a new TLSConnection instance.
@param sock: The socket data will be transmitted on. The
socket should already be connected. It may be in blocking or
non-blocking mode.
@type sock: L{socket.socket}
"""
TLSRecordLayer.__init__(self, sock)
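    # A hedged usage sketch of the synchronous API; the host, port and
    # request below are invented for illustration:
    #
    #   sock = socket.create_connection(('example.com', 443))
    #   connection = TLSConnection(sock)
    #   connection.handshakeClientCert()
    #   connection.write('GET / HTTP/1.0\r\n\r\n')
    #   reply = connection.read(max=1024)
    #   connection.close()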
def handshakeClientSRP(self, username, password, session=None,
settings=None, checker=None, async=False):
"""Perform an SRP handshake in the role of client.
This function performs a TLS/SRP handshake. SRP mutually
authenticates both parties to each other using only a
username and password. This function may also perform a
combined SRP and server-certificate handshake, if the server
chooses to authenticate itself with a certificate chain in
addition to doing SRP.
TLS/SRP is non-standard. Most TLS implementations don't
support it. See
U{http://www.ietf.org/html.charters/tls-charter.html} or
U{http://trevp.net/tlssrp/} for the latest information on
TLS/SRP.
Like any handshake function, this can be called on a closed
TLS connection, or on a TLS connection that is already open.
If called on an open connection it performs a re-handshake.
If the function completes without raising an exception, the
TLS connection will be open and available for data transfer.
If an exception is raised, the connection will have been
automatically closed (if it was ever open).
@type username: str
@param username: The SRP username.
@type password: str
@param password: The SRP password.
@type session: L{tlslite.Session.Session}
@param session: A TLS session to attempt to resume. This
session must be an SRP session performed with the same username
and password as were passed in. If the resumption does not
succeed, a full SRP handshake will be performed.
@type settings: L{tlslite.HandshakeSettings.HandshakeSettings}
@param settings: Various settings which can be used to control
the ciphersuites, certificate types, and SSL/TLS versions
offered by the client.
@type checker: L{tlslite.Checker.Checker}
@param checker: A Checker instance. This instance will be
invoked to examine the other party's authentication
        credentials, if the handshake completes successfully.
@type async: bool
@param async: If False, this function will block until the
handshake is completed. If True, this function will return a
generator. Successive invocations of the generator will
return 0 if it is waiting to read from the socket, 1 if it is
waiting to write to the socket, or will raise StopIteration if
the handshake operation is completed.
@rtype: None or an iterable
@return: If 'async' is True, a generator object will be
returned.
@raise socket.error: If a socket error occurs.
@raise tlslite.errors.TLSAbruptCloseError: If the socket is closed
without a preceding alert.
@raise tlslite.errors.TLSAlert: If a TLS alert is signalled.
@raise tlslite.errors.TLSAuthenticationError: If the checker
doesn't like the other party's authentication credentials.
"""
handshaker = self._handshakeClientAsync(srpParams=(username, password),
session=session, settings=settings, checker=checker)
if async:
return handshaker
for result in handshaker:
pass
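    # A hedged sketch of driving the asynchronous form; the select()-based
    # loop is an assumption, not part of this class:
    #
    #   import select
    #   handshaker = connection.handshakeClientSRP('user', 'pass', async=True)
    #   for waitingFor in handshaker:
    #       if waitingFor == 0:
    #           select.select([connection.sock], [], [])   # wait for read
    #       elif waitingFor == 1:
    #           select.select([], [connection.sock], [])   # wait for write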
def handshakeClientCert(self, certChain=None, privateKey=None,
session=None, settings=None, checker=None,
async=False):
"""Perform a certificate-based handshake in the role of client.
This function performs an SSL or TLS handshake. The server
will authenticate itself using an X.509 or cryptoID certificate
chain. If the handshake succeeds, the server's certificate
chain will be stored in the session's serverCertChain attribute.
Unless a checker object is passed in, this function does no
validation or checking of the server's certificate chain.
If the server requests client authentication, the
client will send the passed-in certificate chain, and use the
passed-in private key to authenticate itself. If no
certificate chain and private key were passed in, the client
will attempt to proceed without client authentication. The
server may or may not allow this.
Like any handshake function, this can be called on a closed
TLS connection, or on a TLS connection that is already open.
If called on an open connection it performs a re-handshake.
If the function completes without raising an exception, the
TLS connection will be open and available for data transfer.
If an exception is raised, the connection will have been
automatically closed (if it was ever open).
@type certChain: L{tlslite.X509CertChain.X509CertChain} or
L{cryptoIDlib.CertChain.CertChain}
@param certChain: The certificate chain to be used if the
server requests client authentication.
@type privateKey: L{tlslite.utils.RSAKey.RSAKey}
@param privateKey: The private key to be used if the server
requests client authentication.
@type session: L{tlslite.Session.Session}
@param session: A TLS session to attempt to resume. If the
resumption does not succeed, a full handshake will be
performed.
@type settings: L{tlslite.HandshakeSettings.HandshakeSettings}
@param settings: Various settings which can be used to control
the ciphersuites, certificate types, and SSL/TLS versions
offered by the client.
@type checker: L{tlslite.Checker.Checker}
@param checker: A Checker instance. This instance will be
invoked to examine the other party's authentication
        credentials, if the handshake completes successfully.
@type async: bool
@param async: If False, this function will block until the
handshake is completed. If True, this function will return a
generator. Successive invocations of the generator will
return 0 if it is waiting to read from the socket, 1 if it is
waiting to write to the socket, or will raise StopIteration if
the handshake operation is completed.
@rtype: None or an iterable
@return: If 'async' is True, a generator object will be
returned.
@raise socket.error: If a socket error occurs.
@raise tlslite.errors.TLSAbruptCloseError: If the socket is closed
without a preceding alert.
@raise tlslite.errors.TLSAlert: If a TLS alert is signalled.
@raise tlslite.errors.TLSAuthenticationError: If the checker
doesn't like the other party's authentication credentials.
"""
handshaker = self._handshakeClientAsync(certParams=(certChain,
privateKey), session=session, settings=settings,
checker=checker)
if async:
return handshaker
for result in handshaker:
pass
def handshakeClientUnknown(self, srpCallback=None, certCallback=None,
session=None, settings=None, checker=None,
async=False):
"""Perform a to-be-determined type of handshake in the role of client.
This function performs an SSL or TLS handshake. If the server
requests client certificate authentication, the
certCallback will be invoked and should return a (certChain,
privateKey) pair. If the callback returns None, the library
will attempt to proceed without client authentication. The
server may or may not allow this.
If the server requests SRP authentication, the srpCallback
will be invoked and should return a (username, password) pair.
If the callback returns None, the local implementation will
signal a user_canceled error alert.
After the handshake completes, the client can inspect the
connection's session attribute to determine what type of
authentication was performed.
Like any handshake function, this can be called on a closed
TLS connection, or on a TLS connection that is already open.
If called on an open connection it performs a re-handshake.
If the function completes without raising an exception, the
TLS connection will be open and available for data transfer.
If an exception is raised, the connection will have been
automatically closed (if it was ever open).
@type srpCallback: callable
@param srpCallback: The callback to be used if the server
requests SRP authentication. If None, the client will not
offer support for SRP ciphersuites.
@type certCallback: callable
@param certCallback: The callback to be used if the server
requests client certificate authentication.
@type session: L{tlslite.Session.Session}
@param session: A TLS session to attempt to resume. If the
resumption does not succeed, a full handshake will be
performed.
@type settings: L{tlslite.HandshakeSettings.HandshakeSettings}
@param settings: Various settings which can be used to control
the ciphersuites, certificate types, and SSL/TLS versions
offered by the client.
@type checker: L{tlslite.Checker.Checker}
@param checker: A Checker instance. This instance will be
invoked to examine the other party's authentication
        credentials, if the handshake completes successfully.
@type async: bool
@param async: If False, this function will block until the
handshake is completed. If True, this function will return a
generator. Successive invocations of the generator will
return 0 if it is waiting to read from the socket, 1 if it is
waiting to write to the socket, or will raise StopIteration if
the handshake operation is completed.
@rtype: None or an iterable
@return: If 'async' is True, a generator object will be
returned.
@raise socket.error: If a socket error occurs.
@raise tlslite.errors.TLSAbruptCloseError: If the socket is closed
without a preceding alert.
@raise tlslite.errors.TLSAlert: If a TLS alert is signalled.
@raise tlslite.errors.TLSAuthenticationError: If the checker
doesn't like the other party's authentication credentials.
"""
handshaker = self._handshakeClientAsync(unknownParams=(srpCallback,
certCallback), session=session, settings=settings,
checker=checker)
if async:
return handshaker
for result in handshaker:
pass
def handshakeClientSharedKey(self, username, sharedKey, settings=None,
checker=None, async=False):
"""Perform a shared-key handshake in the role of client.
This function performs a shared-key handshake. Using shared
symmetric keys of high entropy (128 bits or greater) mutually
authenticates both parties to each other.
TLS with shared-keys is non-standard. Most TLS
implementations don't support it. See
U{http://www.ietf.org/html.charters/tls-charter.html} for the
latest information on TLS with shared-keys. If the shared-keys
        Internet-Draft changes or is superseded, TLS Lite will track
those changes, so the shared-key support in later versions of
TLS Lite may become incompatible with this version.
Like any handshake function, this can be called on a closed
TLS connection, or on a TLS connection that is already open.
If called on an open connection it performs a re-handshake.
If the function completes without raising an exception, the
TLS connection will be open and available for data transfer.
If an exception is raised, the connection will have been
automatically closed (if it was ever open).
@type username: str
@param username: The shared-key username.
@type sharedKey: str
@param sharedKey: The shared key.
@type settings: L{tlslite.HandshakeSettings.HandshakeSettings}
@param settings: Various settings which can be used to control
the ciphersuites, certificate types, and SSL/TLS versions
offered by the client.
@type checker: L{tlslite.Checker.Checker}
@param checker: A Checker instance. This instance will be
invoked to examine the other party's authentication
        credentials, if the handshake completes successfully.
@type async: bool
@param async: If False, this function will block until the
handshake is completed. If True, this function will return a
generator. Successive invocations of the generator will
return 0 if it is waiting to read from the socket, 1 if it is
waiting to write to the socket, or will raise StopIteration if
the handshake operation is completed.
@rtype: None or an iterable
@return: If 'async' is True, a generator object will be
returned.
@raise socket.error: If a socket error occurs.
@raise tlslite.errors.TLSAbruptCloseError: If the socket is closed
without a preceding alert.
@raise tlslite.errors.TLSAlert: If a TLS alert is signalled.
@raise tlslite.errors.TLSAuthenticationError: If the checker
doesn't like the other party's authentication credentials.
"""
handshaker = self._handshakeClientAsync(sharedKeyParams=(username,
sharedKey), settings=settings, checker=checker)
if async:
return handshaker
for result in handshaker:
pass
def _handshakeClientAsync(self, srpParams=(), certParams=(),
unknownParams=(), sharedKeyParams=(),
session=None, settings=None, checker=None,
recursive=False):
handshaker = self._handshakeClientAsyncHelper(srpParams=srpParams,
certParams=certParams, unknownParams=unknownParams,
sharedKeyParams=sharedKeyParams, session=session,
settings=settings, recursive=recursive)
for result in self._handshakeWrapperAsync(handshaker, checker):
yield result
def _handshakeClientAsyncHelper(self, srpParams, certParams, unknownParams,
sharedKeyParams, session, settings, recursive):
if not recursive:
self._handshakeStart(client=True)
#Unpack parameters
srpUsername = None # srpParams
password = None # srpParams
clientCertChain = None # certParams
privateKey = None # certParams
srpCallback = None # unknownParams
certCallback = None # unknownParams
#session # sharedKeyParams (or session)
#settings # settings
if srpParams:
srpUsername, password = srpParams
elif certParams:
clientCertChain, privateKey = certParams
elif unknownParams:
srpCallback, certCallback = unknownParams
elif sharedKeyParams:
session = Session()._createSharedKey(*sharedKeyParams)
if not settings:
settings = HandshakeSettings()
settings = settings._filter()
#Validate parameters
if srpUsername and not password:
raise ValueError("Caller passed a username but no password")
if password and not srpUsername:
raise ValueError("Caller passed a password but no username")
if clientCertChain and not privateKey:
raise ValueError("Caller passed a certChain but no privateKey")
if privateKey and not clientCertChain:
raise ValueError("Caller passed a privateKey but no certChain")
if clientCertChain:
foundType = False
try:
import cryptoIDlib.CertChain
if isinstance(clientCertChain, cryptoIDlib.CertChain.CertChain):
if "cryptoID" not in settings.certificateTypes:
raise ValueError("Client certificate doesn't "\
"match Handshake Settings")
settings.certificateTypes = ["cryptoID"]
foundType = True
except ImportError:
pass
if not foundType and isinstance(clientCertChain,
X509CertChain):
if "x509" not in settings.certificateTypes:
raise ValueError("Client certificate doesn't match "\
"Handshake Settings")
settings.certificateTypes = ["x509"]
foundType = True
if not foundType:
raise ValueError("Unrecognized certificate type")
if session:
if not session.valid():
session = None #ignore non-resumable sessions...
elif session.resumable and \
(session.srpUsername != srpUsername):
raise ValueError("Session username doesn't match")
#Add Faults to parameters
if srpUsername and self.fault == Fault.badUsername:
srpUsername += "GARBAGE"
if password and self.fault == Fault.badPassword:
password += "GARBAGE"
if sharedKeyParams:
identifier = sharedKeyParams[0]
sharedKey = sharedKeyParams[1]
if self.fault == Fault.badIdentifier:
identifier += "GARBAGE"
session = Session()._createSharedKey(identifier, sharedKey)
elif self.fault == Fault.badSharedKey:
sharedKey += "GARBAGE"
session = Session()._createSharedKey(identifier, sharedKey)
#Initialize locals
serverCertChain = None
cipherSuite = 0
certificateType = CertificateType.x509
premasterSecret = None
#Get client nonce
clientRandom = getRandomBytes(32)
#Initialize acceptable ciphersuites
cipherSuites = []
if srpParams:
cipherSuites += CipherSuite.getSrpRsaSuites(settings.cipherNames)
cipherSuites += CipherSuite.getSrpSuites(settings.cipherNames)
elif certParams:
cipherSuites += CipherSuite.getRsaSuites(settings.cipherNames)
elif unknownParams:
if srpCallback:
cipherSuites += \
CipherSuite.getSrpRsaSuites(settings.cipherNames)
cipherSuites += \
CipherSuite.getSrpSuites(settings.cipherNames)
cipherSuites += CipherSuite.getRsaSuites(settings.cipherNames)
elif sharedKeyParams:
cipherSuites += CipherSuite.getRsaSuites(settings.cipherNames)
else:
cipherSuites += CipherSuite.getRsaSuites(settings.cipherNames)
#Initialize acceptable certificate types
certificateTypes = settings._getCertificateTypes()
#Tentatively set the version to the client's minimum version.
#We'll use this for the ClientHello, and if an error occurs
#parsing the Server Hello, we'll use this version for the response
self.version = settings.maxVersion
#Either send ClientHello (with a resumable session)...
if session:
#If it's a resumable (i.e. not a shared-key session), then its
#ciphersuite must be one of the acceptable ciphersuites
if (not sharedKeyParams) and \
session.cipherSuite not in cipherSuites:
raise ValueError("Session's cipher suite not consistent "\
"with parameters")
else:
clientHello = ClientHello()
clientHello.create(settings.maxVersion, clientRandom,
session.sessionID, cipherSuites,
certificateTypes, session.srpUsername)
#Or send ClientHello (without)
else:
clientHello = ClientHello()
clientHello.create(settings.maxVersion, clientRandom,
createByteArraySequence([]), cipherSuites,
certificateTypes, srpUsername)
for result in self._sendMsg(clientHello):
yield result
#Get ServerHello (or missing_srp_username)
for result in self._getMsg((ContentType.handshake,
ContentType.alert),
HandshakeType.server_hello):
if result in (0,1):
yield result
else:
break
msg = result
if isinstance(msg, ServerHello):
serverHello = msg
elif isinstance(msg, Alert):
alert = msg
#If it's not a missing_srp_username, re-raise
if alert.description != AlertDescription.missing_srp_username:
self._shutdown(False)
raise TLSRemoteAlert(alert)
#If we're not in SRP callback mode, we won't have offered SRP
#without a username, so we shouldn't get this alert
if not srpCallback:
for result in self._sendError(\
AlertDescription.unexpected_message):
yield result
srpParams = srpCallback()
#If the callback returns None, cancel the handshake
        if srpParams is None:
for result in self._sendError(AlertDescription.user_canceled):
yield result
#Recursively perform handshake
for result in self._handshakeClientAsyncHelper(srpParams,
None, None, None, None, settings, True):
yield result
return
#Get the server version. Do this before anything else, so any
#error alerts will use the server's version
self.version = serverHello.server_version
#Future responses from server must use this version
self._versionCheck = True
#Check ServerHello
if serverHello.server_version < settings.minVersion:
for result in self._sendError(\
AlertDescription.protocol_version,
"Too old version: %s" % str(serverHello.server_version)):
yield result
if serverHello.server_version > settings.maxVersion:
for result in self._sendError(\
AlertDescription.protocol_version,
"Too new version: %s" % str(serverHello.server_version)):
yield result
if serverHello.cipher_suite not in cipherSuites:
for result in self._sendError(\
AlertDescription.illegal_parameter,
"Server responded with incorrect ciphersuite"):
yield result
if serverHello.certificate_type not in certificateTypes:
for result in self._sendError(\
AlertDescription.illegal_parameter,
"Server responded with incorrect certificate type"):
yield result
if serverHello.compression_method != 0:
for result in self._sendError(\
AlertDescription.illegal_parameter,
"Server responded with incorrect compression method"):
yield result
#Get the server nonce
serverRandom = serverHello.random
#If the server agrees to resume
if session and session.sessionID and \
serverHello.session_id == session.sessionID:
#If a shared-key, we're flexible about suites; otherwise the
#server-chosen suite has to match the session's suite
if sharedKeyParams:
session.cipherSuite = serverHello.cipher_suite
elif serverHello.cipher_suite != session.cipherSuite:
for result in self._sendError(\
AlertDescription.illegal_parameter,\
"Server's ciphersuite doesn't match session"):
yield result
#Set the session for this connection
self.session = session
#Calculate pending connection states
self._calcPendingStates(clientRandom, serverRandom,
settings.cipherImplementations)
#Exchange ChangeCipherSpec and Finished messages
for result in self._getFinished():
yield result
for result in self._sendFinished():
yield result
#Mark the connection as open
self._handshakeDone(resumed=True)
#If server DOES NOT agree to resume
else:
if sharedKeyParams:
for result in self._sendError(\
AlertDescription.user_canceled,
"Was expecting a shared-key resumption"):
yield result
#We've already validated these
cipherSuite = serverHello.cipher_suite
certificateType = serverHello.certificate_type
#If the server chose an SRP suite...
if cipherSuite in CipherSuite.srpSuites:
#Get ServerKeyExchange, ServerHelloDone
for result in self._getMsg(ContentType.handshake,
HandshakeType.server_key_exchange, cipherSuite):
if result in (0,1):
yield result
else:
break
serverKeyExchange = result
for result in self._getMsg(ContentType.handshake,
HandshakeType.server_hello_done):
if result in (0,1):
yield result
else:
break
serverHelloDone = result
#If the server chose an SRP+RSA suite...
elif cipherSuite in CipherSuite.srpRsaSuites:
#Get Certificate, ServerKeyExchange, ServerHelloDone
for result in self._getMsg(ContentType.handshake,
HandshakeType.certificate, certificateType):
if result in (0,1):
yield result
else:
break
serverCertificate = result
for result in self._getMsg(ContentType.handshake,
HandshakeType.server_key_exchange, cipherSuite):
if result in (0,1):
yield result
else:
break
serverKeyExchange = result
for result in self._getMsg(ContentType.handshake,
HandshakeType.server_hello_done):
if result in (0,1):
yield result
else:
break
serverHelloDone = result
#If the server chose an RSA suite...
elif cipherSuite in CipherSuite.rsaSuites:
#Get Certificate[, CertificateRequest], ServerHelloDone
for result in self._getMsg(ContentType.handshake,
HandshakeType.certificate, certificateType):
if result in (0,1):
yield result
else:
break
serverCertificate = result
for result in self._getMsg(ContentType.handshake,
(HandshakeType.server_hello_done,
HandshakeType.certificate_request)):
if result in (0,1):
yield result
else:
break
msg = result
certificateRequest = None
if isinstance(msg, CertificateRequest):
certificateRequest = msg
for result in self._getMsg(ContentType.handshake,
HandshakeType.server_hello_done):
if result in (0,1):
yield result
else:
break
serverHelloDone = result
elif isinstance(msg, ServerHelloDone):
serverHelloDone = msg
else:
raise AssertionError()
#Calculate SRP premaster secret, if server chose an SRP or
#SRP+RSA suite
if cipherSuite in CipherSuite.srpSuites + \
CipherSuite.srpRsaSuites:
#Get and check the server's group parameters and B value
N = serverKeyExchange.srp_N
g = serverKeyExchange.srp_g
s = serverKeyExchange.srp_s
B = serverKeyExchange.srp_B
if (g,N) not in goodGroupParameters:
for result in self._sendError(\
AlertDescription.untrusted_srp_parameters,
"Unknown group parameters"):
yield result
if numBits(N) < settings.minKeySize:
for result in self._sendError(\
AlertDescription.untrusted_srp_parameters,
"N value is too small: %d" % numBits(N)):
yield result
if numBits(N) > settings.maxKeySize:
for result in self._sendError(\
AlertDescription.untrusted_srp_parameters,
"N value is too large: %d" % numBits(N)):
yield result
if B % N == 0:
for result in self._sendError(\
AlertDescription.illegal_parameter,
"Suspicious B value"):
yield result
#Check the server's signature, if server chose an
#SRP+RSA suite
if cipherSuite in CipherSuite.srpRsaSuites:
#Hash ServerKeyExchange/ServerSRPParams
hashBytes = serverKeyExchange.hash(clientRandom,
serverRandom)
#Extract signature bytes from ServerKeyExchange
sigBytes = serverKeyExchange.signature
if len(sigBytes) == 0:
for result in self._sendError(\
AlertDescription.illegal_parameter,
"Server sent an SRP ServerKeyExchange "\
"message without a signature"):
yield result
#Get server's public key from the Certificate message
for result in self._getKeyFromChain(serverCertificate,
settings):
if result in (0,1):
yield result
else:
break
publicKey, serverCertChain = result
#Verify signature
if not publicKey.verify(sigBytes, hashBytes):
for result in self._sendError(\
AlertDescription.decrypt_error,
"Signature failed to verify"):
yield result
#Calculate client's ephemeral DH values (a, A)
a = bytesToNumber(getRandomBytes(32))
A = powMod(g, a, N)
#Calculate client's static DH values (x, v)
x = makeX(bytesToString(s), srpUsername, password)
v = powMod(g, x, N)
#Calculate u
u = makeU(N, A, B)
#Calculate premaster secret
k = makeK(N, g)
S = powMod((B - (k*v)) % N, a+(u*x), N)
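                #The line above is SRP-6a (cf. RFC 5054):
                #    S = (B - k * g**x) ** (a + u * x)  (mod N)
                #with x = H(s | H(I ":" P)), u = H(A | B) and k = H(N | g),
                #so client and server arrive at the same premaster secret.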
if self.fault == Fault.badA:
A = N
S = 0
premasterSecret = numberToBytes(S)
#Send ClientKeyExchange
for result in self._sendMsg(\
ClientKeyExchange(cipherSuite).createSRP(A)):
yield result
#Calculate RSA premaster secret, if server chose an RSA suite
elif cipherSuite in CipherSuite.rsaSuites:
#Handle the presence of a CertificateRequest
if certificateRequest:
if unknownParams and certCallback:
certParamsNew = certCallback()
if certParamsNew:
clientCertChain, privateKey = certParamsNew
#Get server's public key from the Certificate message
for result in self._getKeyFromChain(serverCertificate,
settings):
if result in (0,1):
yield result
else:
break
publicKey, serverCertChain = result
#Calculate premaster secret
premasterSecret = getRandomBytes(48)
premasterSecret[0] = settings.maxVersion[0]
premasterSecret[1] = settings.maxVersion[1]
if self.fault == Fault.badPremasterPadding:
premasterSecret[0] = 5
if self.fault == Fault.shortPremasterSecret:
premasterSecret = premasterSecret[:-1]
#Encrypt premaster secret to server's public key
encryptedPreMasterSecret = publicKey.encrypt(premasterSecret)
#If client authentication was requested, send Certificate
#message, either with certificates or empty
if certificateRequest:
clientCertificate = Certificate(certificateType)
if clientCertChain:
#Check to make sure we have the same type of
#certificates the server requested
wrongType = False
if certificateType == CertificateType.x509:
if not isinstance(clientCertChain, X509CertChain):
wrongType = True
elif certificateType == CertificateType.cryptoID:
if not isinstance(clientCertChain,
cryptoIDlib.CertChain.CertChain):
wrongType = True
if wrongType:
for result in self._sendError(\
AlertDescription.handshake_failure,
"Client certificate is of wrong type"):
yield result
clientCertificate.create(clientCertChain)
for result in self._sendMsg(clientCertificate):
yield result
else:
#The server didn't request client auth, so we
#zeroize these so the clientCertChain won't be
#stored in the session.
privateKey = None
clientCertChain = None
#Send ClientKeyExchange
clientKeyExchange = ClientKeyExchange(cipherSuite,
self.version)
clientKeyExchange.createRSA(encryptedPreMasterSecret)
for result in self._sendMsg(clientKeyExchange):
yield result
#If client authentication was requested and we have a
#private key, send CertificateVerify
if certificateRequest and privateKey:
if self.version == (3,0):
#Create a temporary session object, just for the
#purpose of creating the CertificateVerify
session = Session()
session._calcMasterSecret(self.version,
premasterSecret,
clientRandom,
serverRandom)
verifyBytes = self._calcSSLHandshakeHash(\
session.masterSecret, "")
elif self.version in ((3,1), (3,2)):
verifyBytes = stringToBytes(\
self._handshake_md5.digest() + \
self._handshake_sha.digest())
if self.fault == Fault.badVerifyMessage:
verifyBytes[0] = ((verifyBytes[0]+1) % 256)
signedBytes = privateKey.sign(verifyBytes)
certificateVerify = CertificateVerify()
certificateVerify.create(signedBytes)
for result in self._sendMsg(certificateVerify):
yield result
#Create the session object
self.session = Session()
self.session._calcMasterSecret(self.version, premasterSecret,
clientRandom, serverRandom)
self.session.sessionID = serverHello.session_id
self.session.cipherSuite = cipherSuite
self.session.srpUsername = srpUsername
self.session.clientCertChain = clientCertChain
self.session.serverCertChain = serverCertChain
#Calculate pending connection states
self._calcPendingStates(clientRandom, serverRandom,
settings.cipherImplementations)
#Exchange ChangeCipherSpec and Finished messages
for result in self._sendFinished():
yield result
for result in self._getFinished():
yield result
#Mark the connection as open
self.session._setResumable(True)
self._handshakeDone(resumed=False)
def handshakeServer(self, sharedKeyDB=None, verifierDB=None,
certChain=None, privateKey=None, reqCert=False,
sessionCache=None, settings=None, checker=None,
reqCAs=None, tlsIntolerant=False):
"""Perform a handshake in the role of server.
This function performs an SSL or TLS handshake. Depending on
the arguments and the behavior of the client, this function can
perform a shared-key, SRP, or certificate-based handshake. It
can also perform a combined SRP and server-certificate
handshake.
Like any handshake function, this can be called on a closed
TLS connection, or on a TLS connection that is already open.
If called on an open connection it performs a re-handshake.
This function does not send a Hello Request message before
performing the handshake, so if re-handshaking is required,
the server must signal the client to begin the re-handshake
through some other means.
If the function completes without raising an exception, the
TLS connection will be open and available for data transfer.
If an exception is raised, the connection will have been
automatically closed (if it was ever open).
@type sharedKeyDB: L{tlslite.SharedKeyDB.SharedKeyDB}
@param sharedKeyDB: A database of shared symmetric keys
associated with usernames. If the client performs a
shared-key handshake, the session's sharedKeyUsername
attribute will be set.
@type verifierDB: L{tlslite.VerifierDB.VerifierDB}
@param verifierDB: A database of SRP password verifiers
associated with usernames. If the client performs an SRP
handshake, the session's srpUsername attribute will be set.
@type certChain: L{tlslite.X509CertChain.X509CertChain} or
L{cryptoIDlib.CertChain.CertChain}
@param certChain: The certificate chain to be used if the
client requests server certificate authentication.
@type privateKey: L{tlslite.utils.RSAKey.RSAKey}
@param privateKey: The private key to be used if the client
requests server certificate authentication.
@type reqCert: bool
@param reqCert: Whether to request client certificate
authentication. This only applies if the client chooses server
certificate authentication; if the client chooses SRP or
shared-key authentication, this will be ignored. If the client
performs a client certificate authentication, the sessions's
clientCertChain attribute will be set.
@type sessionCache: L{tlslite.SessionCache.SessionCache}
@param sessionCache: An in-memory cache of resumable sessions.
The client can resume sessions from this cache. Alternatively,
if the client performs a full handshake, a new session will be
added to the cache.
@type settings: L{tlslite.HandshakeSettings.HandshakeSettings}
@param settings: Various settings which can be used to control
the ciphersuites and SSL/TLS version chosen by the server.
@type checker: L{tlslite.Checker.Checker}
@param checker: A Checker instance. This instance will be
invoked to examine the other party's authentication
        credentials, if the handshake completes successfully.
@type reqCAs: list of L{array.array} of unsigned bytes
@param reqCAs: A collection of DER-encoded DistinguishedNames that
will be sent along with a certificate request. This does not affect
verification.
@raise socket.error: If a socket error occurs.
@raise tlslite.errors.TLSAbruptCloseError: If the socket is closed
without a preceding alert.
@raise tlslite.errors.TLSAlert: If a TLS alert is signalled.
@raise tlslite.errors.TLSAuthenticationError: If the checker
doesn't like the other party's authentication credentials.
"""
for result in self.handshakeServerAsync(sharedKeyDB, verifierDB,
certChain, privateKey, reqCert, sessionCache, settings,
checker, reqCAs, tlsIntolerant):
pass
def handshakeServerAsync(self, sharedKeyDB=None, verifierDB=None,
certChain=None, privateKey=None, reqCert=False,
sessionCache=None, settings=None, checker=None,
reqCAs=None, tlsIntolerant=False):
"""Start a server handshake operation on the TLS connection.
This function returns a generator which behaves similarly to
handshakeServer(). Successive invocations of the generator
will return 0 if it is waiting to read from the socket, 1 if it is
waiting to write to the socket, or it will raise StopIteration
if the handshake operation is complete.
@rtype: iterable
@return: A generator; see above for details.
"""
handshaker = self._handshakeServerAsyncHelper(\
sharedKeyDB=sharedKeyDB,
verifierDB=verifierDB, certChain=certChain,
privateKey=privateKey, reqCert=reqCert,
sessionCache=sessionCache, settings=settings,
reqCAs=reqCAs,
tlsIntolerant=tlsIntolerant)
for result in self._handshakeWrapperAsync(handshaker, checker):
yield result
def _handshakeServerAsyncHelper(self, sharedKeyDB, verifierDB,
certChain, privateKey, reqCert, sessionCache,
settings, reqCAs, tlsIntolerant):
self._handshakeStart(client=False)
if (not sharedKeyDB) and (not verifierDB) and (not certChain):
raise ValueError("Caller passed no authentication credentials")
if certChain and not privateKey:
raise ValueError("Caller passed a certChain but no privateKey")
if privateKey and not certChain:
raise ValueError("Caller passed a privateKey but no certChain")
if reqCAs and not reqCert:
raise ValueError("Caller passed reqCAs but not reqCert")
if not settings:
settings = HandshakeSettings()
settings = settings._filter()
#Initialize acceptable cipher suites
cipherSuites = []
if verifierDB:
if certChain:
cipherSuites += \
CipherSuite.getSrpRsaSuites(settings.cipherNames)
cipherSuites += CipherSuite.getSrpSuites(settings.cipherNames)
if sharedKeyDB or certChain:
cipherSuites += CipherSuite.getRsaSuites(settings.cipherNames)
#Initialize acceptable certificate type
certificateType = None
if certChain:
try:
import cryptoIDlib.CertChain
if isinstance(certChain, cryptoIDlib.CertChain.CertChain):
certificateType = CertificateType.cryptoID
except ImportError:
pass
if isinstance(certChain, X509CertChain):
certificateType = CertificateType.x509
        if certificateType is None:
raise ValueError("Unrecognized certificate type")
#Initialize locals
clientCertChain = None
serverCertChain = None #We may set certChain to this later
postFinishedError = None
#Tentatively set version to most-desirable version, so if an error
#occurs parsing the ClientHello, this is what we'll use for the
#error alert
self.version = settings.maxVersion
#Get ClientHello
for result in self._getMsg(ContentType.handshake,
HandshakeType.client_hello):
if result in (0,1):
yield result
else:
break
clientHello = result
#If client's version is too low, reject it
if clientHello.client_version < settings.minVersion:
self.version = settings.minVersion
for result in self._sendError(\
AlertDescription.protocol_version,
"Too old version: %s" % str(clientHello.client_version)):
yield result
if tlsIntolerant and clientHello.client_version > (3, 0):
for result in self._sendError(\
AlertDescription.handshake_failure):
yield result
#If client's version is too high, propose my highest version
elif clientHello.client_version > settings.maxVersion:
self.version = settings.maxVersion
else:
#Set the version to the client's version
self.version = clientHello.client_version
#Get the client nonce; create server nonce
clientRandom = clientHello.random
serverRandom = getRandomBytes(32)
#Calculate the first cipher suite intersection.
#This is the 'privileged' ciphersuite. We'll use it if we're
#doing a shared-key resumption or a new negotiation. In fact,
#the only time we won't use it is if we're resuming a non-sharedkey
#session, in which case we use the ciphersuite from the session.
#
#Given the current ciphersuite ordering, this means we prefer SRP
#over non-SRP.
for cipherSuite in cipherSuites:
if cipherSuite in clientHello.cipher_suites:
break
else:
for result in self._sendError(\
AlertDescription.handshake_failure):
yield result
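        #(The else above binds to the for loop: Python runs it only when
        # the loop completes without break, i.e. no common cipher suite.)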
#If resumption was requested...
if clientHello.session_id and (sharedKeyDB or sessionCache):
session = None
#Check in the sharedKeys container
if sharedKeyDB and len(clientHello.session_id)==16:
try:
                    #Trim off zero padding, if any. The for/else keeps a
                    #full 16-byte username intact when no padding is found.
                    for x in range(16):
                        if clientHello.session_id[x]==0:
                            break
                    else:
                        x = 16
self.allegedSharedKeyUsername = bytesToString(\
clientHello.session_id[:x])
session = sharedKeyDB[self.allegedSharedKeyUsername]
if not session.sharedKey:
raise AssertionError()
#use privileged ciphersuite
session.cipherSuite = cipherSuite
except KeyError:
pass
#Then check in the session cache
if sessionCache and not session:
try:
session = sessionCache[bytesToString(\
clientHello.session_id)]
if session.sharedKey:
raise AssertionError()
if not session.resumable:
raise AssertionError()
#Check for consistency with ClientHello
if session.cipherSuite not in cipherSuites:
for result in self._sendError(\
AlertDescription.handshake_failure):
yield result
if session.cipherSuite not in clientHello.cipher_suites:
for result in self._sendError(\
AlertDescription.handshake_failure):
yield result
if clientHello.srp_username:
if clientHello.srp_username != session.srpUsername:
for result in self._sendError(\
AlertDescription.handshake_failure):
yield result
except KeyError:
pass
#If a session is found..
if session:
#Set the session
self.session = session
#Send ServerHello
serverHello = ServerHello()
serverHello.create(self.version, serverRandom,
session.sessionID, session.cipherSuite,
certificateType)
for result in self._sendMsg(serverHello):
yield result
#From here on, the client's messages must have the right version
self._versionCheck = True
#Calculate pending connection states
self._calcPendingStates(clientRandom, serverRandom,
settings.cipherImplementations)
#Exchange ChangeCipherSpec and Finished messages
for result in self._sendFinished():
yield result
for result in self._getFinished():
yield result
#Mark the connection as open
self._handshakeDone(resumed=True)
return
#If not a resumption...
#TRICKY: we might have chosen an RSA suite that was only deemed
#acceptable because of the shared-key resumption. If the shared-
#key resumption failed, because the identifier wasn't recognized,
#we might fall through to here, where we have an RSA suite
#chosen, but no certificate.
if cipherSuite in CipherSuite.rsaSuites and not certChain:
for result in self._sendError(\
AlertDescription.handshake_failure):
yield result
#If an RSA suite is chosen, check for certificate type intersection
#(We do this check down here because if the mismatch occurs but the
# client is using a shared-key session, it's okay)
if cipherSuite in CipherSuite.rsaSuites + \
CipherSuite.srpRsaSuites:
if certificateType not in clientHello.certificate_types:
for result in self._sendError(\
AlertDescription.handshake_failure,
"the client doesn't support my certificate type"):
yield result
#Move certChain -> serverCertChain, now that we're using it
serverCertChain = certChain
#Create sessionID
if sessionCache:
sessionID = getRandomBytes(32)
else:
sessionID = createByteArraySequence([])
#If we've selected an SRP suite, exchange keys and calculate
#premaster secret:
if cipherSuite in CipherSuite.srpSuites + CipherSuite.srpRsaSuites:
#If there's no SRP username...
if not clientHello.srp_username:
#Ask the client to re-send ClientHello with one
for result in self._sendMsg(Alert().create(\
AlertDescription.missing_srp_username,
AlertLevel.warning)):
yield result
#Get ClientHello
for result in self._getMsg(ContentType.handshake,
HandshakeType.client_hello):
if result in (0,1):
yield result
else:
break
clientHello = result
#Check ClientHello
#If client's version is too low, reject it (COPIED CODE; BAD!)
if clientHello.client_version < settings.minVersion:
self.version = settings.minVersion
for result in self._sendError(\
AlertDescription.protocol_version,
"Too old version: %s" % str(clientHello.client_version)):
yield result
#If client's version is too high, propose my highest version
elif clientHello.client_version > settings.maxVersion:
self.version = settings.maxVersion
else:
#Set the version to the client's version
self.version = clientHello.client_version
#Recalculate the privileged cipher suite, making sure to
#pick an SRP suite
cipherSuites = [c for c in cipherSuites if c in \
CipherSuite.srpSuites + \
CipherSuite.srpRsaSuites]
for cipherSuite in cipherSuites:
if cipherSuite in clientHello.cipher_suites:
break
else:
for result in self._sendError(\
AlertDescription.handshake_failure):
yield result
#Get the client nonce; create server nonce
clientRandom = clientHello.random
serverRandom = getRandomBytes(32)
#The username better be there, this time
if not clientHello.srp_username:
for result in self._sendError(\
AlertDescription.illegal_parameter,
"Client resent a hello, but without the SRP"\
" username"):
yield result
#Get username
self.allegedSrpUsername = clientHello.srp_username
#Get parameters from username
try:
entry = verifierDB[self.allegedSrpUsername]
except KeyError:
for result in self._sendError(\
AlertDescription.unknown_srp_username):
yield result
(N, g, s, v) = entry
#Calculate server's ephemeral DH values (b, B)
b = bytesToNumber(getRandomBytes(32))
k = makeK(N, g)
B = (powMod(g, b, N) + (k*v)) % N
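            #Per SRP-6a (cf. RFC 5054): k = H(N, g) and the server's public
            #value is B = (k*v + g^b) mod N, where v is the stored verifier.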
#Create ServerKeyExchange, signing it if necessary
serverKeyExchange = ServerKeyExchange(cipherSuite)
serverKeyExchange.createSRP(N, g, stringToBytes(s), B)
if cipherSuite in CipherSuite.srpRsaSuites:
hashBytes = serverKeyExchange.hash(clientRandom,
serverRandom)
serverKeyExchange.signature = privateKey.sign(hashBytes)
#Send ServerHello[, Certificate], ServerKeyExchange,
#ServerHelloDone
msgs = []
serverHello = ServerHello()
serverHello.create(self.version, serverRandom, sessionID,
cipherSuite, certificateType)
msgs.append(serverHello)
if cipherSuite in CipherSuite.srpRsaSuites:
certificateMsg = Certificate(certificateType)
certificateMsg.create(serverCertChain)
msgs.append(certificateMsg)
msgs.append(serverKeyExchange)
msgs.append(ServerHelloDone())
for result in self._sendMsgs(msgs):
yield result
#From here on, the client's messages must have the right version
self._versionCheck = True
#Get and check ClientKeyExchange
for result in self._getMsg(ContentType.handshake,
HandshakeType.client_key_exchange,
cipherSuite):
if result in (0,1):
yield result
else:
break
clientKeyExchange = result
A = clientKeyExchange.srp_A
if A % N == 0:
postFinishedError = (AlertDescription.illegal_parameter,
"Suspicious A value")
#Calculate u
u = makeU(N, A, B)
#Calculate premaster secret
S = powMod((A * powMod(v,u,N)) % N, b, N)
premasterSecret = numberToBytes(S)
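            #Both sides now share S: the server computes (A * v^u)^b mod N,
            #while the client computes (B - k*g^x)^(a + u*x) mod N, where x
            #is derived from the password — so equal S means the password
            #matched without ever crossing the wire.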
#If we've selected an RSA suite, exchange keys and calculate
#premaster secret:
elif cipherSuite in CipherSuite.rsaSuites:
#Send ServerHello, Certificate[, CertificateRequest],
#ServerHelloDone
msgs = []
msgs.append(ServerHello().create(self.version, serverRandom,
sessionID, cipherSuite, certificateType))
msgs.append(Certificate(certificateType).create(serverCertChain))
if reqCert and reqCAs:
msgs.append(CertificateRequest().create([], reqCAs))
elif reqCert:
msgs.append(CertificateRequest())
msgs.append(ServerHelloDone())
for result in self._sendMsgs(msgs):
yield result
#From here on, the client's messages must have the right version
self._versionCheck = True
#Get [Certificate,] (if was requested)
if reqCert:
if self.version == (3,0):
for result in self._getMsg((ContentType.handshake,
ContentType.alert),
HandshakeType.certificate,
certificateType):
if result in (0,1):
yield result
else:
break
msg = result
if isinstance(msg, Alert):
#If it's not a no_certificate alert, re-raise
alert = msg
if alert.description != \
AlertDescription.no_certificate:
self._shutdown(False)
raise TLSRemoteAlert(alert)
elif isinstance(msg, Certificate):
clientCertificate = msg
if clientCertificate.certChain and \
clientCertificate.certChain.getNumCerts()!=0:
clientCertChain = clientCertificate.certChain
else:
raise AssertionError()
elif self.version in ((3,1), (3,2)):
for result in self._getMsg(ContentType.handshake,
HandshakeType.certificate,
certificateType):
if result in (0,1):
yield result
else:
break
clientCertificate = result
if clientCertificate.certChain and \
clientCertificate.certChain.getNumCerts()!=0:
clientCertChain = clientCertificate.certChain
else:
raise AssertionError()
#Get ClientKeyExchange
for result in self._getMsg(ContentType.handshake,
HandshakeType.client_key_exchange,
cipherSuite):
if result in (0,1):
yield result
else:
break
clientKeyExchange = result
#Decrypt ClientKeyExchange
premasterSecret = privateKey.decrypt(\
clientKeyExchange.encryptedPreMasterSecret)
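            #Bleichenbacher countermeasure (cf. RFC 5246 7.4.7.1): on any
            #decryption or format failure below we silently substitute
            #random bytes, so the handshake fails later at the Finished
            #check and gives no padding oracle to an attacker.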
            randomPreMasterSecret = getRandomBytes(48)
            if not premasterSecret:
                premasterSecret = randomPreMasterSecret
            elif len(premasterSecret)!=48:
                premasterSecret = randomPreMasterSecret
            else:
                #Only index premasterSecret once we know it is non-empty
                versionCheck = (premasterSecret[0], premasterSecret[1])
                if versionCheck != clientHello.client_version:
                    if versionCheck != self.version: #Tolerate buggy IE clients
                        premasterSecret = randomPreMasterSecret
#Get and check CertificateVerify, if relevant
if clientCertChain:
if self.version == (3,0):
#Create a temporary session object, just for the purpose
#of checking the CertificateVerify
session = Session()
session._calcMasterSecret(self.version, premasterSecret,
clientRandom, serverRandom)
verifyBytes = self._calcSSLHandshakeHash(\
session.masterSecret, "")
elif self.version in ((3,1), (3,2)):
verifyBytes = stringToBytes(self._handshake_md5.digest() +\
self._handshake_sha.digest())
for result in self._getMsg(ContentType.handshake,
HandshakeType.certificate_verify):
if result in (0,1):
yield result
else:
break
certificateVerify = result
publicKey = clientCertChain.getEndEntityPublicKey()
if len(publicKey) < settings.minKeySize:
postFinishedError = (AlertDescription.handshake_failure,
"Client's public key too small: %d" % len(publicKey))
if len(publicKey) > settings.maxKeySize:
postFinishedError = (AlertDescription.handshake_failure,
"Client's public key too large: %d" % len(publicKey))
if not publicKey.verify(certificateVerify.signature,
verifyBytes):
postFinishedError = (AlertDescription.decrypt_error,
"Signature failed to verify")
#Create the session object
self.session = Session()
self.session._calcMasterSecret(self.version, premasterSecret,
clientRandom, serverRandom)
self.session.sessionID = sessionID
self.session.cipherSuite = cipherSuite
self.session.srpUsername = self.allegedSrpUsername
self.session.clientCertChain = clientCertChain
self.session.serverCertChain = serverCertChain
#Calculate pending connection states
self._calcPendingStates(clientRandom, serverRandom,
settings.cipherImplementations)
#Exchange ChangeCipherSpec and Finished messages
for result in self._getFinished():
yield result
#If we were holding a post-finished error until receiving the client
#finished message, send it now. We delay the call until this point
#because calling sendError() throws an exception, and our caller might
#shut down the socket upon receiving the exception. If he did, and the
#client was still sending its ChangeCipherSpec or Finished messages, it
#would cause a socket error on the client side. This is a lot of
#consideration to show to misbehaving clients, but this would also
#cause problems with fault-testing.
if postFinishedError:
for result in self._sendError(*postFinishedError):
yield result
for result in self._sendFinished():
yield result
#Add the session object to the session cache
if sessionCache and sessionID:
sessionCache[bytesToString(sessionID)] = self.session
#Mark the connection as open
self.session._setResumable(True)
self._handshakeDone(resumed=False)
def _handshakeWrapperAsync(self, handshaker, checker):
if not self.fault:
try:
for result in handshaker:
yield result
if checker:
try:
checker(self)
except TLSAuthenticationError:
alert = Alert().create(AlertDescription.close_notify,
AlertLevel.fatal)
for result in self._sendMsg(alert):
yield result
raise
except:
self._shutdown(False)
raise
else:
try:
for result in handshaker:
yield result
if checker:
try:
checker(self)
except TLSAuthenticationError:
alert = Alert().create(AlertDescription.close_notify,
AlertLevel.fatal)
for result in self._sendMsg(alert):
yield result
raise
except socket.error, e:
raise TLSFaultError("socket error!")
except TLSAbruptCloseError, e:
raise TLSFaultError("abrupt close error!")
except TLSAlert, alert:
if alert.description not in Fault.faultAlerts[self.fault]:
raise TLSFaultError(str(alert))
else:
pass
except:
self._shutdown(False)
raise
else:
raise TLSFaultError("No error!")
def _getKeyFromChain(self, certificate, settings):
#Get and check cert chain from the Certificate message
certChain = certificate.certChain
if not certChain or certChain.getNumCerts() == 0:
for result in self._sendError(AlertDescription.illegal_parameter,
"Other party sent a Certificate message without "\
"certificates"):
yield result
#Get and check public key from the cert chain
publicKey = certChain.getEndEntityPublicKey()
if len(publicKey) < settings.minKeySize:
for result in self._sendError(AlertDescription.handshake_failure,
"Other party's public key too small: %d" % len(publicKey)):
yield result
if len(publicKey) > settings.maxKeySize:
for result in self._sendError(AlertDescription.handshake_failure,
"Other party's public key too large: %d" % len(publicKey)):
yield result
yield publicKey, certChain
| bsd-3-clause |
tensorflow/examples | tensorflow_examples/lite/model_maker/demo/image_classification_demo_test.py | 1 | 2699 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
from unittest.mock import patch
import tensorflow as tf
from tensorflow_examples.lite.model_maker.core import test_util
from tensorflow_examples.lite.model_maker.demo import image_classification_demo
from tflite_model_maker import image_classifier
from_folder_fn = image_classifier.DataLoader.from_folder
def patch_data_loader():
"""Patch to train partial dataset rather than all of them."""
def side_effect(*args, **kwargs):
tf.compat.v1.logging.info('Train on partial dataset')
data_loader = from_folder_fn(*args, **kwargs)
if len(data_loader) > 10: # Trim dataset to at most 10.
data_loader._size = 10
# TODO(b/171449557): Change this once the dataset is lazily loaded.
data_loader._dataset = data_loader._dataset.take(10)
return data_loader
return patch.object(
image_classifier.DataLoader, 'from_folder', side_effect=side_effect)
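# Used below as a context manager: inside `with patch_data_loader():` every
# image_classifier.DataLoader.from_folder call is routed through side_effect,
# so the demo trains on at most 10 examples instead of the full dataset.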
class ImageClassificationDemoTest(tf.test.TestCase):
def test_image_classification_demo(self):
with patch_data_loader():
with tempfile.TemporaryDirectory() as temp_dir:
# Use cached training data if exists.
data_dir = image_classification_demo.download_demo_data(
cache_dir=test_util.get_cache_dir(temp_dir, 'flower_photos.tgz'),
file_hash='6f87fb78e9cc9ab41eff2015b380011d')
tflite_filename = os.path.join(temp_dir, 'model.tflite')
label_filename = os.path.join(temp_dir, 'labels.txt')
image_classification_demo.run(
data_dir,
temp_dir,
spec='efficientnet_lite0',
epochs=1,
batch_size=1)
self.assertTrue(tf.io.gfile.exists(tflite_filename))
self.assertGreater(os.path.getsize(tflite_filename), 0)
self.assertFalse(tf.io.gfile.exists(label_filename))
if __name__ == '__main__':
# Load compressed models from tensorflow_hub
os.environ['TFHUB_MODEL_LOAD_FORMAT'] = 'COMPRESSED'
tf.test.main()
| apache-2.0 |
takis/odoo | openerp/report/report_sxw.py | 217 | 27364 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from lxml import etree
import StringIO
import cStringIO
import base64
from datetime import datetime
import os
import re
import time
from interface import report_rml
import preprocess
import logging
import openerp.tools as tools
import zipfile
import common
import openerp
from openerp import SUPERUSER_ID
from openerp.osv.fields import float as float_field, function as function_field, datetime as datetime_field
from openerp.tools.translate import _
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT
from openerp.tools.safe_eval import safe_eval as eval
_logger = logging.getLogger(__name__)
rml_parents = {
'tr':1,
'li':1,
'story': 0,
'section': 0
}
rml_tag="para"
sxw_parents = {
'table-row': 1,
'list-item': 1,
'body': 0,
'section': 0,
}
html_parents = {
'tr' : 1,
'body' : 0,
'div' : 0
}
sxw_tag = "p"
rml2sxw = {
'para': 'p',
}
def get_date_length(date_format=DEFAULT_SERVER_DATE_FORMAT):
return len((datetime.now()).strftime(date_format))
class rml_parse(object):
def __init__(self, cr, uid, name, parents=rml_parents, tag=rml_tag, context=None):
if not context:
context={}
self.cr = cr
self.uid = uid
self.pool = openerp.registry(cr.dbname)
user = self.pool['res.users'].browse(cr, uid, uid, context=context)
self.localcontext = {
'user': user,
'setCompany': self.setCompany,
'repeatIn': self.repeatIn,
'setLang': self.setLang,
'setTag': self.setTag,
'removeParentNode': self.removeParentNode,
'format': self.format,
'formatLang': self.formatLang,
'lang' : user.company_id.partner_id.lang,
'translate' : self._translate,
'setHtmlImage' : self.set_html_image,
'strip_name' : self._strip_name,
'time' : time,
'display_address': self.display_address,
# more context members are setup in setCompany() below:
# - company_id
# - logo
}
self.setCompany(user.company_id)
self.localcontext.update(context)
self.name = name
self._node = None
self.parents = parents
self.tag = tag
self._lang_cache = {}
self.lang_dict = {}
self.default_lang = {}
self.lang_dict_called = False
self._transl_regex = re.compile('(\[\[.+?\]\])')
def setTag(self, oldtag, newtag, attrs=None):
return newtag, attrs
def _ellipsis(self, char, size=100, truncation_str='...'):
if not char:
return ''
if len(char) <= size:
return char
return char[:size-len(truncation_str)] + truncation_str
def setCompany(self, company_id):
if company_id:
self.localcontext['company'] = company_id
self.localcontext['logo'] = company_id.logo
self.rml_header = company_id.rml_header
self.rml_header2 = company_id.rml_header2
self.rml_header3 = company_id.rml_header3
self.logo = company_id.logo
def _strip_name(self, name, maxlen=50):
return self._ellipsis(name, maxlen)
def format(self, text, oldtag=None):
return text.strip()
def removeParentNode(self, tag=None):
raise GeneratorExit('Skip')
def set_html_image(self,id,model=None,field=None,context=None):
if not id :
return ''
if not model:
model = 'ir.attachment'
try :
id = int(id)
res = self.pool[model].read(self.cr,self.uid,id)
if field :
return res[field]
elif model =='ir.attachment' :
return res['datas']
else :
return ''
except Exception:
return ''
def setLang(self, lang):
self.localcontext['lang'] = lang
self.lang_dict_called = False
# re-evaluate self.objects in a different environment
env = self.objects.env(self.cr, self.uid, self.localcontext)
self.objects = self.objects.with_env(env)
def _get_lang_dict(self):
pool_lang = self.pool['res.lang']
lang = self.localcontext.get('lang', 'en_US') or 'en_US'
lang_ids = pool_lang.search(self.cr,self.uid,[('code','=',lang)])
if not lang_ids:
lang_ids = pool_lang.search(self.cr,self.uid,[('code','=','en_US')])
lang_obj = pool_lang.browse(self.cr,self.uid,lang_ids[0])
self.lang_dict.update({'lang_obj':lang_obj,'date_format':lang_obj.date_format,'time_format':lang_obj.time_format})
self.default_lang[lang] = self.lang_dict.copy()
return True
def digits_fmt(self, obj=None, f=None, dp=None):
digits = self.get_digits(obj, f, dp)
return "%%.%df" % (digits, )
def get_digits(self, obj=None, f=None, dp=None):
d = DEFAULT_DIGITS = 2
if dp:
decimal_precision_obj = self.pool['decimal.precision']
d = decimal_precision_obj.precision_get(self.cr, self.uid, dp)
elif obj and f:
res_digits = getattr(obj._columns[f], 'digits', lambda x: ((16, DEFAULT_DIGITS)))
if isinstance(res_digits, tuple):
d = res_digits[1]
else:
d = res_digits(self.cr)[1]
elif (hasattr(obj, '_field') and\
isinstance(obj._field, (float_field, function_field)) and\
obj._field.digits):
d = obj._field.digits[1]
        if not d and d != 0:
d = DEFAULT_DIGITS
return d
def formatLang(self, value, digits=None, date=False, date_time=False, grouping=True, monetary=False, dp=False, currency_obj=False):
"""
Assuming 'Account' decimal.precision=3:
formatLang(value) -> digits=2 (default)
formatLang(value, digits=4) -> digits=4
formatLang(value, dp='Account') -> digits=3
formatLang(value, digits=5, dp='Account') -> digits=5
"""
if digits is None:
if dp:
digits = self.get_digits(dp=dp)
else:
digits = self.get_digits(value)
if isinstance(value, (str, unicode)) and not value:
return ''
if not self.lang_dict_called:
self._get_lang_dict()
self.lang_dict_called = True
if date or date_time:
if not value:
return ''
date_format = self.lang_dict['date_format']
parse_format = DEFAULT_SERVER_DATE_FORMAT
if date_time:
value = value.split('.')[0]
date_format = date_format + " " + self.lang_dict['time_format']
parse_format = DEFAULT_SERVER_DATETIME_FORMAT
if isinstance(value, basestring):
# FIXME: the trimming is probably unreliable if format includes day/month names
# and those would need to be translated anyway.
date = datetime.strptime(value[:get_date_length(parse_format)], parse_format)
elif isinstance(value, time.struct_time):
date = datetime(*value[:6])
else:
date = datetime(*value.timetuple()[:6])
if date_time:
# Convert datetime values to the expected client/context timezone
date = datetime_field.context_timestamp(self.cr, self.uid,
timestamp=date,
context=self.localcontext)
return date.strftime(date_format.encode('utf-8'))
res = self.lang_dict['lang_obj'].format('%.' + str(digits) + 'f', value, grouping=grouping, monetary=monetary)
if currency_obj:
if currency_obj.position == 'after':
res = u'%s\N{NO-BREAK SPACE}%s' % (res, currency_obj.symbol)
            elif currency_obj.position == 'before':
res = u'%s\N{NO-BREAK SPACE}%s' % (currency_obj.symbol, res)
return res
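    # Illustrative only — output depends entirely on the res.lang record,
    # so these values are assumptions for a locale with comma decimals and
    # %d/%m/%Y dates:
    #   self.formatLang(1234.5, digits=2)          -> u'1.234,50'
    #   self.formatLang('2021-07-01', date=True)   -> u'01/07/2021'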
def display_address(self, address_record, without_company=False):
# FIXME handle `without_company`
return address_record.contact_address
def repeatIn(self, lst, name,nodes_parent=False):
ret_lst = []
for id in lst:
ret_lst.append({name:id})
return ret_lst
def _translate(self,text):
lang = self.localcontext['lang']
if lang and text and not text.isspace():
transl_obj = self.pool['ir.translation']
piece_list = self._transl_regex.split(text)
for pn in range(len(piece_list)):
if not self._transl_regex.match(piece_list[pn]):
source_string = piece_list[pn].replace('\n', ' ').strip()
if len(source_string):
translated_string = transl_obj._get_source(self.cr, self.uid, self.name, ('report', 'rml'), lang, source_string)
if translated_string:
piece_list[pn] = piece_list[pn].replace(source_string, translated_string)
text = ''.join(piece_list)
return text
def _add_header(self, rml_dom, header='external'):
if header=='internal':
rml_head = self.rml_header2
elif header=='internal landscape':
rml_head = self.rml_header3
else:
rml_head = self.rml_header
head_dom = etree.XML(rml_head)
for tag in head_dom:
found = rml_dom.find('.//'+tag.tag)
if found is not None and len(found):
if tag.get('position'):
found.append(tag)
else :
found.getparent().replace(found,tag)
return True
def set_context(self, objects, data, ids, report_type = None):
self.localcontext['data'] = data
self.localcontext['objects'] = objects
self.localcontext['digits_fmt'] = self.digits_fmt
self.localcontext['get_digits'] = self.get_digits
self.datas = data
self.ids = ids
self.objects = objects
if report_type:
if report_type=='odt' :
self.localcontext.update({'name_space' :common.odt_namespace})
else:
self.localcontext.update({'name_space' :common.sxw_namespace})
# WARNING: the object[0].exists() call below is slow but necessary because
# some broken reporting wizards pass incorrect IDs (e.g. ir.ui.menu ids)
if objects and len(objects) == 1 and \
objects[0].exists() and 'company_id' in objects[0] and objects[0].company_id:
# When we print only one record, we can auto-set the correct
# company in the localcontext. For other cases the report
# will have to call setCompany() inside the main repeatIn loop.
self.setCompany(objects[0].company_id)
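    # The names placed in localcontext above are what RML templates invoke
    # inside [[...]] expressions — an illustrative (hypothetical) fragment:
    #   [[ repeatIn(objects, 'o') ]]
    #   [[ o.name ]]  [[ formatLang(o.amount_total, dp='Account') ]]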
class report_sxw(report_rml, preprocess.report):
"""
The register=True kwarg has been added to help remove the
openerp.netsvc.LocalService() indirection and the related
openerp.report.interface.report_int._reports dictionary:
report_sxw registered in XML with auto=False are also registered in Python.
In that case, they are registered in the above dictionary. Since
registration is automatically done upon instanciation, and that
instanciation is needed before rendering, a way was needed to
instanciate-without-register a report. In the future, no report
should be registered in the above dictionary and it will be dropped.
"""
def __init__(self, name, table, rml=False, parser=rml_parse, header='external', store=False, register=True):
report_rml.__init__(self, name, table, rml, '', register=register)
self.name = name
self.parser = parser
self.header = header
self.store = store
self.internal_header=False
if header=='internal' or header=='internal landscape':
self.internal_header=True
def getObjects(self, cr, uid, ids, context):
table_obj = openerp.registry(cr.dbname)[self.table]
return table_obj.browse(cr, uid, ids, context=context)
def create(self, cr, uid, ids, data, context=None):
context = dict(context or {})
if self.internal_header:
context.update(internal_header=self.internal_header)
# skip osv.fields.sanitize_binary_value() because we want the raw bytes in all cases
context.update(bin_raw=True)
registry = openerp.registry(cr.dbname)
ir_obj = registry['ir.actions.report.xml']
registry['res.font'].font_scan(cr, SUPERUSER_ID, lazy=True, context=context)
report_xml_ids = ir_obj.search(cr, uid,
[('report_name', '=', self.name[7:])], context=context)
if report_xml_ids:
report_xml = ir_obj.browse(cr, uid, report_xml_ids[0], context=context)
else:
title = ''
report_file = tools.file_open(self.tmpl, subdir=None)
try:
rml = report_file.read()
report_type= data.get('report_type', 'pdf')
class a(object):
def __init__(self, *args, **argv):
for key,arg in argv.items():
setattr(self, key, arg)
report_xml = a(title=title, report_type=report_type, report_rml_content=rml, name=title, attachment=False, header=self.header)
finally:
report_file.close()
# We add an attribute on the ir.actions.report.xml instance.
# This attribute 'use_global_header' will be used by
# the create_single_XXX function of the report engine.
# This change has been done to avoid a big change of the API.
setattr(report_xml, 'use_global_header', self.header if report_xml.header else False)
report_type = report_xml.report_type
if report_type in ['sxw','odt']:
fnct = self.create_source_odt
elif report_type in ['pdf','raw','txt','html']:
fnct = self.create_source_pdf
elif report_type=='html2html':
fnct = self.create_source_html2html
elif report_type=='mako2html':
fnct = self.create_source_mako2html
else:
raise NotImplementedError(_('Unknown report type: %s') % report_type)
fnct_ret = fnct(cr, uid, ids, data, report_xml, context)
if not fnct_ret:
return False, False
return fnct_ret
def create_source_odt(self, cr, uid, ids, data, report_xml, context=None):
return self.create_single_odt(cr, uid, ids, data, report_xml, context or {})
def create_source_html2html(self, cr, uid, ids, data, report_xml, context=None):
return self.create_single_html2html(cr, uid, ids, data, report_xml, context or {})
def create_source_mako2html(self, cr, uid, ids, data, report_xml, context=None):
return self.create_single_mako2html(cr, uid, ids, data, report_xml, context or {})
def create_source_pdf(self, cr, uid, ids, data, report_xml, context=None):
if not context:
context={}
registry = openerp.registry(cr.dbname)
attach = report_xml.attachment
if attach:
objs = self.getObjects(cr, uid, ids, context)
results = []
for obj in objs:
aname = eval(attach, {'object':obj, 'time':time})
result = False
if report_xml.attachment_use and aname and context.get('attachment_use', True):
aids = registry['ir.attachment'].search(cr, uid, [('datas_fname','=',aname+'.pdf'),('res_model','=',self.table),('res_id','=',obj.id)])
if aids:
brow_rec = registry['ir.attachment'].browse(cr, uid, aids[0])
if not brow_rec.datas:
continue
d = base64.decodestring(brow_rec.datas)
results.append((d,'pdf'))
continue
result = self.create_single_pdf(cr, uid, [obj.id], data, report_xml, context)
if not result:
return False
if aname:
try:
name = aname+'.'+result[1]
# Remove the default_type entry from the context: this
# is for instance used on the account.account_invoices
# and is thus not intended for the ir.attachment type
# field.
ctx = dict(context)
ctx.pop('default_type', None)
registry['ir.attachment'].create(cr, uid, {
'name': aname,
'datas': base64.encodestring(result[0]),
'datas_fname': name,
'res_model': self.table,
'res_id': obj.id,
}, context=ctx
)
except Exception:
#TODO: should probably raise a proper osv_except instead, shouldn't we? see LP bug #325632
_logger.error('Could not create saved report attachment', exc_info=True)
results.append(result)
if results:
if results[0][1]=='pdf':
from pyPdf import PdfFileWriter, PdfFileReader
output = PdfFileWriter()
for r in results:
reader = PdfFileReader(cStringIO.StringIO(r[0]))
for page in range(reader.getNumPages()):
output.addPage(reader.getPage(page))
s = cStringIO.StringIO()
output.write(s)
return s.getvalue(), results[0][1]
return self.create_single_pdf(cr, uid, ids, data, report_xml, context)
def create_single_pdf(self, cr, uid, ids, data, report_xml, context=None):
if not context:
context={}
logo = None
context = context.copy()
title = report_xml.name
rml = report_xml.report_rml_content
# if no rml file is found
if not rml:
return False
rml_parser = self.parser(cr, uid, self.name2, context=context)
objs = self.getObjects(cr, uid, ids, context)
rml_parser.set_context(objs, data, ids, report_xml.report_type)
processed_rml = etree.XML(rml)
if report_xml.use_global_header:
rml_parser._add_header(processed_rml, self.header)
processed_rml = self.preprocess_rml(processed_rml,report_xml.report_type)
if rml_parser.logo:
logo = base64.decodestring(rml_parser.logo)
create_doc = self.generators[report_xml.report_type]
pdf = create_doc(etree.tostring(processed_rml),rml_parser.localcontext,logo,title.encode('utf8'))
return pdf, report_xml.report_type
def create_single_odt(self, cr, uid, ids, data, report_xml, context=None):
context = dict(context or {})
context['parents'] = sxw_parents
report_type = report_xml.report_type
binary_report_content = report_xml.report_sxw_content
if isinstance(report_xml.report_sxw_content, unicode):
# if binary content was passed as unicode, we must
# re-encode it as a 8-bit string using the pass-through
# 'latin1' encoding, to restore the original byte values.
# See also osv.fields.sanitize_binary_value()
binary_report_content = report_xml.report_sxw_content.encode("latin1")
sxw_io = StringIO.StringIO(binary_report_content)
sxw_z = zipfile.ZipFile(sxw_io, mode='r')
rml = sxw_z.read('content.xml')
meta = sxw_z.read('meta.xml')
mime_type = sxw_z.read('mimetype')
if mime_type == 'application/vnd.sun.xml.writer':
mime_type = 'sxw'
else :
mime_type = 'odt'
sxw_z.close()
rml_parser = self.parser(cr, uid, self.name2, context=context)
rml_parser.parents = sxw_parents
rml_parser.tag = sxw_tag
objs = self.getObjects(cr, uid, ids, context)
rml_parser.set_context(objs, data, ids, mime_type)
rml_dom_meta = node = etree.XML(meta)
elements = node.findall(rml_parser.localcontext['name_space']["meta"]+"user-defined")
for pe in elements:
if pe.get(rml_parser.localcontext['name_space']["meta"]+"name"):
if pe.get(rml_parser.localcontext['name_space']["meta"]+"name") == "Info 3":
pe[0].text=data['id']
if pe.get(rml_parser.localcontext['name_space']["meta"]+"name") == "Info 4":
pe[0].text=data['model']
meta = etree.tostring(rml_dom_meta, encoding='utf-8',
xml_declaration=True)
rml_dom = etree.XML(rml)
elements = []
key1 = rml_parser.localcontext['name_space']["text"]+"p"
key2 = rml_parser.localcontext['name_space']["text"]+"drop-down"
for n in rml_dom.iterdescendants():
if n.tag == key1:
elements.append(n)
if mime_type == 'odt':
for pe in elements:
e = pe.findall(key2)
for de in e:
pp=de.getparent()
if de.text or de.tail:
pe.text = de.text or de.tail
for cnd in de:
if cnd.text or cnd.tail:
if pe.text:
pe.text += cnd.text or cnd.tail
else:
pe.text = cnd.text or cnd.tail
pp.remove(de)
else:
for pe in elements:
e = pe.findall(key2)
for de in e:
pp = de.getparent()
if de.text or de.tail:
pe.text = de.text or de.tail
for cnd in de:
text = cnd.get("{http://openoffice.org/2000/text}value",False)
if text:
if pe.text and text.startswith('[['):
pe.text += text
elif text.startswith('[['):
pe.text = text
if de.getparent():
pp.remove(de)
rml_dom = self.preprocess_rml(rml_dom, mime_type)
create_doc = self.generators[mime_type]
odt = etree.tostring(create_doc(rml_dom, rml_parser.localcontext),
encoding='utf-8', xml_declaration=True)
sxw_contents = {'content.xml':odt, 'meta.xml':meta}
if report_xml.use_global_header:
#Add corporate header/footer
rml_file = tools.file_open(os.path.join('base', 'report', 'corporate_%s_header.xml' % report_type))
try:
rml = rml_file.read()
rml_parser = self.parser(cr, uid, self.name2, context=context)
rml_parser.parents = sxw_parents
rml_parser.tag = sxw_tag
objs = self.getObjects(cr, uid, ids, context)
rml_parser.set_context(objs, data, ids, report_xml.report_type)
rml_dom = self.preprocess_rml(etree.XML(rml),report_type)
create_doc = self.generators[report_type]
odt = create_doc(rml_dom,rml_parser.localcontext)
if report_xml.use_global_header:
rml_parser._add_header(odt)
odt = etree.tostring(odt, encoding='utf-8',
xml_declaration=True)
sxw_contents['styles.xml'] = odt
finally:
rml_file.close()
#created empty zip writing sxw contents to avoid duplication
sxw_out = StringIO.StringIO()
sxw_out_zip = zipfile.ZipFile(sxw_out, mode='w')
sxw_template_zip = zipfile.ZipFile (sxw_io, 'r')
for item in sxw_template_zip.infolist():
if item.filename not in sxw_contents:
buffer = sxw_template_zip.read(item.filename)
sxw_out_zip.writestr(item.filename, buffer)
for item_filename, buffer in sxw_contents.iteritems():
sxw_out_zip.writestr(item_filename, buffer)
sxw_template_zip.close()
sxw_out_zip.close()
final_op = sxw_out.getvalue()
sxw_io.close()
sxw_out.close()
return final_op, mime_type
def create_single_html2html(self, cr, uid, ids, data, report_xml, context=None):
context = dict(context or {})
context['parents'] = html_parents
report_type = 'html'
html = report_xml.report_rml_content
html_parser = self.parser(cr, uid, self.name2, context=context)
html_parser.parents = html_parents
html_parser.tag = sxw_tag
objs = self.getObjects(cr, uid, ids, context)
html_parser.set_context(objs, data, ids, report_type)
html_dom = etree.HTML(html)
html_dom = self.preprocess_rml(html_dom,'html2html')
create_doc = self.generators['html2html']
html = etree.tostring(create_doc(html_dom, html_parser.localcontext))
return html.replace('&','&').replace('<', '<').replace('>', '>').replace('</br>',''), report_type
def create_single_mako2html(self, cr, uid, ids, data, report_xml, context=None):
mako_html = report_xml.report_rml_content
html_parser = self.parser(cr, uid, self.name2, context)
objs = self.getObjects(cr, uid, ids, context)
html_parser.set_context(objs, data, ids, 'html')
create_doc = self.generators['makohtml2html']
html = create_doc(mako_html,html_parser.localcontext)
return html,'html'
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
MrLucasCardoso/pycards | tests/test_amex.py | 1 | 2451 | import json
import pytest
from pycards import CreditCard
from datetime import datetime
from pycards.settings import FIXTURES_PATH
@pytest.fixture(scope="session")
def data():
with open(FIXTURES_PATH) as data_file:
return json.load(data_file)['AMEX']
def test_init(data):
assert len(data) > 0
cards = [CreditCard(card['name'], code=card['code']) for card in data]
assert len(cards) == len(data)
def test_is_valid(data):
assert all(CreditCard(card['name'], code=card['code']).is_valid for card in data)
def test_brand(data):
cards = [CreditCard(card['name'], code=card['code']) for card in data]
assert len(cards) == len([card for card in cards if card.brand == 'Amex'])
def test_cardholder(data):
cards = [CreditCard(card['name'], code=card['code'], cardholder='TESTE DADOS') for card in data]
assert len(cards) == len([card for card in cards if card.cardholder == 'TESTE DADOS'])
def test_number(data):
numbers = [card['name'] for card in data]
cards = [CreditCard(card['name'], code=card['code']) for card in data]
    assert all(c.number in numbers for c in cards)
def test_expires(data):
cards = [CreditCard(card['name'], code=card['code'], expire_month='7', expire_year='2021') for card in data]
    assert all(isinstance(c.expires, datetime) for c in cards)
def test_expires_string(data):
cards = [CreditCard(card['name'], code=card['code'], expire_month='7', expire_year='2021') for card in data]
    assert all(c.expires_string == '07/21' for c in cards)
def test_is_not_expired(data):
    card = [CreditCard(card['name'], code=card['code'], expire_month='7', expire_year='2031') for card in data][0]  # far-future expiry so the test stays valid
assert not card.is_expired
def test_is_expired(data):
card = [CreditCard(card['name'], code=card['code'], expire_month='7', expire_year='2016') for card in data][0]
assert card.is_expired
def test_code_name(data):
card = [CreditCard(card['name'], code=card['code'], expire_month='7', expire_year='2016') for card in data][0]
assert card.code_name == 'CVV'
def test_code(data):
codes = [card['code'] for card in data]
cards = [CreditCard(card['name'], code=card['code']) for card in data]
    assert all(c.code in codes for c in cards)
| mit |
steveklabnik/servo | components/script/dom/bindings/codegen/parser/tests/test_extended_attributes.py | 149 | 2846 | import WebIDL
def WebIDLTest(parser, harness):
parser.parse("""
[NoInterfaceObject]
interface TestExtendedAttr {
[Unforgeable] readonly attribute byte b;
};
""")
results = parser.finish()
parser = parser.reset()
parser.parse("""
[Pref="foo.bar",Pref=flop]
interface TestExtendedAttr {
[Pref="foo.bar"] attribute byte b;
};
""")
results = parser.finish()
parser = parser.reset()
parser.parse("""
interface TestLenientThis {
[LenientThis] attribute byte b;
};
""")
results = parser.finish()
harness.ok(results[0].members[0].hasLenientThis(),
"Should have a lenient this")
parser = parser.reset()
threw = False
try:
parser.parse("""
interface TestLenientThis2 {
[LenientThis=something] attribute byte b;
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "[LenientThis] must take no arguments")
parser = parser.reset()
parser.parse("""
interface TestClamp {
void testClamp([Clamp] long foo);
void testNotClamp(long foo);
};
""")
results = parser.finish()
# Pull out the first argument out of the arglist of the first (and
# only) signature.
harness.ok(results[0].members[0].signatures()[0][1][0].clamp,
"Should be clamped")
harness.ok(not results[0].members[1].signatures()[0][1][0].clamp,
"Should not be clamped")
parser = parser.reset()
threw = False
try:
parser.parse("""
interface TestClamp2 {
void testClamp([Clamp=something] long foo);
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "[Clamp] must take no arguments")
parser = parser.reset()
parser.parse("""
interface TestEnforceRange {
void testEnforceRange([EnforceRange] long foo);
void testNotEnforceRange(long foo);
};
""")
results = parser.finish()
# Pull out the first argument out of the arglist of the first (and
# only) signature.
harness.ok(results[0].members[0].signatures()[0][1][0].enforceRange,
"Should be enforceRange")
harness.ok(not results[0].members[1].signatures()[0][1][0].enforceRange,
"Should not be enforceRange")
parser = parser.reset()
threw = False
try:
parser.parse("""
interface TestEnforceRange2 {
void testEnforceRange([EnforceRange=something] long foo);
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "[EnforceRange] must take no arguments")
| mpl-2.0 |
lichengshuang/createvhost | others/webvirtmgr/delServer.py | 1 | 1305 | #!/usr/bin/python
#-*-encoding:utf-8-*-
#author: asher
#date: 20160429 on train D909
# This script is used to delete server IPs from webvirtmgr in bulk.
# Removing each server one by one through the web interface is slow and
# tedious; this makes bulk cleanup painless.
import sqlite3
try:
conn = sqlite3.connect('../webvirtmgr.sqlite3')
cur = conn.cursor()
print "Input the server ip address like:"
ips = raw_input("Ips 172.23.32:").strip()
ips1 = int(raw_input("Input start last ip num: 1:>").strip())
ips2 = int(raw_input("Input end ip num: 100:>").strip())
    while ips1 <= ips2:
        newip = "%s.%d" % (ips, ips1)
        print "Deleting %s from database\n" % newip
        # Parameterized query avoids quoting problems and SQL injection.
        cur.execute("delete from servers_compute where hostname = ?", (newip,))
        ips1 += 1
    conn.commit()
finally:
allservers = cur.execute("select id,name,hostname,login,type from servers_compute").fetchall()
for i in allservers:
print i
conn.close()
| apache-2.0 |
esikachev/sahara-backup | sahara/tests/unit/swift/test_utils.py | 7 | 1447 | # Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from sahara.swift import utils
from sahara.tests.unit import base as testbase
class SwiftUtilsTest(testbase.SaharaTestCase):
def setUp(self):
super(SwiftUtilsTest, self).setUp()
self.override_config('use_identity_api_v3', True)
@mock.patch('sahara.utils.openstack.base.url_for')
def test_retrieve_auth_url(self, url_for_mock):
correct = "https://127.0.0.1:8080/v2.0/"
def _assert(uri):
url_for_mock.return_value = uri
self.assertEqual(correct, utils.retrieve_auth_url())
_assert("%s/" % correct)
_assert("https://127.0.0.1:8080")
_assert("https://127.0.0.1:8080/")
_assert("https://127.0.0.1:8080/v2.0")
_assert("https://127.0.0.1:8080/v2.0/")
_assert("https://127.0.0.1:8080/v42/")
_assert("https://127.0.0.1:8080/foo")
| apache-2.0 |