Dataset columns:
    repo_name : string (lengths 6 to 100)
    path      : string (lengths 4 to 294)
    copies    : string (981 distinct values)
    size      : string (lengths 4 to 6)
    content   : string (lengths 606 to 896k)
    license   : string (15 distinct values)
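The records below are raw (repo_name, path, copies, size, content, license) rows. As a minimal, hypothetical sketch (the helper function and the in-memory dict representation are assumptions for illustration, not part of the dataset itself), rows with exactly these keys could be filtered by license and file size like this:

```python
# Hypothetical helper: assumes each row is a Python dict whose keys are the
# columns listed above; note that 'size' and 'copies' are stored as strings.
def filter_rows(rows, license_id, max_size=10000):
    """Yield rows under the given license whose file size is below max_size bytes."""
    for row in rows:
        if row["license"] == license_id and int(row["size"]) <= max_size:
            yield row

# Example with one in-memory row taken from the records below.
sample = [{
    "repo_name": "andreif/django",
    "path": "tests/template_tests/filter_tests/test_escapejs.py",
    "copies": "324",
    "size": "2055",
    "content": "...",  # full file text elided here
    "license": "bsd-3-clause",
}]
print([r["path"] for r in filter_rows(sample, "bsd-3-clause")])
```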
ahmadiga/min_edx
common/test/acceptance/performance/test_studio_performance.py
139
3307
""" Single page performance tests for Studio. """ from bok_choy.web_app_test import WebAppTest, with_cache from ..pages.studio.auto_auth import AutoAuthPage from ..pages.studio.overview import CourseOutlinePage from nose.plugins.attrib import attr @attr(har_mode='explicit') class StudioPagePerformanceTest(WebAppTest): """ Base class to capture studio performance with HTTP Archives. To import courses for the bok choy tests, pass the --imports_dir=<course directory> argument to the paver command where <course directory> contains the (un-archived) courses to be imported. """ course_org = 'edX' course_num = 'Open_DemoX' course_run = 'edx_demo_course' def setUp(self): """ Authenticate as staff so we can view and edit courses. """ super(StudioPagePerformanceTest, self).setUp() AutoAuthPage(self.browser, staff=True).visit() def record_visit_outline(self): """ Produce a HAR for loading the course outline page. """ course_outline_page = CourseOutlinePage(self.browser, self.course_org, self.course_num, self.course_run) har_name = 'OutlinePage_{org}_{course}'.format( org=self.course_org, course=self.course_num ) self.har_capturer.add_page(self.browser, har_name) course_outline_page.visit() self.har_capturer.save_har(self.browser, har_name) def record_visit_unit(self, section_title, subsection_title, unit_title): """ Produce a HAR for loading a unit page. """ course_outline_page = CourseOutlinePage(self.browser, self.course_org, self.course_num, self.course_run).visit() course_outline_unit = course_outline_page.section(section_title).subsection(subsection_title).expand_subsection().unit(unit_title) har_name = 'UnitPage_{org}_{course}'.format( org=self.course_org, course=self.course_num ) self.har_capturer.add_page(self.browser, har_name) course_outline_unit.go_to() self.har_capturer.save_har(self.browser, har_name) class StudioJusticePerformanceTest(StudioPagePerformanceTest): """ Test performance on the HarvardX Justice course. """ course_org = 'HarvardX' course_num = 'ER22x' course_run = '2013_Spring' @with_cache def test_visit_outline(self): """Record visiting the Justice course outline page""" self.record_visit_outline() @with_cache def test_visit_unit(self): """Record visiting a Justice unit page""" self.record_visit_unit( 'Lecture 1 - Doing the Right Thing', 'Discussion Prompt: Ethics of Torture', 'Discussion Prompt: Ethics of Torture' ) class StudioPub101PerformanceTest(StudioPagePerformanceTest): """ Test performance on Andy's PUB101 outline page. """ course_org = 'AndyA' course_num = 'PUB101' course_run = 'PUB101' @with_cache def test_visit_outline(self): """Record visiting the PUB101 course outline page""" self.record_visit_outline() @with_cache def test_visit_unit(self): """Record visiting the PUB101 unit page""" self.record_visit_unit('Released', 'Released', 'Released')
agpl-3.0
davipeterlini/routeflow_tcc
pox/tests/unit/openflow/switch_impl_test.py
23
6728
#!/usr/bin/env python import unittest import sys import os.path from copy import copy sys.path.append(os.path.dirname(__file__) + "/../../..") from pox.openflow.libopenflow_01 import * from pox.datapaths.switch import * class MockConnection(object): def __init__(self): self.received = [] @property def last(self): return self.received[-1] def set_message_handler(self, handler): self.on_message_received = handler def to_switch(self, msg): self.on_message_received(self, msg) # from switch def send(self, msg): self.received.append(msg) class SwitchImplTest(unittest.TestCase): def setUp(self): self.conn = MockConnection() self.switch = SoftwareSwitch(1, name="sw1") self.switch.set_connection(self.conn) self.packet = ethernet(src=EthAddr("00:00:00:00:00:01"), dst=EthAddr("00:00:00:00:00:02"), payload=ipv4(srcip=IPAddr("1.2.3.4"), dstip=IPAddr("1.2.3.5"), payload=udp(srcport=1234, dstport=53, payload="haha"))) def test_hello(self): c = self.conn c.to_switch(ofp_hello(xid=123)) self.assertEqual(len(c.received), 1) self.assertTrue(isinstance(c.last, ofp_hello), "should have received hello but got %s" % c.last) def test_echo_request(self): c = self.conn c.to_switch(ofp_echo_request(xid=123)) self.assertEqual(len(c.received), 1) self.assertTrue(isinstance(c.last, ofp_echo_reply) and c.last.xid == 123, "should have received echo reply but got %s" % c.last) def test_barrier(self): c = self.conn c.to_switch(ofp_barrier_request(xid=123)) self.assertEqual(len(c.received), 1) self.assertTrue(isinstance(c.last, ofp_barrier_reply) and c.last.xid == 123, "should have received echo reply but got %s" % c.last) def test_flow_mod(self): c = self.conn s = self.switch c.to_switch(ofp_flow_mod(xid=124, priority=1, match=ofp_match(in_port=1, nw_src="1.2.3.4"))) self.assertEqual(len(c.received), 0) self.assertEqual(len(s.table), 1) e = s.table.entries[0] self.assertEqual(e.priority,1) self.assertEqual(e.match, ofp_match(in_port=1, nw_src="1.2.3.4")) def test_packet_out(self): c = self.conn s = self.switch received = [] s.addListener(DpPacketOut, lambda(event): received.append(event)) packet = self.packet c.to_switch(ofp_packet_out(data=packet, actions=[ofp_action_output(port=2)])) self.assertEqual(len(c.received), 0) self.assertEqual(len(received), 1) event = received[0] self.assertEqual(event.port.port_no,2) self.assertEqual(event.packet.pack(), packet.pack()) def test_send_packet_in(self): c = self.conn s = self.switch s.send_packet_in(in_port=1, buffer_id=123, packet=self.packet, reason=OFPR_NO_MATCH) self.assertEqual(len(c.received), 1) self.assertTrue(isinstance(c.last, ofp_packet_in) and c.last.xid == 0, "should have received packet_in but got %s" % c.last) self.assertEqual(c.last.in_port,1) self.assertEqual(c.last.buffer_id,123) self.assertEqual(c.last.data, self.packet.pack()) def test_rx_packet(self): c = self.conn s = self.switch received = [] s.addListener(DpPacketOut, lambda(event): received.append(event)) # no flow entries -> should result in a packet_in s.rx_packet(self.packet, in_port=1) self.assertEqual(len(c.received), 1) self.assertTrue(isinstance(c.last, ofp_packet_in), "should have received packet_in but got %s" % c.last) self.assertTrue(c.last.buffer_id > 0) # let's send a flow_mod with a buffer id c.to_switch(ofp_flow_mod(xid=124, buffer_id=c.last.buffer_id, priority=1, match=ofp_match(in_port=1, nw_src="1.2.3.4"), actions = [ ofp_action_output(port=3) ] )) # that should have send the packet out port 3 self.assertEqual(len(received), 1) event = received[0] self.assertEqual(event.port.port_no,3) 
self.assertEqual(event.packet, self.packet) # now the next packet should go through on the fast path c.received = [] received = [] s.rx_packet(self.packet, in_port=1) self.assertEqual(len(c.received), 0) self.assertEqual(len(received), 1) event = received[0] self.assertEqual(event.port.port_no,3) self.assertEqual(event.packet, self.packet) def test_delete_port(self): c = self.conn s = self.switch original_num_ports = len(self.switch.ports) p = self.switch.ports.values()[0] s.delete_port(p) new_num_ports = len(self.switch.ports) self.assertTrue(new_num_ports == original_num_ports - 1, "Should have removed the port") self.assertEqual(len(c.received), 1) self.assertTrue(isinstance(c.last, ofp_port_status), "should have received port_status but got %s" % c.last) self.assertTrue(c.last.reason == OFPPR_DELETE) def test_add_port(self): c = self.conn s = self.switch port_count = len(self.switch.ports) old_port = s.delete_port(1) self.assertTrue(port_count - 1 == len(self.switch.ports), "Should have removed port") self.assertFalse(old_port.port_no in self.switch.ports, "Should have removedport") s.add_port(old_port) self.assertTrue(old_port.port_no in self.switch.ports, "Should have added port") self.assertEqual(len(c.received), 2) self.assertTrue(isinstance(c.last, ofp_port_status), "should have received port_status but got %s" % c.last) self.assertTrue(c.last.reason == OFPPR_ADD) def test_port_mod_failed(self): c = self.conn # test wrong port msg = ofp_port_mod() msg.port_no = 1234 c.to_switch(msg) self.assertEqual(len(c.received), 1) self.assertTrue(isinstance(c.last, ofp_error)) self.assertTrue(c.last.type == OFPET_PORT_MOD_FAILED) self.assertTrue(c.last.code == OFPPMFC_BAD_PORT) # test wrong hw_addr msg.port_no = 1 msg.hw_addr = EthAddr("11:22:33:44:55:66") c.to_switch(msg) self.assertEqual(len(c.received), 2) self.assertTrue(isinstance(c.last, ofp_error)) self.assertTrue(c.last.type == OFPET_PORT_MOD_FAILED) self.assertTrue(c.last.code == OFPPMFC_BAD_HW_ADDR) def test_port_mod_link_down(self): c = self.conn s = self.switch # test wrong port msg = ofp_port_mod() msg.port_no = 1 msg.hw_addr = s.ports[1].hw_addr msg.mask = OFPPC_PORT_DOWN msg.config = OFPPC_PORT_DOWN c.to_switch(msg) self.assertEqual(len(c.received), 1) self.assertTrue(isinstance(c.last, ofp_port_status)) if __name__ == '__main__': unittest.main()
apache-2.0
whatsthehubbub/rippleeffect
nousernameregistration/models.py
1
10449
from django.conf import settings try: from django.contrib.auth import get_user_model User = get_user_model() except: pass from django.db import models from django.db import transaction from django.template.loader import render_to_string from django.utils.translation import ugettext_lazy as _ import datetime import hashlib import random import re try: from django.utils.timezone import now as datetime_now except ImportError: datetime_now = datetime.datetime.now SHA1_RE = re.compile('^[a-f0-9]{40}$') class RegistrationManager(models.Manager): """ Custom manager for the ``RegistrationProfile`` model. The methods defined here provide shortcuts for account creation and activation (including generation and emailing of activation keys), and for cleaning out expired inactive accounts. """ def activate_user(self, activation_key): """ Validate an activation key and activate the corresponding ``User`` if valid. If the key is valid and has not expired, return the ``User`` after activating. If the key is not valid or has expired, return ``False``. If the key is valid but the ``User`` is already active, return ``False``. To prevent reactivation of an account which has been deactivated by site administrators, the activation key is reset to the string constant ``RegistrationProfile.ACTIVATED`` after successful activation. """ # Make sure the key we're trying conforms to the pattern of a # SHA1 hash; if it doesn't, no point trying to look it up in # the database. if SHA1_RE.search(activation_key): try: profile = self.get(activation_key=activation_key) except self.model.DoesNotExist: return False if not profile.activation_key_expired(): user = profile.user user.is_active = True user.save() profile.activation_key = self.model.ACTIVATED profile.save() return user return False def create_inactive_user(self, email, password, site, send_email=True): """ Create a new, inactive ``User``, generate a ``RegistrationProfile`` and email its activation key to the ``User``, returning the new ``User``. By default, an activation email will be sent to the new user. To disable this, pass ``send_email=False``. """ new_user = User.objects.create_user(email, password) new_user.is_active = False new_user.save() registration_profile = self.create_profile(new_user) if send_email: registration_profile.send_activation_email(site) return new_user create_inactive_user = transaction.commit_on_success(create_inactive_user) def create_profile(self, user): """ Create a ``RegistrationProfile`` for a given ``User``, and return the ``RegistrationProfile``. The activation key for the ``RegistrationProfile`` will be a SHA1 hash, generated from a combination of the ``User``'s username and a random salt. """ salt = hashlib.sha1(str(random.random())).hexdigest()[:5] email = user.email if isinstance(email, unicode): email = email.encode('utf-8') activation_key = hashlib.sha1(salt+email).hexdigest() return self.create(user=user, activation_key=activation_key) def delete_expired_users(self): """ Remove expired instances of ``RegistrationProfile`` and their associated ``User``s. Accounts to be deleted are identified by searching for instances of ``RegistrationProfile`` with expired activation keys, and then checking to see if their associated ``User`` instances have the field ``is_active`` set to ``False``; any ``User`` who is both inactive and has an expired activation key will be deleted. 
It is recommended that this method be executed regularly as part of your routine site maintenance; this application provides a custom management command which will call this method, accessible as ``manage.py cleanupregistration``. Regularly clearing out accounts which have never been activated serves two useful purposes: 1. It alleviates the ocasional need to reset a ``RegistrationProfile`` and/or re-send an activation email when a user does not receive or does not act upon the initial activation email; since the account will be deleted, the user will be able to simply re-register and receive a new activation key. 2. It prevents the possibility of a malicious user registering one or more accounts and never activating them (thus denying the use of those usernames to anyone else); since those accounts will be deleted, the usernames will become available for use again. If you have a troublesome ``User`` and wish to disable their account while keeping it in the database, simply delete the associated ``RegistrationProfile``; an inactive ``User`` which does not have an associated ``RegistrationProfile`` will not be deleted. """ for profile in self.all(): try: if profile.activation_key_expired(): user = profile.user if not user.is_active: user.delete() profile.delete() except User.DoesNotExist: profile.delete() class RegistrationProfile(models.Model): """ A simple profile which stores an activation key for use during user account registration. Generally, you will not want to interact directly with instances of this model; the provided manager includes methods for creating and activating new accounts, as well as for cleaning out accounts which have never been activated. While it is possible to use this model as the value of the ``AUTH_PROFILE_MODULE`` setting, it's not recommended that you do so. This model's sole purpose is to store data temporarily during account registration and activation. """ ACTIVATED = u"ALREADY_ACTIVATED" user = models.ForeignKey(settings.AUTH_USER_MODEL, unique=True, verbose_name=_('user')) activation_key = models.CharField(_('activation key'), max_length=40) objects = RegistrationManager() class Meta: verbose_name = _('registration profile') verbose_name_plural = _('registration profiles') def __unicode__(self): return u"Registration information for %s" % self.user def activation_key_expired(self): """ Determine whether this ``RegistrationProfile``'s activation key has expired, returning a boolean -- ``True`` if the key has expired. Key expiration is determined by a two-step process: 1. If the user has already activated, the key will have been reset to the string constant ``ACTIVATED``. Re-activating is not permitted, and so this method returns ``True`` in this case. 2. Otherwise, the date the user signed up is incremented by the number of days specified in the setting ``ACCOUNT_ACTIVATION_DAYS`` (which should be the number of days after signup during which a user is allowed to activate their account); if the result is less than or equal to the current date, the key has expired and this method returns ``True``. """ expiration_date = datetime.timedelta(days=settings.ACCOUNT_ACTIVATION_DAYS) return self.activation_key == self.ACTIVATED or \ (self.user.date_joined + expiration_date <= datetime_now()) activation_key_expired.boolean = True def send_activation_email(self, site): """ Send an activation email to the user associated with this ``RegistrationProfile``. 
The activation email will make use of two templates: ``registration/activation_email_subject.txt`` This template will be used for the subject line of the email. Because it is used as the subject line of an email, this template's output **must** be only a single line of text; output longer than one line will be forcibly joined into only a single line. ``registration/activation_email.txt`` This template will be used for the body of the email. These templates will each receive the following context variables: ``activation_key`` The activation key for the new account. ``expiration_days`` The number of days remaining during which the account may be activated. ``site`` An object representing the site on which the user registered; depending on whether ``django.contrib.sites`` is installed, this may be an instance of either ``django.contrib.sites.models.Site`` (if the sites application is installed) or ``django.contrib.sites.models.RequestSite`` (if not). Consult the documentation for the Django sites framework for details regarding these objects' interfaces. """ ctx_dict = {'activation_key': self.activation_key, 'expiration_days': settings.ACCOUNT_ACTIVATION_DAYS, 'site': site} subject = render_to_string('registration/activation_email_subject.txt', ctx_dict) # Email subject *must not* contain newlines subject = ''.join(subject.splitlines()) message = render_to_string('registration/activation_email.txt', ctx_dict) self.user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL)
mit
patilsangram/erpnext
erpnext/templates/pages/help.py
17
1260
from __future__ import unicode_literals
import frappe, json
import requests


def get_context(context):
    context.no_cache = 1

    settings = frappe.get_doc("Support Settings", "Support Settings")
    s = settings

    # Get Started sections
    sections = json.loads(s.get_started_sections)
    context.get_started_sections = sections

    # Forum posts
    topics_data, post_params = get_forum_posts(s)
    context.post_params = post_params
    context.forum_url = s.forum_url
    context.topics = topics_data[:3]

    # Issues
    if frappe.session.user != "Guest":
        context.issues = frappe.get_list("Issue", fields=["name", "status", "subject", "modified"])[:3]
    else:
        context.issues = []


def get_forum_posts(s):
    response = requests.get(s.forum_url + '/' + s.get_latest_query)
    response.raise_for_status()
    response_json = response.json()

    topics_data = {}  # it will actually be an array
    key_list = s.response_key_list.split(',')
    for key in key_list:
        topics_data = response_json.get(key) if not topics_data else topics_data.get(key)

    for topic in topics_data:
        topic["link"] = s.forum_url + '/' + s.post_route_string + '/' + str(topic.get(s.post_route_key))

    post_params = {
        "title": s.post_title_key,
        "description": s.post_description_key
    }
    return topics_data, post_params
gpl-3.0
xaviercobain88/framework-python
openerp/addons/base/ir/workflow/__init__.py
79
1093
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

import workflow
import print_instance

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
terkaa/linuxcnc
src/hal/user_comps/pyvcp.py
32
3152
#!/usr/bin/env python # This is a component of emc # Copyright 2007 Anders Wallin <[email protected]> # # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA """ Python Virtual Control Panel for EMC A virtual control panel (VCP) is used to display and control HAL pins, which are either BIT or FLOAT valued. Usage: pyvcp -g WxH+X+Y -c compname myfile.xml compname is the name of the HAL component to be created. The name of the HAL pins associated with the VCP will begin with 'compname.' myfile.xml is an XML file which specifies the layout of the VCP. Valid XML tags are described in the documentation for pyvcp_widgets.py -g option allows setting of the inital size and/or position of the panel """ import sys, os BASE = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), "..")) sys.path.insert(0, os.path.join(BASE, "lib", "python")) import vcpparse import hal from Tkinter import Tk import getopt def usage(): """ prints the usage message """ print "Usage: pyvcp [-g WIDTHxHEIGHT+XOFFSET+YOFFSET][-c hal_component_name] myfile.xml" print "If the component name is not specified, the basename of the xml file is used." print "-g options are in pixel units, XOFFSET/YOFFSET is referenced from top left of screen" print "use -g WIDTHxHEIGHT for just setting size or -g +XOFFSET+YOFFSET for just position" def main(): """ creates a HAL component. calls vcpparse with the specified XML file. """ try: opts, args = getopt.getopt(sys.argv[1:], "c:g:") except getopt.GetoptError, detail: print detail usage() sys.exit(1) window_geometry = None component_name = None for o, a in opts: if o == "-c": component_name = a if o == "-g": window_geometry = a try: filename=args[0] except: usage() sys.exit(1) if component_name is None: component_name = os.path.splitext(os.path.basename(filename))[0] pyvcp0 = Tk() pyvcp0.title(component_name) if window_geometry: pyvcp0.geometry(window_geometry) vcpparse.filename=filename pycomp=vcpparse.create_vcp(compname=component_name, master=pyvcp0) pycomp.ready() try: try: pyvcp0.mainloop() except KeyboardInterrupt: sys.exit(0) finally: pycomp.exit() if __name__ == '__main__': main()
gpl-2.0
who-emro/meerkat_frontend
meerkat_frontend/views/messaging.py
1
15049
""" messaging.py A Flask Blueprint module for Meerkat messaging services. """ from flask.ext.babel import gettext from flask import Blueprint, render_template from flask import redirect, flash, request, current_app, g, jsonify import random from meerkat_frontend import app, auth import meerkat_libs as libs from .. import common as c messaging = Blueprint('messaging', __name__) @messaging.route('/') @messaging.route('/loc_<int:locID>') @auth.authorise(*app.config['AUTH'].get('messaging', [['BROKEN'], ['']])) def subscribe(locID=None): """ Subscription Process Stage 1: Render the page with the subscription form. Args: locID (int): The location ID of a location to be automatically loaded into the location selector. """ # Initialise locID to allowed location # Can't be done during function declaration because outside app context locID = g.allowed_location if not locID else locID return render_template('messaging/subscribe.html', content=g.config['MESSAGING_CONFIG'], loc=locID, week=c.api('/epi_week')) @messaging.route('/subscribe/subscribed', methods=['POST']) @auth.authorise(*app.config['AUTH'].get('messaging', [['BROKEN'], ['']])) def subscribed(): """ Subscription Process Stage 2: Confirms successful subscription request and informs the user of the verification process. This method assembles the HTML form data into a structure Meerkat Hermes understands and then uses the Meerkat Hermes "subscribe" resource to create the subscriber. It further assembles the email and SMS verification messages and uses the Meerkat Hermes to send it out. """ # Convert form immutabledict to dict. data = {} for key in request.form.keys(): key_list = request.form.getlist(key) if(len(key_list) > 1): data[key] = key_list else: data[key] = key_list[0] # Call hermes subscribe method. subscribe_response = libs.hermes('/subscribe', 'PUT', data) # Assemble and send verification email. url = request.url_root + \ g.get("language") + "/messaging/subscribe/verify/" + \ subscribe_response['subscriber_id'] verify_text = gettext(g.config['MESSAGING_CONFIG']['messages'].get( 'verify_text', "Dear {first_name} {last_name} ,\n\n" + "Your subscription to receive public health surveillance " "notifications from {country} has been created or updated. An " "administrator of the system may have done this on your behalf. " "\n\nIn order to receive future notifications, please " "verify your contact details by copying and pasting the following url " "into your address bar: {url}\n" )).format( first_name=data["first_name"], last_name=data["last_name"], country=current_app.config['MESSAGING_CONFIG']['messages']['country'], url=url ) verify_html = gettext(g.config['MESSAGING_CONFIG']['messages'].get( 'verify_html', "<p>Dear {first_name} {last_name},</p>" "<p>Your subscription to receive public health surveillance " "notifications from {country} has been created or updated. " "An administrator of the system may have done this on your " "behalf.</p><p> To receive future notifications, please verify " "your contact details by <a href='{url}' target='_blank'>" "clicking here</a>.</p>" )).format( first_name=data["first_name"], last_name=data["last_name"], country=current_app.config['MESSAGING_CONFIG']['messages']['country'], url=url ) libs.hermes('/email', 'PUT', { 'email': data['email'], 'subject': gettext('Please verify your contact details'), 'message': verify_text, 'html': verify_html, 'from': current_app.config['MESSAGING_CONFIG']['messages']['from'] }) # Set and send sms verification code. 
if 'sms' in data: __set_code(subscribe_response['subscriber_id'], data['sms']) # Delete the old account if it exists. Inform the user of success. if data.get('id', None): response = libs.hermes('/subscribe/' + data['id'], 'DELETE') if hasattr(response, 'status_code') and response.status_code != 200: flash(gettext( 'Account update failed: invalid ID. ' 'Creating new subscription instead.' )) else: flash( gettext('Subscription updated for ') + data['first_name'] + " " + data['last_name'] + "." ) return render_template('messaging/subscribed.html', content=g.config['MESSAGING_CONFIG'], week=c.api('/epi_week'), data=data) @messaging.route('/subscribe/verify/<string:subscriber_id>') def verify(subscriber_id): """ Subscription Process Stage 3: Verfies contact details for the subscriber ID specified in the URL. If no SMS number is provided, then just landing on this page is enough to verify the users email address (assuming the ID is not guessable). In this case we do a redirect to Stage 4. If the user has already been verified, then we also redirect to stage four with a flash message to remind them that they have already verified. In all other cases we show the SMS verification form. Args: subscriber_id (str): The UUID that is assigned to the subscriber upon creation by Meerkat Hermes. """ # Get the subscriber subscriber = libs.hermes('/subscribe/' + subscriber_id, 'GET') if subscriber['Item']['verified'] is True: flash(gettext('You have already verified your account.')) return redirect( "/" + g.get("language") + '/messaging/subscribe/verified/' + subscriber_id, code=302 ) elif 'sms' not in subscriber['Item']: current_app.logger.warning(str(subscriber['Item'])) libs.hermes('/verify/' + subscriber_id, 'GET') return redirect( "/" + g.get("language") + '/messaging/subscribe/verified/' + subscriber_id ) else: return render_template('messaging/verify.html', content=g.config['MESSAGING_CONFIG'], week=c.api('/epi_week'), data=subscriber['Item']) @messaging.route('/subscribe/verified/<string:subscriber_id>') def verified(subscriber_id): """ Subscription Process Stage 4: Confirms that the users details has been verified, and sends out a confirmation email as well. Args: subscriber_id (str): The UUID that is assigned to the subscriber upon creation by Meerkat Hermes. """ # Get the subscriber subscriber = libs.hermes('/subscribe/' + subscriber_id, 'GET')['Item'] # If the subscriber isn't verified redirect to the verify stage. if not subscriber['verified']: return redirect( '/' + g.get("language") + '/messaging/subscribe/verify/' + subscriber_id, code=302 ) country = current_app.config['MESSAGING_CONFIG']['messages']['country'] # Send a confirmation e-mail with the unsubscribe link. confirmation_text = gettext(g.config['MESSAGING_CONFIG']['messages'].get( 'confirmation_text', "Dear {first_name} {last_name},\n\n" "Thank you for subscribing to receive public health surveillance " "notifications from {country}. 
We can confirm that your contact " "details have been successfully verified.\n\nYou can unsubscribe at " "any time by clicking on the relevant link in your e-mails.\n\n If " "you wish to unsubscribe now copy and paste the following url into " "your address bar:\n{url}/unsubscribe/{subscriber_id}" )).format( first_name=subscriber["first_name"], last_name=subscriber["last_name"], country=country, url=current_app.config["HERMES_ROOT"], subscriber_id=subscriber_id ) confirmation_html = gettext(g.config['MESSAGING_CONFIG']['messages'].get( 'confirmation_html', "<p>Dear {first_name} {last_name},</p>" "<p>Thank you for subscribing to receive public health surveillance " "notifications from {country}. We can confirm that your contact " "details have been successfully verified.</p><p>You can unsubscribe " "at any time by clicking on the relevant link in your e-mails.</p><p> " "If you wish to unsubscribe now " "<a href='{url}/unsubscribe/{subscriber_id}'>click here.</a></p>" )).format( first_name=subscriber["first_name"], last_name=subscriber["last_name"], country=country, url=current_app.config["HERMES_ROOT"], subscriber_id=subscriber_id ) email = { 'email': subscriber['email'], 'subject': gettext("Your subscription has been successful"), 'message': confirmation_text, 'html': confirmation_html, 'from': current_app.config['MESSAGING_CONFIG']['messages']['from'] } email_response = libs.hermes('/email', 'PUT', email) current_app.logger.warning('Response is: ' + str(email_response)) return render_template('messaging/verified.html', content=g.config['MESSAGING_CONFIG'], week=c.api('/epi_week')) @messaging.route('/subscribe/sms_code/<string:subscriber_id>', methods=['get', 'post']) def sms_code(subscriber_id): """ Chooses, sets and checks SMS verification codes for the subscriber corresponding to the ID given in the URL. If a POST request is made to this URL it checks whether the code supplied in the POST request form data matches the code sent to the phone. If it does, it rediects to Stage 4, if it doesn't it redirects to stage 3 again with a flash informing the user they got the wrong code. If a GET request is made to this URL, the function selects a new code and sends the code out to the phone. It then redirects to Stage 3 with a flash message informing the user whether the new code has been succesffully sent. Args: subscriber_id (str): The UUID that is assigned to the subscriber upon creation by Meerkat Hermes. """ # If a POST request is made we check the given verification code. if request.method == 'POST': if __check_code(subscriber_id, request.form['code']): libs.hermes('/verify/' + subscriber_id, 'GET') return redirect( "/" + g.get("language") + "/messaging/subscribe/verified/" + subscriber_id, code=302 ) else: flash('You submitted the wrong code.', 'error') return redirect( "/" + g.get("language") + "/messaging/subscribe/verify/" + subscriber_id, code=302 ) # If a GET request is made we send a new code. else: subscriber = libs.hermes('/subscribe/' + subscriber_id, 'GET') response = __set_code(subscriber_id, subscriber['Item']['sms']) if response['ResponseMetadata']['HTTPStatusCode'] == 200: flash(gettext('A new code has been sent to your phone.')) return redirect( "/" + g.get("language") + "/messaging/subscribe/verify/" + subscriber_id, code=302 ) else: current_app.logger.error( "Request to send SMS failed. 
Response:\n{}".format(response) ) flash( gettext('Error: Try again later, or contact administrator.'), 'error' ) return redirect( "/" + g.get("language") + "/messaging/subscribe/verify/" + subscriber_id, code=302 ) @messaging.route('/get_subscribers') @auth.authorise(*app.config['AUTH'].get('admin', [['BROKEN'], ['']])) def get_subscribers(): """ Function that securely uses the server's access to hermes api to extract subscriber data from hermes. If the request went straight from the browsers console to hermes, we would have to give the user direct access to hermes. This is not safe. """ country = current_app.config['MESSAGING_CONFIG']['messages']['country'] subscribers = libs.hermes('/subscribers/'+country, 'GET') return jsonify({'rows': subscribers}) @messaging.route('/delete_subscribers', methods=['POST']) @auth.authorise(*app.config['AUTH'].get('admin', [['BROKEN'], ['']])) def delete_subscribers(): """ Delete the subscribers specified in the post arguments. """ # Load the list of subscribers to be deleted. subscribers = request.get_json() # Try to delete each subscriber, flag up if there is an error error = False for subscriber_id in subscribers: response = libs.hermes('/subscribe/' + subscriber_id, 'DELETE') if response['status'] != 'successful': error = True if error: return "ERROR: There was an error deleting some users." else: return "Users successfully deleted." def __check_code(subscriber_id, code): """ Checks if the given code for the given subscriber ID is the correct SMS verification code. Args: subscriber_id (str): The UUID that is assigned to the subscriber upon creation by Meerkat Hermes. code (str): The code to be checked. Returns: bool: True if there is a match, False otherwise. """ response = libs.hermes('/verify', 'POST', {'subscriber_id': subscriber_id, 'code': code}) current_app.logger.warning(str(response)) return bool(response['matched']) def __set_code(subscriber_id, sms): """ Sets a new sms verification code for the given subscriber ID. Args: subscriber_id (str): The UUID that is assigned to the subscriber upon creation by Meerkat Hermes. sms (int): The SMS number to which the new code should be sent. Returns: The Meerkat Hermes response object. """ code = round(random.random()*9999) message = gettext( 'Your verification code for {country} public health ' 'surveillance notifications is: {code}. For further information ' 'please see your email.' ).format( country=current_app.config['MESSAGING_CONFIG']['messages']['country'], code=code ) data = {'sms': sms, 'message': message} response = libs.hermes('/verify', 'PUT', {'subscriber_id': subscriber_id, 'code': code}) response = libs.hermes('/sms', 'PUT', data) return response
mit
Royal-Society-of-New-Zealand/NZ-ORCID-Hub
orcid_api_v3/models/funding_v30.py
1
16706
# coding: utf-8 """ ORCID Member No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501 OpenAPI spec version: Latest Generated by: https://github.com/swagger-api/swagger-codegen.git """ import pprint import re # noqa: F401 import six from orcid_api_v3.models.amount_v30 import AmountV30 # noqa: F401,E501 from orcid_api_v3.models.created_date_v30 import CreatedDateV30 # noqa: F401,E501 from orcid_api_v3.models.external_i_ds_v30 import ExternalIDsV30 # noqa: F401,E501 from orcid_api_v3.models.funding_contributors_v30 import FundingContributorsV30 # noqa: F401,E501 from orcid_api_v3.models.funding_title_v30 import FundingTitleV30 # noqa: F401,E501 from orcid_api_v3.models.fuzzy_date_v30 import FuzzyDateV30 # noqa: F401,E501 from orcid_api_v3.models.last_modified_date_v30 import LastModifiedDateV30 # noqa: F401,E501 from orcid_api_v3.models.organization_defined_funding_sub_type_v30 import OrganizationDefinedFundingSubTypeV30 # noqa: F401,E501 from orcid_api_v3.models.organization_v30 import OrganizationV30 # noqa: F401,E501 from orcid_api_v3.models.source_v30 import SourceV30 # noqa: F401,E501 from orcid_api_v3.models.url_v30 import UrlV30 # noqa: F401,E501 class FundingV30(object): """NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'created_date': 'CreatedDateV30', 'last_modified_date': 'LastModifiedDateV30', 'source': 'SourceV30', 'put_code': 'int', 'path': 'str', 'type': 'str', 'organization_defined_type': 'OrganizationDefinedFundingSubTypeV30', 'title': 'FundingTitleV30', 'short_description': 'str', 'amount': 'AmountV30', 'url': 'UrlV30', 'start_date': 'FuzzyDateV30', 'end_date': 'FuzzyDateV30', 'external_ids': 'ExternalIDsV30', 'contributors': 'FundingContributorsV30', 'organization': 'OrganizationV30', 'visibility': 'str' } attribute_map = { 'created_date': 'created-date', 'last_modified_date': 'last-modified-date', 'source': 'source', 'put_code': 'put-code', 'path': 'path', 'type': 'type', 'organization_defined_type': 'organization-defined-type', 'title': 'title', 'short_description': 'short-description', 'amount': 'amount', 'url': 'url', 'start_date': 'start-date', 'end_date': 'end-date', 'external_ids': 'external-ids', 'contributors': 'contributors', 'organization': 'organization', 'visibility': 'visibility' } def __init__(self, created_date=None, last_modified_date=None, source=None, put_code=None, path=None, type=None, organization_defined_type=None, title=None, short_description=None, amount=None, url=None, start_date=None, end_date=None, external_ids=None, contributors=None, organization=None, visibility=None): # noqa: E501 """FundingV30 - a model defined in Swagger""" # noqa: E501 self._created_date = None self._last_modified_date = None self._source = None self._put_code = None self._path = None self._type = None self._organization_defined_type = None self._title = None self._short_description = None self._amount = None self._url = None self._start_date = None self._end_date = None self._external_ids = None self._contributors = None self._organization = None self._visibility = None self.discriminator = None if created_date is not None: self.created_date = created_date if last_modified_date is not None: self.last_modified_date = last_modified_date if source 
is not None: self.source = source if put_code is not None: self.put_code = put_code if path is not None: self.path = path if type is not None: self.type = type if organization_defined_type is not None: self.organization_defined_type = organization_defined_type if title is not None: self.title = title if short_description is not None: self.short_description = short_description if amount is not None: self.amount = amount if url is not None: self.url = url if start_date is not None: self.start_date = start_date if end_date is not None: self.end_date = end_date if external_ids is not None: self.external_ids = external_ids if contributors is not None: self.contributors = contributors if organization is not None: self.organization = organization if visibility is not None: self.visibility = visibility @property def created_date(self): """Gets the created_date of this FundingV30. # noqa: E501 :return: The created_date of this FundingV30. # noqa: E501 :rtype: CreatedDateV30 """ return self._created_date @created_date.setter def created_date(self, created_date): """Sets the created_date of this FundingV30. :param created_date: The created_date of this FundingV30. # noqa: E501 :type: CreatedDateV30 """ self._created_date = created_date @property def last_modified_date(self): """Gets the last_modified_date of this FundingV30. # noqa: E501 :return: The last_modified_date of this FundingV30. # noqa: E501 :rtype: LastModifiedDateV30 """ return self._last_modified_date @last_modified_date.setter def last_modified_date(self, last_modified_date): """Sets the last_modified_date of this FundingV30. :param last_modified_date: The last_modified_date of this FundingV30. # noqa: E501 :type: LastModifiedDateV30 """ self._last_modified_date = last_modified_date @property def source(self): """Gets the source of this FundingV30. # noqa: E501 :return: The source of this FundingV30. # noqa: E501 :rtype: SourceV30 """ return self._source @source.setter def source(self, source): """Sets the source of this FundingV30. :param source: The source of this FundingV30. # noqa: E501 :type: SourceV30 """ self._source = source @property def put_code(self): """Gets the put_code of this FundingV30. # noqa: E501 :return: The put_code of this FundingV30. # noqa: E501 :rtype: int """ return self._put_code @put_code.setter def put_code(self, put_code): """Sets the put_code of this FundingV30. :param put_code: The put_code of this FundingV30. # noqa: E501 :type: int """ self._put_code = put_code @property def path(self): """Gets the path of this FundingV30. # noqa: E501 :return: The path of this FundingV30. # noqa: E501 :rtype: str """ return self._path @path.setter def path(self, path): """Sets the path of this FundingV30. :param path: The path of this FundingV30. # noqa: E501 :type: str """ self._path = path @property def type(self): """Gets the type of this FundingV30. # noqa: E501 :return: The type of this FundingV30. # noqa: E501 :rtype: str """ return self._type @type.setter def type(self, type): """Sets the type of this FundingV30. :param type: The type of this FundingV30. 
# noqa: E501 :type: str """ if type is None: raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501 allowed_values = ["GRANT", "CONTRACT", "AWARD", "SALARY_AWARD", "grant", "contract", "award", "salary-award"] # noqa: E501 if type not in allowed_values: raise ValueError( "Invalid value for `type` ({0}), must be one of {1}" # noqa: E501 .format(type, allowed_values) ) self._type = type @property def organization_defined_type(self): """Gets the organization_defined_type of this FundingV30. # noqa: E501 :return: The organization_defined_type of this FundingV30. # noqa: E501 :rtype: OrganizationDefinedFundingSubTypeV30 """ return self._organization_defined_type @organization_defined_type.setter def organization_defined_type(self, organization_defined_type): """Sets the organization_defined_type of this FundingV30. :param organization_defined_type: The organization_defined_type of this FundingV30. # noqa: E501 :type: OrganizationDefinedFundingSubTypeV30 """ self._organization_defined_type = organization_defined_type @property def title(self): """Gets the title of this FundingV30. # noqa: E501 :return: The title of this FundingV30. # noqa: E501 :rtype: FundingTitleV30 """ return self._title @title.setter def title(self, title): """Sets the title of this FundingV30. :param title: The title of this FundingV30. # noqa: E501 :type: FundingTitleV30 """ if title is None: raise ValueError("Invalid value for `title`, must not be `None`") # noqa: E501 self._title = title @property def short_description(self): """Gets the short_description of this FundingV30. # noqa: E501 :return: The short_description of this FundingV30. # noqa: E501 :rtype: str """ return self._short_description @short_description.setter def short_description(self, short_description): """Sets the short_description of this FundingV30. :param short_description: The short_description of this FundingV30. # noqa: E501 :type: str """ self._short_description = short_description @property def amount(self): """Gets the amount of this FundingV30. # noqa: E501 :return: The amount of this FundingV30. # noqa: E501 :rtype: AmountV30 """ return self._amount @amount.setter def amount(self, amount): """Sets the amount of this FundingV30. :param amount: The amount of this FundingV30. # noqa: E501 :type: AmountV30 """ self._amount = amount @property def url(self): """Gets the url of this FundingV30. # noqa: E501 :return: The url of this FundingV30. # noqa: E501 :rtype: UrlV30 """ return self._url @url.setter def url(self, url): """Sets the url of this FundingV30. :param url: The url of this FundingV30. # noqa: E501 :type: UrlV30 """ self._url = url @property def start_date(self): """Gets the start_date of this FundingV30. # noqa: E501 :return: The start_date of this FundingV30. # noqa: E501 :rtype: FuzzyDateV30 """ return self._start_date @start_date.setter def start_date(self, start_date): """Sets the start_date of this FundingV30. :param start_date: The start_date of this FundingV30. # noqa: E501 :type: FuzzyDateV30 """ self._start_date = start_date @property def end_date(self): """Gets the end_date of this FundingV30. # noqa: E501 :return: The end_date of this FundingV30. # noqa: E501 :rtype: FuzzyDateV30 """ return self._end_date @end_date.setter def end_date(self, end_date): """Sets the end_date of this FundingV30. :param end_date: The end_date of this FundingV30. # noqa: E501 :type: FuzzyDateV30 """ self._end_date = end_date @property def external_ids(self): """Gets the external_ids of this FundingV30. 
# noqa: E501 :return: The external_ids of this FundingV30. # noqa: E501 :rtype: ExternalIDsV30 """ return self._external_ids @external_ids.setter def external_ids(self, external_ids): """Sets the external_ids of this FundingV30. :param external_ids: The external_ids of this FundingV30. # noqa: E501 :type: ExternalIDsV30 """ self._external_ids = external_ids @property def contributors(self): """Gets the contributors of this FundingV30. # noqa: E501 :return: The contributors of this FundingV30. # noqa: E501 :rtype: FundingContributorsV30 """ return self._contributors @contributors.setter def contributors(self, contributors): """Sets the contributors of this FundingV30. :param contributors: The contributors of this FundingV30. # noqa: E501 :type: FundingContributorsV30 """ self._contributors = contributors @property def organization(self): """Gets the organization of this FundingV30. # noqa: E501 :return: The organization of this FundingV30. # noqa: E501 :rtype: OrganizationV30 """ return self._organization @organization.setter def organization(self, organization): """Sets the organization of this FundingV30. :param organization: The organization of this FundingV30. # noqa: E501 :type: OrganizationV30 """ if organization is None: raise ValueError("Invalid value for `organization`, must not be `None`") # noqa: E501 self._organization = organization @property def visibility(self): """Gets the visibility of this FundingV30. # noqa: E501 :return: The visibility of this FundingV30. # noqa: E501 :rtype: str """ return self._visibility @visibility.setter def visibility(self, visibility): """Sets the visibility of this FundingV30. :param visibility: The visibility of this FundingV30. # noqa: E501 :type: str """ allowed_values = ["LIMITED", "REGISTERED_ONLY", "PUBLIC", "PRIVATE", "public", "private", "limited", "registered-only"] # noqa: E501 if visibility not in allowed_values: raise ValueError( "Invalid value for `visibility` ({0}), must be one of {1}" # noqa: E501 .format(visibility, allowed_values) ) self._visibility = visibility def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(FundingV30, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, FundingV30): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other
mit
tvibliani/odoo
addons/note_pad/__openerp__.py
312
1691
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    'name': 'Memos pad',
    'version': '0.1',
    'category': 'Tools',
    'description': """
This module update memos inside OpenERP for using an external pad
=================================================================

Use for update your text memo in real time with the following user that you invite.
""",
    'author': 'OpenERP SA',
    'website': 'https://www.odoo.com/page/notes',
    'summary': 'Sticky memos, Collaborative',
    'depends': [
        'mail',
        'pad',
        'note',
    ],
    'data': [
        'note_pad_view.xml',
    ],
    'installable': True,
    'application': False,
    'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
tensorflow/models
official/nlp/transformer/transformer_forward_test.py
1
6052
# Copyright 2021 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Forward pass test for Transformer model refactoring.""" import numpy as np import tensorflow as tf from official.nlp.modeling import models from official.nlp.transformer import metrics from official.nlp.transformer import model_params from official.nlp.transformer import transformer def _count_params(layer, trainable_only=True): """Returns the count of all model parameters, or just trainable ones.""" if not trainable_only: return layer.count_params() else: return int( np.sum([ tf.keras.backend.count_params(p) for p in layer.trainable_weights ])) def _create_model(params, is_train): """Creates transformer model.""" encdec_kwargs = dict( num_layers=params["num_hidden_layers"], num_attention_heads=params["num_heads"], intermediate_size=params["filter_size"], activation="relu", dropout_rate=params["relu_dropout"], attention_dropout_rate=params["attention_dropout"], use_bias=False, norm_first=True, norm_epsilon=1e-6, intermediate_dropout=params["relu_dropout"]) encoder_layer = models.TransformerEncoder(**encdec_kwargs) decoder_layer = models.TransformerDecoder(**encdec_kwargs) model_kwargs = dict( vocab_size=params["vocab_size"], embedding_width=params["hidden_size"], dropout_rate=params["layer_postprocess_dropout"], padded_decode=params["padded_decode"], decode_max_length=params["decode_max_length"], dtype=params["dtype"], extra_decode_length=params["extra_decode_length"], beam_size=params["beam_size"], alpha=params["alpha"], encoder_layer=encoder_layer, decoder_layer=decoder_layer, name="transformer_v2") if is_train: inputs = tf.keras.layers.Input((None,), dtype="int64", name="inputs") targets = tf.keras.layers.Input((None,), dtype="int64", name="targets") internal_model = models.Seq2SeqTransformer(**model_kwargs) logits = internal_model( dict(inputs=inputs, targets=targets), training=is_train) vocab_size = params["vocab_size"] label_smoothing = params["label_smoothing"] if params["enable_metrics_in_training"]: logits = metrics.MetricLayer(vocab_size)([logits, targets]) logits = tf.keras.layers.Lambda( lambda x: x, name="logits", dtype=tf.float32)( logits) model = tf.keras.Model([inputs, targets], logits) loss = metrics.transformer_loss(logits, targets, label_smoothing, vocab_size) model.add_loss(loss) return model batch_size = params["decode_batch_size"] if params["padded_decode"] else None inputs = tf.keras.layers.Input((None,), batch_size=batch_size, dtype="int64", name="inputs") internal_model = models.Seq2SeqTransformer(**model_kwargs) ret = internal_model(dict(inputs=inputs), training=is_train) outputs, scores = ret["outputs"], ret["scores"] return tf.keras.Model(inputs, [outputs, scores]) class TransformerForwardTest(tf.test.TestCase): def setUp(self): super(TransformerForwardTest, self).setUp() self.params = params = model_params.TINY_PARAMS params["batch_size"] = params["default_batch_size"] = 16 params["hidden_size"] = 12 params["num_hidden_layers"] = 3 params["filter_size"] = 14 
params["num_heads"] = 2 params["vocab_size"] = 41 params["extra_decode_length"] = 0 params["beam_size"] = 3 params["dtype"] = tf.float32 params["layer_postprocess_dropout"] = 0.0 params["attention_dropout"] = 0.0 params["relu_dropout"] = 0.0 def test_forward_pass_train(self): # Set input_len different from target_len inputs = np.asarray([[5, 2, 1], [7, 5, 0], [1, 4, 0], [7, 5, 11]]) targets = np.asarray([[4, 3, 4, 0], [13, 19, 17, 8], [20, 14, 1, 2], [5, 7, 3, 0]]) # src_model is the original model before refactored. src_model = transformer.create_model(self.params, True) src_num_weights = _count_params(src_model) src_weights = src_model.get_weights() src_model_output = src_model([inputs, targets], training=True) # dest_model is the refactored model. dest_model = _create_model(self.params, True) dest_num_weights = _count_params(dest_model) self.assertEqual(src_num_weights, dest_num_weights) dest_model.set_weights(src_weights) dest_model_output = dest_model([inputs, targets], training=True) self.assertAllEqual(src_model_output, dest_model_output) def test_forward_pass_not_train(self): inputs = np.asarray([[5, 2, 1], [7, 5, 0], [1, 4, 0], [7, 5, 11]]) # src_model is the original model before refactored. src_model = transformer.create_model(self.params, False) src_num_weights = _count_params(src_model) src_weights = src_model.get_weights() src_model_output = src_model([inputs], training=False) # dest_model is the refactored model. dest_model = _create_model(self.params, False) dest_num_weights = _count_params(dest_model) self.assertEqual(src_num_weights, dest_num_weights) dest_model.set_weights(src_weights) dest_model_output = dest_model([inputs], training=False) self.assertAllEqual(src_model_output[0], dest_model_output[0]) self.assertAllEqual(src_model_output[1], dest_model_output[1]) if __name__ == "__main__": tf.test.main()
apache-2.0
repotvsupertuga/tvsupertuga.repository
plugin.video.playlistLoader/resources/lib/chardet/euckrprober.py
2931
1675
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
#   Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301  USA
######################### END LICENSE BLOCK #########################

from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCKRDistributionAnalysis
from .mbcssm import EUCKRSMModel


class EUCKRProber(MultiByteCharSetProber):
    def __init__(self):
        MultiByteCharSetProber.__init__(self)
        self._mCodingSM = CodingStateMachine(EUCKRSMModel)
        self._mDistributionAnalyzer = EUCKRDistributionAnalysis()
        self.reset()

    def get_charset_name(self):
        return "EUC-KR"
gpl-2.0
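The EUCKRProber above is normally driven indirectly through chardet's public detection API rather than instantiated by hand. A rough usage sketch, assuming the standalone `chardet` package is installed (the copy bundled with this Kodi plugin may differ slightly in module layout):

# Feed EUC-KR encoded Korean text to chardet's public API; for short samples
# the reported encoding and confidence can vary, so treat the output as a hint.
import chardet

sample = u'\uc548\ub155\ud558\uc138\uc694, \uc138\uacc4'.encode('euc-kr')  # "Hello, world" in Korean
result = chardet.detect(sample)
print(result['encoding'], result['confidence'])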
andreif/django
tests/template_tests/filter_tests/test_escapejs.py
324
2055
from __future__ import unicode_literals from django.template.defaultfilters import escapejs_filter from django.test import SimpleTestCase from ..utils import setup class EscapejsTests(SimpleTestCase): @setup({'escapejs01': '{{ a|escapejs }}'}) def test_escapejs01(self): output = self.engine.render_to_string('escapejs01', {'a': 'testing\r\njavascript \'string" <b>escaping</b>'}) self.assertEqual(output, 'testing\\u000D\\u000Ajavascript ' '\\u0027string\\u0022 \\u003Cb\\u003E' 'escaping\\u003C/b\\u003E') @setup({'escapejs02': '{% autoescape off %}{{ a|escapejs }}{% endautoescape %}'}) def test_escapejs02(self): output = self.engine.render_to_string('escapejs02', {'a': 'testing\r\njavascript \'string" <b>escaping</b>'}) self.assertEqual(output, 'testing\\u000D\\u000Ajavascript ' '\\u0027string\\u0022 \\u003Cb\\u003E' 'escaping\\u003C/b\\u003E') class FunctionTests(SimpleTestCase): def test_quotes(self): self.assertEqual( escapejs_filter('"double quotes" and \'single quotes\''), '\\u0022double quotes\\u0022 and \\u0027single quotes\\u0027', ) def test_backslashes(self): self.assertEqual(escapejs_filter(r'\ : backslashes, too'), '\\u005C : backslashes, too') def test_whitespace(self): self.assertEqual( escapejs_filter('and lots of whitespace: \r\n\t\v\f\b'), 'and lots of whitespace: \\u000D\\u000A\\u0009\\u000B\\u000C\\u0008', ) def test_script(self): self.assertEqual( escapejs_filter(r'<script>and this</script>'), '\\u003Cscript\\u003Eand this\\u003C/script\\u003E', ) def test_paragraph_separator(self): self.assertEqual( escapejs_filter('paragraph separator:\u2029and line separator:\u2028'), 'paragraph separator:\\u2029and line separator:\\u2028', )
bsd-3-clause
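The template-filter tests above exercise the same escaping that Django exposes directly as a utility function. A small sketch, assuming a Django of roughly the same vintage is importable (the exact set of characters escaped varies slightly between versions):

# Call the underlying utility that the escapejs template filter wraps.
from django.utils.html import escapejs

print(escapejs('testing\r\n<script>alert("hi")</script>'))
# Angle brackets, quotes and newlines come back as \uXXXX escape sequences,
# matching the expected outputs asserted in the tests above.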
babycaseny/poedit
deps/boost/tools/build/test/direct_request_test.py
44
1396
#!/usr/bin/python import BoostBuild t = BoostBuild.Tester(use_test_config=False) # First check some startup. t.write("jamroot.jam", "") t.write("jamfile.jam", """\ exe a : a.cpp b ; lib b : b.cpp ; """) t.write("a.cpp", """\ void # ifdef _WIN32 __declspec(dllimport) # endif foo(); int main() { foo(); } """) t.write("b.cpp", """\ #ifdef MACROS void # ifdef _WIN32 __declspec(dllexport) # endif foo() {} #endif # ifdef _WIN32 int __declspec(dllexport) force_implib_creation; # endif """) t.run_build_system(["define=MACROS"]) t.expect_addition("bin/$toolset/debug/" * (BoostBuild.List("a.obj b.obj b.dll a.exe"))) # When building a debug version, the 'define' still applies. t.rm("bin") t.run_build_system(["debug", "define=MACROS"]) t.expect_addition("bin/$toolset/debug/" * (BoostBuild.List("a.obj b.obj b.dll a.exe"))) # When building a release version, the 'define' still applies. t.write("jamfile.jam", """\ exe a : a.cpp b : <variant>debug ; lib b : b.cpp ; """) t.rm("bin") t.run_build_system(["release", "define=MACROS"]) # Regression test: direct build request was not working when there was more # than one level of 'build-project'. t.rm(".") t.write("jamroot.jam", "") t.write("jamfile.jam", "build-project a ;") t.write("a/jamfile.jam", "build-project b ;") t.write("a/b/jamfile.jam", "") t.run_build_system(["release"]) t.cleanup()
mit
ryangallen/django
django/contrib/gis/sitemaps/views.py
341
2421
from __future__ import unicode_literals from django.apps import apps from django.contrib.gis.db.models.fields import GeometryField from django.contrib.gis.db.models.functions import AsKML, Transform from django.contrib.gis.shortcuts import render_to_kml, render_to_kmz from django.core.exceptions import FieldDoesNotExist from django.db import DEFAULT_DB_ALIAS, connections from django.http import Http404 def kml(request, label, model, field_name=None, compress=False, using=DEFAULT_DB_ALIAS): """ This view generates KML for the given app label, model, and field name. The model's default manager must be GeoManager, and the field name must be that of a geographic field. """ placemarks = [] try: klass = apps.get_model(label, model) except LookupError: raise Http404('You must supply a valid app label and module name. Got "%s.%s"' % (label, model)) if field_name: try: field = klass._meta.get_field(field_name) if not isinstance(field, GeometryField): raise FieldDoesNotExist except FieldDoesNotExist: raise Http404('Invalid geometry field.') connection = connections[using] if connection.features.has_AsKML_function: # Database will take care of transformation. placemarks = klass._default_manager.using(using).annotate(kml=AsKML(field_name)) else: # If the database offers no KML method, we use the `kml` # attribute of the lazy geometry instead. placemarks = [] if connection.features.has_Transform_function: qs = klass._default_manager.using(using).annotate( **{'%s_4326' % field_name: Transform(field_name, 4326)}) field_name += '_4326' else: qs = klass._default_manager.using(using).all() for mod in qs: mod.kml = getattr(mod, field_name).kml placemarks.append(mod) # Getting the render function and rendering to the correct. if compress: render = render_to_kmz else: render = render_to_kml return render('gis/kml/placemarks.kml', {'places': placemarks}) def kmz(request, label, model, field_name=None, using=DEFAULT_DB_ALIAS): """ This view returns KMZ for the given app label, model, and field name. """ return kml(request, label, model, field_name, compress=True, using=using)
bsd-3-clause
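The `kml` and `kmz` views above are meant to be wired into a URLconf so that the app label, model and geometry field arrive as keyword arguments. A hedged sketch using the URL style of the same Django era; the app label ("world"), model ("country") and field ("mpoly") in the example URL are made-up placeholders:

# Hypothetical urls.py wiring for the views defined above.
from django.conf.urls import url
from django.contrib.gis.sitemaps.views import kml, kmz

urlpatterns = [
    url(r'^kml/(?P<label>\w+)/(?P<model>\w+)/(?P<field_name>\w+)\.kml$', kml),
    url(r'^kmz/(?P<label>\w+)/(?P<model>\w+)/(?P<field_name>\w+)\.kmz$', kmz),
]
# e.g. /kml/world/country/mpoly.kml renders gis/kml/placemarks.kml for that field.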
PrincetonUniversity/pox
pox/lib/packet/eapol.py
47
3220
# Copyright 2011 James McCauley # Copyright 2008 (C) Nicira, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at: # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # This file is derived from the packet library in NOX, which was # developed by Nicira, Inc. #====================================================================== # # EAPOL Header Format (see IEEE 802.1X-2004): # # Octet 0: Protocol version (1 or 2). # Octet 1: Packet type: # 0 = EAP packet # 1 = EAPOL-Start # 2 = EAPOL-Logoff # 3 = EAPOL-Key # 4 = EAPOL-Encapsulated-ASF-Alert # Octets 2-3: Length of packet body field (0 if packet body is absent) # Octets 4-end: Packet body (present only for packet types 0, 3, 4) # #====================================================================== import struct from packet_utils import * from packet_base import packet_base from eap import * class eapol(packet_base): "EAP over LAN packet" MIN_LEN = 4 V1_PROTO = 1 V2_PROTO = 2 EAP_TYPE = 0 EAPOL_START_TYPE = 1 EAPOL_LOGOFF_TYPE = 2 EAPOL_KEY_TYPE = 3 EAPOL_ENCAPSULATED_ASF_ALERT = 4 type_names = {EAP_TYPE: "EAP", EAPOL_START_TYPE: "EAPOL-Start", EAPOL_LOGOFF_TYPE: "EAPOL-Logoff", EAPOL_KEY_TYPE: "EAPOL-Key", EAPOL_ENCAPSULATED_ASF_ALERT: "EAPOL-Encapsulated-ASF-Alert"} @staticmethod def type_name(type): return eapol.type_names.get(type, "type%d" % type) def __init__(self, raw=None, prev=None, **kw): packet_base.__init__(self) self.prev = prev self.version = self.V1_PROTO self.type = self.EAP_TYPE self.bodylen = 0 if raw is not None: self.parse(raw) self._init(kw) def __str__(self): s = '[EAPOL v%d %s]' % (self.version, self.type_name(self.type)) return s def parse(self, raw): assert isinstance(raw, bytes) self.raw = raw dlen = len(raw) if dlen < self.MIN_LEN: self.msg('(eapol parse) warning EAPOL packet data too short to parse header: data len %u' % (dlen,)) return (self.version, self.type, self.bodylen) \ = struct.unpack('!BBH', raw[:self.MIN_LEN]) self.parsed = True if self.type == self.EAP_TYPE: self.next = eap(raw=raw[self.MIN_LEN:], prev=self) elif (self.type == self.EAPOL_START_TYPE or self.type == self.EAPOL_LOGOFF_TYPE): pass # These types have no payloads. else: self.msg('warning unsupported EAPOL type: %s' % (self.type_name(self.type),)) def hdr(self, payload): return struct.pack('!BBH', self.version, self.type, self.bodylen)
apache-2.0
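The 4-byte EAPOL header documented at the top of that module (version, packet type, body length) can be illustrated with the standard library alone. A stand-alone sketch that does not require POX on the import path:

# Pack and unpack the EAPOL header layout described above using only struct.
import struct

raw = struct.pack('!BBH', 1, 1, 0)                      # v1, EAPOL-Start, empty body
version, pkt_type, bodylen = struct.unpack('!BBH', raw[:4])
assert (version, pkt_type, bodylen) == (1, 1, 0)
# Parsing the same bytes with the eapol class above would report type
# EAPOL-Start and attach no payload, since that type carries no body.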
chand3040/cloud_that
common/test/acceptance/fixtures/base.py
148
6165
""" Common code shared by course and library fixtures. """ import re import requests import json from lazy import lazy from . import STUDIO_BASE_URL class StudioApiLoginError(Exception): """ Error occurred while logging in to the Studio API. """ pass class StudioApiFixture(object): """ Base class for fixtures that use the Studio restful API. """ def __init__(self): # Info about the auto-auth user used to create the course/library. self.user = {} @lazy def session(self): """ Log in as a staff user, then return a `requests` `session` object for the logged in user. Raises a `StudioApiLoginError` if the login fails. """ # Use auto-auth to retrieve the session for a logged in user session = requests.Session() response = session.get(STUDIO_BASE_URL + "/auto_auth?staff=true") # Return the session from the request if response.ok: # auto_auth returns information about the newly created user # capture this so it can be used by by the testcases. user_pattern = re.compile(r'Logged in user {0} \({1}\) with password {2} and user_id {3}'.format( r'(?P<username>\S+)', r'(?P<email>[^\)]+)', r'(?P<password>\S+)', r'(?P<user_id>\d+)')) user_matches = re.match(user_pattern, response.text) if user_matches: self.user = user_matches.groupdict() return session else: msg = "Could not log in to use Studio restful API. Status code: {0}".format(response.status_code) raise StudioApiLoginError(msg) @lazy def session_cookies(self): """ Log in as a staff user, then return the cookies for the session (as a dict) Raises a `StudioApiLoginError` if the login fails. """ return {key: val for key, val in self.session.cookies.items()} @lazy def headers(self): """ Default HTTP headers dict. """ return { 'Content-type': 'application/json', 'Accept': 'application/json', 'X-CSRFToken': self.session_cookies.get('csrftoken', '') } class FixtureError(Exception): """ Error occurred while installing a course or library fixture. """ pass class XBlockContainerFixture(StudioApiFixture): """ Base class for course and library fixtures. """ def __init__(self): self.children = [] super(XBlockContainerFixture, self).__init__() def add_children(self, *args): """ Add children XBlock to the container. Each item in `args` is an `XBlockFixtureDesc` object. Returns the fixture to allow chaining. """ self.children.extend(args) return self def _create_xblock_children(self, parent_loc, xblock_descriptions): """ Recursively create XBlock children. """ for desc in xblock_descriptions: loc = self.create_xblock(parent_loc, desc) self._create_xblock_children(loc, desc.children) def create_xblock(self, parent_loc, xblock_desc): """ Create an XBlock with `parent_loc` (the location of the parent block) and `xblock_desc` (an `XBlockFixtureDesc` instance). """ create_payload = { 'category': xblock_desc.category, 'display_name': xblock_desc.display_name, } if parent_loc is not None: create_payload['parent_locator'] = parent_loc # Create the new XBlock response = self.session.post( STUDIO_BASE_URL + '/xblock/', data=json.dumps(create_payload), headers=self.headers, ) if not response.ok: msg = "Could not create {0}. 
Status was {1}".format(xblock_desc, response.status_code) raise FixtureError(msg) try: loc = response.json().get('locator') xblock_desc.locator = loc except ValueError: raise FixtureError("Could not decode JSON from '{0}'".format(response.content)) # Configure the XBlock response = self.session.post( STUDIO_BASE_URL + '/xblock/' + loc, data=xblock_desc.serialize(), headers=self.headers, ) if response.ok: return loc else: raise FixtureError("Could not update {0}. Status code: {1}".format(xblock_desc, response.status_code)) def _update_xblock(self, locator, data): """ Update the xblock at `locator`. """ # Create the new XBlock response = self.session.put( "{}/xblock/{}".format(STUDIO_BASE_URL, locator), data=json.dumps(data), headers=self.headers, ) if not response.ok: msg = "Could not update {} with data {}. Status was {}".format(locator, data, response.status_code) raise FixtureError(msg) def _encode_post_dict(self, post_dict): """ Encode `post_dict` (a dictionary) as UTF-8 encoded JSON. """ return json.dumps({ k: v.encode('utf-8') if isinstance(v, basestring) else v for k, v in post_dict.items() }) def get_nested_xblocks(self, category=None): """ Return a list of nested XBlocks for the container that can be filtered by category. """ xblocks = self._get_nested_xblocks(self) if category: xblocks = [x for x in xblocks if x.category == category] return xblocks def _get_nested_xblocks(self, xblock_descriptor): """ Return a list of nested XBlocks for the container. """ xblocks = list(xblock_descriptor.children) for child in xblock_descriptor.children: xblocks.extend(self._get_nested_xblocks(child)) return xblocks def _publish_xblock(self, locator): """ Publish the xblock at `locator`. """ self._update_xblock(locator, {'publish': 'make_public'})
agpl-3.0
coursemdetw/2014c2
w2/static/Brython2.0.0-20140209-164925/Lib/signal.py
743
1646
"""This module provides mechanisms to use signal handlers in Python. Functions: alarm() -- cause SIGALRM after a specified time [Unix only] setitimer() -- cause a signal (described below) after a specified float time and the timer may restart then [Unix only] getitimer() -- get current value of timer [Unix only] signal() -- set the action for a given signal getsignal() -- get the signal action for a given signal pause() -- wait until a signal arrives [Unix only] default_int_handler() -- default SIGINT handler signal constants: SIG_DFL -- used to refer to the system default handler SIG_IGN -- used to ignore the signal NSIG -- number of defined signals SIGINT, SIGTERM, etc. -- signal numbers itimer constants: ITIMER_REAL -- decrements in real time, and delivers SIGALRM upon expiration ITIMER_VIRTUAL -- decrements only when the process is executing, and delivers SIGVTALRM upon expiration ITIMER_PROF -- decrements both when the process is executing and when the system is executing on behalf of the process. Coupled with ITIMER_VIRTUAL, this timer is usually used to profile the time spent by the application in user and kernel space. SIGPROF is delivered upon expiration. *** IMPORTANT NOTICE *** A signal handler function is called with two arguments: the first is the signal number, the second is the interrupted stack frame.""" CTRL_BREAK_EVENT=1 CTRL_C_EVENT=0 NSIG=23 SIGABRT=22 SIGBREAK=21 SIGFPE=8 SIGILL=4 SIGINT=2 SIGSEGV=11 SIGTERM=15 SIG_DFL=0 SIG_IGN=1 def signal(signalnum, handler) : pass
gpl-2.0
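The module above is Brython's stub of CPython's `signal` interface: it defines the constants but its `signal()` is a no-op. The interface it mirrors is typically used as in the following sketch, which is only meaningful on CPython:

# Install a SIGINT handler using the real CPython signal module.
import signal

def handler(signum, frame):
    print('caught signal %d' % signum)

previous = signal.signal(signal.SIGINT, handler)  # returns the handler it replaced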
jelly/calibre
src/calibre/utils/resources.py
1
3853
#!/usr/bin/env python2 # vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai from __future__ import with_statement __license__ = 'GPL v3' __copyright__ = '2009, Kovid Goyal <[email protected]>' __docformat__ = 'restructuredtext en' import __builtin__, sys, os from calibre import config_dir class PathResolver(object): def __init__(self): self.locations = [sys.resources_location] self.cache = {} def suitable(path): try: return os.path.exists(path) and os.path.isdir(path) and \ os.listdir(path) except: pass return False self.default_path = sys.resources_location dev_path = os.environ.get('CALIBRE_DEVELOP_FROM', None) self.using_develop_from = False if dev_path is not None: dev_path = os.path.join(os.path.abspath( os.path.dirname(dev_path)), 'resources') if suitable(dev_path): self.locations.insert(0, dev_path) self.default_path = dev_path self.using_develop_from = True user_path = os.path.join(config_dir, 'resources') self.user_path = None if suitable(user_path): self.locations.insert(0, user_path) self.user_path = user_path def __call__(self, path, allow_user_override=True): path = path.replace(os.sep, '/') key = (path, allow_user_override) ans = self.cache.get(key, None) if ans is None: for base in self.locations: if not allow_user_override and base == self.user_path: continue fpath = os.path.join(base, *path.split('/')) if os.path.exists(fpath): ans = fpath break if ans is None: ans = os.path.join(self.default_path, *path.split('/')) self.cache[key] = ans return ans _resolver = PathResolver() def get_path(path, data=False, allow_user_override=True): fpath = _resolver(path, allow_user_override=allow_user_override) if data: with open(fpath, 'rb') as f: return f.read() return fpath def get_image_path(path, data=False, allow_user_override=True): if not path: return get_path('images', allow_user_override=allow_user_override) return get_path('images/'+path, data=data, allow_user_override=allow_user_override) def js_name_to_path(name, ext='.coffee'): path = (u'/'.join(name.split('.'))) + ext d = os.path.dirname base = d(d(os.path.abspath(__file__))) return os.path.join(base, path) def _compile_coffeescript(name): from calibre.utils.serve_coffee import compile_coffeescript src = js_name_to_path(name) with open(src, 'rb') as f: cs, errors = compile_coffeescript(f.read(), src) if errors: for line in errors: print (line) raise Exception('Failed to compile coffeescript' ': %s'%src) return cs def compiled_coffeescript(name, dynamic=False): import zipfile zipf = get_path('compiled_coffeescript.zip', allow_user_override=False) with zipfile.ZipFile(zipf, 'r') as zf: if dynamic: import json existing_hash = json.loads(zf.comment or '{}').get(name + '.js') if existing_hash is not None: import hashlib with open(js_name_to_path(name), 'rb') as f: if existing_hash == hashlib.sha1(f.read()).hexdigest(): return zf.read(name + '.js') return _compile_coffeescript(name) else: return zf.read(name+'.js') __builtin__.__dict__['P'] = get_path __builtin__.__dict__['I'] = get_image_path
gpl-3.0
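The `PathResolver` above implements a "first existing location wins" lookup: a per-user resources directory, then an optional develop-from checkout, then the installed resources. The same pattern can be illustrated without calibre; the directory names passed in would be placeholders, not calibre's real paths:

# Generic sketch of the lookup order PathResolver uses.
import os

def resolve(relpath, locations, default):
    # Return the first location that actually contains the resource,
    # falling back to the default install location.
    for base in locations:
        candidate = os.path.join(base, *relpath.split('/'))
        if os.path.exists(candidate):
            return candidate
    return os.path.join(default, *relpath.split('/'))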
midospan/profitpy
profit/session/collection.py
18
4963
#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright 2007 Troy Melhase, Yichun Wei # Distributed under the terms of the GNU General Public License v2 # Author: Troy Melhase <[email protected]> # Yichun Wei <[email protected]> import os from cPickle import PicklingError, UnpicklingError, dump, load from PyQt4.QtCore import QObject, QThread from profit.lib import logging from profit.lib import Signals class DataCollection(QObject): sessionResendSignals = [] def __init__(self, session): QObject.__init__(self) self.session = session self.data = {} session.registerMeta(self) for signal in self.sessionResendSignals: self.connect(self, signal, session, signal) def __contains__(self, item): return item in self.data def __getitem__(self, name): return self.data[name] def __setitem__(self, name, value): self.data[name] = value def keys(self): return self.data.keys() def items(self): return self.data.items() def setdefault(self, key, default): return self.data.setdefault(key, default) class AccountCollection(DataCollection): sessionResendSignals = [Signals.createdAccountData, ] def __init__(self, session): DataCollection.__init__(self, session) self.last = {} def on_session_UpdateAccountValue(self, message): key = (message.key, message.currency, message.accountName) try: iv = float(message.value) except (ValueError, ): iv = message.value try: acctdata = self[key] except (KeyError, ): acctdata = self[key] = \ self.session.strategy.makeAccountSeries(key) self.emit(Signals.createdAccountData, key, acctdata, iv) acctdata.append(iv) self.last[key] = iv class ContractDataCollection(DataCollection): sessionResendSignals = [Signals.contract.added, ] def __setitem__(self, tickerId, contract): ## maybe enforce types? DataCollection.__setitem__(self, tickerId, contract) self.emit(Signals.contract.added, tickerId, contract) def on_session_TickPrice_TickSize(self, message): tickerId = message.tickerId if tickerId not in self: contract = self[tickerId] = self.session.strategy.makeContract(symbol='') self.emit(Signals.contract.added, tickerId, contract) class TickerCollection(DataCollection): sessionResendSignals = [Signals.createdSeries, Signals.createdTicker, ] def __init__(self, session): DataCollection.__init__(self, session) ## have to make the strategy symbols lazy somehow for tid in session.strategy.symbols().values(): self[tid] = session.strategy.makeTicker(tid) def on_session_TickPrice_TickSize(self, message): tickerId = message.tickerId try: tickerdata = self[tickerId] except (KeyError, ): tickerdata = self[tickerId] = \ self.session.strategy.makeTicker(tickerId) self.emit(Signals.createdTicker, tickerId, tickerdata) try: value = message.price except (AttributeError, ): value = message.size field = message.field try: seq = tickerdata.series[field] except (KeyError, ): seq = tickerdata.series[field] = \ self.session.strategy.makeTickerSeries(tickerId, field) self.emit(Signals.createdSeries, tickerId, field) seq.append(value) class HistoricalDataCollection(DataCollection): sessionResendSignals = [Signals.histdata.start, Signals.histdata.finish] def __init__(self, session): DataCollection.__init__(self, session) def on_session_HistoricalData(self, message): if message.date.startswith('finished'): reqId = message.reqId reqData = self.setdefault(reqId, {}) histMsgs = self.session.messagesTyped['HistoricalData'] reqData['messages'] = self.historyMessages(reqId, histMsgs) self.emit(Signals.histdata.finish, reqId) def begin(self, params): reqId = params['tickerId'] reqData = self.setdefault(reqId, {}) 
reqData.update(params) self.emit(Signals.histdata.start, reqId, reqData) self.session.connection.reqHistoricalData(**reqData) @staticmethod def historyMessages(reqId, msgs): return (m for m in msgs if m[1].reqId==reqId and not m[1].date.startswith('finished')) class OrderDataCollection(DataCollection): nextId = 0 def on_session_nextValidId(self, message): self.nextId = int(message.orderId) class ErrorDataCollection(DataCollection): def on_session_Error(self, message): logging.debug(str(message))
gpl-2.0
areeda/gwpy
gwpy/timeseries/io/core.py
3
4692
# -*- coding: utf-8 -*- # Copyright (C) Duncan Macleod (2018-2020) # # This file is part of GWpy. # # GWpy is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # GWpy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GWpy. If not, see <http://www.gnu.org/licenses/>. """Basic I/O routines for :mod:`gwpy.timeseries` """ from ...io import cache as io_cache from ...io.mp import read_multi as io_read_multi def read(cls, source, *args, **kwargs): """Read data from a source into a `gwpy.timeseries` object. This method is just the internal worker for `TimeSeries.read`, and `TimeSeriesDict.read`, and isn't meant to be called directly. """ # if reading a cache, read it now and sieve if io_cache.is_cache(source): from .cache import preformat_cache source = preformat_cache(source, *args[1:], start=kwargs.get('start'), end=kwargs.get('end')) # get join arguments pad = kwargs.pop('pad', None) gap = kwargs.pop('gap', 'raise' if pad is None else 'pad') joiner = _join_factory( cls, gap, pad, kwargs.get("start", None), kwargs.get("end", None), ) # read return io_read_multi(joiner, cls, source, *args, **kwargs) def _join_factory(cls, gap, pad, start, end): """Build a joiner for the given cls, and the given padding options """ if issubclass(cls, dict): def _join(data): out = cls() data = list(data) while data: tsd = data.pop(0) out.append(tsd, gap=gap, pad=pad) del tsd if gap in ("pad", "raise"): for key in out: out[key] = _pad_series( out[key], pad, start, end, error=(gap == "raise"), ) return out else: from .. import TimeSeriesBaseList def _join(arrays): list_ = TimeSeriesBaseList(*arrays) joined = list_.join(pad=pad, gap=gap) if gap in ("pad", "raise"): return _pad_series( joined, pad, start, end, error=(gap == "raise"), ) return joined return _join def _pad_series(ts, pad, start=None, end=None, error=False): """Pad a timeseries to match the specified [start, end) limits To cover a gap in data returned from a data source. Parameters ---------- ts : `gwpy.types.Series` the input series pad : `float`, `astropy.units.Quantity` the value with which to pad start : `float`, `astropy.units.Quantity`, optional the desired start point of the X-axis, defaults to the start point of the incoming series end : `float`, `astropy.units.Quantity`, optional the desired end point of the X-axis, defaults to the end point of the incoming series error : `bool`, optional raise `ValueError` when gaps are present, rather than padding anything Returns ------- series : instance of incoming series type a padded version of the series. This may be the same object if not padding is needed. Raises ------ ValueError if `error=True` is given and padding would have been required to match the request. 
""" span = ts.span if start is None: start = span[0] if end is None: end = span[1] pada = max(int((span[0] - start) * ts.sample_rate.value), 0) padb = max(int((end - span[1]) * ts.sample_rate.value), 0) if not (pada or padb): # if noop, just return the input return ts if error: # if error, bail out now raise ValueError( "{} with span {} does not cover requested interval {}".format( type(ts).__name__, span, type(span)(start, end), ) ) # otherwise applying the padding return ts.pad((pada, padb), mode='constant', constant_values=(pad,))
gpl-3.0
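The pad sizes computed in `_pad_series` above are just sample counts: the duration missing at each end of the requested interval multiplied by the sample rate, clipped at zero. A plain-number illustration:

# Pad-size arithmetic from _pad_series with concrete numbers.
sample_rate = 16.0            # Hz
span = (10.0, 20.0)           # interval actually covered by the data
start, end = 8.0, 21.0        # interval that was requested

pada = max(int((span[0] - start) * sample_rate), 0)   # samples to prepend
padb = max(int((end - span[1]) * sample_rate), 0)     # samples to append
assert (pada, padb) == (32, 16)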
andmos/ansible
test/units/modules/network/ingate/test_ig_config.py
50
8319
# -*- coding: utf-8 -*- # Copyright (c) 2018, Ingate Systems AB # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. from __future__ import (absolute_import, division, print_function) __metaclass__ = type import os from units.compat.mock import patch from ansible.modules.network.ingate import ig_config from units.modules.utils import set_module_args from .ingate_module import TestIngateModule, load_fixture class TestConfigModule(TestIngateModule): module = ig_config def setUp(self): super(TestConfigModule, self).setUp() self.mock_make_request = patch('ansible.modules.network.ingate.' 'ig_config.make_request') self.make_request = self.mock_make_request.start() # ATM the Ingate Python SDK is not needed in this unit test. self.module.HAS_INGATESDK = True def tearDown(self): super(TestConfigModule, self).tearDown() self.mock_make_request.stop() def load_fixtures(self, fixture=None, command=None, changed=False): self.make_request.side_effect = [(changed, command, load_fixture(fixture))] def test_ig_config_add(self): """Test adding a row to a table. """ command = 'add' set_module_args(dict( client=dict( version='v1', address='127.0.0.1', scheme='http', username='alice', password='foobar' ), add=True, table='misc.dns_servers', columns=dict( server='192.168.1.23' ))) fixture = '%s_%s.%s' % (os.path.basename(__file__).split('.')[0], command, 'json') result = self.execute_module(changed=True, fixture=fixture, command=command) self.assertTrue(command in result) def test_ig_config_delete(self): """Test deleting all rows in a table. """ command = 'delete' set_module_args(dict( client=dict( version='v1', address='127.0.0.1', scheme='http', username='alice', password='foobar' ), delete=True, table='misc.dns_servers', )) fixture = '%s_%s.%s' % (os.path.basename(__file__).split('.')[0], command, 'json') result = self.execute_module(changed=True, fixture=fixture, command=command) self.assertTrue(command in result) def test_ig_config_get(self): """Test returning all rows in a table. """ command = 'get' set_module_args(dict( client=dict( version='v1', address='127.0.0.1', scheme='http', username='alice', password='foobar' ), get=True, table='misc.dns_servers', )) fixture = '%s_%s.%s' % (os.path.basename(__file__).split('.')[0], command, 'json') result = self.execute_module(changed=True, fixture=fixture, command=command) self.assertTrue(command in result) def test_ig_config_modify(self): """Test modifying a row. """ command = 'modify' set_module_args(dict( client=dict( version='v1', address='127.0.0.1', scheme='http', username='alice', password='foobar' ), modify=True, table='misc.unitname', columns=dict( unitname='"Testapi - 1541699806"' ))) fixture = '%s_%s.%s' % (os.path.basename(__file__).split('.')[0], command, 'json') result = self.execute_module(changed=True, fixture=fixture, command=command) self.assertTrue(command in result) def test_ig_config_revert(self): """Test reverting the preliminary configuration. 
""" command = 'revert' set_module_args(dict( client=dict( version='v1', address='127.0.0.1', scheme='http', username='alice', password='foobar' ), revert=True )) fixture = '%s_%s.%s' % (os.path.basename(__file__).split('.')[0], command, 'json') result = self.execute_module(changed=True, fixture=fixture, command=command) self.assertTrue(command in result) def test_ig_config_factory(self): """Test loading factory defaults. """ command = 'factory' set_module_args(dict( client=dict( version='v1', address='127.0.0.1', scheme='http', username='alice', password='foobar' ), factory=True )) fixture = '%s_%s.%s' % (os.path.basename(__file__).split('.')[0], command, 'json') result = self.execute_module(changed=True, fixture=fixture, command=command) self.assertTrue(command in result) def test_ig_config_store(self): """Test storing the preliminary configuration. """ command = 'store' set_module_args(dict( client=dict( version='v1', address='127.0.0.1', scheme='http', username='alice', password='foobar' ), store=True )) fixture = '%s_%s.%s' % (os.path.basename(__file__).split('.')[0], command, 'json') result = self.execute_module(changed=True, fixture=fixture, command=command) self.assertTrue(command in result) def test_ig_config_download(self): """Test doing backup of configuration database. """ command = 'store' set_module_args(dict( client=dict( version='v1', address='127.0.0.1', scheme='http', username='alice', password='foobar' ), download=True )) fixture = '%s_%s.%s' % (os.path.basename(__file__).split('.')[0], command, 'json') result = self.execute_module(changed=True, fixture=fixture, command=command) self.assertTrue(command in result) def test_ig_config_return_rowid(self): """Test retrieving a row id. """ command = 'return_rowid' set_module_args(dict( client=dict( version='v1', address='127.0.0.1', scheme='http', username='alice', password='foobar' ), return_rowid=True, table='network.local_nets', columns=dict( interface='eth0' ))) fixture = '%s_%s.%s' % (os.path.basename(__file__).split('.')[0], command, 'json') result = self.execute_module(changed=True, fixture=fixture, command=command) self.assertTrue(command in result)
gpl-3.0
goldsborough/.emacs
.emacs.d/.python-environments/default/lib/python3.5/site-packages/setuptools/command/install_scripts.py
505
2231
from distutils import log import distutils.command.install_scripts as orig import os from pkg_resources import Distribution, PathMetadata, ensure_directory class install_scripts(orig.install_scripts): """Do normal script install, plus any egg_info wrapper scripts""" def initialize_options(self): orig.install_scripts.initialize_options(self) self.no_ep = False def run(self): import setuptools.command.easy_install as ei self.run_command("egg_info") if self.distribution.scripts: orig.install_scripts.run(self) # run first to set up self.outfiles else: self.outfiles = [] if self.no_ep: # don't install entry point scripts into .egg file! return ei_cmd = self.get_finalized_command("egg_info") dist = Distribution( ei_cmd.egg_base, PathMetadata(ei_cmd.egg_base, ei_cmd.egg_info), ei_cmd.egg_name, ei_cmd.egg_version, ) bs_cmd = self.get_finalized_command('build_scripts') exec_param = getattr(bs_cmd, 'executable', None) bw_cmd = self.get_finalized_command("bdist_wininst") is_wininst = getattr(bw_cmd, '_is_running', False) writer = ei.ScriptWriter if is_wininst: exec_param = "python.exe" writer = ei.WindowsScriptWriter # resolve the writer to the environment writer = writer.best() cmd = writer.command_spec_class.best().from_param(exec_param) for args in writer.get_args(dist, cmd.as_header()): self.write_script(*args) def write_script(self, script_name, contents, mode="t", *ignored): """Write an executable file to the scripts directory""" from setuptools.command.easy_install import chmod, current_umask log.info("Installing %s script to %s", script_name, self.install_dir) target = os.path.join(self.install_dir, script_name) self.outfiles.append(target) mask = current_umask() if not self.dry_run: ensure_directory(target) f = open(target, "w" + mode) f.write(contents) f.close() chmod(target, 0o777 - mask)
mit
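The `install_scripts` command above is what turns a project's script declarations into installed wrapper scripts. The usual way to trigger it is through entry points in a project's setup file; a minimal sketch with placeholder project and module names:

# Hypothetical setup.py whose console_scripts entry point is written out as a
# wrapper script by the install_scripts command above at install time.
from setuptools import setup

setup(
    name='example-tool',
    version='0.1',
    py_modules=['example_tool'],
    entry_points={
        'console_scripts': ['example-tool = example_tool:main'],
    },
)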
sarvex/django
django/test/utils.py
14
20974
import logging import re import sys import time import warnings from contextlib import contextmanager from functools import wraps from unittest import skipIf, skipUnless from xml.dom.minidom import Node, parseString from django.apps import apps from django.conf import UserSettingsHolder, settings from django.core import mail from django.core.signals import request_started from django.core.urlresolvers import get_script_prefix, set_script_prefix from django.db import reset_queries from django.http import request from django.template import Template from django.test.signals import setting_changed, template_rendered from django.utils import six from django.utils.decorators import ContextDecorator from django.utils.encoding import force_str from django.utils.translation import deactivate try: import jinja2 except ImportError: jinja2 = None __all__ = ( 'Approximate', 'ContextList', 'isolate_lru_cache', 'get_runner', 'modify_settings', 'override_settings', 'requires_tz_support', 'setup_test_environment', 'teardown_test_environment', ) TZ_SUPPORT = hasattr(time, 'tzset') class Approximate(object): def __init__(self, val, places=7): self.val = val self.places = places def __repr__(self): return repr(self.val) def __eq__(self, other): if self.val == other: return True return round(abs(self.val - other), self.places) == 0 class ContextList(list): """A wrapper that provides direct key access to context items contained in a list of context objects. """ def __getitem__(self, key): if isinstance(key, six.string_types): for subcontext in self: if key in subcontext: return subcontext[key] raise KeyError(key) else: return super(ContextList, self).__getitem__(key) def __contains__(self, key): try: self[key] except KeyError: return False return True def keys(self): """ Flattened keys of subcontexts. """ keys = set() for subcontext in self: for dict in subcontext: keys |= set(dict.keys()) return keys def instrumented_test_render(self, context): """ An instrumented Template render method, providing a signal that can be intercepted by the test system Client """ template_rendered.send(sender=self, template=self, context=context) return self.nodelist.render(context) def setup_test_environment(): """Perform any global pre-test setup. This involves: - Installing the instrumented test renderer - Set the email backend to the locmem email backend. - Setting the active locale to match the LANGUAGE_CODE setting. """ Template._original_render = Template._render Template._render = instrumented_test_render # Storing previous values in the settings module itself is problematic. # Store them in arbitrary (but related) modules instead. See #20636. mail._original_email_backend = settings.EMAIL_BACKEND settings.EMAIL_BACKEND = 'django.core.mail.backends.locmem.EmailBackend' request._original_allowed_hosts = settings.ALLOWED_HOSTS settings.ALLOWED_HOSTS = ['*'] mail.outbox = [] deactivate() def teardown_test_environment(): """Perform any global post-test teardown. 
This involves: - Restoring the original test renderer - Restoring the email sending functions """ Template._render = Template._original_render del Template._original_render settings.EMAIL_BACKEND = mail._original_email_backend del mail._original_email_backend settings.ALLOWED_HOSTS = request._original_allowed_hosts del request._original_allowed_hosts del mail.outbox def get_runner(settings, test_runner_class=None): if not test_runner_class: test_runner_class = settings.TEST_RUNNER test_path = test_runner_class.split('.') # Allow for Python 2.5 relative paths if len(test_path) > 1: test_module_name = '.'.join(test_path[:-1]) else: test_module_name = '.' test_module = __import__(test_module_name, {}, {}, force_str(test_path[-1])) test_runner = getattr(test_module, test_path[-1]) return test_runner class override_settings(object): """ Acts as either a decorator, or a context manager. If it's a decorator it takes a function and returns a wrapped function. If it's a contextmanager it's used with the ``with`` statement. In either event entering/exiting are called before and after, respectively, the function/block is executed. """ def __init__(self, **kwargs): self.options = kwargs def __enter__(self): self.enable() def __exit__(self, exc_type, exc_value, traceback): self.disable() def __call__(self, test_func): from django.test import SimpleTestCase if isinstance(test_func, type): if not issubclass(test_func, SimpleTestCase): raise Exception( "Only subclasses of Django SimpleTestCase can be decorated " "with override_settings") self.save_options(test_func) return test_func else: @wraps(test_func) def inner(*args, **kwargs): with self: return test_func(*args, **kwargs) return inner def save_options(self, test_func): if test_func._overridden_settings is None: test_func._overridden_settings = self.options else: # Duplicate dict to prevent subclasses from altering their parent. test_func._overridden_settings = dict( test_func._overridden_settings, **self.options) def enable(self): # Keep this code at the beginning to leave the settings unchanged # in case it raises an exception because INSTALLED_APPS is invalid. if 'INSTALLED_APPS' in self.options: try: apps.set_installed_apps(self.options['INSTALLED_APPS']) except Exception: apps.unset_installed_apps() raise override = UserSettingsHolder(settings._wrapped) for key, new_value in self.options.items(): setattr(override, key, new_value) self.wrapped = settings._wrapped settings._wrapped = override for key, new_value in self.options.items(): setting_changed.send(sender=settings._wrapped.__class__, setting=key, value=new_value, enter=True) def disable(self): if 'INSTALLED_APPS' in self.options: apps.unset_installed_apps() settings._wrapped = self.wrapped del self.wrapped for key in self.options: new_value = getattr(settings, key, None) setting_changed.send(sender=settings._wrapped.__class__, setting=key, value=new_value, enter=False) class modify_settings(override_settings): """ Like override_settings, but makes it possible to append, prepend or remove items instead of redefining the entire list. """ def __init__(self, *args, **kwargs): if args: # Hack used when instantiating from SimpleTestCase.setUpClass. assert not kwargs self.operations = args[0] else: assert not args self.operations = list(kwargs.items()) def save_options(self, test_func): if test_func._modified_settings is None: test_func._modified_settings = self.operations else: # Duplicate list to prevent subclasses from altering their parent. 
test_func._modified_settings = list( test_func._modified_settings) + self.operations def enable(self): self.options = {} for name, operations in self.operations: try: # When called from SimpleTestCase.setUpClass, values may be # overridden several times; cumulate changes. value = self.options[name] except KeyError: value = list(getattr(settings, name, [])) for action, items in operations.items(): # items my be a single value or an iterable. if isinstance(items, six.string_types): items = [items] if action == 'append': value = value + [item for item in items if item not in value] elif action == 'prepend': value = [item for item in items if item not in value] + value elif action == 'remove': value = [item for item in value if item not in items] else: raise ValueError("Unsupported action: %s" % action) self.options[name] = value super(modify_settings, self).enable() def override_system_checks(new_checks, deployment_checks=None): """ Acts as a decorator. Overrides list of registered system checks. Useful when you override `INSTALLED_APPS`, e.g. if you exclude `auth` app, you also need to exclude its system checks. """ from django.core.checks.registry import registry def outer(test_func): @wraps(test_func) def inner(*args, **kwargs): old_checks = registry.registered_checks registry.registered_checks = new_checks old_deployment_checks = registry.deployment_checks if deployment_checks is not None: registry.deployment_checks = deployment_checks try: return test_func(*args, **kwargs) finally: registry.registered_checks = old_checks registry.deployment_checks = old_deployment_checks return inner return outer def compare_xml(want, got): """Tries to do a 'xml-comparison' of want and got. Plain string comparison doesn't always work because, for example, attribute ordering should not be important. Comment nodes are not considered in the comparison. Leading and trailing whitespace is ignored on both chunks. Based on http://codespeak.net/svn/lxml/trunk/src/lxml/doctestcompare.py """ _norm_whitespace_re = re.compile(r'[ \t\n][ \t\n]+') def norm_whitespace(v): return _norm_whitespace_re.sub(' ', v) def child_text(element): return ''.join(c.data for c in element.childNodes if c.nodeType == Node.TEXT_NODE) def children(element): return [c for c in element.childNodes if c.nodeType == Node.ELEMENT_NODE] def norm_child_text(element): return norm_whitespace(child_text(element)) def attrs_dict(element): return dict(element.attributes.items()) def check_element(want_element, got_element): if want_element.tagName != got_element.tagName: return False if norm_child_text(want_element) != norm_child_text(got_element): return False if attrs_dict(want_element) != attrs_dict(got_element): return False want_children = children(want_element) got_children = children(got_element) if len(want_children) != len(got_children): return False for want, got in zip(want_children, got_children): if not check_element(want, got): return False return True def first_node(document): for node in document.childNodes: if node.nodeType != Node.COMMENT_NODE: return node want, got = strip_quotes(want, got) want = want.strip().replace('\\n', '\n') got = got.strip().replace('\\n', '\n') # If the string is not a complete xml document, we may need to add a # root element. This allow us to compare fragments, like "<foo/><bar/>" if not want.startswith('<?xml'): wrapper = '<root>%s</root>' want = wrapper % want got = wrapper % got # Parse the want and got strings, and compare the parsings. 
want_root = first_node(parseString(want)) got_root = first_node(parseString(got)) return check_element(want_root, got_root) def strip_quotes(want, got): """ Strip quotes of doctests output values: >>> strip_quotes("'foo'") "foo" >>> strip_quotes('"foo"') "foo" """ def is_quoted_string(s): s = s.strip() return (len(s) >= 2 and s[0] == s[-1] and s[0] in ('"', "'")) def is_quoted_unicode(s): s = s.strip() return (len(s) >= 3 and s[0] == 'u' and s[1] == s[-1] and s[1] in ('"', "'")) if is_quoted_string(want) and is_quoted_string(got): want = want.strip()[1:-1] got = got.strip()[1:-1] elif is_quoted_unicode(want) and is_quoted_unicode(got): want = want.strip()[2:-1] got = got.strip()[2:-1] return want, got def str_prefix(s): return s % {'_': '' if six.PY3 else 'u'} class CaptureQueriesContext(object): """ Context manager that captures queries executed by the specified connection. """ def __init__(self, connection): self.connection = connection def __iter__(self): return iter(self.captured_queries) def __getitem__(self, index): return self.captured_queries[index] def __len__(self): return len(self.captured_queries) @property def captured_queries(self): return self.connection.queries[self.initial_queries:self.final_queries] def __enter__(self): self.force_debug_cursor = self.connection.force_debug_cursor self.connection.force_debug_cursor = True self.initial_queries = len(self.connection.queries_log) self.final_queries = None request_started.disconnect(reset_queries) return self def __exit__(self, exc_type, exc_value, traceback): self.connection.force_debug_cursor = self.force_debug_cursor request_started.connect(reset_queries) if exc_type is not None: return self.final_queries = len(self.connection.queries_log) class ignore_warnings(object): def __init__(self, **kwargs): self.ignore_kwargs = kwargs if 'message' in self.ignore_kwargs or 'module' in self.ignore_kwargs: self.filter_func = warnings.filterwarnings else: self.filter_func = warnings.simplefilter def __call__(self, decorated): if isinstance(decorated, type): # A class is decorated saved_setUp = decorated.setUp saved_tearDown = decorated.tearDown def setUp(inner_self): self.catch_warnings = warnings.catch_warnings() self.catch_warnings.__enter__() self.filter_func('ignore', **self.ignore_kwargs) saved_setUp(inner_self) def tearDown(inner_self): saved_tearDown(inner_self) self.catch_warnings.__exit__(*sys.exc_info()) decorated.setUp = setUp decorated.tearDown = tearDown return decorated else: @wraps(decorated) def inner(*args, **kwargs): with warnings.catch_warnings(): self.filter_func('ignore', **self.ignore_kwargs) return decorated(*args, **kwargs) return inner @contextmanager def patch_logger(logger_name, log_level): """ Context manager that takes a named logger and the logging level and provides a simple mock-like list of messages received """ calls = [] def replacement(msg, *args, **kwargs): calls.append(msg % args) logger = logging.getLogger(logger_name) orig = getattr(logger, log_level) setattr(logger, log_level, replacement) try: yield calls finally: setattr(logger, log_level, orig) # On OSes that don't provide tzset (Windows), we can't set the timezone # in which the program runs. As a consequence, we must skip tests that # don't enforce a specific timezone (with timezone.override or equivalent), # or attempt to interpret naive datetimes in the default timezone. 
requires_tz_support = skipUnless(TZ_SUPPORT, "This test relies on the ability to run a program in an arbitrary " "time zone, but your operating system isn't able to do that.") @contextmanager def extend_sys_path(*paths): """Context manager to temporarily add paths to sys.path.""" _orig_sys_path = sys.path[:] sys.path.extend(paths) try: yield finally: sys.path = _orig_sys_path @contextmanager def isolate_lru_cache(lru_cache_object): """Clear the cache of an LRU cache object on entering and exiting.""" lru_cache_object.cache_clear() try: yield finally: lru_cache_object.cache_clear() @contextmanager def captured_output(stream_name): """Return a context manager used by captured_stdout/stdin/stderr that temporarily replaces the sys stream *stream_name* with a StringIO. Note: This function and the following ``captured_std*`` are copied from CPython's ``test.support`` module.""" orig_stdout = getattr(sys, stream_name) setattr(sys, stream_name, six.StringIO()) try: yield getattr(sys, stream_name) finally: setattr(sys, stream_name, orig_stdout) def captured_stdout(): """Capture the output of sys.stdout: with captured_stdout() as stdout: print("hello") self.assertEqual(stdout.getvalue(), "hello\n") """ return captured_output("stdout") def captured_stderr(): """Capture the output of sys.stderr: with captured_stderr() as stderr: print("hello", file=sys.stderr) self.assertEqual(stderr.getvalue(), "hello\n") """ return captured_output("stderr") def captured_stdin(): """Capture the input to sys.stdin: with captured_stdin() as stdin: stdin.write('hello\n') stdin.seek(0) # call test code that consumes from sys.stdin captured = input() self.assertEqual(captured, "hello") """ return captured_output("stdin") def reset_warning_registry(): """ Clear warning registry for all modules. This is required in some tests because of a bug in Python that prevents warnings.simplefilter("always") from always making warnings appear: http://bugs.python.org/issue4180 The bug was fixed in Python 3.4.2. """ key = "__warningregistry__" for mod in sys.modules.values(): if hasattr(mod, key): getattr(mod, key).clear() @contextmanager def freeze_time(t): """ Context manager to temporarily freeze time.time(). This temporarily modifies the time function of the time module. Modules which import the time function directly (e.g. `from time import time`) won't be affected This isn't meant as a public API, but helps reduce some repetitive code in Django's test suite. """ _real_time = time.time time.time = lambda: t try: yield finally: time.time = _real_time def require_jinja2(test_func): """ Decorator to enable a Jinja2 template engine in addition to the regular Django template engine for a test or skip it if Jinja2 isn't available. """ test_func = skipIf(jinja2 is None, "this test requires jinja2")(test_func) test_func = override_settings(TEMPLATES=[{ 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'APP_DIRS': True, }, { 'BACKEND': 'django.template.backends.jinja2.Jinja2', 'APP_DIRS': True, 'OPTIONS': {'keep_trailing_newline': True}, }])(test_func) return test_func class ScriptPrefix(ContextDecorator): def __enter__(self): set_script_prefix(self.prefix) def __exit__(self, exc_type, exc_val, traceback): set_script_prefix(self.old_prefix) def __init__(self, prefix): self.prefix = prefix self.old_prefix = get_script_prefix() def override_script_prefix(prefix): """ Decorator or context manager to temporary override the script prefix. 
""" return ScriptPrefix(prefix) class LoggingCaptureMixin(object): """ Capture the output from the 'django' logger and store it on the class's logger_output attribute. """ def setUp(self): self.logger = logging.getLogger('django') self.old_stream = self.logger.handlers[0].stream self.logger_output = six.StringIO() self.logger.handlers[0].stream = self.logger_output def tearDown(self): self.logger.handlers[0].stream = self.old_stream
bsd-3-clause
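`override_settings` from the module above is part of Django's public testing API and works both as a class or method decorator and as a context manager. A short sketch, assuming a configured Django test environment:

# Typical use of override_settings via its public import path.
from django.test import SimpleTestCase, override_settings

@override_settings(ALLOWED_HOSTS=['testserver'])
class ExampleTests(SimpleTestCase):

    def test_inline_override(self):
        from django.conf import settings
        with override_settings(USE_TZ=True):
            self.assertTrue(settings.USE_TZ)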
molgun/oclapi
django-nonrel/ocl/mappings/views.py
4
15934
from django.core.exceptions import ValidationError from django.db.models import Q from django.http import HttpResponse from rest_framework import mixins, status from rest_framework.generics import RetrieveAPIView, ListAPIView, CreateAPIView, UpdateAPIView, DestroyAPIView from rest_framework.response import Response from concepts.permissions import CanEditParentDictionary, CanViewParentDictionary from mappings.filters import PublicMappingsSearchFilter, SourceRestrictedMappingsFilter, CollectionRestrictedMappingFilter from mappings.models import Mapping, MappingVersion from mappings.serializers import MappingCreateSerializer, MappingUpdateSerializer, MappingDetailSerializer, MappingListSerializer, \ MappingVersionDetailSerializer, MappingVersionListSerializer from oclapi.mixins import ListWithHeadersMixin from oclapi.models import ACCESS_TYPE_NONE from oclapi.views import ConceptDictionaryMixin, BaseAPIView, parse_updated_since_param, VersionedResourceChildMixin from sources.models import SourceVersion from orgs.models import Organization from users.models import UserProfile INCLUDE_RETIRED_PARAM = 'includeRetired' LIMIT_PARAM = 'limit' class MappingBaseView(ConceptDictionaryMixin): lookup_field = 'mapping' pk_field = 'id' model = Mapping child_list_attribute = 'mappings' include_retired = False permission_classes = (CanViewParentDictionary,) def initialize(self, request, path_info_segment, **kwargs): super(MappingBaseView, self).initialize(request, path_info_segment, **kwargs) if self.parent_resource: if hasattr(self.parent_resource, 'versioned_object'): self.parent_resource_version = self.parent_resource self.parent_resource = self.parent_resource.versioned_object else: self.parent_resource_version = self.parent_resource.get_head() def get_queryset(self): queryset = super(ConceptDictionaryMixin, self).get_queryset() owner_is_self = self.parent_resource and self.userprofile and self.parent_resource.owner == self.userprofile if self.parent_resource: queryset = queryset.filter(parent_id=self.parent_resource.id) if not(self.user.is_staff or owner_is_self): queryset = queryset.filter(~Q(public_access=ACCESS_TYPE_NONE)) return queryset class MappingVersionCsvMixin: def get_csv_rows(self, queryset=None): if not queryset: queryset = self.get_queryset() values = queryset.values('map_type','versioned_object_id','uri') for value in values: mapping = Mapping.objects.get(id=value.pop('versioned_object_id')) value['From Concept Owner'] = mapping.from_source_owner value['From Concept Source'] = mapping.from_source_name value['From Concept Code'] = mapping.from_concept_code value['From Concept Name'] = mapping.from_concept_name value['Map Type'] = value.pop('map_type') value['To Concept Owner'] = mapping.to_source_owner value['To Concept Source'] = mapping.to_source_name value['To Concept Code'] = mapping.get_to_concept_code() value['To Concept Name'] = mapping.get_to_concept_name() value['Internal/External'] = 'Internal' if mapping.to_concept_url else 'External' value['Retired'] = mapping.retired value['External ID'] = mapping.external_id value['Last Updated'] = mapping.updated_at value['Updated By'] = mapping.updated_by value['Mapping Owner'] = mapping.owner value['Mapping Source'] = mapping.source value['URI'] = value.pop('uri') values.field_names.extend(['From Concept Owner','From Concept Source','From Concept Code','From Concept Name','Map Type','To Concept Owner', 'To Concept Source','To Concept Code','To Concept Name','Internal/External','Retired','External ID','Last Updated','Updated By','Mapping 
Owner','Mapping Source','URI']) del values.field_names[0:3] return values class MappingVersionBaseView(ConceptDictionaryMixin): lookup_field = 'mapping_version' model = MappingVersion include_retired = False permission_classes = (CanViewParentDictionary,) queryset = MappingVersion.objects.filter(is_active=True) def initialize(self, request, path_info_segment, **kwargs): super(MappingVersionBaseView, self).initialize(request, path_info_segment, **kwargs) def get_queryset(self): queryset = MappingVersion.objects.filter(is_active=True, versioned_object_id=self.kwargs.get('mapping')) return queryset class MappingDetailView(MappingBaseView, RetrieveAPIView, UpdateAPIView, DestroyAPIView): serializer_class = MappingDetailSerializer def destroy(self, request, *args, **kwargs): self.permission_classes = (CanEditParentDictionary,) mapping = self.get_object_or_none() if mapping is None: return Response( {'non_field_errors': 'Could not find mapping to retire'}, status=status.HTTP_404_NOT_FOUND) update_comment = None if 'update_comment' in request.DATA: update_comment = request.DATA.get('update_comment') errors = Mapping.retire(mapping, request.user, update_comment=update_comment) if errors: return Response(errors, status=status.HTTP_400_BAD_REQUEST) return Response(status=status.HTTP_204_NO_CONTENT) def update(self, request, *args, **kwargs): self.permission_classes = (CanEditParentDictionary,) self.serializer_class = MappingUpdateSerializer partial = True self.object = self.get_object() created = False save_kwargs = {'force_update': True} if 'update_comment' in request.DATA: save_kwargs = {'force_update':True, 'update_comment': request.DATA.get('update_comment')} else: save_kwargs = {'force_update': True} success_status_code = status.HTTP_200_OK serializer = self.get_serializer(self.object, data=request.DATA, files=request.FILES, partial=partial) if serializer.is_valid(): try: self.pre_save(serializer.object) except ValidationError as e: return Response(e.messages, status=status.HTTP_400_BAD_REQUEST) self.object = serializer.save(**save_kwargs) self.post_save(self.object, created=created) serializer = MappingDetailSerializer(self.object, context={'request': request}) return Response(serializer.data, status=success_status_code) return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) class MappingVersionMixin(): lookup_field = 'mapping_version' pk_field = 'mnemonic' model = MappingVersion parent_resource_version_model = SourceVersion permission_classes = (CanViewParentDictionary,) child_list_attribute = 'mappings' class MappingVersionsListView(MappingVersionMixin, VersionedResourceChildMixin, ListWithHeadersMixin, MappingVersionCsvMixin): serializer_class = MappingVersionListSerializer solr_fields = { 'lastUpdate': {'sortable': True, 'filterable': False, 'facet': False}, 'concept': {'sortable': False, 'filterable': True, 'facet': False}, 'fromConcept': {'sortable': False, 'filterable': True, 'facet': False}, 'toConcept': {'sortable': False, 'filterable': True, 'facet': False}, 'retired': {'sortable': False, 'filterable': True, 'facet': True}, 'mapType': {'sortable': False, 'filterable': True, 'facet': True}, 'source': {'sortable': False, 'filterable': True, 'facet': True}, 'collection': {'sortable': False, 'filterable': True, 'facet': True}, 'owner': {'sortable': False, 'filterable': True, 'facet': True}, 'ownerType': {'sortable': False, 'filterable': True, 'facet': True}, 'conceptSource': {'sortable': False, 'filterable': True, 'facet': True}, 'fromConceptSource': {'sortable': False, 
'filterable': True, 'facet': True}, 'toConceptSource': {'sortable': False, 'filterable': True, 'facet': True}, 'conceptOwner': {'sortable': False, 'filterable': True, 'facet': True}, 'fromConceptOwner': {'sortable': False, 'filterable': True, 'facet': True}, 'toConceptOwner': {'sortable': False, 'filterable': True, 'facet': True}, 'conceptOwnerType': {'sortable': False, 'filterable': True, 'facet': True}, 'fromConceptOwnerType': {'sortable': False, 'filterable': True, 'facet': True}, 'toConceptOwnerType': {'sortable': False, 'filterable': True, 'facet': True}, } def get(self, request, *args, **kwargs): self.filter_backends = [CollectionRestrictedMappingFilter] if 'collection' in kwargs else [SourceRestrictedMappingsFilter] self.include_retired = request.QUERY_PARAMS.get(INCLUDE_RETIRED_PARAM, False) self.updated_since = parse_updated_since_param(request) return self.list(request, *args, **kwargs) def get_queryset(self): if ('collection' in self.kwargs and 'version' not in self.kwargs) or ('collection' in self.kwargs and 'version' in self.kwargs and self.kwargs['version'] == 'HEAD'): all_children = getattr(self.parent_resource_version, self.child_list_attribute) or [] queryset = super(ConceptDictionaryMixin, self).get_queryset() queryset = queryset.filter(versioned_object_id__in=all_children, is_latest_version=True) else: queryset = super(MappingVersionsListView, self).get_queryset() queryset = queryset.filter(is_active=True) if not self.include_retired: queryset = queryset.filter(~Q(retired=True)) if self.updated_since: queryset = queryset.filter(updated_at__gte=self.updated_since) return queryset def get_owner(self): owner = None if 'user' in self.kwargs: owner_id = self.kwargs['user'] owner = UserProfile.objects.get(mnemonic=owner_id) elif 'org' in self.kwargs: owner_id = self.kwargs['org'] owner = Organization.objects.get(mnemonic=owner_id) return owner class MappingVersionsView(ConceptDictionaryMixin, ListWithHeadersMixin): serializer_class = MappingVersionListSerializer permission_classes = (CanViewParentDictionary,) def get(self, request, *args, **kwargs): self.serializer_class = MappingVersionDetailSerializer return self.list(request, *args, **kwargs) def get_queryset(self): return MappingVersion.objects.filter(versioned_object_id=self.parent_resource.id, is_active=True) class MappingVersionDetailView(MappingVersionBaseView, RetrieveAPIView): serializer_class = MappingVersionDetailSerializer def initialize(self, request, path_info_segment, **kwargs): super(MappingVersionDetailView, self).initialize(request, path_info_segment, **kwargs) def get_level(self): return 1 class MappingListView(MappingBaseView, ListAPIView, CreateAPIView, ListWithHeadersMixin, mixins.CreateModelMixin): queryset = Mapping.objects.filter(is_active=True) serializer_class = MappingCreateSerializer def get(self, request, *args, **kwargs): delegate_view = MappingVersionsListView.as_view() return delegate_view(request, *args, **kwargs) def create(self, request, *args, **kwargs): self.permission_classes = (CanEditParentDictionary,) if not self.parent_resource: return HttpResponse(status=status.HTTP_405_METHOD_NOT_ALLOWED) serializer = self.get_serializer(data=request.DATA, files=request.FILES) if serializer.is_valid(): self.pre_save(serializer.object) save_kwargs = { 'force_insert': True, 'parent_resource': self.parent_resource, } self.object = serializer.save(**save_kwargs) if serializer.is_valid(): self.post_save(self.object, created=True) headers = self.get_success_headers(serializer.data) serializer = 
MappingDetailSerializer(self.object, context={'request': request}) return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers) return Response({'errors' : (('' if k == '__all__' else k +' : ')+ v[0]) for k, v in serializer.errors.items()}, status=status.HTTP_400_BAD_REQUEST) def get_queryset(self): queryset = super(ConceptDictionaryMixin, self).get_queryset() if not self.include_retired: queryset = queryset.filter(~Q(retired=True)) return queryset def get_owner(self): owner = None if 'user' in self.kwargs: owner_id = self.kwargs['user'] owner = UserProfile.objects.get(mnemonic=owner_id) elif 'org' in self.kwargs: owner_id = self.kwargs['org'] owner = Organization.objects.get(mnemonic=owner_id) return owner class MappingListAllView(BaseAPIView, ListWithHeadersMixin, MappingVersionCsvMixin): model = MappingVersion filter_backends = [PublicMappingsSearchFilter,] permission_classes = (CanEditParentDictionary,) queryset = MappingVersion.objects.filter(is_active=True) solr_fields = { 'lastUpdate': {'sortable': True, 'filterable': False, 'facet': False}, 'concept': {'sortable': False, 'filterable': True, 'facet': False}, 'fromConcept': {'sortable': False, 'filterable': True, 'facet': False}, 'toConcept': {'sortable': False, 'filterable': True, 'facet': False}, 'retired': {'sortable': False, 'filterable': True, 'facet': True}, 'mapType': {'sortable': False, 'filterable': True, 'facet': True}, 'source': {'sortable': False, 'filterable': True, 'facet': True}, 'owner': {'sortable': False, 'filterable': True, 'facet': True}, 'ownerType': {'sortable': False, 'filterable': True, 'facet': True}, 'conceptSource': {'sortable': False, 'filterable': True, 'facet': True}, 'fromConceptSource': {'sortable': False, 'filterable': True, 'facet': True}, 'toConceptSource': {'sortable': False, 'filterable': True, 'facet': True}, 'conceptOwner': {'sortable': False, 'filterable': True, 'facet': True}, 'fromConceptOwner': {'sortable': False, 'filterable': True, 'facet': True}, 'toConceptOwner': {'sortable': False, 'filterable': True, 'facet': True}, 'conceptOwnerType': {'sortable': False, 'filterable': True, 'facet': True}, 'fromConceptOwnerType': {'sortable': False, 'filterable': True, 'facet': True}, 'toConceptOwnerType': {'sortable': False, 'filterable': True, 'facet': True}, } include_retired = False default_filters = {'is_active': True, 'is_latest_version': True} def get(self, request, *args, **kwargs): self.include_retired = request.QUERY_PARAMS.get(INCLUDE_RETIRED_PARAM, False) self.serializer_class = MappingVersionDetailSerializer if self.is_verbose(request) else MappingVersionListSerializer self.limit = request.QUERY_PARAMS.get(LIMIT_PARAM, 25) return self.list(request, *args, **kwargs) def get_queryset(self): queryset = super(MappingListAllView, self).get_queryset() if not self.include_retired: queryset = queryset.filter(~Q(retired=True)) if not self.request.user.is_staff: queryset = queryset.filter(~Q(public_access=ACCESS_TYPE_NONE)) return queryset[0:self.limit]
mpl-2.0
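Illustrative sketch, not part of the record above: the DELETE handler in MappingDetailView retires a mapping rather than deleting it. The same pattern is restated below as a small reusable mixin; the retire() classmethod, the get_object_or_none() helper and the DRF 2.x-era request.DATA attribute are assumed to behave exactly as they do in the file.

from rest_framework import status
from rest_framework.response import Response


class RetireOnDeleteMixin(object):
    """Turn HTTP DELETE into a soft 'retire' instead of a hard delete."""

    def destroy(self, request, *args, **kwargs):
        obj = self.get_object_or_none()
        if obj is None:
            return Response({'non_field_errors': 'Could not find object to retire'},
                            status=status.HTTP_404_NOT_FOUND)
        # retire() is assumed to return a dict of errors, or a falsy value on success
        errors = type(obj).retire(obj, request.user,
                                  update_comment=request.DATA.get('update_comment'))
        if errors:
            return Response(errors, status=status.HTTP_400_BAD_REQUEST)
        return Response(status=status.HTTP_204_NO_CONTENT)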
nicky-ji/edx-nicky
lms/lib/comment_client/models.py
27
5994
import logging from .utils import extract, perform_request, CommentClientRequestError log = logging.getLogger(__name__) class Model(object): accessible_fields = ['id'] updatable_fields = ['id'] initializable_fields = ['id'] base_url = None default_retrieve_params = {} metric_tag_fields = [] DEFAULT_ACTIONS_WITH_ID = ['get', 'put', 'delete'] DEFAULT_ACTIONS_WITHOUT_ID = ['get_all', 'post'] DEFAULT_ACTIONS = DEFAULT_ACTIONS_WITH_ID + DEFAULT_ACTIONS_WITHOUT_ID def __init__(self, *args, **kwargs): self.attributes = extract(kwargs, self.accessible_fields) self.retrieved = False def __getattr__(self, name): if name == 'id': return self.attributes.get('id', None) try: return self.attributes[name] except KeyError: if self.retrieved or self.id is None: raise AttributeError("Field {0} does not exist".format(name)) self.retrieve() return self.__getattr__(name) def __setattr__(self, name, value): if name == 'attributes' or name not in self.accessible_fields: super(Model, self).__setattr__(name, value) else: self.attributes[name] = value def __getitem__(self, key): if key not in self.accessible_fields: raise KeyError("Field {0} does not exist".format(key)) return self.attributes.get(key) def __setitem__(self, key, value): if key not in self.accessible_fields: raise KeyError("Field {0} does not exist".format(key)) self.attributes.__setitem__(key, value) def items(self, *args, **kwargs): return self.attributes.items(*args, **kwargs) def get(self, *args, **kwargs): return self.attributes.get(*args, **kwargs) def to_dict(self): self.retrieve() return self.attributes def retrieve(self, *args, **kwargs): if not self.retrieved: self._retrieve(*args, **kwargs) self.retrieved = True return self def _retrieve(self, *args, **kwargs): url = self.url(action='get', params=self.attributes) response = perform_request( 'get', url, self.default_retrieve_params, metric_tags=self._metric_tags, metric_action='model.retrieve' ) self._update_from_response(response) @property def _metric_tags(self): """ Returns a list of tags to be used when recording metrics about this model. Each field named in ``self.metric_tag_fields`` is used as a tag value, under the key ``<class>.<metric_field>``. The tag model_class is used to record the class name of the model. 
""" tags = [ u'{}.{}:{}'.format(self.__class__.__name__, attr, self[attr]) for attr in self.metric_tag_fields if attr in self.attributes ] tags.append(u'model_class:{}'.format(self.__class__.__name__)) return tags @classmethod def find(cls, id): return cls(id=id) def _update_from_response(self, response_data): for k, v in response_data.items(): if k in self.accessible_fields: self.__setattr__(k, v) else: log.warning( "Unexpected field {field_name} in model {model_name}".format( field_name=k, model_name=self.__class__.__name__ ) ) def updatable_attributes(self): return extract(self.attributes, self.updatable_fields) def initializable_attributes(self): return extract(self.attributes, self.initializable_fields) @classmethod def before_save(cls, instance): pass @classmethod def after_save(cls, instance): pass def save(self): self.before_save(self) if self.id: # if we have id already, treat this as an update url = self.url(action='put', params=self.attributes) response = perform_request( 'put', url, self.updatable_attributes(), metric_tags=self._metric_tags, metric_action='model.update' ) else: # otherwise, treat this as an insert url = self.url(action='post', params=self.attributes) response = perform_request( 'post', url, self.initializable_attributes(), metric_tags=self._metric_tags, metric_action='model.insert' ) self.retrieved = True self._update_from_response(response) self.after_save(self) def delete(self): url = self.url(action='delete', params=self.attributes) response = perform_request('delete', url, metric_tags=self._metric_tags, metric_action='model.delete') self.retrieved = True self._update_from_response(response) @classmethod def url_with_id(cls, params={}): return cls.base_url + '/' + str(params['id']) @classmethod def url_without_id(cls, params={}): return cls.base_url @classmethod def url(cls, action, params={}): if cls.base_url is None: raise CommentClientRequestError("Must provide base_url when using default url function") if action not in cls.DEFAULT_ACTIONS: raise ValueError("Invalid action {0}. The supported action must be in {1}".format(action, str(cls.DEFAULT_ACTIONS))) elif action in cls.DEFAULT_ACTIONS_WITH_ID: try: return cls.url_with_id(params) except KeyError: raise CommentClientRequestError("Cannot perform action {0} without id".format(action)) else: # action must be in DEFAULT_ACTIONS_WITHOUT_ID now return cls.url_without_id()
agpl-3.0
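Illustrative sketch, not part of the record above: how a concrete subclass of the Model base class is typically declared and used. The class name, field names and base_url are assumptions for the example (and a comments service is assumed to answer at that URL); lazy retrieval and saving follow the __getattr__/retrieve/save logic in the file.

class Thread(Model):
    accessible_fields = ['id', 'title', 'body']
    updatable_fields = ['title', 'body']
    initializable_fields = ['title', 'body']
    base_url = 'http://localhost:4567/api/v1/threads'   # assumed endpoint

thread = Thread.find('abc123')   # builds the object lazily; no HTTP request yet
print(thread.title)              # first missing field triggers retrieve(), i.e. GET base_url/abc123
thread.body = 'edited text'
thread.save()                    # id is already set, so save() issues a PUT with updatable_attributes()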
gymnasium/edx-platform
common/lib/xmodule/xmodule/conditional_module.py
8
15152
"""Conditional module is the xmodule, which you can use for disabling some xmodules by conditions. """ import json import logging from lazy import lazy from lxml import etree from pkg_resources import resource_string from six import text_type from opaque_keys.edx.locator import BlockUsageLocator from web_fragments.fragment import Fragment from xblock.fields import ReferenceList, Scope, String from xmodule.modulestore.exceptions import ItemNotFoundError from xmodule.seq_module import SequenceDescriptor from xmodule.studio_editable import StudioEditableDescriptor, StudioEditableModule from xmodule.validation import StudioValidation, StudioValidationMessage from xmodule.x_module import STUDENT_VIEW, XModule log = logging.getLogger('edx.' + __name__) # Make '_' a no-op so we can scrape strings _ = lambda text: text class ConditionalFields(object): has_children = True display_name = String( display_name=_("Display Name"), help=_("The display name for this component."), scope=Scope.settings, default=_('Conditional') ) show_tag_list = ReferenceList( help=_("List of urls of children that are references to external modules"), scope=Scope.content ) sources_list = ReferenceList( display_name=_("Source Components"), help=_("The component location IDs of all source components that are used to determine whether a learner is " "shown the content of this conditional module. Copy the component location ID of a component from its " "Settings dialog in Studio."), scope=Scope.content ) conditional_attr = String( display_name=_("Conditional Attribute"), help=_("The attribute of the source components that determines whether a learner is shown the content of this " "conditional module."), scope=Scope.content, default='correct', values=lambda: [{'display_name': xml_attr, 'value': xml_attr} for xml_attr in ConditionalModule.conditions_map.keys()] ) conditional_value = String( display_name=_("Conditional Value"), help=_("The value that the conditional attribute of the source components must match before a learner is shown " "the content of this conditional module."), scope=Scope.content, default='True' ) conditional_message = String( display_name=_("Blocked Content Message"), help=_("The message that is shown to learners when not all conditions are met to show the content of this " "conditional module. Include {link} in the text of your message to give learners a direct link to " "required units. For example, 'You must complete {link} before you can access this unit'."), scope=Scope.content, default=_('You must complete {link} before you can access this unit.') ) class ConditionalModule(ConditionalFields, XModule, StudioEditableModule): """ Blocks child module from showing unless certain conditions are met. Example: <conditional sources="i4x://.../problem_1; i4x://.../problem_2" completed="True"> <show sources="i4x://.../test_6; i4x://.../Avi_resources"/> <video url_name="secret_video" /> </conditional> <conditional> tag attributes: sources - location id of required modules, separated by ';' submitted - map to `is_submitted` module method. (pressing RESET button makes this function to return False.) attempted - map to `is_attempted` module method correct - map to `is_correct` module method poll_answer - map to `poll_answer` module attribute voted - map to `voted` module attribute <show> tag attributes: sources - location id of required modules, separated by ';' You can add you own rules for <conditional> tag, like "completed", "attempted" etc. 
To do that yo must extend `ConditionalModule.conditions_map` variable and add pair: my_attr: my_property/my_method After that you can use it: <conditional my_attr="some value" ...> ... </conditional> And my_property/my_method will be called for required modules. """ js = { 'js': [ resource_string(__name__, 'js/src/conditional/display.js'), resource_string(__name__, 'js/src/javascript_loader.js'), resource_string(__name__, 'js/src/collapsible.js'), ] } js_module_name = "Conditional" css = {'scss': [resource_string(__name__, 'css/capa/display.scss')]} # Map # key: <tag attribute in xml> # value: <name of module attribute> conditions_map = { 'poll_answer': 'poll_answer', # poll_question attr # problem was submitted (it can be wrong) # if student will press reset button after that, # state will be reverted 'submitted': 'is_submitted', # capa_problem attr # if student attempted problem 'attempted': 'is_attempted', # capa_problem attr # if problem is full points 'correct': 'is_correct', 'voted': 'voted' # poll_question attr } @lazy def required_modules(self): return [self.system.get_module(descriptor) for descriptor in self.descriptor.get_required_module_descriptors()] def is_condition_satisfied(self): attr_name = self.conditions_map[self.conditional_attr] if self.conditional_value and self.required_modules: for module in self.required_modules: if not hasattr(module, attr_name): # We don't throw an exception here because it is possible for # the descriptor of a required module to have a property but # for the resulting module to be a (flavor of) ErrorModule. # So just log and return false. if module is not None: # We do not want to log when module is None, and it is when requester # does not have access to the requested required module. log.warn('Error in conditional module: \ required module {module} has no {module_attr}'.format(module=module, module_attr=attr_name)) return False attr = getattr(module, attr_name) if callable(attr): attr = attr() if self.conditional_value != str(attr): break else: return True return False def get_html(self): # Calculate html ids of dependencies self.required_html_ids = [descriptor.location.html_id() for descriptor in self.descriptor.get_required_module_descriptors()] return self.system.render_template('conditional_ajax.html', { 'element_id': self.location.html_id(), 'ajax_url': self.system.ajax_url, 'depends': ';'.join(self.required_html_ids) }) def author_view(self, context): """ Renders the Studio preview by rendering each child so that they can all be seen and edited. """ fragment = Fragment() root_xblock = context.get('root_xblock') is_root = root_xblock and root_xblock.location == self.location if is_root: # User has clicked the "View" link. Show a preview of all possible children: self.render_children(context, fragment, can_reorder=True, can_add=True) # else: When shown on a unit page, don't show any sort of preview - # just the status of this block in the validation area. return fragment def handle_ajax(self, _dispatch, _data): """This is called by courseware.moduleodule_render, to handle an AJAX call. 
""" if not self.is_condition_satisfied(): context = {'module': self, 'message': self.conditional_message} html = self.system.render_template('conditional_module.html', context) return json.dumps({'html': [html], 'message': bool(self.conditional_message)}) html = [child.render(STUDENT_VIEW).content for child in self.get_display_items()] return json.dumps({'html': html}) def get_icon_class(self): new_class = 'other' # HACK: This shouldn't be hard-coded to two types # OBSOLETE: This obsoletes 'type' class_priority = ['video', 'problem'] child_classes = [self.system.get_module(child_descriptor).get_icon_class() for child_descriptor in self.descriptor.get_children()] for c in class_priority: if c in child_classes: new_class = c return new_class def validate(self): """ Message for either error or warning validation message/s. Returns message and type. Priority given to error type message. """ return self.descriptor.validate() class ConditionalDescriptor(ConditionalFields, SequenceDescriptor, StudioEditableDescriptor): """Descriptor for conditional xmodule.""" _tag_name = 'conditional' module_class = ConditionalModule resources_dir = None filename_extension = "xml" has_score = False show_in_read_only_mode = True def __init__(self, *args, **kwargs): """ Create an instance of the conditional module. """ super(ConditionalDescriptor, self).__init__(*args, **kwargs) # Convert sources xml_attribute to a ReferenceList field type so Location/Locator # substitution can be done. if not self.sources_list: if 'sources' in self.xml_attributes and isinstance(self.xml_attributes['sources'], basestring): self.sources_list = [ # TODO: it is not clear why we are replacing the run here (which actually is a no-op # for old-style course locators. However, this is the implementation of # CourseLocator.make_usage_key_from_deprecated_string, which was previously # being called in this location. BlockUsageLocator.from_string(item).replace(run=self.location.course_key.run) for item in ConditionalDescriptor.parse_sources(self.xml_attributes) ] @staticmethod def parse_sources(xml_element): """ Parse xml_element 'sources' attr and return a list of location strings. """ sources = xml_element.get('sources') if sources: return [location.strip() for location in sources.split(';')] def get_required_module_descriptors(self): """Returns a list of XModuleDescriptor instances upon which this module depends. """ descriptors = [] for location in self.sources_list: try: descriptor = self.system.load_item(location) descriptors.append(descriptor) except ItemNotFoundError: msg = "Invalid module by location." log.exception(msg) self.system.error_tracker(msg) return descriptors @classmethod def definition_from_xml(cls, xml_object, system): children = [] show_tag_list = [] definition = {} for conditional_attr in ConditionalModule.conditions_map.iterkeys(): conditional_value = xml_object.get(conditional_attr) if conditional_value is not None: definition.update({ 'conditional_attr': conditional_attr, 'conditional_value': str(conditional_value), }) for child in xml_object: if child.tag == 'show': locations = ConditionalDescriptor.parse_sources(child) for location in locations: children.append(location) show_tag_list.append(location) else: try: descriptor = system.process_xml(etree.tostring(child)) children.append(descriptor.scope_ids.usage_id) except: msg = "Unable to load child when parsing Conditional." 
log.exception(msg) system.error_tracker(msg) definition.update({ 'show_tag_list': show_tag_list, 'conditional_message': xml_object.get('message', '') }) return definition, children def definition_to_xml(self, resource_fs): xml_object = etree.Element(self._tag_name) for child in self.get_children(): if child.location not in self.show_tag_list: self.runtime.add_block_as_child_node(child, xml_object) if self.show_tag_list: show_str = u'<{tag_name} sources="{sources}" />'.format( tag_name='show', sources=';'.join(text_type(location) for location in self.show_tag_list)) xml_object.append(etree.fromstring(show_str)) # Overwrite the original sources attribute with the value from sources_list, as # Locations may have been changed to Locators. stringified_sources_list = map(lambda loc: text_type(loc), self.sources_list) self.xml_attributes['sources'] = ';'.join(stringified_sources_list) self.xml_attributes[self.conditional_attr] = self.conditional_value self.xml_attributes['message'] = self.conditional_message return xml_object def validate(self): validation = super(ConditionalDescriptor, self).validate() if not self.sources_list: conditional_validation = StudioValidation(self.location) conditional_validation.add( StudioValidationMessage( StudioValidationMessage.NOT_CONFIGURED, _(u"This component has no source components configured yet."), action_class='edit-button', action_label=_(u"Configure list of sources") ) ) validation = StudioValidation.copy(validation) validation.summary = conditional_validation.messages[0] return validation @property def non_editable_metadata_fields(self): non_editable_fields = super(ConditionalDescriptor, self).non_editable_metadata_fields non_editable_fields.extend([ ConditionalDescriptor.due, ConditionalDescriptor.is_practice_exam, ConditionalDescriptor.is_proctored_enabled, ConditionalDescriptor.is_time_limited, ConditionalDescriptor.default_time_limit_minutes, ConditionalDescriptor.show_tag_list, ConditionalDescriptor.exam_review_rules, ]) return non_editable_fields
agpl-3.0
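Illustrative sketch, not part of the record above: the `sources` attribute described in the ConditionalModule docstring is a semicolon-separated list of location ids, and ConditionalDescriptor.parse_sources simply splits and strips it. The location strings below are invented for the example.

from lxml import etree

elem = etree.fromstring(
    '<conditional sources="i4x://Org/Course/problem/p1; i4x://Org/Course/problem/p2" attempted="True"/>'
)
print(ConditionalDescriptor.parse_sources(elem))
# -> ['i4x://Org/Course/problem/p1', 'i4x://Org/Course/problem/p2']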
thaines/rfam
bin/prman_AlfParser.py
1
9166
import pyparsing as pp import re import copy class prman_AlfParser: def __init__(self): self.keywords = ['Job', 'Task', 'RemoteCmd'] def parseFile(self, fileText): commands = self.__parseCommandStructure(fileText, 0, isStart = True) #print(commands) textureCmds, Cmds, frames = self.extractCommandHierarchy(commands) return [textureCmds, Cmds, frames] def printCommands(self, cmds, currentIndent = 0): if isinstance(cmds, list): for e in cmds: self.printCommands(e, currentIndent + 1) print('---------------------') else: tabs = '' for i in range(currentIndent): tabs += '\t' print(tabs + repr(cmds)) def __matchBracket(self, str): if str[0] != '{': return None num_open = 0 for i, c in enumerate(str): if c == '{': num_open += 1 elif c == '}': num_open -= 1 if num_open == 0: return str[1:i] return None def leadingSpace(self, text): return len(text) - len(text.lstrip()) def removingLeadingNewLines(self, text): return text.lstrip('\n') def determineCommandLength(self, text): if text[0] == '\n': raise ValueError('Determine command length should never take newline as first char!') text = copy.deepcopy(text) lines = text.split('\n') lengths = [len(l) for l in lines] currentIndent = self.leadingSpace(lines[0]) extent = len(lines[0]) for i, l in enumerate(lines[1:]): if self.leadingSpace(l) != currentIndent: extent += lengths[i + 1] + 1 else: extent += lengths[i + 1] + 1 return extent return extent def extractAllArgs(self, text): currentIndent = 0 parsingBracket = False parsingSimple = False args = [] argNames = [] resultText = '' currentBracketText = '' i = 0 while i < len(text): if parsingBracket: #process indents if text[i] == '}': currentIndent -= 1 currentBracketText += text[i] if currentIndent == 0: args.append(currentBracketText[1:-1]) currentBracketText = '' parsingBracket = False currentIndent = 0 elif text[i] == '{': currentBracketText += text[i] currentIndent += 1 else: currentBracketText += text[i] elif parsingSimple: if text[i] == ' ': args.append(currentBracketText ) currentBracketText = '' parsingSimple = False else: currentBracketText += text[i] else: if text[i] == '-': counter = 1 argName = '' while True: if text[i + counter] == ' ': argNames.append(argName) if text[i + counter + 1] == '{': currentIndent = 0 parsingBracket = True i = i + counter else: parsingSimple = True i = i + counter break else: argName += text[i + counter] counter += 1 i += 1 return argNames, args, resultText def parseOptions(self, text): optsNames, opts, textWithoutOpts = self.extractAllArgs(text) result = {} for i in range(len(optsNames)): result[optsNames[i]] = opts[i] return result def parseJob(self, text): newJob = self.parseOptions(text) newJob['type'] = 'job' return newJob def parseRemoteCmd(self, text): #grab the actual command i = len(text) - 1 actualCommand = '' while i > 0: if text[i] == '}': break else: i -= 1 while i > 0: if text[i] == '{': actualCommand = text[i] + actualCommand break else: actualCommand = text[i] + actualCommand i -=1 newCmd = self.parseOptions(text[:i]) newCmd['type'] = 'remoteCommand' newCmd['command'] = actualCommand[1:-1] return newCmd def parseTask(self, text): #parse Task Name taskName = '' start = text.find('{') + 1 for i in range(start, len(text)): if text[i] == '}': break else: taskName += text[i] text = text[i+1:] newTask = self.parseOptions(text) newTask['type'] = 'task' newTask['taskName'] = taskName return newTask def __parseCommandStructure(self, text, indentLevel, isStart = False): structure = [] text = copy.deepcopy(text) if isStart: text = text[17:] starts = 
[text.find(k) for k in self.keywords] for i in range(len(starts)): if starts[i] < 0: starts[i] = 111111111111111111 lowestStartIdx = starts.index(min(starts)) #move back until new line startIdx = starts[lowestStartIdx] if startIdx == 111111111111111111: return None while startIdx > 0: if text[startIdx - 1] == '\t': startIdx -= 1 else: break if lowestStartIdx == 0: #Job length = self.determineCommandLength(text[startIdx:]) newItem = self.parseJob(text[startIdx+3:startIdx+length]) elif lowestStartIdx == 1: #Task length = self.determineCommandLength(text[startIdx:]) newItem = self.parseTask(text[startIdx+4:startIdx+length]) elif lowestStartIdx == 2: #RemoteCmd length = self.determineCommandLength(text[startIdx:]) newItem = self.parseRemoteCmd(text[startIdx+9:startIdx+length]) try: #why does hasattr not work here? #print('Attempting to parse subtasks') newItem['subtasks'] = self.__parseCommandStructure(self.removingLeadingNewLines(newItem['subtasks']), indentLevel+1) except: pass try: newItem['cmds'] = self.__parseCommandStructure(self.removingLeadingNewLines(newItem['cmds']), indentLevel+1) except: pass structure.append(newItem) nextCommands = self.__parseCommandStructure(text[startIdx+length:], indentLevel) if nextCommands: for c in nextCommands: structure.append(c) return structure def extractCommandsForFrame(self, task): frames = [] cmds = {} for t in task['subtasks']: subcmds = [] #extract frame index frameLinearIdx = int(t['taskName'].replace('Frame', '')) frames.append(frameLinearIdx) for t_sub in t['subtasks']: try: for c in t_sub['cmds']: subcmds.append(c) except: pass if subcmds: cmds[str(frameLinearIdx)] = subcmds return cmds, frames def extractCommandsForTexture(self, task): cmds = [] for t in task['subtasks']: try: for c in t['cmds']: cmds.append(c) except: pass return cmds def extractCommandHierarchy(self, jobs): textureCommands = [] commands = {} for j in jobs: for t in j['subtasks']: #get all texture conversion tasks if t['taskName'] == 'Job Textures': try: newCommands = self.extractCommandsForTexture(t) #textureCommands.append(newCommands) for c in newCommands: textureCommands.append(c) except: pass #get commands for all frames else: newCommands, frames = self.extractCommandsForFrame(t) commands.update(newCommands) return textureCommands, commands, frames def main(): with open('data/blue/shots/spool.alf', 'r') as myfile: data = myfile.read() parser = prman_AlfParser() textureCmds, Cmds, frames = parser.parseFile(data) print('Frames: ', frames) if __name__ == "__main__": main()
gpl-3.0
gshivani/ansible-modules-extras
cloud/misc/virt.py
8
14024
#!/usr/bin/python # -*- coding: utf-8 -*- """ Virt management features Copyright 2007, 2012 Red Hat, Inc Michael DeHaan <[email protected]> Seth Vidal <[email protected]> This software may be freely redistributed under the terms of the GNU general public license. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. """ DOCUMENTATION = ''' --- module: virt short_description: Manages virtual machines supported by libvirt description: - Manages virtual machines supported by I(libvirt). version_added: "0.2" options: name: description: - name of the guest VM being managed. Note that VM must be previously defined with xml. required: true default: null aliases: [] state: description: - Note that there may be some lag for state requests like C(shutdown) since these refer only to VM states. After starting a guest, it may not be immediately accessible. required: false choices: [ "running", "shutdown", "destroyed", "paused" ] default: "no" command: description: - in addition to state management, various non-idempotent commands are available. See examples required: false choices: ["create","status", "start", "stop", "pause", "unpause", "shutdown", "undefine", "destroy", "get_xml", "autostart", "freemem", "list_vms", "info", "nodeinfo", "virttype", "define"] uri: description: - libvirt connection uri required: false defaults: qemu:///system xml: description: - XML document used with the define command required: false default: null requirements: - "python >= 2.6" - "libvirt-python" author: - "Ansible Core Team" - '"Michael DeHaan (@mpdehaan)" <[email protected]>' - '"Seth Vidal (@skvidal)" <[email protected]>' ''' EXAMPLES = ''' # a playbook task line: - virt: name=alpha state=running # /usr/bin/ansible invocations ansible host -m virt -a "name=alpha command=status" ansible host -m virt -a "name=alpha command=get_xml" ansible host -m virt -a "name=alpha command=create uri=lxc:///" # a playbook example of defining and launching an LXC guest tasks: - name: define vm virt: name=foo command=define xml="{{ lookup('template', 'container-template.xml.j2') }}" uri=lxc:/// - name: start vm virt: name=foo state=running uri=lxc:/// ''' VIRT_FAILED = 1 VIRT_SUCCESS = 0 VIRT_UNAVAILABLE=2 import sys try: import libvirt except ImportError: print "failed=True msg='libvirt python module unavailable'" sys.exit(1) ALL_COMMANDS = [] VM_COMMANDS = ['create','status', 'start', 'stop', 'pause', 'unpause', 'shutdown', 'undefine', 'destroy', 'get_xml', 'autostart', 'define'] HOST_COMMANDS = ['freemem', 'list_vms', 'info', 'nodeinfo', 'virttype'] ALL_COMMANDS.extend(VM_COMMANDS) ALL_COMMANDS.extend(HOST_COMMANDS) VIRT_STATE_NAME_MAP = { 0 : "running", 1 : "running", 2 : "running", 3 : "paused", 4 : "shutdown", 5 : "shutdown", 6 : "crashed" } class VMNotFound(Exception): pass class LibvirtConnection(object): def __init__(self, uri, module): self.module = module cmd = "uname -r" rc, stdout, stderr = self.module.run_command(cmd) if "xen" in stdout: conn = libvirt.open(None) else: conn = libvirt.open(uri) if not conn: raise Exception("hypervisor connection failure") self.conn = conn def find_vm(self, vmid): """ Extra bonus feature: vmid = -1 returns a list of everything """ conn = self.conn vms = [] # this block of code borrowed from virt-manager: # get working domain's name ids = conn.listDomainsID() for id in ids: vm = conn.lookupByID(id) vms.append(vm) # get defined domain names = conn.listDefinedDomains() for name in names: vm = 
conn.lookupByName(name) vms.append(vm) if vmid == -1: return vms for vm in vms: if vm.name() == vmid: return vm raise VMNotFound("virtual machine %s not found" % vmid) def shutdown(self, vmid): return self.find_vm(vmid).shutdown() def pause(self, vmid): return self.suspend(self.conn,vmid) def unpause(self, vmid): return self.resume(self.conn,vmid) def suspend(self, vmid): return self.find_vm(vmid).suspend() def resume(self, vmid): return self.find_vm(vmid).resume() def create(self, vmid): return self.find_vm(vmid).create() def destroy(self, vmid): return self.find_vm(vmid).destroy() def undefine(self, vmid): return self.find_vm(vmid).undefine() def get_status2(self, vm): state = vm.info()[0] return VIRT_STATE_NAME_MAP.get(state,"unknown") def get_status(self, vmid): state = self.find_vm(vmid).info()[0] return VIRT_STATE_NAME_MAP.get(state,"unknown") def nodeinfo(self): return self.conn.getInfo() def get_type(self): return self.conn.getType() def get_xml(self, vmid): vm = self.conn.lookupByName(vmid) return vm.XMLDesc(0) def get_maxVcpus(self, vmid): vm = self.conn.lookupByName(vmid) return vm.maxVcpus() def get_maxMemory(self, vmid): vm = self.conn.lookupByName(vmid) return vm.maxMemory() def getFreeMemory(self): return self.conn.getFreeMemory() def get_autostart(self, vmid): vm = self.conn.lookupByName(vmid) return vm.autostart() def set_autostart(self, vmid, val): vm = self.conn.lookupByName(vmid) return vm.setAutostart(val) def define_from_xml(self, xml): return self.conn.defineXML(xml) class Virt(object): def __init__(self, uri, module): self.module = module self.uri = uri def __get_conn(self): self.conn = LibvirtConnection(self.uri, self.module) return self.conn def get_vm(self, vmid): self.__get_conn() return self.conn.find_vm(vmid) def state(self): vms = self.list_vms() state = [] for vm in vms: state_blurb = self.conn.get_status(vm) state.append("%s %s" % (vm,state_blurb)) return state def info(self): vms = self.list_vms() info = dict() for vm in vms: data = self.conn.find_vm(vm).info() # libvirt returns maxMem, memory, and cpuTime as long()'s, which # xmlrpclib tries to convert to regular int's during serialization. # This throws exceptions, so convert them to strings here and # assume the other end of the xmlrpc connection can figure things # out or doesn't care. info[vm] = { "state" : VIRT_STATE_NAME_MAP.get(data[0],"unknown"), "maxMem" : str(data[1]), "memory" : str(data[2]), "nrVirtCpu" : data[3], "cpuTime" : str(data[4]), } info[vm]["autostart"] = self.conn.get_autostart(vm) return info def nodeinfo(self): self.__get_conn() info = dict() data = self.conn.nodeinfo() info = { "cpumodel" : str(data[0]), "phymemory" : str(data[1]), "cpus" : str(data[2]), "cpumhz" : str(data[3]), "numanodes" : str(data[4]), "sockets" : str(data[5]), "cpucores" : str(data[6]), "cputhreads" : str(data[7]) } return info def list_vms(self, state=None): self.conn = self.__get_conn() vms = self.conn.find_vm(-1) results = [] for x in vms: try: if state: vmstate = self.conn.get_status2(x) if vmstate == state: results.append(x.name()) else: results.append(x.name()) except: pass return results def virttype(self): return self.__get_conn().get_type() def autostart(self, vmid): self.conn = self.__get_conn() return self.conn.set_autostart(vmid, True) def freemem(self): self.conn = self.__get_conn() return self.conn.getFreeMemory() def shutdown(self, vmid): """ Make the machine with the given vmid stop running. Whatever that takes. 
""" self.__get_conn() self.conn.shutdown(vmid) return 0 def pause(self, vmid): """ Pause the machine with the given vmid. """ self.__get_conn() return self.conn.suspend(vmid) def unpause(self, vmid): """ Unpause the machine with the given vmid. """ self.__get_conn() return self.conn.resume(vmid) def create(self, vmid): """ Start the machine via the given vmid """ self.__get_conn() return self.conn.create(vmid) def start(self, vmid): """ Start the machine via the given id/name """ self.__get_conn() return self.conn.create(vmid) def destroy(self, vmid): """ Pull the virtual power from the virtual domain, giving it virtually no time to virtually shut down. """ self.__get_conn() return self.conn.destroy(vmid) def undefine(self, vmid): """ Stop a domain, and then wipe it from the face of the earth. (delete disk/config file) """ self.__get_conn() return self.conn.undefine(vmid) def status(self, vmid): """ Return a state suitable for server consumption. Aka, codes.py values, not XM output. """ self.__get_conn() return self.conn.get_status(vmid) def get_xml(self, vmid): """ Receive a Vm id as input Return an xml describing vm config returned by a libvirt call """ self.__get_conn() return self.conn.get_xml(vmid) def get_maxVcpus(self, vmid): """ Gets the max number of VCPUs on a guest """ self.__get_conn() return self.conn.get_maxVcpus(vmid) def get_max_memory(self, vmid): """ Gets the max memory on a guest """ self.__get_conn() return self.conn.get_MaxMemory(vmid) def define(self, xml): """ Define a guest with the given xml """ self.__get_conn() return self.conn.define_from_xml(xml) def core(module): state = module.params.get('state', None) guest = module.params.get('name', None) command = module.params.get('command', None) uri = module.params.get('uri', None) xml = module.params.get('xml', None) v = Virt(uri, module) res = {} if state and command=='list_vms': res = v.list_vms(state=state) if type(res) != dict: res = { command: res } return VIRT_SUCCESS, res if state: if not guest: module.fail_json(msg = "state change requires a guest specified") res['changed'] = False if state == 'running': if v.status(guest) is 'paused': res['changed'] = True res['msg'] = v.unpause(guest) elif v.status(guest) is not 'running': res['changed'] = True res['msg'] = v.start(guest) elif state == 'shutdown': if v.status(guest) is not 'shutdown': res['changed'] = True res['msg'] = v.shutdown(guest) elif state == 'destroyed': if v.status(guest) is not 'shutdown': res['changed'] = True res['msg'] = v.destroy(guest) elif state == 'paused': if v.status(guest) is 'running': res['changed'] = True res['msg'] = v.pause(guest) else: module.fail_json(msg="unexpected state") return VIRT_SUCCESS, res if command: if command in VM_COMMANDS: if not guest: module.fail_json(msg = "%s requires 1 argument: guest" % command) if command == 'define': if not xml: module.fail_json(msg = "define requires xml argument") try: v.get_vm(guest) except VMNotFound: v.define(xml) res = {'changed': True, 'created': guest} return VIRT_SUCCESS, res res = getattr(v, command)(guest) if type(res) != dict: res = { command: res } return VIRT_SUCCESS, res elif hasattr(v, command): res = getattr(v, command)() if type(res) != dict: res = { command: res } return VIRT_SUCCESS, res else: module.fail_json(msg="Command %s not recognized" % basecmd) module.fail_json(msg="expected state or command parameter to be specified") def main(): module = AnsibleModule(argument_spec=dict( name = dict(aliases=['guest']), state = dict(choices=['running', 'shutdown', 'destroyed', 
'paused']), command = dict(choices=ALL_COMMANDS), uri = dict(default='qemu:///system'), xml = dict(), )) rc = VIRT_SUCCESS try: rc, result = core(module) except Exception, e: module.fail_json(msg=str(e)) if rc != 0: # something went wrong emit the msg module.fail_json(rc=rc, msg=result) else: module.exit_json(**result) # import module snippets from ansible.module_utils.basic import * main()
gpl-3.0
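Illustrative sketch, not part of the record above: the Virt wrapper can also be driven directly from Python, given libvirt, a reachable qemu:///system hypervisor, and any object providing the small AnsibleModule surface it touches (run_command, used for the xen check). The stub and the guest name 'alpha' are assumptions for the example.

class ModuleStub(object):
    def run_command(self, cmd):
        return 0, '', ''            # pretend `uname -r` reported a non-xen kernel

v = Virt('qemu:///system', ModuleStub())
print(v.virttype())                 # hypervisor type reported by libvirt
print(v.list_vms(state='running'))  # names of running guests only
print(v.status('alpha'))            # 'running', 'shutdown', 'paused', ... for a guest named 'alpha'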
Tribler/decentralized-mortgage-market
market/models/investment.py
2
2937
from enum import Enum as PyEnum from base64 import urlsafe_b64encode from storm.properties import Int, Float, RawStr from storm.references import ReferenceSet from protobuf_to_dict import dict_to_protobuf, protobuf_to_dict from market.community.market.conversion_pb2 import Investment as InvestmentPB from market.database.types import Enum from market.models.transfer import Transfer class InvestmentStatus(PyEnum): NONE = 0 PENDING = 1 ACCEPTED = 2 REJECTED = 3 FORSALE = 4 class Investment(object): """ This class represents an investment of someone in a specific campaign. """ __storm_table__ = 'investment' __storm_primary__ = 'id', 'user_id' id = Int() user_id = RawStr() owner_id = RawStr() amount = Float() interest_rate = Float() campaign_id = Int() campaign_user_id = RawStr() status = Enum(InvestmentStatus) contract_id = RawStr() transfers = ReferenceSet((id, user_id), (Transfer.investment_id, Transfer.investment_user_id)) def __init__(self, identifier, user_id, amount, interest_rate, campaign_id, campaign_user_id, status, contract_id=''): self.id = identifier self.user_id = user_id self.amount = amount self.interest_rate = interest_rate self.campaign_id = campaign_id self.campaign_user_id = campaign_user_id self.status = status self.contract_id = contract_id def to_dict(self, api_response=False): return { 'id': self.id, 'user_id': urlsafe_b64encode(self.user_id) if api_response else self.user_id, 'amount': self.amount, 'interest_rate': self.interest_rate, 'campaign_id': self.campaign_id, 'campaign_user_id': urlsafe_b64encode(self.campaign_user_id) if api_response else self.campaign_user_id, 'status': self.status.name if api_response else self.status.value, 'contract_id': urlsafe_b64encode(self.contract_id) if api_response else self.contract_id } @staticmethod def from_dict(investment_dict): try: status = InvestmentStatus(investment_dict['status']) except ValueError: return None return Investment(investment_dict['id'], investment_dict['user_id'], investment_dict['amount'], investment_dict['interest_rate'], investment_dict['campaign_id'], investment_dict['campaign_user_id'], status, investment_dict['contract_id']) def to_bin(self): return dict_to_protobuf(InvestmentPB, self.to_dict()).SerializeToString() @staticmethod def from_bin(binary): msg = InvestmentPB() msg.ParseFromString(binary) return Investment.from_dict(protobuf_to_dict(msg))
gpl-3.0
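Illustrative sketch, not part of the record above: an Investment can be round-tripped through its protobuf wire format with the to_bin/from_bin helpers defined in the class. The field values are invented for the example and assume the package's generated conversion_pb2 message accepts them.

inv = Investment(identifier=1,
                 user_id='investor-public-key',
                 amount=1000.0,
                 interest_rate=2.5,
                 campaign_id=7,
                 campaign_user_id='borrower-public-key',
                 status=InvestmentStatus.PENDING)

blob = inv.to_bin()                    # dict -> protobuf message -> bytes
restored = Investment.from_bin(blob)   # bytes -> protobuf message -> dict -> Investment
assert restored.amount == inv.amount and restored.status == InvestmentStatus.PENDING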
sy0302/lammps_qtb
python/examples/viz_atomeye.py
25
1913
#!/usr/bin/env python -i # preceeding line should have path for Python on your machine # viz_atomeye.py # Purpose: viz running LAMMPS simulation via AtomEye # Syntax: viz_atomeye.py in.lammps Nfreq Nsteps # in.lammps = LAMMPS input script # Nfreq = dump and viz shapshot every this many steps # Nsteps = run for this many steps import sys,os # set this to point to AtomEye version 3 executable # first line if want AtomEye output to screen, 2nd line to file #ATOMEYE3 = "/home/sjplimp/tools/atomeye3/A3.i686-20060530" ATOMEYE3 = "/home/sjplimp/tools/atomeye3/A3.i686-20060530 > atomeye.out" # parse command line argv = sys.argv if len(argv) != 4: print "Syntax: viz_atomeye.py in.lammps Nfreq Nsteps" sys.exit() infile = sys.argv[1] nfreq = int(sys.argv[2]) nsteps = int(sys.argv[3]) me = 0 # uncomment if running in parallel via Pypar #import pypar #me = pypar.rank() #nprocs = pypar.size() from lammps import lammps lmp = lammps() # run infile all at once # assumed to have no run command in it # dump a file in extended CFG format for AtomEye lmp.file(infile) lmp.command("thermo %d" % nfreq) lmp.command("dump python all cfg %d tmp.cfg.* id type xs ys zs" % nfreq) # initial 0-step run to generate dump file and image lmp.command("run 0 pre yes post no") ntimestep = 0 # wrapper on GL window via Pizza.py gl tool # just proc 0 handles reading of dump file and viz if me == 0: a = os.popen(ATOMEYE3,'w') a.write("load_config tmp.cfg.0\n") a.flush() # run nfreq steps at a time w/out pre/post, read dump snapshot, display it while ntimestep < nsteps: lmp.command("run %d pre no post no" % nfreq) ntimestep += nfreq if me == 0: a.write("load_config tmp.cfg.%d\n" % ntimestep) a.flush() lmp.command("run 0 pre no post yes") # uncomment if running in parallel via Pypar #print "Proc %d out of %d procs has" % (me,nprocs), lmp #pypar.finalize()
gpl-2.0
neerajvashistha/pa-dude
lib/python2.7/site-packages/docutils/readers/pep.py
136
1555
# $Id: pep.py 7320 2012-01-19 22:33:02Z milde $ # Author: David Goodger <[email protected]> # Copyright: This module has been placed in the public domain. """ Python Enhancement Proposal (PEP) Reader. """ __docformat__ = 'reStructuredText' from docutils.readers import standalone from docutils.transforms import peps, references, misc, frontmatter from docutils.parsers import rst class Reader(standalone.Reader): supported = ('pep',) """Contexts this reader supports.""" settings_spec = ( 'PEP Reader Option Defaults', 'The --pep-references and --rfc-references options (for the ' 'reStructuredText parser) are on by default.', ()) config_section = 'pep reader' config_section_dependencies = ('readers', 'standalone reader') def get_transforms(self): transforms = standalone.Reader.get_transforms(self) # We have PEP-specific frontmatter handling. transforms.remove(frontmatter.DocTitle) transforms.remove(frontmatter.SectionSubTitle) transforms.remove(frontmatter.DocInfo) transforms.extend([peps.Headers, peps.Contents, peps.TargetNotes]) return transforms settings_default_overrides = {'pep_references': 1, 'rfc_references': 1} inliner_class = rst.states.Inliner def __init__(self, parser=None, parser_name=None): """`parser` should be ``None``.""" if parser is None: parser = rst.Parser(rfc2822=True, inliner=self.inliner_class()) standalone.Reader.__init__(self, parser, '')
mit
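Illustrative sketch, not part of the record above: the PEP Reader is normally selected by name through docutils' publisher machinery rather than instantiated directly. The file path below is a hypothetical placeholder for an RFC 2822-style PEP source.

from docutils.core import publish_string

pep_source = open('pep-0008.txt').read()   # hypothetical path to a PEP text file
html = publish_string(source=pep_source,
                      reader_name='pep',    # picks up docutils.readers.pep, i.e. the class above
                      writer_name='html')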
jokajak/itweb
data/env/lib/python2.6/site-packages/MarkupSafe-0.11-py2.6-linux-x86_64.egg/markupsafe/tests.py
24
2610
import gc import unittest from markupsafe import Markup, escape, escape_silent class MarkupTestCase(unittest.TestCase): def test_markup_operations(self): # adding two strings should escape the unsafe one unsafe = '<script type="application/x-some-script">alert("foo");</script>' safe = Markup('<em>username</em>') assert unsafe + safe == unicode(escape(unsafe)) + unicode(safe) # string interpolations are safe to use too assert Markup('<em>%s</em>') % '<bad user>' == \ '<em>&lt;bad user&gt;</em>' assert Markup('<em>%(username)s</em>') % { 'username': '<bad user>' } == '<em>&lt;bad user&gt;</em>' # an escaped object is markup too assert type(Markup('foo') + 'bar') is Markup # and it implements __html__ by returning itself x = Markup("foo") assert x.__html__() is x # it also knows how to treat __html__ objects class Foo(object): def __html__(self): return '<em>awesome</em>' def __unicode__(self): return 'awesome' assert Markup(Foo()) == '<em>awesome</em>' assert Markup('<strong>%s</strong>') % Foo() == \ '<strong><em>awesome</em></strong>' # escaping and unescaping assert escape('"<>&\'') == '&#34;&lt;&gt;&amp;&#39;' assert Markup("<em>Foo &amp; Bar</em>").striptags() == "Foo & Bar" assert Markup("&lt;test&gt;").unescape() == "<test>" def test_all_set(self): import markupsafe as markup for item in markup.__all__: getattr(markup, item) def test_escape_silent(self): assert escape_silent(None) == Markup() assert escape(None) == Markup(None) assert escape_silent('<foo>') == Markup(u'&lt;foo&gt;') class MarkupLeakTestCase(unittest.TestCase): def test_markup_leaks(self): counts = set() for count in xrange(20): for item in xrange(1000): escape("foo") escape("<foo>") escape(u"foo") escape(u"<foo>") counts.add(len(gc.get_objects())) assert len(counts) == 1, 'ouch, c extension seems to leak objects' def suite(): suite = unittest.TestSuite() suite.addTest(unittest.makeSuite(MarkupTestCase)) # this test only tests the c extension if not hasattr(escape, 'func_code'): suite.addTest(unittest.makeSuite(MarkupLeakTestCase)) return suite if __name__ == '__main__': unittest.main(defaultTest='suite')
gpl-3.0
xjnny/NRPhoto
node_modules/node-gyp/gyp/pylib/gyp/__init__.py
1524
22178
#!/usr/bin/env python # Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import copy import gyp.input import optparse import os.path import re import shlex import sys import traceback from gyp.common import GypError # Default debug modes for GYP debug = {} # List of "official" debug modes, but you can use anything you like. DEBUG_GENERAL = 'general' DEBUG_VARIABLES = 'variables' DEBUG_INCLUDES = 'includes' def DebugOutput(mode, message, *args): if 'all' in gyp.debug or mode in gyp.debug: ctx = ('unknown', 0, 'unknown') try: f = traceback.extract_stack(limit=2) if f: ctx = f[0][:3] except: pass if args: message %= args print '%s:%s:%d:%s %s' % (mode.upper(), os.path.basename(ctx[0]), ctx[1], ctx[2], message) def FindBuildFiles(): extension = '.gyp' files = os.listdir(os.getcwd()) build_files = [] for file in files: if file.endswith(extension): build_files.append(file) return build_files def Load(build_files, format, default_variables={}, includes=[], depth='.', params=None, check=False, circular_check=True, duplicate_basename_check=True): """ Loads one or more specified build files. default_variables and includes will be copied before use. Returns the generator for the specified format and the data returned by loading the specified build files. """ if params is None: params = {} if '-' in format: format, params['flavor'] = format.split('-', 1) default_variables = copy.copy(default_variables) # Default variables provided by this program and its modules should be # named WITH_CAPITAL_LETTERS to provide a distinct "best practice" namespace, # avoiding collisions with user and automatic variables. default_variables['GENERATOR'] = format default_variables['GENERATOR_FLAVOR'] = params.get('flavor', '') # Format can be a custom python file, or by default the name of a module # within gyp.generator. if format.endswith('.py'): generator_name = os.path.splitext(format)[0] path, generator_name = os.path.split(generator_name) # Make sure the path to the custom generator is in sys.path # Don't worry about removing it once we are done. Keeping the path # to each generator that is used in sys.path is likely harmless and # arguably a good idea. path = os.path.abspath(path) if path not in sys.path: sys.path.insert(0, path) else: generator_name = 'gyp.generator.' + format # These parameters are passed in order (as opposed to by key) # because ActivePython cannot handle key parameters to __import__. generator = __import__(generator_name, globals(), locals(), generator_name) for (key, val) in generator.generator_default_variables.items(): default_variables.setdefault(key, val) # Give the generator the opportunity to set additional variables based on # the params it will receive in the output phase. if getattr(generator, 'CalculateVariables', None): generator.CalculateVariables(default_variables, params) # Give the generator the opportunity to set generator_input_info based on # the params it will receive in the output phase. if getattr(generator, 'CalculateGeneratorInputInfo', None): generator.CalculateGeneratorInputInfo(params) # Fetch the generator specific info that gets fed to input, we use getattr # so we can default things and the generators only have to provide what # they need. 
generator_input_info = { 'non_configuration_keys': getattr(generator, 'generator_additional_non_configuration_keys', []), 'path_sections': getattr(generator, 'generator_additional_path_sections', []), 'extra_sources_for_rules': getattr(generator, 'generator_extra_sources_for_rules', []), 'generator_supports_multiple_toolsets': getattr(generator, 'generator_supports_multiple_toolsets', False), 'generator_wants_static_library_dependencies_adjusted': getattr(generator, 'generator_wants_static_library_dependencies_adjusted', True), 'generator_wants_sorted_dependencies': getattr(generator, 'generator_wants_sorted_dependencies', False), 'generator_filelist_paths': getattr(generator, 'generator_filelist_paths', None), } # Process the input specific to this generator. result = gyp.input.Load(build_files, default_variables, includes[:], depth, generator_input_info, check, circular_check, duplicate_basename_check, params['parallel'], params['root_targets']) return [generator] + result def NameValueListToDict(name_value_list): """ Takes an array of strings of the form 'NAME=VALUE' and creates a dictionary of the pairs. If a string is simply NAME, then the value in the dictionary is set to True. If VALUE can be converted to an integer, it is. """ result = { } for item in name_value_list: tokens = item.split('=', 1) if len(tokens) == 2: # If we can make it an int, use that, otherwise, use the string. try: token_value = int(tokens[1]) except ValueError: token_value = tokens[1] # Set the variable to the supplied value. result[tokens[0]] = token_value else: # No value supplied, treat it as a boolean and set it. result[tokens[0]] = True return result def ShlexEnv(env_name): flags = os.environ.get(env_name, []) if flags: flags = shlex.split(flags) return flags def FormatOpt(opt, value): if opt.startswith('--'): return '%s=%s' % (opt, value) return opt + value def RegenerateAppendFlag(flag, values, predicate, env_name, options): """Regenerate a list of command line flags, for an option of action='append'. The |env_name|, if given, is checked in the environment and used to generate an initial list of options, then the options that were specified on the command line (given in |values|) are appended. This matches the handling of environment variables and command line flags where command line flags override the environment, while not requiring the environment to be set when the flags are used again. """ flags = [] if options.use_environment and env_name: for flag_value in ShlexEnv(env_name): value = FormatOpt(flag, predicate(flag_value)) if value in flags: flags.remove(value) flags.append(value) if values: for flag_value in values: flags.append(FormatOpt(flag, predicate(flag_value))) return flags def RegenerateFlags(options): """Given a parsed options object, and taking the environment variables into account, returns a list of flags that should regenerate an equivalent options object (even in the absence of the environment variables.) Any path options will be normalized relative to depth. The format flag is not included, as it is assumed the calling generator will set that as appropriate. """ def FixPath(path): path = gyp.common.FixIfRelativePath(path, options.depth) if not path: return os.path.curdir return path def Noop(value): return value # We always want to ignore the environment when regenerating, to avoid # duplicate or changed flags in the environment at the time of regeneration. 
flags = ['--ignore-environment'] for name, metadata in options._regeneration_metadata.iteritems(): opt = metadata['opt'] value = getattr(options, name) value_predicate = metadata['type'] == 'path' and FixPath or Noop action = metadata['action'] env_name = metadata['env_name'] if action == 'append': flags.extend(RegenerateAppendFlag(opt, value, value_predicate, env_name, options)) elif action in ('store', None): # None is a synonym for 'store'. if value: flags.append(FormatOpt(opt, value_predicate(value))) elif options.use_environment and env_name and os.environ.get(env_name): flags.append(FormatOpt(opt, value_predicate(os.environ.get(env_name)))) elif action in ('store_true', 'store_false'): if ((action == 'store_true' and value) or (action == 'store_false' and not value)): flags.append(opt) elif options.use_environment and env_name: print >>sys.stderr, ('Warning: environment regeneration unimplemented ' 'for %s flag %r env_name %r' % (action, opt, env_name)) else: print >>sys.stderr, ('Warning: regeneration unimplemented for action %r ' 'flag %r' % (action, opt)) return flags class RegeneratableOptionParser(optparse.OptionParser): def __init__(self): self.__regeneratable_options = {} optparse.OptionParser.__init__(self) def add_option(self, *args, **kw): """Add an option to the parser. This accepts the same arguments as OptionParser.add_option, plus the following: regenerate: can be set to False to prevent this option from being included in regeneration. env_name: name of environment variable that additional values for this option come from. type: adds type='path', to tell the regenerator that the values of this option need to be made relative to options.depth """ env_name = kw.pop('env_name', None) if 'dest' in kw and kw.pop('regenerate', True): dest = kw['dest'] # The path type is needed for regenerating, for optparse we can just treat # it as a string. type = kw.get('type') if type == 'path': kw['type'] = 'string' self.__regeneratable_options[dest] = { 'action': kw.get('action'), 'type': type, 'env_name': env_name, 'opt': args[0], } optparse.OptionParser.add_option(self, *args, **kw) def parse_args(self, *args): values, args = optparse.OptionParser.parse_args(self, *args) values._regeneration_metadata = self.__regeneratable_options return values, args def gyp_main(args): my_name = os.path.basename(sys.argv[0]) parser = RegeneratableOptionParser() usage = 'usage: %s [options ...] [build_file ...]' parser.set_usage(usage.replace('%s', '%prog')) parser.add_option('--build', dest='configs', action='append', help='configuration for build after project generation') parser.add_option('--check', dest='check', action='store_true', help='check format of gyp files') parser.add_option('--config-dir', dest='config_dir', action='store', env_name='GYP_CONFIG_DIR', default=None, help='The location for configuration files like ' 'include.gypi.') parser.add_option('-d', '--debug', dest='debug', metavar='DEBUGMODE', action='append', default=[], help='turn on a debugging ' 'mode for debugging GYP. 
Supported modes are "variables", ' '"includes" and "general" or "all" for all of them.') parser.add_option('-D', dest='defines', action='append', metavar='VAR=VAL', env_name='GYP_DEFINES', help='sets variable VAR to value VAL') parser.add_option('--depth', dest='depth', metavar='PATH', type='path', help='set DEPTH gyp variable to a relative path to PATH') parser.add_option('-f', '--format', dest='formats', action='append', env_name='GYP_GENERATORS', regenerate=False, help='output formats to generate') parser.add_option('-G', dest='generator_flags', action='append', default=[], metavar='FLAG=VAL', env_name='GYP_GENERATOR_FLAGS', help='sets generator flag FLAG to VAL') parser.add_option('--generator-output', dest='generator_output', action='store', default=None, metavar='DIR', type='path', env_name='GYP_GENERATOR_OUTPUT', help='puts generated build files under DIR') parser.add_option('--ignore-environment', dest='use_environment', action='store_false', default=True, regenerate=False, help='do not read options from environment variables') parser.add_option('-I', '--include', dest='includes', action='append', metavar='INCLUDE', type='path', help='files to include in all loaded .gyp files') # --no-circular-check disables the check for circular relationships between # .gyp files. These relationships should not exist, but they've only been # observed to be harmful with the Xcode generator. Chromium's .gyp files # currently have some circular relationships on non-Mac platforms, so this # option allows the strict behavior to be used on Macs and the lenient # behavior to be used elsewhere. # TODO(mark): Remove this option when http://crbug.com/35878 is fixed. parser.add_option('--no-circular-check', dest='circular_check', action='store_false', default=True, regenerate=False, help="don't check for circular relationships between files") # --no-duplicate-basename-check disables the check for duplicate basenames # in a static_library/shared_library project. Visual C++ 2008 generator # doesn't support this configuration. Libtool on Mac also generates warnings # when duplicate basenames are passed into Make generator on Mac. # TODO(yukawa): Remove this option when these legacy generators are # deprecated. 
parser.add_option('--no-duplicate-basename-check', dest='duplicate_basename_check', action='store_false', default=True, regenerate=False, help="don't check for duplicate basenames") parser.add_option('--no-parallel', action='store_true', default=False, help='Disable multiprocessing') parser.add_option('-S', '--suffix', dest='suffix', default='', help='suffix to add to generated files') parser.add_option('--toplevel-dir', dest='toplevel_dir', action='store', default=None, metavar='DIR', type='path', help='directory to use as the root of the source tree') parser.add_option('-R', '--root-target', dest='root_targets', action='append', metavar='TARGET', help='include only TARGET and its deep dependencies') options, build_files_arg = parser.parse_args(args) build_files = build_files_arg # Set up the configuration directory (defaults to ~/.gyp) if not options.config_dir: home = None home_dot_gyp = None if options.use_environment: home_dot_gyp = os.environ.get('GYP_CONFIG_DIR', None) if home_dot_gyp: home_dot_gyp = os.path.expanduser(home_dot_gyp) if not home_dot_gyp: home_vars = ['HOME'] if sys.platform in ('cygwin', 'win32'): home_vars.append('USERPROFILE') for home_var in home_vars: home = os.getenv(home_var) if home != None: home_dot_gyp = os.path.join(home, '.gyp') if not os.path.exists(home_dot_gyp): home_dot_gyp = None else: break else: home_dot_gyp = os.path.expanduser(options.config_dir) if home_dot_gyp and not os.path.exists(home_dot_gyp): home_dot_gyp = None if not options.formats: # If no format was given on the command line, then check the env variable. generate_formats = [] if options.use_environment: generate_formats = os.environ.get('GYP_GENERATORS', []) if generate_formats: generate_formats = re.split(r'[\s,]', generate_formats) if generate_formats: options.formats = generate_formats else: # Nothing in the variable, default based on platform. if sys.platform == 'darwin': options.formats = ['xcode'] elif sys.platform in ('win32', 'cygwin'): options.formats = ['msvs'] else: options.formats = ['make'] if not options.generator_output and options.use_environment: g_o = os.environ.get('GYP_GENERATOR_OUTPUT') if g_o: options.generator_output = g_o options.parallel = not options.no_parallel for mode in options.debug: gyp.debug[mode] = 1 # Do an extra check to avoid work when we're not debugging. if DEBUG_GENERAL in gyp.debug: DebugOutput(DEBUG_GENERAL, 'running with these options:') for option, value in sorted(options.__dict__.items()): if option[0] == '_': continue if isinstance(value, basestring): DebugOutput(DEBUG_GENERAL, " %s: '%s'", option, value) else: DebugOutput(DEBUG_GENERAL, " %s: %s", option, value) if not build_files: build_files = FindBuildFiles() if not build_files: raise GypError((usage + '\n\n%s: error: no build_file') % (my_name, my_name)) # TODO(mark): Chromium-specific hack! # For Chromium, the gyp "depth" variable should always be a relative path # to Chromium's top-level "src" directory. If no depth variable was set # on the command line, try to find a "src" directory by looking at the # absolute path to each build file's directory. The first "src" component # found will be treated as though it were the path used for --depth. 
if not options.depth: for build_file in build_files: build_file_dir = os.path.abspath(os.path.dirname(build_file)) build_file_dir_components = build_file_dir.split(os.path.sep) components_len = len(build_file_dir_components) for index in xrange(components_len - 1, -1, -1): if build_file_dir_components[index] == 'src': options.depth = os.path.sep.join(build_file_dir_components) break del build_file_dir_components[index] # If the inner loop found something, break without advancing to another # build file. if options.depth: break if not options.depth: raise GypError('Could not automatically locate src directory. This is' 'a temporary Chromium feature that will be removed. Use' '--depth as a workaround.') # If toplevel-dir is not set, we assume that depth is the root of our source # tree. if not options.toplevel_dir: options.toplevel_dir = options.depth # -D on the command line sets variable defaults - D isn't just for define, # it's for default. Perhaps there should be a way to force (-F?) a # variable's value so that it can't be overridden by anything else. cmdline_default_variables = {} defines = [] if options.use_environment: defines += ShlexEnv('GYP_DEFINES') if options.defines: defines += options.defines cmdline_default_variables = NameValueListToDict(defines) if DEBUG_GENERAL in gyp.debug: DebugOutput(DEBUG_GENERAL, "cmdline_default_variables: %s", cmdline_default_variables) # Set up includes. includes = [] # If ~/.gyp/include.gypi exists, it'll be forcibly included into every # .gyp file that's loaded, before anything else is included. if home_dot_gyp != None: default_include = os.path.join(home_dot_gyp, 'include.gypi') if os.path.exists(default_include): print 'Using overrides found in ' + default_include includes.append(default_include) # Command-line --include files come after the default include. if options.includes: includes.extend(options.includes) # Generator flags should be prefixed with the target generator since they # are global across all generator runs. gen_flags = [] if options.use_environment: gen_flags += ShlexEnv('GYP_GENERATOR_FLAGS') if options.generator_flags: gen_flags += options.generator_flags generator_flags = NameValueListToDict(gen_flags) if DEBUG_GENERAL in gyp.debug.keys(): DebugOutput(DEBUG_GENERAL, "generator_flags: %s", generator_flags) # Generate all requested formats (use a set in case we got one format request # twice) for format in set(options.formats): params = {'options': options, 'build_files': build_files, 'generator_flags': generator_flags, 'cwd': os.getcwd(), 'build_files_arg': build_files_arg, 'gyp_binary': sys.argv[0], 'home_dot_gyp': home_dot_gyp, 'parallel': options.parallel, 'root_targets': options.root_targets, 'target_arch': cmdline_default_variables.get('target_arch', '')} # Start with the default variables from the command line. [generator, flat_list, targets, data] = Load( build_files, format, cmdline_default_variables, includes, options.depth, params, options.check, options.circular_check, options.duplicate_basename_check) # TODO(mark): Pass |data| for now because the generator needs a list of # build files that came in. In the future, maybe it should just accept # a list, and not the whole data dict. # NOTE: flat_list is the flattened dependency graph specifying the order # that targets may be built. Build systems that operate serially or that # need to have dependencies defined before dependents reference them should # generate targets in the order specified in flat_list. 
    generator.GenerateOutput(flat_list, targets, data, params)

    if options.configs:
      valid_configs = targets[flat_list[0]]['configurations'].keys()
      for conf in options.configs:
        if conf not in valid_configs:
          raise GypError('Invalid config specified via --build: %s' % conf)
      generator.PerformBuild(data, options.configs, params)

  # Done
  return 0


def main(args):
  try:
    return gyp_main(args)
  except GypError, e:
    sys.stderr.write("gyp: %s\n" % e)
    return 1

# NOTE: setuptools generated console_scripts calls function with no arguments
def script_main():
  return main(sys.argv[1:])

if __name__ == '__main__':
  sys.exit(script_main())
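# --- Illustrative sketch (not part of the original gyp source) ---------------
# A minimal, hedged example of how two helpers defined above are expected to
# behave, based only on their definitions in this file; the function below is
# purely illustrative and is never called by gyp.
def _example_helper_behaviour():
  # NameValueListToDict: 'NAME=VALUE' pairs become dict entries, values that
  # parse as integers are converted, and bare names are stored as True.
  assert NameValueListToDict(['OS=linux', 'jobs=4', 'clang']) == \
      {'OS': 'linux', 'jobs': 4, 'clang': True}
  # FormatOpt: long options are joined with '=', short options are concatenated.
  assert FormatOpt('--depth', 'src') == '--depth=src'
  assert FormatOpt('-D', 'OS=linux') == '-DOS=linux'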
gpl-2.0
Joergen/olympia
apps/pages/views.py
15
2236
from collections import defaultdict

from django.conf import settings
from django.shortcuts import render

from devhub.models import ActivityLog
from users.models import UserProfile


def credits(request):
    developers = (UserProfile.objects
                  .exclude(display_name=None)
                  .filter(groupuser__group__name='Developers Credits')
                  .order_by('display_name')
                  .distinct())
    past_developers = (UserProfile.objects
                       .exclude(display_name=None)
                       .filter(
                           groupuser__group__name='Past Developers Credits')
                       .order_by('display_name')
                       .distinct())
    other_contribs = (UserProfile.objects
                      .exclude(display_name=None)
                      .filter(
                          groupuser__group__name='Other Contributors Credits')
                      .order_by('display_name')
                      .distinct())

    languages = sorted(list(
        set(settings.AMO_LANGUAGES + settings.HIDDEN_LANGUAGES) -
        set(['en-US'])))

    localizers = []
    for lang in languages:
        users = (UserProfile.objects
                 .exclude(display_name=None)
                 .filter(groupuser__group__name='%s Localizers' % lang)
                 .order_by('display_name')
                 .distinct())
        if users:
            localizers.append((lang, users))

    total_reviews = (ActivityLog.objects.total_reviews()
                     .filter(approval_count__gt=10))
    reviewers = defaultdict(list)
    for total in total_reviews:
        cnt = total.get('approval_count', 0)
        if cnt > 1000:
            reviewers[1000].append(total)
        elif cnt > 500:
            reviewers[500].append(total)
        elif cnt > 100:
            reviewers[100].append(total)
        elif cnt > 10:
            reviewers[10].append(total)

    context = {
        'developers': developers,
        'past_developers': past_developers,
        'other_contribs': other_contribs,
        'localizers': localizers,
        'reviewers': reviewers,
    }

    return render(request, 'pages/credits.html', context)
bsd-3-clause
trishnaguha/ansible
lib/ansible/modules/cloud/google/gcp_spanner_instance_facts.py
4
5935
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright (C) 2017 Google # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # ---------------------------------------------------------------------------- # # *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** # # ---------------------------------------------------------------------------- # # This file is automatically generated by Magic Modules and manual # changes will be clobbered when the file is regenerated. # # Please read more about how to change this file at # https://www.github.com/GoogleCloudPlatform/magic-modules # # ---------------------------------------------------------------------------- from __future__ import absolute_import, division, print_function __metaclass__ = type ################################################################################ # Documentation ################################################################################ ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: gcp_spanner_instance_facts description: - Gather facts for GCP Instance short_description: Gather facts for GCP Instance version_added: 2.8 author: Google Inc. (@googlecloudplatform) requirements: - python >= 2.6 - requests >= 2.18.4 - google-auth >= 1.3.0 options: {} extends_documentation_fragment: gcp ''' EXAMPLES = ''' - name: a instance facts gcp_spanner_instance_facts: project: test_project auth_kind: serviceaccount service_account_file: "/tmp/auth.pem" ''' RETURN = ''' items: description: List of items returned: always type: complex contains: name: description: - A unique identifier for the instance, which cannot be changed after the instance is created. Values are of the form projects/<project>/instances/[a-z][-a-z0-9]*[a-z0-9]. The final segment of the name must be between 6 and 30 characters in length. returned: success type: str config: description: - A reference to the instance configuration. returned: success type: str displayName: description: - The descriptive name for this instance as it appears in UIs. Must be unique per project and between 4 and 30 characters in length. returned: success type: str nodeCount: description: - The number of nodes allocated to this instance. returned: success type: int labels: description: - Cloud Labels are a flexible and lightweight mechanism for organizing cloud resources into groups that reflect a customer's organizational needs and deployment strategies. Cloud Labels can be used to filter collections of resources. They can be used to control how resource metrics are aggregated. And they can be used as arguments to policy management rules (e.g. route, firewall, load balancing, etc.). - 'Label keys must be between 1 and 63 characters long and must conform to the following regular expression: `[a-z]([-a-z0-9]*[a-z0-9])?`.' - Label values must be between 0 and 63 characters long and must conform to the regular expression `([a-z]([-a-z0-9]*[a-z0-9])?)?`. - No more than 64 labels can be associated with a given resource. - See U(https://goo.gl/xmQnxf) for more information on and examples of labels. - 'If you plan to use labels in your own code, please note that additional characters may be allowed in the future. And so you are advised to use an internal label representation, such as JSON, which doesn''t rely upon specific characters being disallowed. 
For example, representing labels as the string: name + "_" + value would prove problematic if we were to allow "_" in a future release.' - 'An object containing a list of "key": value pairs.' - 'Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.' returned: success type: dict ''' ################################################################################ # Imports ################################################################################ from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest import json ################################################################################ # Main ################################################################################ def main(): module = GcpModule( argument_spec=dict( ) ) if not module.params['scopes']: module.params['scopes'] = ['https://www.googleapis.com/auth/spanner.admin'] items = fetch_list(module, collection(module)) if items.get('instances'): items = items.get('instances') else: items = [] return_value = { 'items': items } module.exit_json(**return_value) def collection(module): return "https://spanner.googleapis.com/v1/projects/{project}/instances".format(**module.params) def fetch_list(module, link): auth = GcpSession(module, 'spanner') response = auth.get(link) return return_if_object(module, response) def return_if_object(module, response): # If not found, return nothing. if response.status_code == 404: return None # If no content, return nothing. if response.status_code == 204: return None try: module.raise_for_status(response) result = response.json() except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: module.fail_json(msg="Invalid JSON response with error: %s" % inst) if navigate_hash(result, ['error', 'errors']): module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) return result if __name__ == "__main__": main()
gpl-3.0
resmo/ansible
test/units/config/test_data.py
113
1266
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from units.compat import unittest
from ansible.config.data import ConfigData
from ansible.config.manager import Setting


mykey = Setting('mykey', 'myvalue', 'test', 'string')
mykey2 = Setting('mykey2', 'myvalue2', ['test', 'test2'], 'list')
mykey3 = Setting('mykey3', 'myvalue3', 11111111111, 'integer')


class TestConfigData(unittest.TestCase):

    def setUp(self):
        self.cdata = ConfigData()

    def tearDown(self):
        self.cdata = None

    def test_update_setting(self):
        for setting in [mykey, mykey2, mykey3]:
            self.cdata.update_setting(setting)
            self.assertEqual(setting, self.cdata._global_settings.get(setting.name))

    def test_update_setting_with_plugin(self):
        pass

    def test_get_setting(self):
        self.cdata._global_settings = {'mykey': mykey}
        self.assertEqual(mykey, self.cdata.get_setting('mykey'))

    def test_get_settings(self):
        all_settings = {'mykey': mykey, 'mykey2': mykey2}
        self.cdata._global_settings = all_settings
        for setting in self.cdata.get_settings():
            self.assertEqual(all_settings[setting.name], setting)
gpl-3.0
rainysia/dotfiles
doc/python/test/selenium_localchromeff_remoteIE.py
1
1961
#!/usr/bin/env python
# coding=utf-8

# chrome localhost
'''
import os
from selenium import webdriver

chromedriver = "/home/softs/selenium/chromedriver"
os.environ["webdriver.chrome.driver"] = chromedriver
driver = webdriver.Chrome(chromedriver)
driver.get("http://baidu.com")
driver.quit()
'''

# firefox (iceweasel) localhost
'''
import os
from selenium import webdriver

browser = webdriver.Firefox()
browser.get('http://www.baidu.com')
browser.save_screenshot('screen.png')
browser.quit()
'''

# remote chrome
# remote IE
import os

# For Chinese: make UTF-8 the default encoding so Chinese strings print cleanly.
import sys
reload(sys)
sys.setdefaultencoding('utf-8')

from time import sleep
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities

ie_desired_cap = {'os': 'Windows', 'os_version': '2008', 'browser': 'IE',
                  'browser_version': '9.0', 'resolution': '1024x768'}

tommy_remote_url = 'http://192.168.85.123:4444/wd/hub'
derek_remote_url = 'http://192.168.87.72:18181/wd/hub'
# command_executor = 'http://USERNAME:[email protected]:80/wd/hub'

driver = webdriver.Remote(
    command_executor=derek_remote_url,
    desired_capabilities=ie_desired_cap)

# On Google the search box is name=q; Baidu below uses name=wd.
driver.get("http://www.baidu.com")
eg_title = "百度"  # Contains Chinese, hence the sys.setdefaultencoding('utf-8') above.
print driver.title
# print help(driver)

try:
    if eg_title not in driver.title:
        raise Exception("Unable to load ", eg_title, " page!")
    elem = driver.find_element_by_name("wd")
    elem.send_keys("domain")
    elem.submit()
    # Two ways to wait: explicit & implicit.
    # Explicit:  WebDriverWait.until(condition-that-finds-the-element)
    # Implicit:  driver.manage().timeouts().implicitlyWait(10, TimeUnit.SECONDS)
    print driver.title
    sleep(10)
    print '12345\n'
except Exception, e:
    raise e
finally:
    # driver.implicitly_wait(10)
    # driver.set_script_timeout(10)
    driver.quit()
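# --- Illustrative sketch (not part of the original script) -------------------
# The comments above mention explicit vs. implicit waits; this hedged example
# shows what an explicit wait could look like with Selenium's WebDriverWait and
# expected_conditions helpers.  It is a sketch only and is never called here.
def _explicit_wait_example(driver, timeout=10):
    from selenium.webdriver.common.by import By
    from selenium.webdriver.support.ui import WebDriverWait
    from selenium.webdriver.support import expected_conditions as EC
    # Poll up to `timeout` seconds for the Baidu search box to appear instead
    # of relying on a fixed sleep().
    return WebDriverWait(driver, timeout).until(
        EC.presence_of_element_located((By.NAME, "wd")))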
mit
CanalTP/navitia
source/jormungandr/jormungandr/scenarios/tests/journey_compare_tests.py
1
43791
# Copyright (c) 2001-2014, Canal TP and/or its affiliates. All rights reserved. # # This file is part of Navitia, # the software to build cool stuff with public transport. # # Hope you'll enjoy and contribute to this project, # powered by Canal TP (www.canaltp.fr). # Help us simplify mobility and open public transport: # a non ending quest to the responsive locomotion way of traveling! # # LICENCE: This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # # Stay tuned using # twitter @navitia # channel `#navitia` on riot https://riot.im/app/#/room/#navitia:matrix.org # https://groups.google.com/d/forum/navitia # www.navitia.io from __future__ import absolute_import, print_function, unicode_literals, division from copy import deepcopy from jormungandr.scenarios import journey_filter as jf from jormungandr.scenarios.utils import DepartureJourneySorter, ArrivalJourneySorter import navitiacommon.response_pb2 as response_pb2 from jormungandr.scenarios.new_default import sort_journeys from jormungandr.utils import str_to_time_stamp import random import itertools import functools def empty_journeys_test(): response = response_pb2.Response() sort_journeys(response, 'arrival_time', True) assert not response.journeys def different_arrival_times_test(): response = response_pb2.Response() journey1 = response.journeys.add() journey1.arrival_date_time = str_to_time_stamp("20140422T0800") journey1.duration = 5 * 60 journey1.nb_transfers = 0 journey1.sections.add() journey1.sections[0].type = response_pb2.PUBLIC_TRANSPORT journey1.sections[0].duration = 5 * 60 journey2 = response.journeys.add() journey2.arrival_date_time = str_to_time_stamp("20140422T0758") journey2.duration = 2 * 60 journey2.nb_transfers = 0 journey2.sections.add() journey2.sections[0].type = response_pb2.PUBLIC_TRANSPORT journey2.sections[0].duration = 2 * 60 sort_journeys(response, 'arrival_time', True) assert response.journeys[0].arrival_date_time == str_to_time_stamp("20140422T0758") assert response.journeys[1].arrival_date_time == str_to_time_stamp("20140422T0800") def different_departure_times_test(): response = response_pb2.Response() journey1 = response.journeys.add() journey1.departure_date_time = str_to_time_stamp("20140422T0800") journey1.duration = 5 * 60 journey1.nb_transfers = 0 journey1.sections.add() journey1.sections[0].type = response_pb2.PUBLIC_TRANSPORT journey1.sections[0].duration = 5 * 60 journey2 = response.journeys.add() journey2.departure_date_time = str_to_time_stamp("20140422T0758") journey2.duration = 2 * 60 journey2.nb_transfers = 0 journey2.sections.add() journey2.sections[0].type = response_pb2.PUBLIC_TRANSPORT journey2.sections[0].duration = 2 * 60 sort_journeys(response, 'departure_time', True) assert response.journeys[0].departure_date_time == str_to_time_stamp("20140422T0758") assert response.journeys[1].departure_date_time == str_to_time_stamp("20140422T0800") def different_duration_test(): response = 
response_pb2.Response() journey1 = response.journeys.add() journey1.arrival_date_time = str_to_time_stamp("20140422T0800") journey1.duration = 5 * 60 journey1.nb_transfers = 0 journey1.sections.add() journey1.sections[0].type = response_pb2.PUBLIC_TRANSPORT journey1.sections[0].duration = 5 * 60 journey2 = response.journeys.add() journey2.arrival_date_time = str_to_time_stamp("20140422T0800") journey2.duration = 3 * 60 journey2.nb_transfers = 0 journey2.sections.add() journey2.sections[0].type = response_pb2.PUBLIC_TRANSPORT journey2.sections[0].duration = 3 * 60 sort_journeys(response, 'arrival_time', True) assert response.journeys[0].arrival_date_time == str_to_time_stamp("20140422T0800") assert response.journeys[1].arrival_date_time == str_to_time_stamp("20140422T0800") assert response.journeys[0].duration == 3 * 60 assert response.journeys[1].duration == 5 * 60 def different_nb_transfers_test(): response = response_pb2.Response() journey1 = response.journeys.add() journey1.arrival_date_time = str_to_time_stamp("20140422T0800") journey1.duration = 25 * 60 journey1.nb_transfers = 1 journey1.sections.add() journey1.sections.add() journey1.sections.add() journey1.sections.add() journey1.sections[0].type = response_pb2.PUBLIC_TRANSPORT journey1.sections[0].duration = 5 * 60 journey1.sections[1].type = response_pb2.TRANSFER journey1.sections[1].duration = 3 * 60 journey1.sections[2].type = response_pb2.WAITING journey1.sections[2].duration = 2 * 60 journey1.sections[3].type = response_pb2.PUBLIC_TRANSPORT journey1.sections[3].duration = 15 * 60 journey2 = response.journeys.add() journey2.arrival_date_time = str_to_time_stamp("20140422T0800") journey2.duration = 25 * 60 journey2.nb_transfers = 0 journey2.sections.add() journey2.sections[0].type = response_pb2.PUBLIC_TRANSPORT journey2.sections[0].duration = 25 * 60 sort_journeys(response, 'arrival_time', True) assert response.journeys[0].arrival_date_time == str_to_time_stamp("20140422T0800") assert response.journeys[1].arrival_date_time == str_to_time_stamp("20140422T0800") assert response.journeys[0].duration == 25 * 60 assert response.journeys[1].duration == 25 * 60 assert response.journeys[0].nb_transfers == 0 assert response.journeys[1].nb_transfers == 1 def different_duration_non_pt_test(): response = response_pb2.Response() journey1 = response.journeys.add() journey1.arrival_date_time = str_to_time_stamp("20140422T0800") journey1.duration = 25 * 60 journey1.nb_transfers = 1 journey1.sections.add() journey1.sections.add() journey1.sections.add() journey1.sections.add() journey1.sections.add() journey1.sections[0].type = response_pb2.PUBLIC_TRANSPORT journey1.sections[0].duration = 5 * 60 journey1.sections[1].type = response_pb2.TRANSFER journey1.sections[1].duration = 3 * 60 journey1.sections[2].type = response_pb2.WAITING journey1.sections[2].duration = 2 * 60 journey1.sections[3].type = response_pb2.PUBLIC_TRANSPORT journey1.sections[3].duration = 15 * 60 journey1.sections[4].type = response_pb2.STREET_NETWORK journey1.sections[4].duration = 10 * 60 journey2 = response.journeys.add() journey2.arrival_date_time = str_to_time_stamp("20140422T0800") journey2.duration = 25 * 60 journey2.nb_transfers = 1 journey2.sections.add() journey2.sections.add() journey2.sections.add() journey2.sections.add() journey2.sections[0].type = response_pb2.PUBLIC_TRANSPORT journey2.sections[0].duration = 5 * 60 journey2.sections[1].type = response_pb2.TRANSFER journey2.sections[1].duration = 3 * 60 journey2.sections[2].type = response_pb2.WAITING 
journey2.sections[2].duration = 2 * 60 journey2.sections[3].type = response_pb2.PUBLIC_TRANSPORT journey2.sections[3].duration = 15 * 60 sort_journeys(response, 'arrival_time', True) assert response.journeys[0].arrival_date_time == str_to_time_stamp("20140422T0800") assert response.journeys[1].arrival_date_time == str_to_time_stamp("20140422T0800") assert response.journeys[0].duration == 25 * 60 assert response.journeys[1].duration == 25 * 60 assert response.journeys[0].nb_transfers == 1 assert response.journeys[1].nb_transfers == 1 # We want to have journey2 in first, this is the one with 4 sections assert len(response.journeys[0].sections) == 4 assert len(response.journeys[1].sections) == 5 def create_dummy_journey(): journey = response_pb2.Journey() journey.arrival_date_time = str_to_time_stamp("20140422T0800") journey.duration = 25 * 60 journey.nb_transfers = 1 s = journey.sections.add() s.type = response_pb2.PUBLIC_TRANSPORT s.origin.uri = "stop_point_1" s.destination.uri = "stop_point_2" s.vehicle_journey.uri = "vj_toto" s.duration = 5 * 60 s = journey.sections.add() s.type = response_pb2.TRANSFER s.duration = 3 * 60 s = journey.sections.add() s.type = response_pb2.WAITING s.duration = 2 * 60 s = journey.sections.add() s.type = response_pb2.PUBLIC_TRANSPORT s.origin.uri = "stop_point_3" s.destination.uri = "stop_point_4" s.duration = 15 * 60 s = journey.sections.add() s.type = response_pb2.STREET_NETWORK s.duration = 10 * 60 return journey def journey_pairs_gen(list_responses): return itertools.combinations(jf.get_qualified_journeys(list_responses), 2) def test_get_qualified_journeys(): responses = [response_pb2.Response()] journey1 = responses[0].journeys.add() journey1.tags.append("a_tag") journey2 = responses[0].journeys.add() journey2.tags.append("to_delete") journey3 = responses[0].journeys.add() journey3.tags.append("another_tag") journey3.tags.append("to_delete") for qualified in jf.get_qualified_journeys(responses): assert qualified.tags[0] == 'a_tag' def test_num_qualifed_journeys(): responses = [response_pb2.Response()] journey1 = responses[0].journeys.add() journey1.tags.append("a_tag") journey2 = responses[0].journeys.add() journey2.tags.append("to_delete") journey3 = responses[0].journeys.add() journey3.tags.append("another_tag") assert jf.nb_qualifed_journeys(responses) == 2 def test_similar_journeys(): responses = [response_pb2.Response()] journey1 = responses[0].journeys.add() journey1.sections.add() journey1.duration = 42 journey1.sections[0].uris.vehicle_journey = 'bob' journey2 = responses[0].journeys.add() journey2.sections.add() journey2.duration = 43 journey2.sections[0].uris.vehicle_journey = 'bob' jf.filter_similar_vj_journeys(list(journey_pairs_gen(responses)), {}) assert len(list(jf.get_qualified_journeys(responses))) == 1 def test_similar_journeys_test2(): responses = [response_pb2.Response()] journey1 = responses[0].journeys.add() journey1.sections.add() journey1.duration = 42 journey1.sections[0].uris.vehicle_journey = 'bob' responses.append(response_pb2.Response()) journey2 = responses[-1].journeys.add() journey2.sections.add() journey2.duration = 43 journey2.sections[-1].uris.vehicle_journey = 'bob' jf.filter_similar_vj_journeys(list(journey_pairs_gen(responses)), {}) assert len(list(jf.get_qualified_journeys(responses))) == 1 def test_similar_journeys_test3(): responses = [response_pb2.Response()] journey1 = responses[0].journeys.add() journey1.sections.add() journey1.duration = 42 journey1.sections[0].uris.vehicle_journey = 'bob' 
responses.append(response_pb2.Response()) journey2 = responses[-1].journeys.add() journey2.sections.add() journey2.duration = 43 journey2.sections[-1].uris.vehicle_journey = 'bobette' jf.filter_similar_vj_journeys(list(journey_pairs_gen(responses)), {}) assert 'to_delete' not in journey1.tags assert 'to_delete' in journey2.tags def test_similar_journeys_different_transfer(): """ If 2 journeys take the same vjs but with a different number of sections, one should be filtered """ responses = [response_pb2.Response()] journey1 = responses[0].journeys.add() journey1.sections.add() journey1.duration = 42 journey1.sections[-1].uris.vehicle_journey = 'bob' journey1.sections.add() journey1.duration = 42 journey1.sections[-1].uris.vehicle_journey = 'bobette' responses.append(response_pb2.Response()) journey2 = responses[-1].journeys.add() journey2.sections.add() journey2.duration = 43 journey2.sections[-1].uris.vehicle_journey = 'bob' journey2.sections.add() journey2.duration = 43 journey2.sections[-1].type = response_pb2.TRANSFER journey2.sections.add() journey2.duration = 43 journey2.sections[-1].uris.vehicle_journey = 'bobette' jf.filter_similar_vj_journeys(journey_pairs_gen(responses), {}) assert 'to_delete' not in journey1.tags assert 'to_delete' in journey2.tags def test_similar_journeys_different_waiting_durations(): """ If 2 journeys take the same vj, same number of sections but with different waiting durations, filter one with smaller waiting duration """ responses = [response_pb2.Response()] journey1 = responses[0].journeys.add() journey1.duration = 600 journey1.sections.add() journey1.sections[-1].uris.vehicle_journey = 'bob' journey1.sections[-1].duration = 200 journey1.sections.add() journey1.sections[-1].type = response_pb2.TRANSFER journey1.sections[-1].duration = 50 journey1.sections.add() journey1.sections[-1].type = response_pb2.WAITING journey1.sections[-1].duration = 150 journey1.sections.add() journey1.sections[-1].uris.vehicle_journey = 'bobette' journey1.sections[-1].duration = 200 responses.append(response_pb2.Response()) journey2 = responses[-1].journeys.add() journey2.duration = 600 journey2.sections.add() journey2.sections[-1].uris.vehicle_journey = 'bob' journey2.sections[-1].duration = 200 journey2.sections.add() journey2.sections[-1].type = response_pb2.TRANSFER journey2.sections[-1].duration = 25 journey2.sections.add() journey2.sections[-1].type = response_pb2.WAITING journey2.sections[-1].duration = 175 journey2.sections.add() journey2.sections[-1].uris.vehicle_journey = 'bobette' journey2.sections[-1].duration = 200 jf.filter_similar_vj_journeys(journey_pairs_gen(responses), {}) assert 'to_delete' not in journey2.tags assert 'to_delete' in journey1.tags def test_similar_journeys_multi_trasfer_and_different_waiting_durations(): """ If 2 journeys take the same vj, same number of sections and several waitings with different waiting durations, for each journey find "min waiting duration" keep the journey which has larger "min waiting duration" """ responses = [response_pb2.Response()] journey1 = responses[0].journeys.add() journey1.duration = 1000 journey1.sections.add() journey1.sections[-1].uris.vehicle_journey = 'bob' journey1.sections[-1].duration = 200 journey1.sections.add() journey1.sections[-1].type = response_pb2.TRANSFER journey1.sections[-1].duration = 50 journey1.sections.add() journey1.sections[-1].type = response_pb2.WAITING journey1.sections[-1].duration = 150 journey1.sections.add() journey1.sections[-1].uris.vehicle_journey = 'bobette' 
journey1.sections[-1].duration = 200 journey1.sections.add() journey1.sections[-1].type = response_pb2.TRANSFER journey1.sections[-1].duration = 10 journey1.sections.add() journey1.sections[-1].type = response_pb2.WAITING journey1.sections[-1].duration = 190 journey1.sections.add() journey1.sections[-1].uris.vehicle_journey = 'boby' journey1.sections[-1].duration = 200 responses.append(response_pb2.Response()) journey2 = responses[-1].journeys.add() journey2.duration = 1000 journey2.sections.add() journey2.sections[-1].uris.vehicle_journey = 'bob' journey2.sections[-1].duration = 200 journey2.sections.add() journey2.sections[-1].type = response_pb2.TRANSFER journey2.sections[-1].duration = 20 journey2.sections.add() journey2.sections[-1].type = response_pb2.WAITING journey2.sections[-1].duration = 180 journey2.sections.add() journey2.sections[-1].uris.vehicle_journey = 'bobette' journey2.sections[-1].duration = 200 journey2.sections.add() journey2.sections[-1].type = response_pb2.TRANSFER journey2.sections[-1].duration = 100 journey2.sections.add() journey2.sections[-1].type = response_pb2.WAITING journey2.sections[-1].duration = 100 journey2.sections.add() journey2.sections[-1].uris.vehicle_journey = 'boby' journey2.sections[-1].duration = 200 jf.filter_similar_vj_journeys(list(journey_pairs_gen(responses)), {}) assert 'to_delete' not in journey1.tags assert 'to_delete' in journey2.tags def test_similar_journeys_with_and_without_waiting_section(): """ If 2 journeys take the same vj, one with a waiting section and another without, filtere one with transfer but without waiting section """ responses = [response_pb2.Response()] journey1 = responses[0].journeys.add() journey1.duration = 600 journey1.sections.add() journey1.sections[-1].uris.vehicle_journey = 'bob' journey1.sections[-1].duration = 200 journey1.sections.add() journey1.sections[-1].type = response_pb2.TRANSFER journey1.sections[-1].duration = 50 journey1.sections.add() journey1.sections[-1].type = response_pb2.WAITING journey1.sections[-1].duration = 150 journey1.sections.add() journey1.sections[-1].uris.vehicle_journey = 'bobette' journey1.sections[-1].duration = 200 responses.append(response_pb2.Response()) journey2 = responses[-1].journeys.add() journey2.duration = 600 journey2.sections.add() journey2.sections[-1].uris.vehicle_journey = 'bob' journey2.sections[-1].duration = 200 journey2.sections.add() journey2.sections[-1].type = response_pb2.TRANSFER journey2.sections[-1].duration = 200 journey2.sections.add() journey2.sections[-1].uris.vehicle_journey = 'bobette' journey2.sections[-1].duration = 200 jf.filter_similar_vj_journeys(list(journey_pairs_gen(responses)), {}) assert 'to_delete' not in journey1.tags assert 'to_delete' in journey2.tags def test_similar_journeys_walking_bike(): """ If we have 2 direct path, one walking and one by bike, we should not filter any journey """ responses = [response_pb2.Response()] journey1 = responses[0].journeys.add() journey1.duration = 42 journey1.sections.add() journey1.sections[-1].type = response_pb2.STREET_NETWORK journey1.sections[-1].street_network.mode = response_pb2.Walking responses.append(response_pb2.Response()) journey2 = responses[-1].journeys.add() journey2.duration = 42 journey2.sections.add() journey2.sections[-1].type = response_pb2.STREET_NETWORK journey2.sections[-1].street_network.mode = response_pb2.Bike jf.filter_similar_vj_journeys(list(journey_pairs_gen(responses)), {}) assert 'to_delete' not in journey1.tags assert 'to_delete' not in journey2.tags def 
test_similar_journeys_car_park(): """ We have to consider a journey with CAR / PARK / WALK to be equal to CAR / PARK """ responses = [response_pb2.Response()] journey1 = response_pb2.Journey() journey1.sections.add() journey1.sections[-1].type = response_pb2.STREET_NETWORK journey1.sections[-1].street_network.mode = response_pb2.Car journey1.sections.add() journey1.sections[-1].type = response_pb2.PARK journey1.sections.add() journey1.sections[-1].type = response_pb2.STREET_NETWORK journey1.sections[-1].street_network.mode = response_pb2.Walking journey2 = response_pb2.Journey() journey2.sections.add() journey2.sections[-1].type = response_pb2.STREET_NETWORK journey2.sections[-1].street_network.mode = response_pb2.Car journey2.sections.add() journey2.sections[-1].type = response_pb2.PARK assert jf.compare(journey1, journey2, jf.similar_journeys_vj_generator) def test_similar_journeys_bss_park(): """ We have to consider a journey with WALK / GET A BIKE / BSS to be equals to GET A BIKE / BSS """ responses = [response_pb2.Response()] journey1 = response_pb2.Journey() journey1.sections.add() journey1.sections[-1].type = response_pb2.STREET_NETWORK journey1.sections[-1].street_network.mode = response_pb2.Walking journey1.sections.add() journey1.sections[-1].type = response_pb2.BSS_RENT journey1.sections.add() journey1.sections[-1].type = response_pb2.STREET_NETWORK journey1.sections[-1].street_network.mode = response_pb2.Bss journey2 = response_pb2.Journey() journey2.sections.add() journey2.sections[-1].type = response_pb2.BSS_RENT journey2.sections.add() journey2.sections[-1].type = response_pb2.STREET_NETWORK journey2.sections[-1].street_network.mode = response_pb2.Bss assert jf.compare(journey1, journey2, jf.similar_journeys_vj_generator) def test_similar_journeys_crowfly_rs(): """ We have to consider a journey with CROWFLY WALK to be different than CROWFLY Ridesharing """ journey1 = response_pb2.Journey() journey1.sections.add() journey1.sections[-1].type = response_pb2.CROW_FLY journey1.sections[-1].street_network.mode = response_pb2.Walking journey2 = response_pb2.Journey() journey2.sections.add() journey2.sections[-1].type = response_pb2.CROW_FLY journey2.sections[-1].street_network.mode = response_pb2.Ridesharing assert not jf.compare(journey1, journey2, jf.similar_journeys_vj_generator) def test_departure_sort(): """ we want to sort by departure hour, then by duration """ j1 = response_pb2.Journey() j1.departure_date_time = str_to_time_stamp('20151005T071000') j1.arrival_date_time = str_to_time_stamp('20151005T081900') j1.duration = j1.arrival_date_time - j1.departure_date_time j1.nb_transfers = 0 j2 = response_pb2.Journey() j2.departure_date_time = str_to_time_stamp('20151005T072200') j2.arrival_date_time = str_to_time_stamp('20151005T083500') j2.duration = j2.arrival_date_time - j2.departure_date_time j2.nb_transfers = 0 j3 = response_pb2.Journey() j3.departure_date_time = str_to_time_stamp('20151005T074500') j3.arrival_date_time = str_to_time_stamp('20151005T091200') j3.duration = j3.arrival_date_time - j3.departure_date_time j3.nb_transfers = 0 j4 = response_pb2.Journey() j4.departure_date_time = str_to_time_stamp('20151005T074500') j4.arrival_date_time = str_to_time_stamp('20151005T091100') j4.duration = j4.arrival_date_time - j4.departure_date_time j4.nb_transfers = 0 j5 = response_pb2.Journey() j5.departure_date_time = str_to_time_stamp('20151005T074500') j5.arrival_date_time = str_to_time_stamp('20151005T090800') j5.duration = j5.arrival_date_time - j5.departure_date_time 
j5.nb_transfers = 0 result = [j1, j2, j3, j4, j5] random.shuffle(result) comparator = DepartureJourneySorter(True) result.sort(key=functools.cmp_to_key(comparator)) assert result[0] == j1 assert result[1] == j2 assert result[2] == j5 assert result[3] == j4 assert result[4] == j3 def test_arrival_sort(): """ we want to sort by arrival hour, then by duration """ j1 = response_pb2.Journey() j1.departure_date_time = str_to_time_stamp('20151005T071000') j1.arrival_date_time = str_to_time_stamp('20151005T081900') j1.duration = j1.arrival_date_time - j1.departure_date_time j1.nb_transfers = 0 j2 = response_pb2.Journey() j2.departure_date_time = str_to_time_stamp('20151005T072200') j2.arrival_date_time = str_to_time_stamp('20151005T083500') j2.duration = j2.arrival_date_time - j2.departure_date_time j2.nb_transfers = 0 j3 = response_pb2.Journey() j3.departure_date_time = str_to_time_stamp('20151005T074500') j3.arrival_date_time = str_to_time_stamp('20151005T091200') j3.duration = j3.arrival_date_time - j3.departure_date_time j3.nb_transfers = 0 j4 = response_pb2.Journey() j4.departure_date_time = str_to_time_stamp('20151005T075000') j4.arrival_date_time = str_to_time_stamp('20151005T091200') j4.duration = j4.arrival_date_time - j4.departure_date_time j4.nb_transfers = 0 j5 = response_pb2.Journey() j5.departure_date_time = str_to_time_stamp('20151005T075500') j5.arrival_date_time = str_to_time_stamp('20151005T091200') j5.duration = j5.arrival_date_time - j5.departure_date_time j5.nb_transfers = 0 result = [j1, j2, j3, j4, j5] random.shuffle(result) comparator = ArrivalJourneySorter(True) result.sort(key=functools.cmp_to_key(comparator)) assert result[0] == j1 assert result[1] == j2 assert result[2] == j5 assert result[3] == j4 assert result[4] == j3 def test_heavy_journey_walking(): """ we don't filter any journey with walking """ journey = response_pb2.Journey() journey.sections.add() journey.sections[-1].type = response_pb2.STREET_NETWORK journey.sections[-1].street_network.mode = response_pb2.Walking journey.sections[-1].duration = 5 f = jf.FilterTooShortHeavyJourneys(min_bike=10, min_car=20) assert f.filter_func(journey) def test_heavy_journey_bike(): """ the first time the duration of the biking section is superior to the min value, so we keep the journey on the second test the duration is inferior to the min, so we delete the journey """ journey = response_pb2.Journey() journey.sections.add() journey.sections[-1].type = response_pb2.STREET_NETWORK journey.sections[-1].street_network.mode = response_pb2.Bike journey.durations.bike = journey.sections[-1].duration = 15 f = jf.FilterTooShortHeavyJourneys(min_bike=10, min_car=20) assert f.filter_func(journey) journey.durations.bike = journey.sections[-1].duration = 5 f = jf.FilterTooShortHeavyJourneys(min_bike=10, min_car=20, orig_modes=['bike', 'walking']) assert not f.filter_func(journey) def test_filter_wrapper(): """ Testing that filter_wrapper is fine (see filter_wrapper doc) """ class LoveHateFilter(jf.SingleJourneyFilter): message = 'i_dont_like_you' def __init__(self, love=True): self.love = love def filter_func(self, journey): return self.love ref_journey = response_pb2.Journey() # first we test when debug-mode deactivated (each time both OK-filter and KO-filter) j = deepcopy(ref_journey) wrapped_f = jf.filter_wrapper(is_debug=False, filter_obj=LoveHateFilter(love=True)) assert wrapped_f(j) assert 'to_delete' not in j.tags assert 'deleted_because_i_dont_like_you' not in j.tags j = deepcopy(ref_journey) wrapped_f = 
jf.filter_wrapper(is_debug=False, filter_obj=LoveHateFilter(love=False)) assert not wrapped_f(j) assert 'to_delete' in j.tags assert 'deleted_because_i_dont_like_you' not in j.tags # test using without debug mode (should be deactivated) j = deepcopy(ref_journey) wrapped_f = jf.filter_wrapper(filter_obj=LoveHateFilter(love=True)) assert wrapped_f(j) assert 'to_delete' not in j.tags assert 'deleted_because_i_dont_like_you' not in j.tags j = deepcopy(ref_journey) wrapped_f = jf.filter_wrapper(filter_obj=LoveHateFilter(love=False)) assert not wrapped_f(j) assert 'to_delete' in j.tags assert 'deleted_because_i_dont_like_you' not in j.tags # test when debug-mode is activated j = deepcopy(ref_journey) wrapped_f = jf.filter_wrapper(is_debug=True, filter_obj=LoveHateFilter(love=True)) assert wrapped_f(j) assert 'to_delete' not in j.tags assert 'deleted_because_i_dont_like_you' not in j.tags j = deepcopy(ref_journey) wrapped_f = jf.filter_wrapper(is_debug=True, filter_obj=LoveHateFilter(love=False)) assert wrapped_f(j) assert 'to_delete' in j.tags assert 'deleted_because_i_dont_like_you' in j.tags def test_heavy_journey_car(): """ the first time the duration of the car section is superior to the min value, so we keep the journey on the second test the duration is inferior to the min, so we delete the journey """ journey = response_pb2.Journey() journey.sections.add() journey.sections[-1].type = response_pb2.STREET_NETWORK journey.sections[-1].street_network.mode = response_pb2.Car journey.durations.car = journey.sections[-1].duration = 25 f = jf.FilterTooShortHeavyJourneys(min_bike=10, min_car=20) assert f.filter_func(journey) journey.durations.car = journey.sections[-1].duration = 15 f = jf.FilterTooShortHeavyJourneys(min_bike=10, min_car=20, orig_modes=['bike', 'walking']) assert not f.filter_func(journey) def test_heavy_journey_taxi(): """ the first time the duration of the taxi section is superior to the min value, so we keep the journey on the second test the duration is inferior to the min, so we delete the journey """ journey = response_pb2.Journey() journey.sections.add() journey.sections[-1].type = response_pb2.STREET_NETWORK journey.sections[-1].street_network.mode = response_pb2.Taxi journey.durations.taxi = journey.sections[-1].duration = 25 f = jf.FilterTooShortHeavyJourneys(min_bike=10, min_taxi=20) assert f.filter_func(journey) journey.durations.taxi = journey.sections[-1].duration = 15 f = jf.FilterTooShortHeavyJourneys(min_bike=10, min_taxi=20, orig_modes=['bike', 'walking']) assert not f.filter_func(journey) def test_heavy_journey_bss(): """ we should not remove any bss journey since it is already in concurrence with the walking """ journey = response_pb2.Journey() journey.sections.add() journey.sections[-1].type = response_pb2.STREET_NETWORK journey.sections[-1].street_network.mode = response_pb2.Walking journey.sections[-1].duration = 5 journey.sections.add() journey.sections[-1].type = response_pb2.BSS_RENT journey.sections[-1].duration = 5 journey.sections.add() journey.sections[-1].type = response_pb2.STREET_NETWORK journey.sections[-1].street_network.mode = response_pb2.Bike journey.sections[-1].duration = 5 journey.sections.add() journey.sections[-1].type = response_pb2.BSS_PUT_BACK journey.sections[-1].duration = 5 journey.sections.add() journey.sections[-1].type = response_pb2.STREET_NETWORK journey.sections[-1].street_network.mode = response_pb2.Walking journey.sections[-1].duration = 5 journey.durations.bike = 5 journey.durations.walking = 10 f = 
jf.FilterTooShortHeavyJourneys(min_bike=10, min_car=20) assert f.filter_func(journey) def test_activate_deactivate_min_bike(): """ A B C D *................*============================*.............* A: origin D: Destination A->B : Bike B->C : public transport C->D : Bike """ # case 1: request without origin_mode and destination_mode journey = response_pb2.Journey() journey.sections.add() journey.sections[-1].type = response_pb2.STREET_NETWORK journey.sections[-1].street_network.mode = response_pb2.Bike journey.sections[-1].duration = 5 journey.sections.add() journey.sections[-1].type = response_pb2.PUBLIC_TRANSPORT journey.sections[-1].street_network.mode = response_pb2.PUBLIC_TRANSPORT journey.sections[-1].duration = 35 journey.sections.add() journey.sections[-1].type = response_pb2.STREET_NETWORK journey.sections[-1].street_network.mode = response_pb2.Bike journey.sections[-1].duration = 7 journey.durations.bike = 12 f = jf.FilterTooShortHeavyJourneys(min_bike=10) assert f.filter_func(journey) # case 2: request without origin_mode journey.sections[-1].duration = 15 journey.durations.bike = 20 f = jf.FilterTooShortHeavyJourneys(min_bike=8, dest_modes=['bike', 'walking']) assert f.filter_func(journey) # case 3: request without destination_mode journey.sections[0].duration = 15 journey.sections[-1].duration = 5 journey.durations.bike = 20 f = jf.FilterTooShortHeavyJourneys(min_bike=8, orig_modes=['bike', 'walking']) assert f.filter_func(journey) # case 4: request without walking in origin_mode journey.sections[0].duration = 5 journey.sections[-1].duration = 15 journey.durations.bike = 20 f = jf.FilterTooShortHeavyJourneys(min_bike=8, orig_modes=['bike']) assert f.filter_func(journey) # case 5: request without walking in destination_mode journey.sections[0].duration = 15 journey.sections[-1].duration = 5 journey.durations.bike = 20 f = jf.FilterTooShortHeavyJourneys(min_bike=8, dest_modes=['bike']) assert f.filter_func(journey) # case 6: request with bike only in origin_mode destination_mode journey.sections[0].duration = 15 journey.sections[-1].duration = 14 journey.durations.bike = 29 f = jf.FilterTooShortHeavyJourneys(min_bike=17, orig_modes=['bike'], dest_modes=['bike']) assert f.filter_func(journey) # case 7: request with walking in destination_mode journey.sections[0].duration = 15 journey.sections[-1].duration = 5 journey.durations.bike = 20 f = jf.FilterTooShortHeavyJourneys(min_bike=8, dest_modes=['bike', 'walking']) assert not f.filter_func(journey) # case 8: request with walking in origin_mode journey.sections[0].duration = 5 journey.sections[-1].duration = 15 journey.durations.bike = 20 f = jf.FilterTooShortHeavyJourneys(min_bike=8, orig_modes=['bike', 'walking']) assert not f.filter_func(journey) # case 9: request with bike in origin_mode and bike, walking in destination_mode journey.sections[0].duration = 5 journey.sections[-1].duration = 7 journey.durations.bike = 12 f = jf.FilterTooShortHeavyJourneys(min_bike=8, orig_modes=['bike'], dest_modes=['bike', 'walking']) assert not f.filter_func(journey) def test_activate_deactivate_min_car(): """ A B C D *................*============================*.............* A: origin D: Destination A->B : car B->C : public transport C->D : car """ # case 1: request without origin_mode and destination_mode journey = response_pb2.Journey() journey.sections.add() journey.sections[-1].type = response_pb2.STREET_NETWORK journey.sections[-1].street_network.mode = response_pb2.Car journey.sections[-1].duration = 5 journey.sections.add() 
journey.sections[-1].type = response_pb2.PUBLIC_TRANSPORT journey.sections[-1].street_network.mode = response_pb2.PUBLIC_TRANSPORT journey.sections[-1].duration = 35 journey.sections.add() journey.sections[-1].type = response_pb2.STREET_NETWORK journey.sections[-1].street_network.mode = response_pb2.Car journey.sections[-1].duration = 7 journey.durations.car = 12 f = jf.FilterTooShortHeavyJourneys(min_car=10) assert f.filter_func(journey) # case 2: request without origin_mode journey.sections[-1].duration = 15 journey.durations.car = 20 f = jf.FilterTooShortHeavyJourneys(min_car=8, dest_modes=['car', 'walking']) assert f.filter_func(journey) # case 3: request without destination_mode journey.sections[0].duration = 15 journey.sections[-1].duration = 5 journey.durations.car = 20 f = jf.FilterTooShortHeavyJourneys(min_car=8, orig_modes=['car', 'walking']) assert f.filter_func(journey) # case 4: request without walking in origin_mode journey.sections[0].duration = 5 journey.sections[-1].duration = 15 journey.durations.car = 20 f = jf.FilterTooShortHeavyJourneys(min_car=8, orig_modes=['car']) assert f.filter_func(journey) # case 5: request without walking in destination_mode journey.sections[0].duration = 15 journey.sections[-1].duration = 5 journey.durations.car = 20 f = jf.FilterTooShortHeavyJourneys(min_car=8, dest_modes=['car']) assert f.filter_func(journey) # case 6: request with car only in origin_mode destination_mode journey.sections[0].duration = 15 journey.sections[-1].duration = 14 journey.durations.car = 29 f = jf.FilterTooShortHeavyJourneys(min_car=17, orig_modes=['car'], dest_modes=['car']) assert f.filter_func(journey) # case 7: request with walking in destination_mode journey.sections[0].duration = 15 journey.sections[-1].duration = 5 journey.durations.car = 20 f = jf.FilterTooShortHeavyJourneys(min_car=8, dest_modes=['car', 'walking']) assert not f.filter_func(journey) # case 8: request with walking in origin_mode journey.sections[0].duration = 5 journey.sections[-1].duration = 15 journey.durations.car = 20 f = jf.FilterTooShortHeavyJourneys(min_car=8, orig_modes=['car', 'walking']) assert not f.filter_func(journey) # case 9: request with bike in origin_mode and bike, walking in destination_mode journey.sections[0].duration = 5 journey.sections[-1].duration = 7 journey.durations.car = 12 f = jf.FilterTooShortHeavyJourneys(min_car=8, orig_modes=['car'], dest_modes=['car', 'walking']) assert not f.filter_func(journey) def test_activate_deactivate_min_taxi(): """ A B C D *................*============================*.............* A: origin D: Destination A->B : taxi B->C : public transport C->D : taxi """ # case 1: request without origin_mode and destination_mode journey = response_pb2.Journey() journey.sections.add() journey.sections[-1].type = response_pb2.STREET_NETWORK journey.sections[-1].street_network.mode = response_pb2.Taxi journey.sections[-1].duration = 5 journey.sections.add() journey.sections[-1].type = response_pb2.PUBLIC_TRANSPORT journey.sections[-1].street_network.mode = response_pb2.PUBLIC_TRANSPORT journey.sections[-1].duration = 35 journey.sections.add() journey.sections[-1].type = response_pb2.STREET_NETWORK journey.sections[-1].street_network.mode = response_pb2.Taxi journey.sections[-1].duration = 7 journey.durations.taxi = 12 f = jf.FilterTooShortHeavyJourneys(min_taxi=10) assert f.filter_func(journey) # case 2: request without origin_mode journey.sections[-1].duration = 15 journey.durations.taxi = 20 f = jf.FilterTooShortHeavyJourneys(min_taxi=8, 
dest_modes=['taxi', 'walking']) assert f.filter_func(journey) # case 3: request without destination_mode journey.sections[0].duration = 15 journey.sections[-1].duration = 5 journey.durations.taxi = 20 f = jf.FilterTooShortHeavyJourneys(min_taxi=8, orig_modes=['taxi', 'walking']) assert f.filter_func(journey) # case 4: request without walking in origin_mode journey.sections[0].duration = 5 journey.sections[-1].duration = 15 journey.durations.taxi = 20 f = jf.FilterTooShortHeavyJourneys(min_taxi=8, orig_modes=['taxi']) assert f.filter_func(journey) # case 5: request without walking in destination_mode journey.sections[0].duration = 15 journey.sections[-1].duration = 5 journey.durations.taxi = 20 f = jf.FilterTooShortHeavyJourneys(min_taxi=8, dest_modes=['taxi']) assert f.filter_func(journey) # case 6: request with taxi only in origin_mode destination_mode journey.sections[0].duration = 15 journey.sections[-1].duration = 14 journey.durations.taxi = 29 f = jf.FilterTooShortHeavyJourneys(min_taxi=17, orig_modes=['taxi'], dest_modes=['taxi']) assert f.filter_func(journey) # case 7: request with walking in destination_mode journey.sections[0].duration = 15 journey.sections[-1].duration = 5 journey.durations.taxi = 20 f = jf.FilterTooShortHeavyJourneys(min_taxi=8, dest_modes=['taxi', 'walking']) assert not f.filter_func(journey) # case 8: request with walking in origin_mode journey.sections[0].duration = 5 journey.sections[-1].duration = 15 journey.durations.taxi = 20 f = jf.FilterTooShortHeavyJourneys(min_taxi=8, orig_modes=['taxi', 'walking']) assert not f.filter_func(journey) # case 9: request with bike in origin_mode and bike, walking in destination_mode journey.sections[0].duration = 5 journey.sections[-1].duration = 7 journey.durations.taxi = 12 f = jf.FilterTooShortHeavyJourneys(min_taxi=8, orig_modes=['taxi'], dest_modes=['taxi', 'walking']) assert not f.filter_func(journey) def test_filter_direct_path_mode_car(): # is_dp and not is_in_direct_path_mode_list journey = response_pb2.Journey() journey.tags.append("car") journey.tags.append("non_pt") f = jf.FilterDirectPathMode(["bike"]) assert not f.filter_func(journey) # is_dp and is_in_direct_path_mode_list journey = response_pb2.Journey() journey.tags.append("car") journey.tags.append("non_pt") f = jf.FilterDirectPathMode(["car"]) assert f.filter_func(journey) # is_dp and is_in_direct_path_mode_list journey = response_pb2.Journey() journey.tags.append("car") journey.tags.append("non_pt") f = jf.FilterDirectPathMode(["taxi", "surf", "car", "bike"]) assert f.filter_func(journey) # not is_dp and not is_in_direct_path_mode_list journey = response_pb2.Journey() journey.tags.append("car") f = jf.FilterDirectPathMode(["bike"]) assert f.filter_func(journey) # not is_dp and not is_in_direct_path_mode_list journey = response_pb2.Journey() journey.tags.append("car") f = jf.FilterDirectPathMode(["car"]) assert f.filter_func(journey) def test_heavy_journey_ridesharing(): """ the first time the duration of the ridesharing section is superior to the min value, so we keep the journey on the second test the duration is inferior to the min, so we delete the journey """ journey = response_pb2.Journey() journey.sections.add() journey.sections[-1].type = response_pb2.STREET_NETWORK journey.sections[-1].street_network.mode = response_pb2.Ridesharing journey.durations.ridesharing = journey.sections[-1].duration = 25 # Ridesharing duration is superior to min_ridesharing value so we have ridesharing section f = jf.FilterTooShortHeavyJourneys(min_ridesharing=20, 
                                           orig_modes=['ridesharing', 'walking'])
    assert f.filter_func(journey)

    # Ridesharing duration is inferior to the min_ridesharing value, but there is no walking option.
    # In this case we keep the journey with its ridesharing section.
    journey.durations.ridesharing = journey.sections[-1].duration = 15
    f = jf.FilterTooShortHeavyJourneys(min_ridesharing=20, orig_modes=['ridesharing'])
    assert f.filter_func(journey)

    # Ridesharing duration is inferior to the min_ridesharing value and there is also a walking option.
    # In this case we reject the journey with the ridesharing section.
    journey.durations.ridesharing = journey.sections[-1].duration = 15
    f = jf.FilterTooShortHeavyJourneys(min_ridesharing=20, orig_modes=['ridesharing', 'walking'])
    assert not f.filter_func(journey)
agpl-3.0
frankito9999/Ecommerce-OAuth-Stripe-Bitcoin
node_modules/laravel-elixir/node_modules/gulp-sass/node_modules/node-sass/node_modules/pangyp/gyp/pylib/gyp/MSVSToolFile.py
2736
1804
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Visual Studio project reader/writer."""

import gyp.common
import gyp.easy_xml as easy_xml


class Writer(object):
  """Visual Studio XML tool file writer."""

  def __init__(self, tool_file_path, name):
    """Initializes the tool file.

    Args:
      tool_file_path: Path to the tool file.
      name: Name of the tool file.
    """
    self.tool_file_path = tool_file_path
    self.name = name
    self.rules_section = ['Rules']

  def AddCustomBuildRule(self, name, cmd, description,
                         additional_dependencies, outputs, extensions):
    """Adds a rule to the tool file.

    Args:
      name: Name of the rule.
      description: Description of the rule.
      cmd: Command line of the rule.
      additional_dependencies: other files which may trigger the rule.
      outputs: outputs of the rule.
      extensions: extensions handled by the rule.
    """
    rule = ['CustomBuildRule',
            {'Name': name,
             'ExecutionDescription': description,
             'CommandLine': cmd,
             'Outputs': ';'.join(outputs),
             'FileExtensions': ';'.join(extensions),
             'AdditionalDependencies': ';'.join(additional_dependencies)
            }]
    self.rules_section.append(rule)

  def WriteIfChanged(self):
    """Writes the tool file."""
    content = ['VisualStudioToolFile',
               {'Version': '8.00',
                'Name': self.name
               },
               self.rules_section
              ]
    easy_xml.WriteXmlIfChanged(content, self.tool_file_path,
                               encoding="Windows-1252")
mit
jatinmistry13/pattern
pattern/web/pdf/pdfdevice.py
56
5319
#!/usr/bin/env python2 import sys from utils import mult_matrix, translate_matrix from utils import enc, bbox2str from pdffont import PDFUnicodeNotDefined ## PDFDevice ## class PDFDevice(object): debug = 0 def __init__(self, rsrcmgr): self.rsrcmgr = rsrcmgr self.ctm = None return def __repr__(self): return '<PDFDevice>' def close(self): return def set_ctm(self, ctm): self.ctm = ctm return def begin_tag(self, tag, props=None): return def end_tag(self): return def do_tag(self, tag, props=None): return def begin_page(self, page, ctm): return def end_page(self, page): return def begin_figure(self, name, bbox, matrix): return def end_figure(self, name): return def paint_path(self, graphicstate, stroke, fill, evenodd, path): return def render_image(self, name, stream): return def render_string(self, textstate, seq): return ## PDFTextDevice ## class PDFTextDevice(PDFDevice): def render_string(self, textstate, seq): matrix = mult_matrix(textstate.matrix, self.ctm) font = textstate.font fontsize = textstate.fontsize scaling = textstate.scaling * .01 charspace = textstate.charspace * scaling wordspace = textstate.wordspace * scaling rise = textstate.rise if font.is_multibyte(): wordspace = 0 dxscale = .001 * fontsize * scaling if font.is_vertical(): textstate.linematrix = self.render_string_vertical( seq, matrix, textstate.linematrix, font, fontsize, scaling, charspace, wordspace, rise, dxscale) else: textstate.linematrix = self.render_string_horizontal( seq, matrix, textstate.linematrix, font, fontsize, scaling, charspace, wordspace, rise, dxscale) return def render_string_horizontal(self, seq, matrix, (x,y), font, fontsize, scaling, charspace, wordspace, rise, dxscale): needcharspace = False for obj in seq: if isinstance(obj, int) or isinstance(obj, float): x -= obj*dxscale needcharspace = True else: for cid in font.decode(obj): if needcharspace: x += charspace x += self.render_char(translate_matrix(matrix, (x,y)), font, fontsize, scaling, rise, cid) if cid == 32 and wordspace: x += wordspace needcharspace = True return (x, y) def render_string_vertical(self, seq, matrix, (x,y), font, fontsize, scaling, charspace, wordspace, rise, dxscale): needcharspace = False for obj in seq: if isinstance(obj, int) or isinstance(obj, float): y -= obj*dxscale needcharspace = True else: for cid in font.decode(obj): if needcharspace: y += charspace y += self.render_char(translate_matrix(matrix, (x,y)), font, fontsize, scaling, rise, cid) if cid == 32 and wordspace: y += wordspace needcharspace = True return (x, y) def render_char(self, matrix, font, fontsize, scaling, rise, cid): return 0 ## TagExtractor ## class TagExtractor(PDFDevice): def __init__(self, rsrcmgr, outfp, codec='utf-8', debug=0): PDFDevice.__init__(self, rsrcmgr) self.outfp = outfp self.codec = codec self.debug = debug self.pageno = 0 self._stack = [] return def render_string(self, textstate, seq): font = textstate.font text = '' for obj in seq: if not isinstance(obj, str): continue chars = font.decode(obj) for cid in chars: try: char = font.to_unichr(cid) text += char except PDFUnicodeNotDefined: pass self.outfp.write(enc(text, self.codec)) return def begin_page(self, page, ctm): self.outfp.write('<page id="%s" bbox="%s" rotate="%d">' % (self.pageno, bbox2str(page.mediabox), page.rotate)) return def end_page(self, page): self.outfp.write('</page>\n') self.pageno += 1 return def begin_tag(self, tag, props=None): s = '' if isinstance(props, dict): s = ''.join( ' %s="%s"' % (enc(k), enc(str(v))) for (k,v) in sorted(props.iteritems()) ) 
self.outfp.write('<%s%s>' % (enc(tag.name), s)) self._stack.append(tag) return def end_tag(self): assert self._stack tag = self._stack.pop(-1) self.outfp.write('</%s>' % enc(tag.name)) return def do_tag(self, tag, props=None): self.begin_tag(tag, props) self._stack.pop(-1) return
bsd-3-clause
ami/lob-python
lob/api_requestor.py
1
2714
import requests
import lob
import json
import resource
from lob import error
from version import VERSION


def _is_file_like(obj):
    """
    Checks if an object is file-like enough to be sent to requests.

    In particular, file, StringIO and cStringIO objects are file-like.

    Refs http://stackoverflow.com/questions/3450857/python-determining-if-an-object-is-file-like
    """
    return hasattr(obj, 'read') and hasattr(obj, 'seek')


class APIRequestor(object):
    def __init__(self, key=None):
        self.api_key = key or lob.api_key

    def parse_response(self, resp):
        payload = json.loads(resp.content)
        if resp.status_code == 200:
            return payload
        elif resp.status_code == 401:
            raise error.AuthenticationError(payload['errors'][0]['message'],
                                            resp.content, resp.status_code, resp)
        elif resp.status_code in [404, 422]:
            raise error.InvalidRequestError(payload['errors'][0]['message'],
                                            resp.content, resp.status_code, resp)
        else:  # pragma: no cover
            raise error.APIError(payload['errors'][0]['message'], resp.content,
                                 resp.status_code, resp)  # pragma: no cover

    def request(self, method, url, params=None):
        headers = {
            'User-Agent': 'Lob/v1 PythonBindings/%s' % VERSION
        }

        if hasattr(lob, 'api_version'):
            headers['Lob-Version'] = lob.api_version

        if method == 'get':
            return self.parse_response(
                requests.get(lob.api_base + url, auth=(self.api_key, ''),
                             params=params, headers=headers)
            )
        elif method == 'delete':
            return self.parse_response(
                requests.delete(lob.api_base + url, auth=(self.api_key, ''),
                                headers=headers)
            )
        elif method == 'post':
            data = {}
            files = params.pop('files', {})
            explodedParams = {}

            for k, v in params.iteritems():
                if isinstance(v, dict) and not isinstance(v, resource.LobObject):
                    for k2, v2 in v.iteritems():
                        explodedParams[k + '[' + k2 + ']'] = v2
                else:
                    explodedParams[k] = v

            for k, v in explodedParams.iteritems():
                if _is_file_like(v):
                    files[k] = v
                else:
                    if isinstance(v, resource.LobObject):
                        data[k] = v.id
                    else:
                        data[k] = v

            return self.parse_response(
                requests.post(lob.api_base + url, auth=(self.api_key, ''),
                              data=data, files=files, headers=headers)
            )
mit
jupierce/openshift-tools
openshift/installer/vendored/openshift-ansible-3.4.40/lookup_plugins/oo_option.py
37
2602
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
# vim: expandtab:tabstop=4:shiftwidth=4
'''
oo_option lookup plugin for openshift-ansible

Usage:

    - debug:
        msg: "{{ lookup('oo_option', '<key>') | default('<default_value>', True) }}"

This returns, by order of priority:

    * if it exists, the `cli_<key>` ansible variable. This variable is set by
      `bin/cluster --option <key>=<value> …`
    * if it exists, the environment variable named `<key>`
    * if none of the above conditions are met, empty string is returned
'''

import os

# pylint: disable=no-name-in-module,import-error,unused-argument,unused-variable,super-init-not-called,too-few-public-methods,missing-docstring
try:
    # ansible-2.0
    from ansible.plugins.lookup import LookupBase
except ImportError:
    # ansible-1.9.x
    class LookupBase(object):
        def __init__(self, basedir=None, runner=None, **kwargs):
            self.runner = runner
            self.basedir = self.runner.basedir

        def get_basedir(self, variables):
            return self.basedir


# Reason: disable too-few-public-methods because the `run` method is the only
#     one required by the Ansible API
# Status: permanently disabled
# pylint: disable=too-few-public-methods
class LookupModule(LookupBase):
    ''' oo_option lookup plugin main class '''

    # Reason: disable unused-argument because Ansible is calling us with many
    #     parameters we are not interested in.
    #     The lookup plugins of Ansible have this kwargs “catch-all” parameter
    #     which is not used
    # Status: permanently disabled unless Ansible API evolves
    # pylint: disable=unused-argument
    def __init__(self, basedir=None, **kwargs):
        ''' Constructor '''
        self.basedir = basedir

    # Reason: disable unused-argument because Ansible is calling us with many
    #     parameters we are not interested in.
    #     The lookup plugins of Ansible have this kwargs “catch-all” parameter
    #     which is not used
    # Status: permanently disabled unless Ansible API evolves
    # pylint: disable=unused-argument
    def run(self, terms, variables, **kwargs):
        ''' Main execution path '''
        ret = []
        for term in terms:
            option_name = term.split()[0]
            cli_key = 'cli_' + option_name
            if 'vars' in variables and cli_key in variables['vars']:
                ret.append(variables['vars'][cli_key])
            elif option_name in os.environ:
                ret.append(os.environ[option_name])
            else:
                ret.append('')
        return ret
apache-2.0
igemsoftware/SYSU-Software2013
project/Python27_32/Lib/tabnanny.py
394
11336
#! /usr/bin/env python """The Tab Nanny despises ambiguous indentation. She knows no mercy. tabnanny -- Detection of ambiguous indentation For the time being this module is intended to be called as a script. However it is possible to import it into an IDE and use the function check() described below. Warning: The API provided by this module is likely to change in future releases; such changes may not be backward compatible. """ # Released to the public domain, by Tim Peters, 15 April 1998. # XXX Note: this is now a standard library module. # XXX The API needs to undergo changes however; the current code is too # XXX script-like. This will be addressed later. __version__ = "6" import os import sys import getopt import tokenize if not hasattr(tokenize, 'NL'): raise ValueError("tokenize.NL doesn't exist -- tokenize module too old") __all__ = ["check", "NannyNag", "process_tokens"] verbose = 0 filename_only = 0 def errprint(*args): sep = "" for arg in args: sys.stderr.write(sep + str(arg)) sep = " " sys.stderr.write("\n") def main(): global verbose, filename_only try: opts, args = getopt.getopt(sys.argv[1:], "qv") except getopt.error, msg: errprint(msg) return for o, a in opts: if o == '-q': filename_only = filename_only + 1 if o == '-v': verbose = verbose + 1 if not args: errprint("Usage:", sys.argv[0], "[-v] file_or_directory ...") return for arg in args: check(arg) class NannyNag(Exception): """ Raised by tokeneater() if detecting an ambiguous indent. Captured and handled in check(). """ def __init__(self, lineno, msg, line): self.lineno, self.msg, self.line = lineno, msg, line def get_lineno(self): return self.lineno def get_msg(self): return self.msg def get_line(self): return self.line def check(file): """check(file_or_dir) If file_or_dir is a directory and not a symbolic link, then recursively descend the directory tree named by file_or_dir, checking all .py files along the way. If file_or_dir is an ordinary Python source file, it is checked for whitespace related problems. The diagnostic messages are written to standard output using the print statement. """ if os.path.isdir(file) and not os.path.islink(file): if verbose: print "%r: listing directory" % (file,) names = os.listdir(file) for name in names: fullname = os.path.join(file, name) if (os.path.isdir(fullname) and not os.path.islink(fullname) or os.path.normcase(name[-3:]) == ".py"): check(fullname) return try: f = open(file) except IOError, msg: errprint("%r: I/O Error: %s" % (file, msg)) return if verbose > 1: print "checking %r ..." % file try: process_tokens(tokenize.generate_tokens(f.readline)) except tokenize.TokenError, msg: errprint("%r: Token Error: %s" % (file, msg)) return except IndentationError, msg: errprint("%r: Indentation Error: %s" % (file, msg)) return except NannyNag, nag: badline = nag.get_lineno() line = nag.get_line() if verbose: print "%r: *** Line %d: trouble in tab city! ***" % (file, badline) print "offending line: %r" % (line,) print nag.get_msg() else: if ' ' in file: file = '"' + file + '"' if filename_only: print file else: print file, badline, repr(line) return if verbose: print "%r: Clean bill of health." 
% (file,) class Whitespace: # the characters used for space and tab S, T = ' \t' # members: # raw # the original string # n # the number of leading whitespace characters in raw # nt # the number of tabs in raw[:n] # norm # the normal form as a pair (count, trailing), where: # count # a tuple such that raw[:n] contains count[i] # instances of S * i + T # trailing # the number of trailing spaces in raw[:n] # It's A Theorem that m.indent_level(t) == # n.indent_level(t) for all t >= 1 iff m.norm == n.norm. # is_simple # true iff raw[:n] is of the form (T*)(S*) def __init__(self, ws): self.raw = ws S, T = Whitespace.S, Whitespace.T count = [] b = n = nt = 0 for ch in self.raw: if ch == S: n = n + 1 b = b + 1 elif ch == T: n = n + 1 nt = nt + 1 if b >= len(count): count = count + [0] * (b - len(count) + 1) count[b] = count[b] + 1 b = 0 else: break self.n = n self.nt = nt self.norm = tuple(count), b self.is_simple = len(count) <= 1 # return length of longest contiguous run of spaces (whether or not # preceding a tab) def longest_run_of_spaces(self): count, trailing = self.norm return max(len(count)-1, trailing) def indent_level(self, tabsize): # count, il = self.norm # for i in range(len(count)): # if count[i]: # il = il + (i/tabsize + 1)*tabsize * count[i] # return il # quicker: # il = trailing + sum (i/ts + 1)*ts*count[i] = # trailing + ts * sum (i/ts + 1)*count[i] = # trailing + ts * sum i/ts*count[i] + count[i] = # trailing + ts * [(sum i/ts*count[i]) + (sum count[i])] = # trailing + ts * [(sum i/ts*count[i]) + num_tabs] # and note that i/ts*count[i] is 0 when i < ts count, trailing = self.norm il = 0 for i in range(tabsize, len(count)): il = il + i/tabsize * count[i] return trailing + tabsize * (il + self.nt) # return true iff self.indent_level(t) == other.indent_level(t) # for all t >= 1 def equal(self, other): return self.norm == other.norm # return a list of tuples (ts, i1, i2) such that # i1 == self.indent_level(ts) != other.indent_level(ts) == i2. # Intended to be used after not self.equal(other) is known, in which # case it will return at least one witnessing tab size. def not_equal_witness(self, other): n = max(self.longest_run_of_spaces(), other.longest_run_of_spaces()) + 1 a = [] for ts in range(1, n+1): if self.indent_level(ts) != other.indent_level(ts): a.append( (ts, self.indent_level(ts), other.indent_level(ts)) ) return a # Return True iff self.indent_level(t) < other.indent_level(t) # for all t >= 1. # The algorithm is due to Vincent Broman. # Easy to prove it's correct. # XXXpost that. # Trivial to prove n is sharp (consider T vs ST). # Unknown whether there's a faster general way. I suspected so at # first, but no longer. # For the special (but common!) case where M and N are both of the # form (T*)(S*), M.less(N) iff M.len() < N.len() and # M.num_tabs() <= N.num_tabs(). Proof is easy but kinda long-winded. # XXXwrite that up. # Note that M is of the form (T*)(S*) iff len(M.norm[0]) <= 1. def less(self, other): if self.n >= other.n: return False if self.is_simple and other.is_simple: return self.nt <= other.nt n = max(self.longest_run_of_spaces(), other.longest_run_of_spaces()) + 1 # the self.n >= other.n test already did it for ts=1 for ts in range(2, n+1): if self.indent_level(ts) >= other.indent_level(ts): return False return True # return a list of tuples (ts, i1, i2) such that # i1 == self.indent_level(ts) >= other.indent_level(ts) == i2. # Intended to be used after not self.less(other) is known, in which # case it will return at least one witnessing tab size. 
def not_less_witness(self, other): n = max(self.longest_run_of_spaces(), other.longest_run_of_spaces()) + 1 a = [] for ts in range(1, n+1): if self.indent_level(ts) >= other.indent_level(ts): a.append( (ts, self.indent_level(ts), other.indent_level(ts)) ) return a def format_witnesses(w): firsts = map(lambda tup: str(tup[0]), w) prefix = "at tab size" if len(w) > 1: prefix = prefix + "s" return prefix + " " + ', '.join(firsts) def process_tokens(tokens): INDENT = tokenize.INDENT DEDENT = tokenize.DEDENT NEWLINE = tokenize.NEWLINE JUNK = tokenize.COMMENT, tokenize.NL indents = [Whitespace("")] check_equal = 0 for (type, token, start, end, line) in tokens: if type == NEWLINE: # a program statement, or ENDMARKER, will eventually follow, # after some (possibly empty) run of tokens of the form # (NL | COMMENT)* (INDENT | DEDENT+)? # If an INDENT appears, setting check_equal is wrong, and will # be undone when we see the INDENT. check_equal = 1 elif type == INDENT: check_equal = 0 thisguy = Whitespace(token) if not indents[-1].less(thisguy): witness = indents[-1].not_less_witness(thisguy) msg = "indent not greater e.g. " + format_witnesses(witness) raise NannyNag(start[0], msg, line) indents.append(thisguy) elif type == DEDENT: # there's nothing we need to check here! what's important is # that when the run of DEDENTs ends, the indentation of the # program statement (or ENDMARKER) that triggered the run is # equal to what's left at the top of the indents stack # Ouch! This assert triggers if the last line of the source # is indented *and* lacks a newline -- then DEDENTs pop out # of thin air. # assert check_equal # else no earlier NEWLINE, or an earlier INDENT check_equal = 1 del indents[-1] elif check_equal and type not in JUNK: # this is the first "real token" following a NEWLINE, so it # must be the first token of the next program statement, or an # ENDMARKER; the "line" argument exposes the leading whitespace # for this statement; in the case of ENDMARKER, line is an empty # string, so will properly match the empty string with which the # "indents" stack was seeded check_equal = 0 thisguy = Whitespace(line) if not indents[-1].equal(thisguy): witness = indents[-1].not_equal_witness(thisguy) msg = "indent not equal e.g. " + format_witnesses(witness) raise NannyNag(start[0], msg, line) if __name__ == '__main__': main()
mit
eerwitt/tensorflow
tensorflow/contrib/learn/python/learn/utils/saved_model_export_utils_test.py
20
29102
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests of utilities supporting export to SavedModel.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import sys import tempfile import time # pylint: disable=g-import-not-at-top # TODO(jart): #6568 Remove this hack that makes dlopen() not crash. if hasattr(sys, "getdlopenflags") and hasattr(sys, "setdlopenflags"): import ctypes sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL) from tensorflow.contrib.layers.python.layers import feature_column as fc from tensorflow.contrib.learn.python.learn import export_strategy as export_strategy_lib from tensorflow.contrib.learn.python.learn.estimators import constants from tensorflow.contrib.learn.python.learn.estimators import model_fn from tensorflow.contrib.learn.python.learn.utils import input_fn_utils from tensorflow.contrib.learn.python.learn.utils import saved_model_export_utils from tensorflow.core.framework import tensor_shape_pb2 from tensorflow.core.framework import types_pb2 from tensorflow.core.protobuf import meta_graph_pb2 from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.ops import array_ops from tensorflow.python.platform import gfile from tensorflow.python.platform import test from tensorflow.python.saved_model import signature_constants from tensorflow.python.saved_model import signature_def_utils from tensorflow.python.util import compat class SavedModelExportUtilsTest(test.TestCase): def test_build_standardized_signature_def_regression(self): input_tensors = { "input-1": array_ops.placeholder( dtypes.float32, 1, name="input-tensor-1") } output_tensors = { "output-1": array_ops.placeholder( dtypes.float32, 1, name="output-tensor-1") } problem_type = constants.ProblemType.LINEAR_REGRESSION actual_signature_def = ( saved_model_export_utils.build_standardized_signature_def( input_tensors, output_tensors, problem_type)) expected_signature_def = meta_graph_pb2.SignatureDef() shape = tensor_shape_pb2.TensorShapeProto( dim=[tensor_shape_pb2.TensorShapeProto.Dim(size=1)]) dtype = types_pb2.DataType.Value("DT_FLOAT") expected_signature_def.inputs[ signature_constants.REGRESS_INPUTS].CopyFrom( meta_graph_pb2.TensorInfo( name="input-tensor-1:0", dtype=dtype, tensor_shape=shape)) expected_signature_def.outputs[ signature_constants.REGRESS_OUTPUTS].CopyFrom( meta_graph_pb2.TensorInfo( name="output-tensor-1:0", dtype=dtype, tensor_shape=shape)) expected_signature_def.method_name = signature_constants.REGRESS_METHOD_NAME self.assertEqual(actual_signature_def, expected_signature_def) def test_build_standardized_signature_def_classification(self): """Tests classification with one output tensor.""" input_tensors = { "input-1": array_ops.placeholder( dtypes.float32, 1, name="input-tensor-1") } output_tensors = { 
"output-1": array_ops.placeholder( dtypes.string, 1, name="output-tensor-1") } problem_type = constants.ProblemType.CLASSIFICATION actual_signature_def = ( saved_model_export_utils.build_standardized_signature_def( input_tensors, output_tensors, problem_type)) expected_signature_def = meta_graph_pb2.SignatureDef() shape = tensor_shape_pb2.TensorShapeProto( dim=[tensor_shape_pb2.TensorShapeProto.Dim(size=1)]) dtype_float = types_pb2.DataType.Value("DT_FLOAT") dtype_string = types_pb2.DataType.Value("DT_STRING") expected_signature_def.inputs[ signature_constants.CLASSIFY_INPUTS].CopyFrom( meta_graph_pb2.TensorInfo( name="input-tensor-1:0", dtype=dtype_float, tensor_shape=shape)) expected_signature_def.outputs[ signature_constants.CLASSIFY_OUTPUT_CLASSES].CopyFrom( meta_graph_pb2.TensorInfo( name="output-tensor-1:0", dtype=dtype_string, tensor_shape=shape)) expected_signature_def.method_name = ( signature_constants.CLASSIFY_METHOD_NAME) self.assertEqual(actual_signature_def, expected_signature_def) def test_build_standardized_signature_def_classification2(self): """Tests multiple output tensors that include classes and probabilites.""" input_tensors = { "input-1": array_ops.placeholder( dtypes.float32, 1, name="input-tensor-1") } output_tensors = { "classes": array_ops.placeholder( dtypes.string, 1, name="output-tensor-classes"), # Will be used for CLASSIFY_OUTPUT_SCORES. "probabilities": array_ops.placeholder( dtypes.float32, 1, name="output-tensor-proba"), "logits": array_ops.placeholder( dtypes.float32, 1, name="output-tensor-logits-unused"), } problem_type = constants.ProblemType.CLASSIFICATION actual_signature_def = ( saved_model_export_utils.build_standardized_signature_def( input_tensors, output_tensors, problem_type)) expected_signature_def = meta_graph_pb2.SignatureDef() shape = tensor_shape_pb2.TensorShapeProto( dim=[tensor_shape_pb2.TensorShapeProto.Dim(size=1)]) dtype_float = types_pb2.DataType.Value("DT_FLOAT") dtype_string = types_pb2.DataType.Value("DT_STRING") expected_signature_def.inputs[ signature_constants.CLASSIFY_INPUTS].CopyFrom( meta_graph_pb2.TensorInfo( name="input-tensor-1:0", dtype=dtype_float, tensor_shape=shape)) expected_signature_def.outputs[ signature_constants.CLASSIFY_OUTPUT_CLASSES].CopyFrom( meta_graph_pb2.TensorInfo( name="output-tensor-classes:0", dtype=dtype_string, tensor_shape=shape)) expected_signature_def.outputs[ signature_constants.CLASSIFY_OUTPUT_SCORES].CopyFrom( meta_graph_pb2.TensorInfo( name="output-tensor-proba:0", dtype=dtype_float, tensor_shape=shape)) expected_signature_def.method_name = ( signature_constants.CLASSIFY_METHOD_NAME) self.assertEqual(actual_signature_def, expected_signature_def) def test_build_standardized_signature_def_classification3(self): """Tests multiple output tensors that include classes and scores.""" input_tensors = { "input-1": array_ops.placeholder( dtypes.float32, 1, name="input-tensor-1") } output_tensors = { "classes": array_ops.placeholder( dtypes.string, 1, name="output-tensor-classes"), "scores": array_ops.placeholder( dtypes.float32, 1, name="output-tensor-scores"), "logits": array_ops.placeholder( dtypes.float32, 1, name="output-tensor-logits-unused"), } problem_type = constants.ProblemType.CLASSIFICATION actual_signature_def = ( saved_model_export_utils.build_standardized_signature_def( input_tensors, output_tensors, problem_type)) expected_signature_def = meta_graph_pb2.SignatureDef() shape = tensor_shape_pb2.TensorShapeProto( dim=[tensor_shape_pb2.TensorShapeProto.Dim(size=1)]) dtype_float = 
types_pb2.DataType.Value("DT_FLOAT") dtype_string = types_pb2.DataType.Value("DT_STRING") expected_signature_def.inputs[ signature_constants.CLASSIFY_INPUTS].CopyFrom( meta_graph_pb2.TensorInfo( name="input-tensor-1:0", dtype=dtype_float, tensor_shape=shape)) expected_signature_def.outputs[ signature_constants.CLASSIFY_OUTPUT_CLASSES].CopyFrom( meta_graph_pb2.TensorInfo( name="output-tensor-classes:0", dtype=dtype_string, tensor_shape=shape)) expected_signature_def.outputs[ signature_constants.CLASSIFY_OUTPUT_SCORES].CopyFrom( meta_graph_pb2.TensorInfo( name="output-tensor-scores:0", dtype=dtype_float, tensor_shape=shape)) expected_signature_def.method_name = ( signature_constants.CLASSIFY_METHOD_NAME) self.assertEqual(actual_signature_def, expected_signature_def) def test_build_standardized_signature_def_classification4(self): """Tests classification without classes tensor.""" input_tensors = { "input-1": array_ops.placeholder( dtypes.float32, 1, name="input-tensor-1") } output_tensors = { "probabilities": array_ops.placeholder( dtypes.float32, 1, name="output-tensor-proba"), "logits": array_ops.placeholder( dtypes.float32, 1, name="output-tensor-logits-unused"), } problem_type = constants.ProblemType.CLASSIFICATION actual_signature_def = ( saved_model_export_utils.build_standardized_signature_def( input_tensors, output_tensors, problem_type)) expected_signature_def = meta_graph_pb2.SignatureDef() shape = tensor_shape_pb2.TensorShapeProto( dim=[tensor_shape_pb2.TensorShapeProto.Dim(size=1)]) dtype_float = types_pb2.DataType.Value("DT_FLOAT") expected_signature_def.inputs[ signature_constants.CLASSIFY_INPUTS].CopyFrom( meta_graph_pb2.TensorInfo( name="input-tensor-1:0", dtype=dtype_float, tensor_shape=shape)) expected_signature_def.outputs[ signature_constants.CLASSIFY_OUTPUT_SCORES].CopyFrom( meta_graph_pb2.TensorInfo( name="output-tensor-proba:0", dtype=dtype_float, tensor_shape=shape)) expected_signature_def.method_name = ( signature_constants.CLASSIFY_METHOD_NAME) self.assertEqual(actual_signature_def, expected_signature_def) def test_build_standardized_signature_def_classification5(self): """Tests multiple output tensors that include integer classes and scores. Integer classes are dropped out, because Servo classification can only serve string classes. So, only scores are present in the signature. 
""" input_tensors = { "input-1": array_ops.placeholder( dtypes.float32, 1, name="input-tensor-1") } output_tensors = { "classes": array_ops.placeholder( dtypes.int64, 1, name="output-tensor-classes"), "scores": array_ops.placeholder( dtypes.float32, 1, name="output-tensor-scores"), "logits": array_ops.placeholder( dtypes.float32, 1, name="output-tensor-logits-unused"), } problem_type = constants.ProblemType.CLASSIFICATION actual_signature_def = ( saved_model_export_utils.build_standardized_signature_def( input_tensors, output_tensors, problem_type)) expected_signature_def = meta_graph_pb2.SignatureDef() shape = tensor_shape_pb2.TensorShapeProto( dim=[tensor_shape_pb2.TensorShapeProto.Dim(size=1)]) dtype_float = types_pb2.DataType.Value("DT_FLOAT") expected_signature_def.inputs[ signature_constants.CLASSIFY_INPUTS].CopyFrom( meta_graph_pb2.TensorInfo( name="input-tensor-1:0", dtype=dtype_float, tensor_shape=shape)) expected_signature_def.outputs[ signature_constants.CLASSIFY_OUTPUT_SCORES].CopyFrom( meta_graph_pb2.TensorInfo( name="output-tensor-scores:0", dtype=dtype_float, tensor_shape=shape)) expected_signature_def.method_name = ( signature_constants.CLASSIFY_METHOD_NAME) self.assertEqual(actual_signature_def, expected_signature_def) def test_build_standardized_signature_def_classification6(self): """Tests multiple output tensors that with integer classes and no scores. Servo classification cannot serve integer classes, but no scores are available. So, we fall back to predict signature. """ input_tensors = { "input-1": array_ops.placeholder( dtypes.float32, 1, name="input-tensor-1") } output_tensors = { "classes": array_ops.placeholder( dtypes.int64, 1, name="output-tensor-classes"), "logits": array_ops.placeholder( dtypes.float32, 1, name="output-tensor-logits"), } problem_type = constants.ProblemType.CLASSIFICATION actual_signature_def = ( saved_model_export_utils.build_standardized_signature_def( input_tensors, output_tensors, problem_type)) expected_signature_def = meta_graph_pb2.SignatureDef() shape = tensor_shape_pb2.TensorShapeProto( dim=[tensor_shape_pb2.TensorShapeProto.Dim(size=1)]) dtype_int64 = types_pb2.DataType.Value("DT_INT64") dtype_float = types_pb2.DataType.Value("DT_FLOAT") expected_signature_def.inputs[ signature_constants.PREDICT_INPUTS].CopyFrom( meta_graph_pb2.TensorInfo( name="input-tensor-1:0", dtype=dtype_float, tensor_shape=shape)) expected_signature_def.outputs["classes"].CopyFrom( meta_graph_pb2.TensorInfo( name="output-tensor-classes:0", dtype=dtype_int64, tensor_shape=shape)) expected_signature_def.outputs["logits"].CopyFrom( meta_graph_pb2.TensorInfo( name="output-tensor-logits:0", dtype=dtype_float, tensor_shape=shape)) expected_signature_def.method_name = ( signature_constants.PREDICT_METHOD_NAME) self.assertEqual(actual_signature_def, expected_signature_def) def test_get_input_alternatives(self): input_ops = input_fn_utils.InputFnOps("bogus features dict", None, "bogus default input dict") input_alternatives, _ = saved_model_export_utils.get_input_alternatives( input_ops) self.assertEqual(input_alternatives[ saved_model_export_utils.DEFAULT_INPUT_ALTERNATIVE_KEY], "bogus default input dict") # self.assertEqual(input_alternatives[ # saved_model_export_utils.FEATURES_INPUT_ALTERNATIVE_KEY], # "bogus features dict") def test_get_output_alternatives_explicit_default(self): provided_output_alternatives = { "head-1": (constants.ProblemType.LINEAR_REGRESSION, "bogus output dict"), "head-2": (constants.ProblemType.CLASSIFICATION, "bogus output dict 2"), 
"head-3": (constants.ProblemType.UNSPECIFIED, "bogus output dict 3"), } model_fn_ops = model_fn.ModelFnOps( model_fn.ModeKeys.INFER, predictions={"some_output": "bogus_tensor"}, output_alternatives=provided_output_alternatives) output_alternatives, _ = saved_model_export_utils.get_output_alternatives( model_fn_ops, "head-1") self.assertEqual(provided_output_alternatives, output_alternatives) def test_get_output_alternatives_wrong_default(self): provided_output_alternatives = { "head-1": (constants.ProblemType.LINEAR_REGRESSION, "bogus output dict"), "head-2": (constants.ProblemType.CLASSIFICATION, "bogus output dict 2"), "head-3": (constants.ProblemType.UNSPECIFIED, "bogus output dict 3"), } model_fn_ops = model_fn.ModelFnOps( model_fn.ModeKeys.INFER, predictions={"some_output": "bogus_tensor"}, output_alternatives=provided_output_alternatives) with self.assertRaises(ValueError) as e: saved_model_export_utils.get_output_alternatives(model_fn_ops, "WRONG") self.assertEqual("Requested default_output_alternative: WRONG, but " "available output_alternatives are: ['head-1', 'head-2', " "'head-3']", str(e.exception)) def test_get_output_alternatives_single_no_default(self): prediction_tensor = constant_op.constant(["bogus"]) provided_output_alternatives = { "head-1": (constants.ProblemType.LINEAR_REGRESSION, {"output": prediction_tensor}), } model_fn_ops = model_fn.ModelFnOps( model_fn.ModeKeys.INFER, predictions=prediction_tensor, output_alternatives=provided_output_alternatives) output_alternatives, _ = saved_model_export_utils.get_output_alternatives( model_fn_ops) self.assertEqual({"head-1": (constants.ProblemType.LINEAR_REGRESSION, {"output": prediction_tensor})}, output_alternatives) def test_get_output_alternatives_multi_no_default(self): provided_output_alternatives = { "head-1": (constants.ProblemType.LINEAR_REGRESSION, "bogus output dict"), "head-2": (constants.ProblemType.CLASSIFICATION, "bogus output dict 2"), "head-3": (constants.ProblemType.UNSPECIFIED, "bogus output dict 3"), } model_fn_ops = model_fn.ModelFnOps( model_fn.ModeKeys.INFER, predictions={"some_output": "bogus_tensor"}, output_alternatives=provided_output_alternatives) with self.assertRaises(ValueError) as e: saved_model_export_utils.get_output_alternatives(model_fn_ops) self.assertEqual("Please specify a default_output_alternative. 
Available " "output_alternatives are: ['head-1', 'head-2', 'head-3']", str(e.exception)) def test_get_output_alternatives_none_provided(self): prediction_tensor = constant_op.constant(["bogus"]) model_fn_ops = model_fn.ModelFnOps( model_fn.ModeKeys.INFER, predictions={"some_output": prediction_tensor}, output_alternatives=None) output_alternatives, _ = saved_model_export_utils.get_output_alternatives( model_fn_ops) self.assertEqual( {"default_output_alternative": (constants.ProblemType.UNSPECIFIED, { "some_output": prediction_tensor})}, output_alternatives) def test_get_output_alternatives_empty_provided_with_default(self): prediction_tensor = constant_op.constant(["bogus"]) model_fn_ops = model_fn.ModelFnOps( model_fn.ModeKeys.INFER, predictions={"some_output": prediction_tensor}, output_alternatives={}) with self.assertRaises(ValueError) as e: saved_model_export_utils.get_output_alternatives(model_fn_ops, "WRONG") self.assertEqual("Requested default_output_alternative: WRONG, but " "available output_alternatives are: []", str(e.exception)) def test_get_output_alternatives_empty_provided_no_default(self): prediction_tensor = constant_op.constant(["bogus"]) model_fn_ops = model_fn.ModelFnOps( model_fn.ModeKeys.INFER, predictions={"some_output": prediction_tensor}, output_alternatives={}) output_alternatives, _ = saved_model_export_utils.get_output_alternatives( model_fn_ops) self.assertEqual( {"default_output_alternative": (constants.ProblemType.UNSPECIFIED, { "some_output": prediction_tensor})}, output_alternatives) def test_get_output_alternatives_implicit_single(self): prediction_tensor = constant_op.constant(["bogus"]) model_fn_ops = model_fn.ModelFnOps( model_fn.ModeKeys.INFER, predictions=prediction_tensor, output_alternatives=None) output_alternatives, _ = saved_model_export_utils.get_output_alternatives( model_fn_ops) self.assertEqual({ "default_output_alternative": (constants.ProblemType.UNSPECIFIED, { "output": prediction_tensor }) }, output_alternatives) def test_build_all_signature_defs(self): input_features = constant_op.constant(["10"]) input_example = constant_op.constant(["11"]) input_ops = input_fn_utils.InputFnOps({ "features": input_features }, None, {"default input": input_example}) input_alternatives, _ = ( saved_model_export_utils.get_input_alternatives(input_ops)) output_1 = constant_op.constant(["1"]) output_2 = constant_op.constant(["2"]) output_3 = constant_op.constant(["3"]) provided_output_alternatives = { "head-1": (constants.ProblemType.LINEAR_REGRESSION, { "some_output_1": output_1 }), "head-2": (constants.ProblemType.CLASSIFICATION, { "some_output_2": output_2 }), "head-3": (constants.ProblemType.UNSPECIFIED, { "some_output_3": output_3 }), } model_fn_ops = model_fn.ModelFnOps( model_fn.ModeKeys.INFER, predictions={"some_output": constant_op.constant(["4"])}, output_alternatives=provided_output_alternatives) output_alternatives, _ = (saved_model_export_utils.get_output_alternatives( model_fn_ops, "head-1")) signature_defs = saved_model_export_utils.build_all_signature_defs( input_alternatives, output_alternatives, "head-1") expected_signature_defs = { "serving_default": signature_def_utils.regression_signature_def(input_example, output_1), "default_input_alternative:head-1": signature_def_utils.regression_signature_def(input_example, output_1), "default_input_alternative:head-2": signature_def_utils.classification_signature_def(input_example, output_2, None), "default_input_alternative:head-3": signature_def_utils.predict_signature_def({ "input": 
input_example }, {"output": output_3}), # "features_input_alternative:head-1": # signature_def_utils.regression_signature_def(input_features, # output_1), # "features_input_alternative:head-2": # signature_def_utils.classification_signature_def(input_features, # output_2, None), # "features_input_alternative:head-3": # signature_def_utils.predict_signature_def({ # "input": input_features # }, {"output": output_3}), } self.assertDictEqual(expected_signature_defs, signature_defs) def test_build_all_signature_defs_legacy_input_fn_not_supported(self): """Tests that legacy input_fn returning (features, labels) raises error. serving_input_fn must return InputFnOps including a default input alternative. """ input_features = constant_op.constant(["10"]) input_ops = ({"features": input_features}, None) input_alternatives, _ = ( saved_model_export_utils.get_input_alternatives(input_ops)) output_1 = constant_op.constant(["1"]) output_2 = constant_op.constant(["2"]) output_3 = constant_op.constant(["3"]) provided_output_alternatives = { "head-1": (constants.ProblemType.LINEAR_REGRESSION, { "some_output_1": output_1 }), "head-2": (constants.ProblemType.CLASSIFICATION, { "some_output_2": output_2 }), "head-3": (constants.ProblemType.UNSPECIFIED, { "some_output_3": output_3 }), } model_fn_ops = model_fn.ModelFnOps( model_fn.ModeKeys.INFER, predictions={"some_output": constant_op.constant(["4"])}, output_alternatives=provided_output_alternatives) output_alternatives, _ = (saved_model_export_utils.get_output_alternatives( model_fn_ops, "head-1")) with self.assertRaisesRegexp( ValueError, "A default input_alternative must be provided"): saved_model_export_utils.build_all_signature_defs( input_alternatives, output_alternatives, "head-1") def test_get_timestamped_export_dir(self): export_dir_base = tempfile.mkdtemp() + "export/" export_dir_1 = saved_model_export_utils.get_timestamped_export_dir( export_dir_base) time.sleep(2) export_dir_2 = saved_model_export_utils.get_timestamped_export_dir( export_dir_base) time.sleep(2) export_dir_3 = saved_model_export_utils.get_timestamped_export_dir( export_dir_base) # Export directories should be named using a timestamp that is seconds # since epoch. Such a timestamp is 10 digits long. time_1 = os.path.basename(export_dir_1) self.assertEqual(10, len(time_1)) time_2 = os.path.basename(export_dir_2) self.assertEqual(10, len(time_2)) time_3 = os.path.basename(export_dir_3) self.assertEqual(10, len(time_3)) self.assertTrue(int(time_1) < int(time_2)) self.assertTrue(int(time_2) < int(time_3)) def test_garbage_collect_exports(self): export_dir_base = tempfile.mkdtemp() + "export/" gfile.MkDir(export_dir_base) export_dir_1 = _create_test_export_dir(export_dir_base) export_dir_2 = _create_test_export_dir(export_dir_base) export_dir_3 = _create_test_export_dir(export_dir_base) export_dir_4 = _create_test_export_dir(export_dir_base) self.assertTrue(gfile.Exists(export_dir_1)) self.assertTrue(gfile.Exists(export_dir_2)) self.assertTrue(gfile.Exists(export_dir_3)) self.assertTrue(gfile.Exists(export_dir_4)) # Garbage collect all but the most recent 2 exports, # where recency is determined based on the timestamp directory names. 
saved_model_export_utils.garbage_collect_exports(export_dir_base, 2) self.assertFalse(gfile.Exists(export_dir_1)) self.assertFalse(gfile.Exists(export_dir_2)) self.assertTrue(gfile.Exists(export_dir_3)) self.assertTrue(gfile.Exists(export_dir_4)) def test_get_most_recent_export(self): export_dir_base = tempfile.mkdtemp() + "export/" gfile.MkDir(export_dir_base) _create_test_export_dir(export_dir_base) _create_test_export_dir(export_dir_base) _create_test_export_dir(export_dir_base) export_dir_4 = _create_test_export_dir(export_dir_base) (most_recent_export_dir, most_recent_export_version) = ( saved_model_export_utils.get_most_recent_export(export_dir_base)) self.assertEqual(compat.as_bytes(export_dir_4), compat.as_bytes(most_recent_export_dir)) self.assertEqual(compat.as_bytes(export_dir_4), os.path.join(compat.as_bytes(export_dir_base), compat.as_bytes( str(most_recent_export_version)))) def test_make_export_strategy(self): """Only tests that an ExportStrategy instance is created.""" def _serving_input_fn(): return array_ops.constant([1]), None export_strategy = saved_model_export_utils.make_export_strategy( serving_input_fn=_serving_input_fn, default_output_alternative_key="default", assets_extra={"from/path": "to/path"}, as_text=False, exports_to_keep=5) self.assertTrue( isinstance(export_strategy, export_strategy_lib.ExportStrategy)) def test_make_parsing_export_strategy(self): """Only tests that an ExportStrategy instance is created.""" sparse_col = fc.sparse_column_with_hash_bucket( "sparse_column", hash_bucket_size=100) embedding_col = fc.embedding_column( fc.sparse_column_with_hash_bucket( "sparse_column_for_embedding", hash_bucket_size=10), dimension=4) real_valued_col1 = fc.real_valued_column("real_valued_column1") bucketized_col1 = fc.bucketized_column( fc.real_valued_column("real_valued_column_for_bucketization1"), [0, 4]) feature_columns = [sparse_col, embedding_col, real_valued_col1, bucketized_col1] export_strategy = saved_model_export_utils.make_parsing_export_strategy( feature_columns=feature_columns) self.assertTrue( isinstance(export_strategy, export_strategy_lib.ExportStrategy)) def _create_test_export_dir(export_dir_base): export_dir = saved_model_export_utils.get_timestamped_export_dir( export_dir_base) gfile.MkDir(export_dir) time.sleep(2) return export_dir if __name__ == "__main__": test.main()
apache-2.0
castedo/celauth
celauth/providers.py
1
4151
import urlparse from openid.consumer import consumer from openid.extensions import sreg, ax from celauth import OpenIDCase from celauth.dj.celauth.openid_store import DjangoOpenIDStore class OpenIDChoices(object): def __init__(self, data): self.data = data def ids(self, id_prefix=''): return [id_prefix + x[0] for x in self.data] def texts(self): return [x[1] for x in self.data] def urls_by_id(self, id_prefix=''): return dict( (id_prefix + x[0], x[2]) for x in self.data ) OPENID_PROVIDERS = OpenIDChoices([ ('google', 'Google', 'https://www.google.com/accounts/o8/id'), ('yahoo', 'Yahoo!', 'https://me.yahoo.com/'), ('aol', 'AOL', 'https://openid.aol.com/'), ('stackexchange', 'StackExchange', 'https://openid.stackexchange.com/'), ('launchpad', 'Launchpad', 'https://login.launchpad.net/'), ('intuit', 'Intuit', 'https://openid.intuit.com/openid/xrds'), ]) class TestOpenIDHelper: def __init__(self, real): self.case = None self.real = real def initial_response(self, request, user_url, return_url): urlp = urlparse.urlparse(user_url) if urlp.netloc not in ('example.com', 'example.org', 'example.net'): return self.real.initial_response(request, user_url, return_url) if urlp.fragment: email = urlp.fragment + '@' + urlp.netloc urlp = list(urlp) urlp[5] = '' # remove fragment user_url = urlparse.ParseResult(*urlp).geturl() else: email = None self.case = OpenIDCase(user_url, user_url, email) return return_url def make_case(self, request): if not self.case: return self.real.make_case(request) ret = self.case self.case = None return ret EMAIL_AX_TYPE_URI = 'http://axschema.org/contact/email' class LiveOpenIDHelper: def _openid_consumer(self, request): openid_store = DjangoOpenIDStore() return consumer.Consumer(request.session, openid_store) def initial_response(self, request, user_url, return_url): oc = self._openid_consumer(request) openid_request = oc.begin(user_url) if openid_request.endpoint.supportsType(ax.AXMessage.ns_uri): ax_request = ax.FetchRequest() ax_request.add(ax.AttrInfo(EMAIL_AX_TYPE_URI, alias='email', required=True, )) openid_request.addExtension(ax_request) else: sreg_request = sreg.SRegRequest(required=['email'], optional=[], ) openid_request.addExtension(sreg_request) realm = request.build_absolute_uri('/') if openid_request.shouldSendRedirect(): return openid_request.redirectURL(realm, return_url) else: return openid_request.htmlMarkup(realm, return_url) def make_case(self, request): oc = self._openid_consumer(request) current_url = request.build_absolute_uri() query_params = dict(request.REQUEST.items()) response = oc.complete(query_params, current_url) if response.status == consumer.CANCEL: return "OpenID sign in cancelled" if response.status == consumer.SUCCESS: email = None sreg_response = sreg.SRegResponse.fromSuccessResponse(response) if sreg_response: email = sreg_response.get('email', None) ax_response = ax.FetchResponse.fromSuccessResponse(response) if ax_response: email = ax_response.getSingle(EMAIL_AX_TYPE_URI, email) return OpenIDCase(response.identity_url, response.getDisplayIdentifier(), email) return response.message or "Internal openid library error" #should throw exception facade = LiveOpenIDHelper() def enable_test_openids(): global facade facade = TestOpenIDHelper(facade)
mit
ganescoo/Django-facebook
docs/docs_env/Lib/encodings/iso8859_1.py
593
13432
""" Python Character Mapping Codec iso8859_1 generated from 'MAPPINGS/ISO8859/8859-1.TXT' with gencodec.py. """#" import codecs ### Codec APIs class Codec(codecs.Codec): def encode(self,input,errors='strict'): return codecs.charmap_encode(input,errors,encoding_table) def decode(self,input,errors='strict'): return codecs.charmap_decode(input,errors,decoding_table) class IncrementalEncoder(codecs.IncrementalEncoder): def encode(self, input, final=False): return codecs.charmap_encode(input,self.errors,encoding_table)[0] class IncrementalDecoder(codecs.IncrementalDecoder): def decode(self, input, final=False): return codecs.charmap_decode(input,self.errors,decoding_table)[0] class StreamWriter(Codec,codecs.StreamWriter): pass class StreamReader(Codec,codecs.StreamReader): pass ### encodings module API def getregentry(): return codecs.CodecInfo( name='iso8859-1', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, ) ### Decoding Table decoding_table = ( u'\x00' # 0x00 -> NULL u'\x01' # 0x01 -> START OF HEADING u'\x02' # 0x02 -> START OF TEXT u'\x03' # 0x03 -> END OF TEXT u'\x04' # 0x04 -> END OF TRANSMISSION u'\x05' # 0x05 -> ENQUIRY u'\x06' # 0x06 -> ACKNOWLEDGE u'\x07' # 0x07 -> BELL u'\x08' # 0x08 -> BACKSPACE u'\t' # 0x09 -> HORIZONTAL TABULATION u'\n' # 0x0A -> LINE FEED u'\x0b' # 0x0B -> VERTICAL TABULATION u'\x0c' # 0x0C -> FORM FEED u'\r' # 0x0D -> CARRIAGE RETURN u'\x0e' # 0x0E -> SHIFT OUT u'\x0f' # 0x0F -> SHIFT IN u'\x10' # 0x10 -> DATA LINK ESCAPE u'\x11' # 0x11 -> DEVICE CONTROL ONE u'\x12' # 0x12 -> DEVICE CONTROL TWO u'\x13' # 0x13 -> DEVICE CONTROL THREE u'\x14' # 0x14 -> DEVICE CONTROL FOUR u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE u'\x16' # 0x16 -> SYNCHRONOUS IDLE u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK u'\x18' # 0x18 -> CANCEL u'\x19' # 0x19 -> END OF MEDIUM u'\x1a' # 0x1A -> SUBSTITUTE u'\x1b' # 0x1B -> ESCAPE u'\x1c' # 0x1C -> FILE SEPARATOR u'\x1d' # 0x1D -> GROUP SEPARATOR u'\x1e' # 0x1E -> RECORD SEPARATOR u'\x1f' # 0x1F -> UNIT SEPARATOR u' ' # 0x20 -> SPACE u'!' # 0x21 -> EXCLAMATION MARK u'"' # 0x22 -> QUOTATION MARK u'#' # 0x23 -> NUMBER SIGN u'$' # 0x24 -> DOLLAR SIGN u'%' # 0x25 -> PERCENT SIGN u'&' # 0x26 -> AMPERSAND u"'" # 0x27 -> APOSTROPHE u'(' # 0x28 -> LEFT PARENTHESIS u')' # 0x29 -> RIGHT PARENTHESIS u'*' # 0x2A -> ASTERISK u'+' # 0x2B -> PLUS SIGN u',' # 0x2C -> COMMA u'-' # 0x2D -> HYPHEN-MINUS u'.' # 0x2E -> FULL STOP u'/' # 0x2F -> SOLIDUS u'0' # 0x30 -> DIGIT ZERO u'1' # 0x31 -> DIGIT ONE u'2' # 0x32 -> DIGIT TWO u'3' # 0x33 -> DIGIT THREE u'4' # 0x34 -> DIGIT FOUR u'5' # 0x35 -> DIGIT FIVE u'6' # 0x36 -> DIGIT SIX u'7' # 0x37 -> DIGIT SEVEN u'8' # 0x38 -> DIGIT EIGHT u'9' # 0x39 -> DIGIT NINE u':' # 0x3A -> COLON u';' # 0x3B -> SEMICOLON u'<' # 0x3C -> LESS-THAN SIGN u'=' # 0x3D -> EQUALS SIGN u'>' # 0x3E -> GREATER-THAN SIGN u'?' 
# 0x3F -> QUESTION MARK u'@' # 0x40 -> COMMERCIAL AT u'A' # 0x41 -> LATIN CAPITAL LETTER A u'B' # 0x42 -> LATIN CAPITAL LETTER B u'C' # 0x43 -> LATIN CAPITAL LETTER C u'D' # 0x44 -> LATIN CAPITAL LETTER D u'E' # 0x45 -> LATIN CAPITAL LETTER E u'F' # 0x46 -> LATIN CAPITAL LETTER F u'G' # 0x47 -> LATIN CAPITAL LETTER G u'H' # 0x48 -> LATIN CAPITAL LETTER H u'I' # 0x49 -> LATIN CAPITAL LETTER I u'J' # 0x4A -> LATIN CAPITAL LETTER J u'K' # 0x4B -> LATIN CAPITAL LETTER K u'L' # 0x4C -> LATIN CAPITAL LETTER L u'M' # 0x4D -> LATIN CAPITAL LETTER M u'N' # 0x4E -> LATIN CAPITAL LETTER N u'O' # 0x4F -> LATIN CAPITAL LETTER O u'P' # 0x50 -> LATIN CAPITAL LETTER P u'Q' # 0x51 -> LATIN CAPITAL LETTER Q u'R' # 0x52 -> LATIN CAPITAL LETTER R u'S' # 0x53 -> LATIN CAPITAL LETTER S u'T' # 0x54 -> LATIN CAPITAL LETTER T u'U' # 0x55 -> LATIN CAPITAL LETTER U u'V' # 0x56 -> LATIN CAPITAL LETTER V u'W' # 0x57 -> LATIN CAPITAL LETTER W u'X' # 0x58 -> LATIN CAPITAL LETTER X u'Y' # 0x59 -> LATIN CAPITAL LETTER Y u'Z' # 0x5A -> LATIN CAPITAL LETTER Z u'[' # 0x5B -> LEFT SQUARE BRACKET u'\\' # 0x5C -> REVERSE SOLIDUS u']' # 0x5D -> RIGHT SQUARE BRACKET u'^' # 0x5E -> CIRCUMFLEX ACCENT u'_' # 0x5F -> LOW LINE u'`' # 0x60 -> GRAVE ACCENT u'a' # 0x61 -> LATIN SMALL LETTER A u'b' # 0x62 -> LATIN SMALL LETTER B u'c' # 0x63 -> LATIN SMALL LETTER C u'd' # 0x64 -> LATIN SMALL LETTER D u'e' # 0x65 -> LATIN SMALL LETTER E u'f' # 0x66 -> LATIN SMALL LETTER F u'g' # 0x67 -> LATIN SMALL LETTER G u'h' # 0x68 -> LATIN SMALL LETTER H u'i' # 0x69 -> LATIN SMALL LETTER I u'j' # 0x6A -> LATIN SMALL LETTER J u'k' # 0x6B -> LATIN SMALL LETTER K u'l' # 0x6C -> LATIN SMALL LETTER L u'm' # 0x6D -> LATIN SMALL LETTER M u'n' # 0x6E -> LATIN SMALL LETTER N u'o' # 0x6F -> LATIN SMALL LETTER O u'p' # 0x70 -> LATIN SMALL LETTER P u'q' # 0x71 -> LATIN SMALL LETTER Q u'r' # 0x72 -> LATIN SMALL LETTER R u's' # 0x73 -> LATIN SMALL LETTER S u't' # 0x74 -> LATIN SMALL LETTER T u'u' # 0x75 -> LATIN SMALL LETTER U u'v' # 0x76 -> LATIN SMALL LETTER V u'w' # 0x77 -> LATIN SMALL LETTER W u'x' # 0x78 -> LATIN SMALL LETTER X u'y' # 0x79 -> LATIN SMALL LETTER Y u'z' # 0x7A -> LATIN SMALL LETTER Z u'{' # 0x7B -> LEFT CURLY BRACKET u'|' # 0x7C -> VERTICAL LINE u'}' # 0x7D -> RIGHT CURLY BRACKET u'~' # 0x7E -> TILDE u'\x7f' # 0x7F -> DELETE u'\x80' # 0x80 -> <control> u'\x81' # 0x81 -> <control> u'\x82' # 0x82 -> <control> u'\x83' # 0x83 -> <control> u'\x84' # 0x84 -> <control> u'\x85' # 0x85 -> <control> u'\x86' # 0x86 -> <control> u'\x87' # 0x87 -> <control> u'\x88' # 0x88 -> <control> u'\x89' # 0x89 -> <control> u'\x8a' # 0x8A -> <control> u'\x8b' # 0x8B -> <control> u'\x8c' # 0x8C -> <control> u'\x8d' # 0x8D -> <control> u'\x8e' # 0x8E -> <control> u'\x8f' # 0x8F -> <control> u'\x90' # 0x90 -> <control> u'\x91' # 0x91 -> <control> u'\x92' # 0x92 -> <control> u'\x93' # 0x93 -> <control> u'\x94' # 0x94 -> <control> u'\x95' # 0x95 -> <control> u'\x96' # 0x96 -> <control> u'\x97' # 0x97 -> <control> u'\x98' # 0x98 -> <control> u'\x99' # 0x99 -> <control> u'\x9a' # 0x9A -> <control> u'\x9b' # 0x9B -> <control> u'\x9c' # 0x9C -> <control> u'\x9d' # 0x9D -> <control> u'\x9e' # 0x9E -> <control> u'\x9f' # 0x9F -> <control> u'\xa0' # 0xA0 -> NO-BREAK SPACE u'\xa1' # 0xA1 -> INVERTED EXCLAMATION MARK u'\xa2' # 0xA2 -> CENT SIGN u'\xa3' # 0xA3 -> POUND SIGN u'\xa4' # 0xA4 -> CURRENCY SIGN u'\xa5' # 0xA5 -> YEN SIGN u'\xa6' # 0xA6 -> BROKEN BAR u'\xa7' # 0xA7 -> SECTION SIGN u'\xa8' # 0xA8 -> DIAERESIS u'\xa9' # 0xA9 -> COPYRIGHT SIGN u'\xaa' # 0xAA -> FEMININE ORDINAL 
INDICATOR u'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK u'\xac' # 0xAC -> NOT SIGN u'\xad' # 0xAD -> SOFT HYPHEN u'\xae' # 0xAE -> REGISTERED SIGN u'\xaf' # 0xAF -> MACRON u'\xb0' # 0xB0 -> DEGREE SIGN u'\xb1' # 0xB1 -> PLUS-MINUS SIGN u'\xb2' # 0xB2 -> SUPERSCRIPT TWO u'\xb3' # 0xB3 -> SUPERSCRIPT THREE u'\xb4' # 0xB4 -> ACUTE ACCENT u'\xb5' # 0xB5 -> MICRO SIGN u'\xb6' # 0xB6 -> PILCROW SIGN u'\xb7' # 0xB7 -> MIDDLE DOT u'\xb8' # 0xB8 -> CEDILLA u'\xb9' # 0xB9 -> SUPERSCRIPT ONE u'\xba' # 0xBA -> MASCULINE ORDINAL INDICATOR u'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK u'\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER u'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF u'\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS u'\xbf' # 0xBF -> INVERTED QUESTION MARK u'\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE u'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE u'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX u'\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE u'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS u'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE u'\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE u'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA u'\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE u'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE u'\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX u'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS u'\xcc' # 0xCC -> LATIN CAPITAL LETTER I WITH GRAVE u'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE u'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX u'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS u'\xd0' # 0xD0 -> LATIN CAPITAL LETTER ETH (Icelandic) u'\xd1' # 0xD1 -> LATIN CAPITAL LETTER N WITH TILDE u'\xd2' # 0xD2 -> LATIN CAPITAL LETTER O WITH GRAVE u'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE u'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX u'\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE u'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS u'\xd7' # 0xD7 -> MULTIPLICATION SIGN u'\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE u'\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE u'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE u'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX u'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS u'\xdd' # 0xDD -> LATIN CAPITAL LETTER Y WITH ACUTE u'\xde' # 0xDE -> LATIN CAPITAL LETTER THORN (Icelandic) u'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S (German) u'\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE u'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE u'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX u'\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE u'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS u'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE u'\xe6' # 0xE6 -> LATIN SMALL LETTER AE u'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA u'\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE u'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE u'\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX u'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS u'\xec' # 0xEC -> LATIN SMALL LETTER I WITH GRAVE u'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE u'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX u'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS u'\xf0' # 0xF0 -> LATIN SMALL LETTER ETH (Icelandic) u'\xf1' # 0xF1 -> LATIN SMALL LETTER N WITH TILDE u'\xf2' # 0xF2 -> LATIN SMALL LETTER O WITH GRAVE u'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE u'\xf4' # 0xF4 -> LATIN SMALL LETTER O 
WITH CIRCUMFLEX u'\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE u'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS u'\xf7' # 0xF7 -> DIVISION SIGN u'\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE u'\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE u'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE u'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX u'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS u'\xfd' # 0xFD -> LATIN SMALL LETTER Y WITH ACUTE u'\xfe' # 0xFE -> LATIN SMALL LETTER THORN (Icelandic) u'\xff' # 0xFF -> LATIN SMALL LETTER Y WITH DIAERESIS ) ### Encoding table encoding_table=codecs.charmap_build(decoding_table)
bsd-3-clause
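A minimal usage sketch (not part of the file above): a decoding_table/encoding_table pair like the one ending here is normally wired into a codecs.Codec subclass via charmap_decode/charmap_encode. The tiny identity table below is only a stand-in for the full 256-entry table.

import codecs

decoding_table = u''.join(map(unichr, range(256)))  # placeholder for the real table above
encoding_table = codecs.charmap_build(decoding_table)

class Codec(codecs.Codec):
    def encode(self, input, errors='strict'):
        return codecs.charmap_encode(input, errors, encoding_table)

    def decode(self, input, errors='strict'):
        return codecs.charmap_decode(input, errors, decoding_table)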
gisce/OCB
addons/google_base_account/google_base_account.py
53
1297
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

from openerp.osv import fields, osv


class res_users(osv.osv):
    _inherit = "res.users"
    _columns = {
        'gmail_user': fields.char('Username', size=64),
        'gmail_password': fields.char('Password', size=64),
    }

res_users()

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
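A hypothetical helper (the method name and the cr/uid plumbing are illustrative, not taken from the module above) showing that the two added columns read like any other res.users field through the OpenERP ORM:

def _get_gmail_credentials(self, cr, uid, context=None):
    # browse the current user and return the credentials stored by google_base_account
    user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
    return user.gmail_user, user.gmail_password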
hassanabidpk/django
tests/queries/models.py
91
17678
""" Various complex queries that have been problematic in the past. """ from __future__ import unicode_literals import threading from django.db import models from django.utils import six from django.utils.encoding import python_2_unicode_compatible class DumbCategory(models.Model): pass class ProxyCategory(DumbCategory): class Meta: proxy = True @python_2_unicode_compatible class NamedCategory(DumbCategory): name = models.CharField(max_length=10) def __str__(self): return self.name @python_2_unicode_compatible class Tag(models.Model): name = models.CharField(max_length=10) parent = models.ForeignKey( 'self', models.SET_NULL, blank=True, null=True, related_name='children', ) category = models.ForeignKey(NamedCategory, models.SET_NULL, null=True, default=None) class Meta: ordering = ['name'] def __str__(self): return self.name @python_2_unicode_compatible class Note(models.Model): note = models.CharField(max_length=100) misc = models.CharField(max_length=10) class Meta: ordering = ['note'] def __str__(self): return self.note def __init__(self, *args, **kwargs): super(Note, self).__init__(*args, **kwargs) # Regression for #13227 -- having an attribute that # is unpickleable doesn't stop you from cloning queries # that use objects of that type as an argument. self.lock = threading.Lock() @python_2_unicode_compatible class Annotation(models.Model): name = models.CharField(max_length=10) tag = models.ForeignKey(Tag, models.CASCADE) notes = models.ManyToManyField(Note) def __str__(self): return self.name @python_2_unicode_compatible class ExtraInfo(models.Model): info = models.CharField(max_length=100) note = models.ForeignKey(Note, models.CASCADE) value = models.IntegerField(null=True) class Meta: ordering = ['info'] def __str__(self): return self.info @python_2_unicode_compatible class Author(models.Model): name = models.CharField(max_length=10) num = models.IntegerField(unique=True) extra = models.ForeignKey(ExtraInfo, models.CASCADE) class Meta: ordering = ['name'] def __str__(self): return self.name @python_2_unicode_compatible class Item(models.Model): name = models.CharField(max_length=10) created = models.DateTimeField() modified = models.DateTimeField(blank=True, null=True) tags = models.ManyToManyField(Tag, blank=True) creator = models.ForeignKey(Author, models.CASCADE) note = models.ForeignKey(Note, models.CASCADE) class Meta: ordering = ['-note', 'name'] def __str__(self): return self.name @python_2_unicode_compatible class Report(models.Model): name = models.CharField(max_length=10) creator = models.ForeignKey(Author, models.SET_NULL, to_field='num', null=True) def __str__(self): return self.name @python_2_unicode_compatible class Ranking(models.Model): rank = models.IntegerField() author = models.ForeignKey(Author, models.CASCADE) class Meta: # A complex ordering specification. Should stress the system a bit. ordering = ('author__extra__note', 'author__name', 'rank') def __str__(self): return '%d: %s' % (self.rank, self.author.name) @python_2_unicode_compatible class Cover(models.Model): title = models.CharField(max_length=50) item = models.ForeignKey(Item, models.CASCADE) class Meta: ordering = ['item'] def __str__(self): return self.title @python_2_unicode_compatible class Number(models.Model): num = models.IntegerField() def __str__(self): return six.text_type(self.num) # Symmetrical m2m field with a normal field using the reverse accessor name # ("valid"). 
class Valid(models.Model): valid = models.CharField(max_length=10) parent = models.ManyToManyField('self') class Meta: ordering = ['valid'] # Some funky cross-linked models for testing a couple of infinite recursion # cases. class X(models.Model): y = models.ForeignKey('Y', models.CASCADE) class Y(models.Model): x1 = models.ForeignKey(X, models.CASCADE, related_name='y1') # Some models with a cycle in the default ordering. This would be bad if we # didn't catch the infinite loop. class LoopX(models.Model): y = models.ForeignKey('LoopY', models.CASCADE) class Meta: ordering = ['y'] class LoopY(models.Model): x = models.ForeignKey(LoopX, models.CASCADE) class Meta: ordering = ['x'] class LoopZ(models.Model): z = models.ForeignKey('self', models.CASCADE) class Meta: ordering = ['z'] # A model and custom default manager combination. class CustomManager(models.Manager): def get_queryset(self): qs = super(CustomManager, self).get_queryset() return qs.filter(public=True, tag__name='t1') @python_2_unicode_compatible class ManagedModel(models.Model): data = models.CharField(max_length=10) tag = models.ForeignKey(Tag, models.CASCADE) public = models.BooleanField(default=True) objects = CustomManager() normal_manager = models.Manager() def __str__(self): return self.data # An inter-related setup with multiple paths from Child to Detail. class Detail(models.Model): data = models.CharField(max_length=10) class MemberManager(models.Manager): def get_queryset(self): return super(MemberManager, self).get_queryset().select_related("details") class Member(models.Model): name = models.CharField(max_length=10) details = models.OneToOneField(Detail, models.CASCADE, primary_key=True) objects = MemberManager() class Child(models.Model): person = models.OneToOneField(Member, models.CASCADE, primary_key=True) parent = models.ForeignKey(Member, models.CASCADE, related_name="children") # Custom primary keys interfered with ordering in the past. class CustomPk(models.Model): name = models.CharField(max_length=10, primary_key=True) extra = models.CharField(max_length=10) class Meta: ordering = ['name', 'extra'] class Related(models.Model): custom = models.ForeignKey(CustomPk, models.CASCADE) class CustomPkTag(models.Model): id = models.CharField(max_length=20, primary_key=True) custom_pk = models.ManyToManyField(CustomPk) tag = models.CharField(max_length=20) # An inter-related setup with a model subclass that has a nullable # path to another model, and a return path from that model. @python_2_unicode_compatible class Celebrity(models.Model): name = models.CharField("Name", max_length=20) greatest_fan = models.ForeignKey("Fan", models.SET_NULL, null=True, unique=True) def __str__(self): return self.name class TvChef(Celebrity): pass class Fan(models.Model): fan_of = models.ForeignKey(Celebrity, models.CASCADE) # Multiple foreign keys @python_2_unicode_compatible class LeafA(models.Model): data = models.CharField(max_length=10) def __str__(self): return self.data class LeafB(models.Model): data = models.CharField(max_length=10) class Join(models.Model): a = models.ForeignKey(LeafA, models.CASCADE) b = models.ForeignKey(LeafB, models.CASCADE) @python_2_unicode_compatible class ReservedName(models.Model): name = models.CharField(max_length=20) order = models.IntegerField() def __str__(self): return self.name # A simpler shared-foreign-key setup that can expose some problems. 
@python_2_unicode_compatible class SharedConnection(models.Model): data = models.CharField(max_length=10) def __str__(self): return self.data class PointerA(models.Model): connection = models.ForeignKey(SharedConnection, models.CASCADE) class PointerB(models.Model): connection = models.ForeignKey(SharedConnection, models.CASCADE) # Multi-layer ordering @python_2_unicode_compatible class SingleObject(models.Model): name = models.CharField(max_length=10) class Meta: ordering = ['name'] def __str__(self): return self.name class RelatedObject(models.Model): single = models.ForeignKey(SingleObject, models.SET_NULL, null=True) f = models.IntegerField(null=True) class Meta: ordering = ['single'] @python_2_unicode_compatible class Plaything(models.Model): name = models.CharField(max_length=10) others = models.ForeignKey(RelatedObject, models.SET_NULL, null=True) class Meta: ordering = ['others'] def __str__(self): return self.name @python_2_unicode_compatible class Article(models.Model): name = models.CharField(max_length=20) created = models.DateTimeField() def __str__(self): return self.name @python_2_unicode_compatible class Food(models.Model): name = models.CharField(max_length=20, unique=True) def __str__(self): return self.name @python_2_unicode_compatible class Eaten(models.Model): food = models.ForeignKey(Food, models.SET_NULL, to_field="name", null=True) meal = models.CharField(max_length=20) def __str__(self): return "%s at %s" % (self.food, self.meal) @python_2_unicode_compatible class Node(models.Model): num = models.IntegerField(unique=True) parent = models.ForeignKey("self", models.SET_NULL, to_field="num", null=True) def __str__(self): return "%s" % self.num # Bug #12252 @python_2_unicode_compatible class ObjectA(models.Model): name = models.CharField(max_length=50) def __str__(self): return self.name def __iter__(self): # Ticket #23721 assert False, 'type checking should happen without calling model __iter__' class ProxyObjectA(ObjectA): class Meta: proxy = True class ChildObjectA(ObjectA): pass @python_2_unicode_compatible class ObjectB(models.Model): name = models.CharField(max_length=50) objecta = models.ForeignKey(ObjectA, models.CASCADE) num = models.PositiveSmallIntegerField() def __str__(self): return self.name class ProxyObjectB(ObjectB): class Meta: proxy = True @python_2_unicode_compatible class ObjectC(models.Model): name = models.CharField(max_length=50) objecta = models.ForeignKey(ObjectA, models.SET_NULL, null=True) objectb = models.ForeignKey(ObjectB, models.SET_NULL, null=True) childobjecta = models.ForeignKey(ChildObjectA, models.SET_NULL, null=True, related_name='ca_pk') def __str__(self): return self.name @python_2_unicode_compatible class SimpleCategory(models.Model): name = models.CharField(max_length=15) def __str__(self): return self.name @python_2_unicode_compatible class SpecialCategory(SimpleCategory): special_name = models.CharField(max_length=15) def __str__(self): return self.name + " " + self.special_name @python_2_unicode_compatible class CategoryItem(models.Model): category = models.ForeignKey(SimpleCategory, models.CASCADE) def __str__(self): return "category item: " + str(self.category) @python_2_unicode_compatible class OneToOneCategory(models.Model): new_name = models.CharField(max_length=15) category = models.OneToOneField(SimpleCategory, models.CASCADE) def __str__(self): return "one2one " + self.new_name class CategoryRelationship(models.Model): first = models.ForeignKey(SimpleCategory, models.CASCADE, related_name='first_rel') second = 
models.ForeignKey(SimpleCategory, models.CASCADE, related_name='second_rel') class NullableName(models.Model): name = models.CharField(max_length=20, null=True) class Meta: ordering = ['id'] class ModelD(models.Model): name = models.TextField() class ModelC(models.Model): name = models.TextField() class ModelB(models.Model): name = models.TextField() c = models.ForeignKey(ModelC, models.CASCADE) class ModelA(models.Model): name = models.TextField() b = models.ForeignKey(ModelB, models.SET_NULL, null=True) d = models.ForeignKey(ModelD, models.CASCADE) @python_2_unicode_compatible class Job(models.Model): name = models.CharField(max_length=20, unique=True) def __str__(self): return self.name class JobResponsibilities(models.Model): job = models.ForeignKey(Job, models.SET_NULL, to_field='name') responsibility = models.ForeignKey('Responsibility', models.SET_NULL, to_field='description') @python_2_unicode_compatible class Responsibility(models.Model): description = models.CharField(max_length=20, unique=True) jobs = models.ManyToManyField(Job, through=JobResponsibilities, related_name='responsibilities') def __str__(self): return self.description # Models for disjunction join promotion low level testing. class FK1(models.Model): f1 = models.TextField() f2 = models.TextField() class FK2(models.Model): f1 = models.TextField() f2 = models.TextField() class FK3(models.Model): f1 = models.TextField() f2 = models.TextField() class BaseA(models.Model): a = models.ForeignKey(FK1, models.SET_NULL, null=True) b = models.ForeignKey(FK2, models.SET_NULL, null=True) c = models.ForeignKey(FK3, models.SET_NULL, null=True) @python_2_unicode_compatible class Identifier(models.Model): name = models.CharField(max_length=100) def __str__(self): return self.name class Program(models.Model): identifier = models.OneToOneField(Identifier, models.CASCADE) class Channel(models.Model): programs = models.ManyToManyField(Program) identifier = models.OneToOneField(Identifier, models.CASCADE) class Book(models.Model): title = models.TextField() chapter = models.ForeignKey('Chapter', models.CASCADE) class Chapter(models.Model): title = models.TextField() paragraph = models.ForeignKey('Paragraph', models.CASCADE) class Paragraph(models.Model): text = models.TextField() page = models.ManyToManyField('Page') class Page(models.Model): text = models.TextField() class MyObject(models.Model): parent = models.ForeignKey('self', models.SET_NULL, null=True, blank=True, related_name='children') data = models.CharField(max_length=100) created_at = models.DateTimeField(auto_now_add=True) # Models for #17600 regressions @python_2_unicode_compatible class Order(models.Model): id = models.IntegerField(primary_key=True) class Meta: ordering = ('pk', ) def __str__(self): return '%s' % self.pk @python_2_unicode_compatible class OrderItem(models.Model): order = models.ForeignKey(Order, models.SET_NULL, related_name='items') status = models.IntegerField() class Meta: ordering = ('pk', ) def __str__(self): return '%s' % self.pk class BaseUser(models.Model): pass @python_2_unicode_compatible class Task(models.Model): title = models.CharField(max_length=10) owner = models.ForeignKey(BaseUser, models.SET_NULL, related_name='owner') creator = models.ForeignKey(BaseUser, models.SET_NULL, related_name='creator') def __str__(self): return self.title @python_2_unicode_compatible class Staff(models.Model): name = models.CharField(max_length=10) def __str__(self): return self.name @python_2_unicode_compatible class StaffUser(BaseUser): staff = 
models.OneToOneField(Staff, models.SET_NULL, related_name='user') def __str__(self): return self.staff class Ticket21203Parent(models.Model): parentid = models.AutoField(primary_key=True) parent_bool = models.BooleanField(default=True) created = models.DateTimeField(auto_now=True) class Ticket21203Child(models.Model): childid = models.AutoField(primary_key=True) parent = models.ForeignKey(Ticket21203Parent, models.CASCADE) class Person(models.Model): name = models.CharField(max_length=128) @python_2_unicode_compatible class Company(models.Model): name = models.CharField(max_length=128) employees = models.ManyToManyField(Person, related_name='employers', through='Employment') def __str__(self): return self.name class Employment(models.Model): employer = models.ForeignKey(Company, models.CASCADE) employee = models.ForeignKey(Person, models.CASCADE) title = models.CharField(max_length=128) # Bug #22429 class School(models.Model): pass class Student(models.Model): school = models.ForeignKey(School, models.CASCADE) class Classroom(models.Model): school = models.ForeignKey(School, models.CASCADE) students = models.ManyToManyField(Student, related_name='classroom') class Ticket23605AParent(models.Model): pass class Ticket23605A(Ticket23605AParent): pass class Ticket23605B(models.Model): modela_fk = models.ForeignKey(Ticket23605A, models.CASCADE) modelc_fk = models.ForeignKey("Ticket23605C", models.CASCADE) field_b0 = models.IntegerField(null=True) field_b1 = models.BooleanField(default=False) class Ticket23605C(models.Model): field_c0 = models.FloatField() # db_table names have capital letters to ensure they are quoted in queries. class Individual(models.Model): alive = models.BooleanField() class Meta: db_table = 'Individual' class RelatedIndividual(models.Model): related = models.ForeignKey(Individual, models.CASCADE, related_name='related_individual') class Meta: db_table = 'RelatedIndividual'
bsd-3-clause
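Illustrative snippet (the object values are made up and this is not part of the test models above): ManagedModel's default manager filters to public rows tagged 't1', while normal_manager sees every row.

t1 = Tag.objects.create(name='t1')
ManagedModel.objects.create(data='visible', tag=t1, public=True)
ManagedModel.objects.create(data='hidden', tag=t1, public=False)

assert ManagedModel.objects.count() == 1         # CustomManager applies the filter
assert ManagedModel.normal_manager.count() == 2  # plain manager does not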
IXgnas/dixcovery_kernel
tools/perf/scripts/python/netdev-times.py
11271
15048
# Display a process of packets and processed time. # It helps us to investigate networking or network device. # # options # tx: show only tx chart # rx: show only rx chart # dev=: show only thing related to specified device # debug: work with debug mode. It shows buffer status. import os import sys sys.path.append(os.environ['PERF_EXEC_PATH'] + \ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') from perf_trace_context import * from Core import * from Util import * all_event_list = []; # insert all tracepoint event related with this script irq_dic = {}; # key is cpu and value is a list which stacks irqs # which raise NET_RX softirq net_rx_dic = {}; # key is cpu and value include time of NET_RX softirq-entry # and a list which stacks receive receive_hunk_list = []; # a list which include a sequence of receive events rx_skb_list = []; # received packet list for matching # skb_copy_datagram_iovec buffer_budget = 65536; # the budget of rx_skb_list, tx_queue_list and # tx_xmit_list of_count_rx_skb_list = 0; # overflow count tx_queue_list = []; # list of packets which pass through dev_queue_xmit of_count_tx_queue_list = 0; # overflow count tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit of_count_tx_xmit_list = 0; # overflow count tx_free_list = []; # list of packets which is freed # options show_tx = 0; show_rx = 0; dev = 0; # store a name of device specified by option "dev=" debug = 0; # indices of event_info tuple EINFO_IDX_NAME= 0 EINFO_IDX_CONTEXT=1 EINFO_IDX_CPU= 2 EINFO_IDX_TIME= 3 EINFO_IDX_PID= 4 EINFO_IDX_COMM= 5 # Calculate a time interval(msec) from src(nsec) to dst(nsec) def diff_msec(src, dst): return (dst - src) / 1000000.0 # Display a process of transmitting a packet def print_transmit(hunk): if dev != 0 and hunk['dev'].find(dev) < 0: return print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \ (hunk['dev'], hunk['len'], nsecs_secs(hunk['queue_t']), nsecs_nsecs(hunk['queue_t'])/1000, diff_msec(hunk['queue_t'], hunk['xmit_t']), diff_msec(hunk['xmit_t'], hunk['free_t'])) # Format for displaying rx packet processing PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)" PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)" PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)" PF_JOINT= " |" PF_WJOINT= " | |" PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)" PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)" PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)" PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)" PF_CONS_SKB= " | consume_skb(+%.3fmsec)" # Display a process of received packets and interrputs associated with # a NET_RX softirq def print_receive(hunk): show_hunk = 0 irq_list = hunk['irq_list'] cpu = irq_list[0]['cpu'] base_t = irq_list[0]['irq_ent_t'] # check if this hunk should be showed if dev != 0: for i in range(len(irq_list)): if irq_list[i]['name'].find(dev) >= 0: show_hunk = 1 break else: show_hunk = 1 if show_hunk == 0: return print "%d.%06dsec cpu=%d" % \ (nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu) for i in range(len(irq_list)): print PF_IRQ_ENTRY % \ (diff_msec(base_t, irq_list[i]['irq_ent_t']), irq_list[i]['irq'], irq_list[i]['name']) print PF_JOINT irq_event_list = irq_list[i]['event_list'] for j in range(len(irq_event_list)): irq_event = irq_event_list[j] if irq_event['event'] == 'netif_rx': print PF_NET_RX % \ (diff_msec(base_t, irq_event['time']), irq_event['skbaddr']) print PF_JOINT print PF_SOFT_ENTRY % \ diff_msec(base_t, hunk['sirq_ent_t']) print PF_JOINT event_list = hunk['event_list'] for i in range(len(event_list)): event = 
event_list[i] if event['event_name'] == 'napi_poll': print PF_NAPI_POLL % \ (diff_msec(base_t, event['event_t']), event['dev']) if i == len(event_list) - 1: print "" else: print PF_JOINT else: print PF_NET_RECV % \ (diff_msec(base_t, event['event_t']), event['skbaddr'], event['len']) if 'comm' in event.keys(): print PF_WJOINT print PF_CPY_DGRAM % \ (diff_msec(base_t, event['comm_t']), event['pid'], event['comm']) elif 'handle' in event.keys(): print PF_WJOINT if event['handle'] == "kfree_skb": print PF_KFREE_SKB % \ (diff_msec(base_t, event['comm_t']), event['location']) elif event['handle'] == "consume_skb": print PF_CONS_SKB % \ diff_msec(base_t, event['comm_t']) print PF_JOINT def trace_begin(): global show_tx global show_rx global dev global debug for i in range(len(sys.argv)): if i == 0: continue arg = sys.argv[i] if arg == 'tx': show_tx = 1 elif arg =='rx': show_rx = 1 elif arg.find('dev=',0, 4) >= 0: dev = arg[4:] elif arg == 'debug': debug = 1 if show_tx == 0 and show_rx == 0: show_tx = 1 show_rx = 1 def trace_end(): # order all events in time all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME], b[EINFO_IDX_TIME])) # process all events for i in range(len(all_event_list)): event_info = all_event_list[i] name = event_info[EINFO_IDX_NAME] if name == 'irq__softirq_exit': handle_irq_softirq_exit(event_info) elif name == 'irq__softirq_entry': handle_irq_softirq_entry(event_info) elif name == 'irq__softirq_raise': handle_irq_softirq_raise(event_info) elif name == 'irq__irq_handler_entry': handle_irq_handler_entry(event_info) elif name == 'irq__irq_handler_exit': handle_irq_handler_exit(event_info) elif name == 'napi__napi_poll': handle_napi_poll(event_info) elif name == 'net__netif_receive_skb': handle_netif_receive_skb(event_info) elif name == 'net__netif_rx': handle_netif_rx(event_info) elif name == 'skb__skb_copy_datagram_iovec': handle_skb_copy_datagram_iovec(event_info) elif name == 'net__net_dev_queue': handle_net_dev_queue(event_info) elif name == 'net__net_dev_xmit': handle_net_dev_xmit(event_info) elif name == 'skb__kfree_skb': handle_kfree_skb(event_info) elif name == 'skb__consume_skb': handle_consume_skb(event_info) # display receive hunks if show_rx: for i in range(len(receive_hunk_list)): print_receive(receive_hunk_list[i]) # display transmit hunks if show_tx: print " dev len Qdisc " \ " netdevice free" for i in range(len(tx_free_list)): print_transmit(tx_free_list[i]) if debug: print "debug buffer status" print "----------------------------" print "xmit Qdisc:remain:%d overflow:%d" % \ (len(tx_queue_list), of_count_tx_queue_list) print "xmit netdevice:remain:%d overflow:%d" % \ (len(tx_xmit_list), of_count_tx_xmit_list) print "receive:remain:%d overflow:%d" % \ (len(rx_skb_list), of_count_rx_skb_list) # called from perf, when it finds a correspoinding event def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, vec): if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX": return event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec) all_event_list.append(event_info) def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, vec): if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX": return event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec) all_event_list.append(event_info) def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, vec): if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX": return event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec) all_event_list.append(event_info) def 
irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm, irq, irq_name): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, irq_name) all_event_list.append(event_info) def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, irq, ret): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret) all_event_list.append(event_info) def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, napi, dev_name): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, napi, dev_name) all_event_list.append(event_info) def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr, skblen, dev_name): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr, skblen, dev_name) all_event_list.append(event_info) def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, skbaddr, skblen, dev_name): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr, skblen, dev_name) all_event_list.append(event_info) def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm, skbaddr, skblen, dev_name): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr, skblen, dev_name) all_event_list.append(event_info) def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm, skbaddr, skblen, rc, dev_name): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr, skblen, rc ,dev_name) all_event_list.append(event_info) def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr, protocol, location): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr, protocol, location) all_event_list.append(event_info) def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr) all_event_list.append(event_info) def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm, skbaddr, skblen): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr, skblen) all_event_list.append(event_info) def handle_irq_handler_entry(event_info): (name, context, cpu, time, pid, comm, irq, irq_name) = event_info if cpu not in irq_dic.keys(): irq_dic[cpu] = [] irq_record = {'irq':irq, 'name':irq_name, 'cpu':cpu, 'irq_ent_t':time} irq_dic[cpu].append(irq_record) def handle_irq_handler_exit(event_info): (name, context, cpu, time, pid, comm, irq, ret) = event_info if cpu not in irq_dic.keys(): return irq_record = irq_dic[cpu].pop() if irq != irq_record['irq']: return irq_record.update({'irq_ext_t':time}) # if an irq doesn't include NET_RX softirq, drop. 
if 'event_list' in irq_record.keys(): irq_dic[cpu].append(irq_record) def handle_irq_softirq_raise(event_info): (name, context, cpu, time, pid, comm, vec) = event_info if cpu not in irq_dic.keys() \ or len(irq_dic[cpu]) == 0: return irq_record = irq_dic[cpu].pop() if 'event_list' in irq_record.keys(): irq_event_list = irq_record['event_list'] else: irq_event_list = [] irq_event_list.append({'time':time, 'event':'sirq_raise'}) irq_record.update({'event_list':irq_event_list}) irq_dic[cpu].append(irq_record) def handle_irq_softirq_entry(event_info): (name, context, cpu, time, pid, comm, vec) = event_info net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]} def handle_irq_softirq_exit(event_info): (name, context, cpu, time, pid, comm, vec) = event_info irq_list = [] event_list = 0 if cpu in irq_dic.keys(): irq_list = irq_dic[cpu] del irq_dic[cpu] if cpu in net_rx_dic.keys(): sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t'] event_list = net_rx_dic[cpu]['event_list'] del net_rx_dic[cpu] if irq_list == [] or event_list == 0: return rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time, 'irq_list':irq_list, 'event_list':event_list} # merge information realted to a NET_RX softirq receive_hunk_list.append(rec_data) def handle_napi_poll(event_info): (name, context, cpu, time, pid, comm, napi, dev_name) = event_info if cpu in net_rx_dic.keys(): event_list = net_rx_dic[cpu]['event_list'] rec_data = {'event_name':'napi_poll', 'dev':dev_name, 'event_t':time} event_list.append(rec_data) def handle_netif_rx(event_info): (name, context, cpu, time, pid, comm, skbaddr, skblen, dev_name) = event_info if cpu not in irq_dic.keys() \ or len(irq_dic[cpu]) == 0: return irq_record = irq_dic[cpu].pop() if 'event_list' in irq_record.keys(): irq_event_list = irq_record['event_list'] else: irq_event_list = [] irq_event_list.append({'time':time, 'event':'netif_rx', 'skbaddr':skbaddr, 'skblen':skblen, 'dev_name':dev_name}) irq_record.update({'event_list':irq_event_list}) irq_dic[cpu].append(irq_record) def handle_netif_receive_skb(event_info): global of_count_rx_skb_list (name, context, cpu, time, pid, comm, skbaddr, skblen, dev_name) = event_info if cpu in net_rx_dic.keys(): rec_data = {'event_name':'netif_receive_skb', 'event_t':time, 'skbaddr':skbaddr, 'len':skblen} event_list = net_rx_dic[cpu]['event_list'] event_list.append(rec_data) rx_skb_list.insert(0, rec_data) if len(rx_skb_list) > buffer_budget: rx_skb_list.pop() of_count_rx_skb_list += 1 def handle_net_dev_queue(event_info): global of_count_tx_queue_list (name, context, cpu, time, pid, comm, skbaddr, skblen, dev_name) = event_info skb = {'dev':dev_name, 'skbaddr':skbaddr, 'len':skblen, 'queue_t':time} tx_queue_list.insert(0, skb) if len(tx_queue_list) > buffer_budget: tx_queue_list.pop() of_count_tx_queue_list += 1 def handle_net_dev_xmit(event_info): global of_count_tx_xmit_list (name, context, cpu, time, pid, comm, skbaddr, skblen, rc, dev_name) = event_info if rc == 0: # NETDEV_TX_OK for i in range(len(tx_queue_list)): skb = tx_queue_list[i] if skb['skbaddr'] == skbaddr: skb['xmit_t'] = time tx_xmit_list.insert(0, skb) del tx_queue_list[i] if len(tx_xmit_list) > buffer_budget: tx_xmit_list.pop() of_count_tx_xmit_list += 1 return def handle_kfree_skb(event_info): (name, context, cpu, time, pid, comm, skbaddr, protocol, location) = event_info for i in range(len(tx_queue_list)): skb = tx_queue_list[i] if skb['skbaddr'] == skbaddr: del tx_queue_list[i] return for i in range(len(tx_xmit_list)): skb = tx_xmit_list[i] if skb['skbaddr'] == skbaddr: skb['free_t'] = 
time tx_free_list.append(skb) del tx_xmit_list[i] return for i in range(len(rx_skb_list)): rec_data = rx_skb_list[i] if rec_data['skbaddr'] == skbaddr: rec_data.update({'handle':"kfree_skb", 'comm':comm, 'pid':pid, 'comm_t':time}) del rx_skb_list[i] return def handle_consume_skb(event_info): (name, context, cpu, time, pid, comm, skbaddr) = event_info for i in range(len(tx_xmit_list)): skb = tx_xmit_list[i] if skb['skbaddr'] == skbaddr: skb['free_t'] = time tx_free_list.append(skb) del tx_xmit_list[i] return def handle_skb_copy_datagram_iovec(event_info): (name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info for i in range(len(rx_skb_list)): rec_data = rx_skb_list[i] if skbaddr == rec_data['skbaddr']: rec_data.update({'handle':"skb_copy_datagram_iovec", 'comm':comm, 'pid':pid, 'comm_t':time}) del rx_skb_list[i] return
gpl-3.0
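A quick worked example of the diff_msec() helper defined in the script above (the two timestamps are invented, in nanoseconds as produced by nsecs(sec, nsec)):

def diff_msec(src, dst):              # copied from the script above
    return (dst - src) / 1000000.0

queue_t = 1000000000                  # packet enters the qdisc at t = 1.000000 s
xmit_t = 1002500000                   # driver transmits it at t = 1.002500 s
print diff_msec(queue_t, xmit_t)      # -> 2.5, i.e. 2.5 ms spent queued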
shahar-stratoscale/nova
nova/tests/objects/test_instance_group.py
8
13653
# Copyright (c) 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid from nova.compute import flavors from nova import context from nova import db from nova import exception from nova.objects import instance_group from nova import test from nova.tests.objects import test_objects from nova.tests import utils as tests_utils class _TestInstanceGroupObjects(test.TestCase): def setUp(self): super(_TestInstanceGroupObjects, self).setUp() self.user_id = 'fake_user' self.project_id = 'fake_project' self.context = context.RequestContext(self.user_id, self.project_id) def _get_default_values(self): return {'name': 'fake_name', 'user_id': self.user_id, 'project_id': self.project_id} def _create_instance_group(self, context, values, policies=None, metadata=None, members=None): return db.instance_group_create(context, values, policies=policies, metadata=metadata, members=members) def test_get_by_uuid(self): values = self._get_default_values() metadata = {'key11': 'value1', 'key12': 'value2'} policies = ['policy1', 'policy2'] members = ['instance_id1', 'instance_id2'] db_result = self._create_instance_group(self.context, values, metadata=metadata, policies=policies, members=members) obj_result = instance_group.InstanceGroup.get_by_uuid(self.context, db_result.uuid) self.assertEqual(obj_result.metadetails, metadata) self.assertEqual(obj_result.members, members) self.assertEqual(obj_result.policies, policies) def test_refresh(self): values = self._get_default_values() db_result = self._create_instance_group(self.context, values) obj_result = instance_group.InstanceGroup.get_by_uuid(self.context, db_result.uuid) self.assertEqual(obj_result.name, 'fake_name') values = {'name': 'new_name', 'user_id': 'new_user', 'project_id': 'new_project'} db.instance_group_update(self.context, db_result['uuid'], values) obj_result.refresh() self.assertEqual(obj_result.name, 'new_name') self.assertEqual(set([]), obj_result.obj_what_changed()) def test_save_simple(self): values = self._get_default_values() db_result = self._create_instance_group(self.context, values) obj_result = instance_group.InstanceGroup.get_by_uuid(self.context, db_result.uuid) self.assertEqual(obj_result.name, 'fake_name') obj_result.name = 'new_name' obj_result.save() result = db.instance_group_get(self.context, db_result['uuid']) self.assertEqual(result['name'], 'new_name') def test_save_policies(self): values = self._get_default_values() db_result = self._create_instance_group(self.context, values) obj_result = instance_group.InstanceGroup.get_by_uuid(self.context, db_result.uuid) policies = ['policy1', 'policy2'] obj_result.policies = policies obj_result.save() result = db.instance_group_get(self.context, db_result['uuid']) self.assertEqual(result['policies'], policies) def test_save_members(self): values = self._get_default_values() db_result = self._create_instance_group(self.context, values) obj_result = instance_group.InstanceGroup.get_by_uuid(self.context, db_result.uuid) members = ['instance1', 'instance2'] obj_result.members 
= members obj_result.save() result = db.instance_group_get(self.context, db_result['uuid']) self.assertEqual(result['members'], members) def test_save_metadata(self): values = self._get_default_values() db_result = self._create_instance_group(self.context, values) obj_result = instance_group.InstanceGroup.get_by_uuid(self.context, db_result.uuid) metadata = {'foo': 'bar'} obj_result.metadetails = metadata obj_result.save() metadata1 = db.instance_group_metadata_get(self.context, db_result['uuid']) for key, value in metadata.iteritems(): self.assertEqual(value, metadata[key]) def test_create(self): group1 = instance_group.InstanceGroup() group1.uuid = 'fake-uuid' group1.name = 'fake-name' group1.create(self.context) group2 = instance_group.InstanceGroup.get_by_uuid(self.context, group1.uuid) self.assertEqual(group1.id, group2.id) self.assertEqual(group1.uuid, group2.uuid) self.assertEqual(group1.name, group2.name) result = db.instance_group_get(self.context, group1.uuid) self.assertEqual(group1.id, result.id) self.assertEqual(group1.uuid, result.uuid) self.assertEqual(group1.name, result.name) def test_create_with_policies(self): group1 = instance_group.InstanceGroup() group1.policies = ['policy1', 'policy2'] group1.create(self.context) group2 = instance_group.InstanceGroup.get_by_uuid(self.context, group1.uuid) self.assertEqual(group1.id, group2.id) self.assertEqual(group1.policies, group2.policies) def test_create_with_members(self): group1 = instance_group.InstanceGroup() group1.members = ['instance1', 'instance2'] group1.create(self.context) group2 = instance_group.InstanceGroup.get_by_uuid(self.context, group1.uuid) self.assertEqual(group1.id, group2.id) self.assertEqual(group1.members, group2.members) def test_create_with_metadata(self): group1 = instance_group.InstanceGroup() metadata = {'foo': 'bar'} group1.metadetails = metadata group1.create(self.context) group2 = instance_group.InstanceGroup.get_by_uuid(self.context, group1.uuid) self.assertEqual(group1.id, group2.id) for key, value in metadata.iteritems(): self.assertEqual(value, group2.metadetails[key]) def test_recreate_fails(self): group = instance_group.InstanceGroup() group.create(self.context) self.assertRaises(exception.ObjectActionError, group.create, self.context) def test_destroy(self): values = self._get_default_values() result = self._create_instance_group(self.context, values) group = instance_group.InstanceGroup() group.id = result.id group.uuid = result.uuid group.destroy(self.context) self.assertRaises(exception.InstanceGroupNotFound, db.instance_group_get, self.context, result['uuid']) def _populate_instances(self): instances = [(str(uuid.uuid4()), 'f1', 'p1'), (str(uuid.uuid4()), 'f2', 'p1'), (str(uuid.uuid4()), 'f3', 'p2'), (str(uuid.uuid4()), 'f4', 'p2')] for instance in instances: values = self._get_default_values() values['uuid'] = instance[0] values['name'] = instance[1] values['project_id'] = instance[2] self._create_instance_group(self.context, values) return instances def test_list_all(self): self._populate_instances() inst_list = instance_group.InstanceGroupList.get_all(self.context) groups = db.instance_group_get_all(self.context) self.assertEqual(len(groups), len(inst_list.objects)) self.assertEqual(len(groups), 4) for i in range(0, len(groups)): self.assertIsInstance(inst_list.objects[i], instance_group.InstanceGroup) self.assertEqual(inst_list.objects[i].uuid, groups[i]['uuid']) def test_list_by_project_id(self): self._populate_instances() project_ids = ['p1', 'p2'] for id in project_ids: il = 
instance_group.InstanceGroupList.get_by_project_id( self.context, id) groups = db.instance_group_get_all_by_project_id(self.context, id) self.assertEqual(len(groups), len(il.objects)) self.assertEqual(len(groups), 2) for i in range(0, len(groups)): self.assertIsInstance(il.objects[i], instance_group.InstanceGroup) self.assertEqual(il.objects[i].uuid, groups[i]['uuid']) self.assertEqual(il.objects[i].name, groups[i]['name']) self.assertEqual(il.objects[i].project_id, id) def test_get_by_name(self): self._populate_instances() ctxt = context.RequestContext('fake_user', 'p1') ig = instance_group.InstanceGroup.get_by_name(ctxt, 'f1') self.assertEqual('f1', ig.name) def test_get_by_hint(self): instances = self._populate_instances() for instance in instances: ctxt = context.RequestContext('fake_user', instance[2]) ig = instance_group.InstanceGroup.get_by_hint(ctxt, instance[1]) self.assertEqual(instance[1], ig.name) ig = instance_group.InstanceGroup.get_by_hint(ctxt, instance[0]) self.assertEqual(instance[0], ig.uuid) def test_add_members(self): instance_ids = ['fakeid1', 'fakeid2'] values = self._get_default_values() group = self._create_instance_group(self.context, values) members = instance_group.InstanceGroup.add_members(self.context, group.uuid, instance_ids) group = instance_group.InstanceGroup.get_by_uuid(self.context, group.uuid) for instance in instance_ids: self.assertIn(instance, members) self.assertIn(instance, group.members) def test_get_hosts(self): instance1 = tests_utils.get_test_instance(self.context, flavor=flavors.get_default_flavor(), obj=True) instance1.host = 'hostA' instance1.save() instance2 = tests_utils.get_test_instance(self.context, flavor=flavors.get_default_flavor(), obj=True) instance2.host = 'hostB' instance2.save() instance3 = tests_utils.get_test_instance(self.context, flavor=flavors.get_default_flavor(), obj=True) instance3.host = 'hostB' instance3.save() instance_ids = [instance1.uuid, instance2.uuid, instance3.uuid] values = self._get_default_values() group = self._create_instance_group(self.context, values) instance_group.InstanceGroup.add_members(self.context, group.uuid, instance_ids) group = instance_group.InstanceGroup.get_by_uuid(self.context, group.uuid) hosts = group.get_hosts(self.context) self.assertEqual(2, len(hosts)) self.assertIn('hostA', hosts) self.assertIn('hostB', hosts) hosts = group.get_hosts(self.context, exclude=[instance1.uuid]) self.assertEqual(1, len(hosts)) self.assertIn('hostB', hosts) def test_get_hosts_with_some_none(self): instance1 = tests_utils.get_test_instance(self.context, flavor=flavors.get_default_flavor(), obj=True) instance1.host = None instance1.save() instance2 = tests_utils.get_test_instance(self.context, flavor=flavors.get_default_flavor(), obj=True) instance2.host = 'hostB' instance2.save() instance_ids = [instance1.uuid, instance2.uuid] values = self._get_default_values() group = self._create_instance_group(self.context, values) instance_group.InstanceGroup.add_members(self.context, group.uuid, instance_ids) group = instance_group.InstanceGroup.get_by_uuid(self.context, group.uuid) hosts = group.get_hosts(self.context) self.assertEqual(1, len(hosts)) self.assertIn('hostB', hosts) class TestInstanceGroupObject(test_objects._LocalTest, _TestInstanceGroupObjects): pass class TestRemoteInstanceGroupObject(test_objects._RemoteTest, _TestInstanceGroupObjects): pass
apache-2.0
mjames-upc/python-awips
dynamicserialize/dstypes/com/raytheon/uf/common/site/notify/SiteActivationNotification.py
1
1716
##
##

#
# SOFTWARE HISTORY
#
# Date            Ticket#       Engineer       Description
# ------------    ----------    -----------    --------------------------
# 09/10/14        #3623         randerso       Manually created, do not regenerate
#
##


class SiteActivationNotification(object):

    def __init__(self):
        self.type = None
        self.status = None
        self.primarySite = None
        self.modifiedSite = None
        self.runMode = None
        self.serverName = None
        self.pluginName = None

    def getType(self):
        return self.type

    def setType(self, type):
        self.type = type

    def getStatus(self):
        return self.status

    def setStatus(self, status):
        self.status = status

    def getPrimarySite(self):
        # use the attribute name assigned in __init__ (the original returned
        # self.primarysite, which is never set and raised AttributeError)
        return self.primarySite

    def setPrimarySite(self, primarySite):
        self.primarySite = primarySite

    def getModifiedSite(self):
        return self.modifiedSite

    def setModifiedSite(self, modifiedSite):
        self.modifiedSite = modifiedSite

    def getRunMode(self):
        return self.runMode

    def setRunMode(self, runMode):
        self.runMode = runMode

    def getServerName(self):
        return self.serverName

    def setServerName(self, serverName):
        self.serverName = serverName

    def getPluginName(self):
        return self.pluginName

    def setPluginName(self, pluginName):
        self.pluginName = pluginName

    def __str__(self):
        return self.pluginName.upper() + ":" \
            + self.status + ":" \
            + self.type + " " \
            + self.modifiedSite.upper() + " on " \
            + self.serverName + ":" \
            + self.runMode
bsd-3-clause
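Illustrative usage (all field values are made up) exercising the setters and the __str__ format defined above:

n = SiteActivationNotification()
n.setPluginName("gfe")
n.setStatus("SUCCESS")
n.setType("Activate")
n.setModifiedSite("oax")
n.setServerName("ec-oma")
n.setRunMode("OPERATIONAL")
print n   # -> GFE:SUCCESS:Activate OAX on ec-oma:OPERATIONAL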
Ratheronfire/YouTube-Playlist-Manager---Kodi
lib/requests/packages/chardet/euctwfreq.py
3133
34872
######################## BEGIN LICENSE BLOCK ######################## # The Original Code is Mozilla Communicator client code. # # The Initial Developer of the Original Code is # Netscape Communications Corporation. # Portions created by the Initial Developer are Copyright (C) 1998 # the Initial Developer. All Rights Reserved. # # Contributor(s): # Mark Pilgrim - port to Python # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### # EUCTW frequency table # Converted from big5 work # by Taiwan's Mandarin Promotion Council # <http:#www.edu.tw:81/mandr/> # 128 --> 0.42261 # 256 --> 0.57851 # 512 --> 0.74851 # 1024 --> 0.89384 # 2048 --> 0.97583 # # Idea Distribution Ratio = 0.74851/(1-0.74851) =2.98 # Random Distribution Ration = 512/(5401-512)=0.105 # # Typical Distribution Ratio about 25% of Ideal one, still much higher than RDR EUCTW_TYPICAL_DISTRIBUTION_RATIO = 0.75 # Char to FreqOrder table , EUCTW_TABLE_SIZE = 8102 EUCTWCharToFreqOrder = ( 1,1800,1506, 255,1431, 198, 9, 82, 6,7310, 177, 202,3615,1256,2808, 110, # 2742 3735, 33,3241, 261, 76, 44,2113, 16,2931,2184,1176, 659,3868, 26,3404,2643, # 2758 1198,3869,3313,4060, 410,2211, 302, 590, 361,1963, 8, 204, 58,4296,7311,1931, # 2774 63,7312,7313, 317,1614, 75, 222, 159,4061,2412,1480,7314,3500,3068, 224,2809, # 2790 3616, 3, 10,3870,1471, 29,2774,1135,2852,1939, 873, 130,3242,1123, 312,7315, # 2806 4297,2051, 507, 252, 682,7316, 142,1914, 124, 206,2932, 34,3501,3173, 64, 604, # 2822 7317,2494,1976,1977, 155,1990, 645, 641,1606,7318,3405, 337, 72, 406,7319, 80, # 2838 630, 238,3174,1509, 263, 939,1092,2644, 756,1440,1094,3406, 449, 69,2969, 591, # 2854 179,2095, 471, 115,2034,1843, 60, 50,2970, 134, 806,1868, 734,2035,3407, 180, # 2870 995,1607, 156, 537,2893, 688,7320, 319,1305, 779,2144, 514,2374, 298,4298, 359, # 2886 2495, 90,2707,1338, 663, 11, 906,1099,2545, 20,2436, 182, 532,1716,7321, 732, # 2902 1376,4062,1311,1420,3175, 25,2312,1056, 113, 399, 382,1949, 242,3408,2467, 529, # 2918 3243, 475,1447,3617,7322, 117, 21, 656, 810,1297,2295,2329,3502,7323, 126,4063, # 2934 706, 456, 150, 613,4299, 71,1118,2036,4064, 145,3069, 85, 835, 486,2114,1246, # 2950 1426, 428, 727,1285,1015, 800, 106, 623, 303,1281,7324,2127,2354, 347,3736, 221, # 2966 3503,3110,7325,1955,1153,4065, 83, 296,1199,3070, 192, 624, 93,7326, 822,1897, # 2982 2810,3111, 795,2064, 991,1554,1542,1592, 27, 43,2853, 859, 139,1456, 860,4300, # 2998 437, 712,3871, 164,2392,3112, 695, 211,3017,2096, 195,3872,1608,3504,3505,3618, # 3014 3873, 234, 811,2971,2097,3874,2229,1441,3506,1615,2375, 668,2076,1638, 305, 228, # 3030 1664,4301, 467, 415,7327, 262,2098,1593, 239, 108, 300, 200,1033, 512,1247,2077, # 3046 7328,7329,2173,3176,3619,2673, 593, 845,1062,3244, 88,1723,2037,3875,1950, 212, # 3062 266, 152, 149, 468,1898,4066,4302, 77, 187,7330,3018, 
37, 5,2972,7331,3876, # 3078 7332,7333, 39,2517,4303,2894,3177,2078, 55, 148, 74,4304, 545, 483,1474,1029, # 3094 1665, 217,1869,1531,3113,1104,2645,4067, 24, 172,3507, 900,3877,3508,3509,4305, # 3110 32,1408,2811,1312, 329, 487,2355,2247,2708, 784,2674, 4,3019,3314,1427,1788, # 3126 188, 109, 499,7334,3620,1717,1789, 888,1217,3020,4306,7335,3510,7336,3315,1520, # 3142 3621,3878, 196,1034, 775,7337,7338, 929,1815, 249, 439, 38,7339,1063,7340, 794, # 3158 3879,1435,2296, 46, 178,3245,2065,7341,2376,7342, 214,1709,4307, 804, 35, 707, # 3174 324,3622,1601,2546, 140, 459,4068,7343,7344,1365, 839, 272, 978,2257,2572,3409, # 3190 2128,1363,3623,1423, 697, 100,3071, 48, 70,1231, 495,3114,2193,7345,1294,7346, # 3206 2079, 462, 586,1042,3246, 853, 256, 988, 185,2377,3410,1698, 434,1084,7347,3411, # 3222 314,2615,2775,4308,2330,2331, 569,2280, 637,1816,2518, 757,1162,1878,1616,3412, # 3238 287,1577,2115, 768,4309,1671,2854,3511,2519,1321,3737, 909,2413,7348,4069, 933, # 3254 3738,7349,2052,2356,1222,4310, 765,2414,1322, 786,4311,7350,1919,1462,1677,2895, # 3270 1699,7351,4312,1424,2437,3115,3624,2590,3316,1774,1940,3413,3880,4070, 309,1369, # 3286 1130,2812, 364,2230,1653,1299,3881,3512,3882,3883,2646, 525,1085,3021, 902,2000, # 3302 1475, 964,4313, 421,1844,1415,1057,2281, 940,1364,3116, 376,4314,4315,1381, 7, # 3318 2520, 983,2378, 336,1710,2675,1845, 321,3414, 559,1131,3022,2742,1808,1132,1313, # 3334 265,1481,1857,7352, 352,1203,2813,3247, 167,1089, 420,2814, 776, 792,1724,3513, # 3350 4071,2438,3248,7353,4072,7354, 446, 229, 333,2743, 901,3739,1200,1557,4316,2647, # 3366 1920, 395,2744,2676,3740,4073,1835, 125, 916,3178,2616,4317,7355,7356,3741,7357, # 3382 7358,7359,4318,3117,3625,1133,2547,1757,3415,1510,2313,1409,3514,7360,2145, 438, # 3398 2591,2896,2379,3317,1068, 958,3023, 461, 311,2855,2677,4074,1915,3179,4075,1978, # 3414 383, 750,2745,2617,4076, 274, 539, 385,1278,1442,7361,1154,1964, 384, 561, 210, # 3430 98,1295,2548,3515,7362,1711,2415,1482,3416,3884,2897,1257, 129,7363,3742, 642, # 3446 523,2776,2777,2648,7364, 141,2231,1333, 68, 176, 441, 876, 907,4077, 603,2592, # 3462 710, 171,3417, 404, 549, 18,3118,2393,1410,3626,1666,7365,3516,4319,2898,4320, # 3478 7366,2973, 368,7367, 146, 366, 99, 871,3627,1543, 748, 807,1586,1185, 22,2258, # 3494 379,3743,3180,7368,3181, 505,1941,2618,1991,1382,2314,7369, 380,2357, 218, 702, # 3510 1817,1248,3418,3024,3517,3318,3249,7370,2974,3628, 930,3250,3744,7371, 59,7372, # 3526 585, 601,4078, 497,3419,1112,1314,4321,1801,7373,1223,1472,2174,7374, 749,1836, # 3542 690,1899,3745,1772,3885,1476, 429,1043,1790,2232,2116, 917,4079, 447,1086,1629, # 3558 7375, 556,7376,7377,2020,1654, 844,1090, 105, 550, 966,1758,2815,1008,1782, 686, # 3574 1095,7378,2282, 793,1602,7379,3518,2593,4322,4080,2933,2297,4323,3746, 980,2496, # 3590 544, 353, 527,4324, 908,2678,2899,7380, 381,2619,1942,1348,7381,1341,1252, 560, # 3606 3072,7382,3420,2856,7383,2053, 973, 886,2080, 143,4325,7384,7385, 157,3886, 496, # 3622 4081, 57, 840, 540,2038,4326,4327,3421,2117,1445, 970,2259,1748,1965,2081,4082, # 3638 3119,1234,1775,3251,2816,3629, 773,1206,2129,1066,2039,1326,3887,1738,1725,4083, # 3654 279,3120, 51,1544,2594, 423,1578,2130,2066, 173,4328,1879,7386,7387,1583, 264, # 3670 610,3630,4329,2439, 280, 154,7388,7389,7390,1739, 338,1282,3073, 693,2857,1411, # 3686 1074,3747,2440,7391,4330,7392,7393,1240, 952,2394,7394,2900,1538,2679, 685,1483, # 3702 4084,2468,1436, 953,4085,2054,4331, 671,2395, 79,4086,2441,3252, 608, 567,2680, # 3718 3422,4087,4088,1691, 
393,1261,1791,2396,7395,4332,7396,7397,7398,7399,1383,1672, # 3734 3748,3182,1464, 522,1119, 661,1150, 216, 675,4333,3888,1432,3519, 609,4334,2681, # 3750 2397,7400,7401,7402,4089,3025, 0,7403,2469, 315, 231,2442, 301,3319,4335,2380, # 3766 7404, 233,4090,3631,1818,4336,4337,7405, 96,1776,1315,2082,7406, 257,7407,1809, # 3782 3632,2709,1139,1819,4091,2021,1124,2163,2778,1777,2649,7408,3074, 363,1655,3183, # 3798 7409,2975,7410,7411,7412,3889,1567,3890, 718, 103,3184, 849,1443, 341,3320,2934, # 3814 1484,7413,1712, 127, 67, 339,4092,2398, 679,1412, 821,7414,7415, 834, 738, 351, # 3830 2976,2146, 846, 235,1497,1880, 418,1992,3749,2710, 186,1100,2147,2746,3520,1545, # 3846 1355,2935,2858,1377, 583,3891,4093,2573,2977,7416,1298,3633,1078,2549,3634,2358, # 3862 78,3750,3751, 267,1289,2099,2001,1594,4094, 348, 369,1274,2194,2175,1837,4338, # 3878 1820,2817,3635,2747,2283,2002,4339,2936,2748, 144,3321, 882,4340,3892,2749,3423, # 3894 4341,2901,7417,4095,1726, 320,7418,3893,3026, 788,2978,7419,2818,1773,1327,2859, # 3910 3894,2819,7420,1306,4342,2003,1700,3752,3521,2359,2650, 787,2022, 506, 824,3636, # 3926 534, 323,4343,1044,3322,2023,1900, 946,3424,7421,1778,1500,1678,7422,1881,4344, # 3942 165, 243,4345,3637,2521, 123, 683,4096, 764,4346, 36,3895,1792, 589,2902, 816, # 3958 626,1667,3027,2233,1639,1555,1622,3753,3896,7423,3897,2860,1370,1228,1932, 891, # 3974 2083,2903, 304,4097,7424, 292,2979,2711,3522, 691,2100,4098,1115,4347, 118, 662, # 3990 7425, 611,1156, 854,2381,1316,2861, 2, 386, 515,2904,7426,7427,3253, 868,2234, # 4006 1486, 855,2651, 785,2212,3028,7428,1040,3185,3523,7429,3121, 448,7430,1525,7431, # 4022 2164,4348,7432,3754,7433,4099,2820,3524,3122, 503, 818,3898,3123,1568, 814, 676, # 4038 1444, 306,1749,7434,3755,1416,1030, 197,1428, 805,2821,1501,4349,7435,7436,7437, # 4054 1993,7438,4350,7439,7440,2195, 13,2779,3638,2980,3124,1229,1916,7441,3756,2131, # 4070 7442,4100,4351,2399,3525,7443,2213,1511,1727,1120,7444,7445, 646,3757,2443, 307, # 4086 7446,7447,1595,3186,7448,7449,7450,3639,1113,1356,3899,1465,2522,2523,7451, 519, # 4102 7452, 128,2132, 92,2284,1979,7453,3900,1512, 342,3125,2196,7454,2780,2214,1980, # 4118 3323,7455, 290,1656,1317, 789, 827,2360,7456,3758,4352, 562, 581,3901,7457, 401, # 4134 4353,2248, 94,4354,1399,2781,7458,1463,2024,4355,3187,1943,7459, 828,1105,4101, # 4150 1262,1394,7460,4102, 605,4356,7461,1783,2862,7462,2822, 819,2101, 578,2197,2937, # 4166 7463,1502, 436,3254,4103,3255,2823,3902,2905,3425,3426,7464,2712,2315,7465,7466, # 4182 2332,2067, 23,4357, 193, 826,3759,2102, 699,1630,4104,3075, 390,1793,1064,3526, # 4198 7467,1579,3076,3077,1400,7468,4105,1838,1640,2863,7469,4358,4359, 137,4106, 598, # 4214 3078,1966, 780, 104, 974,2938,7470, 278, 899, 253, 402, 572, 504, 493,1339,7471, # 4230 3903,1275,4360,2574,2550,7472,3640,3029,3079,2249, 565,1334,2713, 863, 41,7473, # 4246 7474,4361,7475,1657,2333, 19, 463,2750,4107, 606,7476,2981,3256,1087,2084,1323, # 4262 2652,2982,7477,1631,1623,1750,4108,2682,7478,2864, 791,2714,2653,2334, 232,2416, # 4278 7479,2983,1498,7480,2654,2620, 755,1366,3641,3257,3126,2025,1609, 119,1917,3427, # 4294 862,1026,4109,7481,3904,3760,4362,3905,4363,2260,1951,2470,7482,1125, 817,4110, # 4310 4111,3906,1513,1766,2040,1487,4112,3030,3258,2824,3761,3127,7483,7484,1507,7485, # 4326 2683, 733, 40,1632,1106,2865, 345,4113, 841,2524, 230,4364,2984,1846,3259,3428, # 4342 7486,1263, 986,3429,7487, 735, 879, 254,1137, 857, 622,1300,1180,1388,1562,3907, # 4358 3908,2939, 967,2751,2655,1349, 
592,2133,1692,3324,2985,1994,4114,1679,3909,1901, # 4374 2185,7488, 739,3642,2715,1296,1290,7489,4115,2198,2199,1921,1563,2595,2551,1870, # 4390 2752,2986,7490, 435,7491, 343,1108, 596, 17,1751,4365,2235,3430,3643,7492,4366, # 4406 294,3527,2940,1693, 477, 979, 281,2041,3528, 643,2042,3644,2621,2782,2261,1031, # 4422 2335,2134,2298,3529,4367, 367,1249,2552,7493,3530,7494,4368,1283,3325,2004, 240, # 4438 1762,3326,4369,4370, 836,1069,3128, 474,7495,2148,2525, 268,3531,7496,3188,1521, # 4454 1284,7497,1658,1546,4116,7498,3532,3533,7499,4117,3327,2684,1685,4118, 961,1673, # 4470 2622, 190,2005,2200,3762,4371,4372,7500, 570,2497,3645,1490,7501,4373,2623,3260, # 4486 1956,4374, 584,1514, 396,1045,1944,7502,4375,1967,2444,7503,7504,4376,3910, 619, # 4502 7505,3129,3261, 215,2006,2783,2553,3189,4377,3190,4378, 763,4119,3763,4379,7506, # 4518 7507,1957,1767,2941,3328,3646,1174, 452,1477,4380,3329,3130,7508,2825,1253,2382, # 4534 2186,1091,2285,4120, 492,7509, 638,1169,1824,2135,1752,3911, 648, 926,1021,1324, # 4550 4381, 520,4382, 997, 847,1007, 892,4383,3764,2262,1871,3647,7510,2400,1784,4384, # 4566 1952,2942,3080,3191,1728,4121,2043,3648,4385,2007,1701,3131,1551, 30,2263,4122, # 4582 7511,2026,4386,3534,7512, 501,7513,4123, 594,3431,2165,1821,3535,3432,3536,3192, # 4598 829,2826,4124,7514,1680,3132,1225,4125,7515,3262,4387,4126,3133,2336,7516,4388, # 4614 4127,7517,3912,3913,7518,1847,2383,2596,3330,7519,4389, 374,3914, 652,4128,4129, # 4630 375,1140, 798,7520,7521,7522,2361,4390,2264, 546,1659, 138,3031,2445,4391,7523, # 4646 2250, 612,1848, 910, 796,3765,1740,1371, 825,3766,3767,7524,2906,2554,7525, 692, # 4662 444,3032,2624, 801,4392,4130,7526,1491, 244,1053,3033,4131,4132, 340,7527,3915, # 4678 1041,2987, 293,1168, 87,1357,7528,1539, 959,7529,2236, 721, 694,4133,3768, 219, # 4694 1478, 644,1417,3331,2656,1413,1401,1335,1389,3916,7530,7531,2988,2362,3134,1825, # 4710 730,1515, 184,2827, 66,4393,7532,1660,2943, 246,3332, 378,1457, 226,3433, 975, # 4726 3917,2944,1264,3537, 674, 696,7533, 163,7534,1141,2417,2166, 713,3538,3333,4394, # 4742 3918,7535,7536,1186, 15,7537,1079,1070,7538,1522,3193,3539, 276,1050,2716, 758, # 4758 1126, 653,2945,3263,7539,2337, 889,3540,3919,3081,2989, 903,1250,4395,3920,3434, # 4774 3541,1342,1681,1718, 766,3264, 286, 89,2946,3649,7540,1713,7541,2597,3334,2990, # 4790 7542,2947,2215,3194,2866,7543,4396,2498,2526, 181, 387,1075,3921, 731,2187,3335, # 4806 7544,3265, 310, 313,3435,2299, 770,4134, 54,3034, 189,4397,3082,3769,3922,7545, # 4822 1230,1617,1849, 355,3542,4135,4398,3336, 111,4136,3650,1350,3135,3436,3035,4137, # 4838 2149,3266,3543,7546,2784,3923,3924,2991, 722,2008,7547,1071, 247,1207,2338,2471, # 4854 1378,4399,2009, 864,1437,1214,4400, 373,3770,1142,2216, 667,4401, 442,2753,2555, # 4870 3771,3925,1968,4138,3267,1839, 837, 170,1107, 934,1336,1882,7548,7549,2118,4139, # 4886 2828, 743,1569,7550,4402,4140, 582,2384,1418,3437,7551,1802,7552, 357,1395,1729, # 4902 3651,3268,2418,1564,2237,7553,3083,3772,1633,4403,1114,2085,4141,1532,7554, 482, # 4918 2446,4404,7555,7556,1492, 833,1466,7557,2717,3544,1641,2829,7558,1526,1272,3652, # 4934 4142,1686,1794, 416,2556,1902,1953,1803,7559,3773,2785,3774,1159,2316,7560,2867, # 4950 4405,1610,1584,3036,2419,2754, 443,3269,1163,3136,7561,7562,3926,7563,4143,2499, # 4966 3037,4406,3927,3137,2103,1647,3545,2010,1872,4144,7564,4145, 431,3438,7565, 250, # 4982 97, 81,4146,7566,1648,1850,1558, 160, 848,7567, 866, 740,1694,7568,2201,2830, # 4998 3195,4147,4407,3653,1687, 950,2472, 426, 469,3196,3654,3655,3928,7569,7570,1188, 
# 5014 424,1995, 861,3546,4148,3775,2202,2685, 168,1235,3547,4149,7571,2086,1674,4408, # 5030 3337,3270, 220,2557,1009,7572,3776, 670,2992, 332,1208, 717,7573,7574,3548,2447, # 5046 3929,3338,7575, 513,7576,1209,2868,3339,3138,4409,1080,7577,7578,7579,7580,2527, # 5062 3656,3549, 815,1587,3930,3931,7581,3550,3439,3777,1254,4410,1328,3038,1390,3932, # 5078 1741,3933,3778,3934,7582, 236,3779,2448,3271,7583,7584,3657,3780,1273,3781,4411, # 5094 7585, 308,7586,4412, 245,4413,1851,2473,1307,2575, 430, 715,2136,2449,7587, 270, # 5110 199,2869,3935,7588,3551,2718,1753, 761,1754, 725,1661,1840,4414,3440,3658,7589, # 5126 7590, 587, 14,3272, 227,2598, 326, 480,2265, 943,2755,3552, 291, 650,1883,7591, # 5142 1702,1226, 102,1547, 62,3441, 904,4415,3442,1164,4150,7592,7593,1224,1548,2756, # 5158 391, 498,1493,7594,1386,1419,7595,2055,1177,4416, 813, 880,1081,2363, 566,1145, # 5174 4417,2286,1001,1035,2558,2599,2238, 394,1286,7596,7597,2068,7598, 86,1494,1730, # 5190 3936, 491,1588, 745, 897,2948, 843,3340,3937,2757,2870,3273,1768, 998,2217,2069, # 5206 397,1826,1195,1969,3659,2993,3341, 284,7599,3782,2500,2137,2119,1903,7600,3938, # 5222 2150,3939,4151,1036,3443,1904, 114,2559,4152, 209,1527,7601,7602,2949,2831,2625, # 5238 2385,2719,3139, 812,2560,7603,3274,7604,1559, 737,1884,3660,1210, 885, 28,2686, # 5254 3553,3783,7605,4153,1004,1779,4418,7606, 346,1981,2218,2687,4419,3784,1742, 797, # 5270 1642,3940,1933,1072,1384,2151, 896,3941,3275,3661,3197,2871,3554,7607,2561,1958, # 5286 4420,2450,1785,7608,7609,7610,3942,4154,1005,1308,3662,4155,2720,4421,4422,1528, # 5302 2600, 161,1178,4156,1982, 987,4423,1101,4157, 631,3943,1157,3198,2420,1343,1241, # 5318 1016,2239,2562, 372, 877,2339,2501,1160, 555,1934, 911,3944,7611, 466,1170, 169, # 5334 1051,2907,2688,3663,2474,2994,1182,2011,2563,1251,2626,7612, 992,2340,3444,1540, # 5350 2721,1201,2070,2401,1996,2475,7613,4424, 528,1922,2188,1503,1873,1570,2364,3342, # 5366 3276,7614, 557,1073,7615,1827,3445,2087,2266,3140,3039,3084, 767,3085,2786,4425, # 5382 1006,4158,4426,2341,1267,2176,3664,3199, 778,3945,3200,2722,1597,2657,7616,4427, # 5398 7617,3446,7618,7619,7620,3277,2689,1433,3278, 131, 95,1504,3946, 723,4159,3141, # 5414 1841,3555,2758,2189,3947,2027,2104,3665,7621,2995,3948,1218,7622,3343,3201,3949, # 5430 4160,2576, 248,1634,3785, 912,7623,2832,3666,3040,3786, 654, 53,7624,2996,7625, # 5446 1688,4428, 777,3447,1032,3950,1425,7626, 191, 820,2120,2833, 971,4429, 931,3202, # 5462 135, 664, 783,3787,1997, 772,2908,1935,3951,3788,4430,2909,3203, 282,2723, 640, # 5478 1372,3448,1127, 922, 325,3344,7627,7628, 711,2044,7629,7630,3952,2219,2787,1936, # 5494 3953,3345,2220,2251,3789,2300,7631,4431,3790,1258,3279,3954,3204,2138,2950,3955, # 5510 3956,7632,2221, 258,3205,4432, 101,1227,7633,3280,1755,7634,1391,3281,7635,2910, # 5526 2056, 893,7636,7637,7638,1402,4161,2342,7639,7640,3206,3556,7641,7642, 878,1325, # 5542 1780,2788,4433, 259,1385,2577, 744,1183,2267,4434,7643,3957,2502,7644, 684,1024, # 5558 4162,7645, 472,3557,3449,1165,3282,3958,3959, 322,2152, 881, 455,1695,1152,1340, # 5574 660, 554,2153,4435,1058,4436,4163, 830,1065,3346,3960,4437,1923,7646,1703,1918, # 5590 7647, 932,2268, 122,7648,4438, 947, 677,7649,3791,2627, 297,1905,1924,2269,4439, # 5606 2317,3283,7650,7651,4164,7652,4165, 84,4166, 112, 989,7653, 547,1059,3961, 701, # 5622 3558,1019,7654,4167,7655,3450, 942, 639, 457,2301,2451, 993,2951, 407, 851, 494, # 5638 4440,3347, 927,7656,1237,7657,2421,3348, 573,4168, 680, 921,2911,1279,1874, 285, # 5654 790,1448,1983, 
719,2167,7658,7659,4441,3962,3963,1649,7660,1541, 563,7661,1077, # 5670 7662,3349,3041,3451, 511,2997,3964,3965,3667,3966,1268,2564,3350,3207,4442,4443, # 5686 7663, 535,1048,1276,1189,2912,2028,3142,1438,1373,2834,2952,1134,2012,7664,4169, # 5702 1238,2578,3086,1259,7665, 700,7666,2953,3143,3668,4170,7667,4171,1146,1875,1906, # 5718 4444,2601,3967, 781,2422, 132,1589, 203, 147, 273,2789,2402, 898,1786,2154,3968, # 5734 3969,7668,3792,2790,7669,7670,4445,4446,7671,3208,7672,1635,3793, 965,7673,1804, # 5750 2690,1516,3559,1121,1082,1329,3284,3970,1449,3794, 65,1128,2835,2913,2759,1590, # 5766 3795,7674,7675, 12,2658, 45, 976,2579,3144,4447, 517,2528,1013,1037,3209,7676, # 5782 3796,2836,7677,3797,7678,3452,7679,2602, 614,1998,2318,3798,3087,2724,2628,7680, # 5798 2580,4172, 599,1269,7681,1810,3669,7682,2691,3088, 759,1060, 489,1805,3351,3285, # 5814 1358,7683,7684,2386,1387,1215,2629,2252, 490,7685,7686,4173,1759,2387,2343,7687, # 5830 4448,3799,1907,3971,2630,1806,3210,4449,3453,3286,2760,2344, 874,7688,7689,3454, # 5846 3670,1858, 91,2914,3671,3042,3800,4450,7690,3145,3972,2659,7691,3455,1202,1403, # 5862 3801,2954,2529,1517,2503,4451,3456,2504,7692,4452,7693,2692,1885,1495,1731,3973, # 5878 2365,4453,7694,2029,7695,7696,3974,2693,1216, 237,2581,4174,2319,3975,3802,4454, # 5894 4455,2694,3560,3457, 445,4456,7697,7698,7699,7700,2761, 61,3976,3672,1822,3977, # 5910 7701, 687,2045, 935, 925, 405,2660, 703,1096,1859,2725,4457,3978,1876,1367,2695, # 5926 3352, 918,2105,1781,2476, 334,3287,1611,1093,4458, 564,3146,3458,3673,3353, 945, # 5942 2631,2057,4459,7702,1925, 872,4175,7703,3459,2696,3089, 349,4176,3674,3979,4460, # 5958 3803,4177,3675,2155,3980,4461,4462,4178,4463,2403,2046, 782,3981, 400, 251,4179, # 5974 1624,7704,7705, 277,3676, 299,1265, 476,1191,3804,2121,4180,4181,1109, 205,7706, # 5990 2582,1000,2156,3561,1860,7707,7708,7709,4464,7710,4465,2565, 107,2477,2157,3982, # 6006 3460,3147,7711,1533, 541,1301, 158, 753,4182,2872,3562,7712,1696, 370,1088,4183, # 6022 4466,3563, 579, 327, 440, 162,2240, 269,1937,1374,3461, 968,3043, 56,1396,3090, # 6038 2106,3288,3354,7713,1926,2158,4467,2998,7714,3564,7715,7716,3677,4468,2478,7717, # 6054 2791,7718,1650,4469,7719,2603,7720,7721,3983,2661,3355,1149,3356,3984,3805,3985, # 6070 7722,1076, 49,7723, 951,3211,3289,3290, 450,2837, 920,7724,1811,2792,2366,4184, # 6086 1908,1138,2367,3806,3462,7725,3212,4470,1909,1147,1518,2423,4471,3807,7726,4472, # 6102 2388,2604, 260,1795,3213,7727,7728,3808,3291, 708,7729,3565,1704,7730,3566,1351, # 6118 1618,3357,2999,1886, 944,4185,3358,4186,3044,3359,4187,7731,3678, 422, 413,1714, # 6134 3292, 500,2058,2345,4188,2479,7732,1344,1910, 954,7733,1668,7734,7735,3986,2404, # 6150 4189,3567,3809,4190,7736,2302,1318,2505,3091, 133,3092,2873,4473, 629, 31,2838, # 6166 2697,3810,4474, 850, 949,4475,3987,2955,1732,2088,4191,1496,1852,7737,3988, 620, # 6182 3214, 981,1242,3679,3360,1619,3680,1643,3293,2139,2452,1970,1719,3463,2168,7738, # 6198 3215,7739,7740,3361,1828,7741,1277,4476,1565,2047,7742,1636,3568,3093,7743, 869, # 6214 2839, 655,3811,3812,3094,3989,3000,3813,1310,3569,4477,7744,7745,7746,1733, 558, # 6230 4478,3681, 335,1549,3045,1756,4192,3682,1945,3464,1829,1291,1192, 470,2726,2107, # 6246 2793, 913,1054,3990,7747,1027,7748,3046,3991,4479, 982,2662,3362,3148,3465,3216, # 6262 3217,1946,2794,7749, 571,4480,7750,1830,7751,3570,2583,1523,2424,7752,2089, 984, # 6278 4481,3683,1959,7753,3684, 852, 923,2795,3466,3685, 969,1519, 999,2048,2320,1705, # 6294 7754,3095, 615,1662, 151, 597,3992,2405,2321,1049, 
275,4482,3686,4193, 568,3687, # 6310 3571,2480,4194,3688,7755,2425,2270, 409,3218,7756,1566,2874,3467,1002, 769,2840, # 6326 194,2090,3149,3689,2222,3294,4195, 628,1505,7757,7758,1763,2177,3001,3993, 521, # 6342 1161,2584,1787,2203,2406,4483,3994,1625,4196,4197, 412, 42,3096, 464,7759,2632, # 6358 4484,3363,1760,1571,2875,3468,2530,1219,2204,3814,2633,2140,2368,4485,4486,3295, # 6374 1651,3364,3572,7760,7761,3573,2481,3469,7762,3690,7763,7764,2271,2091, 460,7765, # 6390 4487,7766,3002, 962, 588,3574, 289,3219,2634,1116, 52,7767,3047,1796,7768,7769, # 6406 7770,1467,7771,1598,1143,3691,4198,1984,1734,1067,4488,1280,3365, 465,4489,1572, # 6422 510,7772,1927,2241,1812,1644,3575,7773,4490,3692,7774,7775,2663,1573,1534,7776, # 6438 7777,4199, 536,1807,1761,3470,3815,3150,2635,7778,7779,7780,4491,3471,2915,1911, # 6454 2796,7781,3296,1122, 377,3220,7782, 360,7783,7784,4200,1529, 551,7785,2059,3693, # 6470 1769,2426,7786,2916,4201,3297,3097,2322,2108,2030,4492,1404, 136,1468,1479, 672, # 6486 1171,3221,2303, 271,3151,7787,2762,7788,2049, 678,2727, 865,1947,4493,7789,2013, # 6502 3995,2956,7790,2728,2223,1397,3048,3694,4494,4495,1735,2917,3366,3576,7791,3816, # 6518 509,2841,2453,2876,3817,7792,7793,3152,3153,4496,4202,2531,4497,2304,1166,1010, # 6534 552, 681,1887,7794,7795,2957,2958,3996,1287,1596,1861,3154, 358, 453, 736, 175, # 6550 478,1117, 905,1167,1097,7796,1853,1530,7797,1706,7798,2178,3472,2287,3695,3473, # 6566 3577,4203,2092,4204,7799,3367,1193,2482,4205,1458,2190,2205,1862,1888,1421,3298, # 6582 2918,3049,2179,3474, 595,2122,7800,3997,7801,7802,4206,1707,2636, 223,3696,1359, # 6598 751,3098, 183,3475,7803,2797,3003, 419,2369, 633, 704,3818,2389, 241,7804,7805, # 6614 7806, 838,3004,3697,2272,2763,2454,3819,1938,2050,3998,1309,3099,2242,1181,7807, # 6630 1136,2206,3820,2370,1446,4207,2305,4498,7808,7809,4208,1055,2605, 484,3698,7810, # 6646 3999, 625,4209,2273,3368,1499,4210,4000,7811,4001,4211,3222,2274,2275,3476,7812, # 6662 7813,2764, 808,2606,3699,3369,4002,4212,3100,2532, 526,3370,3821,4213, 955,7814, # 6678 1620,4214,2637,2427,7815,1429,3700,1669,1831, 994, 928,7816,3578,1260,7817,7818, # 6694 7819,1948,2288, 741,2919,1626,4215,2729,2455, 867,1184, 362,3371,1392,7820,7821, # 6710 4003,4216,1770,1736,3223,2920,4499,4500,1928,2698,1459,1158,7822,3050,3372,2877, # 6726 1292,1929,2506,2842,3701,1985,1187,2071,2014,2607,4217,7823,2566,2507,2169,3702, # 6742 2483,3299,7824,3703,4501,7825,7826, 666,1003,3005,1022,3579,4218,7827,4502,1813, # 6758 2253, 574,3822,1603, 295,1535, 705,3823,4219, 283, 858, 417,7828,7829,3224,4503, # 6774 4504,3051,1220,1889,1046,2276,2456,4004,1393,1599, 689,2567, 388,4220,7830,2484, # 6790 802,7831,2798,3824,2060,1405,2254,7832,4505,3825,2109,1052,1345,3225,1585,7833, # 6806 809,7834,7835,7836, 575,2730,3477, 956,1552,1469,1144,2323,7837,2324,1560,2457, # 6822 3580,3226,4005, 616,2207,3155,2180,2289,7838,1832,7839,3478,4506,7840,1319,3704, # 6838 3705,1211,3581,1023,3227,1293,2799,7841,7842,7843,3826, 607,2306,3827, 762,2878, # 6854 1439,4221,1360,7844,1485,3052,7845,4507,1038,4222,1450,2061,2638,4223,1379,4508, # 6870 2585,7846,7847,4224,1352,1414,2325,2921,1172,7848,7849,3828,3829,7850,1797,1451, # 6886 7851,7852,7853,7854,2922,4006,4007,2485,2346, 411,4008,4009,3582,3300,3101,4509, # 6902 1561,2664,1452,4010,1375,7855,7856, 47,2959, 316,7857,1406,1591,2923,3156,7858, # 6918 1025,2141,3102,3157, 354,2731, 884,2224,4225,2407, 508,3706, 726,3583, 996,2428, # 6934 3584, 729,7859, 392,2191,1453,4011,4510,3707,7860,7861,2458,3585,2608,1675,2800, # 6950 
919,2347,2960,2348,1270,4511,4012, 73,7862,7863, 647,7864,3228,2843,2255,1550, # 6966 1346,3006,7865,1332, 883,3479,7866,7867,7868,7869,3301,2765,7870,1212, 831,1347, # 6982 4226,4512,2326,3830,1863,3053, 720,3831,4513,4514,3832,7871,4227,7872,7873,4515, # 6998 7874,7875,1798,4516,3708,2609,4517,3586,1645,2371,7876,7877,2924, 669,2208,2665, # 7014 2429,7878,2879,7879,7880,1028,3229,7881,4228,2408,7882,2256,1353,7883,7884,4518, # 7030 3158, 518,7885,4013,7886,4229,1960,7887,2142,4230,7888,7889,3007,2349,2350,3833, # 7046 516,1833,1454,4014,2699,4231,4519,2225,2610,1971,1129,3587,7890,2766,7891,2961, # 7062 1422, 577,1470,3008,1524,3373,7892,7893, 432,4232,3054,3480,7894,2586,1455,2508, # 7078 2226,1972,1175,7895,1020,2732,4015,3481,4520,7896,2733,7897,1743,1361,3055,3482, # 7094 2639,4016,4233,4521,2290, 895, 924,4234,2170, 331,2243,3056, 166,1627,3057,1098, # 7110 7898,1232,2880,2227,3374,4522, 657, 403,1196,2372, 542,3709,3375,1600,4235,3483, # 7126 7899,4523,2767,3230, 576, 530,1362,7900,4524,2533,2666,3710,4017,7901, 842,3834, # 7142 7902,2801,2031,1014,4018, 213,2700,3376, 665, 621,4236,7903,3711,2925,2430,7904, # 7158 2431,3302,3588,3377,7905,4237,2534,4238,4525,3589,1682,4239,3484,1380,7906, 724, # 7174 2277, 600,1670,7907,1337,1233,4526,3103,2244,7908,1621,4527,7909, 651,4240,7910, # 7190 1612,4241,2611,7911,2844,7912,2734,2307,3058,7913, 716,2459,3059, 174,1255,2701, # 7206 4019,3590, 548,1320,1398, 728,4020,1574,7914,1890,1197,3060,4021,7915,3061,3062, # 7222 3712,3591,3713, 747,7916, 635,4242,4528,7917,7918,7919,4243,7920,7921,4529,7922, # 7238 3378,4530,2432, 451,7923,3714,2535,2072,4244,2735,4245,4022,7924,1764,4531,7925, # 7254 4246, 350,7926,2278,2390,2486,7927,4247,4023,2245,1434,4024, 488,4532, 458,4248, # 7270 4025,3715, 771,1330,2391,3835,2568,3159,2159,2409,1553,2667,3160,4249,7928,2487, # 7286 2881,2612,1720,2702,4250,3379,4533,7929,2536,4251,7930,3231,4252,2768,7931,2015, # 7302 2736,7932,1155,1017,3716,3836,7933,3303,2308, 201,1864,4253,1430,7934,4026,7935, # 7318 7936,7937,7938,7939,4254,1604,7940, 414,1865, 371,2587,4534,4535,3485,2016,3104, # 7334 4536,1708, 960,4255, 887, 389,2171,1536,1663,1721,7941,2228,4027,2351,2926,1580, # 7350 7942,7943,7944,1744,7945,2537,4537,4538,7946,4539,7947,2073,7948,7949,3592,3380, # 7366 2882,4256,7950,4257,2640,3381,2802, 673,2703,2460, 709,3486,4028,3593,4258,7951, # 7382 1148, 502, 634,7952,7953,1204,4540,3594,1575,4541,2613,3717,7954,3718,3105, 948, # 7398 3232, 121,1745,3837,1110,7955,4259,3063,2509,3009,4029,3719,1151,1771,3838,1488, # 7414 4030,1986,7956,2433,3487,7957,7958,2093,7959,4260,3839,1213,1407,2803, 531,2737, # 7430 2538,3233,1011,1537,7960,2769,4261,3106,1061,7961,3720,3721,1866,2883,7962,2017, # 7446 120,4262,4263,2062,3595,3234,2309,3840,2668,3382,1954,4542,7963,7964,3488,1047, # 7462 2704,1266,7965,1368,4543,2845, 649,3383,3841,2539,2738,1102,2846,2669,7966,7967, # 7478 1999,7968,1111,3596,2962,7969,2488,3842,3597,2804,1854,3384,3722,7970,7971,3385, # 7494 2410,2884,3304,3235,3598,7972,2569,7973,3599,2805,4031,1460, 856,7974,3600,7975, # 7510 2885,2963,7976,2886,3843,7977,4264, 632,2510, 875,3844,1697,3845,2291,7978,7979, # 7526 4544,3010,1239, 580,4545,4265,7980, 914, 936,2074,1190,4032,1039,2123,7981,7982, # 7542 7983,3386,1473,7984,1354,4266,3846,7985,2172,3064,4033, 915,3305,4267,4268,3306, # 7558 1605,1834,7986,2739, 398,3601,4269,3847,4034, 328,1912,2847,4035,3848,1331,4270, # 7574 3011, 937,4271,7987,3602,4036,4037,3387,2160,4546,3388, 524, 742, 538,3065,1012, # 7590 7988,7989,3849,2461,7990, 658,1103, 
225,3850,7991,7992,4547,7993,4548,7994,3236, # 7606 1243,7995,4038, 963,2246,4549,7996,2705,3603,3161,7997,7998,2588,2327,7999,4550, # 7622 8000,8001,8002,3489,3307, 957,3389,2540,2032,1930,2927,2462, 870,2018,3604,1746, # 7638 2770,2771,2434,2463,8003,3851,8004,3723,3107,3724,3490,3390,3725,8005,1179,3066, # 7654 8006,3162,2373,4272,3726,2541,3163,3108,2740,4039,8007,3391,1556,2542,2292, 977, # 7670 2887,2033,4040,1205,3392,8008,1765,3393,3164,2124,1271,1689, 714,4551,3491,8009, # 7686 2328,3852, 533,4273,3605,2181, 617,8010,2464,3308,3492,2310,8011,8012,3165,8013, # 7702 8014,3853,1987, 618, 427,2641,3493,3394,8015,8016,1244,1690,8017,2806,4274,4552, # 7718 8018,3494,8019,8020,2279,1576, 473,3606,4275,3395, 972,8021,3607,8022,3067,8023, # 7734 8024,4553,4554,8025,3727,4041,4042,8026, 153,4555, 356,8027,1891,2888,4276,2143, # 7750 408, 803,2352,8028,3854,8029,4277,1646,2570,2511,4556,4557,3855,8030,3856,4278, # 7766 8031,2411,3396, 752,8032,8033,1961,2964,8034, 746,3012,2465,8035,4279,3728, 698, # 7782 4558,1892,4280,3608,2543,4559,3609,3857,8036,3166,3397,8037,1823,1302,4043,2706, # 7798 3858,1973,4281,8038,4282,3167, 823,1303,1288,1236,2848,3495,4044,3398, 774,3859, # 7814 8039,1581,4560,1304,2849,3860,4561,8040,2435,2161,1083,3237,4283,4045,4284, 344, # 7830 1173, 288,2311, 454,1683,8041,8042,1461,4562,4046,2589,8043,8044,4563, 985, 894, # 7846 8045,3399,3168,8046,1913,2928,3729,1988,8047,2110,1974,8048,4047,8049,2571,1194, # 7862 425,8050,4564,3169,1245,3730,4285,8051,8052,2850,8053, 636,4565,1855,3861, 760, # 7878 1799,8054,4286,2209,1508,4566,4048,1893,1684,2293,8055,8056,8057,4287,4288,2210, # 7894 479,8058,8059, 832,8060,4049,2489,8061,2965,2490,3731, 990,3109, 627,1814,2642, # 7910 4289,1582,4290,2125,2111,3496,4567,8062, 799,4291,3170,8063,4568,2112,1737,3013, # 7926 1018, 543, 754,4292,3309,1676,4569,4570,4050,8064,1489,8065,3497,8066,2614,2889, # 7942 4051,8067,8068,2966,8069,8070,8071,8072,3171,4571,4572,2182,1722,8073,3238,3239, # 7958 1842,3610,1715, 481, 365,1975,1856,8074,8075,1962,2491,4573,8076,2126,3611,3240, # 7974 433,1894,2063,2075,8077, 602,2741,8078,8079,8080,8081,8082,3014,1628,3400,8083, # 7990 3172,4574,4052,2890,4575,2512,8084,2544,2772,8085,8086,8087,3310,4576,2891,8088, # 8006 4577,8089,2851,4578,4579,1221,2967,4053,2513,8090,8091,8092,1867,1989,8093,8094, # 8022 8095,1895,8096,8097,4580,1896,4054, 318,8098,2094,4055,4293,8099,8100, 485,8101, # 8038 938,3862, 553,2670, 116,8102,3863,3612,8103,3498,2671,2773,3401,3311,2807,8104, # 8054 3613,2929,4056,1747,2930,2968,8105,8106, 207,8107,8108,2672,4581,2514,8109,3015, # 8070 890,3614,3864,8110,1877,3732,3402,8111,2183,2353,3403,1652,8112,8113,8114, 941, # 8086 2294, 208,3499,4057,2019, 330,4294,3865,2892,2492,3733,4295,8115,8116,8117,8118, # 8102 #Everything below is of no interest for detection purpose 2515,1613,4582,8119,3312,3866,2516,8120,4058,8121,1637,4059,2466,4583,3867,8122, # 8118 2493,3016,3734,8123,8124,2192,8125,8126,2162,8127,8128,8129,8130,8131,8132,8133, # 8134 8134,8135,8136,8137,8138,8139,8140,8141,8142,8143,8144,8145,8146,8147,8148,8149, # 8150 8150,8151,8152,8153,8154,8155,8156,8157,8158,8159,8160,8161,8162,8163,8164,8165, # 8166 8166,8167,8168,8169,8170,8171,8172,8173,8174,8175,8176,8177,8178,8179,8180,8181, # 8182 8182,8183,8184,8185,8186,8187,8188,8189,8190,8191,8192,8193,8194,8195,8196,8197, # 8198 8198,8199,8200,8201,8202,8203,8204,8205,8206,8207,8208,8209,8210,8211,8212,8213, # 8214 8214,8215,8216,8217,8218,8219,8220,8221,8222,8223,8224,8225,8226,8227,8228,8229, # 8230 
8230,8231,8232,8233,8234,8235,8236,8237,8238,8239,8240,8241,8242,8243,8244,8245, # 8246 8246,8247,8248,8249,8250,8251,8252,8253,8254,8255,8256,8257,8258,8259,8260,8261, # 8262 8262,8263,8264,8265,8266,8267,8268,8269,8270,8271,8272,8273,8274,8275,8276,8277, # 8278 8278,8279,8280,8281,8282,8283,8284,8285,8286,8287,8288,8289,8290,8291,8292,8293, # 8294 8294,8295,8296,8297,8298,8299,8300,8301,8302,8303,8304,8305,8306,8307,8308,8309, # 8310 8310,8311,8312,8313,8314,8315,8316,8317,8318,8319,8320,8321,8322,8323,8324,8325, # 8326 8326,8327,8328,8329,8330,8331,8332,8333,8334,8335,8336,8337,8338,8339,8340,8341, # 8342 8342,8343,8344,8345,8346,8347,8348,8349,8350,8351,8352,8353,8354,8355,8356,8357, # 8358 8358,8359,8360,8361,8362,8363,8364,8365,8366,8367,8368,8369,8370,8371,8372,8373, # 8374 8374,8375,8376,8377,8378,8379,8380,8381,8382,8383,8384,8385,8386,8387,8388,8389, # 8390 8390,8391,8392,8393,8394,8395,8396,8397,8398,8399,8400,8401,8402,8403,8404,8405, # 8406 8406,8407,8408,8409,8410,8411,8412,8413,8414,8415,8416,8417,8418,8419,8420,8421, # 8422 8422,8423,8424,8425,8426,8427,8428,8429,8430,8431,8432,8433,8434,8435,8436,8437, # 8438 8438,8439,8440,8441,8442,8443,8444,8445,8446,8447,8448,8449,8450,8451,8452,8453, # 8454 8454,8455,8456,8457,8458,8459,8460,8461,8462,8463,8464,8465,8466,8467,8468,8469, # 8470 8470,8471,8472,8473,8474,8475,8476,8477,8478,8479,8480,8481,8482,8483,8484,8485, # 8486 8486,8487,8488,8489,8490,8491,8492,8493,8494,8495,8496,8497,8498,8499,8500,8501, # 8502 8502,8503,8504,8505,8506,8507,8508,8509,8510,8511,8512,8513,8514,8515,8516,8517, # 8518 8518,8519,8520,8521,8522,8523,8524,8525,8526,8527,8528,8529,8530,8531,8532,8533, # 8534 8534,8535,8536,8537,8538,8539,8540,8541,8542,8543,8544,8545,8546,8547,8548,8549, # 8550 8550,8551,8552,8553,8554,8555,8556,8557,8558,8559,8560,8561,8562,8563,8564,8565, # 8566 8566,8567,8568,8569,8570,8571,8572,8573,8574,8575,8576,8577,8578,8579,8580,8581, # 8582 8582,8583,8584,8585,8586,8587,8588,8589,8590,8591,8592,8593,8594,8595,8596,8597, # 8598 8598,8599,8600,8601,8602,8603,8604,8605,8606,8607,8608,8609,8610,8611,8612,8613, # 8614 8614,8615,8616,8617,8618,8619,8620,8621,8622,8623,8624,8625,8626,8627,8628,8629, # 8630 8630,8631,8632,8633,8634,8635,8636,8637,8638,8639,8640,8641,8642,8643,8644,8645, # 8646 8646,8647,8648,8649,8650,8651,8652,8653,8654,8655,8656,8657,8658,8659,8660,8661, # 8662 8662,8663,8664,8665,8666,8667,8668,8669,8670,8671,8672,8673,8674,8675,8676,8677, # 8678 8678,8679,8680,8681,8682,8683,8684,8685,8686,8687,8688,8689,8690,8691,8692,8693, # 8694 8694,8695,8696,8697,8698,8699,8700,8701,8702,8703,8704,8705,8706,8707,8708,8709, # 8710 8710,8711,8712,8713,8714,8715,8716,8717,8718,8719,8720,8721,8722,8723,8724,8725, # 8726 8726,8727,8728,8729,8730,8731,8732,8733,8734,8735,8736,8737,8738,8739,8740,8741) # 8742 # flake8: noqa
gpl-2.0
maheshp/novatest
nova/virt/baremetal/base.py
10
2335
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2012 NTT DOCOMO, INC.
# Copyright (c) 2011 University of Southern California / ISI
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from nova.virt.baremetal import baremetal_states


class NodeDriver(object):

    def __init__(self, virtapi):
        self.virtapi = virtapi

    def cache_images(self, context, node, instance, **kwargs):
        raise NotImplementedError()

    def destroy_images(self, context, node, instance):
        raise NotImplementedError()

    def activate_bootloader(self, context, node, instance):
        raise NotImplementedError()

    def deactivate_bootloader(self, context, node, instance):
        raise NotImplementedError()

    def activate_node(self, context, node, instance):
        """For operations after power on."""
        raise NotImplementedError()

    def deactivate_node(self, context, node, instance):
        """For operations before power off."""
        raise NotImplementedError()

    def get_console_output(self, node, instance):
        raise NotImplementedError()


class PowerManager(object):

    def __init__(self, **kwargs):
        self.state = baremetal_states.DELETED
        pass

    def activate_node(self):
        self.state = baremetal_states.ACTIVE
        return self.state

    def reboot_node(self):
        self.state = baremetal_states.ACTIVE
        return self.state

    def deactivate_node(self):
        self.state = baremetal_states.DELETED
        return self.state

    def is_power_on(self):
        """Returns True or False according as the node's power state."""
        return True

    # TODO(NTTdocomo): split out console methods to its own class
    def start_console(self):
        pass

    def stop_console(self):
        pass
apache-2.0
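The NodeDriver and PowerManager classes above are deliberately skeletal: a concrete driver overrides the hooks and moves self.state between the baremetal_states values. A minimal sketch of such a subclass follows; the FakeIPMIPowerManager name and its _send_ipmi_command helper are hypothetical stand-ins invented for illustration, and the sketch assumes the nova package shown above is importable.

from nova.virt.baremetal import base, baremetal_states


class FakeIPMIPowerManager(base.PowerManager):
    """Toy power manager that only records the commands it would send."""

    def __init__(self, **kwargs):
        super(FakeIPMIPowerManager, self).__init__(**kwargs)
        self.commands = []  # history of issued (pretend) IPMI commands

    def _send_ipmi_command(self, command):
        # Hypothetical helper: a real driver would shell out to ipmitool here.
        self.commands.append(command)

    def activate_node(self):
        self._send_ipmi_command('power on')
        self.state = baremetal_states.ACTIVE
        return self.state

    def deactivate_node(self):
        self._send_ipmi_command('power off')
        self.state = baremetal_states.DELETED
        return self.state

    def is_power_on(self):
        return bool(self.commands) and self.commands[-1] == 'power on'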
kustodian/ansible-modules-core
commands/shell.py
60
2743
# There is actually no actual shell module source, when you use 'shell' in ansible,
# it runs the 'command' module with special arguments and it behaves differently.
# See the command source and the comment "#USE_SHELL".

DOCUMENTATION = '''
---
module: shell
short_description: Execute commands in nodes.
description:
  - The M(shell) module takes the command name followed by a list of
    space-delimited arguments. It is almost exactly like the M(command) module
    but runs the command through a shell (C(/bin/sh)) on the remote node.
version_added: "0.2"
options:
  free_form:
    description:
      - The shell module takes a free form command to run, as a string. There's
        not an actual option named "free form". See the examples!
    required: true
    default: null
  creates:
    description:
      - a filename, when it already exists, this step will B(not) be run.
    required: no
    default: null
  removes:
    description:
      - a filename, when it does not exist, this step will B(not) be run.
    version_added: "0.8"
    required: no
    default: null
  chdir:
    description:
      - cd into this directory before running the command
    required: false
    default: null
    version_added: "0.6"
  executable:
    description:
      - change the shell used to execute the command. Should be an absolute
        path to the executable.
    required: false
    default: null
    version_added: "0.9"
  warn:
    description:
      - if command warnings are on in ansible.cfg, do not warn about this
        particular line if set to no/false.
    required: false
    default: True
    version_added: "1.8"
notes:
  - If you want to execute a command securely and predictably, it may be better
    to use the M(command) module instead. Best practices when writing playbooks
    will follow the trend of using M(command) unless M(shell) is explicitly
    required. When running ad-hoc commands, use your best judgement.
  - To sanitize any variables passed to the shell module, you should use
    "{{ var | quote }}" instead of just "{{ var }}" to make sure they don't
    include evil things like semicolons.
requirements: [ ]
author: Michael DeHaan
'''

EXAMPLES = '''
# Execute the command in remote shell; stdout goes to the specified
# file on the remote.
- shell: somescript.sh >> somelog.txt

# Change the working directory to somedir/ before executing the command.
- shell: somescript.sh >> somelog.txt chdir=somedir/

# You can also use the 'args' form to provide the options. This command
# will change the working directory to somedir/ and will only run when
# somedir/somelog.txt doesn't exist.
- shell: somescript.sh >> somelog.txt
  args:
    chdir: somedir/
    creates: somelog.txt
'''
gpl-3.0
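As an aside on the sanitising note in the documentation above: the "{{ var | quote }}" filter shell-quotes a value before it is interpolated into the command line, which is what defuses the "evil things like semicolons". The short Python 2 sketch below illustrates the same idea with the standard library's pipes.quote; the filename value is made-up hostile input.

import pipes

filename = "notes.txt; rm -rf /"           # hostile input containing a semicolon

unsafe = "cat %s" % filename               # /bin/sh would see two commands
safe = "cat %s" % pipes.quote(filename)    # one command, one literal argument

print unsafe   # cat notes.txt; rm -rf /
print safe     # cat 'notes.txt; rm -rf /'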
reybalgs/PyRecipe-4-U
models/recipemodel.py
1
3188
###############################################################################
#
# recipemodel.py
#
# Provides the class model for a recipe. The class model is passed around in
# the application proper.
#
###############################################################################

import simplejson as json


class RecipeModel():
    def export_recipe(self):
        """
        This function exports the current recipe object as a JSON-encoded
        recipe (.rcpe) file. Actually just returns a JSON-encoded string
        """
        # Dump the object into a JSON-formatted string
        json_recipe = json.dumps(
            {"name": self.name, "course": self.course,
             "serving_size": self.servingSize, "ingredients": self.ingredients,
             "instructions": self.instructions, "images": self.images},
            separators=(',', ':'))

        # Return the string
        return json_recipe

    def import_recipe(self, raw_json):
        """
        Parses a JSON-encoded .rcpe file and then sets it to itself.

        The string containing the [contents] of the JSON file is passed into
        this function.
        """
        # Put the decoded JSON string into a "raw" recipe object
        raw_recipe = json.loads(raw_json)
        print raw_recipe  # print it for now

        self.name = raw_recipe['name']
        self.course = raw_recipe['course']
        self.servingSize = raw_recipe['serving_size']
        self.ingredients = raw_recipe['ingredients']
        self.instructions = raw_recipe['instructions']
        self.images = raw_recipe['images']

    def print_recipe_information(self):
        """
        A useful debugging function that prints the entirety of the recipe
        """
        # Print basic information
        print '\nName: ' + self.name
        print 'Course: ' + self.course
        print 'Serving Size: ' + str(self.servingSize)

        # Print the ingredients
        print '\nIngredients:'
        if len(self.ingredients) == 0:
            print 'No ingredients.'
        else:
            for ingredient in self.ingredients:
                print(ingredient['name'] + str(ingredient['quantity']) +
                      ingredient['unit'])

        # Print the instructions
        print '\nInstructions:'
        if len(self.instructions) == 0:
            print 'No instructions.'
        else:
            for instruction in self.instructions:
                print instruction

        # Print the filepaths of the images
        print '\nImage paths:'
        if len(self.images) == 0:
            print 'No images.'
        else:
            for filePath in self.images:
                print filePath

    def get_recipe(self, recipe):
        """
        Assigns a given recipe to this recipe.
        """
        self.name = recipe.name
        self.course = recipe.course
        self.servingSize = recipe.servingSize
        self.ingredients = recipe.ingredients
        self.instructions = recipe.instructions
        # Copy the image list as well so no field is silently dropped
        self.images = recipe.images

    def __init__(self):
        self.name = 'noname'
        self.course = 'none'
        self.servingSize = 0
        self.ingredients = []
        self.instructions = []
        self.images = []
gpl-3.0
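A short usage sketch of the RecipeModel JSON round-trip defined above: export_recipe() serialises the object to a .rcpe-style string and import_recipe() parses one back. The field values are invented sample data; only the attribute and method names come from the class itself, and the import path simply mirrors the models/recipemodel.py location shown.

from models.recipemodel import RecipeModel

recipe = RecipeModel()
recipe.name = 'Adobo'
recipe.course = 'Main'
recipe.servingSize = 4
recipe.ingredients = [{'name': 'chicken', 'quantity': 1, 'unit': 'kg'}]
recipe.instructions = ['Brown the chicken.', 'Simmer in soy sauce and vinegar.']
recipe.images = []

json_string = recipe.export_recipe()   # JSON text, ready to write to a .rcpe file

copy = RecipeModel()
copy.import_recipe(json_string)        # also prints the decoded dict (see above)
assert copy.name == recipe.name
assert copy.ingredients == recipe.ingredients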
Entropy512/libsigrokdecode
decoders/eeprom93xx/__init__.py
7
1168
##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2017 Kevin Redon <[email protected]>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##

'''
This decoder stacks on top of the 'microwire' PD and decodes the 93xx
EEPROM specific instructions.

The implemented instructions come from the STMicroelectronics M93Cx6
EEPROM datasheet. They are compatible with the Atmel AT93Cxx EEPROM
with slightly different names.

Warning: Other EEPROMs using Microwire might have different operation
codes and instructions.
'''

from .pd import Decoder
gpl-3.0
rbuffat/pyidf
tests/test_controllerwatercoil.py
1
2641
import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.controllers import ControllerWaterCoil

log = logging.getLogger(__name__)


class TestControllerWaterCoil(unittest.TestCase):

    def setUp(self):
        self.fd, self.path = tempfile.mkstemp()

    def tearDown(self):
        os.remove(self.path)

    def test_create_controllerwatercoil(self):

        pyidf.validation_level = ValidationLevel.error

        obj = ControllerWaterCoil()
        # alpha
        var_name = "Name"
        obj.name = var_name
        # alpha
        var_control_variable = "Temperature"
        obj.control_variable = var_control_variable
        # alpha
        var_action = "Normal"
        obj.action = var_action
        # alpha
        var_actuator_variable = "Flow"
        obj.actuator_variable = var_actuator_variable
        # node
        var_sensor_node_name = "node|Sensor Node Name"
        obj.sensor_node_name = var_sensor_node_name
        # node
        var_actuator_node_name = "node|Actuator Node Name"
        obj.actuator_node_name = var_actuator_node_name
        # real
        var_controller_convergence_tolerance = 7.7
        obj.controller_convergence_tolerance = var_controller_convergence_tolerance
        # real
        var_maximum_actuated_flow = 8.8
        obj.maximum_actuated_flow = var_maximum_actuated_flow
        # real
        var_minimum_actuated_flow = 9.9
        obj.minimum_actuated_flow = var_minimum_actuated_flow

        idf = IDF()
        idf.add(obj)
        idf.save(self.path, check=False)

        with open(self.path, mode='r') as f:
            for line in f:
                log.debug(line.strip())

        idf2 = IDF(self.path)
        self.assertEqual(idf2.controllerwatercoils[0].name, var_name)
        self.assertEqual(idf2.controllerwatercoils[0].control_variable, var_control_variable)
        self.assertEqual(idf2.controllerwatercoils[0].action, var_action)
        self.assertEqual(idf2.controllerwatercoils[0].actuator_variable, var_actuator_variable)
        self.assertEqual(idf2.controllerwatercoils[0].sensor_node_name, var_sensor_node_name)
        self.assertEqual(idf2.controllerwatercoils[0].actuator_node_name, var_actuator_node_name)
        self.assertAlmostEqual(idf2.controllerwatercoils[0].controller_convergence_tolerance, var_controller_convergence_tolerance)
        self.assertAlmostEqual(idf2.controllerwatercoils[0].maximum_actuated_flow, var_maximum_actuated_flow)
        self.assertAlmostEqual(idf2.controllerwatercoils[0].minimum_actuated_flow, var_minimum_actuated_flow)
apache-2.0
zabracks/sshuttle
src/ssnet.py
7
18201
import struct import socket import errno import select import os if not globals().get('skip_imports'): from helpers import log, debug1, debug2, debug3, Fatal MAX_CHANNEL = 65535 # these don't exist in the socket module in python 2.3! SHUT_RD = 0 SHUT_WR = 1 SHUT_RDWR = 2 HDR_LEN = 8 CMD_EXIT = 0x4200 CMD_PING = 0x4201 CMD_PONG = 0x4202 CMD_TCP_CONNECT = 0x4203 CMD_TCP_STOP_SENDING = 0x4204 CMD_TCP_EOF = 0x4205 CMD_TCP_DATA = 0x4206 CMD_ROUTES = 0x4207 CMD_HOST_REQ = 0x4208 CMD_HOST_LIST = 0x4209 CMD_DNS_REQ = 0x420a CMD_DNS_RESPONSE = 0x420b CMD_UDP_OPEN = 0x420c CMD_UDP_DATA = 0x420d CMD_UDP_CLOSE = 0x420e cmd_to_name = { CMD_EXIT: 'EXIT', CMD_PING: 'PING', CMD_PONG: 'PONG', CMD_TCP_CONNECT: 'TCP_CONNECT', CMD_TCP_STOP_SENDING: 'TCP_STOP_SENDING', CMD_TCP_EOF: 'TCP_EOF', CMD_TCP_DATA: 'TCP_DATA', CMD_ROUTES: 'ROUTES', CMD_HOST_REQ: 'HOST_REQ', CMD_HOST_LIST: 'HOST_LIST', CMD_DNS_REQ: 'DNS_REQ', CMD_DNS_RESPONSE: 'DNS_RESPONSE', CMD_UDP_OPEN: 'UDP_OPEN', CMD_UDP_DATA: 'UDP_DATA', CMD_UDP_CLOSE: 'UDP_CLOSE', } NET_ERRS = [errno.ECONNREFUSED, errno.ETIMEDOUT, errno.EHOSTUNREACH, errno.ENETUNREACH, errno.EHOSTDOWN, errno.ENETDOWN] def _add(l, elem): if not elem in l: l.append(elem) def _fds(l): out = [] for i in l: try: out.append(i.fileno()) except AttributeError: out.append(i) out.sort() return out def _nb_clean(func, *args): try: return func(*args) except OSError, e: if e.errno not in (errno.EWOULDBLOCK, errno.EAGAIN): raise else: debug3('%s: err was: %s\n' % (func.__name__, e)) return None def _try_peername(sock): try: pn = sock.getpeername() if pn: return '%s:%s' % (pn[0], pn[1]) except socket.error, e: if e.args[0] not in (errno.ENOTCONN, errno.ENOTSOCK): raise return 'unknown' _swcount = 0 class SockWrapper: def __init__(self, rsock, wsock, connect_to=None, peername=None): global _swcount _swcount += 1 debug3('creating new SockWrapper (%d now exist)\n' % _swcount) self.exc = None self.rsock = rsock self.wsock = wsock self.shut_read = self.shut_write = False self.buf = [] self.connect_to = connect_to self.peername = peername or _try_peername(self.rsock) self.try_connect() def __del__(self): global _swcount _swcount -= 1 debug1('%r: deleting (%d remain)\n' % (self, _swcount)) if self.exc: debug1('%r: error was: %s\n' % (self, self.exc)) def __repr__(self): if self.rsock == self.wsock: fds = '#%d' % self.rsock.fileno() else: fds = '#%d,%d' % (self.rsock.fileno(), self.wsock.fileno()) return 'SW%s:%s' % (fds, self.peername) def seterr(self, e): if not self.exc: self.exc = e self.nowrite() self.noread() def try_connect(self): if self.connect_to and self.shut_write: self.noread() self.connect_to = None if not self.connect_to: return # already connected self.rsock.setblocking(False) debug3('%r: trying connect to %r\n' % (self, self.connect_to)) try: self.rsock.connect(self.connect_to) # connected successfully (Linux) self.connect_to = None except socket.error, e: debug3('%r: connect result: %s\n' % (self, e)) if e.args[0] == errno.EINVAL: # this is what happens when you call connect() on a socket # that is now connected but returned EINPROGRESS last time, # on BSD, on python pre-2.5.1. We need to use getsockopt() # to get the "real" error. Later pythons do this # automatically, so this code won't run. 
realerr = self.rsock.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR) e = socket.error(realerr, os.strerror(realerr)) debug3('%r: fixed connect result: %s\n' % (self, e)) if e.args[0] in [errno.EINPROGRESS, errno.EALREADY]: pass # not connected yet elif e.args[0] == 0: # connected successfully (weird Linux bug?) # Sometimes Linux seems to return EINVAL when it isn't # invalid. This *may* be caused by a race condition # between connect() and getsockopt(SO_ERROR) (ie. it # finishes connecting in between the two, so there is no # longer an error). However, I'm not sure of that. # # I did get at least one report that the problem went away # when we added this, however. self.connect_to = None elif e.args[0] == errno.EISCONN: # connected successfully (BSD) self.connect_to = None elif e.args[0] in NET_ERRS + [errno.EACCES, errno.EPERM]: # a "normal" kind of error self.connect_to = None self.seterr(e) else: raise # error we've never heard of?! barf completely. def noread(self): if not self.shut_read: debug2('%r: done reading\n' % self) self.shut_read = True # self.rsock.shutdown(SHUT_RD) # doesn't do anything anyway def nowrite(self): if not self.shut_write: debug2('%r: done writing\n' % self) self.shut_write = True try: self.wsock.shutdown(SHUT_WR) except socket.error, e: self.seterr('nowrite: %s' % e) def too_full(self): return False # fullness is determined by the socket's select() state def uwrite(self, buf): if self.connect_to: return 0 # still connecting self.wsock.setblocking(False) try: return _nb_clean(os.write, self.wsock.fileno(), buf) except OSError, e: if e.errno == errno.EPIPE: debug1('%r: uwrite: got EPIPE\n' % self) self.nowrite() return 0 else: # unexpected error... stream is dead self.seterr('uwrite: %s' % e) return 0 def write(self, buf): assert(buf) return self.uwrite(buf) def uread(self): if self.connect_to: return None # still connecting if self.shut_read: return self.rsock.setblocking(False) try: return _nb_clean(os.read, self.rsock.fileno(), 65536) except OSError, e: self.seterr('uread: %s' % e) return '' # unexpected error... 
we'll call it EOF def fill(self): if self.buf: return rb = self.uread() if rb: self.buf.append(rb) if rb == '': # empty string means EOF; None means temporarily empty self.noread() def copy_to(self, outwrap): if self.buf and self.buf[0]: wrote = outwrap.write(self.buf[0]) self.buf[0] = self.buf[0][wrote:] while self.buf and not self.buf[0]: self.buf.pop(0) if not self.buf and self.shut_read: outwrap.nowrite() class Handler: def __init__(self, socks=None, callback=None): self.ok = True self.socks = socks or [] if callback: self.callback = callback def pre_select(self, r, w, x): for i in self.socks: _add(r, i) def callback(self): log('--no callback defined-- %r\n' % self) (r, w, x) = select.select(self.socks, [], [], 0) for s in r: v = s.recv(4096) if not v: log('--closed-- %r\n' % self) self.socks = [] self.ok = False class Proxy(Handler): def __init__(self, wrap1, wrap2): Handler.__init__(self, [wrap1.rsock, wrap1.wsock, wrap2.rsock, wrap2.wsock]) self.wrap1 = wrap1 self.wrap2 = wrap2 def pre_select(self, r, w, x): if self.wrap1.shut_write: self.wrap2.noread() if self.wrap2.shut_write: self.wrap1.noread() if self.wrap1.connect_to: _add(w, self.wrap1.rsock) elif self.wrap1.buf: if not self.wrap2.too_full(): _add(w, self.wrap2.wsock) elif not self.wrap1.shut_read: _add(r, self.wrap1.rsock) if self.wrap2.connect_to: _add(w, self.wrap2.rsock) elif self.wrap2.buf: if not self.wrap1.too_full(): _add(w, self.wrap1.wsock) elif not self.wrap2.shut_read: _add(r, self.wrap2.rsock) def callback(self): self.wrap1.try_connect() self.wrap2.try_connect() self.wrap1.fill() self.wrap2.fill() self.wrap1.copy_to(self.wrap2) self.wrap2.copy_to(self.wrap1) if self.wrap1.buf and self.wrap2.shut_write: self.wrap1.buf = [] self.wrap1.noread() if self.wrap2.buf and self.wrap1.shut_write: self.wrap2.buf = [] self.wrap2.noread() if (self.wrap1.shut_read and self.wrap2.shut_read and not self.wrap1.buf and not self.wrap2.buf): self.ok = False self.wrap1.nowrite() self.wrap2.nowrite() class Mux(Handler): def __init__(self, rsock, wsock): Handler.__init__(self, [rsock, wsock]) self.rsock = rsock self.wsock = wsock self.new_channel = self.got_dns_req = self.got_routes = None self.got_udp_open = self.got_udp_data = self.got_udp_close = None self.got_host_req = self.got_host_list = None self.channels = {} self.chani = 0 self.want = 0 self.inbuf = '' self.outbuf = [] self.fullness = 0 self.too_full = False self.send(0, CMD_PING, 'chicken') def next_channel(self): # channel 0 is special, so we never allocate it for timeout in xrange(1024): self.chani += 1 if self.chani > MAX_CHANNEL: self.chani = 1 if not self.channels.get(self.chani): return self.chani def amount_queued(self): total = 0 for b in self.outbuf: total += len(b) return total def check_fullness(self): if self.fullness > 32768: if not self.too_full: self.send(0, CMD_PING, 'rttest') self.too_full = True #ob = [] # for b in self.outbuf: # (s1,s2,c) = struct.unpack('!ccH', b[:4]) # ob.append(c) #log('outbuf: %d %r\n' % (self.amount_queued(), ob)) def send(self, channel, cmd, data): data = str(data) assert(len(data) <= 65535) p = struct.pack('!ccHHH', 'S', 'S', channel, cmd, len(data)) + data self.outbuf.append(p) debug2(' > channel=%d cmd=%s len=%d (fullness=%d)\n' % (channel, cmd_to_name.get(cmd, hex(cmd)), len(data), self.fullness)) self.fullness += len(data) def got_packet(self, channel, cmd, data): debug2('< channel=%d cmd=%s len=%d\n' % (channel, cmd_to_name.get(cmd, hex(cmd)), len(data))) if cmd == CMD_PING: self.send(0, CMD_PONG, data) elif cmd == CMD_PONG: 
debug2('received PING response\n') self.too_full = False self.fullness = 0 elif cmd == CMD_EXIT: self.ok = False elif cmd == CMD_TCP_CONNECT: assert(not self.channels.get(channel)) if self.new_channel: self.new_channel(channel, data) elif cmd == CMD_DNS_REQ: assert(not self.channels.get(channel)) if self.got_dns_req: self.got_dns_req(channel, data) elif cmd == CMD_UDP_OPEN: assert(not self.channels.get(channel)) if self.got_udp_open: self.got_udp_open(channel, data) elif cmd == CMD_ROUTES: if self.got_routes: self.got_routes(data) else: raise Exception('got CMD_ROUTES without got_routes?') elif cmd == CMD_HOST_REQ: if self.got_host_req: self.got_host_req(data) else: raise Exception('got CMD_HOST_REQ without got_host_req?') elif cmd == CMD_HOST_LIST: if self.got_host_list: self.got_host_list(data) else: raise Exception('got CMD_HOST_LIST without got_host_list?') else: callback = self.channels.get(channel) if not callback: log('warning: closed channel %d got cmd=%s len=%d\n' % (channel, cmd_to_name.get(cmd, hex(cmd)), len(data))) else: callback(cmd, data) def flush(self): self.wsock.setblocking(False) if self.outbuf and self.outbuf[0]: wrote = _nb_clean(os.write, self.wsock.fileno(), self.outbuf[0]) debug2('mux wrote: %r/%d\n' % (wrote, len(self.outbuf[0]))) if wrote: self.outbuf[0] = self.outbuf[0][wrote:] while self.outbuf and not self.outbuf[0]: self.outbuf[0:1] = [] def fill(self): self.rsock.setblocking(False) try: b = _nb_clean(os.read, self.rsock.fileno(), 32768) except OSError, e: raise Fatal('other end: %r' % e) #log('<<< %r\n' % b) if b == '': # EOF self.ok = False if b: self.inbuf += b def handle(self): self.fill() # log('inbuf is: (%d,%d) %r\n' # % (self.want, len(self.inbuf), self.inbuf)) while 1: if len(self.inbuf) >= (self.want or HDR_LEN): (s1, s2, channel, cmd, datalen) = \ struct.unpack('!ccHHH', self.inbuf[:HDR_LEN]) assert(s1 == 'S') assert(s2 == 'S') self.want = datalen + HDR_LEN if self.want and len(self.inbuf) >= self.want: data = self.inbuf[HDR_LEN:self.want] self.inbuf = self.inbuf[self.want:] self.want = 0 self.got_packet(channel, cmd, data) else: break def pre_select(self, r, w, x): _add(r, self.rsock) if self.outbuf: _add(w, self.wsock) def callback(self): (r, w, x) = select.select([self.rsock], [self.wsock], [], 0) if self.rsock in r: self.handle() if self.outbuf and self.wsock in w: self.flush() class MuxWrapper(SockWrapper): def __init__(self, mux, channel): SockWrapper.__init__(self, mux.rsock, mux.wsock) self.mux = mux self.channel = channel self.mux.channels[channel] = self.got_packet self.socks = [] debug2('new channel: %d\n' % channel) def __del__(self): self.nowrite() SockWrapper.__del__(self) def __repr__(self): return 'SW%r:Mux#%d' % (self.peername, self.channel) def noread(self): if not self.shut_read: self.shut_read = True self.mux.send(self.channel, CMD_TCP_STOP_SENDING, '') self.maybe_close() def nowrite(self): if not self.shut_write: self.shut_write = True self.mux.send(self.channel, CMD_TCP_EOF, '') self.maybe_close() def maybe_close(self): if self.shut_read and self.shut_write: # remove the mux's reference to us. The python garbage collector # will then be able to reap our object. 
self.mux.channels[self.channel] = None def too_full(self): return self.mux.too_full def uwrite(self, buf): if self.mux.too_full: return 0 # too much already enqueued if len(buf) > 2048: buf = buf[:2048] self.mux.send(self.channel, CMD_TCP_DATA, buf) return len(buf) def uread(self): if self.shut_read: return '' # EOF else: return None # no data available right now def got_packet(self, cmd, data): if cmd == CMD_TCP_EOF: self.noread() elif cmd == CMD_TCP_STOP_SENDING: self.nowrite() elif cmd == CMD_TCP_DATA: self.buf.append(data) else: raise Exception('unknown command %d (%d bytes)' % (cmd, len(data))) def connect_dst(family, ip, port): debug2('Connecting to %s:%d\n' % (ip, port)) outsock = socket.socket(family) outsock.setsockopt(socket.SOL_IP, socket.IP_TTL, 42) return SockWrapper(outsock, outsock, connect_to=(ip, port), peername = '%s:%d' % (ip, port)) def runonce(handlers, mux): r = [] w = [] x = [] to_remove = filter(lambda s: not s.ok, handlers) for h in to_remove: handlers.remove(h) for s in handlers: s.pre_select(r, w, x) debug2('Waiting: %d r=%r w=%r x=%r (fullness=%d/%d)\n' % (len(handlers), _fds(r), _fds(w), _fds(x), mux.fullness, mux.too_full)) (r, w, x) = select.select(r, w, x) debug2(' Ready: %d r=%r w=%r x=%r\n' % (len(handlers), _fds(r), _fds(w), _fds(x))) ready = r + w + x did = {} for h in handlers: for s in h.socks: if s in ready: h.callback() did[s] = 1 for s in ready: if not s in did: raise Fatal('socket %r was not used by any handler' % s)
lgpl-2.1
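Every message on the sshuttle mux above is framed as an 8-byte '!ccHHH' header ('S', 'S', channel, command, payload length) followed by the payload bytes. The standalone Python 2 sketch below walks through that framing with made-up sample values; pack_packet mirrors what Mux.send() appends to outbuf, and unpack_header mirrors how Mux.handle() reads enough of inbuf to learn the payload length.

import struct

HDR_LEN = 8
CMD_PING = 0x4201


def pack_packet(channel, cmd, data):
    # Header is big-endian: two magic 'S' bytes, channel, command, data length.
    assert len(data) <= 65535
    return struct.pack('!ccHHH', 'S', 'S', channel, cmd, len(data)) + data


def unpack_header(buf):
    # Read the fixed-size header to find out how many payload bytes follow.
    s1, s2, channel, cmd, datalen = struct.unpack('!ccHHH', buf[:HDR_LEN])
    assert s1 == 'S' and s2 == 'S'
    return channel, cmd, datalen


packet = pack_packet(0, CMD_PING, 'chicken')
channel, cmd, datalen = unpack_header(packet)
payload = packet[HDR_LEN:HDR_LEN + datalen]
assert (channel, cmd, payload) == (0, CMD_PING, 'chicken')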
goanpeca/mongokit
tests/test_versioned.py
3
15067
#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2009-2011, Nicolas Clairon # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of the University of California, Berkeley nor the # names of its contributors may be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY # EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import unittest from mongokit import * class VersionedTestCase(unittest.TestCase): def setUp(self): self.connection = Connection() self.col = self.connection['test']['mongokit'] def tearDown(self): self.connection['test'].drop_collection('mongokit') self.connection['test'].drop_collection('versioned_mongokit') self.connection['test'].drop_collection('versioned_mongokit2') self.connection['versioned_test'].drop_collection('versioned_mongokit') def test_save_versioning(self): class MyDoc(Document): structure = { "bla" : unicode, } self.connection.register([MyDoc]) doc = self.col.MyDoc() doc['bla'] = u"bli" doc.save() assert "_revision" not in doc doc.delete() class MyVersionedDoc(VersionedDocument): structure = { "foo" : unicode, } self.connection.register([MyVersionedDoc]) versioned_doc = self.col.MyVersionedDoc() versioned_doc['_id'] = "mydoc" versioned_doc['foo'] = u'bla' versioned_doc.save() docs = list(self.col.find()) assert len(docs) == 1 ver_doc = list(self.connection.test.versioned_mongokit.find()) assert len(ver_doc) == 1 assert ver_doc[0]['id'] == 'mydoc' assert ver_doc[0]['revision'] == 1 assert ver_doc[0]['doc'] == {u'_revision': 1, u'foo': u'bla', u'_id': u'mydoc'} assert versioned_doc['_revision'] == 1 assert versioned_doc.get_last_revision_id() == 1 assert versioned_doc.get_revision(1) == {'foo':'bla', "_revision":1, "_id":"mydoc"} versioned_doc['foo'] = u'bar' versioned_doc.save() ver_doc = list(self.connection.test.versioned_mongokit.find()) assert len(ver_doc) == 2 assert ver_doc[0]['id'] == 'mydoc' assert ver_doc[0]['revision'] == 1 assert ver_doc[0]['doc'] == {u'_revision': 1, u'foo': u'bla', u'_id': u'mydoc'} assert ver_doc[1]['id'] == 'mydoc' assert ver_doc[1]['revision'] == 2 assert ver_doc[1]['doc'] == {u'_revision': 2, u'foo': u'bar', u'_id': u'mydoc'} assert versioned_doc['_revision'] == 2 assert versioned_doc.get_last_revision_id() == 2 assert versioned_doc['foo'] == 'bar' assert versioned_doc.get_revision(2) == 
{'foo':'bar', "_revision":2, "_id":"mydoc"}, versioned_doc.get_revision(2) old_doc = versioned_doc.get_revision(1) print old_doc, type(old_doc) old_doc.save() assert old_doc['_revision'] == 3 versioned_doc = self.connection.test.mongokit.MyVersionedDoc.get_from_id(versioned_doc['_id']) assert len(list(versioned_doc.get_revisions())) == 3, len(list(versioned_doc.get_revisions())) def test_save_without_versionning(self): class MyVersionedDoc(VersionedDocument): structure = { "foo" : unicode, } self.connection.register([MyVersionedDoc]) versioned_doc = self.col.MyVersionedDoc() versioned_doc['_id'] = "mydoc" versioned_doc['foo'] = u'bla' versioned_doc.save(versioning=False) assert self.col.MyVersionedDoc.versioning_collection.find().count() == 0 assert self.col.find().count() == 1 def test_save_versioning_without_id(self): class MyVersionedDoc(VersionedDocument): structure = { "foo" : unicode, } self.connection.register([MyVersionedDoc]) versioned_doc = self.col.MyVersionedDoc() versioned_doc['foo'] = u'bla' versioned_doc.save() ver_doc = list(self.connection.test.versioned_mongokit.find()) assert len(ver_doc) == 1 assert 'doc' in ver_doc[0] assert 'revision' in ver_doc[0], ver_doc[0] ver_doc = list(self.col.find()) assert len(ver_doc) == 1 assert 'doc' not in ver_doc[0] assert '_revision' in ver_doc[0] def _test_bad_versioning(self): class MyVersionedDoc(VersionedDocument): structure = { "foo" : unicode, } self.connection.register([MyVersionedDoc]) self.assertRaises(ValidationError, MyVersionedDoc) def test_delete_versioning(self): class MyVersionedDoc(VersionedDocument): structure = { "foo" : unicode, } self.connection.register([MyVersionedDoc]) versioned_doc = self.col.MyVersionedDoc() versioned_doc['_id'] = "mydoc" versioned_doc['foo'] = u'bla' versioned_doc.save() assert self.col.MyVersionedDoc.versioning_collection.find().count() == 1 versioned_doc['foo'] = u'bar' versioned_doc.save() assert self.col.MyVersionedDoc.versioning_collection.find().count() == 2 versioned_doc.delete(versioning=True) assert self.col.MyVersionedDoc.versioning_collection.find().count() == 0 assert self.col.MyVersionedDoc.find().count() == 0 versioned_doc = self.col.MyVersionedDoc() versioned_doc['_id'] = "mydoc" versioned_doc['foo'] = u'bla' versioned_doc.save() assert self.col.MyVersionedDoc.versioning_collection.find().count() == 1 versioned_doc['foo'] = u'bar' versioned_doc.save() assert self.col.MyVersionedDoc.versioning_collection.find().count() == 2 versioned_doc.delete() assert self.col.MyVersionedDoc.versioning_collection.find().count() == 2 assert self.col.MyVersionedDoc.find().count() == 0 def test_remove_versioning(self): class MyVersionedDoc(VersionedDocument): structure = { "foo" : unicode, } self.connection.register([MyVersionedDoc]) versioned_doc = self.col.MyVersionedDoc() versioned_doc['_id'] = "mydoc" versioned_doc['foo'] = u'bla' versioned_doc.save() versioned_doc2 = self.col.MyVersionedDoc() versioned_doc2['_id'] = "mydoc2" versioned_doc2['foo'] = u'bla' versioned_doc2.save() versioned_doc3 = self.col.MyVersionedDoc() versioned_doc3['_id'] = "mydoc3" versioned_doc3['foo'] = u'bla' versioned_doc3.save() versioned_doc['foo'] = u'bar' versioned_doc.save() versioned_doc2['foo'] = u'bar' versioned_doc2.save() versioned_doc3['foo'] = u'bar' versioned_doc3.save() count = self.col.MyVersionedDoc.versioning_collection.find().count() assert count == 6, count count = self.col.MyVersionedDoc.collection.find().count() assert count == 3, count versioned_doc.remove({'foo':'bar'}, versioning=True) count = 
self.col.MyVersionedDoc.versioning_collection.find().count() assert count == 0, count count = self.col.MyVersionedDoc.collection.find().count() assert count == 0, count def _test_versioning_with_dynamic_db(self): class MyVersionedDoc(VersionedDocument): structure = { "foo" : unicode, } self.connection.register([MyVersionedDoc]) versioned_doc = self.col.MyVersionedDoc() versioned_doc['_id'] = "mydoc" versioned_doc['foo'] = u'bla' versioned_doc.save() ver_doc = list(self.connection.test.versioned_mongokit.find()) assert len(ver_doc) == 1 assert ver_doc[0]['id'] == 'mydoc' assert ver_doc[0]['revision'] == 1 assert ver_doc[0]['doc'] == {u'_revision': 1, u'foo': u'bla', u'_id': u'mydoc'} ver_mongokit2 = list(CONNECTION['versioned_test']['versioned_mongokit'].find()) assert len(ver_mongokit2) == 0, len(ver_mongokit2) versioned_doc2 = MyVersionedDoc(versioning_db_name="versioned_test") versioned_doc2['_id'] = "mydoc2" versioned_doc2['foo'] = u'bla' versioned_doc2.save() ver_mongokit = list(CONNECTION['test']['versioned_mongokit'].find()) assert len(ver_mongokit) == 1, len(ver_mongokit) ver_doc = list(CONNECTION['versioned_test']['versioned_mongokit'].find()) assert len(ver_doc) == 1 assert ver_doc[0]['id'] == 'mydoc2' assert ver_doc[0]['revision'] == 1 assert ver_doc[0]['doc'] == {u'_revision': 1, u'foo': u'bla', u'_id': u'mydoc2'} versioned_doc['foo'] = u'bar' versioned_doc.save() ver_doc = list(CONNECTION['test']['versioned_mongokit'].find()) assert len(ver_doc) == 2 ver_doc = list(CONNECTION['versioned_test']['versioned_mongokit'].find()) assert len(ver_doc) == 1 def _test_versioning_with_dynamic_collection(self): class MyVersionedDoc(VersionedDocument): structure = { "foo" : unicode, } versioning_collection_name = "versioned_mongokit" versioned_doc = MyVersionedDoc() versioned_doc['_id'] = "mydoc" versioned_doc['foo'] = u'bla' versioned_doc.save() ver_doc = list(CONNECTION['test']['versioned_mongokit'].find()) assert len(ver_doc) == 1 assert ver_doc[0]['id'] == 'mydoc' assert ver_doc[0]['revision'] == 1 assert ver_doc[0]['doc'] == {u'_revision': 1, u'foo': u'bla', u'_id': u'mydoc'} ver_mongokit2 = list(CONNECTION['test']['versioned_mongokit2'].find()) assert len(ver_mongokit2) == 0 versioned_doc2 = MyVersionedDoc(versioning_collection_name="versioned_mongokit2") versioned_doc2['_id'] = "mydoc2" versioned_doc2['foo'] = u'bla' versioned_doc2.save() ver_mongokit = list(CONNECTION['test']['versioned_mongokit'].find()) assert len(ver_mongokit) == 1, len(ver_mongokit) ver_doc = list(CONNECTION['test']['versioned_mongokit2'].find()) assert len(ver_doc) == 1 assert ver_doc[0]['id'] == 'mydoc2' assert ver_doc[0]['revision'] == 1 assert ver_doc[0]['doc'] == {u'_revision': 1, u'foo': u'bla', u'_id': u'mydoc2'} versioned_doc['foo'] = u'bar' versioned_doc.save() ver_doc = list(CONNECTION['test']['versioned_mongokit'].find()) assert len(ver_doc) == 2 ver_doc = list(CONNECTION['test']['versioned_mongokit2'].find()) assert len(ver_doc) == 1 def test_versioning_without_versioning_collection_name(self): test_passed = False try: class Group(VersionedDocument): use_autorefs = True structure = { 'name':unicode, 'members':[User], #users } except: test_passed = True assert test_passed def test_resave_versioned_doc_with_objectId(self): """ 1. Create a simple VersionedDocument using the defaults, thus using the built-in objectID. 2. save to the database 3. change the VersionedDocument contents (leave _id unchanged) 4. resave to the database 4a. the save action will search for the get_last_revision_id 4b. 
add +1 to the _revision attribute 4c. save the revised document, save the old document in the versioned_* collection 4a BREAKS! self['_revision'] = self.get_last_revision_id() File "...\mongokit\versioned_document.py", line 100, in get_last_revision_id {'id':self['_id']}).sort('revision', -1).next() File "...\mongokit\cursor.py", line 44, in next raise StopIteration """ class MyVersionedDoc(VersionedDocument): structure = { "foo" : unicode, } self.connection.register([MyVersionedDoc]) versioned_doc = self.col.MyVersionedDoc() versioned_doc['foo'] = u'bla' versioned_doc.save() docs = list(self.col.find()) assert len(docs) == 1 versioned_doc['foo'] = u'Some Other bla' versioned_doc.save() print(versioned_doc) def test_resave_versioned_doc_with_UUID(self): """ Simple versioning test, a bit different than the test_save_versionning """ class MyVersionedUUIDDoc(VersionedDocument): structure = { "foo" : unicode, } def save(self, versioning=True, uuid=True, *args, **kwargs): """ Ensure that the save is performed using uuid=True """ return super(MyVersionedUUIDDoc, self).save(versioning, uuid, *args, **kwargs) self.connection.register([MyVersionedUUIDDoc]) versioned_doc = self.col.MyVersionedUUIDDoc() versioned_doc['foo'] = u'bla' versioned_doc.save() docs = list(self.col.find()) assert len(docs) == 1 versioned_doc['foo'] = u'Some Other bla' versioned_doc.save() # search for the versioned_doc in the database and compare id's ver_doc = list(self.connection.test.mongokit.find()) assert len(ver_doc) == 1 assert ver_doc[0]['_revision'] == 2 assert ver_doc[0]['foo'] == u'Some Other bla' assert ver_doc[0]['_id'][:18] == u'MyVersionedUUIDDoc' assert ver_doc[0]['_id'] == versioned_doc['_id']
bsd-3-clause
collective/eden
modules/s3db/doc.py
2
32300
# -*- coding: utf-8 -*- """ Sahana Eden Document Library @copyright: 2011-2014 (c) Sahana Software Foundation @license: MIT Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ __all__ = ("S3DocumentLibrary", "S3DocSitRepModel", "doc_image_represent", "doc_document_list_layout", ) import os from gluon import * from gluon.storage import Storage from ..s3 import * # ============================================================================= class S3DocumentLibrary(S3Model): names = ("doc_entity", "doc_document", "doc_document_id", "doc_image", ) def model(self): T = current.T db = current.db s3 = current.response.s3 person_comment = self.pr_person_comment person_id = self.pr_person_id location_id = self.gis_location_id organisation_id = self.org_organisation_id messages = current.messages NONE = messages["NONE"] UNKNOWN_OPT = messages.UNKNOWN_OPT # Shortcuts configure = self.configure crud_strings = s3.crud_strings define_table = self.define_table folder = current.request.folder super_link = self.super_link # --------------------------------------------------------------------- # Document-referencing entities # entity_types = Storage(asset_asset=T("Asset"), cms_post=T("Post"), cr_shelter=T("Shelter"), deploy_mission=T("Mission"), doc_sitrep=T("Situation Report"), event_incident=T("Incident"), event_incident_report=T("Incident Report"), hms_hospital=T("Hospital"), hrm_human_resource=T("Human Resource"), inv_adj=T("Stock Adjustment"), inv_warehouse=T("Warehouse"), # @ToDo: Deprecate irs_ireport=T("Incident Report"), pr_group=T("Team"), project_project=T("Project"), project_activity=T("Project Activity"), project_framework=T("Project Framework"), project_task=T("Task"), org_office=T("Office"), org_facility=T("Facility"), org_group=T("Organization Group"), # @ToDo: Deprecate stats_people=T("People"), vulnerability_document=T("Vulnerability Document"), vulnerability_risk=T("Risk"), vulnerability_evac_route=T("Evacuation Route"), ) tablename = "doc_entity" self.super_entity(tablename, "doc_id", entity_types) # Components doc_id = "doc_id" self.add_components(tablename, doc_document = doc_id, doc_image = doc_id, ) # --------------------------------------------------------------------- # Documents # tablename = "doc_document" define_table(tablename, # Instance self.stats_source_superlink, # Component not instance super_link(doc_id, "doc_entity"), # @ToDo: Remove since Site Instances are doc entities? 
super_link("site_id", "org_site"), Field("file", "upload", autodelete = True, represent = self.doc_file_represent, # upload folder needs to be visible to the download() function as well as the upload uploadfolder = os.path.join(folder, "uploads"), ), Field("mime_type", readable = False, writable = False, ), Field("name", length=128, # Allow Name to be added onvalidation requires = IS_EMPTY_OR(IS_LENGTH(128)), label = T("Name") ), Field("url", label = T("URL"), represent = lambda url: \ url and A(url, _href=url) or NONE, requires = IS_EMPTY_OR(IS_URL()), ), Field("has_been_indexed", "boolean", default = False, readable = False, writable = False, ), person_id( # Enable when-required label = T("Author"), readable = False, writable = False, comment = person_comment(T("Author"), T("The Author of this Document (optional)")) ), organisation_id(# Enable when-required readable = False, writable = False, ), s3_date(label = T("Date Published"), ), # @ToDo: Move location to link table location_id(# Enable when-required readable = False, writable = False, ), s3_comments(), Field("checksum", readable = False, writable = False, ), *s3_meta_fields()) # CRUD Strings crud_strings[tablename] = Storage( label_create = T("Add Reference Document"), title_display = T("Document Details"), title_list = T("Documents"), title_update = T("Edit Document"), label_list_button = T("List Documents"), label_delete_button = T("Delete Document"), msg_record_created = T("Document added"), msg_record_modified = T("Document updated"), msg_record_deleted = T("Document deleted"), msg_list_empty = T("No Documents found") ) # Search Method # Resource Configuration if current.deployment_settings.get_base_solr_url(): onaccept = self.document_onaccept ondelete = self.document_ondelete else: onaccept = None ondelete = None configure(tablename, context = {"organisation": "organisation_id", "person": "person_id", "site": "site_id", }, deduplicate = self.document_duplicate, list_layout = doc_document_list_layout, onaccept = onaccept, ondelete = ondelete, onvalidation = self.document_onvalidation, super_entity = "stats_source", ) # Reusable field represent = doc_DocumentRepresent(lookup = tablename, fields = ["name", "file", "url"], labels = "%(name)s", show_link = True) document_id = S3ReusableField("document_id", "reference %s" % tablename, label = T("Document"), ondelete = "CASCADE", represent = represent, requires = IS_ONE_OF(db, "doc_document.id", represent), ) # --------------------------------------------------------------------- # Images # # @ToDo: Field to determine which is the default image to use for # e.g. a Map popup (like the profile picture) # readable/writable=False except in the cases where-needed # doc_image_type_opts = {1: T("Photograph"), 2: T("Map"), 3: T("Document Scan"), 99: T("other") } tablename = "doc_image" define_table(tablename, # Component not instance super_link(doc_id, "doc_entity"), super_link("pe_id", "pr_pentity"), # @ToDo: Remove & make Persons doc entities instead? super_link("site_id", "org_site"), # @ToDo: Remove since Site Instances are doc entities? 
Field("file", "upload", autodelete=True, represent = doc_image_represent, requires = IS_EMPTY_OR( IS_IMAGE(extensions=(s3.IMAGE_EXTENSIONS)), # Distingish from prepop null = "", ), # upload folder needs to be visible to the download() function as well as the upload uploadfolder = os.path.join(folder, "uploads", "images"), widget = S3ImageCropWidget((600, 600)), ), Field("mime_type", readable = False, writable = False, ), Field("name", length=128, label = T("Name"), # Allow Name to be added onvalidation requires = IS_EMPTY_OR(IS_LENGTH(128)), ), Field("url", label = T("URL"), requires = IS_EMPTY_OR(IS_URL()), ), Field("type", "integer", default = 1, label = T("Image Type"), represent = lambda opt: \ doc_image_type_opts.get(opt, UNKNOWN_OPT), requires = IS_IN_SET(doc_image_type_opts, zero=None), ), person_id(label = T("Author"), ), organisation_id(), s3_date(label = T("Date Taken"), ), # @ToDo: Move location to link table location_id(), s3_comments(), Field("checksum", readable = False, writable = False, ), *s3_meta_fields()) # CRUD Strings crud_strings[tablename] = Storage( label_create = T("Add Photo"), title_display = T("Photo Details"), title_list = T("Photos"), title_update = T("Edit Photo"), label_list_button = T("List Photos"), label_delete_button = T("Delete Photo"), msg_record_created = T("Photo added"), msg_record_modified = T("Photo updated"), msg_record_deleted = T("Photo deleted"), msg_list_empty = T("No Photos found")) # Resource Configuration configure(tablename, deduplicate = self.document_duplicate, onvalidation = lambda form: \ self.document_onvalidation(form, document=False) ) # --------------------------------------------------------------------- # Pass model-global names to response.s3 # return dict(doc_document_id = document_id, ) # ------------------------------------------------------------------------- def defaults(self): """ Safe defaults if the module is disabled """ document_id = S3ReusableField("document_id", "integer", readable=False, writable=False) return dict(doc_document_id = document_id, ) # ------------------------------------------------------------------------- @staticmethod def doc_file_represent(file): """ File representation """ if file: try: # Read the filename from the file filename = current.db.doc_document.file.retrieve(file)[0] except IOError: return current.T("File not found") else: return A(filename, _href=URL(c="default", f="download", args=[file])) else: return current.messages["NONE"] # ------------------------------------------------------------------------- @staticmethod def document_duplicate(item): """ Import item de-duplication """ data = item.data query = None file = data.get("file") if file: table = item.table query = (table.file == file) else: url = data.get("url") if url: table = item.table query = (table.url == url) if query: duplicate = current.db(query).select(table.id, limitby=(0, 1)).first() if duplicate: item.id = duplicate.id item.method = item.METHOD.UPDATE return # ------------------------------------------------------------------------- @staticmethod def document_onvalidation(form, document=True): """ Form validation for both, documents and images """ form_vars = form.vars doc = form_vars.file if doc is None: # If this is a prepop, then file not in form # Interactive forms with empty doc has this as "" not None return if not document: encoded_file = form_vars.get("imagecrop-data", None) if encoded_file: # S3ImageCropWidget import base64 import uuid metadata, encoded_file = encoded_file.split(",") filename, datatype, 
enctype = metadata.split(";") f = Storage() f.filename = uuid.uuid4().hex + filename import cStringIO f.file = cStringIO.StringIO(base64.decodestring(encoded_file)) form_vars.file = f if not form_vars.name: form_vars.name = filename if not hasattr(doc, "file") and not doc and not form_vars.url: if document: msg = current.T("Either file upload or document URL required.") else: msg = current.T("Either file upload or image URL required.") form.errors.file = msg form.errors.url = msg if hasattr(doc, "file"): name = form_vars.name if not name: # Use the filename form_vars.name = doc.filename else: id = current.request.post_vars.id if id: if document: tablename = "doc_document" else: tablename = "doc_image" db = current.db table = db[tablename] record = db(table.id == id).select(table.file, limitby=(0, 1)).first() if record: name = form_vars.name if not name: # Use the filename form_vars.name = table.file.retrieve(record.file)[0] # Do a checksum on the file to see if it's a duplicate #import cgi #if isinstance(doc, cgi.FieldStorage) and doc.filename: # f = doc.file # form_vars.checksum = doc_checksum(f.read()) # f.seek(0) # if not form_vars.name: # form_vars.name = doc.filename #if form_vars.checksum is not None: # # Duplicate allowed if original version is deleted # query = ((table.checksum == form_vars.checksum) & \ # (table.deleted == False)) # result = db(query).select(table.name, # limitby=(0, 1)).first() # if result: # doc_name = result.name # form.errors["file"] = "%s %s" % \ # (T("This file already exists on the server as"), doc_name) # ------------------------------------------------------------------------- @staticmethod def document_onaccept(form): """ Build a full-text index """ form_vars = form.vars doc = form_vars.file table = current.db.doc_document document = json.dumps(dict(filename=doc, name=table.file.retrieve(doc)[0], id=form_vars.id, )) current.s3task.async("document_create_index", args = [document]) # ------------------------------------------------------------------------- @staticmethod def document_ondelete(row): """ Remove the full-text index """ db = current.db table = db.doc_document record = db(table.id == row.id).select(table.file, limitby=(0, 1)).first() document = json.dumps(dict(filename=record.file, id=row.id, )) current.s3task.async("document_delete_index", args = [document]) # ============================================================================= def doc_image_represent(filename): """ Represent an image as a clickable thumbnail @param filename: name of the image file """ if not filename: return current.messages["NONE"] return DIV(A(IMG(_src=URL(c="default", f="download", args=filename), _height=40), _class="zoom", _href=URL(c="default", f="download", args=filename))) # @todo: implement/activate the JavaScript for this: #import uuid #anchor = "zoom-media-image-%s" % uuid.uuid4() #return DIV(A(IMG(_src=URL(c="default", f="download", #args=filename), #_height=40), #_class="zoom", #_href="#%s" % anchor), #DIV(IMG(_src=URL(c="default", f="download", #args=filename), #_width=600), #_id="%s" % anchor, #_class="hide")) # ============================================================================= def doc_checksum(docstr): """ Calculate a checksum for a file """ import hashlib converted = hashlib.sha1(docstr).hexdigest() return converted # ============================================================================= def doc_document_list_layout(list_id, item_id, resource, rfields, record): """ Default dataList item renderer for Documents, e.g. 
on the HRM Profile @param list_id: the HTML ID of the list @param item_id: the HTML ID of the item @param resource: the S3Resource to render @param rfields: the S3ResourceFields to render @param record: the record as dict """ record_id = record["doc_document.id"] item_class = "thumbnail" raw = record._row title = record["doc_document.name"] file = raw["doc_document.file"] or "" url = raw["doc_document.url"] or "" date = record["doc_document.date"] comments = raw["doc_document.comments"] or "" if file: try: doc_name = current.s3db.doc_document.file.retrieve(file)[0] except (IOError, TypeError): doc_name = current.messages["NONE"] doc_url = URL(c="default", f="download", args=[file]) body = P(I(_class="icon-paperclip"), " ", SPAN(A(doc_name, _href=doc_url, ) ), " ", _class="card_1_line", ) elif url: body = P(I(_class="icon-globe"), " ", SPAN(A(url, _href=url, )), " ", _class="card_1_line", ) else: # Shouldn't happen! body = P(_class="card_1_line") # Edit Bar permit = current.auth.s3_has_permission table = current.s3db.doc_document if permit("update", table, record_id=record_id): edit_btn = A(I(" ", _class="icon icon-edit"), _href=URL(c="doc", f="document", args=[record_id, "update.popup"], vars={"refresh": list_id, "record": record_id}), _class="s3_modal", _title=current.T("Edit Document"), ) else: edit_btn = "" if permit("delete", table, record_id=record_id): delete_btn = A(I(" ", _class="icon icon-trash"), _class="dl-item-delete", ) else: delete_btn = "" edit_bar = DIV(edit_btn, delete_btn, _class="edit-bar fright", ) # Render the item item = DIV(DIV(I(_class="icon"), SPAN(" %s" % title, _class="card-title"), edit_bar, _class="card-header", ), DIV(DIV(DIV(body, P(SPAN(comments), " ", _class="card_manylines", ), _class="media", ), _class="media-body", ), _class="media", ), _class=item_class, _id=item_id, ) return item # ============================================================================= class doc_DocumentRepresent(S3Represent): """ Representation of Documents """ def link(self, k, v, row=None): """ Represent a (key, value) as hypertext link. 
@param k: the key (doc_document.id) @param v: the representation of the key @param row: the row with this key """ if row: try: filename = row["doc_document.file"] url = row["doc_document.url"] except AttributeError: return v else: if filename: url = URL(c="default", f="download", args=filename) return A(v, _href=url) elif url: return A(v, _href=url) return v # ============================================================================= class S3DocSitRepModel(S3Model): """ Situation Reports """ names = ("doc_sitrep", "doc_sitrep_id", ) def model(self): T = current.T # --------------------------------------------------------------------- # Situation Reports # - can be aggregated by OU # tablename = "doc_sitrep" self.define_table(tablename, self.super_link("doc_id", "doc_entity"), Field("name", length=128, label = T("Name"), ), Field("description", "text", label = T("Description"), represent = lambda body: XML(body), widget = s3_richtext_widget, ), self.org_organisation_id(), self.gis_location_id( widget = S3LocationSelector(show_map = False), ), s3_date(default = "now", ), s3_comments(), *s3_meta_fields()) # CRUD strings current.response.s3.crud_strings[tablename] = Storage( label_create = T("Add Situation Report"), title_display = T("Situation Report Details"), title_list = T("Situation Reports"), title_update = T("Edit Situation Report"), title_upload = T("Import Situation Reports"), label_list_button = T("List Situation Reports"), label_delete_button = T("Delete Situation Report"), msg_record_created = T("Situation Report added"), msg_record_modified = T("Situation Report updated"), msg_record_deleted = T("Situation Report deleted"), msg_list_empty = T("No Situation Reports currently registered")) crud_form = S3SQLCustomForm("name", "description", "organisation_id", "location_id", "date", S3SQLInlineComponent( "document", name = "document", label = T("Attachments"), fields = [("", "file")], ), "comments", ) if current.deployment_settings.get_org_branches(): org_filter = S3HierarchyFilter("organisation_id", leafonly = False, ) else: org_filter = S3OptionsFilter("organisation_id", #filter = True, #header = "", ) filter_widgets = [org_filter, S3LocationFilter(), S3DateFilter("date"), ] self.configure(tablename, crud_form = crud_form, filter_widgets = filter_widgets, list_fields = ["date", "event_sitrep.incident_id", "location_id$L1", "location_id$L2", "location_id$L3", "organisation_id", "name", (T("Attachments"), "document.file"), "comments", ], super_entity = "doc_entity", ) # Components self.add_components(tablename, event_sitrep = {"name": "event_sitrep", "joinby": "sitrep_id", }, event_incident = {"link": "event_sitrep", "joinby": "sitrep_id", "key": "incident_id", "actuate": "hide", "multiple": "False", #"autocomplete": "name", "autodelete": False, }, ) represent = S3Represent(lookup=tablename) sitrep_id = S3ReusableField("sitrep_id", "reference %s" % tablename, label = T("Situation Report"), ondelete = "RESTRICT", represent = represent, requires = IS_EMPTY_OR( IS_ONE_OF(db, "doc_sitrep.id", represent, orderby="doc_sitrep.name", sort=True)), sortby = "name", ) # --------------------------------------------------------------------- # Pass names back to global scope (s3.*) # return dict(doc_sitrep_id = sitrep_id, ) # ------------------------------------------------------------------------- @staticmethod def defaults(): """ Return safe defaults in case the model has been deactivated. 
""" dummy = S3ReusableField("dummy_id", "integer", readable = False, writable = False) return dict(doc_sitrep_id = lambda **attr: dummy("sitrep_id"), ) # END =========================================================================
mit
Jonekee/chromium.src
tools/telemetry/telemetry/user_story/shared_user_story_state.py
15
2183
# Copyright 2014 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. class SharedUserStoryState(object): """A class that manages the test state across multiple user stories. It's styled on unittest.TestCase for handling test setup & teardown logic. """ def __init__(self, test, options, user_story_set): """ This method is styled on unittest.TestCase.setUpClass. Override to do any action before running user stories that share this same state. Args: test: a page_test.PageTest instance. options: a BrowserFinderOptions instance that contains command line options. user_story_set: a user_story_set.UserStorySet instance. """ pass @property def platform(self): """ Override to return the platform which user stories that share this same state will be run on. """ raise NotImplementedError() def WillRunUserStory(self, user_story): """ Override to do any action before running each one of all user stories that share this same state. This method is styled on unittest.TestCase.setUp. """ raise NotImplementedError() def DidRunUserStory(self, results): """ Override to do any action after running each of all user stories that share this same state. This method is styled on unittest.TestCase.tearDown. """ raise NotImplementedError() def GetTestExpectationAndSkipValue(self, expectations): """ Return test expectation and skip value instance in case expectation is 'skip'. This is run after WillRunUserStory and before RunUserStory. """ raise NotImplementedError() def RunUserStory(self, results): """ Override to do any action before running each one of all user stories that share this same state. This method is styled on unittest.TestCase.run. """ raise NotImplementedError() def TearDownState(self, results): """ Override to do any action after running multiple user stories that share this same state. This method is styled on unittest.TestCase.tearDownClass. """ raise NotImplementedError()
bsd-3-clause
openstack/sahara
sahara/service/api/v2/data_sources.py
4
1194
# Copyright (c) 2016 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from sahara import conductor as c from sahara import context conductor = c.API def get_data_sources(**kwargs): return conductor.data_source_get_all(context.ctx(), regex_search=True, **kwargs) def get_data_source(id): return conductor.data_source_get(context.ctx(), id) def delete_data_source(id): conductor.data_source_destroy(context.ctx(), id) def register_data_source(values): return conductor.data_source_create(context.ctx(), values) def data_source_update(id, values): return conductor.data_source_update(context.ctx(), id, values)
apache-2.0
evansd/django
django/template/base.py
15
38221
""" This is the Django template system. How it works: The Lexer.tokenize() function converts a template string (i.e., a string containing markup with custom template tags) to tokens, which can be either plain text (TOKEN_TEXT), variables (TOKEN_VAR) or block statements (TOKEN_BLOCK). The Parser() class takes a list of tokens in its constructor, and its parse() method returns a compiled template -- which is, under the hood, a list of Node objects. Each Node is responsible for creating some sort of output -- e.g. simple text (TextNode), variable values in a given context (VariableNode), results of basic logic (IfNode), results of looping (ForNode), or anything else. The core Node types are TextNode, VariableNode, IfNode and ForNode, but plugin modules can define their own custom node types. Each Node has a render() method, which takes a Context and returns a string of the rendered node. For example, the render() method of a Variable Node returns the variable's value as a string. The render() method of a ForNode returns the rendered output of whatever was inside the loop, recursively. The Template class is a convenient wrapper that takes care of template compilation and rendering. Usage: The only thing you should ever use directly in this file is the Template class. Create a compiled template object with a template_string, then call render() with a context. In the compilation stage, the TemplateSyntaxError exception will be raised if the template doesn't have proper syntax. Sample code: >>> from django import template >>> s = '<html>{% if test %}<h1>{{ varvalue }}</h1>{% endif %}</html>' >>> t = template.Template(s) (t is now a compiled template, and its render() method can be called multiple times with multiple contexts) >>> c = template.Context({'test':True, 'varvalue': 'Hello'}) >>> t.render(c) '<html><h1>Hello</h1></html>' >>> c = template.Context({'test':False, 'varvalue': 'Hello'}) >>> t.render(c) '<html></html>' """ import logging import re from inspect import getcallargs, getfullargspec from django.template.context import ( # NOQA: imported for backwards compatibility BaseContext, Context, ContextPopException, RequestContext, ) from django.utils.formats import localize from django.utils.html import conditional_escape, escape from django.utils.safestring import SafeData, mark_safe from django.utils.text import ( get_text_list, smart_split, unescape_string_literal, ) from django.utils.timezone import template_localtime from django.utils.translation import gettext_lazy, pgettext_lazy from .exceptions import TemplateSyntaxError TOKEN_TEXT = 0 TOKEN_VAR = 1 TOKEN_BLOCK = 2 TOKEN_COMMENT = 3 TOKEN_MAPPING = { TOKEN_TEXT: 'Text', TOKEN_VAR: 'Var', TOKEN_BLOCK: 'Block', TOKEN_COMMENT: 'Comment', } # template syntax constants FILTER_SEPARATOR = '|' FILTER_ARGUMENT_SEPARATOR = ':' VARIABLE_ATTRIBUTE_SEPARATOR = '.' BLOCK_TAG_START = '{%' BLOCK_TAG_END = '%}' VARIABLE_TAG_START = '{{' VARIABLE_TAG_END = '}}' COMMENT_TAG_START = '{#' COMMENT_TAG_END = '#}' TRANSLATOR_COMMENT_MARK = 'Translators' SINGLE_BRACE_START = '{' SINGLE_BRACE_END = '}' # what to report as the origin for templates that come from non-loader sources # (e.g. 
strings) UNKNOWN_SOURCE = '<unknown source>' # match a variable or block tag and capture the entire tag, including start/end # delimiters tag_re = (re.compile('(%s.*?%s|%s.*?%s|%s.*?%s)' % (re.escape(BLOCK_TAG_START), re.escape(BLOCK_TAG_END), re.escape(VARIABLE_TAG_START), re.escape(VARIABLE_TAG_END), re.escape(COMMENT_TAG_START), re.escape(COMMENT_TAG_END)))) logger = logging.getLogger('django.template') class VariableDoesNotExist(Exception): def __init__(self, msg, params=()): self.msg = msg self.params = params def __str__(self): return self.msg % self.params class Origin: def __init__(self, name, template_name=None, loader=None): self.name = name self.template_name = template_name self.loader = loader def __str__(self): return self.name def __eq__(self, other): if not isinstance(other, Origin): return False return ( self.name == other.name and self.loader == other.loader ) @property def loader_name(self): if self.loader: return '%s.%s' % ( self.loader.__module__, self.loader.__class__.__name__, ) class Template: def __init__(self, template_string, origin=None, name=None, engine=None): # If Template is instantiated directly rather than from an Engine and # exactly one Django template engine is configured, use that engine. # This is required to preserve backwards-compatibility for direct use # e.g. Template('...').render(Context({...})) if engine is None: from .engine import Engine engine = Engine.get_default() if origin is None: origin = Origin(UNKNOWN_SOURCE) self.name = name self.origin = origin self.engine = engine self.source = template_string self.nodelist = self.compile_nodelist() def __iter__(self): for node in self.nodelist: yield from node def _render(self, context): return self.nodelist.render(context) def render(self, context): "Display stage -- can be called many times" with context.render_context.push_state(self): if context.template is None: with context.bind_template(self): context.template_name = self.name return self._render(context) else: return self._render(context) def compile_nodelist(self): """ Parse and compile the template source into a nodelist. If debug is True and an exception occurs during parsing, the exception is is annotated with contextual line information where it occurred in the template source. """ if self.engine.debug: lexer = DebugLexer(self.source) else: lexer = Lexer(self.source) tokens = lexer.tokenize() parser = Parser( tokens, self.engine.template_libraries, self.engine.template_builtins, self.origin, ) try: return parser.parse() except Exception as e: if self.engine.debug: e.template_debug = self.get_exception_info(e, e.token) raise def get_exception_info(self, exception, token): """ Return a dictionary containing contextual line information of where the exception occurred in the template. The following information is provided: message The message of the exception raised. source_lines The lines before, after, and including the line the exception occurred on. line The line number the exception occurred on. before, during, after The line the exception occurred on split into three parts: 1. The content before the token that raised the error. 2. The token that raised the error. 3. The content after the token that raised the error. total The number of lines in source_lines. top The line number where source_lines starts. bottom The line number where source_lines ends. start The start position of the token in the template source. end The end position of the token in the template source. 
""" start, end = token.position context_lines = 10 line = 0 upto = 0 source_lines = [] before = during = after = "" for num, next in enumerate(linebreak_iter(self.source)): if start >= upto and end <= next: line = num before = escape(self.source[upto:start]) during = escape(self.source[start:end]) after = escape(self.source[end:next]) source_lines.append((num, escape(self.source[upto:next]))) upto = next total = len(source_lines) top = max(1, line - context_lines) bottom = min(total, line + 1 + context_lines) # In some rare cases exc_value.args can be empty or an invalid # string. try: message = str(exception.args[0]) except (IndexError, UnicodeDecodeError): message = '(Could not get exception message)' return { 'message': message, 'source_lines': source_lines[top:bottom], 'before': before, 'during': during, 'after': after, 'top': top, 'bottom': bottom, 'total': total, 'line': line, 'name': self.origin.name, 'start': start, 'end': end, } def linebreak_iter(template_source): yield 0 p = template_source.find('\n') while p >= 0: yield p + 1 p = template_source.find('\n', p + 1) yield len(template_source) + 1 class Token: def __init__(self, token_type, contents, position=None, lineno=None): """ A token representing a string from the template. token_type One of TOKEN_TEXT, TOKEN_VAR, TOKEN_BLOCK, or TOKEN_COMMENT. contents The token source string. position An optional tuple containing the start and end index of the token in the template source. This is used for traceback information when debug is on. lineno The line number the token appears on in the template source. This is used for traceback information and gettext files. """ self.token_type, self.contents = token_type, contents self.lineno = lineno self.position = position def __str__(self): token_name = TOKEN_MAPPING[self.token_type] return ('<%s token: "%s...">' % (token_name, self.contents[:20].replace('\n', ''))) def split_contents(self): split = [] bits = iter(smart_split(self.contents)) for bit in bits: # Handle translation-marked template pieces if bit.startswith(('_("', "_('")): sentinel = bit[2] + ')' trans_bit = [bit] while not bit.endswith(sentinel): bit = next(bits) trans_bit.append(bit) bit = ' '.join(trans_bit) split.append(bit) return split class Lexer: def __init__(self, template_string): self.template_string = template_string self.verbatim = False def tokenize(self): """ Return a list of tokens from a given template_string. """ in_tag = False lineno = 1 result = [] for bit in tag_re.split(self.template_string): if bit: result.append(self.create_token(bit, None, lineno, in_tag)) in_tag = not in_tag lineno += bit.count('\n') return result def create_token(self, token_string, position, lineno, in_tag): """ Convert the given token string into a new Token object and return it. If in_tag is True, we are processing something that matched a tag, otherwise it should be treated as a literal string. """ if in_tag and token_string.startswith(BLOCK_TAG_START): # The [2:-2] ranges below strip off *_TAG_START and *_TAG_END. # We could do len(BLOCK_TAG_START) to be more "correct", but we've # hard-coded the 2s here for performance. And it's not like # the TAG_START values are going to change anytime, anyway. 
block_content = token_string[2:-2].strip() if self.verbatim and block_content == self.verbatim: self.verbatim = False if in_tag and not self.verbatim: if token_string.startswith(VARIABLE_TAG_START): token = Token(TOKEN_VAR, token_string[2:-2].strip(), position, lineno) elif token_string.startswith(BLOCK_TAG_START): if block_content[:9] in ('verbatim', 'verbatim '): self.verbatim = 'end%s' % block_content token = Token(TOKEN_BLOCK, block_content, position, lineno) elif token_string.startswith(COMMENT_TAG_START): content = '' if token_string.find(TRANSLATOR_COMMENT_MARK): content = token_string[2:-2].strip() token = Token(TOKEN_COMMENT, content, position, lineno) else: token = Token(TOKEN_TEXT, token_string, position, lineno) return token class DebugLexer(Lexer): def tokenize(self): """ Split a template string into tokens and annotates each token with its start and end position in the source. This is slower than the default lexer so only use it when debug is True. """ lineno = 1 result = [] upto = 0 for match in tag_re.finditer(self.template_string): start, end = match.span() if start > upto: token_string = self.template_string[upto:start] result.append(self.create_token(token_string, (upto, start), lineno, in_tag=False)) lineno += token_string.count('\n') upto = start token_string = self.template_string[start:end] result.append(self.create_token(token_string, (start, end), lineno, in_tag=True)) lineno += token_string.count('\n') upto = end last_bit = self.template_string[upto:] if last_bit: result.append(self.create_token(last_bit, (upto, upto + len(last_bit)), lineno, in_tag=False)) return result class Parser: def __init__(self, tokens, libraries=None, builtins=None, origin=None): self.tokens = tokens self.tags = {} self.filters = {} self.command_stack = [] if libraries is None: libraries = {} if builtins is None: builtins = [] self.libraries = libraries for builtin in builtins: self.add_library(builtin) self.origin = origin def parse(self, parse_until=None): """ Iterate through the parser tokens and compiles each one into a node. If parse_until is provided, parsing will stop once one of the specified tokens has been reached. This is formatted as a list of tokens, e.g. ['elif', 'else', 'endif']. If no matching token is reached, raise an exception with the unclosed block tag details. """ if parse_until is None: parse_until = [] nodelist = NodeList() while self.tokens: token = self.next_token() # Use the raw values here for TOKEN_* for a tiny performance boost. if token.token_type == 0: # TOKEN_TEXT self.extend_nodelist(nodelist, TextNode(token.contents), token) elif token.token_type == 1: # TOKEN_VAR if not token.contents: raise self.error(token, 'Empty variable tag on line %d' % token.lineno) try: filter_expression = self.compile_filter(token.contents) except TemplateSyntaxError as e: raise self.error(token, e) var_node = VariableNode(filter_expression) self.extend_nodelist(nodelist, var_node, token) elif token.token_type == 2: # TOKEN_BLOCK try: command = token.contents.split()[0] except IndexError: raise self.error(token, 'Empty block tag on line %d' % token.lineno) if command in parse_until: # A matching token has been reached. Return control to # the caller. Put the token back on the token list so the # caller knows where it terminated. self.prepend_token(token) return nodelist # Add the token to the command stack. This is used for error # messages if further parsing fails due to an unclosed block # tag. 
self.command_stack.append((command, token)) # Get the tag callback function from the ones registered with # the parser. try: compile_func = self.tags[command] except KeyError: self.invalid_block_tag(token, command, parse_until) # Compile the callback into a node object and add it to # the node list. try: compiled_result = compile_func(self, token) except Exception as e: raise self.error(token, e) self.extend_nodelist(nodelist, compiled_result, token) # Compile success. Remove the token from the command stack. self.command_stack.pop() if parse_until: self.unclosed_block_tag(parse_until) return nodelist def skip_past(self, endtag): while self.tokens: token = self.next_token() if token.token_type == TOKEN_BLOCK and token.contents == endtag: return self.unclosed_block_tag([endtag]) def extend_nodelist(self, nodelist, node, token): # Check that non-text nodes don't appear before an extends tag. if node.must_be_first and nodelist.contains_nontext: raise self.error( token, '%r must be the first tag in the template.' % node, ) if isinstance(nodelist, NodeList) and not isinstance(node, TextNode): nodelist.contains_nontext = True # Set origin and token here since we can't modify the node __init__() # method. node.token = token node.origin = self.origin nodelist.append(node) def error(self, token, e): """ Return an exception annotated with the originating token. Since the parser can be called recursively, check if a token is already set. This ensures the innermost token is highlighted if an exception occurs, e.g. a compile error within the body of an if statement. """ if not isinstance(e, Exception): e = TemplateSyntaxError(e) if not hasattr(e, 'token'): e.token = token return e def invalid_block_tag(self, token, command, parse_until=None): if parse_until: raise self.error( token, "Invalid block tag on line %d: '%s', expected %s. Did you " "forget to register or load this tag?" % ( token.lineno, command, get_text_list(["'%s'" % p for p in parse_until], 'or'), ), ) raise self.error( token, "Invalid block tag on line %d: '%s'. Did you forget to register " "or load this tag?" % (token.lineno, command) ) def unclosed_block_tag(self, parse_until): command, token = self.command_stack.pop() msg = "Unclosed tag on line %d: '%s'. Looking for one of: %s." % ( token.lineno, command, ', '.join(parse_until), ) raise self.error(token, msg) def next_token(self): return self.tokens.pop(0) def prepend_token(self, token): self.tokens.insert(0, token) def delete_first_token(self): del self.tokens[0] def add_library(self, lib): self.tags.update(lib.tags) self.filters.update(lib.filters) def compile_filter(self, token): """ Convenient wrapper for FilterExpression """ return FilterExpression(token, self) def find_filter(self, filter_name): if filter_name in self.filters: return self.filters[filter_name] else: raise TemplateSyntaxError("Invalid filter: '%s'" % filter_name) # This only matches constant *strings* (things in quotes or marked for # translation). Numbers are treated as variables for implementation reasons # (so that they retain their type when passed to filters). 
constant_string = r""" (?:%(i18n_open)s%(strdq)s%(i18n_close)s| %(i18n_open)s%(strsq)s%(i18n_close)s| %(strdq)s| %(strsq)s) """ % { 'strdq': r'"[^"\\]*(?:\\.[^"\\]*)*"', # double-quoted string 'strsq': r"'[^'\\]*(?:\\.[^'\\]*)*'", # single-quoted string 'i18n_open': re.escape("_("), 'i18n_close': re.escape(")"), } constant_string = constant_string.replace("\n", "") filter_raw_string = r""" ^(?P<constant>%(constant)s)| ^(?P<var>[%(var_chars)s]+|%(num)s)| (?:\s*%(filter_sep)s\s* (?P<filter_name>\w+) (?:%(arg_sep)s (?: (?P<constant_arg>%(constant)s)| (?P<var_arg>[%(var_chars)s]+|%(num)s) ) )? )""" % { 'constant': constant_string, 'num': r'[-+\.]?\d[\d\.e]*', 'var_chars': r'\w\.', 'filter_sep': re.escape(FILTER_SEPARATOR), 'arg_sep': re.escape(FILTER_ARGUMENT_SEPARATOR), } filter_re = re.compile(filter_raw_string, re.VERBOSE) class FilterExpression: """ Parse a variable token and its optional filters (all as a single string), and return a list of tuples of the filter name and arguments. Sample:: >>> token = 'variable|default:"Default value"|date:"Y-m-d"' >>> p = Parser('') >>> fe = FilterExpression(token, p) >>> len(fe.filters) 2 >>> fe.var <Variable: 'variable'> """ def __init__(self, token, parser): self.token = token matches = filter_re.finditer(token) var_obj = None filters = [] upto = 0 for match in matches: start = match.start() if upto != start: raise TemplateSyntaxError("Could not parse some characters: " "%s|%s|%s" % (token[:upto], token[upto:start], token[start:])) if var_obj is None: var, constant = match.group("var", "constant") if constant: try: var_obj = Variable(constant).resolve({}) except VariableDoesNotExist: var_obj = None elif var is None: raise TemplateSyntaxError("Could not find variable at " "start of %s." % token) else: var_obj = Variable(var) else: filter_name = match.group("filter_name") args = [] constant_arg, var_arg = match.group("constant_arg", "var_arg") if constant_arg: args.append((False, Variable(constant_arg).resolve({}))) elif var_arg: args.append((True, Variable(var_arg))) filter_func = parser.find_filter(filter_name) self.args_check(filter_name, filter_func, args) filters.append((filter_func, args)) upto = match.end() if upto != len(token): raise TemplateSyntaxError("Could not parse the remainder: '%s' " "from '%s'" % (token[upto:], token)) self.filters = filters self.var = var_obj def resolve(self, context, ignore_failures=False): if isinstance(self.var, Variable): try: obj = self.var.resolve(context) except VariableDoesNotExist: if ignore_failures: obj = None else: string_if_invalid = context.template.engine.string_if_invalid if string_if_invalid: if '%s' in string_if_invalid: return string_if_invalid % self.var else: return string_if_invalid else: obj = string_if_invalid else: obj = self.var for func, args in self.filters: arg_vals = [] for lookup, arg in args: if not lookup: arg_vals.append(mark_safe(arg)) else: arg_vals.append(arg.resolve(context)) if getattr(func, 'expects_localtime', False): obj = template_localtime(obj, context.use_tz) if getattr(func, 'needs_autoescape', False): new_obj = func(obj, autoescape=context.autoescape, *arg_vals) else: new_obj = func(obj, *arg_vals) if getattr(func, 'is_safe', False) and isinstance(obj, SafeData): obj = mark_safe(new_obj) else: obj = new_obj return obj def args_check(name, func, provided): provided = list(provided) # First argument, filter input, is implied. plen = len(provided) + 1 # Check to see if a decorator is providing the real function. 
func = getattr(func, '_decorated_function', func) args, _, _, defaults, _, _, _ = getfullargspec(func) alen = len(args) dlen = len(defaults or []) # Not enough OR Too many if plen < (alen - dlen) or plen > alen: raise TemplateSyntaxError("%s requires %d arguments, %d provided" % (name, alen - dlen, plen)) return True args_check = staticmethod(args_check) def __str__(self): return self.token class Variable: """ A template variable, resolvable against a given context. The variable may be a hard-coded string (if it begins and ends with single or double quote marks):: >>> c = {'article': {'section':'News'}} >>> Variable('article.section').resolve(c) 'News' >>> Variable('article').resolve(c) {'section': 'News'} >>> class AClass: pass >>> c = AClass() >>> c.article = AClass() >>> c.article.section = 'News' (The example assumes VARIABLE_ATTRIBUTE_SEPARATOR is '.') """ def __init__(self, var): self.var = var self.literal = None self.lookups = None self.translate = False self.message_context = None if not isinstance(var, str): raise TypeError( "Variable must be a string or number, got %s" % type(var)) try: # First try to treat this variable as a number. # # Note that this could cause an OverflowError here that we're not # catching. Since this should only happen at compile time, that's # probably OK. self.literal = float(var) # So it's a float... is it an int? If the original value contained a # dot or an "e" then it was a float, not an int. if '.' not in var and 'e' not in var.lower(): self.literal = int(self.literal) # "2." is invalid if var.endswith('.'): raise ValueError except ValueError: # A ValueError means that the variable isn't a number. if var.startswith('_(') and var.endswith(')'): # The result of the lookup should be translated at rendering # time. self.translate = True var = var[2:-1] # If it's wrapped with quotes (single or double), then # we're also dealing with a literal. try: self.literal = mark_safe(unescape_string_literal(var)) except ValueError: # Otherwise we'll set self.lookups so that resolve() knows we're # dealing with a bonafide variable if var.find(VARIABLE_ATTRIBUTE_SEPARATOR + '_') > -1 or var[0] == '_': raise TemplateSyntaxError("Variables and attributes may " "not begin with underscores: '%s'" % var) self.lookups = tuple(var.split(VARIABLE_ATTRIBUTE_SEPARATOR)) def resolve(self, context): """Resolve this variable against a given context.""" if self.lookups is not None: # We're dealing with a variable that needs to be resolved value = self._resolve_lookup(context) else: # We're dealing with a literal, so it's already been "resolved" value = self.literal if self.translate: is_safe = isinstance(value, SafeData) msgid = value.replace('%', '%%') msgid = mark_safe(msgid) if is_safe else msgid if self.message_context: return pgettext_lazy(self.message_context, msgid) else: return gettext_lazy(msgid) return value def __repr__(self): return "<%s: %r>" % (self.__class__.__name__, self.var) def __str__(self): return self.var def _resolve_lookup(self, context): """ Perform resolution of a real variable (i.e. not a literal) against the given context. As indicated by the method's name, this method is an implementation detail and shouldn't be called by external code. Use Variable.resolve() instead. 
""" current = context try: # catch-all for silent variable failures for bit in self.lookups: try: # dictionary lookup current = current[bit] # ValueError/IndexError are for numpy.array lookup on # numpy < 1.9 and 1.9+ respectively except (TypeError, AttributeError, KeyError, ValueError, IndexError): try: # attribute lookup # Don't return class attributes if the class is the context: if isinstance(current, BaseContext) and getattr(type(current), bit): raise AttributeError current = getattr(current, bit) except (TypeError, AttributeError): # Reraise if the exception was raised by a @property if not isinstance(current, BaseContext) and bit in dir(current): raise try: # list-index lookup current = current[int(bit)] except (IndexError, # list index out of range ValueError, # invalid literal for int() KeyError, # current is a dict without `int(bit)` key TypeError): # unsubscriptable object raise VariableDoesNotExist("Failed lookup for key " "[%s] in %r", (bit, current)) # missing attribute if callable(current): if getattr(current, 'do_not_call_in_templates', False): pass elif getattr(current, 'alters_data', False): current = context.template.engine.string_if_invalid else: try: # method call (assuming no args required) current = current() except TypeError: try: getcallargs(current) except TypeError: # arguments *were* required current = context.template.engine.string_if_invalid # invalid method call else: raise except Exception as e: template_name = getattr(context, 'template_name', None) or 'unknown' logger.debug( "Exception while resolving variable '%s' in template '%s'.", bit, template_name, exc_info=True, ) if getattr(e, 'silent_variable_failure', False): current = context.template.engine.string_if_invalid else: raise return current class Node: # Set this to True for nodes that must be first in the template (although # they can be preceded by text nodes. must_be_first = False child_nodelists = ('nodelist',) token = None def render(self, context): """ Return the node rendered as a string. """ pass def render_annotated(self, context): """ Render the node. If debug is True and an exception occurs during rendering, the exception is annotated with contextual line information where it occurred in the template. For internal usage this method is preferred over using the render method directly. """ try: return self.render(context) except Exception as e: if context.template.engine.debug and not hasattr(e, 'template_debug'): e.template_debug = context.render_context.template.get_exception_info(e, self.token) raise def __iter__(self): yield self def get_nodes_by_type(self, nodetype): """ Return a list of all nodes (within this node and its nodelist) of the given type """ nodes = [] if isinstance(self, nodetype): nodes.append(self) for attr in self.child_nodelists: nodelist = getattr(self, attr, None) if nodelist: nodes.extend(nodelist.get_nodes_by_type(nodetype)) return nodes class NodeList(list): # Set to True the first time a non-TextNode is inserted by # extend_nodelist(). 
contains_nontext = False def render(self, context): bits = [] for node in self: if isinstance(node, Node): bit = node.render_annotated(context) else: bit = node bits.append(str(bit)) return mark_safe(''.join(bits)) def get_nodes_by_type(self, nodetype): "Return a list of all nodes of the given type" nodes = [] for node in self: nodes.extend(node.get_nodes_by_type(nodetype)) return nodes class TextNode(Node): def __init__(self, s): self.s = s def __repr__(self): return "<%s: %r>" % (self.__class__.__name__, self.s[:25]) def render(self, context): return self.s def render_value_in_context(value, context): """ Convert any value to a string to become part of a rendered template. This means escaping, if required, and conversion to a string. If value is a string, it's expected to already be translated. """ value = template_localtime(value, use_tz=context.use_tz) value = localize(value, use_l10n=context.use_l10n) if context.autoescape: if not issubclass(type(value), str): value = str(value) return conditional_escape(value) else: return str(value) class VariableNode(Node): def __init__(self, filter_expression): self.filter_expression = filter_expression def __repr__(self): return "<Variable Node: %s>" % self.filter_expression def render(self, context): try: output = self.filter_expression.resolve(context) except UnicodeDecodeError: # Unicode conversion can fail sometimes for reasons out of our # control (e.g. exception rendering). In that case, we fail # quietly. return '' return render_value_in_context(output, context) # Regex for token keyword arguments kwarg_re = re.compile(r"(?:(\w+)=)?(.+)") def token_kwargs(bits, parser, support_legacy=False): """ Parse token keyword arguments and return a dictionary of the arguments retrieved from the ``bits`` token list. `bits` is a list containing the remainder of the token (split by spaces) that is to be checked for arguments. Valid arguments are removed from this list. `support_legacy` - if True, the legacy format ``1 as foo`` is accepted. Otherwise, only the standard ``foo=1`` format is allowed. There is no requirement for all remaining token ``bits`` to be keyword arguments, so return the dictionary as soon as an invalid argument format is reached. """ if not bits: return {} match = kwarg_re.match(bits[0]) kwarg_format = match and match.group(1) if not kwarg_format: if not support_legacy: return {} if len(bits) < 3 or bits[1] != 'as': return {} kwargs = {} while bits: if kwarg_format: match = kwarg_re.match(bits[0]) if not match or not match.group(1): return kwargs key, value = match.groups() del bits[:1] else: if len(bits) < 3 or bits[1] != 'as': return kwargs key, value = bits[2], bits[0] del bits[:3] kwargs[key] = parser.compile_filter(value) if bits and not kwarg_format: if bits[0] != 'and': return kwargs del bits[:1] return kwargs
bsd-3-clause
sloria/sphinx-issues
test_sphinx_issues.py
1
4598
from tempfile import mkdtemp from shutil import rmtree try: from unittest.mock import Mock except ImportError: from mock import Mock from sphinx.application import Sphinx from sphinx_issues import ( issue_role, user_role, pr_role, cve_role, commit_role, setup as issues_setup, ) import pytest @pytest.yield_fixture( params=[ # Parametrize config {"issues_github_path": "marshmallow-code/marshmallow"}, { "issues_uri": "https://github.com/marshmallow-code/marshmallow/issues/{issue}", "issues_pr_uri": "https://github.com/marshmallow-code/marshmallow/pull/{pr}", "issues_commit_uri": "https://github.com/marshmallow-code/marshmallow/commit/{commit}", }, ] ) def app(request): src, doctree, confdir, outdir = [mkdtemp() for _ in range(4)] Sphinx._log = lambda self, message, wfile, nonl=False: None app = Sphinx( srcdir=src, confdir=None, outdir=outdir, doctreedir=doctree, buildername="html" ) issues_setup(app) # Stitch together as the sphinx app init() usually does w/ real conf files app.config._raw_config = request.param try: app.config.init_values() except TypeError: app.config.init_values(lambda x: x) yield app [rmtree(x) for x in (src, doctree, confdir, outdir)] @pytest.fixture() def inliner(app): return Mock(document=Mock(settings=Mock(env=Mock(app=app)))) @pytest.mark.parametrize( ("role", "role_name", "text", "expected_text", "expected_url"), [ ( issue_role, "issue", "42", "#42", "https://github.com/marshmallow-code/marshmallow/issues/42", ), ( pr_role, "pr", "42", "#42", "https://github.com/marshmallow-code/marshmallow/pull/42", ), (user_role, "user", "sloria", "@sloria", "https://github.com/sloria"), ( user_role, "user", "Steven Loria <sloria>", "Steven Loria", "https://github.com/sloria", ), ( cve_role, "cve", "CVE-2018-17175", "CVE-2018-17175", "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-17175", ), ( commit_role, "commit", "123abc456def", "123abc4", "https://github.com/marshmallow-code/marshmallow/commit/123abc456def", ), # External issue ( issue_role, "issue", "sloria/webargs#42", "sloria/webargs#42", "https://github.com/sloria/webargs/issues/42", ), # External PR ( pr_role, "pr", "sloria/webargs#42", "sloria/webargs#42", "https://github.com/sloria/webargs/pull/42", ), # External commit ( commit_role, "commit", "sloria/webargs@abc123def456", "sloria/webargs@abc123d", "https://github.com/sloria/webargs/commit/abc123def456", ), ], ) def test_roles(inliner, role, role_name, text, expected_text, expected_url): result = role(role_name, rawtext="", text=text, lineno=None, inliner=inliner) link = result[0][0] assert link.astext() == expected_text assert link.attributes["refuri"] == expected_url def test_issue_role_multiple(inliner): result = issue_role( name=None, rawtext="", text="42,43", inliner=inliner, lineno=None ) link1 = result[0][0] assert link1.astext() == "#42" issue_url = "https://github.com/marshmallow-code/marshmallow/issues/" assert link1.attributes["refuri"] == issue_url + "42" sep = result[0][1] assert sep.astext() == ", " link2 = result[0][2] assert link2.astext() == "#43" assert link2.attributes["refuri"] == issue_url + "43" def test_issue_role_multiple_with_external(inliner): result = issue_role( "issue", rawtext="", text="42,sloria/konch#43", inliner=inliner, lineno=None ) link1 = result[0][0] assert link1.astext() == "#42" issue_url = "https://github.com/marshmallow-code/marshmallow/issues/42" assert link1.attributes["refuri"] == issue_url sep = result[0][1] assert sep.astext() == ", " link2 = result[0][2] assert link2.astext() == "sloria/konch#43" assert 
link2.attributes["refuri"] == "https://github.com/sloria/konch/issues/43"
mit
Ichag/odoo
addons/account_asset/wizard/__init__.py
445
1122
# -*- encoding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import account_asset_change_duration import wizard_asset_compute # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
pavelchristof/gomoku-ai
third_party/llvm/expand_cmake_vars.py
168
2679
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Expands CMake variables in a text file.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import re import sys _CMAKE_DEFINE_REGEX = re.compile(r"\s*#cmakedefine\s+([A-Za-z_0-9]*)(\s.*)?$") _CMAKE_DEFINE01_REGEX = re.compile(r"\s*#cmakedefine01\s+([A-Za-z_0-9]*)") _CMAKE_VAR_REGEX = re.compile(r"\${([A-Za-z_0-9]*)}") def _parse_args(argv): """Parses arguments with the form KEY=VALUE into a dictionary.""" result = {} for arg in argv: k, v = arg.split("=") result[k] = v return result def _expand_variables(input_str, cmake_vars): """Expands ${VARIABLE}s in 'input_str', using dictionary 'cmake_vars'. Args: input_str: the string containing ${VARIABLE} expressions to expand. cmake_vars: a dictionary mapping variable names to their values. Returns: The expanded string. """ def replace(match): if match.group(1) in cmake_vars: return cmake_vars[match.group(1)] return "" return _CMAKE_VAR_REGEX.sub(replace, input_str) def _expand_cmakedefines(line, cmake_vars): """Expands #cmakedefine declarations, using a dictionary 'cmake_vars'.""" # Handles #cmakedefine lines match = _CMAKE_DEFINE_REGEX.match(line) if match: name = match.group(1) suffix = match.group(2) or "" if name in cmake_vars: return "#define {}{}\n".format(name, _expand_variables(suffix, cmake_vars)) else: return "/* #undef {} */\n".format(name) # Handles #cmakedefine01 lines match = _CMAKE_DEFINE01_REGEX.match(line) if match: name = match.group(1) value = cmake_vars.get(name, "0") return "#define {} {}\n".format(name, value) # Otherwise return the line unchanged. return _expand_variables(line, cmake_vars) def main(): cmake_vars = _parse_args(sys.argv[1:]) for line in sys.stdin: sys.stdout.write(_expand_cmakedefines(line, cmake_vars)) if __name__ == "__main__": main()
apache-2.0
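A minimal usage sketch for the expand_cmake_vars.py entry above, assuming its functions are importable as expand_cmake_vars (the import path is an assumption); the CMake variable names and values are illustrative only. In the TensorFlow build the script is normally driven as a stdin/stdout filter with KEY=VALUE arguments, which is what its main() implements.

from expand_cmake_vars import _expand_cmakedefines  # assumed import path

# Hypothetical CMake variables; only names present in this dict count as "defined".
cmake_vars = {"LLVM_VERSION_MAJOR": "9", "HAVE_STRERROR": "1"}

for line in ("#cmakedefine HAVE_STRERROR",
             "#cmakedefine HAVE_BACKTRACE",
             "#cmakedefine01 HAVE_STRERROR",
             "#define VERSION ${LLVM_VERSION_MAJOR}"):
    # Each call returns the expanded line; strip the trailing newline the
    # #cmakedefine branches add before printing.
    print(_expand_cmakedefines(line, cmake_vars).rstrip("\n"))

# Expected output, given the regexes in the script:
#   #define HAVE_STRERROR
#   /* #undef HAVE_BACKTRACE */
#   #define HAVE_STRERROR 1
#   #define VERSION 9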
JackonYang/shadowsocks
shadowsocks/crypto/sodium.py
1032
3778
#!/usr/bin/env python # # Copyright 2015 clowwindy # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from __future__ import absolute_import, division, print_function, \ with_statement from ctypes import c_char_p, c_int, c_ulonglong, byref, \ create_string_buffer, c_void_p from shadowsocks.crypto import util __all__ = ['ciphers'] libsodium = None loaded = False buf_size = 2048 # for salsa20 and chacha20 BLOCK_SIZE = 64 def load_libsodium(): global loaded, libsodium, buf libsodium = util.find_library('sodium', 'crypto_stream_salsa20_xor_ic', 'libsodium') if libsodium is None: raise Exception('libsodium not found') libsodium.crypto_stream_salsa20_xor_ic.restype = c_int libsodium.crypto_stream_salsa20_xor_ic.argtypes = (c_void_p, c_char_p, c_ulonglong, c_char_p, c_ulonglong, c_char_p) libsodium.crypto_stream_chacha20_xor_ic.restype = c_int libsodium.crypto_stream_chacha20_xor_ic.argtypes = (c_void_p, c_char_p, c_ulonglong, c_char_p, c_ulonglong, c_char_p) buf = create_string_buffer(buf_size) loaded = True class SodiumCrypto(object): def __init__(self, cipher_name, key, iv, op): if not loaded: load_libsodium() self.key = key self.iv = iv self.key_ptr = c_char_p(key) self.iv_ptr = c_char_p(iv) if cipher_name == 'salsa20': self.cipher = libsodium.crypto_stream_salsa20_xor_ic elif cipher_name == 'chacha20': self.cipher = libsodium.crypto_stream_chacha20_xor_ic else: raise Exception('Unknown cipher') # byte counter, not block counter self.counter = 0 def update(self, data): global buf_size, buf l = len(data) # we can only prepend some padding to make the encryption align to # blocks padding = self.counter % BLOCK_SIZE if buf_size < padding + l: buf_size = (padding + l) * 2 buf = create_string_buffer(buf_size) if padding: data = (b'\0' * padding) + data self.cipher(byref(buf), c_char_p(data), padding + l, self.iv_ptr, int(self.counter / BLOCK_SIZE), self.key_ptr) self.counter += l # buf is copied to a str object when we access buf.raw # strip off the padding return buf.raw[padding:padding + l] ciphers = { 'salsa20': (32, 8, SodiumCrypto), 'chacha20': (32, 8, SodiumCrypto), } def test_salsa20(): cipher = SodiumCrypto('salsa20', b'k' * 32, b'i' * 16, 1) decipher = SodiumCrypto('salsa20', b'k' * 32, b'i' * 16, 0) util.run_cipher(cipher, decipher) def test_chacha20(): cipher = SodiumCrypto('chacha20', b'k' * 32, b'i' * 16, 1) decipher = SodiumCrypto('chacha20', b'k' * 32, b'i' * 16, 0) util.run_cipher(cipher, decipher) if __name__ == '__main__': test_chacha20() test_salsa20()
apache-2.0
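A round-trip sketch for the SodiumCrypto wrapper in the sodium.py entry above, assuming libsodium is installed and the shadowsocks package is importable; the key and iv bytes are placeholders sized per the ciphers table (32-byte key, 8-byte iv).

from shadowsocks.crypto.sodium import SodiumCrypto  # assumed import path

key, iv = b'k' * 32, b'i' * 8
encryptor = SodiumCrypto('chacha20', key, iv, 1)  # last argument is the op flag
decryptor = SodiumCrypto('chacha20', key, iv, 0)

ciphertext = encryptor.update(b'hello world')
assert decryptor.update(ciphertext) == b'hello world'

Because salsa20/chacha20 are stream ciphers, decryption is the same keystream XOR as encryption; the op argument appears to be kept only so the class matches the interface of the other crypto backends.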
hyperized/ansible
test/units/parsing/yaml/test_loader.py
55
17407
# coding: utf-8 # (c) 2015, Toshio Kuratomi <[email protected]> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type from io import StringIO from units.compat import unittest from ansible import errors from ansible.module_utils.six import text_type, binary_type from ansible.module_utils.common._collections_compat import Sequence, Set, Mapping from ansible.parsing.yaml.loader import AnsibleLoader from ansible.parsing import vault from ansible.parsing.yaml.objects import AnsibleVaultEncryptedUnicode from ansible.parsing.yaml.dumper import AnsibleDumper from units.mock.yaml_helper import YamlTestUtils from units.mock.vault_helper import TextVaultSecret try: from _yaml import ParserError from _yaml import ScannerError except ImportError: from yaml.parser import ParserError from yaml.scanner import ScannerError class NameStringIO(StringIO): """In py2.6, StringIO doesn't let you set name because a baseclass has it as readonly property""" name = None def __init__(self, *args, **kwargs): super(NameStringIO, self).__init__(*args, **kwargs) class TestAnsibleLoaderBasic(unittest.TestCase): def setUp(self): pass def tearDown(self): pass def test_parse_number(self): stream = StringIO(u""" 1 """) loader = AnsibleLoader(stream, 'myfile.yml') data = loader.get_single_data() self.assertEqual(data, 1) # No line/column info saved yet def test_parse_string(self): stream = StringIO(u""" Ansible """) loader = AnsibleLoader(stream, 'myfile.yml') data = loader.get_single_data() self.assertEqual(data, u'Ansible') self.assertIsInstance(data, text_type) self.assertEqual(data.ansible_pos, ('myfile.yml', 2, 17)) def test_parse_utf8_string(self): stream = StringIO(u""" Cafè Eñyei """) loader = AnsibleLoader(stream, 'myfile.yml') data = loader.get_single_data() self.assertEqual(data, u'Cafè Eñyei') self.assertIsInstance(data, text_type) self.assertEqual(data.ansible_pos, ('myfile.yml', 2, 17)) def test_parse_dict(self): stream = StringIO(u""" webster: daniel oed: oxford """) loader = AnsibleLoader(stream, 'myfile.yml') data = loader.get_single_data() self.assertEqual(data, {'webster': 'daniel', 'oed': 'oxford'}) self.assertEqual(len(data), 2) self.assertIsInstance(list(data.keys())[0], text_type) self.assertIsInstance(list(data.values())[0], text_type) # Beginning of the first key self.assertEqual(data.ansible_pos, ('myfile.yml', 2, 17)) self.assertEqual(data[u'webster'].ansible_pos, ('myfile.yml', 2, 26)) self.assertEqual(data[u'oed'].ansible_pos, ('myfile.yml', 3, 22)) def test_parse_list(self): stream = StringIO(u""" - a - b """) loader = AnsibleLoader(stream, 'myfile.yml') data = loader.get_single_data() self.assertEqual(data, [u'a', u'b']) self.assertEqual(len(data), 2) self.assertIsInstance(data[0], text_type) self.assertEqual(data.ansible_pos, ('myfile.yml', 2, 17)) self.assertEqual(data[0].ansible_pos, 
('myfile.yml', 2, 19)) self.assertEqual(data[1].ansible_pos, ('myfile.yml', 3, 19)) def test_parse_short_dict(self): stream = StringIO(u"""{"foo": "bar"}""") loader = AnsibleLoader(stream, 'myfile.yml') data = loader.get_single_data() self.assertEqual(data, dict(foo=u'bar')) self.assertEqual(data.ansible_pos, ('myfile.yml', 1, 1)) self.assertEqual(data[u'foo'].ansible_pos, ('myfile.yml', 1, 9)) stream = StringIO(u"""foo: bar""") loader = AnsibleLoader(stream, 'myfile.yml') data = loader.get_single_data() self.assertEqual(data, dict(foo=u'bar')) self.assertEqual(data.ansible_pos, ('myfile.yml', 1, 1)) self.assertEqual(data[u'foo'].ansible_pos, ('myfile.yml', 1, 6)) def test_error_conditions(self): stream = StringIO(u"""{""") loader = AnsibleLoader(stream, 'myfile.yml') self.assertRaises(ParserError, loader.get_single_data) def test_tab_error(self): stream = StringIO(u"""---\nhosts: localhost\nvars:\n foo: bar\n\tblip: baz""") loader = AnsibleLoader(stream, 'myfile.yml') self.assertRaises(ScannerError, loader.get_single_data) def test_front_matter(self): stream = StringIO(u"""---\nfoo: bar""") loader = AnsibleLoader(stream, 'myfile.yml') data = loader.get_single_data() self.assertEqual(data, dict(foo=u'bar')) self.assertEqual(data.ansible_pos, ('myfile.yml', 2, 1)) self.assertEqual(data[u'foo'].ansible_pos, ('myfile.yml', 2, 6)) # Initial indent (See: #6348) stream = StringIO(u""" - foo: bar\n baz: qux""") loader = AnsibleLoader(stream, 'myfile.yml') data = loader.get_single_data() self.assertEqual(data, [{u'foo': u'bar', u'baz': u'qux'}]) self.assertEqual(data.ansible_pos, ('myfile.yml', 1, 2)) self.assertEqual(data[0].ansible_pos, ('myfile.yml', 1, 4)) self.assertEqual(data[0][u'foo'].ansible_pos, ('myfile.yml', 1, 9)) self.assertEqual(data[0][u'baz'].ansible_pos, ('myfile.yml', 2, 9)) class TestAnsibleLoaderVault(unittest.TestCase, YamlTestUtils): def setUp(self): self.vault_password = "hunter42" vault_secret = TextVaultSecret(self.vault_password) self.vault_secrets = [('vault_secret', vault_secret), ('default', vault_secret)] self.vault = vault.VaultLib(self.vault_secrets) @property def vault_secret(self): return vault.match_encrypt_secret(self.vault_secrets)[1] def test_wrong_password(self): plaintext = u"Ansible" bob_password = "this is a different password" bobs_secret = TextVaultSecret(bob_password) bobs_secrets = [('default', bobs_secret)] bobs_vault = vault.VaultLib(bobs_secrets) ciphertext = bobs_vault.encrypt(plaintext, vault.match_encrypt_secret(bobs_secrets)[1]) try: self.vault.decrypt(ciphertext) except Exception as e: self.assertIsInstance(e, errors.AnsibleError) self.assertEqual(e.message, 'Decryption failed (no vault secrets were found that could decrypt)') def _encrypt_plaintext(self, plaintext): # Construct a yaml repr of a vault by hand vaulted_var_bytes = self.vault.encrypt(plaintext, self.vault_secret) # add yaml tag vaulted_var = vaulted_var_bytes.decode() lines = vaulted_var.splitlines() lines2 = [] for line in lines: lines2.append(' %s' % line) vaulted_var = '\n'.join(lines2) tagged_vaulted_var = u"""!vault |\n%s""" % vaulted_var return tagged_vaulted_var def _build_stream(self, yaml_text): stream = NameStringIO(yaml_text) stream.name = 'my.yml' return stream def _loader(self, stream): return AnsibleLoader(stream, vault_secrets=self.vault.secrets) def _load_yaml(self, yaml_text, password): stream = self._build_stream(yaml_text) loader = self._loader(stream) data_from_yaml = loader.get_single_data() return data_from_yaml def test_dump_load_cycle(self): avu = 
AnsibleVaultEncryptedUnicode.from_plaintext('The plaintext for test_dump_load_cycle.', self.vault, self.vault_secret) self._dump_load_cycle(avu) def test_embedded_vault_from_dump(self): avu = AnsibleVaultEncryptedUnicode.from_plaintext('setec astronomy', self.vault, self.vault_secret) blip = {'stuff1': [{'a dict key': 24}, {'shhh-ssh-secrets': avu, 'nothing to see here': 'move along'}], 'another key': 24.1} blip = ['some string', 'another string', avu] stream = NameStringIO() self._dump_stream(blip, stream, dumper=AnsibleDumper) stream.seek(0) stream.seek(0) loader = self._loader(stream) data_from_yaml = loader.get_data() stream2 = NameStringIO(u'') # verify we can dump the object again self._dump_stream(data_from_yaml, stream2, dumper=AnsibleDumper) def test_embedded_vault(self): plaintext_var = u"""This is the plaintext string.""" tagged_vaulted_var = self._encrypt_plaintext(plaintext_var) another_vaulted_var = self._encrypt_plaintext(plaintext_var) different_var = u"""A different string that is not the same as the first one.""" different_vaulted_var = self._encrypt_plaintext(different_var) yaml_text = u"""---\nwebster: daniel\noed: oxford\nthe_secret: %s\nanother_secret: %s\ndifferent_secret: %s""" % (tagged_vaulted_var, another_vaulted_var, different_vaulted_var) data_from_yaml = self._load_yaml(yaml_text, self.vault_password) vault_string = data_from_yaml['the_secret'] self.assertEqual(plaintext_var, data_from_yaml['the_secret']) test_dict = {} test_dict[vault_string] = 'did this work?' self.assertEqual(vault_string.data, vault_string) # This looks weird and useless, but the object in question has a custom __eq__ self.assertEqual(vault_string, vault_string) another_vault_string = data_from_yaml['another_secret'] different_vault_string = data_from_yaml['different_secret'] self.assertEqual(vault_string, another_vault_string) self.assertNotEquals(vault_string, different_vault_string) # More testing of __eq__/__ne__ self.assertTrue('some string' != vault_string) self.assertNotEquals('some string', vault_string) # Note this is a compare of the str/unicode of these, they are different types # so we want to test self == other, and other == self etc self.assertEqual(plaintext_var, vault_string) self.assertEqual(vault_string, plaintext_var) self.assertFalse(plaintext_var != vault_string) self.assertFalse(vault_string != plaintext_var) class TestAnsibleLoaderPlay(unittest.TestCase): def setUp(self): stream = NameStringIO(u""" - hosts: localhost vars: number: 1 string: Ansible utf8_string: Cafè Eñyei dictionary: webster: daniel oed: oxford list: - a - b - 1 - 2 tasks: - name: Test case ping: data: "{{ utf8_string }}" - name: Test 2 ping: data: "Cafè Eñyei" - name: Test 3 command: "printf 'Cafè Eñyei\\n'" """) self.play_filename = '/path/to/myplay.yml' stream.name = self.play_filename self.loader = AnsibleLoader(stream) self.data = self.loader.get_single_data() def tearDown(self): pass def test_data_complete(self): self.assertEqual(len(self.data), 1) self.assertIsInstance(self.data, list) self.assertEqual(frozenset(self.data[0].keys()), frozenset((u'hosts', u'vars', u'tasks'))) self.assertEqual(self.data[0][u'hosts'], u'localhost') self.assertEqual(self.data[0][u'vars'][u'number'], 1) self.assertEqual(self.data[0][u'vars'][u'string'], u'Ansible') self.assertEqual(self.data[0][u'vars'][u'utf8_string'], u'Cafè Eñyei') self.assertEqual(self.data[0][u'vars'][u'dictionary'], { u'webster': u'daniel', u'oed': u'oxford' }) self.assertEqual(self.data[0][u'vars'][u'list'], [u'a', u'b', 1, 2]) 
self.assertEqual(self.data[0][u'tasks'], [ {u'name': u'Test case', u'ping': {u'data': u'{{ utf8_string }}'}}, {u'name': u'Test 2', u'ping': {u'data': u'Cafè Eñyei'}}, {u'name': u'Test 3', u'command': u'printf \'Cafè Eñyei\n\''}, ]) def walk(self, data): # Make sure there's no str in the data self.assertNotIsInstance(data, binary_type) # Descend into various container types if isinstance(data, text_type): # strings are a sequence so we have to be explicit here return elif isinstance(data, (Sequence, Set)): for element in data: self.walk(element) elif isinstance(data, Mapping): for k, v in data.items(): self.walk(k) self.walk(v) # Scalars were all checked so we're good to go return def test_no_str_in_data(self): # Checks that no strings are str type self.walk(self.data) def check_vars(self): # Numbers don't have line/col information yet # self.assertEqual(self.data[0][u'vars'][u'number'].ansible_pos, (self.play_filename, 4, 21)) self.assertEqual(self.data[0][u'vars'][u'string'].ansible_pos, (self.play_filename, 5, 29)) self.assertEqual(self.data[0][u'vars'][u'utf8_string'].ansible_pos, (self.play_filename, 6, 34)) self.assertEqual(self.data[0][u'vars'][u'dictionary'].ansible_pos, (self.play_filename, 8, 23)) self.assertEqual(self.data[0][u'vars'][u'dictionary'][u'webster'].ansible_pos, (self.play_filename, 8, 32)) self.assertEqual(self.data[0][u'vars'][u'dictionary'][u'oed'].ansible_pos, (self.play_filename, 9, 28)) self.assertEqual(self.data[0][u'vars'][u'list'].ansible_pos, (self.play_filename, 11, 23)) self.assertEqual(self.data[0][u'vars'][u'list'][0].ansible_pos, (self.play_filename, 11, 25)) self.assertEqual(self.data[0][u'vars'][u'list'][1].ansible_pos, (self.play_filename, 12, 25)) # Numbers don't have line/col info yet # self.assertEqual(self.data[0][u'vars'][u'list'][2].ansible_pos, (self.play_filename, 13, 25)) # self.assertEqual(self.data[0][u'vars'][u'list'][3].ansible_pos, (self.play_filename, 14, 25)) def check_tasks(self): # # First Task # self.assertEqual(self.data[0][u'tasks'][0].ansible_pos, (self.play_filename, 16, 23)) self.assertEqual(self.data[0][u'tasks'][0][u'name'].ansible_pos, (self.play_filename, 16, 29)) self.assertEqual(self.data[0][u'tasks'][0][u'ping'].ansible_pos, (self.play_filename, 18, 25)) self.assertEqual(self.data[0][u'tasks'][0][u'ping'][u'data'].ansible_pos, (self.play_filename, 18, 31)) # # Second Task # self.assertEqual(self.data[0][u'tasks'][1].ansible_pos, (self.play_filename, 20, 23)) self.assertEqual(self.data[0][u'tasks'][1][u'name'].ansible_pos, (self.play_filename, 20, 29)) self.assertEqual(self.data[0][u'tasks'][1][u'ping'].ansible_pos, (self.play_filename, 22, 25)) self.assertEqual(self.data[0][u'tasks'][1][u'ping'][u'data'].ansible_pos, (self.play_filename, 22, 31)) # # Third Task # self.assertEqual(self.data[0][u'tasks'][2].ansible_pos, (self.play_filename, 24, 23)) self.assertEqual(self.data[0][u'tasks'][2][u'name'].ansible_pos, (self.play_filename, 24, 29)) self.assertEqual(self.data[0][u'tasks'][2][u'command'].ansible_pos, (self.play_filename, 25, 32)) def test_line_numbers(self): # Check the line/column numbers are correct # Note: Remember, currently dicts begin at the start of their first entry self.assertEqual(self.data[0].ansible_pos, (self.play_filename, 2, 19)) self.assertEqual(self.data[0][u'hosts'].ansible_pos, (self.play_filename, 2, 26)) self.assertEqual(self.data[0][u'vars'].ansible_pos, (self.play_filename, 4, 21)) self.check_vars() self.assertEqual(self.data[0][u'tasks'].ansible_pos, (self.play_filename, 16, 21)) 
self.check_tasks()
gpl-3.0
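A small sketch of the behaviour the test_loader.py tests above exercise, assuming an importable ansible tree: AnsibleLoader returns string/mapping objects that carry (file, line, column) information in ansible_pos. The file name passed to the loader is arbitrary.

from io import StringIO
from ansible.parsing.yaml.loader import AnsibleLoader

stream = StringIO(u"webster: daniel\noed: oxford\n")
data = AnsibleLoader(stream, 'example.yml').get_single_data()

print(data)                           # {'webster': 'daniel', 'oed': 'oxford'}
print(data.ansible_pos)               # ('example.yml', 1, 1)  - dicts start at their first key
print(data['webster'].ansible_pos)    # ('example.yml', 1, 10) - position of the value 'daniel'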
hbhdytf/mac2
build/lib.linux-x86_64-2.7/swift/common/middleware/account_quotas.py
39
5676
# Copyright (c) 2013 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """ ``account_quotas`` is a middleware which blocks write requests (PUT, POST) if a given account quota (in bytes) is exceeded while DELETE requests are still allowed. ``account_quotas`` uses the ``x-account-meta-quota-bytes`` metadata entry to store the quota. Write requests to this metadata entry are only permitted for resellers. There is no quota limit if ``x-account-meta-quota-bytes`` is not set. The ``account_quotas`` middleware should be added to the pipeline in your ``/etc/swift/proxy-server.conf`` file just after any auth middleware. For example:: [pipeline:main] pipeline = catch_errors cache tempauth account_quotas proxy-server [filter:account_quotas] use = egg:swift#account_quotas To set the quota on an account:: swift -A http://127.0.0.1:8080/auth/v1.0 -U account:reseller -K secret \ post -m quota-bytes:10000 Remove the quota:: swift -A http://127.0.0.1:8080/auth/v1.0 -U account:reseller -K secret \ post -m quota-bytes: The same limitations apply for the account quotas as for the container quotas. For example, when uploading an object without a content-length header the proxy server doesn't know the final size of the currently uploaded object and the upload will be allowed if the current account size is within the quota. Due to the eventual consistency further uploads might be possible until the account size has been updated. """ from swift.common.constraints import check_copy_from_header from swift.common.swob import HTTPForbidden, HTTPBadRequest, \ HTTPRequestEntityTooLarge, wsgify from swift.common.utils import register_swift_info from swift.proxy.controllers.base import get_account_info, get_object_info class AccountQuotaMiddleware(object): """Account quota middleware See above for a full description. 
""" def __init__(self, app, *args, **kwargs): self.app = app @wsgify def __call__(self, request): if request.method not in ("POST", "PUT", "COPY"): return self.app try: ver, account, container, obj = request.split_path( 2, 4, rest_with_last=True) except ValueError: return self.app if not container: # account request, so we pay attention to the quotas new_quota = request.headers.get( 'X-Account-Meta-Quota-Bytes') remove_quota = request.headers.get( 'X-Remove-Account-Meta-Quota-Bytes') else: # container or object request; even if the quota headers are set # in the request, they're meaningless new_quota = remove_quota = None if remove_quota: new_quota = 0 # X-Remove dominates if both are present if request.environ.get('reseller_request') is True: if new_quota and not new_quota.isdigit(): return HTTPBadRequest() return self.app # deny quota set for non-reseller if new_quota is not None: return HTTPForbidden() if request.method == "POST" or not obj: return self.app if request.method == 'COPY': copy_from = container + '/' + obj else: if 'x-copy-from' in request.headers: src_cont, src_obj = check_copy_from_header(request) copy_from = "%s/%s" % (src_cont, src_obj) else: copy_from = None content_length = (request.content_length or 0) account_info = get_account_info(request.environ, self.app) if not account_info or not account_info['bytes']: return self.app try: quota = int(account_info['meta'].get('quota-bytes', -1)) except ValueError: return self.app if quota < 0: return self.app if copy_from: path = '/' + ver + '/' + account + '/' + copy_from object_info = get_object_info(request.environ, self.app, path) if not object_info or not object_info['length']: content_length = 0 else: content_length = int(object_info['length']) new_size = int(account_info['bytes']) + content_length if quota < new_size: resp = HTTPRequestEntityTooLarge(body='Upload exceeds quota.') if 'swift.authorize' in request.environ: orig_authorize = request.environ['swift.authorize'] def reject_authorize(*args, **kwargs): aresp = orig_authorize(*args, **kwargs) if aresp: return aresp return resp request.environ['swift.authorize'] = reject_authorize else: return resp return self.app def filter_factory(global_conf, **local_conf): """Returns a WSGI filter app for use with paste.deploy.""" register_swift_info('account_quotas') def account_quota_filter(app): return AccountQuotaMiddleware(app) return account_quota_filter
apache-2.0
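A minimal sketch of wiring the account_quotas middleware above outside of a paste.deploy pipeline, assuming the module is importable as swift.common.middleware.account_quotas; the inner WSGI app below is a hypothetical stand-in for the rest of the proxy pipeline.

from swift.common.middleware.account_quotas import filter_factory

def fake_proxy_app(environ, start_response):
    # stand-in for the downstream proxy-server app
    start_response('200 OK', [('Content-Length', '0')])
    return [b'']

account_quotas_filter = filter_factory({})      # no local conf options are used
app = account_quotas_filter(fake_proxy_app)     # wraps it in AccountQuotaMiddleware

# `app` is now a WSGI callable; PUT/POST/COPY requests that would push an
# account past its x-account-meta-quota-bytes value receive a 413 response,
# while reads and DELETEs pass straight through to fake_proxy_app.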
druuu/django
django/contrib/flatpages/templatetags/flatpages.py
472
3632
from django import template from django.conf import settings from django.contrib.flatpages.models import FlatPage from django.contrib.sites.shortcuts import get_current_site register = template.Library() class FlatpageNode(template.Node): def __init__(self, context_name, starts_with=None, user=None): self.context_name = context_name if starts_with: self.starts_with = template.Variable(starts_with) else: self.starts_with = None if user: self.user = template.Variable(user) else: self.user = None def render(self, context): if 'request' in context: site_pk = get_current_site(context['request']).pk else: site_pk = settings.SITE_ID flatpages = FlatPage.objects.filter(sites__id=site_pk) # If a prefix was specified, add a filter if self.starts_with: flatpages = flatpages.filter( url__startswith=self.starts_with.resolve(context)) # If the provided user is not authenticated, or no user # was provided, filter the list to only public flatpages. if self.user: user = self.user.resolve(context) if not user.is_authenticated(): flatpages = flatpages.filter(registration_required=False) else: flatpages = flatpages.filter(registration_required=False) context[self.context_name] = flatpages return '' @register.tag def get_flatpages(parser, token): """ Retrieves all flatpage objects available for the current site and visible to the specific user (or visible to all users if no user is specified). Populates the template context with them in a variable whose name is defined by the ``as`` clause. An optional ``for`` clause can be used to control the user whose permissions are to be used in determining which flatpages are visible. An optional argument, ``starts_with``, can be applied to limit the returned flatpages to those beginning with a particular base URL. This argument can be passed as a variable or a string, as it resolves from the template context. Syntax:: {% get_flatpages ['url_starts_with'] [for user] as context_name %} Example usage:: {% get_flatpages as flatpages %} {% get_flatpages for someuser as flatpages %} {% get_flatpages '/about/' as about_pages %} {% get_flatpages prefix as about_pages %} {% get_flatpages '/about/' for someuser as about_pages %} """ bits = token.split_contents() syntax_message = ("%(tag_name)s expects a syntax of %(tag_name)s " "['url_starts_with'] [for user] as context_name" % dict(tag_name=bits[0])) # Must have at 3-6 bits in the tag if len(bits) >= 3 and len(bits) <= 6: # If there's an even number of bits, there's no prefix if len(bits) % 2 == 0: prefix = bits[1] else: prefix = None # The very last bit must be the context name if bits[-2] != 'as': raise template.TemplateSyntaxError(syntax_message) context_name = bits[-1] # If there are 5 or 6 bits, there is a user defined if len(bits) >= 5: if bits[-4] != 'for': raise template.TemplateSyntaxError(syntax_message) user = bits[-3] else: user = None return FlatpageNode(context_name, starts_with=prefix, user=user) else: raise template.TemplateSyntaxError(syntax_message)
bsd-3-clause
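The parsing rules in get_flatpages above hinge on the number of space-separated bits in the tag. The snippet below is a plain-Python illustration of that bit-counting, not the Django code path itself; the tag strings mirror the docstring's examples.

examples = [
    "get_flatpages as flatpages",                      # 3 bits: no prefix, no user
    "get_flatpages '/about/' as about_pages",          # 4 bits: prefix only
    "get_flatpages for someuser as flatpages",         # 5 bits: user only
    "get_flatpages '/about/' for someuser as pages",   # 6 bits: prefix and user
]
for tag in examples:
    bits = tag.split()
    prefix = bits[1] if len(bits) % 2 == 0 else None   # even bit count => prefix given
    user = bits[-3] if len(bits) >= 5 else None        # 5 or 6 bits => 'for user' given
    context_name = bits[-1]                            # always the word after 'as'
    print(prefix, user, context_name)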
oaklen/Shelf
node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/easy_xml.py
1558
4945
# Copyright (c) 2011 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import re import os def XmlToString(content, encoding='utf-8', pretty=False): """ Writes the XML content to disk, touching the file only if it has changed. Visual Studio files have a lot of pre-defined structures. This function makes it easy to represent these structures as Python data structures, instead of having to create a lot of function calls. Each XML element of the content is represented as a list composed of: 1. The name of the element, a string, 2. The attributes of the element, a dictionary (optional), and 3+. The content of the element, if any. Strings are simple text nodes and lists are child elements. Example 1: <test/> becomes ['test'] Example 2: <myelement a='value1' b='value2'> <childtype>This is</childtype> <childtype>it!</childtype> </myelement> becomes ['myelement', {'a':'value1', 'b':'value2'}, ['childtype', 'This is'], ['childtype', 'it!'], ] Args: content: The structured content to be converted. encoding: The encoding to report on the first XML line. pretty: True if we want pretty printing with indents and new lines. Returns: The XML content as a string. """ # We create a huge list of all the elements of the file. xml_parts = ['<?xml version="1.0" encoding="%s"?>' % encoding] if pretty: xml_parts.append('\n') _ConstructContentList(xml_parts, content, pretty) # Convert it to a string return ''.join(xml_parts) def _ConstructContentList(xml_parts, specification, pretty, level=0): """ Appends the XML parts corresponding to the specification. Args: xml_parts: A list of XML parts to be appended to. specification: The specification of the element. See EasyXml docs. pretty: True if we want pretty printing with indents and new lines. level: Indentation level. """ # The first item in a specification is the name of the element. if pretty: indentation = ' ' * level new_line = '\n' else: indentation = '' new_line = '' name = specification[0] if not isinstance(name, str): raise Exception('The first item of an EasyXml specification should be ' 'a string. Specification was ' + str(specification)) xml_parts.append(indentation + '<' + name) # Optionally in second position is a dictionary of the attributes. rest = specification[1:] if rest and isinstance(rest[0], dict): for at, val in sorted(rest[0].iteritems()): xml_parts.append(' %s="%s"' % (at, _XmlEscape(val, attr=True))) rest = rest[1:] if rest: xml_parts.append('>') all_strings = reduce(lambda x, y: x and isinstance(y, str), rest, True) multi_line = not all_strings if multi_line and new_line: xml_parts.append(new_line) for child_spec in rest: # If it's a string, append a text node. # Otherwise recurse over that child definition if isinstance(child_spec, str): xml_parts.append(_XmlEscape(child_spec)) else: _ConstructContentList(xml_parts, child_spec, pretty, level + 1) if multi_line and indentation: xml_parts.append(indentation) xml_parts.append('</%s>%s' % (name, new_line)) else: xml_parts.append('/>%s' % new_line) def WriteXmlIfChanged(content, path, encoding='utf-8', pretty=False, win32=False): """ Writes the XML content to disk, touching the file only if it has changed. Args: content: The structured content to be written. path: Location of the file. encoding: The encoding to report on the first line of the XML file. pretty: True if we want pretty printing with indents and new lines. 
""" xml_string = XmlToString(content, encoding, pretty) if win32 and os.linesep != '\r\n': xml_string = xml_string.replace('\n', '\r\n') try: xml_string = xml_string.encode(encoding) except Exception: xml_string = unicode(xml_string, 'latin-1').encode(encoding) # Get the old content try: f = open(path, 'r') existing = f.read() f.close() except: existing = None # It has changed, write it if existing != xml_string: f = open(path, 'w') f.write(xml_string) f.close() _xml_escape_map = { '"': '&quot;', "'": '&apos;', '<': '&lt;', '>': '&gt;', '&': '&amp;', '\n': '&#xA;', '\r': '&#xD;', } _xml_escape_re = re.compile( "(%s)" % "|".join(map(re.escape, _xml_escape_map.keys()))) def _XmlEscape(value, attr=False): """ Escape a string for inclusion in XML.""" def replace(match): m = match.string[match.start() : match.end()] # don't replace single quotes in attrs if attr and m == "'": return m return _xml_escape_map[m] return _xml_escape_re.sub(replace, value)
mit
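Feeding the docstring's own example structure through XmlToString from the easy_xml.py entry above, assuming the module is importable as gyp.easy_xml (it ships inside node-gyp's bundled gyp and targets Python 2, hence the print statement).

from gyp.easy_xml import XmlToString  # assumed import path

spec = ['myelement', {'a': 'value1', 'b': 'value2'},
        ['childtype', 'This is'],
        ['childtype', 'it!']]

print XmlToString(spec, pretty=True)
# <?xml version="1.0" encoding="utf-8"?>
# <myelement a="value1" b="value2">
#   <childtype>This is</childtype>
#   <childtype>it!</childtype>
# </myelement>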
macosforge/ccs-calendarserver
txdav/caldav/datastore/scheduling/ischedule/remoteservers.py
1
6936
## # Copyright (c) 2006-2017 Apple Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ## from twext.python.filepath import CachingFilePath as FilePath from twext.python.log import Logger from twistedcaldav.config import config, fullServerPath from twistedcaldav import xmlutil """ XML based iSchedule configuration file handling. This is for handling of remote servers. The localservers.py module handles servers that are local (podded). """ __all__ = [ "IScheduleServers", ] log = Logger() class IScheduleServers(object): _fileInfo = None _xmlFile = None _servers = None _domainMap = None def __init__(self): if IScheduleServers._servers is None: self._loadConfig() def _loadConfig(self): if config.Scheduling.iSchedule.RemoteServers: if IScheduleServers._servers is None: IScheduleServers._xmlFile = FilePath( fullServerPath( config.ConfigRoot, config.Scheduling.iSchedule.RemoteServers, ) ) if IScheduleServers._xmlFile.exists(): IScheduleServers._xmlFile.restat() fileInfo = (IScheduleServers._xmlFile.getmtime(), IScheduleServers._xmlFile.getsize()) if fileInfo != IScheduleServers._fileInfo: parser = IScheduleServersParser(IScheduleServers._xmlFile) IScheduleServers._servers = parser.servers self._mapDomains() IScheduleServers._fileInfo = fileInfo else: IScheduleServers._servers = () IScheduleServers._domainMap = {} else: IScheduleServers._servers = () IScheduleServers._domainMap = {} def _mapDomains(self): IScheduleServers._domainMap = {} for server in IScheduleServers._servers: for domain in server.domains: IScheduleServers._domainMap[domain] = server def mapDomain(self, domain): """ Map a calendar user address domain to a suitable server that can handle server-to-server requests for that user. """ return IScheduleServers._domainMap.get(domain) ELEMENT_SERVERS = "servers" ELEMENT_SERVER = "server" ELEMENT_URI = "uri" ELEMENT_AUTHENTICATION = "authentication" ATTRIBUTE_TYPE = "type" ATTRIBUTE_BASICAUTH = "basic" ELEMENT_USER = "user" ELEMENT_PASSWORD = "password" ELEMENT_ALLOW_REQUESTS_FROM = "allow-requests-from" ELEMENT_ALLOW_REQUESTS_TO = "allow-requests-to" ELEMENT_DOMAINS = "domains" ELEMENT_DOMAIN = "domain" ELEMENT_CLIENT_HOSTS = "hosts" ELEMENT_HOST = "host" class IScheduleServersParser(object): """ Server-to-server configuration file parser. """ def __repr__(self): return "<{} {}>".format(self.__class__.__name__, self.xmlFile) def __init__(self, xmlFile): self.servers = [] # Read in XML _ignore_etree, servers_node = xmlutil.readXML(xmlFile.path, ELEMENT_SERVERS) self._parseXML(servers_node) def _parseXML(self, node): """ Parse the XML root node from the server-to-server configuration document. @param node: the L{Node} to parse. """ for child in node: if child.tag == ELEMENT_SERVER: self.servers.append(IScheduleServerRecord()) self.servers[-1].parseXML(child) class IScheduleServerRecord (object): """ Contains server-to-server details. """ def __init__(self, uri=None, rewriteCUAddresses=True, moreHeaders=[], podding=False): """ @param recordType: record type for directory entry. 
""" self.uri = "" self.authentication = None self.allow_from = False self.allow_to = True self.domains = [] self.client_hosts = [] self.rewriteCUAddresses = rewriteCUAddresses self.moreHeaders = moreHeaders self._podding = podding if uri: self.uri = uri self._parseDetails() def details(self): return (self.ssl, self.host, self.port, self.path,) def podding(self): return self._podding def redirect(self, location): """ Permanent redirect for the lifetime of this record. """ self.uri = location self._parseDetails() def parseXML(self, node): for child in node: if child.tag == ELEMENT_URI: self.uri = child.text elif child.tag == ELEMENT_AUTHENTICATION: self._parseAuthentication(child) elif child.tag == ELEMENT_ALLOW_REQUESTS_FROM: self.allow_from = True elif child.tag == ELEMENT_ALLOW_REQUESTS_TO: self.allow_to = True elif child.tag == ELEMENT_DOMAINS: self._parseList(child, ELEMENT_DOMAIN, self.domains) elif child.tag == ELEMENT_CLIENT_HOSTS: self._parseList(child, ELEMENT_HOST, self.client_hosts) else: raise RuntimeError("[{}] Unknown attribute: {}".format(self.__class__, child.tag,)) self._parseDetails() def _parseList(self, node, element_name, appendto): for child in node: if child.tag == element_name: appendto.append(child.text) def _parseAuthentication(self, node): if node.get(ATTRIBUTE_TYPE) != ATTRIBUTE_BASICAUTH: return for child in node: if child.tag == ELEMENT_USER: user = child.text elif child.tag == ELEMENT_PASSWORD: password = child.text self.authentication = ("basic", user, password,) def _parseDetails(self): # Extract scheme, host, port and path if self.uri.startswith("http://"): self.ssl = False rest = self.uri[7:] elif self.uri.startswith("https://"): self.ssl = True rest = self.uri[8:] splits = rest.split("/", 1) hostport = splits[0].split(":") self.host = hostport[0] if len(hostport) > 1: self.port = int(hostport[1]) else: self.port = {False: 80, True: 443}[self.ssl] self.path = "/" if len(splits) > 1: self.path += splits[1]
apache-2.0
rocky/python3-trepan
test/unit/test-cmdfns.py
1
2471
#!/usr/bin/env python3 'Unit test for trepan.processor.command.cmdfns' import unittest from trepan.processor import cmdfns as Mcmdfns class TestCommandHelper(unittest.TestCase): def setUp(self): self.errors = [] return def errmsg(self, msg): self.errors.append(msg) return def test_get_an_int(self): self.assertEqual(0, Mcmdfns.get_an_int(self.errmsg, '0', 'foo', 0)) self.assertEqual(0, len(self.errors)) self.assertEqual(6, Mcmdfns.get_an_int(self.errmsg, '6*1', 'foo', 5)) self.assertEqual(0, len(self.errors)) self.assertEqual(None, Mcmdfns.get_an_int(self.errmsg, '0', '0 is too small', 5)) self.assertEqual(1, len(self.errors)) self.assertEqual(None, Mcmdfns.get_an_int(self.errmsg, '4+a', '4+a is invalid', 5)) self.assertEqual('4+a is invalid', self.errors[-1]) return def test_get_int(self): self.assertEqual(1, Mcmdfns.get_int(self.errmsg, '1', 5)) self.assertEqual(3, Mcmdfns.get_int(self.errmsg, '1+2', 5)) self.assertEqual(5, Mcmdfns.get_int(self.errmsg, None, 5)) self.assertEqual(1, Mcmdfns.get_int(self.errmsg, None)) self.assertRaises(ValueError, Mcmdfns.get_int, *(self.errmsg, 'Foo', 5)) return def test_get_onoff(self): for arg in ('1', 'on'): self.assertEqual(True, Mcmdfns.get_onoff(self.errmsg, arg)) pass for arg in ('0', 'off'): self.assertEqual(False, Mcmdfns.get_onoff(self.errmsg, arg)) pass for result in (True, False): self.assertEqual(result, Mcmdfns.get_onoff(self.errmsg, None, result)) pass self.assertRaises(ValueError, Mcmdfns.get_onoff, *(self.errmsg, 'Foo')) return def test_want_different_line(self): for cmd, default, expected in [ ('s+', False, True), ('s-', True, False), ('s', False, False), ('n', True, True) ]: self.assertEqual(expected, Mcmdfns.want_different_line(cmd, default), cmd) pass return pass if __name__ == '__main__': unittest.main()
gpl-3.0
BrandonY/python-docs-samples
appengine/standard/multitenancy/datastore_test.py
9
1124
# Copyright 2015 Google Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import webtest import datastore def test_datastore(testbed): app = webtest.TestApp(datastore.app) response = app.get('/datastore') assert response.status_int == 200 assert 'Global: 1' in response.body response = app.get('/datastore/a') assert response.status_int == 200 assert 'Global: 2' in response.body assert 'a: 1' in response.body response = app.get('/datastore/b') assert response.status_int == 200 assert 'Global: 3' in response.body assert 'b: 1' in response.body
apache-2.0
darmaa/odoo
addons/edi/models/edi.py
44
31991
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Business Applications # Copyright (c) 2011-2012 OpenERP S.A. <http://openerp.com> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import base64 import hashlib import simplejson as json import logging import re import time import urllib2 import openerp import openerp.release as release from openerp.osv import osv, fields from openerp.tools.translate import _ from openerp.tools.safe_eval import safe_eval as eval _logger = logging.getLogger(__name__) EXTERNAL_ID_PATTERN = re.compile(r'^([^.:]+)(?::([^.]+))?\.(\S+)$') EDI_VIEW_WEB_URL = '%s/edi/view?db=%s&token=%s' EDI_PROTOCOL_VERSION = 1 # arbitrary ever-increasing version number EDI_GENERATOR = 'OpenERP ' + release.major_version EDI_GENERATOR_VERSION = release.version_info def split_external_id(ext_id): match = EXTERNAL_ID_PATTERN.match(ext_id) assert match, \ _("'%s' is an invalid external ID") % (ext_id) return {'module': match.group(1), 'db_uuid': match.group(2), 'id': match.group(3), 'full': match.group(0)} def safe_unique_id(database_id, model, record_id): """Generate a unique string to represent a (database_uuid,model,record_id) pair without being too long, and with a very low probability of collisions. """ msg = "%s-%s-%s-%s" % (time.time(), database_id, model, record_id) digest = hashlib.sha1(msg).digest() # fold the sha1 20 bytes digest to 9 bytes digest = ''.join(chr(ord(x) ^ ord(y)) for (x,y) in zip(digest[:9], digest[9:-2])) # b64-encode the 9-bytes folded digest to a reasonable 12 chars ASCII ID digest = base64.urlsafe_b64encode(digest) return '%s-%s' % (model.replace('.','_'), digest) def last_update_for(record): """Returns the last update timestamp for the given record, if available, otherwise False """ if record._model._log_access: record_log = record.perm_read()[0] return record_log.get('write_date') or record_log.get('create_date') or False return False class edi(osv.AbstractModel): _name = 'edi.edi' _description = 'EDI Subsystem' def new_edi_token(self, cr, uid, record): """Return a new, random unique token to identify this model record, and to be used as token when exporting it as an EDI document. :param browse_record record: model record for which a token is needed """ db_uuid = self.pool.get('ir.config_parameter').get_param(cr, uid, 'database.uuid') edi_token = hashlib.sha256('%s-%s-%s-%s' % (time.time(), db_uuid, record._name, record.id)).hexdigest() return edi_token def serialize(self, edi_documents): """Serialize the given EDI document structures (Python dicts holding EDI data), using JSON serialization. 
:param [dict] edi_documents: list of EDI document structures to serialize :return: UTF-8 encoded string containing the serialized document """ serialized_list = json.dumps(edi_documents) return serialized_list def generate_edi(self, cr, uid, records, context=None): """Generates a final EDI document containing the EDI serialization of the given records, which should all be instances of a Model that has the :meth:`~.edi` mixin. The document is not saved in the database. :param list(browse_record) records: records to export as EDI :return: UTF-8 encoded string containing the serialized records """ edi_list = [] for record in records: record_model = record._model edi_list += record_model.edi_export(cr, uid, [record], context=context) return self.serialize(edi_list) def load_edi(self, cr, uid, edi_documents, context=None): """Import the given EDI document structures into the system, using :meth:`~.import_edi`. :param edi_documents: list of Python dicts containing the deserialized version of EDI documents :return: list of (model, id, action) tuple containing the model and database ID of all records that were imported in the system, plus a suggested action definition dict for displaying each document. """ ir_module = self.pool.get('ir.module.module') res = [] for edi_document in edi_documents: module = edi_document.get('__import_module') or edi_document.get('__module') assert module, 'a `__module` or `__import_module` attribute is required in each EDI document.' if module != 'base' and not ir_module.search(cr, uid, [('name','=',module),('state','=','installed')]): raise osv.except_osv(_('Missing Application.'), _("The document you are trying to import requires the OpenERP `%s` application. " "You can install it by connecting as the administrator and opening the configuration assistant.")%(module,)) model = edi_document.get('__import_model') or edi_document.get('__model') assert model, 'a `__model` or `__import_model` attribute is required in each EDI document.' assert model in self.pool, 'model `%s` cannot be found, despite module `%s` being available - '\ 'this EDI document seems invalid or unsupported.' % (model,module) model_obj = self.pool[model] record_id = model_obj.edi_import(cr, uid, edi_document, context=context) record_action = model_obj._edi_record_display_action(cr, uid, record_id, context=context) res.append((model, record_id, record_action)) return res def deserialize(self, edi_documents_string): """Return deserialized version of the given EDI Document string. :param str|unicode edi_documents_string: UTF-8 string (or unicode) containing JSON-serialized EDI document(s) :return: Python object representing the EDI document(s) (usually a list of dicts) """ return json.loads(edi_documents_string) def import_edi(self, cr, uid, edi_document=None, edi_url=None, context=None): """Import a JSON serialized EDI Document string into the system, first retrieving it from the given ``edi_url`` if provided. :param str|unicode edi: UTF-8 string or unicode containing JSON-serialized EDI Document to import. Must not be provided if ``edi_url`` is given. :param str|unicode edi_url: URL where the EDI document (same format as ``edi``) may be retrieved, without authentication. """ if edi_url: assert not edi_document, 'edi must not be provided if edi_url is given.' edi_document = urllib2.urlopen(edi_url).read() assert edi_document, 'EDI Document is empty!' 
edi_documents = self.deserialize(edi_document) return self.load_edi(cr, uid, edi_documents, context=context) class EDIMixin(object): """Mixin class for Model objects that want be exposed as EDI documents. Classes that inherit from this mixin class should override the ``edi_import()`` and ``edi_export()`` methods to implement their specific behavior, based on the primitives provided by this mixin.""" def _edi_requires_attributes(self, attributes, edi): model_name = edi.get('__imported_model') or edi.get('__model') or self._name for attribute in attributes: assert edi.get(attribute),\ 'Attribute `%s` is required in %s EDI documents.' % (attribute, model_name) # private method, not RPC-exposed as it creates ir.model.data entries as # SUPERUSER based on its parameters def _edi_external_id(self, cr, uid, record, existing_id=None, existing_module=None, context=None): """Generate/Retrieve unique external ID for ``record``. Each EDI record and each relationship attribute in it is identified by a unique external ID, which includes the database's UUID, as a way to refer to any record within any OpenERP instance, without conflict. For OpenERP records that have an existing "External ID" (i.e. an entry in ir.model.data), the EDI unique identifier for this record will be made of "%s:%s:%s" % (module, database UUID, ir.model.data ID). The database's UUID MUST NOT contain a colon characters (this is guaranteed by the UUID algorithm). For records that have no existing ir.model.data entry, a new one will be created during the EDI export. It is recommended that the generated external ID contains a readable reference to the record model, plus a unique value that hides the database ID. If ``existing_id`` is provided (because it came from an import), it will be used instead of generating a new one. If ``existing_module`` is provided (because it came from an import), it will be used instead of using local values. :param browse_record record: any browse_record needing an EDI external ID :param string existing_id: optional existing external ID value, usually coming from a just-imported EDI record, to be used instead of generating a new one :param string existing_module: optional existing module name, usually in the format ``module:db_uuid`` and coming from a just-imported EDI record, to be used instead of local values :return: the full unique External ID to use for record """ ir_model_data = self.pool.get('ir.model.data') db_uuid = self.pool.get('ir.config_parameter').get_param(cr, uid, 'database.uuid') ext_id = record.get_external_id()[record.id] if not ext_id: ext_id = existing_id or safe_unique_id(db_uuid, record._name, record.id) # ID is unique cross-db thanks to db_uuid (already included in existing_module) module = existing_module or "%s:%s" % (record._original_module, db_uuid) _logger.debug("%s: Generating new external ID `%s.%s` for %r.", self._name, module, ext_id, record) ir_model_data.create(cr, openerp.SUPERUSER_ID, {'name': ext_id, 'model': record._name, 'module': module, 'res_id': record.id}) else: module, ext_id = ext_id.split('.') if not ':' in module: # this record was not previously EDI-imported if not module == record._original_module: # this could happen for data records defined in a module that depends # on the module that owns the model, e.g. purchase defines # product.pricelist records. 
_logger.debug('Mismatching module: expected %s, got %s, for %s.', module, record._original_module, record) # ID is unique cross-db thanks to db_uuid module = "%s:%s" % (module, db_uuid) return '%s.%s' % (module, ext_id) def _edi_record_display_action(self, cr, uid, id, context=None): """Returns an appropriate action definition dict for displaying the record with ID ``rec_id``. :param int id: database ID of record to display :return: action definition dict """ return {'type': 'ir.actions.act_window', 'view_mode': 'form,tree', 'view_type': 'form', 'res_model': self._name, 'res_id': id} def edi_metadata(self, cr, uid, records, context=None): """Return a list containing the boilerplate EDI structures for exporting ``records`` as EDI, including the metadata fields The metadata fields always include:: { '__model': 'some.model', # record model '__module': 'module', # require module '__id': 'module:db-uuid:model.id', # unique global external ID for the record '__last_update': '2011-01-01 10:00:00', # last update date in UTC! '__version': 1, # EDI spec version '__generator' : 'OpenERP', # EDI generator '__generator_version' : [6,1,0], # server version, to check compatibility. '__attachments_': } :param list(browse_record) records: records to export :return: list of dicts containing boilerplate EDI metadata for each record, at the corresponding index from ``records``. """ ir_attachment = self.pool.get('ir.attachment') results = [] for record in records: ext_id = self._edi_external_id(cr, uid, record, context=context) edi_dict = { '__id': ext_id, '__last_update': last_update_for(record), '__model' : record._name, '__module' : record._original_module, '__version': EDI_PROTOCOL_VERSION, '__generator': EDI_GENERATOR, '__generator_version': EDI_GENERATOR_VERSION, } attachment_ids = ir_attachment.search(cr, uid, [('res_model','=', record._name), ('res_id', '=', record.id)]) if attachment_ids: attachments = [] for attachment in ir_attachment.browse(cr, uid, attachment_ids, context=context): attachments.append({ 'name' : attachment.name, 'content': attachment.datas, # already base64 encoded! 'file_name': attachment.datas_fname, }) edi_dict.update(__attachments=attachments) results.append(edi_dict) return results def edi_m2o(self, cr, uid, record, context=None): """Return a m2o EDI representation for the given record. The EDI format for a many2one is:: ['unique_external_id', 'Document Name'] """ edi_ext_id = self._edi_external_id(cr, uid, record, context=context) relation_model = record._model name = relation_model.name_get(cr, uid, [record.id], context=context) name = name and name[0][1] or False return [edi_ext_id, name] def edi_o2m(self, cr, uid, records, edi_struct=None, context=None): """Return a list representing a O2M EDI relationship containing all the given records, according to the given ``edi_struct``. This is basically the same as exporting all the record using :meth:`~.edi_export` with the given ``edi_struct``, and wrapping the results in a list. Example:: [ # O2M fields would be a list of dicts, with their { '__id': 'module:db-uuid.id', # own __id. '__last_update': 'iso date', # update date 'name': 'some name', #... }, # ... ], """ result = [] for record in records: result += record._model.edi_export(cr, uid, [record], edi_struct=edi_struct, context=context) return result def edi_m2m(self, cr, uid, records, context=None): """Return a list representing a M2M EDI relationship directed towards all the given records. 
This is basically the same as exporting all the record using :meth:`~.edi_m2o` and wrapping the results in a list. Example:: # M2M fields are exported as a list of pairs, like a list of M2O values [ ['module:db-uuid.id1', 'Task 01: bla bla'], ['module:db-uuid.id2', 'Task 02: bla bla'] ] """ return [self.edi_m2o(cr, uid, r, context=context) for r in records] def edi_export(self, cr, uid, records, edi_struct=None, context=None): """Returns a list of dicts representing EDI documents containing the records, and matching the given ``edi_struct``, if provided. :param edi_struct: if provided, edi_struct should be a dictionary with a skeleton of the fields to export. Basic fields can have any key as value, but o2m values should have a sample skeleton dict as value, to act like a recursive export. For example, for a res.partner record:: edi_struct: { 'name': True, 'company_id': True, 'address': { 'name': True, 'street': True, } } Any field not specified in the edi_struct will not be included in the exported data. Fields with no value (False) will be omitted in the EDI struct. If edi_struct is omitted, no fields will be exported """ if edi_struct is None: edi_struct = {} fields_to_export = edi_struct.keys() results = [] for record in records: edi_dict = self.edi_metadata(cr, uid, [record], context=context)[0] for field in fields_to_export: column = self._all_columns[field].column value = getattr(record, field) if not value and value not in ('', 0): continue elif column._type == 'many2one': value = self.edi_m2o(cr, uid, value, context=context) elif column._type == 'many2many': value = self.edi_m2m(cr, uid, value, context=context) elif column._type == 'one2many': value = self.edi_o2m(cr, uid, value, edi_struct=edi_struct.get(field, {}), context=context) edi_dict[field] = value results.append(edi_dict) return results def _edi_get_object_by_name(self, cr, uid, name, model_name, context=None): model = self.pool[model_name] search_results = model.name_search(cr, uid, name, operator='=', context=context) if len(search_results) == 1: return model.browse(cr, uid, search_results[0][0], context=context) return False def _edi_generate_report_attachment(self, cr, uid, record, context=None): """Utility method to generate the first PDF-type report declared for the current model with ``usage`` attribute set to ``default``. 
        This must be called explicitly by models that need it, usually
        at the beginning of ``edi_export``, before the call to ``super()``."""
        ir_actions_report = self.pool.get('ir.actions.report.xml')
        matching_reports = ir_actions_report.search(cr, uid,
                                                    [('model','=',self._name),
                                                     ('report_type','=','pdf'),
                                                     ('usage','=','default')])
        if matching_reports:
            report = ir_actions_report.browse(cr, uid, matching_reports[0])
            result, format = openerp.report.render_report(cr, uid, [record.id],
                                                          report.report_name,
                                                          {'model': self._name},
                                                          context=context)
            eval_context = {'time': time, 'object': record}
            if not report.attachment or not eval(report.attachment, eval_context):
                # no auto-saving of report as attachment, need to do it manually
                result = base64.b64encode(result)
                file_name = record.name_get()[0][1]
                file_name = re.sub(r'[^a-zA-Z0-9_-]', '_', file_name)
                file_name += ".pdf"
                self.pool.get('ir.attachment').create(cr, uid,
                                                      {'name': file_name,
                                                       'datas': result,
                                                       'datas_fname': file_name,
                                                       'res_model': self._name,
                                                       'res_id': record.id,
                                                       'type': 'binary'},
                                                      context=context)

    def _edi_import_attachments(self, cr, uid, record_id, edi, context=None):
        ir_attachment = self.pool.get('ir.attachment')
        for attachment in edi.get('__attachments', []):
            # check attachment data is non-empty and valid
            file_data = None
            try:
                file_data = base64.b64decode(attachment.get('content'))
            except TypeError:
                pass
            assert file_data, 'Incorrect/Missing attachment file content.'
            assert attachment.get('name'), 'Incorrect/Missing attachment name.'
            assert attachment.get('file_name'), 'Incorrect/Missing attachment file name.'
            ir_attachment.create(cr, uid, {'name': attachment['name'],
                                           'datas_fname': attachment['file_name'],
                                           'res_model': self._name,
                                           'res_id': record_id,
                                           # should be pure 7bit ASCII
                                           'datas': str(attachment['content']),
                                           }, context=context)

    def _edi_get_object_by_external_id(self, cr, uid, external_id, model, context=None):
        """Returns browse_record representing object identified by the model and external_id,
           or None if no record was found with this external id.

           :param external_id: fully qualified external id, in the EDI form
                               ``module:db_uuid:identifier``.
           :param model: model name the record belongs to.
        """
        ir_model_data = self.pool.get('ir.model.data')
        # external_id is expected to have the form: ``module:db_uuid:model.random_name``
        ext_id_members = split_external_id(external_id)
        db_uuid = self.pool.get('ir.config_parameter').get_param(cr, uid, 'database.uuid')
        module = ext_id_members['module']
        ext_id = ext_id_members['id']
        modules = []
        ext_db_uuid = ext_id_members['db_uuid']
        if ext_db_uuid:
            modules.append('%s:%s' % (module, ext_id_members['db_uuid']))
        if ext_db_uuid is None or ext_db_uuid == db_uuid:
            # local records may also be registered without the db_uuid
            modules.append(module)
        data_ids = ir_model_data.search(cr, uid, [('model','=',model),
                                                  ('name','=',ext_id),
                                                  ('module','in',modules)])
        if data_ids:
            model = self.pool[model]
            data = ir_model_data.browse(cr, uid, data_ids[0], context=context)
            if model.exists(cr, uid, [data.res_id]):
                return model.browse(cr, uid, data.res_id, context=context)
            # stale external-id, cleanup to allow re-import, as the corresponding record is gone
            ir_model_data.unlink(cr, 1, [data_ids[0]])

    def edi_import_relation(self, cr, uid, model, value, external_id, context=None):
        """Imports a M2O/M2M relation EDI specification ``[external_id,value]`` for the
           given model, returning the corresponding database ID:

           * First, checks if the ``external_id`` is already known, in which case the
             corresponding database ID is directly returned, without doing anything else;
           * If the ``external_id`` is unknown, attempts to locate an existing record
             with the same ``value`` via name_search(). If found, the given external_id will
             be assigned to this local record (in addition to any existing one)
           * If previous steps gave no result, create a new record with the given
             value in the target model, assign it the given external_id, and return
             the new database ID

           :param str value: display name of the record to import
           :param str external_id: fully-qualified external ID of the record
           :return: database id of newly-imported or pre-existing record
        """
        _logger.debug("%s: Importing EDI relationship [%r,%r]", model, external_id, value)
        target = self._edi_get_object_by_external_id(cr, uid, external_id, model, context=context)
        need_new_ext_id = False
        if not target:
            _logger.debug("%s: Importing EDI relationship [%r,%r] - ID not found, trying name_get.",
                          self._name, external_id, value)
            target = self._edi_get_object_by_name(cr, uid, value, model, context=context)
            need_new_ext_id = True
        if not target:
            _logger.debug("%s: Importing EDI relationship [%r,%r] - name not found, creating it.",
                          self._name, external_id, value)
            # also need_new_ext_id here, but already been set above
            model = self.pool[model]
            res_id, _ = model.name_create(cr, uid, value, context=context)
            target = model.browse(cr, uid, res_id, context=context)
        else:
            _logger.debug("%s: Importing EDI relationship [%r,%r] - record already exists with ID %s, using it",
                          self._name, external_id, value, target.id)
        if need_new_ext_id:
            ext_id_members = split_external_id(external_id)
            # module name is never used bare when creating ir.model.data entries, in order
            # to avoid being taken as part of the module's data, and cleaned up at next update
            module = "%s:%s" % (ext_id_members['module'], ext_id_members['db_uuid'])
            # create a new ir.model.data entry for this value
            self._edi_external_id(cr, uid, target, existing_id=ext_id_members['id'],
                                  existing_module=module, context=context)
        return target.id

    def edi_import(self, cr, uid, edi, context=None):
        """Imports a dict representing an EDI document into the system.

           :param dict edi: EDI document to import
           :return: the database ID of the imported record
        """
        assert self._name == edi.get('__import_model') or \
               ('__import_model' not in edi and self._name == edi.get('__model')), \
               "EDI Document Model and current model do not match: '%s' (EDI) vs '%s' (current)." % \
               (edi.get('__model'), self._name)

        # First check whether the record is already known in the database, in which case it is ignored
        ext_id_members = split_external_id(edi['__id'])
        existing = self._edi_get_object_by_external_id(cr, uid, ext_id_members['full'],
                                                       self._name, context=context)
        if existing:
            _logger.info("'%s' EDI Document with ID '%s' is already known, skipping import!",
                         self._name, ext_id_members['full'])
            return existing.id

        record_values = {}
        o2m_todo = {}  # o2m values are processed after their parent already exists
        for field_name, field_value in edi.iteritems():
            # skip metadata and empty fields
            if field_name.startswith('__') or field_value is None or field_value is False:
                continue
            field_info = self._all_columns.get(field_name)
            if not field_info:
                _logger.warning('Ignoring unknown field `%s` when importing `%s` EDI document.',
                                field_name, self._name)
                continue
            field = field_info.column
            # skip function/related fields
            if isinstance(field, fields.function):
                _logger.warning("Unexpected function field value is found in '%s' EDI document: '%s'." %
                                (self._name, field_name))
                continue
            relation_model = field._obj
            if field._type == 'many2one':
                record_values[field_name] = self.edi_import_relation(cr, uid, relation_model,
                                                                     field_value[1], field_value[0],
                                                                     context=context)
            elif field._type == 'many2many':
                record_values[field_name] = [self.edi_import_relation(cr, uid, relation_model,
                                                                      m2m_value[1], m2m_value[0],
                                                                      context=context)
                                             for m2m_value in field_value]
            elif field._type == 'one2many':
                # must wait until parent record is imported, as the parent relationship
                # is often required in o2m child records
                o2m_todo[field_name] = field_value
            else:
                record_values[field_name] = field_value

        module_ref = "%s:%s" % (ext_id_members['module'], ext_id_members['db_uuid'])
        record_id = self.pool.get('ir.model.data')._update(cr, uid, self._name, module_ref,
                                                           record_values, xml_id=ext_id_members['id'],
                                                           context=context)
        record_display, = self.name_get(cr, uid, [record_id], context=context)

        # process o2m values, connecting them to their parent on-the-fly
        for o2m_field, o2m_value in o2m_todo.iteritems():
            field = self._all_columns[o2m_field].column
            dest_model = self.pool[field._obj]
            for o2m_line in o2m_value:
                # link to parent record: expects an (ext_id, name) pair
                o2m_line[field._fields_id] = (ext_id_members['full'], record_display[1])
                dest_model.edi_import(cr, uid, o2m_line, context=context)

        # process the attachments, if any
        self._edi_import_attachments(cr, uid, record_id, edi, context=context)

        return record_id

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
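The EDI methods above key everything off fully qualified external ids of the form ``module:db_uuid:identifier``. Below is a minimal sketch of how such an id can be split and re-qualified, assuming the three-part form described in the docstrings; the addon's real ``split_external_id`` helper is defined elsewhere in the module and also handles ids without a db_uuid.

# illustrative only -- mirrors the keys ('module', 'db_uuid', 'id', 'full')
# that the methods above read from split_external_id()
def split_external_id_sketch(ext_id):
    module, db_uuid, identifier = ext_id.split(':')
    return {'module': module, 'db_uuid': db_uuid, 'id': identifier, 'full': ext_id}

members = split_external_id_sketch('sale:8f1edd74-0000-4111-aaaa-1234567890ab:sale.order_42')
# edi_import_relation() re-qualifies the module with the source database uuid before
# creating the ir.model.data entry:
module_ref = "%s:%s" % (members['module'], members['db_uuid'])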
matthiasrichter/AliceO2
Analysis/Scripts/update_ccdb.py
3
6042
#!/usr/bin/env python3 # Copyright 2019-2020 CERN and copyright holders of ALICE O2. # See https://alice-o2.web.cern.ch/copyright for details of the copyright holders. # All rights not expressly granted are reserved. # # This software is distributed under the terms of the GNU General Public # License v3 (GPL Version 3), copied verbatim in the file "COPYING". # # In applying this license CERN does not waive the privileges and immunities # granted to it by virtue of its status as an Intergovernmental Organization # or submit itself to any jurisdiction. """ Script to update the CCDB with timestamp non-overlapping objects. If an object is found in the range specified, the object is split into two. If the requested range was overlapping three objects are uploaded on CCDB: 1) latest object with requested timestamp validity 2) old object with validity [old_lower_validity-requested_lower_bound] 3) old object with validity [requested_upper_bound, old_upper_validity] Author: Nicolo' Jacazio on 2020-06-22 TODO add support for 3 files update """ import subprocess from datetime import datetime import matplotlib.pyplot as plt import argparse def convert_timestamp(ts): """ Converts the timestamp in milliseconds in human readable format """ return datetime.utcfromtimestamp(ts/1000).strftime('%Y-%m-%d %H:%M:%S') def get_ccdb_obj(path, timestamp, dest="/tmp/", verbose=0): """ Gets the ccdb object from 'path' and 'timestamp' and downloads it into 'dest' """ if verbose: print("Getting obj", path, "with timestamp", timestamp, convert_timestamp(timestamp)) cmd = f"o2-ccdb-downloadccdbfile --path {path} --dest {dest} --timestamp {timestamp}" subprocess.run(cmd.split()) def get_ccdb_obj_validity(path, dest="/tmp/", verbose=0): """ Gets the timestamp validity for an object downloaded from CCDB. Returns a list with the initial and end timestamps. 
""" cmd = f"o2-ccdb-inspectccdbfile {dest}{path}/snapshot.root" process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE) output, error = process.communicate() output = output.decode("utf-8").split("\n") error = error.decode("utf-8").split("\n") if error is not None else error if verbose: print("out:") print(*output, "\n") print("err:") print(error) result = list(filter(lambda x: x.startswith('Valid-'), output)) ValidFrom = result[0].split() ValidUntil = result[1].split() return [int(ValidFrom[-1]), int(ValidUntil[-1])] def upload_ccdb_obj(path, timestamp_from, timestamp_until, dest="/tmp/", meta=""): """ Uploads a new object to CCDB in the 'path' using the validity timestamp specified """ print("Uploading obj", path, "with timestamp", [timestamp_from, timestamp_until], convert_timestamp(timestamp_from), convert_timestamp(timestamp_until)) key = path.split("/")[-1] cmd = f"o2-ccdb-upload -f {dest}{path}/snapshot.root " cmd += f"--key {key} --path {path} " cmd += f"--starttimestamp {timestamp_from} --endtimestamp {timestamp_until} --meta \"{meta}\"" subprocess.run(cmd.split()) def main(path, timestamp_from, timestamp_until, verbose=0, show=False): """ Used to upload a new object to CCDB in 'path' valid from 'timestamp_from' to 'timestamp_until' Gets the object from CCDB specified in 'path' and for 'timestamp_from-1' Gets the object from CCDB specified in 'path' and for 'timestamp_until+1' If required plots the situation before and after the update """ get_ccdb_obj(path, timestamp_from-1) val_before = get_ccdb_obj_validity(path, verbose=verbose) get_ccdb_obj(path, timestamp_until+1) val_after = get_ccdb_obj_validity(path, verbose=verbose) overlap_before = val_before[1] > timestamp_from overlap_after = val_after[0] < timestamp_until if verbose: if overlap_before: print("Previous objects overalps") if overlap_after: print("Next objects overalps") trimmed_before = val_before if not overlap_before else [ val_before[0], timestamp_from - 1] trimmed_after = val_after if not overlap_after else [ timestamp_until+1, val_after[1]] if show: fig, ax = plt.subplots() fig def bef_af(v, y): return [v[0] - 1] + v + [v[1] + 1], [0, y, y, 0] if True: ax.plot(*bef_af(val_before, 0.95), label='before') ax.plot(*bef_af(val_after, 1.05), label='after') if False: ax.plot(*bef_af(trimmed_before, 0.9), label='trimmed before') ax.plot(*bef_af(trimmed_after, 1.1), label='trimmed after') ax.plot(*bef_af([timestamp_from, timestamp_until], 1), label='object') xlim = 10000000 plt.xlim([timestamp_from-xlim, timestamp_until+xlim]) plt.ylim(0, 2) plt.xlabel('Timestamp') plt.ylabel('Validity') plt.legend() plt.show() if __name__ == "__main__": parser = argparse.ArgumentParser( description="Uploads timestamp non overlapping objects to CCDB." "Basic example: `./update_ccdb.py qc/TOF/TOFTaskCompressed/hDiagnostic 1588956517161 1588986517161 --show --verbose`") parser.add_argument('path', metavar='path_to_object', type=str, help='Path of the object in the CCDB repository') parser.add_argument('timestamp_from', metavar='from_timestamp', type=int, help='Timestamp of start for the new object to use') parser.add_argument('timestamp_until', metavar='until_timestamp', type=int, help='Timestamp of stop for the new object to use') parser.add_argument('--verbose', '-v', action='count', default=0) parser.add_argument('--show', '-s', action='count', default=0) args = parser.parse_args() main(path=args.path, timestamp_from=args.timestamp_from, timestamp_until=args.timestamp_until, verbose=args.verbose, show=args.show)
gpl-3.0
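update_ccdb.py avoids overlapping validity ranges by shrinking the neighbouring objects around the requested window. The following is a small sketch of that trimming rule with hypothetical timestamps (milliseconds, as in the script), not code from the repository.

def trim_before(val_before, ts_from):
    # an existing object that ends after the new start is cut to end just before it
    return val_before if val_before[1] <= ts_from else [val_before[0], ts_from - 1]

def trim_after(val_after, ts_until):
    # an existing object that starts before the new end is cut to start just after it
    return val_after if val_after[0] >= ts_until else [ts_until + 1, val_after[1]]

print(trim_before([1588900000000, 1589000000000], 1588956517161))  # [1588900000000, 1588956517160]
print(trim_after([1589000000000, 1589200000000], 1589100000000))   # [1589100000001, 1589200000000]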
Aravinthu/odoo
addons/website_event_sale/models/sale_order.py
16
4747
# -*- coding: utf-8 -*- from odoo import api, models, _ from odoo.exceptions import UserError class SaleOrder(models.Model): _inherit = "sale.order" @api.multi def _cart_find_product_line(self, product_id=None, line_id=None, **kwargs): self.ensure_one() lines = super(SaleOrder, self)._cart_find_product_line(product_id, line_id) if line_id: return lines domain = [('id', 'in', lines.ids)] if self.env.context.get("event_ticket_id"): domain.append(('event_ticket_id', '=', self.env.context.get("event_ticket_id"))) return self.env['sale.order.line'].sudo().search(domain) @api.multi def _website_product_id_change(self, order_id, product_id, qty=0): order = self.env['sale.order'].sudo().browse(order_id) if self._context.get('pricelist') != order.pricelist_id.id: self = self.with_context(pricelist=order.pricelist_id.id) values = super(SaleOrder, self)._website_product_id_change(order_id, product_id, qty=qty) event_ticket_id = None if self.env.context.get("event_ticket_id"): event_ticket_id = self.env.context.get("event_ticket_id") else: product = self.env['product.product'].browse(product_id) if product.event_ticket_ids: event_ticket_id = product.event_ticket_ids[0].id if event_ticket_id: ticket = self.env['event.event.ticket'].browse(event_ticket_id) if product_id != ticket.product_id.id: raise UserError(_("The ticket doesn't match with this product.")) values['product_id'] = ticket.product_id.id values['event_id'] = ticket.event_id.id values['event_ticket_id'] = ticket.id values['price_unit'] = ticket.price_reduce or ticket.price values['name'] = "%s\n%s" % (ticket.event_id.display_name, ticket.name) # avoid writing related values that end up locking the product record values.pop('event_ok', None) return values @api.multi def _cart_update(self, product_id=None, line_id=None, add_qty=0, set_qty=0, **kwargs): OrderLine = self.env['sale.order.line'] if line_id: line = OrderLine.browse(line_id) ticket = line.event_ticket_id old_qty = int(line.product_uom_qty) if ticket.id: self = self.with_context(event_ticket_id=ticket.id, fixed_price=1) else: line = None ticket = self.env['event.event.ticket'].search([('product_id', '=', product_id)], limit=1) old_qty = 0 new_qty = set_qty if set_qty else (add_qty or 0 + old_qty) # case: buying tickets for a sold out ticket values = {} if ticket and ticket.seats_availability == 'limited' and ticket.seats_available <= 0: values['warning'] = _('Sorry, The %(ticket)s tickets for the %(event)s event are sold out.') % { 'ticket': ticket.name, 'event': ticket.event_id.name} new_qty, set_qty, add_qty = 0, 0, 0 # case: buying tickets, too much attendees elif ticket and ticket.seats_availability == 'limited' and new_qty > ticket.seats_available: values['warning'] = _('Sorry, only %(remaining_seats)d seats are still available for the %(ticket)s ticket for the %(event)s event.') % { 'remaining_seats': ticket.seats_available, 'ticket': ticket.name, 'event': ticket.event_id.name} new_qty, set_qty, add_qty = ticket.seats_available, ticket.seats_available, 0 values.update(super(SaleOrder, self)._cart_update(product_id, line_id, add_qty, set_qty, **kwargs)) # removing attendees if ticket and new_qty < old_qty: attendees = self.env['event.registration'].search([ ('state', '!=', 'cancel'), ('sale_order_id', 'in', self.ids), # To avoid break on multi record set ('event_ticket_id', '=', ticket.id), ], offset=new_qty, limit=(old_qty - new_qty), order='create_date asc') attendees.button_reg_cancel() # adding attendees elif ticket and new_qty > old_qty: line = 
OrderLine.browse(values['line_id']) line._update_registrations(confirm=False, cancel_to_draft=True, registration_data=kwargs.get('registration_data', [])) # add in return values the registrations, to display them on website (or not) values['attendee_ids'] = self.env['event.registration'].search([('sale_order_line_id', '=', line.id), ('state', '!=', 'cancel')]).ids return values
agpl-3.0
CGATOxford/bioconda-recipes
recipes/topas/topas.py
38
2648
#!/usr/bin/env python # # Wrapper script for Java Conda packages that ensures that the java runtime # is invoked with the right options. Adapted from the bash script (http://stackoverflow.com/questions/59895/can-a-bash-script-tell-what-directory-its-stored-in/246128#246128). # # # Program Parameters # import os import sys import subprocess from os import access, getenv, X_OK jar_file = 'TOPAS.jar' default_jvm_mem_opts = ['-Xms512m', '-Xmx1g'] # !!! End of parameter section. No user-serviceable code below this line !!! def real_dirname(path): """Return the symlink-resolved, canonicalized directory-portion of path.""" return os.path.dirname(os.path.realpath(path)) def java_executable(): """Return the executable name of the Java interpreter.""" java_home = getenv('JAVA_HOME') java_bin = os.path.join('bin', 'java') if java_home and access(os.path.join(java_home, java_bin), X_OK): return os.path.join(java_home, java_bin) else: return 'java' def jvm_opts(argv): """Construct list of Java arguments based on our argument list. The argument list passed in argv must not include the script name. The return value is a 3-tuple lists of strings of the form: (memory_options, prop_options, passthrough_options) """ mem_opts = [] prop_opts = [] pass_args = [] for arg in argv: if arg.startswith('-D'): prop_opts.append(arg) elif arg.startswith('-XX'): prop_opts.append(arg) elif arg.startswith('-Xm'): mem_opts.append(arg) else: pass_args.append(arg) # In the original shell script the test coded below read: # if [ "$jvm_mem_opts" == "" ] && [ -z ${_JAVA_OPTIONS+x} ] # To reproduce the behaviour of the above shell code fragment # it is important to explictly check for equality with None # in the second condition, so a null envar value counts as True! if mem_opts == [] and getenv('_JAVA_OPTIONS') == None: mem_opts = default_jvm_mem_opts return (mem_opts, prop_opts, pass_args) def main(): java = java_executable() jar_dir = real_dirname(sys.argv[0]) (mem_opts, prop_opts, pass_args) = jvm_opts(sys.argv[1:]) if pass_args != [] and pass_args[0].startswith('eu'): jar_arg = '-cp' else: jar_arg = '-jar' jar_path = os.path.join(jar_dir, jar_file) java_args = [java]+ mem_opts + prop_opts + [jar_arg] + [jar_path] + pass_args if '--jar_dir' in sys.argv[1:]: print(jar_path) else: sys.exit(subprocess.call(java_args)) if __name__ == '__main__': main()
mit
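In the TOPAS wrapper above, jvm_opts() partitions the command line into memory flags, JVM properties, and arguments forwarded to the jar. A hypothetical call is sketched below (file names invented; assumes _JAVA_OPTIONS is unset, so the -Xms512m/-Xmx1g defaults would only apply when no -Xm* flag is given).

mem_opts, prop_opts, pass_args = jvm_opts(['-Dverbosity=2', '-Xmx4g', 'input.vcf', 'output.vcf'])
# mem_opts  -> ['-Xmx4g']                  (a user -Xm* flag suppresses the defaults)
# prop_opts -> ['-Dverbosity=2']           (-D... and -XX... options go straight to the JVM)
# pass_args -> ['input.vcf', 'output.vcf'] (everything else is handed to TOPAS.jar)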
jaxkodex/odoo
addons/point_of_sale/report/pos_order_report.py
283
4297
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp import tools from openerp.osv import fields,osv class pos_order_report(osv.osv): _name = "report.pos.order" _description = "Point of Sale Orders Statistics" _auto = False _columns = { 'date': fields.datetime('Date Order', readonly=True), 'partner_id':fields.many2one('res.partner', 'Partner', readonly=True), 'product_id':fields.many2one('product.product', 'Product', readonly=True), 'state': fields.selection([('draft', 'New'), ('paid', 'Closed'), ('done', 'Synchronized'), ('invoiced', 'Invoiced'), ('cancel', 'Cancelled')], 'Status'), 'user_id':fields.many2one('res.users', 'Salesperson', readonly=True), 'price_total':fields.float('Total Price', readonly=True), 'total_discount':fields.float('Total Discount', readonly=True), 'average_price': fields.float('Average Price', readonly=True,group_operator="avg"), 'location_id':fields.many2one('stock.location', 'Location', readonly=True), 'company_id':fields.many2one('res.company', 'Company', readonly=True), 'nbr':fields.integer('# of Lines', readonly=True), # TDE FIXME master: rename into nbr_lines 'product_qty':fields.integer('Product Quantity', readonly=True), 'journal_id': fields.many2one('account.journal', 'Journal'), 'delay_validation': fields.integer('Delay Validation'), 'product_categ_id': fields.many2one('product.category', 'Product Category', readonly=True), } _order = 'date desc' def init(self, cr): tools.drop_view_if_exists(cr, 'report_pos_order') cr.execute(""" create or replace view report_pos_order as ( select min(l.id) as id, count(*) as nbr, s.date_order as date, sum(l.qty * u.factor) as product_qty, sum(l.qty * l.price_unit) as price_total, sum((l.qty * l.price_unit) * (l.discount / 100)) as total_discount, (sum(l.qty*l.price_unit)/sum(l.qty * u.factor))::decimal as average_price, sum(cast(to_char(date_trunc('day',s.date_order) - date_trunc('day',s.create_date),'DD') as int)) as delay_validation, s.partner_id as partner_id, s.state as state, s.user_id as user_id, s.location_id as location_id, s.company_id as company_id, s.sale_journal as journal_id, l.product_id as product_id, pt.categ_id as product_categ_id from pos_order_line as l left join pos_order s on (s.id=l.order_id) left join product_product p on (p.id=l.product_id) left join product_template pt on (pt.id=p.product_tmpl_id) left join product_uom u on (u.id=pt.uom_id) group by s.date_order, s.partner_id,s.state, pt.categ_id, s.user_id,s.location_id,s.company_id,s.sale_journal,l.product_id,s.create_date having sum(l.qty * u.factor) != 0)""") # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
fabianrost84/cython
Cython/Plex/Errors.py
33
1169
#=======================================================================
#
#   Python Lexical Analyser
#
#   Exception classes
#
#=======================================================================


class PlexError(Exception):
    message = ""


class PlexTypeError(PlexError, TypeError):
    pass


class PlexValueError(PlexError, ValueError):
    pass


class InvalidRegex(PlexError):
    pass


class InvalidToken(PlexError):
    def __init__(self, token_number, message):
        PlexError.__init__(self, "Token number %d: %s" % (token_number, message))


class InvalidScanner(PlexError):
    pass


class AmbiguousAction(PlexError):
    message = "Two tokens with different actions can match the same string"

    def __init__(self):
        pass


class UnrecognizedInput(PlexError):
    scanner = None
    position = None
    state_name = None

    def __init__(self, scanner, state_name):
        self.scanner = scanner
        self.position = scanner.get_position()
        self.state_name = state_name

    def __str__(self):
        return ("'%s', line %d, char %d: Token not recognised in state %r" % (
            self.position + (self.state_name,)))
apache-2.0
analurandis/Tur
backend/venv/Lib/site-packages/sphinx/builders/qthelp.py
11
10819
# -*- coding: utf-8 -*- """ sphinx.builders.qthelp ~~~~~~~~~~~~~~~~~~~~~~ Build input files for the Qt collection generator. :copyright: Copyright 2007-2014 by the Sphinx team, see AUTHORS. :license: BSD, see LICENSE for details. """ import os import re import codecs import posixpath from os import path from docutils import nodes from sphinx import addnodes from sphinx.builders.html import StandaloneHTMLBuilder from sphinx.util import force_decode from sphinx.util.pycompat import htmlescape _idpattern = re.compile( r'(?P<title>.+) (\((class in )?(?P<id>[\w\.]+)( (?P<descr>\w+))?\))$') # Qt Help Collection Project (.qhcp). # Is the input file for the help collection generator. # It contains references to compressed help files which should be # included in the collection. # It may contain various other information for customizing Qt Assistant. collection_template = u'''\ <?xml version="1.0" encoding="utf-8" ?> <QHelpCollectionProject version="1.0"> <assistant> <title>%(title)s</title> <homePage>%(homepage)s</homePage> <startPage>%(startpage)s</startPage> </assistant> <docFiles> <generate> <file> <input>%(outname)s.qhp</input> <output>%(outname)s.qch</output> </file> </generate> <register> <file>%(outname)s.qch</file> </register> </docFiles> </QHelpCollectionProject> ''' # Qt Help Project (.qhp) # This is the input file for the help generator. # It contains the table of contents, indices and references to the # actual documentation files (*.html). # In addition it defines a unique namespace for the documentation. project_template = u'''\ <?xml version="1.0" encoding="utf-8" ?> <QtHelpProject version="1.0"> <namespace>%(namespace)s</namespace> <virtualFolder>doc</virtualFolder> <customFilter name="%(project)s %(version)s"> <filterAttribute>%(outname)s</filterAttribute> <filterAttribute>%(version)s</filterAttribute> </customFilter> <filterSection> <filterAttribute>%(outname)s</filterAttribute> <filterAttribute>%(version)s</filterAttribute> <toc> <section title="%(title)s" ref="%(masterdoc)s.html"> %(sections)s </section> </toc> <keywords> %(keywords)s </keywords> <files> %(files)s </files> </filterSection> </QtHelpProject> ''' section_template = '<section title="%(title)s" ref="%(ref)s"/>' file_template = ' '*12 + '<file>%(filename)s</file>' class QtHelpBuilder(StandaloneHTMLBuilder): """ Builder that also outputs Qt help project, contents and index files. """ name = 'qthelp' # don't copy the reST source copysource = False supported_image_types = ['image/svg+xml', 'image/png', 'image/gif', 'image/jpeg'] # don't add links add_permalinks = False # don't add sidebar etc. 
embedded = True def init(self): StandaloneHTMLBuilder.init(self) # the output files for HTML help must be .html only self.out_suffix = '.html' #self.config.html_style = 'traditional.css' def handle_finish(self): self.build_qhp(self.outdir, self.config.qthelp_basename) def build_qhp(self, outdir, outname): self.info('writing project file...') # sections tocdoc = self.env.get_and_resolve_doctree(self.config.master_doc, self, prune_toctrees=False) istoctree = lambda node: ( isinstance(node, addnodes.compact_paragraph) and node.has_key('toctree')) sections = [] for node in tocdoc.traverse(istoctree): sections.extend(self.write_toc(node)) for indexname, indexcls, content, collapse in self.domain_indices: item = section_template % {'title': indexcls.localname, 'ref': '%s.html' % indexname} sections.append(' ' * 4 * 4 + item) # sections may be unicode strings or byte strings, we have to make sure # they are all unicode strings before joining them new_sections = [] for section in sections: if not isinstance(section, unicode): new_sections.append(force_decode(section, None)) else: new_sections.append(section) sections = u'\n'.join(new_sections) # keywords keywords = [] index = self.env.create_index(self, group_entries=False) for (key, group) in index: for title, (refs, subitems) in group: keywords.extend(self.build_keywords(title, refs, subitems)) keywords = u'\n'.join(keywords) # files if not outdir.endswith(os.sep): outdir += os.sep olen = len(outdir) projectfiles = [] staticdir = path.join(outdir, '_static') imagesdir = path.join(outdir, '_images') for root, dirs, files in os.walk(outdir): resourcedir = root.startswith(staticdir) or \ root.startswith(imagesdir) for fn in files: if (resourcedir and not fn.endswith('.js')) or \ fn.endswith('.html'): filename = path.join(root, fn)[olen:] projectfiles.append(file_template % {'filename': htmlescape(filename)}) projectfiles = '\n'.join(projectfiles) # it seems that the "namespace" may not contain non-alphanumeric # characters, and more than one successive dot, or leading/trailing # dots, are also forbidden nspace = 'org.sphinx.%s.%s' % (outname, self.config.version) nspace = re.sub('[^a-zA-Z0-9.]', '', nspace) nspace = re.sub(r'\.+', '.', nspace).strip('.') nspace = nspace.lower() # write the project file f = codecs.open(path.join(outdir, outname+'.qhp'), 'w', 'utf-8') try: f.write(project_template % { 'outname': htmlescape(outname), 'title': htmlescape(self.config.html_title), 'version': htmlescape(self.config.version), 'project': htmlescape(self.config.project), 'namespace': htmlescape(nspace), 'masterdoc': htmlescape(self.config.master_doc), 'sections': sections, 'keywords': keywords, 'files': projectfiles}) finally: f.close() homepage = 'qthelp://' + posixpath.join( nspace, 'doc', self.get_target_uri(self.config.master_doc)) startpage = 'qthelp://' + posixpath.join(nspace, 'doc', 'index.html') self.info('writing collection project file...') f = codecs.open(path.join(outdir, outname+'.qhcp'), 'w', 'utf-8') try: f.write(collection_template % { 'outname': htmlescape(outname), 'title': htmlescape(self.config.html_short_title), 'homepage': htmlescape(homepage), 'startpage': htmlescape(startpage)}) finally: f.close() def isdocnode(self, node): if not isinstance(node, nodes.list_item): return False if len(node.children) != 2: return False if not isinstance(node.children[0], addnodes.compact_paragraph): return False if not isinstance(node.children[0][0], nodes.reference): return False if not isinstance(node.children[1], nodes.bullet_list): return False 
return True def write_toc(self, node, indentlevel=4): # XXX this should return a Unicode string, not a bytestring parts = [] if self.isdocnode(node): refnode = node.children[0][0] link = refnode['refuri'] title = htmlescape(refnode.astext()).replace('"', '&quot;') item = '<section title="%(title)s" ref="%(ref)s">' % \ {'title': title, 'ref': link} parts.append(' '*4*indentlevel + item) for subnode in node.children[1]: parts.extend(self.write_toc(subnode, indentlevel+1)) parts.append(' '*4*indentlevel + '</section>') elif isinstance(node, nodes.list_item): for subnode in node: parts.extend(self.write_toc(subnode, indentlevel)) elif isinstance(node, nodes.reference): link = node['refuri'] title = htmlescape(node.astext()).replace('"','&quot;') item = section_template % {'title': title, 'ref': link} item = u' ' * 4 * indentlevel + item parts.append(item.encode('ascii', 'xmlcharrefreplace')) elif isinstance(node, nodes.bullet_list): for subnode in node: parts.extend(self.write_toc(subnode, indentlevel)) elif isinstance(node, addnodes.compact_paragraph): for subnode in node: parts.extend(self.write_toc(subnode, indentlevel)) return parts def keyword_item(self, name, ref): matchobj = _idpattern.match(name) if matchobj: groupdict = matchobj.groupdict() shortname = groupdict['title'] id = groupdict.get('id') #descr = groupdict.get('descr') if shortname.endswith('()'): shortname = shortname[:-2] id = '%s.%s' % (id, shortname) else: id = None if id: item = ' '*12 + '<keyword name="%s" id="%s" ref="%s"/>' % ( name, id, ref[1]) else: item = ' '*12 + '<keyword name="%s" ref="%s"/>' % (name, ref[1]) item.encode('ascii', 'xmlcharrefreplace') return item def build_keywords(self, title, refs, subitems): keywords = [] title = htmlescape(title) # if len(refs) == 0: # XXX # write_param('See Also', title) if len(refs) == 1: keywords.append(self.keyword_item(title, refs[0])) elif len(refs) > 1: for i, ref in enumerate(refs): # XXX # item = (' '*12 + # '<keyword name="%s [%d]" ref="%s"/>' % ( # title, i, ref)) # item.encode('ascii', 'xmlcharrefreplace') # keywords.append(item) keywords.append(self.keyword_item(title, ref)) if subitems: for subitem in subitems: keywords.extend(self.build_keywords(subitem[0], subitem[1], [])) return keywords
mit
jaingaurav/Diamond
src/diamond/handler/test/teststatsdhandler.py
20
3122
#!/usr/bin/python # coding=utf-8 ########################################################################## from test import unittest from test import run_only from mock import patch import configobj from diamond.handler.stats_d import StatsdHandler from diamond.metric import Metric def run_only_if_statsd_is_available(func): try: import statsd except ImportError: statsd = None pred = lambda: statsd is not None return run_only(func, pred) class TestStatsdHandler(unittest.TestCase): @run_only_if_statsd_is_available @patch('statsd.StatsClient') def test_single_gauge(self, mock_client): config = configobj.ConfigObj() config['host'] = 'localhost' config['port'] = '9999' config['batch'] = 1 metric = Metric('servers.com.example.www.cpu.total.idle', 123, raw_value=123, timestamp=1234567, host='will-be-ignored', metric_type='GAUGE') expected_data = ('servers.com.example.www.cpu.total.idle', 123) handler = StatsdHandler(config) handler.process(metric) handler.connection.gauge.assert_called_with(*expected_data) handler.connection.send.assert_called_with() @run_only_if_statsd_is_available @patch('statsd.StatsClient') def test_single_counter(self, mock_client): config = configobj.ConfigObj() config['host'] = 'localhost' config['port'] = '9999' config['batch'] = 1 metric = Metric('servers.com.example.www.cpu.total.idle', 5, raw_value=123, timestamp=1234567, host='will-be-ignored', metric_type='COUNTER') expected_data = ('servers.com.example.www.cpu.total.idle', 123) handler = StatsdHandler(config) handler.process(metric) handler.connection.incr.assert_called_with(*expected_data) handler.connection.send.assert_called_with() @run_only_if_statsd_is_available @patch('statsd.StatsClient') def test_multiple_counter(self, mock_client): config = configobj.ConfigObj() config['host'] = 'localhost' config['port'] = '9999' config['batch'] = 1 metric1 = Metric('servers.com.example.www.cpu.total.idle', 5, raw_value=123, timestamp=1234567, host='will-be-ignored', metric_type='COUNTER') metric2 = Metric('servers.com.example.www.cpu.total.idle', 7, raw_value=128, timestamp=1234567, host='will-be-ignored', metric_type='COUNTER') expected_data1 = ('servers.com.example.www.cpu.total.idle', 123) expected_data2 = ('servers.com.example.www.cpu.total.idle', 5) handler = StatsdHandler(config) handler.process(metric1) handler.connection.incr.assert_called_with(*expected_data1) handler.connection.send.assert_called_with() handler.process(metric2) handler.connection.incr.assert_called_with(*expected_data2) handler.connection.send.assert_called_with()
mit
ckjoshi9/Auto-Mate-for-Tinder
Django App/tinderapp/src/Pixel.py
2
1491
from __future__ import division
from colorsys import *


class Pixel:
    def __init__(self, x, y, red, green, blue):
        # underscore-prefixed attributes back the properties below, so reading or
        # setting the properties does not recurse into themselves
        self._x = x
        self._y = y
        self.red = red
        self.green = green
        self.blue = blue
        self._region = None

    @property
    def region(self):
        return self._region

    @region.setter
    def region(self, value):
        self._region = value

    @property
    def x(self):
        return self._x

    @property
    def y(self):
        return self._y

    def in_region(self):
        if self.region is None:
            return False
        else:
            return True

    def is_skin(self):
        r = self.red
        g = self.green
        b = self.blue
        rgbClassifier = ((r > 95) and (g > 40 and g < 100) and (b > 20) and
                         ((max(r, g, b) - min(r, g, b)) > 15) and
                         (abs(r - g) > 15) and (r > g) and (r > b))
        normalizedRGBClassifier = False
        if r != 0 and g != 0 and b != 0:
            normR = (r / (r + g + b))
            normG = (g / (r + g + b))
            normB = (b / (r + g + b))
            normalizedRGBClassifier = (((normR / normG) > 1.185) and
                                       (((r * b) / (pow(r + g + b, 2))) > 0.107) and
                                       (((r * g) / (pow(r + g + b, 2))) > 0.112))
        hsv = rgb_to_hsv(r, g, b)
        hsvClassifier = (hsv[0] > 0 and hsv[0] < 35 and hsv[1] > 0.23 and hsv[1] < 0.68)
        return (rgbClassifier or normalizedRGBClassifier or hsvClassifier)

    def intensity(self):
        return (self.red + self.green + self.blue) / 3
mit
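A minimal usage sketch for the Pixel class above; the RGB values are invented to satisfy the plain-RGB skin rule and nothing here comes from the original repository.

p = Pixel(x=10, y=20, red=200, green=90, blue=70)
print(p.is_skin())    # True: 200 > 95, 40 < 90 < 100, 70 > 20, spread 130 > 15, |r-g| 110 > 15, r > g, r > b
print(p.intensity())  # 120.0 = (200 + 90 + 70) / 3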
pquentin/libcloud
libcloud/storage/drivers/s3.py
3
42505
# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import base64 import hmac import time from hashlib import sha1 import libcloud.utils.py3 try: if libcloud.utils.py3.DEFAULT_LXML: from lxml.etree import Element, SubElement else: from xml.etree.ElementTree import Element, SubElement except ImportError: from xml.etree.ElementTree import Element, SubElement from libcloud.utils.py3 import httplib from libcloud.utils.py3 import urlquote from libcloud.utils.py3 import b from libcloud.utils.py3 import tostring from libcloud.utils.xml import fixxpath, findtext from libcloud.utils.files import read_in_chunks from libcloud.common.types import InvalidCredsError, LibcloudError from libcloud.common.base import ConnectionUserAndKey, RawResponse from libcloud.common.aws import AWSBaseResponse, AWSDriver, \ AWSTokenConnection, SignedAWSConnection from libcloud.storage.base import Object, Container, StorageDriver from libcloud.storage.types import ContainerError from libcloud.storage.types import ContainerIsNotEmptyError from libcloud.storage.types import InvalidContainerNameError from libcloud.storage.types import ContainerDoesNotExistError from libcloud.storage.types import ObjectDoesNotExistError from libcloud.storage.types import ObjectHashMismatchError # How long before the token expires EXPIRATION_SECONDS = 15 * 60 S3_US_STANDARD_HOST = 's3.amazonaws.com' S3_US_EAST2_HOST = 's3-us-east-2.amazonaws.com' S3_US_WEST_HOST = 's3-us-west-1.amazonaws.com' S3_US_WEST_OREGON_HOST = 's3-us-west-2.amazonaws.com' S3_US_GOV_WEST_HOST = 's3-us-gov-west-1.amazonaws.com' S3_CN_NORTH_HOST = 's3.cn-north-1.amazonaws.com.cn' S3_EU_WEST_HOST = 's3-eu-west-1.amazonaws.com' S3_EU_WEST2_HOST = 's3-eu-west-2.amazonaws.com' S3_EU_CENTRAL_HOST = 's3-eu-central-1.amazonaws.com' S3_AP_SOUTH_HOST = 's3-ap-south-1.amazonaws.com' S3_AP_SOUTHEAST_HOST = 's3-ap-southeast-1.amazonaws.com' S3_AP_SOUTHEAST2_HOST = 's3-ap-southeast-2.amazonaws.com' S3_AP_NORTHEAST1_HOST = 's3-ap-northeast-1.amazonaws.com' S3_AP_NORTHEAST2_HOST = 's3-ap-northeast-2.amazonaws.com' S3_AP_NORTHEAST_HOST = S3_AP_NORTHEAST1_HOST S3_SA_EAST_HOST = 's3-sa-east-1.amazonaws.com' S3_SA_SOUTHEAST2_HOST = 's3-sa-east-2.amazonaws.com' S3_CA_CENTRAL_HOST = 's3-ca-central-1.amazonaws.com' API_VERSION = '2006-03-01' NAMESPACE = 'http://s3.amazonaws.com/doc/%s/' % (API_VERSION) # AWS multi-part chunks must be minimum 5MB CHUNK_SIZE = 5 * 1024 * 1024 # Desired number of items in each response inside a paginated request in # ex_iterate_multipart_uploads. 
RESPONSES_PER_REQUEST = 100 class S3Response(AWSBaseResponse): namespace = None valid_response_codes = [httplib.NOT_FOUND, httplib.CONFLICT, httplib.BAD_REQUEST] def success(self): i = int(self.status) return i >= 200 and i <= 299 or i in self.valid_response_codes def parse_error(self): if self.status in [httplib.UNAUTHORIZED, httplib.FORBIDDEN]: raise InvalidCredsError(self.body) elif self.status == httplib.MOVED_PERMANENTLY: raise LibcloudError('This bucket is located in a different ' + 'region. Please use the correct driver.', driver=S3StorageDriver) raise LibcloudError('Unknown error. Status code: %d' % (self.status), driver=S3StorageDriver) class S3RawResponse(S3Response, RawResponse): pass class BaseS3Connection(ConnectionUserAndKey): """ Represents a single connection to the S3 Endpoint """ host = 's3.amazonaws.com' responseCls = S3Response rawResponseCls = S3RawResponse @staticmethod def get_auth_signature(method, headers, params, expires, secret_key, path, vendor_prefix): """ Signature = URL-Encode( Base64( HMAC-SHA1( YourSecretAccessKeyID, UTF-8-Encoding-Of( StringToSign ) ) ) ); StringToSign = HTTP-VERB + "\n" + Content-MD5 + "\n" + Content-Type + "\n" + Expires + "\n" + CanonicalizedVendorHeaders + CanonicalizedResource; """ special_headers = {'content-md5': '', 'content-type': '', 'date': ''} vendor_headers = {} for key, value in list(headers.items()): key_lower = key.lower() if key_lower in special_headers: special_headers[key_lower] = value.strip() elif key_lower.startswith(vendor_prefix): vendor_headers[key_lower] = value.strip() if expires: special_headers['date'] = str(expires) buf = [method] for _, value in sorted(special_headers.items()): buf.append(value) string_to_sign = '\n'.join(buf) buf = [] for key, value in sorted(vendor_headers.items()): buf.append('%s:%s' % (key, value)) header_string = '\n'.join(buf) values_to_sign = [] for value in [string_to_sign, header_string, path]: if value: values_to_sign.append(value) string_to_sign = '\n'.join(values_to_sign) b64_hmac = base64.b64encode( hmac.new(b(secret_key), b(string_to_sign), digestmod=sha1).digest() ) return b64_hmac.decode('utf-8') def add_default_params(self, params): expires = str(int(time.time()) + EXPIRATION_SECONDS) params['AWSAccessKeyId'] = self.user_id params['Expires'] = expires return params def pre_connect_hook(self, params, headers): params['Signature'] = self.get_auth_signature( method=self.method, headers=headers, params=params, expires=params['Expires'], secret_key=self.key, path=self.action, vendor_prefix=self.driver.http_vendor_prefix) return params, headers class S3Connection(AWSTokenConnection, BaseS3Connection): """ Represents a single connection to the S3 endpoint, with AWS-specific features. 
""" pass class S3SignatureV4Connection(SignedAWSConnection, BaseS3Connection): service_name = 's3' version = API_VERSION def __init__(self, user_id, key, secure=True, host=None, port=None, url=None, timeout=None, proxy_url=None, token=None, retry_delay=None, backoff=None): super(S3SignatureV4Connection, self).__init__( user_id, key, secure, host, port, url, timeout, proxy_url, token, retry_delay, backoff, 4) # force version 4 class S3MultipartUpload(object): """ Class representing an amazon s3 multipart upload """ def __init__(self, key, id, created_at, initiator, owner): """ Class representing an amazon s3 multipart upload :param key: The object/key that was being uploaded :type key: ``str`` :param id: The upload id assigned by amazon :type id: ``str`` :param created_at: The date/time at which the upload was started :type created_at: ``str`` :param initiator: The AWS owner/IAM user who initiated this :type initiator: ``str`` :param owner: The AWS owner/IAM who will own this object :type owner: ``str`` """ self.key = key self.id = id self.created_at = created_at self.initiator = initiator self.owner = owner def __repr__(self): return ('<S3MultipartUpload: key=%s>' % (self.key)) class BaseS3StorageDriver(StorageDriver): name = 'Amazon S3 (standard)' website = 'http://aws.amazon.com/s3/' connectionCls = BaseS3Connection hash_type = 'md5' supports_chunked_encoding = False supports_s3_multipart_upload = True ex_location_name = '' namespace = NAMESPACE http_vendor_prefix = 'x-amz' def iterate_containers(self): response = self.connection.request('/') if response.status == httplib.OK: containers = self._to_containers(obj=response.object, xpath='Buckets/Bucket') return containers raise LibcloudError('Unexpected status code: %s' % (response.status), driver=self) def list_container_objects(self, container, ex_prefix=None): """ Return a list of objects for the given container. :param container: Container instance. :type container: :class:`Container` :param ex_prefix: Only return objects starting with ex_prefix :type ex_prefix: ``str`` :return: A list of Object instances. :rtype: ``list`` of :class:`Object` """ return list(self.iterate_container_objects(container, ex_prefix=ex_prefix)) def iterate_container_objects(self, container, ex_prefix=None): """ Return a generator of objects for the given container. :param container: Container instance :type container: :class:`Container` :param ex_prefix: Only return objects starting with ex_prefix :type ex_prefix: ``str`` :return: A generator of Object instances. 
:rtype: ``generator`` of :class:`Object` """ params = {} if ex_prefix: params['prefix'] = ex_prefix last_key = None exhausted = False container_path = self._get_container_path(container) while not exhausted: if last_key: params['marker'] = last_key response = self.connection.request(container_path, params=params) if response.status != httplib.OK: raise LibcloudError('Unexpected status code: %s' % (response.status), driver=self) objects = self._to_objs(obj=response.object, xpath='Contents', container=container) is_truncated = response.object.findtext(fixxpath( xpath='IsTruncated', namespace=self.namespace)).lower() exhausted = (is_truncated == 'false') last_key = None for obj in objects: last_key = obj.name yield obj def get_container(self, container_name): try: response = self.connection.request('/%s' % container_name, method='HEAD') if response.status == httplib.NOT_FOUND: raise ContainerDoesNotExistError(value=None, driver=self, container_name=container_name) except InvalidCredsError: # This just means the user doesn't have IAM permissions to do a # HEAD request but other requests might work. pass return Container(name=container_name, extra=None, driver=self) def get_object(self, container_name, object_name): container = self.get_container(container_name=container_name) object_path = self._get_object_path(container, object_name) response = self.connection.request(object_path, method='HEAD') if response.status == httplib.OK: obj = self._headers_to_object(object_name=object_name, container=container, headers=response.headers) return obj raise ObjectDoesNotExistError(value=None, driver=self, object_name=object_name) def _get_container_path(self, container): """ Return a container path :param container: Container instance :type container: :class:`Container` :return: A path for this container. :rtype: ``str`` """ return '/%s' % (container.name) def _get_object_path(self, container, object_name): """ Return an object's CDN path. :param container: Container instance :type container: :class:`Container` :param object_name: Object name :type object_name: :class:`str` :return: A path for this object. :rtype: ``str`` """ container_url = self._get_container_path(container) object_name_cleaned = self._clean_object_name(object_name) object_path = '%s/%s' % (container_url, object_name_cleaned) return object_path def create_container(self, container_name): if self.ex_location_name: root = Element('CreateBucketConfiguration') child = SubElement(root, 'LocationConstraint') child.text = self.ex_location_name data = tostring(root) else: data = '' response = self.connection.request('/%s' % (container_name), data=data, method='PUT') if response.status == httplib.OK: container = Container(name=container_name, extra=None, driver=self) return container elif response.status == httplib.CONFLICT: raise InvalidContainerNameError( value='Container with this name already exists. 
The name must ' 'be unique among all the containers in the system', container_name=container_name, driver=self) elif response.status == httplib.BAD_REQUEST: raise ContainerError( value='Bad request when creating container: %s' % response.body, container_name=container_name, driver=self) raise LibcloudError('Unexpected status code: %s' % (response.status), driver=self) def delete_container(self, container): # Note: All the objects in the container must be deleted first response = self.connection.request('/%s' % (container.name), method='DELETE') if response.status == httplib.NO_CONTENT: return True elif response.status == httplib.CONFLICT: raise ContainerIsNotEmptyError( value='Container must be empty before it can be deleted.', container_name=container.name, driver=self) elif response.status == httplib.NOT_FOUND: raise ContainerDoesNotExistError(value=None, driver=self, container_name=container.name) return False def download_object(self, obj, destination_path, overwrite_existing=False, delete_on_failure=True): obj_path = self._get_object_path(obj.container, obj.name) response = self.connection.request(obj_path, method='GET', raw=True) return self._get_object(obj=obj, callback=self._save_object, response=response, callback_kwargs={ 'obj': obj, 'response': response.response, 'destination_path': destination_path, 'overwrite_existing': overwrite_existing, 'delete_on_failure': delete_on_failure}, success_status_code=httplib.OK) def download_object_as_stream(self, obj, chunk_size=None): obj_path = self._get_object_path(obj.container, obj.name) response = self.connection.request(obj_path, method='GET', stream=True, raw=True) return self._get_object( obj=obj, callback=read_in_chunks, response=response, callback_kwargs={'iterator': response.iter_content(CHUNK_SIZE), 'chunk_size': chunk_size}, success_status_code=httplib.OK) def upload_object(self, file_path, container, object_name, extra=None, verify_hash=True, ex_storage_class=None): """ @inherits: :class:`StorageDriver.upload_object` :param ex_storage_class: Storage class :type ex_storage_class: ``str`` """ return self._put_object(container=container, object_name=object_name, extra=extra, file_path=file_path, verify_hash=verify_hash, storage_class=ex_storage_class) def _initiate_multipart(self, container, object_name, headers=None): """ Initiates a multipart upload to S3 :param container: The destination container :type container: :class:`Container` :param object_name: The name of the object which we are uploading :type object_name: ``str`` :keyword headers: Additional headers to send with the request :type headers: ``dict`` :return: The id of the newly created multipart upload :rtype: ``str`` """ headers = headers or {} request_path = self._get_object_path(container, object_name) params = {'uploads': ''} response = self.connection.request(request_path, method='POST', headers=headers, params=params) if response.status != httplib.OK: raise LibcloudError('Error initiating multipart upload', driver=self) return findtext(element=response.object, xpath='UploadId', namespace=self.namespace) def _upload_multipart_chunks(self, container, object_name, upload_id, stream, calculate_hash=True): """ Uploads data from an iterator in fixed sized chunks to S3 :param container: The destination container :type container: :class:`Container` :param object_name: The name of the object which we are uploading :type object_name: ``str`` :param upload_id: The upload id allocated for this multipart upload :type upload_id: ``str`` :param stream: The generator for fetching 
the upload data :type stream: ``generator`` :keyword calculate_hash: Indicates if we must calculate the data hash :type calculate_hash: ``bool`` :return: A tuple of (chunk info, checksum, bytes transferred) :rtype: ``tuple`` """ data_hash = None if calculate_hash: data_hash = self._get_hash_function() bytes_transferred = 0 count = 1 chunks = [] params = {'uploadId': upload_id} request_path = self._get_object_path(container, object_name) # Read the input data in chunk sizes suitable for AWS for data in read_in_chunks(stream, chunk_size=CHUNK_SIZE, fill_size=True, yield_empty=True): bytes_transferred += len(data) if calculate_hash: data_hash.update(data) chunk_hash = self._get_hash_function() chunk_hash.update(data) chunk_hash = base64.b64encode(chunk_hash.digest()).decode('utf-8') # The Content-MD5 header provides an extra level of data check and # is recommended by amazon headers = { 'Content-Length': len(data), 'Content-MD5': chunk_hash, } params['partNumber'] = count resp = self.connection.request(request_path, method='PUT', data=data, headers=headers, params=params) if resp.status != httplib.OK: raise LibcloudError('Error uploading chunk', driver=self) server_hash = resp.headers['etag'].replace('"', '') # Keep this data for a later commit chunks.append((count, server_hash)) count += 1 if calculate_hash: data_hash = data_hash.hexdigest() return (chunks, data_hash, bytes_transferred) def _commit_multipart(self, container, object_name, upload_id, chunks): """ Makes a final commit of the data. :param container: The destination container :type container: :class:`Container` :param object_name: The name of the object which we are uploading :type object_name: ``str`` :param upload_id: The upload id allocated for this multipart upload :type upload_id: ``str`` :param chunks: A list of (chunk_number, chunk_hash) tuples. :type chunks: ``list`` :return: The server side hash of the uploaded data :rtype: ``str`` """ root = Element('CompleteMultipartUpload') for (count, etag) in chunks: part = SubElement(root, 'Part') part_no = SubElement(part, 'PartNumber') part_no.text = str(count) etag_id = SubElement(part, 'ETag') etag_id.text = str(etag) data = tostring(root) headers = {'Content-Length': len(data)} params = {'uploadId': upload_id} request_path = self._get_object_path(container, object_name) response = self.connection.request(request_path, headers=headers, params=params, data=data, method='POST') if response.status != httplib.OK: element = response.object # pylint: disable=maybe-no-member code, message = response._parse_error_details(element=element) msg = 'Error in multipart commit: %s (%s)' % (message, code) raise LibcloudError(msg, driver=self) # Get the server's etag to be passed back to the caller body = response.parse_body() server_hash = body.find(fixxpath(xpath='ETag', namespace=self.namespace)).text return server_hash def _abort_multipart(self, container, object_name, upload_id): """ Aborts an already initiated multipart upload :param container: The destination container :type container: :class:`Container` :param object_name: The name of the object which we are uploading :type object_name: ``str`` :param upload_id: The upload id allocated for this multipart upload :type upload_id: ``str`` """ params = {'uploadId': upload_id} request_path = self._get_object_path(container, object_name) resp = self.connection.request(request_path, method='DELETE', params=params) if resp.status != httplib.NO_CONTENT: raise LibcloudError('Error in multipart abort. 
status_code=%d' % (resp.status), driver=self) def upload_object_via_stream(self, iterator, container, object_name, extra=None, ex_storage_class=None): """ @inherits: :class:`StorageDriver.upload_object_via_stream` :param ex_storage_class: Storage class :type ex_storage_class: ``str`` """ method = 'PUT' params = None # This driver is used by other S3 API compatible drivers also. # Amazon provides a different (complex?) mechanism to do multipart # uploads if self.supports_s3_multipart_upload: return self._put_object_multipart(container=container, object_name=object_name, extra=extra, stream=iterator, verify_hash=False, storage_class=ex_storage_class) return self._put_object(container=container, object_name=object_name, extra=extra, method=method, query_args=params, stream=iterator, verify_hash=False, storage_class=ex_storage_class) def delete_object(self, obj): object_path = self._get_object_path(obj.container, obj.name) response = self.connection.request(object_path, method='DELETE') if response.status == httplib.NO_CONTENT: return True elif response.status == httplib.NOT_FOUND: raise ObjectDoesNotExistError(value=None, driver=self, object_name=obj.name) return False def ex_iterate_multipart_uploads(self, container, prefix=None, delimiter=None): """ Extension method for listing all in-progress S3 multipart uploads. Each multipart upload which has not been committed or aborted is considered in-progress. :param container: The container holding the uploads :type container: :class:`Container` :keyword prefix: Print only uploads of objects with this prefix :type prefix: ``str`` :keyword delimiter: The object/key names are grouped based on being split by this delimiter :type delimiter: ``str`` :return: A generator of S3MultipartUpload instances. :rtype: ``generator`` of :class:`S3MultipartUpload` """ if not self.supports_s3_multipart_upload: raise LibcloudError('Feature not supported', driver=self) # Get the data for a specific container request_path = self._get_container_path(container) params = {'max-uploads': RESPONSES_PER_REQUEST, 'uploads': ''} if prefix: params['prefix'] = prefix if delimiter: params['delimiter'] = delimiter def finder(node, text): return node.findtext(fixxpath(xpath=text, namespace=self.namespace)) while True: response = self.connection.request(request_path, params=params) if response.status != httplib.OK: raise LibcloudError('Error fetching multipart uploads. 
' 'Got code: %s' % response.status, driver=self) body = response.parse_body() # pylint: disable=maybe-no-member for node in body.findall(fixxpath(xpath='Upload', namespace=self.namespace)): initiator = node.find(fixxpath(xpath='Initiator', namespace=self.namespace)) owner = node.find(fixxpath(xpath='Owner', namespace=self.namespace)) key = finder(node, 'Key') upload_id = finder(node, 'UploadId') created_at = finder(node, 'Initiated') initiator = finder(initiator, 'DisplayName') owner = finder(owner, 'DisplayName') yield S3MultipartUpload(key, upload_id, created_at, initiator, owner) # Check if this is the last entry in the listing # pylint: disable=maybe-no-member is_truncated = body.findtext(fixxpath(xpath='IsTruncated', namespace=self.namespace)) if is_truncated.lower() == 'false': break # Provide params for the next request upload_marker = body.findtext(fixxpath(xpath='NextUploadIdMarker', namespace=self.namespace)) key_marker = body.findtext(fixxpath(xpath='NextKeyMarker', namespace=self.namespace)) params['key-marker'] = key_marker params['upload-id-marker'] = upload_marker def ex_cleanup_all_multipart_uploads(self, container, prefix=None): """ Extension method for removing all partially completed S3 multipart uploads. :param container: The container holding the uploads :type container: :class:`Container` :keyword prefix: Delete only uploads of objects with this prefix :type prefix: ``str`` """ # Iterate through the container and delete the upload ids for upload in self.ex_iterate_multipart_uploads(container, prefix, delimiter=None): self._abort_multipart(container, upload.key, upload.id) def _clean_object_name(self, name): name = urlquote(name) return name def _put_object(self, container, object_name, method='PUT', query_args=None, extra=None, file_path=None, stream=None, verify_hash=True, storage_class=None): headers = {} extra = extra or {} headers.update(self._to_storage_class_headers(storage_class)) content_type = extra.get('content_type', None) meta_data = extra.get('meta_data', None) acl = extra.get('acl', None) if meta_data: for key, value in list(meta_data.items()): key = self.http_vendor_prefix + '-meta-%s' % (key) headers[key] = value if acl: headers[self.http_vendor_prefix + '-acl'] = acl request_path = self._get_object_path(container, object_name) if query_args: request_path = '?'.join((request_path, query_args)) result_dict = self._upload_object( object_name=object_name, content_type=content_type, request_path=request_path, request_method=method, headers=headers, file_path=file_path, stream=stream) response = result_dict['response'] bytes_transferred = result_dict['bytes_transferred'] headers = response.headers response = response server_hash = headers.get('etag', '').replace('"', '') if (verify_hash and result_dict['data_hash'] != server_hash): raise ObjectHashMismatchError( value='MD5 hash {0} checksum does not match {1}'.format( server_hash, result_dict['data_hash']), object_name=object_name, driver=self) elif response.status == httplib.OK: obj = Object( name=object_name, size=bytes_transferred, hash=server_hash, extra={'acl': acl}, meta_data=meta_data, container=container, driver=self) return obj else: raise LibcloudError( 'Unexpected status code, status_code=%s' % (response.status), driver=self) def _put_object_multipart(self, container, object_name, stream, extra=None, verify_hash=False, storage_class=None): """ Uploads an object using the S3 multipart algorithm. 
:param container: The destination container :type container: :class:`Container` :param object_name: The name of the object which we are uploading :type object_name: ``str`` :param stream: The generator for fetching the upload data :type stream: ``generator`` :keyword verify_hash: Indicates if we must calculate the data hash :type verify_hash: ``bool`` :keyword extra: Additional options :type extra: ``dict`` :keyword storage_class: The name of the S3 object's storage class :type extra: ``str`` :return: The uploaded object :rtype: :class:`Object` """ headers = {} extra = extra or {} headers.update(self._to_storage_class_headers(storage_class)) content_type = extra.get('content_type', None) meta_data = extra.get('meta_data', None) acl = extra.get('acl', None) if content_type: headers['Content-Type'] = content_type if meta_data: for key, value in list(meta_data.items()): key = self.http_vendor_prefix + '-meta-%s' % (key) headers[key] = value if acl: headers[self.http_vendor_prefix + '-acl'] = acl upload_id = self._initiate_multipart(container, object_name, headers=headers) try: result = self._upload_multipart_chunks(container, object_name, upload_id, stream, calculate_hash=verify_hash) chunks, data_hash, bytes_transferred = result # Commit the chunk info and complete the upload etag = self._commit_multipart(container, object_name, upload_id, chunks) except Exception: # Amazon provides a mechanism for aborting an upload. self._abort_multipart(container, object_name, upload_id) raise return Object( name=object_name, size=bytes_transferred, hash=etag, extra={'acl': acl}, meta_data=meta_data, container=container, driver=self) def _to_storage_class_headers(self, storage_class): """ Generates request headers given a storage class name. :keyword storage_class: The name of the S3 object's storage class :type extra: ``str`` :return: Headers to include in a request :rtype: :dict: """ headers = {} storage_class = storage_class or 'standard' if storage_class not in ['standard', 'reduced_redundancy']: raise ValueError( 'Invalid storage class value: %s' % (storage_class)) key = self.http_vendor_prefix + '-storage-class' headers[key] = storage_class.upper() return headers def _to_containers(self, obj, xpath): for element in obj.findall(fixxpath(xpath=xpath, namespace=self.namespace)): yield self._to_container(element) def _to_objs(self, obj, xpath, container): return [self._to_obj(element, container) for element in obj.findall(fixxpath(xpath=xpath, namespace=self.namespace))] def _to_container(self, element): extra = { 'creation_date': findtext(element=element, xpath='CreationDate', namespace=self.namespace) } container = Container(name=findtext(element=element, xpath='Name', namespace=self.namespace), extra=extra, driver=self ) return container def _headers_to_object(self, object_name, container, headers): hash = headers['etag'].replace('"', '') extra = {'content_type': headers['content-type'], 'etag': headers['etag']} meta_data = {} if 'last-modified' in headers: extra['last_modified'] = headers['last-modified'] for key, value in headers.items(): if not key.lower().startswith(self.http_vendor_prefix + '-meta-'): continue key = key.replace(self.http_vendor_prefix + '-meta-', '') meta_data[key] = value obj = Object(name=object_name, size=headers['content-length'], hash=hash, extra=extra, meta_data=meta_data, container=container, driver=self) return obj def _to_obj(self, element, container): owner_id = findtext(element=element, xpath='Owner/ID', namespace=self.namespace) owner_display_name = 
findtext(element=element, xpath='Owner/DisplayName', namespace=self.namespace) meta_data = {'owner': {'id': owner_id, 'display_name': owner_display_name}} last_modified = findtext(element=element, xpath='LastModified', namespace=self.namespace) extra = {'last_modified': last_modified} obj = Object(name=findtext(element=element, xpath='Key', namespace=self.namespace), size=int(findtext(element=element, xpath='Size', namespace=self.namespace)), hash=findtext(element=element, xpath='ETag', namespace=self.namespace).replace('"', ''), extra=extra, meta_data=meta_data, container=container, driver=self ) return obj class S3StorageDriver(AWSDriver, BaseS3StorageDriver): name = 'Amazon S3 (us-east-1)' connectionCls = S3SignatureV4Connection region_name = 'us-east-1' class S3USEast2Connection(S3SignatureV4Connection): host = S3_US_EAST2_HOST class S3USEast2StorageDriver(S3StorageDriver): name = 'Amazon S3 (us-east-2)' connectionCls = S3USEast2Connection ex_location_name = 'us-east-2' region_name = 'us-east-2' class S3USWestConnection(S3SignatureV4Connection): host = S3_US_WEST_HOST class S3USWestStorageDriver(S3StorageDriver): name = 'Amazon S3 (us-west-1)' connectionCls = S3USWestConnection ex_location_name = 'us-west-1' region_name = 'us-west-1' class S3USWestOregonConnection(S3SignatureV4Connection): host = S3_US_WEST_OREGON_HOST class S3USWestOregonStorageDriver(S3StorageDriver): name = 'Amazon S3 (us-west-2)' connectionCls = S3USWestOregonConnection ex_location_name = 'us-west-2' region_name = 'us-west-2' class S3USGovWestConnection(S3SignatureV4Connection): host = S3_US_GOV_WEST_HOST class S3USGovWestStorageDriver(S3StorageDriver): name = 'Amazon S3 (us-gov-west-1)' connectionCls = S3USGovWestConnection ex_location_name = 'us-gov-west-1' region_name = 'us-gov-west-1' class S3CNNorthConnection(S3SignatureV4Connection): host = S3_CN_NORTH_HOST class S3CNNorthStorageDriver(S3StorageDriver): name = 'Amazon S3 (cn-north-1)' connectionCls = S3CNNorthConnection ex_location_name = 'cn-north-1' region_name = 'cn-north-1' class S3EUWestConnection(S3SignatureV4Connection): host = S3_EU_WEST_HOST class S3EUWestStorageDriver(S3StorageDriver): name = 'Amazon S3 (eu-west-1)' connectionCls = S3EUWestConnection ex_location_name = 'EU' region_name = 'eu-west-1' class S3EUWest2Connection(S3SignatureV4Connection): host = S3_EU_WEST2_HOST class S3EUWest2StorageDriver(S3StorageDriver): name = 'Amazon S3 (eu-west-2)' connectionCls = S3EUWest2Connection ex_location_name = 'eu-west-2' region_name = 'eu-west-2' class S3EUCentralConnection(S3SignatureV4Connection): host = S3_EU_CENTRAL_HOST class S3EUCentralStorageDriver(S3StorageDriver): name = 'Amazon S3 (eu-central-1)' connectionCls = S3EUCentralConnection ex_location_name = 'eu-central-1' region_name = 'eu-central-1' class S3APSEConnection(S3SignatureV4Connection): host = S3_AP_SOUTHEAST_HOST class S3APSEStorageDriver(S3StorageDriver): name = 'Amazon S3 (ap-southeast-1)' connectionCls = S3APSEConnection ex_location_name = 'ap-southeast-1' region_name = 'ap-southeast-1' class S3APSE2Connection(S3SignatureV4Connection): host = S3_AP_SOUTHEAST2_HOST class S3APSE2StorageDriver(S3StorageDriver): name = 'Amazon S3 (ap-southeast-2)' connectionCls = S3APSE2Connection ex_location_name = 'ap-southeast-2' region_name = 'ap-southeast-2' class S3APNE1Connection(S3SignatureV4Connection): host = S3_AP_NORTHEAST1_HOST S3APNEConnection = S3APNE1Connection class S3APNE1StorageDriver(S3StorageDriver): name = 'Amazon S3 (ap-northeast-1)' connectionCls = S3APNEConnection 
ex_location_name = 'ap-northeast-1' region_name = 'ap-northeast-1' S3APNEStorageDriver = S3APNE1StorageDriver class S3APNE2Connection(S3SignatureV4Connection): host = S3_AP_NORTHEAST2_HOST class S3APNE2StorageDriver(S3StorageDriver): name = 'Amazon S3 (ap-northeast-2)' connectionCls = S3APNE2Connection ex_location_name = 'ap-northeast-2' region_name = 'ap-northeast-2' class S3APSouthConnection(S3SignatureV4Connection): host = S3_AP_SOUTH_HOST class S3APSouthStorageDriver(S3StorageDriver): name = 'Amazon S3 (ap-south-1)' connectionCls = S3APSouthConnection ex_location_name = 'ap-south-1' region_name = 'ap-south-1' class S3SAEastConnection(S3SignatureV4Connection): host = S3_SA_EAST_HOST class S3SAEastStorageDriver(S3StorageDriver): name = 'Amazon S3 (sa-east-1)' connectionCls = S3SAEastConnection ex_location_name = 'sa-east-1' region_name = 'sa-east-1' class S3CACentralConnection(S3SignatureV4Connection): host = S3_CA_CENTRAL_HOST class S3CACentralStorageDriver(S3StorageDriver): name = 'Amazon S3 (ca-central-1)' connectionCls = S3CACentralConnection ex_location_name = 'ca-central-1' region_name = 'ca-central-1'
apache-2.0
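The libcloud S3 driver code above wires multipart uploads into the standard storage API. As a rough usage sketch (credentials, bucket and object names are placeholders, and whether a streamed upload actually takes the multipart path depends on the driver's multipart support):

# Usage sketch for the S3 storage driver above; key/secret/bucket are placeholders.
from libcloud.storage.types import Provider
from libcloud.storage.providers import get_driver


def upload_and_cleanup(key, secret, bucket_name, object_name, chunks):
    """Stream `chunks` (an iterable of bytes) into S3, then abort any
    partially completed multipart uploads left behind in the bucket."""
    cls = get_driver(Provider.S3)  # resolves to S3StorageDriver (us-east-1)
    driver = cls(key, secret)
    container = driver.get_container(container_name=bucket_name)

    # On drivers that support S3 multipart uploads, a streamed upload goes
    # through the _put_object_multipart() path shown above.
    obj = driver.upload_object_via_stream(
        iterator=iter(chunks),
        container=container,
        object_name=object_name,
        extra={'content_type': 'application/octet-stream'})

    # Extension method defined above: abort any stale multipart uploads.
    driver.ex_cleanup_all_multipart_uploads(container)
    return obj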
dims/nova
nova/scheduler/filters/trusted_filter.py
14
9323
# Copyright (c) 2012 Intel, Inc. # Copyright (c) 2011-2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Filter to add support for Trusted Computing Pools (EXPERIMENTAL). Filter that only schedules tasks on a host if the integrity (trust) of that host matches the trust requested in the ``extra_specs`` for the flavor. The ``extra_specs`` will contain a key/value pair where the key is ``trust``. The value of this pair (``trusted``/``untrusted``) must match the integrity of that host (obtained from the Attestation service) before the task can be scheduled on that host. Note that the parameters to control access to the Attestation Service are in the ``nova.conf`` file in a separate ``trust`` section. For example, the config file will look something like: [DEFAULT] verbose=True ... [trust] server=attester.mynetwork.com Details on the specific parameters can be found in the file ``trust_attest.py``. Details on setting up and using an Attestation Service can be found at the Open Attestation project at: https://github.com/OpenAttestation/OpenAttestation """ from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_utils import timeutils import requests import nova.conf from nova import context from nova.i18n import _LW from nova import objects from nova.scheduler import filters LOG = logging.getLogger(__name__) CONF = nova.conf.CONF class AttestationService(object): # Provide access wrapper to attestation server to get integrity report. def __init__(self): self.api_url = CONF.trusted_computing.attestation_api_url self.host = CONF.trusted_computing.attestation_server self.port = CONF.trusted_computing.attestation_port self.auth_blob = CONF.trusted_computing.attestation_auth_blob self.key_file = None self.cert_file = None self.ca_file = CONF.trusted_computing.attestation_server_ca_file self.request_count = 100 # If the CA file is not provided, let's check the cert if verification # asked self.verify = (not CONF.trusted_computing.attestation_insecure_ssl and self.ca_file or True) self.cert = (self.cert_file, self.key_file) def _do_request(self, method, action_url, body, headers): # Connects to the server and issues a request. 
# :returns: result data # :raises: IOError if the request fails action_url = "https://%s:%s%s/%s" % (self.host, self.port, self.api_url, action_url) try: res = requests.request(method, action_url, data=body, headers=headers, cert=self.cert, verify=self.verify) status_code = res.status_code if status_code in (requests.codes.OK, requests.codes.CREATED, requests.codes.ACCEPTED, requests.codes.NO_CONTENT): try: return requests.codes.OK, jsonutils.loads(res.text) except (TypeError, ValueError): return requests.codes.OK, res.text return status_code, None except requests.exceptions.RequestException: return IOError, None def _request(self, cmd, subcmd, hosts): body = {} body['count'] = len(hosts) body['hosts'] = hosts cooked = jsonutils.dumps(body) headers = {} headers['content-type'] = 'application/json' headers['Accept'] = 'application/json' if self.auth_blob: headers['x-auth-blob'] = self.auth_blob status, res = self._do_request(cmd, subcmd, cooked, headers) return status, res def do_attestation(self, hosts): """Attests compute nodes through OAT service. :param hosts: hosts list to be attested :returns: dictionary for trust level and validate time """ result = None status, data = self._request("POST", "PollHosts", hosts) if data is not None: result = data.get('hosts') return result class ComputeAttestationCache(object): """Cache for compute node attestation Cache compute node's trust level for sometime, if the cache is out of date, poll OAT service to flush the cache. OAT service may have cache also. OAT service's cache valid time should be set shorter than trusted filter's cache valid time. """ def __init__(self): self.attestservice = AttestationService() self.compute_nodes = {} admin = context.get_admin_context() # Fetch compute node list to initialize the compute_nodes, # so that we don't need poll OAT service one by one for each # host in the first round that scheduler invokes us. computes = objects.ComputeNodeList.get_all(admin) for compute in computes: host = compute.hypervisor_hostname self._init_cache_entry(host) def _cache_valid(self, host): cachevalid = False if host in self.compute_nodes: node_stats = self.compute_nodes.get(host) if not timeutils.is_older_than( node_stats['vtime'], CONF.trusted_computing.attestation_auth_timeout): cachevalid = True return cachevalid def _init_cache_entry(self, host): self.compute_nodes[host] = { 'trust_lvl': 'unknown', 'vtime': timeutils.normalize_time( timeutils.parse_isotime("1970-01-01T00:00:00Z"))} def _invalidate_caches(self): for host in self.compute_nodes: self._init_cache_entry(host) def _update_cache_entry(self, state): entry = {} host = state['host_name'] entry['trust_lvl'] = state['trust_lvl'] try: # Normalize as naive object to interoperate with utcnow(). entry['vtime'] = timeutils.normalize_time( timeutils.parse_isotime(state['vtime'])) except ValueError: try: # Mt. Wilson does not necessarily return an ISO8601 formatted # `vtime`, so we should try to parse it as a string formatted # datetime. vtime = timeutils.parse_strtime(state['vtime'], fmt="%c") entry['vtime'] = timeutils.normalize_time(vtime) except ValueError: # Mark the system as un-trusted if get invalid vtime. 
entry['trust_lvl'] = 'unknown' entry['vtime'] = timeutils.utcnow() self.compute_nodes[host] = entry def _update_cache(self): self._invalidate_caches() states = self.attestservice.do_attestation( list(self.compute_nodes.keys())) if states is None: return for state in states: self._update_cache_entry(state) def get_host_attestation(self, host): """Check host's trust level.""" if host not in self.compute_nodes: self._init_cache_entry(host) if not self._cache_valid(host): self._update_cache() level = self.compute_nodes.get(host).get('trust_lvl') return level class ComputeAttestation(object): def __init__(self): self.caches = ComputeAttestationCache() def is_trusted(self, host, trust): level = self.caches.get_host_attestation(host) return trust == level class TrustedFilter(filters.BaseHostFilter): """Trusted filter to support Trusted Compute Pools.""" def __init__(self): self.compute_attestation = ComputeAttestation() LOG.warning(_LW('The TrustedFilter is considered experimental ' 'by the OpenStack project because it receives much ' 'less testing than the rest of Nova. This may change ' 'in the future, but current deployers should be aware ' 'that the use of it in production right now may be ' 'risky.')) # The hosts the instances are running on doesn't change within a request run_filter_once_per_request = True def host_passes(self, host_state, spec_obj): instance_type = spec_obj.flavor extra = (instance_type.extra_specs if 'extra_specs' in instance_type else {}) trust = extra.get('trust:trusted_host') host = host_state.nodename if trust: return self.compute_attestation.is_trusted(host, trust) return True
apache-2.0
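TrustedFilter above only re-polls the attestation service once a host's cached trust level is older than attestation_auth_timeout, and a host passes only when the flavor's trust:trusted_host extra spec matches the attested level. A stripped-down sketch of that cache logic, with the OAT call replaced by a stub callable and oslo timeutils replaced by plain datetime (both substitutions are assumptions for illustration, not Nova code):

# Simplified illustration of the attestation-cache expiry logic above.
from datetime import datetime, timedelta

CACHE_TIMEOUT_SECONDS = 60  # stands in for CONF.trusted_computing.attestation_auth_timeout


class TrustCache(object):
    def __init__(self, attest_fn):
        # attest_fn(hosts) -> {host: 'trusted' | 'untrusted'}; stands in for OAT.
        self._attest_fn = attest_fn
        self._entries = {}  # host -> (trust_lvl, vtime)

    def _valid(self, host):
        entry = self._entries.get(host)
        if entry is None:
            return False
        return datetime.utcnow() - entry[1] < timedelta(seconds=CACHE_TIMEOUT_SECONDS)

    def trust_level(self, host):
        if not self._valid(host):
            # Re-attest every known host in one call, mirroring _update_cache().
            hosts = set(self._entries) | {host}
            results = self._attest_fn(sorted(hosts)) or {}
            now = datetime.utcnow()
            self._entries = {h: (results.get(h, 'unknown'), now) for h in hosts}
        return self._entries[host][0]


# host_passes() then reduces to: requested == cache.trust_level(host),
# where requested comes from the flavor's 'trust:trusted_host' extra spec.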
liu602348184/django
django/contrib/postgres/fields/ranges.py
172
5636
import json

from psycopg2.extras import DateRange, DateTimeTZRange, NumericRange, Range

from django.contrib.postgres import forms, lookups
from django.db import models
from django.utils import six

from .utils import AttributeSetter

__all__ = [
    'RangeField', 'IntegerRangeField', 'BigIntegerRangeField', 'FloatRangeField',
    'DateTimeRangeField', 'DateRangeField',
]


class RangeField(models.Field):
    empty_strings_allowed = False

    def get_prep_value(self, value):
        if value is None:
            return None
        elif isinstance(value, Range):
            return value
        elif isinstance(value, (list, tuple)):
            return self.range_type(value[0], value[1])
        return value

    def to_python(self, value):
        if isinstance(value, six.string_types):
            # Assume we're deserializing
            vals = json.loads(value)
            for end in ('lower', 'upper'):
                if end in vals:
                    vals[end] = self.base_field.to_python(vals[end])
            value = self.range_type(**vals)
        elif isinstance(value, (list, tuple)):
            value = self.range_type(value[0], value[1])
        return value

    def set_attributes_from_name(self, name):
        super(RangeField, self).set_attributes_from_name(name)
        self.base_field.set_attributes_from_name(name)

    def value_to_string(self, obj):
        value = self.value_from_object(obj)
        if value is None:
            return None
        if value.isempty:
            return json.dumps({"empty": True})
        base_field = self.base_field
        result = {"bounds": value._bounds}
        for end in ('lower', 'upper'):
            obj = AttributeSetter(base_field.attname, getattr(value, end))
            result[end] = base_field.value_to_string(obj)
        return json.dumps(result)

    def formfield(self, **kwargs):
        kwargs.setdefault('form_class', self.form_field)
        return super(RangeField, self).formfield(**kwargs)


class IntegerRangeField(RangeField):
    base_field = models.IntegerField()
    range_type = NumericRange
    form_field = forms.IntegerRangeField

    def db_type(self, connection):
        return 'int4range'


class BigIntegerRangeField(RangeField):
    base_field = models.BigIntegerField()
    range_type = NumericRange
    form_field = forms.IntegerRangeField

    def db_type(self, connection):
        return 'int8range'


class FloatRangeField(RangeField):
    base_field = models.FloatField()
    range_type = NumericRange
    form_field = forms.FloatRangeField

    def db_type(self, connection):
        return 'numrange'


class DateTimeRangeField(RangeField):
    base_field = models.DateTimeField()
    range_type = DateTimeTZRange
    form_field = forms.DateTimeRangeField

    def db_type(self, connection):
        return 'tstzrange'


class DateRangeField(RangeField):
    base_field = models.DateField()
    range_type = DateRange
    form_field = forms.DateRangeField

    def db_type(self, connection):
        return 'daterange'


RangeField.register_lookup(lookups.DataContains)
RangeField.register_lookup(lookups.ContainedBy)
RangeField.register_lookup(lookups.Overlap)


class RangeContainedBy(models.Lookup):
    lookup_name = 'contained_by'
    type_mapping = {
        'integer': 'int4range',
        'bigint': 'int8range',
        'double precision': 'numrange',
        'date': 'daterange',
        'timestamp with time zone': 'tstzrange',
    }

    def as_sql(self, qn, connection):
        field = self.lhs.output_field
        if isinstance(field, models.FloatField):
            sql = '%s::numeric <@ %s::{}'.format(self.type_mapping[field.db_type(connection)])
        else:
            sql = '%s <@ %s::{}'.format(self.type_mapping[field.db_type(connection)])
        lhs, lhs_params = self.process_lhs(qn, connection)
        rhs, rhs_params = self.process_rhs(qn, connection)
        params = lhs_params + rhs_params
        return sql % (lhs, rhs), params

    def get_prep_lookup(self):
        return RangeField().get_prep_lookup(self.lookup_name, self.rhs)


models.DateField.register_lookup(RangeContainedBy)
models.DateTimeField.register_lookup(RangeContainedBy)
models.IntegerField.register_lookup(RangeContainedBy)
models.BigIntegerField.register_lookup(RangeContainedBy)
models.FloatField.register_lookup(RangeContainedBy)


@RangeField.register_lookup
class FullyLessThan(lookups.PostgresSimpleLookup):
    lookup_name = 'fully_lt'
    operator = '<<'


@RangeField.register_lookup
class FullGreaterThan(lookups.PostgresSimpleLookup):
    lookup_name = 'fully_gt'
    operator = '>>'


@RangeField.register_lookup
class NotLessThan(lookups.PostgresSimpleLookup):
    lookup_name = 'not_lt'
    operator = '&>'


@RangeField.register_lookup
class NotGreaterThan(lookups.PostgresSimpleLookup):
    lookup_name = 'not_gt'
    operator = '&<'


@RangeField.register_lookup
class AdjacentToLookup(lookups.PostgresSimpleLookup):
    lookup_name = 'adjacent_to'
    operator = '-|-'


@RangeField.register_lookup
class RangeStartsWith(lookups.FunctionTransform):
    lookup_name = 'startswith'
    function = 'lower'

    @property
    def output_field(self):
        return self.lhs.output_field.base_field


@RangeField.register_lookup
class RangeEndsWith(lookups.FunctionTransform):
    lookup_name = 'endswith'
    function = 'upper'

    @property
    def output_field(self):
        return self.lhs.output_field.base_field


@RangeField.register_lookup
class IsEmpty(lookups.FunctionTransform):
    lookup_name = 'isempty'
    function = 'isempty'
    output_field = models.BooleanField()
bsd-3-clause
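The range fields above map onto PostgreSQL range column types, and the registered lookups become ordinary queryset filters. A hedged usage sketch (the Event model is invented for illustration and assumes a PostgreSQL database with django.contrib.postgres installed):

# Hypothetical model and queries exercising the range fields defined above.
from django.contrib.postgres.fields import IntegerRangeField, DateTimeRangeField
from django.db import models
from psycopg2.extras import NumericRange


class Event(models.Model):
    ages = IntegerRangeField()       # stored as int4range
    timeslot = DateTimeRangeField()  # stored as tstzrange


def example_queries():
    # Each lookup below is registered on RangeField in the module above.
    return [
        Event.objects.filter(ages__contains=30),                        # range @> value
        Event.objects.filter(ages__contained_by=NumericRange(18, 65)),  # <@
        Event.objects.filter(ages__overlap=NumericRange(20, 40)),       # &&
        Event.objects.filter(ages__fully_lt=NumericRange(100, 200)),    # <<
        Event.objects.filter(ages__startswith=18),                      # lower(ages) = 18
    ]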
spencerlyon2/pygments
pygments/lexers/data.py
2
17895
# -*- coding: utf-8 -*- """ pygments.lexers.data ~~~~~~~~~~~~~~~~~~~~ Lexers for data file format. :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ import re from pygments.lexer import RegexLexer, ExtendedRegexLexer, LexerContext, \ include, bygroups from pygments.token import Text, Comment, Keyword, Name, String, Number, \ Punctuation, Literal __all__ = ['YamlLexer', 'JsonLexer'] class YamlLexerContext(LexerContext): """Indentation context for the YAML lexer.""" def __init__(self, *args, **kwds): super(YamlLexerContext, self).__init__(*args, **kwds) self.indent_stack = [] self.indent = -1 self.next_indent = 0 self.block_scalar_indent = None class YamlLexer(ExtendedRegexLexer): """ Lexer for `YAML <http://yaml.org/>`_, a human-friendly data serialization language. .. versionadded:: 0.11 """ name = 'YAML' aliases = ['yaml'] filenames = ['*.yaml', '*.yml'] mimetypes = ['text/x-yaml'] def something(token_class): """Do not produce empty tokens.""" def callback(lexer, match, context): text = match.group() if not text: return yield match.start(), token_class, text context.pos = match.end() return callback def reset_indent(token_class): """Reset the indentation levels.""" def callback(lexer, match, context): text = match.group() context.indent_stack = [] context.indent = -1 context.next_indent = 0 context.block_scalar_indent = None yield match.start(), token_class, text context.pos = match.end() return callback def save_indent(token_class, start=False): """Save a possible indentation level.""" def callback(lexer, match, context): text = match.group() extra = '' if start: context.next_indent = len(text) if context.next_indent < context.indent: while context.next_indent < context.indent: context.indent = context.indent_stack.pop() if context.next_indent > context.indent: extra = text[context.indent:] text = text[:context.indent] else: context.next_indent += len(text) if text: yield match.start(), token_class, text if extra: yield match.start()+len(text), token_class.Error, extra context.pos = match.end() return callback def set_indent(token_class, implicit=False): """Set the previously saved indentation level.""" def callback(lexer, match, context): text = match.group() if context.indent < context.next_indent: context.indent_stack.append(context.indent) context.indent = context.next_indent if not implicit: context.next_indent += len(text) yield match.start(), token_class, text context.pos = match.end() return callback def set_block_scalar_indent(token_class): """Set an explicit indentation level for a block scalar.""" def callback(lexer, match, context): text = match.group() context.block_scalar_indent = None if not text: return increment = match.group(1) if increment: current_indent = max(context.indent, 0) increment = int(increment) context.block_scalar_indent = current_indent + increment if text: yield match.start(), token_class, text context.pos = match.end() return callback def parse_block_scalar_empty_line(indent_token_class, content_token_class): """Process an empty line in a block scalar.""" def callback(lexer, match, context): text = match.group() if (context.block_scalar_indent is None or len(text) <= context.block_scalar_indent): if text: yield match.start(), indent_token_class, text else: indentation = text[:context.block_scalar_indent] content = text[context.block_scalar_indent:] yield match.start(), indent_token_class, indentation yield (match.start()+context.block_scalar_indent, content_token_class, content) context.pos = 
match.end() return callback def parse_block_scalar_indent(token_class): """Process indentation spaces in a block scalar.""" def callback(lexer, match, context): text = match.group() if context.block_scalar_indent is None: if len(text) <= max(context.indent, 0): context.stack.pop() context.stack.pop() return context.block_scalar_indent = len(text) else: if len(text) < context.block_scalar_indent: context.stack.pop() context.stack.pop() return if text: yield match.start(), token_class, text context.pos = match.end() return callback def parse_plain_scalar_indent(token_class): """Process indentation spaces in a plain scalar.""" def callback(lexer, match, context): text = match.group() if len(text) <= context.indent: context.stack.pop() context.stack.pop() return if text: yield match.start(), token_class, text context.pos = match.end() return callback tokens = { # the root rules 'root': [ # ignored whitespaces (r'[ ]+(?=#|$)', Text), # line breaks (r'\n+', Text), # a comment (r'#[^\n]*', Comment.Single), # the '%YAML' directive (r'^%YAML(?=[ ]|$)', reset_indent(Name.Tag), 'yaml-directive'), # the %TAG directive (r'^%TAG(?=[ ]|$)', reset_indent(Name.Tag), 'tag-directive'), # document start and document end indicators (r'^(?:---|\.\.\.)(?=[ ]|$)', reset_indent(Name.Namespace), 'block-line'), # indentation spaces (r'[ ]*(?![ \t\n\r\f\v]|$)', save_indent(Text, start=True), ('block-line', 'indentation')), ], # trailing whitespaces after directives or a block scalar indicator 'ignored-line': [ # ignored whitespaces (r'[ ]+(?=#|$)', Text), # a comment (r'#[^\n]*', Comment.Single), # line break (r'\n', Text, '#pop:2'), ], # the %YAML directive 'yaml-directive': [ # the version number (r'([ ]+)([0-9]+\.[0-9]+)', bygroups(Text, Number), 'ignored-line'), ], # the %YAG directive 'tag-directive': [ # a tag handle and the corresponding prefix (r'([ ]+)(!|![0-9A-Za-z_-]*!)' r'([ ]+)(!|!?[0-9A-Za-z;/?:@&=+$,_.!~*\'()\[\]%-]+)', bygroups(Text, Keyword.Type, Text, Keyword.Type), 'ignored-line'), ], # block scalar indicators and indentation spaces 'indentation': [ # trailing whitespaces are ignored (r'[ ]*$', something(Text), '#pop:2'), # whitespaces preceeding block collection indicators (r'[ ]+(?=[?:-](?:[ ]|$))', save_indent(Text)), # block collection indicators (r'[?:-](?=[ ]|$)', set_indent(Punctuation.Indicator)), # the beginning a block line (r'[ ]*', save_indent(Text), '#pop'), ], # an indented line in the block context 'block-line': [ # the line end (r'[ ]*(?=#|$)', something(Text), '#pop'), # whitespaces separating tokens (r'[ ]+', Text), # tags, anchors and aliases, include('descriptors'), # block collections and scalars include('block-nodes'), # flow collections and quoted scalars include('flow-nodes'), # a plain scalar (r'(?=[^ \t\n\r\f\v?:,\[\]{}#&*!|>\'"%@`-]|[?:-][^ \t\n\r\f\v])', something(Name.Variable), 'plain-scalar-in-block-context'), ], # tags, anchors, aliases 'descriptors': [ # a full-form tag (r'!<[0-9A-Za-z;/?:@&=+$,_.!~*\'()\[\]%-]+>', Keyword.Type), # a tag in the form '!', '!suffix' or '!handle!suffix' (r'!(?:[0-9A-Za-z_-]+)?' 
r'(?:![0-9A-Za-z;/?:@&=+$,_.!~*\'()\[\]%-]+)?', Keyword.Type), # an anchor (r'&[0-9A-Za-z_-]+', Name.Label), # an alias (r'\*[0-9A-Za-z_-]+', Name.Variable), ], # block collections and scalars 'block-nodes': [ # implicit key (r':(?=[ ]|$)', set_indent(Punctuation.Indicator, implicit=True)), # literal and folded scalars (r'[|>]', Punctuation.Indicator, ('block-scalar-content', 'block-scalar-header')), ], # flow collections and quoted scalars 'flow-nodes': [ # a flow sequence (r'\[', Punctuation.Indicator, 'flow-sequence'), # a flow mapping (r'\{', Punctuation.Indicator, 'flow-mapping'), # a single-quoted scalar (r'\'', String, 'single-quoted-scalar'), # a double-quoted scalar (r'\"', String, 'double-quoted-scalar'), ], # the content of a flow collection 'flow-collection': [ # whitespaces (r'[ ]+', Text), # line breaks (r'\n+', Text), # a comment (r'#[^\n]*', Comment.Single), # simple indicators (r'[?:,]', Punctuation.Indicator), # tags, anchors and aliases include('descriptors'), # nested collections and quoted scalars include('flow-nodes'), # a plain scalar (r'(?=[^ \t\n\r\f\v?:,\[\]{}#&*!|>\'"%@`])', something(Name.Variable), 'plain-scalar-in-flow-context'), ], # a flow sequence indicated by '[' and ']' 'flow-sequence': [ # include flow collection rules include('flow-collection'), # the closing indicator (r'\]', Punctuation.Indicator, '#pop'), ], # a flow mapping indicated by '{' and '}' 'flow-mapping': [ # include flow collection rules include('flow-collection'), # the closing indicator (r'\}', Punctuation.Indicator, '#pop'), ], # block scalar lines 'block-scalar-content': [ # line break (r'\n', Text), # empty line (r'^[ ]+$', parse_block_scalar_empty_line(Text, Name.Constant)), # indentation spaces (we may leave the state here) (r'^[ ]*', parse_block_scalar_indent(Text)), # line content (r'[^\n\r\f\v]+', Name.Constant), ], # the content of a literal or folded scalar 'block-scalar-header': [ # indentation indicator followed by chomping flag (r'([1-9])?[+-]?(?=[ ]|$)', set_block_scalar_indent(Punctuation.Indicator), 'ignored-line'), # chomping flag followed by indentation indicator (r'[+-]?([1-9])?(?=[ ]|$)', set_block_scalar_indent(Punctuation.Indicator), 'ignored-line'), ], # ignored and regular whitespaces in quoted scalars 'quoted-scalar-whitespaces': [ # leading and trailing whitespaces are ignored (r'^[ ]+', Text), (r'[ ]+$', Text), # line breaks are ignored (r'\n+', Text), # other whitespaces are a part of the value (r'[ ]+', Name.Variable), ], # single-quoted scalars 'single-quoted-scalar': [ # include whitespace and line break rules include('quoted-scalar-whitespaces'), # escaping of the quote character (r'\'\'', String.Escape), # regular non-whitespace characters (r'[^ \t\n\r\f\v\']+', String), # the closing quote (r'\'', String, '#pop'), ], # double-quoted scalars 'double-quoted-scalar': [ # include whitespace and line break rules include('quoted-scalar-whitespaces'), # escaping of special characters (r'\\[0abt\tn\nvfre "\\N_LP]', String), # escape codes (r'\\(?:x[0-9A-Fa-f]{2}|u[0-9A-Fa-f]{4}|U[0-9A-Fa-f]{8})', String.Escape), # regular non-whitespace characters (r'[^ \t\n\r\f\v\"\\]+', String), # the closing quote (r'"', String, '#pop'), ], # the beginning of a new line while scanning a plain scalar 'plain-scalar-in-block-context-new-line': [ # empty lines (r'^[ ]+$', Text), # line breaks (r'\n+', Text), # document start and document end indicators (r'^(?=---|\.\.\.)', something(Name.Namespace), '#pop:3'), # indentation spaces (we may leave the block line state here) (r'^[ 
]*', parse_plain_scalar_indent(Text), '#pop'), ], # a plain scalar in the block context 'plain-scalar-in-block-context': [ # the scalar ends with the ':' indicator (r'[ ]*(?=:[ ]|:$)', something(Text), '#pop'), # the scalar ends with whitespaces followed by a comment (r'[ ]+(?=#)', Text, '#pop'), # trailing whitespaces are ignored (r'[ ]+$', Text), # line breaks are ignored (r'\n+', Text, 'plain-scalar-in-block-context-new-line'), # other whitespaces are a part of the value (r'[ ]+', Literal.Scalar.Plain), # regular non-whitespace characters (r'(?::(?![ \t\n\r\f\v])|[^ \t\n\r\f\v:])+', Literal.Scalar.Plain), ], # a plain scalar is the flow context 'plain-scalar-in-flow-context': [ # the scalar ends with an indicator character (r'[ ]*(?=[,:?\[\]{}])', something(Text), '#pop'), # the scalar ends with a comment (r'[ ]+(?=#)', Text, '#pop'), # leading and trailing whitespaces are ignored (r'^[ ]+', Text), (r'[ ]+$', Text), # line breaks are ignored (r'\n+', Text), # other whitespaces are a part of the value (r'[ ]+', Name.Variable), # regular non-whitespace characters (r'[^ \t\n\r\f\v,:?\[\]{}]+', Name.Variable), ], } def get_tokens_unprocessed(self, text=None, context=None): if context is None: context = YamlLexerContext(text, 0) return super(YamlLexer, self).get_tokens_unprocessed(text, context) class JsonLexer(RegexLexer): """ For JSON data structures. .. versionadded:: 1.5 """ name = 'JSON' aliases = ['json'] filenames = ['*.json'] mimetypes = ['application/json'] flags = re.DOTALL # integer part of a number int_part = r'-?(0|[1-9]\d*)' # fractional part of a number frac_part = r'\.\d+' # exponential part of a number exp_part = r'[eE](\+|-)?\d+' tokens = { 'whitespace': [ (r'\s+', Text), ], # represents a simple terminal value 'simplevalue': [ (r'(true|false|null)\b', Keyword.Constant), (('%(int_part)s(%(frac_part)s%(exp_part)s|' '%(exp_part)s|%(frac_part)s)') % vars(), Number.Float), (int_part, Number.Integer), (r'"(\\\\|\\"|[^"])*"', String.Double), ], # the right hand side of an object, after the attribute name 'objectattribute': [ include('value'), (r':', Punctuation), # comma terminates the attribute but expects more (r',', Punctuation, '#pop'), # a closing bracket terminates the entire object, so pop twice (r'}', Punctuation, ('#pop', '#pop')), ], # a json object - { attr, attr, ... } 'objectvalue': [ include('whitespace'), (r'"(\\\\|\\"|[^"])*"', Name.Tag, 'objectattribute'), (r'}', Punctuation, '#pop'), ], # json array - [ value, value, ... } 'arrayvalue': [ include('whitespace'), include('value'), (r',', Punctuation), (r']', Punctuation, '#pop'), ], # a json value - either a simple value or a complex value (object or array) 'value': [ include('whitespace'), include('simplevalue'), (r'{', Punctuation, 'objectvalue'), (r'\[', Punctuation, 'arrayvalue'), ], # the root of a json document whould be a value 'root': [ include('value'), ], }
bsd-2-clause
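Both lexers above plug into the usual Pygments pipeline. A small sketch highlighting YAML and JSON strings to ANSI terminal output (the sample documents are placeholders):

# Highlighting YAML and JSON with the lexers defined above.
from pygments import highlight
from pygments.formatters import TerminalFormatter
from pygments.lexers.data import JsonLexer, YamlLexer

yaml_source = "name: example\nitems:\n  - 1\n  - 2\n"
json_source = '{"name": "example", "items": [1, 2]}'

print(highlight(yaml_source, YamlLexer(), TerminalFormatter()))
print(highlight(json_source, JsonLexer(), TerminalFormatter()))

# The same lexers can also be resolved by alias, e.g.
# pygments.lexers.get_lexer_by_name('yaml') or get_lexer_by_name('json').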
MQQiang/kbengine
kbe/src/lib/python/Lib/threading.py
61
48900
"""Thread module emulating a subset of Java's threading model.""" import sys as _sys import _thread try: from time import monotonic as _time except ImportError: from time import time as _time from traceback import format_exc as _format_exc from _weakrefset import WeakSet from itertools import islice as _islice try: from _collections import deque as _deque except ImportError: from collections import deque as _deque # Note regarding PEP 8 compliant names # This threading model was originally inspired by Java, and inherited # the convention of camelCase function and method names from that # language. Those original names are not in any imminent danger of # being deprecated (even for Py3k),so this module provides them as an # alias for the PEP 8 compliant names # Note that using the new PEP 8 compliant names facilitates substitution # with the multiprocessing module, which doesn't provide the old # Java inspired names. __all__ = ['active_count', 'Condition', 'current_thread', 'enumerate', 'Event', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Thread', 'Barrier', 'Timer', 'ThreadError', 'setprofile', 'settrace', 'local', 'stack_size'] # Rename some stuff so "from threading import *" is safe _start_new_thread = _thread.start_new_thread _allocate_lock = _thread.allocate_lock _set_sentinel = _thread._set_sentinel get_ident = _thread.get_ident ThreadError = _thread.error try: _CRLock = _thread.RLock except AttributeError: _CRLock = None TIMEOUT_MAX = _thread.TIMEOUT_MAX del _thread # Support for profile and trace hooks _profile_hook = None _trace_hook = None def setprofile(func): """Set a profile function for all threads started from the threading module. The func will be passed to sys.setprofile() for each thread, before its run() method is called. """ global _profile_hook _profile_hook = func def settrace(func): """Set a trace function for all threads started from the threading module. The func will be passed to sys.settrace() for each thread, before its run() method is called. """ global _trace_hook _trace_hook = func # Synchronization classes Lock = _allocate_lock def RLock(*args, **kwargs): """Factory function that returns a new reentrant lock. A reentrant lock must be released by the thread that acquired it. Once a thread has acquired a reentrant lock, the same thread may acquire it again without blocking; the thread must release it once for each time it has acquired it. """ if _CRLock is None: return _PyRLock(*args, **kwargs) return _CRLock(*args, **kwargs) class _RLock: """This class implements reentrant lock objects. A reentrant lock must be released by the thread that acquired it. Once a thread has acquired a reentrant lock, the same thread may acquire it again without blocking; the thread must release it once for each time it has acquired it. """ def __init__(self): self._block = _allocate_lock() self._owner = None self._count = 0 def __repr__(self): owner = self._owner try: owner = _active[owner].name except KeyError: pass return "<%s owner=%r count=%d>" % ( self.__class__.__name__, owner, self._count) def acquire(self, blocking=True, timeout=-1): """Acquire a lock, blocking or non-blocking. When invoked without arguments: if this thread already owns the lock, increment the recursion level by one, and return immediately. Otherwise, if another thread owns the lock, block until the lock is unlocked. Once the lock is unlocked (not owned by any thread), then grab ownership, set the recursion level to one, and return. 
If more than one thread is blocked waiting until the lock is unlocked, only one at a time will be able to grab ownership of the lock. There is no return value in this case. When invoked with the blocking argument set to true, do the same thing as when called without arguments, and return true. When invoked with the blocking argument set to false, do not block. If a call without an argument would block, return false immediately; otherwise, do the same thing as when called without arguments, and return true. When invoked with the floating-point timeout argument set to a positive value, block for at most the number of seconds specified by timeout and as long as the lock cannot be acquired. Return true if the lock has been acquired, false if the timeout has elapsed. """ me = get_ident() if self._owner == me: self._count += 1 return 1 rc = self._block.acquire(blocking, timeout) if rc: self._owner = me self._count = 1 return rc __enter__ = acquire def release(self): """Release a lock, decrementing the recursion level. If after the decrement it is zero, reset the lock to unlocked (not owned by any thread), and if any other threads are blocked waiting for the lock to become unlocked, allow exactly one of them to proceed. If after the decrement the recursion level is still nonzero, the lock remains locked and owned by the calling thread. Only call this method when the calling thread owns the lock. A RuntimeError is raised if this method is called when the lock is unlocked. There is no return value. """ if self._owner != get_ident(): raise RuntimeError("cannot release un-acquired lock") self._count = count = self._count - 1 if not count: self._owner = None self._block.release() def __exit__(self, t, v, tb): self.release() # Internal methods used by condition variables def _acquire_restore(self, state): self._block.acquire() self._count, self._owner = state def _release_save(self): if self._count == 0: raise RuntimeError("cannot release un-acquired lock") count = self._count self._count = 0 owner = self._owner self._owner = None self._block.release() return (count, owner) def _is_owned(self): return self._owner == get_ident() _PyRLock = _RLock class Condition: """Class that implements a condition variable. A condition variable allows one or more threads to wait until they are notified by another thread. If the lock argument is given and not None, it must be a Lock or RLock object, and it is used as the underlying lock. Otherwise, a new RLock object is created and used as the underlying lock. """ def __init__(self, lock=None): if lock is None: lock = RLock() self._lock = lock # Export the lock's acquire() and release() methods self.acquire = lock.acquire self.release = lock.release # If the lock defines _release_save() and/or _acquire_restore(), # these override the default implementations (which just call # release() and acquire() on the lock). Ditto for _is_owned(). 
try: self._release_save = lock._release_save except AttributeError: pass try: self._acquire_restore = lock._acquire_restore except AttributeError: pass try: self._is_owned = lock._is_owned except AttributeError: pass self._waiters = _deque() def __enter__(self): return self._lock.__enter__() def __exit__(self, *args): return self._lock.__exit__(*args) def __repr__(self): return "<Condition(%s, %d)>" % (self._lock, len(self._waiters)) def _release_save(self): self._lock.release() # No state to save def _acquire_restore(self, x): self._lock.acquire() # Ignore saved state def _is_owned(self): # Return True if lock is owned by current_thread. # This method is called only if __lock doesn't have _is_owned(). if self._lock.acquire(0): self._lock.release() return False else: return True def wait(self, timeout=None): """Wait until notified or until a timeout occurs. If the calling thread has not acquired the lock when this method is called, a RuntimeError is raised. This method releases the underlying lock, and then blocks until it is awakened by a notify() or notify_all() call for the same condition variable in another thread, or until the optional timeout occurs. Once awakened or timed out, it re-acquires the lock and returns. When the timeout argument is present and not None, it should be a floating point number specifying a timeout for the operation in seconds (or fractions thereof). When the underlying lock is an RLock, it is not released using its release() method, since this may not actually unlock the lock when it was acquired multiple times recursively. Instead, an internal interface of the RLock class is used, which really unlocks it even when it has been recursively acquired several times. Another internal interface is then used to restore the recursion level when the lock is reacquired. """ if not self._is_owned(): raise RuntimeError("cannot wait on un-acquired lock") waiter = _allocate_lock() waiter.acquire() self._waiters.append(waiter) saved_state = self._release_save() gotit = False try: # restore state no matter what (e.g., KeyboardInterrupt) if timeout is None: waiter.acquire() gotit = True else: if timeout > 0: gotit = waiter.acquire(True, timeout) else: gotit = waiter.acquire(False) return gotit finally: self._acquire_restore(saved_state) if not gotit: try: self._waiters.remove(waiter) except ValueError: pass def wait_for(self, predicate, timeout=None): """Wait until a condition evaluates to True. predicate should be a callable which result will be interpreted as a boolean value. A timeout may be provided giving the maximum time to wait. """ endtime = None waittime = timeout result = predicate() while not result: if waittime is not None: if endtime is None: endtime = _time() + waittime else: waittime = endtime - _time() if waittime <= 0: break self.wait(waittime) result = predicate() return result def notify(self, n=1): """Wake up one or more threads waiting on this condition, if any. If the calling thread has not acquired the lock when this method is called, a RuntimeError is raised. This method wakes up at most n of the threads waiting for the condition variable; it is a no-op if no threads are waiting. """ if not self._is_owned(): raise RuntimeError("cannot notify on un-acquired lock") all_waiters = self._waiters waiters_to_notify = _deque(_islice(all_waiters, n)) if not waiters_to_notify: return for waiter in waiters_to_notify: waiter.release() try: all_waiters.remove(waiter) except ValueError: pass def notify_all(self): """Wake up all threads waiting on this condition. 
If the calling thread has not acquired the lock when this method is called, a RuntimeError is raised. """ self.notify(len(self._waiters)) notifyAll = notify_all class Semaphore: """This class implements semaphore objects. Semaphores manage a counter representing the number of release() calls minus the number of acquire() calls, plus an initial value. The acquire() method blocks if necessary until it can return without making the counter negative. If not given, value defaults to 1. """ # After Tim Peters' semaphore class, but not quite the same (no maximum) def __init__(self, value=1): if value < 0: raise ValueError("semaphore initial value must be >= 0") self._cond = Condition(Lock()) self._value = value def acquire(self, blocking=True, timeout=None): """Acquire a semaphore, decrementing the internal counter by one. When invoked without arguments: if the internal counter is larger than zero on entry, decrement it by one and return immediately. If it is zero on entry, block, waiting until some other thread has called release() to make it larger than zero. This is done with proper interlocking so that if multiple acquire() calls are blocked, release() will wake exactly one of them up. The implementation may pick one at random, so the order in which blocked threads are awakened should not be relied on. There is no return value in this case. When invoked with blocking set to true, do the same thing as when called without arguments, and return true. When invoked with blocking set to false, do not block. If a call without an argument would block, return false immediately; otherwise, do the same thing as when called without arguments, and return true. When invoked with a timeout other than None, it will block for at most timeout seconds. If acquire does not complete successfully in that interval, return false. Return true otherwise. """ if not blocking and timeout is not None: raise ValueError("can't specify timeout for non-blocking acquire") rc = False endtime = None with self._cond: while self._value == 0: if not blocking: break if timeout is not None: if endtime is None: endtime = _time() + timeout else: timeout = endtime - _time() if timeout <= 0: break self._cond.wait(timeout) else: self._value -= 1 rc = True return rc __enter__ = acquire def release(self): """Release a semaphore, incrementing the internal counter by one. When the counter is zero on entry and another thread is waiting for it to become larger than zero again, wake up that thread. """ with self._cond: self._value += 1 self._cond.notify() def __exit__(self, t, v, tb): self.release() class BoundedSemaphore(Semaphore): """Implements a bounded semaphore. A bounded semaphore checks to make sure its current value doesn't exceed its initial value. If it does, ValueError is raised. In most situations semaphores are used to guard resources with limited capacity. If the semaphore is released too many times it's a sign of a bug. If not given, value defaults to 1. Like regular semaphores, bounded semaphores manage a counter representing the number of release() calls minus the number of acquire() calls, plus an initial value. The acquire() method blocks if necessary until it can return without making the counter negative. If not given, value defaults to 1. """ def __init__(self, value=1): Semaphore.__init__(self, value) self._initial_value = value def release(self): """Release a semaphore, incrementing the internal counter by one. 
When the counter is zero on entry and another thread is waiting for it to become larger than zero again, wake up that thread. If the number of releases exceeds the number of acquires, raise a ValueError. """ with self._cond: if self._value >= self._initial_value: raise ValueError("Semaphore released too many times") self._value += 1 self._cond.notify() class Event: """Class implementing event objects. Events manage a flag that can be set to true with the set() method and reset to false with the clear() method. The wait() method blocks until the flag is true. The flag is initially false. """ # After Tim Peters' event class (without is_posted()) def __init__(self): self._cond = Condition(Lock()) self._flag = False def _reset_internal_locks(self): # private! called by Thread._reset_internal_locks by _after_fork() self._cond.__init__() def is_set(self): """Return true if and only if the internal flag is true.""" return self._flag isSet = is_set def set(self): """Set the internal flag to true. All threads waiting for it to become true are awakened. Threads that call wait() once the flag is true will not block at all. """ self._cond.acquire() try: self._flag = True self._cond.notify_all() finally: self._cond.release() def clear(self): """Reset the internal flag to false. Subsequently, threads calling wait() will block until set() is called to set the internal flag to true again. """ self._cond.acquire() try: self._flag = False finally: self._cond.release() def wait(self, timeout=None): """Block until the internal flag is true. If the internal flag is true on entry, return immediately. Otherwise, block until another thread calls set() to set the flag to true, or until the optional timeout occurs. When the timeout argument is present and not None, it should be a floating point number specifying a timeout for the operation in seconds (or fractions thereof). This method returns the internal flag on exit, so it will always return True except if a timeout is given and the operation times out. """ self._cond.acquire() try: signaled = self._flag if not signaled: signaled = self._cond.wait(timeout) return signaled finally: self._cond.release() # A barrier class. Inspired in part by the pthread_barrier_* api and # the CyclicBarrier class from Java. See # http://sourceware.org/pthreads-win32/manual/pthread_barrier_init.html and # http://java.sun.com/j2se/1.5.0/docs/api/java/util/concurrent/ # CyclicBarrier.html # for information. # We maintain two main states, 'filling' and 'draining' enabling the barrier # to be cyclic. Threads are not allowed into it until it has fully drained # since the previous cycle. In addition, a 'resetting' state exists which is # similar to 'draining' except that threads leave with a BrokenBarrierError, # and a 'broken' state in which all threads get the exception. class Barrier: """Implements a Barrier. Useful for synchronizing a fixed number of threads at known synchronization points. Threads block on 'wait()' and are simultaneously once they have all made that call. """ def __init__(self, parties, action=None, timeout=None): """Create a barrier, initialised to 'parties' threads. 'action' is a callable which, when supplied, will be called by one of the threads after they have all entered the barrier and just prior to releasing them all. If a 'timeout' is provided, it is uses as the default for all subsequent 'wait()' calls. 
""" self._cond = Condition(Lock()) self._action = action self._timeout = timeout self._parties = parties self._state = 0 #0 filling, 1, draining, -1 resetting, -2 broken self._count = 0 def wait(self, timeout=None): """Wait for the barrier. When the specified number of threads have started waiting, they are all simultaneously awoken. If an 'action' was provided for the barrier, one of the threads will have executed that callback prior to returning. Returns an individual index number from 0 to 'parties-1'. """ if timeout is None: timeout = self._timeout with self._cond: self._enter() # Block while the barrier drains. index = self._count self._count += 1 try: if index + 1 == self._parties: # We release the barrier self._release() else: # We wait until someone releases us self._wait(timeout) return index finally: self._count -= 1 # Wake up any threads waiting for barrier to drain. self._exit() # Block until the barrier is ready for us, or raise an exception # if it is broken. def _enter(self): while self._state in (-1, 1): # It is draining or resetting, wait until done self._cond.wait() #see if the barrier is in a broken state if self._state < 0: raise BrokenBarrierError assert self._state == 0 # Optionally run the 'action' and release the threads waiting # in the barrier. def _release(self): try: if self._action: self._action() # enter draining state self._state = 1 self._cond.notify_all() except: #an exception during the _action handler. Break and reraise self._break() raise # Wait in the barrier until we are relased. Raise an exception # if the barrier is reset or broken. def _wait(self, timeout): if not self._cond.wait_for(lambda : self._state != 0, timeout): #timed out. Break the barrier self._break() raise BrokenBarrierError if self._state < 0: raise BrokenBarrierError assert self._state == 1 # If we are the last thread to exit the barrier, signal any threads # waiting for the barrier to drain. def _exit(self): if self._count == 0: if self._state in (-1, 1): #resetting or draining self._state = 0 self._cond.notify_all() def reset(self): """Reset the barrier to the initial state. Any threads currently waiting will get the BrokenBarrier exception raised. """ with self._cond: if self._count > 0: if self._state == 0: #reset the barrier, waking up threads self._state = -1 elif self._state == -2: #was broken, set it to reset state #which clears when the last thread exits self._state = -1 else: self._state = 0 self._cond.notify_all() def abort(self): """Place the barrier into a 'broken' state. Useful in case of error. Any currently waiting threads and threads attempting to 'wait()' will have BrokenBarrierError raised. """ with self._cond: self._break() def _break(self): # An internal error was detected. The barrier is set to # a broken state all parties awakened. self._state = -2 self._cond.notify_all() @property def parties(self): """Return the number of threads required to trip the barrier.""" return self._parties @property def n_waiting(self): """Return the number of threads currently waiting at the barrier.""" # We don't need synchronization here since this is an ephemeral result # anyway. It returns the correct value in the steady state. 
if self._state == 0: return self._count return 0 @property def broken(self): """Return True if the barrier is in a broken state.""" return self._state == -2 # exception raised by the Barrier class class BrokenBarrierError(RuntimeError): pass # Helper to generate new thread names _counter = 0 def _newname(template="Thread-%d"): global _counter _counter += 1 return template % _counter # Active thread administration _active_limbo_lock = _allocate_lock() _active = {} # maps thread id to Thread object _limbo = {} _dangling = WeakSet() # Main class for threads class Thread: """A class that represents a thread of control. This class can be safely subclassed in a limited fashion. There are two ways to specify the activity: by passing a callable object to the constructor, or by overriding the run() method in a subclass. """ __initialized = False # Need to store a reference to sys.exc_info for printing # out exceptions when a thread tries to use a global var. during interp. # shutdown and thus raises an exception about trying to perform some # operation on/with a NoneType __exc_info = _sys.exc_info # Keep sys.exc_clear too to clear the exception just before # allowing .join() to return. #XXX __exc_clear = _sys.exc_clear def __init__(self, group=None, target=None, name=None, args=(), kwargs=None, *, daemon=None): """This constructor should always be called with keyword arguments. Arguments are: *group* should be None; reserved for future extension when a ThreadGroup class is implemented. *target* is the callable object to be invoked by the run() method. Defaults to None, meaning nothing is called. *name* is the thread name. By default, a unique name is constructed of the form "Thread-N" where N is a small decimal number. *args* is the argument tuple for the target invocation. Defaults to (). *kwargs* is a dictionary of keyword arguments for the target invocation. Defaults to {}. If a subclass overrides the constructor, it must make sure to invoke the base class constructor (Thread.__init__()) before doing anything else to the thread. """ assert group is None, "group argument must be None for now" if kwargs is None: kwargs = {} self._target = target self._name = str(name or _newname()) self._args = args self._kwargs = kwargs if daemon is not None: self._daemonic = daemon else: self._daemonic = current_thread().daemon self._ident = None self._tstate_lock = None self._started = Event() self._is_stopped = False self._initialized = True # sys.stderr is not stored in the class like # sys.exc_info since it can be changed between instances self._stderr = _sys.stderr # For debugging and _after_fork() _dangling.add(self) def _reset_internal_locks(self, is_alive): # private! Called by _after_fork() to reset our internal locks as # they may be in an invalid state leading to a deadlock or crash. self._started._reset_internal_locks() if is_alive: self._set_tstate_lock() else: # The thread isn't alive after fork: it doesn't have a tstate # anymore. self._is_stopped = True self._tstate_lock = None def __repr__(self): assert self._initialized, "Thread.__init__() was not called" status = "initial" if self._started.is_set(): status = "started" self.is_alive() # easy way to get ._is_stopped set when appropriate if self._is_stopped: status = "stopped" if self._daemonic: status += " daemon" if self._ident is not None: status += " %s" % self._ident return "<%s(%s, %s)>" % (self.__class__.__name__, self._name, status) def start(self): """Start the thread's activity. It must be called at most once per thread object. 
It arranges for the object's run() method to be invoked in a separate thread of control. This method will raise a RuntimeError if called more than once on the same thread object. """ if not self._initialized: raise RuntimeError("thread.__init__() not called") if self._started.is_set(): raise RuntimeError("threads can only be started once") with _active_limbo_lock: _limbo[self] = self try: _start_new_thread(self._bootstrap, ()) except Exception: with _active_limbo_lock: del _limbo[self] raise self._started.wait() def run(self): """Method representing the thread's activity. You may override this method in a subclass. The standard run() method invokes the callable object passed to the object's constructor as the target argument, if any, with sequential and keyword arguments taken from the args and kwargs arguments, respectively. """ try: if self._target: self._target(*self._args, **self._kwargs) finally: # Avoid a refcycle if the thread is running a function with # an argument that has a member that points to the thread. del self._target, self._args, self._kwargs def _bootstrap(self): # Wrapper around the real bootstrap code that ignores # exceptions during interpreter cleanup. Those typically # happen when a daemon thread wakes up at an unfortunate # moment, finds the world around it destroyed, and raises some # random exception *** while trying to report the exception in # _bootstrap_inner() below ***. Those random exceptions # don't help anybody, and they confuse users, so we suppress # them. We suppress them only when it appears that the world # indeed has already been destroyed, so that exceptions in # _bootstrap_inner() during normal business hours are properly # reported. Also, we only suppress them for daemonic threads; # if a non-daemonic encounters this, something else is wrong. try: self._bootstrap_inner() except: if self._daemonic and _sys is None: return raise def _set_ident(self): self._ident = get_ident() def _set_tstate_lock(self): """ Set a lock object which will be released by the interpreter when the underlying thread state (see pystate.h) gets deleted. """ self._tstate_lock = _set_sentinel() self._tstate_lock.acquire() def _bootstrap_inner(self): try: self._set_ident() self._set_tstate_lock() self._started.set() with _active_limbo_lock: _active[self._ident] = self del _limbo[self] if _trace_hook: _sys.settrace(_trace_hook) if _profile_hook: _sys.setprofile(_profile_hook) try: self.run() except SystemExit: pass except: # If sys.stderr is no more (most likely from interpreter # shutdown) use self._stderr. Otherwise still use sys (as in # _sys) in case sys.stderr was redefined since the creation of # self. if _sys: _sys.stderr.write("Exception in thread %s:\n%s\n" % (self.name, _format_exc())) else: # Do the best job possible w/o a huge amt. 
of code to # approximate a traceback (code ideas from # Lib/traceback.py) exc_type, exc_value, exc_tb = self._exc_info() try: print(( "Exception in thread " + self.name + " (most likely raised during interpreter shutdown):"), file=self._stderr) print(( "Traceback (most recent call last):"), file=self._stderr) while exc_tb: print(( ' File "%s", line %s, in %s' % (exc_tb.tb_frame.f_code.co_filename, exc_tb.tb_lineno, exc_tb.tb_frame.f_code.co_name)), file=self._stderr) exc_tb = exc_tb.tb_next print(("%s: %s" % (exc_type, exc_value)), file=self._stderr) # Make sure that exc_tb gets deleted since it is a memory # hog; deleting everything else is just for thoroughness finally: del exc_type, exc_value, exc_tb finally: # Prevent a race in # test_threading.test_no_refcycle_through_target when # the exception keeps the target alive past when we # assert that it's dead. #XXX self.__exc_clear() pass finally: with _active_limbo_lock: try: # We don't call self._delete() because it also # grabs _active_limbo_lock. del _active[get_ident()] except: pass def _stop(self): # After calling ._stop(), .is_alive() returns False and .join() returns # immediately. ._tstate_lock must be released before calling ._stop(). # # Normal case: C code at the end of the thread's life # (release_sentinel in _threadmodule.c) releases ._tstate_lock, and # that's detected by our ._wait_for_tstate_lock(), called by .join() # and .is_alive(). Any number of threads _may_ call ._stop() # simultaneously (for example, if multiple threads are blocked in # .join() calls), and they're not serialized. That's harmless - # they'll just make redundant rebindings of ._is_stopped and # ._tstate_lock. Obscure: we rebind ._tstate_lock last so that the # "assert self._is_stopped" in ._wait_for_tstate_lock() always works # (the assert is executed only if ._tstate_lock is None). # # Special case: _main_thread releases ._tstate_lock via this # module's _shutdown() function. lock = self._tstate_lock if lock is not None: assert not lock.locked() self._is_stopped = True self._tstate_lock = None def _delete(self): "Remove current thread from the dict of currently running threads." # Notes about running with _dummy_thread: # # Must take care to not raise an exception if _dummy_thread is being # used (and thus this module is being used as an instance of # dummy_threading). _dummy_thread.get_ident() always returns -1 since # there is only one thread if _dummy_thread is being used. Thus # len(_active) is always <= 1 here, and any Thread instance created # overwrites the (if any) thread currently registered in _active. # # An instance of _MainThread is always created by 'threading'. This # gets overwritten the instant an instance of Thread is created; both # threads return -1 from _dummy_thread.get_ident() and thus have the # same key in the dict. So when the _MainThread instance created by # 'threading' tries to clean itself up when atexit calls this method # it gets a KeyError if another Thread instance was created. # # This all means that KeyError from trying to delete something from # _active if dummy_threading is being used is a red herring. But # since it isn't if dummy_threading is *not* being used then don't # hide the exception. try: with _active_limbo_lock: del _active[get_ident()] # There must not be any python code between the previous line # and after the lock is released. Otherwise a tracing function # could try to acquire the lock again in the same thread, (in # current_thread()), and would block. 
except KeyError: if 'dummy_threading' not in _sys.modules: raise def join(self, timeout=None): """Wait until the thread terminates. This blocks the calling thread until the thread whose join() method is called terminates -- either normally or through an unhandled exception or until the optional timeout occurs. When the timeout argument is present and not None, it should be a floating point number specifying a timeout for the operation in seconds (or fractions thereof). As join() always returns None, you must call isAlive() after join() to decide whether a timeout happened -- if the thread is still alive, the join() call timed out. When the timeout argument is not present or None, the operation will block until the thread terminates. A thread can be join()ed many times. join() raises a RuntimeError if an attempt is made to join the current thread as that would cause a deadlock. It is also an error to join() a thread before it has been started and attempts to do so raises the same exception. """ if not self._initialized: raise RuntimeError("Thread.__init__() not called") if not self._started.is_set(): raise RuntimeError("cannot join thread before it is started") if self is current_thread(): raise RuntimeError("cannot join current thread") if timeout is None: self._wait_for_tstate_lock() else: # the behavior of a negative timeout isn't documented, but # historically .join(timeout=x) for x<0 has acted as if timeout=0 self._wait_for_tstate_lock(timeout=max(timeout, 0)) def _wait_for_tstate_lock(self, block=True, timeout=-1): # Issue #18808: wait for the thread state to be gone. # At the end of the thread's life, after all knowledge of the thread # is removed from C data structures, C code releases our _tstate_lock. # This method passes its arguments to _tstate_lock.aquire(). # If the lock is acquired, the C code is done, and self._stop() is # called. That sets ._is_stopped to True, and ._tstate_lock to None. lock = self._tstate_lock if lock is None: # already determined that the C code is done assert self._is_stopped elif lock.acquire(block, timeout): lock.release() self._stop() @property def name(self): """A string used for identification purposes only. It has no semantics. Multiple threads may be given the same name. The initial name is set by the constructor. """ assert self._initialized, "Thread.__init__() not called" return self._name @name.setter def name(self, name): assert self._initialized, "Thread.__init__() not called" self._name = str(name) @property def ident(self): """Thread identifier of this thread or None if it has not been started. This is a nonzero integer. See the thread.get_ident() function. Thread identifiers may be recycled when a thread exits and another thread is created. The identifier is available even after the thread has exited. """ assert self._initialized, "Thread.__init__() not called" return self._ident def is_alive(self): """Return whether the thread is alive. This method returns True just before the run() method starts until just after the run() method terminates. The module function enumerate() returns a list of all alive threads. """ assert self._initialized, "Thread.__init__() not called" if self._is_stopped or not self._started.is_set(): return False self._wait_for_tstate_lock(False) return not self._is_stopped isAlive = is_alive @property def daemon(self): """A boolean value indicating whether this thread is a daemon thread. This must be set before start() is called, otherwise RuntimeError is raised. 
Its initial value is inherited from the creating thread; the main thread is not a daemon thread and therefore all threads created in the main thread default to daemon = False. The entire Python program exits when no alive non-daemon threads are left. """ assert self._initialized, "Thread.__init__() not called" return self._daemonic @daemon.setter def daemon(self, daemonic): if not self._initialized: raise RuntimeError("Thread.__init__() not called") if self._started.is_set(): raise RuntimeError("cannot set daemon status of active thread") self._daemonic = daemonic def isDaemon(self): return self.daemon def setDaemon(self, daemonic): self.daemon = daemonic def getName(self): return self.name def setName(self, name): self.name = name # The timer class was contributed by Itamar Shtull-Trauring class Timer(Thread): """Call a function after a specified number of seconds: t = Timer(30.0, f, args=None, kwargs=None) t.start() t.cancel() # stop the timer's action if it's still waiting """ def __init__(self, interval, function, args=None, kwargs=None): Thread.__init__(self) self.interval = interval self.function = function self.args = args if args is not None else [] self.kwargs = kwargs if kwargs is not None else {} self.finished = Event() def cancel(self): """Stop the timer if it hasn't finished yet.""" self.finished.set() def run(self): self.finished.wait(self.interval) if not self.finished.is_set(): self.function(*self.args, **self.kwargs) self.finished.set() # Special thread class to represent the main thread # This is garbage collected through an exit handler class _MainThread(Thread): def __init__(self): Thread.__init__(self, name="MainThread", daemon=False) self._set_tstate_lock() self._started.set() self._set_ident() with _active_limbo_lock: _active[self._ident] = self # Dummy thread class to represent threads not started here. # These aren't garbage collected when they die, nor can they be waited for. # If they invoke anything in threading.py that calls current_thread(), they # leave an entry in the _active dict forever after. # Their purpose is to return *something* from current_thread(). # They are marked as daemon threads so we won't wait for them # when we exit (conform previous semantics). class _DummyThread(Thread): def __init__(self): Thread.__init__(self, name=_newname("Dummy-%d"), daemon=True) self._started.set() self._set_ident() with _active_limbo_lock: _active[self._ident] = self def _stop(self): pass def join(self, timeout=None): assert False, "cannot join a dummy thread" # Global API functions def current_thread(): """Return the current Thread object, corresponding to the caller's thread of control. If the caller's thread of control was not created through the threading module, a dummy thread object with limited functionality is returned. """ try: return _active[get_ident()] except KeyError: return _DummyThread() currentThread = current_thread def active_count(): """Return the number of Thread objects currently alive. The returned count is equal to the length of the list returned by enumerate(). """ with _active_limbo_lock: return len(_active) + len(_limbo) activeCount = active_count def _enumerate(): # Same as enumerate(), but without the lock. Internal use only. return list(_active.values()) + list(_limbo.values()) def enumerate(): """Return a list of all Thread objects currently alive. The list includes daemonic threads, dummy thread objects created by current_thread(), and the main thread. It excludes terminated threads and threads that have not yet been started. 
""" with _active_limbo_lock: return list(_active.values()) + list(_limbo.values()) from _thread import stack_size # Create the main thread object, # and make it available for the interpreter # (Py_Main) as threading._shutdown. _main_thread = _MainThread() def _shutdown(): # Obscure: other threads may be waiting to join _main_thread. That's # dubious, but some code does it. We can't wait for C code to release # the main thread's tstate_lock - that won't happen until the interpreter # is nearly dead. So we release it here. Note that just calling _stop() # isn't enough: other threads may already be waiting on _tstate_lock. tlock = _main_thread._tstate_lock # The main thread isn't finished yet, so its thread state lock can't have # been released. assert tlock is not None assert tlock.locked() tlock.release() _main_thread._stop() t = _pickSomeNonDaemonThread() while t: t.join() t = _pickSomeNonDaemonThread() _main_thread._delete() def _pickSomeNonDaemonThread(): for t in enumerate(): if not t.daemon and t.is_alive(): return t return None def main_thread(): """Return the main thread object. In normal conditions, the main thread is the thread from which the Python interpreter was started. """ return _main_thread # get thread-local implementation, either from the thread # module, or from the python fallback try: from _thread import _local as local except ImportError: from _threading_local import local def _after_fork(): # This function is called by Python/ceval.c:PyEval_ReInitThreads which # is called from PyOS_AfterFork. Here we cleanup threading module state # that should not exist after a fork. # Reset _active_limbo_lock, in case we forked while the lock was held # by another (non-forked) thread. http://bugs.python.org/issue874900 global _active_limbo_lock, _main_thread _active_limbo_lock = _allocate_lock() # fork() only copied the current thread; clear references to others. new_active = {} current = current_thread() _main_thread = current with _active_limbo_lock: # Dangling thread instances must still have their locks reset, # because someone may join() them. threads = set(_enumerate()) threads.update(_dangling) for thread in threads: # Any lock/condition variable may be currently locked or in an # invalid state, so we reinitialize them. if thread is current: # There is only one active thread. We reset the ident to # its new value since it can have changed. thread._reset_internal_locks(True) ident = get_ident() thread._ident = ident new_active[ident] = thread else: # All the others are already stopped. thread._reset_internal_locks(False) thread._stop() _limbo.clear() _active.clear() _active.update(new_active) assert len(_active) == 1
lgpl-3.0
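A minimal usage sketch for the threading API shown in the file above (Thread.start/join, is_alive, Timer.cancel); this example is not part of the original module and only uses standard-library names.

```python
import threading
import time


def worker(label):
    # Runs in the spawned thread; sleeps briefly to simulate work.
    print("%s running in %s" % (label, threading.current_thread().name))
    time.sleep(0.1)


# start()/join() as documented above: a thread can be joined many times,
# but started only once.
t = threading.Thread(target=worker, args=("task",), name="worker-1")
t.start()
t.join(timeout=5.0)
print("alive after join:", t.is_alive())

# Timer defers the call; cancel() stops it if it has not fired yet.
timer = threading.Timer(0.2, worker, args=("timer task",))
timer.start()
timer.cancel()
```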
zzragida/PythonExamples
MemberShip/deploy/qa-taiwan/web/db/__init__.py
4
1563
# -*- coding:utf-8 -*-
from SQLRelay import PySQLRClient
from SQLRelay import PySQLRDB
from config import SQLRELAYS

INSTANCES = []
for sqlrelay in SQLRELAYS:
    INSTANCES.append([0, sqlrelay])


def sqlrelay_cursor():
    ''' Connect sqlrelay rdb '''
    info = sorted(INSTANCES, key=lambda x: x[0])[0]
    try:
        con = PySQLRDB.connect(
            info[1]['host'], info[1]['port'], '',
            info[1]['user'], info[1]['pass'], 0, 1)
        cur = con.cursor()
    except PySQLRDB.DatabaseError, e:
        raise
    info[0] += 1
    return con, cur


def sqlrelay_close(cur, con):
    ''' Close sqlrelay rdb '''
    if cur:
        cur.close()
        del cur
    if con:
        con.close()
        del con
    import gc; gc.collect()


def sqlrelay_client_cursor(debug=False):
    ''' Connect sqlrelay client '''
    info = sorted(INSTANCES, key=lambda x: x[0])[0]
    try:
        con = PySQLRClient.sqlrconnection(
            info[1]['host'], info[1]['port'], '',
            info[1]['user'], info[1]['pass'], 0, 1)
        cur = PySQLRClient.sqlrcursor(con)
        if debug:
            con.debugOn()
    except Exception, e:
        raise
    info[0] += 1
    return con, cur


def sqlrelay_client_close(cur, con):
    ''' Close sqlrelay client '''
    if cur:
        del cur
    if con:
        con.debugOff()
        con.endSession()
        del con
    import gc; gc.collect()
mit
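A hedged usage sketch for the helpers in db/__init__.py above; it assumes a `config` module exposing `SQLRELAYS` with host/port/user/pass entries and a reachable SQLRelay server, and the query string is purely illustrative.

```python
# Hypothetical caller of the helpers above (not part of the original file).
from db import sqlrelay_cursor, sqlrelay_close

con, cur = sqlrelay_cursor()
try:
    cur.execute("SELECT 1")   # PySQLRDB cursors follow the DB-API
    rows = cur.fetchall()
    print(rows)
finally:
    # Always return the connection, mirroring the cleanup helper above.
    sqlrelay_close(cur, con)
```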
algorythmic/bash-completion
test/t/unit/test_unit_count_args.py
2
2035
import pytest

from conftest import TestUnitBase, assert_bash_exec


@pytest.mark.bashcomp(
    cmd=None, ignore_env=r"^[+-](args|COMP_(WORDS|CWORD|LINE|POINT))="
)
class TestUnitCountArgs(TestUnitBase):
    def _test(self, *args, **kwargs):
        return self._test_unit("_count_args %s; echo $args", *args, **kwargs)

    def test_1(self, bash):
        assert_bash_exec(bash, "COMP_CWORD= _count_args >/dev/null")

    def test_2(self, bash):
        """a b| should set args to 1"""
        output = self._test(bash, "(a b)", 1, "a b", 3)
        assert output == "1"

    def test_3(self, bash):
        """a b|c should set args to 1"""
        output = self._test(bash, "(a bc)", 1, "a bc", 3)
        assert output == "1"

    def test_4(self, bash):
        """a b c| should set args to 2"""
        output = self._test(bash, "(a b c)", 2, "a b c", 4)
        assert output == "2"

    def test_5(self, bash):
        """a b| c should set args to 1"""
        output = self._test(bash, "(a b c)", 1, "a b c", 3)
        assert output == "1"

    def test_6(self, bash):
        """a b -c| d should set args to 2"""
        output = self._test(bash, "(a b -c d)", 2, "a b -c d", 6)
        assert output == "2"

    def test_7(self, bash):
        """a b -c d e| with -c arg excluded should set args to 2"""
        output = self._test(
            bash, "(a b -c d e)", 4, "a b -c d e", 10, arg='"" "@(-c|--foo)"'
        )
        assert output == "2"

    def test_8(self, bash):
        """a -b -c d e| with -c arg excluded and -b included should set
        args to 2"""
        output = self._test(
            bash,
            "(a -b -c d e)",
            4,
            "a -b -c d e",
            11,
            arg='"" "@(-c|--foo)" "-[b]"',
        )
        assert output == "2"

    def test_9(self, bash):
        """a -b -c d e| with -b included should set args to 3"""
        output = self._test(
            bash, "(a -b -c d e)", 4, "a -b -c d e", 11, arg='"" "" "-b"'
        )
        assert output == "3"
gpl-2.0
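A hypothetical extra case in the same style as the suite above, illustrating the (words, cword, line, point) parameters passed through the `_test` helper; it is not part of the original file, and the expected value follows the plain-word counting behaviour exercised by test_2 and test_4.

```python
    def test_hypothetical_plain_args(self, bash):
        """a b c d| should set args to 3 (hypothetical case, not in the
        original suite)"""
        # words=(a b c d), cursor on word index 3, line "a b c d", point 7
        output = self._test(bash, "(a b c d)", 3, "a b c d", 7)
        assert output == "3"
```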
gioman/QGIS
python/plugins/processing/gui/MultipleInputDialog.py
2
4243
# -*- coding: utf-8 -*-

"""
***************************************************************************
    MultipleInputDialog.py
    ---------------------
    Date                 : August 2012
    Copyright            : (C) 2012 by Victor Olaya
    Email                : volayaf at gmail dot com
***************************************************************************
*                                                                         *
*   This program is free software; you can redistribute it and/or modify  *
*   it under the terms of the GNU General Public License as published by  *
*   the Free Software Foundation; either version 2 of the License, or     *
*   (at your option) any later version.                                   *
*                                                                         *
***************************************************************************
"""
from builtins import range
from builtins import basestring

__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'

# This will get replaced with a git SHA1 when you do a git archive

__revision__ = '$Format:%H$'

import os

from qgis.PyQt import uic
from qgis.PyQt.QtCore import Qt
from qgis.PyQt.QtWidgets import QDialog, QAbstractItemView, QPushButton, QDialogButtonBox
from qgis.PyQt.QtGui import QStandardItemModel, QStandardItem

pluginPath = os.path.split(os.path.dirname(__file__))[0]
WIDGET, BASE = uic.loadUiType(
    os.path.join(pluginPath, 'ui', 'DlgMultipleSelection.ui'))


class MultipleInputDialog(BASE, WIDGET):

    def __init__(self, options, selectedoptions=None):
        super(MultipleInputDialog, self).__init__(None)
        self.setupUi(self)

        self.lstLayers.setSelectionMode(QAbstractItemView.NoSelection)

        self.options = []
        for i, option in enumerate(options):
            if option is None or isinstance(option, basestring):
                self.options.append((i, option))
            else:
                self.options.append((option[0], option[1]))

        self.selectedoptions = selectedoptions or []

        # Additional buttons
        self.btnSelectAll = QPushButton(self.tr('Select all'))
        self.buttonBox.addButton(self.btnSelectAll,
                                 QDialogButtonBox.ActionRole)
        self.btnClearSelection = QPushButton(self.tr('Clear selection'))
        self.buttonBox.addButton(self.btnClearSelection,
                                 QDialogButtonBox.ActionRole)
        self.btnToggleSelection = QPushButton(self.tr('Toggle selection'))
        self.buttonBox.addButton(self.btnToggleSelection,
                                 QDialogButtonBox.ActionRole)

        self.btnSelectAll.clicked.connect(lambda: self.selectAll(True))
        self.btnClearSelection.clicked.connect(lambda: self.selectAll(False))
        self.btnToggleSelection.clicked.connect(self.toggleSelection)

        self.populateList()

    def populateList(self):
        model = QStandardItemModel()
        for value, text in self.options:
            item = QStandardItem(text)
            item.setData(value, Qt.UserRole)
            item.setCheckState(Qt.Checked if value in self.selectedoptions
                               else Qt.Unchecked)
            item.setCheckable(True)
            model.appendRow(item)

        self.lstLayers.setModel(model)

    def accept(self):
        self.selectedoptions = []
        model = self.lstLayers.model()
        for i in range(model.rowCount()):
            item = model.item(i)
            if item.checkState() == Qt.Checked:
                self.selectedoptions.append(item.data(Qt.UserRole))
        QDialog.accept(self)

    def reject(self):
        self.selectedoptions = None
        QDialog.reject(self)

    def selectAll(self, value):
        model = self.lstLayers.model()
        for i in range(model.rowCount()):
            item = model.item(i)
            item.setCheckState(Qt.Checked if value else Qt.Unchecked)

    def toggleSelection(self):
        model = self.lstLayers.model()
        for i in range(model.rowCount()):
            item = model.item(i)
            checked = item.checkState() == Qt.Checked
            item.setCheckState(Qt.Unchecked if checked else Qt.Checked)
gpl-2.0
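A hedged sketch of how the dialog above might be driven from plugin code; it assumes a running QGIS/Qt application so the .ui file can be loaded and `exec_()` can spin an event loop, and the layer names are placeholders.

```python
# Hypothetical driver code, not part of the original module.
from processing.gui.MultipleInputDialog import MultipleInputDialog

options = ['layer A', 'layer B', 'layer C']
dlg = MultipleInputDialog(options, selectedoptions=[0, 2])
dlg.exec_()
# None means the dialog was cancelled; otherwise a list of selected values
# (for plain-string options, the indices assigned in __init__).
if dlg.selectedoptions is not None:
    print('selected:', dlg.selectedoptions)
```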
en0/Supybot_sasl
plugins/String/config.py
8
2799
###
# Copyright (c) 2003-2005, Jeremiah Fincher
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
#   * Redistributions of source code must retain the above copyright notice,
#     this list of conditions, and the following disclaimer.
#   * Redistributions in binary form must reproduce the above copyright notice,
#     this list of conditions, and the following disclaimer in the
#     documentation and/or other materials provided with the distribution.
#   * Neither the name of the author of this software nor the name of
#     contributors to this software may be used to endorse or promote products
#     derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###

import supybot.conf as conf
import supybot.registry as registry


def configure(advanced):
    # This will be called by supybot to configure this module.  advanced is
    # a bool that specifies whether the user identified himself as an advanced
    # user or not.  You should effect your configuration by manipulating the
    # registry as appropriate.
    from supybot.questions import expect, anything, something, yn
    conf.registerPlugin('String', True)


String = conf.registerPlugin('String')

conf.registerGroup(String, 'levenshtein')
conf.registerGlobalValue(String.levenshtein, 'max',
    registry.PositiveInteger(256, """Determines the maximum size of a string
    given to the levenshtein command.  The levenshtein command uses an O(m*n)
    algorithm, which means that with strings of length 256, it can take 1.5
    seconds to finish; with strings of length 384, though, it can take 4
    seconds to finish, and with strings of much larger lengths, it takes more
    and more time.  Using nested commands, strings can get quite large, hence
    this variable, to limit the size of arguments passed to the levenshtein
    command."""))

# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
bsd-3-clause
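A hedged sketch of reading the value registered above from plugin code; it assumes the standard Supybot registry access pattern and that the String plugin's config module has already been loaded so the group exists.

```python
# Hypothetical consumer of the registered limit, not part of the original file.
import supybot.conf as conf

limit = conf.supybot.plugins.String.levenshtein.max()
s1, s2 = 'kitten' * 64, 'sitting' * 64
if max(len(s1), len(s2)) > limit:
    print('refusing levenshtein on strings longer than %d characters' % limit)
```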
Xeralux/tensorflow
tensorflow/python/keras/_impl/keras/engine/training.py
1
72917
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Training-related part of the Keras engine. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.eager import context from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_util from tensorflow.python.keras._impl.keras import backend as K from tensorflow.python.keras._impl.keras import losses from tensorflow.python.keras._impl.keras import metrics as metrics_module from tensorflow.python.keras._impl.keras import optimizers from tensorflow.python.keras._impl.keras.engine import training_arrays from tensorflow.python.keras._impl.keras.engine import training_eager from tensorflow.python.keras._impl.keras.engine import training_generator from tensorflow.python.keras._impl.keras.engine import training_utils from tensorflow.python.keras._impl.keras.engine.base_layer import Layer from tensorflow.python.keras._impl.keras.engine.network import Network from tensorflow.python.keras._impl.keras.utils.generic_utils import slice_arrays from tensorflow.python.layers.base import _DeferredTensor from tensorflow.python.ops import array_ops from tensorflow.python.platform import tf_logging as logging from tensorflow.python.training import optimizer as tf_optimizer_module from tensorflow.python.util.tf_export import tf_export @tf_export('keras.models.Model', 'keras.Model') class Model(Network): """`Model` groups layers into an object with training and inference features. There are two ways to instantiate a `Model`: 1 - With the "functional API", where you start from `Input`, you chain layer calls to specify the model's forward pass, and finally you create your model from inputs and outputs: ```python import tensorflow as tf inputs = tf.keras.Input(shape=(3,)) x = tf.keras.layers.Dense(4, activation=tf.nn.relu)(inputs) outputs = tf.keras.layers.Dense(5, activation=tf.nn.softmax)(x) model = tf.keras.Model(inputs=inputs, outputs=outputs) ``` 2 - By subclassing the `Model` class: in that case, you should define your layers in `__init__` and you should implement the model's forward pass in `call`. 
```python import tensorflow as tf class MyModel(tf.keras.Model): def __init__(self): self.dense1 = tf.keras.layers.Dense(4, activation=tf.nn.relu) self.dense2 = tf.keras.layers.Dense(5, activation=tf.nn.softmax) def call(self, inputs): x = self.dense1(inputs) return self.dense2(x) model = MyModel() ``` If you subclass `Model`, you can optionally have a `training` argument (boolean) in `call`, which you can use to specify a different behavior in training and inference: ```python import tensorflow as tf class MyModel(tf.keras.Model): def __init__(self): self.dense1 = tf.keras.layers.Dense(4, activation=tf.nn.relu) self.dense2 = tf.keras.layers.Dense(5, activation=tf.nn.softmax) self.dropout = tf.keras.layers.Dropout(0.5) def call(self, inputs, training=False): x = self.dense1(inputs) if training: x = self.dropout(x, training=training) return self.dense2(x) model = MyModel() ``` """ def compile(self, optimizer, loss=None, metrics=None, loss_weights=None, sample_weight_mode=None, weighted_metrics=None, target_tensors=None, **kwargs): """Configures the model for training. Arguments: optimizer: String (name of optimizer) or optimizer instance. See [optimizers](/optimizers). loss: String (name of objective function) or objective function. See [losses](/losses). If the model has multiple outputs, you can use a different loss on each output by passing a dictionary or a list of losses. The loss value that will be minimized by the model will then be the sum of all individual losses. metrics: List of metrics to be evaluated by the model during training and testing. Typically you will use `metrics=['accuracy']`. To specify different metrics for different outputs of a multi-output model, you could also pass a dictionary, such as `metrics={'output_a': 'accuracy'}`. loss_weights: Optional list or dictionary specifying scalar coefficients (Python floats) to weight the loss contributions of different model outputs. The loss value that will be minimized by the model will then be the *weighted sum* of all individual losses, weighted by the `loss_weights` coefficients. If a list, it is expected to have a 1:1 mapping to the model's outputs. If a tensor, it is expected to map output names (strings) to scalar coefficients. sample_weight_mode: If you need to do timestep-wise sample weighting (2D weights), set this to `"temporal"`. `None` defaults to sample-wise weights (1D). If the model has multiple outputs, you can use a different `sample_weight_mode` on each output by passing a dictionary or a list of modes. weighted_metrics: List of metrics to be evaluated and weighted by sample_weight or class_weight during training and testing. target_tensors: By default, Keras will create placeholders for the model's target, which will be fed with the target data during training. If instead you would like to use your own target tensors (in turn, Keras will not expect external Numpy data for these targets at training time), you can specify them via the `target_tensors` argument. It can be a single tensor (for a single-output model), a list of tensors, or a dict mapping output names to target tensors. **kwargs: These arguments are passed to `tf.Session.run`. Raises: ValueError: In case of invalid arguments for `optimizer`, `loss`, `metrics` or `sample_weight_mode`. 
""" loss = loss or {} if context.executing_eagerly() and not isinstance( optimizer, (tf_optimizer_module.Optimizer, optimizers.TFOptimizer)): raise ValueError('Only TF native optimizers are supported in Eager mode.') self.optimizer = optimizers.get(optimizer) self.loss = loss self.metrics = metrics or [] self.loss_weights = loss_weights if context.executing_eagerly() and sample_weight_mode is not None: raise ValueError('sample_weight_mode is not supported in Eager mode.') self.sample_weight_mode = sample_weight_mode if context.executing_eagerly() and weighted_metrics is not None: raise ValueError('weighted_metrics is not supported in Eager mode.') self.weighted_metrics = weighted_metrics if context.executing_eagerly() and target_tensors is not None: raise ValueError('target_tensors is not supported in Eager mode.') self.target_tensors = target_tensors if not self.built: # Model is not compilable because it does not know its number of inputs # and outputs, nor their shapes and names. We will compile after the first # time the model gets called on training data. return self._is_compiled = True # Prepare loss functions. if isinstance(loss, dict): for name in loss: if name not in self.output_names: raise ValueError( 'Unknown entry in loss ' 'dictionary: "' + name + '". ' 'Only expected the following keys: ' + str(self.output_names)) loss_functions = [] for name in self.output_names: if name not in loss: logging.warning( 'Output "' + name + '" missing from loss dictionary. ' 'We assume this was done on purpose, ' 'and we will not be expecting ' 'any data to be passed to "' + name + '" during training.') loss_functions.append(losses.get(loss.get(name))) elif isinstance(loss, list): if len(loss) != len(self.outputs): raise ValueError('When passing a list as loss, ' 'it should have one entry per model outputs. ' 'The model has ' + str(len(self.outputs)) + ' outputs, but you passed loss=' + str(loss)) loss_functions = [losses.get(l) for l in loss] else: loss_function = losses.get(loss) loss_functions = [loss_function for _ in range(len(self.outputs))] self.loss_functions = loss_functions weighted_losses = [training_utils.weighted_masked_objective(fn) for fn in loss_functions] skip_target_indices = [] skip_target_weighing_indices = [] self._feed_outputs = [] self._feed_output_names = [] self._feed_output_shapes = [] self._feed_loss_fns = [] for i in range(len(weighted_losses)): if weighted_losses[i] is None: skip_target_indices.append(i) skip_target_weighing_indices.append(i) # Prepare output masks. if not context.executing_eagerly(): masks = self.compute_mask(self.inputs, mask=None) if masks is None: masks = [None for _ in self.outputs] if not isinstance(masks, list): masks = [masks] # Prepare loss weights. if loss_weights is None: loss_weights_list = [1. for _ in range(len(self.outputs))] elif isinstance(loss_weights, dict): for name in loss_weights: if name not in self.output_names: raise ValueError( 'Unknown entry in loss_weights ' 'dictionary: "' + name + '". ' 'Only expected the following keys: ' + str(self.output_names)) loss_weights_list = [] for name in self.output_names: loss_weights_list.append(loss_weights.get(name, 1.)) elif isinstance(loss_weights, list): if len(loss_weights) != len(self.outputs): raise ValueError( 'When passing a list as loss_weights, ' 'it should have one entry per model output. 
' 'The model has ' + str(len(self.outputs)) + ' outputs, but you passed loss_weights=' + str(loss_weights)) loss_weights_list = loss_weights else: raise TypeError('Could not interpret loss_weights argument: ' + str(loss_weights) + ' - expected a list of dicts.') self.loss_weights_list = loss_weights_list # initialization for Eager mode execution if context.executing_eagerly(): if target_tensors is not None: raise ValueError('target_tensors are not currently supported in Eager ' 'mode.') self.total_loss = None self.metrics_tensors = [] self.metrics_names = ['loss'] for i in range(len(self.outputs)): if len(self.outputs) > 1: self.metrics_names.append(self.output_names[i] + '_loss') self.nested_metrics = training_utils.collect_metrics(metrics, self.output_names) self._feed_sample_weight_modes = [] for i in range(len(self.outputs)): self._feed_sample_weight_modes.append(None) self.sample_weights = [] self.targets = [] for i in range(len(self.outputs)): self._feed_output_names.append(self.output_names[i]) self._collected_trainable_weights = self.trainable_weights return # Prepare targets of model. self.targets = [] self._feed_targets = [] if target_tensors not in (None, []): if isinstance(target_tensors, list): if len(target_tensors) != len(self.outputs): raise ValueError( 'When passing a list as `target_tensors`, ' 'it should have one entry per model output. ' 'The model has ' + str(len(self.outputs)) + ' outputs, but you passed target_tensors=' + str(target_tensors)) elif isinstance(target_tensors, dict): for name in target_tensors: if name not in self.output_names: raise ValueError( 'Unknown entry in `target_tensors` ' 'dictionary: "' + name + '". ' 'Only expected the following keys: ' + str(self.output_names)) tmp_target_tensors = [] for name in self.output_names: tmp_target_tensors.append(target_tensors.get(name, None)) target_tensors = tmp_target_tensors else: raise TypeError('Expected `target_tensors` to be ' 'a list or dict, but got:', target_tensors) for i in range(len(self.outputs)): if i in skip_target_indices: self.targets.append(None) else: shape = K.int_shape(self.outputs[i]) name = self.output_names[i] if target_tensors not in (None, []): target = target_tensors[i] else: target = None if target is None or K.is_placeholder(target): if target is None: target = K.placeholder( ndim=len(shape), name=name + '_target', sparse=K.is_sparse(self.outputs[i]), dtype=K.dtype(self.outputs[i])) self._feed_targets.append(target) self._feed_outputs.append(self.outputs[i]) self._feed_output_names.append(name) self._feed_output_shapes.append(shape) self._feed_loss_fns.append(self.loss_functions[i]) else: skip_target_weighing_indices.append(i) self.targets.append(target) # Prepare sample weights. sample_weights = [] sample_weight_modes = [] if isinstance(sample_weight_mode, dict): for name in sample_weight_mode: if name not in self.output_names: raise ValueError( 'Unknown entry in ' 'sample_weight_mode dictionary: "' + name + '". 
' 'Only expected the following keys: ' + str(self.output_names)) for i, name in enumerate(self.output_names): if i in skip_target_weighing_indices: weight = None sample_weight_modes.append(None) else: if name not in sample_weight_mode: raise ValueError( 'Output "' + name + '" missing from sample_weight_modes ' 'dictionary') if sample_weight_mode.get(name) == 'temporal': weight = K.placeholder(ndim=2, name=name + '_sample_weights') sample_weight_modes.append('temporal') else: weight = K.placeholder(ndim=1, name=name + 'sample_weights') sample_weight_modes.append(None) sample_weights.append(weight) elif isinstance(sample_weight_mode, list): if len(sample_weight_mode) != len(self.outputs): raise ValueError('When passing a list as sample_weight_mode, ' 'it should have one entry per model output. ' 'The model has ' + str(len(self.outputs)) + ' outputs, but you passed ' 'sample_weight_mode=' + str(sample_weight_mode)) for i in range(len(self.output_names)): if i in skip_target_weighing_indices: weight = None sample_weight_modes.append(None) else: mode = sample_weight_mode[i] name = self.output_names[i] if mode == 'temporal': weight = K.placeholder(ndim=2, name=name + '_sample_weights') sample_weight_modes.append('temporal') else: weight = K.placeholder(ndim=1, name=name + '_sample_weights') sample_weight_modes.append(None) sample_weights.append(weight) else: for i, name in enumerate(self.output_names): if i in skip_target_weighing_indices: sample_weight_modes.append(None) sample_weights.append(None) else: if sample_weight_mode == 'temporal': sample_weights.append(array_ops.placeholder_with_default( [[1.]], shape=[None, None], name=name + '_sample_weights')) sample_weight_modes.append('temporal') else: sample_weights.append(array_ops.placeholder_with_default( [1.], shape=[None], name=name + '_sample_weights')) sample_weight_modes.append(None) self.sample_weight_modes = sample_weight_modes self._feed_sample_weight_modes = [] for i in range(len(self.outputs)): if i not in skip_target_weighing_indices: self._feed_sample_weight_modes.append(self.sample_weight_modes[i]) # Prepare metrics. self.weighted_metrics = weighted_metrics self.metrics_names = ['loss'] self.metrics_tensors = [] # Compute total loss. total_loss = None with K.name_scope('loss'): for i in range(len(self.outputs)): if i in skip_target_indices: continue y_true = self.targets[i] y_pred = self.outputs[i] weighted_loss = weighted_losses[i] sample_weight = sample_weights[i] mask = masks[i] loss_weight = loss_weights_list[i] with K.name_scope(self.output_names[i] + '_loss'): output_loss = weighted_loss(y_true, y_pred, sample_weight, mask) if len(self.outputs) > 1: self.metrics_tensors.append(output_loss) self.metrics_names.append(self.output_names[i] + '_loss') if total_loss is None: total_loss = loss_weight * output_loss else: total_loss += loss_weight * output_loss if total_loss is None: if not self.losses: raise ValueError('The model cannot be compiled ' 'because it has no loss to optimize.') else: total_loss = 0. # Add regularization penalties # and other layer-specific losses. for loss_tensor in self.losses: total_loss += loss_tensor # List of same size as output_names. # contains tuples (metrics for output, names of metrics). 
nested_metrics = training_utils.collect_metrics(metrics, self.output_names) nested_weighted_metrics = training_utils.collect_metrics(weighted_metrics, self.output_names) self.metrics_updates = [] self.stateful_metric_names = [] with K.name_scope('metrics'): for i in range(len(self.outputs)): if i in skip_target_indices: continue y_true = self.targets[i] y_pred = self.outputs[i] weights = sample_weights[i] output_metrics = nested_metrics[i] output_weighted_metrics = nested_weighted_metrics[i] def handle_metrics(metrics, weights=None): metric_name_prefix = 'weighted_' if weights is not None else '' for metric in metrics: if metric in ('accuracy', 'acc', 'crossentropy', 'ce'): # custom handling of accuracy/crossentropy # (because of class mode duality) output_shape = self.outputs[i].get_shape().as_list() if (output_shape[-1] == 1 or self.loss_functions[i] == losses.binary_crossentropy): # case: binary accuracy/crossentropy if metric in ('accuracy', 'acc'): metric_fn = metrics_module.binary_accuracy elif metric in ('crossentropy', 'ce'): metric_fn = metrics_module.binary_crossentropy elif self.loss_functions[ i] == losses.sparse_categorical_crossentropy: # case: categorical accuracy/crossentropy with sparse targets if metric in ('accuracy', 'acc'): metric_fn = metrics_module.sparse_categorical_accuracy elif metric in ('crossentropy', 'ce'): metric_fn = metrics_module.sparse_categorical_crossentropy else: # case: categorical accuracy/crossentropy if metric in ('accuracy', 'acc'): metric_fn = metrics_module.categorical_accuracy elif metric in ('crossentropy', 'ce'): metric_fn = metrics_module.categorical_crossentropy if metric in ('accuracy', 'acc'): suffix = 'acc' elif metric in ('crossentropy', 'ce'): suffix = 'ce' weighted_metric_fn = training_utils.weighted_masked_objective( metric_fn) metric_name = metric_name_prefix + suffix else: metric_fn = metrics_module.get(metric) weighted_metric_fn = training_utils.weighted_masked_objective( metric_fn) # Get metric name as string if hasattr(metric_fn, 'name'): metric_name = metric_fn.name else: metric_name = metric_fn.__name__ metric_name = metric_name_prefix + metric_name with K.name_scope(metric_name): metric_result = weighted_metric_fn( y_true, y_pred, weights=weights, mask=masks[i]) # Append to self.metrics_names, self.metric_tensors, # self.stateful_metric_names if len(self.output_names) > 1: metric_name = '%s_%s' % (self.output_names[i], metric_name) # Dedupe name j = 1 base_metric_name = metric_name while metric_name in self.metrics_names: metric_name = '%s_%d' % (base_metric_name, j) j += 1 self.metrics_names.append(metric_name) self.metrics_tensors.append(metric_result) # Keep track of state updates created by # stateful metrics (i.e. metrics layers). if isinstance(metric_fn, Layer): self.stateful_metric_names.append(metric_name) self.metrics_updates += metric_fn.updates handle_metrics(output_metrics) handle_metrics(output_weighted_metrics, weights=weights) # Prepare gradient updates and state updates. self.total_loss = total_loss self.sample_weights = sample_weights self._feed_sample_weights = [] for i in range(len(self.sample_weights)): if i not in skip_target_weighing_indices: self._feed_sample_weights.append(self.sample_weights[i]) # Functions for train, test and predict will # be compiled lazily when required. # This saves time when the user is not using all functions. 
self._function_kwargs = kwargs self.train_function = None self.test_function = None self.predict_function = None # Collected trainable weights, sorted in topological order. trainable_weights = self.trainable_weights self._collected_trainable_weights = trainable_weights def _check_trainable_weights_consistency(self): """Check trainable weights count consistency. This will raise a warning if `trainable_weights` and `_collected_trainable_weights` are inconsistent (i.e. have different number of parameters). Inconsistency will typically arise when one modifies `model.trainable` without calling `model.compile` again. """ if not hasattr(self, '_collected_trainable_weights'): return if len(self.trainable_weights) != len(self._collected_trainable_weights): logging.warning( UserWarning( 'Discrepancy between trainable weights and collected trainable' ' weights, did you set `model.trainable` without calling' ' `model.compile` after ?')) def _make_train_function(self): if not hasattr(self, 'train_function'): raise RuntimeError('You must compile your model before using it.') self._check_trainable_weights_consistency() if self.train_function is None: inputs = (self._feed_inputs + self._feed_targets + self._feed_sample_weights) if self.uses_learning_phase and not isinstance(K.learning_phase(), int): inputs += [K.learning_phase()] with K.name_scope('training'): with K.name_scope(self.optimizer.__class__.__name__): # Training updates updates = self.optimizer.get_updates( params=self._collected_trainable_weights, loss=self.total_loss) # Unconditional updates updates += self.get_updates_for(None) # Conditional updates relevant to this model updates += self.get_updates_for(self._feed_inputs) # Stateful metrics updates updates += self.metrics_updates # Gets loss and metrics. Updates weights at each call. self.train_function = K.function( inputs, [self.total_loss] + self.metrics_tensors, updates=updates, name='train_function', **self._function_kwargs) def _make_test_function(self): if not hasattr(self, 'test_function'): raise RuntimeError('You must compile your model before using it.') if self.test_function is None: inputs = (self._feed_inputs + self._feed_targets + self._feed_sample_weights) if self.uses_learning_phase and not isinstance(K.learning_phase(), int): inputs += [K.learning_phase()] # Return loss and metrics, no gradient updates. # Does update the network states. self.test_function = K.function( inputs, [self.total_loss] + self.metrics_tensors, updates=self.state_updates + self.metrics_updates, name='test_function', **self._function_kwargs) def _make_predict_function(self): if not hasattr(self, 'predict_function'): self.predict_function = None if self.predict_function is None: if self.uses_learning_phase and not isinstance(K.learning_phase(), int): inputs = self._feed_inputs + [K.learning_phase()] else: inputs = self._feed_inputs # Gets network outputs. Does not update weights. # Does update the network states. kwargs = getattr(self, '_function_kwargs', {}) self.predict_function = K.function( inputs, self.outputs, updates=self.state_updates, name='predict_function', **kwargs) def _standardize_user_data(self, x, y=None, sample_weight=None, class_weight=None, batch_size=None): """Runs validation checks on input and target data passed by the user. Also standardizes the data to lists of arrays, in order. Also builds and compiles the model on the fly if it is a subclassed model that has never been called before (and thus has no inputs/outputs). 
This is a purely internal method, subject to refactoring at any time. Args: x: An array or list of arrays, to be used as input data. If the model has known, named inputs, this could also be a dict mapping input names to the corresponding array. y: An array or list of arrays, to be used as target data. If the model has known, named outputs, this could also be a dict mapping output names to the corresponding array. sample_weight: An optional sample-weight array passed by the user to weight the importance of each sample in `x`. class_weight: An optional class-weight array by the user to weight the importance of samples in `x` based on the class they belong to, as conveyed by `y`. batch_size: Integer batch size. If provided, it is used to run additional validation checks on stateful models. Returns: A tuple of 3 lists: input arrays, target arrays, sample-weight arrays. If the model's input and targets are symbolic, these lists are empty (since the model takes no user-provided data, instead the data comes from the symbolic inputs/targets). Raises: ValueError: In case of invalid user-provided data. RuntimeError: If the model was never compiled. """ # First, we build/compile the model on the fly if necessary. all_inputs = [] if not self.built: # We need to use `x` to set the model inputs. # We type-check that `x` and `y` are either single arrays # or lists of arrays. if isinstance(x, (list, tuple)): if not all(isinstance(v, np.ndarray) or tensor_util.is_tensor(v) for v in x): raise ValueError('Please provide as model inputs either a single ' 'array or a list of arrays. You passed: x=' + str(x)) all_inputs += list(x) elif isinstance(x, dict): raise ValueError('Please do not pass a dictionary as model inputs.') else: if not isinstance(x, np.ndarray) and not tensor_util.is_tensor(x): raise ValueError('Please provide as model inputs either a single ' 'array or a list of arrays. You passed: x=' + str(x)) all_inputs.append(x) # Build the model using the retrieved inputs (value or symbolic). # If values, then in symbolic-mode placeholders will be created # to match the value shapes. if not self.inputs: self._set_inputs(x) if y is not None: if not self.optimizer: raise RuntimeError('You must compile a model before ' 'training/testing. ' 'Use `model.compile(optimizer, loss)`.') if not self._is_compiled: # On-the-fly compilation of the model. # We need to use `y` to set the model targets. if isinstance(y, (list, tuple)): if not all(isinstance(v, np.ndarray) or tensor_util.is_tensor(v) for v in y): raise ValueError('Please provide as model targets either a single ' 'array or a list of arrays. ' 'You passed: y=' + str(y)) elif isinstance(y, dict): raise ValueError('Please do not pass a dictionary as model targets.') else: if not isinstance(y, np.ndarray) and not tensor_util.is_tensor(y): raise ValueError('Please provide as model targets either a single ' 'array or a list of arrays. ' 'You passed: y=' + str(y)) # Typecheck that all inputs are *either* value *or* symbolic. # TODO(fchollet): this check could be removed in Eager mode? if y is not None: if isinstance(y, (list, tuple)): all_inputs += list(y) else: all_inputs.append(y) if any(tensor_util.is_tensor(v) for v in all_inputs): if not all(tensor_util.is_tensor(v) for v in all_inputs): raise ValueError('Do not pass inputs that mix Numpy arrays and ' 'TensorFlow tensors. ' 'You passed: x=' + str(x) + '; y=' + str(y)) if context.executing_eagerly(): target_tensors = None else: # Handle target tensors if any passed. 
if not isinstance(y, (list, tuple)): y = [y] target_tensors = [v for v in y if tensor_util.is_tensor(v)] self.compile(optimizer=self.optimizer, loss=self.loss, metrics=self.metrics, loss_weights=self.loss_weights, target_tensors=target_tensors) # If `x` and `y` were all symbolic, then no model should not be fed any # inputs and targets. # Note: in this case, `any` and `all` are equivalent since we disallow # mixed symbolic/value inputs. if any(tensor_util.is_tensor(v) for v in all_inputs): return [], [], [] # What follows is input validation and standardization to list format, # in the case where all inputs are value arrays. if context.executing_eagerly(): # In eager mode, do not do shape validation. feed_input_names = self.input_names feed_input_shapes = None elif not self._is_graph_network: # Case: symbolic-mode subclassed network. Do not do shape validation. feed_input_names = self._feed_input_names feed_input_shapes = None else: # Case: symbolic-mode graph network. # In this case, we run extensive shape validation checks. feed_input_names = self._feed_input_names feed_input_shapes = self._feed_input_shapes # Standardize the inputs. x = training_utils.standardize_input_data( x, feed_input_names, feed_input_shapes, check_batch_axis=False, # Don't enforce the batch size. exception_prefix='input') if y is not None: if context.executing_eagerly(): feed_output_names = self.output_names feed_output_shapes = None # Sample weighting not supported in this case. # TODO(fchollet): consider supporting it. feed_sample_weight_modes = [None for _ in self.outputs] elif not self._is_graph_network: feed_output_names = self._feed_output_names feed_output_shapes = None # Sample weighting not supported in this case. # TODO(fchollet): consider supporting it. feed_sample_weight_modes = [None for _ in self.outputs] else: feed_output_names = self._feed_output_names feed_sample_weight_modes = self._feed_sample_weight_modes feed_output_shapes = [] for output_shape, loss_fn in zip(self._feed_output_shapes, self._feed_loss_fns): if loss_fn is losses.sparse_categorical_crossentropy: feed_output_shapes.append(output_shape[:-1] + (1,)) elif (not hasattr(loss_fn, '__name__') or getattr(losses, loss_fn.__name__, None) is None): # If `loss_fn` is not a function (e.g. callable class) # or if it not in the `losses` module, then # it is a user-defined loss and we make no assumptions # about it. feed_output_shapes.append(None) else: feed_output_shapes.append(output_shape) # Standardize the outputs. y = training_utils.standardize_input_data( y, feed_output_names, feed_output_shapes, check_batch_axis=False, # Don't enforce the batch size. exception_prefix='target') # Generate sample-wise weight values given the `sample_weight` and # `class_weight` arguments. sample_weights = training_utils.standardize_sample_weights( sample_weight, feed_output_names) class_weights = training_utils.standardize_class_weights( class_weight, feed_output_names) sample_weights = [ training_utils.standardize_weights(ref, sw, cw, mode) for (ref, sw, cw, mode) in zip(y, sample_weights, class_weights, feed_sample_weight_modes) ] # Check that all arrays have the same length. training_utils.check_array_lengths(x, y, sample_weights) if self._is_graph_network and not context.executing_eagerly(): # Additional checks to avoid users mistakenly using improper loss fns. 
training_utils.check_loss_and_target_compatibility( y, self._feed_loss_fns, feed_output_shapes) else: y = [] sample_weights = [] if self.stateful and batch_size: # Check that for stateful networks, number of samples is a multiple # of the static batch size. if x[0].shape[0] % batch_size != 0: raise ValueError('In a stateful network, ' 'you should only pass inputs with ' 'a number of samples that can be ' 'divided by the batch size. Found: ' + str(x[0].shape[0]) + ' samples') return x, y, sample_weights def _set_inputs(self, inputs, training=None): """Set model's input and output specs based on the input data received. This is to be used for Model subclasses, which do not know at instantiation time what their inputs look like. Args: inputs: Single array, or list of arrays. The arrays could be placeholders, Numpy arrays, or data tensors. - if placeholders: the model is built on top of these placeholders, and we expect Numpy data to be fed for them when calling `fit`/etc. - if Numpy data: we create placeholders matching the shape of the Numpy arrays. We expect Numpy data to be fed for these placeholders when calling `fit`/etc. - if data tensors: the model is built on top of these tensors. We do not expect any Numpy data to be provided when calling `fit`/etc. training: Boolean or None. Only relevant in symbolic mode. Specifies whether to build the model's graph in inference mode (False), training mode (True), or using the Keras learning phase (None). """ if self.__class__.__name__ == 'Sequential': # Note: we can't test whether the model is `Sequential` via `isinstance` # since `Sequential` depends on `Model`. if isinstance(inputs, list): assert len(inputs) == 1 inputs = inputs[0] self.build(input_shape=(None,) + inputs.shape[1:]) elif context.executing_eagerly(): self._eager_set_inputs(inputs) else: self._symbolic_set_inputs(inputs, training=training) def _set_scope(self, scope=None): """Modify the Layer scope creation logic to create ResourceVariables.""" super(Model, self)._set_scope(scope=scope) # Subclassed Models create ResourceVariables by default. This makes it # easier to use Models in an eager/graph agnostic way (since eager execution # always uses ResourceVariables). if not self._is_graph_network: self._scope.set_use_resource(True) def _eager_set_inputs(self, inputs): """Set model's input and output specs based on the input data received. This is to be used for Model subclasses, which do not know at instantiation time what their inputs look like. We assume the number and ndim of outputs does not change over different calls. Args: inputs: Argument `x` (input data) passed by the user upon first model use. Raises: ValueError: If the model's inputs are already set. """ assert context.executing_eagerly() if self.inputs: raise ValueError('Model inputs are already set.') # On-the-fly setting of model inputs/outputs as DeferredTensors, # to keep track of number of inputs and outputs and their ndim. 
if isinstance(inputs, (list, tuple)): dummy_output_values = self.call( [ops.convert_to_tensor(v, dtype=K.floatx()) for v in inputs]) dummy_input_values = list(inputs) else: dummy_output_values = self.call( ops.convert_to_tensor(inputs, dtype=K.floatx())) dummy_input_values = [inputs] if isinstance(dummy_output_values, (list, tuple)): dummy_output_values = list(dummy_output_values) else: dummy_output_values = [dummy_output_values] self.outputs = [ _DeferredTensor(shape=(None for _ in v.shape), dtype=v.dtype) for v in dummy_output_values] self.inputs = [ _DeferredTensor(shape=(None for _ in v.shape), dtype=v.dtype) for v in dummy_input_values] self.input_names = [ 'input_%d' % (i + 1) for i in range(len(dummy_input_values))] self.output_names = [ 'output_%d' % (i + 1) for i in range(len(dummy_output_values))] self.built = True def _symbolic_set_inputs(self, inputs, outputs=None, training=None): """Set model's inputs and output specs based. This is to be used for Model subclasses, which do not know at instantiation time what their inputs look like. Args: inputs: Argument `x` (input data) passed by the user upon first model use. outputs: None, a data tensor, or a list of data tensors. If None, the outputs will be determined by invoking self.call(), otherwise the provided value will be used. training: Boolean or None. Only relevant in symbolic mode. Specifies whether to build the model's graph in inference mode (False), training mode (True), or using the Keras learning phase (None). Raises: ValueError: If the model's inputs are already set. """ assert not context.executing_eagerly() if self.inputs: raise ValueError('Model inputs are already set.') # On-the-fly setting of symbolic model inputs (either by using the tensor # provided, or by creating a placeholder if Numpy data was provided). self.inputs = [] self.input_names = [] self._feed_inputs = [] self._feed_input_names = [] self._feed_input_shapes = [] if isinstance(inputs, (list, tuple)): inputs = list(inputs) else: inputs = [inputs] for i, v in enumerate(inputs): name = 'input_%d' % (i + 1) self.input_names.append(name) if isinstance(v, list): v = np.asarray(v) if v.ndim == 1: v = np.expand_dims(v, 1) if isinstance(v, (np.ndarray)): # We fix the placeholder shape except the batch size. # This is suboptimal, but it is the best we can do with the info # we have. The user should call `model._set_inputs(placeholders)` # to specify custom placeholders if the need arises. shape = (None,) + v.shape[1:] placeholder = K.placeholder(shape=shape, name=name) self.inputs.append(placeholder) self._feed_inputs.append(placeholder) self._feed_input_names.append(name) self._feed_input_shapes.append(shape) else: # Assumed tensor - TODO(fchollet) additional type check? self.inputs.append(v) if K.is_placeholder(v): self._feed_inputs.append(v) self._feed_input_names.append(name) self._feed_input_shapes.append(K.int_shape(v)) if outputs is None: # Obtain symbolic outputs by calling the model. 
if len(self.inputs) == 1: if self._expects_training_arg: outputs = self.call(self.inputs[0], training=training) else: outputs = self.call(self.inputs[0]) else: if self._expects_training_arg: outputs = self.call(self.inputs, training=training) else: outputs = self.call(self.inputs) if isinstance(outputs, (list, tuple)): outputs = list(outputs) else: outputs = [outputs] self.outputs = outputs self.output_names = [ 'output_%d' % (i + 1) for i in range(len(self.outputs))] self.built = True def fit(self, x=None, y=None, batch_size=None, epochs=1, verbose=1, callbacks=None, validation_split=0., validation_data=None, shuffle=True, class_weight=None, sample_weight=None, initial_epoch=0, steps_per_epoch=None, validation_steps=None, **kwargs): """Trains the model for a fixed number of epochs (iterations on a dataset). Arguments: x: Numpy array of training data (if the model has a single input), or list of Numpy arrays (if the model has multiple inputs). If input layers in the model are named, you can also pass a dictionary mapping input names to Numpy arrays. `x` can be `None` (default) if feeding from TensorFlow data tensors. y: Numpy array of target (label) data (if the model has a single output), or list of Numpy arrays (if the model has multiple outputs). If output layers in the model are named, you can also pass a dictionary mapping output names to Numpy arrays. `y` can be `None` (default) if feeding from TensorFlow data tensors. batch_size: Integer or `None`. Number of samples per gradient update. If unspecified, `batch_size` will default to 32. epochs: Integer. Number of epochs to train the model. An epoch is an iteration over the entire `x` and `y` data provided. Note that in conjunction with `initial_epoch`, `epochs` is to be understood as "final epoch". The model is not trained for a number of iterations given by `epochs`, but merely until the epoch of index `epochs` is reached. verbose: Integer. 0, 1, or 2. Verbosity mode. 0 = silent, 1 = progress bar, 2 = one line per epoch. callbacks: List of `keras.callbacks.Callback` instances. List of callbacks to apply during training. See [callbacks](/callbacks). validation_split: Float between 0 and 1. Fraction of the training data to be used as validation data. The model will set apart this fraction of the training data, will not train on it, and will evaluate the loss and any model metrics on this data at the end of each epoch. The validation data is selected from the last samples in the `x` and `y` data provided, before shuffling. validation_data: tuple `(x_val, y_val)` or tuple `(x_val, y_val, val_sample_weights)` on which to evaluate the loss and any model metrics at the end of each epoch. The model will not be trained on this data. `validation_data` will override `validation_split`. shuffle: Boolean (whether to shuffle the training data before each epoch) or str (for 'batch'). 'batch' is a special option for dealing with the limitations of HDF5 data; it shuffles in batch-sized chunks. Has no effect when `steps_per_epoch` is not `None`. class_weight: Optional dictionary mapping class indices (integers) to a weight (float) value, used for weighting the loss function (during training only). This can be useful to tell the model to "pay more attention" to samples from an under-represented class. sample_weight: Optional Numpy array of weights for the training samples, used for weighting the loss function (during training only). 
You can either pass a flat (1D) Numpy array with the same length as the input samples (1:1 mapping between weights and samples), or in the case of temporal data, you can pass a 2D array with shape `(samples, sequence_length)`, to apply a different weight to every timestep of every sample. In this case you should make sure to specify `sample_weight_mode="temporal"` in `compile()`. initial_epoch: Integer. Epoch at which to start training (useful for resuming a previous training run). steps_per_epoch: Integer or `None`. Total number of steps (batches of samples) before declaring one epoch finished and starting the next epoch. When training with input tensors such as TensorFlow data tensors, the default `None` is equal to the number of samples in your dataset divided by the batch size, or 1 if that cannot be determined. validation_steps: Only relevant if `steps_per_epoch` is specified. Total number of steps (batches of samples) to validate before stopping. **kwargs: Used for backwards compatibility. Returns: A `History` object. Its `History.history` attribute is a record of training loss values and metrics values at successive epochs, as well as validation loss values and validation metrics values (if applicable). Raises: RuntimeError: If the model was never compiled. ValueError: In case of mismatch between the provided input data and what the model expects. """ # TODO(fchollet): this method may be creating reference cycles, which would # lead to accumulating garbage in memory when called in a loop. Investigate. # Backwards compatibility if batch_size is None and steps_per_epoch is None: batch_size = 32 # Legacy support if 'nb_epoch' in kwargs: logging.warning( 'The `nb_epoch` argument in `fit` ' 'has been renamed `epochs`.') epochs = kwargs.pop('nb_epoch') if kwargs: raise TypeError('Unrecognized keyword arguments: ' + str(kwargs)) if x is None and y is None and steps_per_epoch is None: raise ValueError('If fitting from data tensors, ' 'you should specify the `steps_per_epoch` ' 'argument.') # Validate user data. x, y, sample_weights = self._standardize_user_data( x, y, sample_weight=sample_weight, class_weight=class_weight, batch_size=batch_size) # Prepare validation data. if validation_data: if len(validation_data) == 2: val_x, val_y = validation_data # pylint: disable=unpacking-non-sequence val_sample_weight = None elif len(validation_data) == 3: val_x, val_y, val_sample_weight = validation_data # pylint: disable=unpacking-non-sequence else: raise ValueError( 'When passing validation_data, ' 'it must contain 2 (x_val, y_val) ' 'or 3 (x_val, y_val, val_sample_weights) ' 'items, however it contains %d items' % len(validation_data)) val_x, val_y, val_sample_weights = self._standardize_user_data( val_x, val_y, sample_weight=val_sample_weight, batch_size=batch_size) elif validation_split and 0. < validation_split < 1.: if hasattr(x[0], 'shape'): split_at = int(x[0].shape[0] * (1. - validation_split)) else: split_at = int(len(x[0]) * (1. 
- validation_split)) x, val_x = (slice_arrays(x, 0, split_at), slice_arrays(x, split_at)) y, val_y = (slice_arrays(y, 0, split_at), slice_arrays(y, split_at)) sample_weights, val_sample_weights = (slice_arrays( sample_weights, 0, split_at), slice_arrays(sample_weights, split_at)) elif validation_steps: val_x = [] val_y = [] val_sample_weights = [] else: val_x = None val_y = None val_sample_weights = None if context.executing_eagerly(): return training_eager.fit_loop( self, inputs=x, targets=y, sample_weights=sample_weights, batch_size=batch_size, epochs=epochs, verbose=verbose, callbacks=callbacks, val_inputs=val_x, val_targets=val_y, val_sample_weights=val_sample_weights, shuffle=shuffle, initial_epoch=initial_epoch, steps_per_epoch=steps_per_epoch, validation_steps=validation_steps) else: return training_arrays.fit_loop( self, x, y, sample_weights=sample_weights, batch_size=batch_size, epochs=epochs, verbose=verbose, callbacks=callbacks, val_inputs=val_x, val_targets=val_y, val_sample_weights=val_sample_weights, shuffle=shuffle, initial_epoch=initial_epoch, steps_per_epoch=steps_per_epoch, validation_steps=validation_steps) def evaluate(self, x=None, y=None, batch_size=None, verbose=1, sample_weight=None, steps=None): """Returns the loss value & metrics values for the model in test mode. Computation is done in batches. Arguments: x: Numpy array of test data (if the model has a single input), or list of Numpy arrays (if the model has multiple inputs). If input layers in the model are named, you can also pass a dictionary mapping input names to Numpy arrays. `x` can be `None` (default) if feeding from TensorFlow data tensors. y: Numpy array of target (label) data (if the model has a single output), or list of Numpy arrays (if the model has multiple outputs). If output layers in the model are named, you can also pass a dictionary mapping output names to Numpy arrays. `y` can be `None` (default) if feeding from TensorFlow data tensors. batch_size: Integer or `None`. Number of samples per evaluation step. If unspecified, `batch_size` will default to 32. verbose: 0 or 1. Verbosity mode. 0 = silent, 1 = progress bar. sample_weight: Optional Numpy array of weights for the test samples, used for weighting the loss function. You can either pass a flat (1D) Numpy array with the same length as the input samples (1:1 mapping between weights and samples), or in the case of temporal data, you can pass a 2D array with shape `(samples, sequence_length)`, to apply a different weight to every timestep of every sample. In this case you should make sure to specify `sample_weight_mode="temporal"` in `compile()`. steps: Integer or `None`. Total number of steps (batches of samples) before declaring the evaluation round finished. Ignored with the default value of `None`. Returns: Scalar test loss (if the model has a single output and no metrics) or list of scalars (if the model has multiple outputs and/or metrics). The attribute `model.metrics_names` will give you the display labels for the scalar outputs. Raises: ValueError: in case of invalid arguments. """ # Backwards compatibility. if batch_size is None and steps is None: batch_size = 32 if x is None and y is None and steps is None: raise ValueError('If evaluating from data tensors, ' 'you should specify the `steps` ' 'argument.') # Validate user data. 
x, y, sample_weights = self._standardize_user_data( x, y, sample_weight=sample_weight, batch_size=batch_size) if context.executing_eagerly(): return training_eager.test_loop( self, inputs=x, targets=y, sample_weights=sample_weights, batch_size=batch_size, verbose=verbose, steps=steps) else: return training_arrays.test_loop( self, inputs=x, targets=y, sample_weights=sample_weights, batch_size=batch_size, verbose=verbose, steps=steps) def predict(self, x, batch_size=None, verbose=0, steps=None): """Generates output predictions for the input samples. Computation is done in batches. Arguments: x: The input data, as a Numpy array (or list of Numpy arrays if the model has multiple outputs). batch_size: Integer. If unspecified, it will default to 32. verbose: Verbosity mode, 0 or 1. steps: Total number of steps (batches of samples) before declaring the prediction round finished. Ignored with the default value of `None`. Returns: Numpy array(s) of predictions. Raises: ValueError: In case of mismatch between the provided input data and the model's expectations, or in case a stateful model receives a number of samples that is not a multiple of the batch size. """ # Backwards compatibility. if batch_size is None and steps is None: batch_size = 32 if x is None and steps is None: raise ValueError('If predicting from data tensors, ' 'you should specify the `steps` ' 'argument.') x, _, _ = self._standardize_user_data(x) if context.executing_eagerly(): return training_eager.predict_loop( self, x, batch_size=batch_size, verbose=verbose, steps=steps) else: return training_arrays.predict_loop( self, x, batch_size=batch_size, verbose=verbose, steps=steps) def train_on_batch(self, x, y, sample_weight=None, class_weight=None): """Runs a single gradient update on a single batch of data. Arguments: x: Numpy array of training data, or list of Numpy arrays if the model has multiple inputs. If all inputs in the model are named, you can also pass a dictionary mapping input names to Numpy arrays. y: Numpy array of target data, or list of Numpy arrays if the model has multiple outputs. If all outputs in the model are named, you can also pass a dictionary mapping output names to Numpy arrays. sample_weight: Optional array of the same length as x, containing weights to apply to the model's loss for each sample. In the case of temporal data, you can pass a 2D array with shape (samples, sequence_length), to apply a different weight to every timestep of every sample. In this case you should make sure to specify sample_weight_mode="temporal" in compile(). class_weight: Optional dictionary mapping class indices (integers) to a weight (float) to apply to the model's loss for the samples from this class during training. This can be useful to tell the model to "pay more attention" to samples from an under-represented class. Returns: Scalar training loss (if the model has a single output and no metrics) or list of scalars (if the model has multiple outputs and/or metrics). The attribute `model.metrics_names` will give you the display labels for the scalar outputs. Raises: ValueError: In case of invalid user-provided arguments. 
""" x, y, sample_weights = self._standardize_user_data( x, y, sample_weight=sample_weight, class_weight=class_weight) if context.executing_eagerly(): outputs = training_eager.train_on_batch( self, x, y, sample_weights=sample_weights) else: if self.uses_learning_phase and not isinstance(K.learning_phase(), int): ins = x + y + sample_weights + [1] else: ins = x + y + sample_weights self._make_train_function() outputs = self.train_function(ins) if len(outputs) == 1: return outputs[0] return outputs def test_on_batch(self, x, y, sample_weight=None): """Test the model on a single batch of samples. Arguments: x: Numpy array of test data, or list of Numpy arrays if the model has multiple inputs. If all inputs in the model are named, you can also pass a dictionary mapping input names to Numpy arrays. y: Numpy array of target data, or list of Numpy arrays if the model has multiple outputs. If all outputs in the model are named, you can also pass a dictionary mapping output names to Numpy arrays. sample_weight: Optional array of the same length as x, containing weights to apply to the model's loss for each sample. In the case of temporal data, you can pass a 2D array with shape (samples, sequence_length), to apply a different weight to every timestep of every sample. In this case you should make sure to specify sample_weight_mode="temporal" in compile(). Returns: Scalar test loss (if the model has a single output and no metrics) or list of scalars (if the model has multiple outputs and/or metrics). The attribute `model.metrics_names` will give you the display labels for the scalar outputs. Raises: ValueError: In case of invalid user-provided arguments. """ x, y, sample_weights = self._standardize_user_data( x, y, sample_weight=sample_weight) if context.executing_eagerly(): outputs = training_eager.test_on_batch( self, x, y, sample_weights=sample_weights) else: if self.uses_learning_phase and not isinstance(K.learning_phase(), int): ins = x + y + sample_weights + [0] else: ins = x + y + sample_weights self._make_test_function() outputs = self.test_function(ins) if len(outputs) == 1: return outputs[0] return outputs def predict_on_batch(self, x): """Returns predictions for a single batch of samples. Arguments: x: Input samples, as a Numpy array. Returns: Numpy array(s) of predictions. """ x, _, _ = self._standardize_user_data(x) if context.executing_eagerly(): inputs = [ops.convert_to_tensor(val, dtype=K.floatx()) for val in x] return self(inputs) # pylint: disable=not-callable if not context.executing_eagerly(): if self.uses_learning_phase and not isinstance(K.learning_phase(), int): ins = x + [0] else: ins = x self._make_predict_function() outputs = self.predict_function(ins) if len(outputs) == 1: return outputs[0] return outputs def fit_generator(self, generator, steps_per_epoch=None, epochs=1, verbose=1, callbacks=None, validation_data=None, validation_steps=None, class_weight=None, max_queue_size=10, workers=1, use_multiprocessing=False, shuffle=True, initial_epoch=0): """Fits the model on data yielded batch-by-batch by a Python generator. The generator is run in parallel to the model, for efficiency. For instance, this allows you to do real-time data augmentation on images on CPU in parallel to training your model on GPU. The use of `keras.utils.Sequence` guarantees the ordering and guarantees the single use of every input per epoch when using `use_multiprocessing=True`. 
Arguments: generator: A generator or an instance of `Sequence` (`keras.utils.Sequence`) object in order to avoid duplicate data when using multiprocessing. The output of the generator must be either - a tuple `(inputs, targets)` - a tuple `(inputs, targets, sample_weights)`. This tuple (a single output of the generator) makes a single batch. Therefore, all arrays in this tuple must have the same length (equal to the size of this batch). Different batches may have different sizes. For example, the last batch of the epoch is commonly smaller than the others, if the size of the dataset is not divisible by the batch size. The generator is expected to loop over its data indefinitely. An epoch finishes when `steps_per_epoch` batches have been seen by the model. steps_per_epoch: Total number of steps (batches of samples) to yield from `generator` before declaring one epoch finished and starting the next epoch. It should typically be equal to the number of samples of your dataset divided by the batch size. Optional for `Sequence`: if unspecified, will use the `len(generator)` as a number of steps. epochs: Integer, total number of iterations on the data. verbose: Verbosity mode, 0, 1, or 2. callbacks: List of callbacks to be called during training. validation_data: This can be either - a generator for the validation data - a tuple (inputs, targets) - a tuple (inputs, targets, sample_weights). validation_steps: Only relevant if `validation_data` is a generator. Total number of steps (batches of samples) to yield from `generator` before stopping. Optional for `Sequence`: if unspecified, will use the `len(validation_data)` as a number of steps. class_weight: Dictionary mapping class indices to a weight for the class. max_queue_size: Integer. Maximum size for the generator queue. If unspecified, `max_queue_size` will default to 10. workers: Integer. Maximum number of processes to spin up when using process-based threading. If unspecified, `workers` will default to 1. If 0, will execute the generator on the main thread. use_multiprocessing: Boolean. If `True`, use process-based threading. If unspecified, `use_multiprocessing` will default to `False`. Note that because this implementation relies on multiprocessing, you should not pass non-picklable arguments to the generator as they can't be passed easily to children processes. shuffle: Boolean. Whether to shuffle the order of the batches at the beginning of each epoch. Only used with instances of `Sequence` (`keras.utils.Sequence`). Has no effect when `steps_per_epoch` is not `None`. initial_epoch: Epoch at which to start training (useful for resuming a previous training run) Returns: A `History` object. Example: ```python def generate_arrays_from_file(path): while 1: f = open(path) for line in f: # create numpy arrays of input data # and labels, from each line in the file x1, x2, y = process_line(line) yield ({'input_1': x1, 'input_2': x2}, {'output': y}) f.close() model.fit_generator(generate_arrays_from_file('/my_file.txt'), steps_per_epoch=10000, epochs=10) ``` Raises: ValueError: In case the generator yields data in an invalid format. 
""" if not self.built and not self._is_graph_network: raise NotImplementedError( '`fit_generator` is not yet enabled for unbuilt Model subclasses') return training_generator.fit_generator( self, generator, steps_per_epoch=steps_per_epoch, epochs=epochs, verbose=verbose, callbacks=callbacks, validation_data=validation_data, validation_steps=validation_steps, class_weight=class_weight, max_queue_size=max_queue_size, workers=workers, use_multiprocessing=use_multiprocessing, shuffle=shuffle, initial_epoch=initial_epoch) def evaluate_generator(self, generator, steps=None, max_queue_size=10, workers=1, use_multiprocessing=False): """Evaluates the model on a data generator. The generator should return the same kind of data as accepted by `test_on_batch`. Arguments: generator: Generator yielding tuples (inputs, targets) or (inputs, targets, sample_weights) or an instance of Sequence (keras.utils.Sequence) object in order to avoid duplicate data when using multiprocessing. steps: Total number of steps (batches of samples) to yield from `generator` before stopping. Optional for `Sequence`: if unspecified, will use the `len(generator)` as a number of steps. max_queue_size: maximum size for the generator queue workers: Integer. Maximum number of processes to spin up when using process-based threading. If unspecified, `workers` will default to 1. If 0, will execute the generator on the main thread. use_multiprocessing: Boolean. If `True`, use process-based threading. If unspecified, `use_multiprocessing` will default to `False`. Note that because this implementation relies on multiprocessing, you should not pass non-picklable arguments to the generator as they can't be passed easily to children processes. Returns: Scalar test loss (if the model has a single output and no metrics) or list of scalars (if the model has multiple outputs and/or metrics). The attribute `model.metrics_names` will give you the display labels for the scalar outputs. Raises: ValueError: in case of invalid arguments. Raises: ValueError: In case the generator yields data in an invalid format. """ if not self.built and not self._is_graph_network: raise NotImplementedError( '`evaluate_generator` is not yet enabled for ' 'unbuilt Model subclasses') return training_generator.evaluate_generator( self, generator, steps=steps, max_queue_size=max_queue_size, workers=workers, use_multiprocessing=use_multiprocessing) def predict_generator(self, generator, steps=None, max_queue_size=10, workers=1, use_multiprocessing=False, verbose=0): """Generates predictions for the input samples from a data generator. The generator should return the same kind of data as accepted by `predict_on_batch`. Arguments: generator: Generator yielding batches of input samples or an instance of Sequence (keras.utils.Sequence) object in order to avoid duplicate data when using multiprocessing. steps: Total number of steps (batches of samples) to yield from `generator` before stopping. Optional for `Sequence`: if unspecified, will use the `len(generator)` as a number of steps. max_queue_size: Maximum size for the generator queue. workers: Integer. Maximum number of processes to spin up when using process-based threading. If unspecified, `workers` will default to 1. If 0, will execute the generator on the main thread. use_multiprocessing: Boolean. If `True`, use process-based threading. If unspecified, `use_multiprocessing` will default to `False`. 
Note that because this implementation relies on multiprocessing, you should not pass non-picklable arguments to the generator as they can't be passed easily to children processes. verbose: verbosity mode, 0 or 1. Returns: Numpy array(s) of predictions. Raises: ValueError: In case the generator yields data in an invalid format. """ if not self.built and not self._is_graph_network: raise NotImplementedError( '`predict_generator` is not yet enabled for unbuilt Model subclasses') return training_generator.predict_generator( self, generator, steps=steps, max_queue_size=max_queue_size, workers=workers, use_multiprocessing=use_multiprocessing, verbose=verbose)
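# Illustrative usage sketch (not part of the original module): the Numpy-array
# workflow described in the `fit`, `evaluate` and `predict` docstrings above,
# shown end to end. The model architecture, layer sizes and data shapes are
# hypothetical placeholders, not something this module prescribes.
#
#     import numpy as np
#     from tensorflow import keras
#
#     model = keras.Sequential([
#         keras.layers.Dense(32, activation='relu', input_shape=(10,)),
#         keras.layers.Dense(1),
#     ])
#     model.compile(optimizer='sgd', loss='mse')
#
#     x = np.random.random((100, 10))
#     y = np.random.random((100, 1))
#
#     # batch_size defaults to 32 when neither batch_size nor steps is given.
#     history = model.fit(x, y, epochs=2, validation_split=0.2, verbose=0)
#     loss = model.evaluate(x, y, verbose=0)
#     predictions = model.predict(x)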
apache-2.0
awkspace/ansible
lib/ansible/plugins/callback/slack.py
40
8260
# (C) 2014-2015, Matt Martz <[email protected]> # (C) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type DOCUMENTATION = ''' callback: slack callback_type: notification requirements: - whitelist in configuration - prettytable (python library) short_description: Sends play events to a Slack channel version_added: "2.1" description: - This is an ansible callback plugin that sends status updates to a Slack channel during playbook execution. - Before 2.4 only environment variables were available for configuring this plugin options: webhook_url: required: True description: Slack Webhook URL env: - name: SLACK_WEBHOOK_URL ini: - section: callback_slack key: webhook_url channel: default: "#ansible" description: Slack room to post in. env: - name: SLACK_CHANNEL ini: - section: callback_slack key: channel username: description: Username to post as. env: - name: SLACK_USERNAME default: ansible ini: - section: callback_slack key: username validate_certs: description: validate the SSL certificate of the Slack server. (For HTTPS URLs) version_added: "2.8" env: - name: SLACK_VALIDATE_CERTS ini: - section: callback_slack key: validate_certs default: True type: bool ''' import json import os import uuid from ansible import context from ansible.module_utils._text import to_text from ansible.module_utils.urls import open_url from ansible.plugins.callback import CallbackBase try: import prettytable HAS_PRETTYTABLE = True except ImportError: HAS_PRETTYTABLE = False class CallbackModule(CallbackBase): """This is an ansible callback plugin that sends status updates to a Slack channel during playbook execution. """ CALLBACK_VERSION = 2.0 CALLBACK_TYPE = 'notification' CALLBACK_NAME = 'slack' CALLBACK_NEEDS_WHITELIST = True def __init__(self, display=None): super(CallbackModule, self).__init__(display=display) if not HAS_PRETTYTABLE: self.disabled = True self._display.warning('The `prettytable` python module is not ' 'installed. Disabling the Slack callback ' 'plugin.') self.playbook_name = None # This is a 6 character identifier provided with each message # This makes it easier to correlate messages when there are more # than 1 simultaneous playbooks running self.guid = uuid.uuid4().hex[:6] def set_options(self, task_keys=None, var_options=None, direct=None): super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct) self.webhook_url = self.get_option('webhook_url') self.channel = self.get_option('channel') self.username = self.get_option('username') self.show_invocation = (self._display.verbosity > 1) self.validate_certs = self.get_option('validate_certs') if self.webhook_url is None: self.disabled = True self._display.warning('Slack Webhook URL was not provided. 
The ' 'Slack Webhook URL can be provided using ' 'the `SLACK_WEBHOOK_URL` environment ' 'variable.') def send_msg(self, attachments): headers = { 'Content-type': 'application/json', } payload = { 'channel': self.channel, 'username': self.username, 'attachments': attachments, 'parse': 'none', 'icon_url': ('http://cdn2.hubspot.net/hub/330046/' 'file-449187601-png/ansible_badge.png'), } data = json.dumps(payload) self._display.debug(data) self._display.debug(self.webhook_url) try: response = open_url(self.webhook_url, data=data, validate_certs=self.validate_certs, headers=headers) return response.read() except Exception as e: self._display.warning(u'Could not submit message to Slack: %s' % to_text(e)) def v2_playbook_on_start(self, playbook): self.playbook_name = os.path.basename(playbook._file_name) title = [ '*Playbook initiated* (_%s_)' % self.guid ] invocation_items = [] if context.CLIARGS and self.show_invocation: tags = context.CLIARGS['tags'] skip_tags = context.CLIARGS['skip_tags'] extra_vars = context.CLIARGS['extra_vars'] subset = context.CLIARGS['subset'] inventory = [os.path.abspath(i) for i in context.CLIARGS['inventory']] invocation_items.append('Inventory: %s' % ', '.join(inventory)) if tags and tags != ['all']: invocation_items.append('Tags: %s' % ', '.join(tags)) if skip_tags: invocation_items.append('Skip Tags: %s' % ', '.join(skip_tags)) if subset: invocation_items.append('Limit: %s' % subset) if extra_vars: invocation_items.append('Extra Vars: %s' % ' '.join(extra_vars)) title.append('by *%s*' % context.CLIARGS['remote_user']) title.append('\n\n*%s*' % self.playbook_name) msg_items = [' '.join(title)] if invocation_items: msg_items.append('```\n%s\n```' % '\n'.join(invocation_items)) msg = '\n'.join(msg_items) attachments = [{ 'fallback': msg, 'fields': [ { 'value': msg } ], 'color': 'warning', 'mrkdwn_in': ['text', 'fallback', 'fields'], }] self.send_msg(attachments=attachments) def v2_playbook_on_play_start(self, play): """Display Play start messages""" name = play.name or 'Play name not specified (%s)' % play._uuid msg = '*Starting play* (_%s_)\n\n*%s*' % (self.guid, name) attachments = [ { 'fallback': msg, 'text': msg, 'color': 'warning', 'mrkdwn_in': ['text', 'fallback', 'fields'], } ] self.send_msg(attachments=attachments) def v2_playbook_on_stats(self, stats): """Display info about playbook statistics""" hosts = sorted(stats.processed.keys()) t = prettytable.PrettyTable(['Host', 'Ok', 'Changed', 'Unreachable', 'Failures', 'Rescued', 'Ignored']) failures = False unreachable = False for h in hosts: s = stats.summarize(h) if s['failures'] > 0: failures = True if s['unreachable'] > 0: unreachable = True t.add_row([h] + [s[k] for k in ['ok', 'changed', 'unreachable', 'failures', 'rescued', 'ignored']]) attachments = [] msg_items = [ '*Playbook Complete* (_%s_)' % self.guid ] if failures or unreachable: color = 'danger' msg_items.append('\n*Failed!*') else: color = 'good' msg_items.append('\n*Success!*') msg_items.append('```\n%s\n```' % t) msg = '\n'.join(msg_items) attachments.append({ 'fallback': msg, 'fields': [ { 'value': msg } ], 'color': color, 'mrkdwn_in': ['text', 'fallback', 'fields'] }) self.send_msg(attachments=attachments)
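# Illustrative configuration sketch (not part of the original plugin), based on
# the DOCUMENTATION block above. The webhook URL is a placeholder, and
# `callback_whitelist` is the usual way to whitelist notification callbacks in
# this generation of Ansible.
#
#     # ansible.cfg
#     [defaults]
#     callback_whitelist = slack
#
#     [callback_slack]
#     webhook_url = https://hooks.slack.com/services/XXX/YYY/ZZZ
#     username = ansible
#     validate_certs = True
#     # channel defaults to #ansible; the documented environment variables
#     # (SLACK_WEBHOOK_URL, SLACK_CHANNEL, SLACK_USERNAME, SLACK_VALIDATE_CERTS)
#     # can be used instead of the ini keys.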
gpl-3.0
annahs/atmos_research
WHI_long_term_2min_data_to_db.py
1
8596
import sys import os import numpy as np from pprint import pprint from datetime import datetime from datetime import timedelta import mysql.connector import math import calendar import matplotlib.pyplot as plt import matplotlib.cm as cm from matplotlib import dates start = datetime(2009,7,15,4) #2009 - 20090628 2010 - 20100610 2012 - 20100405 end = datetime(2009,8,17) #2009 - 20090816 2010 - 20100726 2012 - 20100601 timestep = 6.#1./30 #hours sample_min = 117 #117 for all 2009-2012 sample_max = 123 #123 for all 2009-2012 yag_min = 3.8 #3.8 for all 2009-2012 yag_max = 6 #6 for all 2009-2012 BC_VED_min = 70 BC_VED_max = 220 min_scat_pkht = 20 mass_min = ((BC_VED_min/(10.**7))**3)*(math.pi/6.)*1.8*(10.**15) mass_max = ((BC_VED_max/(10.**7))**3)*(math.pi/6.)*1.8*(10.**15) lag_threshold_2009 = 0.1 lag_threshold_2010 = 0.25 lag_threshold_2012 = 1.5 print 'mass limits', mass_min, mass_max cnx = mysql.connector.connect(user='root', password='Suresh15', host='localhost', database='black_carbon') cursor = cnx.cursor() def check_spike_times(particle_start_time,particle_end_time): cursor.execute('''SELECT count(*) FROM whi_spike_times_2009to2012 WHERE (spike_start_UTC <= %s AND spike_end_UTC > %s) OR (spike_start_UTC <= %s AND spike_end_UTC > %s) ''', (particle_start_time,particle_start_time,particle_end_time,particle_end_time)) spike_count = cursor.fetchall()[0][0] return spike_count def get_hysplit_id(particle_start_time): cursor.execute('''SELECT id FROM whi_hysplit_hourly_data WHERE (UNIX_UTC_start_time <= %s AND UNIX_UTC_end_time > %s) ''', (particle_start_time,particle_start_time)) hy_id_list = cursor.fetchall() if hy_id_list == []: hy_id = None else: hy_id = hy_id_list[0][0] return hy_id def get_met_info(particle_start_time): cursor.execute('''SELECT id,pressure_Pa,room_temp_C FROM whi_sampling_conditions WHERE (UNIX_UTC_start_time <= %s AND UNIX_UTC_end_time > %s) ''', (particle_start_time,particle_start_time)) met_list = cursor.fetchall() if met_list == []: met_list = [[np.nan,np.nan,np.nan]] return met_list[0] def get_gc_id(particle_start_time): cursor.execute('''SELECT id FROM whi_gc_hourly_bc_data WHERE (UNIX_UTC_start_time <= %s AND UNIX_UTC_end_time > %s) ''', (particle_start_time,particle_start_time)) gc_id_list = cursor.fetchall() if gc_id_list == []: gc_id = None else: gc_id = gc_id_list[0][0] return gc_id def get_sample_factor(UNIX_start): date_time = datetime.utcfromtimestamp(UNIX_start) sample_factors_2012 = [ [datetime(2012,4,4,19,43,4), datetime(2012,4,5,13,47,9), 3.0], [datetime(2012,4,5,13,47,9), datetime(2012,4,10,3,3,25), 1.0], [datetime(2012,4,10,3,3,25), datetime(2012,5,16,6,9,13), 3.0], [datetime(2012,5,16,6,9,13), datetime(2012,6,7,18,14,39), 10.0], ] if date_time.year in [2009,2010]: sample_factor = 1.0 if date_time.year == 2012: for date_range in sample_factors_2012: start_date = date_range[0] end_date = date_range[1] range_sample_factor = date_range[2] if start_date<= date_time < end_date: sample_factor = range_sample_factor return sample_factor def lag_time_calc(BB_incand_pk_pos,BB_scat_pk_pos): long_lags = 0 short_lags = 0 lag_time = np.nan if (-10 < lag_time < 10): lag_time = (BB_incand_pk_pos-BB_scat_pk_pos)*0.2 #us if start_dt.year == 2009 and lag_time > lag_threshold_2009: long_lags = 1 elif start_dt.year == 2010 and lag_time > lag_threshold_2010: long_lags = 1 elif start_dt.year == 2012 and lag_time > lag_threshold_2012: long_lags = 1 else: short_lags = 1 return [lag_time,long_lags,short_lags] #query to add 1h mass conc data add_data = ('''INSERT INTO 
whi_sp2_2min_data (UNIX_UTC_start_time,UNIX_UTC_end_time,number_particles,rBC_mass_conc,rBC_mass_conc_err,volume_air_sampled,sampling_duration,mean_lag_time,sample_factor,hysplit_hourly_id,whi_sampling_cond_id,gc_hourly_id) VALUES (%(UNIX_UTC_start_time)s,%(UNIX_UTC_end_time)s,%(number_particles)s,%(rBC_mass_conc)s,%(rBC_mass_conc_err)s,%(volume_air_sampled)s,%(sampling_duration)s,%(mean_lag_time)s,%(sample_factor)s,%(hysplit_hourly_id)s,%(whi_sampling_cond_id)s,%(gc_hourly_id)s)''' ) # multiple_records = [] i=1 while start <= end: long_lags = 0 short_lags = 0 if (4 <= start.hour < 16): UNIX_start = calendar.timegm(start.utctimetuple()) UNIX_end = UNIX_start + timestep*3600.0 print start, UNIX_start+60 print datetime.utcfromtimestamp(UNIX_end) #filter on hk data here cursor.execute('''(SELECT mn.UNIX_UTC_ts_int_start, mn.UNIX_UTC_ts_int_end, mn.rBC_mass_fg_BBHG, mn.rBC_mass_fg_BBHG_err, mn.BB_incand_pk_pos, mn.BB_scat_pk_pos, mn.BB_scat_pkht, hk.sample_flow, mn.BB_incand_HG FROM whi_sp2_particle_data mn FORCE INDEX (hourly_binning) JOIN whi_hk_data hk on mn.HK_id = hk.id WHERE mn.UNIX_UTC_ts_int_start >= %s AND mn.UNIX_UTC_ts_int_end < %s AND hk.sample_flow >= %s AND hk.sample_flow < %s AND hk.yag_power >= %s AND hk.yag_power < %s)''', (UNIX_start,UNIX_end,sample_min,sample_max,yag_min,yag_max)) ind_data = cursor.fetchall() data={ 'rBC_mass_fg':[], 'rBC_mass_fg_err':[], 'lag_time':[] } total_sample_vol = 0 for row in ind_data: ind_start_time = float(row[0]) ind_end_time = float(row[1]) bbhg_mass_corr11 = float(row[2]) bbhg_mass_corr_err = float(row[3]) BB_incand_pk_pos = float(row[4]) BB_scat_pk_pos = float(row[5]) BB_scat_pk_ht = float(row[6]) sample_flow = float(row[7]) #in vccm incand_pkht = float(row[8]) #filter spike times here if check_spike_times(ind_start_time,ind_end_time): print 'spike' continue #skip the long interval if (ind_end_time - ind_start_time) > 540: print 'long interval' continue #skip if no sample flow if sample_flow == None: print 'no flow' continue #get sampling conditions id and met conditions met_data = get_met_info(UNIX_start) met_id = met_data[0] pressure = met_data[1] temperature = met_data[2]+273.15 correction_factor_for_STP = (273*pressure)/(101325*temperature) sample_vol = (sample_flow*(ind_end_time-ind_start_time)/60)*correction_factor_for_STP #/60 b/c sccm and time in secs total_sample_vol = total_sample_vol + sample_vol bbhg_mass_corr = 0.01244+0.0172*incand_pkht if (mass_min <= bbhg_mass_corr < mass_max): #get sample factor sample_factor = get_sample_factor(UNIX_start) data['rBC_mass_fg'].append(bbhg_mass_corr*sample_factor) data['rBC_mass_fg_err'].append(bbhg_mass_corr_err) #only calc lag time if there is a scattering signal if BB_scat_pk_ht > min_scat_pkht: lags = lag_time_calc(BB_incand_pk_pos,BB_scat_pk_pos) data['lag_time'].append(lags[0]) long_lags += lags[1] short_lags += lags[2] tot_rBC_mass_fg = sum(data['rBC_mass_fg']) tot_rBC_mass_uncer = sum(data['rBC_mass_fg_err']) rBC_number = len(data['rBC_mass_fg']) mean_lag = float(np.mean(data['lag_time'])) if np.isnan(mean_lag): mean_lag = None #get hysplit_id hysplit_id = None #get_hysplit_id(UNIX_start) #get GC id gc_id = None #get_gc_id(UNIX_start) if total_sample_vol != 0: mass_conc = (tot_rBC_mass_fg/total_sample_vol) mass_conc_uncer = (tot_rBC_mass_uncer/total_sample_vol) #add to db single_record = { 'UNIX_UTC_start_time' :UNIX_start, 'UNIX_UTC_end_time' :UNIX_end, 'number_particles' :rBC_number, 'rBC_mass_conc' :mass_conc, 'rBC_mass_conc_err' :mass_conc_uncer, 'volume_air_sampled' 
:total_sample_vol, 'sampling_duration' :(total_sample_vol/2), 'mean_lag_time' :mean_lag, 'number_long_lag' :long_lags, 'number_short_lag' :short_lags, 'sample_factor' :sample_factor, 'hysplit_hourly_id' :hysplit_id, 'whi_sampling_cond_id' :met_id, 'gc_hourly_id' :gc_id, } multiple_records.append((single_record)) #bulk insert to db table if i%1 == 0: cursor.executemany(add_data, multiple_records) cnx.commit() multiple_records = [] #increment count i+= 1 start += timedelta(hours = timestep) #bulk insert of remaining records to db if multiple_records != []: cursor.executemany(add_data, multiple_records) cnx.commit() multiple_records = [] cnx.close()
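# Illustrative worked example of the STP volume correction used above (the
# numbers are hypothetical, not taken from the WHI data):
#     correction_factor_for_STP = (273 * pressure) / (101325 * temperature)
# For pressure = 90000 Pa and room_temp_C = 20 (i.e. 293.15 K):
#     factor = (273 * 90000) / (101325 * 293.15) ~= 0.83
# so a 2 s particle interval sampled at a 120 vccm sample flow contributes about
#     sample_vol = (120 * 2 / 60) * 0.83 ~= 3.3 cm^3 of air at STP.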
mit
turicas/outputty
tests/test_Table_html.py
2
5790
#!/usr/bin/env python # coding: utf-8 # Copyright 2011 Álvaro Justen # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import unittest import tempfile import os from textwrap import dedent from outputty import Table class TestTableHtml(unittest.TestCase): def test_to_html_should_without_parameters_should_return_string(self): my_table = Table(headers=['ham', 'spam', 'eggs']) self.assertTrue(isinstance(my_table.write('html'), str)) def test_to_html_with_only_headers(self): my_table = Table(headers=['ham', 'spam', 'eggs', 'blah']) output = my_table.write('html', css_classes=False) expected = dedent(''' <table> <thead> <tr> <th>ham</th> <th>spam</th> <th>eggs</th> <th>blah</th> </tr> </thead> </table> ''').strip() self.assertEquals(output, expected) def test_to_html_with_headers_and_some_rows(self): my_table = Table(headers=['ham', 'spam', 'eggs']) my_table.append(['python', 'rules', '!']) my_table.append({'ham': 'spam', 'spam': 'eggs', 'eggs': 'ham'}) output = my_table.write('html', css_classes=False) expected = dedent(''' <table> <thead> <tr> <th>ham</th> <th>spam</th> <th>eggs</th> </tr> </thead> <tbody> <tr> <td>python</td> <td>rules</td> <td>!</td> </tr> <tr> <td>spam</td> <td>eggs</td> <td>ham</td> </tr> </tbody> </table> ''').strip() self.assertEquals(output, expected) def test_to_html_with_headers_and_rows_with_some_columns_empty(self): my_table = Table(headers=['ham', 'spam', 'eggs']) my_table.append({'ham': 'spam'}) my_table.append({'spam': 'eggs'}) my_table.append({'eggs': 'ham'}) output = my_table.write('html', css_classes=False) expected = dedent(''' <table> <thead> <tr> <th>ham</th> <th>spam</th> <th>eggs</th> </tr> </thead> <tbody> <tr> <td>spam</td> <td></td> <td></td> </tr> <tr> <td></td> <td>eggs</td> <td></td> </tr> <tr> <td></td> <td></td> <td>ham</td> </tr> </tbody> </table> ''').strip() self.assertEquals(output, expected) def test_to_html_with_a_parameter_should_save_a_file(self): temp_fp = tempfile.NamedTemporaryFile(delete=False) temp_fp.close() my_table = Table(headers=['ham', 'spam', 'eggs']) my_table.append(['python', 'rules', '!']) my_table.append({'ham': 'spam', 'spam': 'eggs', 'eggs': 'ham'}) my_table.write('html', temp_fp.name, css_classes=False) temp_fp = open(temp_fp.name) output = temp_fp.read() temp_fp.close() os.remove(temp_fp.name) expected = dedent(''' <table> <thead> <tr> <th>ham</th> <th>spam</th> <th>eggs</th> </tr> </thead> <tbody> <tr> <td>python</td> <td>rules</td> <td>!</td> </tr> <tr> <td>spam</td> <td>eggs</td> <td>ham</td> </tr> </tbody> </table> ''').strip() self.assertEquals(output, expected) def test_to_html_should_create_CSS_classes_for_odd_and_even_rows(self): my_table = Table(headers=['ham', 'spam', 'eggs']) my_table.append(['python', 'rules', '!']) my_table.append({'ham': 'spam', 'spam': 'eggs', 'eggs': 'ham'}) my_table.append(['python', 'rules', '!']) my_table.append({'ham': 'spam', 'spam': 'eggs', 'eggs': 'ham'}) output = my_table.write('html', css_classes=True) 
expected = dedent(''' <table> <thead> <tr class="header"> <th>ham</th> <th>spam</th> <th>eggs</th> </tr> </thead> <tbody> <tr class="odd"> <td>python</td> <td>rules</td> <td>!</td> </tr> <tr class="even"> <td>spam</td> <td>eggs</td> <td>ham</td> </tr> <tr class="odd"> <td>python</td> <td>rules</td> <td>!</td> </tr> <tr class="even"> <td>spam</td> <td>eggs</td> <td>ham</td> </tr> </tbody> </table> ''').strip() self.assertEquals(output, expected) #TODO: test input and output encoding
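# Illustrative usage sketch (not part of the original test module): the
# behaviour exercised by the tests above, condensed into a runnable example.
if __name__ == '__main__':
    my_table = Table(headers=['ham', 'spam', 'eggs'])
    my_table.append(['python', 'rules', '!'])
    my_table.append({'ham': 'spam', 'spam': 'eggs', 'eggs': 'ham'})
    # write('html') returns the markup as a string; passing a filename as the
    # second argument saves it to disk, and css_classes=True tags the header
    # and odd/even rows with CSS classes.
    print(my_table.write('html', css_classes=False))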
gpl-3.0
varunarya10/tempest
tempest/tests/fake_http.py
42
2411
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import copy

import httplib2


class fake_httplib2(object):

    def __init__(self, return_type=None, *args, **kwargs):
        self.return_type = return_type

    def request(self, uri, method="GET", body=None, headers=None,
                redirections=5, connection_type=None):
        if not self.return_type:
            fake_headers = httplib2.Response(headers)
            return_obj = {
                'uri': uri,
                'method': method,
                'body': body,
                'headers': headers
            }
            return (fake_headers, return_obj)
        elif isinstance(self.return_type, int):
            body = "fake_body"
            header_info = {
                'content-type': 'text/plain',
                'status': str(self.return_type),
                'content-length': len(body)
            }
            resp_header = httplib2.Response(header_info)
            return (resp_header, body)
        else:
            msg = "unsupported return type %s" % self.return_type
            raise TypeError(msg)


class fake_httplib(object):

    def __init__(self, headers, body=None,
                 version=1.0, status=200, reason="Ok"):
        """
        :param headers: dict representing HTTP response headers
        :param body: file-like object
        :param version: HTTP Version
        :param status: Response status code
        :param reason: Status code related message.
        """
        self.body = body
        self.status = status
        self.reason = reason
        self.version = version
        self.headers = headers

    def getheaders(self):
        return copy.deepcopy(self.headers).items()

    def getheader(self, key, default):
        return self.headers.get(key, default)

    def read(self, amt):
        return self.body.read(amt)
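# Illustrative usage sketch (not part of the original module), assuming the
# Python 2 / httplib2 environment this module targets.
if __name__ == '__main__':
    # With no return_type, the request arguments are echoed back as a dict.
    fake = fake_httplib2()
    _, echoed = fake.request('http://example.test/v1', method='POST',
                             body='payload', headers={'status': '200'})
    assert echoed['method'] == 'POST'

    # With an integer return_type, a canned response of that status is built.
    fake_404 = fake_httplib2(return_type=404)
    resp, body = fake_404.request('http://example.test/v1')
    assert body == 'fake_body'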
apache-2.0
Saicheg/omim
3party/Alohalytics/tests/googletest/xcode/Scripts/versiongenerate.py
3088
4536
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""A script to prepare version information for use in the gtest Info.plist file.

  This script extracts the version information from the configure.ac file and
  uses it to generate a header file containing the same information. The
  #defines in this header file will be included during the generation of
  the Info.plist of the framework, giving the correct value to the version
  shown in the Finder.

  This script makes the following assumptions (these are faults of the script,
  not problems with the Autoconf):
    1. The AC_INIT macro will be contained within the first 1024 characters
       of configure.ac.
    2. The version string will be 3 integers separated by periods and will be
       surrounded by square brackets, "[" and "]" (e.g. [1.0.1]). The first
       segment represents the major version, the second represents the minor
       version and the third represents the fix version.
    3. No ")" character exists between the opening "(" and closing ")" of
       AC_INIT, including in comments and character strings.
"""

import sys
import re

# Read the command line arguments (the input directory holding configure.ac
# and the output directory for Version.h).
if (len(sys.argv) < 3):
  print "Usage: versiongenerate.py input_dir output_dir"
  sys.exit(1)
else:
  input_dir = sys.argv[1]
  output_dir = sys.argv[2]

# Read the first 1024 characters of the configure.ac file.
config_file = open("%s/configure.ac" % input_dir, 'r')
buffer_size = 1024
opening_string = config_file.read(buffer_size)
config_file.close()

# Extract the version string from the AC_INIT macro.
#   The following version_expression means:
#     Extract three integers separated by periods and surrounded by square
#     brackets (e.g. "[1.0.1]") between "AC_INIT(" and ")". Do not be greedy
#     (*? is the non-greedy flag) since that would pull in everything between
#     the first "(" and the last ")" in the file.
version_expression = re.compile(r"AC_INIT\(.*?\[(\d+)\.(\d+)\.(\d+)\].*?\)",
                                re.DOTALL)
version_values = version_expression.search(opening_string)
major_version = version_values.group(1)
minor_version = version_values.group(2)
fix_version = version_values.group(3)

# Write the version information to a header file to be included in the
# Info.plist file.
file_data = """//
// DO NOT MODIFY THIS FILE (but you can delete it)
//
// This file is autogenerated by the versiongenerate.py script. This script
// is executed in a "Run Script" build phase when creating gtest.framework. This
// header file is not used during compilation of C-source. Rather, it simply
// defines some version strings for substitution in the Info.plist. Because of
// this, we are not restricted to C-syntax nor are we using include guards.
//

#define GTEST_VERSIONINFO_SHORT %s.%s
#define GTEST_VERSIONINFO_LONG %s.%s.%s

""" % (major_version, minor_version, major_version, minor_version, fix_version)

version_file = open("%s/Version.h" % output_dir, 'w')
version_file.write(file_data)
version_file.close()
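# Illustrative invocation (not part of the original script; paths are
# placeholders). Given a gtest source tree whose configure.ac contains, e.g.,
#     AC_INIT([Google C++ Testing Framework], [1.0.1], ...)
# running
#     python versiongenerate.py /path/to/gtest-src /path/to/build-output
# writes /path/to/build-output/Version.h defining GTEST_VERSIONINFO_SHORT 1.0
# and GTEST_VERSIONINFO_LONG 1.0.1 for substitution into the Info.plist.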
apache-2.0
inviwo/inviwo
data/scripts/matplotlib_create_transferfunction.py
2
1270
# Inviwo Python script
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import inviwopy
from inviwopy.glm import vec2, vec3, vec4

# http://matplotlib.org/examples/color/colormaps_reference.html
# Perceptually Uniform Sequential :
# ['viridis', 'inferno', 'plasma', 'magma']
# Sequential :
# ['Blues', 'BuGn', 'BuPu', 'GnBu', 'Greens', 'Greys', 'Oranges', 'OrRd', 'PuBu', 'PuBuGn', 'PuRd', 'Purples', 'RdPu', 'Reds', 'YlGn', 'YlGnBu', 'YlOrBr', 'YlOrRd']
# Diverging :
# ['afmhot', 'autumn', 'bone', 'cool', 'copper', 'gist_heat', 'gray', 'hot', 'pink', 'spring', 'summer', 'winter']
# Qualitative :
# ['BrBG', 'bwr', 'coolwarm', 'PiYG', 'PRGn', 'PuOr', 'RdBu', 'RdGy', 'RdYlBu', 'RdYlGn', 'Spectral', 'seismic']
# Miscellaneous :
# ['Accent', 'Dark2', 'Paired', 'Pastel1', 'Pastel2', 'Set1', 'Set2', 'Set3']
# Sequential :
# ['gist_earth', 'terrain', 'ocean', 'gist_stern', 'brg', 'CMRmap', 'cubehelix', 'gnuplot', 'gnuplot2', 'gist_ncar', 'nipy_spectral', 'jet', 'rainbow', 'gist_rainbow', 'hsv', 'flag', 'prism']

tf = inviwopy.app.network.VolumeRaycaster.transferFunction
tf.clear()

cmapName = "viridis"
cmap = plt.get_cmap(cmapName)

N = 128
for i in range(0, N, 1):
    x = i / (N - 1)
    a = 1.0
    color = cmap(x)
    tf.add(x, vec4(color[0], color[1], color[2], a))
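# Illustrative refactor sketch (not part of the original script): the same loop
# wrapped in a helper, so any colormap name listed above can be applied to a
# transfer function property. The function name and defaults are ad hoc.
def apply_colormap(tf_property, cmap_name="viridis", n_points=128, alpha=1.0):
    colormap = plt.get_cmap(cmap_name)
    tf_property.clear()
    for i in range(n_points):
        x = i / (n_points - 1)
        r, g, b, _ = colormap(x)
        tf_property.add(x, vec4(r, g, b, alpha))

# e.g. apply_colormap(tf, "magma")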
bsd-2-clause
Codefans-fan/odoo
openerp/addons/base/tests/test_orm.py
20
17911
from collections import defaultdict from openerp.tools import mute_logger from openerp.tests import common UID = common.ADMIN_USER_ID DB = common.DB class TestORM(common.TransactionCase): """ test special behaviors of ORM CRUD functions TODO: use real Exceptions types instead of Exception """ def setUp(self): super(TestORM, self).setUp() cr, uid = self.cr, self.uid self.partner = self.registry('res.partner') self.users = self.registry('res.users') self.p1 = self.partner.name_create(cr, uid, 'W')[0] self.p2 = self.partner.name_create(cr, uid, 'Y')[0] self.ir_rule = self.registry('ir.rule') # sample unprivileged user employee_gid = self.ref('base.group_user') self.uid2 = self.users.create(cr, uid, {'name': 'test user', 'login': 'test', 'groups_id': [4,employee_gid]}) @mute_logger('openerp.models') def testAccessDeletedRecords(self): """ Verify that accessing deleted records works as expected """ cr, uid, uid2, p1, p2 = self.cr, self.uid, self.uid2, self.p1, self.p2 self.partner.unlink(cr, uid, [p1]) # read() is expected to skip deleted records because our API is not # transactional for a sequence of search()->read() performed from the # client-side... a concurrent deletion could therefore cause spurious # exceptions even when simply opening a list view! # /!\ Using unprileged user to detect former side effects of ir.rules! self.assertEqual([{'id': p2, 'name': 'Y'}], self.partner.read(cr, uid2, [p1,p2], ['name']), "read() should skip deleted records") self.assertEqual([], self.partner.read(cr, uid2, [p1], ['name']), "read() should skip deleted records") # Deleting an already deleted record should be simply ignored self.assertTrue(self.partner.unlink(cr, uid, [p1]), "Re-deleting should be a no-op") # Updating an already deleted record should raise, even as admin with self.assertRaises(Exception): self.partner.write(cr, uid, [p1], {'name': 'foo'}) @mute_logger('openerp.models') def testAccessFilteredRecords(self): """ Verify that accessing filtered records works as expected for non-admin user """ cr, uid, uid2, p1, p2 = self.cr, self.uid, self.uid2, self.p1, self.p2 partner_model = self.registry('ir.model').search(cr, uid, [('model','=','res.partner')])[0] self.ir_rule.create(cr, uid, {'name': 'Y is invisible', 'domain_force': [('id', '!=', p1)], 'model_id': partner_model}) # search as unprivileged user partners = self.partner.search(cr, uid2, []) self.assertFalse(p1 in partners, "W should not be visible...") self.assertTrue(p2 in partners, "... 
but Y should be visible") # read as unprivileged user with self.assertRaises(Exception): self.partner.read(cr, uid2, [p1], ['name']) # write as unprivileged user with self.assertRaises(Exception): self.partner.write(cr, uid2, [p1], {'name': 'foo'}) # unlink as unprivileged user with self.assertRaises(Exception): self.partner.unlink(cr, uid2, [p1]) # Prepare mixed case self.partner.unlink(cr, uid, [p2]) # read mixed records: some deleted and some filtered with self.assertRaises(Exception): self.partner.read(cr, uid2, [p1,p2], ['name']) # delete mixed records: some deleted and some filtered with self.assertRaises(Exception): self.partner.unlink(cr, uid2, [p1,p2]) def test_multi_read(self): record_id = self.partner.create(self.cr, UID, {'name': 'MyPartner1'}) records = self.partner.read(self.cr, UID, [record_id]) self.assertIsInstance(records, list) def test_one_read(self): record_id = self.partner.create(self.cr, UID, {'name': 'MyPartner1'}) record = self.partner.read(self.cr, UID, record_id) self.assertIsInstance(record, dict) @mute_logger('openerp.models') def test_search_read(self): # simple search_read self.partner.create(self.cr, UID, {'name': 'MyPartner1'}) found = self.partner.search_read(self.cr, UID, [['name', '=', 'MyPartner1']], ['name']) self.assertEqual(len(found), 1) self.assertEqual(found[0]['name'], 'MyPartner1') self.assertTrue('id' in found[0]) # search_read correct order self.partner.create(self.cr, UID, {'name': 'MyPartner2'}) found = self.partner.search_read(self.cr, UID, [['name', 'like', 'MyPartner']], ['name'], order="name") self.assertEqual(len(found), 2) self.assertEqual(found[0]['name'], 'MyPartner1') self.assertEqual(found[1]['name'], 'MyPartner2') found = self.partner.search_read(self.cr, UID, [['name', 'like', 'MyPartner']], ['name'], order="name desc") self.assertEqual(len(found), 2) self.assertEqual(found[0]['name'], 'MyPartner2') self.assertEqual(found[1]['name'], 'MyPartner1') # search_read that finds nothing found = self.partner.search_read(self.cr, UID, [['name', '=', 'Does not exists']], ['name']) self.assertEqual(len(found), 0) def test_exists(self): partner = self.partner.browse(self.cr, UID, []) # check that records obtained from search exist recs = partner.search([]) self.assertTrue(recs) self.assertEqual(recs.exists(), recs) # check that there is no record with id 0 recs = partner.browse([0]) self.assertFalse(recs.exists()) def test_groupby_date(self): partners = dict( A='2012-11-19', B='2012-12-17', C='2012-12-31', D='2013-01-07', E='2013-01-14', F='2013-01-28', G='2013-02-11', ) all_partners = [] partners_by_day = defaultdict(set) partners_by_month = defaultdict(set) partners_by_year = defaultdict(set) for name, date in partners.items(): p = self.partner.create(self.cr, UID, dict(name=name, date=date)) all_partners.append(p) partners_by_day[date].add(p) partners_by_month[date.rsplit('-', 1)[0]].add(p) partners_by_year[date.split('-', 1)[0]].add(p) def read_group(interval, domain=None): main_domain = [('id', 'in', all_partners)] if domain: domain = ['&'] + main_domain + domain else: domain = main_domain rg = self.partner.read_group(self.cr, self.uid, domain, ['date'], 'date' + ':' + interval) result = {} for r in rg: result[r['date:' + interval]] = set(self.partner.search(self.cr, self.uid, r['__domain'])) return result self.assertEqual(len(read_group('day')), len(partners_by_day)) self.assertEqual(len(read_group('month')), len(partners_by_month)) self.assertEqual(len(read_group('year')), len(partners_by_year)) rg = self.partner.read_group(self.cr, 
self.uid, [('id', 'in', all_partners)], ['date'], ['date:month', 'date:day'], lazy=False) self.assertEqual(len(rg), len(all_partners)) class TestInherits(common.TransactionCase): """ test the behavior of the orm for models that use _inherits; specifically: res.users, that inherits from res.partner """ def setUp(self): super(TestInherits, self).setUp() self.partner = self.registry('res.partner') self.user = self.registry('res.users') def test_default(self): """ `default_get` cannot return a dictionary or a new id """ defaults = self.user.default_get(self.cr, UID, ['partner_id']) if 'partner_id' in defaults: self.assertIsInstance(defaults['partner_id'], (bool, int, long)) def test_create(self): """ creating a user should automatically create a new partner """ partners_before = self.partner.search(self.cr, UID, []) foo_id = self.user.create(self.cr, UID, {'name': 'Foo', 'login': 'foo', 'password': 'foo'}) foo = self.user.browse(self.cr, UID, foo_id) self.assertNotIn(foo.partner_id.id, partners_before) def test_create_with_ancestor(self): """ creating a user with a specific 'partner_id' should not create a new partner """ par_id = self.partner.create(self.cr, UID, {'name': 'Foo'}) partners_before = self.partner.search(self.cr, UID, []) foo_id = self.user.create(self.cr, UID, {'partner_id': par_id, 'login': 'foo', 'password': 'foo'}) partners_after = self.partner.search(self.cr, UID, []) self.assertEqual(set(partners_before), set(partners_after)) foo = self.user.browse(self.cr, UID, foo_id) self.assertEqual(foo.name, 'Foo') self.assertEqual(foo.partner_id.id, par_id) @mute_logger('openerp.models') def test_read(self): """ inherited fields should be read without any indirection """ foo_id = self.user.create(self.cr, UID, {'name': 'Foo', 'login': 'foo', 'password': 'foo'}) foo_values, = self.user.read(self.cr, UID, [foo_id]) partner_id = foo_values['partner_id'][0] partner_values, = self.partner.read(self.cr, UID, [partner_id]) self.assertEqual(foo_values['name'], partner_values['name']) foo = self.user.browse(self.cr, UID, foo_id) self.assertEqual(foo.name, foo.partner_id.name) @mute_logger('openerp.models') def test_copy(self): """ copying a user should automatically copy its partner, too """ foo_id = self.user.create(self.cr, UID, {'name': 'Foo', 'login': 'foo', 'password': 'foo'}) foo_before, = self.user.read(self.cr, UID, [foo_id]) del foo_before['__last_update'] bar_id = self.user.copy(self.cr, UID, foo_id, {'login': 'bar', 'password': 'bar'}) foo_after, = self.user.read(self.cr, UID, [foo_id]) del foo_after['__last_update'] self.assertEqual(foo_before, foo_after) foo, bar = self.user.browse(self.cr, UID, [foo_id, bar_id]) self.assertEqual(bar.login, 'bar') self.assertNotEqual(foo.id, bar.id) self.assertNotEqual(foo.partner_id.id, bar.partner_id.id) @mute_logger('openerp.models') def test_copy_with_ancestor(self): """ copying a user with 'parent_id' in defaults should not duplicate the partner """ foo_id = self.user.create(self.cr, UID, {'name': 'Foo', 'login': 'foo', 'password': 'foo', 'login_date': '2016-01-01', 'signature': 'XXX'}) par_id = self.partner.create(self.cr, UID, {'name': 'Bar'}) foo_before, = self.user.read(self.cr, UID, [foo_id]) del foo_before['__last_update'] partners_before = self.partner.search(self.cr, UID, []) bar_id = self.user.copy(self.cr, UID, foo_id, {'partner_id': par_id, 'login': 'bar'}) foo_after, = self.user.read(self.cr, UID, [foo_id]) del foo_after['__last_update'] partners_after = self.partner.search(self.cr, UID, []) self.assertEqual(foo_before, foo_after) 
self.assertEqual(set(partners_before), set(partners_after)) foo, bar = self.user.browse(self.cr, UID, [foo_id, bar_id]) self.assertNotEqual(foo.id, bar.id) self.assertEqual(bar.partner_id.id, par_id) self.assertEqual(bar.login, 'bar', "login is given from copy parameters") self.assertFalse(bar.login_date, "login_date should not be copied from original record") self.assertEqual(bar.name, 'Bar', "name is given from specific partner") self.assertEqual(bar.signature, foo.signature, "signature should be copied") CREATE = lambda values: (0, False, values) UPDATE = lambda id, values: (1, id, values) DELETE = lambda id: (2, id, False) FORGET = lambda id: (3, id, False) LINK_TO = lambda id: (4, id, False) DELETE_ALL = lambda: (5, False, False) REPLACE_WITH = lambda ids: (6, False, ids) def sorted_by_id(list_of_dicts): "sort dictionaries by their 'id' field; useful for comparisons" return sorted(list_of_dicts, key=lambda d: d.get('id')) class TestO2MSerialization(common.TransactionCase): """ test the orm method 'write' on one2many fields """ def setUp(self): super(TestO2MSerialization, self).setUp() self.partner = self.registry('res.partner') def test_no_command(self): " empty list of commands yields an empty list of records " results = self.partner.resolve_2many_commands( self.cr, UID, 'child_ids', []) self.assertEqual(results, []) def test_CREATE_commands(self): " returns the VALUES dict as-is " values = [{'foo': 'bar'}, {'foo': 'baz'}, {'foo': 'baq'}] results = self.partner.resolve_2many_commands( self.cr, UID, 'child_ids', map(CREATE, values)) self.assertEqual(results, values) def test_LINK_TO_command(self): " reads the records from the database, records are returned with their ids. " ids = [ self.partner.create(self.cr, UID, {'name': 'foo'}), self.partner.create(self.cr, UID, {'name': 'bar'}), self.partner.create(self.cr, UID, {'name': 'baz'}) ] commands = map(LINK_TO, ids) results = self.partner.resolve_2many_commands( self.cr, UID, 'child_ids', commands, ['name']) self.assertEqual(sorted_by_id(results), sorted_by_id([ {'id': ids[0], 'name': 'foo'}, {'id': ids[1], 'name': 'bar'}, {'id': ids[2], 'name': 'baz'} ])) def test_bare_ids_command(self): " same as the equivalent LINK_TO commands " ids = [ self.partner.create(self.cr, UID, {'name': 'foo'}), self.partner.create(self.cr, UID, {'name': 'bar'}), self.partner.create(self.cr, UID, {'name': 'baz'}) ] results = self.partner.resolve_2many_commands( self.cr, UID, 'child_ids', ids, ['name']) self.assertEqual(sorted_by_id(results), sorted_by_id([ {'id': ids[0], 'name': 'foo'}, {'id': ids[1], 'name': 'bar'}, {'id': ids[2], 'name': 'baz'} ])) def test_UPDATE_command(self): " take the in-db records and merge the provided information in " id_foo = self.partner.create(self.cr, UID, {'name': 'foo'}) id_bar = self.partner.create(self.cr, UID, {'name': 'bar'}) id_baz = self.partner.create(self.cr, UID, {'name': 'baz', 'city': 'tag'}) results = self.partner.resolve_2many_commands( self.cr, UID, 'child_ids', [ LINK_TO(id_foo), UPDATE(id_bar, {'name': 'qux', 'city': 'tagtag'}), UPDATE(id_baz, {'name': 'quux'}) ], ['name', 'city']) self.assertEqual(sorted_by_id(results), sorted_by_id([ {'id': id_foo, 'name': 'foo', 'city': False}, {'id': id_bar, 'name': 'qux', 'city': 'tagtag'}, {'id': id_baz, 'name': 'quux', 'city': 'tag'} ])) def test_DELETE_command(self): " deleted records are not returned at all. 
" ids = [ self.partner.create(self.cr, UID, {'name': 'foo'}), self.partner.create(self.cr, UID, {'name': 'bar'}), self.partner.create(self.cr, UID, {'name': 'baz'}) ] commands = [DELETE(ids[0]), DELETE(ids[1]), DELETE(ids[2])] results = self.partner.resolve_2many_commands( self.cr, UID, 'child_ids', commands, ['name']) self.assertEqual(results, []) def test_mixed_commands(self): ids = [ self.partner.create(self.cr, UID, {'name': name}) for name in ['NObar', 'baz', 'qux', 'NOquux', 'NOcorge', 'garply'] ] results = self.partner.resolve_2many_commands( self.cr, UID, 'child_ids', [ CREATE({'name': 'foo'}), UPDATE(ids[0], {'name': 'bar'}), LINK_TO(ids[1]), DELETE(ids[2]), UPDATE(ids[3], {'name': 'quux',}), UPDATE(ids[4], {'name': 'corge'}), CREATE({'name': 'grault'}), LINK_TO(ids[5]) ], ['name']) self.assertEqual(sorted_by_id(results), sorted_by_id([ {'name': 'foo'}, {'id': ids[0], 'name': 'bar'}, {'id': ids[1], 'name': 'baz'}, {'id': ids[3], 'name': 'quux'}, {'id': ids[4], 'name': 'corge'}, {'name': 'grault'}, {'id': ids[5], 'name': 'garply'} ])) def test_LINK_TO_pairs(self): "LINK_TO commands can be written as pairs, instead of triplets" ids = [ self.partner.create(self.cr, UID, {'name': 'foo'}), self.partner.create(self.cr, UID, {'name': 'bar'}), self.partner.create(self.cr, UID, {'name': 'baz'}) ] commands = map(lambda id: (4, id), ids) results = self.partner.resolve_2many_commands( self.cr, UID, 'child_ids', commands, ['name']) self.assertEqual(sorted_by_id(results), sorted_by_id([ {'id': ids[0], 'name': 'foo'}, {'id': ids[1], 'name': 'bar'}, {'id': ids[2], 'name': 'baz'} ])) def test_singleton_commands(self): "DELETE_ALL can appear as a singleton" results = self.partner.resolve_2many_commands( self.cr, UID, 'child_ids', [DELETE_ALL()], ['name']) self.assertEqual(results, []) # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0