# -*- coding: utf-8 -*-
import logging
from .server import FHIRNotFoundException, FHIRServer, FHIRUnauthorizedException
__version__ = '3.0.0'
__author__ = 'SMART Platforms Team'
__license__ = 'APACHE2'
__copyright__ = "Copyright 2017 Boston Children's Hospital"
scope_default = 'user/*.* patient/*.read openid profile'
scope_haslaunch = 'launch'
scope_patientlaunch = 'launch/patient'
logger = logging.getLogger(__name__)
class FHIRClient(object):
""" Instances of this class handle authorizing and talking to SMART on FHIR
servers.
The settings dictionary supports:
- `app_id`*: Your app/client-id, e.g. 'my_web_app'
- `app_secret`*: Your app/client-secret
- `api_base`*: The FHIR service to connect to, e.g. 'https://fhir-api-dstu2.smarthealthit.org'
- `redirect_uri`: The callback/redirect URL for your app, e.g. 'http://localhost:8000/fhir-app/' when testing locally
- `patient_id`: The patient id against which to operate, if already known
- `scope`: Space-separated list of scopes to request, if other than default
- `launch_token`: The launch token
"""
def __init__(self, settings=None, state=None, save_func=lambda x:x):
self.app_id = None
self.app_secret = None
""" The app-id for the app this client is used in. """
self.server = None
self.scope = scope_default
self.redirect = None
""" The redirect-uri that will be used to redirect after authorization. """
self.launch_token = None
""" The token/id provided at launch, if any. """
self.launch_context = None
""" Context parameters supplied by the server during launch. """
self.wants_patient = True
""" If true and launched without patient, will add the correct scope
to indicate that the server should prompt for a patient after login. """
self.patient_id = None
self._patient = None
if save_func is None:
raise Exception("Must supply a save_func when initializing the SMART client")
self._save_func = save_func
# init from state
if state is not None:
self.from_state(state)
# init from settings dict
elif settings is not None:
if 'app_id' not in settings:
raise Exception("Must provide 'app_id' in settings dictionary")
if 'api_base' not in settings:
raise Exception("Must provide 'api_base' in settings dictionary")
self.app_id = settings['app_id']
self.app_secret = settings.get('app_secret')
self.redirect = settings.get('redirect_uri')
self.patient_id = settings.get('patient_id')
self.scope = settings.get('scope', self.scope)
self.launch_token = settings.get('launch_token')
self.server = FHIRServer(self, base_uri=settings['api_base'])
else:
raise Exception("Must either supply settings or a state upon client initialization")
# MARK: Authorization
@property
def desired_scope(self):
""" Ensures `self.scope` is completed with launch scopes, according to
current client settings.
"""
scope = self.scope
if self.launch_token is not None:
scope = ' '.join([scope_haslaunch, scope])
elif self.patient_id is None and self.wants_patient:
scope = ' '.join([scope_patientlaunch, scope])
return scope
@property
def ready(self):
""" Returns True if the client is ready to make API calls (e.g. there
is an access token or this is an open server).
:returns: True if the server can make authenticated calls
"""
return self.server.ready if self.server is not None else False
def prepare(self):
""" Returns True if the client is ready to make API calls (e.g. there
is an access token or this is an open server). In contrast to the
`ready` property, this method will fetch the server's capability
statement if it hasn't yet been fetched.
:returns: True if the server can make authenticated calls
"""
if self.server:
if self.server.ready:
return True
return self.server.prepare()
return False
@property
def authorize_url(self):
""" The URL to use to receive an authorization token.
"""
return self.server.authorize_uri if self.server is not None else None
def handle_callback(self, url):
""" You can call this to have the client automatically handle the
auth callback after the user has logged in.
:param str url: The complete callback URL
"""
ctx = self.server.handle_callback(url) if self.server is not None else None
self._handle_launch_context(ctx)
def reauthorize(self):
""" Try to reauthorize with the server.
:returns: A bool indicating reauthorization success
"""
ctx = self.server.reauthorize() if self.server is not None else None
self._handle_launch_context(ctx)
return self.launch_context is not None
def _handle_launch_context(self, ctx):
logger.debug("SMART: Handling launch context: {0}".format(ctx))
if 'patient' in ctx:
#print('Patient id was {0}, raw context is {1}'.format(self.patient_id, ctx))
self.patient_id = ctx['patient'] # TODO: TEST THIS!
if 'id_token' in ctx:
logger.warning("SMART: Received an id_token, ignoring")
self.launch_context = ctx
self.save_state()
# MARK: Current Patient
@property
def patient(self):
if self._patient is None and self.patient_id is not None and self.ready:
from .models import patient
try:
logger.debug("SMART: Attempting to read Patient {0}".format(self.patient_id))
self._patient = patient.Patient.read(self.patient_id, self.server)
except FHIRUnauthorizedException as e:
if self.reauthorize():
logger.debug("SMART: Attempting to read Patient {0} after reauthorizing"
.format(self.patient_id))
self._patient = patient.Patient.read(self.patient_id, self.server)
except FHIRNotFoundException as e:
logger.warning("SMART: Patient with id {0} not found".format(self.patient_id))
self.patient_id = None
self.save_state()
return self._patient
def human_name(self, human_name_instance):
""" Formats a `HumanName` instance into a string.
"""
if human_name_instance is None:
return 'Unknown'
parts = []
for n in [human_name_instance.prefix, human_name_instance.given, human_name_instance.family]:
if n is not None:
parts.extend(n)
if len(human_name_instance.suffix) > 0:
if len(parts) > 0:
parts[len(parts)-1] = parts[len(parts)-1]+','
parts.extend(human_name_instance.suffix)
return ' '.join(parts) if len(parts) > 0 else 'Unnamed'
# MARK: State
def reset_patient(self):
self.launch_token = None
self.launch_context = None
self.patient_id = None
self._patient = None
self.save_state()
@property
def state(self):
return {
'app_id': self.app_id,
'app_secret': self.app_secret,
'scope': self.scope,
'redirect': self.redirect,
'patient_id': self.patient_id,
'server': self.server.state,
'launch_token': self.launch_token,
'launch_context': self.launch_context,
}
def from_state(self, state):
assert state
self.app_id = state.get('app_id') or self.app_id
self.app_secret = state.get('app_secret') or self.app_secret
self.scope = state.get('scope') or self.scope
self.redirect = state.get('redirect') or self.redirect
self.patient_id = state.get('patient_id') or self.patient_id
self.launch_token = state.get('launch_token') or self.launch_token
self.launch_context = state.get('launch_context') or self.launch_context
self.server = FHIRServer(self, state=state.get('server'))
def save_state(self):
self._save_func(self.state)
|
# coding: utf-8
"""
MailMojo API
v1 of the MailMojo API # noqa: E501
OpenAPI spec version: 1.1.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import mailmojo_sdk
from mailmojo_sdk.api.newsletter_api import NewsletterApi # noqa: E501
from mailmojo_sdk.rest import ApiException
class TestNewsletterApi(unittest.TestCase):
"""NewsletterApi unit test stubs"""
def setUp(self):
self.api = mailmojo_sdk.api.newsletter_api.NewsletterApi() # noqa: E501
def tearDown(self):
pass
def test_cancel_newsletter(self):
"""Test case for cancel_newsletter
Cancel a newsletter. # noqa: E501
"""
pass
def test_create_newsletter(self):
"""Test case for create_newsletter
Create a newsletter draft. # noqa: E501
"""
pass
def test_get_newsletter_by_id(self):
"""Test case for get_newsletter_by_id
Retrieve a newsletter by id. # noqa: E501
"""
pass
def test_get_newsletters(self):
"""Test case for get_newsletters
Retrieve all newsletters. # noqa: E501
"""
pass
def test_send_newsletter(self):
"""Test case for send_newsletter
Send a newsletter. # noqa: E501
"""
pass
def test_test_newsletter(self):
"""Test case for test_newsletter
Send a test newsletter. # noqa: E501
"""
pass
def test_update_newsletter(self):
"""Test case for update_newsletter
Update a newsletter draft partially. # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main()
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'URLTweetImage'
db.create_table(u'tweets_urltweetimage', (
(u'tweetimage_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['tweets.TweetImage'], unique=True, primary_key=True)),
('url', self.gf('django.db.models.fields.URLField')(max_length=200)),
))
db.send_create_signal(u'tweets', ['URLTweetImage'])
def backwards(self, orm):
# Deleting model 'URLTweetImage'
db.delete_table(u'tweets_urltweetimage')
models = {
u'tweets.articletweet': {
'Meta': {'ordering': "['-tweeted']", 'object_name': 'ArticleTweet', '_ormbases': [u'tweets.Tweet']},
'article': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['tweets.FlickrTweetImage']"}),
u'tweet_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['tweets.Tweet']", 'unique': 'True', 'primary_key': 'True'})
},
u'tweets.bracketedcolorbigram': {
'Meta': {'ordering': "['-f']", 'unique_together': "(('start_bracket', 'w1', 'w2', 'end_bracket'),)", 'object_name': 'BracketedColorBigram'},
'end_bracket': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'f': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'start_bracket': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'w1': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'w2': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
u'tweets.color': {
'Meta': {'unique_together': "(('rgb_r', 'rgb_g', 'rgb_b'),)", 'object_name': 'Color'},
'a': ('django.db.models.fields.FloatField', [], {}),
'b': ('django.db.models.fields.FloatField', [], {}),
'hex': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '8'}),
'html': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '7'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'l': ('django.db.models.fields.FloatField', [], {}),
'rgb_b': ('django.db.models.fields.IntegerField', [], {}),
'rgb_g': ('django.db.models.fields.IntegerField', [], {}),
'rgb_r': ('django.db.models.fields.IntegerField', [], {})
},
u'tweets.colormap': {
'Meta': {'object_name': 'ColorMap'},
'base_color': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}),
'color': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['tweets.Color']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'stereotype': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
u'tweets.colorunigram': {
'Meta': {'ordering': "['-f']", 'object_name': 'ColorUnigram'},
'f': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'solid_compound': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'tweets.colorunigramsplit': {
'Meta': {'ordering': "['w1', 'w2']", 'unique_together': "(('w1', 'w2'),)", 'object_name': 'ColorUnigramSplit'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'original': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['tweets.ColorUnigram']"}),
'w1': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'w2': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
u'tweets.everycolorbottweet': {
'Meta': {'ordering': "['-added', 'color', 'url', 'tweeted']", 'object_name': 'EveryColorBotTweet'},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'color': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['tweets.Color']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tweet_id': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'tweeted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '200'})
},
u'tweets.flickrtweetimage': {
'Meta': {'object_name': 'FlickrTweetImage', '_ormbases': [u'tweets.TweetImage']},
'description': ('django.db.models.fields.TextField', [], {'max_length': '20000'}),
'flickr_farm': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'flickr_id': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'flickr_secret': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'flickr_server': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'flickr_user_id': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'flickr_user_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
u'tweetimage_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['tweets.TweetImage']", 'unique': 'True', 'primary_key': 'True'})
},
u'tweets.pluralcolorbigram': {
'Meta': {'ordering': "['-f']", 'unique_together': "(('w1', 'w2', 'singular'),)", 'object_name': 'PluralColorBigram'},
'f': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'singular': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}),
'w1': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'w2': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
u'tweets.retweet': {
'Meta': {'ordering': "['-retweeted']", 'object_name': 'ReTweet'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'retweeted': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'screen_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'tweet': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['tweets.Tweet']"}),
'tweet_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'})
},
u'tweets.tweet': {
'Meta': {'ordering': "['-tweeted']", 'object_name': 'Tweet'},
'color_code': ('django.db.models.fields.CharField', [], {'default': "'0xffffff'", 'max_length': '10'}),
'color_name': ('django.db.models.fields.CharField', [], {'default': "'None'", 'max_length': '100'}),
'context': ('django.db.models.fields.CharField', [], {'default': "'None'", 'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '160'}),
'muse': ('django.db.models.fields.CharField', [], {'default': "'None'", 'max_length': '100'}),
'reasoning': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True'}),
'tweeted': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.FloatField', [], {'default': '0.0'})
},
u'tweets.tweetimage': {
'Meta': {'object_name': 'TweetImage'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'interjection': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'original': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'processed': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'blank': 'True'})
},
u'tweets.unbracketedcolorbigram': {
'Meta': {'ordering': "['-f']", 'unique_together': "(('w1', 'w2'),)", 'object_name': 'UnbracketedColorBigram'},
'f': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'w1': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'w2': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
u'tweets.urltweetimage': {
'Meta': {'object_name': 'URLTweetImage', '_ormbases': [u'tweets.TweetImage']},
u'tweetimage_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['tweets.TweetImage']", 'unique': 'True', 'primary_key': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
}
}
complete_apps = ['tweets']
|
#!/usr/bin/env python
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2013,2015,2016 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
import logging
import argparse
try:
import ms.version
except ImportError:
pass
else:
ms.version.addpkg('twisted', '12.0.0')
ms.version.addpkg('zope.interface', '3.6.1')
ms.version.addpkg('setuptools', '0.6c11')
ms.version.addpkg('protobuf', '3.0.0b2')
ms.version.addpkg('six', '1.7.3')
ms.version.addpkg('python-daemon', '2.0.5')
ms.version.addpkg('lockfile', '0.9.1')
from twisted.internet.protocol import Factory
from twisted.protocols.basic import Int32StringReceiver
from twisted.internet import reactor
from google.protobuf.json_format import MessageToJson
from daemon import DaemonContext
from daemon.pidfile import TimeoutPIDLockFile
# -- begin path_setup --
BINDIR = os.path.dirname(os.path.realpath(sys.argv[0]))
LIBDIR = os.path.join(BINDIR, "..", "lib")
if LIBDIR not in sys.path:
sys.path.append(LIBDIR)
# -- end path_setup --
from aquilon.config import Config
class EventProtocol(Int32StringReceiver):
def __init__(self, storedir):
self.storedir = storedir
# Late import of protocol buffers after path correction
import aqdnotifications_pb2
self.skeleton = aqdnotifications_pb2.Notification
def stringReceived(self, data):
msg = self.skeleton()
msg.ParseFromString(data)
json_str = MessageToJson(msg)
if self.storedir:
path = os.path.join(self.storedir, '{}.json'.format(msg.uuid))
with open(path, 'w') as fh:
fh.write(json_str)
else:
sys.stdout.write(json_str)
sys.stdout.write('\n')
class EventFactory(Factory):
def __init__(self, storedir):
self.storedir = storedir
def buildProtocol(self, addr):
return EventProtocol(self.storedir)
def run_reactor(sockname, storedir):
reactor.listenUNIX(sockname, EventFactory(storedir))
reactor.run()
def daemonize(pidfile, sockname, storedir):
pidcmgr = TimeoutPIDLockFile(pidfile)
with DaemonContext(pidfile=pidcmgr) as dc:
run_reactor(sockname, storedir)
def main():
parser = argparse.ArgumentParser(description="Send out broker notifications")
parser.add_argument("-d", "--daemon", action='store_true',
help="Run as a daemon process")
parser.add_argument("-s", "--store", action='store_true',
help="Write messages to a file")
parser.add_argument("-c", "--config", dest="config",
help="location of the broker configuration file")
opts = parser.parse_args()
logger = logging.getLogger("read_events")
# Load configuration
config = Config(configfile=opts.config)
# Load the specified version of the protocol buffers
sys.path.append(config.get("protocols", "directory"))
# Find and create the socket directory
sockdir = config.get("broker", "sockdir")
if not os.path.exists(sockdir):
os.makedirs(sockdir)
# Remove a stale socket
sockname = os.path.join(sockdir, "events")
if os.path.exists(sockname):
logger.info("Removing old socket " + sockname)
try:
os.unlink(sockname)
except OSError as err:
logger.error("Failed to remove %s: %s", sockname, err)
# Are we storing messages we receive?
storedir = None
if opts.store:
if config.has_section('unittest'):
storedir = os.path.join(config.get('unittest', 'scratchdir'), 'events')
else:
storedir = os.path.join(config.get('quattordir'), 'scratch', 'events')
if not os.path.exists(storedir):
os.makedirs(storedir)
# Decide if we want to daemonize
if opts.daemon:
rundir = config.get('broker', 'rundir')
if not os.path.exists(rundir):
os.makedirs(rundir)
pidfile = os.path.join(rundir, 'read_events.pid')
daemonize(pidfile, sockname, storedir)
else:
run_reactor(sockname, storedir)
if __name__ == '__main__':
main()
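# Example invocations (sketch; the flags come from the argparse definitions above,
# and the configuration path is illustrative):
#
#   read_events --config /etc/aqd.conf                    # print incoming events as JSON to stdout
#   read_events --config /etc/aqd.conf --daemon --store   # daemonize and store events as files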
|
# -*- coding: utf-8 -*-
"""
pyrseas.dbobject.column
~~~~~~~~~~~~~~~~~~~~~~~
This module defines two classes: Column derived from
DbSchemaObject and ColumnDict derived from DbObjectDict.
"""
from pyrseas.dbobject import DbObjectDict, DbSchemaObject, quote_id
from pyrseas.dbobject.privileges import privileges_from_map, add_grant
from pyrseas.dbobject.privileges import diff_privs
class Column(DbSchemaObject):
"A table column definition"
keylist = ['schema', 'table']
allprivs = 'arwx'
def to_map(self, db, no_privs):
"""Convert a column to a YAML-suitable format
:param no_privs: exclude privilege information
:return: dictionary
"""
if hasattr(self, 'dropped'):
return None
dct = self._base_map(db, False, no_privs)
del dct['number'], dct['name']
if 'collation' in dct and dct['collation'] == 'default':
del dct['collation']
if hasattr(self, 'inherited'):
dct['inherited'] = (self.inherited != 0)
if hasattr(self, 'statistics') and self.statistics == -1:
del dct['statistics']
return {self.name: dct}
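# Illustrative shape of the mapping returned by to_map() (the attribute keys come
# from the catalog query below; the concrete values here are made up):
#   {'c1': {'type': 'integer', 'not_null': True,
#           'default': "nextval('t1_c1_seq'::regclass)"}}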
def add(self):
"""Return a string to specify the column in a CREATE or ALTER TABLE
:return: partial SQL statement
"""
stmt = "%s %s" % (quote_id(self.name), self.type)
if hasattr(self, 'not_null'):
stmt += ' NOT NULL'
if hasattr(self, 'default'):
stmt += ' DEFAULT ' + self.default
if hasattr(self, 'collation') and self.collation != 'default':
stmt += ' COLLATE "%s"' % self.collation
return (stmt, '' if self.description is None else self.comment())
def add_privs(self):
"""Generate SQL statements to grant privileges on new column
:return: list of SQL statements
"""
return [add_grant(self._table, priv, self.name)
for priv in self.privileges]
def diff_privileges(self, incol):
"""Generate SQL statements to grant or revoke privileges
:param incol: a YAML map defining the input column
:return: list of SQL statements
"""
return [diff_privs(self._table, self.privileges, incol._table,
incol.privileges, self.name)]
def comment(self):
"""Return a SQL COMMENT statement for the column
:return: SQL statement
"""
return "COMMENT ON COLUMN %s.%s IS %s" % (
self._table.qualname(), self.name, self._comment_text())
def drop(self):
"""Return string to drop the column via ALTER TABLE
:return: SQL statement
"""
if hasattr(self, 'dropped'):
return []
if hasattr(self, '_table'):
(comptype, objtype) = (self._table.objtype, 'COLUMN')
compname = self._table.qualname()
elif hasattr(self, '_type'):
(comptype, objtype) = ('TYPE', 'ATTRIBUTE')
compname = self._type.qualname()
else:
raise TypeError("Cannot determine type of %s", self.name)
return "ALTER %s %s DROP %s %s" % (comptype, compname, objtype,
quote_id(self.name))
def rename(self, newname):
"""Return SQL statement to RENAME the column
:param newname: the new name of the object
:return: SQL statement
"""
if hasattr(self, '_table'):
(comptype, objtype) = (self._table.objtype, 'COLUMN')
compname = self._table.qualname()
elif hasattr(self, '_type'):
(comptype, objtype) = ('TYPE', 'ATTRIBUTE')
compname = self._type.qualname()
else:
raise TypeError("Cannot determine type of %s", self.name)
stmt = "ALTER %s %s RENAME %s %s TO %s" % (
comptype, compname, objtype, self.name, newname)
self.name = newname
return stmt
def set_sequence_default(self):
"""Return SQL statements to set a nextval() DEFAULT
:return: list of SQL statements
"""
stmts = []
stmts.append("ALTER TABLE %s ALTER COLUMN %s SET DEFAULT %s" % (
self.qualname(self.table), quote_id(self.name), self.default))
return stmts
def alter(self, incol):
"""Generate SQL to transform an existing column
:param incol: a YAML map defining the new column
:return: list of partial SQL statements
Compares the column to an input column and generates partial
SQL statements to transform it into the one represented by the
input.
"""
stmts = []
base = "ALTER COLUMN %s " % quote_id(self.name)
# check NOT NULL
if not hasattr(self, 'not_null') and hasattr(incol, 'not_null'):
stmts.append(base + "SET NOT NULL")
if hasattr(self, 'not_null') and not hasattr(incol, 'not_null'):
stmts.append(base + "DROP NOT NULL")
# check data types
if not hasattr(self, 'type'):
raise ValueError("Column '%s' missing datatype" % self.name)
if not hasattr(incol, 'type'):
raise ValueError("Input column '%s' missing datatype" % incol.name)
if self.type != incol.type:
# validate type conversion?
stmts.append(base + "TYPE %s" % incol.type)
# check DEFAULTs
if not hasattr(self, 'default') and hasattr(incol, 'default'):
stmts.append(base + "SET DEFAULT %s" % incol.default)
if hasattr(self, 'default'):
if not hasattr(incol, 'default'):
stmts.append(base + "DROP DEFAULT")
elif self.default != incol.default:
stmts.append(base + "SET DEFAULT %s" % incol.default)
# check STATISTICS
if hasattr(self, 'statistics'):
if self.statistics == -1 and (
hasattr(incol, 'statistics') and incol.statistics != -1):
stmts.append(base + "SET STATISTICS %d" % incol.statistics)
if self.statistics != -1 and (not hasattr(incol, 'statistics') or
incol.statistics == -1):
stmts.append(base + "SET STATISTICS -1")
return (", ".join(stmts), self.diff_description(incol))
QUERY_PRE91 = \
"""SELECT nspname AS schema, relname AS table, attname AS name,
attnum AS number, format_type(atttypid, atttypmod) AS type,
attnotnull AS not_null, attinhcount AS inherited,
pg_get_expr(adbin, adrelid) AS default,
attstattarget AS statistics, attisdropped AS dropped,
array_to_string(attacl, ',') AS privileges,
col_description(c.oid, attnum) AS description
FROM pg_attribute JOIN pg_class c ON (attrelid = c.oid)
JOIN pg_namespace ON (relnamespace = pg_namespace.oid)
LEFT JOIN pg_attrdef ON (attrelid = pg_attrdef.adrelid
AND attnum = pg_attrdef.adnum)
WHERE relkind in ('c', 'r', 'f')
AND (nspname != 'pg_catalog'
AND nspname != 'information_schema')
AND attnum > 0
ORDER BY nspname, relname, attnum"""
class ColumnDict(DbObjectDict):
"The collection of columns in tables in a database"
cls = Column
query = \
"""SELECT nspname AS schema, relname AS table, attname AS name,
attnum AS number, format_type(atttypid, atttypmod) AS type,
attnotnull AS not_null, attinhcount AS inherited,
pg_get_expr(adbin, adrelid) AS default,
attstattarget AS statistics,
collname AS collation, attisdropped AS dropped,
array_to_string(attacl, ',') AS privileges,
col_description(c.oid, attnum) AS description
FROM pg_attribute JOIN pg_class c ON (attrelid = c.oid)
JOIN pg_namespace ON (relnamespace = pg_namespace.oid)
LEFT JOIN pg_attrdef ON (attrelid = pg_attrdef.adrelid
AND attnum = pg_attrdef.adnum)
LEFT JOIN pg_collation l ON (attcollation = l.oid)
WHERE relkind in ('c', 'r', 'f')
AND (nspname != 'pg_catalog'
AND nspname != 'information_schema')
AND attnum > 0
ORDER BY nspname, relname, attnum"""
def _from_catalog(self):
"""Initialize the dictionary of columns by querying the catalogs"""
if self.dbconn.version < 90100:
self.query = QUERY_PRE91
for col in self.fetch():
sch, tbl = col.key()
if (sch, tbl) not in self:
self[(sch, tbl)] = []
self[(sch, tbl)].append(col)
def from_map(self, table, incols):
"""Initialize the dictionary of columns by converting the input list
:param table: table or type owning the columns/attributes
:param incols: YAML list defining the columns
"""
if not incols:
raise ValueError("Table '%s' has no columns" % table.name)
cols = self[(table.schema, table.name)] = []
for incol in incols:
for key in incol:
if isinstance(incol[key], dict):
arg = incol[key]
else:
arg = {'type': incol[key]}
col = Column(schema=table.schema, table=table.name, name=key,
**arg)
if len(col.privileges) > 0:
if table.owner is None:
raise ValueError("Column '%s.%s' has privileges but "
"no owner information" % (
table.name, key))
col.privileges = privileges_from_map(
col.privileges, col.allprivs, table.owner)
cols.append(col)
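# Illustrative shape of the `incols` list accepted by from_map() (each entry maps a
# column name either to a bare type string or to a dict of attributes; the concrete
# values here are made up):
#   [{'id': 'integer'},
#    {'descr': {'type': 'text', 'not_null': True}}]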
|
# python3
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Type aliases and assumptions that are specific to the MCTS agent."""
from typing import Callable, Tuple, Union
import numpy as np
# pylint: disable=invalid-name
# Assumption: actions are scalar and discrete (integral).
Action = Union[int, np.int32, np.int64]
# Assumption: observations are array-like.
Observation = np.ndarray
# Assumption: rewards and discounts are scalar.
Reward = Union[float, np.float32, np.float64]
Discount = Union[float, np.float32, np.float64]
# Notation: policy logits/probabilities are simply a vector of floats.
Probs = np.ndarray
# Notation: the value function is scalar-valued.
Value = float
# Notation: the 'evaluation function' maps observations -> (probs, value).
EvaluationFn = Callable[[Observation], Tuple[Probs, Value]]
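# A minimal sketch of an evaluation function satisfying EvaluationFn, for
# illustration only; `num_actions` is a hypothetical parameter, not part of this
# module. It ignores the observation and returns a uniform prior with a zero value.
def make_uniform_evaluation_fn(num_actions: int) -> EvaluationFn:
  def evaluate(observation: Observation) -> Tuple[Probs, Value]:
    del observation  # the uniform prior does not depend on the observation
    return np.ones(num_actions) / num_actions, 0.0
  return evaluate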
|
#!/usr/bin/env python
'''
Copyright (c) 2016, Paul-Edouard Sarlin
All rights reserved.
Project: Autonomous Monitoring System
File: capture.py
Date: 2016-08-08
Author: Paul-Edouard Sarlin
Website: https://github.com/skydes/monitoring
'''
from multiprocessing import Process, Event, Lock, Queue
from Queue import Full
from time import sleep
import v4l2capture
import select
import cv2
import os
import logging
import numpy as np
FAIL = False
class Capture(Process):
def __init__(self, out_queue, conf, conf_lock):
Process.__init__(self)
self._out_queue = out_queue
self._stop = Event()
self._stop.set()
self._new_conf = Event()
self._new_conf.clear()
self._conf_lock = conf_lock
self._conf = conf
self._stream = None
self._device_name = None
def setDevice(self, device):
self._device_name = device
def openStream(self):
logging.debug("Opening stream.")
try:
self._stream = v4l2capture.Video_device("/dev/"+self._device_name)
except IOError as err_pref:
logging.debug("Could not open default device.")
devices = [x for x in os.listdir("/dev/") if x.startswith("video")]
devices.sort()
for device_new in devices:
try:
self._stream = v4l2capture.Video_device("/dev/"+device_new)
except IOError as err_new:
pass
else:
logging.warning("Device {default} was not available but {new} could be opened.".format(default=self._device_name, new=device_new))
self._device_name = device_new
return
raise err_pref
else:
return
def setupStream(self):
with self._conf_lock:
self._stream.set_format(self._conf["capture-res"][0], self._conf["capture-res"][1], fourcc='MJPG')
self._stream.create_buffers(1)
self._stream.queue_all_buffers()
def newConf(self):
self._new_conf.set()
def run(self):
self._stop.clear()
with self._conf_lock:
conf = self._conf.copy() # Create thread-safe local copy
sleep(float(conf["capture-warmup"])) # Camera warm-up wait
while True:
if self._stop.is_set():
break
if self._new_conf.is_set():
with self._conf_lock:
conf = self._conf.copy()
self._new_conf.clear()
logging.debug("New configuration set: {conf}".format(conf=conf))
if conf["capture"]:
if self._stream is None:
if self.tryOpenStream() is FAIL:
continue
try:
select.select((self._stream,), (), ())
raw = self._stream.read_and_queue()
except IOError as err_first:
self._stream.close()
self.tryOpenStream()
continue
if raw is None:
logging.warning("Grabbed frame is empty.")
while True:
try:
self._out_queue.put(cv2.imdecode(np.fromstring(raw, dtype=np.byte), flags=cv2.IMREAD_COLOR), block=False)
except Full:
self._out_queue.get()
else:
break
else:
sleep(1) # Reduce CPU consumption
if self._stream is not None:
self._stream.close()
logging.info("Thread stopped.")
def tryOpenStream(self):
try:
self.openStream()
except IOError as err:
with self._conf_lock:
self._conf["capture"] = False
self._conf["error"]["capture"] = True
self._stream = None
self.newConf()
logging.error("Capture disabled: could not open stream, no device available.")
return FAIL
else:
self.setupStream()
self._stream.start()
return (not FAIL)
def stop(self):
self._stop.set()
|
#!/usr/bin/env python
"""Testsuite for svglib.
This tests conversion of sample SVG files into PDF files.
Some tests try using a tool called uniconv (if installed)
to convert SVG files into PDF for comparison with svglib.
Read ``tests/README.rst`` for more information on testing!
"""
import os
import glob
import re
import gzip
import io
import json
import tarfile
import textwrap
from http.client import HTTPSConnection
from os.path import dirname, splitext, exists, join, basename, getsize
from urllib.parse import quote, unquote, urlparse
from urllib.request import urlopen
from reportlab.lib.utils import haveImages
from reportlab.graphics import renderPDF, renderPM
from reportlab.graphics.shapes import Group, Rect
import pytest
from svglib import svglib
TEST_ROOT = dirname(__file__)
def found_uniconv():
"Do we have uniconv installed?"
res = os.popen("which uniconv").read().strip()
return len(res) > 0
class TestSVGSamples:
"Tests on misc. sample SVG files included in this test suite."
def cleanup(self):
"Remove generated files created by this class."
paths = glob.glob(f"{TEST_ROOT}/samples/misc/*.pdf")
for i, path in enumerate(paths):
print(f"deleting [{i}] {path}")
os.remove(path)
def test_convert_pdf(self):
"Test convert sample SVG files to PDF using svglib."
paths = glob.glob(f"{TEST_ROOT}/samples/misc/*")
paths = [p for p in paths if splitext(p.lower())[1] in [".svg", ".svgz"]]
for i, path in enumerate(paths):
print(f"working on [{i}] {path}")
# convert
drawing = svglib.svg2rlg(path)
# save as PDF
base = splitext(path)[0] + '-svglib.pdf'
renderPDF.drawToFile(drawing, base, showBoundary=0)
@pytest.mark.skipif(not found_uniconv(), reason="needs uniconv")
def test_create_pdf_uniconv(self):
"Test converting sample SVG files to PDF using uniconverter."
paths = glob.glob(f"{TEST_ROOT}/samples/misc/*.svg")
for path in paths:
out = splitext(path)[0] + '-uniconv.pdf'
cmd = f"uniconv '{path}' '{out}'"
os.popen(cmd).read()
if exists(out) and getsize(out) == 0:
os.remove(out)
class TestWikipediaSymbols:
"Tests on sample symbol SVG files from wikipedia.org."
def fetch_file(self, server, path):
"Fetch file using httplib module."
print(f"downloading https://{server}{path}")
req = HTTPSConnection(server)
req.putrequest('GET', path)
req.putheader('Host', server)
req.putheader('Accept', 'text/svg')
req.endheaders()
r1 = req.getresponse()
data = r1.read().decode('utf-8')
req.close()
return data
def setup_method(self):
"Check if files exists, else download and unpack it."
self.folder_path = f"{TEST_ROOT}/samples/wikipedia/symbols"
# create directory if not existing
if not exists(self.folder_path):
os.mkdir(self.folder_path)
# list sample files, found on:
# http://en.wikipedia.org/wiki/List_of_symbols
server = "upload.wikimedia.org"
paths = textwrap.dedent("""\
/wikipedia/commons/f/f7/Biohazard.svg
/wikipedia/commons/1/11/No_smoking_symbol.svg
/wikipedia/commons/b/b0/Dharma_wheel.svg
/wikipedia/commons/a/a7/Eye_of_Horus_bw.svg
/wikipedia/commons/1/17/Yin_yang.svg
/wikipedia/commons/a/a7/Olympic_flag.svg
/wikipedia/commons/4/46/Ankh.svg
/wikipedia/commons/5/5b/Star_of_life2.svg
/wikipedia/commons/9/97/Tudor_rose.svg
/wikipedia/commons/0/08/Flower-of-Life-small.svg
/wikipedia/commons/d/d0/Countries_by_Population_Density_in_2015.svg
/wikipedia/commons/8/84/CO2_responsibility_1950-2000.svg
""").strip().split()
# convert
for path in paths:
data = None
p = join(os.getcwd(), self.folder_path, basename(path))
if not exists(p):
try:
data = self.fetch_file(server, path)
except Exception:
print("Check your internet connection and try again!")
break
if data:
with open(p, "w", encoding='UTF-8') as fh:
fh.write(data)
def cleanup(self):
"Remove generated files when running this test class."
paths = glob.glob(join(self.folder_path, '*.pdf'))
for i, path in enumerate(paths):
print(f"deleting [{i}] {path}")
os.remove(path)
def test_convert_pdf(self):
"Test converting symbol SVG files to PDF using svglib."
paths = glob.glob(f"{self.folder_path}/*")
paths = [p for p in paths if splitext(p.lower())[1] in [".svg", ".svgz"]]
for i, path in enumerate(paths):
print(f"working on [{i}] {path}")
# convert
drawing = svglib.svg2rlg(path)
# save as PDF
base = splitext(path)[0] + '-svglib.pdf'
renderPDF.drawToFile(drawing, base, showBoundary=0)
@pytest.mark.skipif(not found_uniconv(), reason="needs uniconv")
def test_convert_pdf_uniconv(self):
"Test converting symbol SVG files to PDF using uniconverter."
paths = glob.glob(f"{self.folder_path}/*")
paths = [p for p in paths if splitext(p.lower())[1] in [".svg", ".svgz"]]
for path in paths:
out = splitext(path)[0] + '-uniconv.pdf'
cmd = f"uniconv '{path}' '{out}'"
os.popen(cmd).read()
if exists(out) and getsize(out) == 0:
os.remove(out)
class TestWikipediaFlags:
"Tests using SVG flags from Wikipedia.org."
def fetch_file(self, url):
"Get content with some given URL, uncompress if needed."
parsed = urlparse(url)
conn = HTTPSConnection(parsed.netloc)
conn.request("GET", parsed.path)
r1 = conn.getresponse()
if (r1.status, r1.reason) == (200, "OK"):
data = r1.read()
if r1.getheader("content-encoding") == "gzip":
zbuf = io.BytesIO(data)
zfile = gzip.GzipFile(mode="rb", fileobj=zbuf)
data = zfile.read()
zfile.close()
data = data.decode('utf-8')
else:
data = None
conn.close()
return data
def flag_url2filename(self, url):
"""Convert given flag URL into a local filename.
http://upload.wikimedia.org/wikipedia/commons
/9/91/Flag_of_Bhutan.svg
-> Bhutan.svg
/f/fa/Flag_of_the_People%27s_Republic_of_China.svg
-> The_People's_Republic_of_China.svg
"""
path = basename(url)[len("Flag_of_"):]
path = path.capitalize() # capitalise leading "the_"
path = unquote(path)
return path
def setup_method(self):
"Check if files exists, else download."
self.folder_path = f"{TEST_ROOT}/samples/wikipedia/flags"
# create directory if not already present
if not exists(self.folder_path):
os.mkdir(self.folder_path)
# fetch flags.html, if not already present
path = join(self.folder_path, "flags.html")
if not exists(path):
u = "https://en.wikipedia.org/wiki/Gallery_of_sovereign_state_flags"
data = self.fetch_file(u)
if data:
with open(path, "w", encoding='UTF-8') as f:
f.write(data)
else:
with open(path, encoding='UTF-8') as f:
data = f.read()
# find all flag base filenames
# ["Flag_of_Bhutan.svg", "Flag_of_Bhutan.svg", ...]
flag_names = re.findall(r"\:(Flag_of_.*?\.svg)", data)
flag_names = [unquote(fn) for fn in flag_names]
# save flag URLs into a JSON file, if not already present
json_path = join(self.folder_path, "flags.json")
if not exists(json_path):
flag_url_map = []
prefix = "https://en.wikipedia.org/wiki/File:"
for i, fn in enumerate(flag_names):
# load single flag HTML page, like
# https://en.wikipedia.org/wiki/Image:Flag_of_Bhutan.svg
flag_html = self.fetch_file(prefix + quote(fn))
# search link to single SVG file to download, like
# https://upload.wikimedia.org/wikipedia/commons/9/91/Flag_of_Bhutan.svg
svg_pat = "//upload.wikimedia.org/wikipedia/commons"
p = rf"({svg_pat}/.*?/{quote(fn)})\""
print(f"check {prefix}{fn}")
m = re.search(p, flag_html)
if m:
flag_url = m.groups()[0]
flag_url_map.append((prefix + fn, flag_url))
with open(json_path, "w", encoding='UTF-8') as fh:
json.dump(flag_url_map, fh)
# download flags in SVG format, if not present already
with open(json_path, encoding='UTF-8') as fh:
flag_url_map = json.load(fh)
for dummy, flag_url in flag_url_map:
path = join(self.folder_path, self.flag_url2filename(flag_url))
if not exists(path):
print(f"fetch {flag_url}")
flag_svg = self.fetch_file(flag_url)
with open(path, "w", encoding='UTF-8') as f:
f.write(flag_svg)
def cleanup(self):
"Remove generated files when running this test class."
paths = glob.glob(join(self.folder_path, '*.pdf'))
for i, path in enumerate(paths):
print(f"deleting [{i}] {path}")
os.remove(path)
def test_convert_pdf(self):
"Test converting flag SVG files to PDF using svglib."
paths = glob.glob(f"{self.folder_path}/*")
paths = [p for p in paths if splitext(p.lower())[1] in [".svg", ".svgz"]]
for i, path in enumerate(paths):
print(f"working on [{i}] {path}")
# convert
drawing = svglib.svg2rlg(path)
# save as PDF
base = splitext(path)[0] + '-svglib.pdf'
renderPDF.drawToFile(drawing, base, showBoundary=0)
@pytest.mark.skipif(not found_uniconv(), reason="needs uniconv")
def test_convert_pdf_uniconv(self):
"Test converting flag SVG files to PDF using uniconverer."
paths = glob.glob(f"{self.folder_path}/*")
paths = [p for p in paths if splitext(p.lower())[1] in [".svg", ".svgz"]]
for path in paths:
out = splitext(path)[0] + '-uniconv.pdf'
cmd = f"uniconv '{path}' '{out}'"
os.popen(cmd).read()
if exists(out) and getsize(out) == 0:
os.remove(out)
class TestW3CSVG:
"Tests using the official W3C SVG testsuite."
def setup_method(self):
"Check if testsuite archive exists, else download and unpack it."
server = "http://www.w3.org"
path = "/Graphics/SVG/Test/20070907/W3C_SVG_12_TinyTestSuite.tar.gz"
url = server + path
archive_path = basename(url)
tar_path = splitext(archive_path)[0]
self.folder_path = join(TEST_ROOT, "samples", splitext(tar_path)[0])
if not exists(self.folder_path):
if not exists(join(TEST_ROOT, "samples", tar_path)):
if not exists(join(TEST_ROOT, "samples", archive_path)):
print(f"downloading {url}")
try:
data = urlopen(url).read()
except OSError as details:
print(details)
print("Check your internet connection and try again!")
return
archive_path = basename(url)
with open(join(TEST_ROOT, "samples", archive_path), "wb") as f:
f.write(data)
print(f"unpacking {archive_path}")
tar_data = gzip.open(join(TEST_ROOT, "samples", archive_path), "rb").read()
with open(join(TEST_ROOT, "samples", tar_path), "wb") as f:
f.write(tar_data)
print(f"extracting into {self.folder_path}")
os.mkdir(self.folder_path)
tar_file = tarfile.TarFile(join(TEST_ROOT, "samples", tar_path))
tar_file.extractall(self.folder_path)
tar_file.close()
if exists(join(TEST_ROOT, "samples", tar_path)):
os.remove(join(TEST_ROOT, "samples", tar_path))
def cleanup(self):
"Remove generated files when running this test class."
paths = glob.glob(join(self.folder_path, 'svg/*-svglib.pdf'))
paths += glob.glob(join(self.folder_path, 'svg/*-uniconv.pdf'))
paths += glob.glob(join(self.folder_path, 'svg/*-svglib.png'))
for i, path in enumerate(paths):
print(f"deleting [{i}] {path}")
os.remove(path)
def test_convert_pdf_png(self):
"""
Test converting W3C SVG files to PDF and PNG using svglib.
``renderPM.drawToFile()`` used in this test is known to trigger an
error sometimes in reportlab which was fixed in reportlab 3.3.26.
See https://github.com/deeplook/svglib/issues/47
"""
exclude_list = [
"animate-elem-41-t.svg", # Freeze renderPM in pathFill()
"animate-elem-78-t.svg", # id
"paint-stroke-06-t.svg",
"paint-stroke-207-t.svg",
"coords-trans-09-t.svg", # renderPDF issue (div by 0)
]
paths = glob.glob(f"{self.folder_path}/svg/*.svg")
msg = f"Destination folder '{self.folder_path}/svg' not found."
assert len(paths) > 0, msg
for i, path in enumerate(paths):
print(f"working on [{i}] {path}")
if basename(path) in exclude_list:
print("excluded (to be tested later)")
continue
# convert
drawing = svglib.svg2rlg(path)
# save as PDF
base = splitext(path)[0] + '-svglib.pdf'
renderPDF.drawToFile(drawing, base, showBoundary=0)
# save as PNG
# (endless loop for file paint-stroke-06-t.svg)
base = splitext(path)[0] + '-svglib.png'
try:
# Can trigger an error in reportlab < 3.3.26.
renderPM.drawToFile(drawing, base, 'PNG')
except TypeError:
print('Svglib: Consider upgrading reportlab to version >= 3.3.26!')
raise
@pytest.mark.skipif(not found_uniconv(), reason="needs uniconv")
def test_convert_pdf_uniconv(self):
"Test converting W3C SVG files to PDF using uniconverter."
paths = glob.glob(f"{self.folder_path}/svg/*")
paths = [p for p in paths if splitext(p.lower())[1] in [".svg", ".svgz"]]
for path in paths:
out = splitext(path)[0] + '-uniconv.pdf'
cmd = f"uniconv '{path}' '{out}'"
os.popen(cmd).read()
if exists(out) and getsize(out) == 0:
os.remove(out)
class TestOtherFiles:
@pytest.mark.skipif(not haveImages, reason="missing pillow library")
def test_png_in_svg(self):
path = join(TEST_ROOT, "samples", "others", "png_in_svg.svg")
drawing = svglib.svg2rlg(path)
result = renderPDF.drawToString(drawing)
# If the PNG image is really included, the size is over 7k.
assert len(result) > 7000
def test_external_svg_in_svg(self):
path = join(TEST_ROOT, "samples", "others", "svg_in_svg.svg")
drawing = svglib.svg2rlg(path)
img_group = drawing.contents[0].contents[0]
# First image points to SVG rendered as a group
assert isinstance(img_group.contents[0], Group)
assert isinstance(img_group.contents[0].contents[0].contents[0], Rect)
assert img_group.contents[0].transform == (1, 0, 0, 1, 200.0, 100.0)
# Second image points directly to a Group with Rect element
assert isinstance(img_group.contents[1], Group)
assert isinstance(img_group.contents[1].contents[0], Rect)
assert img_group.contents[1].transform == (1, 0, 0, 1, 100.0, 200.0)
|
# -*- coding: utf-8 -*-
#----------------------------------------------------------------------------
# Base class for Tsubame platform modules.
#----------------------------------------------------------------------------
# Copyright 2017, Martin Kolman
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#---------------------------------------------------------------------------
from core import constants
from core.signal import Signal
class PlatformModule(object):
"""A Tsubame base platform module."""
def __init__(self):
self.internet_connectivity_changed = Signal()
@property
def platform_id(self):
"""Return an unique string identifying the device module."""
return None
@property
def device_name(self):
"""Return a human readable name of the device."""
return "unknown device"
@property
def preferred_window_wh(self):
"""Return the preferred application window size in pixels."""
# we'll use VGA as a default value
return 640, 480
@property
def start_in_fullscreen(self):
"""Return if Tsubame should be started fullscreen.
NOTE: this is a default value and can be overridden by a
user-set options key, etc.
"""
return False
@property
def fullscreen_only(self):
"""Report if the platform is fullscreen-only.
Some platforms are basically fullscreen-only (Harmattan),
as applications only switch between fullscreen and a task switcher.
"""
return False
@property
def screen_blanking_control_supported(self):
"""There is no universal way to control screen blanking, so its off by default.
NOTE: Screen blanking can be implemented and enabled in the corresponding
device or gui module.
"""
return False
def pause_screen_blanking(self):
"""Pause screen blanking controlled by device module.
calling this method should pause screen blanking
* on mobile devices, screen blanking needs to be paused every n seconds
* on desktop, one call might be enough, still, several calls should
be handled without issues
* also what about restoring the screen blanking on Desktop
once Tsubame exits ?
"""
pass
@property
def supported_gui_module_ids(self):
"""Supported GUI module IDs, ordered by preference from left to right.
THE ":" NOTATION
single GUI modules might support different subsets, the usability of
these subsets can vary based on the current platform
-> this function enables device modules to report which GUI subsets
are most suitable for the given platform
-> the string starts with the module id prefix, is separated by : and
continues with the subset id
EXAMPLE: ["QML:harmattan","QML:indep","GTK"]
-> QML GUI with Harmattan Qt Components is preferred,
QML GUI with platform independent Qt Components is less preferred
and the GTK GUI is set as a fallback if everything else fails
CURRENT USAGE
there are different incompatible native Qt Component sets
on various platforms (Harmattan QTC, Plasma Active QTC, Jolla QTC,...)
the QML GUI aims to support most of these components sets to provide
native look & feel and the subset id is used by the device module
to signal the GUI module which QTC component to use
"""
return ["qt5"] # the Qt 5 GUI is the default
@property
def has_notification_support(self):
"""Report if the device provides its own notification method."""
return False
def notify(self, message, msTimeout=0, icon=""):
"""Send a notification using platform/device specific API."""
pass
@property
def has_keyboard(self):
"""Report if the device has a hardware keyboard."""
return True
@property
def has_buttons(self):
"""Report if the device has some usable buttons other than a hardware keyboard."""
if self.has_volume_keys:
return True
else:
return False
@property
def has_volume_keys(self):
"""Report if the device has application-usable volume control keys or their equivalent.
Basically just two nearby buttons that can be used for zooming up/down,
skipping to next/previous and similar actions.
"""
return False
def enable_volume_keys(self):
pass
@property
def profile_path(self):
"""Return path to the main profile folder or None if default path should be used.
:returns: path to the profile folder or None
:rtype: str or None
"""
return None
@property
def needs_quit_button(self):
"""On some platforms applications need to provide their own shutdown buttons."""
return False
@property
def needs_back_button(self):
"""Some platforms (Sailfish OS) don't need a in-UI back button."""
return True
@property
def needs_page_background(self):
"""Some platforms (Sailfish OS) don't need a page background."""
return True
@property
def handles_url_opening(self):
"""Some platform provide specific APIs for URL opening.
For example, on the N900 a special DBUS command not available
elsewhere needs to be used.
"""
return False
def open_url(self, url):
"""Open a URL."""
import webbrowser
webbrowser.open(url)
@property
def connectivity_status(self):
"""Report the current status of internet connectivity on the device.
None - status reporting not supported or status unknown
True - connected to the Internet
False - disconnected from the Internet
"""
connected = constants.InternetConnectivityStatus.OFFLINE
# open the /proc/net/route file
with open('/proc/net/route', 'r') as f:
for line in f:
# the line is delimited by tabulators
lineSplit = line.split('\t')
# check if the length is valid
if len(lineSplit) >= 11:
if lineSplit[1] == '00000000' and lineSplit[7] == '00000000':
# if destination and mask are 00000000,
# it is probably an Internet connection
connected = constants.InternetConnectivityStatus.ONLINE
break
return connected
def enable_internet_connectivity(self):
"""Try to make sure that the device connects to the Internet."""
pass
@property
def device_type(self):
"""Returns type of the current device.
The device can currently be either a PC
(desktop or laptop/notebook),
smartphone or a tablet.
This is currently used mainly for rough
DPI estimation.
Example:
* high resolution & PC -> low DPI
* high resolution & smartphone -> high DPI
* high resolution & tablet -> lower DPI
This could also be used in the future to
use different PC/smartphone/tablet GUI styles.
By default, the device type is unknown.
"""
return None
@property
def qmlscene_command(self):
"""What should be called to start the qmlscene.
:returns: command to run to start qmlscene
:rtype: str
"""
return "qmlscene"
@property
def universal_components_backend(self):
"""Path to a Universal Components backend suitable for the given platform.
We default to the Controls UC backend.
:returns: path to suitable UC backend
:rtype: str
"""
return "controls"
|
"""
@author: dhoomakethu
"""
from __future__ import absolute_import, unicode_literals
import os
from apocalypse.app import App
from apocalypse.exceptions import ServiceNotRunningError, handle_exception
from apocalypse.exceptions import NoServiceRunningError, NetError
from apocalypse.chaos.events.net import NetworkEmulator
from apocalypse.utils.service_store import (
ServiceStore, update_service_state)
from apocalypse.utils.docker_client import DockerClient
from apocalypse.utils.logger import get_logger
from docker.errors import APIError
import random
logger = get_logger()
stress_exe = "/bin/stress"
tmp_loc = "/tmp"
curr_dir = os.path.dirname(__file__)
curr_dir = curr_dir.split(os.sep)[:-1]
stress_exe = os.sep.join(curr_dir)+stress_exe
# @HandleException(logger, NoServiceRunningError, NetError)
class ChaosApp(App):
"""
Represents Chaos app
"""
_dirty_service = {} # services stopped or terminated
def __init__(self, network):
super(ChaosApp, self).__init__(network)
self._driver = DockerClient()
self.init()
@handle_exception(logger, "exit", NoServiceRunningError, NetError)
def init(self):
"""
Creates a connection to the compute engine.
"""
self._service_store = ServiceStore(self.driver, self.network)
self._emulator = NetworkEmulator(self.store, self.driver)
@handle_exception(logger, "exit", NoServiceRunningError, NetError)
def init_network_emulator(self):
logger.info("Initializing Network emulator")
self.store.update_network_devices()
# self.emulator.init()
@update_service_state
def stop_services(self, services):
"""
stops a cloud instance
"""
services = self._filter_cid(services)
for service in services:
ctr = self.check_service_running(service, raise_on=['terminated'])
logger.info("Stopping docker instance : %s" % service)
self.driver.stop_container(ctr['Id'])
if service not in self._dirty_service:
self._dirty_service[service] = {"ctr": ctr,
"terminated": False}
# self.store.update_service_map()
return services
@update_service_state
def terminate_services(self, services):
"""
terminates a cloud instance
"""
services = self._filter_cid(services)
for service in services:
ctr = self.check_service_running(service,
raise_on=['terminated'])
logger.info("Stopping and "
"removing docker instance : %s" % service)
self.driver.stop_container(ctr['Id'], remove=True)
if service not in self._dirty_service:
self._dirty_service[service] = {"ctr": ctr,
"terminated": True}
else:
self._dirty_service[service]["terminated"] = True
return services
@update_service_state
def kill_process(self, services, process, signal=9):
services = self._filter_cid(services)
for service in services:
ctr = self.check_service_running(service)
procs = self._get_running_processes(ctr["Id"])
for proc in procs:
try:
cmd = "kill -%s %s" % (signal, proc['pid'])
logger.info('killing random process {pid: %s, name: %s, '
'owner: %s} from cid %s' % (
proc['pid'], proc['name'], proc['user'], service))
self.driver.execute_command(ctr["Id"], cmd)
except APIError as e:
logger.error('Docker error : %s' % e)
break
return services
@update_service_state
def remote_kill_process(self, services, **kwargs):
# process, signal, ssh, cmd, sudo
super(ChaosApp, self).remote_kill_process(
instance_ids=services, **kwargs)
return services
@update_service_state
def stop_upstart_job(self, services, **kwargs):
# ssh, job, cmd,
super(ChaosApp, self).stop_upstart_job(instance_ids=services,
**kwargs)
return services
@update_service_state
def stop_initd_job(self, services, **kwargs):
# ssh, job, cmd
super(ChaosApp, self).stop_initd_job(instance_ids=services,
**kwargs)
return services
@update_service_state
def reboot_services(self, services):
services = self._filter_cid(services)
for service in services:
ctr = self.check_service_running(service, ["terminated"])
logger.info("Rebooting docker container %s " % service)
self.driver.restart_container(ctr["Id"])
            # A rebooted container is running again, so clear its dirty state.
            if service in self._dirty_service:
                self._dirty_service.pop(service)
return services
@update_service_state
def burn_cpu(self, services, cpuload, duration, cpu_core):
services = self._filter_cid(services)
t = tmp_loc + "/" + os.path.basename(stress_exe)
access_cmd = "chmod 755 %s" % t
cmd = ("%s cpu --cpuload %s"
" --duration %s"
" --cpucore %s" % (t,
cpuload,
duration,
cpu_core
)
)
for service in services:
ctr = self.check_service_running(service)
logger.info("Setting CPU load to %s "
"for %s Seconds on "
"container %s" % (cpuload, duration, service))
# self._copy_to_container(ctr, stress_exe, tmp_loc)
self.driver.copy_to_container(ctr, stress_exe, tmp_loc)
self.driver.execute_command(ctr, access_cmd)
self.driver.execute_command(ctr, cmd)
return services
@update_service_state
def burn_ram(self, services, ramload, duration):
services = self._filter_cid(services)
t = tmp_loc + "/" + os.path.basename(stress_exe)
access_cmd = "chmod 755 %s" % t
cmd = ("%s ram --ramload %s"
" --duration %s" % (t, ramload, duration))
for service in services:
ctr = self.check_service_running(service)
logger.info("Setting RAM load to %s "
"for %s Seconds on "
"container %s" % (ramload, duration, service))
self.driver.copy_to_container(ctr, stress_exe, tmp_loc)
result = self.driver.execute_command(ctr, access_cmd)
logger.debug(result)
result = self.driver.execute_command(ctr, cmd)
logger.debug(result)
return services
@update_service_state
def burn_io(self, services, **kwargs):
services = self._filter_cid(services)
super(ChaosApp, self).burn_io(instance_ids=services)
return services
@update_service_state
def burn_disk(self, services, **kwargs):
# instance_ids, size, path, duration
services = self._filter_cid(services)
super(ChaosApp, self).burn_disk(instance_ids=services, **kwargs)
return services
def network_loss(self, services, **kwargs):
"""
{
loss: 5%,
correlation: 25
}
        :param services:
:param kwargs:
:return:
"""
for service in services:
self.check_service_running(service)
logger.info("Simulating network loss on %s with %s" % (service,
kwargs))
self.emulator.loss(service, **kwargs)
return services
def network_blackout(self, services):
for service in services:
logger.info("Simulating network blackout on %s " % service)
self.check_service_running(service)
self.emulator.blackhole(service)
return services
def network_corrupt(self, services, **kwargs):
for service in services:
logger.info("Simulating network packet corruption "
"on %s with %s" % (service, kwargs))
self.check_service_running(service)
self.emulator.corrupt(service, **kwargs)
return services
def network_duplicate(self, services, **kwargs):
for service in services:
logger.info("Simulating network packet duplication "
"on %s with %s" % (service, kwargs))
self.check_service_running(service)
self.emulator.duplicate(service, **kwargs)
return services
def network_delay(self, services, **kwargs):
"""
{
"service": "userservice",
"event": {
"jitter": "100ms",
"delay": "1s",
"distribution": "normal"
}
}
        :param services:
:param kwargs:
:return:
"""
for service in services:
logger.info("Simulating network delay "
"on %s with %s" % (service, kwargs))
self.check_service_running(service)
self.emulator.delay(service, **kwargs)
return services
def network_reorder(self, services, **kwargs):
for service in services:
logger.info("Simulating network packet reorder "
"on %s with %s" % (service, kwargs))
self.check_service_running(service)
self.emulator.reorder(service, **kwargs)
return services
def get_services(self):
services = self.store.services
return services
def get_service_state(self, service):
return self.store.get_state(service)
def choice(self, services=None, event=None):
"""
choose a random vm
"""
external = ['redis', 'etcd', 'cassandra', 'registrator']
exclude = ['killprocesses', 'burncpu', 'burnram', 'burndisk', 'burnio']
if services:
if isinstance(services, (list, tuple)):
_services = list(set(services) & set(self.get_services()))
# for _service in services:
# for ctr in self.get_services():
# if _service in ctr['Name'] or _service in ctr['Id']:
# _services.append(ctr)
if _services:
return _services
else:
# for ctr in self.get_services().items():
# if services in ctr['Name'] or services in ctr['Id']:
# return ctr
return list(set([services]) & set(self.get_services()))
logger.info("Docker containers '%s' is "
"not running/found!!!" % (services,))
return None
if event is not None and event.lower() in exclude:
            _services = [service for service in self.get_services()
                         if not any(ext == service
                                    for ext in external)]
if not _services:
logger.info("No docker services running!!!")
vm = None
else:
vm = random.choice(_services)
else:
services = self.get_services()
if self.get_services():
vm = random.choice(services)
logger.info("Picking random docker container %s " % vm)
else:
logger.info("No docker containers running!!!")
vm = None
return vm
def __repr__(self):
return '%s' % self.__class__.__name__
def _get_running_processes(self, cid):
cmd = "ps aux"
resp = self.driver.execute_command(cid, cmd)
header = resp[0].split()
pid_col = 0
user_col = 1
cmd_col = 2
if len(header) > 4:
for i, col in enumerate(header):
if "pid" in col.lower():
pid_col = i
elif "user" in col.lower():
user_col = i
elif "command" in col.lower():
cmd_col = i
columns = len(header) - 1
procs = []
for r in resp[1:]:
cols = r.split(None, columns)
            if len(cols) > max(pid_col, user_col, cmd_col):
pid, user, name = cols[pid_col], cols[user_col], cols[cmd_col]
if name != cmd:
procs.append({'pid': pid, 'user': user, 'name': name})
return procs
def _filter_cid(self, cids):
"""
        Filters out None container IDs so that only valid IDs are processed.
"""
return [cid for cid in cids if cid is not None]
def _update_service_state(self, service, category, state):
service_info = self.store.get_service(service)
service_state = service_info.get("state")
if state not in service_state:
            service_state[category] = state
def get_controller_attr(self, ctr, port_dict, scheme_dict):
ctr_name = self.driver.get_container_info(ctr, 'Name')
combined_dict = {}
for (service1, port), (service2, scheme) in zip(port_dict.items(), scheme_dict.items()):
if service1 in ctr_name:
combined_dict[ctr_name] = [port, scheme]
return combined_dict, ctr_name
def check_service_running(self, service,
raise_on=["terminated", "stopped"]):
if service in self._dirty_service:
dirty_info = self._dirty_service.get(service)
ctr = self._dirty_service.get(service).get("ctr")
state = "stopped"
if dirty_info.get("terminated"):
state = "terminated"
if state in raise_on:
raise ServiceNotRunningError("Service %s "
"is %s" % (service, state))
return ctr
return self.store.get_container_info(service)
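# A minimal usage sketch (assumes a Docker network named "chaos_net" with at
# least one running container attached to it; values are placeholders):
#
#   app = ChaosApp("chaos_net")
#   app.init_network_emulator()
#   target = app.choice()
#   if target is not None:
#       app.burn_cpu([target], cpuload=80, duration=30, cpu_core=0)
#       app.reboot_services([target])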
|
from django.utils import timezone
from edc_constants.constants import YES, NO, POS, NEG, IND
from .factories import MaternalEligibilityFactory
from .factories import MaternalConsentFactory
from td_maternal.forms import RapidTestResultForm, BaseMaternalModelForm
from ..models import RapidTestResult
from .base_test_case import BaseTestCase
from datetime import date, datetime
class TestRapidTestForm(BaseTestCase):
def setUp(self):
super(TestRapidTestForm, self).setUp()
self.maternal_eligibility = MaternalEligibilityFactory()
self.assertTrue(self.maternal_eligibility.is_eligible)
self.maternal_consent = MaternalConsentFactory(
registered_subject=self.maternal_eligibility.registered_subject)
self.registered_subject = self.maternal_consent.registered_subject
self.data = {'rapid_test_done': YES,
'result_date': timezone.now(),
'result': NEG, }
def test_result_date_provided(self):
"""Test if result date of the rapid test is provided"""
self.data['rapid_test_done'] = YES
self.data['result_date'] = None
rapid_form = RapidTestResultForm(data=self.data)
self.assertIn('If a rapid test was processed, what is the date'
' of the rapid test?', rapid_form.errors.get('__all__'))
def test_rapid_test_results(self):
"""Test if the result of rapid test is provided"""
self.data['rapid_test_done'] = YES
self.data['result_date'] = timezone.now()
self.data['result'] = None
rapid_form = RapidTestResultForm(data=self.data)
self.assertIn('If a rapid test was processed, what is the test result?',
rapid_form.errors.get('__all__'))
def test_result_date_present_no_rapid_test_result(self):
"""Test if there is a date for test and there is no test"""
result_date = date.today()
self.data['rapid_test_done'] = NO
self.data['result_date'] = timezone.now()
rapid_form = RapidTestResultForm(data=self.data)
self.assertIn('If a rapid test was not processed, please do not provide the result date. '
'Got {}.'.format(result_date.strftime('%Y-%m-%d')), rapid_form.errors.get('__all__'))
def test_validate_rapid_test_not_done(self):
"""test if the rapid test is not done"""
self.data['rapid_test_done'] = NO
self.data['result_date'] = None
self.data['result'] = None
rapid_form = RapidTestResultForm(data=self.data)
rapid_form.is_valid()
self.assertFalse(rapid_form.is_valid())
def test_rapid_test_result_present_no_rapid_test_done(self):
"""Test if the results are present and there is no rapid test performed"""
self.data['rapid_test_done'] = NO
self.data['result_date'] = None
self.data['result'] = NEG
rapid_form = RapidTestResultForm(data=self.data)
errors = ''.join(rapid_form.errors.get('__all__'))
self.assertIn('If a rapid test was not processed, please do not provide the result. '
'Got {}.'.format(self.data['result']), errors)
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import cStringIO
import hashlib
import logging
import os
import sys
import tempfile
import unittest
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, ROOT_DIR)
import isolate
# Create shortcuts.
from isolate import KEY_TOUCHED, KEY_TRACKED, KEY_UNTRACKED
def _size(*args):
return os.stat(os.path.join(ROOT_DIR, *args)).st_size
def _sha1(*args):
with open(os.path.join(ROOT_DIR, *args), 'rb') as f:
return hashlib.sha1(f.read()).hexdigest()
class IsolateTest(unittest.TestCase):
def setUp(self):
super(IsolateTest, self).setUp()
# Everything should work even from another directory.
os.chdir(os.path.dirname(ROOT_DIR))
def test_load_isolate_for_flavor_empty(self):
content = "{}"
command, infiles, touched, read_only = isolate.load_isolate_for_flavor(
content, isolate.get_flavor())
self.assertEqual([], command)
self.assertEqual([], infiles)
self.assertEqual([], touched)
self.assertEqual(None, read_only)
def test_isolated_load_empty(self):
values = {
}
expected = {
'command': [],
'files': {},
'os': isolate.get_flavor(),
}
self.assertEqual(expected, isolate.IsolatedFile.load(values).flatten())
def test_isolated_load(self):
values = {
'command': 'maybe',
'files': {'foo': 42},
'read_only': 2,
}
expected = {
'command': 'maybe',
'files': {'foo': 42},
'os': isolate.get_flavor(),
'read_only': 2,
}
self.assertEqual(expected, isolate.IsolatedFile.load(values).flatten())
def test_isolated_load_unexpected(self):
values = {
'foo': 'bar',
}
expected = (
("Found unexpected entry {'foo': 'bar'} while constructing an "
"object IsolatedFile"),
{'foo': 'bar'},
'IsolatedFile')
try:
isolate.IsolatedFile.load(values)
self.fail()
except ValueError, e:
self.assertEqual(expected, e.args)
def test_savedstate_load_empty(self):
values = {
}
expected = {
'variables': {},
}
self.assertEqual(expected, isolate.SavedState.load(values).flatten())
def test_savedstate_load(self):
values = {
'isolate_file': os.path.join(ROOT_DIR, 'maybe'),
'variables': {'foo': 42},
}
expected = {
'isolate_file': os.path.join(ROOT_DIR, 'maybe'),
'variables': {'foo': 42},
}
self.assertEqual(expected, isolate.SavedState.load(values).flatten())
def test_unknown_key(self):
try:
isolate.verify_variables({'foo': [],})
self.fail()
except AssertionError:
pass
def test_unknown_var(self):
try:
isolate.verify_condition({'variables': {'foo': [],}})
self.fail()
except AssertionError:
pass
def test_union(self):
value1 = {
'a': set(['A']),
'b': ['B', 'C'],
'c': 'C',
}
value2 = {
'a': set(['B', 'C']),
'b': [],
'd': set(),
}
expected = {
'a': set(['A', 'B', 'C']),
'b': ['B', 'C'],
'c': 'C',
'd': set(),
}
self.assertEqual(expected, isolate.union(value1, value2))
def test_eval_content(self):
try:
# Intrinsics are not available.
isolate.eval_content('map(str, [1, 2])')
self.fail()
except NameError:
pass
def test_load_isolate_as_config_empty(self):
self.assertEqual({}, isolate.load_isolate_as_config(
{}, None, []).flatten())
def test_load_isolate_as_config(self):
value = {
'variables': {
KEY_TRACKED: ['a'],
KEY_UNTRACKED: ['b'],
KEY_TOUCHED: ['touched'],
},
'conditions': [
['OS=="atari"', {
'variables': {
KEY_TRACKED: ['c', 'x'],
KEY_UNTRACKED: ['d'],
KEY_TOUCHED: ['touched_a'],
'command': ['echo', 'Hello World'],
'read_only': True,
},
}, { # else
'variables': {
KEY_TRACKED: ['e', 'x'],
KEY_UNTRACKED: ['f'],
KEY_TOUCHED: ['touched_e'],
'command': ['echo', 'You should get an Atari'],
},
}],
['OS=="amiga"', {
'variables': {
KEY_TRACKED: ['g'],
'read_only': False,
},
}],
['OS=="dendy"', {
}],
['OS=="coleco"', {
}, { # else
'variables': {
KEY_UNTRACKED: ['h'],
'read_only': None,
},
}],
],
}
expected = {
'amiga': {
'command': ['echo', 'You should get an Atari'],
KEY_TOUCHED: ['touched', 'touched_e'],
KEY_TRACKED: ['a', 'e', 'g', 'x'],
KEY_UNTRACKED: ['b', 'f', 'h'],
'read_only': False,
},
'atari': {
'command': ['echo', 'Hello World'],
KEY_TOUCHED: ['touched', 'touched_a'],
KEY_TRACKED: ['a', 'c', 'x'],
KEY_UNTRACKED: ['b', 'd', 'h'],
'read_only': True,
},
'coleco': {
'command': ['echo', 'You should get an Atari'],
KEY_TOUCHED: ['touched', 'touched_e'],
KEY_TRACKED: ['a', 'e', 'x'],
KEY_UNTRACKED: ['b', 'f'],
},
'dendy': {
'command': ['echo', 'You should get an Atari'],
KEY_TOUCHED: ['touched', 'touched_e'],
KEY_TRACKED: ['a', 'e', 'x'],
KEY_UNTRACKED: ['b', 'f', 'h'],
},
}
self.assertEqual(
expected, isolate.load_isolate_as_config(value, None, []).flatten())
def test_load_isolate_as_config_duplicate_command(self):
value = {
'variables': {
'command': ['rm', '-rf', '/'],
},
'conditions': [
['OS=="atari"', {
'variables': {
'command': ['echo', 'Hello World'],
},
}],
],
}
try:
isolate.load_isolate_as_config(value, None, [])
self.fail()
except AssertionError:
pass
def test_load_isolate_as_config_no_condition(self):
value = {
'variables': {
KEY_TRACKED: ['a'],
KEY_UNTRACKED: ['b'],
},
}
expected = {
KEY_TRACKED: ['a'],
KEY_UNTRACKED: ['b'],
}
actual = isolate.load_isolate_as_config(value, None, [])
# Flattening the whole config will discard 'None'.
self.assertEqual({}, actual.flatten())
self.assertEqual([None], actual.per_os.keys())
# But the 'None' value is still available as a backup.
self.assertEqual(expected, actual.per_os[None].flatten())
def test_invert_map(self):
value = {
'amiga': {
'command': ['echo', 'You should get an Atari'],
KEY_TOUCHED: ['touched', 'touched_e'],
KEY_TRACKED: ['a', 'e', 'g', 'x'],
KEY_UNTRACKED: ['b', 'f', 'h'],
'read_only': False,
},
'atari': {
'command': ['echo', 'Hello World'],
KEY_TOUCHED: ['touched', 'touched_a'],
KEY_TRACKED: ['a', 'c', 'x'],
KEY_UNTRACKED: ['b', 'd', 'h'],
'read_only': True,
},
'coleco': {
'command': ['echo', 'You should get an Atari'],
KEY_TOUCHED: ['touched', 'touched_e'],
KEY_TRACKED: ['a', 'e', 'x'],
KEY_UNTRACKED: ['b', 'f'],
},
'dendy': {
'command': ['echo', 'You should get an Atari'],
KEY_TOUCHED: ['touched', 'touched_e'],
KEY_TRACKED: ['a', 'e', 'x'],
KEY_UNTRACKED: ['b', 'f', 'h'],
},
}
expected_values = {
'command': {
('echo', 'Hello World'): set(['atari']),
('echo', 'You should get an Atari'): set(['amiga', 'coleco', 'dendy']),
},
KEY_TRACKED: {
'a': set(['amiga', 'atari', 'coleco', 'dendy']),
'c': set(['atari']),
'e': set(['amiga', 'coleco', 'dendy']),
'g': set(['amiga']),
'x': set(['amiga', 'atari', 'coleco', 'dendy']),
},
KEY_UNTRACKED: {
'b': set(['amiga', 'atari', 'coleco', 'dendy']),
'd': set(['atari']),
'f': set(['amiga', 'coleco', 'dendy']),
'h': set(['amiga', 'atari', 'dendy']),
},
KEY_TOUCHED: {
'touched': set(['amiga', 'atari', 'coleco', 'dendy']),
'touched_a': set(['atari']),
'touched_e': set(['amiga', 'coleco', 'dendy']),
},
'read_only': {
None: set(['coleco', 'dendy']),
False: set(['amiga']),
True: set(['atari']),
},
}
expected_oses = set(['amiga', 'atari', 'coleco', 'dendy'])
actual_values, actual_oses = isolate.invert_map(value)
self.assertEqual(expected_values, actual_values)
self.assertEqual(expected_oses, actual_oses)
def test_reduce_inputs(self):
values = {
'command': {
('echo', 'Hello World'): set(['atari']),
('echo', 'You should get an Atari'): set(['amiga', 'coleco', 'dendy']),
},
KEY_TRACKED: {
'a': set(['amiga', 'atari', 'coleco', 'dendy']),
'c': set(['atari']),
'e': set(['amiga', 'coleco', 'dendy']),
'g': set(['amiga']),
'x': set(['amiga', 'atari', 'coleco', 'dendy']),
},
KEY_UNTRACKED: {
'b': set(['amiga', 'atari', 'coleco', 'dendy']),
'd': set(['atari']),
'f': set(['amiga', 'coleco', 'dendy']),
'h': set(['amiga', 'atari', 'dendy']),
},
KEY_TOUCHED: {
'touched': set(['amiga', 'atari', 'coleco', 'dendy']),
'touched_a': set(['atari']),
'touched_e': set(['amiga', 'coleco', 'dendy']),
},
'read_only': {
None: set(['coleco', 'dendy']),
False: set(['amiga']),
True: set(['atari']),
},
}
oses = set(['amiga', 'atari', 'coleco', 'dendy'])
expected_values = {
'command': {
('echo', 'Hello World'): set(['atari']),
('echo', 'You should get an Atari'): set(['!atari']),
},
KEY_TRACKED: {
'a': set([None]),
'c': set(['atari']),
'e': set(['!atari']),
'g': set(['amiga']),
'x': set([None]),
},
KEY_UNTRACKED: {
'b': set([None]),
'd': set(['atari']),
'f': set(['!atari']),
'h': set(['!coleco']),
},
KEY_TOUCHED: {
'touched': set([None]),
'touched_a': set(['atari']),
'touched_e': set(['!atari']),
},
'read_only': {
None: set(['coleco', 'dendy']),
False: set(['amiga']),
True: set(['atari']),
},
}
actual_values, actual_oses = isolate.reduce_inputs(values, oses)
self.assertEqual(expected_values, actual_values)
self.assertEqual(oses, actual_oses)
def test_reduce_inputs_merge_subfolders_and_files(self):
values = {
'command': {},
KEY_TRACKED: {
'folder/tracked_file': set(['win']),
'folder_helper/tracked_file': set(['win']),
},
KEY_UNTRACKED: {
'folder/': set(['linux', 'mac', 'win']),
'folder/subfolder/': set(['win']),
'folder/untracked_file': set(['linux', 'mac', 'win']),
'folder_helper/': set(['linux']),
},
KEY_TOUCHED: {
        'folder/touched_file': set(['win']),
        'folder/helper_folder/deep_file': set(['win']),
        'folder_helper/touched_file1': set(['mac', 'win']),
        'folder_helper/touched_file2': set(['linux']),
},
}
oses = set(['linux', 'mac', 'win'])
expected_values = {
'command': {},
KEY_TRACKED: {
'folder_helper/tracked_file': set(['win']),
},
KEY_UNTRACKED: {
'folder/': set([None]),
'folder_helper/': set(['linux']),
},
KEY_TOUCHED: {
        'folder_helper/touched_file1': set(['!linux']),
},
'read_only': {},
}
actual_values, actual_oses = isolate.reduce_inputs(values, oses)
self.assertEqual(expected_values, actual_values)
self.assertEqual(oses, actual_oses)
def test_reduce_inputs_take_strongest_dependency(self):
values = {
'command': {
('echo', 'Hello World'): set(['atari']),
('echo', 'You should get an Atari'): set(['amiga', 'coleco', 'dendy']),
},
KEY_TRACKED: {
'a': set(['amiga', 'atari', 'coleco', 'dendy']),
'b': set(['amiga', 'atari', 'coleco']),
},
KEY_UNTRACKED: {
'c': set(['amiga', 'atari', 'coleco', 'dendy']),
'd': set(['amiga', 'coleco', 'dendy']),
},
KEY_TOUCHED: {
'a': set(['amiga', 'atari', 'coleco', 'dendy']),
'b': set(['atari', 'coleco', 'dendy']),
'c': set(['amiga', 'atari', 'coleco', 'dendy']),
'd': set(['atari', 'coleco', 'dendy']),
},
}
oses = set(['amiga', 'atari', 'coleco', 'dendy'])
expected_values = {
'command': {
('echo', 'Hello World'): set(['atari']),
('echo', 'You should get an Atari'): set(['!atari']),
},
KEY_TRACKED: {
'a': set([None]),
'b': set(['!dendy']),
},
KEY_UNTRACKED: {
'c': set([None]),
'd': set(['!atari']),
},
KEY_TOUCHED: {
'b': set(['dendy']),
'd': set(['atari']),
},
'read_only': {},
}
actual_values, actual_oses = isolate.reduce_inputs(values, oses)
self.assertEqual(expected_values, actual_values)
self.assertEqual(oses, actual_oses)
def test_convert_map_to_isolate_dict(self):
values = {
'command': {
('echo', 'Hello World'): set(['atari']),
('echo', 'You should get an Atari'): set(['!atari']),
},
KEY_TRACKED: {
'a': set([None]),
'c': set(['atari']),
'e': set(['!atari']),
'g': set(['amiga']),
'x': set([None]),
},
KEY_UNTRACKED: {
'b': set([None]),
'd': set(['atari']),
'f': set(['!atari']),
'h': set(['!coleco']),
},
KEY_TOUCHED: {
'touched': set([None]),
'touched_a': set(['atari']),
'touched_e': set(['!atari']),
},
'read_only': {
None: set(['coleco', 'dendy']),
False: set(['amiga']),
True: set(['atari']),
},
}
oses = set(['amiga', 'atari', 'coleco', 'dendy'])
expected = {
'variables': {
KEY_TRACKED: ['a', 'x'],
KEY_UNTRACKED: ['b'],
KEY_TOUCHED: ['touched'],
},
'conditions': [
['OS=="amiga"', {
'variables': {
KEY_TRACKED: ['g'],
'read_only': False,
},
}],
['OS=="atari"', {
'variables': {
'command': ['echo', 'Hello World'],
KEY_TRACKED: ['c'],
KEY_UNTRACKED: ['d'],
KEY_TOUCHED: ['touched_a'],
'read_only': True,
},
}, {
'variables': {
'command': ['echo', 'You should get an Atari'],
KEY_TRACKED: ['e'],
KEY_UNTRACKED: ['f'],
KEY_TOUCHED: ['touched_e'],
},
}],
['OS=="coleco"', {
}, {
'variables': {
KEY_UNTRACKED: ['h'],
},
}],
],
}
self.assertEqual(
expected, isolate.convert_map_to_isolate_dict(values, oses))
def test_merge_two_empty(self):
# Flat stay flat. Pylint is confused about union() return type.
# pylint: disable=E1103
actual = isolate.union(
isolate.union(
isolate.Configs([], None),
isolate.load_isolate_as_config({}, None, [])),
isolate.load_isolate_as_config({}, None, [])).flatten()
self.assertEqual({}, actual)
def test_merge_empty(self):
actual = isolate.convert_map_to_isolate_dict(
*isolate.reduce_inputs(*isolate.invert_map({})))
self.assertEqual({}, actual)
def test_load_two_conditions(self):
linux = {
'conditions': [
['OS=="linux"', {
'variables': {
'isolate_dependency_tracked': [
'file_linux',
'file_common',
],
},
}],
],
}
mac = {
'conditions': [
['OS=="mac"', {
'variables': {
'isolate_dependency_tracked': [
'file_mac',
'file_common',
],
},
}],
],
}
expected = {
'linux': {
'isolate_dependency_tracked': ['file_common', 'file_linux'],
},
'mac': {
'isolate_dependency_tracked': ['file_common', 'file_mac'],
},
}
# Pylint is confused about union() return type.
# pylint: disable=E1103
configs = isolate.union(
isolate.union(
isolate.Configs([], None),
isolate.load_isolate_as_config(linux, None, [])),
isolate.load_isolate_as_config(mac, None, [])).flatten()
self.assertEqual(expected, configs)
def test_load_three_conditions(self):
linux = {
'conditions': [
['OS=="linux"', {
'variables': {
'isolate_dependency_tracked': [
'file_linux',
'file_common',
],
},
}],
],
}
mac = {
'conditions': [
['OS=="mac"', {
'variables': {
'isolate_dependency_tracked': [
'file_mac',
'file_common',
],
},
}],
],
}
win = {
'conditions': [
['OS=="win"', {
'variables': {
'isolate_dependency_tracked': [
'file_win',
'file_common',
],
},
}],
],
}
expected = {
'linux': {
'isolate_dependency_tracked': ['file_common', 'file_linux'],
},
'mac': {
'isolate_dependency_tracked': ['file_common', 'file_mac'],
},
'win': {
'isolate_dependency_tracked': ['file_common', 'file_win'],
},
}
# Pylint is confused about union() return type.
# pylint: disable=E1103
configs = isolate.union(
isolate.union(
isolate.union(
isolate.Configs([], None),
isolate.load_isolate_as_config(linux, None, [])),
isolate.load_isolate_as_config(mac, None, [])),
isolate.load_isolate_as_config(win, None, [])).flatten()
self.assertEqual(expected, configs)
def test_merge_three_conditions(self):
values = {
'linux': {
'isolate_dependency_tracked': ['file_common', 'file_linux'],
},
'mac': {
'isolate_dependency_tracked': ['file_common', 'file_mac'],
},
'win': {
'isolate_dependency_tracked': ['file_common', 'file_win'],
},
}
expected = {
'variables': {
'isolate_dependency_tracked': [
'file_common',
],
},
'conditions': [
['OS=="linux"', {
'variables': {
'isolate_dependency_tracked': [
'file_linux',
],
},
}],
['OS=="mac"', {
'variables': {
'isolate_dependency_tracked': [
'file_mac',
],
},
}],
['OS=="win"', {
'variables': {
'isolate_dependency_tracked': [
'file_win',
],
},
}],
],
}
actual = isolate.convert_map_to_isolate_dict(
*isolate.reduce_inputs(*isolate.invert_map(values)))
self.assertEqual(expected, actual)
def test_configs_comment(self):
# Pylint is confused with isolate.union() return type.
# pylint: disable=E1103
configs = isolate.union(
isolate.load_isolate_as_config({}, '# Yo dawg!\n# Chill out.\n', []),
isolate.load_isolate_as_config({}, None, []))
self.assertEqual('# Yo dawg!\n# Chill out.\n', configs.file_comment)
configs = isolate.union(
isolate.load_isolate_as_config({}, None, []),
isolate.load_isolate_as_config({}, '# Yo dawg!\n# Chill out.\n', []))
self.assertEqual('# Yo dawg!\n# Chill out.\n', configs.file_comment)
# Only keep the first one.
configs = isolate.union(
isolate.load_isolate_as_config({}, '# Yo dawg!\n', []),
isolate.load_isolate_as_config({}, '# Chill out.\n', []))
self.assertEqual('# Yo dawg!\n', configs.file_comment)
def test_extract_comment(self):
self.assertEqual(
'# Foo\n# Bar\n', isolate.extract_comment('# Foo\n# Bar\n{}'))
self.assertEqual('', isolate.extract_comment('{}'))
def _test_pretty_print_impl(self, value, expected):
actual = cStringIO.StringIO()
isolate.pretty_print(value, actual)
self.assertEqual(expected, actual.getvalue())
def test_pretty_print_empty(self):
self._test_pretty_print_impl({}, '{\n}\n')
def test_pretty_print_mid_size(self):
value = {
'variables': {
'bar': [
'file1',
'file2',
],
},
'conditions': [
['OS=\"foo\"', {
'variables': {
isolate.KEY_UNTRACKED: [
'dir1',
'dir2',
],
isolate.KEY_TRACKED: [
'file4',
'file3',
],
'command': ['python', '-c', 'print "H\\i\'"'],
'read_only': True,
'relative_cwd': 'isol\'at\\e',
},
}],
['OS=\"bar\"', {
'variables': {},
}, {
'variables': {},
}],
],
}
expected = (
"{\n"
" 'variables': {\n"
" 'bar': [\n"
" 'file1',\n"
" 'file2',\n"
" ],\n"
" },\n"
" 'conditions': [\n"
" ['OS=\"foo\"', {\n"
" 'variables': {\n"
" 'command': [\n"
" 'python',\n"
" '-c',\n"
" 'print \"H\\i\'\"',\n"
" ],\n"
" 'relative_cwd': 'isol\\'at\\\\e',\n"
" 'read_only': True\n"
" 'isolate_dependency_tracked': [\n"
" 'file4',\n"
" 'file3',\n"
" ],\n"
" 'isolate_dependency_untracked': [\n"
" 'dir1',\n"
" 'dir2',\n"
" ],\n"
" },\n"
" }],\n"
" ['OS=\"bar\"', {\n"
" 'variables': {\n"
" },\n"
" }, {\n"
" 'variables': {\n"
" },\n"
" }],\n"
" ],\n"
"}\n")
self._test_pretty_print_impl(value, expected)
class IsolateLoad(unittest.TestCase):
def setUp(self):
super(IsolateLoad, self).setUp()
self.directory = tempfile.mkdtemp(prefix='isolate_')
def tearDown(self):
isolate.run_isolated.rmtree(self.directory)
super(IsolateLoad, self).tearDown()
def _get_option(self, isolate_file):
class Options(object):
isolated = os.path.join(self.directory, 'isolated')
outdir = os.path.join(self.directory, 'outdir')
isolate = isolate_file
variables = {'foo': 'bar'}
ignore_broken_items = False
return Options()
def _cleanup_isolated(self, expected_isolated, actual_isolated):
"""Modifies isolated to remove the non-deterministic parts."""
if sys.platform == 'win32':
# 'm' are not saved in windows.
for values in expected_isolated['files'].itervalues():
del values['m']
for item in actual_isolated['files'].itervalues():
if 't' in item:
self.assertTrue(item.pop('t'))
def test_load_stale_isolated(self):
isolate_file = os.path.join(
ROOT_DIR, 'tests', 'isolate', 'touch_root.isolate')
# Data to be loaded in the .isolated file. Do not create a .state file.
input_data = {
'command': ['python'],
'files': {
'foo': {
"m": 416,
"h": "invalid",
"s": 538,
"t": 1335146921,
},
os.path.join('tests', 'isolate', 'touch_root.py'): {
"m": 488,
"h": "invalid",
"s": 538,
"t": 1335146921,
},
},
}
options = self._get_option(isolate_file)
isolate.trace_inputs.write_json(options.isolated, input_data, False)
# A CompleteState object contains two parts:
# - Result instance stored in complete_state.isolated, corresponding to the
# .isolated file, is what is read by run_test_from_archive.py.
    # - SavedState instance stored in complete_state.saved_state,
# corresponding to the .state file, which is simply to aid the developer
# when re-running the same command multiple times and contain
# discardable information.
complete_state = isolate.load_complete_state(options, None)
actual_isolated = complete_state.isolated.flatten()
actual_saved_state = complete_state.saved_state.flatten()
expected_isolated = {
'command': ['python', 'touch_root.py'],
'files': {
os.path.join(u'tests', 'isolate', 'touch_root.py'): {
'm': 488,
'h': _sha1('tests', 'isolate', 'touch_root.py'),
's': _size('tests', 'isolate', 'touch_root.py'),
},
'isolate.py': {
'm': 488,
'h': _sha1('isolate.py'),
's': _size('isolate.py'),
},
},
'os': isolate.get_flavor(),
'relative_cwd': os.path.join('tests', 'isolate'),
}
self._cleanup_isolated(expected_isolated, actual_isolated)
self.assertEqual(expected_isolated, actual_isolated)
expected_saved_state = {
'isolate_file': isolate_file,
'variables': {'foo': 'bar'},
}
self.assertEqual(expected_saved_state, actual_saved_state)
def test_subdir(self):
    # The resulting .isolated file will be missing ../../isolate.py because
    # that file is outside the --subdir parameter.
isolate_file = os.path.join(
ROOT_DIR, 'tests', 'isolate', 'touch_root.isolate')
options = self._get_option(isolate_file)
complete_state = isolate.load_complete_state(
options, os.path.join('tests', 'isolate'))
actual_isolated = complete_state.isolated.flatten()
actual_saved_state = complete_state.saved_state.flatten()
expected_isolated = {
'command': ['python', 'touch_root.py'],
'files': {
os.path.join('tests', 'isolate', 'touch_root.py'): {
'm': 488,
'h': _sha1('tests', 'isolate', 'touch_root.py'),
's': _size('tests', 'isolate', 'touch_root.py'),
},
},
'os': isolate.get_flavor(),
'relative_cwd': os.path.join('tests', 'isolate'),
}
self._cleanup_isolated(expected_isolated, actual_isolated)
self.assertEqual(expected_isolated, actual_isolated)
expected_saved_state = {
'isolate_file': isolate_file,
'variables': {'foo': 'bar'},
}
self.assertEqual(expected_saved_state, actual_saved_state)
def test_subdir_variable(self):
    # The resulting .isolated file will be missing ../../isolate.py because
    # that file is outside the --subdir parameter.
isolate_file = os.path.join(
ROOT_DIR, 'tests', 'isolate', 'touch_root.isolate')
options = self._get_option(isolate_file)
options.variables['BAZ'] = os.path.join('tests', 'isolate')
complete_state = isolate.load_complete_state(options, '<(BAZ)')
actual_isolated = complete_state.isolated.flatten()
actual_saved_state = complete_state.saved_state.flatten()
expected_isolated = {
'command': ['python', 'touch_root.py'],
'files': {
os.path.join('tests', 'isolate', 'touch_root.py'): {
'm': 488,
'h': _sha1('tests', 'isolate', 'touch_root.py'),
's': _size('tests', 'isolate', 'touch_root.py'),
},
},
'os': isolate.get_flavor(),
'relative_cwd': os.path.join('tests', 'isolate'),
}
self._cleanup_isolated(expected_isolated, actual_isolated)
self.assertEqual(expected_isolated, actual_isolated)
expected_saved_state = {
'isolate_file': isolate_file,
'variables': {
'foo': 'bar',
'BAZ': os.path.join('tests', 'isolate'),
},
}
self.assertEqual(expected_saved_state, actual_saved_state)
if __name__ == '__main__':
logging.basicConfig(
level=logging.DEBUG if '-v' in sys.argv else logging.ERROR,
format='%(levelname)5s %(filename)15s(%(lineno)3d): %(message)s')
unittest.main()
|
from commando import management
BaseMigrationCheckCommand = management.get_command_class(
"migrationcheck", exclude_packages=("commando",))
if BaseMigrationCheckCommand is not None:
base = BaseMigrationCheckCommand()
class MigrationCheckCommandOptions(management.CommandOptions):
"""
MigrationCheck command options.
"""
args = base.args
help = base.help
option_list = base.option_list[
len(management.BaseCommandOptions.option_list):]
option_groups = (
("[migrationcheck options]",
"These options will be passed to migrationcheck.",
option_list,
),) if option_list else ()
actions = ("migrationcheck",)
def handle_migrationcheck(self, *args, **options):
return self.call_command("migrationcheck", *args, **options)
class MigrationCheckCommand(MigrationCheckCommandOptions, management.StandardCommand):
"""
MigrationCheck command.
"""
option_list = management.StandardCommand.option_list
option_groups = \
MigrationCheckCommandOptions.option_groups + \
management.StandardCommand.option_groups
else:
MigrationCheckCommand = management.StandardCommand
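# A minimal usage sketch: when a "migrationcheck" management command is
# provided by another installed app, this module re-exposes it with commando's
# standard options; otherwise it degrades to a plain StandardCommand.
#
#   python manage.py migrationcheck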
|
# -*- coding:utf8 -*-
# File : desc_vae_mnist_mlp_bernoulli_adam.py
# Author : Jiayuan Mao
# Email : [email protected]
# Date : 3/17/17
#
# This file is part of TensorArtist.
from tartist.core import get_env, get_logger
from tartist.core.utils.naming import get_dump_directory, get_data_directory
from tartist.nn import opr as O, optimizer, summary
logger = get_logger(__file__)
__envs__ = {
'dir': {
'root': get_dump_directory(__file__),
'data': get_data_directory('WellKnown/mnist')
},
'trainer': {
'learning_rate': 0.001,
'batch_size': 100,
'epoch_size': 500,
'nr_epochs': 100,
},
'inference': {
'batch_size': 256,
'epoch_size': 40
},
'demo': {
'is_reconstruct': False
}
}
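# These settings are read back through get_env(), e.g.
# get_env('trainer.learning_rate') in make_optimizer() and
# get_env('demo.is_reconstruct') in make_network() below.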
def make_network(env):
with env.create_network() as net:
code_length = 20
h, w, c = 28, 28, 1
is_reconstruct = get_env('demo.is_reconstruct', False)
dpc = env.create_dpcontroller()
with dpc.activate():
def inputs():
img = O.placeholder('img', shape=(None, h, w, c))
return [img]
def forward(x):
if is_reconstruct or env.phase is env.Phase.TRAIN:
with env.variable_scope('encoder'):
_ = x
_ = O.fc('fc1', _, 500, nonlin=O.tanh)
_ = O.fc('fc2', _, 500, nonlin=O.tanh)
mu = O.fc('fc3_mu', _, code_length)
log_var = O.fc('fc3_sigma', _, code_length)
var = O.exp(log_var)
std = O.sqrt(var)
epsilon = O.random_normal([x.shape[0], code_length])
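                    # Reparameterization trick: sample z ~ N(mu, var) as
                    # z = mu + std * epsilon with epsilon ~ N(0, I), keeping
                    # the sampling step differentiable w.r.t. mu and std.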
z_given_x = mu + std * epsilon
else:
z_given_x = O.random_normal([1, code_length])
with env.variable_scope('decoder'):
_ = z_given_x
_ = O.fc('fc1', _, 500, nonlin=O.tanh)
_ = O.fc('fc2', _, 500, nonlin=O.tanh)
_ = O.fc('fc3', _, 784, nonlin=O.sigmoid)
_ = _.reshape(-1, h, w, c)
x_given_z = _
if env.phase is env.Phase.TRAIN:
with env.variable_scope('loss'):
content_loss = O.raw_cross_entropy_prob('raw_content', x_given_z.flatten2(), x.flatten2())
content_loss = content_loss.sum(axis=1).mean(name='content')
# distrib_loss = 0.5 * (O.sqr(mu) + O.sqr(std) - 2. * O.log(std + 1e-8) - 1.0).sum(axis=1)
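                    # Closed-form KL divergence of the diagonal Gaussian
                    # q(z|x) = N(mu, var) from the prior N(0, I):
                    #   KL = -0.5 * sum(1 + log(var) - mu^2 - var)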
distrib_loss = -0.5 * (1. + log_var - O.sqr(mu) - var).sum(axis=1)
distrib_loss = distrib_loss.mean(name='distrib')
loss = content_loss + distrib_loss
dpc.add_output(loss, name='loss', reduce_method='sum')
dpc.add_output(x_given_z, name='output')
dpc.set_input_maker(inputs).set_forward_func(forward)
net.add_all_dpc_outputs(dpc, loss_name='loss')
if env.phase is env.Phase.TRAIN:
summary.inference.scalar('loss', net.loss)
def make_optimizer(env):
wrapper = optimizer.OptimizerWrapper()
wrapper.set_base_optimizer(optimizer.base.AdamOptimizer(get_env('trainer.learning_rate')))
wrapper.append_grad_modifier(optimizer.grad_modifier.LearningRateMultiplier([
('*/b', 2.0),
]))
# wrapper.append_grad_modifier(optimizer.grad_modifier.WeightDecay([
# ('*/W', 0.0005)
# ]))
env.set_optimizer(wrapper)
from data_provider_vae_mnist import *
def main_train(trainer):
from tartist.plugins.trainer_enhancer import summary
summary.enable_summary_history(trainer)
summary.enable_echo_summary_scalar(trainer)
from tartist.plugins.trainer_enhancer import progress
progress.enable_epoch_progress(trainer)
from tartist.plugins.trainer_enhancer import snapshot
snapshot.enable_snapshot_saver(trainer)
from tartist.plugins.trainer_enhancer import inference
inference.enable_inference_runner(trainer, make_dataflow_inference)
trainer.train()
|
"""Utility functions for generating Twitter OAuth headers and making API calls.
"""
import logging
import urllib2
import appengine_config
import handlers
import requests
import requests_oauthlib
import tweepy
def auth_header(url, token_key, token_secret, method='GET'):
"""Generates an Authorization header and returns it in a header dict.
Args:
url: string
token_key: string
token_secret: string
method: string
Returns: single element dict with key 'Authorization'
"""
oauth1 = requests_oauthlib.OAuth1(
client_key=appengine_config.TWITTER_APP_KEY,
client_secret=appengine_config.TWITTER_APP_SECRET,
resource_owner_key=token_key,
resource_owner_secret=token_secret,
)
req = requests.Request(method=method, url=url, auth=oauth1).prepare()
logging.debug(
'Generated Authorization header from access token key %s... and secret %s...',
token_key[:4], token_secret[:4])
return req.headers
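# A minimal usage sketch (token values are placeholders):
#
#   url = 'https://api.twitter.com/1.1/account/verify_credentials.json'
#   headers = auth_header(url, token_key='...', token_secret='...')
#   resp = requests.get(url, headers=headers)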
def signed_urlopen(url, token_key, token_secret, headers=None, **kwargs):
"""Wraps urllib2.urlopen() and adds an OAuth signature.
"""
if headers is None:
headers = {}
# if this is a post, move the body params into the URL. Tweepy's OAuth
# signing doesn't work if they're in the body; Twitter returns a 401.
data = kwargs.get('data')
if data:
method = 'POST'
url += ('&' if '?' in url else '?') + data
kwargs['data'] = ''
else:
method = 'GET'
headers.update(auth_header(url, token_key, token_secret, method=method))
timeout = kwargs.pop('timeout', appengine_config.HTTP_TIMEOUT)
logging.debug('Fetching %s', url)
try:
return urllib2.urlopen(urllib2.Request(url, headers=headers, **kwargs),
timeout=timeout)
except BaseException, e:
handlers.interpret_http_exception(e)
raise
def tweepy_auth(token_key, token_secret):
"""Returns a tweepy.OAuthHandler.
"""
assert (appengine_config.TWITTER_APP_KEY and
appengine_config.TWITTER_APP_SECRET), (
"Please fill in the twitter_app_key and twitter_app_secret files in "
"your app's root directory.")
handler = tweepy.OAuthHandler(appengine_config.TWITTER_APP_KEY,
appengine_config.TWITTER_APP_SECRET)
handler.set_access_token(token_key, token_secret)
return handler
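# A minimal usage sketch (token values are placeholders):
#
#   api = tweepy.API(tweepy_auth('access-token-key', 'access-token-secret'))
#   resp = signed_urlopen(
#       'https://api.twitter.com/1.1/statuses/home_timeline.json',
#       'access-token-key', 'access-token-secret')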
|
#! /usr/bin/python2.7
"""
"PYSTONE" Benchmark Program
Version: Python/1.1 (corresponds to C/1.1 plus 2 Pystone fixes)
Author: Reinhold P. Weicker, CACM Vol 27, No 10, 10/84 pg. 1013.
Translated from ADA to C by Rick Richardson.
Every method to preserve ADA-likeness has been used,
at the expense of C-ness.
Translated from C to Python by Guido van Rossum.
Version History:
Version 1.1 corrects two bugs in version 1.0:
First, it leaked memory: in Proc1(), NextRecord ends
up having a pointer to itself. I have corrected this
by zapping NextRecord.PtrComp at the end of Proc1().
Second, Proc3() used the operator != to compare a
record to None. This is rather inefficient and not
true to the intention of the original benchmark (where
a pointer comparison to None is intended; the !=
operator attempts to find a method __cmp__ to do value
comparison of the record). Version 1.1 runs 5-10
percent faster than version 1.0, so benchmark figures
of different versions can't be compared directly.
"""
LOOPS = 50000
from time import clock
__version__ = "1.1"
[Ident1, Ident2, Ident3, Ident4, Ident5] = range(1, 6)
class Record:
def __init__(self, PtrComp = None, Discr = 0, EnumComp = 0,
IntComp = 0, StringComp = 0):
self.PtrComp = PtrComp
self.Discr = Discr
self.EnumComp = EnumComp
self.IntComp = IntComp
self.StringComp = StringComp
def copy(self):
return Record(self.PtrComp, self.Discr, self.EnumComp,
self.IntComp, self.StringComp)
TRUE = 1
FALSE = 0
def main(loops=LOOPS):
benchtime, stones = pystones(loops)
#print "Pystone(%s) time for %d passes = %g" % \
# (__version__, loops, benchtime)
print "This machine benchmarks at " + str(stones) + " pystones/second"
def pystones(loops=LOOPS):
return Proc0(loops)
IntGlob = 0
BoolGlob = FALSE
Char1Glob = '\0'
Char2Glob = '\0'
Array1Glob = [0]*51
Array2Glob = map(lambda x: x[:], [Array1Glob]*51)
PtrGlb = None
PtrGlbNext = None
def Proc0(loops=LOOPS):
global IntGlob
global BoolGlob
global Char1Glob
global Char2Glob
global Array1Glob
global Array2Glob
global PtrGlb
global PtrGlbNext
starttime = clock()
for i in range(loops):
pass
nulltime = clock() - starttime
PtrGlbNext = Record()
PtrGlb = Record()
PtrGlb.PtrComp = PtrGlbNext
PtrGlb.Discr = Ident1
PtrGlb.EnumComp = Ident3
PtrGlb.IntComp = 40
PtrGlb.StringComp = "DHRYSTONE PROGRAM, SOME STRING"
String1Loc = "DHRYSTONE PROGRAM, 1'ST STRING"
Array2Glob[8][7] = 10
starttime = clock()
for i in range(loops):
Proc5()
Proc4()
IntLoc1 = 2
IntLoc2 = 3
String2Loc = "DHRYSTONE PROGRAM, 2'ND STRING"
EnumLoc = Ident2
BoolGlob = not Func2(String1Loc, String2Loc)
while IntLoc1 < IntLoc2:
IntLoc3 = 5 * IntLoc1 - IntLoc2
IntLoc3 = Proc7(IntLoc1, IntLoc2)
IntLoc1 = IntLoc1 + 1
Proc8(Array1Glob, Array2Glob, IntLoc1, IntLoc3)
PtrGlb = Proc1(PtrGlb)
CharIndex = 'A'
while CharIndex <= Char2Glob:
if EnumLoc == Func1(CharIndex, 'C'):
EnumLoc = Proc6(Ident1)
CharIndex = chr(ord(CharIndex)+1)
IntLoc3 = IntLoc2 * IntLoc1
IntLoc2 = IntLoc3 / IntLoc1
IntLoc2 = 7 * (IntLoc3 - IntLoc2) - IntLoc1
IntLoc1 = Proc2(IntLoc1)
benchtime = clock() - starttime - nulltime
if benchtime == 0.0:
loopsPerBenchtime = 0.0
else:
loopsPerBenchtime = (loops / benchtime)
return benchtime, loopsPerBenchtime
def Proc1(PtrParIn):
PtrParIn.PtrComp = NextRecord = PtrGlb.copy()
PtrParIn.IntComp = 5
NextRecord.IntComp = PtrParIn.IntComp
NextRecord.PtrComp = PtrParIn.PtrComp
NextRecord.PtrComp = Proc3(NextRecord.PtrComp)
if NextRecord.Discr == Ident1:
NextRecord.IntComp = 6
NextRecord.EnumComp = Proc6(PtrParIn.EnumComp)
NextRecord.PtrComp = PtrGlb.PtrComp
NextRecord.IntComp = Proc7(NextRecord.IntComp, 10)
else:
PtrParIn = NextRecord.copy()
NextRecord.PtrComp = None
return PtrParIn
def Proc2(IntParIO):
IntLoc = IntParIO + 10
while 1:
if Char1Glob == 'A':
IntLoc = IntLoc - 1
IntParIO = IntLoc - IntGlob
EnumLoc = Ident1
if EnumLoc == Ident1:
break
return IntParIO
def Proc3(PtrParOut):
global IntGlob
if PtrGlb is not None:
PtrParOut = PtrGlb.PtrComp
else:
IntGlob = 100
PtrGlb.IntComp = Proc7(10, IntGlob)
return PtrParOut
def Proc4():
global Char2Glob
BoolLoc = Char1Glob == 'A'
BoolLoc = BoolLoc or BoolGlob
Char2Glob = 'B'
def Proc5():
global Char1Glob
global BoolGlob
Char1Glob = 'A'
BoolGlob = FALSE
def Proc6(EnumParIn):
EnumParOut = EnumParIn
if not Func3(EnumParIn):
EnumParOut = Ident4
if EnumParIn == Ident1:
EnumParOut = Ident1
elif EnumParIn == Ident2:
if IntGlob > 100:
EnumParOut = Ident1
else:
EnumParOut = Ident4
elif EnumParIn == Ident3:
EnumParOut = Ident2
elif EnumParIn == Ident4:
pass
elif EnumParIn == Ident5:
EnumParOut = Ident3
return EnumParOut
def Proc7(IntParI1, IntParI2):
IntLoc = IntParI1 + 2
IntParOut = IntParI2 + IntLoc
return IntParOut
def Proc8(Array1Par, Array2Par, IntParI1, IntParI2):
global IntGlob
IntLoc = IntParI1 + 5
Array1Par[IntLoc] = IntParI2
Array1Par[IntLoc+1] = Array1Par[IntLoc]
Array1Par[IntLoc+30] = IntLoc
for IntIndex in range(IntLoc, IntLoc+2):
Array2Par[IntLoc][IntIndex] = IntLoc
Array2Par[IntLoc][IntLoc-1] = Array2Par[IntLoc][IntLoc-1] + 1
Array2Par[IntLoc+20][IntLoc] = Array1Par[IntLoc]
IntGlob = 5
def Func1(CharPar1, CharPar2):
CharLoc1 = CharPar1
CharLoc2 = CharLoc1
if CharLoc2 != CharPar2:
return Ident1
else:
return Ident2
def Func2(StrParI1, StrParI2):
IntLoc = 1
while IntLoc <= 1:
if Func1(StrParI1[IntLoc], StrParI2[IntLoc+1]) == Ident1:
CharLoc = 'A'
IntLoc = IntLoc + 1
if CharLoc >= 'W' and CharLoc <= 'Z':
IntLoc = 7
if CharLoc == 'X':
return TRUE
else:
if StrParI1 > StrParI2:
IntLoc = IntLoc + 7
return TRUE
else:
return FALSE
def Func3(EnumParIn):
EnumLoc = EnumParIn
if EnumLoc == Ident3: return TRUE
return FALSE
if __name__ == '__main__':
import sys
def error(msg):
print >>sys.stderr, msg,
print >>sys.stderr, "usage: %s [number_of_loops]" % sys.argv[0]
sys.exit(100)
nargs = len(sys.argv) - 1
if nargs > 1:
error("%d arguments are too many;" % nargs)
elif nargs == 1:
try: loops = int(sys.argv[1])
except ValueError:
error("Invalid argument %r;" % sys.argv[1])
else:
loops = LOOPS
main(loops)
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'GroupSnooze.count'
db.add_column(
'sentry_groupsnooze',
'count',
self.gf('sentry.db.models.fields.bounded.BoundedPositiveIntegerField')(null=True),
keep_default=False
)
# Adding field 'GroupSnooze.window'
db.add_column(
'sentry_groupsnooze',
'window',
self.gf('sentry.db.models.fields.bounded.BoundedPositiveIntegerField')(null=True),
keep_default=False
)
# Adding field 'GroupSnooze.user_count'
db.add_column(
'sentry_groupsnooze',
'user_count',
self.gf('sentry.db.models.fields.bounded.BoundedPositiveIntegerField')(null=True),
keep_default=False
)
# Adding field 'GroupSnooze.user_window'
db.add_column(
'sentry_groupsnooze',
'user_window',
self.gf('sentry.db.models.fields.bounded.BoundedPositiveIntegerField')(null=True),
keep_default=False
)
# Adding field 'GroupSnooze.state'
db.add_column(
'sentry_groupsnooze',
'state',
self.gf('sentry.db.models.fields.jsonfield.JSONField')(null=True),
keep_default=False
)
# Changing field 'GroupSnooze.until'
db.alter_column(
'sentry_groupsnooze',
'until',
self.gf('django.db.models.fields.DateTimeField')(null=True)
)
def backwards(self, orm):
raise RuntimeError(
"Cannot reverse this migration. 'GroupSnooze.until' and its values cannot be restored."
)
models = {
'sentry.activity': {
'Meta': {
'object_name': 'Activity'
},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True'
}),
'datetime':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'null': 'True'
}
)
},
'sentry.apiapplication': {
'Meta': {
'object_name': 'ApiApplication'
},
'allowed_origins':
('django.db.models.fields.TextField', [], {
'null': 'True',
'blank': 'True'
}),
'client_id': (
'django.db.models.fields.CharField', [], {
'default': "'1fe2246606cd41688e14b95ae1bdc14c6b7652dea035446fa2dc8bcacf21afd6'",
'unique': 'True',
'max_length': '64'
}
),
'client_secret': (
'sentry.db.models.fields.encrypted.EncryptedTextField', [], {
'default': "'7f918820281a421d991389c5fad78a41551739601ae745e8a24e9cb56ee8ffaa'"
}
),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'homepage_url':
('django.db.models.fields.URLField', [], {
'max_length': '200',
'null': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': (
'django.db.models.fields.CharField', [], {
'default': "'Trusting Weasel'",
'max_length': '64',
'blank': 'True'
}
),
'owner': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
),
'privacy_url':
('django.db.models.fields.URLField', [], {
'max_length': '200',
'null': 'True'
}),
'redirect_uris': ('django.db.models.fields.TextField', [], {}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'terms_url':
('django.db.models.fields.URLField', [], {
'max_length': '200',
'null': 'True'
})
},
'sentry.apiauthorization': {
'Meta': {
'unique_together': "(('user', 'application'),)",
'object_name': 'ApiAuthorization'
},
'application': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.ApiApplication']",
'null': 'True'
}
),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'scope_list': (
'sentry.db.models.fields.array.ArrayField', [], {
'of': ('django.db.models.fields.TextField', [], {})
}
),
'scopes': ('django.db.models.fields.BigIntegerField', [], {
'default': 'None'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.apigrant': {
'Meta': {
'object_name': 'ApiGrant'
},
'application': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.ApiApplication']"
}
),
'code': (
'django.db.models.fields.CharField', [], {
'default': "'d959d133f88c4292a581081e6190b949'",
'max_length': '64',
'db_index': 'True'
}
),
'expires_at': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime(2017, 6, 1, 0, 0)',
'db_index': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'redirect_uri': ('django.db.models.fields.CharField', [], {
'max_length': '255'
}),
'scope_list': (
'sentry.db.models.fields.array.ArrayField', [], {
'of': ('django.db.models.fields.TextField', [], {})
}
),
'scopes': ('django.db.models.fields.BigIntegerField', [], {
'default': 'None'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.apikey': {
'Meta': {
'object_name': 'ApiKey'
},
'allowed_origins':
('django.db.models.fields.TextField', [], {
'null': 'True',
'blank': 'True'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '32'
}),
'label': (
'django.db.models.fields.CharField', [], {
'default': "'Default'",
'max_length': '64',
'blank': 'True'
}
),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'key_set'",
'to': "orm['sentry.Organization']"
}
),
'scope_list': (
'sentry.db.models.fields.array.ArrayField', [], {
'of': ('django.db.models.fields.TextField', [], {})
}
),
'scopes': ('django.db.models.fields.BigIntegerField', [], {
'default': 'None'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
)
},
'sentry.apitoken': {
'Meta': {
'object_name': 'ApiToken'
},
'application': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.ApiApplication']",
'null': 'True'
}
),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'expires_at': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime(2017, 7, 1, 0, 0)',
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'refresh_token': (
'django.db.models.fields.CharField', [], {
'default': "'6c4fadd19de34e39ac0859f3f896065cd8c3cd19c56c453287ab9f199c539138'",
'max_length': '64',
'unique': 'True',
'null': 'True'
}
),
'scope_list': (
'sentry.db.models.fields.array.ArrayField', [], {
'of': ('django.db.models.fields.TextField', [], {})
}
),
'scopes': ('django.db.models.fields.BigIntegerField', [], {
'default': 'None'
}),
'token': (
'django.db.models.fields.CharField', [], {
'default': "'94b568466766407cad05e6e2a630f6561a04ecb269c047c381f78c857d84422a'",
'unique': 'True',
'max_length': '64'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.auditlogentry': {
'Meta': {
'object_name': 'AuditLogEntry'
},
'actor': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'audit_actors'",
'null': 'True',
'to': "orm['sentry.User']"
}
),
'actor_key': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.ApiKey']",
'null': 'True',
'blank': 'True'
}
),
'actor_label': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True',
'blank': 'True'
}
),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ip_address': (
'django.db.models.fields.GenericIPAddressField', [], {
'max_length': '39',
'null': 'True'
}
),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'target_object':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'target_user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'audit_targets'",
'null': 'True',
'to': "orm['sentry.User']"
}
)
},
'sentry.authenticator': {
'Meta': {
'unique_together': "(('user', 'type'),)",
'object_name': 'Authenticator',
'db_table': "'auth_authenticator'"
},
'config': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {}),
'created_at':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {
'primary_key': 'True'
}),
'last_used_at': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.authidentity': {
'Meta': {
'unique_together': "(('auth_provider', 'ident'), ('auth_provider', 'user'))",
'object_name': 'AuthIdentity'
},
'auth_provider': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.AuthProvider']"
}
),
'data': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'last_synced':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'last_verified':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.authprovider': {
'Meta': {
'object_name': 'AuthProvider'
},
'config':
('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'default_global_access':
('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'default_role':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '50'
}),
'default_teams': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.Team']",
'symmetrical': 'False',
'blank': 'True'
}
),
'flags': ('django.db.models.fields.BigIntegerField', [], {
'default': '0'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_sync': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']",
'unique': 'True'
}
),
'provider': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'sync_time':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
})
},
'sentry.broadcast': {
'Meta': {
'object_name': 'Broadcast'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'date_expires': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime(2017, 6, 8, 0, 0)',
'null': 'True',
'blank': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_active':
('django.db.models.fields.BooleanField', [], {
'default': 'True',
'db_index': 'True'
}),
'link': (
'django.db.models.fields.URLField', [], {
'max_length': '200',
'null': 'True',
'blank': 'True'
}
),
'message': ('django.db.models.fields.CharField', [], {
'max_length': '256'
}),
'title': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'upstream_id': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True',
'blank': 'True'
}
)
},
'sentry.broadcastseen': {
'Meta': {
'unique_together': "(('broadcast', 'user'),)",
'object_name': 'BroadcastSeen'
},
'broadcast': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Broadcast']"
}
),
'date_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.commit': {
'Meta': {
'unique_together': "(('repository_id', 'key'),)",
'object_name': 'Commit',
'index_together': "(('repository_id', 'date_added'),)"
},
'author': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.CommitAuthor']",
'null': 'True'
}
),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'message': ('django.db.models.fields.TextField', [], {
'null': 'True'
}),
'organization_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'repository_id':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.commitauthor': {
'Meta': {
'unique_together':
"(('organization_id', 'email'), ('organization_id', 'external_id'))",
'object_name':
'CommitAuthor'
},
'email': ('django.db.models.fields.EmailField', [], {
'max_length': '75'
}),
'external_id':
('django.db.models.fields.CharField', [], {
'max_length': '164',
'null': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name':
('django.db.models.fields.CharField', [], {
'max_length': '128',
'null': 'True'
}),
'organization_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
)
},
'sentry.commitfilechange': {
'Meta': {
'unique_together': "(('commit', 'filename'),)",
'object_name': 'CommitFileChange'
},
'commit': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Commit']"
}
),
'filename': ('django.db.models.fields.CharField', [], {
'max_length': '255'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'organization_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'type': ('django.db.models.fields.CharField', [], {
'max_length': '1'
})
},
'sentry.counter': {
'Meta': {
'object_name': 'Counter',
'db_table': "'sentry_projectcounter'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'unique': 'True'
}
),
'value': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.deploy': {
'Meta': {
'object_name': 'Deploy'
},
'date_finished':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'date_started':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'blank': 'True'
}),
'environment_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True',
'blank': 'True'
}
),
'notified': (
'django.db.models.fields.NullBooleanField', [], {
'default': 'False',
'null': 'True',
'db_index': 'True',
'blank': 'True'
}
),
'organization_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']"
}
),
'url': (
'django.db.models.fields.URLField', [], {
'max_length': '200',
'null': 'True',
'blank': 'True'
}
)
},
'sentry.distribution': {
'Meta': {
'unique_together': "(('release', 'name'),)",
'object_name': 'Distribution'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'organization_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']"
}
)
},
'sentry.dsymapp': {
'Meta': {
'unique_together': "(('project', 'platform', 'app_id'),)",
'object_name': 'DSymApp'
},
'app_id': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_synced':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'platform':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'sync_id':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
})
},
'sentry.dsymbundle': {
'Meta': {
'object_name': 'DSymBundle'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'object': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.DSymObject']"
}
),
'sdk': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.DSymSDK']"
}
)
},
'sentry.dsymobject': {
'Meta': {
'object_name': 'DSymObject'
},
'cpu_name': ('django.db.models.fields.CharField', [], {
'max_length': '40'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'object_path': ('django.db.models.fields.TextField', [], {
'db_index': 'True'
}),
'uuid':
('django.db.models.fields.CharField', [], {
'max_length': '36',
'db_index': 'True'
}),
'vmaddr':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True'
}),
'vmsize':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True'
})
},
'sentry.dsymsdk': {
'Meta': {
'object_name':
'DSymSDK',
'index_together':
"[('version_major', 'version_minor', 'version_patchlevel', 'version_build')]"
},
'dsym_type':
('django.db.models.fields.CharField', [], {
'max_length': '20',
'db_index': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'sdk_name': ('django.db.models.fields.CharField', [], {
'max_length': '20'
}),
'version_build': ('django.db.models.fields.CharField', [], {
'max_length': '40'
}),
'version_major': ('django.db.models.fields.IntegerField', [], {}),
'version_minor': ('django.db.models.fields.IntegerField', [], {}),
'version_patchlevel': ('django.db.models.fields.IntegerField', [], {})
},
'sentry.dsymsymbol': {
'Meta': {
'unique_together': "[('object', 'address')]",
'object_name': 'DSymSymbol'
},
'address':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'db_index': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'object': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.DSymObject']"
}
),
'symbol': ('django.db.models.fields.TextField', [], {})
},
'sentry.environment': {
'Meta': {
'unique_together': "(('project_id', 'name'), ('organization_id', 'name'))",
'object_name': 'Environment'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'organization_id':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'project_id':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'projects': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.Project']",
'through': "orm['sentry.EnvironmentProject']",
'symmetrical': 'False'
}
)
},
'sentry.environmentproject': {
'Meta': {
'unique_together': "(('project', 'environment'),)",
'object_name': 'EnvironmentProject'
},
'environment': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Environment']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
)
},
'sentry.event': {
'Meta': {
'unique_together': "(('project_id', 'event_id'),)",
'object_name': 'Event',
'db_table': "'sentry_message'",
'index_together': "(('group_id', 'datetime'),)"
},
'data':
('sentry.db.models.fields.node.NodeField', [], {
'null': 'True',
'blank': 'True'
}),
'datetime': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'event_id': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True',
'db_column': "'message_id'"
}
),
'group_id': (
'sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True',
'blank': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'message': ('django.db.models.fields.TextField', [], {}),
'platform':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project_id': (
'sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True',
'blank': 'True'
}
),
'time_spent':
('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'null': 'True'
})
},
'sentry.eventmapping': {
'Meta': {
'unique_together': "(('project_id', 'event_id'),)",
'object_name': 'EventMapping'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event_id': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventprocessingissue': {
'Meta': {
'unique_together': "(('raw_event', 'processing_issue'),)",
'object_name': 'EventProcessingIssue'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'processing_issue': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.ProcessingIssue']"
}
),
'raw_event': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.RawEvent']"
}
)
},
'sentry.eventtag': {
'Meta': {
'unique_together':
"(('event_id', 'key_id', 'value_id'),)",
'object_name':
'EventTag',
'index_together':
"(('project_id', 'key_id', 'value_id'), ('group_id', 'key_id', 'value_id'))"
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'event_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'group_id':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'value_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventuser': {
'Meta': {
'unique_together':
"(('project', 'ident'), ('project', 'hash'))",
'object_name':
'EventUser',
'index_together':
"(('project', 'email'), ('project', 'username'), ('project', 'ip_address'))"
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'email':
('django.db.models.fields.EmailField', [], {
'max_length': '75',
'null': 'True'
}),
'hash': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident':
('django.db.models.fields.CharField', [], {
'max_length': '128',
'null': 'True'
}),
'ip_address': (
'django.db.models.fields.GenericIPAddressField', [], {
'max_length': '39',
'null': 'True'
}
),
'name':
('django.db.models.fields.CharField', [], {
'max_length': '128',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'username':
('django.db.models.fields.CharField', [], {
'max_length': '128',
'null': 'True'
})
},
'sentry.file': {
'Meta': {
'object_name': 'File'
},
'blob': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'legacy_blob'",
'null': 'True',
'to': "orm['sentry.FileBlob']"
}
),
'blobs': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.FileBlob']",
'through': "orm['sentry.FileBlobIndex']",
'symmetrical': 'False'
}
),
'checksum':
('django.db.models.fields.CharField', [], {
'max_length': '40',
'null': 'True'
}),
'headers': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'path': ('django.db.models.fields.TextField', [], {
'null': 'True'
}),
'size':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'timestamp': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'type': ('django.db.models.fields.CharField', [], {
'max_length': '64'
})
},
'sentry.fileblob': {
'Meta': {
'object_name': 'FileBlob'
},
'checksum':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '40'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'path': ('django.db.models.fields.TextField', [], {
'null': 'True'
}),
'size':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'timestamp': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
)
},
'sentry.fileblobindex': {
'Meta': {
'unique_together': "(('file', 'blob', 'offset'),)",
'object_name': 'FileBlobIndex'
},
'blob': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.FileBlob']"
}
),
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'offset': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.globaldsymfile': {
'Meta': {
'object_name': 'GlobalDSymFile'
},
'cpu_name': ('django.db.models.fields.CharField', [], {
'max_length': '40'
}),
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'object_name': ('django.db.models.fields.TextField', [], {}),
'uuid':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '36'
})
},
'sentry.group': {
'Meta': {
'unique_together': "(('project', 'short_id'),)",
'object_name': 'Group',
'db_table': "'sentry_groupedmessage'",
'index_together': "(('project', 'first_release'),)"
},
'active_at':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'db_index': 'True'
}),
'culprit': (
'django.db.models.fields.CharField', [], {
'max_length': '200',
'null': 'True',
'db_column': "'view'",
'blank': 'True'
}
),
'data': (
'sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True',
'blank': 'True'
}
),
'first_release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']",
'null': 'True',
'on_delete': 'models.PROTECT'
}
),
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_public': (
'django.db.models.fields.NullBooleanField', [], {
'default': 'False',
'null': 'True',
'blank': 'True'
}
),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'level': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '40',
'db_index': 'True',
'blank': 'True'
}
),
'logger': (
'django.db.models.fields.CharField', [], {
'default': "''",
'max_length': '64',
'db_index': 'True',
'blank': 'True'
}
),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'null': 'True'
}
),
'platform':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'resolved_at':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'db_index': 'True'
}),
'score': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '0'
}),
'short_id':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'time_spent_count':
('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '0'
}),
'time_spent_total':
('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '0'
}),
'times_seen': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '1',
'db_index': 'True'
}
)
},
'sentry.groupassignee': {
'Meta': {
'object_name': 'GroupAssignee',
'db_table': "'sentry_groupasignee'"
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'assignee_set'",
'unique': 'True',
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'assignee_set'",
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'sentry_assignee_set'",
'to': "orm['sentry.User']"
}
)
},
'sentry.groupbookmark': {
'Meta': {
'unique_together': "(('project', 'user', 'group'),)",
'object_name': 'GroupBookmark'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'bookmark_set'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'bookmark_set'",
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'sentry_bookmark_set'",
'to': "orm['sentry.User']"
}
)
},
'sentry.groupcommitresolution': {
'Meta': {
'unique_together': "(('group_id', 'commit_id'),)",
'object_name': 'GroupCommitResolution'
},
'commit_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'datetime': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
})
},
'sentry.groupemailthread': {
'Meta': {
'unique_together': "(('email', 'group'), ('email', 'msgid'))",
'object_name': 'GroupEmailThread'
},
'date': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'email': ('django.db.models.fields.EmailField', [], {
'max_length': '75'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'groupemail_set'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'msgid': ('django.db.models.fields.CharField', [], {
'max_length': '100'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'groupemail_set'",
'to': "orm['sentry.Project']"
}
)
},
'sentry.grouphash': {
'Meta': {
'unique_together': "(('project', 'hash'),)",
'object_name': 'GroupHash'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'hash': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
)
},
'sentry.groupmeta': {
'Meta': {
'unique_together': "(('group', 'key'),)",
'object_name': 'GroupMeta'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.groupredirect': {
'Meta': {
'object_name': 'GroupRedirect'
},
'group_id':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'db_index': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'previous_group_id':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'unique': 'True'
})
},
'sentry.grouprelease': {
'Meta': {
'unique_together': "(('group_id', 'release_id', 'environment'),)",
'object_name': 'GroupRelease'
},
'environment':
('django.db.models.fields.CharField', [], {
'default': "''",
'max_length': '64'
}),
'first_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'project_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'release_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
)
},
'sentry.groupresolution': {
'Meta': {
'object_name': 'GroupResolution'
},
'datetime': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'unique': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']"
}
),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.grouprulestatus': {
'Meta': {
'unique_together': "(('rule', 'group'),)",
'object_name': 'GroupRuleStatus'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_active': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'rule': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Rule']"
}
),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {
'default': '0'
})
},
'sentry.groupseen': {
'Meta': {
'unique_together': "(('user', 'group'),)",
'object_name': 'GroupSeen'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'db_index': 'False'
}
)
},
'sentry.groupsnooze': {
'Meta': {
'object_name': 'GroupSnooze'
},
'count':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'unique': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'state': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'null': 'True'
}),
'until': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'user_count':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'user_window':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'window':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
})
},
'sentry.groupsubscription': {
'Meta': {
'unique_together': "(('group', 'user'),)",
'object_name': 'GroupSubscription'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'subscription_set'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_active': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'subscription_set'",
'to': "orm['sentry.Project']"
}
),
'reason':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.grouptagkey': {
'Meta': {
'unique_together': "(('project', 'group', 'key'),)",
'object_name': 'GroupTagKey'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'values_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.grouptagvalue': {
'Meta': {
'unique_together': "(('group_id', 'key', 'value'),)",
'object_name': 'GroupTagValue',
'db_table': "'sentry_messagefiltervalue'",
'index_together': "(('project_id', 'key', 'value', 'last_seen'),)"
},
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'project_id':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'times_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '200'
})
},
'sentry.lostpasswordhash': {
'Meta': {
'object_name': 'LostPasswordHash'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'hash': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'unique': 'True'
}
)
},
'sentry.option': {
'Meta': {
'object_name': 'Option'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '64'
}),
'last_updated':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {})
},
'sentry.organization': {
'Meta': {
'object_name': 'Organization'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'default_role':
('django.db.models.fields.CharField', [], {
'default': "'member'",
'max_length': '32'
}),
'flags': ('django.db.models.fields.BigIntegerField', [], {
'default': '1'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'members': (
'django.db.models.fields.related.ManyToManyField', [], {
'related_name': "'org_memberships'",
'symmetrical': 'False',
'through': "orm['sentry.OrganizationMember']",
'to': "orm['sentry.User']"
}
),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'slug':
('django.db.models.fields.SlugField', [], {
'unique': 'True',
'max_length': '50'
}),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.organizationaccessrequest': {
'Meta': {
'unique_together': "(('team', 'member'),)",
'object_name': 'OrganizationAccessRequest'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'member': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.OrganizationMember']"
}
),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
)
},
'sentry.organizationavatar': {
'Meta': {
'object_name': 'OrganizationAvatar'
},
'avatar_type':
('django.db.models.fields.PositiveSmallIntegerField', [], {
'default': '0'
}),
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']",
'unique': 'True',
'null': 'True',
'on_delete': 'models.SET_NULL'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident': (
'django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '32',
'db_index': 'True'
}
),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'avatar'",
'unique': 'True',
'to': "orm['sentry.Organization']"
}
)
},
'sentry.organizationmember': {
'Meta': {
'unique_together': "(('organization', 'user'), ('organization', 'email'))",
'object_name': 'OrganizationMember'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email': (
'django.db.models.fields.EmailField', [], {
'max_length': '75',
'null': 'True',
'blank': 'True'
}
),
'flags': ('django.db.models.fields.BigIntegerField', [], {
'default': '0'
}),
'has_global_access': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'member_set'",
'to': "orm['sentry.Organization']"
}
),
'role':
('django.db.models.fields.CharField', [], {
'default': "'member'",
'max_length': '32'
}),
'teams': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.Team']",
'symmetrical': 'False',
'through': "orm['sentry.OrganizationMemberTeam']",
'blank': 'True'
}
),
'token': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'unique': 'True',
'null': 'True',
'blank': 'True'
}
),
'type': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '50',
'blank': 'True'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'sentry_orgmember_set'",
'null': 'True',
'to': "orm['sentry.User']"
}
)
},
'sentry.organizationmemberteam': {
'Meta': {
'unique_together': "(('team', 'organizationmember'),)",
'object_name': 'OrganizationMemberTeam',
'db_table': "'sentry_organizationmember_teams'"
},
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {
'primary_key': 'True'
}),
'is_active': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'organizationmember': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.OrganizationMember']"
}
),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
)
},
'sentry.organizationonboardingtask': {
'Meta': {
'unique_together': "(('organization', 'task'),)",
'object_name': 'OrganizationOnboardingTask'
},
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'date_completed':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'project_id': (
'sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True',
'blank': 'True'
}
),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'task': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'null': 'True'
}
)
},
'sentry.organizationoption': {
'Meta': {
'unique_together': "(('organization', 'key'),)",
'object_name': 'OrganizationOption',
'db_table': "'sentry_organizationoptions'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {})
},
'sentry.processingissue': {
'Meta': {
'unique_together': "(('project', 'checksum', 'type'),)",
'object_name': 'ProcessingIssue'
},
'checksum':
('django.db.models.fields.CharField', [], {
'max_length': '40',
'db_index': 'True'
}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'type': ('django.db.models.fields.CharField', [], {
'max_length': '30'
})
},
'sentry.project': {
'Meta': {
'unique_together': "(('team', 'slug'), ('organization', 'slug'))",
'object_name': 'Project'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'first_event': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'flags':
('django.db.models.fields.BigIntegerField', [], {
'default': '0',
'null': 'True'
}),
'forced_color': (
'django.db.models.fields.CharField', [], {
'max_length': '6',
'null': 'True',
'blank': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '200'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'public': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'slug': ('django.db.models.fields.SlugField', [], {
'max_length': '50',
'null': 'True'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
)
},
'sentry.projectbookmark': {
'Meta': {
'unique_together': "(('project_id', 'user'),)",
'object_name': 'ProjectBookmark'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project_id': (
'sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True',
'blank': 'True'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.projectdsymfile': {
'Meta': {
'unique_together': "(('project', 'uuid'),)",
'object_name': 'ProjectDSymFile'
},
'cpu_name': ('django.db.models.fields.CharField', [], {
'max_length': '40'
}),
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'object_name': ('django.db.models.fields.TextField', [], {}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'uuid': ('django.db.models.fields.CharField', [], {
'max_length': '36'
})
},
'sentry.projectkey': {
'Meta': {
'object_name': 'ProjectKey'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'label': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True',
'blank': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'key_set'",
'to': "orm['sentry.Project']"
}
),
'public_key': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'unique': 'True',
'null': 'True'
}
),
'rate_limit_count':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'rate_limit_window':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'roles': ('django.db.models.fields.BigIntegerField', [], {
'default': '1'
}),
'secret_key': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'unique': 'True',
'null': 'True'
}
),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
)
},
'sentry.projectoption': {
'Meta': {
'unique_together': "(('project', 'key'),)",
'object_name': 'ProjectOption',
'db_table': "'sentry_projectoptions'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {})
},
'sentry.projectplatform': {
'Meta': {
'unique_together': "(('project_id', 'platform'),)",
'object_name': 'ProjectPlatform'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'platform': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.rawevent': {
'Meta': {
'unique_together': "(('project', 'event_id'),)",
'object_name': 'RawEvent'
},
'data':
('sentry.db.models.fields.node.NodeField', [], {
'null': 'True',
'blank': 'True'
}),
'datetime':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event_id':
('django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
)
},
'sentry.release': {
'Meta': {
'unique_together': "(('organization', 'version'),)",
'object_name': 'Release'
},
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'date_released':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'blank': 'True'
}),
'date_started':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'blank': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'new_groups':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'owner': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'null': 'True',
'blank': 'True'
}
),
'project_id':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'projects': (
'django.db.models.fields.related.ManyToManyField', [], {
'related_name': "'releases'",
'symmetrical': 'False',
'through': "orm['sentry.ReleaseProject']",
'to': "orm['sentry.Project']"
}
),
'ref': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True',
'blank': 'True'
}
),
'url': (
'django.db.models.fields.URLField', [], {
'max_length': '200',
'null': 'True',
'blank': 'True'
}
),
'version': ('django.db.models.fields.CharField', [], {
'max_length': '64'
})
},
'sentry.releasecommit': {
'Meta': {
'unique_together': "(('release', 'commit'), ('release', 'order'))",
'object_name': 'ReleaseCommit'
},
'commit': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Commit']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'order': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'organization_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'project_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True',
'db_index': 'True'
}
),
'release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']"
}
)
},
'sentry.releaseenvironment': {
'Meta': {
'unique_together':
"(('project_id', 'release_id', 'environment_id'), ('organization_id', 'release_id', 'environment_id'))",
'object_name':
'ReleaseEnvironment',
'db_table':
"'sentry_environmentrelease'"
},
'environment_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'first_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'organization_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'project_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True',
'db_index': 'True'
}
),
'release_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
)
},
'sentry.releasefile': {
'Meta': {
'unique_together': "(('release', 'ident'),)",
'object_name': 'ReleaseFile'
},
'dist': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Distribution']",
'null': 'True'
}
),
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident': ('django.db.models.fields.CharField', [], {
'max_length': '40'
}),
'name': ('django.db.models.fields.TextField', [], {}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'project_id':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']"
}
)
},
'sentry.releaseheadcommit': {
'Meta': {
'unique_together': "(('repository_id', 'release'),)",
'object_name': 'ReleaseHeadCommit'
},
'commit': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Commit']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'organization_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']"
}
),
'repository_id':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.releaseproject': {
'Meta': {
'unique_together': "(('project', 'release'),)",
'object_name': 'ReleaseProject',
'db_table': "'sentry_release_project'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'new_groups': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'null': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']"
}
)
},
'sentry.repository': {
'Meta': {
'unique_together':
"(('organization_id', 'name'), ('organization_id', 'provider', 'external_id'))",
'object_name':
'Repository'
},
'config': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'external_id':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '200'
}),
'organization_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'provider':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'url': ('django.db.models.fields.URLField', [], {
'max_length': '200',
'null': 'True'
})
},
'sentry.reprocessingreport': {
'Meta': {
'unique_together': "(('project', 'event_id'),)",
'object_name': 'ReprocessingReport'
},
'datetime':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event_id':
('django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
)
},
'sentry.rule': {
'Meta': {
'object_name': 'Rule'
},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'label': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
)
},
'sentry.savedsearch': {
'Meta': {
'unique_together': "(('project', 'name'),)",
'object_name': 'SavedSearch'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_default': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'owner': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'null': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'query': ('django.db.models.fields.TextField', [], {})
},
'sentry.savedsearchuserdefault': {
'Meta': {
'unique_together': "(('project', 'user'),)",
'object_name': 'SavedSearchUserDefault',
'db_table': "'sentry_savedsearch_userdefault'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'savedsearch': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.SavedSearch']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.scheduleddeletion': {
'Meta': {
'unique_together': "(('app_label', 'model_name', 'object_id'),)",
'object_name': 'ScheduledDeletion'
},
'aborted': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'actor_id':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True'
}),
'app_label': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'date_scheduled': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime(2017, 7, 1, 0, 0)'
}
),
'guid': (
'django.db.models.fields.CharField', [], {
'default': "'7dcd5c1ace824812b6cc232360d975f7'",
'unique': 'True',
'max_length': '32'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'in_progress': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'model_name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'object_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.tagkey': {
'Meta': {
'unique_together': "(('project', 'key'),)",
'object_name': 'TagKey',
'db_table': "'sentry_filterkey'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'label':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'values_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.tagvalue': {
'Meta': {
'unique_together': "(('project', 'key', 'value'),)",
'object_name': 'TagValue',
'db_table': "'sentry_filtervalue'"
},
'data': (
'sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True',
'blank': 'True'
}
),
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'times_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '200'
})
},
'sentry.team': {
'Meta': {
'unique_together': "(('organization', 'slug'),)",
'object_name': 'Team'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'slug': ('django.db.models.fields.SlugField', [], {
'max_length': '50'
}),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.user': {
'Meta': {
'object_name': 'User',
'db_table': "'auth_user'"
},
'date_joined':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email':
('django.db.models.fields.EmailField', [], {
'max_length': '75',
'blank': 'True'
}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {
'primary_key': 'True'
}),
'is_active': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'is_managed': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'is_password_expired':
('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'is_staff': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'last_login':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'last_password_change': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'name': (
'django.db.models.fields.CharField', [], {
'max_length': '200',
'db_column': "'first_name'",
'blank': 'True'
}
),
'password': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'session_nonce':
('django.db.models.fields.CharField', [], {
'max_length': '12',
'null': 'True'
}),
'username':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '128'
})
},
'sentry.useravatar': {
'Meta': {
'object_name': 'UserAvatar'
},
'avatar_type':
('django.db.models.fields.PositiveSmallIntegerField', [], {
'default': '0'
}),
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']",
'unique': 'True',
'null': 'True',
'on_delete': 'models.SET_NULL'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident': (
'django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '32',
'db_index': 'True'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'avatar'",
'unique': 'True',
'to': "orm['sentry.User']"
}
)
},
'sentry.useremail': {
'Meta': {
'unique_together': "(('user', 'email'),)",
'object_name': 'UserEmail'
},
'date_hash_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email': ('django.db.models.fields.EmailField', [], {
'max_length': '75'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_verified': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'emails'",
'to': "orm['sentry.User']"
}
),
'validation_hash': (
'django.db.models.fields.CharField', [], {
'default': "u'UgLIAnDusbhZ8E66pCx3Af5EoUtzEmSA'",
'max_length': '32'
}
)
},
'sentry.useroption': {
'Meta': {
'unique_together': "(('user', 'project', 'key'), ('user', 'organization', 'key'))",
'object_name': 'UserOption'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']",
'null': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
),
'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {})
},
'sentry.userreport': {
'Meta': {
'unique_together': "(('project', 'event_id'),)",
'object_name': 'UserReport',
'index_together': "(('project', 'event_id'), ('project', 'date_added'))"
},
'comments': ('django.db.models.fields.TextField', [], {}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email': ('django.db.models.fields.EmailField', [], {
'max_length': '75'
}),
'event_id': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'event_user_id':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
)
},
'sentry.versiondsymfile': {
'Meta': {
'unique_together': "(('dsym_file', 'version', 'build'),)",
'object_name': 'VersionDSymFile'
},
'build':
('django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'dsym_app': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.DSymApp']"
}
),
'dsym_file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.ProjectDSymFile']",
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'version': ('django.db.models.fields.CharField', [], {
'max_length': '32'
})
}
}
complete_apps = ['sentry']
|
#!/usr/bin/env python
import cv2
import numpy as np
import matplotlib.pyplot as plt
import os
import gdfmm
missing_mask = (cv2.imread('missing_mask.png', cv2.CV_LOAD_IMAGE_UNCHANGED) == 0)
for i in xrange(100):
if os.path.isfile('images/rgb%d.png' % i) and \
os.path.isfile('images/dep%d.png' % i) and \
os.path.isfile('images/missing%d.png' % i):
bgr = cv2.imread('images/rgb%d.png' % i, cv2.CV_LOAD_IMAGE_UNCHANGED)
rgb = cv2.cvtColor(bgr, cv2.cv.CV_BGR2RGB)
dep = cv2.imread('images/dep%d.png' % i, cv2.CV_LOAD_IMAGE_UNCHANGED)
if dep.dtype == np.uint8:
dep = np.array(dep, dtype=np.uint16) * (10000 / 256)
missing = dep.copy()
missing[missing_mask] = 0
inpainted = gdfmm.InpaintDepth2(missing,
rgb,
1, # epsilon
1, # homogenizing constant
blur_sigma = 2.0,
window_size = 11)
# scale the depths to some visible range
dep_scaled = (dep / 10000.0).reshape(dep.shape + (1,)).repeat(3, axis=2)
inp_scaled = (inpainted/ 10000.0).reshape(dep.shape + (1,)).repeat(3, axis=2)
mis_scaled = (missing / 10000.0).reshape(dep.shape + (1,)).repeat(3, axis=2)
rgb_scaled = rgb / 255.0
dep_scaled = np.asarray(dep_scaled, dtype=np.float)
inp_scaled = np.asarray(inp_scaled, dtype=np.float)
rgb_scaled = np.asarray(rgb_scaled, dtype=np.float)
mis_scaled = np.asarray(mis_scaled, dtype=np.float)
side_by_side = np.concatenate(
(np.concatenate( (rgb_scaled, dep_scaled), axis=0 ),
np.concatenate( (mis_scaled, inp_scaled), axis=0 )), axis=1)
plt.figure(figsize=(13,13))
plt.imshow(side_by_side)
plt.show()
|
import os
from stat import *
from datetime import date, datetime
import re
# @TODO Support for rotated log files - currently using the current year for 'Jan 01' dates.
class LogFileTimeParser(object):
"""
    Extracts parts of a log file based on a start and end date.
    Uses binary search to speed up the lookup.
    Common usage: validating log files during testing.
    Faster than awk-based parsing for big log files.
"""
version = "0.01a"
# Set some initial values
BUF_SIZE = 4096 # self.handle long lines, but put a limit to them
REWIND = 100 # arbitrary, the optimal value is highly dependent on the structure of the file
LIMIT = 75 # arbitrary, allow for a VERY large file, but stop it if it runs away
line_date = ''
line = None
opened_file = None
@staticmethod
def parse_date(text, validate=True):
        # Supports 'Aug 16 14:59:01', '2016-08-16 09:23:09' and 'Jun 1 2005 1:33:06PM' (with or without seconds/milliseconds)
for fmt in ('%Y-%m-%d %H:%M:%S %f', '%Y-%m-%d %H:%M:%S', '%Y-%m-%d %H:%M',
'%b %d %H:%M:%S %f', '%b %d %H:%M', '%b %d %H:%M:%S',
'%b %d %Y %H:%M:%S %f', '%b %d %Y %H:%M', '%b %d %Y %H:%M:%S',
'%b %d %Y %I:%M:%S%p', '%b %d %Y %I:%M%p', '%b %d %Y %I:%M:%S%p %f'):
try:
if fmt in ['%b %d %H:%M:%S %f', '%b %d %H:%M', '%b %d %H:%M:%S']:
                    return datetime.strptime(text, fmt).replace(year=datetime.now().year)
return datetime.strptime(text, fmt)
except ValueError:
pass
if validate:
raise ValueError("No valid date format found for '{0}'".format(text))
else:
# Cannot use NoneType to compare datetimes. Using minimum instead
return datetime.min
# Function to read lines from file and extract the date and time
def read_lines(self):
"""
        Read a line from the file.
        Return a tuple containing:
            the date/time in a format supported by parse_date, and the line itself
"""
try:
self.line = self.opened_file.readline(self.BUF_SIZE)
except:
raise IOError("File I/O Error")
if self.line == '':
raise EOFError("EOF reached")
# Remove \n from read lines.
if self.line[-1] == '\n':
self.line = self.line.rstrip('\n')
else:
if len(self.line) >= self.BUF_SIZE:
raise ValueError("Line length exceeds buffer size")
else:
raise ValueError("Missing newline")
words = self.line.split(' ')
        # This results in e.g. 'Jan 1 01:01:01 000000' or '1970-01-01 01:01:01 000000'
if len(words) >= 3:
self.line_date = self.parse_date(words[0] + " " + words[1] + " " + words[2],False)
else:
self.line_date = self.parse_date('', False)
return self.line_date, self.line
def get_lines_between_timestamps(self, start, end, path_to_file, debug=False):
# Set some initial values
count = 0
size = os.stat(path_to_file)[ST_SIZE]
begin_range = 0
mid_range = size / 2
old_mid_range = mid_range
end_range = size
pos1 = pos2 = 0
# If only hours are supplied
# test for times to be properly formatted, allow hh:mm or hh:mm:ss
p = re.compile(r'(^[2][0-3]|[0-1][0-9]):[0-5][0-9](:[0-5][0-9])?$')
if p.match(start) or p.match(end):
# Determine Time Range
yesterday = date.fromordinal(date.today().toordinal() - 1).strftime("%Y-%m-%d")
today = datetime.now().strftime("%Y-%m-%d")
now = datetime.now().strftime("%R")
if start > now or start > end:
search_start = yesterday
else:
search_start = today
if end > start > now:
search_end = yesterday
else:
search_end = today
search_start = self.parse_date(search_start + " " + start)
search_end = self.parse_date(search_end + " " + end)
else:
# Set dates
search_start = self.parse_date(start)
search_end = self.parse_date(end)
try:
self.opened_file = open(path_to_file, 'r')
except:
raise IOError("File Open Error")
if debug:
print("File: '{0}' Size: {1} Start: '{2}' End: '{3}'"
.format(path_to_file, size, search_start, search_end))
        # Seek using binary search -- ONLY WORKS ON FILES THAT ARE SORTED BY DATE (should be true for log files)
try:
while pos1 != end_range and old_mid_range != 0 and self.line_date != search_start:
self.opened_file.seek(mid_range)
# sync to self.line ending
self.line_date, self.line = self.read_lines()
pos1 = self.opened_file.tell()
# if not beginning of file, discard first read
if mid_range > 0:
if debug:
print("...partial: (len: {0}) '{1}'".format((len(self.line)), self.line))
self.line_date, self.line = self.read_lines()
pos2 = self.opened_file.tell()
count += 1
if debug:
print("#{0} Beginning: {1} Mid: {2} End: {3} P1: {4} P2: {5} Timestamp: '{6}'".
format(count, begin_range, mid_range, end_range, pos1, pos2, self.line_date))
if search_start > self.line_date:
begin_range = mid_range
else:
end_range = mid_range
old_mid_range = mid_range
mid_range = (begin_range + end_range) / 2
if count > self.LIMIT:
raise IndexError("ERROR: ITERATION LIMIT EXCEEDED")
if debug:
print("...stopping: '{0}'".format(self.line))
# Rewind a bit to make sure we didn't miss any
seek = old_mid_range
while self.line_date >= search_start and seek > 0:
if seek < self.REWIND:
seek = 0
else:
seek -= self.REWIND
if debug:
print("...rewinding")
self.opened_file.seek(seek)
# sync to self.line ending
self.line_date, self.line = self.read_lines()
if debug:
print("...junk: '{0}'".format(self.line))
self.line_date, self.line = self.read_lines()
if debug:
print("...comparing: '{0}'".format(self.line_date))
# Scan forward
while self.line_date < search_start:
if debug:
print("...skipping: '{0}'".format(self.line_date))
self.line_date, self.line = self.read_lines()
if debug:
print("...found: '{0}'".format(self.line))
if debug:
print("Beginning: {0} Mid: {1} End: {2} P1: {3} P2: {4} Timestamp: '{5}'".
format(begin_range, mid_range, end_range, pos1, pos2, self.line_date))
# Now that the preliminaries are out of the way, we just loop,
# reading lines and printing them until they are beyond the end of the range we want
while self.line_date <= search_end:
# Exclude our 'Nonetype' values
if not self.line_date == datetime.min:
print self.line
self.line_date, self.line = self.read_lines()
if debug:
print("Start: '{0}' End: '{1}'".format(search_start, search_end))
self.opened_file.close()
# Do not display EOFErrors:
except EOFError as e:
pass
|
#!/usr/bin/env python3
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import contextlib
import copy
import glob
import io
import itertools
import os
import re
import shutil
import subprocess
import sys
import tempfile
import unittest
import zipfile
import archive
import data_quality
import describe
import diff
import file_format
import models
import test_util
_SCRIPT_DIR = os.path.dirname(__file__)
_TEST_DATA_DIR = os.path.join(_SCRIPT_DIR, 'testdata')
_TEST_SDK_DIR = os.path.join(_TEST_DATA_DIR, 'mock_sdk')
_TEST_SOURCE_DIR = os.path.join(_TEST_DATA_DIR, 'mock_source_directory')
_TEST_OUTPUT_DIR = os.path.join(_TEST_SOURCE_DIR, 'out', 'Release')
_TEST_TOOL_PREFIX = os.path.join(
os.path.abspath(_TEST_DATA_DIR), 'mock_toolchain', '')
_TEST_APK_ROOT_DIR = os.path.join(_TEST_DATA_DIR, 'mock_apk')
_TEST_MAP_PATH = os.path.join(_TEST_DATA_DIR, 'test.map')
_TEST_PAK_INFO_PATH = os.path.join(
_TEST_OUTPUT_DIR, 'size-info/test.apk.pak.info')
_TEST_ELF_FILE_BEGIN = os.path.join(_TEST_OUTPUT_DIR, 'elf.begin')
_TEST_APK_LOCALE_PAK_PATH = os.path.join(_TEST_APK_ROOT_DIR, 'assets/en-US.pak')
_TEST_APK_PAK_PATH = os.path.join(_TEST_APK_ROOT_DIR, 'assets/resources.pak')
_TEST_ON_DEMAND_MANIFEST_PATH = os.path.join(_TEST_DATA_DIR,
'AndroidManifest_OnDemand.xml')
_TEST_ALWAYS_INSTALLED_MANIFEST_PATH = os.path.join(
_TEST_DATA_DIR, 'AndroidManifest_AlwaysInstalled.xml')
# The following files are dynamically created.
_TEST_ELF_PATH = os.path.join(_TEST_OUTPUT_DIR, 'elf')
_TEST_APK_PATH = os.path.join(_TEST_OUTPUT_DIR, 'test.apk')
_TEST_NOT_ON_DEMAND_SPLIT_APK_PATH = os.path.join(_TEST_OUTPUT_DIR,
'not_on_demand.apk')
_TEST_ON_DEMAND_SPLIT_APK_PATH = os.path.join(_TEST_OUTPUT_DIR, 'on_demand.apk')
_TEST_MINIMAL_APKS_PATH = os.path.join(_TEST_OUTPUT_DIR, 'Bundle.minimal.apks')
_TEST_SSARGS_PATH = os.path.join(_TEST_OUTPUT_DIR, 'test.ssargs')
# Generated file paths relative to apk
_TEST_APK_SO_PATH = 'test.so'
_TEST_APK_SMALL_SO_PATH = 'smalltest.so'
_TEST_APK_DEX_PATH = 'test.dex'
_TEST_APK_OTHER_FILE_PATH = 'assets/icudtl.dat'
_TEST_APK_RES_FILE_PATH = 'res/drawable-v13/test.xml'
def _CompareWithGolden(name=None):
def real_decorator(func):
basename = name
if not basename:
basename = func.__name__.replace('test_', '')
golden_path = os.path.join(_TEST_DATA_DIR, basename + '.golden')
def inner(self):
actual_lines = func(self)
actual_lines = (re.sub(r'(elf_mtime=).*', r'\1{redacted}', l)
for l in actual_lines)
actual_lines = (re.sub(r'(Loaded from ).*', r'\1{redacted}', l)
for l in actual_lines)
test_util.Golden.CheckOrUpdate(golden_path, actual_lines)
return inner
return real_decorator
@contextlib.contextmanager
def _AddMocksToPath():
prev_path = os.environ['PATH']
os.environ['PATH'] = _TEST_TOOL_PREFIX[:-1] + os.path.pathsep + prev_path
os.environ['APK_ANALYZER'] = os.path.join(_TEST_SDK_DIR, 'tools', 'bin',
'apkanalyzer')
os.environ['AAPT2'] = os.path.join(_TEST_SDK_DIR, 'tools', 'bin', 'aapt2')
try:
yield
finally:
os.environ['PATH'] = prev_path
del os.environ['APK_ANALYZER']
del os.environ['AAPT2']
def _RunApp(name, args, debug_measures=False):
argv = [os.path.join(_SCRIPT_DIR, 'main.py'), name]
argv.extend(args)
with _AddMocksToPath():
env = None
if debug_measures:
env = os.environ.copy()
env['SUPERSIZE_DISABLE_ASYNC'] = '1'
env['SUPERSIZE_MEASURE_GZIP'] = '1'
return subprocess.check_output(argv, env=env).decode('utf-8').splitlines()
def _AllMetadata(size_info):
return [c.metadata for c in size_info.containers]
class IntegrationTest(unittest.TestCase):
  maxDiff = None  # Don't truncate diffs in errors.
cached_size_info = {}
@staticmethod
def _CreateBlankData(power_of_two):
data = '\0'
for _ in range(power_of_two):
data = data + data
return data
@staticmethod
def _SafeRemoveFiles(file_names):
for file_name in file_names:
if os.path.exists(file_name):
os.remove(file_name)
@classmethod
def setUpClass(cls):
shutil.copy(_TEST_ELF_FILE_BEGIN, _TEST_ELF_PATH)
# Exactly 128MB of data (2^27), extra bytes will be accounted in overhead.
with open(_TEST_ELF_PATH, 'a') as elf_file:
elf_file.write(IntegrationTest._CreateBlankData(27))
with zipfile.ZipFile(_TEST_APK_PATH, 'w') as apk_file:
apk_file.write(_TEST_ELF_PATH, _TEST_APK_SO_PATH)
# Exactly 4MB of data (2^22), with some zipalign overhead.
info = zipfile.ZipInfo(_TEST_APK_SMALL_SO_PATH)
info.extra = b'\x00' * 16
apk_file.writestr(info, IntegrationTest._CreateBlankData(22))
# Exactly 1MB of data (2^20).
apk_file.writestr(
_TEST_APK_OTHER_FILE_PATH, IntegrationTest._CreateBlankData(20))
# Exactly 1KB of data (2^10).
apk_file.writestr(
_TEST_APK_RES_FILE_PATH, IntegrationTest._CreateBlankData(10))
locale_pak_rel_path = os.path.relpath(
_TEST_APK_LOCALE_PAK_PATH, _TEST_APK_ROOT_DIR)
apk_file.write(_TEST_APK_LOCALE_PAK_PATH, locale_pak_rel_path)
pak_rel_path = os.path.relpath(_TEST_APK_PAK_PATH, _TEST_APK_ROOT_DIR)
apk_file.write(_TEST_APK_PAK_PATH, pak_rel_path)
# Exactly 8MB of data (2^23).
apk_file.writestr(
_TEST_APK_DEX_PATH, IntegrationTest._CreateBlankData(23))
with zipfile.ZipFile(_TEST_NOT_ON_DEMAND_SPLIT_APK_PATH, 'w') as z:
z.write(_TEST_ALWAYS_INSTALLED_MANIFEST_PATH, 'AndroidManifest.xml')
with zipfile.ZipFile(_TEST_ON_DEMAND_SPLIT_APK_PATH, 'w') as z:
z.write(_TEST_ON_DEMAND_MANIFEST_PATH, 'AndroidManifest.xml')
with zipfile.ZipFile(_TEST_MINIMAL_APKS_PATH, 'w') as apk_file:
apk_file.writestr('toc.pb', 'x' * 80)
apk_file.write(_TEST_APK_PATH, 'splits/base-master.apk')
apk_file.writestr('splits/base-en.apk', 'x' * 10)
apk_file.write(_TEST_NOT_ON_DEMAND_SPLIT_APK_PATH,
'splits/not_on_demand-master.apk')
apk_file.write(_TEST_ON_DEMAND_SPLIT_APK_PATH,
'splits/on_demand-master.apk')
apk_file.writestr('splits/vr-en.apk', 'x' * 40)
@classmethod
def tearDownClass(cls):
IntegrationTest._SafeRemoveFiles([
_TEST_ELF_PATH,
_TEST_APK_PATH,
_TEST_NOT_ON_DEMAND_SPLIT_APK_PATH,
_TEST_ON_DEMAND_SPLIT_APK_PATH,
_TEST_MINIMAL_APKS_PATH,
])
def _CreateTestArgs(self):
parser = argparse.ArgumentParser()
archive.AddArguments(parser)
ret = parser.parse_args(['foo'])
return ret
def _CloneSizeInfo(self,
use_output_directory=True,
use_elf=False,
use_apk=False,
use_minimal_apks=False,
use_pak=False,
use_aux_elf=False):
assert not use_elf or use_output_directory
assert not (use_apk and use_pak)
cache_key = (use_output_directory, use_elf, use_apk, use_minimal_apks,
use_pak, use_aux_elf)
if cache_key not in IntegrationTest.cached_size_info:
knobs = archive.SectionSizeKnobs()
# Override for testing. Lower the bar for compacting symbols, to allow
# smaller test cases to be created.
knobs.max_same_name_alias_count = 3
args = self._CreateTestArgs()
args.elf_file = _TEST_ELF_PATH if use_elf or use_aux_elf else None
args.map_file = _TEST_MAP_PATH
args.output_directory = _TEST_OUTPUT_DIR if use_output_directory else None
args.source_directory = _TEST_SOURCE_DIR
args.tool_prefix = _TEST_TOOL_PREFIX
apk_so_path = None
size_info_prefix = None
extracted_minimal_apk_path = None
container_name = ''
if use_apk:
args.apk_file = _TEST_APK_PATH
elif use_minimal_apks:
args.minimal_apks_file = _TEST_MINIMAL_APKS_PATH
extracted_minimal_apk_path = _TEST_APK_PATH
container_name = 'Bundle.minimal.apks'
if use_apk or use_minimal_apks:
apk_so_path = _TEST_APK_SO_PATH
if args.output_directory:
if use_apk:
orig_path = _TEST_APK_PATH
else:
orig_path = _TEST_MINIMAL_APKS_PATH.replace('.minimal.apks', '.aab')
size_info_prefix = os.path.join(args.output_directory, 'size-info',
os.path.basename(orig_path))
pak_files = None
pak_info_file = None
if use_pak:
pak_files = [_TEST_APK_LOCALE_PAK_PATH, _TEST_APK_PAK_PATH]
pak_info_file = _TEST_PAK_INFO_PATH
linker_name = 'gold'
# For simplicity, using |args| for both params. This is okay since
# |args.ssargs_file| is unassigned.
opts = archive.ContainerArchiveOptions(args, args)
with _AddMocksToPath():
build_config = {}
metadata = archive.CreateMetadata(args, linker_name, build_config)
container_list = []
raw_symbols_list = []
container, raw_symbols = archive.CreateContainerAndSymbols(
knobs=knobs,
opts=opts,
container_name='{}/base.apk'.format(container_name)
if container_name else '',
metadata=metadata,
map_path=args.map_file,
tool_prefix=args.tool_prefix,
output_directory=args.output_directory,
source_directory=args.source_directory,
elf_path=args.elf_file,
apk_path=args.apk_file or extracted_minimal_apk_path,
apk_so_path=apk_so_path,
pak_files=pak_files,
pak_info_file=pak_info_file,
linker_name=linker_name,
size_info_prefix=size_info_prefix)
container_list.append(container)
raw_symbols_list.append(raw_symbols)
if use_minimal_apks:
opts.analyze_native = False
args.split_name = 'not_on_demand'
args.apk_file = _TEST_NOT_ON_DEMAND_SPLIT_APK_PATH
args.elf_file = None
args.map_file = None
metadata = archive.CreateMetadata(args, None, build_config)
container, raw_symbols = archive.CreateContainerAndSymbols(
knobs=knobs,
opts=opts,
container_name='{}/not_on_demand.apk'.format(container_name),
metadata=metadata,
tool_prefix=args.tool_prefix,
output_directory=args.output_directory,
source_directory=args.source_directory,
apk_path=_TEST_NOT_ON_DEMAND_SPLIT_APK_PATH,
size_info_prefix=size_info_prefix)
container_list.append(container)
raw_symbols_list.append(raw_symbols)
args.split_name = 'on_demand'
args.apk_file = _TEST_ON_DEMAND_SPLIT_APK_PATH
metadata = archive.CreateMetadata(args, None, build_config)
container, raw_symbols = archive.CreateContainerAndSymbols(
knobs=knobs,
opts=opts,
container_name='{}/on_demand.apk?'.format(container_name),
metadata=metadata,
tool_prefix=args.tool_prefix,
output_directory=args.output_directory,
source_directory=args.source_directory,
apk_path=_TEST_ON_DEMAND_SPLIT_APK_PATH,
size_info_prefix=size_info_prefix)
container_list.append(container)
raw_symbols_list.append(raw_symbols)
IntegrationTest.cached_size_info[cache_key] = archive.CreateSizeInfo(
build_config, container_list, raw_symbols_list)
return copy.deepcopy(IntegrationTest.cached_size_info[cache_key])
def _DoArchive(self,
archive_path,
use_output_directory=True,
use_elf=False,
use_apk=False,
use_ssargs=False,
use_minimal_apks=False,
use_pak=False,
use_aux_elf=None,
debug_measures=False,
include_padding=False):
args = [
archive_path,
'--source-directory',
_TEST_SOURCE_DIR,
# --map-file ignored for use_ssargs.
'--map-file',
_TEST_MAP_PATH,
]
if use_output_directory:
# Let autodetection find output_directory when --elf-file is used.
if not use_elf:
args += ['--output-directory', _TEST_OUTPUT_DIR]
else:
args += ['--no-output-directory']
if use_ssargs:
args += ['-f', _TEST_SSARGS_PATH]
elif use_apk:
args += ['-f', _TEST_APK_PATH]
elif use_minimal_apks:
args += ['-f', _TEST_MINIMAL_APKS_PATH]
elif use_elf:
args += ['-f', _TEST_ELF_PATH]
if use_pak:
args += ['--pak-file', _TEST_APK_LOCALE_PAK_PATH,
'--pak-file', _TEST_APK_PAK_PATH,
'--pak-info-file', _TEST_PAK_INFO_PATH]
if use_aux_elf:
args += ['--aux-elf-file', _TEST_ELF_PATH]
if include_padding:
args += ['--include-padding']
_RunApp('archive', args, debug_measures=debug_measures)
def _DoArchiveTest(self,
use_output_directory=True,
use_elf=False,
use_apk=False,
use_minimal_apks=False,
use_pak=False,
use_aux_elf=False,
debug_measures=False,
include_padding=False):
with tempfile.NamedTemporaryFile(suffix='.size') as temp_file:
self._DoArchive(temp_file.name,
use_output_directory=use_output_directory,
use_elf=use_elf,
use_apk=use_apk,
use_minimal_apks=use_minimal_apks,
use_pak=use_pak,
use_aux_elf=use_aux_elf,
debug_measures=debug_measures,
include_padding=include_padding)
size_info = archive.LoadAndPostProcessSizeInfo(temp_file.name)
# Check that saving & loading is the same as directly parsing.
expected_size_info = self._CloneSizeInfo(
use_output_directory=use_output_directory,
use_elf=use_elf,
use_apk=use_apk,
use_minimal_apks=use_minimal_apks,
use_pak=use_pak,
use_aux_elf=use_aux_elf)
self.assertEqual(_AllMetadata(expected_size_info), _AllMetadata(size_info))
# Don't cluster.
expected_size_info.symbols = expected_size_info.raw_symbols
size_info.symbols = size_info.raw_symbols
expected = list(describe.GenerateLines(expected_size_info, verbose=True))
actual = list(describe.GenerateLines(size_info, verbose=True))
self.assertEqual(expected, actual)
sym_strs = (repr(sym) for sym in size_info.symbols)
stats = data_quality.DescribeSizeInfoCoverage(size_info)
if len(size_info.containers) == 1:
      # If there's only one container, merge its metadata into build_config.
merged_data_desc = describe.DescribeDict(size_info.metadata_legacy)
else:
merged_data_desc = describe.DescribeDict(size_info.build_config)
for m in _AllMetadata(size_info):
merged_data_desc.extend(describe.DescribeDict(m))
return itertools.chain(merged_data_desc, stats, sym_strs)
@_CompareWithGolden()
def test_Archive(self):
return self._DoArchiveTest(use_output_directory=False, use_elf=False)
@_CompareWithGolden()
def test_Archive_OutputDirectory(self):
return self._DoArchiveTest()
@_CompareWithGolden()
def test_Archive_Elf(self):
return self._DoArchiveTest(use_elf=True)
@_CompareWithGolden()
def test_Archive_Apk(self):
return self._DoArchiveTest(use_apk=True, use_aux_elf=True)
@_CompareWithGolden()
def test_Archive_MinimalApks(self):
return self._DoArchiveTest(use_minimal_apks=True, use_aux_elf=True)
@_CompareWithGolden()
def test_Archive_Pak_Files(self):
return self._DoArchiveTest(use_pak=True, use_aux_elf=True)
@_CompareWithGolden(name='Archive_Elf')
def test_Archive_Elf_DebugMeasures(self):
return self._DoArchiveTest(use_elf=True, debug_measures=True)
@_CompareWithGolden(name='Archive_Apk')
def test_ArchiveSparse(self):
return self._DoArchiveTest(use_apk=True,
use_aux_elf=True,
include_padding=True)
def test_SaveDeltaSizeInfo(self):
# Check that saving & loading is the same as directly parsing.
orig_info1 = self._CloneSizeInfo(use_apk=True, use_aux_elf=True)
orig_info2 = self._CloneSizeInfo(use_elf=True)
orig_delta = diff.Diff(orig_info1, orig_info2)
with tempfile.NamedTemporaryFile(suffix='.sizediff') as sizediff_file:
file_format.SaveDeltaSizeInfo(orig_delta, sizediff_file.name)
new_info1, new_info2 = archive.LoadAndPostProcessDeltaSizeInfo(
sizediff_file.name)
new_delta = diff.Diff(new_info1, new_info2)
# File format discards unchanged symbols.
orig_delta.raw_symbols = orig_delta.raw_symbols.WhereDiffStatusIs(
models.DIFF_STATUS_UNCHANGED).Inverted()
self.assertEqual(
'\n'.join(describe.GenerateLines(orig_delta, verbose=True)),
'\n'.join(describe.GenerateLines(new_delta, verbose=True)))
@_CompareWithGolden()
def test_Console(self):
with tempfile.NamedTemporaryFile(suffix='.size') as size_file, \
tempfile.NamedTemporaryFile(suffix='.txt') as output_file:
file_format.SaveSizeInfo(self._CloneSizeInfo(use_elf=True),
size_file.name)
query = [
'ShowExamples()',
'ExpandRegex("_foo_")',
'canned_queries.CategorizeGenerated()',
'canned_queries.CategorizeByChromeComponent()',
'canned_queries.LargeFiles()',
'canned_queries.TemplatesByName()',
'canned_queries.StaticInitializers()',
'canned_queries.PakByPath()',
'Print(ReadStringLiterals(elf_path={}))'.format(repr(_TEST_ELF_PATH)),
'Print(size_info, to_file=%r)' % output_file.name,
]
ret = _RunApp('console', [size_file.name, '--query', '; '.join(query)])
with open(output_file.name) as f:
ret.extend(l.rstrip() for l in f)
return ret
@_CompareWithGolden()
def test_Csv(self):
with tempfile.NamedTemporaryFile(suffix='.size') as size_file, \
tempfile.NamedTemporaryFile(suffix='.txt') as output_file:
file_format.SaveSizeInfo(self._CloneSizeInfo(use_elf=True),
size_file.name)
query = [
'Csv(size_info, to_file=%r)' % output_file.name,
]
ret = _RunApp('console', [size_file.name, '--query', '; '.join(query)])
with open(output_file.name) as f:
ret.extend(l.rstrip() for l in f)
return ret
@_CompareWithGolden()
def test_Diff_NullDiff(self):
with tempfile.NamedTemporaryFile(suffix='.size') as temp_file:
file_format.SaveSizeInfo(self._CloneSizeInfo(use_elf=True),
temp_file.name)
return _RunApp('diff', [temp_file.name, temp_file.name])
# Runs archive 3 times, and asserts the contents are the same each time.
def test_Idempotent(self):
prev_contents = None
for _ in range(3):
with tempfile.NamedTemporaryFile(suffix='.size') as temp_file:
self._DoArchive(temp_file.name)
contents = temp_file.read()
self.assertTrue(prev_contents is None or contents == prev_contents)
prev_contents = contents
@_CompareWithGolden()
def test_Diff_Basic(self):
size_info1 = self._CloneSizeInfo(use_pak=True)
size_info2 = self._CloneSizeInfo(use_pak=True)
size_info2.build_config['git_revision'] = 'xyz789'
container1 = size_info1.containers[0]
container2 = size_info2.containers[0]
container1.metadata = {"foo": 1, "bar": [1, 2, 3], "baz": "yes"}
container2.metadata = {"foo": 1, "bar": [1, 3], "baz": "yes"}
size_info1.raw_symbols -= size_info1.raw_symbols.WhereNameMatches(
r'pLinuxKernelCmpxchg|pLinuxKernelMemoryBarrier')
size_info2.raw_symbols -= size_info2.raw_symbols.WhereNameMatches(
r'IDS_AW_WEBPAGE_PARENTAL_|IDS_WEB_FONT_FAMILY|IDS_WEB_FONT_SIZE')
changed_sym = size_info1.raw_symbols.WhereNameMatches('Patcher::Name_')[0]
changed_sym.size -= 10
padding_sym = size_info2.raw_symbols.WhereNameMatches('symbol gap 0')[0]
padding_sym.padding += 20
padding_sym.size += 20
# Test pak symbols changing .grd files. They should not show as changed.
pak_sym = size_info2.raw_symbols.WhereNameMatches(
r'IDR_PDF_COMPOSITOR_MANIFEST')[0]
pak_sym.full_name = pak_sym.full_name.replace('.grd', '2.grd')
# Serialize & de-serialize so that name normalization runs again for the pak
# symbol.
bytesio = io.BytesIO()
file_format.SaveSizeInfo(size_info2, 'path', file_obj=bytesio)
bytesio.seek(0)
size_info2 = archive.LoadAndPostProcessSizeInfo('path', file_obj=bytesio)
d = diff.Diff(size_info1, size_info2)
d.raw_symbols = d.raw_symbols.Sorted()
self.assertEqual((1, 2, 3), d.raw_symbols.CountsByDiffStatus()[1:])
changed_sym = d.raw_symbols.WhereNameMatches('Patcher::Name_')[0]
padding_sym = d.raw_symbols.WhereNameMatches('symbol gap 0')[0]
bss_sym = d.raw_symbols.WhereInSection(models.SECTION_BSS)[0]
# Padding-only deltas should sort after all non-padding changes.
padding_idx = d.raw_symbols.index(padding_sym)
changed_idx = d.raw_symbols.index(changed_sym)
bss_idx = d.raw_symbols.index(bss_sym)
self.assertLess(changed_idx, padding_idx)
# And before bss.
self.assertLess(padding_idx, bss_idx)
return describe.GenerateLines(d, verbose=True)
@_CompareWithGolden()
def test_FullDescription(self):
size_info = self._CloneSizeInfo(use_elf=True)
# Show both clustered and non-clustered so that they can be compared.
size_info.symbols = size_info.raw_symbols
return itertools.chain(
describe.GenerateLines(size_info, verbose=True),
describe.GenerateLines(size_info.symbols._Clustered(), recursive=True,
verbose=True),
)
@_CompareWithGolden()
def test_SymbolGroupMethods(self):
all_syms = self._CloneSizeInfo(use_elf=True).symbols
global_syms = all_syms.WhereNameMatches('GLOBAL')
# Tests Filter(), Inverted(), and __sub__().
non_global_syms = global_syms.Inverted()
self.assertEqual(non_global_syms, (all_syms - global_syms))
# Tests Sorted() and __add__().
self.assertEqual(all_syms.Sorted(),
(global_syms + non_global_syms).Sorted())
# Tests GroupedByName() and __len__().
return itertools.chain(
['GroupedByName()'],
describe.GenerateLines(all_syms.GroupedByName()),
['GroupedByName(depth=1)'],
describe.GenerateLines(all_syms.GroupedByName(depth=1)),
['GroupedByName(depth=-1)'],
describe.GenerateLines(all_syms.GroupedByName(depth=-1)),
['GroupedByName(depth=1, min_count=2)'],
describe.GenerateLines(all_syms.GroupedByName(depth=1, min_count=2)),
)
@_CompareWithGolden()
def test_ArchiveContainers(self):
with tempfile.NamedTemporaryFile(suffix='.size') as temp_file:
self._DoArchive(temp_file.name,
use_output_directory=True,
use_ssargs=True)
size_info = archive.LoadAndPostProcessSizeInfo(temp_file.name)
# Don't cluster.
size_info.symbols = size_info.raw_symbols
sym_strs = (repr(sym) for sym in size_info.symbols)
build_config = describe.DescribeDict(size_info.build_config)
metadata = itertools.chain.from_iterable(
itertools.chain([c.name], describe.DescribeDict(c.metadata))
for c in size_info.containers)
return itertools.chain(
['BuildConfig:'],
build_config,
['Metadata:'],
metadata,
['Symbols:'],
sym_strs,
)
def main():
argv = sys.argv
if len(argv) > 1 and argv[1] == '--update':
argv.pop(0)
test_util.Golden.EnableUpdate()
for f in glob.glob(os.path.join(_TEST_DATA_DIR, '*.golden')):
os.unlink(f)
unittest.main(argv=argv, verbosity=2)
if __name__ == '__main__':
main()
|
# -*- coding: utf-8 -*-
"""
hunk.production
~~~~~~~~~~~~~~~
Provides a class to configure production environment.
"""
import importlib
import os
import sys
from ._compat import urljoin, urlunsplit
class ProductionEnvironment(object):
"""Holds information for a production environment to dispatch to it."""
def __init__(self):
self.routes = set()
self.scheme = 'http'
self.hostname = 'localhost'
self.port = 9000
def load(self, dirpath, filename):
filepath = os.path.join(dirpath, filename)
if not os.path.exists(filepath):
return # skipped
modname, _ = os.path.splitext(filename)
sys.path.append(dirpath)
config = importlib.import_module(modname)
for attr in ['scheme', 'hostname', 'port']:
if hasattr(config, attr):
setattr(self, attr, getattr(config, attr))
if hasattr(config, 'routes'):
self.routes.update(config.routes)
def build_url(self, path):
base_url = urlunsplit((
self.scheme,
':'.join([self.hostname, str(self.port)]),
'', '', ''
))
return urljoin(base_url, path)
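# A minimal usage sketch (illustrative only; the directory and 'config.py'
# below are assumptions, not part of this module):
#
#     env = ProductionEnvironment()
#     env.load('/path/to/project', 'config.py')
#     env.build_url('/api/items')  # -> 'http://localhost:9000/api/items'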
|
"""
RESTx: Sane, simple and effective data publishing and integration.
Copyright (C) 2010 MuleSoft Inc. http://www.mulesoft.com
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
#
# To run this and other RESTx test files, use bin/testrun.
#
# These imports are necessary for all component tests
from restx.testtools.utils import *
from restx.components.api import *
# Importing the component we wish to test
from restx.components.Filter import Filter
# ============================
# Testing the Filter component
# ============================
def runtest():
#
# -------------------------------------------------------------------
# Mocking setup: Provide overrides for some of the component methods
# -------------------------------------------------------------------
#
class MyBaseCapabilities(BaseCapabilities):
def accessResource(self, resource_uri, input=None, params=None, method=HTTP.GET):
return RESOURCE_DICT[resource_uri]
#
# -------------------------------------------------------------------
# The actual tests
# -------------------------------------------------------------------
#
#
# Setting up a dummy component
#
rctp = dict(
input_resource_uri = "/resource/foo",
filter_expression_1 = "a/b/c = 123",
filter_expression_2 = "",
filter_expression_3 = "",
match_all = True,
)
c = make_component(rctp, Filter, MyBaseCapabilities)
#
# Testing filter_compile()
#
test_evaluator("Test 1", compare_list(c._filter_compile('a/b/c = 123'),
(['a', 'b', 'c'], '=', 123)))
test_evaluator("Test 2", compare_list(c._filter_compile('a/b/c = "123"'),
(['a', 'b', 'c'], '=', '123')))
test_evaluator("Test 3", compare_list(c._filter_compile('"a"/"one two b"/c=x = >= true'),
(['a', 'one two b', 'c=x ='], '>=', True)))
test_evaluator("Test 4", compare_list(c._filter_compile('"a" >= true'),
(['a'], '>=', True)))
test_evaluator("Test 5", compare_list(c._filter_compile('1 >= 123'),
([1], '>=', 123)))
test_evaluator("Test 6", compare_list(c._filter_compile('a/1/2 >= 123'),
(['a', 1, 2], '>=', 123)))
test_evaluator("Test 7", compare_list(c._filter_compile('a/"1"/2 >= 123'),
(['a', '1', 2], '>=', 123)))
#
# Testing element extraction
#
test_evaluator("Test 8", compare_elem(123, c._get_elem([ 111, 123 ], c._filter_compile("1 = 1")[0])))
test_evaluator("Test 9", compare_elem(123, c._get_elem({ 1: 123}, c._filter_compile("1 = 1")[0])))
test_evaluator("Test 10", compare_elem(123, c._get_elem({ "1": 123}, c._filter_compile('"1" = 1')[0])))
test_evaluator("Test 11", compare_elem(1, c._get_elem({ "1": [ 1, 2 ]}, c._filter_compile('"1"/0 = 1')[0])))
test_evaluator("Test 12", compare_elem("a", c._get_elem({ "x": [ 1, "a" ]}, c._filter_compile('x/1 = 1')[0])))
test_evaluator("Test 13", compare_elem("a", c._get_elem({ "x": [ 1, { "b" : "a" } ]}, c._filter_compile('x/1/b = 1')[0])))
#
# Testing filtering
#
rctp['filter_expression_1'] = "foo = xyz"
c = make_component(rctp, Filter, MyBaseCapabilities)
data = [
{ "email" : "[email protected]", "foo" : "abc" },
{ "blah" : 123 },
{ "email" : "[email protected]", "foo" : "xyz" },
{ "email" : "[email protected]", "foo" : "xyz" },
]
RESOURCE_DICT = { c.input_resource_uri : ( 200, data ) }
#
# Test 14: PASS filter
#
res = c.filter(None, None, False)
should_be = [
{ "email" : "[email protected]", "foo" : "xyz" },
{ "email" : "[email protected]", "foo" : "xyz" },
]
test_evaluator("Test 14", compare_out_lists(res, 200, should_be))
#
# Test 15: Deny filter
#
res = c.filter(None, None, True)
should_be = [
{ "email" : "[email protected]", "foo" : "abc" },
{ "blah" : 123 },
]
test_evaluator("Test 15", compare_out_lists(res, 200, should_be))
#
# Test 16: Filter with dictionary at top level
#
c = make_component(rctp, Filter, MyBaseCapabilities)
data = {
"aaa" : { "email" : "[email protected]", "foo" : "abc" },
"bbb" : { "blah" : 123 },
"ccc" : { "email" : "[email protected]", "foo" : "xyz" },
"ddd" : { "email" : "[email protected]", "foo" : "xyz" },
}
RESOURCE_DICT = { c.input_resource_uri : (200, data) }
res = c.filter(None, None, False)
should_be = {
"ccc" : { "email" : "[email protected]", "foo" : "xyz" },
"ddd" : { "email" : "[email protected]", "foo" : "xyz" },
}
test_evaluator("Test 16", compare_out_dicts(res, 200, should_be))
#
# Test 17: Other operator: !=
#
rctp['filter_expression_1'] = "foo != xyz"
c = make_component(rctp, Filter, MyBaseCapabilities)
res = c.filter(None, None, False)
should_be = {
"aaa" : { "email" : "[email protected]", "foo" : "abc" },
}
test_evaluator("Test 17", compare_out_dicts(res, 200, should_be))
#
# Test 18: Multiple expressions with AND
#
rctp['filter_expression_1'] = "b = 2"
rctp['filter_expression_2'] = "c = 1"
c = make_component(rctp, Filter, MyBaseCapabilities)
data = [
{ "a" : 1, "b" : 2, "c" : 1 },
{ "a" : 1, "b" : 1, "c" : 1 },
{ "a" : 1, "b" : 2, "c" : 1 },
{ "a" : 1, "b" : 3, "c" : 1 },
{ "a" : 1, "b" : 3, "c" : 4 },
]
RESOURCE_DICT = { c.input_resource_uri : (200, data) }
res = c.filter(None, None, False)
should_be = [
{ "a" : 1, "b" : 2, "c" : 1 },
{ "a" : 1, "b" : 2, "c" : 1 },
]
test_evaluator("Test 18", compare_out_lists(res, 200, should_be))
#
# Test 19: Multiple expressions with OR
#
rctp['filter_expression_2'] = "c = 4"
rctp['match_all'] = False
c = make_component(rctp, Filter, MyBaseCapabilities)
res = c.filter(None, None, False)
should_be = [
{ "a" : 1, "b" : 2, "c" : 1 },
{ "a" : 1, "b" : 2, "c" : 1 },
{ "a" : 1, "b" : 3, "c" : 4 },
]
test_evaluator("Test 19", compare_out_lists(res, 200, should_be))
return get_test_result()
|
# Copyright 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from jacket import context as nova_context
from jacket.compute import test
from jacket.tests.compute.unit import fake_instance
from jacket.tests.compute.unit.virt.ironic import utils as ironic_utils
from jacket.compute.virt.ironic import patcher
CONF = cfg.CONF
class IronicDriverFieldsTestCase(test.NoDBTestCase):
def setUp(self):
super(IronicDriverFieldsTestCase, self).setUp()
self.image_meta = ironic_utils.get_test_image_meta()
self.flavor = ironic_utils.get_test_flavor()
self.ctx = nova_context.get_admin_context()
self.instance = fake_instance.fake_instance_obj(self.ctx)
self.node = ironic_utils.get_test_node(driver='fake')
# Generic expected patches
self._expected_deploy_patch = [
{'path': '/instance_info/image_source',
'value': self.image_meta.id,
'op': 'add'},
{'path': '/instance_info/root_gb',
'value': str(self.instance['root_gb']),
'op': 'add'},
{'path': '/instance_info/swap_mb',
'value': str(self.flavor['swap']),
'op': 'add'},
{'path': '/instance_info/display_name',
'value': self.instance['display_name'],
'op': 'add'},
{'path': '/instance_info/vcpus',
'value': str(self.instance['vcpus']),
'op': 'add'},
{'path': '/instance_info/memory_mb',
'value': str(self.instance['memory_mb']),
'op': 'add'},
{'path': '/instance_info/local_gb',
'value': str(self.node.properties.get('local_gb', 0)),
'op': 'add'}
]
def test_create_generic(self):
node = ironic_utils.get_test_node(driver='pxe_fake')
patcher_obj = patcher.create(node)
self.assertIsInstance(patcher_obj, patcher.GenericDriverFields)
def test_generic_get_deploy_patch(self):
node = ironic_utils.get_test_node(driver='fake')
patch = patcher.create(node).get_deploy_patch(
self.instance, self.image_meta, self.flavor)
self.assertEqual(sorted(self._expected_deploy_patch), sorted(patch))
def test_generic_get_deploy_patch_capabilities(self):
node = ironic_utils.get_test_node(driver='fake')
self.flavor['extra_specs']['capabilities:boot_mode'] = 'bios'
expected = [{'path': '/instance_info/capabilities',
'value': '{"boot_mode": "bios"}',
'op': 'add'}]
expected += self._expected_deploy_patch
patch = patcher.create(node).get_deploy_patch(
self.instance, self.image_meta, self.flavor)
self.assertEqual(sorted(expected), sorted(patch))
def test_generic_get_deploy_patch_capabilities_op(self):
node = ironic_utils.get_test_node(driver='fake')
self.flavor['extra_specs']['capabilities:boot_mode'] = '<in> bios'
expected = [{'path': '/instance_info/capabilities',
'value': '{"boot_mode": "<in> bios"}',
'op': 'add'}]
expected += self._expected_deploy_patch
patch = patcher.create(node).get_deploy_patch(
self.instance, self.image_meta, self.flavor)
self.assertEqual(sorted(expected), sorted(patch))
def test_generic_get_deploy_patch_capabilities_nested_key(self):
node = ironic_utils.get_test_node(driver='fake')
self.flavor['extra_specs']['capabilities:key1:key2'] = '<in> bios'
expected = [{'path': '/instance_info/capabilities',
'value': '{"key1:key2": "<in> bios"}',
'op': 'add'}]
expected += self._expected_deploy_patch
patch = patcher.create(node).get_deploy_patch(
self.instance, self.image_meta, self.flavor)
self.assertEqual(sorted(expected), sorted(patch))
def test_generic_get_deploy_patch_ephemeral(self):
CONF.set_override('default_ephemeral_format', 'testfmt')
node = ironic_utils.get_test_node(driver='fake')
instance = fake_instance.fake_instance_obj(self.ctx,
ephemeral_gb=10)
patch = patcher.create(node).get_deploy_patch(
instance, self.image_meta, self.flavor)
expected = [{'path': '/instance_info/ephemeral_gb',
'value': str(instance.ephemeral_gb),
'op': 'add'},
{'path': '/instance_info/ephemeral_format',
'value': 'testfmt',
'op': 'add'}]
expected += self._expected_deploy_patch
self.assertEqual(sorted(expected), sorted(patch))
def test_generic_get_deploy_patch_preserve_ephemeral(self):
node = ironic_utils.get_test_node(driver='fake')
for preserve in [True, False]:
patch = patcher.create(node).get_deploy_patch(
self.instance, self.image_meta, self.flavor,
preserve_ephemeral=preserve)
expected = [{'path': '/instance_info/preserve_ephemeral',
'value': str(preserve), 'op': 'add', }]
expected += self._expected_deploy_patch
self.assertEqual(sorted(expected), sorted(patch))
|
#!/usr/bin/python
# -*- coding: latin-1 -*-
# Copyright 2014 Oeyvind Brandtsegg and Axel Tidemann
#
# This file is part of [self.]
#
# [self.] is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3
# as published by the Free Software Foundation.
#
# [self.] is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with [self.]. If not, see <http://www.gnu.org/licenses/>.
'''Very simple communication module for [self.]
@author: Axel Tidemann
@contact: [email protected]
@license: GPL
Talk to [self.] over ØMQ sockets.
'''
import sys
import zmq
# Setup so it can be accessed from processes which don't have a zmq context, i.e. for one-shot messaging.
# Do not use this in contexts where timing is important, i.e. create a proper socket similar to this one.
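# A rough sketch of such a longer-lived socket (illustrative only, not part of
# this module's API):
#
#     context = zmq.Context()
#     sender = context.socket(zmq.PUSH)
#     sender.connect('tcp://localhost:5566')
#     for message in messages:
#         sender.send_json(message)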
def send(message, context=None, host='localhost', port=5566):
print 'This send() should only be used in simple circumstances, i.e. not in something that runs in performance-critical code!'
context = context or zmq.Context()
sender = context.socket(zmq.PUSH)
sender.connect('tcp://{}:{}'.format(host, port))
sender.send_json(message)
if __name__ == '__main__':
if len(sys.argv) > 1:
send(' '.join(sys.argv[1:]))
|
from horse.models import User, Movie
import pytest
@pytest.fixture
def service(app):
return app.ctx.recommendations.smart
def assert_recommendations(result, expected):
def readable(lst):
return [(item.pk, item.title) for item in lst]
result_movies = [movie for (movie, weight) in result]
assert readable(result_movies) == readable(expected)
def test_liked_movie_is_not_included(service, movies_repo):
user = User('root')
other = User('other')
movie_a = Movie('a')
movie_b = Movie('b')
movies_repo.store(movie_a)
movies_repo.store(movie_b)
user.add_to_liked_movies(movie_a)
other.add_to_liked_movies(movie_b)
result = service.recommend(user)
assert_recommendations(result, [movie_b])
def test_followed_users_movie_is_more_influential(service, movies_repo):
user = User('root')
other_user = User('other')
followed_user = User('followed')
movie_a = Movie('a')
movie_b = Movie('b')
movies_repo.store(movie_a)
movies_repo.store(movie_b)
user.add_to_followed_users(followed_user)
followed_user.add_to_liked_movies(movie_a)
other_user.add_to_liked_movies(movie_b)
result = service.recommend(user)
assert_recommendations(result, [movie_a, movie_b])
def test_nested_follows_are_more_influential(service, movies_repo):
movie_a = Movie('a')
movie_b = Movie('b')
movies_repo.store(movie_b)
movies_repo.store(movie_a)
user = User('root')
followed_user_1 = User('followed 1')
followed_user_2 = User('followed 2')
user.add_to_followed_users(followed_user_1)
user.add_to_followed_users(followed_user_2)
followed_user_2.add_to_followed_users(followed_user_1)
followed_user_1.add_to_liked_movies(movie_a)
followed_user_2.add_to_liked_movies(movie_b)
result = service.recommend(user)
assert_recommendations(result, [movie_a, movie_b])
def test_similar_users_are_more_influential(service, movies_repo):
movie_a = Movie('a')
movie_b = Movie('b')
movie_c = Movie('c')
movies_repo.store(movie_a)
movies_repo.store(movie_b)
movies_repo.store(movie_c)
user = User('root')
followed_user_1 = User('followed 1')
followed_user_2 = User('followed 2')
user.add_to_followed_users(followed_user_1)
user.add_to_followed_users(followed_user_2)
followed_user_1.add_to_liked_movies(movie_a)
followed_user_2.add_to_liked_movies(movie_b)
followed_user_2.add_to_liked_movies(movie_c)
# User shares a common movie with followed_user_2
user.add_to_liked_movies(movie_c)
result = service.recommend(user)
assert_recommendations(result, [movie_b, movie_a])
def test_globally_liked_movies_are_more_influential(service, movies_repo):
user = User('root')
movie_a = Movie('a')
movie_b = Movie('b')
movie_c = Movie('c')
movies_repo.store(movie_a)
movies_repo.store(movie_b)
movies_repo.store(movie_c)
user_a = User('a')
user_b = User('b')
user_a.add_to_liked_movies(movie_a)
user_a.add_to_liked_movies(movie_b)
user_b.add_to_liked_movies(movie_b)
result = service.recommend(user)
assert_recommendations(result, [movie_b, movie_a])
def test_movie_without_likes_is_not_recommended(service, movies_repo):
user = User('root')
movie_a = Movie('a')
movie_b = Movie('b')
movies_repo.store(movie_a)
movies_repo.store(movie_b)
user.add_to_liked_movies(movie_b)
result = service.recommend(user)
assert_recommendations(result, [])
|
from PyQt5.QtWidgets import QItemDelegate, QStyle, QStyleOptionViewItem, QComboBox, QWidget
from PyQt5.QtCore import Qt, QModelIndex, QAbstractItemModel, pyqtSlot
from urh.controller.SendRecvSettingsDialogController import SendRecvSettingsDialogController
from urh.dev.BackendHandler import BackendHandler
from urh import SimulatorSettings
class SimulatorSettingsComboBoxDelegate(QItemDelegate):
def __init__(self, controller, is_rx=True, parent=None):
super().__init__(parent)
self.controller = controller
self.generator_tab_controller = controller.generator_tab_controller
self.project_manager = controller.project_manager
self.compare_frame_controller = controller.compare_frame_controller
self.editor = None
self.is_rx = is_rx
self.backend_handler = BackendHandler()
@property
def is_tx(self):
return not self.is_rx
def createEditor(self, parent: QWidget, option: QStyleOptionViewItem, index: QModelIndex):
self.editor = QComboBox(parent)
self.load_combobox()
self.editor.activated.connect(self.combobox_activated)
return self.editor
def load_combobox(self):
self.editor.blockSignals(True)
self.editor.clear()
for profile in SimulatorSettings.profiles:
if self.is_rx and profile['supports_rx']:
self.editor.addItem(profile['name'], profile)
if self.is_tx and profile['supports_tx']:
self.editor.addItem(profile['name'], profile)
self.editor.addItem("...")
self.editor.blockSignals(False)
def setEditorData(self, editor: QWidget, index: QModelIndex):
self.editor.blockSignals(True)
item = index.model().data(index, Qt.EditRole)
self.editor.setCurrentIndex(self.find_index(item))
self.editor.blockSignals(False)
def setModelData(self, editor: QWidget, model: QAbstractItemModel, index: QModelIndex):
model.setData(index, editor.itemData(editor.currentIndex()), Qt.EditRole)
def dialog_finished(self):
self.load_combobox()
selected_profile = SimulatorSettings.profiles[self.sender().ui.comboBoxProfiles.currentIndex()]
self.editor.setCurrentIndex(self.find_index(selected_profile))
self.commitData.emit(self.editor)
def find_index(self, profile):
indx = self.editor.findData(profile)
return self.editor.count() - 1 if indx == -1 else indx
@pyqtSlot(int)
def combobox_activated(self, index: int):
if index == -1:
return
pm = self.project_manager
if index == self.editor.count() - 1:
signal = None
for proto in self.compare_frame_controller.protocol_list:
signal = proto.signal
if signal:
break
if signal:
bit_len = signal.bit_len
mod_type = signal.modulation_type
tolerance = signal.tolerance
noise = signal.noise_threshold
center = signal.qad_center
else:
bit_len = 100
mod_type = 1
tolerance = 5
noise = 0.001
center = 0.02
dialog = SendRecvSettingsDialogController(pm, noise, center, bit_len, tolerance, mod_type,
self.generator_tab_controller, parent=self.editor)
dialog.finished.connect(self.dialog_finished)
dialog.show()
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Issue.slug'
db.add_column('issue_issue', 'slug', self.gf('django.db.models.fields.SlugField')(max_length=80, null=True, db_index=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'Issue.slug'
db.delete_column('issue_issue', 'slug')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'issue.issue': {
'Meta': {'object_name': 'Issue'},
'body': ('django.db.models.fields.TextField', [], {'max_length': '2000'}),
'hotness': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'offensiveness': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '80', 'null': 'True', 'db_index': 'True'}),
'source_type': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'time_stamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2010, 9, 16, 14, 17, 28, 118475)'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'votes': ('django.db.models.fields.IntegerField', [], {'default': '0'})
}
}
complete_apps = ['issue']
|
import numpy as np
from fitensemble import belt, ensemble_fitter
import experiment_loader
import sys
import ALA3
belt.ne.set_num_threads(1)
def run(ff, prior, regularization_strength, bayesian_bootstrap_run):
pymc_filename = ALA3.data_directory + "/models/model_%s_%s_reg-%.1f-BB%d.h5" % (ff, prior, regularization_strength, bayesian_bootstrap_run)
populations_filename = ALA3.data_directory + "/frame_populations/pops_%s_%s_reg-%.1f-BB%d.dat" % (ff, prior, regularization_strength, bayesian_bootstrap_run)
predictions, measurements, uncertainties = experiment_loader.load(ff)
num_frames, num_measurements = predictions.shape
bootstrap_index_list = np.array_split(np.arange(num_frames), ALA3.num_blocks)
if bayesian_bootstrap_run == 0:
prior_pops = None
else:
prior_pops = ensemble_fitter.sample_prior_pops(num_frames, bootstrap_index_list)
if prior == "maxent":
model = belt.MaxEntBELT(predictions.values, measurements.values, uncertainties.values, regularization_strength, prior_pops=prior_pops)
elif prior == "dirichlet":
model = belt.DirichletBELT(predictions.values, measurements.values, uncertainties.values, regularization_strength, prior_pops=prior_pops)
elif prior == "MVN":
model = belt.MVNBELT(predictions.values, measurements.values, uncertainties.values, regularization_strength, prior_pops=prior_pops)
model.sample(ALA3.num_samples, thin=ALA3.thin, burn=ALA3.burn, filename=pymc_filename)
p = model.accumulate_populations()
np.savetxt(populations_filename, p)
if __name__ == "__main__":
ff = sys.argv[1]
prior = sys.argv[2]
regularization_strength = float(sys.argv[3])
bayesian_bootstrap_run = int(sys.argv[4])
run(ff, prior, regularization_strength, bayesian_bootstrap_run)
|
from cadnano.proxies.cnproxy import UndoCommand
from cadnano.cntypes import (
DocT,
StrandT
)
class AddModsCommand(UndoCommand):
def __init__(self, document: DocT, strand: StrandT, idx: int, mod_id: str):
super(AddModsCommand, self).__init__()
self._strand = strand
self._id_num = strand.idNum()
self._idx = idx
self._mod_id = mod_id
self.document = document
# end def
def redo(self):
strand = self._strand
mid = self._mod_id
part = strand.part()
idx = self._idx
part.addModStrandInstance(strand, idx, mid)
strand.strandModsAddedSignal.emit(strand, self.document, mid, idx)
# end def
def undo(self):
strand = self._strand
mid = self._mod_id
part = strand.part()
idx = self._idx
part.removeModStrandInstance(strand, idx, mid)
strand.strandModsRemovedSignal.emit(strand, self.document, mid, idx)
# end def
# end class
class RemoveModsCommand(UndoCommand):
def __init__(self, document, strand, idx, mod_id):
super(RemoveModsCommand, self).__init__()
self._strand = strand
self._id_num = strand.idNum()
self._idx = idx
self._mod_id = mod_id
self.document = document
# end def
def redo(self):
strand = self._strand
strand.isStaple()
mid = self._mod_id
part = strand.part()
idx = self._idx
part.removeModStrandInstance(strand, idx, mid)
strand.strandModsRemovedSignal.emit(strand, self.document, mid, idx)
# end def
def undo(self):
strand = self._strand
strand.isStaple()
mid = self._mod_id
part = strand.part()
idx = self._idx
part.addModStrandInstance(strand, idx, mid)
strand.strandModsAddedSignal.emit(strand, self.document, mid, idx)
# end def
# end class
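# Minimal usage sketch (assumes a cadnano Document exposing an undo stack via
# undoStack(); the variable names below are illustrative, not part of this module):
#
#   cmd = AddModsCommand(document, strand, idx=5, mod_id='mod-1')
#   document.undoStack().push(cmd)   # push() invokes redo(), emitting strandModsAddedSignal
#   document.undoStack().undo()      # reverts via undo(), emitting strandModsRemovedSignal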
|
import path
import logging
import subprocess
import pymel.core as pm
import os
import shutil
import sys
import xml.etree.cElementTree as ET
import xml.dom.minidom as minidom
#from test.badsyntax_future3 import result
#global shader dictionary
SHADER_DICT = {}
log = logging.getLogger("renderLogger")
def compileOSLShaders(renderer="Corona"):
oslShaderPath = path.path("H:/UserDatenHaggi/Documents/coding/OpenMaya/src/mayaTo{0}/mt{1}_devmodule/shaders".format(renderer, renderer[:2].lower()))
oslShadersToCompile = []
for p in oslShaderPath.listdir("*.osl"):
oslFile = path.path(p)
osoFile = path.path(p[:-3] + "oso")
if osoFile.exists():
if osoFile.mtime < oslFile.mtime:
osoFile.remove()
if osoFile.exists():
continue
oslShadersToCompile.append(oslFile)
for oslShader in oslShadersToCompile:
cmd = "{compiler} -v -o {output} {oslFile}".format(compiler="oslc.exe", output=oslShader.replace(".osl", ".oso"), oslFile=oslShader)
log.debug("Compiling osl shader: {0}".format(oslShader))
log.debug("Command: {0}".format(cmd))
IDLE_PRIORITY_CLASS = 64
process = subprocess.Popen(cmd, bufsize=1, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, creationflags=IDLE_PRIORITY_CLASS)
while 1:
line = process.stdout.readline()
if not line: break
log.debug(line)
pm.mel.trace(line.strip())
def getShaderInfo(shaderPath):
print "Getting shader info for path", shaderPath
osoFiles = getOSOFiles(shaderPath)
return osoFiles
def getOSODirs(renderer = "appleseed"):
try:
shaderDir = os.environ['{0}_OSL_SHADERS_LOCATION'.format(renderer.upper())]
except KeyError:
shaderDir = path.path(__file__).parent() + "/shaders"
print "Error: there is no environmentvariable called OSL_SHADERS_LOCATION. Please create one and point it to the base shader dir."
osoDirs = set()
for root, dirname, files in os.walk(shaderDir):
for filename in files:
if filename.endswith(".oso"):
osoDirs.add(root.replace("\\", "/"))
return list(osoDirs)
def getOSOFiles(renderer = "appleseed"):
try:
shaderDir = os.environ['{0}_OSL_SHADERS_LOCATION'.format(renderer.upper())]
except KeyError:
shaderDir = path.path(__file__).parent() + "/shaders"
print "Error: there is no environmentvariable called OSL_SHADERS_LOCATION. Please create one and point it to the base shader dir."
osoFiles = set()
for root, dirname, files in os.walk(shaderDir):
for filename in files:
if filename.endswith(".oso"):
osoFiles.add(os.path.join(root, filename).replace("\\", "/"))
return list(osoFiles)
def getOSLFiles(renderer = "appleseed"):
try:
shaderDir = os.environ['{0}_OSL_SHADERS_LOCATION'.format(renderer.upper())]
except KeyError:
shaderDir = path.path(__file__).parent() + "/shaders"
print "Error: there is no environmentvariable called OSL_SHADERS_LOCATION. Please create one and point it to the base shader dir."
osoFiles = set()
for root, dirname, files in os.walk(shaderDir):
for filename in files:
if filename.endswith(".osl"):
osoFiles.add(os.path.join(root, filename).replace("\\", "/"))
return list(osoFiles)
import pprint
def analyzeContent(content):
print "Analyze Content", content
d = {}
currentElement = None
for line in content:
if len(line) == 0:
continue
if line.startswith("shader"):
d['name'] = line.split(" ")[1].replace("\"", "")
d['mayaClassification'] = ""
d['mayaId'] = 0
d['help'] = ""
d['inputs'] = []
d['outputs'] = []
currentElement = d
else:
if line.startswith("Default value"):
currentElement['default'] = line.split(" ")[-1].replace("\"", "")
if currentElement.has_key("type"):
if currentElement["type"] in ["color", "vector"]:
vector = line.split("[")[-1].split("]")[0]
vector = vector.strip()
currentElement['default'] = map(float, vector.split(" "))
if line.startswith("Unknown default value"):
if currentElement.has_key("type"):
if currentElement["type"] in ["color", "vector"]:
currentElement['default'] = "[ 0.0, 0.0, 0.0 ]"
if line.startswith("metadata"):
if "options = " in line:
currentElement['options'] = line.split(" ")[-1].replace("\"", "").split("|")
if "hint = " in line:
currentElement['hint'] = line.split(" ")[-1].replace("\"", "")
if "min = " in line:
currentElement['min'] = line.split(" ")[-1]
if "max = " in line:
currentElement['max'] = line.split(" ")[-1]
if "help = " in line:
currentElement['help'] = " ".join(line.split("=")[1:]).replace("\"", "").strip()
if "mayaClassification = " in line:
#print "mayaClassification", " ".join(line.split("=")[1:]).replace("\"", "").strip()
currentElement['mayaClassification'] = " ".join(line.split("=")[1:]).replace("\"", "").strip()
if "mayaId = " in line:
#print "Found maya id", int(line.split("=")[-1])
currentElement['mayaId'] = int(line.split("=")[-1])
if line.startswith("\""): # found a parameter
currentElement = {}
currentElement['name'] = line.split(" ")[0].replace("\"", "")
currentElement['type'] = " ".join(line.split(" ")[1:]).replace("\"", "")
if "output" in line:
d['outputs'].append(currentElement)
currentElement = d['outputs'][-1]
else:
d['inputs'].append(currentElement)
currentElement = d['inputs'][-1]
return d
def readShadersXMLDescription():
if "MayaToCommon" in path.path(__file__):
xmlFile = path.path("H:/UserDatenHaggi/Documents/coding/mayaToAppleseed/mtap_devmodule/resources/shaderdefinitions.xml")
else:
xmlFile = path.path(__file__).parent / "resources/shaderdefinitions.xml"
if not xmlFile.exists():
log.error("No shader xml file: {0}".format(xmlFile))
return
tree = ET.parse(xmlFile)
shaders = tree.getroot()
log.debug("Reading shader info file: {0}".format(xmlFile))
#sys.stdout.write("Reading shader info file: {0}\n".format(xmlFile))
shaderDict = {}
for shader in shaders:
shDict = {}
shDict['name'] = shader.find('name').text
shDict['mayaClassification'] = ""
element = shader.find('mayaClassification')
if element is not None:
shDict['mayaClassification'] = element.text
shDict['mayaId'] = 0
element = shader.find('mayaId')
if element is not None:
shDict['mayaId'] = int(element.text)
shDict['help'] = ""
element = shader.find('help')
if element is not None:
shDict['help'] = element.text
shDict['inputs'] = []
shDict['outputs'] = []
for inp in shader.find('inputs'):
inpp = {}
inpp['name'] = inp.find('name').text
inpp['type'] = inp.find('type').text
inpp['help'] = ""
inpp['hint'] = ""
inpp['min'] = 0
inpp['max'] = 1
inpp['default'] = 0
findElement = inp.find('help')
if findElement is not None:
inpp['help'] = findElement.text
findElement = inp.find('hint')
if findElement is not None:
inpp['hint'] = inp.find('hint').text
findElement = inp.find('min')
if findElement is not None:
inpp['min'] = inp.find('min').text
findElement = inp.find('max')
if findElement is not None:
inpp['max'] = inp.find('max').text
findElement = inp.find('default')
if findElement is not None:
inpp['default'] = inp.find('default').text
findElement = inp.find('options')
if findElement is not None:
inpp['options'] = findElement.text
shDict['inputs'].append(inpp)
for inp in shader.find('outputs'):
inpp = {}
inpp['name'] = inp.find('name').text
inpp['type'] = inp.find('type').text
inpp['help'] = ""
shDict['outputs'].append(inpp)
shaderDict[shDict['name']] = shDict
global SHADER_DICT
SHADER_DICT = shaderDict
return shaderDict
def addSubElementList(listEntry, parentElement, subName = "input"):
for element in listEntry:
inElement = ET.SubElement(parentElement,subName)
for ikey, ivalue in element.iteritems():
subElement = ET.SubElement(inElement,ikey)
subElement.text = str(ivalue)
def writeXMLShaderDescription(shaderDict=None):
global SHADER_DICT
if shaderDict is None:
shaderDict = SHADER_DICT
xmlFile = None
if "MayaToCommon" in path.path(__file__):
xmlFile = path.path("H:/UserDatenHaggi/Documents/coding/mayaToAppleseed/mtap_devmodule/resources/shaderdefinitions.xml")
else:
xmlFile = path.path(__file__).parent / "resources/shaderdefinitions.xml"
if not xmlFile.exists():
log.error("No shader xml file: {0}".format(xmlFile))
return
root = ET.Element('shaders')
for shaderKey in shaderDict.keys():
shader = shaderDict[shaderKey]
sh = ET.SubElement(root,"shader")
for key, value in shader.iteritems():
if key == "inputs":
ins = ET.SubElement(sh,"inputs")
addSubElementList(value, ins, subName="input")
elif key == "outputs":
ins = ET.SubElement(sh,"outputs")
addSubElementList(value, ins, subName="output")
else:
subElement = ET.SubElement(sh,key)
subElement.text = str(value)
tree = ET.ElementTree(root)
tree.write(xmlFile)
log.debug("Writing shader info file: {0}".format(xmlFile))
# just make it nice to read
xml = minidom.parse(xmlFile)
pretty_xml_as_string = xml.toprettyxml()
root = ET.fromstring(pretty_xml_as_string)
tree = ET.ElementTree(root)
tree.write(xmlFile)
def updateOSLShaderInfo(force=False, osoFiles=[]):
pp = pprint.PrettyPrinter(indent=4)
IDLE_PRIORITY_CLASS = 64
cmd = "oslinfo -v"
infoDict = {}
# if we have updates we need to update the xml file as well.
# first read the xml file
readShadersXMLDescription()
global SHADER_DICT
for osoFile in osoFiles:
infoCmd = cmd + " " + osoFile
shaderName = path.path(osoFile).basename().replace(".oso", "")
log.info("Updating shader info for shader {1}. cmd: {0}".format(infoCmd, shaderName))
process = subprocess.Popen(infoCmd, bufsize=1, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, creationflags=IDLE_PRIORITY_CLASS)
content = []
while 1:
line = process.stdout.readline()
line = line.strip()
content.append(line)
if not line: break
infoDict[shaderName] = analyzeContent(content)
SHADER_DICT[shaderName] = infoDict[shaderName]
#pp.pprint(infoDict)
writeXMLShaderDescription()
return infoDict
def compileAllShaders(renderer = "appleseed"):
print "cas"
try:
shaderDir = os.environ['{0}_OSL_SHADERS_LOCATION'.format(renderer.upper())]
except KeyError:
log.error("Error: there is no environmentvariable called OSL_SHADERS_LOCATION. Please create one and point it to the base shader dir.")
# we expect this file in module/scripts so we can try to find the shaders in ../shaders
log.error("Trying to find the shaders dir from current file: {0}".format(__file__))
shaderDir = path.path(__file__).parent / "shaders"
if shaderDir.exists():
log.info("Using found shaders directory {0}".format(shaderDir))
include_dir = os.path.join(shaderDir, "src/include")
log.info("reading shaders from {0}".format(shaderDir))
oslc_cmd = "oslc"
failureDict = {}
osoInfoShaders = []
for root, dirname, files in os.walk(shaderDir):
for filename in files:
if filename.endswith(".osl"):
oslPath = os.path.join(root, filename)
dest_dir = root.replace("\\", "/").replace("shaders/src", "shaders") + "/"
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
osoOutputPath = dest_dir + filename.replace(".osl", ".oso")
osoOutputFile = path.path(osoOutputPath)
oslInputFile = path.path(oslPath)
if osoOutputFile.exists():
if osoOutputFile.mtime > oslInputFile.mtime:
log.debug("oso file {0} up to date, no compilation needed.".format(osoOutputFile.basename()))
continue
else:
osoOutputFile.remove()
log.debug("compiling shader: {0}".format(oslInputFile))
saved_wd = os.getcwd()
os.chdir(root)
compileCmd = oslc_cmd + " -v -I" + include_dir + ' -o '+ osoOutputPath + ' ' + oslInputFile
log.debug("compile command: {0}".format(compileCmd))
IDLE_PRIORITY_CLASS = 64
process = subprocess.Popen(compileCmd, bufsize=1, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, creationflags=IDLE_PRIORITY_CLASS)
progress = []
fail = False
while 1:
line = process.stdout.readline()
if not line: break
log.debug(line)
line = line.strip()
progress.append(line)
pm.mel.trace(line.strip())
if "error" in line:
fail = True
if fail:
print "set dict", osoOutputFile.basename(), "to", progress
failureDict[osoOutputFile.basename()] = progress
else:
osoInfoShaders.append(osoOutputPath)
os.chdir(saved_wd)
if len(failureDict.keys()) > 0:
log.info("\n\nShader compilation failed for:")
for key, content in failureDict.iteritems():
log.info("Shader {0}\n{1}\n\n".format(key, "\n".join(content)))
else:
log.info("Shader compilation done.")
if len(osoInfoShaders) > 0:
log.info("Updating shaderInfoFile.")
updateOSLShaderInfo(force=False, osoFiles=osoInfoShaders)
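# Typical usage from a Maya Python session (the module name below is an assumption,
# and the *_OSL_SHADERS_LOCATION environment variable is expected to be set):
#   import oslShaderTools as ost
#   ost.compileAllShaders(renderer="appleseed")   # recompile any out-of-date .osl files
#   ost.readShadersXMLDescription()               # repopulate SHADER_DICT from the XML cache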
|
# Copyright (C) 2010-2014 CEA/DEN, EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : [email protected]
#
# File : paravis.py
# Module : PARAVIS
#
import os, new
import PARAVIS
import SALOME
import SALOME_Session_idl
import SALOMEDS
import SALOME_ModuleCatalog
from omniORB import CORBA
from time import sleep
from salome import *
myORB = None
myNamingService = None
myLifeCycleCORBA = None
myNamingService = None
myLocalStudyManager = None
myLocalStudy = None
myLocalParavis = None
myDelay = None
mySession = None
## Initialization of paravis server
def Initialize(theORB, theNamingService, theLifeCycleCORBA, theStudyManager, theStudy, theDelay) :
global myORB, myNamingService, myLifeCycleCORBA, myLocalStudyManager, myLocalStudy
global mySession, myDelay
myDelay = theDelay
myORB = theORB
myNamingService = theNamingService
myLifeCycleCORBA = theLifeCycleCORBA
myLocalStudyManager = theStudyManager
while mySession == None:
mySession = myNamingService.Resolve("/Kernel/Session")
mySession = mySession._narrow(SALOME.Session)
mySession.GetInterface()
myDelay = theDelay
sleep(myDelay)
myLocalParavis = myLifeCycleCORBA.FindOrLoadComponent("FactoryServer", "PARAVIS")
myLocalStudy = theStudy
myLocalParavis.SetCurrentStudy(myLocalStudy)
myLocalParavis.ActivateModule()
return myLocalParavis
def ImportFile(theFileName):
"Import a file of any format supported by ParaView"
myParavis.ImportFile(theFileName)
def createFunction(theName):
"Create function - constructor of Paravis object"
def MyFunction():
return myParavis.CreateClass(theName)
return MyFunction
def createConstructors():
"Create constructor functions according to list of extracted classes"
g = globals()
aClassNames = myParavis.GetClassesList();
for aName in aClassNames:
g[aName] = createFunction(aName)
## Initialize of a PARAVIS interface
myParavis = Initialize(orb, naming_service,lcc,myStudyManager,myStudy, 2)
## Initialize constructor functions
createConstructors()
## Initialize Paravis static objects
vtkSMObject = vtkSMObject()
vtkProcessModule = vtkProcessModule()
vtkPVPythonModule = vtkPVPythonModule()
vtkSMProxyManager = vtkSMProxyManager()
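## Example: after initialization, every class reported by GetClassesList() is available
## as a global constructor function, e.g. (class name illustrative, depends on the server):
##   reader = LegacyVTKReader(FileNames=['sample.vtk'])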
|
import nltk.grammar as gram
import pycryptics.grammar.nodes as nd
from pycryptics.utils.indicators import INDICATORS
"""
A Context Free Grammar (CFG) to describe allowed substructures of cryptic crossword clues and how to solve each substructure.
"""
# The basic wordplay transforms
top = gram.Nonterminal(nd.TopNode)
lit = gram.Nonterminal(nd.LitNode)
d = gram.Nonterminal(nd.DNode)
syn = gram.Nonterminal(nd.SynNode)
first = gram.Nonterminal(nd.FirstNode)
null = gram.Nonterminal(nd.NullNode)
# Clue functions
ana = gram.Nonterminal(nd.AnaNode)
sub = gram.Nonterminal(nd.SubNode)
sub_init = gram.Nonterminal(nd.SubInitNode)
sub_final = gram.Nonterminal(nd.SubFinalNode)
ins = gram.Nonterminal(nd.InsNode)
rev = gram.Nonterminal(nd.RevNode)
# ana_, rev_, etc. are anagram/reversal/etc indicators,
# so they produce no text in the wordplay output
ana_ = gram.Nonterminal(nd.AnaIndNode)
sub_ = gram.Nonterminal(nd.SubIndNode)
sub_init_ = gram.Nonterminal(nd.SubInitIndNode)
sub_final_ = gram.Nonterminal(nd.SubFinalIndNode)
ins_ = gram.Nonterminal(nd.InsIndNode)
rev_ = gram.Nonterminal(nd.RevIndNode)
ind_nodes = [nd.AnaIndNode, nd.SubIndNode, nd.SubFinalIndNode, nd.SubInitIndNode, nd.InsIndNode, nd.RevIndNode]
# All the *_arg elements just exist to make the production rules more clear
# so they just pass their inputs literally
clue_arg = gram.Nonterminal(nd.ClueArgNode)
ins_arg = gram.Nonterminal(nd.InsArgNode)
ana_arg = gram.Nonterminal(nd.AnaArgNode)
sub_arg = gram.Nonterminal(nd.SubArgNode)
rev_arg = gram.Nonterminal(nd.RevArgNode)
production_rules = {
ins: [[ins_arg, ins_, ins_arg], [ins_arg, ins_arg, ins_]],
ana: [[ana_arg, ana_], [ana_, ana_arg]],
sub: [[sub_arg, sub_], [sub_, sub_arg]],
sub_init: [[sub_arg, sub_init_], [sub_init_, sub_arg]],
sub_final: [[sub_arg, sub_final_], [sub_final_, sub_arg]],
rev: [[rev_arg, rev_], [rev_, rev_arg]],
clue_arg: [[lit], [syn], [first], [null], [ana], [sub], [ins], [rev], [sub_init], [sub_final]],
ins_arg: [[lit], [ana], [syn], [sub], [sub_init], [sub_final], [first], [rev]],
ana_arg: [[lit]],
sub_arg: [[lit], [syn], [rev]],
rev_arg: [[lit], [syn]],
top: [[clue_arg, d],
[clue_arg, clue_arg, d],
[clue_arg, clue_arg, clue_arg, d],
[d, clue_arg],
[d, clue_arg, clue_arg],
[d, clue_arg, clue_arg, clue_arg],
]
}
additional_clue_rules = [[sub_init_] + [first] * i for i in range(3, 8)] + [[first] * i + [sub_init_] for i in range(3, 8)]
for r in additional_clue_rules:
production_rules[top].append(r + [d])
production_rules[top].append([d] + r)
base_prods = []
for n, rules in production_rules.items():
for r in rules:
base_prods.append(gram.Production(n, r))
known_functions = {'in': [ins_, lit, null, sub_],
'a': [lit, syn, null],
'is': [null, lit],
'for': [null, syn],
'large': [first, syn],
'primarily': [sub_init_],
'and': [null, lit],
'of': [null],
'on': [ins_, null, lit, syn],
'with': [null, ins_]}
def generate_grammar(phrases):
prods = []
for p in phrases:
if p in known_functions:
tags = known_functions[p]
else:
found = False
tags = [lit, d, syn, first]
for ind in ind_nodes:
if any(w == p or (len(w) > 5 and abs(len(w) - len(p)) <= 3 and p.startswith(w[:-3])) for w in INDICATORS[ind.name]):
tags.append(gram.Nonterminal(ind))
found = True
if not found:
tags = [lit, d, syn, first, ana_, sub_, sub_init_, sub_final_, rev_]
for t in tags:
prods.append(gram.Production(t, [p]))
return gram.ContextFreeGrammar(top, base_prods + prods)
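# Sketch of how the grammar might be used (assumes an nltk chart parser; the clue
# fragments below are made up for illustration):
#
#   import nltk
#   phrases = ['dog', 'star', 'primarily', 'in', 'space']
#   grammar = generate_grammar(phrases)
#   parser = nltk.ChartParser(grammar)
#   for tree in parser.parse(phrases):
#       print(tree)   # each tree is one candidate wordplay structure for the clue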
|
import re
from django import forms
from django.shortcuts import redirect
from django.core.urlresolvers import reverse
from django.forms import formsets, ValidationError
from django.views.generic import TemplateView
from django.utils.datastructures import SortedDict
from django.utils.decorators import classonlymethod
from django.contrib.formtools.wizard.storage import get_storage
from django.contrib.formtools.wizard.storage.exceptions import NoFileStorageConfigured
from django.contrib.formtools.wizard.forms import ManagementForm
def normalize_name(name):
new = re.sub('(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))', '_\\1', name)
return new.lower().strip('_')
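# e.g. normalize_name('ContactWizardView') -> 'contact_wizard_view'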
class StepsHelper(object):
def __init__(self, wizard):
self._wizard = wizard
def __dir__(self):
return self.all
def __len__(self):
return self.count
def __repr__(self):
return '<StepsHelper for %s (steps: %s)>' % (self._wizard, self.all)
@property
def all(self):
"Returns the names of all steps/forms."
return self._wizard.get_form_list().keys()
@property
def count(self):
"Returns the total number of steps/forms in this the wizard."
return len(self.all)
@property
def current(self):
"""
Returns the current step. If no current step is stored in the
storage backend, the first step will be returned.
"""
return self._wizard.storage.current_step or self.first
@property
def first(self):
"Returns the name of the first step."
return self.all[0]
@property
def last(self):
"Returns the name of the last step."
return self.all[-1]
@property
def next(self):
"Returns the next step."
return self._wizard.get_next_step()
@property
def prev(self):
"Returns the previous step."
return self._wizard.get_prev_step()
@property
def index(self):
"Returns the index for the current step."
return self._wizard.get_step_index()
@property
def step0(self):
return int(self.index)
@property
def step1(self):
return int(self.index) + 1
class WizardView(TemplateView):
"""
The WizardView is used to create multi-page forms and handles all the
storage and validation stuff. The wizard is based on Django's generic
class based views.
"""
storage_name = None
form_list = None
initial_dict = None
instance_dict = None
condition_dict = None
template_name = 'formtools/wizard/wizard_form.html'
def __repr__(self):
return '<%s: forms: %s>' % (self.__class__.__name__, self.form_list)
@classonlymethod
def as_view(cls, *args, **kwargs):
"""
This method is used within urls.py to create unique formwizard
instances for every request. We need to override this method because
we add some kwargs which are needed to make the formwizard usable.
"""
initkwargs = cls.get_initkwargs(*args, **kwargs)
return super(WizardView, cls).as_view(**initkwargs)
@classmethod
def get_initkwargs(cls, form_list, initial_dict=None,
instance_dict=None, condition_dict=None, *args, **kwargs):
"""
Creates a dict with all needed parameters for the form wizard instances.
* `form_list` - is a list of forms. The list entries can be single form
classes or tuples of (`step_name`, `form_class`). If you pass a list
of forms, the formwizard will convert the class list to
(`zero_based_counter`, `form_class`). This is needed to access the
form for a specific step.
* `initial_dict` - contains a dictionary of initial data dictionaries.
The key should be equal to the `step_name` in the `form_list` (or
the str of the zero based counter - if no step_names added in the
`form_list`)
* `instance_dict` - contains a dictionary of instance objects. This list
is only used when `ModelForm`s are used. The key should be equal to
the `step_name` in the `form_list`. Same rules as for `initial_dict`
apply.
* `condition_dict` - contains a dictionary of boolean values or
callables. If the value for a specific `step_name` is callable it
will be called with the formwizard instance as the only argument.
If the return value is true, the step's form will be used.
"""
kwargs.update({
'initial_dict': initial_dict or {},
'instance_dict': instance_dict or {},
'condition_dict': condition_dict or {},
})
init_form_list = SortedDict()
assert len(form_list) > 0, 'at least one form is needed'
# walk through the passed form list
for i, form in enumerate(form_list):
if isinstance(form, (list, tuple)):
# if the element is a tuple, add the tuple to the newly created
# sorted dictionary.
init_form_list[unicode(form[0])] = form[1]
else:
# if not, add the form with a zero based counter as unicode
init_form_list[unicode(i)] = form
# walk through the newly created list of forms
for form in init_form_list.itervalues():
if issubclass(form, formsets.BaseFormSet):
# if the element is based on BaseFormSet (FormSet/ModelFormSet)
# we need to override the form variable.
form = form.form
# check if any form contains a FileField, if yes, we need a
# file_storage added to the formwizard (by subclassing).
for field in form.base_fields.itervalues():
if (isinstance(field, forms.FileField) and
not hasattr(cls, 'file_storage')):
raise NoFileStorageConfigured
# build the kwargs for the formwizard instances
kwargs['form_list'] = init_form_list
return kwargs
def get_wizard_name(self):
return normalize_name(self.__class__.__name__)
def get_prefix(self):
# TODO: Add some kind of unique id to prefix
return self.wizard_name
def get_form_list(self):
"""
This method returns a form_list based on the initial form list but
checks if there is a condition method/value in the condition_list.
If an entry exists in the condition list, it will call/read the value
and respect the result. (True means add the form, False means ignore
the form)
The form_list is always generated on the fly because condition methods
could use data from other (maybe previous) forms.
"""
form_list = SortedDict()
for form_key, form_class in self.form_list.iteritems():
# try to fetch the value from condition list, by default, the form
# gets passed to the new list.
condition = self.condition_dict.get(form_key, True)
if callable(condition):
# call the value if needed, passes the current instance.
condition = condition(self)
if condition:
form_list[form_key] = form_class
return form_list
def dispatch(self, request, *args, **kwargs):
"""
This method gets called by the routing engine. The first argument is
`request` which contains a `HttpRequest` instance.
The request is stored in `self.request` for later use. The storage
instance is stored in `self.storage`.
After processing the request using the `dispatch` method, the
response gets updated by the storage engine (for example add cookies).
"""
# add the storage engine to the current formwizard instance
self.wizard_name = self.get_wizard_name()
self.prefix = self.get_prefix()
self.storage = get_storage(self.storage_name, self.prefix, request,
getattr(self, 'file_storage', None))
self.steps = StepsHelper(self)
response = super(WizardView, self).dispatch(request, *args, **kwargs)
# update the response (e.g. adding cookies)
self.storage.update_response(response)
return response
def get(self, request, *args, **kwargs):
"""
This method handles GET requests.
If a GET request reaches this point, the wizard assumes that the user
just starts at the first step or wants to restart the process.
The data of the wizard will be reset before rendering the first step.
"""
self.storage.reset()
# reset the current step to the first step.
self.storage.current_step = self.steps.first
return self.render(self.get_form())
def post(self, *args, **kwargs):
"""
This method handles POST requests.
The wizard will render either the current step (if form validation
wasn't successful), the next step (if the current step was stored
successfully) or the done view (if no more steps are available)
"""
# Look for a wizard_prev_step element in the posted data which
# contains a valid step name. If one was found, render the requested
# form. (This makes stepping back a lot easier).
wizard_prev_step = self.request.POST.get('wizard_prev_step', None)
if wizard_prev_step and wizard_prev_step in self.get_form_list():
self.storage.current_step = wizard_prev_step
form = self.get_form(
data=self.storage.get_step_data(self.steps.current),
files=self.storage.get_step_files(self.steps.current))
return self.render(form)
# Check if form was refreshed
management_form = ManagementForm(self.request.POST, prefix=self.prefix)
if not management_form.is_valid():
raise ValidationError(
'ManagementForm data is missing or has been tampered.')
form_current_step = management_form.cleaned_data['current_step']
if (form_current_step != self.steps.current and
self.storage.current_step is not None):
# form refreshed, change current step
self.storage.current_step = form_current_step
# get the form for the current step
form = self.get_form(data=self.request.POST, files=self.request.FILES)
# and try to validate
if form.is_valid():
# if the form is valid, store the cleaned data and files.
self.storage.set_step_data(self.steps.current, self.process_step(form))
self.storage.set_step_files(self.steps.current, self.process_step_files(form))
# check if the current step is the last step
if self.steps.current == self.steps.last:
# no more steps, render done view
return self.render_done(form, **kwargs)
else:
# proceed to the next step
return self.render_next_step(form)
return self.render(form)
def render_next_step(self, form, **kwargs):
"""
This method gets called when the next step/form should be rendered.
`form` contains the last/current form.
"""
# get the form instance based on the data from the storage backend
# (if available).
next_step = self.steps.next
new_form = self.get_form(next_step,
data=self.storage.get_step_data(next_step),
files=self.storage.get_step_files(next_step))
# change the stored current step
self.storage.current_step = next_step
return self.render(new_form, **kwargs)
def render_done(self, form, **kwargs):
"""
This method gets called when all forms passed. The method should also
re-validate all steps to prevent manipulation. If any form doesn't
validate, `render_revalidation_failure` should get called.
If everything is fine call `done`.
"""
final_form_list = []
# walk through the form list and try to validate the data again.
for form_key in self.get_form_list():
form_obj = self.get_form(step=form_key,
data=self.storage.get_step_data(form_key),
files=self.storage.get_step_files(form_key))
if not form_obj.is_valid():
return self.render_revalidation_failure(form_key, form_obj, **kwargs)
final_form_list.append(form_obj)
# render the done view and reset the wizard before returning the
# response. This is needed to prevent from rendering done with the
# same data twice.
done_response = self.done(final_form_list, **kwargs)
self.storage.reset()
return done_response
def get_form_prefix(self, step=None, form=None):
"""
Returns the prefix which will be used when calling the actual form for
the given step. `step` contains the step-name, `form` the form which
will be called with the returned prefix.
If no step is given, the form_prefix will determine the current step
automatically.
"""
if step is None:
step = self.steps.current
return str(step)
def get_form_initial(self, step):
"""
Returns a dictionary which will be passed to the form for `step`
as `initial`. If no initial data was provided while initializing the
form wizard, an empty dictionary will be returned.
"""
return self.initial_dict.get(step, {})
def get_form_instance(self, step):
"""
Returns an object which will be passed to the form for `step`
as `instance`. If no instance object was provided while initializing
the form wizard, None will be returned.
"""
return self.instance_dict.get(step, None)
def get_form_kwargs(self, step=None):
"""
Returns the keyword arguments for instantiating the form
(or formset) on given step.
"""
return {}
def get_form(self, step=None, data=None, files=None):
"""
Constructs the form for a given `step`. If no `step` is defined, the
current step will be determined automatically.
The form will be initialized using the `data` argument to prefill the
new form. If needed, instance or queryset (for `ModelForm` or
`ModelFormSet`) will be added too.
"""
if step is None:
step = self.steps.current
# prepare the kwargs for the form instance.
kwargs = self.get_form_kwargs(step)
kwargs.update({
'data': data,
'files': files,
'prefix': self.get_form_prefix(step, self.form_list[step]),
'initial': self.get_form_initial(step),
})
if issubclass(self.form_list[step], forms.ModelForm):
# If the form is based on ModelForm, add instance if available.
kwargs.update({'instance': self.get_form_instance(step)})
elif issubclass(self.form_list[step], forms.models.BaseModelFormSet):
# If the form is based on ModelFormSet, add queryset if available.
kwargs.update({'queryset': self.get_form_instance(step)})
return self.form_list[step](**kwargs)
def process_step(self, form):
"""
This method is used to postprocess the form data. By default, it
returns the raw `form.data` dictionary.
"""
return self.get_form_step_data(form)
def process_step_files(self, form):
"""
This method is used to postprocess the form files. By default, it
returns the raw `form.files` dictionary.
"""
return self.get_form_step_files(form)
def render_revalidation_failure(self, step, form, **kwargs):
"""
Gets called when a form doesn't validate when rendering the done
view. By default, it changes the current step to the failing form's step
and renders the form.
"""
self.storage.current_step = step
return self.render(form, **kwargs)
def get_form_step_data(self, form):
"""
Is used to return the raw form data. You may use this method to
manipulate the data.
"""
return form.data
def get_form_step_files(self, form):
"""
Is used to return the raw form files. You may use this method to
manipulate the data.
"""
return form.files
def get_all_cleaned_data(self):
"""
Returns a merged dictionary of all step cleaned_data dictionaries.
If a step contains a `FormSet`, the key will be prefixed with formset
and contain a list of the formset's cleaned_data dictionaries.
"""
cleaned_data = {}
for form_key in self.get_form_list():
form_obj = self.get_form(
step=form_key,
data=self.storage.get_step_data(form_key),
files=self.storage.get_step_files(form_key)
)
if form_obj.is_valid():
if isinstance(form_obj.cleaned_data, (tuple, list)):
cleaned_data.update({
'formset-%s' % form_key: form_obj.cleaned_data
})
else:
cleaned_data.update(form_obj.cleaned_data)
return cleaned_data
def get_cleaned_data_for_step(self, step):
"""
Returns the cleaned data for a given `step`. Before returning the
cleaned data, the stored values are being revalidated through the
form. If the data doesn't validate, None will be returned.
"""
if step in self.form_list:
form_obj = self.get_form(step=step,
data=self.storage.get_step_data(step),
files=self.storage.get_step_files(step))
if form_obj.is_valid():
return form_obj.cleaned_data
return None
def get_next_step(self, step=None):
"""
Returns the next step after the given `step`. If no more steps are
available, None will be returned. If the `step` argument is None, the
current step will be determined automatically.
"""
if step is None:
step = self.steps.current
form_list = self.get_form_list()
key = form_list.keyOrder.index(step) + 1
if len(form_list.keyOrder) > key:
return form_list.keyOrder[key]
return None
def get_prev_step(self, step=None):
"""
Returns the previous step before the given `step`. If there are no
steps available, None will be returned. If the `step` argument is
None, the current step will be determined automatically.
"""
if step is None:
step = self.steps.current
form_list = self.get_form_list()
key = form_list.keyOrder.index(step) - 1
if key >= 0:
return form_list.keyOrder[key]
return None
def get_step_index(self, step=None):
"""
Returns the index for the given `step` name. If no step is given,
the current step will be used to get the index.
"""
if step is None:
step = self.steps.current
return self.get_form_list().keyOrder.index(step)
def get_context_data(self, form, *args, **kwargs):
"""
Returns the template context for a step. You can overwrite this method
to add more data for all or some steps. This method returns a
dictionary containing the rendered form step. Available template
context variables are:
* all extra data stored in the storage backend
* `form` - form instance of the current step
* `wizard` - the wizard instance itself
Example:
.. code-block:: python
class MyWizard(FormWizard):
def get_context_data(self, form, **kwargs):
context = super(MyWizard, self).get_context_data(form, **kwargs)
if self.steps.current == 'my_step_name':
context.update({'another_var': True})
return context
"""
context = super(WizardView, self).get_context_data(*args, **kwargs)
context.update(self.storage.extra_data)
context['wizard'] = {
'form': form,
'steps': self.steps,
'management_form': ManagementForm(prefix=self.prefix, initial={
'current_step': self.steps.current,
}),
}
return context
def render(self, form=None, **kwargs):
"""
Returns a ``HttpResponse`` containing all needed context data.
"""
form = form or self.get_form()
context = self.get_context_data(form, **kwargs)
return self.render_to_response(context)
def done(self, form_list, **kwargs):
"""
This method must be overridden by a subclass to process the form data
after all steps have been processed.
"""
raise NotImplementedError("Your %s class has not defined a done() "
"method, which is required." % self.__class__.__name__)
class SessionWizardView(WizardView):
"""
A WizardView with pre-configured SessionStorage backend.
"""
storage_name = 'django.contrib.formtools.wizard.storage.session.SessionStorage'
class CookieWizardView(WizardView):
"""
A WizardView with pre-configured CookieStorage backend.
"""
storage_name = 'django.contrib.formtools.wizard.storage.cookie.CookieStorage'
class NamedUrlWizardView(WizardView):
"""
A WizardView with URL named steps support.
"""
url_name = None
done_step_name = None
@classmethod
def get_initkwargs(cls, *args, **kwargs):
"""
We require a url_name to reverse URLs later. Additionally users can
pass a done_step_name to change the URL name of the "done" view.
"""
assert 'url_name' in kwargs, 'URL name is needed to resolve correct wizard URLs'
extra_kwargs = {
'done_step_name': kwargs.pop('done_step_name', 'done'),
'url_name': kwargs.pop('url_name'),
}
initkwargs = super(NamedUrlWizardView, cls).get_initkwargs(*args, **kwargs)
initkwargs.update(extra_kwargs)
assert initkwargs['done_step_name'] not in initkwargs['form_list'], \
'step name "%s" is reserved for "done" view' % initkwargs['done_step_name']
return initkwargs
def get(self, *args, **kwargs):
"""
This renders the form or, if needed, does the http redirects.
"""
step_url = kwargs.get('step', None)
if step_url is None:
if 'reset' in self.request.GET:
self.storage.reset()
self.storage.current_step = self.steps.first
if self.request.GET:
query_string = "?%s" % self.request.GET.urlencode()
else:
query_string = ""
next_step_url = reverse(self.url_name, kwargs={
'step': self.steps.current,
}) + query_string
return redirect(next_step_url)
# is the current step the "done" name/view?
elif step_url == self.done_step_name:
last_step = self.steps.last
return self.render_done(self.get_form(step=last_step,
data=self.storage.get_step_data(last_step),
files=self.storage.get_step_files(last_step)
), **kwargs)
# is the url step name not equal to the step in the storage?
# if yes, change the step in the storage (if name exists)
elif step_url == self.steps.current:
# URL step name and storage step name are equal, render!
return self.render(self.get_form(
data=self.storage.current_step_data,
files=self.storage.get_step_files(self.steps.current),
), **kwargs)
elif step_url in self.get_form_list():
self.storage.current_step = step_url
return self.render(self.get_form(
data=self.storage.current_step_data,
files=self.storage.get_step_files(self.steps.current),
), **kwargs)
# invalid step name, reset to first and redirect.
else:
self.storage.current_step = self.steps.first
return redirect(self.url_name, step=self.steps.first)
def post(self, *args, **kwargs):
"""
Do a redirect if user presses the prev. step button. The rest of this
is super'd from FormWizard.
"""
prev_step = self.request.POST.get('wizard_prev_step', None)
if prev_step and prev_step in self.get_form_list():
self.storage.current_step = prev_step
return redirect(self.url_name, step=prev_step)
return super(NamedUrlWizardView, self).post(*args, **kwargs)
def render_next_step(self, form, **kwargs):
"""
When using the NamedUrlFormWizard, we have to redirect to update the
browser's URL to match the shown step.
"""
next_step = self.get_next_step()
self.storage.current_step = next_step
return redirect(self.url_name, step=next_step)
def render_revalidation_failure(self, failed_step, form, **kwargs):
"""
When a step fails, we have to redirect the user to the first failing
step.
"""
self.storage.current_step = failed_step
return redirect(self.url_name, step=failed_step)
def render_done(self, form, **kwargs):
"""
When rendering the done view, we have to redirect first (if the URL
name doesn't fit).
"""
if kwargs.get('step', None) != self.done_step_name:
return redirect(self.url_name, step=self.done_step_name)
return super(NamedUrlWizardView, self).render_done(form, **kwargs)
class NamedUrlSessionWizardView(NamedUrlWizardView):
"""
A NamedUrlWizardView with pre-configured SessionStorage backend.
"""
storage_name = 'django.contrib.formtools.wizard.storage.session.SessionStorage'
class NamedUrlCookieWizardView(NamedUrlWizardView):
"""
A NamedUrlWizardView with pre-configured CookieStorage backend.
"""
storage_name = 'django.contrib.formtools.wizard.storage.cookie.CookieStorage'
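# Minimal usage sketch (the form classes, template path, URL pattern, and the
# handle_submission helper are illustrative assumptions, not part of this module):
#
#   class NameForm(forms.Form):
#       name = forms.CharField()
#
#   class EmailForm(forms.Form):
#       email = forms.EmailField()
#
#   class ContactWizard(SessionWizardView):
#       template_name = 'forms/wizard.html'
#       def done(self, form_list, **kwargs):
#           handle_submission([f.cleaned_data for f in form_list])
#           return redirect('/contact/thanks/')
#
#   # urls.py:
#   #   url(r'^contact/$', ContactWizard.as_view([NameForm, EmailForm]))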
|
# Copyright 2017 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from testrunner.local import testsuite
from testrunner.objects import testcase
proposal_flags = [{
'name': 'reference-types',
'flags': ['--experimental-wasm-anyref',
'--no-experimental-wasm-bulk-memory']
},
{
'name': 'bulk-memory-operations',
'flags': ['--experimental-wasm-bulk-memory']
},
{
'name': 'js-types',
'flags': ['--experimental-wasm-type-reflection',
'--no-experimental-wasm-bulk-memory']
},
{
'name': 'JS-BigInt-integration',
'flags': ['--experimental-wasm-bigint']
},
]
class TestLoader(testsuite.JSTestLoader):
pass
class TestSuite(testsuite.TestSuite):
def __init__(self, *args, **kwargs):
super(TestSuite, self).__init__(*args, **kwargs)
self.test_root = os.path.join(self.root, "tests")
self._test_loader.test_root = self.test_root
def _test_loader_class(self):
return TestLoader
def _test_class(self):
return TestCase
class TestCase(testcase.D8TestCase):
def _get_files_params(self):
return [os.path.join(self.suite.test_root, self.path + self._get_suffix())]
def _get_source_flags(self):
for proposal in proposal_flags:
if os.sep.join(['proposals', proposal['name']]) in self.path:
return proposal['flags']
return []
def GetSuite(*args, **kwargs):
return TestSuite(*args, **kwargs)
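# For example, a test located under tests/proposals/js-types/ is run with
# ['--experimental-wasm-type-reflection', '--no-experimental-wasm-bulk-memory'],
# while tests outside any proposals/ directory get no extra source flags.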
|
import os
from collections import namedtuple
from random import randint
RAXML_PATH = 'raxmlHPC-PTHREADS-AVX'
MODELS = ['GTRGAMMA', 'GTRGAMMAI', 'GTRCAT', 'GTRCATI']
RAXML_INFO_PREFIX = 'RAxML_info'
RAXML_LOG_PREFIX = 'RAxML_log'
RAXML_RESULT_PREFIX = 'RAxML_result'
RAXML_BESTTREE_PREFIX = 'RAxML_bestTree'
RAXML_PARSTREE_PREFIX = 'RAxML_parsimonyTree'
RAXML_BSTREE_PREFIX = 'RAxML_bootstrap'
RAXML_BP_PREFIX = 'RAxML_bipartitions'
RAXML_BPBRANCH_PREFIX = 'RAxML_bipartitionsBranchLabels'
Parameter = namedtuple('Parameter', 'argument value')
def add_file_argument(argument, file_path, error_message='File {0} does not exist.', as_str=True):
if os.path.isfile(file_path):
parameter = Parameter(argument=argument, value=file_path)
else:
raise FileNotFoundError(error_message.format(file_path))
return '{argument} {value}'.format_map(parameter._asdict()) if as_str else parameter
def add_dir_argument(argument, dir_path, error_message='Directory {0} does not exist.', as_str=True,
make_absolute=True):
if os.path.isdir(dir_path):
dir_path = os.path.abspath(dir_path) if make_absolute else dir_path
parameter = Parameter(argument=argument, value=dir_path)
else:
raise NotADirectoryError(error_message.format(dir_path))
return '{argument} {value}'.format_map(parameter._asdict()) if as_str else parameter
def std_options_statement(partition_file_path=None, output_path=None,
model='GTRGAMMA', suffix: str='tree', threads: int=1):
cmd_lst = list()
cmd_lst.append('-n {suffix}'.format(suffix=suffix))
if partition_file_path:
cmd_lst.append(add_file_argument('-q', partition_file_path,
error_message='Partition file {0} does not exist.'))
if output_path:
cmd_lst.append(add_dir_argument('-w', output_path))
if model in MODELS:
cmd_lst.append('-m {model}'.format(model=model))
else:
raise ValueError('"{0}" is not a valid model.'.format(model))
if 'PTHREADS' in RAXML_PATH:
cmd_lst.append('-T {threads}'.format(threads=os.cpu_count() if threads == -1 else threads))
else:
if threads:
raise Warning('RAxML configured for this task is not multithreaded.')
return cmd_lst
def make_parsimony_tree(alignment_path, partition_file_path=None, output_path=None,
model='GTRGAMMA', suffix: str='pars_tree', seed: int=None, threads: int=1):
"""
Run RAxML to make a parsimony tree.
@param alignment_path: Path to alignment file in PHYLIP or FASTA formats
@param partition_file_path: Path to partition file in RAxML format
@param output_path: Directory where RAxML will write files
@param model: Model of Nucleotide substitution
Currently implemented are
- GTRGAMMA/I
- GTRCAT/I
@param suffix: File name suffix for all generated files
@param seed: Random seed for parsimony inference
@param threads: Number of threads for RAxML PTHREADS
@return: Dictionary of output files and corresponding filenames
"""
cmd_lst = std_options_statement(partition_file_path=partition_file_path, output_path=output_path,
model=model, suffix=suffix, threads=threads)
cmd_lst.insert(0, '{raxml} -y'.format(raxml=RAXML_PATH))
cmd_lst.append('-p {seed}'.format(seed=seed if seed else randint(0, 999999)))
cmd_lst.append(add_file_argument('-s', alignment_path, error_message='Alignment file {0} does not exist.'))
# print(' '.join(cmd_lst))
os.system(' '.join(cmd_lst))
outfiles = {'info': '{info}.{suffix}'.format(info=RAXML_INFO_PREFIX, suffix=suffix),
'tree': '{prefix}.{suffix}'.format(prefix=RAXML_PARSTREE_PREFIX, suffix=suffix)}
abs_output_path = os.path.abspath(output_path) if output_path else os.path.abspath('.')
return {k: os.path.join(abs_output_path, filename) for k, filename in outfiles.items()
if os.path.isfile(os.path.join(abs_output_path, filename))}
def integrate_bootstrap_trees(bootstrap_file_path, given_tree_file_path, partition_file_path=None,
output_path=None, model='GTRGAMMA', suffix: str='bp_tree', threads: int=1):
"""
Draw bipartitions on a given tree based on a set of multiple trees (bootstrap)
@param bootstrap_file_path: Path to multiple trees file in Newick format.
@param given_tree_file_path: Path to base tree in Newick format.
@param partition_file_path: Path to partition file in RAxML format.
If None, RAxML will perform an unpartitioned analysis.
@param output_path: Directory where RAxML will write files
@param model: Model of Nucleotide substitution
Currently implemented are
- GTRGAMMA/I
- GTRCAT/I
@param suffix: File name suffix for all generated files
@param threads: Number of threads for RAxML PTHREADS
@return: Dictionary of output files and corresponding filenames
"""
cmd_lst = std_options_statement(partition_file_path=partition_file_path, output_path=output_path,
model=model, suffix=suffix, threads=threads)
cmd_lst.insert(0, '{raxml} -f b'.format(raxml=RAXML_PATH))
cmd_lst.append(add_file_argument('-z', bootstrap_file_path, error_message='Bootstrap file {0} does not exist.'))
cmd_lst.append(add_file_argument('-t', given_tree_file_path,
error_message='Best tree file {0} does not exist.'))
# print(' '.join(cmd_lst))
os.system(' '.join(cmd_lst))
outfiles = {'info': '{info}.{suffix}'.format(info=RAXML_INFO_PREFIX, suffix=suffix),
'bptree': '{prefix}.{suffix}'.format(prefix=RAXML_BSTREE_PREFIX, suffix=suffix),
'bpbranchtree': '{prefix}.{suffix}'.format(prefix=RAXML_BPBRANCH_PREFIX, suffix=suffix)}
abs_output_path = os.path.abspath(output_path) if output_path else os.path.abspath('.')
return {k: os.path.join(abs_output_path, filename) for k, filename in outfiles.items()
if os.path.isfile(os.path.join(abs_output_path, filename))}
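# Usage sketch (the file names are placeholders; assumes the RAxML binary named in
# RAXML_PATH is on the PATH):
#
#   pars = make_parsimony_tree('alignment.phy', output_path='out',
#                              model='GTRGAMMA', suffix='pars', threads=4)
#   bp = integrate_bootstrap_trees('bootstraps.nwk', pars['tree'],
#                                  output_path='out', suffix='bp', threads=4)
#   print(bp.get('bpbranchtree'))   # tree annotated with bipartition support, if produced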
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: bigip_device_group
short_description: Manage device groups on a BIG-IP
description:
- Managing device groups allows you to create HA pairs and clusters
of BIG-IP devices. Usage of this module should be done in conjunction
with the C(bigip_configsync_actions) to sync the configuration across
the pair or cluster if auto-sync is disabled.
version_added: "1.0.0"
options:
name:
description:
- Specifies the name of the device group.
type: str
required: True
type:
description:
- Specifies the type of group.
- A C(sync-failover) device group contains devices that synchronize their
configuration data and fail over to one another when a device becomes
unavailable.
- A C(sync-only) device group has no such failover. When creating a new
device group, this option will default to C(sync-only).
- This setting cannot be changed once it has been set.
type: str
choices:
- sync-failover
- sync-only
description:
description:
- Description of the device group.
type: str
auto_sync:
description:
- Indicates whether configuration synchronization occurs manually or
automatically.
- When creating a new device group, this option will default to C(no).
type: bool
default: no
save_on_auto_sync:
description:
- When performing an auto-sync, specifies whether the configuration
will be saved or not.
- When C(no), only the running configuration will be changed on the
device(s) being synced to.
- When creating a new device group, this option will default to C(no).
type: bool
full_sync:
description:
- Specifies whether the system synchronizes the entire configuration
during synchronization operations.
- When C(no), the system performs incremental synchronization operations,
based on the cache size specified in C(max_incremental_sync_size).
- Incremental configuration synchronization is a mechanism for synchronizing
a device-group's configuration among its members, without requiring a
full configuration load for each configuration change.
- In order for this to work, all devices in the device-group must initially
agree on the configuration. Typically this requires at least one full
configuration load to each device.
- When creating a new device group, this option will default to C(no).
type: bool
max_incremental_sync_size:
description:
- Specifies the size of the changes cache for incremental sync.
- For example, using the default, if you make more than 1024 KB worth of
incremental changes, the system performs a full synchronization operation.
- Using incremental synchronization operations can reduce the per-device sync/load
time for configuration changes.
- This setting is relevant only when C(full_sync) is C(no).
type: int
state:
description:
- When C(state) is C(present), ensures the device group exists.
- When C(state) is C(absent), ensures the device group is removed.
type: str
choices:
- present
- absent
default: present
network_failover:
description:
- Indicates whether failover occurs over the network or is hard-wired.
- This parameter is only valid for C(type)s that are C(sync-failover).
type: bool
notes:
- This module is primarily used as a component of configuring HA pairs of
BIG-IP devices.
- Requires BIG-IP >= 12.1.x.
extends_documentation_fragment: f5networks.f5_modules.f5
author:
- Tim Rupp (@caphrim007)
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Create a sync-only device group
bigip_device_group:
name: foo-group
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Create a sync-only device group with auto-sync enabled
bigip_device_group:
name: foo-group
auto_sync: yes
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
'''
RETURN = r'''
save_on_auto_sync:
description: The new save_on_auto_sync value of the device group.
returned: changed
type: bool
sample: true
full_sync:
description: The new full_sync value of the device group.
returned: changed
type: bool
sample: false
description:
description: The new description of the device group.
returned: changed
type: str
sample: this is a device group
type:
description: The new type of the device group.
returned: changed
type: str
sample: sync-failover
auto_sync:
description: The new auto_sync value of the device group.
returned: changed
type: bool
sample: true
max_incremental_sync_size:
description: The new sync size of the device group.
returned: changed
type: int
sample: 1000
network_failover:
description: Whether or not network failover is enabled.
returned: changed
type: bool
sample: yes
'''
from datetime import datetime
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.parsing.convert_bool import BOOLEANS_TRUE
from ..module_utils.bigip import F5RestClient
from ..module_utils.common import (
F5ModuleError, AnsibleF5Parameters, f5_argument_spec
)
from ..module_utils.icontrol import tmos_version
from ..module_utils.teem import send_teem
class Parameters(AnsibleF5Parameters):
api_map = {
'saveOnAutoSync': 'save_on_auto_sync',
'fullLoadOnSync': 'full_sync',
'autoSync': 'auto_sync',
'incrementalConfigSyncSizeMax': 'max_incremental_sync_size',
'networkFailover': 'network_failover',
}
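# e.g. the REST attribute 'saveOnAutoSync' is exposed as the module option 'save_on_auto_sync'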
api_attributes = [
'saveOnAutoSync',
'fullLoadOnSync',
'description',
'type',
'autoSync',
'incrementalConfigSyncSizeMax',
'networkFailover',
]
returnables = [
'save_on_auto_sync',
'full_sync',
'description',
'type',
'auto_sync',
'max_incremental_sync_size',
'network_failover',
]
updatables = [
'save_on_auto_sync',
'full_sync',
'description',
'auto_sync',
'max_incremental_sync_size',
'network_failover',
]
@property
def max_incremental_sync_size(self):
if not self.full_sync and self._values['max_incremental_sync_size'] is not None:
if self._values['__warnings'] is None:
self._values['__warnings'] = []
self._values['__warnings'].append(
[
dict(
msg='"max_incremental_sync_size has no effect if "full_sync" is not true',
version='2.4'
)
]
)
if self._values['max_incremental_sync_size'] is None:
return None
return int(self._values['max_incremental_sync_size'])
class ApiParameters(Parameters):
@property
def network_failover(self):
if self._values['network_failover'] is None:
return None
elif self._values['network_failover'] == 'enabled':
return True
return False
@property
def auto_sync(self):
if self._values['auto_sync'] is None:
return None
elif self._values['auto_sync'] == 'enabled':
return True
return False
@property
def save_on_auto_sync(self):
if self._values['save_on_auto_sync'] is None:
return None
elif self._values['save_on_auto_sync'] in BOOLEANS_TRUE:
return True
else:
return False
@property
def full_sync(self):
if self._values['full_sync'] is None:
return None
elif self._values['full_sync'] in BOOLEANS_TRUE:
return True
else:
return False
class ModuleParameters(Parameters):
pass
class Changes(Parameters):
def to_return(self):
result = {}
try:
for returnable in self.returnables:
change = getattr(self, returnable)
if isinstance(change, dict):
result.update(change)
else:
result[returnable] = change
result = self._filter_params(result)
except Exception:
raise
return result
class UsableChanges(Changes):
@property
def network_failover(self):
if self._values['network_failover'] is None:
return None
elif self._values['network_failover']:
return 'enabled'
return 'disabled'
@property
def auto_sync(self):
if self._values['auto_sync'] is None:
return None
elif self._values['auto_sync']:
return 'enabled'
return 'disabled'
@property
def save_on_auto_sync(self):
if self._values['save_on_auto_sync'] is None:
return None
elif self._values['save_on_auto_sync'] in BOOLEANS_TRUE:
return "true"
else:
return "false"
@property
def full_sync(self):
if self._values['full_sync'] is None:
return None
elif self._values['full_sync'] in BOOLEANS_TRUE:
return "true"
else:
return "false"
class ReportableChanges(Changes):
@property
def network_failover(self):
if self._values['network_failover'] is None:
return None
elif self._values['network_failover'] == 'enabled':
return 'yes'
return 'no'
@property
def auto_sync(self):
if self._values['auto_sync'] is None:
return None
elif self._values['auto_sync'] == 'enabled':
return 'yes'
return 'no'
@property
def save_on_auto_sync(self):
if self._values['save_on_auto_sync'] is None:
return None
elif self._values['save_on_auto_sync'] in BOOLEANS_TRUE:
return "yes"
return "no"
@property
def full_sync(self):
if self._values['full_sync'] is None:
return None
elif self._values['full_sync'] in BOOLEANS_TRUE:
return "yes"
return "no"
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = F5RestClient(**self.module.params)
self.want = ModuleParameters(params=self.module.params)
self.have = ApiParameters()
self.changes = UsableChanges()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = UsableChanges(params=changed)
def _update_changed_options(self):
changed = {}
for key in Parameters.updatables:
if getattr(self.want, key) is not None:
attr1 = getattr(self.want, key)
attr2 = getattr(self.have, key)
if attr1 != attr2:
changed[key] = attr1
if changed:
self.changes = UsableChanges(params=changed)
return True
return False
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def exec_module(self):
start = datetime.now().isoformat()
version = tmos_version(self.client)
changed = False
result = dict()
state = self.want.state
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
send_teem(start, self.client, self.module, version)
return result
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
return True
def remove(self):
if self.module.check_mode:
return True
self.remove_members_in_group_from_device()
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the device group")
return True
def create(self):
self._set_changed_options()
if self.want.type == 'sync-only' and self.want.network_failover is not None:
raise F5ModuleError(
"'network_failover' may only be specified when 'type' is 'sync-failover'."
)
if self.module.check_mode:
return True
self.create_on_device()
return True
def absent(self):
if self.exists():
return self.remove()
return False
def exists(self):
errors = [401, 403, 409, 500, 501, 502, 503, 504]
uri = "https://{0}:{1}/mgmt/tm/cm/device-group/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
self.want.name
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status == 404 or 'code' in response and response['code'] == 404:
return False
if resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]:
return True
if resp.status in errors or 'code' in response and response['code'] in errors:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def remove_members_in_group_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/cm/device-group/{2}/devices/".format(
self.client.provider['server'],
self.client.provider['server_port'],
self.want.name
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status not in [200, 201] or 'code' in response and response['code'] not in [200, 201]:
raise F5ModuleError(resp.content)
for item in response['items']:
new_uri = uri + '{0}'.format(item['name'])
response = self.client.api.delete(new_uri)
if response.status == 200:
return True
raise F5ModuleError(response.content)
def create_on_device(self):
params = self.changes.api_params()
params['name'] = self.want.name
params['partition'] = self.want.partition
uri = "https://{0}:{1}/mgmt/tm/cm/device-group/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]:
return True
raise F5ModuleError(resp.content)
def update_on_device(self):
params = self.changes.api_params()
uri = "https://{0}:{1}/mgmt/tm/cm/device-group/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
self.want.name
)
resp = self.client.api.patch(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]:
return True
raise F5ModuleError(resp.content)
def remove_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/cm/device-group/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
self.want.name
)
response = self.client.api.delete(uri)
if response.status == 200:
return True
raise F5ModuleError(response.content)
def read_current_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/cm/device-group/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
self.want.name
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]:
return ApiParameters(params=response)
raise F5ModuleError(resp.content)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
type=dict(
choices=['sync-failover', 'sync-only']
),
description=dict(),
auto_sync=dict(
type='bool',
default='no'
),
save_on_auto_sync=dict(
type='bool',
),
full_sync=dict(
type='bool'
),
name=dict(
required=True
),
max_incremental_sync_size=dict(
type='int'
),
state=dict(
default='present',
choices=['absent', 'present']
),
network_failover=dict(type='bool'),
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode
)
try:
mm = ModuleManager(module=module)
results = mm.exec_module()
module.exit_json(**results)
except F5ModuleError as ex:
module.fail_json(msg=str(ex))
if __name__ == '__main__':
main()
|
# =========================================================================== #
# Name: myanimelist.py
# Purpose: Provide an interface to anime data; synchronize with the MyAnimeList
# server;
#
# Copyright (c) 2008 Gareth Latty
# Copyright (c) 2009 Sebastian Bartos
# Copyright (c) 2009 Daniel Anderson - dankles/evilsage4
#
# License: GPL v3, see COPYING file for details
# =========================================================================== #
import urllib
import urllib2
from cookielib import LWPCookieJar
import socket
from lib.beautifulsoup import BeautifulSoup
import re
import urlparse
from datetime import date, datetime
import os, time
from data import mal_anime_data_schema
from database import db as local_database
from globs import ac_log_path, ac_data_path
class anime_data(object):
"""
Anime data module. Reads and writes local anime data to disk, fetches and
    syncs with the MyAnimeList server.
username: login username
password: login password
db_path: path to database
db: local anime database that is a nested dict and has ASCII-fied series
    titles as keys and fields from mal_anime_data_schema as dict data.
"""
def __init__(self, **kw):
"""
        Set up credentials, read local data and set up the network connection
        environment. Optionally sync with MAL on startup.
        Does not take positional arguments. Keyword arguments can either be
        given individually (username, password, initsync) or as an
        ac_config() instance; the config instance itself is not retained.
        In the latter form we support some additional command line options.
"""
# When the architecture stabilizes, switch to config as the sole
# positional argument, and retain it instead of copying parts.
# That would also enable reconfiguration at runtime.
        # Avoid evaluating kw['config'] eagerly, so credentials can also be
        # passed individually without a config instance.
        self.username = kw['username'] if 'username' in kw else kw['config'].get('mal', 'username')
        self.password = kw['password'] if 'password' in kw else kw['config'].get('mal', 'password')
        initsync = kw['initsync'] if 'initsync' in kw else kw['config'].get('startup', 'sync')
try:
self.login = kw['config'].get('mal', 'login')
except KeyError:
# We need a default even if arguments were given individually.
self.login = True
try:
self.mirror = kw['config'].get('mal', 'mirror')
except KeyError:
self.mirror = None
# pull the local DB as a dictionary object
#self.db = {}
self.local_db = local_database()
self.db = self.local_db.get_db()
# setup cookie handler
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(LWPCookieJar()))
urllib2.install_opener(opener)
socket.setdefaulttimeout(40)
if initsync:
self.sync()
def save(self):
""" Only saves the current state to disk w/o network activity.
"""
self.local_db.set_db(self.db)
def sync(self):
"""
        Synchronize the local anime database with the MyAnimeList server.
(fetch -> compare -> push -> update local)
Return:
nested dict of remote updates with ASCII-fied series titles as
keys and a list of keys that got deleted on the MyAnimeList server.
"""
        # Three-way switch: login (un)successful or don't even try.
login = _login(self.username,self.password) if self.login else None
if login is False:
print 'Login failed..'
return False
remoteAnime_db = _getAnimeList(self.username, self.mirror)
if self.db:
# If local DB is already initialized then filter changes
# and push local updates
(remote_updates, local_updates, deleted_entry_keys) = \
_filter_sync_changes(remoteAnime_db, self.db)
_logchanges(remote_updates, local_updates, deleted_entry_keys)
if login:
_push_list(local_updates)
else:
                print 'Warning! Your local data goes out of sync'
# update local anime list with changes
for key in deleted_entry_keys:
del self.db[key]
for key, value in remote_updates.items():
self.db[key] = value
# write to local DB
self.local_db.set_db(self.db)
return (remote_updates, deleted_entry_keys)
else:
# initialize local data, as it was empty before
self.db = remoteAnime_db
# write to local DB
self.local_db.set_db(self.db)
return (self.db, {})
def fetch(self):
"""
UNUSED
Only fetch anime data from MyAnimeList server (overwrites local data,
if existent). Useful for initializing and resetting local database.
Returns a copy of the fetched database on success, None on failure.
"""
        self.db = _getAnimeList(self.username, self.mirror)
# write to local DB
self.local_db.set_db(self.db)
return self.db
def _appInfoURL(user, status = 'all', typ = None):
"""
Safely generate a URL to get XML.
Type may be 'manga'.
"""
# Example taken from the site.
template = 'http://myanimelist.net/malappinfo.php?u=Wile&status=all&type=manga'
# Make tuple mutable.
parts = list(urlparse.urlparse(template))
# New parameters.
query = {'u': user}
if status:
query['status'] = status
if typ:
query['type'] = typ
# urlencode would literally output 'None'.
parts[4] = urllib.urlencode(query)
return urlparse.urlunparse(parts)
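# Hedged illustration (not part of the original code): _appInfoURL('Wile')
# yields roughly 'http://myanimelist.net/malappinfo.php?u=Wile&status=all'
# (the parameter order depends on dict ordering under urlencode).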
def _getAnimeList(username, mirror):
"""
Retrieve Anime XML from MyAnimeList server.
Returns: dictionary object.
    Ways in which the output of malAppInfo is *not* XML:
Declared as UTF-8 but contains illegal byte sequences (characters)
Uses entities inside CDATA, which is exactly the wrong way round.
It further disagrees with the Expat C extension behind minidom:
Contains tabs and newlines outside of tags.
"""
# This function should be broken up and partly refactored into
# the class to be better configurable.
fetch_url = _appInfoURL(username)
try:
fetch_response = open(mirror, 'rb')
    except (TypeError, IOError):
        # No usable mirror file (open(None) raises TypeError, a missing file
        # raises IOError); fall back to fetching from the server.
        fetch_response = urllib2.urlopen(fetch_url)
# BeautifulSoup could do the read() and unicode-conversion, if it
# weren't for the illegal characters, as it internally doesn't
# use 'replace'.
fetch_response = unicode(fetch_response.read(), 'utf-8', 'replace')
xmldata = BeautifulSoup.BeautifulStoneSoup(fetch_response)
# For unknown reasons it doesn't work without recursive.
# Nor does iterating over myanimelist.anime. BS documentation broken?
anime_nodes = xmldata.myanimelist.findAll('anime', recursive = True)
# We have to manually convert after getting them out of the CDATA.
entity = lambda m: BeautifulSoup.Tag.XML_ENTITIES_TO_SPECIAL_CHARS[m.group(1)]
# Walk through all the anime nodes and convert the data to a python
# dictionary.
ac_remote_anime_dict = dict()
for anime in anime_nodes:
# ac_node builds the output of our function. Everything added to it
# must either be made independent of the parse tree by calling
        # NavigableString.extract() or, preferably, be turned into a
# different type like unicode(). This is a side-effect of using
# non-mutators like string.strip()
# Failing to do this will crash cPickle.
ac_node = dict()
for node, typ in mal_anime_data_schema.iteritems():
try:
value = getattr(anime, node).string.strip()
# One would think re.sub directly accepts string subclasses
# like NavigableString. Raises a TypeError, though.
value = re.sub(r'&(\w+);', entity, value)
except AttributeError:
continue
if typ is datetime:
# process my_last_updated unix timestamp
ac_node[node] = datetime.fromtimestamp(int(value))
elif typ is int:
# process integer slots
ac_node[node] = int(value)
elif typ is date and value != '0000-00-00':
                # process date slots
(y,m,d) = value.split('-')
(y,m,d) = int(y), int(m), int(d)
if y and m and d:
ac_node[node] = date(y,m,d)
else:
# process string slots
ac_node[node] = value
# series titles are used as anime identifiers
        # the keys for the resulting dictionary are encoded to UTF-8 byte
        # strings, so they can be simply put into shelves
key = ac_node['series_title'].encode('utf-8')
# add node entry to the resulting nodelist
ac_remote_anime_dict[key] = ac_node
# the resulting dict is like this:
# {<ASCII-fied key from title>: {<mal_anime_data_schema-fields>: <values>}, ...}
return ac_remote_anime_dict
def _logchanges(remote, local, deleted):
""" Writes changes to logfile.
"""
f = open(ac_log_path, 'a')
now = str(int(time.mktime(datetime.now().timetuple())))
for key, value in remote.items():
f.write(now + ': Fetching "' + key +
'" episode ' + str(value['my_watched_episodes']) + '\n')
for key, value in local.items():
f.write(now + ': Pushing "' + key +
'" episode ' + str(value['my_watched_episodes']) + '\n')
for entry in deleted:
f.write(now + ': Deleted "' + entry + '"\n')
f.close()
def _login(username, password):
"""
Log in to MyAnimeList server.
Returns: True on success, False on failure
"""
# prepare login data
login_base_url = 'http://myanimelist.net/login.php'
headers = {
'User-Agent': 'anichou',
'Content-Type': 'application/x-www-form-urlencoded'}
login_data = urllib.urlencode({
'username': username,
'password': password,
'cookie': 1,
'sublogin': 'Login'})
    # prepare the login request (passing data makes it a POST request)
login_request = urllib2.Request(login_base_url, login_data, headers)
# try to connect and authenticate with MyAnimeList server
try:
login_response = urllib2.urlopen(login_request).read()
except urllib2.URLError, e:
if hasattr(e, 'reason'):
print 'Failed to reach myanimelist.net.'
print 'Reason: ', e.reason
elif hasattr(e, 'code'):
print 'The server couldn\'t fulfill the request.'
print 'Error code: ', e.code
return False
# check if login was successful
if not login_response.count('<div class="badresult">'):
if login_response == "Couldn't open s-database. Please contact Xinil.":
return False
return True
else:
return False
def _filter_sync_changes(ac_remote_anime_dict, ac_local_anime_dict):
"""
Private Method
Compares the anime entry my_last_updated in both parameters and returns two
dictionaries of changed values of both parameters.
The one for the local dictionary can be used to push changes to the mal
server while the other can be used to update the local display and database.
Returns:
remote_updates: changes that are more up to date on the server
local_updates: changes that are more up to date locally
        deleted_entry_keys: keys that are in the local database, but not in the
remote list.
"""
remote_updates = dict()
local_updates = dict()
    # search for entirely new entries and deleted entries
remote_keys = ac_remote_anime_dict.keys()
local_keys = ac_local_anime_dict.keys()
deleted_entry_keys = \
filter(lambda x:x not in remote_keys, local_keys)
new_entry_keys = \
filter(lambda x:x not in local_keys, remote_keys)
for key in new_entry_keys:
remote_updates[key] = ac_remote_anime_dict[key]
# search in both dictionaries for differing update keys and append to the
# other's updates depending on which key is newer
common_keys = filter(lambda x:x in local_keys, remote_keys)
for key in common_keys:
remote_timestamp = ac_remote_anime_dict[key]['my_last_updated']
local_timestamp = ac_local_anime_dict[key]['my_last_updated']
if remote_timestamp > local_timestamp:
remote_updates[key] = ac_remote_anime_dict[key]
elif remote_timestamp < local_timestamp:
local_updates[key] = ac_local_anime_dict[key]
return (remote_updates, local_updates, deleted_entry_keys)
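# Hedged illustration (not part of the original module): the three-way split
# produced by _filter_sync_changes for a toy pair of dictionaries; only the
# my_last_updated timestamps matter for the comparison, and all titles are
# made up.
def _filter_sync_changes_example():
    remote = {
        'Series A': {'my_last_updated': datetime(2009, 6, 2)},  # newer remotely
        'Series C': {'my_last_updated': datetime(2009, 6, 1)},  # new on the server
    }
    local = {
        'Series A': {'my_last_updated': datetime(2009, 6, 1)},
        'Series B': {'my_last_updated': datetime(2009, 6, 1)},  # gone on the server
    }
    # -> ({'Series A': ..., 'Series C': ...}, {}, ['Series B'])
    return _filter_sync_changes(remote, local)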
def _push_list(local_updates):
"""
    Private Method
Updates every entry in the local updates dictionary to the mal server.
Should be called after the local updates are determined with the
filter_sync_changes function.
Returns:
True on success, False on failure
"""
headers = {
'User-Agent': 'anichou',
'Content-Type': 'application/x-www-form-urlencoded'}
for anime in local_updates.values():
# construct push request for entry update
postdata = urllib.urlencode({
# id entry
'series_animedb_id': str(anime['series_animedb_id']),
'series_title': str(anime['series_animedb_id']),
# set interesting values
'completed_eps': str(anime['my_watched_episodes']),
'status': str(anime['my_status']),
'score': str(anime['my_score']),
# protocol stuff
'close_on_update': 'true',
'submitIt': 2 })
push_base_url = \
'http://myanimelist.net/panel.php?keepThis=true&go=edit&id=' + \
str(anime['my_id']) + '&hidenav=true&TB_iframe=false'
push_request = urllib2.Request(push_base_url, postdata, headers)
# push update request
try:
response = urllib2.urlopen(push_request)
# print response.read() # -- for testing
        except urllib2.URLError, e:
if hasattr(e, 'reason'):
print 'We failed to reach a server.'
print 'Reason: ', e.reason
elif hasattr(e, 'code'):
print 'The server couldn\'t fulfill the request.'
print 'Error code: ', e.code
return False
return True
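# Hedged usage sketch (not part of the original module), assuming credentials
# are passed individually rather than via an ac_config() instance; sync() and
# save() hit the network and the local database, so the calls are shown as
# comments only:
#
#     data = anime_data(username='someuser', password='secret', initsync=False)
#     remote_updates, deleted = data.sync()
#     data.save()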
|
import os
from PyQt5.QtWidgets import QDialog, QVBoxLayout, QTabWidget, QDialogButtonBox
from umlfri2.application import Application
from umlfri2.application.events.application import LanguageChangedEvent
from .general import SettingsDialogGeneral
from .updates import SettingsDialogUpdates
class SettingsDialog(QDialog):
__tab_classes = [SettingsDialogGeneral, SettingsDialogUpdates]
def __init__(self, main_window):
super().__init__(main_window)
if os.name == 'nt':
self.setWindowTitle(_("Options"))
else:
self.setWindowTitle(_("Preferences"))
layout = QVBoxLayout()
self.__tabs = QTabWidget()
layout.addWidget(self.__tabs)
for tab_class in self.__tab_classes:
self.__tabs.addTab(tab_class(self), "")
self.__button_box = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel | QDialogButtonBox.Apply)
self.__button_box.button(QDialogButtonBox.Apply).clicked.connect(self.__apply_clicked)
self.__button_box.accepted.connect(self.__accept_clicked)
self.__button_box.rejected.connect(self.reject)
layout.addWidget(self.__button_box)
self.setLayout(layout)
Application().event_dispatcher.subscribe(LanguageChangedEvent, self.__language_changed)
self.__reload_texts()
def __apply_clicked(self, checked=False):
self.__apply_settings()
def __accept_clicked(self):
self.__apply_settings()
self.accept()
def __apply_settings(self):
for no in range(self.__tabs.count()):
self.__tabs.widget(no).apply_settings()
def __language_changed(self, event):
self.__reload_texts()
def __reload_texts(self):
self.__button_box.button(QDialogButtonBox.Ok).setText(_("Ok"))
self.__button_box.button(QDialogButtonBox.Cancel).setText(_("Cancel"))
self.__button_box.button(QDialogButtonBox.Apply).setText(_("Apply"))
for no in range(self.__tabs.count()):
widget = self.__tabs.widget(no)
widget.reload_texts()
self.__tabs.setTabText(no, widget.get_name())
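# Hedged usage sketch (not part of the original file): how a main window might
# open this dialog; "main_window" is assumed to be the application's main
# window widget.
def _open_settings_dialog_example(main_window):
    dialog = SettingsDialog(main_window)
    dialog.exec_()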
|
"""
The MIT License
Copyright (c) 2010 Ricky Rosario
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import httplib2
import oauth2
from urllib import urlencode
from datetime import date as datetype
try:
import json # python 2.6
except ImportError:
import simplejson as json # python 2.4 to 2.5
try:
from urlparse import parse_qs, parse_qsl
except ImportError:
from cgi import parse_qs, parse_qsl
REQUEST_TOKEN_URL = 'http://twitter.com/oauth/request_token'
ACCESS_TOKEN_URL = 'http://twitter.com/oauth/access_token'
AUTHORIZE_URL = 'http://twitter.com/oauth/authorize'
AUTHENTICATE_URL = 'http://twitter.com/oauth/authenticate'
DEFAULT_HTTP_HEADERS = {
"User-Agent" : "python-twitapi/0.1 (http://github.com/rlr/python-twitapi)"
}
class NoAuth(object):
"""
    No Authentication
"""
def make_request(self, url, method="GET", body=None, headers=None,
cache=None, timeout=None, proxy_info=None):
"""
Make a request using no authentication.
"""
client = httplib2.Http(
cache=cache,
timeout=timeout,
proxy_info=proxy_info
)
        return client.request(url, method, body, headers)
class BasicAuth(object):
"""
Basic Authentication
It uses the user's username and password for access to the Twitter API.
"""
def __init__(self, username, password):
self.username = username
self.password = password
def make_request(self, url, method="GET", body=None, headers=None,
cache=None, timeout=None, proxy_info=None):
"""
Make a request using Basic Authentication using the username
        and password provided.
"""
client = httplib2.Http(
cache=cache,
timeout=timeout,
proxy_info=proxy_info
)
client.add_credentials(self.username, self.password)
        return client.request(url, method, body, headers)
class OAuth(object):
"""
OAuth Authentication
It uses the application's consumer key and secret and user's access token
key and secret for access to the Twitter API.
"""
consumer = None
token = None
def __init__(self, consumer_key, consumer_secret,
token=None, token_secret=None):
self.consumer = oauth2.Consumer(key=consumer_key,
secret=consumer_secret)
if token and token_secret:
self.token = {
"oauth_token":token,
"oauth_token_secret":token_secret
}
else:
self.token = None
def get_request_token(self, request_token_url=REQUEST_TOKEN_URL):
"""
Get the oauth request token.
"""
client = oauth2.Client(self.consumer)
resp, content = client.request(request_token_url, "GET")
if resp['status'] != '200':
raise Exception("Invalid response %s." % resp['status'])
return dict(parse_qsl(content))
def get_authorization_url(self, token=None, authorize_url=AUTHORIZE_URL):
'''
Create a URL for the user to authorize the application with the Twitter
API.
Returns:
The authorization URL
'''
if not token:
self.token = token = self.get_request_token()
return "%s?oauth_token=%s" % (authorize_url, token['oauth_token'])
def get_signin_url(self, token=None, authenticate_url=AUTHENTICATE_URL):
'''
Create a URL for the user to sign in to the application with Twitter.
Returns:
The sign-in URL
'''
if not token:
self.token = token = self.get_request_token()
return self.get_authorization_url(token, authenticate_url)
def get_access_token(self, oauth_verifier=None,
access_token_url=ACCESS_TOKEN_URL):
"""
Get the access token.
This should be called after user has authorized/authenticated.
If a PIN was provided, it should be passed as the oauth_verifier.
"""
token = oauth2.Token(self.token['oauth_token'],
self.token['oauth_token_secret'])
if oauth_verifier:
token.set_verifier(oauth_verifier)
client = oauth2.Client(self.consumer, token)
resp, content = client.request(access_token_url, "POST")
if resp['status'] != '200':
raise Exception("Invalid response %s." % resp['status'])
return dict(parse_qsl(content))
def set_token(self, token):
"""
Set the oauth token.
"""
self.token = token
def make_request(self, url, method="GET", body=None, headers=None,
cache=None, timeout=None, proxy_info=None):
"""
Make a request using OAuth authentication with the consumer key and
secret and the provided token.
"""
token = oauth2.Token(self.token['oauth_token'],
self.token['oauth_token_secret'])
client = oauth2.Client(
consumer=self.consumer,
token=token,
cache=cache,
timeout=timeout,
proxy_info=proxy_info
)
        return client.request(url, method, body, headers)
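# Hedged illustration (not part of the original module): the three-legged
# OAuth flow with the class above. The consumer key/secret and the PIN are
# placeholders, and the calls that hit the Twitter API are left commented out.
def _oauth_flow_example():
    auth = OAuth('CONSUMER_KEY', 'CONSUMER_SECRET')
    # url = auth.get_authorization_url()     # send the user to this URL
    # token = auth.get_access_token('PIN')   # PIN shown after authorizing
    # auth.set_token(token)
    return auth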
class Client(object):
"""
The Twitter API Client
A Twitter API client that can use Basic Authentication, OAuth, or no
authentication at all (for the methods that allow that).
To use.....
"""
auth = None
base_api_url = None
base_search_url = None
cache = None
timeout = None
proxy_info = None
def __init__(self, auth=None, base_api_url="http://api.twitter.com/1",
base_search_url="http://search.twitter.com", cache=None,
timeout=None, proxy_info=None):
if not auth:
auth = NoAuth()
self.auth = auth
self.base_api_url = base_api_url
self.base_search_url = base_search_url
self.cache = cache
self.timeout = timeout
self.proxy_info = proxy_info
def request(self, url, method="GET", body=None, headers=None):
"""
Make a request with the provided authentication.
The response is assumed to be json and is parsed to a dict that is
returned along with the response headers. If an exception is caught while
decoding the json, then the raw response body is returned (should only happen
if status != '200').
NOTE: Feels ugly.. Should I be doing this in a different way?
"""
if headers is None:
headers = DEFAULT_HTTP_HEADERS.copy()
resp, content = self.auth.make_request(url, method, body, headers,
self.cache, self.timeout, self.proxy_info)
try:
decoded = json.loads(content)
content = decoded
        except ValueError:
            # Not JSON (e.g. an HTML error page); leave the raw body as-is.
            pass
return resp, content
#####################
# Search API Methods
#####################
def search(self, q, **kwargs):
"""
Returns tweets that match a specified query.
See the Twitter Search API documentation for all the parameters:
http://apiwiki.twitter.com/Twitter-Search-API-Method:-search
Example::
# create the client (authentication not required for search)
twitter = Client()
# search for beer
resp, search_results = twitter.search('beer')
"""
params = kwargs
if q:
params['q'] = q
return self.request(self.base_search_url+'/search.json?%s' %
urlencode(params), "GET")
def trends(self):
"""
Returns the top ten topics that are currently trending on Twitter.
The response includes the time of the request, the name of each
trend, and the url to the Twitter Search results page for that topic.
Example::
# create the client (authentication not required for trends)
twitter = Client()
# get the trending topics
resp, trending = twitter.trends()
"""
return self.request(self.base_search_url+'/trends.json', "GET")
def trends_current(self, exclude=None):
"""
Returns the current top 10 trending topics on Twitter. The response
        includes the time of the request, the name of each trending topic,
and query used on Twitter Search results page for that topic.
Setting exclude parameter to 'hashtags' will remove all hashtags
from the trends list.
Example::
# create the client (authentication not required for trends)
twitter = Client()
# get the current trending topics, no hashtags
resp, trending = twitter.trends_current(exclude='hashtags')
"""
params = get_params_dict(exclude=exclude)
return self.request(self.base_search_url+'/trends/current.json?%s' %
urlencode(params), "GET")
def trends_daily(self, date=None, exclude=None):
"""
Returns the top 20 trending topics for each hour in a given day.
Setting exclude parameter to 'hashtags' will remove all hashtags
from the trends list.
Example::
# create the client (authentication not required for trends)
twitter = Client()
# get the today's trending topics
from datetime import date
resp, trending = twitter.trends_daily(date=date.today())
"""
if isinstance(date, datetype):
date = date.strftime('%Y-%m-%d')
params = get_params_dict(date=date, exclude=exclude)
return self.request(self.base_search_url+'/trends/daily.json?%s' %
urlencode(params), "GET")
def trends_weekly(self, date=None, exclude=None):
"""
Returns the top 30 trending topics for each day in a given week.
date parameter specifies a start date for the report.
Setting exclude parameter to 'hashtags' will remove all hashtags
from the trends list.
Example::
# create the client (authentication not required for trends)
twitter = Client()
# get the trending topics for a week
resp, trending = twitter.trends_weekly()
"""
if isinstance(date, datetype):
date = date.strftime('%Y-%m-%d')
params = get_params_dict(date=date, exclude=exclude)
return self.request(self.base_search_url+'/trends/weekly.json?%s' %
urlencode(params), "GET")
###################
# Timeline Methods
###################
def statuses_home_timeline(self, since_id=None, max_id=None, count=None,
page=None):
"""
Returns the most recent statuses, including retweets, posted by the
authenticating user and that user's friends.
"""
params = get_params_dict(since_id=since_id, max_id=max_id,
count=count, page=page)
return self.request(self.base_api_url+
'/statuses/home_timeline.json?%s' % urlencode(params), "GET")
def statuses_friends_timeline(self, since_id=None, max_id=None, count=None,
page=None):
"""
Returns the most recent statuses posted by the authenticating user and
that user's friends.
Note: Retweets will not appear in the friends_timeline for backwards
compatibility. If you want retweets included use home_timeline.
"""
params = get_params_dict(since_id=since_id, max_id=max_id,
count=count, page=page)
return self.request(self.base_api_url+
'/statuses/friends_timeline.json?%s' % urlencode(params),
"GET")
def statuses_user_timeline(self, user_id=None, screen_name=None,
since_id=None, max_id=None, count=None, page=None):
"""
Returns the most recent statuses posted from the authenticating user.
It's also possible to request another user's timeline via the user_id
or screen_name parameter.
"""
params = get_params_dict(user_id=user_id, screen_name=screen_name,
since_id=since_id, max_id=max_id, count=count, page=page)
return self.request(self.base_api_url+
'/statuses/user_timeline.json?%s' % urlencode(params), "GET")
def statuses_mentions(self, since_id=None, max_id=None, count=None,
page=None):
"""
Returns the most recent mentions (status containing @username) for
the authenticating user.
"""
params = get_params_dict(since_id=since_id, max_id=max_id,
count=count, page=page)
return self.request(self.base_api_url+
'/statuses/mentions.json?%s' % urlencode(params), "GET")
def statuses_retweeted_by_me(self, since_id=None, max_id=None, count=None,
page=None):
"""
Returns the most recent retweets posted by the authenticating user.
"""
params = get_params_dict(since_id=since_id, max_id=max_id,
count=count, page=page)
return self.request(self.base_api_url+
'/statuses/retweeted_by_me.json?%s' % urlencode(params), "GET")
def statuses_retweeted_to_me(self, since_id=None, max_id=None, count=None,
page=None):
"""
Returns the most recent retweets posted by the authenticating user's
friends.
"""
params = get_params_dict(since_id=since_id, max_id=max_id,
count=count, page=page)
return self.request(self.base_api_url+
'/statuses/retweeted_to_me.json?%s' % urlencode(params), "GET")
def statuses_retweeted_of_me(self, since_id=None, max_id=None, count=None,
page=None):
"""
Returns the most recent tweets of the authenticated user that have
been retweeted by others.
"""
params = get_params_dict(since_id=since_id, max_id=max_id,
count=count, page=page)
return self.request(self.base_api_url+
'/statuses/retweeted_of_me.json?%s' % urlencode(params), "GET")
#################
# Status Methods
#################
def statuses_show(self, id):
"""
Returns a single status, specified by the id parameter. The status's
author will be returned inline.
"""
        return self.request(self.base_api_url+
            '/statuses/show/%s.json' % id, "GET")
def statuses_update(self, status, in_reply_to_status_id=None, lat=None,
long=None, place_id=None, display_coordinates=None):
"""
Updates the authenticating user's status.
Note: A status update with text identical to the authenticating
user's current status will be ignored to prevent duplicates.
"""
params = get_params_dict(status=status,
in_reply_to_status_id=in_reply_to_status_id,
lat=lat, long=long, place_id=place_id,
display_coordinates=display_coordinates)
return self.request(self.base_api_url+'/statuses/update.json', "POST",
urlencode(params))
def statuses_destroy(self, id):
"""
Destroys the status specified by the required ID parameter. The
authenticating user must be the author of the specified status.
"""
return self.request(self.base_api_url+'/statuses/destroy/%s.json' % id,
"POST")
def statuses_retweet(self, id):
"""
        Retweets a tweet. Requires the id parameter of the tweet you are retweeting.
Returns the original tweet with retweet details embedded.
"""
return self.request(self.base_api_url+'/statuses/retweet/%s.json' % id,
"POST")
def statuses_retweets(self, id, count=None):
"""
Returns up to 100 of the first retweets of a given tweet.
"""
params = get_params_dict(count=count)
return self.request(self.base_api_url+'/statuses/retweets/%s.json?%s' %
(id, urlencode(params)), "GET")
###############
# User Methods
###############
def users_show(self, user_id=None, screen_name=None):
"""
Returns extended information of a given user, specified by ID or
screen name as per the required id parameter. The author's most
recent status will be returned inline.
"""
if not user_id and not screen_name:
raise Exception("A user_id or screen_name must be provided.")
if user_id and screen_name:
raise Exception("A user_id OR screen_name must be provided.")
params = get_params_dict(user_id=user_id, screen_name=screen_name)
return self.request(self.base_api_url+'/users/show.json?%s' %
urlencode(params), "GET")
def users_lookup(self, user_id=None, screen_name=None):
"""
Return up to 100 users worth of extended information, specified by
either ID, screen name, or combination of the two. The author's most
recent status (if the authenticating user has permission) will be
returned inline.
"""
if user_id and not isinstance(user_id, str) and \
not isinstance(user_id, int):
user_id = ",".join(user_id)
if screen_name and not isinstance(screen_name, str):
screen_name = ",".join(screen_name)
params = get_params_dict(user_id=user_id, screen_name=screen_name)
return self.request(self.base_api_url+'/users/lookup.json?%s' %
urlencode(params), "GET")
def users_search(self, q, per_page=None, page=None):
"""
        Run a search for users, similar to the Find People button on
        Twitter.com; the same results returned by people search on
        Twitter.com will be returned by using this API.
It is only possible to retrieve the first 1000 matches from this API.
"""
params = get_params_dict(q=q, per_page=per_page, page=page)
return self.request(self.base_api_url+'/users/search.json?%s' %
urlencode(params), "GET")
def users_suggestions(self):
"""
Access to Twitter's suggested user list. This returns the list of
suggested user categories. The category can be used in the
users_suggestions_category method to get the users in that category.
"""
return self.request(self.base_api_url+'/users/suggestions.json', "GET")
def users_suggestions_category(self, slug):
"""
Access the users in a given category of the Twitter suggested user
list.
"""
return self.request(self.base_api_url+'/users/suggestions/%s.json' %
slug, "GET")
def statuses_friends(self, user_id=None, screen_name=None, cursor=None):
"""
Returns a user's friends, each with current status inline. They are
ordered by the order in which the user followed them, most recently
followed first, 100 at a time.
Use the cursor option to access older friends. With no user specified,
request defaults to the authenticated user's friends. It's also
possible to request another user's friends list via the id,
screen_name or user_id parameter.
"""
params = get_params_dict(user_id=user_id, screen_name=screen_name,
cursor=cursor)
return self.request(self.base_api_url+'/statuses/friends.json?%s' %
urlencode(params), "GET")
def statuses_followers(self, user_id=None, screen_name=None, cursor=None):
"""
Returns the authenticating user's followers, each with current status
inline. They are ordered by the order in which they followed the user,
100 at a time.
Use the cursor option to access earlier followers.
"""
params = get_params_dict(user_id=user_id, screen_name=screen_name,
cursor=cursor)
return self.request(self.base_api_url+'/statuses/followers.json?%s' %
urlencode(params), "GET")
###############
# List Methods
###############
def create_list(self, user, name, mode=None, description=None):
"""
Creates a new list for the authenticated user.
Accounts are limited to 20 lists.
"""
params = get_params_dict(name=name, mode=mode,
description=description)
return self.request(self.base_api_url+'/%s/lists.json' % user,
"POST", urlencode(params))
def update_list(self, user, id, name=None, mode=None, description=None):
"""
Updates the specified list.
"""
params = get_params_dict(name=name, mode=mode,
description=description)
return self.request(self.base_api_url+'/%s/lists/%s.json' %
(user, id), "POST", urlencode(params))
def get_lists(self, user, cursor=None):
"""
List the lists of the specified user.
        Private lists will be included if the authenticated user is the same
        as the user whose lists are being returned.
"""
params = get_params_dict(cursor=cursor)
return self.request(self.base_api_url+'/%s/lists.json?%s' %
(user, urlencode(params)), "GET")
def get_list(self, user, id):
"""
Show the specified list.
Private lists will only be shown if the authenticated user owns the
specified list.
"""
return self.request(self.base_api_url+'/%s/lists/%s.json' %
(user, id), "GET")
def delete_list(self, user, id):
"""
Deletes the specified list. Must be owned by the authenticated user.
"""
return self.request(self.base_api_url+'/%s/lists/%s.json' %
(user, id), "DELETE")
def get_list_statuses(self, user, list_id, since_id=None, max_id=None,
per_page=None, page=None):
"""
Show tweet timeline for members of the specified list.
"""
params = get_params_dict(since_id=since_id, max_id=max_id,
per_page=per_page, page=page)
        return self.request(self.base_api_url+
                            '/%s/lists/%s/statuses.json?%s' %
                            (user, list_id, urlencode(params)), "GET")
def get_list_memberships(self, user, cursor=None):
"""
List the lists the specified user has been added to.
"""
params = get_params_dict(cursor=cursor)
return self.request(self.base_api_url+
'/%s/lists/memberships.json?%s' %
(user, urlencode(params)), "GET")
def get_list_subscriptions(self, user, cursor=None):
"""
List the lists the specified user follows.
"""
params = get_params_dict(cursor=cursor)
return self.request(self.base_api_url+
'/%s/lists/subscriptions.json?%s' %
(user, urlencode(params)), "GET")
#######################
# List Members Methods
#######################
def get_list_members(self, user, list_id, cursor=None):
"""
Returns the members of the specified list.
"""
params = get_params_dict(cursor=cursor)
return self.request(self.base_api_url+
'/%s/%s/members.json?%s' %
(user, list_id, urlencode(params)), "GET")
def add_list_member(self, user, list_id, id):
"""
Add a member to a list.
id is the user's user_id or screen_name to add.
The authenticated user must own the list to be able to add members to
it. Lists are limited to having 500 members.
"""
params = get_params_dict(id=id)
return self.request(self.base_api_url+'/%s/%s/members.json' %
(user, list_id), "POST", urlencode(params))
def delete_list_member(self, user, list_id, id):
"""
Removes the specified member from the list.
id is the user's user_id or screen_name to delete from the list.
The authenticated user must be the list's owner to remove members
from the list.
"""
params = get_params_dict(id=id)
return self.request(self.base_api_url+'/%s/%s/members.json?%s' %
(user, list_id, urlencode(params)), "DELETE")
def get_list_members_id(self, user, list_id, id):
"""
Check if a user is a member of the specified list.
id is the user_id or screen_name of the user who you want to know
is a member or not of the specified list.
"""
return self.request(self.base_api_url+ '/%s/%s/members/%s.json' %
(user, list_id, id), "GET")
###########################
# List Subscribers Methods
###########################
def get_list_subscribers(self, user, list_id, cursor=None):
"""
Returns the subscribers of the specified list.
"""
params = get_params_dict(cursor=cursor)
return self.request(self.base_api_url+
'/%s/%s/subscribers.json?%s' %
(user, list_id, urlencode(params)), "GET")
def subscribe_to_list(self, user, list_id):
"""
Make the authenticated user follow the specified list.
"""
return self.request(self.base_api_url+'/%s/%s/subscribers.json' %
(user, list_id), "POST")
def unsubscribe_from_list(self, user, list_id):
"""
        Unsubscribes the authenticated user from the specified list.
"""
return self.request(self.base_api_url+'/%s/%s/subscribers.json' %
(user, list_id), "DELETE")
def get_list_subscribers_id(self, user, list_id, id):
"""
Check if the specified user is a subscriber of the specified list.
id is the user_id or screen_name of the user who you want to know
is a subscriber or not of the specified list.
"""
return self.request(self.base_api_url+ '/%s/%s/subscribers/%s.json' %
(user, list_id, id), "GET")
#########################
# Direct Message Methods
#########################
def direct_messages(self, since_id=None, max_id=None, count=None,
page=None):
"""
Returns a list of the most recent direct messages sent to the
authenticating user. Includes detailed information about the
sending and recipient users.
"""
params = get_params_dict(since_id=since_id, max_id=max_id,
count=count, page=page)
        return self.request(self.base_api_url+'/direct_messages.json?%s' %
                            urlencode(params), "GET")
def direct_messages_sent(self, since_id=None, max_id=None, count=None,
page=None):
"""
Returns a list of the most recent direct messages by the
authenticating user. Includes detailed information about the
sending and recipient users.
"""
params = get_params_dict(since_id=since_id, max_id=max_id,
count=count, page=page)
        return self.request(self.base_api_url+'/direct_messages/sent.json?%s' %
                            urlencode(params), "GET")
def direct_messages_new(self, user, text):
"""
Sends a new direct message to the specified user from the
authenticating user.
Returns the sent message in the requested format when successful.
"""
params = get_params_dict(user=user, text=text)
return self.request(self.base_api_url+'/direct_messages/new.json',
"POST", urlencode(params))
def direct_messages_destroy(self, id):
"""
Destroys the direct message specified in the required ID parameter.
The authenticating user must be the recipient of the specified
direct message.
"""
return self.request(self.base_api_url+
'/direct_messages/destroy/%s.json' % id, "DELETE")
#####################
# Friendship Methods
#####################
def friendships_create(self, user_id=None, screen_name=None, follow=False):
"""
Allows the authenticating users to follow the user specified in
the ID parameter. Returns the befriended user in the requested
format when successful. Returns a string describing the failure
condition when unsuccessful. If you are already friends with the
user an HTTP 403 will be returned.
Setting follow to True enables notifications for the target user
in addition to becoming friends.
"""
if not user_id and not screen_name:
raise Exception("A user_id or screen_name must be provided.")
if user_id and screen_name:
raise Exception("A user_id OR screen_name must be provided.")
if follow:
follow = 'true'
params = get_params_dict(user_id=user_id, screen_name=screen_name,
follow=follow)
return self.request(self.base_api_url+'/friendships/create.json',
"POST", urlencode(params))
def friendships_destroy(self, user_id=None, screen_name=None):
"""
Allows the authenticating users to unfollow the user specified in
the ID parameter. Returns the unfollowed user in the requested
format when successful. Returns a string describing the failure
condition when unsuccessful.
"""
if not user_id and not screen_name:
raise Exception("A user_id or screen_name must be provided.")
if user_id and screen_name:
raise Exception("A user_id OR screen_name must be provided.")
params = get_params_dict(user_id=user_id, screen_name=screen_name)
return self.request(self.base_api_url+'/friendships/destroy.json',
"POST", urlencode(params))
def friendships_exists(self, user_a, user_b):
"""
        Tests for the existence of friendship between two users.
Will return true if user_a follows user_b, otherwise will return false.
user_a and user_b can be the user_id or screen_name of the users.
"""
params = get_params_dict(user_a=user_a, user_b=user_b)
return self.request(self.base_api_url+'/friendships/exists.json?%s' %
urlencode(params), "GET")
#######################
# Social Graph Methods
#######################
def friends_ids(self, user_id=None, screen_name=None, cursor=None):
"""
Returns an array of numeric IDs for every user the specified user
is following.
"""
if not user_id and not screen_name:
raise Exception("A user_id or screen_name must be provided.")
if user_id and screen_name:
raise Exception("A user_id OR screen_name must be provided.")
params = get_params_dict(user_id=user_id, screen_name=screen_name,
cursor=cursor)
return self.request(self.base_api_url+'/friends/ids.json?%s' %
urlencode(params), "GET")
def followers_ids(self, user_id=None, screen_name=None, cursor=None):
"""
Returns an array of numeric IDs for every user following the
specified user.
"""
if not user_id and not screen_name:
raise Exception("A user_id or screen_name must be provided.")
if user_id and screen_name:
raise Exception("A user_id OR screen_name must be provided.")
params = get_params_dict(user_id=user_id, screen_name=screen_name,
cursor=cursor)
return self.request(self.base_api_url+'/followers/ids.json?%s' %
urlencode(params), "GET")
##################
# Account Methods
##################
def verify_credentials(self):
"""
Use this method to test if supplied user credentials are valid.
Returns an HTTP 200 OK response code and a representation of the
requesting user if authentication was successful; returns a 401
status code and an error message if not.
"""
return self.request(self.base_api_url+
'/account/verify_credentials.json', "GET")
def rate_limit_status(self):
"""
Returns the remaining number of API requests available to the
requesting user before the API limit is reached for the current hour.
If authentication credentials are provided, the rate limit status for
the authenticating user is returned. Otherwise, the rate limit status
for the requester's IP address is returned.
"""
return self.request(self.base_api_url+
'/account/rate_limit_status.json', "GET")
def get_params_dict(**kwargs):
"""
Utility function that returns a dict with the set parameters (not None)
"""
    for key in list(kwargs.keys()):
        if kwargs[key] is None:
            del kwargs[key]
return kwargs
__all__ = ["OAuth", "BasicAuth", "Client"]
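# Hedged usage sketch (not part of the original module): searching without
# authentication and posting a status with Basic Authentication. Credentials
# are placeholders, and both snippets hit the live Twitter API, so they are
# shown as comments only.
#
#     twitter = Client()                                  # no auth needed
#     resp, results = twitter.search('python')
#
#     twitter = Client(BasicAuth('username', 'password'))
#     resp, status = twitter.statuses_update('Hello from python-twitapi')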
|
# This uses the dev branch of guizero which needs to be linked to the appropriate
# directory - in future this will use the normal production version of guizero
from guizero.build.lib.guizero import App, Text, PushButton, info, MenuBar, Picture, yesno
import quizdetails
# For testing the gui without the arduino comment out the quizarduino entry and replace with quizarduinodev
import quizarduino
#import quizarduinodev as quizarduino
import quizstrings
import time
from tkinter import filedialog
class QuizApp():
## These values are hardcoded here in this version
quiz_filename = "quizzes/quiz1.json"
serial_port = '/dev/ttyACM0'
def __init__ (self, app):
self.app = app
# Load Strings for pages
self.strings = quizstrings.QuizStrings()
self.strings.load()
# Questions are held in QuizDetails
self.quiz = quizdetails.QuizDetails()
# Setup serial connection to arduino
self.arduino = quizarduino.QuizArduino(self.serial_port)
self.arduino.connect()
# send blue to indicate startup
self.arduino.send_recv ([3,3,3,3,3,3])
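        # Note (inferred from the rest of this file, not stated here): the six
        # values are per-question LED states sent to the Arduino; 3 shows blue,
        # and end_quiz() uses 1 for a correct answer and 2 for an incorrect one.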
def open_quiz_file(self):
filename = filedialog.askopenfilename(initialdir = "quizzes/",title = "Select file",filetypes = (("Quiz files","*.json"),("all files","*.*")))
# If valid filename then update
if (filename):
self.quiz_filename = filename
self.load_quiz()
self.home()
# Updates screen to a different page
# Updates button labels, but not their functions
def upd_page(self, page_name):
page_strings = self.strings.getPage(page_name)
self.text_title.value = self.strings.getTitle()
self.text_question_title.value = page_strings["title"]
self.text_question_details_1.value = page_strings["details"][0]
self.text_question_details_2.value = page_strings["details"][1]
self.text_question_details_3.value = page_strings["details"][2]
self.text_question_details_4.value = page_strings["details"][3]
self.text_question_details_5.value = page_strings["details"][4]
self.text_question_details_6.value = page_strings["details"][5]
self.text_question_option_1.value = page_strings["options"][0]
self.text_question_option_2.value = page_strings["options"][1]
self.text_question_option_3.value = page_strings["options"][2]
self.text_question_option_4.value = page_strings["options"][3]
self.image_question.value = "images/"+page_strings["image"]
self.left_button.text = page_strings["left_button"]
self.right_button.text = page_strings["right_button"]
# Set home page with appropriate values
def home(self):
self.upd_page("home")
# update buttons
# left button does nothing (returns here)
self.left_button.change_command(self.home)
self.right_button.change_command(self.start_quiz)
    # Updates buttons on gui to reflect first and last questions
    # Also lights the LED for the current question
def upd_buttons(self):
if self.quiz.isFirst():
self.left_button.text="Return"
self.left_button.change_command(self.home)
else:
self.left_button.text="<< Previous"
self.left_button.change_command(self.prev_question)
if self.quiz.isLast():
self.right_button.text="End Quiz"
self.right_button.change_command(self.end_quiz)
else:
self.right_button.text="Next >>"
self.right_button.change_command(self.next_question)
# Light up the current question
status_leds = [0,0,0,0,0,0]
status_leds[self.quiz.getQuestionNum()] = 3
self.arduino.send_recv(status_leds)
# Load quiz from disk
def load_quiz(self):
self.quiz.load(self.quiz_filename)
pass
# Start the quiz
def start_quiz(self):
self.load_quiz()
self.text_title.value = self.quiz.getTitle()
self.upd_question()
self.upd_buttons()
#print ("Start Quiz - Q "+str(self.quiz.getQuestionNum()))
# Update display of question
def upd_question(self):
#print ("Show question - Q "+str(self.quiz.getQuestionNum()))
this_question = self.quiz.getQuestion()
self.text_question_title.value = this_question.getTitle()
details = this_question.getDetails()
self.text_question_details_1.value = details[0]
self.text_question_details_2.value = details[1]
self.text_question_details_3.value = details[2]
self.text_question_details_4.value = details[3]
self.text_question_details_5.value = details[4]
self.text_question_details_6.value = details[5]
options = this_question.getOptions()
self.text_question_option_1.value = options[0]
self.text_question_option_2.value = options[1]
self.text_question_option_3.value = options[2]
self.text_question_option_4.value = options[3]
self.image_question.value = "images/"+this_question.getImage()
# Move to prev question
def prev_question(self):
self.quiz.prevQuestion()
self.upd_question()
self.upd_buttons()
# Move to next question
def next_question(self):
#print ("Nex Q - was "+str(self.quiz.getQuestionNum()))
self.quiz.nextQuestion()
self.upd_question()
self.upd_buttons()
#print ("Nex Q - now "+str(self.quiz.getQuestionNum()))
# Allows to restart and retry same quiz
def review(self):
# Reset to question 1 and restart
self.quiz.setQuestionNum(0)
self.upd_question()
self.upd_buttons()
# End quiz
def end_quiz(self):
# Check with user they really want to end
mark_quiz = yesno("Exam completed", "Have you answered all the questions?")
if (mark_quiz == False):
return
# Set all leds blue to indicate marking and get status
status_leds = [3,3,3,3,3,3]
given_answers = self.arduino.send_recv(status_leds)
score = 0
# compare given_answers with correct answers
details = []
for i in range (0,6):
# get the question
this_question = self.quiz.getQuestion(i)
# compare whether answer correct
#print ("Question "+str(i)+" given answer "+str(given_answers[i])+" correct answer "+str(this_question.getAnswer()))
if (given_answers[i] == this_question.getAnswer()):
# correct answer
score += 1
details.append(this_question.getTitle()+ " is correct, Answer = "+ this_question.getAnswerLetter())
status_leds[i] = 1
else:
details.append(this_question.getTitle()+ " is incorrect, Correct answer = "+ this_question.getAnswerLetter())
status_leds[i] = 2
self.text_question_title.value = "Results"
self.text_question_details_1.value = details[0]
self.text_question_details_2.value = details[1]
self.text_question_details_3.value = details[2]
self.text_question_details_4.value = details[3]
self.text_question_details_5.value = details[4]
self.text_question_details_6.value = details[5]
# Set eval based on score
if (score < 2) :
eval_string = "Your network is NOT working"
eval_image = "poor.gif"
elif (score > 4) :
eval_string = "High speed network"
eval_image = "good.gif"
else:
eval_string = "Network performance acceptable"
eval_image = "average.gif"
# Show score and updated image
self.text_question_option_1.value = ""
self.text_question_option_2.value = "Score "+str(score)+" out of 6"
self.text_question_option_3.value = ""
self.text_question_option_4.value = eval_string
self.image_question.value = "images/"+eval_image
# Update LEDs with status
self.arduino.send_recv(status_leds)
# Set back button "Review" - goes back to first question to allow retry
self.left_button.text="Review"
self.left_button.change_command(self.review)
# Set right button to home to restart process
self.right_button.text="Home"
self.right_button.change_command(self.home)
# Open a new quiz
def file_open(self):
##Todo load different quiz
self.open_quiz_file()
pass
# exit the self.app
def file_exit(self):
self.app.destroy()
# About
def help_about(self):
info("About Quiz", "Created by Stewart Watkiss\nhttp://www.penguintutor.com")
def setup_gui(self):
menubar = MenuBar(self.app,
toplevel=["File", "Help"],
options=[
[ ["Open",self.file_open],["Exit", self.file_exit] ] ,
[ ["About", self.help_about] ]
])
# Text / buttons are created without any details and are then updated
# based on the quizstrings.json file
# This is done prior app.display() so the user will not see flicker etc.
# column 0 and row 0 are used for dummy images for spacing
# cols 1 to 5 used for actual display
# dimensions shown to the right are minimum (using image)
padding0_0 = Picture(self.app, image="layout/0_0.gif", grid=[0,0]) # 1 pixel
padding1_0 = Picture(self.app, image="layout/1_0.gif", grid=[1,0]) # 100 pixel
padding2_0 = Picture(self.app, image="layout/2_0.gif", grid=[2,0]) # 550 pixel
padding2_0 = Picture(self.app, image="layout/3_0.gif", grid=[3,0]) # 100 pixel
padding3_0 = Picture(self.app, image="layout/4_0.gif", grid=[4,0]) # 100 pixel
padding0_2 = Picture(self.app, image="layout/0_2.gif", grid=[0,2]) # 100 pixel
        padding0_13 = Picture(self.app, image="layout/0_13.gif", grid=[0,13]) # 100 pixel
self.text_title = Text(self.app, text="", size=30, grid=[2,1,2,1])
image_logo = Picture(self.app, image="images/logo.gif", grid=[4,1,2,1])
self.text_question_title = Text(self.app, text="", align="left", size=25, grid=[1,2,2,1])
self.text_question_details_1 = Text(self.app, text="", align="left", size=18, grid=[1,3,3,1])
self.text_question_details_2 = Text(self.app, text="", align="left", size=18, grid=[1,4,2,1])
self.text_question_details_3 = Text(self.app, text="", align="left", size=18, grid=[1,5,2,1])
self.text_question_details_4 = Text(self.app, text="", align="left", size=18, grid=[1,6,2,1])
self.text_question_details_5 = Text(self.app, text="", align="left", size=18, grid=[1,7,2,1])
self.text_question_details_6 = Text(self.app, text="", align="left", size=18, grid=[1,8,2,1])
self.text_question_option_1 = Text(self.app, text="", align="left", size=18, grid=[1,9,2,1])
self.text_question_option_2 = Text(self.app, text="", align="left", size=18, grid=[1,10,2,1])
self.text_question_option_3 = Text(self.app, text="", align="left", size=18, grid=[1,11,2,1])
self.text_question_option_4 = Text(self.app, text="", align="left", size=18, grid=[1,12,2,1])
self.image_question = Picture(self.app, image="images/quiz.gif", grid=[3,3,3,9])
self.left_button = PushButton(self.app, text="", command=self.prev_question, grid=[1,13])
self.right_button = PushButton(self.app, text="", command=self.start_quiz, grid=[5,13])
self.home()
self.app.display()
|
from collections import OrderedDict
from django.http import HttpResponse
from django.template import Context, Template
from django.utils.html import escape
from sqlreports.utils import CSVWriter, get_db_connection
from sqlreports.models import SQLReport
def dictfetchall(cursor):
"Returns all rows from a cursor as a dict"
desc = cursor.description
return [
OrderedDict(zip([col[0] for col in desc], row))
for row in cursor.fetchall()
]
class ReportFormatter(object):
def filename(self):
return self.filename_template
class ReportCSVFormatter(ReportFormatter):
filename_template = 'sqlreports.csv'
def get_csv_writer(self, file_handle, **kwargs):
return CSVWriter(open_file=file_handle, **kwargs)
def generate_response(self, headers, objects, **kwargs):
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename=%s' \
% self.filename(**kwargs)
self.generate_csv(response, headers, objects)
return response
def generate_csv(self, response, headers, objects):
writer = self.get_csv_writer(response)
# Write a first row with header information
writer.writerow(headers)
# Write data rows
for data_obj in objects:
writer.writerow([data_obj[header] for header in headers])
return response
class ReportHTMLFormatter(ReportFormatter):
def generate_response(self, headers, objects, **kwargs):
return objects
class ReportGenerator(object):
formatters = {
'CSV_formatter': ReportCSVFormatter,
'HTML_formatter': ReportHTMLFormatter
}
def __init__(self, **kwargs):
formatter_name = '%s_formatter' % kwargs['formatter']
self.formatter = self.formatters[formatter_name]()
def generate(self, report_id, params):
records = self.get_report_data(report_id, params)
        headers = records[0].keys() if records else []
return self.formatter.generate_response(headers, records)
def get_report_query(self, report_id, params_dict):
""" QueryExample:
select id, checkin_time from auth_user where email = '{{EMAIL_ID}}'
"""
# FIXME: Need to include MySQL Escape
query = SQLReport.objects.get(id=report_id).query
t = Template(query)
# Escaping Params
escaped_params = {}
for item in params_dict.items():
escaped_params[item[0]] = escape(item[1])
c = Context(escaped_params)
return t.render(c)
def get_report_data(self, report_id, params):
""" For given sqlreports id and params it return the sqlreports data"""
# FIXME: Connection should have only read only permission
query = self.get_report_query(report_id, params)
cursor = get_db_connection().cursor()
cursor.execute(query)
return dictfetchall(cursor)
def is_available_to(self, user, report):
"""
Checks whether this report is available to this user
"""
if user.is_superuser:
# Super users are allowed everything
return True
if not user.is_staff:
# Non Staff are never allowed access to sqlreports
return False
# Allowed only if sqlreports is designated as a non-super user allowed
if not report.user_allowed:
return False
return True
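# Example usage (illustrative sketch; assumes an SQLReport row with id=1 whose
# query uses an {{EMAIL_ID}} placeholder, as in the get_report_query docstring):
#
#   generator = ReportGenerator(formatter='CSV')
#   response = generator.generate(1, {'EMAIL_ID': '[email protected]'})
#   # `response` is an HttpResponse serving the query results as a CSV attachment.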
|
import chainer
from chainer.backends import cuda
from chainer import distribution
from chainer.functions.activation import sigmoid
from chainer.functions.array import broadcast
from chainer.functions.array import where
from chainer.functions.math import exponential
from chainer.functions.math import logarithm_1p
from chainer import utils
import numpy
import warnings
class BernoulliLogProb(chainer.function_node.FunctionNode):
def forward(self, inputs):
logit, x = inputs
self.retain_inputs((0, 1))
xp = cuda.get_array_module(x)
y = logit * (x - 1) - xp.log(xp.exp(-logit) + 1)
y = utils.force_array(y)
self.invalid = utils.force_array(xp.bitwise_and(x != 0, x != 1))
y[self.invalid] = - xp.inf
# extreme logit
logit_isinf = xp.isinf(logit)
self.to_zero = xp.bitwise_and(
logit_isinf, xp.sign(x-0.5) == xp.sign(logit))
self.to_m_inf = xp.bitwise_and(
logit_isinf, xp.sign(x-0.5) != xp.sign(logit))
y[self.to_zero] = 0.
y[self.to_m_inf] = - xp.inf
return utils.force_array(y, logit.dtype),
def backward(self, indexes, grad_outputs):
gy, = grad_outputs
logit, x = self.get_retained_inputs()
xp = cuda.get_array_module(x)
dlogit = x - 1. / (1. + exponential.exp(-logit))
# extreme logit
nan_dlogit = xp.zeros_like(dlogit.array)
nan_dlogit[self.invalid] = xp.nan
nan_dlogit[self.to_zero] = xp.nan
nan_dlogit[self.to_m_inf] = xp.nan
dlogit += nan_dlogit
return gy * dlogit, None
class ModifiedXLogX(chainer.function_node.FunctionNode):
def __init__(self, logx):
self._logx = logx
def forward(self, inputs):
x, = inputs
self.x_zero = utils.force_array(x == 0)
y = utils.force_array(x * self._logx.array)
y[self.x_zero] = 0.
return y,
def backward(self, indexes, grad_outputs):
if self.x_zero.any():
warnings.warn(
'cannot calculate gradient for zero input.',
RuntimeWarning)
gy, = grad_outputs
dx = (1 + self._logx) * (1 - self.x_zero)
return gy * dx,
def _bernoulli_log_prob(logit, x):
y, = BernoulliLogProb().apply((logit, x))
return y
def _modified_xlogx(x):
x = chainer.as_variable(x)
xp = x.xp
return ModifiedXLogX(exponential.log(
where.where(utils.force_array(x.array > 0),
x, xp.ones_like(x.array)))).apply((x,))[0]
class Bernoulli(distribution.Distribution):
"""Bernoulli Distribution.
The probability mass function of the distribution is expressed as
.. math::
P(x = 1; p) = p \\\\
P(x = 0; p) = 1 - p
Args:
p(:class:`~chainer.Variable` or :class:`numpy.ndarray` or \
:class:`cupy.ndarray`): Parameter of distribution representing \
:math:`p`. Either `p` or `logit` (not both) must have a value.
logit(:class:`~chainer.Variable` or :class:`numpy.ndarray` or \
:class:`cupy.ndarray`): Parameter of distribution representing \
:math:`\\log\\{p/(1-p)\\}`. Either `p` or `logit` (not both) must \
have a value.
"""
def __init__(self, p=None, logit=None):
super(Bernoulli, self).__init__()
if not (p is None) ^ (logit is None):
raise ValueError(
"Either `p` or `logit` (not both) must have a value.")
with chainer.using_config('enable_backprop', True):
if p is None:
self.logit = chainer.as_variable(logit)
self.p = sigmoid.sigmoid(self.logit)
else:
self.p = chainer.as_variable(p)
self.logit = exponential.log(self.p) \
- logarithm_1p.log1p(-self.p)
@property
def batch_shape(self):
return self.p.shape
@property
def entropy(self):
p = self.p
q = p.dtype.type(1.) - p
return - _modified_xlogx(p) - _modified_xlogx(q)
@property
def event_shape(self):
return ()
@property
def _is_gpu(self):
return isinstance(self.p.array, cuda.ndarray)
def log_prob(self, x):
return _bernoulli_log_prob(self.logit, x)
@property
def mean(self):
return self.p
def prob(self, x):
x = chainer.as_variable(x)
if self._is_gpu:
valid = cuda.cupy.bitwise_or(x.array == 0, x.array == 1)
else:
valid = numpy.bitwise_or(x.array == 0, x.array == 1)
ret = x * broadcast.broadcast_to(self.p, x.shape) \
+ (1 - x) * (1 - broadcast.broadcast_to(self.p, x.shape))
return ret * valid
def sample_n(self, n):
if self._is_gpu:
eps = cuda.cupy.random.binomial(
1, self.p.array, size=(n,)+self.p.shape)
else:
eps = numpy.random.binomial(
1, self.p.array, size=(n,)+self.p.shape)
return chainer.Variable(eps)
@property
def stddev(self):
return (self.p * (1 - self.p)) ** 0.5
@property
def support(self):
return '{0, 1}'
@property
def variance(self):
return self.p * (1 - self.p)
@distribution.register_kl(Bernoulli, Bernoulli)
def _kl_bernoulli_bernoulli(dist1, dist2):
return (dist1.logit - dist2.logit) * (dist1.p - 1.) \
- exponential.log(exponential.exp(-dist1.logit) + 1) \
+ exponential.log(exponential.exp(-dist2.logit) + 1)
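# Example usage (illustrative sketch): construct a Bernoulli distribution from a
# probability array and evaluate it on observed samples.
#
#   import numpy as np
#   dist = Bernoulli(p=np.array([0.2, 0.7], dtype=np.float32))
#   log_p = dist.log_prob(np.array([0., 1.], dtype=np.float32))  # element-wise log P(x)
#   samples = dist.sample_n(5)                                   # 5 draws, shape (5, 2)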
|
"""
Breadth-first search and depth-first search.
Author: Wenru Dong
"""
from typing import List, Optional, Generator
from collections import deque
class Graph:
"""Undirected graph."""
def __init__(self, num_vertices: int):
self._num_vertices = num_vertices
self._adjacency = [[] for _ in range(num_vertices)]
def add_edge(self, s: int, t: int) -> None:
self._adjacency[s].append(t)
self._adjacency[t].append(s)
def _generate_path(self, s: int, t: int, prev: List[Optional[int]]) -> Generator[str, None, None]:
if prev[t] or s != t:
yield from self._generate_path(s, prev[t], prev)
yield str(t)
    def bfs(self, s: int, t: int) -> None:
"""Print out the path from Vertex s to Vertex t
using bfs.
"""
if s == t: return
visited = [False] * self._num_vertices
visited[s] = True
q = deque()
q.append(s)
prev = [None] * self._num_vertices
while q:
v = q.popleft()
for neighbour in self._adjacency[v]:
if not visited[neighbour]:
prev[neighbour] = v
if neighbour == t:
print("->".join(self._generate_path(s, t, prev)))
return
visited[neighbour] = True
q.append(neighbour)
    def dfs(self, s: int, t: int) -> None:
"""Print out a path from Vertex s to Vertex t
using dfs.
"""
found = False
visited = [False] * self._num_vertices
prev = [None] * self._num_vertices
def _dfs(from_vertex: int) -> None:
nonlocal found
if found: return
visited[from_vertex] = True
if from_vertex == t:
found = True
return
for neighbour in self._adjacency[from_vertex]:
if not visited[neighbour]:
prev[neighbour] = from_vertex
_dfs(neighbour)
        _dfs(s)
        if found:
            print("->".join(self._generate_path(s, t, prev)))
if __name__ == "__main__":
graph = Graph(8)
graph.add_edge(0, 1)
graph.add_edge(0, 3)
graph.add_edge(1, 2)
graph.add_edge(1, 4)
graph.add_edge(2, 5)
graph.add_edge(3, 4)
graph.add_edge(4, 5)
graph.add_edge(4, 6)
graph.add_edge(5, 7)
graph.add_edge(6, 7)
graph.bfs(0, 7)
graph.dfs(0, 7)
|
#!/usr/bin/python
import pytest
from symmRestApi import Restful
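# The `variables` fixture used throughout these tests is expected to supply the
# connection details ('URL', 'user', 'pass'), e.g. via the pytest-variables
# plugin and a JSON variables file passed on the command line.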
######################################
## ADMINISTRATION Resource group
######################################
def test_administration(variables):
# in the following, 'resource_id' or similar string is a valid type string but
# will never be successfully found by the api
api = Restful(variables['URL'], variables['user'], variables['pass'])
assert isinstance(api.get_app_list(variables['URL']), list)
assert isinstance(api.start_system_backup(variables['URL']), dict)
assert isinstance(api.auth_cirrus_user(variables['URL']), dict)
    assert isinstance(api.enroll_cirrus_user(variables['URL'], 'cirrus_id', True), dict)
assert isinstance(api.get_sharding_info(variables['URL']), dict)
assert isinstance(api.unenroll_cirrus_user(variables['URL'], 'token'), dict)
######################################
## COMMON Resource group
######################################
def test_common(variables):
# in the following, 'resource_id' or similar string is a valid type string but
# will never be successfully found by the api
api = Restful(variables['URL'], variables['user'], variables['pass'])
assert isinstance(api.get_iterator(variables['URL'], 'iterator_id'), dict)
assert isinstance(api.delete_iterator(variables['URL'], 'iterator_id'), dict)
assert isinstance(api.get_iterator_page(variables['URL'], 'iterator_id'), dict)
######################################
## MANAGEMENT Resource group
######################################
def test_management(variables):
# in the following, 'resource_id' or similar string is a valid type string but
# will never be successfully found by the api
api = Restful(variables['URL'], variables['user'], variables['pass'])
assert isinstance(api.get_usage_stats(variables['URL']), dict)
######################################
## PERFORMANCE Resource group
######################################
######################################
## SLOPROVISIONING and PROVISIONING Resource groups
######################################
def test_provisioning(variables):
# in the following, 'resource_id' or similar string is a valid type string but
# will never be successfully found by the api
api = Restful(variables['URL'], variables['user'], variables['pass'])
    assert isinstance(api.get_arrays(variables['URL'], 'SLO'), list)
assert isinstance(api.get_array_directors(variables['URL'], 'SLO', 'array_id'), list)
assert isinstance(api.get_array_director(variables['URL'], 'SLO', 'array_id', 'director_id'), dict)
assert isinstance(api.get_array_director_ports(variables['URL'], 'SLO', 'array_id', 'director_id'), list)
assert isinstance(api.get_array_director_port(variables['URL'], 'SLO', 'array_id', 'director_id', 'port_id'), dict)
assert isinstance(api.get_array_fastpolicies(variables['URL'], 'SLO', 'array_id'), list)
assert isinstance(api.get_array_fastpolicy(variables['URL'], 'SLO', 'array_id', 'policy_id'), dict)
assert isinstance(api.get_array_hosts(variables['URL'], 'SLO', 'array_id'), list)
assert isinstance(api.get_array_host(variables['URL'], 'SLO', 'array_id', 'host_id'), dict)
assert isinstance(api.get_array_hostgroups(variables['URL'], 'SLO', 'array_id'), list)
assert isinstance(api.get_array_hostgroup(variables['URL'], 'SLO', 'array_id', 'hostgroup_id'), dict)
assert isinstance(api.get_array_initiators(variables['URL'], 'SLO', 'array_id'), list)
assert isinstance(api.get_array_initiator(variables['URL'], 'SLO', 'array_id', 'initiator_id'), dict)
assert isinstance(api.get_array_maskingviews(variables['URL'], 'SLO', 'array_id'), list)
assert isinstance(api.get_array_maskingview(variables['URL'], 'SLO', 'array_id', 'maskingview_id'), dict)
assert isinstance(api.get_array_maskingview_connections(variables['URL'], 'SLO', 'array_id'), list)
assert isinstance(api.get_array_ports(variables['URL'], 'SLO', 'array_id'), list)
assert isinstance(api.get_array_portgoups(variables['URL'], 'SLO', 'array_id'), list)
assert isinstance(api.get_array_portgroup(variables['URL'], 'SLO', 'array_id', 'portgroup_id'), dict)
assert isinstance(api.get_array_slos(variables['URL'], 'SLO', 'array_id'), list)
assert isinstance(api.get_array_slo(variables['URL'], 'SLO', 'array_id', 'slo_id'), dict)
assert isinstance(api.get_array_srps(variables['URL'], 'SLO', 'array_id'), list)
assert isinstance(api.get_array_srp(variables['URL'], 'SLO', 'array_id', 'srp_id'), dict)
assert isinstance(api.get_array_storagegroups(variables['URL'], 'SLO', 'array_id'), list)
assert isinstance(api.get_array_storagegroup(variables['URL'], 'SLO', 'array_id', 'storagegroup_id'), dict)
assert isinstance(api.get_array_volumes(variables['URL'], 'SLO', 'array_id'), list)
    assert isinstance(api.get_arrays(variables['URL'], 'NOTSLO'), list)
assert isinstance(api.get_array_directors(variables['URL'], 'NOTSLO', 'array_id'), list)
assert isinstance(api.get_array_director(variables['URL'], 'NOTSLO', 'array_id', 'director_id'), dict)
assert isinstance(api.get_array_director_ports(variables['URL'], 'NOTSLO', 'array_id', 'director_id'), list)
assert isinstance(api.get_array_director_port(variables['URL'], 'NOTSLO', 'array_id', 'director_id', 'port_id'), dict)
assert isinstance(api.get_array_fastpolicies(variables['URL'], 'NOTSLO', 'array_id'), list)
assert isinstance(api.get_array_fastpolicy(variables['URL'], 'NOTSLO', 'array_id', 'policy_id'), dict)
assert isinstance(api.get_array_hosts(variables['URL'], 'NOTSLO', 'array_id'), list)
assert isinstance(api.get_array_host(variables['URL'], 'NOTSLO', 'array_id', 'host_id'), dict)
assert isinstance(api.get_array_hostgroups(variables['URL'], 'NOTSLO', 'array_id'), list)
assert isinstance(api.get_array_hostgroup(variables['URL'], 'NOTSLO', 'array_id', 'hostgroup_id'), dict)
assert isinstance(api.get_array_initiators(variables['URL'], 'NOTSLO', 'array_id'), list)
assert isinstance(api.get_array_initiator(variables['URL'], 'NOTSLO', 'array_id', 'initiator_id'), dict)
assert isinstance(api.get_array_maskingviews(variables['URL'], 'NOTSLO', 'array_id'), list)
assert isinstance(api.get_array_maskingview(variables['URL'], 'NOTSLO', 'array_id', 'maskingview_id'), dict)
assert isinstance(api.get_array_maskingview_connections(variables['URL'], 'NOTSLO', 'array_id'), list)
assert isinstance(api.get_array_ports(variables['URL'], 'NOTSLO', 'array_id'), list)
assert isinstance(api.get_array_portgoups(variables['URL'], 'NOTSLO', 'array_id'), list)
assert isinstance(api.get_array_portgroup(variables['URL'], 'NOTSLO', 'array_id', 'portgroup_id'), dict)
assert isinstance(api.get_array_storagegroups(variables['URL'], 'NOTSLO', 'array_id'), list)
assert isinstance(api.get_array_storagegroup(variables['URL'], 'NOTSLO', 'array_id', 'storagegroup_id'), dict)
assert isinstance(api.get_array_thinpools(variables['URL'], 'NOTSLO', 'array_id'), list)
assert isinstance(api.get_array_thinpool(variables['URL'], 'NOTSLO', 'array_id', 'thinpool_id'), dict)
assert isinstance(api.get_array_tiers(variables['URL'], 'NOTSLO', 'array_id'), list)
assert isinstance(api.get_array_tier(variables['URL'], 'NOTSLO', 'array_id', 'tier_id'), dict)
assert isinstance(api.get_array_volumes(variables['URL'], 'NOTSLO', 'array_id'), list)
assert isinstance(api.get_array_volume(variables['URL'], 'NOTSLO', 'array_id', 'volume_id'), dict)
######################################
## REPLICATION Resource group
######################################
def test_replication(variables):
# in the following, 'resource_id' or similar string is a valid type string but
# will never be successfully found by the api
api = Restful(variables['URL'], variables['user'], variables['pass'])
assert isinstance(api.get_replica_abilities(variables['URL']), list)
assert isinstance(api.get_replica_devicegroups(variables['URL']), list)
assert isinstance(api.get_replica_devicegroup(variables['URL'], 'array_id', 'devicegroup_id'), dict)
assert isinstance(api.get_replica_devicegroup_number(variables['URL'], 'array_id', 'devicegroup_id'), dict)
assert isinstance(api.get_replica_arrays(variables['URL']), list)
assert isinstance(api.get_replica_array(variables['URL'], 'array_id'), dict)
assert isinstance(api.get_replica_rdfgroups(variables['URL'], 'array_id'), list)
assert isinstance(api.get_replica_rdfgroup(variables['URL'], 'array_id', 'rdfg_num'), dict)
assert isinstance(api.get_replica_storagegroups(variables['URL'], 'array_id'), list)
assert isinstance(api.get_replica_storagegroup(variables['URL'], 'array_id', 'storagegroup_id'), dict)
assert isinstance(api.get_replica_storagegroup_snaps(variables['URL'], 'array_id', 'storagegroup_id'), list)
assert isinstance(api.get_replica_storagegroup_snap(variables['URL'], 'array_id', 'storagegroup_id', 'snap_id'), dict)
assert isinstance(api.get_replica_storagegroup_snap_generations(variables['URL'], 'array_id', 'storagegroup_id', 'snap_id'), list)
assert isinstance(api.get_replica_storagegroup_snap_generation(variables['URL'], 'array_id', 'storagegroup_id', 'snap_id', 'generation_num'), dict)
'''
######################################
## SYSTEM Resource group
######################################
def getAlerts(self, URL):
def getAlert(self, URL, resourceId):
def getJobs(self, URL):
def getJob(self, URL, resourceId):
def getSymms(self, URL):
def getSymm(self, URL, resourceId):
def getSymmAlerts(self, URL, resourceId):
def getSymmAlert(self, URL, symId, alertId):
def getSymmJobs(self, URL, resourceId):
def getSymmJob(self, URL, symId, jobId):
def getVersion(self, URL):
######################################
## WORKLOAD Resource group
######################################
'''
|
#####################################
# With some tiny modifications, this code follows the one used by TensorFlow Slim at:
# https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/slim
# Please refer to the link for further explanations.
### The difference is that this architecture is written in a fully-convolutional fashion.
### The advantage is that this model can be used for larger image sizes, with some average pooling in the last layer.
import tensorflow as tf
slim = tf.contrib.slim
def net_architecture(images, num_classes=10, is_training=False,
dropout_keep_prob=0.5,
spatial_squeeze=True,
scope='Net'):
"""Creates a variant of the Net model.
Args:
images: The batch of `Tensors`: size [batch_size, height, width, channels].
num_classes: Total number of classes.
      is_training: Whether the model is being built for training or evaluation.
      dropout_keep_prob: The probability of keeping activations in dropout (only active in training mode).
      spatial_squeeze: If True, squeeze the spatial dimensions out of the logits.
      scope: Variable_scope.
Returns:
logits: the pre-softmax activations of size [batch_size, `num_classes`]
end_points: The dictionary for the layers outputs.
"""
# Create empty dictionary
end_points = {}
with tf.variable_scope(scope, 'Net', [images, num_classes]) as sc:
end_points_collection = sc.name + '_end_points'
# Collect outputs for conv2d and max_pool2d.
with tf.contrib.framework.arg_scope([tf.contrib.layers.conv2d, tf.contrib.layers.max_pool2d],
outputs_collections=end_points_collection):
# Layer-1
net = tf.contrib.layers.conv2d(images, 32, [5, 5], scope='conv1')
net = tf.contrib.layers.max_pool2d(net, [2, 2], 2, scope='pool1')
# Layer-2
net = tf.contrib.layers.conv2d(net, 64, [5, 5], scope='conv2')
net = tf.contrib.layers.max_pool2d(net, [2, 2], 2, scope='pool2')
# Layer-3
net = tf.contrib.layers.conv2d(net, 1024, [7, 7], padding='VALID', scope='fc3')
net = tf.contrib.layers.dropout(net, dropout_keep_prob, is_training=is_training,
scope='dropout3')
# Last layer which is the logits for classes
logits = tf.contrib.layers.conv2d(net, num_classes, [1, 1], activation_fn=None, scope='fc4')
# Return the collections as a dictionary
end_points = slim.utils.convert_collection_to_dict(end_points_collection)
            # Squeeze spatially to eliminate extra dimensions (embedding layer).
if spatial_squeeze:
logits = tf.squeeze(logits, [1, 2], name='fc4/squeezed')
end_points[sc.name + '/fc4'] = logits
return logits, end_points
def net_arg_scope(weight_decay=0.0005, is_training=False):
"""Defines the default network argument scope.
Args:
weight_decay: The weight decay to use for regularizing the model.
Returns:
An `arg_scope` to use for the model.
"""
if is_training:
with tf.contrib.framework.arg_scope(
[tf.contrib.layers.conv2d],
padding='SAME',
weights_regularizer=slim.l2_regularizer(weight_decay),
weights_initializer=tf.contrib.layers.variance_scaling_initializer(factor=1.0, mode='FAN_AVG',
uniform=False, seed=None,
dtype=tf.float32),
activation_fn=tf.nn.relu) as sc:
return sc
else:
with tf.contrib.framework.arg_scope(
[tf.contrib.layers.conv2d],
padding='SAME',
activation_fn=tf.nn.relu) as sc:
return sc
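# Example usage (illustrative sketch, TF 1.x / contrib API; the input placeholder
# is an assumption and would normally come from a data pipeline defined elsewhere):
#
#   images = tf.placeholder(tf.float32, [None, 28, 28, 1])
#   with tf.contrib.framework.arg_scope(net_arg_scope(is_training=True)):
#       logits, end_points = net_architecture(images, num_classes=10, is_training=True)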
|
import unittest
import numpy as np
import bayesnet as bn
class TestProduct(unittest.TestCase):
def test_product(self):
arrays = [
1,
np.arange(1, 5),
np.arange(1, 7).reshape(2, 3),
np.arange(1, 7).reshape(2, 3, 1)
]
axes = [
None,
None,
1,
(0, 2)
]
keepdims = [
False,
False,
True,
False
]
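        # Expected gradients of prod(x) w.r.t. each element: d(prod)/dx_i equals
        # the product of all the other elements along the reduced axes.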
grads = [
1,
np.array([24., 12., 8., 6.]),
np.array([
[6., 3., 2.],
[30., 24., 20.]
]),
np.array([4., 5., 6., 1., 2., 3.]).reshape(2, 3, 1)
]
for arr, ax, keep, g in zip(arrays, axes, keepdims, grads):
a = bn.Parameter(arr)
b = a.prod(ax, keep)
b.backward(np.ones(b.shape))
if isinstance(g, int):
self.assertEqual(g, a.grad)
else:
self.assertTrue((g == a.grad).all())
if __name__ == '__main__':
unittest.main()
|
"""
WSGI config for techbikers project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "techbikers.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "server.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply Sentry middleware.
from raven.contrib.django.raven_compat.middleware.wsgi import Sentry
application = Sentry(application)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# -------------------------------------------------------------------
# Filename: data_handler.py
# Purpose: handling data (waveform/response) in obspyDMT
# Author: Kasra Hosseini
# Email: [email protected]
# License: GNU Lesser General Public License, Version 3
# -------------------------------------------------------------------
# -----------------------------------------------------------------------
# ----------------Import required Modules (Python and Obspy)-------------
# -----------------------------------------------------------------------
from __future__ import print_function
from datetime import datetime
import fileinput
import glob
import multiprocessing
import numpy as np
try:
from obspy.clients.fdsn import Client as Client_fdsn
except:
from obspy.fdsn import Client as Client_fdsn
try:
from obspy.clients.arclink import Client as Client_arclink
except:
from obspy.arclink import Client as Client_arclink
try:
from obspy.clients.fdsn import RoutingClient
except:
print("[WARNING] RoutingClient could not be imported.")
from obspy.clients.syngine import Client as Client_syngine
try:
from obspy.geodetics.base import gps2dist_azimuth as gps2DistAzimuth
except:
try:
from obspy.geodetics import gps2DistAzimuth
except:
from obspy.core.util import gps2DistAzimuth
from obspy import read_inventory
import os
import pickle
from .utility_codes import calculate_time_phase, getFolderSize
from .utility_codes import geocen_calc
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
# ##################### get_data ###############################
def get_data(stas_avail, event, input_dics, info_event):
"""
get the waveform/response from FDSN and ArcLink
:param stas_avail:
:param event:
:param input_dics:
:param info_event:
:return:
"""
t_wave_1 = datetime.now()
req_clients = np.unique(stas_avail[:, 8])
print("[INFO] requested data sources:")
for req_c in req_clients:
print(req_c, end=' ')
print('\n')
if input_dics['test']:
stas_avail = stas_avail[0:input_dics['test_num']]
if input_dics['req_parallel']:
par_jobs = []
for req_cli in req_clients:
st_avail = stas_avail[stas_avail[:, 8] == req_cli]
if not req_cli.lower() in ['arclink']:
p = multiprocessing.Process(target=fdsn_waveform,
args=(st_avail, event,
input_dics, req_cli,
info_event))
par_jobs.append(p)
elif req_cli.lower() == 'arclink':
p = multiprocessing.Process(target=arc_waveform,
args=(st_avail, event,
input_dics, req_cli,
info_event))
par_jobs.append(p)
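        # Throttle process start-up: a new download job is started only once the
        # number of still-running jobs drops below the number of requested data
        # sources; afterwards, block until all jobs have finished.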
sub_par_jobs = []
for l in range(len(par_jobs)):
counter = len(req_clients)
while counter >= len(req_clients):
counter = 0
for ll in range(len(sub_par_jobs)):
if par_jobs[sub_par_jobs[ll]].is_alive():
counter += 1
par_jobs[l].start()
sub_par_jobs.append(l)
counter = len(req_clients)
while counter > 0:
counter = 0
for ll in range(len(par_jobs)):
if par_jobs[ll].is_alive():
counter += 1
else:
for req_cli in req_clients:
st_avail = stas_avail[stas_avail[:, 8] == req_cli]
if not req_cli.lower() in ['arclink']:
fdsn_waveform(st_avail, event, input_dics, req_cli, info_event)
elif req_cli.lower() == 'arclink':
arc_waveform(st_avail, event, input_dics, req_cli, info_event)
print("\n========================")
print("DONE with Event: %s" % event['event_id'])
print("Time: %s" % (datetime.now() - t_wave_1))
print("========================")
# ##################### fdsn_waveform ###############################
def fdsn_waveform(stas_avail, event, input_dics, req_cli, info_event):
"""
get Waveforms, StationXML files and meta-data from FDSN
:param stas_avail:
:param event:
:param input_dics:
:param req_cli:
:param info_event:
:return:
"""
eventpath = os.path.join(input_dics['datapath'])
target_path = os.path.join(eventpath, event['event_id'])
if input_dics['bulk']:
try:
fdsn_bulk_request(target_path, req_cli, input_dics)
except Exception as error:
print('[WARNING] %s' % error)
print('DONE')
        # The following parameter is set to False to avoid
        # retrieving the waveforms twice.
        # When using bulk requests, waveforms are retrieved in bulk,
        # but response/StationXML files and metadata are not.
input_dics['waveform'] = False
print('%s bulkdataselect request is done for event: %s' \
% (req_cli, target_path))
fdsn_serial_parallel(stas_avail, event, input_dics, target_path,
req_cli, info_event)
# ##################### fdsn_serial_parallel ##################################
def fdsn_serial_parallel(stas_avail, event, input_dics, target_path,
req_cli, info_event):
"""
retrieving data from FDSN
:param stas_avail:
:param event:
:param input_dics:
:param target_path:
:param req_cli:
:param info_event:
:return:
"""
print('%s -- event: %s' % (req_cli, target_path))
if req_cli.lower() in ["iris-federator", "eida-routing"]:
client_fdsn = RoutingClient(req_cli)
else:
client_fdsn = Client_fdsn(base_url=req_cli,
user=input_dics['username_fdsn'],
password=input_dics['password_fdsn'])
#debug=True)
client_syngine = Client_syngine()
if input_dics['req_parallel']:
if input_dics['password_fdsn']:
print("[INFO] Restricted data from %s" % req_cli)
print("[WARNING] parallel retrieving is now possible!")
print("[WARNING] serial retrieving is activated!")
# num_req_np = 1
num_req_np = input_dics['req_np']
else:
num_req_np = input_dics['req_np']
par_jobs = []
st_counter = 0
for st_avail in stas_avail:
st_counter += 1
info_station = '[%s-%s/%s]' % (info_event, st_counter,
len(stas_avail))
p = multiprocessing.Process(target=fdsn_download_core,
args=(st_avail, event,
input_dics, target_path,
client_fdsn, client_syngine,
req_cli,
info_station))
par_jobs.append(p)
sub_par_jobs = []
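        # Throttle concurrent station downloads to at most num_req_np parallel
        # processes (same start/wait pattern as in get_data above).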
for l in range(len(par_jobs)):
counter = num_req_np
while counter >= num_req_np:
counter = 0
for ll in range(len(sub_par_jobs)):
if par_jobs[sub_par_jobs[ll]].is_alive():
counter += 1
par_jobs[l].start()
sub_par_jobs.append(l)
counter = num_req_np
while counter > 0:
counter = 0
for ll in range(len(par_jobs)):
if par_jobs[ll].is_alive():
counter += 1
else:
st_counter = 0
for st_avail in stas_avail:
st_counter += 1
info_station = '[%s-%s/%s]' % (info_event, st_counter,
len(stas_avail))
fdsn_download_core(st_avail, event, input_dics, target_path,
client_fdsn, client_syngine,
req_cli, info_station)
update_sta_ev_file(target_path, event)
if input_dics['bulk']:
input_dics['waveform'] = True
sta_saved_path = glob.glob(os.path.join(target_path, 'raw', '*.*.*.*'))
print('\n[INFO] adjusting the station_event file for bulk request...', end='')
sta_saved_list = []
for sta_num in range(len(sta_saved_path)):
sta_saved_list.append(os.path.basename(sta_saved_path[sta_num]))
sta_ev_new = []
for line in fileinput.FileInput(
os.path.join(target_path, 'info', 'station_event')):
line_split = line.split(',')
if not '%s.%s.%s.%s' \
% (line_split[0], line_split[1], line_split[2],
line_split[3]) in sta_saved_list:
pass
else:
sta_ev_new.append(line)
file_staev_open = open(os.path.join(target_path, 'info',
'station_event'), 'wt')
file_staev_open.writelines(sta_ev_new)
file_staev_open.close()
print('DONE')
# ##################### fdsn_download_core ##################################
def fdsn_download_core(st_avail, event, input_dics, target_path,
client_fdsn, client_syngine, req_cli, info_station):
"""
    downloading the waveforms, response files (StationXML) and metadata.
    This function should normally be called by some higher-level functions.
:param st_avail:
:param event:
:param input_dics:
:param target_path:
:param client_fdsn:
:param client_syngine:
:param req_cli:
:param info_station:
:return:
"""
dummy = 'initializing'
t11 = datetime.now()
identifier = 0
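    # `identifier` encodes which products were handled in this call:
    # +10 waveform, +100 response/StationXML, +1000 syngine, and +1 for each
    # product that was skipped because it already exists on disk.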
st_id = '%s.%s.%s.%s' % (st_avail[0], st_avail[1],
st_avail[2], st_avail[3])
try:
if st_avail[2] == '--' or st_avail[2] == ' ':
st_avail[2] = ''
if input_dics['cut_time_phase']:
t_start, t_end = calculate_time_phase(event, st_avail)
else:
t_start = event['t1']
t_end = event['t2']
if input_dics['min_azi'] or input_dics['max_azi'] or \
input_dics['min_epi'] or input_dics['max_epi']:
dist, azi, bazi = gps2DistAzimuth(event['latitude'],
event['longitude'],
float(st_avail[4]),
float(st_avail[5]))
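            # gps2DistAzimuth returns the distance in metres; convert it to
            # degrees of epicentral distance (~111.194 km per degree).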
epi_dist = dist/111.194/1000.
if input_dics['min_epi']:
if epi_dist < input_dics['min_epi']:
raise Exception('%s out of epi range: %s'
% (st_id, epi_dist))
if input_dics['max_epi']:
if epi_dist > input_dics['max_epi']:
raise Exception('%s out of epi range: %s'
% (st_id, epi_dist))
if input_dics['min_azi']:
if azi < input_dics['min_azi']:
                    raise Exception('%s out of Azimuth range: %s'
% (st_id, azi))
if input_dics['max_azi']:
if azi > input_dics['max_azi']:
raise Exception('%s out of Azimuth range: %s'
% (st_id, azi))
if input_dics['waveform']:
dummy = 'waveform'
if (not os.path.isfile(os.path.join(target_path, 'raw', st_id)))\
or input_dics['force_waveform']:
waveforms_path2write = os.path.join(target_path, 'raw', st_id)
if req_cli.lower() in ["iris-federator", "eida-routing"]:
dl_waveform = \
client_fdsn.get_waveforms(
network=st_avail[0],
station=st_avail[1],
location=st_avail[2],
channel=st_avail[3],
starttime=t_start,
endtime=t_end)
if len(dl_waveform) > 0:
dl_waveform.write(waveforms_path2write, format='mseed')
else:
raise Exception
else:
client_fdsn.get_waveforms(network=st_avail[0],
station=st_avail[1],
location=st_avail[2],
channel=st_avail[3],
starttime=t_start,
endtime=t_end,
filename=waveforms_path2write)
identifier += 10
print('%s -- %s -- saving waveform for: %s ---> DONE' \
% (info_station, req_cli, st_id))
else:
identifier += 1
if input_dics['response']:
dummy = 'response'
if (not os.path.isfile(os.path.join(target_path, 'resp',
'STXML.' + st_id))) \
or input_dics['force_response']:
resp_path2write = os.path.join(target_path, 'resp', 'STXML.%s' % st_id)
if req_cli.lower() in ["iris-federator", "eida-routing"]:
dl_response = \
client_fdsn.get_stations(
network=st_avail[0],
station=st_avail[1],
location=st_avail[2],
channel=st_avail[3],
starttime=t_start,
endtime=t_end,
level='response')
if len(dl_response) > 0:
dl_response.write(resp_path2write, format="stationxml")
else:
raise Exception
else:
client_fdsn.get_stations(network=st_avail[0],
station=st_avail[1],
location=st_avail[2],
channel=st_avail[3],
starttime=t_start,
endtime=t_end,
filename=resp_path2write,
level='response')
identifier += 100
print("%s -- %s -- saving response for: %s ---> DONE" \
% (info_station, req_cli, st_id))
else:
identifier += 1
if input_dics['syngine']:
dummy = 'syngine_waveform'
syn_dirpath = os.path.join(
target_path, 'syngine_%s' % input_dics['syngine_bg_model'])
if not os.path.isdir(syn_dirpath):
os.makedirs(syn_dirpath)
if (not os.path.isfile(os.path.join(syn_dirpath, st_id)))\
or input_dics['force_waveform']:
if input_dics['syngine_geocentric_lat']:
rcvlatitude = geocen_calc(float(st_avail[4]))
evlatitude = geocen_calc(event['latitude'])
else:
rcvlatitude = float(st_avail[4])
evlatitude = event['latitude']
if not event['focal_mechanism']:
syngine_momenttensor = None
else:
syngine_momenttensor = event['focal_mechanism']
# XXX some other arguments
# sourcedoublecouple=None,
# dt=None
# kernelwidth=None
# sourceforce=None
# label=None
req_syngine_component = st_avail[3][-1]
if req_syngine_component == '1':
req_syngine_component = 'E'
elif req_syngine_component == '2':
req_syngine_component = 'N'
st_id = '%s.%s.%s.%s' % (st_avail[0],
st_avail[1],
st_avail[2],
st_avail[3][:-1] + req_syngine_component)
syn_st = client_syngine.get_waveforms(
model=input_dics['syngine_bg_model'],
receiverlatitude=rcvlatitude,
receiverlongitude=float(st_avail[5]),
networkcode=st_avail[0],
stationcode=st_avail[1],
sourcelatitude=evlatitude,
sourcelongitude=event['longitude'],
sourcedepthinmeters=float(event['depth'])*1000.,
origintime=event['datetime'],
components=req_syngine_component,
units=input_dics['syngine_units'],
sourcemomenttensor=syngine_momenttensor,
starttime=t_start,
endtime=t_end)[0]
syn_st.stats.location = st_avail[2]
syn_st.stats.channel = st_avail[3][:-1] + req_syngine_component
syn_st.write(os.path.join(syn_dirpath, st_id),
format='mseed')
identifier += 1000
print('%s -- %s -- saving syngine for: %s ---> DONE' \
% (info_station, req_cli, st_id))
else:
identifier += 1
# if identifier in [0, 2, 3, 10, 11, 100]:
# raise Exception("CODE: %s will not be registered! (666)"
# % identifier)
t22 = datetime.now()
except Exception as error:
t22 = datetime.now()
if len(st_avail) > 0:
ee = '%s -- %s -- %s -- %s\n' % (req_cli, dummy, st_id, error)
else:
ee = '%s: There is no available station for this event.' % req_cli
Exception_file = open(os.path.join(target_path,
'info', 'exception'), 'at+')
Exception_file.writelines(ee)
Exception_file.close()
# ##################### fdsn_bulk_request ##################################
def fdsn_bulk_request(target_path, req_cli, input_dics):
"""
send bulk request to FDSN
:param target_path:
:param req_cli:
:param input_dics:
:return:
"""
print('\n[INFO] sending bulk request to: %s' % req_cli)
client_fdsn = Client_fdsn(base_url=req_cli,
user=input_dics['username_fdsn'],
password=input_dics['password_fdsn'])
bulk_list_fio = open(os.path.join(target_path, 'info',
'bulkdata_list_%s' % req_cli), 'rb')
bulk_list = pickle.load(bulk_list_fio)
bulk_smgrs = client_fdsn.get_waveforms_bulk(bulk_list)
print('[INFO] saving the retrieved waveforms from %s...' % req_cli)
for bulk_st in bulk_smgrs:
bulk_st.write(os.path.join(target_path, 'raw', '%s.%s.%s.%s'
% (bulk_st.stats['network'],
bulk_st.stats['station'],
bulk_st.stats['location'],
bulk_st.stats['channel'])),
'MSEED')
# ##################### arc_waveform ###############################
def arc_waveform(stas_avail, event, input_dics, req_cli, info_event):
"""
get Waveforms, StationXML files and meta-data from ArcLink
:param stas_avail:
:param event:
:param input_dics:
:param req_cli:
:param info_event:
:return:
"""
eventpath = os.path.join(input_dics['datapath'])
target_path = os.path.join(eventpath, event['event_id'])
arc_serial_parallel(stas_avail, event, input_dics, target_path,
req_cli, info_event)
# ##################### arc_serial_parallel ##################################
def arc_serial_parallel(stas_avail, event, input_dics, target_path,
req_cli, info_event):
"""
retrieving data from ArcLink
:param stas_avail:
:param event:
:param input_dics:
:param target_path:
:param req_cli:
:param info_event:
:return:
"""
print('%s -- event: %s' % (req_cli, target_path))
client_arclink = Client_arclink(user=input_dics['username_arclink'],
host=input_dics['host_arclink'],
port=input_dics['port_arclink'],
password=input_dics['password_arclink'],
timeout=input_dics['arc_wave_timeout'])
client_syngine = Client_syngine()
if input_dics['req_parallel']:
par_jobs = []
st_counter = 0
for st_avail in stas_avail:
st_counter += 1
info_station = '[%s-%s/%s]' % (info_event, st_counter,
len(stas_avail))
p = multiprocessing.Process(target=arc_download_core,
args=(st_avail, event,
input_dics, target_path,
client_arclink, client_syngine,
req_cli,
info_station))
par_jobs.append(p)
sub_par_jobs = []
for l in range(len(par_jobs)):
counter = input_dics['req_np']
while counter >= input_dics['req_np']:
counter = 0
for ll in range(len(sub_par_jobs)):
if par_jobs[sub_par_jobs[ll]].is_alive():
counter += 1
par_jobs[l].start()
sub_par_jobs.append(l)
counter = input_dics['req_np']
while counter > 0:
counter = 0
for ll in range(len(par_jobs)):
if par_jobs[ll].is_alive():
counter += 1
else:
st_counter = 0
for st_avail in stas_avail:
st_counter += 1
info_station = '[%s-%s/%s]' % (info_event, st_counter,
len(stas_avail))
arc_download_core(st_avail, event, input_dics, target_path,
client_arclink, client_syngine,
req_cli, info_station)
# ##################### arc_download_core ##################################
def arc_download_core(st_avail, event, input_dics, target_path,
client_arclink, client_syngine, req_cli, info_station):
"""
    downloading the waveforms, response files (StationXML) and metadata.
    This function should normally be called by some higher-level functions.
:param st_avail:
:param event:
:param input_dics:
:param target_path:
:param client_arclink:
:param client_syngine:
:param req_cli:
:param info_station:
:return:
"""
dummy = 'initializing'
t11 = datetime.now()
identifier = 0
st_id = '%s.%s.%s.%s' % (st_avail[0], st_avail[1],
st_avail[2], st_avail[3])
try:
if st_avail[2] == '--' or st_avail[2] == ' ':
st_avail[2] = ''
if input_dics['cut_time_phase']:
t_start, t_end = calculate_time_phase(event, st_avail)
else:
t_start = event['t1']
t_end = event['t2']
if input_dics['min_azi'] or input_dics['max_azi'] or \
input_dics['min_epi'] or input_dics['max_epi']:
dist, azi, bazi = gps2DistAzimuth(event['latitude'],
event['longitude'],
float(st_avail[4]),
float(st_avail[5]))
epi_dist = dist/111.194/1000.
if input_dics['min_epi']:
if epi_dist < input_dics['min_epi']:
raise Exception('%s out of epi range: %s'
% (st_id, epi_dist))
if input_dics['max_epi']:
if epi_dist > input_dics['max_epi']:
raise Exception('%s out of epi range: %s'
% (st_id, epi_dist))
if input_dics['min_azi']:
if azi < input_dics['min_azi']:
                    raise Exception('%s out of Azimuth range: %s'
% (st_id, azi))
if input_dics['max_azi']:
if azi > input_dics['max_azi']:
raise Exception('%s out of Azimuth range: %s'
% (st_id, azi))
if input_dics['waveform']:
dummy = 'waveform'
if (not os.path.isfile(os.path.join(target_path, 'raw', st_id))) \
or input_dics['force_waveform']:
if hasattr(client_arclink, 'save_waveforms'):
client_arclink.save_waveforms(os.path.join(target_path,
'raw', st_id),
st_avail[0], st_avail[1],
st_avail[2], st_avail[3],
t_start, t_end)
elif hasattr(client_arclink, 'saveWaveform'):
client_arclink.saveWaveform(os.path.join(target_path,
'raw', st_id),
st_avail[0], st_avail[1],
st_avail[2], st_avail[3],
t_start, t_end)
identifier += 10
print('%s -- %s -- saving waveform for: %s ---> DONE' \
% (info_station, req_cli, st_id))
else:
identifier += 1
if input_dics['response']:
dummy = 'response'
if (not os.path.isfile(os.path.join(target_path, 'resp',
'STXML.' + st_id))) \
or input_dics['force_response']:
if (not os.path.isfile(os.path.join(target_path, 'resp',
'DATALESS.' + st_id))) \
or input_dics['force_response']:
if hasattr(client_arclink, 'save_response'):
client_arclink.save_response(
os.path.join(target_path, 'resp',
'DATALESS.%s' % st_id),
st_avail[0], st_avail[1], st_avail[2], st_avail[3],
t_start, t_end)
if hasattr(client_arclink, 'saveResponse'):
client_arclink.saveResponse(
os.path.join(target_path, 'resp',
'DATALESS.%s' % st_id),
st_avail[0], st_avail[1], st_avail[2], st_avail[3],
t_start, t_end)
if input_dics['dataless2xml']:
try:
datalessResp = read_inventory(
os.path.join(target_path, 'resp', 'DATALESS.%s' % st_id))
datalessResp.write(os.path.join(
target_path, 'resp', 'STXML.%s' % st_id), format='STATIONXML')
os.remove(os.path.join(target_path, 'resp', 'DATALESS.%s' % st_id))
except Exception as error:
pass
identifier += 100
print("%s -- %s -- saving response for: %s ---> DONE" \
% (info_station, req_cli, st_id))
else:
identifier += 1
else:
identifier += 1
if input_dics['syngine']:
dummy = 'syngine_waveform'
syn_dirpath = os.path.join(
target_path, 'syngine_%s' % input_dics['syngine_bg_model'])
if not os.path.isdir(syn_dirpath):
os.makedirs(syn_dirpath)
if (not os.path.isfile(os.path.join(syn_dirpath, st_id)))\
or input_dics['force_waveform']:
if input_dics['syngine_geocentric_lat']:
rcvlatitude = geocen_calc(float(st_avail[4]))
evlatitude = geocen_calc(event['latitude'])
else:
rcvlatitude = float(st_avail[4])
evlatitude = event['latitude']
if not event['focal_mechanism']:
syngine_momenttensor = None
else:
syngine_momenttensor = event['focal_mechanism']
# XXX some other arguments
# sourcedoublecouple=None,
# dt=None
# kernelwidth=None
# sourceforce=None
# label=None
syn_st = client_syngine.get_waveforms(
model=input_dics['syngine_bg_model'],
receiverlatitude=rcvlatitude,
receiverlongitude=float(st_avail[5]),
networkcode=st_avail[0],
stationcode=st_avail[1],
sourcelatitude=evlatitude,
sourcelongitude=event['longitude'],
sourcedepthinmeters=float(event['depth'])*1000.,
origintime=event['datetime'],
components=st_avail[3][-1],
units=input_dics['syngine_units'],
sourcemomenttensor=syngine_momenttensor,
starttime=t_start,
endtime=t_end)[0]
syn_st.stats.location = st_avail[2]
syn_st.stats.channel = st_avail[3]
syn_st.write(os.path.join(syn_dirpath, st_id),
format='mseed')
identifier += 1000
print('%s -- %s -- saving syngine for: %s ---> DONE' \
% (info_station, req_cli, st_id))
else:
identifier += 1
# if identifier in [0, 2, 10, 11, 100]:
# raise Exception("CODE: %s will not be registered! (666)"
# % identifier)
t22 = datetime.now()
except Exception as error:
t22 = datetime.now()
if len(st_avail) != 0:
ee = '%s -- %s -- %s -- %s\n' % (req_cli, dummy, st_id, error)
else:
ee = '%s: There is no available station for this event.' % req_cli
Exception_file = open(os.path.join(target_path,
'info', 'exception'), 'at+')
Exception_file.writelines(ee)
Exception_file.close()
# ##################### update_sta_ev_file ##################################
def update_sta_ev_file(target_path, event):
"""
update the station_event file based on already stored waveforms
:param target_path:
:param event:
:return:
"""
avail_arr = np.loadtxt(os.path.join(target_path, 'info',
'availability.txt'),
delimiter=',', dtype=bytes, ndmin=2).astype(np.str)
avail_arr = avail_arr.astype(np.object)
sta_ev_add = os.path.join(target_path, 'info', 'station_event')
sta_ev_fio = open(sta_ev_add, 'wt+')
if not np.shape(avail_arr)[0] < 1:
sta_ev_names = avail_arr[:, 0] + '.' + avail_arr[:, 1] + '.' + \
avail_arr[:, 2] + '.' + avail_arr[:, 3]
sta_saved_path = glob.glob(
os.path.join(target_path, 'raw', '*.*.*.*'))
sta_saved_path.sort()
sta_sorted = []
for sta_sav_abs in sta_saved_path:
try:
sta_sav = os.path.basename(sta_sav_abs)
sta_indx = np.where(sta_ev_names == sta_sav)[0][-1]
sta_sorted.append(avail_arr[sta_indx])
except:
continue
if len(np.shape(sta_sorted)) == 1:
sta_sorted = np.reshape(sta_sorted, [1, len(sta_sorted)])
if not np.shape(sta_sorted)[1] < 1:
for sts in sta_sorted:
sta_ev_line = '%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,\n' \
% (sts[0], sts[1], sts[2], sts[3], sts[4],
sts[5], sts[6], sts[7], sts[8],
event['event_id'], event['latitude'],
event['longitude'], event['depth'],
event['magnitude'], sts[10], sts[11], '10')
sta_ev_fio.writelines(sta_ev_line)
sta_ev_fio.close()
# -------------------------------- TRASH
# to shuffle the stations, we do not need it anymore as we
# parallelize it over the data-sources as well
# all_sta = []
# for req_cli in req_clients:
# all_sta.append(stas_avail[stas_avail[:, -1] == req_cli])
# all_sta = np.array(list(roundrobin(all_sta)))
# from itertools import cycle, islice
# def roundrobin(iterables):
# # Recipe credited to George Sakkis
# pending = len(iterables)
# nexts = cycle(iter(it).next for it in iterables)
# while pending:
# try:
# for next in nexts:
# yield next()
# except StopIteration:
# pending -= 1
# nexts = cycle(islice(nexts, pending))
# parallel_len_req_fdsn = range(0, len_req_fdsn)
# start = 0
# end = len_req_fdsn
# step = (end - start) / input_dics['req_np'] + 1
# jobs = []
# for index in xrange(input_dics['req_np']):
# starti = start+index*step
# endi = min(start+(index+1)*step, end)
# p = multiprocessing.Process(target=FDSN_download_iter,
# args=(i, starti, endi, dic, req_type,
# len(events), events, add_event,
# Sta_req, input_dics, client_fdsn))
# jobs.append(p)
# for index in range(len(jobs)):
# jobs[index].start()
# pp_flag = True
# while pp_flag:
# for proc in jobs:
# if proc.is_alive():
# time.sleep(1)
# pp_flag = True
# break
# else:
# pp_flag = False
# if not pp_flag:
# print '\nAll the processes are finished...'
# len_par_grp = [parallel_len_req_fdsn[n:n+input_dics['req_np']] for n in
# range(0, len(parallel_len_req_fdsn), input_dics['req_np'])]
# # ##################### FDSN_download_iter ##################################
#
#
# def FDSN_download_iter(i, starti, endi, dic, type, len_events, events,
# add_event, Sta_req, input, client_fdsn):
# """
# This function only iterates over FDSN_download_core,
# this should be called by another program.
# """
# for j in range(starti, endi):
# FDSN_download_core(i=i, j=j, dic=dic, type=type,
# len_events=len_events, events=events,
# add_event=add_event, Sta_req=Sta_req,
# input=input, client_fdsn=client_fdsn)
|
#!/usr/bin/python
#
# Copyright (C) 2009, 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
"""Script for unittesting the mcpu module"""
import unittest
from ganeti import mcpu
from ganeti import opcodes
from ganeti.constants import \
LOCK_ATTEMPTS_TIMEOUT, \
LOCK_ATTEMPTS_MAXWAIT, \
LOCK_ATTEMPTS_MINWAIT
import testutils
class TestLockAttemptTimeoutStrategy(unittest.TestCase):
def testConstants(self):
tpa = mcpu.LockAttemptTimeoutStrategy._TIMEOUT_PER_ATTEMPT
self.assert_(len(tpa) > LOCK_ATTEMPTS_TIMEOUT / LOCK_ATTEMPTS_MAXWAIT)
self.assert_(sum(tpa) >= LOCK_ATTEMPTS_TIMEOUT)
def testSimple(self):
strat = mcpu.LockAttemptTimeoutStrategy(_random_fn=lambda: 0.5,
_time_fn=lambda: 0.0)
prev = None
for i in range(len(strat._TIMEOUT_PER_ATTEMPT)):
timeout = strat.NextAttempt()
self.assert_(timeout is not None)
self.assert_(timeout <= LOCK_ATTEMPTS_MAXWAIT)
self.assert_(timeout >= LOCK_ATTEMPTS_MINWAIT)
self.assert_(prev is None or timeout >= prev)
prev = timeout
for _ in range(10):
self.assert_(strat.NextAttempt() is None)
class TestDispatchTable(unittest.TestCase):
def test(self):
for opcls in opcodes.OP_MAPPING.values():
if not opcls.WITH_LU:
continue
self.assertTrue(opcls in mcpu.Processor.DISPATCH_TABLE,
msg="%s missing handler class" % opcls)
if __name__ == "__main__":
testutils.GanetiTestProgram()
|
# This file is a part of the "SuMPF" package
# Copyright (C) 2018-2021 Jonas Schulte-Coerne
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
"""Tests the interpolation functions"""
import hypothesis.extra.numpy
import numpy
import pytest
import sumpf._internal as sumpf_internal
def xs_ys(data, interpolation):
"""A helper function, that creates arrays of x and y values from the data pairs,
that have been created by hypothesis.
"""
if data:
xs, ys = map(numpy.array, zip(*sorted(data)))
else:
xs = numpy.empty(0)
ys = numpy.empty(0)
if interpolation in (sumpf_internal.Interpolations.LOGARITHMIC,
sumpf_internal.Interpolations.LOG_X,
sumpf_internal.Interpolations.STAIRS_LOG):
if (xs <= 0).any():
xs -= xs.min()
xs += 1e-15
if interpolation in (sumpf_internal.Interpolations.LOGARITHMIC,
sumpf_internal.Interpolations.LOG_Y):
ys = numpy.abs(ys) + 1e-15
return xs, ys
@hypothesis.given(interpolation=hypothesis.strategies.sampled_from(sumpf_internal.Interpolations),
data=hypothesis.strategies.lists(elements=hypothesis.strategies.tuples(hypothesis.strategies.floats(min_value=-1e15, max_value=1e15), # pylint: disable=line-too-long
hypothesis.strategies.complex_numbers(min_magnitude=0.0, max_magnitude=1e15)), # pylint: disable=line-too-long
min_size=0, max_size=2 ** 12,
unique_by=lambda t: t[0]))
def test_supporting_points(interpolation, data):
"""Tests if the interpolation at a supporting point is exactly the given y value"""
func = sumpf_internal.interpolation.get(interpolation)
xs, ys = xs_ys(data, interpolation)
assert (func(xs, xs, ys) == ys).all()
@hypothesis.given(interpolation=hypothesis.strategies.sampled_from(sumpf_internal.Interpolations),
data=hypothesis.strategies.lists(elements=hypothesis.strategies.tuples(hypothesis.strategies.floats(min_value=-1e15, max_value=1e15), # pylint: disable=line-too-long
hypothesis.strategies.complex_numbers(min_magnitude=0.0, max_magnitude=1e15)), # pylint: disable=line-too-long
min_size=1, max_size=2 ** 12,
unique_by=lambda t: t[0]),
x=hypothesis.strategies.lists(elements=hypothesis.strategies.floats(min_value=-1e15, max_value=1e15), min_size=0, max_size=2 ** 12)) # pylint: disable=line-too-long
def test_x_as_scalar_and_vector(interpolation, data, x):
"""Tests if running a vectorized interpolation returns the same result as the scalar version."""
func = sumpf_internal.interpolation.get(interpolation)
xs, ys = xs_ys(data, interpolation)
x = numpy.array(x)
if interpolation in (sumpf_internal.Interpolations.LOGARITHMIC,
sumpf_internal.Interpolations.LOG_X,
sumpf_internal.Interpolations.STAIRS_LOG):
if (x <= 0).any():
x -= x.min()
x += 1e-15
scalar = [func(s, xs, ys) for s in x]
vector = list(func(x, xs, ys))
assert scalar == pytest.approx(vector, nan_ok=True)
@pytest.mark.filterwarnings("ignore:divide by zero")
@hypothesis.given(interpolation=hypothesis.strategies.sampled_from(sumpf_internal.Interpolations),
xs=hypothesis.extra.numpy.arrays(dtype=numpy.float64, shape=2, elements=hypothesis.strategies.floats(min_value=-1e15, max_value=1e15), unique=True), # pylint: disable=line-too-long
ys=hypothesis.extra.numpy.arrays(dtype=numpy.complex128, shape=2, elements=hypothesis.strategies.complex_numbers(min_magnitude=0.0, max_magnitude=1e15)), # pylint: disable=line-too-long
k=hypothesis.strategies.floats(min_value=1e-15, max_value=1.0 - 1e-15))
def test_interpolation(interpolation, xs, ys, k): # noqa: C901; the function is not complex, it's just a long switch case
# pylint: disable=too-many-branches
"""Tests the computation of an interpolated value."""
func = sumpf_internal.interpolation.get(interpolation)
xs = numpy.array(sorted(xs))
if interpolation in (sumpf_internal.Interpolations.LOGARITHMIC,
sumpf_internal.Interpolations.LOG_X,
sumpf_internal.Interpolations.STAIRS_LOG) and \
min(xs) < 0.0:
xs -= min(xs)
if interpolation in (sumpf_internal.Interpolations.LOGARITHMIC, sumpf_internal.Interpolations.LOG_Y):
ys = numpy.abs(ys)
x = xs[0] + k * (xs[1] - xs[0])
hypothesis.assume(x not in xs) # due to the limited precision of floating point numbers, this can still happen
if interpolation is sumpf_internal.Interpolations.ZERO:
assert func(x, xs, ys) == 0.0
elif interpolation is sumpf_internal.Interpolations.ONE:
assert func(x, xs, ys) == 1.0
elif interpolation is sumpf_internal.Interpolations.LINEAR:
assert func(x, xs, ys) == pytest.approx(numpy.interp(x, xs, ys))
elif interpolation is sumpf_internal.Interpolations.LOGARITHMIC:
log_xs = numpy.log2(xs)
log_ys = numpy.log(numpy.abs(ys))
assert func(x, xs, ys) == pytest.approx(numpy.exp(numpy.interp(numpy.log2(x), log_xs, log_ys)), nan_ok=True)
elif interpolation is sumpf_internal.Interpolations.LOG_X:
log_xs = numpy.log2(xs)
assert func(x, xs, ys) == pytest.approx(numpy.interp(numpy.log2(x), log_xs, ys))
elif interpolation is sumpf_internal.Interpolations.LOG_Y:
log_ys = numpy.log(numpy.abs(ys))
assert func(x, xs, ys) == pytest.approx(numpy.exp(numpy.interp(x, xs, log_ys)), nan_ok=True)
elif interpolation is sumpf_internal.Interpolations.STAIRS_LIN:
if k < 0.5:
assert func(x, xs, ys) == ys[0]
else:
assert func(x, xs, ys) == ys[1]
elif interpolation is sumpf_internal.Interpolations.STAIRS_LOG:
if numpy.log(x) - numpy.log(xs[0]) < numpy.log(xs[1]) - numpy.log(x):
assert func(x, xs, ys) == ys[0]
else:
assert func(x, xs, ys) == ys[1]
else:
raise ValueError(f"Unknown interpolation: {interpolation}.")
@pytest.mark.filterwarnings("ignore:divide by zero encountered in log", "ignore:invalid value encountered", "ignore:overflow encountered in exp") # pylint: disable=line-too-long
@hypothesis.given(xs=hypothesis.extra.numpy.arrays(dtype=numpy.float64, shape=2, elements=hypothesis.strategies.floats(min_value=0.0, max_value=1e12), unique=True), # pylint: disable=line-too-long
ys=hypothesis.extra.numpy.arrays(dtype=numpy.complex128, shape=2, elements=hypothesis.strategies.complex_numbers(min_magnitude=0.0, max_magnitude=1e15)), # pylint: disable=line-too-long
interpolation=hypothesis.strategies.sampled_from(sumpf_internal.Interpolations),
delta_x=hypothesis.strategies.floats(min_value=1e-15, max_value=1e15))
def test_extrapolation(xs, ys, interpolation, delta_x): # noqa: C901; the function is not complex, it's just a long switch case
# pylint: disable=too-many-branches,too-many-statements
"""Tests the computation of an extrapolated value."""
func = sumpf_internal.interpolation.get(interpolation)
xs = numpy.array(sorted(xs))
if interpolation in (sumpf_internal.Interpolations.LOGARITHMIC,
sumpf_internal.Interpolations.LOG_X,
sumpf_internal.Interpolations.STAIRS_LOG) and \
min(xs) < 0.0:
xs -= min(xs)
if interpolation in (sumpf_internal.Interpolations.LOGARITHMIC, sumpf_internal.Interpolations.LOG_Y):
ys = numpy.abs(ys)
x0 = xs[0] * (1.0 - delta_x) - delta_x
x1 = xs[1] * (1.0 + delta_x) + delta_x
if interpolation is sumpf_internal.Interpolations.ZERO:
assert func(x0, xs, ys) == 0.0
assert func(x1, xs, ys) == 0.0
elif interpolation is sumpf_internal.Interpolations.ONE:
assert func(x0, xs, ys) == 1.0
assert func(x1, xs, ys) == 1.0
elif interpolation is sumpf_internal.Interpolations.LINEAR:
m = (ys[1] - ys[0]) / (xs[1] - xs[0])
n0 = ys[0] - m * xs[0]
n1 = ys[1] - m * xs[1]
assert func(x0, xs, ys) == pytest.approx(m * x0 + n0)
assert func(x1, xs, ys) == pytest.approx(m * x1 + n1)
elif interpolation is sumpf_internal.Interpolations.LOGARITHMIC:
if 0.0 in ys:
assert numpy.isnan(func(x0, xs, ys))
assert numpy.isnan(func(x1, xs, ys))
else:
log_xs = numpy.log2(xs)
log_ys = numpy.log2(ys)
m = (log_ys[1] - log_ys[0]) / (log_xs[1] - log_xs[0])
r0 = numpy.exp2(m * numpy.log2(x0) + log_ys[0] - m * log_xs[0])
r1 = numpy.exp2(m * numpy.log2(x1) + log_ys[1] - m * log_xs[1])
assert (numpy.isnan(func(x0, xs, ys)) and numpy.isnan(r0)) or (func(x0, xs, ys) == pytest.approx(r0))
assert (numpy.isnan(func(x1, xs, ys)) and numpy.isnan(r1)) or (func(x1, xs, ys) == pytest.approx(r1))
elif interpolation is sumpf_internal.Interpolations.LOG_X:
log_xs = numpy.log2(xs)
m = (ys[1] - ys[0]) / (log_xs[1] - log_xs[0])
r0 = m * numpy.log2(x0) + ys[0] - m * log_xs[0]
r1 = m * numpy.log2(x1) + ys[1] - m * log_xs[1]
assert (numpy.isnan(func(x0, xs, ys)) and numpy.isnan(r0)) or (func(x0, xs, ys) == pytest.approx(r0))
assert (numpy.isnan(func(x1, xs, ys)) and numpy.isnan(r1)) or (func(x1, xs, ys) == pytest.approx(r1))
elif interpolation is sumpf_internal.Interpolations.LOG_Y:
if 0.0 in ys:
assert numpy.isnan(func(x0, xs, ys))
assert numpy.isnan(func(x1, xs, ys))
else:
log_ys = numpy.log2(ys)
m = (log_ys[1] - log_ys[0]) / (xs[1] - xs[0])
n0 = log_ys[0] - m * xs[0]
n1 = log_ys[1] - m * xs[1]
assert func(x0, xs, ys) == pytest.approx(numpy.exp2(m * x0 + n0))
assert func(x1, xs, ys) == pytest.approx(numpy.exp2(m * x1 + n1))
elif interpolation is sumpf_internal.Interpolations.STAIRS_LIN:
assert func(x0, xs, ys) == ys[0]
assert func(x1, xs, ys) == ys[1]
elif interpolation is sumpf_internal.Interpolations.STAIRS_LOG:
assert func(x0, xs, ys) == ys[0]
assert func(x1, xs, ys) == ys[1]
else:
raise ValueError(f"Unknown interpolation: {interpolation}.")
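# For reference (a sketch mirroring the LOGARITHMIC extrapolation branch above):
# on a log-log scale the two samples define a straight line, so
#     y(x) = exp2(m * log2(x) + log2(ys[0]) - m * log2(xs[0]))
# with
#     m = (log2(ys[1]) - log2(ys[0])) / (log2(xs[1]) - log2(xs[0]))
# which is what the r0/r1 expressions compute for the two extrapolation points.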
|
# -*- coding: utf-8 -*-
# emma
#
# Copyright (C) 2006 Florian Schmidt ([email protected])
# 2014 Nickolay Karnaukhov ([email protected])
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import sqlite3
conn = sqlite3.connect(database='/home/nick/test_database.sqlite')
print(conn)
cur = conn.cursor()
print(cur.execute("SELECT * FROM sqlite_master ORDER BY name"))
print(cur.description)
res = cur.fetchall()
for row in res:
    print(row)
# from SQLiteHost import SQLiteHost
#
# host = SQLiteHost(None, None, '/home/nick/test.sqlite')
# host.connect()
#
# host.databases['dummydb'].refresh()
# print host.databases['dummydb'].tables
#
# table = host.databases['dummydb'].tables['aaa']
# table.refresh()
#
# print "---------------------------"
# print "Table:"
# print table.__dict__
#
# print "---------------------------"
# print "Table fields:"
# for f in table.fields:
# print f.__dict__
#
# print "---------------------------"
# print "Table indexes:"
# for i in table.indexes:
# print i.__dict__
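# A minimal sketch (not part of the original script) of the same query with the
# connection wrapped so it is always closed, even if the query raises; it
# assumes the same database path as above:
#
# import contextlib
# with contextlib.closing(sqlite3.connect('/home/nick/test_database.sqlite')) as conn:
#     for row in conn.execute("SELECT * FROM sqlite_master ORDER BY name"):
#         print(row)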
|
if ( ('PFVFLOW' in features) and ('TWOPHASEFLOW' in features) ):
from yade import pack
from yade import export
from yade import timing
from yade import plot
import time
from math import *
num_spheres=1000# number of spheres
young=1e6
compFricDegree = 3 # initial contact friction during the confining phase
finalFricDegree = 30 # contact friction during the deviatoric loading
mn,mx=Vector3(0,0,0),Vector3(1,1,0.4) # corners of the initial packing
graindensity=2600
toleranceWarning =1.e-11
toleranceCritical=1.e-6
O.materials.append(FrictMat(young=young,poisson=0.5,frictionAngle=radians(compFricDegree),density=graindensity,label='spheres'))
O.materials.append(FrictMat(young=young,poisson=0.5,frictionAngle=0,density=0,label='walls'))
walls=aabbWalls([mn,mx],thickness=0,material='walls')
wallIds=O.bodies.append(walls)
sp=pack.SpherePack()
sp.makeCloud(mn,mx,-1,0.3333,num_spheres,False, 0.95,seed=1) # "seed" makes the "random" generation always the same
sp.toSimulation(material='spheres')
triax=TriaxialStressController(
maxMultiplier=1.+2e4/young, # spheres growing factor (fast growth)
finalMaxMultiplier=1.+2e3/young, # spheres growing factor (slow growth)
thickness = 0,
stressMask = 7,
max_vel = 0.005,
internalCompaction=True, # If true the confining pressure is generated by growing particles
)
newton=NewtonIntegrator(damping=0.2)
O.engines=[
ForceResetter(),
InsertionSortCollider([Bo1_Sphere_Aabb(),Bo1_Box_Aabb()]),
InteractionLoop(
[Ig2_Sphere_Sphere_ScGeom(),Ig2_Box_Sphere_ScGeom()],
[Ip2_FrictMat_FrictMat_FrictPhys()],
[Law2_ScGeom_FrictPhys_CundallStrack()],label="iloop"
),
TwoPhaseFlowEngine(dead=1,label="flow"),#introduced as a dead engine for the moment, see 2nd section
GlobalStiffnessTimeStepper(active=1,timeStepUpdateInterval=100,timestepSafetyCoefficient=0.8),
triax,
newton
]
triax.goal1=triax.goal2=triax.goal3=-10000
while 1:
O.run(1000, True)
unb=unbalancedForce()
if unb<0.001 and abs(-10000-triax.meanStress)/10000<0.001:
break
setContactFriction(radians(finalFricDegree))
radius=0
for b in O.bodies:
if b.state.mass==0:
b.state.blockedDOFs='xyzXYZ'
b.state.vel=(0,0,0)
b.state.angVel=(0,0,0)
if b.state.mass>0:
radius+=b.shape.radius
#b.state.blockedDOFs='xyz'
#b.state.vel=(0,0,0)
radius=radius/num_spheres
triax.dead=True
while 1:
O.run(1000, True)
unb=unbalancedForce()
if unb<0.001:
break
press=1000.
O.run(10,1)
flow.dead=0
flow.meshUpdateInterval=-1
flow.useSolver=3
flow.permeabilityFactor=1
flow.viscosity=0.1
flow.bndCondIsWaterReservoir=[0,0,1,0,0,0]
flow.bndCondIsPressure=[0,0,1,0,0,0]
flow.bndCondValue=[0,0,press,0,0,0]
flow.boundaryUseMaxMin=[0,0,0,0,0,0]
flow.iniVoidVolumes=True
newton.damping=0.1
GlobalStiffnessTimeStepper.dead=True
O.dt=min(0.8*PWaveTimeStep(),0.8*1./1200.*pi/flow.viscosity*graindensity*radius**2)
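# note on the time step above: the first term is the usual 0.8*PWaveTimeStep()
# elastic criterion; the second term scales as graindensity*radius**2/flow.viscosity,
# i.e. (presumably) a viscous time-scale limit for the fluid coupling, with the
# prefactor 0.8*pi/1200 taken as-is from the original script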
O.dynDt=False
flow.surfaceTension = 0.0
flow.drainageFirst=False
flow.isDrainageActivated=False
flow.isImbibitionActivated=True
flow.isCellLabelActivated=True
flow.initialization()
cs=flow.getClusters()
c0=cs[1]
voidvol=0.0
voidvoltot=0.0
nvoids=flow.nCells()
initialvol=[0] * (nvoids)
bar=[0] * (nvoids)
initiallevel=O.bodies[flow.wallIds[flow.ymin]].state.pos[1]+(O.bodies[flow.wallIds[flow.ymax]].state.pos[1]-O.bodies[flow.wallIds[flow.ymin]].state.pos[1])/3
for ii in range(nvoids):
initialvol[ii]=1./flow.getCellInvVoidVolume(ii)
voidvoltot+=initialvol[ii]
bar[ii]=flow.getCellBarycenter(ii)[1]
iniok=0
while (iniok==0):
celleini1=[nvoids+1] * (nvoids)
celleini0=[0] * (nvoids)
for ii in range(len(c0.getInterfaces())):
if bar[c0.getInterfaces()[ii][1]]<initiallevel:
if celleini1[c0.getInterfaces()[ii][1]]==nvoids+1:
celleini1[c0.getInterfaces()[ii][1]]=ii
celleini0[c0.getInterfaces()[ii][1]]=c0.getInterfaces()[ii][0]
for ii in range(nvoids):
if celleini1[ii]!=nvoids+1:
flow.clusterOutvadePore(celleini0[ii],ii)
no=0
for ii in range(nvoids):
if bar[ii]<initiallevel:
if flow.getCellLabel(ii)==0:
no=1
if no==0:
iniok=1
for ii in range(len(c0.getInterfaces())):
c0.setCapVol(ii,0.0)
c0.solvePressure()
flow.computeCapillaryForce()
for b in O.bodies:
O.forces.setPermF(b.id, flow.fluidForce(b.id))
O.run(1,1)
flow.savePhaseVtk("./vtk",True)
timeini=O.time
ini=O.iter
Qin=0.0
#Qout=0.0
totalflux=[0] * (nvoids)
#totalCellSat=0.0
for ii in range(nvoids):
if flow.getCellLabel(ii)==0:
voidvol+=initialvol[ii]
bubble=0
dd=0.0
celleok=[0] * (nvoids)
deltabubble=0
col0=[0] * (nvoids)
neighK=[0.0] * (nvoids) #FIXME: after remeshing the size will be invalid since nvoids can change, initializations will have to go in the function itself
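# A possible direction for the FIXME above (sketch only, not active code):
# rebuild the per-cell arrays inside pressureImbibition() whenever the mesh has
# changed, e.g.
#   if flow.nCells() != len(totalflux):
#       totalflux = [0.0] * flow.nCells()
#       neighK = [0.0] * flow.nCells()
# how the lost per-cell history should be re-initialized after remeshing is
# left open here.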
def pressureImbibition():
global Qin,total2,dd,deltabubble,bubble
c0.updateCapVolList(O.dt)
Qin+=-1.*(flow.getBoundaryFlux(flow.wallIds[flow.ymin]))*O.dt
#Qout+=(flow.getBoundaryFlux(flow.wallIds[flow.ymax]))*O.dt
col1=[0] * (nvoids)
delta=[0.0] * (nvoids)
for ii in range(nvoids):
if flow.getCellLabel(ii)==0:
totalflux[ii]+=-1.*flow.getCellFluxFromId(ii)*O.dt
if (totalflux[ii])>=initialvol[ii]:
col1[ii]=1
if (totalflux[ii])>initialvol[ii]:
delta[ii]=totalflux[ii]-initialvol[ii]
totalflux[ii]+=-1*delta[ii]
#dd+=delta[ii]
	# advice:
	# never call 'getInterfaces()' inside a loop, it's expensive; get the list once outside the loop
	# get the interfaces again only if you know the list could have changed (cluster got/lost pores).
# I'm fixing only the first loop below (old version left commented)
#
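	# A sketch of that advice (hypothetical local name `interfaces`): fetch the
	# list once, reuse it inside the loops, and fetch it again only after calls
	# such as flow.clusterOutvadePore() that can change the cluster:
	#   interfaces = c0.getInterfaces()
	#   for ii in range(len(interfaces)):
	#       ll = interfaces[ii][1]
	#       ...
	#   # after the clusterOutvadePore() calls:
	#   interfaces = c0.getInterfaces()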
for ii in range(len(c0.getInterfaces())):
ll=c0.getInterfaces()[ii][1]
if col1[ll]==1:
if celleok[ll]==0:
celleok[ll]=1
col0[ll]=c0.getInterfaces()[ii][0]
for jj in range(nvoids):
if col1[jj]==1:
flow.clusterOutvadePore(col0[jj],jj)
#totalCellSat+=initialvol[jj]
for ii in range(len(c0.getInterfaces())):
ll=c0.getInterfaces()[ii][0]
if delta[ll]!=0:
neighK[ll]+=c0.getConductivity(ii)
for ii in range(len(c0.getInterfaces())):
ll=c0.getInterfaces()[ii][0]
if delta[ll]!=0:
c0.setCapVol(ii,delta[ll]/neighK[ll]*c0.getConductivity(ii))
totalflux[c0.getInterfaces()[ii][1]]+=delta[ll]/neighK[ll]*c0.getConductivity(ii)
for ii in range(nvoids):
if delta[ii]!=0:
if neighK[ii]==0:
deltabubble+=delta[ii]
bubble+=1
col1=[0] * (nvoids)
delta=[0.0] * (nvoids)
for ii in range(nvoids):
if flow.getCellLabel(ii)==0:
if (totalflux[ii])>=initialvol[ii]:
col1[ii]=1
if (totalflux[ii])>initialvol[ii]:
delta[ii]=totalflux[ii]-initialvol[ii]
totalflux[ii]+=-1*delta[ii]
#dd+=delta[ii]
if col1!=[0] * (nvoids):
for ii in range(len(c0.getInterfaces())):
ll=c0.getInterfaces()[ii][1]
if col1[ll]==1:
if celleok[ll]==0:
celleok[ll]=1
col0[ll]=c0.getInterfaces()[ii][0]
for jj in range(nvoids):
if col1[jj]==1:
flow.clusterOutvadePore(col0[jj],jj)
#totalCellSat+=initialvol[jj]
for ii in range(len(c0.getInterfaces())):
ll=c0.getInterfaces()[ii][0]
if delta[ll]!=0:
neighK[ll]+=c0.getConductivity(ii)
for ii in range(len(c0.getInterfaces())):
ll=c0.getInterfaces()[ii][0]
if delta[ll]!=0:
c0.setCapVol(ii,delta[ll]/neighK[ll]*c0.getConductivity(ii))
totalflux[c0.getInterfaces()[ii][1]]+=delta[ll]/neighK[ll]*c0.getConductivity(ii)
for ii in range(nvoids):
if delta[ii]!=0:
if neighK[ii]==0:
deltabubble+=delta[ii]
bubble+=1
col1=[0] * (nvoids)
delta=[0.0] * (nvoids)
for ii in range(nvoids):
if flow.getCellLabel(ii)==0:
if (totalflux[ii])>=initialvol[ii]:
col1[ii]=1
if (totalflux[ii])>initialvol[ii]:
delta[ii]=totalflux[ii]-initialvol[ii]
totalflux[ii]+=-1*delta[ii]
dd+=delta[ii]
print(O.iter,'waterloss',ii,delta[ii])
if col1!=[0] * (nvoids):
for ii in range(len(c0.getInterfaces())):
ll=c0.getInterfaces()[ii][1]
if col1[ll]==1:
if celleok[ll]==0:
celleok[ll]=1
col0[ll]=c0.getInterfaces()[ii][0]
for jj in range(nvoids):
if col1[jj]==1:
flow.clusterOutvadePore(col0[jj],jj)
#totalCellSat+=initialvol[jj]
total2=0.0
for ii in range(nvoids):
total2+=totalflux[ii]
c0.solvePressure()
flow.computeCapillaryForce()
for b in O.bodies:
O.forces.setPermF(b.id, flow.fluidForce(b.id))
file=open('Test.txt',"w")
checkdifference=0
def equilibriumtest():
global F33,F22,checkdifference
#unbalanced=utils.unbalancedForce()
F33=abs(O.forces.f(flow.wallIds[flow.ymax])[1])
F22=abs(O.forces.f(flow.wallIds[flow.ymin])[1])
#F11 =abs(O.forces.f(flow.wallIds[flow.xmax])[0]),
#F00=abs(O.forces.f(flow.wallIds[flow.xmin])[0]),
#F44=abs(O.forces.f(flow.wallIds[flow.zmin])[2]),
#F55=abs(O.forces.f(flow.wallIds[flow.zmax])[2]),
deltaF=abs(F33-F22)
file.write(str(O.iter)+" "+str(F33)+" "+str(F22)+" "+str(deltaF)+"\n")
if O.time>=timeini+1.5:
if checkdifference==0:
print('check F done')
if deltaF>0.01*press:
				raise YadeCheckError('Error: the difference between the forces acting on the bottom and upper walls is too high')
#O.pause()
checkdifference=1
once=0
def fluxtest():
global once,QinOk
no=0
QinOk=Qin-deltabubble
error=QinOk-total2
if error>toleranceWarning:
print("Warning: difference between total water volume flowing through bottom wall and water loss due to air bubble generations",QinOk," vs. total water volume flowing inside dry or partially saturated cells",total2)
if error>toleranceCritical:
raise YadeCheckError("The difference is more, than the critical tolerance!")
file.write(str(O.time-timeini)+" "+str(total2)+" "+str(QinOk)+" "+str(error)+"\n")
for ii in range(nvoids):
if flow.getCellLabel(ii)==0:
no=1
if once==0:
if no==0:
imbtime=O.time-timeini
print(imbtime,voidvol,total2,QinOk)
if voidvol-total2>toleranceWarning:
print("Warning: initial volume of dry voids",voidvol," vs. total water volume flowing inside dry or partially saturated cells",total2)
if voidvol-total2>toleranceCritical:
raise YadeCheckError("The difference is more, than the critical tolerance!")
file.write(str(imbtime)+" "+str(voidvol)+" "+str(total2)+" "+str(QinOk)+"\n")
once=1
timing.stats()
def addPlotData():
plot.addData(i1=O.iter,
t=O.time,
Fupper=F33,
Fbottom=F22,
Q=QinOk,
T=total2
)
plot.live=True
plot.plots={' t ':('Fupper','Fbottom'),'t':('Q','T')}
plot.plot()
def pl():
flow.savePhaseVtk("./vtk",True)
O.engines=O.engines+[PyRunner(iterPeriod=100,command='pl()')]
#O.engines=O.engines+[VTKRecorder(iterPeriod=100,recorders=['spheres'],fileName='./exp')]
O.engines=O.engines+[PyRunner(iterPeriod=1,command='equilibriumtest()')]
O.engines=O.engines+[PyRunner(iterPeriod=1,command='pressureImbibition()')]
O.engines=O.engines+[PyRunner(iterPeriod=1,command='fluxtest()')]
O.engines=O.engines+[PyRunner(iterPeriod=1,command='addPlotData()')]
O.timingEnabled=True
#file.close()
#plot.saveDataTxt('plots.txt',vars=('i1','t','Fupper','Fbottom','Q','T'))
#O.run(1,1)
import tempfile, shutil, os
dirpath = tempfile.mkdtemp()
for fileName in ['./vtk', './Test.txt' ]:
if (os.path.exists(fileName)): shutil.move(fileName,dirpath)
print("File %s moved into %s/ directory"%(fileName,dirpath))
else:
print("This check_TwoPhaseFlowEngine_PressureInjection.py cannot be executed because TWOPHASEFLOW or PFVFLOW are disabled")
|
""" This is a test of the FileCatalogDB
It supposes that the DB is present.
"""
# pylint: disable=invalid-name,wrong-import-position
import unittest
import itertools
import os
import sys
from DIRAC.Core.Base import Script
Script.parseCommandLine()
from DIRAC.DataManagementSystem.DB.FileCatalogDB import FileCatalogDB
from DIRAC.Core.Security.Properties import FC_MANAGEMENT
seName = "mySE"
testUser = 'atsareg'
testGroup = 'dirac_user'
testDir = '/vo.formation.idgrilles.fr/user/a/atsareg/testdir'
parentDir = '/vo.formation.idgrilles.fr/user/a/atsareg'
nonExistingDir = "/I/Dont/exist/dir"
testFile = '/vo.formation.idgrilles.fr/user/a/atsareg/testdir/testfile'
nonExistingFile = "/I/Dont/exist"
x509Chain = "<X509Chain 3 certs [/DC=ch/DC=cern/OU=computers/CN=volhcb12.cern.ch]"
x509Chain += "[/DC=ch/DC=cern/CN=CERN Trusted Certification Authority][/DC=ch/DC=cern/CN=CERN Root CA]>"
credDict = {
'DN': '/DC=ch/DC=cern/OU=computers/CN=volhcb12.cern.ch',
'extraCredentials': 'hosts',
'group': 'visitor',
'CN': 'volhcb12.cern.ch',
'x509Chain': x509Chain,
'username': 'anonymous',
'isLimitedProxy': False,
'properties': [FC_MANAGEMENT],
'isProxy': False}
isAdmin = False
proxyUser = 'anonymous'
proxyGroup = 'visitor'
# TESTS WERE DESIGNED WITH THIS CONFIGURATION
# DATABASE_CONFIG = { 'UserGroupManager' : 'UserAndGroupManagerDB',
# 'SEManager' : 'SEManagerDB',
# 'SecurityManager' : 'NoSecurityManager',
# 'DirectoryManager' : 'DirectoryLevelTree',
# 'FileManager' : 'FileManager',
# 'DirectoryMetadata' : 'DirectoryMetadata',
# 'FileMetadata' : 'FileMetadata',
# 'DatasetManager' : 'DatasetManager',
# 'UniqueGUID' : False,
# 'GlobalReadAccess' : True,
# 'LFNPFNConvention' : 'Strong',
# 'ResolvePFN' : True,
# 'DefaultUmask' : 0775,
# 'ValidFileStatus' : ['AprioriGood', 'Trash', 'Removing', 'Probing'],
# 'ValidReplicaStatus' : ['AprioriGood', 'Trash', 'Removing', 'Probing'],
# 'VisibleFileStatus' : ['AprioriGood'],
# 'VisibleReplicaStatus': ['AprioriGood'] }
DATABASE_CONFIG = {
'UserGroupManager': 'UserAndGroupManagerDB', # UserAndGroupManagerDB, UserAndGroupManagerCS
'SEManager': 'SEManagerDB', # SEManagerDB, SEManagerCS
# NoSecurityManager, DirectorySecurityManager, FullSecurityManager
'SecurityManager': 'NoSecurityManager',
# DirectorySimpleTree, DirectoryFlatTree, DirectoryNodeTree, DirectoryLevelTree
'DirectoryManager': 'DirectoryLevelTree',
'FileManager': 'FileManager', # FileManagerFlat, FileManager
'DirectoryMetadata': 'DirectoryMetadata',
'FileMetadata': 'FileMetadata',
'DatasetManager': 'DatasetManager',
'UniqueGUID': True,
'GlobalReadAccess': True,
'LFNPFNConvention': 'Strong',
'ResolvePFN': True,
'DefaultUmask': 0o775,
'ValidFileStatus': ['AprioriGood', 'Trash', 'Removing', 'Probing'],
'ValidReplicaStatus': ['AprioriGood', 'Trash', 'Removing', 'Probing'],
'VisibleFileStatus': ['AprioriGood'],
'VisibleReplicaStatus': ['AprioriGood']}
ALL_MANAGERS = {
    "UserGroupManager": ["UserAndGroupManagerDB", "UserAndGroupManagerCS"],
    "SEManager": ["SEManagerDB", "SEManagerCS"],
    "SecurityManager": ["NoSecurityManager", "DirectorySecurityManager", "FullSecurityManager"],
    "DirectoryManager": ["DirectorySimpleTree", "DirectoryFlatTree", "DirectoryNodeTree", "DirectoryLevelTree"],
    "FileManager": ["FileManagerFlat", "FileManager"],
}
ALL_MANAGERS_NO_CS = {
"UserGroupManager": ["UserAndGroupManagerDB"],
"SEManager": ["SEManagerDB"],
"SecurityManager": [
"NoSecurityManager",
"DirectorySecurityManager",
"FullSecurityManager"],
"DirectoryManager": [
"DirectorySimpleTree",
"DirectoryFlatTree",
"DirectoryNodeTree",
"DirectoryLevelTree"],
"FileManager": [
"FileManagerFlat",
"FileManager"],
}
DEFAULT_MANAGER = {"UserGroupManager": ["UserAndGroupManagerDB"],
"SEManager": ["SEManagerDB"],
"SecurityManager": ["DirectorySecurityManagerWithDelete"],
"DirectoryManager": ["DirectoryClosure"],
"FileManager": ["FileManagerPs"],
}
DEFAULT_MANAGER_2 = {"UserGroupManager": ["UserAndGroupManagerDB"],
"SEManager": ["SEManagerDB"],
"SecurityManager": ["NoSecurityManager"],
"DirectoryManager": ["DirectoryLevelTree"],
"FileManager": ["FileManager"],
}
MANAGER_TO_TEST = DEFAULT_MANAGER
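# Each entry above maps a manager category to a list of candidate implementations;
# the driver at the bottom of this file expands them with itertools.product and
# patches DATABASE_CONFIG for every combination. A small illustration (sketch,
# not DIRAC code; assumes insertion-ordered dicts, i.e. Python 3.7+):
#
#   >>> list(itertools.product(*{'A': [1, 2], 'B': ['x']}.values()))
#   [(1, 'x'), (2, 'x')]
#
# With MANAGER_TO_TEST = DEFAULT_MANAGER every list has a single element, so the
# test suite below runs exactly once.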
class FileCatalogDBTestCase(unittest.TestCase):
""" Base class for the FileCatalogDB test cases
"""
def setUp(self):
self.db = FileCatalogDB()
# for table in self.db._query( "Show tables;" )["Value"]:
# self.db.deleteEntries( table[0] )
self.db.setConfig(DATABASE_CONFIG)
def tearDown(self):
pass
# for table in self.db._query( "Show tables;" )["Value"]:
# self.db.deleteEntries( table[0] )
class SECase (FileCatalogDBTestCase):
def test_seOperations(self):
"""Testing SE related operation"""
# create SE
ret = self.db.addSE(seName, credDict)
if isAdmin:
self.assertTrue(ret["OK"], "addSE failed when adding new SE: %s" % ret)
seId = ret["Value"]
# create it again
ret = self.db.addSE(seName, credDict)
self.assertEqual(ret["Value"], seId, "addSE failed when adding existing SE: %s" % ret)
else:
self.assertEqual(
ret["OK"],
False,
"addSE should fail when adding new SE as non admin: %s" %
ret)
# remove it
ret = self.db.deleteSE(seName, credDict)
self.assertEqual(ret["OK"], True if isAdmin else False, "deleteE failed %s" % ret)
class UserGroupCase(FileCatalogDBTestCase):
def test_userOperations(self):
"""Testing the user related operations"""
expectedRes = None
if isAdmin:
print "Running UserTest in admin mode"
expectedRes = True
else:
print "Running UserTest in non admin mode"
expectedRes = False
# Add the user
result = self.db.addUser(testUser, credDict)
self.assertEqual(result['OK'], expectedRes, "AddUser failed when adding new user: %s" % result)
# Add an existing user
result = self.db.addUser(testUser, credDict)
self.assertEqual(
result['OK'],
expectedRes,
"AddUser failed when adding existing user: %s" %
result)
# Fetch the list of user
result = self.db.getUsers(credDict)
self.assertEqual(result['OK'], expectedRes, "getUsers failed: %s" % result)
if isAdmin:
# Check if our user is present
self.assertEqual(testUser in result['Value'], expectedRes, "getUsers failed: %s" % result)
# remove the user we created
result = self.db.deleteUser(testUser, credDict)
self.assertEqual(result['OK'], expectedRes, "deleteUser failed: %s" % result)
def test_groupOperations(self):
"""Testing the group related operations"""
expectedRes = None
if isAdmin:
print "Running UserTest in admin mode"
expectedRes = True
else:
print "Running UserTest in non admin mode"
expectedRes = False
# Create new group
result = self.db.addGroup(testGroup, credDict)
self.assertEqual(result['OK'], expectedRes, "AddGroup failed when adding new user: %s" % result)
result = self.db.addGroup(testGroup, credDict)
self.assertEqual(
result['OK'],
expectedRes,
"AddGroup failed when adding existing user: %s" %
result)
result = self.db.getGroups(credDict)
self.assertEqual(result['OK'], expectedRes, "getGroups failed: %s" % result)
if isAdmin:
self.assertEqual(testGroup in result['Value'], expectedRes)
result = self.db.deleteGroup(testGroup, credDict)
self.assertEqual(result['OK'], expectedRes, "deleteGroup failed: %s" % result)
class FileCase(FileCatalogDBTestCase):
def test_fileOperations(self):
"""
Tests the File related Operations
this test requires the SE to be properly defined in the CS -> NO IT DOES NOT!!
"""
# Adding a new file
result = self.db.addFile({testFile: {'PFN': 'testfile',
'SE': 'testSE',
'Size': 123,
'GUID': '1000',
'Checksum': '0'}}, credDict)
self.assertTrue(result['OK'], "addFile failed when adding new file %s" % result)
result = self.db.exists(testFile, credDict)
self.assertTrue(result['OK'])
self.assertEqual(result['Value'].get('Successful', {}).get(testFile),
testFile, "exists( testFile) should be the same lfn %s" % result)
result = self.db.exists({testFile: '1000'}, credDict)
self.assertTrue(result['OK'])
self.assertEqual(result['Value'].get('Successful', {}).get(testFile),
testFile, "exists( testFile : 1000) should be the same lfn %s" % result)
result = self.db.exists({testFile: {'GUID': '1000', 'PFN': 'blabla'}}, credDict)
self.assertTrue(result['OK'])
self.assertEqual(result['Value'].get('Successful', {}).get(testFile),
testFile, "exists( testFile : 1000) should be the same lfn %s" % result)
# In fact, we don't check if the GUID is correct...
result = self.db.exists({testFile: '1001'}, credDict)
self.assertTrue(result['OK'])
self.assertEqual(result['Value'].get('Successful', {}).get(testFile),
testFile, "exists( testFile : 1001) should be the same lfn %s" % result)
result = self.db.exists({testFile + '2': '1000'}, credDict)
self.assertTrue(result['OK'])
self.assertEqual(result['Value'].get('Successful', {}).get(testFile + '2'),
testFile, "exists( testFile2 : 1000) should return testFile %s" % result)
# Re-adding the same file
result = self.db.addFile({testFile: {'PFN': 'testfile',
'SE': 'testSE',
'Size': 123,
'GUID': '1000',
'Checksum': '0'}}, credDict)
self.assertTrue(
result["OK"],
"addFile failed when adding existing file with same param %s" %
result)
self.assertTrue(
testFile in result["Value"]["Successful"],
"addFile failed: it should be possible to add an existing lfn with same param %s" %
result)
# Adding same file with different param
result = self.db.addFile({testFile: {'PFN': 'testfile',
'SE': 'testSE',
'Size': 123,
'GUID': '1000',
'Checksum': '1'}}, credDict)
self.assertTrue(
result["OK"],
"addFile failed when adding existing file with different parem %s" %
result)
self.assertTrue(
testFile in result["Value"]["Failed"],
"addFile failed: it should not be possible to add an existing lfn with different param %s" %
result)
result = self.db.addFile({testFile + '2': {'PFN': 'testfile',
'SE': 'testSE',
'Size': 123,
'GUID': '1000',
'Checksum': '0'}}, credDict)
self.assertTrue(result["OK"], "addFile failed when adding existing file %s" % result)
self.assertTrue(
testFile +
'2' in result["Value"]["Failed"],
"addFile failed: it should not be possible to add a new lfn with existing GUID %s" %
result)
##################################################################################
# Setting existing status of existing file
result = self.db.setFileStatus({testFile: "AprioriGood"}, credDict)
self.assertTrue(
result["OK"],
"setFileStatus failed when setting existing status of existing file %s" %
result)
self.assertTrue(
testFile in result["Value"]["Successful"],
"setFileStatus failed: %s should be in successful (%s)" %
(testFile,
result))
# Setting unexisting status of existing file
result = self.db.setFileStatus({testFile: "Happy"}, credDict)
self.assertTrue(
result["OK"],
"setFileStatus failed when setting un-existing status of existing file %s" %
result)
self.assertTrue(
testFile in result["Value"]["Failed"],
"setFileStatus should have failed %s" %
result)
# Setting existing status of unexisting file
result = self.db.setFileStatus({nonExistingFile: "Trash"}, credDict)
self.assertTrue(
result["OK"],
"setFileStatus failed when setting existing status of non-existing file %s" %
result)
self.assertTrue(
nonExistingFile in result["Value"]["Failed"],
"setFileStatus failed: %s should be in failed (%s)" %
(nonExistingFile,
result))
##################################################################################
result = self.db.isFile([testFile, nonExistingFile], credDict)
self.assertTrue(result["OK"], "isFile failed: %s" % result)
self.assertTrue(
testFile in result["Value"]["Successful"],
"isFile : %s should be in Successful %s" %
(testFile,
result))
self.assertTrue(
result["Value"]["Successful"][testFile],
"isFile : %s should be seen as a file %s" %
(testFile,
result))
self.assertTrue(
nonExistingFile in result["Value"]["Successful"],
"isFile : %s should be in Successful %s" %
(nonExistingFile,
result))
self.assertTrue(result["Value"]["Successful"][nonExistingFile] is False,
"isFile : %s should be seen as a file %s" % (nonExistingFile, result))
result = self.db.changePathOwner({testFile: "toto", nonExistingFile: "tata"}, credDict)
self.assertTrue(result["OK"], "changePathOwner failed: %s" % result)
self.assertTrue(
testFile in result["Value"]["Successful"],
"changePathOwner : %s should be in Successful %s" %
(testFile,
result))
self.assertTrue(
nonExistingFile in result["Value"]["Failed"],
"changePathOwner : %s should be in Failed %s" %
(nonExistingFile,
result))
result = self.db.changePathGroup({testFile: "toto", nonExistingFile: "tata"}, credDict)
self.assertTrue(result["OK"], "changePathGroup failed: %s" % result)
self.assertTrue(
testFile in result["Value"]["Successful"],
"changePathGroup : %s should be in Successful %s" %
(testFile,
result))
self.assertTrue(
nonExistingFile in result["Value"]["Failed"],
"changePathGroup : %s should be in Failed %s" %
(nonExistingFile,
result))
result = self.db.changePathMode({testFile: 0o44, nonExistingFile: 0o44}, credDict)
self.assertTrue(result["OK"], "changePathMode failed: %s" % result)
self.assertTrue(
testFile in result["Value"]["Successful"],
"changePathMode : %s should be in Successful %s" %
(testFile,
result))
self.assertTrue(
nonExistingFile in result["Value"]["Failed"],
"changePathMode : %s should be in Failed %s" %
(nonExistingFile,
result))
result = self.db.getFileSize([testFile, nonExistingFile], credDict)
self.assertTrue(result["OK"], "getFileSize failed: %s" % result)
self.assertTrue(
testFile in result["Value"]["Successful"],
"getFileSize : %s should be in Successful %s" %
(testFile,
result))
self.assertEqual(
result["Value"]["Successful"][testFile],
123,
"getFileSize got incorrect file size %s" %
result)
self.assertTrue(
nonExistingFile in result["Value"]["Failed"],
"getFileSize : %s should be in Failed %s" %
(nonExistingFile,
result))
result = self.db.getFileMetadata([testFile, nonExistingFile], credDict)
self.assertTrue(result["OK"], "getFileMetadata failed: %s" % result)
self.assertTrue(
testFile in result["Value"]["Successful"],
"getFileMetadata : %s should be in Successful %s" %
(testFile,
result))
self.assertEqual(
result["Value"]["Successful"][testFile]["Owner"],
"toto",
"getFileMetadata got incorrect Owner %s" %
result)
self.assertEqual(
result["Value"]["Successful"][testFile]["Status"],
"AprioriGood",
"getFileMetadata got incorrect status %s" %
result)
self.assertTrue(
nonExistingFile in result["Value"]["Failed"],
"getFileMetadata : %s should be in Failed %s" %
(nonExistingFile,
result))
# DOES NOT FOLLOW THE SUCCESSFUL/FAILED CONVENTION
# result = self.db.getFileDetails([testFile, nonExistingFile], credDict)
# self.assertTrue(result["OK"], "getFileDetails failed: %s" % result)
# self.assertTrue(
# testFile in result["Value"]["Successful"],
# "getFileDetails : %s should be in Successful %s" %
# (testFile,
# result))
# self.assertEqual(
# result["Value"]["Successful"][testFile]["Owner"],
# "toto",
# "getFileDetails got incorrect Owner %s" %
# result)
# self.assertTrue(
# nonExistingFile in result["Value"]["Failed"],
# "getFileDetails : %s should be in Failed %s" %
# (nonExistingFile,
# result))
# ADD SOMETHING ABOUT FILE ANCESTORS AND DESCENDENTS
result = self.db.getSEDump('testSE')
self.assertTrue(result['OK'], "Error when getting SE dump %s" % result)
self.assertEqual(result['Value'], ((testFile, '0', 123),),
"Did not get the expected SE Dump %s" % result['Value'])
result = self.db.removeFile([testFile, nonExistingFile], credDict)
self.assertTrue(result["OK"], "removeFile failed: %s" % result)
self.assertTrue(
testFile in result["Value"]["Successful"],
"removeFile : %s should be in Successful %s" %
(testFile,
result))
self.assertTrue(
result["Value"]["Successful"][testFile],
"removeFile : %s should be in True %s" %
(testFile,
result))
self.assertTrue(
result["Value"]["Successful"][nonExistingFile],
"removeFile : %s should be in True %s" %
(nonExistingFile,
result))
class ReplicaCase(FileCatalogDBTestCase):
def test_replicaOperations(self):
"""
this test requires the SE to be properly defined in the CS -> NO IT DOES NOT!!
"""
# Adding a new file
result = self.db.addFile({testFile: {'PFN': 'testfile',
'SE': 'testSE',
'Size': 123,
'GUID': '1000',
'Checksum': '0'}}, credDict)
self.assertTrue(result['OK'], "addFile failed when adding new file %s" % result)
# Adding new replica
result = self.db.addReplica({testFile: {"PFN": "testFile", "SE": "otherSE"}}, credDict)
self.assertTrue(result['OK'], "addReplica failed when adding new Replica %s" % result)
self.assertTrue(
testFile in result['Value']["Successful"],
"addReplica failed when adding new Replica %s" %
result)
# Adding the same replica
result = self.db.addReplica({testFile: {"PFN": "testFile", "SE": "otherSE"}}, credDict)
self.assertTrue(result['OK'], "addReplica failed when adding new Replica %s" % result)
self.assertTrue(
testFile in result['Value']["Successful"],
"addReplica failed when adding new Replica %s" %
result)
# Adding replica of a non existing file
result = self.db.addReplica({nonExistingFile: {"PFN": "Idontexist", "SE": "otherSE"}}, credDict)
self.assertTrue(
result['OK'],
"addReplica failed when adding Replica to non existing Replica %s" %
result)
self.assertTrue(
nonExistingFile in result['Value']["Failed"],
"addReplica for non existing file should go in Failed %s" %
result)
# Setting existing status of existing Replica
result = self.db.setReplicaStatus({testFile: {"Status": "Trash", "SE": "otherSE"}}, credDict)
self.assertTrue(
result["OK"],
"setReplicaStatus failed when setting existing status of existing Replica %s" %
result)
self.assertTrue(
testFile in result["Value"]["Successful"],
"setReplicaStatus failed: %s should be in successful (%s)" %
(testFile,
result))
# Setting non existing status of existing Replica
result = self.db.setReplicaStatus(
{testFile: {"Status": "randomStatus", "SE": "otherSE"}}, credDict)
self.assertTrue(
result["OK"],
"setReplicaStatus failed when setting non-existing status of existing Replica %s" %
result)
self.assertTrue(
testFile in result["Value"]["Failed"],
"setReplicaStatus failed: %s should be in Failed (%s)" %
(testFile,
result))
# Setting existing status of non-existing Replica
result = self.db.setReplicaStatus(
{testFile: {"Status": "Trash", "SE": "nonExistingSe"}}, credDict)
self.assertTrue(
result["OK"],
"setReplicaStatus failed when setting existing status of non-existing Replica %s" %
result)
self.assertTrue(
testFile in result["Value"]["Failed"],
"setReplicaStatus failed: %s should be in Failed (%s)" %
(testFile,
result))
# Setting existing status of non-existing File
result = self.db.setReplicaStatus(
{nonExistingFile: {"Status": "Trash", "SE": "nonExistingSe"}}, credDict)
self.assertTrue(
result["OK"],
"setReplicaStatus failed when setting existing status of non-existing File %s" %
result)
self.assertTrue(
nonExistingFile in result["Value"]["Failed"],
"setReplicaStatus failed: %s should be in Failed (%s)" %
(nonExistingFile,
result))
# Getting existing status of existing Replica but not visible
result = self.db.getReplicaStatus({testFile: "testSE"}, credDict)
self.assertTrue(
result["OK"],
"getReplicaStatus failed when getting existing status of existing Replica %s" %
result)
self.assertTrue(
testFile in result["Value"]["Successful"],
"getReplicaStatus failed: %s should be in Successful (%s)" %
(testFile,
result))
# Getting existing status of existing Replica but not visible
result = self.db.getReplicaStatus({testFile: "otherSE"}, credDict)
self.assertTrue(
result["OK"],
"getReplicaStatus failed when getting existing status of existing Replica but not visible %s" %
result)
self.assertTrue(
testFile in result["Value"]["Successful"],
"getReplicaStatus failed: %s should be in Successful (%s)" %
(testFile,
result))
# Getting status of non-existing File but not visible
result = self.db.getReplicaStatus({nonExistingFile: "testSE"}, credDict)
self.assertTrue(
result["OK"],
"getReplicaStatus failed when getting status of non existing File %s" %
result)
self.assertTrue(
nonExistingFile in result["Value"]["Failed"],
"getReplicaStatus failed: %s should be in failed (%s)" %
(nonExistingFile,
result))
# Getting replicas of existing File and non existing file, seeing all replicas
result = self.db.getReplicas([testFile, nonExistingFile], allStatus=True, credDict=credDict)
self.assertTrue(result["OK"], "getReplicas failed %s" % result)
self.assertTrue(
testFile in result["Value"]["Successful"],
"getReplicas failed, %s should be in Successful %s" %
(testFile,
result))
self.assertEqual(
result["Value"]["Successful"][testFile], {
"otherSE": "", "testSE": ""}, "getReplicas failed, %s should be in Successful %s" %
(testFile, result))
self.assertTrue(
nonExistingFile in result["Value"]["Failed"],
"getReplicas failed, %s should be in Failed %s" %
(nonExistingFile,
result))
# removing master replica
result = self.db.removeReplica({testFile: {"SE": "testSE"}}, credDict)
self.assertTrue(result['OK'], "removeReplica failed when removing master Replica %s" % result)
self.assertTrue(
testFile in result['Value']["Successful"],
"removeReplica failed when removing master Replica %s" %
result)
# removing non existing replica of existing File
result = self.db.removeReplica({testFile: {"SE": "nonExistingSe2"}}, credDict)
self.assertTrue(
result['OK'],
"removeReplica failed when removing non existing Replica %s" %
result)
self.assertTrue(
testFile in result['Value']["Successful"],
"removeReplica failed when removing new Replica %s" %
result)
# removing non existing replica of non existing file
result = self.db.removeReplica({nonExistingFile: {"SE": "nonExistingSe3"}}, credDict)
self.assertTrue(
result['OK'],
"removeReplica failed when removing replica of non existing File %s" %
result)
self.assertTrue(
nonExistingFile in result['Value']["Successful"],
"removeReplica of non existing file, %s should be in Successful %s" %
(nonExistingFile,
result))
# removing last replica
result = self.db.removeReplica({testFile: {"SE": "otherSE"}}, credDict)
self.assertTrue(result['OK'], "removeReplica failed when removing last Replica %s" % result)
self.assertTrue(
testFile in result['Value']["Successful"],
"removeReplica failed when removing last Replica %s" %
result)
# Cleaning after us
result = self.db.removeFile(testFile, credDict)
self.assertTrue(result["OK"], "removeFile failed: %s" % result)
class DirectoryCase(FileCatalogDBTestCase):
def test_directoryOperations(self):
"""
Tests the Directory related Operations
this test requires the SE to be properly defined in the CS -> NO IT DOES NOT!!
"""
# Adding a new directory
result = self.db.createDirectory(testDir, credDict)
self.assertTrue(result['OK'], "addDirectory failed when adding new directory %s" % result)
result = self.db.addFile({testFile: {'PFN': 'testfile',
'SE': 'testSE',
'Size': 123,
'GUID': '1000',
'Checksum': '0'}}, credDict)
self.assertTrue(result['OK'], "addFile failed when adding new file %s" % result)
# Re-adding the same directory (CAUTION, different from addFile)
result = self.db.createDirectory(testDir, credDict)
self.assertTrue(result["OK"], "addDirectory failed when adding existing directory %s" % result)
self.assertTrue(
testDir in result["Value"]["Successful"],
"addDirectory failed: it should be possible to add an existing lfn %s" %
result)
result = self.db.isDirectory([testDir, nonExistingDir], credDict)
self.assertTrue(result["OK"], "isDirectory failed: %s" % result)
self.assertTrue(
testDir in result["Value"]["Successful"],
"isDirectory : %s should be in Successful %s" %
(testDir,
result))
self.assertTrue(
result["Value"]["Successful"][testDir],
"isDirectory : %s should be seen as a directory %s" %
(testDir,
result))
self.assertTrue(
nonExistingDir in result["Value"]["Successful"],
"isDirectory : %s should be in Successful %s" %
(nonExistingDir,
result))
self.assertTrue(
result["Value"]["Successful"][nonExistingDir] is False,
"isDirectory : %s should be seen as a directory %s" %
(nonExistingDir,
result))
result = self.db.getDirectorySize([testDir, nonExistingDir], False, False, credDict)
self.assertTrue(result["OK"], "getDirectorySize failed: %s" % result)
self.assertTrue(
testDir in result["Value"]["Successful"],
"getDirectorySize : %s should be in Successful %s" %
(testDir,
result))
self.assertEqual(result["Value"]["Successful"][testDir],
{'LogicalFiles': 1,
'LogicalDirectories': 0,
'LogicalSize': 123},
"getDirectorySize got incorrect directory size %s" % result)
self.assertTrue(
nonExistingDir in result["Value"]["Failed"],
"getDirectorySize : %s should be in Failed %s" %
(nonExistingDir,
result))
result = self.db.getDirectorySize([testDir, nonExistingDir], False, True, credDict)
self.assertTrue(result["OK"], "getDirectorySize (calc) failed: %s" % result)
self.assertTrue(
testDir in result["Value"]["Successful"],
"getDirectorySize (calc): %s should be in Successful %s" %
(testDir,
result))
self.assertEqual(result["Value"]["Successful"][testDir],
{'LogicalFiles': 1,
'LogicalDirectories': 0,
'LogicalSize': 123},
"getDirectorySize got incorrect directory size %s" % result)
self.assertTrue(
nonExistingDir in result["Value"]["Failed"],
"getDirectorySize (calc) : %s should be in Failed %s" %
(nonExistingDir,
result))
result = self.db.listDirectory([parentDir, testDir, nonExistingDir], credDict)
self.assertTrue(result["OK"], "listDirectory failed: %s" % result)
self.assertTrue(
parentDir in result["Value"]["Successful"],
"listDirectory : %s should be in Successful %s" %
(parentDir,
result))
self.assertEqual(result["Value"]["Successful"][parentDir]["SubDirs"].keys(), [testDir],
"listDir : incorrect content for %s (%s)" % (parentDir, result))
self.assertTrue(
testDir in result["Value"]["Successful"],
"listDirectory : %s should be in Successful %s" %
(testDir,
result))
self.assertEqual(result["Value"]["Successful"][testDir]["Files"].keys(), [testFile.split("/")[-1]],
"listDir : incorrect content for %s (%s)" % (testDir, result))
self.assertTrue(
nonExistingDir in result["Value"]["Failed"],
"listDirectory : %s should be in Failed %s" %
(nonExistingDir,
result))
# We do it two times to make sure that
# when updating something to the same value
# returns a success if it is allowed
        for attempt in range(2):
            print("Attempt %s" % (attempt + 1))
# Only admin can change path group
resultM = self.db.changePathMode({parentDir: 0o777}, credDict)
result = self.db.changePathOwner({parentDir: "toto"}, credDict)
resultG = self.db.changePathGroup({parentDir: "toto"}, credDict)
result2 = self.db.getDirectoryMetadata([parentDir, testDir], credDict)
self.assertTrue(result["OK"], "changePathOwner failed: %s" % result)
self.assertTrue(resultG["OK"], "changePathOwner failed: %s" % result)
self.assertTrue(resultM["OK"], "changePathMode failed: %s" % result)
self.assertTrue(result2["OK"], "getDirectoryMetadata failed: %s" % result)
# Since we were the owner we should have been able to do it in any case, admin or not
self.assertTrue(
parentDir in resultM["Value"]["Successful"],
"changePathMode : %s should be in Successful %s" %
(parentDir,
resultM))
self.assertEqual(
result2['Value'].get(
'Successful',
{}).get(
parentDir,
{}).get('Mode'),
0o777,
"parentDir should have mode %s %s" %
(0o777,
result2))
self.assertEqual(
result2['Value'].get(
'Successful',
{}).get(
testDir,
{}).get('Mode'),
0o775,
"testDir should not have changed %s" %
result2)
if isAdmin:
self.assertTrue(
parentDir in result["Value"]["Successful"],
"changePathOwner : %s should be in Successful %s" %
(parentDir,
result))
self.assertEqual(
result2['Value'].get(
'Successful',
{}).get(
parentDir,
{}).get('Owner'),
'toto',
"parentDir should belong to %s %s" %
(proxyUser,
result2))
self.assertEqual(
result2['Value'].get(
'Successful',
{}).get(
testDir,
{}).get('Owner'),
proxyUser,
"testDir should not have changed %s" %
result2)
self.assertTrue(
parentDir in resultG["Value"]["Successful"],
"changePathGroup : %s should be in Successful %s" %
(parentDir,
resultG))
self.assertEqual(
result2['Value'].get(
'Successful',
{}).get(
parentDir,
{}).get('OwnerGroup'),
'toto',
"parentDir should belong to %s %s" %
(proxyUser,
result2))
self.assertEqual(
result2['Value'].get(
'Successful',
{}).get(
testDir,
{}).get('OwnerGroup'),
proxyGroup,
"testDir should not have changed %s" %
result2)
else:
# depends on the policy manager so I comment
# self.assertTrue( parentDir in result["Value"]["Failed"], "changePathOwner : \
# %s should be in Failed %s" % ( parentDir, result ) )
# self.assertEqual( result2['Value'].get( 'Successful', {} ).get( parentDir, {} ).get( 'Owner' ), \
# proxyUser, "parentDir should not have changed %s" % result2 )
# self.assertEqual( result2['Value'].get( 'Successful', {} ).get( testDir, {} ).get( 'Owner' ), \
# proxyUser, "testDir should not have changed %s" % result2 )
# self.assertTrue( parentDir in resultG["Value"]["Failed"], \
# "changePathGroup : %s should be in Failed %s" % ( parentDir, resultG ) )
# self.assertEqual( result2['Value'].get( 'Successful', {} ).get( parentDir, {} ).get( 'OwnerGroup' ), \
# proxyGroup, "parentDir should not have changed %s" % result2 )
# self.assertEqual( result2['Value'].get( 'Successful', {} ).get( testDir, {} ).get( 'OwnerGroup' ), \
# proxyGroup, "testDir should not have changed %s" % result2 )
pass
# Only admin can change path group
resultM = self.db.changePathMode({parentDir: 0o777}, credDict, True)
result = self.db.changePathOwner({parentDir: "toto"}, credDict, True)
resultG = self.db.changePathGroup({parentDir: "toto"}, credDict, True)
result2 = self.db.getDirectoryMetadata([parentDir, testDir], credDict)
result3 = self.db.getFileMetadata(testFile, credDict)
self.assertTrue(result["OK"], "changePathOwner failed: %s" % result)
self.assertTrue(resultG["OK"], "changePathOwner failed: %s" % result)
self.assertTrue(resultM["OK"], "changePathMode failed: %s" % result)
self.assertTrue(result2["OK"], "getDirectoryMetadata failed: %s" % result)
self.assertTrue(result3["OK"], "getFileMetadata failed: %s" % result)
# Since we were the owner we should have been able to do it in any case, admin or not
self.assertTrue(
parentDir in resultM["Value"]["Successful"],
"changePathGroup : %s should be in Successful %s" %
(parentDir,
resultM))
self.assertEqual(
result2['Value'].get(
'Successful',
{}).get(
parentDir,
{}).get('Mode'),
0o777,
"parentDir should have mode %s %s" %
(0o777,
result2))
self.assertEqual(
result2['Value'].get(
'Successful', {}).get(
testDir, {}).get('Mode'), 0o777, "testDir should have mode %s %s" %
(0o777, result2))
self.assertEqual(
result3['Value'].get(
'Successful', {}).get(
testFile, {}).get('Mode'), 0o777, "testFile should have mode %s %s" %
(0o777, result3))
if isAdmin:
self.assertTrue(
parentDir in result["Value"]["Successful"],
"changePathOwner : %s should be in Successful %s" %
(parentDir,
result))
self.assertEqual(
result2['Value'].get(
'Successful',
{}).get(
parentDir,
{}).get('Owner'),
'toto',
"parentDir should belong to %s %s" %
(proxyUser,
result2))
self.assertEqual(
result2['Value'].get(
'Successful', {}).get(
testDir, {}).get('Owner'), 'toto', "testDir should belong to %s %s" %
(proxyUser, result2))
self.assertEqual(
result3['Value'].get(
'Successful',
{}).get(
testFile,
{}).get('Owner'),
'toto',
"testFile should belong to %s %s" %
(proxyUser,
result3))
self.assertTrue(
parentDir in resultG["Value"]["Successful"],
"changePathGroup : %s should be in Successful %s" %
(parentDir,
resultG))
self.assertEqual(
result2['Value'].get(
'Successful',
{}).get(
parentDir,
{}).get('OwnerGroup'),
'toto',
"parentDir should belong to %s %s" %
(proxyGroup,
result2))
self.assertEqual(
result2['Value'].get(
'Successful',
{}).get(
testDir,
{}).get('OwnerGroup'),
'toto',
"testDir should belong to %s %s" %
(proxyGroup,
result2))
self.assertEqual(
result3['Value'].get(
'Successful',
{}).get(
testFile,
{}).get('OwnerGroup'),
'toto',
"testFile should belong to %s %s" %
(proxyGroup,
result3))
else:
# depends on the policy manager so I comment
# self.assertTrue( parentDir in result["Value"]["Failed"], \
# "changePathOwner : %s should be in Failed %s" % ( parentDir, result ) )
# self.assertEqual( result2['Value'].get( 'Successful', {} ).get( parentDir, {} ).get( 'Owner' ), \
# proxyUser, "parentDir should not have changed %s" % result2 )
# self.assertEqual( result2['Value'].get( 'Successful', {} ).get( testDir, {} ).get( 'Owner' ), \
# proxyUser, "testDir should not have changed %s" % result2 )
# self.assertEqual( result3['Value'].get( 'Successful', {} ).get( testFile, {} ).get( 'Owner' ), \
# proxyUser, "testFile should not have changed %s" % result3 )
#
# self.assertTrue( parentDir in resultG["Value"]["Failed"], \
# "changePathGroup : %s should be in Failed %s" % ( parentDir, resultG ) )
# self.assertEqual( result2['Value'].get( 'Successful', {} ).get( parentDir, {} ).get( 'OwnerGroup' ), \
# proxyGroup, "parentDir should not have changed %s" % result2 )
# self.assertEqual( result2['Value'].get( 'Successful', {} ).get( testDir, {} ).get( 'OwnerGroup' ), \
# proxyGroup, "testDir should not have changed %s" % result2 )
# self.assertEqual( result3['Value'].get( 'Successful', {} ).get( testFile, {} ).get( 'OwnerGroup' ), \
# proxyGroup, "testFile should not have changed %s" % result3 )
pass
# Cleaning after us
result = self.db.removeFile(testFile, credDict)
self.assertTrue(result["OK"], "removeFile failed: %s" % result)
pathParts = testDir.split('/')[1:]
startDir = '/'
pathToRemove = []
for part in pathParts:
startDir = os.path.join(startDir, part)
pathToRemove.append(startDir)
pathToRemove.reverse()
for toRemove in pathToRemove:
result = self.db.removeDirectory(toRemove, credDict)
self.assertTrue(result["OK"], "removeDirectory failed: %s" % result)
class DirectoryUsageCase (FileCatalogDBTestCase):
def getPhysicalSize(self, sizeDict, dirName, seName):
""" Extract the information from a ret dictionary
and return the tuple (files, size) for a given
directory and a se
"""
val = sizeDict[dirName]['PhysicalSize'][seName]
files = val['Files']
size = val['Size']
return (files, size)
def getLogicalSize(self, sizeDict, dirName):
""" Extract the information from a ret dictionary
and return the tuple (files, size) for a given
directory and a se
"""
files = sizeDict[dirName]['LogicalFiles']
size = sizeDict[dirName]['LogicalSize']
return (files, size)
def getAndCompareDirectorySize(self, dirList):
""" Fetch the directory size from the DirectoryUsage table
and calculate it, compare the results, and then return
the values
"""
retTable = self.db.getDirectorySize(dirList, True, False, credDict)
retCalc = self.db.getDirectorySize(dirList, True, True, credDict)
self.assertTrue(retTable["OK"])
self.assertTrue(retCalc["OK"])
succTable = retTable['Value']['Successful']
succCalc = retCalc['Value']['Successful']
# Since we have simple type, the == is recursive for dict :-)
retEquals = (succTable == succCalc)
self.assertTrue(retEquals, "Calc and table results different %s %s" % (succTable, succCalc))
return retTable
def test_directoryUsage(self):
"""Testing DirectoryUsage related operation"""
# create SE
# Only admin can run that
if not isAdmin:
return
d1 = '/sizeTest/d1'
d2 = '/sizeTest/d2'
f1 = d1 + '/f1'
f2 = d1 + '/f2'
f3 = d2 + '/f3'
f1Size = 3000000000
f2Size = 3000000001
f3Size = 3000000002
# f1Size = 1
# f2Size = 2
# f3Size = 5
for sen in ['se1', 'se2', 'se3']:
ret = self.db.addSE(sen, credDict)
self.assertTrue(ret["OK"])
for din in [d1, d2]:
ret = self.db.createDirectory(din, credDict)
self.assertTrue(ret["OK"])
ret = self.db.addFile({f1: {'PFN': 'f1se1',
'SE': 'se1',
'Size': f1Size,
'GUID': '1002',
'Checksum': '1'},
f2: {'PFN': 'f2se2',
'SE': 'se2',
'Size': f2Size,
'GUID': '1001',
'Checksum': '2'}}, credDict)
self.assertTrue(ret["OK"])
ret = self.getAndCompareDirectorySize([d1, d2])
self.assertTrue(ret["OK"])
val = ret['Value']['Successful']
d1s1 = self.getPhysicalSize(val, d1, 'se1')
d1s2 = self.getPhysicalSize(val, d1, 'se2')
d1l = self.getLogicalSize(val, d1)
self.assertEqual(d1s1, (1, f1Size), "Unexpected size %s, expected %s" % (d1s1, (1, f1Size)))
self.assertEqual(d1s2, (1, f2Size), "Unexpected size %s, expected %s" % (d1s2, (1, f2Size)))
self.assertEqual(
d1l, (2, f1Size + f2Size), "Unexpected size %s, expected %s" %
(d1l, (2, f1Size + f2Size)))
ret = self.db.addReplica({f1: {"PFN": "f1se2", "SE": "se2"},
f2: {"PFN": "f1se3", "SE": "se3"}},
credDict)
self.assertTrue(ret['OK'])
ret = self.getAndCompareDirectorySize([d1, d2])
self.assertTrue(ret["OK"])
val = ret['Value']['Successful']
d1s1 = self.getPhysicalSize(val, d1, 'se1')
d1s2 = self.getPhysicalSize(val, d1, 'se2')
d1s3 = self.getPhysicalSize(val, d1, 'se3')
d1l = self.getLogicalSize(val, d1)
self.assertEqual(d1s1, (1, f1Size), "Unexpected size %s, expected %s" % (d1s1, (1, f1Size)))
self.assertEqual(
d1s2, (2, f1Size + f2Size), "Unexpected size %s, expected %s" %
(d1s2, (2, f1Size + f2Size)))
self.assertEqual(d1s3, (1, f2Size), "Unexpected size %s, expected %s" % (d1s3, (1, f2Size)))
self.assertEqual(
d1l, (2, f1Size + f2Size), "Unexpected size %s, expected %s" %
(d1l, (2, f1Size + f2Size)))
ret = self.db.removeFile([f1], credDict)
self.assertTrue(ret['OK'])
ret = self.getAndCompareDirectorySize([d1, d2])
self.assertTrue(ret["OK"])
val = ret['Value']['Successful']
# Here we should have the KeyError, since there are no files left on s1 in principle
try:
d1s1 = self.getPhysicalSize(val, d1, 'se1')
except KeyError:
d1s1 = (0, 0)
d1s2 = self.getPhysicalSize(val, d1, 'se2')
d1s3 = self.getPhysicalSize(val, d1, 'se3')
d1l = self.getLogicalSize(val, d1)
self.assertEqual(d1s1, (0, 0), "Unexpected size %s, expected %s" % (d1s1, (0, 0)))
self.assertEqual(d1s2, (1, f2Size), "Unexpected size %s, expected %s" % (d1s2, (1, f2Size)))
self.assertEqual(d1s3, (1, f2Size), "Unexpected size %s, expected %s" % (d1s3, (1, f2Size)))
self.assertEqual(d1l, (1, f2Size), "Unexpected size %s, expected %s" % (d1l, (1, f2Size)))
ret = self.db.removeReplica({f2: {"SE": "se2"}}, credDict)
self.assertTrue(ret['OK'])
ret = self.getAndCompareDirectorySize([d1, d2])
self.assertTrue(ret["OK"])
val = ret['Value']['Successful']
# Here we should have the KeyError, since there are no files left on s1 in principle
try:
d1s2 = self.getPhysicalSize(val, d1, 'se1')
except KeyError:
d1s2 = (0, 0)
d1s3 = self.getPhysicalSize(val, d1, 'se3')
d1l = self.getLogicalSize(val, d1)
self.assertEqual(d1s2, (0, 0), "Unexpected size %s, expected %s" % (d1s2, (0, 0)))
self.assertEqual(d1s3, (1, f2Size), "Unexpected size %s, expected %s" % (d1s3, (1, f2Size)))
self.assertEqual(d1l, (1, f2Size), "Unexpected size %s, expected %s" % (d1l, (1, f2Size)))
ret = self.db.addFile({f1: {'PFN': 'f1se1',
'SE': 'se1',
'Size': f1Size,
'GUID': '1002',
'Checksum': '1'},
f3: {'PFN': 'f3se3',
'SE': 'se3',
'Size': f3Size,
'GUID': '1003',
'Checksum': '3'}}, credDict)
self.assertTrue(ret["OK"])
ret = self.getAndCompareDirectorySize([d1, d2])
self.assertTrue(ret["OK"])
val = ret['Value']['Successful']
d1s1 = self.getPhysicalSize(val, d1, 'se1')
d1s3 = self.getPhysicalSize(val, d1, 'se3')
d2s3 = self.getPhysicalSize(val, d2, 'se3')
d1l = self.getLogicalSize(val, d1)
d2l = self.getLogicalSize(val, d2)
self.assertEqual(d1s1, (1, f1Size), "Unexpected size %s, expected %s" % (d1s1, (1, f1Size)))
self.assertEqual(d1s3, (1, f2Size), "Unexpected size %s, expected %s" % (d1s3, (1, f2Size)))
self.assertEqual(d2s3, (1, f3Size), "Unexpected size %s, expected %s" % (d2s3, (1, f3Size)))
self.assertEqual(
d1l, (2, f1Size + f2Size), "Unexpected size %s, expected %s" %
(d1l, (2, f1Size + f2Size)))
self.assertEqual(d2l, (1, f3Size), "Unexpected size %s, expected %s" % (d2l, (1, f3Size)))
ret = self.db.removeReplica({f1: {"SE": "se1"}}, credDict)
self.assertTrue(ret['OK'])
ret = self.getAndCompareDirectorySize([d1, d2])
self.assertTrue(ret["OK"])
val = ret['Value']['Successful']
try:
d1s1 = self.getPhysicalSize(val, d1, 'se1')
except KeyError:
d1s1 = (0, 0)
d1s3 = self.getPhysicalSize(val, d1, 'se3')
d2s3 = self.getPhysicalSize(val, d2, 'se3')
d1l = self.getLogicalSize(val, d1)
d2l = self.getLogicalSize(val, d2)
self.assertEqual(d1s1, (0, 0), "Unexpected size %s, expected %s" % (d1s1, (0, 0)))
self.assertEqual(d1s3, (1, f2Size), "Unexpected size %s, expected %s" % (d1s3, (1, f2Size)))
self.assertEqual(d2s3, (1, f3Size), "Unexpected size %s, expected %s" % (d2s3, (1, f3Size)))
# This one is silly... there are no replicas of f1, but since the file is still there,
# the logical size does not change
self.assertEqual(
d1l, (2, f1Size + f2Size), "Unexpected size %s, expected %s" %
(d1l, (2, f1Size + f2Size)))
self.assertEqual(d2l, (1, f3Size), "Unexpected size %s, expected %s" % (d2l, (1, f3Size)))
ret = self.db.removeFile([f1], credDict)
self.assertTrue(ret['OK'])
ret = self.getAndCompareDirectorySize([d1, d2])
self.assertTrue(ret["OK"])
val = ret['Value']['Successful']
try:
d1s1 = self.getPhysicalSize(val, d1, 'se1')
except KeyError:
d1s1 = (0, 0)
d1s3 = self.getPhysicalSize(val, d1, 'se3')
d2s3 = self.getPhysicalSize(val, d2, 'se3')
d1l = self.getLogicalSize(val, d1)
d2l = self.getLogicalSize(val, d2)
self.assertEqual(d1s1, (0, 0), "Unexpected size %s, expected %s" % (d1s1, (0, 0)))
self.assertEqual(d1s3, (1, f2Size), "Unexpected size %s, expected %s" % (d1s3, (1, f2Size)))
self.assertEqual(d2s3, (1, f3Size), "Unexpected size %s, expected %s" % (d2s3, (1, f3Size)))
self.assertEqual(d1l, (1, f2Size), "Unexpected size %s, expected %s" % (d1l, (1, f2Size)))
self.assertEqual(d2l, (1, f3Size), "Unexpected size %s, expected %s" % (d2l, (1, f3Size)))
ret = self.db.removeReplica({f2: {"SE": "se3"},
f3: {"SE": "se3"}}, credDict)
self.assertTrue(ret['OK'])
ret = self.getAndCompareDirectorySize([d1, d2])
self.assertTrue(ret["OK"])
val = ret['Value']['Successful']
try:
d1s1 = self.getPhysicalSize(val, d1, 'se1')
except KeyError:
d1s1 = (0, 0)
try:
d1s3 = self.getPhysicalSize(val, d1, 'se3')
except KeyError:
d1s3 = (0, 0)
try:
d2s3 = self.getPhysicalSize(val, d2, 'se3')
except KeyError:
d2s3 = (0, 0)
d1l = self.getLogicalSize(val, d1)
d2l = self.getLogicalSize(val, d2)
self.assertEqual(d1s1, (0, 0), "Unexpected size %s, expected %s" % (d1s1, (0, 0)))
self.assertEqual(d1s3, (0, 0), "Unexpected size %s, expected %s" % (d1s3, (0, 0)))
self.assertEqual(d2s3, (0, 0), "Unexpected size %s, expected %s" % (d2s3, (0, 0)))
    # There are no replicas of f2 and f3 anymore, but since the files themselves
    # are still there, the logical sizes do not change
self.assertEqual(d1l, (1, f2Size), "Unexpected size %s, expected %s" % (d1l, (1, f2Size)))
self.assertEqual(d2l, (1, f3Size), "Unexpected size %s, expected %s" % (d2l, (1, f3Size)))
ret = self.db.removeFile([f2, f3], credDict)
self.assertTrue(ret['OK'])
ret = self.getAndCompareDirectorySize([d1, d2])
self.assertTrue(ret["OK"])
val = ret['Value']['Successful']
try:
d1s1 = self.getPhysicalSize(val, d1, 'se1')
except KeyError:
d1s1 = (0, 0)
try:
d1s3 = self.getPhysicalSize(val, d1, 'se3')
except KeyError:
d1s3 = (0, 0)
try:
d2s3 = self.getPhysicalSize(val, d2, 'se3')
except KeyError:
d2s3 = (0, 0)
d1l = self.getLogicalSize(val, d1)
d2l = self.getLogicalSize(val, d2)
self.assertEqual(d1s1, (0, 0), "Unexpected size %s, expected %s" % (d1s1, (0, 0)))
self.assertEqual(d1s3, (0, 0), "Unexpected size %s, expected %s" % (d1s3, (0, 0)))
self.assertEqual(d2s3, (0, 0), "Unexpected size %s, expected %s" % (d2s3, (0, 0)))
    # Both files have now been removed, so the logical sizes drop to zero as well
self.assertEqual(d1l, (0, 0), "Unexpected size %s, expected %s" % (d1l, (0, 0)))
self.assertEqual(d2l, (0, 0), "Unexpected size %s, expected %s" % (d2l, (0, 0)))
# Removing Replicas and Files from the same directory
ret = self.db.addFile({f1: {'PFN': 'f1se1',
'SE': 'se1',
'Size': f1Size,
'GUID': '1002',
'Checksum': '1'},
f2: {'PFN': 'f2se2',
'SE': 'se1',
'Size': f2Size,
'GUID': '1001',
'Checksum': '2'}}, credDict)
ret = self.db.removeReplica({f1: {"SE": "se1"},
f2: {"SE": "se1"}}, credDict)
self.assertTrue(ret['OK'])
ret = self.getAndCompareDirectorySize([d1])
self.assertTrue(ret["OK"])
val = ret['Value']['Successful']
try:
d1s1 = self.getPhysicalSize(val, d1, 'se1')
except KeyError:
d1s1 = (0, 0)
self.assertEqual(d1s1, (0, 0), "Unexpected size %s, expected %s" % (d1s1, (0, 0)))
ret = self.db.removeFile([f1, f2], credDict)
self.assertTrue(ret['OK'])
ret = self.getAndCompareDirectorySize([d1])
self.assertTrue(ret["OK"])
val = ret['Value']['Successful']
d1l = self.getLogicalSize(val, d1)
self.assertEqual(d1l, (0, 0), "Unexpected size %s, expected %s" % (d1l, (0, 0)))
# Try removing a replica from a non existing SE
ret = self.db.addFile({f1: {'PFN': 'f1se1',
'SE': 'se1',
'Size': f1Size,
'GUID': '1002',
'Checksum': '1'}}, credDict)
ret = self.db.removeReplica({f1: {"SE": "se2"}}, credDict)
self.assertTrue(ret['OK'])
ret = self.getAndCompareDirectorySize([d1])
self.assertTrue(ret["OK"])
val = ret['Value']['Successful']
try:
d1s2 = self.getPhysicalSize(val, d1, 'se2')
except KeyError:
d1s2 = (0, 0)
self.assertEqual(d1s2, (0, 0), "Unexpected size %s, expected %s" % (d1s2, (0, 0)))
if __name__ == '__main__':
managerTypes = MANAGER_TO_TEST.keys()
all_combinations = list(itertools.product(*MANAGER_TO_TEST.values()))
numberOfManager = len(managerTypes)
for setup in all_combinations:
print "Running with:"
print ("".join(["\t %s : %s\n" % (managerTypes[i], setup[i]) for i in xrange(numberOfManager)]))
for i in xrange(numberOfManager):
DATABASE_CONFIG[managerTypes[i]] = setup[i]
suite = unittest.defaultTestLoader.loadTestsFromTestCase(SECase)
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(UserGroupCase))
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(FileCase))
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(ReplicaCase))
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(DirectoryCase))
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(DirectoryUsageCase))
        # First run without admin privileges:
isAdmin = False
if FC_MANAGEMENT in credDict['properties']:
credDict['properties'].remove(FC_MANAGEMENT)
print "Running test without admin privileges"
testResult = unittest.TextTestRunner(verbosity=2).run(suite)
        # Then run with admin privileges:
isAdmin = True
if FC_MANAGEMENT not in credDict['properties']:
credDict['properties'].append(FC_MANAGEMENT)
print "Running test with admin privileges"
testResult = unittest.TextTestRunner(verbosity=2).run(suite)
sys.exit(not testResult.wasSuccessful())
|
import shm
from auv_math.math_utils import rotate
from mission.framework.combinators import Concurrent, Defer, Sequential
from mission.framework.movement import RelativeToInitialPositionN, RelativeToInitialPositionE, PositionN, PositionE, Heading, Depth
from mission.framework.task import Task
from mission.framework.primitive import FunctionTask
from shm import kalman
class WithPositionalControl(Task):
def on_first_run(self, task, enable=True, optimize=False):
enable_var = shm.navigation_settings.position_controls
optimize_var = shm.navigation_settings.optimize
init_enable, init_optimize = enable_var.get(), optimize_var.get()
def set_shm(enable, optimize):
enable_var.set(enable)
optimize_var.set(optimize)
self.use_task(Defer(
Sequential(
FunctionTask(lambda: set_shm(enable, optimize)),
task,
),
FunctionTask(lambda: set_shm(init_enable, init_optimize)),
))
class PositionalControl(Task):
def on_run(self, enable=True, *args, **kwargs):
shm.navigation_settings.position_controls.set(enable)
self.finish()
class MoveXY(Task):
def on_first_run(self, vector, deadband=0.01, *args, **kwargs):
delta_north, delta_east = rotate(vector, kalman.heading.get())
n_position = RelativeToInitialPositionN(offset=delta_north, error=deadband)
e_position = RelativeToInitialPositionE(offset=delta_east, error=deadband)
self.use_task(WithPositionalControl(
Concurrent(n_position, e_position, finite=False),
))
class MoveAngle(MoveXY):
def on_first_run(self, angle, distance, deadband=0.01, *args, **kwargs):
super().on_first_run(rotate((distance, 0), angle), deadband=deadband)
def MoveX(distance, deadband=0.01):
return MoveAngle(0, distance, deadband)
def MoveY(distance, deadband=0.01):
return MoveAngle(90, distance, deadband)
class GoToPosition(Task):
def on_first_run(self, north, east, heading=None, depth=None, optimize=False, rough=False, deadband=0.05):
self.north = north
self.east = east
if heading is None:
self.heading = shm.navigation_desires.heading.get()
else:
self.heading = heading
if depth is None:
self.depth = shm.navigation_desires.depth.get()
else:
self.depth = depth
self.use_task(WithPositionalControl(
Concurrent(
PositionN(self.north, error=deadband),
PositionE(self.east, error=deadband),
Heading(self.heading, error=deadband),
Depth(self.depth, error=deadband)
),
optimize=optimize,
))
def NavigationSpeed(task, speed):
speed_var = shm.navigation_settings.max_speed
init_speed = speed_var.get()
return Defer(
Sequential(
FunctionTask(lambda: speed_var.set(speed)),
task,
),
FunctionTask(lambda: speed_var.set(init_speed)),
)
class CheckDistance(Task):
"""
Finished once we've traveled too far
"""
def on_first_run(self, *args, **kwargs):
self.initial_pos = self.pos()
def on_run(self, distance, *args, **kwargs):
if self.dist_sq(self.pos(), self.initial_pos) > distance ** 2:
self.finish()
def pos(self):
return [kalman.north.get(), kalman.east.get()]
def dist_sq(self, p1, p2):
return (p2[0] - p1[0]) ** 2 + (p2[1] - p1[1]) ** 2
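# Illustrative composition sketch (not part of the original mission set); it
# only uses the tasks and helpers defined above, with made-up distances/speed.
def ExampleSquarePattern(side=2.0, speed=0.3):
    return NavigationSpeed(Sequential(
        MoveX(side),   # forward in the current heading
        MoveY(side),   # sideways
        MoveX(-side),
        MoveY(-side),
    ), speed)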
|
"""
scada.py
"""
from minicps.devices import SCADAServer
from utils import SCADA_PROTOCOL, STATE
from utils import SCADA_PERIOD_SEC
from utils import IP
from utils import CO_0_2a, CO_1_2a, CO_2_2a, CO_3_2a
from utils import HR_0_2a
import time
RTU2A_ADDR = IP['rtu2a'] + ':502'
RTU2B_ADDR = IP['rtu2b'] + ':502'
SCADA_ADDR = IP['scada'] + ':502'
class SCADAServer(SCADAServer):
def pre_loop(self, sleep=0.5):
"""scada pre loop.
- sleep
"""
time.sleep(sleep)
def main_loop(self):
"""scada main loop.
For each RTU in the network
- Read the pump status
"""
while(True):
#co_00_2a = self.receive(CO_0_2a, RTU2A_ADDR)
co_00_2a = self.receive(CO_0_2a, SCADA_ADDR)
# NOTE: used for testing first challenge
#print('DEBUG scada from rtu2a: CO 0-0 2a: {}'.format(co_00_2a))
# NOTE: used for testing second challenge
# NOTE: comment out
# hr_03_2a = self.receive(HR_0_2a, RTU2B_ADDR, count=3)
# print('DEBUG scada from rtu2b: HR 0-2 2a: {}'.format(hr_03_2a))
# print("DEBUG: scada main loop")
time.sleep(SCADA_PERIOD_SEC)
if __name__ == "__main__":
scada = SCADAServer(
name='scada',
state=STATE,
protocol=SCADA_PROTOCOL)
|
#!/usr/bin/python
#
# Copyright 2014 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
#
# Tests for functionality in the proxy.py file.
import proxy
import unittest
class GetTargetUrlTest(unittest.TestCase):
def testSimple(self):
cases = [
('foo/bar', '/?q=foo/bar'),
('/home/~user', '/?q=/home/%7Euser')
]
for expected, path in cases:
actual = proxy.GetTargetUrl(path)
if expected != actual:
print 'Failed conversion for %s' % path
print 'expected: %s' % expected
print ' actual: %s' % actual
self.assertEquals(expected, actual)
if __name__ == '__main__':
unittest.main()
|
###
### Code for parsing extended SendDragonKeys syntax into a series of
### Input events suitable for calling SendInput with.
###
### Uses ctypes (requires Python 3.6+ for f-strings).
###
### Assumes input is 8-bit Windows-1252 encoding.
###
###
### Author: Mark Lillibridge
### Version: 0.7
###
import re
from ctypes import *
from recognition.actions.library.vocola.sendinput import *
debug = False
###
### Break SendDragonKeys input into the chords that make it up. Each
### chord is represented in terms of its three parts: modifiers, base,
### and effect.
###
### E.g., "a{shift+left_10} " -> [[None, "a", None], ["shift", "left",
### "10"], [None, "space", None]]
###
### Update: The chord's text is also stored for unparsing without information loss.
### E.g., "{{}" -> [None, "{", None, "{{}"]
###
def parse_into_chords(specification):
chords = []
while len(specification) > 0:
m = chord_pattern.match(specification)
if not m:
raise ValueError(f'Cannot parse chords from specification {specification}')
modifiers = m.group(1)
if modifiers: modifiers = modifiers[:-1] # remove final "+"
chords += [[modifiers, m.group(2), m.group(3), m.group(0)]]
specification = specification[m.end():]
return chords
# Because we can't be sure of the current code page, treat all non-ASCII
# characters as potential accented letters for now.
chord_pattern = re.compile(r"""\{ ( (?: [a-zA-Z0-9\x80-\xff]+ \+ )* )
( . | [-a-zA-Z0-9/*+.\x80-\xff]+ )
(?: [ _] (\d+|hold|release) )?
\}""", re.VERBOSE|re.IGNORECASE)
###
###
###
def chord_to_events(chord):
modifiers, base, effect, text = chord
if base == " ":
base = "space"
if modifiers:
modifiers = modifiers.split("+")
else:
modifiers = []
hold_count = release_count = 1
if effect:
effect = effect.lower()
if effect == "hold": release_count = 0
elif effect == "release": hold_count = 0
else:
hold_count = int(effect)
if hold_count == 0:
# check for bad names even when no events:
for modifier in modifiers:
single(modifier, False)
single(base, False)
return []
if len(base) == 1:
try:
m, f = how_type_character(base)
if debug and (len(m)>0 or describe_key(f)!=base):
mm = ""
if m: mm = '+'.join(m) + "+"
bb = "<" + base + ">"
if ord(base[0])<32: bb = hex(ord(base[0]))
print("typing " + bb + " by {" + mm + describe_key(f) + "}")
modifiers += m
base = "VK" + hex(f)
except:
if debug and ord(base[0])<128:
bb = "<" + base + ">"
if ord(base[0])<32: bb = hex(ord(base[0]))
print("can't type " + bb + " on current keyboard layout")
pass
events = []
modifiers_down = []
modifiers_up = []
for modifier in modifiers:
modifiers_down += single(modifier, False)
modifiers_up = modifiers_up + single(modifier, True)
try:
# down down up (hardware auto-repeat style) fails so use down,up pairs:
if hold_count > 1:
return modifiers_down \
+ (single(base,False)+single(base, True))*hold_count \
+ modifiers_up
if hold_count > 0:
events += modifiers_down + single(base,False)*hold_count
if release_count > 0:
events += single(base, True) + modifiers_up
return events
except:
if len(base) != 1:
raise
if len(modifiers) != 0:
print("Warning: unable to use modifiers with character: " + base)
# Unicode?
if release_count==0:
print("Warning: unable to independently hold character: " + base)
if hold_count==0:
print("Warning: unable to independently release character: " + base)
return []
if debug:
print("using numpad entry for: " + base)
return windows1252_to_events(ord(base[0])) * hold_count
###
### Pressing/releasing a single generalized virtual key or mouse button
###
##
## Keyboard key names:
##
Key_name = {
#
# SendDragonKeys virtual key names:
#
"alt" : VK_MENU,
"back" : VK_BACK,
"backspace" : VK_BACK,
"break" : VK_CANCEL,
"capslock" : VK_CAPITAL,
"center" : VK_CLEAR,
"ctrl" : VK_CONTROL,
"del" : VK_DELETE,
"down" : VK_DOWN,
"end" : VK_END,
"enter" : VK_RETURN,
"esc" : VK_ESCAPE,
"escape" : VK_ESCAPE,
"home" : VK_HOME,
"ins" : VK_INSERT,
"left" : VK_LEFT,
"numlock" : VK_NUMLOCK,
"pagedown" : VK_NEXT,
"pageup" : VK_PRIOR,
"pgdn" : VK_NEXT,
"pgup" : VK_PRIOR,
"pause" : VK_PAUSE,
"prtsc" : VK_SNAPSHOT,
"right" : VK_RIGHT,
"scrolllock" : VK_SCROLL,
"shift" : VK_SHIFT,
"space" : VK_SPACE,
#"sysreq" : VK_SYSREQ,# <<<>>>
"tab" : VK_TAB,
"up" : VK_UP,
"f1" : VK_F1,
"f2" : VK_F2,
"f3" : VK_F3,
"f4" : VK_F4,
"f5" : VK_F5,
"f6" : VK_F6,
"f7" : VK_F7,
"f8" : VK_F8,
"f9" : VK_F9,
"f10" : VK_F10,
"f11" : VK_F11,
"f12" : VK_F12,
"f13" : VK_F13,
"f14" : VK_F14,
"f15" : VK_F15,
"f16" : VK_F16,
"numkey/" : VK_DIVIDE,
"numkey*" : VK_MULTIPLY,
"numkey-" : VK_SUBTRACT,
"numkey+" : VK_ADD,
"numkey0" : VK_NUMPAD0,
"numkey1" : VK_NUMPAD1,
"numkey2" : VK_NUMPAD2,
"numkey3" : VK_NUMPAD3,
"numkey4" : VK_NUMPAD4,
"numkey5" : VK_NUMPAD5,
"numkey6" : VK_NUMPAD6,
"numkey7" : VK_NUMPAD7,
"numkey8" : VK_NUMPAD8,
"numkey9" : VK_NUMPAD9,
"numkey." : VK_DECIMAL,
"numkeyenter" : GK_NUM_RETURN,
"extdel" : GK_NUM_DELETE,
"extdown" : GK_NUM_DOWN,
"extend" : GK_NUM_END,
"exthome" : GK_NUM_HOME,
"extins" : GK_NUM_INSERT,
"extleft" : GK_NUM_LEFT,
"extpgdn" : GK_NUM_NEXT,
"extpgup" : GK_NUM_PRIOR,
"extright" : GK_NUM_RIGHT,
"extup" : GK_NUM_UP,
"leftalt" : VK_LMENU,
"rightalt" : VK_RMENU,
"leftctrl" : VK_LCONTROL,
"rightctrl" : VK_RCONTROL,
"leftshift" : VK_LSHIFT,
"rightshift" : VK_RSHIFT,
"0" : VK_0,
"1" : VK_1,
"2" : VK_2,
"3" : VK_3,
"4" : VK_4,
"5" : VK_5,
"6" : VK_6,
"7" : VK_7,
"8" : VK_8,
"9" : VK_9,
"a" : VK_A,
"b" : VK_B,
"c" : VK_C,
"d" : VK_D,
"e" : VK_E,
"f" : VK_F,
"g" : VK_G,
"h" : VK_H,
"i" : VK_I,
"j" : VK_J,
"k" : VK_K,
"l" : VK_L,
"m" : VK_M,
"n" : VK_N,
"o" : VK_O,
"p" : VK_P,
"q" : VK_Q,
"r" : VK_R,
"s" : VK_S,
"t" : VK_T,
"u" : VK_U,
"v" : VK_V,
"w" : VK_W,
"x" : VK_X,
"y" : VK_Y,
"z" : VK_Z,
#
# New names for virtual keys:
#
"win" : VK_LWIN,
"leftwin" : VK_LWIN,
"rightwin" : VK_RWIN,
"apps" : VK_APPS, # name may change...
"f17" : VK_F17,
"f18" : VK_F18,
"f19" : VK_F19,
"f20" : VK_F20,
"f21" : VK_F21,
"f22" : VK_F22,
"f23" : VK_F23,
"f24" : VK_F24,
"browserback" : VK_BROWSER_BACK,
"browserfavorites" : VK_BROWSER_FAVORITES,
"browserforward" : VK_BROWSER_FORWARD,
"browserhome" : VK_BROWSER_HOME,
"browserrefresh" : VK_BROWSER_REFRESH,
"browsersearch" : VK_BROWSER_SEARCH,
"browserstop" : VK_BROWSER_STOP,
# these names may change in the future...
"launchapp1" : VK_LAUNCH_APP1,
"launchapp2" : VK_LAUNCH_APP2,
"launchmail" : VK_LAUNCH_MAIL,
"launchmediaselect" : VK_LAUNCH_MEDIA_SELECT,
"medianexttrack" : VK_MEDIA_NEXT_TRACK,
"mediaplaypause" : VK_MEDIA_PLAY_PAUSE,
"mediaprevioustrack" : VK_MEDIA_PREV_TRACK,
"mediastop" : VK_MEDIA_STOP,
"volumedown" : VK_VOLUME_DOWN,
"volumemute" : VK_VOLUME_MUTE,
"volumeup" : VK_VOLUME_UP,
# possibly more names to come...
"oem1" : VK_OEM_1,
"oem2" : VK_OEM_2,
"oem3" : VK_OEM_3,
"oem4" : VK_OEM_4,
"oem5" : VK_OEM_5,
"oem6" : VK_OEM_6,
"oem7" : VK_OEM_7,
"oem8" : VK_OEM_8,
"oem102" : VK_OEM_102,
"oemcomma" : VK_OEM_COMMA,
"oemminus" : VK_OEM_MINUS,
"oemperiod" : VK_OEM_PERIOD,
"oemplus" : VK_OEM_PLUS,
}
Code_to_name = {}
for name in Key_name.keys():
Code_to_name[Key_name[name]] = name
def describe_key(code):
try:
return Code_to_name[code]
except:
return "VK" + hex(code)
##
## Mouse button names:
##
Button_name = {
"leftbutton" : "left", # really primary button
"middlebutton" : "middle",
"rightbutton" : "right", # really secondary button
"xbutton1" : "X1",
"xbutton2" : "X2",
}
GetSystemMetrics = windll.user32.GetSystemMetrics
GetSystemMetrics.argtypes = [c_int]
GetSystemMetrics.restype = c_int
# Convert ExtendSendDragonKeys mouse button names to those required
# by SendInput.py, swapping left & right buttons if user has "Switch
# primary and secondary buttons" selected:
def get_mouse_button(button_name):
try:
button = Button_name[button_name.lower()]
if button=="left" or button=="right":
if GetSystemMetrics(win32con.SM_SWAPBUTTON):
if button=="left":
button = "right"
else:
button = "left"
return button
except:
raise KeyError("unknown mouse button: " + key)
##
## Create a single virtual event to press or release a keyboard key or
## mouse button:
##
def single(key, releasing):
# universal syntax is VK0xhh for virtual key with code 0xhh:
if key[0:4] == "VK0x":
return [virtual_key_event(int(key[4:],16), releasing)]
lower_key = key.lower()
try:
return [virtual_key_event(Key_name[lower_key], releasing)]
except:
try:
return [mouse_button_event(get_mouse_button(lower_key), releasing)]
except:
raise KeyError("unknown key/button: " + key)
###
###
###
DWORD = c_ulong # 32 bits
SHORT = c_short # 16 bits
#TCHAR = c_char # if not using Unicode
TCHAR = c_wchar # if using Unicode
HKL = HANDLE = PVOID = c_void_p
GetKeyboardLayout = windll.user32.GetKeyboardLayout
GetKeyboardLayout.argtypes = [DWORD]
GetKeyboardLayout.restype = HKL
VkKeyScan = windll.user32.VkKeyScanW
VkKeyScan.argtypes = [TCHAR]
VkKeyScan.restype = SHORT
VkKeyScanEx = windll.user32.VkKeyScanExW
VkKeyScanEx.argtypes = [TCHAR, HKL]
VkKeyScanEx.restype = SHORT
def how_type_character(char):
how_type = VkKeyScan(char)
virtual_key = how_type & 0xff
if virtual_key == 0xff:
raise ValueError("unable to type character with current keyboard layout: "
+ char)
modifiers = []
if how_type&0x400: modifiers += ["alt"]
if how_type&0x200: modifiers += ["ctrl"]
if how_type&0x100: modifiers += ["shift"]
if how_type&0xf800:
raise ValueError("unknown modifiers required, tell MDL: " + hex(how_type))
return modifiers, virtual_key
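# For orientation (layout-dependent, so treat this as an assumption): on a
# typical US keyboard layout VkKeyScan('A') returns 0x141, which the code
# above decomposes into modifiers ['shift'] and virtual key 0x41 (VK_A).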
###
###
###
def windows1252_to_events(code):
events = []
events += single("alt", False)
events += numpad(0)
    events += numpad(code // 100 % 10)
    events += numpad(code // 10 % 10)
    events += numpad(code % 10)
events += single("alt", True)
return events
def numpad(i):
return chord_to_events([None, "numkey"+str(i), None, "{numkey"+str(i)+"}"])
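# Hedged example of the numpad fallback above: Windows-1252 code 233 is 'e'
# with an acute accent, so windows1252_to_events(233) should emit alt-down,
# numpad 0-2-3-3 (as down/up pairs), then alt-up -- the classic Alt+0233 entry.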
|
import pytest
from pytest_django.fixtures import db
from uuid import uuid4
from django.core.management import call_command
from django.db import connection
from councilmatic_core.models import Bill, BillDocument, Event, EventDocument
@pytest.fixture
@pytest.mark.django_db
def organizations(db):
call_command('loaddata', 'tests/fixtures/organization.json')
@pytest.fixture
@pytest.mark.django_db
def bills(db):
call_command('loaddata', 'tests/fixtures/bill.json')
@pytest.fixture
@pytest.mark.django_db
def people(db):
call_command('loaddata', 'tests/fixtures/person.json')
@pytest.fixture
@pytest.mark.django_db
def events(db):
call_command('loaddata', 'tests/fixtures/event.json')
@pytest.fixture
@pytest.mark.django_db
def metro_bill(db):
bill_info = {
'ocd_id': 'ocd-bill/8ad8fe5a-59a0-4e06-88bd-58d6d0e5ef1a',
'description': 'CONSIDER: A. AUTHORIZING the CEO to execute Modification No. 2 to Contract C1153, Advanced Utility Relocations (Westwood/UCLA Station), with Steve Bubalo Construction Company for supply and installation of equipment for a traffic Video Detection System (VDS) required by Los Angeles Department of Transportation (LADOT), in the amount of $567,554, increasing the total contract value from $11,439,000 to $12,006,554; and B. APPROVING an increase in Contract Modification Authority (CMA) to Contract C1153, Advanced Utility Relocations (Westwood/UCLA Station), increasing the current CMA from $1,143,900 to $2,287,800.',
'identifier': '2018-0285',
'ocd_created_at': '2017-01-16 15:00:30.329048-06',
'ocd_updated_at': '2017-01-16 15:00:30.329048-06',
'updated_at': '2017-01-16 15:00:30.329048-06',
}
bill = Bill.objects.create(**bill_info)
return bill
@pytest.fixture
@pytest.mark.django_db
def metro_event(db):
event_info = {
'ocd_id': 'ocd-event/17fdaaa3-0aba-4df0-9893-2c2e8e94d18d',
'ocd_created_at': '2017-05-27 11:10:46.574-05',
'ocd_updated_at': '2017-05-27 11:10:46.574-05',
'name': 'System Safety, Security and Operations Committee',
'start_time': '2017-05-18 12:15:00-05',
'updated_at': '2017-05-17 11:06:47.1853',
'slug': uuid4(),
}
event = Event.objects.create(**event_info)
return event
@pytest.fixture
@pytest.mark.django_db
def metro_bill_document(metro_bill, db):
document_info = {
'bill_id': metro_bill.ocd_id,
'document_type': 'V',
'updated_at': '2017-02-16 15:00:30.329048-06',
'full_text': '',
'note': 'Board Report',
'url': 'https://metro.legistar.com/ViewReport.ashx?M=R&N=TextL5&GID=557&ID=5016&GUID=LATEST&Title=Board+Report',
}
document = BillDocument.objects.create(**document_info)
return document
@pytest.fixture
@pytest.mark.django_db
def metro_event_document(metro_event, db):
document_info = {
'event_id': metro_event.ocd_id,
'updated_at': '2017-05-27 11:10:46.574-05',
'full_text': '',
'note': 'Agenda',
'url': 'http://metro.legistar1.com/metro/meetings/2017/5/1216_A_System_Safety,_Security_and_Operations_Committee_17-05-18_Agenda.pdf',
}
document = EventDocument.objects.create(**document_info)
return document
@pytest.fixture
@pytest.mark.django_db
def metro_change_bill(metro_bill, db):
with connection.cursor() as cursor:
sql = '''
CREATE TABLE change_bill (
ocd_id VARCHAR,
PRIMARY KEY (ocd_id)
);
INSERT INTO change_bill (ocd_id)
VALUES ('{}');
'''.format(metro_bill.ocd_id)
cursor.execute(sql)
@pytest.fixture
@pytest.mark.django_db
def metro_change_event(metro_event, db):
with connection.cursor() as cursor:
sql = '''
CREATE TABLE change_event (
ocd_id VARCHAR,
PRIMARY KEY (ocd_id)
);
INSERT INTO change_event (ocd_id)
VALUES ('{}');
'''.format(metro_event.ocd_id)
cursor.execute(sql)
|
__author__ = 'belinkov'
import sys
import os
import codecs
import re
from data_utils import DIACS, REGEX_DIACS
REGEX_SOLUTION_DIAC = re.compile(r'\((.+?)\)') # for gold diacritized word
class WordAnalysis(object):
"""
A simplified pos analysis from treebank pos/before-treebank files.
Attributes:
input_string (str): INPUT STRING from LDC file
lookup_word (str): LOOK-UP WORD from LDC file (if exists)
comment (str): Comment from LDC file
index (str): INDEX from LDC file
gold_solution (str): the gold * SOLUTION from LDC file
word (str): for Arabic words, same as lookup_word with diacritics removed;
for non-Arabic words, same as input_string
word_diac (str): for Arabic words, the diacritized lookup_word from gold_solution;
for non-Arabic words, same as input_string
"""
def __init__(self, input_string, comment, index, gold_solution=None, lookup_word=None):
self.input_string = input_string
self.comment = comment
self.index = index
self.gold_solution = gold_solution
self.lookup_word = lookup_word
# if this is an Arabic script word
if lookup_word:
self.word = REGEX_DIACS.sub('', lookup_word)
if gold_solution:
match = REGEX_SOLUTION_DIAC.match(gold_solution)
if not match:
sys.stderr.write('Warning: could not find diacritized solution in: ' + gold_solution + '. ' + \
'Writing lookup word as is: ' + lookup_word + '\n')
self.word_diac = lookup_word
else:
self.word_diac = match.groups()[0]
self.check_match()
# there may be no solution if the word is unknown, so just write the lookup word
else:
self.word_diac = lookup_word
# this is a non-Arabic script word
else:
            # TODO consider marking as Latin words (and excluding them later)
self.word = input_string
self.word_diac = input_string
def check_match(self):
"""
Check match between word and word_diac
"""
if REGEX_DIACS.sub('', self.word_diac) != self.word:
sys.stderr.write('Warning: word ' + self.word + ' != word_diac ' + self.word_diac + \
' after removing diacritics. Attempting to correct\n')
self.unnormalize()
if REGEX_DIACS.sub('', self.word_diac) != self.word:
sys.stderr.write('Warning: could not correct, word ' + self.word + ' != word_diac ' + \
self.word_diac + '. Using undiacritized word_diac as word.\n')
self.word = REGEX_DIACS.sub('', self.word_diac)
if REGEX_DIACS.sub('', self.word_diac) != self.word:
sys.stderr.write('Warning: still word ' + self.word + ' != word_diac ' + self.word_diac + '\n')
def unnormalize(self):
"""
Try to reverse Buckwalter normalizations on diacritized word
"""
# first, remove "_" (elongation character)
self.word = self.word.replace('_', '')
self.word_diac = self.word_diac.replace('_', '')
# next, check for normalization mismatches
word_ind = 0
word_diac_ind = 0
new_word_diac = ''
while word_ind < len(self.word) and word_diac_ind < len(self.word_diac):
word_char = self.word[word_ind]
word_diac_char = self.word_diac[word_diac_ind]
if word_char == word_diac_char:
new_word_diac += word_diac_char
word_ind += 1
word_diac_ind += 1
elif word_diac_char in DIACS:
new_word_diac += word_diac_char
word_diac_ind += 1
else:
# this is probably a normalization
# print 'word_char:', word_char, 'word_diac_char:', word_diac_char
new_word_diac += word_char
word_ind += 1
word_diac_ind += 1
if word_ind == len(self.word) and word_diac_ind == len(self.word_diac) - 1:
# if we have one more char in word_diac
word_diac_char = self.word_diac[word_diac_ind]
if word_diac_char in DIACS:
new_word_diac += word_diac_char
self.word_diac = new_word_diac
def process_treebank_file(treebank_filename, output_file, output_file_diac):
"""
Extract data from a treebank file
:param treebank_filename: pos/before-treebank file
:param output_file: file to write words without diacritics
:param output_file_diac: file to write words with diacritics
:return:
"""
print 'extracting data from file:', treebank_filename
f = codecs.open(treebank_filename, encoding='utf8')
input_string, comment, index, gold_solution, lookup_word = ['']*5
prev_index = '' # keep track of previous index
for line in f:
if line.strip() == '':
if input_string == '':
continue
word_analysis = WordAnalysis(input_string, comment, index, gold_solution, lookup_word)
# check for a new paragraph
if prev_index.startswith('P') and index.startswith('P') and not prev_index.startswith(index.split('W')[0]):
output_file.write('\n')
output_file_diac.write('\n')
output_file.write(word_analysis.word + '\n')
output_file_diac.write(word_analysis.word_diac + '\n')
prev_index = index
input_string, comment, index, gold_solution, lookup_word = ['']*5
else:
splt = line.strip().split(':', 1)
if len(splt) != 2:
sys.stderr.write('Warning: could not split line on :, in: ' + line + '\n')
continue
field_name, field_val = splt[0].strip(), splt[1].strip()
if field_name == 'INPUT STRING':
input_string = field_val
elif field_name == 'LOOK-UP WORD':
lookup_word = field_val
elif field_name == 'Comment':
comment = field_val
elif field_name == 'INDEX':
index = field_val
elif field_name.startswith('* SOLUTION'):
gold_solution = field_val
elif field_name.startswith('SOLUTION') or field_name == '(GLOSS)':
continue
else:
                sys.stderr.write('Warning: unknown field: ' + field_name + '\n')
f.close()
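# Hypothetical sketch of the record block this parser expects (the exact LDC
# formatting may differ; only the field names below are taken from the code
# above):
#
#   INPUT STRING: <raw token>
#   LOOK-UP WORD: <undiacritized Buckwalter form>
#   Comment: <optional comment>
#   INDEX: P1W1
#   * SOLUTION 1: (diacritized_form) tag
#   (GLOSS): gloss text
#
# A blank line terminates the record and triggers the WordAnalysis above.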
def process_dir(treebank_dir, output_filename, output_filename_diac):
"""
Extract data from a treebank dir
:param treebank_dir: pos/before-treebank directory
:param output_file: file to write words without diacritics
:param output_file_diac: file to write words with diacritics
:return:
"""
print 'processing treebank dir:', treebank_dir
g = codecs.open(output_filename, 'w', encoding='utf8')
g_diac = codecs.open(output_filename_diac, 'w', encoding='utf8')
for f in os.listdir(treebank_dir):
process_treebank_file(treebank_dir + '/' + f, g, g_diac)
g.close()
g_diac.close()
print 'written words to:', output_filename
print 'written diacritized words to:', output_filename_diac
if __name__ == '__main__':
if len(sys.argv) == 4:
process_dir(sys.argv[1], sys.argv[2], sys.argv[3])
else:
print 'USAGE: python ' + sys.argv[0] + ' <treebank dir> <output word file> <output diacritized word file>'
|
import sys
import os
import subprocess
import logging
import time
import re
import pandas
logging.basicConfig()
LOG = logging.getLogger(__name__)
class TraitTableEntry(object):
""" A single entry in a trait table """
def __init__(self, name):
self.name = name
self.traits = {}
def __str__(self):
return "TraitTableEntry {}".format(self.name)
def add_trait(self, trait, value):
""" Checks traits to make sure it doesn't already exist in the dict and adds it """
if trait in self.traits:
raise ValueError("{} already has a trait called '{}'.".format(str(self), trait))
else:
# see if we can convert the trait into a number
try:
value = float(value)
except ValueError:
pass
self.traits[trait] = value
def correlation(self, other, traits=None):
"""
Finds the correlation between self and other for the listed traits
If traits is not suppiled, uses all the traits from self.
Only uses traits that both have. I'm not sure if this is the intuitive default behavior.
It might make sense to throw an error if a trait isn't found.
Or to return a list of the ultimate traits used?
It may be unintuitive because if I pass the same set of traits to multiple comparisons,
each comparison may actually use different traits and not tell me.
"""
if traits is None:
traits = list(self.traits.keys())
pandas_dict = {}
for trait in traits:
try:
st = self.traits[trait]
except KeyError:
continue
try:
ot = other.traits[trait]
except KeyError:
continue
pandas_dict[trait] = {"self": st, "other": ot}
if not pandas_dict:
raise ValueError("No traits were shared between the entries.")
df = pandas.DataFrame.from_dict(pandas_dict, orient="index")
corr = df.corr(method="spearman")
return corr.loc["self", "other"]
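# Minimal usage sketch (not part of the original module); the trait names and
# values below are made up purely for illustration.
def _example_correlation():
    a = TraitTableEntry("genome_A")
    b = TraitTableEntry("genome_B")
    for trait, (va, vb) in {"K00001": (1, 2), "K00002": (0, 0), "K00003": (3, 5)}.items():
        a.add_trait(trait, va)
        b.add_trait(trait, vb)
    # Spearman correlation over the traits shared by both entries
    return a.correlation(b)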
class TraitTableManager(object):
""" A class for parsing and manipulating trait tables """
def __init__(self, trait_table_f):
self.trait_table_f = trait_table_f
# get headers
with open(self.trait_table_f, 'r') as IN:
headers = IN.readline().rstrip().split("\t")
# set the entry header replacing a comment line if present
self.entry_header = headers[0].replace("#", "")
self.traits = headers[1:]
def __iter__(self):
""" Yields a TraitTableEntry for each line in a trait table """
with open(self.trait_table_f, 'r') as IN:
# skip header line
IN.readline()
for line in IN:
# skip blank lines
                if not line.strip():
continue
try:
name, trait_values = line.rstrip().split("\t", 1)
except ValueError:
                    print((line,))
                    continue
tte = TraitTableEntry(name)
for index, val in enumerate(trait_values.split("\t")):
tte.add_trait(self.traits[index], val)
yield tte
def get_ordered_traits(self, metadata_last=True):
""" Returns an ordered list of traits by a natural sort algorithm that optionally sends metadata to the back. """
def convert(char):
""" Attempts to convert a character into an integer """
try:
return int(char)
except ValueError:
return char
def nat_sort(entry):
""" Performs a natural sort that will sort text and numbers in a way that makes sense """
if metadata_last:
if entry.startswith("metadata"):
# I append "~" to the beginning because of its high Unicode value
entry = "~~~" + "metadata"
return [convert(char) for char in entry]
return sorted(self.traits, key=nat_sort)
def get_subset(self, subset_names, remove=False):
"""
A filter around the iter method that only gets entries in the subset_names list (or removes them)
Something is wrong with this method and it sometimes returns incorrect results!!!
"""
to_find = len(subset_names)
found = 0
for entry in self:
# check if we have exhausted the list to speed up the search
if found == to_find:
if remove:
yield entry
else:
return
            if entry.name in subset_names:
                found += 1
                if not remove:
                    yield entry
            else:
                if remove:
                    yield entry
@staticmethod
def write_entry(entry, fh, traits):
to_write = [entry.name]
for trait in traits:
try:
to_write.append(str(entry.traits[trait]))
except KeyError:
LOG.warning("Entry {} doesn't have trait {}. Writting 'NA'".format(str(entry), trait))
to_write.append("NA")
fh.write("\t".join(to_write) + "\n")
class PicrustExecuter(object):
""" Runs PICRUSt """
job_id = 0
@classmethod
def predict_traits_wf(cls, tree, trait_table, type="trait", base_dir=None):
""" Runs the predict_traits_wf. Returns a name and an output path """
# make a directory to hold the analysis
if base_dir is None:
base_dir = os.getcwd() + "/" + "picrust_project"
else:
base_dir = os.path.abspath(base_dir)
if not os.path.isdir(base_dir):
os.mkdir(base_dir)
if type == "trait":
format_dir = base_dir + "/" + "format_trait"
predict_out = base_dir + "/" + "predicted_traits.tab"
elif type == "marker":
format_dir = base_dir + "/" + "format_marker"
predict_out = base_dir + "/" + "predicted_markers.tab"
else:
raise ValueError("type must be one of 'trait', 'marker'")
format_cmd = cls._get_format_command(trait_table, tree, format_dir)
# formatted paths
fmt_table = format_dir + "/" + "trait_table.tab"
fmt_tree = format_dir + "/" + "reference_tree.newick"
prun_tree = format_dir + "/" + "pruned_tree.newick"
asr_out = format_dir + "/" + "asr.tab"
reconstruct_cmd = cls._get_asr_command(fmt_table, prun_tree, asr_out)
predict_cmd = cls._get_predict_traits_command(fmt_table, asr_out, fmt_tree, predict_out)
# link all the necessary commands into a single command
super_command = "; ".join([format_cmd, reconstruct_cmd, predict_cmd])
job_name = "picrust_cmd{}".format(cls.job_id)
subprocess.call([ "bsub",
"-o", "{}/auto_picrust.out".format(base_dir),
"-e", "{}/auto_picrust.err".format(base_dir),
"-J", job_name,
super_command
])
cls.job_id += 1
return job_name, predict_out
    @classmethod
    def predict_metagenome(cls, otu_table, copy_numbers, trait_table, base_dir=None):
# make a directory to hold the analysis
if base_dir is None:
base_dir = os.getcwd() + "/" + "picrust_project"
else:
base_dir = os.path.abspath(base_dir)
if not os.path.isdir(base_dir):
os.mkdir(base_dir)
norm_out = base_dir + "/" + "normalized_OTU_table.biom"
norm_cmd = cls._get_normalize_command(otu_table, copy_numbers, norm_out)
predict_out = base_dir + "/" + "predicted_metagenome.tab"
predict_cmd = cls._get_predict_metagenome_command(norm_out, trait_table, out=predict_out)
# link all the necessary commands into a single command
super_command = "; ".join([norm_cmd, predict_cmd])
job_name = "picrust_cmd{}".format(cls.job_id)
subprocess.call([ "bsub",
"-o", "{}/auto_picrust.out".format(base_dir),
"-e", "{}/auto_picrust.err".format(base_dir),
"-J", job_name,
super_command
])
cls.job_id += 1
return job_name, predict_out
@classmethod
def wait_for_job(cls, job_name="picrust_cmd*"):
""" waits for job to complete, checks every 10 seconds """
while cls._job_running(job_name):
time.sleep(10)
@staticmethod
def _job_running(job_name="picrust_cmd*"):
output = subprocess.check_output([
"bjobs",
"-J", "picrust_cmd*"
])
#print(output)
if output:
return True
else:
return False
@staticmethod
def _get_format_command(trait_tab, tree, out):
exe = subprocess.check_output(["which", "format_tree_and_trait_table.py"]).strip()
format_files = "python {exe} -t {tree} -i {trait_tab} -o {out}".format(exe=exe, tree=tree, trait_tab=trait_tab, out=out)
return format_files
@staticmethod
def _get_asr_command(trait_table, tree, out):
exe = subprocess.check_output(["which", "ancestral_state_reconstruction.py"]).strip()
asr = "python {exe} -i {trait_table} -t {tree} -o {out}".format(exe=exe, trait_table=trait_table, tree=tree, out=out)
return asr
@staticmethod
def _get_predict_traits_command(trait_table, asr_table, tree, out):
exe = subprocess.check_output(["which", "predict_traits.py"]).strip()
predict = "python {exe} -i {trait_table} -t {tree} -r {asr_table} -o {out} -a".format(exe=exe, trait_table=trait_table, asr_table=asr_table, tree=tree, out=out)
return predict
@staticmethod
def _get_normalize_command(otu_table, copy_numbers, out):
exe = subprocess.check_output(["which", "normalize_by_copy_number.py"]).strip()
normalize = "python {exe} -i {otu_table} -c {copy_numbers} -o {out}".format(exe=exe, otu_table=otu_table, copy_numbers=copy_numbers, out=out)
return normalize
@staticmethod
def _get_predict_metagenome_command(otu_table, trait_table, out):
exe = subprocess.check_output(["which", "predict_metagenomes.py"]).strip()
predict = "python {exe} -i {otu_table} -c {trait_table} -o {out} -f".format(exe=exe, otu_table=otu_table, trait_table=trait_table, out=out)
return predict
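# Rough usage sketch (assumed workflow; the file paths are placeholders):
# submit the trait prediction, wait for the LSF job, then consume the
# predicted table with the TraitTableManager defined above.
#
#   job, predicted = PicrustExecuter.predict_traits_wf("ref.newick", "traits.tab",
#                                                      type="trait", base_dir="picrust_run")
#   PicrustExecuter.wait_for_job(job)
#   manager = TraitTableManager(predicted)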
def get_ko_by_function(ko_metadata_f, level=2):
"""
Level 1 is the top level.
Level 2 is an intermediate level (Corresponding approx to COGs)
Level 3 is the pathway level
"""
if level not in [1, 2, 3]:
raise ValueError("Level must be 1, 2, or 3.")
data = {}
with open(ko_metadata_f, 'r') as IN:
# skip header line
IN.readline()
for line in IN:
fields = line.rstrip().split("\t")
ko_name = fields[0]
ko_pathways = fields[-1]
# multiple pathways sep by "|"
for pathway in ko_pathways.split("|"):
levels = pathway.split(";")
try:
data[";".join(levels[:level])].append(ko_name)
except KeyError:
data[";".join(levels[:level])] = [ko_name]
except IndexError:
LOG.warning("{} did not have a pathway at the requested level.".format(ko_name))
return data
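# Worked example of the level slicing above (the pathway string is
# hypothetical): for "Metabolism;Energy Metabolism;Methane metabolism" and
# level=2, the KO is filed under the key "Metabolism;Energy Metabolism".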
def get_plant_associated_kos(plant_associated_f):
""" Reads in a database of plant associated kos; returns a dict of lineage: KOs """
data = {}
with open(plant_associated_f, 'r') as IN:
# skip header
IN.readline()
for line in IN:
lineage, kos = line[:-1].split("\t")
if kos:
data[lineage] = kos.split(";")
else:
data[lineage] = []
return data
if __name__ == "__main__":
args = sys.argv[1:]
test_executer(args)
|
from django.db import models
from symposion.proposals.models import ProposalBase
class Proposal(ProposalBase):
AUDIENCE_LEVEL_NOVICE = 1
AUDIENCE_LEVEL_EXPERIENCED = 2
AUDIENCE_LEVEL_INTERMEDIATE = 3
AUDIENCE_LEVELS = [
(AUDIENCE_LEVEL_NOVICE, "Novice"),
(AUDIENCE_LEVEL_INTERMEDIATE, "Intermediate"),
(AUDIENCE_LEVEL_EXPERIENCED, "Experienced"),
]
audience_level = models.IntegerField(choices=AUDIENCE_LEVELS)
recording_release = models.BooleanField(
default=True,
help_text="By submitting your proposal, you agree to give permission to the conference organizers to record, edit, and release audio and/or video of your presentation. If you do not agree to this, please uncheck this box."
)
def __unicode__(self):
return self.title
class Meta:
abstract = True
class TalkProposal(Proposal):
class Meta:
verbose_name = "talk proposal"
class TutorialProposal(Proposal):
class Meta:
verbose_name = "tutorial proposal"
|
# -*- coding: utf-8 -*-
import inspect
import json
import re
from celery import chain
from celery.result import AsyncResult
from datetime import datetime
from django.core.exceptions import ObjectDoesNotExist
from django.db import transaction
from django.http import HttpResponse
from django.shortcuts import render
from appliances.models import (
Appliance, AppliancePool, Provider, Group, Template, User, GroupShepherd)
from appliances.tasks import (
appliance_power_on, appliance_power_off, appliance_suspend, appliance_rename,
connect_direct_lun, disconnect_direct_lun, mark_appliance_ready, wait_appliance_ready)
from sprout.log import create_logger
def json_response(data):
return HttpResponse(json.dumps(data), content_type="application/json")
def json_exception(e):
return json_response({
"status": "exception",
"result": {
"class": type(e).__name__,
"message": str(e)
}
})
def json_autherror(message):
return json_response({
"status": "autherror",
"result": {
"message": str(message)
}
})
def json_success(result):
return json_response({
"status": "success",
"result": result
})
class JSONMethod(object):
def __init__(self, method, auth=False):
self._method = method
if self._method.__doc__:
try:
head, body = self._method.__doc__.split("\n\n", 1)
head = head.strip()
self._doc = head
except ValueError:
self._doc = self._method.__doc__.strip()
else:
self._doc = ""
self.auth = auth
@property
def __name__(self):
return self._method.__name__
def __call__(self, *args, **kwargs):
return self._method(*args, **kwargs)
@property
def description(self):
f_args = inspect.getargspec(self._method).args
f_defaults = inspect.getargspec(self._method).defaults
defaults = {}
if f_defaults is not None:
for key, value in zip(f_args[-len(f_defaults):], f_defaults):
defaults[key] = value
return {
"name": self._method.__name__,
"args": f_args if not self.auth else f_args[1:],
"defaults": defaults,
"docstring": self._doc,
"needs_authentication": self.auth,
}
class JSONApi(object):
def __init__(self):
self._methods = {}
def method(self, f):
self._methods[f.__name__] = JSONMethod(f)
def authenticated_method(self, f):
self._methods[f.__name__] = JSONMethod(f, auth=True)
def doc(self, request):
return render(request, 'appliances/apidoc.html', {})
def __call__(self, request):
if request.method != 'POST':
return json_success({
"available_methods": sorted(
map(lambda m: m.description, self._methods.itervalues()),
key=lambda m: m["name"]),
})
try:
data = json.loads(request.body)
method_name = data["method"]
args = data["args"]
kwargs = data["kwargs"]
try:
method = self._methods[method_name]
except KeyError:
raise NameError("Method {} not found!".format(method_name))
create_logger(method).info(
"Calling with parameters {}{}".format(repr(tuple(args)), repr(kwargs)))
if method.auth:
if "auth" in data:
username, password = data["auth"]
try:
user = User.objects.get(username=username)
except ObjectDoesNotExist:
return json_autherror("User {} does not exist!".format(username))
if not user.check_password(password):
return json_autherror("Wrong password for user {}!".format(username))
create_logger(method).info(
"Called by user {}/{}".format(user.id, user.username))
return json_success(method(user, *args, **kwargs))
else:
return json_autherror("Method {} needs authentication!".format(method_name))
else:
return json_success(method(*args, **kwargs))
except Exception as e:
create_logger(method).error(
"Exception raised during call: {}: {}".format(type(e).__name__, str(e)))
return json_exception(e)
else:
create_logger(method).info("Call finished")
jsonapi = JSONApi()
def jsonapi_doc(*args, **kwargs):
return jsonapi.doc(*args, **kwargs)
@jsonapi.method
def has_template(template_name, preconfigured):
"""Check if Sprout tracks a template with a particular name.
Can check both fresh templates and preconfigured ones. It will only take the ones that are:
* Ready
* Existing
* Usable
Args:
template_name: Name of the *original* template.
preconfigured: Whether to check the fresh templates or preconfigured ones.
"""
query = Template.objects.filter(
ready=True, exists=True, usable=True, preconfigured=bool(preconfigured),
original_name=template_name)
return query.count() > 0
@jsonapi.method
def list_appliances(used=False):
"""Returns list of appliances.
Args:
used: Whether to report used or unused appliances
"""
query = Appliance.objects
if used:
query = query.exclude(appliance_pool__owner=None)
else:
query = query.filter(appliance_pool__owner=None)
result = []
for appliance in query:
result.append(appliance.serialized)
return result
@jsonapi.authenticated_method
def num_shepherd_appliances(user, group, version=None, date=None, provider=None):
"""Provides number of currently available shepherd appliances."""
group = Group.objects.get(id=group)
if provider is not None:
provider = Provider.objects.get(id=provider)
if version is None:
if provider is None:
try:
version = Template.get_versions(template_group=group)[0]
except IndexError:
# No version
pass
else:
try:
version = Template.get_versions(template_group=group, provider=provider)[0]
except IndexError:
# No version
pass
if date is None:
filter_kwargs = {"template_group": group}
if provider is not None:
filter_kwargs["provider"] = provider
if version is not None:
filter_kwargs["version"] = version
try:
date = Template.get_dates(**filter_kwargs)[0]
except IndexError:
# No date
pass
filter_kwargs = {"template__template_group": group, "ready": True, "appliance_pool": None}
if version is not None:
filter_kwargs["template__version"] = version
if date is not None:
filter_kwargs["template__date"] = date
if provider is not None:
filter_kwargs["template__provider"] = provider
return len(Appliance.objects.filter(**filter_kwargs))
@jsonapi.authenticated_method
def request_appliances(
user, group, count=1, lease_time=60, version=None, date=None, provider=None,
preconfigured=True, yum_update=False, container=False):
"""Request a number of appliances."""
if date:
date = datetime.strptime(date, "%y%m%d")
return AppliancePool.create(
user, group, version, date, provider, count, lease_time, preconfigured, yum_update,
container).id
@jsonapi.authenticated_method
def request_check(user, request_id):
"""Return status of the appliance pool"""
request = AppliancePool.objects.get(id=request_id)
if user != request.owner and not user.is_staff:
raise Exception("This pool belongs to a different user!")
return {
"fulfilled": request.fulfilled,
"finished": request.finished,
"preconfigured": request.preconfigured,
"yum_update": request.yum_update,
"progress": int(round(request.percent_finished * 100)),
"appliances": [
appliance.serialized
for appliance
in request.appliances
],
}
@jsonapi.authenticated_method
def prolong_appliance_lease(user, id, minutes=60):
"""Prolongs the appliance's lease time by specified amount of minutes from current time."""
appliance = Appliance.objects.get(id=id)
if appliance.owner is not None and user != appliance.owner and not user.is_staff:
raise Exception("This pool belongs to a different user!")
appliance.prolong_lease(time=minutes)
@jsonapi.authenticated_method
def prolong_appliance_pool_lease(user, id, minutes=60):
"""Prolongs the appliance pool's lease time by specified amount of minutes from current time."""
pool = AppliancePool.objects.get(id=id)
if user != pool.owner and not user.is_staff:
raise Exception("This pool belongs to a different user!")
pool.prolong_lease(time=minutes)
@jsonapi.authenticated_method
def destroy_pool(user, id):
"""Destroy the pool. Kills all associated appliances."""
pool = AppliancePool.objects.get(id=id)
if user != pool.owner and not user.is_staff:
raise Exception("This pool belongs to a different user!")
pool.kill()
@jsonapi.method
def pool_exists(id):
"""Check whether pool does exist"""
try:
AppliancePool.objects.get(id=id)
return True
except ObjectDoesNotExist:
return False
@jsonapi.authenticated_method
def get_number_free_appliances(user, group):
"""Get number of available appliances to keep in the pool"""
with transaction.atomic():
g = Group.objects.get(id=group)
return {
sg.user_group.name: sg.template_pool_size
for sg in
GroupShepherd.objects.filter(user_group__in=user.groups.all(), template_group=g)}
@jsonapi.authenticated_method
def set_number_free_appliances(user, group, n):
"""Set number of available appliances to keep in the pool"""
if not user.is_staff:
raise Exception("You don't have enough rights!")
if n < 0:
return False
with transaction.atomic():
g = Group.objects.get(id=group)
g.template_pool_size = n
g.save()
return True
@jsonapi.method
def available_cfme_versions(preconfigured=True):
"""Lists all versions that are available"""
return Template.get_versions(preconfigured=preconfigured)
@jsonapi.method
def available_groups():
return map(lambda group: group.id, Group.objects.all())
@jsonapi.method
def available_providers():
return map(lambda group: group.id, Provider.objects.all())
@jsonapi.authenticated_method
def add_provider(user, provider_key):
if not user.is_staff:
raise Exception("You don't have enough rights!")
try:
provider_o = Provider.objects.get(id=provider_key)
return False
except ObjectDoesNotExist:
provider_o = Provider(id=provider_key)
provider_o.save()
return True
def get_appliance(appliance, user=None):
"""'Multimethod' that receives an object and tries to guess by what field the appliance
should be retrieved. Then it retrieves the appliance"""
if isinstance(appliance, int):
appliance = Appliance.objects.get(id=appliance)
elif re.match(r"^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$", appliance) is not None:
appliance = Appliance.objects.get(ip_address=appliance)
else:
appliance = Appliance.objects.get(name=appliance)
if user is None:
return appliance
else:
if appliance.owner is None:
if not user.is_staff:
raise Exception("Only staff can operate with nonowned appliances")
elif appliance.owner != user:
raise Exception("This appliance belongs to a different user!")
return appliance
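# get_appliance accepts several identifier forms (illustrative values):
#   get_appliance(42)                    # by database id
#   get_appliance("10.8.1.23")           # by IP address
#   get_appliance("sprout_appliance_x")  # by name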
@jsonapi.authenticated_method
def appliance_data(user, appliance):
"""Returns data about the appliance serialized as JSON.
You can specify appliance by IP address, id or name.
"""
appliance = get_appliance(appliance, user)
return appliance.serialized
@jsonapi.authenticated_method
def destroy_appliance(user, appliance):
"""Destroy the appliance. If the kill task was called, id is returned, otherwise None
You can specify appliance by IP address, id or name.
"""
appliance = get_appliance(appliance, user)
try:
return Appliance.kill(appliance).task_id
except AttributeError: # None was returned
return None
@jsonapi.method
def power_state(appliance):
"""Return appliance's current power state.
You can specify appliance by IP address, id or name.
"""
return get_appliance(appliance).power_state
@jsonapi.authenticated_method
def power_on(user, appliance, wait_ready=True):
"""Power on the appliance. If task is called, an id is returned, otherwise None.
You can specify appliance by IP address, id or name.
"""
appliance = get_appliance(appliance, user)
if appliance.power_state != Appliance.Power.ON:
tasks = [appliance_power_on.si(appliance.id)]
if wait_ready:
tasks.append(wait_appliance_ready.si(appliance.id))
else:
tasks.append(mark_appliance_ready.si(appliance.id))
return chain(*tasks)().task_id
@jsonapi.authenticated_method
def power_off(user, appliance):
"""Power off the appliance. If task is called, an id is returned, otherwise None.
You can specify appliance by IP address, id or name.
"""
appliance = get_appliance(appliance, user)
if appliance.power_state != Appliance.Power.OFF:
return appliance_power_off.delay(appliance.id).task_id
@jsonapi.authenticated_method
def suspend(user, appliance):
"""Suspend the appliance. If task is called, an id is returned, otherwise None.
You can specify appliance by IP address, id or name.
"""
appliance = get_appliance(appliance, user)
if appliance.power_state == Appliance.Power.OFF:
return False
elif appliance.power_state != Appliance.Power.SUSPENDED:
return appliance_suspend.delay(appliance.id).task_id
@jsonapi.authenticated_method
def set_pool_description(user, pool_id, description):
"""Set the pool's description"""
pool = AppliancePool.objects.get(id=pool_id)
if pool.owner is None:
if not user.is_staff:
raise Exception("Only staff can operate with nonowned appliances")
elif pool.owner != user:
raise Exception("This appliance belongs to a different user!")
pool.description = description
pool.save()
return True
@jsonapi.authenticated_method
def get_pool_description(user, pool_id):
"""Get the pool's description"""
pool = AppliancePool.objects.get(id=pool_id)
if pool.owner is None:
if not user.is_staff:
raise Exception("Only staff can operate with nonowned appliances")
elif pool.owner != user:
raise Exception("This appliance belongs to a different user!")
return pool.description
@jsonapi.authenticated_method
def find_pools_by_description(user, description, partial=False):
"""Searches pools to find a pool with matching descriptions. When partial, `in` is used"""
pools = []
for pool in AppliancePool.objects.all():
if not pool.description:
continue
if partial:
if description in pool.description:
pools.append(pool)
else:
if pool.description == description:
pools.append(pool)
def _filter(pool):
return (pool.owner is None and user.is_staff) or (pool.owner == user)
return map(lambda pool: pool.id, filter(_filter, pools))
@jsonapi.authenticated_method
def rename_appliance(user, appliance, new_name):
"""Rename the appliance. Returns task id.
You can specify appliance by IP address, id or name.
"""
appliance = get_appliance(appliance, user)
return appliance_rename.delay(appliance.id, new_name).task_id
@jsonapi.method
def task_finished(task_id):
"""Returns whether specified task has already finished"""
result = AsyncResult(task_id)
return result.ready()
@jsonapi.method
def task_result(task_id):
"""Returns result of the task. Returns None if no result yet"""
result = AsyncResult(task_id)
if not result.ready():
return None
return result.get(timeout=1)
@jsonapi.authenticated_method
def appliance_provider_type(user, appliance):
"""Return appliance's provider class.
Corresponds to the mgmtsystem class names.
You can specify appliance by IP address, id or name.
"""
api_class = type(get_appliance(appliance, user).provider_api)
return api_class.__name__
@jsonapi.authenticated_method
def appliance_provider_key(user, appliance):
"""Return appliance's provider key.
You can specify appliance by IP address, id or name.
"""
return get_appliance(appliance, user).provider.id
@jsonapi.authenticated_method
def appliance_connect_direct_lun(user, appliance):
"""Connects direct LUN disk to the appliance (RHEV only).
You can specify appliance by IP address, id or name.
"""
appliance = get_appliance(appliance, user)
return connect_direct_lun(appliance.id).task_id
@jsonapi.authenticated_method
def appliance_disconnect_direct_lun(user, appliance):
"""Disconnects direct LUN disk from the appliance (RHEV only).
You can specify appliance by IP address, id or name.
"""
appliance = get_appliance(appliance, user)
return disconnect_direct_lun(appliance.id).task_id
|
"""
MOST OF THIS CODE IS NOT USED
IT'S COPY/PASTED AND LEFT HERE FOR CONVENIENCE
"""
import os
import sys
# in case our module isn't installed (running from this folder)
if not os.path.abspath('../../../') in sys.path:
sys.path.append('../../../') # helps spyder get docs
import swhlab
import swhlab.common as cm
import matplotlib.pyplot as plt
import numpy as np
import warnings # suppress VisibleDeprecationWarning warning
warnings.filterwarnings("ignore", category=np.VisibleDeprecationWarning)
def analyzeSweep(abf,plotToo=True,color=None,label=None):
Y=abf.sweepYsmartbase()[abf.pointsPerSec*.5:]
AV,SD=np.average(Y),np.std(Y)
dev=5 # number of stdevs from the avg to set the range
    R1,R2=AV-SD*dev,AV+SD*dev
nBins=1000
hist,bins=np.histogram(Y,bins=nBins,range=[R1,R2],density=True)
histSmooth=abf.convolve(hist,cm.kernel_gaussian(nBins/5))
if plotToo:
plt.plot(bins[1:],hist,'.',color=color,alpha=.2,ms=10)
plt.plot(bins[1:],histSmooth,'-',color=color,lw=5,alpha=.5,label=label)
return
if __name__=="__main__":
#abfFile=R"C:\Users\scott\Documents\important\demodata\abfs\16d07022.abf"
abfFile=R"X:\Data\2P01\2016\2016-09-01 PIR TGOT\16d07022.abf"
abf=swhlab.ABF(abfFile)
# prepare figure
plt.figure(figsize=(10,10))
plt.grid()
plt.title("smart baseline value distribution")
plt.xlabel(abf.units2)
plt.ylabel("normalized density")
# do the analysis
abf.kernel=abf.kernel_gaussian(sizeMS=500)
abf.setsweep(175)
analyzeSweep(abf,color='b',label="baseline")
abf.setsweep(200)
analyzeSweep(abf,color='g',label="TGOT")
abf.setsweep(375)
analyzeSweep(abf,color='y',label="washout")
# show figure
plt.legend()
plt.margins(0,.1)
plt.show()
print("DONE")
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import unittest
import numpy as np
import sys
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.optimizer import AdamOptimizer
from test_imperative_base import new_program_scope
from paddle.fluid.dygraph.base import to_variable
def gen_data():
pass
class GraphConv(fluid.Layer):
def __init__(self, name_scope, in_features, out_features):
super(GraphConv, self).__init__(name_scope)
self._in_features = in_features
self._out_features = out_features
self.weight = self.create_parameter(
attr=None,
dtype='float32',
shape=[self._in_features, self._out_features])
self.bias = self.create_parameter(
attr=None, dtype='float32', shape=[self._out_features])
def forward(self, features, adj):
support = fluid.layers.matmul(features, self.weight)
# TODO(panyx0718): sparse matmul?
return fluid.layers.matmul(adj, support) + self.bias
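# Rough shape sketch (illustrative assumption: a batch of one graph with N nodes
# and F input features):
#   features [1, N, F] x weight [F, out]  -> support [1, N, out]
#   adj      [1, N, N] x support          -> output  [1, N, out]  (+ bias broadcast over nodes)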
class GCN(fluid.Layer):
def __init__(self, name_scope, num_hidden):
super(GCN, self).__init__(name_scope)
self.gc = GraphConv(self.full_name(), num_hidden, 32)
self.gc2 = GraphConv(self.full_name(), 32, 10)
def forward(self, x, adj):
x = fluid.layers.relu(self.gc(x, adj))
return self.gc2(x, adj)
class TestDygraphGNN(unittest.TestCase):
def test_gnn_float32(self):
seed = 90
startup = fluid.Program()
startup.random_seed = seed
main = fluid.Program()
main.random_seed = seed
scope = fluid.core.Scope()
with new_program_scope(main=main, startup=startup, scope=scope):
features = fluid.layers.data(
name='features',
shape=[1, 100, 50],
dtype='float32',
append_batch_size=False)
# Use selected rows when it's supported.
adj = fluid.layers.data(
name='adj',
shape=[1, 100, 100],
dtype='float32',
append_batch_size=False)
labels = fluid.layers.data(
name='labels',
shape=[100, 1],
dtype='int64',
append_batch_size=False)
model = GCN('test_gcn', 50)
logits = model(features, adj)
logits = fluid.layers.reshape(logits, logits.shape[1:])
            # In the reference example the loss is NLL over log_softmax. However, paddle's
            # log_loss only supports binary classification now.
loss = fluid.layers.softmax_with_cross_entropy(logits, labels)
loss = fluid.layers.reduce_sum(loss)
adam = AdamOptimizer(learning_rate=1e-3)
adam.minimize(loss)
exe = fluid.Executor(fluid.CPUPlace(
) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0))
exe.run(startup)
static_loss = exe.run(feed={
'features': np.zeros(
[1, 100, 50], dtype=np.float32),
'adj': np.zeros(
[1, 100, 100], dtype=np.float32),
'labels': np.zeros(
[100, 1], dtype=np.int64)
},
fetch_list=[loss])[0]
static_weight = np.array(
scope.find_var(model.gc.weight.name).get_tensor())
with fluid.dygraph.guard():
fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed
features = np.zeros([1, 100, 50], dtype=np.float32)
# Use selected rows when it's supported.
adj = np.zeros([1, 100, 100], dtype=np.float32)
labels = np.zeros([100, 1], dtype=np.int64)
model = GCN('test_gcn', 50)
logits = model(to_variable(features), to_variable(adj))
logits = fluid.layers.reshape(logits, logits.shape[1:])
            # In the reference example the loss is NLL over log_softmax. However, paddle's
            # log_loss only supports binary classification now.
loss = fluid.layers.softmax_with_cross_entropy(logits,
to_variable(labels))
loss = fluid.layers.reduce_sum(loss)
adam = AdamOptimizer(learning_rate=1e-3)
adam.minimize(loss)
self.assertEqual(static_loss, loss.numpy())
self.assertTrue(np.allclose(static_weight, model.gc.weight.numpy()))
sys.stderr.write('%s %s\n' % (static_loss, loss.numpy()))
if __name__ == '__main__':
unittest.main()
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy
def upgrade(migrate_engine):
meta = sqlalchemy.MetaData()
meta.bind = migrate_engine
resource_data = sqlalchemy.Table(
'resource_data', meta,
sqlalchemy.Column('id',
sqlalchemy.Integer,
primary_key=True,
nullable=False),
sqlalchemy.Column('created_at', sqlalchemy.DateTime),
sqlalchemy.Column('updated_at', sqlalchemy.DateTime),
sqlalchemy.Column('key', sqlalchemy.String(255)),
sqlalchemy.Column('value', sqlalchemy.Text),
sqlalchemy.Column('redact', sqlalchemy.Boolean),
sqlalchemy.Column('resource_id',
sqlalchemy.String(36),
sqlalchemy.ForeignKey('resource.id'),
nullable=False),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
sqlalchemy.Table('resource', meta, autoload=True)
resource_data.create()
def downgrade(migrate_engine):
meta = sqlalchemy.MetaData()
meta.bind = migrate_engine
resource_data = sqlalchemy.Table('resource_data', meta, autoload=True)
resource_data.drop()
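# Minimal usage sketch (not part of the migration; assumes a throwaway SQLite
# engine in which the referenced 'resource' table already exists):
#
#   import sqlalchemy
#   engine = sqlalchemy.create_engine('sqlite:///:memory:')
#   upgrade(engine)    # creates the resource_data table
#   downgrade(engine)  # drops it again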
|
#!/usr/bin/python
# coding=utf-8
import falcon
import ujson
import json
from catcher import models
import datetime
from playhouse.shortcuts import model_to_dict
import logging
from catcher.models import User, NullUser
import peewee
class PeeweeConnection(object):
def process_request(self, req, resp):
models.db.connect()
def process_response(self, req, resp, resource):
if not models.db.is_closed():
models.db.close()
class Crossdomain(object):
def process_request(self, req, resp):
resp.append_header(
"Access-Control-Allow-Origin", "*"
)
resp.append_header(
"Access-Control-Allow-Headers",
"Content-Type,Authorization,X-Name"
)
resp.append_header(
"Access-Control-Allow-Methods",
"PUT,POST,DELETE,GET"
)
class Authorization(object):
def process_request(self, req, resp):
user = NullUser()
try:
if req.auth:
user = User.get(api_key=req.auth)
except User.DoesNotExist:
pass
# debug
print "LOGGED:", user
req.context["user"] = user
class RequireJSON(object):
def process_request(self, req, resp):
if not req.client_accepts_json:
raise falcon.HTTPNotAcceptable(
'This API only supports responses encoded as JSON.'
)
if req.method in ('POST', 'PUT'):
if not req.content_type or 'application/json' not in req.content_type:
raise falcon.HTTPUnsupportedMediaType(
'This API only supports requests encoded as JSON.'
)
class JSONTranslator(object):
def process_request(self, req, resp):
# req.stream corresponds to the WSGI wsgi.input environ variable,
# and allows you to read bytes from the request body
if req.content_length in (None, 0):
return # nothing to do
body = req.stream.read()
if not body:
raise falcon.HTTPBadRequest(
'Empty request body',
'A valid JSON document is required.'
)
try:
req.context['data'] = ujson.loads(body)
except (ValueError, UnicodeDecodeError):
raise falcon.HTTPBadRequest(
'Malformed JSON',
'Could not decode the request body. The '
'JSON was incorrect or not encoded as '
'UTF-8.'
)
def process_response(self, req, resp, resource):
if 'result' not in req.context:
return
resp.body = json.dumps(req.context['result'], default = self.converter)
def converter(self, obj):
        if isinstance(obj, (datetime.time, datetime.date, datetime.datetime)):
return obj.isoformat()
if isinstance(obj, set):
return list(obj)
if isinstance(obj, peewee.Model):
return model_to_dict(obj)
        if isinstance(obj, models.MySQLModel):
            # Note: likely unreachable, since MySQLModel instances already match the
            # peewee.Model check above.
            return model_to_dict(obj)
logging.warning("Converter doesn't know how convert data (%s [%s])" % (obj, type(obj)))
return None
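# Wiring sketch (assumption only; the real app factory lives elsewhere in the
# catcher project). The middleware run in list order, so the DB connection is
# opened, CORS headers appended, the user resolved and the body parsed before
# any resource handler is reached:
#
#   app = falcon.API(middleware=[
#       PeeweeConnection(), Crossdomain(), Authorization(),
#       RequireJSON(), JSONTranslator(),
#   ])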
|
# Copyright (c), Michael DeHaan <[email protected]>, 2012-2013
# Copyright (c), Toshio Kuratomi <[email protected]> 2016
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
from __future__ import absolute_import, division, print_function
FILE_ATTRIBUTES = {
'A': 'noatime',
'a': 'append',
'c': 'compressed',
'C': 'nocow',
'd': 'nodump',
'D': 'dirsync',
'e': 'extents',
'E': 'encrypted',
'h': 'blocksize',
'i': 'immutable',
'I': 'indexed',
'j': 'journalled',
'N': 'inline',
's': 'zero',
'S': 'synchronous',
't': 'notail',
'T': 'blockroot',
'u': 'undelete',
'X': 'compressedraw',
'Z': 'compresseddirty',
}
# Ansible modules can be written in any language.
# The functions available here can be used to do many common tasks,
# simplifying development of Python modules.
import __main__
import atexit
import errno
import datetime
import grp
import fcntl
import locale
import os
import pwd
import platform
import re
import select
import shlex
import shutil
import signal
import stat
import subprocess
import sys
import tempfile
import time
import traceback
import types
from collections import deque
from itertools import chain, repeat
try:
import syslog
HAS_SYSLOG = True
except ImportError:
HAS_SYSLOG = False
try:
from systemd import journal
has_journal = True
except ImportError:
has_journal = False
HAVE_SELINUX = False
try:
import selinux
HAVE_SELINUX = True
except ImportError:
pass
# Python2 & 3 way to get NoneType
NoneType = type(None)
from ansible.module_utils._text import to_native, to_bytes, to_text
from ansible.module_utils.common.text.converters import (
jsonify,
container_to_bytes as json_dict_unicode_to_bytes,
container_to_text as json_dict_bytes_to_unicode,
)
from ansible.module_utils.common.text.formatters import (
lenient_lowercase,
bytes_to_human,
human_to_bytes,
SIZE_RANGES,
)
try:
from ansible.module_utils.common._json_compat import json
except ImportError as e:
print('\n{{"msg": "Error: ansible requires the stdlib json: {0}", "failed": true}}'.format(to_native(e)))
sys.exit(1)
AVAILABLE_HASH_ALGORITHMS = dict()
try:
import hashlib
# python 2.7.9+ and 2.7.0+
for attribute in ('available_algorithms', 'algorithms'):
algorithms = getattr(hashlib, attribute, None)
if algorithms:
break
if algorithms is None:
# python 2.5+
algorithms = ('md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512')
for algorithm in algorithms:
AVAILABLE_HASH_ALGORITHMS[algorithm] = getattr(hashlib, algorithm)
# we may have been able to import md5 but it could still not be available
try:
hashlib.md5()
except ValueError:
AVAILABLE_HASH_ALGORITHMS.pop('md5', None)
except Exception:
import sha
AVAILABLE_HASH_ALGORITHMS = {'sha1': sha.sha}
try:
import md5
AVAILABLE_HASH_ALGORITHMS['md5'] = md5.md5
except Exception:
pass
from ansible.module_utils.common._collections_compat import (
KeysView,
Mapping, MutableMapping,
Sequence, MutableSequence,
Set, MutableSet,
)
from ansible.module_utils.common.process import get_bin_path
from ansible.module_utils.common.file import (
_PERM_BITS as PERM_BITS,
_EXEC_PERM_BITS as EXEC_PERM_BITS,
_DEFAULT_PERM as DEFAULT_PERM,
is_executable,
format_attributes,
get_flags_from_attributes,
)
from ansible.module_utils.common.sys_info import (
get_distribution,
get_distribution_version,
get_platform_subclass,
)
from ansible.module_utils.pycompat24 import get_exception, literal_eval
from ansible.module_utils.common.parameters import (
handle_aliases,
list_deprecations,
list_no_log_values,
PASS_VARS,
PASS_BOOLS,
)
from ansible.module_utils.six import (
PY2,
PY3,
b,
binary_type,
integer_types,
iteritems,
string_types,
text_type,
)
from ansible.module_utils.six.moves import map, reduce, shlex_quote
from ansible.module_utils.common.validation import (
check_missing_parameters,
check_mutually_exclusive,
check_required_arguments,
check_required_by,
check_required_if,
check_required_one_of,
check_required_together,
count_terms,
check_type_bool,
check_type_bits,
check_type_bytes,
check_type_float,
check_type_int,
check_type_jsonarg,
check_type_list,
check_type_dict,
check_type_path,
check_type_raw,
check_type_str,
safe_eval,
)
from ansible.module_utils.common._utils import get_all_subclasses as _get_all_subclasses
from ansible.module_utils.parsing.convert_bool import BOOLEANS, BOOLEANS_FALSE, BOOLEANS_TRUE, boolean
# Note: When getting Sequence from collections, it matches with strings. If
# this matters, make sure to check for strings before checking for sequencetype
SEQUENCETYPE = frozenset, KeysView, Sequence
PASSWORD_MATCH = re.compile(r'^(?:.+[-_\s])?pass(?:[-_\s]?(?:word|phrase|wrd|wd)?)(?:[-_\s].+)?$', re.I)
imap = map
try:
# Python 2
unicode
except NameError:
# Python 3
unicode = text_type
try:
# Python 2
basestring
except NameError:
# Python 3
basestring = string_types
_literal_eval = literal_eval
# End of deprecated names
# Internal global holding passed in params. This is consulted in case
# multiple AnsibleModules are created. Otherwise each AnsibleModule would
# attempt to read from stdin. Other code should not use this directly as it
# is an internal implementation detail
_ANSIBLE_ARGS = None
FILE_COMMON_ARGUMENTS = dict(
    # These are options for setting metadata (mode, ownership, permissions in
    # general) on created files (they are used by set_fs_attributes_if_different
    # and included in load_file_common_arguments)
mode=dict(type='raw'),
owner=dict(),
group=dict(),
seuser=dict(),
serole=dict(),
selevel=dict(),
setype=dict(),
attributes=dict(aliases=['attr']),
# The following are not about perms and should not be in a rewritten file_common_args
src=dict(), # Maybe dest or path would be appropriate but src is not
follow=dict(type='bool', default=False), # Maybe follow is appropriate because it determines whether to follow symlinks for permission purposes too
force=dict(type='bool'),
# not taken by the file module, but other action plugins call the file module so this ignores
# them for now. In the future, the caller should take care of removing these from the module
# arguments before calling the file module.
content=dict(no_log=True), # used by copy
backup=dict(), # Used by a few modules to create a remote backup before updating the file
remote_src=dict(), # used by assemble
regexp=dict(), # used by assemble
delimiter=dict(), # used by assemble
directory_mode=dict(), # used by copy
unsafe_writes=dict(type='bool'), # should be available to any module using atomic_move
)
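# Usage sketch (hypothetical module, not part of this file): passing
# add_file_common_args=True to AnsibleModule merges every key above into the
# module's own argument_spec, so a module only declares its unique options:
#
#   module = AnsibleModule(
#       argument_spec=dict(path=dict(type='path', required=True)),
#       add_file_common_args=True,
#   )
#   file_args = module.load_file_common_arguments(module.params)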
PASSWD_ARG_RE = re.compile(r'^[-]{0,2}pass[-]?(word|wd)?')
# Used for parsing symbolic file perms
MODE_OPERATOR_RE = re.compile(r'[+=-]')
USERS_RE = re.compile(r'[^ugo]')
PERMS_RE = re.compile(r'[^rwxXstugo]')
# Used for determining if the system is running a new enough python version
# and should only restrict on our documented minimum versions
_PY3_MIN = sys.version_info[:2] >= (3, 5)
_PY2_MIN = (2, 6) <= sys.version_info[:2] < (3,)
_PY_MIN = _PY3_MIN or _PY2_MIN
if not _PY_MIN:
print(
'\n{"failed": true, '
'"msg": "Ansible requires a minimum of Python2 version 2.6 or Python3 version 3.5. Current version: %s"}' % ''.join(sys.version.splitlines())
)
sys.exit(1)
#
# Deprecated functions
#
def get_platform():
'''
**Deprecated** Use :py:func:`platform.system` directly.
:returns: Name of the platform the module is running on in a native string
Returns a native string that labels the platform ("Linux", "Solaris", etc). Currently, this is
the result of calling :py:func:`platform.system`.
'''
return platform.system()
# End deprecated functions
#
# Compat shims
#
def load_platform_subclass(cls, *args, **kwargs):
"""**Deprecated**: Use ansible.module_utils.common.sys_info.get_platform_subclass instead"""
platform_cls = get_platform_subclass(cls)
return super(cls, platform_cls).__new__(platform_cls)
def get_all_subclasses(cls):
"""**Deprecated**: Use ansible.module_utils.common._utils.get_all_subclasses instead"""
return list(_get_all_subclasses(cls))
# End compat shims
def _remove_values_conditions(value, no_log_strings, deferred_removals):
"""
Helper function for :meth:`remove_values`.
:arg value: The value to check for strings that need to be stripped
:arg no_log_strings: set of strings which must be stripped out of any values
:arg deferred_removals: List which holds information about nested
containers that have to be iterated for removals. It is passed into
this function so that more entries can be added to it if value is
a container type. The format of each entry is a 2-tuple where the first
element is the ``value`` parameter and the second value is a new
container to copy the elements of ``value`` into once iterated.
:returns: if ``value`` is a scalar, returns ``value`` with two exceptions:
1. :class:`~datetime.datetime` objects which are changed into a string representation.
2. objects which are in no_log_strings are replaced with a placeholder
so that no sensitive data is leaked.
If ``value`` is a container type, returns a new empty container.
``deferred_removals`` is added to as a side-effect of this function.
.. warning:: It is up to the caller to make sure the order in which value
is passed in is correct. For instance, higher level containers need
to be passed in before lower level containers. For example, given
        ``{'level1': {'level2': {'level3': [True]}}}`` first pass in the
dictionary for ``level1``, then the dict for ``level2``, and finally
the list for ``level3``.
"""
if isinstance(value, (text_type, binary_type)):
# Need native str type
native_str_value = value
if isinstance(value, text_type):
value_is_text = True
if PY2:
native_str_value = to_bytes(value, errors='surrogate_or_strict')
elif isinstance(value, binary_type):
value_is_text = False
if PY3:
native_str_value = to_text(value, errors='surrogate_or_strict')
if native_str_value in no_log_strings:
return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
for omit_me in no_log_strings:
native_str_value = native_str_value.replace(omit_me, '*' * 8)
if value_is_text and isinstance(native_str_value, binary_type):
value = to_text(native_str_value, encoding='utf-8', errors='surrogate_then_replace')
elif not value_is_text and isinstance(native_str_value, text_type):
value = to_bytes(native_str_value, encoding='utf-8', errors='surrogate_then_replace')
else:
value = native_str_value
elif isinstance(value, Sequence):
if isinstance(value, MutableSequence):
new_value = type(value)()
else:
new_value = [] # Need a mutable value
deferred_removals.append((value, new_value))
value = new_value
elif isinstance(value, Set):
if isinstance(value, MutableSet):
new_value = type(value)()
else:
new_value = set() # Need a mutable value
deferred_removals.append((value, new_value))
value = new_value
elif isinstance(value, Mapping):
if isinstance(value, MutableMapping):
new_value = type(value)()
else:
new_value = {} # Need a mutable value
deferred_removals.append((value, new_value))
value = new_value
elif isinstance(value, tuple(chain(integer_types, (float, bool, NoneType)))):
stringy_value = to_native(value, encoding='utf-8', errors='surrogate_or_strict')
if stringy_value in no_log_strings:
return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
for omit_me in no_log_strings:
if omit_me in stringy_value:
return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
elif isinstance(value, datetime.datetime):
value = value.isoformat()
else:
raise TypeError('Value of unknown type: %s, %s' % (type(value), value))
return value
def remove_values(value, no_log_strings):
""" Remove strings in no_log_strings from value. If value is a container
type, then remove a lot more"""
deferred_removals = deque()
no_log_strings = [to_native(s, errors='surrogate_or_strict') for s in no_log_strings]
new_value = _remove_values_conditions(value, no_log_strings, deferred_removals)
while deferred_removals:
old_data, new_data = deferred_removals.popleft()
if isinstance(new_data, Mapping):
for old_key, old_elem in old_data.items():
new_elem = _remove_values_conditions(old_elem, no_log_strings, deferred_removals)
new_data[old_key] = new_elem
else:
for elem in old_data:
new_elem = _remove_values_conditions(elem, no_log_strings, deferred_removals)
if isinstance(new_data, MutableSequence):
new_data.append(new_elem)
elif isinstance(new_data, MutableSet):
new_data.add(new_elem)
else:
raise TypeError('Unknown container type encountered when removing private values from output')
return new_value
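# Behaviour sketch (illustrative values only): strings listed in no_log_strings
# are masked recursively, including inside nested containers.
#
#   remove_values({'user': 'bob', 'token': 's3cret'}, ['s3cret'])
#   # -> {'user': 'bob', 'token': 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'}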
def heuristic_log_sanitize(data, no_log_values=None):
''' Remove strings that look like passwords from log messages '''
# Currently filters:
# user:pass@foo/whatever and http://username:pass@wherever/foo
# This code has false positives and consumes parts of logs that are
# not passwds
# begin: start of a passwd containing string
# end: end of a passwd containing string
# sep: char between user and passwd
# prev_begin: where in the overall string to start a search for
# a passwd
# sep_search_end: where in the string to end a search for the sep
data = to_native(data)
output = []
begin = len(data)
prev_begin = begin
sep = 1
while sep:
# Find the potential end of a passwd
try:
end = data.rindex('@', 0, begin)
except ValueError:
# No passwd in the rest of the data
output.insert(0, data[0:begin])
break
# Search for the beginning of a passwd
sep = None
sep_search_end = end
while not sep:
# URL-style username+password
try:
begin = data.rindex('://', 0, sep_search_end)
except ValueError:
# No url style in the data, check for ssh style in the
# rest of the string
begin = 0
# Search for separator
try:
sep = data.index(':', begin + 3, end)
except ValueError:
# No separator; choices:
if begin == 0:
# Searched the whole string so there's no password
# here. Return the remaining data
output.insert(0, data[0:begin])
break
# Search for a different beginning of the password field.
sep_search_end = begin
continue
if sep:
# Password was found; remove it.
output.insert(0, data[end:prev_begin])
output.insert(0, '********')
output.insert(0, data[begin:sep + 1])
prev_begin = begin
output = ''.join(output)
if no_log_values:
output = remove_values(output, no_log_values)
return output
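# Rough example (illustrative only): URL-style credentials are masked while the
# rest of the message is left intact.
#
#   heuristic_log_sanitize('fetch from https://bob:[email protected]/repo')
#   # -> 'fetch from https://bob:********@example.com/repo'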
def _load_params():
''' read the modules parameters and store them globally.
This function may be needed for certain very dynamic custom modules which
    want to process the parameters that are being handed to the module. Since
this is so closely tied to the implementation of modules we cannot
guarantee API stability for it (it may change between versions) however we
will try not to break it gratuitously. It is certainly more future-proof
to call this function and consume its outputs than to implement the logic
inside it as a copy in your own code.
'''
global _ANSIBLE_ARGS
if _ANSIBLE_ARGS is not None:
buffer = _ANSIBLE_ARGS
else:
# debug overrides to read args from file or cmdline
# Avoid tracebacks when locale is non-utf8
# We control the args and we pass them as utf8
if len(sys.argv) > 1:
if os.path.isfile(sys.argv[1]):
fd = open(sys.argv[1], 'rb')
buffer = fd.read()
fd.close()
else:
buffer = sys.argv[1]
if PY3:
buffer = buffer.encode('utf-8', errors='surrogateescape')
# default case, read from stdin
else:
if PY2:
buffer = sys.stdin.read()
else:
buffer = sys.stdin.buffer.read()
_ANSIBLE_ARGS = buffer
try:
params = json.loads(buffer.decode('utf-8'))
except ValueError:
# This helper used too early for fail_json to work.
print('\n{"msg": "Error: Module unable to decode valid JSON on stdin. Unable to figure out what parameters were passed", "failed": true}')
sys.exit(1)
if PY2:
params = json_dict_unicode_to_bytes(params)
try:
return params['ANSIBLE_MODULE_ARGS']
except KeyError:
# This helper does not have access to fail_json so we have to print
# json output on our own.
print('\n{"msg": "Error: Module unable to locate ANSIBLE_MODULE_ARGS in json data from stdin. Unable to figure out what parameters were passed", '
'"failed": true}')
sys.exit(1)
def env_fallback(*args, **kwargs):
''' Load value from environment '''
for arg in args:
if arg in os.environ:
return os.environ[arg]
raise AnsibleFallbackNotFound
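# Typical use (hypothetical option name): declared as the fallback of an
# argument_spec entry so the value is read from the environment when the task
# does not supply it explicitly.
#
#   argument_spec = dict(
#       api_token=dict(no_log=True, fallback=(env_fallback, ['API_TOKEN'])),
#   )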
def missing_required_lib(library, reason=None, url=None):
hostname = platform.node()
msg = "Failed to import the required Python library (%s) on %s's Python %s." % (library, hostname, sys.executable)
if reason:
msg += " This is required %s." % reason
if url:
msg += " See %s for more info." % url
return msg + " Please read module documentation and install in the appropriate location"
class AnsibleFallbackNotFound(Exception):
pass
class AnsibleModule(object):
def __init__(self, argument_spec, bypass_checks=False, no_log=False,
check_invalid_arguments=None, mutually_exclusive=None, required_together=None,
required_one_of=None, add_file_common_args=False, supports_check_mode=False,
required_if=None, required_by=None):
'''
Common code for quickly building an ansible module in Python
(although you can write modules with anything that can return JSON).
See :ref:`developing_modules_general` for a general introduction
and :ref:`developing_program_flow_modules` for more detailed explanation.
'''
self._name = os.path.basename(__file__) # initialize name until we can parse from options
self.argument_spec = argument_spec
self.supports_check_mode = supports_check_mode
self.check_mode = False
self.bypass_checks = bypass_checks
self.no_log = no_log
# Check whether code set this explicitly for deprecation purposes
if check_invalid_arguments is None:
check_invalid_arguments = True
module_set_check_invalid_arguments = False
else:
module_set_check_invalid_arguments = True
self.check_invalid_arguments = check_invalid_arguments
self.mutually_exclusive = mutually_exclusive
self.required_together = required_together
self.required_one_of = required_one_of
self.required_if = required_if
self.required_by = required_by
self.cleanup_files = []
self._debug = False
self._diff = False
self._socket_path = None
self._shell = None
self._verbosity = 0
# May be used to set modifications to the environment for any
# run_command invocation
self.run_command_environ_update = {}
self._warnings = []
self._deprecations = []
self._clean = {}
self._string_conversion_action = ''
self.aliases = {}
self._legal_inputs = []
self._options_context = list()
self._tmpdir = None
if add_file_common_args:
for k, v in FILE_COMMON_ARGUMENTS.items():
if k not in self.argument_spec:
self.argument_spec[k] = v
self._load_params()
self._set_fallbacks()
# append to legal_inputs and then possibly check against them
try:
self.aliases = self._handle_aliases()
except (ValueError, TypeError) as e:
# Use exceptions here because it isn't safe to call fail_json until no_log is processed
print('\n{"failed": true, "msg": "Module alias error: %s"}' % to_native(e))
sys.exit(1)
# Save parameter values that should never be logged
self.no_log_values = set()
self._handle_no_log_values()
# check the locale as set by the current environment, and reset to
# a known valid (LANG=C) if it's an invalid/unavailable locale
self._check_locale()
self._check_arguments(check_invalid_arguments)
# check exclusive early
if not bypass_checks:
self._check_mutually_exclusive(mutually_exclusive)
self._set_defaults(pre=True)
self._CHECK_ARGUMENT_TYPES_DISPATCHER = {
'str': self._check_type_str,
'list': self._check_type_list,
'dict': self._check_type_dict,
'bool': self._check_type_bool,
'int': self._check_type_int,
'float': self._check_type_float,
'path': self._check_type_path,
'raw': self._check_type_raw,
'jsonarg': self._check_type_jsonarg,
'json': self._check_type_jsonarg,
'bytes': self._check_type_bytes,
'bits': self._check_type_bits,
}
if not bypass_checks:
self._check_required_arguments()
self._check_argument_types()
self._check_argument_values()
self._check_required_together(required_together)
self._check_required_one_of(required_one_of)
self._check_required_if(required_if)
self._check_required_by(required_by)
self._set_defaults(pre=False)
# deal with options sub-spec
self._handle_options()
if not self.no_log:
self._log_invocation()
# finally, make sure we're in a sane working dir
self._set_cwd()
# Do this at the end so that logging parameters have been set up
        # This is to warn third party module authors that the functionality is going away.
# We exclude uri and zfs as they have their own deprecation warnings for users and we'll
# make sure to update their code to stop using check_invalid_arguments when 2.9 rolls around
if module_set_check_invalid_arguments and self._name not in ('uri', 'zfs'):
            self.deprecate('Setting check_invalid_arguments is deprecated and will be removed.'
                           ' Update the code for this module. In the future, AnsibleModule will'
                           ' always check for invalid arguments.', version='2.9')
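    # Minimal module sketch (hypothetical argument names; a real module lives in
    # its own file and calls main() when executed):
    #
    #   def main():
    #       module = AnsibleModule(
    #           argument_spec=dict(
    #               name=dict(type='str', required=True),
    #               state=dict(type='str', default='present', choices=['present', 'absent']),
    #           ),
    #           supports_check_mode=True,
    #       )
    #       module.exit_json(changed=False, name=module.params['name'])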
@property
def tmpdir(self):
# if _ansible_tmpdir was not set and we have a remote_tmp,
# the module needs to create it and clean it up once finished.
# otherwise we create our own module tmp dir from the system defaults
if self._tmpdir is None:
basedir = None
if self._remote_tmp is not None:
basedir = os.path.expanduser(os.path.expandvars(self._remote_tmp))
if basedir is not None and not os.path.exists(basedir):
try:
os.makedirs(basedir, mode=0o700)
except (OSError, IOError) as e:
self.warn("Unable to use %s as temporary directory, "
"failing back to system: %s" % (basedir, to_native(e)))
basedir = None
else:
self.warn("Module remote_tmp %s did not exist and was "
"created with a mode of 0700, this may cause"
" issues when running as another user. To "
"avoid this, create the remote_tmp dir with "
"the correct permissions manually" % basedir)
basefile = "ansible-moduletmp-%s-" % time.time()
try:
tmpdir = tempfile.mkdtemp(prefix=basefile, dir=basedir)
except (OSError, IOError) as e:
self.fail_json(
msg="Failed to create remote module tmp path at dir %s "
"with prefix %s: %s" % (basedir, basefile, to_native(e))
)
if not self._keep_remote_files:
atexit.register(shutil.rmtree, tmpdir)
self._tmpdir = tmpdir
return self._tmpdir
def warn(self, warning):
if isinstance(warning, string_types):
self._warnings.append(warning)
self.log('[WARNING] %s' % warning)
else:
raise TypeError("warn requires a string not a %s" % type(warning))
def deprecate(self, msg, version=None):
if isinstance(msg, string_types):
self._deprecations.append({
'msg': msg,
'version': version
})
self.log('[DEPRECATION WARNING] %s %s' % (msg, version))
else:
raise TypeError("deprecate requires a string not a %s" % type(msg))
def load_file_common_arguments(self, params):
'''
many modules deal with files, this encapsulates common
options that the file module accepts such that it is directly
available to all modules and they can share code.
'''
path = params.get('path', params.get('dest', None))
if path is None:
return {}
else:
path = os.path.expanduser(os.path.expandvars(path))
b_path = to_bytes(path, errors='surrogate_or_strict')
# if the path is a symlink, and we're following links, get
# the target of the link instead for testing
if params.get('follow', False) and os.path.islink(b_path):
b_path = os.path.realpath(b_path)
path = to_native(b_path)
mode = params.get('mode', None)
owner = params.get('owner', None)
group = params.get('group', None)
# selinux related options
seuser = params.get('seuser', None)
serole = params.get('serole', None)
setype = params.get('setype', None)
selevel = params.get('selevel', None)
secontext = [seuser, serole, setype]
if self.selinux_mls_enabled():
secontext.append(selevel)
default_secontext = self.selinux_default_context(path)
for i in range(len(default_secontext)):
            if secontext[i] == '_default':
secontext[i] = default_secontext[i]
attributes = params.get('attributes', None)
return dict(
path=path, mode=mode, owner=owner, group=group,
seuser=seuser, serole=serole, setype=setype,
selevel=selevel, secontext=secontext, attributes=attributes,
)
# Detect whether using selinux that is MLS-aware.
# While this means you can set the level/range with
# selinux.lsetfilecon(), it may or may not mean that you
# will get the selevel as part of the context returned
# by selinux.lgetfilecon().
def selinux_mls_enabled(self):
if not HAVE_SELINUX:
return False
if selinux.is_selinux_mls_enabled() == 1:
return True
else:
return False
def selinux_enabled(self):
if not HAVE_SELINUX:
seenabled = self.get_bin_path('selinuxenabled')
if seenabled is not None:
(rc, out, err) = self.run_command(seenabled)
if rc == 0:
self.fail_json(msg="Aborting, target uses selinux but python bindings (libselinux-python) aren't installed!")
return False
if selinux.is_selinux_enabled() == 1:
return True
else:
return False
# Determine whether we need a placeholder for selevel/mls
def selinux_initial_context(self):
context = [None, None, None]
if self.selinux_mls_enabled():
context.append(None)
return context
# If selinux fails to find a default, return an array of None
def selinux_default_context(self, path, mode=0):
context = self.selinux_initial_context()
if not HAVE_SELINUX or not self.selinux_enabled():
return context
try:
ret = selinux.matchpathcon(to_native(path, errors='surrogate_or_strict'), mode)
except OSError:
return context
if ret[0] == -1:
return context
# Limit split to 4 because the selevel, the last in the list,
# may contain ':' characters
context = ret[1].split(':', 3)
return context
def selinux_context(self, path):
context = self.selinux_initial_context()
if not HAVE_SELINUX or not self.selinux_enabled():
return context
try:
ret = selinux.lgetfilecon_raw(to_native(path, errors='surrogate_or_strict'))
except OSError as e:
if e.errno == errno.ENOENT:
self.fail_json(path=path, msg='path %s does not exist' % path)
else:
self.fail_json(path=path, msg='failed to retrieve selinux context')
if ret[0] == -1:
return context
# Limit split to 4 because the selevel, the last in the list,
# may contain ':' characters
context = ret[1].split(':', 3)
return context
def user_and_group(self, path, expand=True):
b_path = to_bytes(path, errors='surrogate_or_strict')
if expand:
b_path = os.path.expanduser(os.path.expandvars(b_path))
st = os.lstat(b_path)
uid = st.st_uid
gid = st.st_gid
return (uid, gid)
def find_mount_point(self, path):
path_is_bytes = False
if isinstance(path, binary_type):
path_is_bytes = True
b_path = os.path.realpath(to_bytes(os.path.expanduser(os.path.expandvars(path)), errors='surrogate_or_strict'))
while not os.path.ismount(b_path):
b_path = os.path.dirname(b_path)
if path_is_bytes:
return b_path
return to_text(b_path, errors='surrogate_or_strict')
def is_special_selinux_path(self, path):
"""
Returns a tuple containing (True, selinux_context) if the given path is on a
NFS or other 'special' fs mount point, otherwise the return will be (False, None).
"""
try:
f = open('/proc/mounts', 'r')
mount_data = f.readlines()
f.close()
except Exception:
return (False, None)
path_mount_point = self.find_mount_point(path)
for line in mount_data:
(device, mount_point, fstype, options, rest) = line.split(' ', 4)
if path_mount_point == mount_point:
for fs in self._selinux_special_fs:
if fs in fstype:
special_context = self.selinux_context(path_mount_point)
return (True, special_context)
return (False, None)
def set_default_selinux_context(self, path, changed):
if not HAVE_SELINUX or not self.selinux_enabled():
return changed
context = self.selinux_default_context(path)
return self.set_context_if_different(path, context, False)
def set_context_if_different(self, path, context, changed, diff=None):
if not HAVE_SELINUX or not self.selinux_enabled():
return changed
if self.check_file_absent_if_check_mode(path):
return True
cur_context = self.selinux_context(path)
new_context = list(cur_context)
# Iterate over the current context instead of the
# argument context, which may have selevel.
(is_special_se, sp_context) = self.is_special_selinux_path(path)
if is_special_se:
new_context = sp_context
else:
for i in range(len(cur_context)):
if len(context) > i:
if context[i] is not None and context[i] != cur_context[i]:
new_context[i] = context[i]
elif context[i] is None:
new_context[i] = cur_context[i]
if cur_context != new_context:
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['secontext'] = cur_context
if 'after' not in diff:
diff['after'] = {}
diff['after']['secontext'] = new_context
try:
if self.check_mode:
return True
rc = selinux.lsetfilecon(to_native(path), ':'.join(new_context))
except OSError as e:
self.fail_json(path=path, msg='invalid selinux context: %s' % to_native(e),
new_context=new_context, cur_context=cur_context, input_was=context)
if rc != 0:
self.fail_json(path=path, msg='set selinux context failed')
changed = True
return changed
def set_owner_if_different(self, path, owner, changed, diff=None, expand=True):
if owner is None:
return changed
b_path = to_bytes(path, errors='surrogate_or_strict')
if expand:
b_path = os.path.expanduser(os.path.expandvars(b_path))
if self.check_file_absent_if_check_mode(b_path):
return True
orig_uid, orig_gid = self.user_and_group(b_path, expand)
try:
uid = int(owner)
except ValueError:
try:
uid = pwd.getpwnam(owner).pw_uid
except KeyError:
path = to_text(b_path)
self.fail_json(path=path, msg='chown failed: failed to look up user %s' % owner)
if orig_uid != uid:
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['owner'] = orig_uid
if 'after' not in diff:
diff['after'] = {}
diff['after']['owner'] = uid
if self.check_mode:
return True
try:
os.lchown(b_path, uid, -1)
except (IOError, OSError) as e:
path = to_text(b_path)
self.fail_json(path=path, msg='chown failed: %s' % (to_text(e)))
changed = True
return changed
def set_group_if_different(self, path, group, changed, diff=None, expand=True):
if group is None:
return changed
b_path = to_bytes(path, errors='surrogate_or_strict')
if expand:
b_path = os.path.expanduser(os.path.expandvars(b_path))
if self.check_file_absent_if_check_mode(b_path):
return True
orig_uid, orig_gid = self.user_and_group(b_path, expand)
try:
gid = int(group)
except ValueError:
try:
gid = grp.getgrnam(group).gr_gid
except KeyError:
path = to_text(b_path)
self.fail_json(path=path, msg='chgrp failed: failed to look up group %s' % group)
if orig_gid != gid:
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['group'] = orig_gid
if 'after' not in diff:
diff['after'] = {}
diff['after']['group'] = gid
if self.check_mode:
return True
try:
os.lchown(b_path, -1, gid)
except OSError:
path = to_text(b_path)
self.fail_json(path=path, msg='chgrp failed')
changed = True
return changed
def set_mode_if_different(self, path, mode, changed, diff=None, expand=True):
if mode is None:
return changed
b_path = to_bytes(path, errors='surrogate_or_strict')
if expand:
b_path = os.path.expanduser(os.path.expandvars(b_path))
path_stat = os.lstat(b_path)
if self.check_file_absent_if_check_mode(b_path):
return True
if not isinstance(mode, int):
try:
mode = int(mode, 8)
except Exception:
try:
mode = self._symbolic_mode_to_octal(path_stat, mode)
except Exception as e:
path = to_text(b_path)
self.fail_json(path=path,
msg="mode must be in octal or symbolic form",
details=to_native(e))
if mode != stat.S_IMODE(mode):
            # prevent mode from having extra info or being an invalid long number
path = to_text(b_path)
self.fail_json(path=path, msg="Invalid mode supplied, only permission info is allowed", details=mode)
prev_mode = stat.S_IMODE(path_stat.st_mode)
if prev_mode != mode:
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['mode'] = '0%03o' % prev_mode
if 'after' not in diff:
diff['after'] = {}
diff['after']['mode'] = '0%03o' % mode
if self.check_mode:
return True
# FIXME: comparison against string above will cause this to be executed
# every time
try:
if hasattr(os, 'lchmod'):
os.lchmod(b_path, mode)
else:
if not os.path.islink(b_path):
os.chmod(b_path, mode)
else:
# Attempt to set the perms of the symlink but be
# careful not to change the perms of the underlying
# file while trying
underlying_stat = os.stat(b_path)
os.chmod(b_path, mode)
new_underlying_stat = os.stat(b_path)
if underlying_stat.st_mode != new_underlying_stat.st_mode:
os.chmod(b_path, stat.S_IMODE(underlying_stat.st_mode))
except OSError as e:
if os.path.islink(b_path) and e.errno in (errno.EPERM, errno.EROFS): # Can't set mode on symbolic links
pass
elif e.errno in (errno.ENOENT, errno.ELOOP): # Can't set mode on broken symbolic links
pass
else:
raise
except Exception as e:
path = to_text(b_path)
self.fail_json(path=path, msg='chmod failed', details=to_native(e),
exception=traceback.format_exc())
path_stat = os.lstat(b_path)
new_mode = stat.S_IMODE(path_stat.st_mode)
if new_mode != prev_mode:
changed = True
return changed
def set_attributes_if_different(self, path, attributes, changed, diff=None, expand=True):
if attributes is None:
return changed
b_path = to_bytes(path, errors='surrogate_or_strict')
if expand:
b_path = os.path.expanduser(os.path.expandvars(b_path))
if self.check_file_absent_if_check_mode(b_path):
return True
existing = self.get_file_attributes(b_path)
attr_mod = '='
if attributes.startswith(('-', '+')):
attr_mod = attributes[0]
attributes = attributes[1:]
if existing.get('attr_flags', '') != attributes or attr_mod == '-':
attrcmd = self.get_bin_path('chattr')
if attrcmd:
attrcmd = [attrcmd, '%s%s' % (attr_mod, attributes), b_path]
changed = True
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['attributes'] = existing.get('attr_flags')
if 'after' not in diff:
diff['after'] = {}
diff['after']['attributes'] = '%s%s' % (attr_mod, attributes)
if not self.check_mode:
try:
rc, out, err = self.run_command(attrcmd)
if rc != 0 or err:
raise Exception("Error while setting attributes: %s" % (out + err))
except Exception as e:
self.fail_json(path=to_text(b_path), msg='chattr failed',
details=to_native(e), exception=traceback.format_exc())
return changed
def get_file_attributes(self, path):
output = {}
attrcmd = self.get_bin_path('lsattr', False)
if attrcmd:
attrcmd = [attrcmd, '-vd', path]
try:
rc, out, err = self.run_command(attrcmd)
if rc == 0:
res = out.split()
output['attr_flags'] = res[1].replace('-', '').strip()
output['version'] = res[0].strip()
output['attributes'] = format_attributes(output['attr_flags'])
except Exception:
pass
return output
@classmethod
def _symbolic_mode_to_octal(cls, path_stat, symbolic_mode):
"""
This enables symbolic chmod string parsing as stated in the chmod man-page
This includes things like: "u=rw-x+X,g=r-x+X,o=r-x+X"
"""
new_mode = stat.S_IMODE(path_stat.st_mode)
# Now parse all symbolic modes
for mode in symbolic_mode.split(','):
# Per single mode. This always contains a '+', '-' or '='
# Split it on that
permlist = MODE_OPERATOR_RE.split(mode)
# And find all the operators
opers = MODE_OPERATOR_RE.findall(mode)
            # The user(s) the mode applies to form the first element in the
            # 'permlist' list. Take that and remove it from the list.
            # An empty user or 'a' means 'all'.
users = permlist.pop(0)
use_umask = (users == '')
if users == 'a' or users == '':
users = 'ugo'
# Check if there are illegal characters in the user list
# They can end up in 'users' because they are not split
if USERS_RE.match(users):
raise ValueError("bad symbolic permission for mode: %s" % mode)
            # Now we have two lists of equal length, one containing the requested
            # permissions and one the corresponding operators.
for idx, perms in enumerate(permlist):
# Check if there are illegal characters in the permissions
if PERMS_RE.match(perms):
raise ValueError("bad symbolic permission for mode: %s" % mode)
for user in users:
mode_to_apply = cls._get_octal_mode_from_symbolic_perms(path_stat, user, perms, use_umask)
new_mode = cls._apply_operation_to_mode(user, opers[idx], mode_to_apply, new_mode)
return new_mode
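    # Worked example (illustrative, assuming a regular file whose current mode is
    # 0o644): 'u+x' ORs in the owner execute bit and returns 0o744, while 'go='
    # clears all group/other bits and returns 0o600. Comma-separated clauses such
    # as 'u+x,go=' are applied left to right.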
@staticmethod
def _apply_operation_to_mode(user, operator, mode_to_apply, current_mode):
if operator == '=':
if user == 'u':
mask = stat.S_IRWXU | stat.S_ISUID
elif user == 'g':
mask = stat.S_IRWXG | stat.S_ISGID
elif user == 'o':
mask = stat.S_IRWXO | stat.S_ISVTX
# mask out u, g, or o permissions from current_mode and apply new permissions
inverse_mask = mask ^ PERM_BITS
new_mode = (current_mode & inverse_mask) | mode_to_apply
elif operator == '+':
new_mode = current_mode | mode_to_apply
elif operator == '-':
new_mode = current_mode - (current_mode & mode_to_apply)
return new_mode
@staticmethod
def _get_octal_mode_from_symbolic_perms(path_stat, user, perms, use_umask):
prev_mode = stat.S_IMODE(path_stat.st_mode)
is_directory = stat.S_ISDIR(path_stat.st_mode)
has_x_permissions = (prev_mode & EXEC_PERM_BITS) > 0
apply_X_permission = is_directory or has_x_permissions
        # Get the umask; if the 'user' part is empty, the effect is as if (a) were
        # given, but bits that are set in the umask are not affected.
        # We also need the "reversed umask" for masking.
umask = os.umask(0)
os.umask(umask)
rev_umask = umask ^ PERM_BITS
# Permission bits constants documented at:
# http://docs.python.org/2/library/stat.html#stat.S_ISUID
if apply_X_permission:
X_perms = {
'u': {'X': stat.S_IXUSR},
'g': {'X': stat.S_IXGRP},
'o': {'X': stat.S_IXOTH},
}
else:
X_perms = {
'u': {'X': 0},
'g': {'X': 0},
'o': {'X': 0},
}
user_perms_to_modes = {
'u': {
'r': rev_umask & stat.S_IRUSR if use_umask else stat.S_IRUSR,
'w': rev_umask & stat.S_IWUSR if use_umask else stat.S_IWUSR,
'x': rev_umask & stat.S_IXUSR if use_umask else stat.S_IXUSR,
's': stat.S_ISUID,
't': 0,
'u': prev_mode & stat.S_IRWXU,
'g': (prev_mode & stat.S_IRWXG) << 3,
'o': (prev_mode & stat.S_IRWXO) << 6},
'g': {
'r': rev_umask & stat.S_IRGRP if use_umask else stat.S_IRGRP,
'w': rev_umask & stat.S_IWGRP if use_umask else stat.S_IWGRP,
'x': rev_umask & stat.S_IXGRP if use_umask else stat.S_IXGRP,
's': stat.S_ISGID,
't': 0,
'u': (prev_mode & stat.S_IRWXU) >> 3,
'g': prev_mode & stat.S_IRWXG,
'o': (prev_mode & stat.S_IRWXO) << 3},
'o': {
'r': rev_umask & stat.S_IROTH if use_umask else stat.S_IROTH,
'w': rev_umask & stat.S_IWOTH if use_umask else stat.S_IWOTH,
'x': rev_umask & stat.S_IXOTH if use_umask else stat.S_IXOTH,
's': 0,
't': stat.S_ISVTX,
'u': (prev_mode & stat.S_IRWXU) >> 6,
'g': (prev_mode & stat.S_IRWXG) >> 3,
'o': prev_mode & stat.S_IRWXO},
}
# Insert X_perms into user_perms_to_modes
for key, value in X_perms.items():
user_perms_to_modes[key].update(value)
def or_reduce(mode, perm):
return mode | user_perms_to_modes[user][perm]
return reduce(or_reduce, perms, 0)
def set_fs_attributes_if_different(self, file_args, changed, diff=None, expand=True):
# set modes owners and context as needed
changed = self.set_context_if_different(
file_args['path'], file_args['secontext'], changed, diff
)
changed = self.set_owner_if_different(
file_args['path'], file_args['owner'], changed, diff, expand
)
changed = self.set_group_if_different(
file_args['path'], file_args['group'], changed, diff, expand
)
changed = self.set_mode_if_different(
file_args['path'], file_args['mode'], changed, diff, expand
)
changed = self.set_attributes_if_different(
file_args['path'], file_args['attributes'], changed, diff, expand
)
return changed
def check_file_absent_if_check_mode(self, file_path):
return self.check_mode and not os.path.exists(file_path)
def set_directory_attributes_if_different(self, file_args, changed, diff=None, expand=True):
return self.set_fs_attributes_if_different(file_args, changed, diff, expand)
def set_file_attributes_if_different(self, file_args, changed, diff=None, expand=True):
return self.set_fs_attributes_if_different(file_args, changed, diff, expand)
def add_path_info(self, kwargs):
'''
for results that are files, supplement the info about the file
in the return path with stats about the file path.
'''
path = kwargs.get('path', kwargs.get('dest', None))
if path is None:
return kwargs
b_path = to_bytes(path, errors='surrogate_or_strict')
if os.path.exists(b_path):
(uid, gid) = self.user_and_group(path)
kwargs['uid'] = uid
kwargs['gid'] = gid
try:
user = pwd.getpwuid(uid)[0]
except KeyError:
user = str(uid)
try:
group = grp.getgrgid(gid)[0]
except KeyError:
group = str(gid)
kwargs['owner'] = user
kwargs['group'] = group
st = os.lstat(b_path)
kwargs['mode'] = '0%03o' % stat.S_IMODE(st[stat.ST_MODE])
# secontext not yet supported
if os.path.islink(b_path):
kwargs['state'] = 'link'
elif os.path.isdir(b_path):
kwargs['state'] = 'directory'
elif os.stat(b_path).st_nlink > 1:
kwargs['state'] = 'hard'
else:
kwargs['state'] = 'file'
if HAVE_SELINUX and self.selinux_enabled():
kwargs['secontext'] = ':'.join(self.selinux_context(path))
kwargs['size'] = st[stat.ST_SIZE]
return kwargs
def _check_locale(self):
'''
Uses the locale module to test the currently set locale
(per the LANG and LC_CTYPE environment settings)
'''
try:
# setting the locale to '' uses the default locale
# as it would be returned by locale.getdefaultlocale()
locale.setlocale(locale.LC_ALL, '')
except locale.Error:
# fallback to the 'C' locale, which may cause unicode
# issues but is preferable to simply failing because
# of an unknown locale
locale.setlocale(locale.LC_ALL, 'C')
os.environ['LANG'] = 'C'
os.environ['LC_ALL'] = 'C'
os.environ['LC_MESSAGES'] = 'C'
except Exception as e:
self.fail_json(msg="An unknown error was encountered while attempting to validate the locale: %s" %
to_native(e), exception=traceback.format_exc())
def _handle_aliases(self, spec=None, param=None):
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
# this uses exceptions as it happens before we can safely call fail_json
alias_results, self._legal_inputs = handle_aliases(spec, param)
return alias_results
def _handle_no_log_values(self, spec=None, param=None):
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
self.no_log_values.update(list_no_log_values(spec, param))
self._deprecations.extend(list_deprecations(spec, param))
def _check_arguments(self, check_invalid_arguments, spec=None, param=None, legal_inputs=None):
self._syslog_facility = 'LOG_USER'
unsupported_parameters = set()
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
if legal_inputs is None:
legal_inputs = self._legal_inputs
for k in list(param.keys()):
if check_invalid_arguments and k not in legal_inputs:
unsupported_parameters.add(k)
for k in PASS_VARS:
# handle setting internal properties from internal ansible vars
param_key = '_ansible_%s' % k
if param_key in param:
if k in PASS_BOOLS:
setattr(self, PASS_VARS[k][0], self.boolean(param[param_key]))
else:
setattr(self, PASS_VARS[k][0], param[param_key])
# clean up internal top level params:
if param_key in self.params:
del self.params[param_key]
else:
# use defaults if not already set
if not hasattr(self, PASS_VARS[k][0]):
setattr(self, PASS_VARS[k][0], PASS_VARS[k][1])
if unsupported_parameters:
msg = "Unsupported parameters for (%s) module: %s" % (self._name, ', '.join(sorted(list(unsupported_parameters))))
if self._options_context:
msg += " found in %s." % " -> ".join(self._options_context)
msg += " Supported parameters include: %s" % (', '.join(sorted(spec.keys())))
self.fail_json(msg=msg)
if self.check_mode and not self.supports_check_mode:
self.exit_json(skipped=True, msg="remote module (%s) does not support check mode" % self._name)
def _count_terms(self, check, param=None):
if param is None:
param = self.params
return count_terms(check, param)
def _check_mutually_exclusive(self, spec, param=None):
if param is None:
param = self.params
try:
check_mutually_exclusive(spec, param)
except TypeError as e:
msg = to_native(e)
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
def _check_required_one_of(self, spec, param=None):
if spec is None:
return
if param is None:
param = self.params
try:
check_required_one_of(spec, param)
except TypeError as e:
msg = to_native(e)
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
def _check_required_together(self, spec, param=None):
if spec is None:
return
if param is None:
param = self.params
try:
check_required_together(spec, param)
except TypeError as e:
msg = to_native(e)
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
def _check_required_by(self, spec, param=None):
if spec is None:
return
if param is None:
param = self.params
try:
check_required_by(spec, param)
except TypeError as e:
self.fail_json(msg=to_native(e))
def _check_required_arguments(self, spec=None, param=None):
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
try:
check_required_arguments(spec, param)
except TypeError as e:
msg = to_native(e)
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
def _check_required_if(self, spec, param=None):
''' ensure that parameters which conditionally required are present '''
if spec is None:
return
if param is None:
param = self.params
try:
check_required_if(spec, param)
except TypeError as e:
msg = to_native(e)
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
def _check_argument_values(self, spec=None, param=None):
''' ensure all arguments have the requested values, and there are no stray arguments '''
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
for (k, v) in spec.items():
choices = v.get('choices', None)
if choices is None:
continue
if isinstance(choices, SEQUENCETYPE) and not isinstance(choices, (binary_type, text_type)):
if k in param:
# Allow one or more when type='list' param with choices
if isinstance(param[k], list):
diff_list = ", ".join([item for item in param[k] if item not in choices])
if diff_list:
choices_str = ", ".join([to_native(c) for c in choices])
msg = "value of %s must be one or more of: %s. Got no match for: %s" % (k, choices_str, diff_list)
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
elif param[k] not in choices:
# PyYaml converts certain strings to bools. If we can unambiguously convert back, do so before checking
                        # the value. If we can't figure this out, the module author is responsible.
lowered_choices = None
if param[k] == 'False':
lowered_choices = lenient_lowercase(choices)
overlap = BOOLEANS_FALSE.intersection(choices)
if len(overlap) == 1:
# Extract from a set
(param[k],) = overlap
if param[k] == 'True':
if lowered_choices is None:
lowered_choices = lenient_lowercase(choices)
overlap = BOOLEANS_TRUE.intersection(choices)
if len(overlap) == 1:
(param[k],) = overlap
if param[k] not in choices:
choices_str = ", ".join([to_native(c) for c in choices])
msg = "value of %s must be one of: %s, got: %s" % (k, choices_str, param[k])
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
else:
msg = "internal error: choices for argument %s are not iterable: %s" % (k, choices)
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
def safe_eval(self, value, locals=None, include_exceptions=False):
return safe_eval(value, locals, include_exceptions)
def _check_type_str(self, value):
opts = {
'error': False,
'warn': False,
'ignore': True
}
# Ignore, warn, or error when converting to a string.
allow_conversion = opts.get(self._string_conversion_action, True)
try:
return check_type_str(value, allow_conversion)
except TypeError:
common_msg = 'quote the entire value to ensure it does not change.'
if self._string_conversion_action == 'error':
msg = common_msg.capitalize()
raise TypeError(to_native(msg))
elif self._string_conversion_action == 'warn':
msg = ('The value {0!r} (type {0.__class__.__name__}) in a string field was converted to {1!r} (type string). '
'If this does not look like what you expect, {2}').format(value, to_text(value), common_msg)
self.warn(to_native(msg))
return to_native(value, errors='surrogate_or_strict')
def _check_type_list(self, value):
return check_type_list(value)
def _check_type_dict(self, value):
return check_type_dict(value)
def _check_type_bool(self, value):
return check_type_bool(value)
def _check_type_int(self, value):
return check_type_int(value)
def _check_type_float(self, value):
return check_type_float(value)
def _check_type_path(self, value):
return check_type_path(value)
def _check_type_jsonarg(self, value):
return check_type_jsonarg(value)
def _check_type_raw(self, value):
return check_type_raw(value)
def _check_type_bytes(self, value):
return check_type_bytes(value)
def _check_type_bits(self, value):
return check_type_bits(value)
def _handle_options(self, argument_spec=None, params=None):
''' deal with options to create sub spec '''
if argument_spec is None:
argument_spec = self.argument_spec
if params is None:
params = self.params
for (k, v) in argument_spec.items():
wanted = v.get('type', None)
if wanted == 'dict' or (wanted == 'list' and v.get('elements', '') == 'dict'):
spec = v.get('options', None)
if v.get('apply_defaults', False):
if spec is not None:
if params.get(k) is None:
params[k] = {}
else:
continue
elif spec is None or k not in params or params[k] is None:
continue
self._options_context.append(k)
if isinstance(params[k], dict):
elements = [params[k]]
else:
elements = params[k]
for param in elements:
if not isinstance(param, dict):
self.fail_json(msg="value of %s must be of type dict or list of dict" % k)
self._set_fallbacks(spec, param)
options_aliases = self._handle_aliases(spec, param)
self._handle_no_log_values(spec, param)
options_legal_inputs = list(spec.keys()) + list(options_aliases.keys())
self._check_arguments(self.check_invalid_arguments, spec, param, options_legal_inputs)
# check exclusive early
if not self.bypass_checks:
self._check_mutually_exclusive(v.get('mutually_exclusive', None), param)
self._set_defaults(pre=True, spec=spec, param=param)
if not self.bypass_checks:
self._check_required_arguments(spec, param)
self._check_argument_types(spec, param)
self._check_argument_values(spec, param)
self._check_required_together(v.get('required_together', None), param)
self._check_required_one_of(v.get('required_one_of', None), param)
self._check_required_if(v.get('required_if', None), param)
self._check_required_by(v.get('required_by', None), param)
self._set_defaults(pre=False, spec=spec, param=param)
# handle multi level options (sub argspec)
self._handle_options(spec, param)
self._options_context.pop()
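    # Illustrative sketch (not taken from any real module): a nested argument
    # spec that _handle_options() would recurse into.  The top-level option
    # 'server' is a dict whose sub-options are validated with the same
    # machinery as the main spec; all names here are hypothetical.
    #
    #     argument_spec = dict(
    #         server=dict(type='dict', apply_defaults=True, options=dict(
    #             host=dict(type='str', required=True),
    #             port=dict(type='int', default=443),
    #             use_ssl=dict(type='bool', default=True),
    #         )),
    #     )
    #
    # With type='list' and elements='dict', each list item is validated
    # against the same sub-spec in turn.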
def _get_wanted_type(self, wanted, k):
if not callable(wanted):
if wanted is None:
# Mostly we want to default to str.
# For values set to None explicitly, return None instead as
# that allows a user to unset a parameter
wanted = 'str'
try:
type_checker = self._CHECK_ARGUMENT_TYPES_DISPATCHER[wanted]
except KeyError:
self.fail_json(msg="implementation error: unknown type %s requested for %s" % (wanted, k))
else:
# set the type_checker to the callable, and reset wanted to the callable's name (or type if it doesn't have one, ala MagicMock)
type_checker = wanted
wanted = getattr(wanted, '__name__', to_native(type(wanted)))
return type_checker, wanted
def _handle_elements(self, wanted, param, values):
type_checker, wanted_name = self._get_wanted_type(wanted, param)
validated_params = []
for value in values:
try:
validated_params.append(type_checker(value))
except (TypeError, ValueError) as e:
msg = "Elements value for option %s" % param
if self._options_context:
msg += " found in '%s'" % " -> ".join(self._options_context)
msg += " is of type %s and we were unable to convert to %s: %s" % (type(value), wanted_name, to_native(e))
self.fail_json(msg=msg)
return validated_params
def _check_argument_types(self, spec=None, param=None):
''' ensure all arguments have the requested type '''
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
for (k, v) in spec.items():
wanted = v.get('type', None)
if k not in param:
continue
value = param[k]
if value is None:
continue
type_checker, wanted_name = self._get_wanted_type(wanted, k)
try:
param[k] = type_checker(value)
wanted_elements = v.get('elements', None)
if wanted_elements:
if wanted != 'list' or not isinstance(param[k], list):
msg = "Invalid type %s for option '%s'" % (wanted_name, param)
if self._options_context:
msg += " found in '%s'." % " -> ".join(self._options_context)
msg += ", elements value check is supported only with 'list' type"
self.fail_json(msg=msg)
param[k] = self._handle_elements(wanted_elements, k, param[k])
except (TypeError, ValueError) as e:
msg = "argument %s is of type %s" % (k, type(value))
if self._options_context:
msg += " found in '%s'." % " -> ".join(self._options_context)
msg += " and we were unable to convert to %s: %s" % (wanted_name, to_native(e))
self.fail_json(msg=msg)
def _set_defaults(self, pre=True, spec=None, param=None):
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
for (k, v) in spec.items():
default = v.get('default', None)
if pre is True:
# this prevents setting defaults on required items
if default is not None and k not in param:
param[k] = default
else:
# make sure things without a default still get set None
if k not in param:
param[k] = default
def _set_fallbacks(self, spec=None, param=None):
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
for (k, v) in spec.items():
fallback = v.get('fallback', (None,))
fallback_strategy = fallback[0]
fallback_args = []
fallback_kwargs = {}
if k not in param and fallback_strategy is not None:
for item in fallback[1:]:
if isinstance(item, dict):
fallback_kwargs = item
else:
fallback_args = item
try:
param[k] = fallback_strategy(*fallback_args, **fallback_kwargs)
except AnsibleFallbackNotFound:
continue
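    # Illustrative sketch: a fallback is a tuple of (strategy, args...), where
    # the strategy is a callable that either returns a value or raises
    # AnsibleFallbackNotFound.  env_fallback is the usual strategy; the option
    # name 'api_token' and the environment variable name are hypothetical.
    #
    #     argument_spec = dict(
    #         api_token=dict(type='str', no_log=True,
    #                        fallback=(env_fallback, ['MYSERVICE_TOKEN'])),
    #     )
    #
    # If 'api_token' is not supplied, _set_fallbacks() calls
    # env_fallback('MYSERVICE_TOKEN') and uses that environment variable's
    # value, or leaves the option unset if the variable is absent.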
def _load_params(self):
''' read the input and set the params attribute.
This method is for backwards compatibility. The guts of the function
were moved out in 2.1 so that custom modules could read the parameters.
'''
# debug overrides to read args from file or cmdline
self.params = _load_params()
def _log_to_syslog(self, msg):
if HAS_SYSLOG:
module = 'ansible-%s' % self._name
facility = getattr(syslog, self._syslog_facility, syslog.LOG_USER)
syslog.openlog(str(module), 0, facility)
syslog.syslog(syslog.LOG_INFO, msg)
def debug(self, msg):
if self._debug:
self.log('[debug] %s' % msg)
def log(self, msg, log_args=None):
if not self.no_log:
if log_args is None:
log_args = dict()
module = 'ansible-%s' % self._name
if isinstance(module, binary_type):
module = module.decode('utf-8', 'replace')
# 6655 - allow for accented characters
if not isinstance(msg, (binary_type, text_type)):
raise TypeError("msg should be a string (got %s)" % type(msg))
# We want journal to always take text type
# syslog takes bytes on py2, text type on py3
if isinstance(msg, binary_type):
journal_msg = remove_values(msg.decode('utf-8', 'replace'), self.no_log_values)
else:
# TODO: surrogateescape is a danger here on Py3
journal_msg = remove_values(msg, self.no_log_values)
if PY3:
syslog_msg = journal_msg
else:
syslog_msg = journal_msg.encode('utf-8', 'replace')
if has_journal:
journal_args = [("MODULE", os.path.basename(__file__))]
for arg in log_args:
journal_args.append((arg.upper(), str(log_args[arg])))
try:
if HAS_SYSLOG:
# If syslog_facility specified, it needs to convert
# from the facility name to the facility code, and
# set it as SYSLOG_FACILITY argument of journal.send()
facility = getattr(syslog,
self._syslog_facility,
syslog.LOG_USER) >> 3
journal.send(MESSAGE=u"%s %s" % (module, journal_msg),
SYSLOG_FACILITY=facility,
**dict(journal_args))
else:
journal.send(MESSAGE=u"%s %s" % (module, journal_msg),
**dict(journal_args))
except IOError:
# fall back to syslog since logging to journal failed
self._log_to_syslog(syslog_msg)
else:
self._log_to_syslog(syslog_msg)
def _log_invocation(self):
''' log that ansible ran the module '''
# TODO: generalize a separate log function and make log_invocation use it
# Sanitize possible password argument when logging.
log_args = dict()
for param in self.params:
canon = self.aliases.get(param, param)
arg_opts = self.argument_spec.get(canon, {})
no_log = arg_opts.get('no_log', False)
if self.boolean(no_log):
log_args[param] = 'NOT_LOGGING_PARAMETER'
# try to capture all passwords/passphrase named fields missed by no_log
elif PASSWORD_MATCH.search(param) and arg_opts.get('type', 'str') != 'bool' and not arg_opts.get('choices', False):
# skip boolean and enums as they are about 'password' state
log_args[param] = 'NOT_LOGGING_PASSWORD'
self.warn('Module did not set no_log for %s' % param)
else:
param_val = self.params[param]
if not isinstance(param_val, (text_type, binary_type)):
param_val = str(param_val)
elif isinstance(param_val, text_type):
param_val = param_val.encode('utf-8')
log_args[param] = heuristic_log_sanitize(param_val, self.no_log_values)
msg = ['%s=%s' % (to_native(arg), to_native(val)) for arg, val in log_args.items()]
if msg:
msg = 'Invoked with %s' % ' '.join(msg)
else:
msg = 'Invoked'
self.log(msg, log_args=log_args)
def _set_cwd(self):
try:
cwd = os.getcwd()
if not os.access(cwd, os.F_OK | os.R_OK):
raise Exception()
return cwd
except Exception:
# we don't have access to the cwd, probably because of sudo.
# Try and move to a neutral location to prevent errors
for cwd in [self.tmpdir, os.path.expandvars('$HOME'), tempfile.gettempdir()]:
try:
if os.access(cwd, os.F_OK | os.R_OK):
os.chdir(cwd)
return cwd
except Exception:
pass
# we won't error here, as it may *not* be a problem,
# and we don't want to break modules unnecessarily
return None
def get_bin_path(self, arg, required=False, opt_dirs=None):
'''
Find system executable in PATH.
:param arg: The executable to find.
:param required: if executable is not found and required is ``True``, fail_json
:param opt_dirs: optional list of directories to search in addition to ``PATH``
:returns: if found return full path; otherwise return None
'''
bin_path = None
try:
bin_path = get_bin_path(arg, required, opt_dirs)
except ValueError as e:
self.fail_json(msg=to_text(e))
return bin_path
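    # Usage sketch (illustrative): a module would typically resolve a helper
    # binary once and reuse the path; 'git' is only an example name.
    #
    #     git_path = module.get_bin_path('git', required=True, opt_dirs=['/usr/local/bin'])
    #     rc, out, err = module.run_command([git_path, '--version'])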
def boolean(self, arg):
'''Convert the argument to a boolean'''
if arg is None:
return arg
try:
return boolean(arg)
except TypeError as e:
self.fail_json(msg=to_native(e))
def jsonify(self, data):
try:
return jsonify(data)
except UnicodeError as e:
self.fail_json(msg=to_text(e))
def from_json(self, data):
return json.loads(data)
def add_cleanup_file(self, path):
if path not in self.cleanup_files:
self.cleanup_files.append(path)
def do_cleanup_files(self):
for path in self.cleanup_files:
self.cleanup(path)
def _return_formatted(self, kwargs):
self.add_path_info(kwargs)
if 'invocation' not in kwargs:
kwargs['invocation'] = {'module_args': self.params}
if 'warnings' in kwargs:
if isinstance(kwargs['warnings'], list):
for w in kwargs['warnings']:
self.warn(w)
else:
self.warn(kwargs['warnings'])
if self._warnings:
kwargs['warnings'] = self._warnings
if 'deprecations' in kwargs:
if isinstance(kwargs['deprecations'], list):
for d in kwargs['deprecations']:
if isinstance(d, SEQUENCETYPE) and len(d) == 2:
self.deprecate(d[0], version=d[1])
elif isinstance(d, Mapping):
self.deprecate(d['msg'], version=d.get('version', None))
else:
self.deprecate(d)
else:
self.deprecate(kwargs['deprecations'])
if self._deprecations:
kwargs['deprecations'] = self._deprecations
kwargs = remove_values(kwargs, self.no_log_values)
print('\n%s' % self.jsonify(kwargs))
def exit_json(self, **kwargs):
''' return from the module, without error '''
self.do_cleanup_files()
self._return_formatted(kwargs)
sys.exit(0)
def fail_json(self, **kwargs):
''' return from the module, with an error message '''
if 'msg' not in kwargs:
raise AssertionError("implementation error -- msg to explain the error is required")
kwargs['failed'] = True
# Add traceback if debug or high verbosity and it is missing
# NOTE: Badly named as exception, it really always has been a traceback
if 'exception' not in kwargs and sys.exc_info()[2] and (self._debug or self._verbosity >= 3):
if PY2:
# On Python 2 this is the last (stack frame) exception and as such may be unrelated to the failure
kwargs['exception'] = 'WARNING: The below traceback may *not* be related to the actual failure.\n' +\
''.join(traceback.format_tb(sys.exc_info()[2]))
else:
kwargs['exception'] = ''.join(traceback.format_tb(sys.exc_info()[2]))
self.do_cleanup_files()
self._return_formatted(kwargs)
sys.exit(1)
def fail_on_missing_params(self, required_params=None):
if not required_params:
return
try:
check_missing_parameters(self.params, required_params)
except TypeError as e:
self.fail_json(msg=to_native(e))
def digest_from_file(self, filename, algorithm):
''' Return hex digest of local file for a digest_method specified by name, or None if file is not present. '''
b_filename = to_bytes(filename, errors='surrogate_or_strict')
if not os.path.exists(b_filename):
return None
if os.path.isdir(b_filename):
self.fail_json(msg="attempted to take checksum of directory: %s" % filename)
# preserve old behaviour where the third parameter was a hash algorithm object
if hasattr(algorithm, 'hexdigest'):
digest_method = algorithm
else:
try:
digest_method = AVAILABLE_HASH_ALGORITHMS[algorithm]()
except KeyError:
self.fail_json(msg="Could not hash file '%s' with algorithm '%s'. Available algorithms: %s" %
(filename, algorithm, ', '.join(AVAILABLE_HASH_ALGORITHMS)))
blocksize = 64 * 1024
infile = open(os.path.realpath(b_filename), 'rb')
block = infile.read(blocksize)
while block:
digest_method.update(block)
block = infile.read(blocksize)
infile.close()
return digest_method.hexdigest()
def md5(self, filename):
''' Return MD5 hex digest of local file using digest_from_file().
Do not use this function unless you have no other choice for:
1) Optional backwards compatibility
2) Compatibility with a third party protocol
This function will not work on systems complying with FIPS-140-2.
Most uses of this function can use the module.sha1 function instead.
'''
if 'md5' not in AVAILABLE_HASH_ALGORITHMS:
raise ValueError('MD5 not available. Possibly running in FIPS mode')
return self.digest_from_file(filename, 'md5')
def sha1(self, filename):
''' Return SHA1 hex digest of local file using digest_from_file(). '''
return self.digest_from_file(filename, 'sha1')
def sha256(self, filename):
''' Return SHA-256 hex digest of local file using digest_from_file(). '''
return self.digest_from_file(filename, 'sha256')
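    # Usage sketch (illustrative): the digest helpers all defer to
    # digest_from_file() and return None when the file is missing, so a module
    # can compare checksums without a separate existence check; the path below
    # is hypothetical.
    #
    #     before = module.sha256('/etc/example.conf')
    #     # ... rewrite the file ...
    #     changed = module.sha256('/etc/example.conf') != before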
def backup_local(self, fn):
        '''make a date-marked backup of the specified file; return the backup
        destination path, or an empty string if the file does not exist'''
backupdest = ''
if os.path.exists(fn):
# backups named basename.PID.YYYY-MM-DD@HH:MM:SS~
ext = time.strftime("%Y-%m-%d@%H:%M:%S~", time.localtime(time.time()))
backupdest = '%s.%s.%s' % (fn, os.getpid(), ext)
try:
self.preserved_copy(fn, backupdest)
except (shutil.Error, IOError) as e:
self.fail_json(msg='Could not make backup of %s to %s: %s' % (fn, backupdest, to_native(e)))
return backupdest
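    # Illustrative result: backing up '/etc/hosts' from PID 1234 would return a
    # path of the form '/etc/hosts.1234.2021-06-01@12:30:45~'; an empty string
    # is returned when the source file does not exist.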
def cleanup(self, tmpfile):
if os.path.exists(tmpfile):
try:
os.unlink(tmpfile)
except OSError as e:
sys.stderr.write("could not cleanup %s: %s" % (tmpfile, to_native(e)))
def preserved_copy(self, src, dest):
"""Copy a file with preserved ownership, permissions and context"""
# shutil.copy2(src, dst)
# Similar to shutil.copy(), but metadata is copied as well - in fact,
# this is just shutil.copy() followed by copystat(). This is similar
# to the Unix command cp -p.
#
# shutil.copystat(src, dst)
# Copy the permission bits, last access time, last modification time,
# and flags from src to dst. The file contents, owner, and group are
# unaffected. src and dst are path names given as strings.
shutil.copy2(src, dest)
# Set the context
if self.selinux_enabled():
context = self.selinux_context(src)
self.set_context_if_different(dest, context, False)
# chown it
try:
dest_stat = os.stat(src)
tmp_stat = os.stat(dest)
if dest_stat and (tmp_stat.st_uid != dest_stat.st_uid or tmp_stat.st_gid != dest_stat.st_gid):
os.chown(dest, dest_stat.st_uid, dest_stat.st_gid)
except OSError as e:
if e.errno != errno.EPERM:
raise
# Set the attributes
current_attribs = self.get_file_attributes(src)
current_attribs = current_attribs.get('attr_flags', '')
self.set_attributes_if_different(dest, current_attribs, True)
def atomic_move(self, src, dest, unsafe_writes=False):
        '''atomically move src to dest, copying attributes from dest.
        os.rename is used where possible since it is atomic; the rest of the function
        works around limitations and corner cases and preserves the selinux context
        if possible. On failure it calls fail_json rather than returning an error.'''
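        # Usage sketch (illustrative): callers normally write new content to a
        # temporary file under module.tmpdir and then move it into place so the
        # destination is never seen half-written; the paths and variable names
        # below are hypothetical.
        #
        #     tmpfd, tmpfile = tempfile.mkstemp(dir=module.tmpdir)
        #     with os.fdopen(tmpfd, 'wb') as f:
        #         f.write(new_content)
        #     module.atomic_move(tmpfile, '/etc/example.conf',
        #                        unsafe_writes=module.params.get('unsafe_writes', False))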
context = None
dest_stat = None
b_src = to_bytes(src, errors='surrogate_or_strict')
b_dest = to_bytes(dest, errors='surrogate_or_strict')
if os.path.exists(b_dest):
try:
dest_stat = os.stat(b_dest)
# copy mode and ownership
os.chmod(b_src, dest_stat.st_mode & PERM_BITS)
os.chown(b_src, dest_stat.st_uid, dest_stat.st_gid)
# try to copy flags if possible
if hasattr(os, 'chflags') and hasattr(dest_stat, 'st_flags'):
try:
os.chflags(b_src, dest_stat.st_flags)
except OSError as e:
for err in 'EOPNOTSUPP', 'ENOTSUP':
if hasattr(errno, err) and e.errno == getattr(errno, err):
break
else:
raise
except OSError as e:
if e.errno != errno.EPERM:
raise
if self.selinux_enabled():
context = self.selinux_context(dest)
else:
if self.selinux_enabled():
context = self.selinux_default_context(dest)
creating = not os.path.exists(b_dest)
try:
# Optimistically try a rename, solves some corner cases and can avoid useless work, throws exception if not atomic.
os.rename(b_src, b_dest)
except (IOError, OSError) as e:
if e.errno not in [errno.EPERM, errno.EXDEV, errno.EACCES, errno.ETXTBSY, errno.EBUSY]:
# only try workarounds for errno 18 (cross device), 1 (not permitted), 13 (permission denied)
# and 26 (text file busy) which happens on vagrant synced folders and other 'exotic' non posix file systems
self.fail_json(msg='Could not replace file: %s to %s: %s' % (src, dest, to_native(e)),
exception=traceback.format_exc())
else:
# Use bytes here. In the shippable CI, this fails with
# a UnicodeError with surrogateescape'd strings for an unknown
# reason (doesn't happen in a local Ubuntu16.04 VM)
b_dest_dir = os.path.dirname(b_dest)
b_suffix = os.path.basename(b_dest)
error_msg = None
tmp_dest_name = None
try:
tmp_dest_fd, tmp_dest_name = tempfile.mkstemp(prefix=b'.ansible_tmp',
dir=b_dest_dir, suffix=b_suffix)
except (OSError, IOError) as e:
error_msg = 'The destination directory (%s) is not writable by the current user. Error was: %s' % (os.path.dirname(dest), to_native(e))
except TypeError:
# We expect that this is happening because python3.4.x and
# below can't handle byte strings in mkstemp(). Traceback
# would end in something like:
# file = _os.path.join(dir, pre + name + suf)
# TypeError: can't concat bytes to str
error_msg = ('Failed creating tmp file for atomic move. This usually happens when using Python3 less than Python3.5. '
'Please use Python2.x or Python3.5 or greater.')
finally:
if error_msg:
if unsafe_writes:
self._unsafe_writes(b_src, b_dest)
else:
self.fail_json(msg=error_msg, exception=traceback.format_exc())
if tmp_dest_name:
b_tmp_dest_name = to_bytes(tmp_dest_name, errors='surrogate_or_strict')
try:
try:
# close tmp file handle before file operations to prevent text file busy errors on vboxfs synced folders (windows host)
os.close(tmp_dest_fd)
# leaves tmp file behind when sudo and not root
try:
shutil.move(b_src, b_tmp_dest_name)
except OSError:
# cleanup will happen by 'rm' of tmpdir
# copy2 will preserve some metadata
shutil.copy2(b_src, b_tmp_dest_name)
if self.selinux_enabled():
self.set_context_if_different(
b_tmp_dest_name, context, False)
try:
tmp_stat = os.stat(b_tmp_dest_name)
if dest_stat and (tmp_stat.st_uid != dest_stat.st_uid or tmp_stat.st_gid != dest_stat.st_gid):
os.chown(b_tmp_dest_name, dest_stat.st_uid, dest_stat.st_gid)
except OSError as e:
if e.errno != errno.EPERM:
raise
try:
os.rename(b_tmp_dest_name, b_dest)
except (shutil.Error, OSError, IOError) as e:
if unsafe_writes and e.errno == errno.EBUSY:
self._unsafe_writes(b_tmp_dest_name, b_dest)
else:
                                    self.fail_json(msg='Unable to move %s to %s, failed final rename from %s: %s' %
(src, dest, b_tmp_dest_name, to_native(e)),
exception=traceback.format_exc())
except (shutil.Error, OSError, IOError) as e:
self.fail_json(msg='Failed to replace file: %s to %s: %s' % (src, dest, to_native(e)),
exception=traceback.format_exc())
finally:
self.cleanup(b_tmp_dest_name)
if creating:
# make sure the file has the correct permissions
# based on the current value of umask
umask = os.umask(0)
os.umask(umask)
os.chmod(b_dest, DEFAULT_PERM & ~umask)
try:
os.chown(b_dest, os.geteuid(), os.getegid())
except OSError:
# We're okay with trying our best here. If the user is not
# root (or old Unices) they won't be able to chown.
pass
if self.selinux_enabled():
# rename might not preserve context
self.set_context_if_different(dest, context, False)
def _unsafe_writes(self, src, dest):
# sadly there are some situations where we cannot ensure atomicity, but only if
# the user insists and we get the appropriate error we update the file unsafely
try:
out_dest = in_src = None
try:
out_dest = open(dest, 'wb')
in_src = open(src, 'rb')
shutil.copyfileobj(in_src, out_dest)
finally: # assuring closed files in 2.4 compatible way
if out_dest:
out_dest.close()
if in_src:
in_src.close()
except (shutil.Error, OSError, IOError) as e:
self.fail_json(msg='Could not write data to file (%s) from (%s): %s' % (dest, src, to_native(e)),
exception=traceback.format_exc())
def _read_from_pipes(self, rpipes, rfds, file_descriptor):
data = b('')
if file_descriptor in rfds:
data = os.read(file_descriptor.fileno(), self.get_buffer_size(file_descriptor))
if data == b(''):
rpipes.remove(file_descriptor)
return data
def _clean_args(self, args):
if not self._clean:
# create a printable version of the command for use in reporting later,
# which strips out things like passwords from the args list
to_clean_args = args
if PY2:
if isinstance(args, text_type):
to_clean_args = to_bytes(args)
else:
if isinstance(args, binary_type):
to_clean_args = to_text(args)
if isinstance(args, (text_type, binary_type)):
to_clean_args = shlex.split(to_clean_args)
clean_args = []
is_passwd = False
for arg in (to_native(a) for a in to_clean_args):
if is_passwd:
is_passwd = False
clean_args.append('********')
continue
if PASSWD_ARG_RE.match(arg):
sep_idx = arg.find('=')
if sep_idx > -1:
clean_args.append('%s=********' % arg[:sep_idx])
continue
else:
is_passwd = True
arg = heuristic_log_sanitize(arg, self.no_log_values)
clean_args.append(arg)
self._clean = ' '.join(shlex_quote(arg) for arg in clean_args)
return self._clean
def _restore_signal_handlers(self):
# Reset SIGPIPE to SIG_DFL, otherwise in Python2.7 it gets ignored in subprocesses.
if PY2 and sys.platform != 'win32':
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
def run_command(self, args, check_rc=False, close_fds=True, executable=None, data=None, binary_data=False, path_prefix=None, cwd=None,
use_unsafe_shell=False, prompt_regex=None, environ_update=None, umask=None, encoding='utf-8', errors='surrogate_or_strict',
expand_user_and_vars=True, pass_fds=None, before_communicate_callback=None):
'''
Execute a command, returns rc, stdout, and stderr.
:arg args: is the command to run
* If args is a list, the command will be run with shell=False.
            * If args is a string and use_unsafe_shell=False, it will be split into a list and run with shell=False.
            * If args is a string and use_unsafe_shell=True, it runs with shell=True.
:kw check_rc: Whether to call fail_json in case of non zero RC.
Default False
:kw close_fds: See documentation for subprocess.Popen(). Default True
:kw executable: See documentation for subprocess.Popen(). Default None
:kw data: If given, information to write to the stdin of the command
:kw binary_data: If False, append a newline to the data. Default False
:kw path_prefix: If given, additional path to find the command in.
This adds to the PATH environment variable so helper commands in
the same directory can also be found
:kw cwd: If given, working directory to run the command inside
:kw use_unsafe_shell: See `args` parameter. Default False
:kw prompt_regex: Regex string (not a compiled regex) which can be
used to detect prompts in the stdout which would otherwise cause
the execution to hang (especially if no input data is specified)
:kw environ_update: dictionary to *update* os.environ with
:kw umask: Umask to be used when running the command. Default None
:kw encoding: Since we return native strings, on python3 we need to
know the encoding to use to transform from bytes to text. If you
want to always get bytes back, use encoding=None. The default is
"utf-8". This does not affect transformation of strings given as
args.
:kw errors: Since we return native strings, on python3 we need to
transform stdout and stderr from bytes to text. If the bytes are
undecodable in the ``encoding`` specified, then use this error
handler to deal with them. The default is ``surrogate_or_strict``
which means that the bytes will be decoded using the
surrogateescape error handler if available (available on all
python3 versions we support) otherwise a UnicodeError traceback
will be raised. This does not affect transformations of strings
given as args.
:kw expand_user_and_vars: When ``use_unsafe_shell=False`` this argument
dictates whether ``~`` is expanded in paths and environment variables
are expanded before running the command. When ``True`` a string such as
``$SHELL`` will be expanded regardless of escaping. When ``False`` and
``use_unsafe_shell=False`` no path or variable expansion will be done.
:kw pass_fds: When running on python3 this argument
dictates which file descriptors should be passed
to an underlying ``Popen`` constructor.
        :kw before_communicate_callback: This function will be called
            after the ``Popen`` object is created but before communicating
            with the process. (The ``Popen`` object is passed to the callback
            as its first argument.)
:returns: A 3-tuple of return code (integer), stdout (native string),
and stderr (native string). On python2, stdout and stderr are both
byte strings. On python3, stdout and stderr are text strings converted
according to the encoding and errors parameters. If you want byte
strings on python3, use encoding=None to turn decoding to text off.
'''
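        # Usage sketches (illustrative; 'grep_path' is assumed to come from an
        # earlier get_bin_path() call):
        #
        #     rc, out, err = module.run_command(['ls', '-l', '/tmp'])
        #     rc, out, err = module.run_command('ls -l /tmp/*.log', use_unsafe_shell=True)
        #     rc, out, err = module.run_command([grep_path, 'pattern', '-'],
        #                                       data='line one\nline two', check_rc=True)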
# used by clean args later on
self._clean = None
if not isinstance(args, (list, binary_type, text_type)):
msg = "Argument 'args' to run_command must be list or string"
self.fail_json(rc=257, cmd=args, msg=msg)
shell = False
if use_unsafe_shell:
# stringify args for unsafe/direct shell usage
if isinstance(args, list):
args = b" ".join([to_bytes(shlex_quote(x), errors='surrogate_or_strict') for x in args])
else:
args = to_bytes(args, errors='surrogate_or_strict')
# not set explicitly, check if set by controller
if executable:
executable = to_bytes(executable, errors='surrogate_or_strict')
args = [executable, b'-c', args]
elif self._shell not in (None, '/bin/sh'):
args = [to_bytes(self._shell, errors='surrogate_or_strict'), b'-c', args]
else:
shell = True
else:
# ensure args are a list
if isinstance(args, (binary_type, text_type)):
# On python2.6 and below, shlex has problems with text type
# On python3, shlex needs a text type.
if PY2:
args = to_bytes(args, errors='surrogate_or_strict')
elif PY3:
args = to_text(args, errors='surrogateescape')
args = shlex.split(args)
# expand ``~`` in paths, and all environment vars
if expand_user_and_vars:
args = [to_bytes(os.path.expanduser(os.path.expandvars(x)), errors='surrogate_or_strict') for x in args if x is not None]
else:
args = [to_bytes(x, errors='surrogate_or_strict') for x in args if x is not None]
prompt_re = None
if prompt_regex:
if isinstance(prompt_regex, text_type):
if PY3:
prompt_regex = to_bytes(prompt_regex, errors='surrogateescape')
elif PY2:
prompt_regex = to_bytes(prompt_regex, errors='surrogate_or_strict')
try:
prompt_re = re.compile(prompt_regex, re.MULTILINE)
except re.error:
self.fail_json(msg="invalid prompt regular expression given to run_command")
rc = 0
msg = None
st_in = None
# Manipulate the environ we'll send to the new process
old_env_vals = {}
# We can set this from both an attribute and per call
for key, val in self.run_command_environ_update.items():
old_env_vals[key] = os.environ.get(key, None)
os.environ[key] = val
if environ_update:
for key, val in environ_update.items():
old_env_vals[key] = os.environ.get(key, None)
os.environ[key] = val
if path_prefix:
old_env_vals['PATH'] = os.environ['PATH']
os.environ['PATH'] = "%s:%s" % (path_prefix, os.environ['PATH'])
# If using test-module.py and explode, the remote lib path will resemble:
# /tmp/test_module_scratch/debug_dir/ansible/module_utils/basic.py
# If using ansible or ansible-playbook with a remote system:
# /tmp/ansible_vmweLQ/ansible_modlib.zip/ansible/module_utils/basic.py
# Clean out python paths set by ansiballz
if 'PYTHONPATH' in os.environ:
pypaths = os.environ['PYTHONPATH'].split(':')
pypaths = [x for x in pypaths
if not x.endswith('/ansible_modlib.zip') and
not x.endswith('/debug_dir')]
os.environ['PYTHONPATH'] = ':'.join(pypaths)
if not os.environ['PYTHONPATH']:
del os.environ['PYTHONPATH']
if data:
st_in = subprocess.PIPE
kwargs = dict(
executable=executable,
shell=shell,
close_fds=close_fds,
stdin=st_in,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
preexec_fn=self._restore_signal_handlers,
)
if PY3 and pass_fds:
kwargs["pass_fds"] = pass_fds
# store the pwd
prev_dir = os.getcwd()
# make sure we're in the right working directory
if cwd and os.path.isdir(cwd):
cwd = to_bytes(os.path.abspath(os.path.expanduser(cwd)), errors='surrogate_or_strict')
kwargs['cwd'] = cwd
try:
os.chdir(cwd)
except (OSError, IOError) as e:
self.fail_json(rc=e.errno, msg="Could not open %s, %s" % (cwd, to_native(e)),
exception=traceback.format_exc())
old_umask = None
if umask:
old_umask = os.umask(umask)
try:
if self._debug:
self.log('Executing: ' + self._clean_args(args))
cmd = subprocess.Popen(args, **kwargs)
if before_communicate_callback:
before_communicate_callback(cmd)
# the communication logic here is essentially taken from that
# of the _communicate() function in ssh.py
stdout = b('')
stderr = b('')
rpipes = [cmd.stdout, cmd.stderr]
if data:
if not binary_data:
data += '\n'
if isinstance(data, text_type):
data = to_bytes(data)
cmd.stdin.write(data)
cmd.stdin.close()
while True:
rfds, wfds, efds = select.select(rpipes, [], rpipes, 1)
stdout += self._read_from_pipes(rpipes, rfds, cmd.stdout)
stderr += self._read_from_pipes(rpipes, rfds, cmd.stderr)
# if we're checking for prompts, do it now
if prompt_re:
if prompt_re.search(stdout) and not data:
if encoding:
stdout = to_native(stdout, encoding=encoding, errors=errors)
return (257, stdout, "A prompt was encountered while running a command, but no input data was specified")
# only break out if no pipes are left to read or
# the pipes are completely read and
# the process is terminated
if (not rpipes or not rfds) and cmd.poll() is not None:
break
# No pipes are left to read but process is not yet terminated
# Only then it is safe to wait for the process to be finished
# NOTE: Actually cmd.poll() is always None here if rpipes is empty
elif not rpipes and cmd.poll() is None:
cmd.wait()
# The process is terminated. Since no pipes to read from are
# left, there is no need to call select() again.
break
cmd.stdout.close()
cmd.stderr.close()
rc = cmd.returncode
except (OSError, IOError) as e:
self.log("Error Executing CMD:%s Exception:%s" % (self._clean_args(args), to_native(e)))
self.fail_json(rc=e.errno, msg=to_native(e), cmd=self._clean_args(args))
except Exception as e:
self.log("Error Executing CMD:%s Exception:%s" % (self._clean_args(args), to_native(traceback.format_exc())))
self.fail_json(rc=257, msg=to_native(e), exception=traceback.format_exc(), cmd=self._clean_args(args))
# Restore env settings
for key, val in old_env_vals.items():
if val is None:
del os.environ[key]
else:
os.environ[key] = val
if old_umask:
os.umask(old_umask)
if rc != 0 and check_rc:
msg = heuristic_log_sanitize(stderr.rstrip(), self.no_log_values)
self.fail_json(cmd=self._clean_args(args), rc=rc, stdout=stdout, stderr=stderr, msg=msg)
# reset the pwd
os.chdir(prev_dir)
if encoding is not None:
return (rc, to_native(stdout, encoding=encoding, errors=errors),
to_native(stderr, encoding=encoding, errors=errors))
return (rc, stdout, stderr)
def append_to_file(self, filename, str):
filename = os.path.expandvars(os.path.expanduser(filename))
fh = open(filename, 'a')
fh.write(str)
fh.close()
def bytes_to_human(self, size):
return bytes_to_human(size)
# for backwards compatibility
pretty_bytes = bytes_to_human
def human_to_bytes(self, number, isbits=False):
return human_to_bytes(number, isbits)
#
# Backwards compat
#
# In 2.0, moved from inside the module to the toplevel
is_executable = is_executable
@staticmethod
def get_buffer_size(fd):
try:
            # 1032 == F_GETPIPE_SZ (Linux fcntl to query the pipe buffer size)
buffer_size = fcntl.fcntl(fd, 1032)
except Exception:
try:
# not as exact as above, but should be good enough for most platforms that fail the previous call
buffer_size = select.PIPE_BUF
except Exception:
                buffer_size = 9000  # use a sane default just in case
return buffer_size
def get_module_path():
return os.path.dirname(os.path.realpath(__file__))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ===------------------------------* Python *------------------------------===
# THIS FILE IS GENERATED BY INVAR. DO NOT EDIT !!!
# ===------------------------------------------------------------------------===
from TestAbcGender import Gender
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from InvarCodec import DataWriter
from InvarCodec import DataReader
class TestAbcConflict(object):
"""名字冲突的类型"""
CRC32_ = 0xCC7A29B9
SIZE_ = 13
__slots__ = (
'_key',
'_text',
'_bytes',
'_hotfix')
#__slots__
def __init__(self):
self._key = Gender.NONE
self._text = ''
self._bytes = []
self._hotfix = None
#def __init__
def __str__(self):
s = StringIO()
s.write(u'{')
s.write(u' ')
s.write(u'TestAbcConflict')
s.write(u',')
s.write(u' ')
s.write(u'key')
s.write(u':')
s.write(unicode(self._key))
s.write(u',')
s.write(u' ')
s.write(u'text')
s.write(u':')
s.write(u'"')
s.write(self._text)
s.write(u'"')
s.write(u',')
s.write(u' ')
s.write(u'bytes')
s.write(u':')
s.write(u'(')
s.write(str(len(self._bytes)))
s.write(u')')
s.write(u',')
s.write(u' ')
s.write(u'hotfix')
s.write(u':')
if self._hotfix is None:
s.write(u'null')
else:
s.write(u'[')
s.write(str(len(self._hotfix)))
s.write(u']')
s.write(u' ')
s.write(u'}')
result = s.getvalue()
s.close()
return result
#def __str__
def __len__(self):
size = TestAbcConflict.SIZE_
size += len(self._text)
if len(self._bytes) > 0:
size += len(self._bytes) * 1
if self._hotfix is not None:
size += 4
for (k1,v1) in self._hotfix.items():
size += len(k1)
size += len(v1)
return size
#def __len__
    def read(self, r):
self._key = r.readInt32()
self._text = r.readString()
lenBytes = r.readUInt32()
num = 0
while num < lenBytes:
num += 1
n1 = r.readInt8()
self._bytes.append(n1)
hotfixExists = r.readInt8()
if 0x01 == hotfixExists:
            if self._hotfix is None:
self._hotfix = dict()
lenHotfix = r.readUInt32()
num = 0
while num < lenHotfix:
num += 1
k1 = r.readString()
v1 = r.readString()
self._hotfix[k1] = v1
elif 0x00 == hotfixExists:
self._hotfix = None
else:
raise InvarError(498, 'Protoc read error: The value of \'hotfixExists\' is invalid.')
#def read
    def write(self, w):
w.writeInt32(self._key)
w.writeString(self._text)
w.writeUInt32(len(self._bytes))
for n1 in self._bytes:
w.writeInt8(n1)
        if self._hotfix is not None:
w.writeUInt8(0x01)
w.writeUInt32(len(self._hotfix))
for (k1,v1) in self._hotfix.items():
w.writeString(k1)
w.writeString(v1)
else:
w.writeUInt8(0x00)
#def write
#class TestAbcConflict
if '__main__' == __name__:
print('dir(TestAbcConflict()) =>\n' + '\n'.join(dir(TestAbcConflict())))
print('TestAbcConflict.__doc__ => ' + TestAbcConflict.__doc__)
print('TestAbcConflict.__len__ => ' + str(len(TestAbcConflict())))
print('TestAbcConflict.__str__ => ' + str(TestAbcConflict()))
|
#!/usr/bin/env python
""" Online Constrained Nonnegative Matrix Factorization
The general file class which is used to analyze calcium imaging data in an
online fashion using the OnACID algorithm. The output of the algorithm
is storead in an Estimates class
More info:
------------
Giovannucci, A., Friedrich, J., Kaufman, M., Churchland, A., Chklovskii, D.,
Paninski, L., & Pnevmatikakis, E.A. (2017). OnACID: Online analysis of calcium
imaging data in real time. In Advances in Neural Information Processing Systems
(pp. 2381-2391).
@url http://papers.nips.cc/paper/6832-onacid-online-analysis-of-calcium-imaging-data-in-real-time
"""
from builtins import map
from builtins import range
from builtins import str
from builtins import zip
import cv2
import logging
from math import sqrt
from multiprocessing import current_process, cpu_count
import numpy as np
import os
from past.utils import old_div
from scipy.ndimage import percentile_filter
from scipy.ndimage.filters import gaussian_filter
from scipy.sparse import coo_matrix, csc_matrix, spdiags, hstack
from scipy.stats import norm
from sklearn.decomposition import NMF
from sklearn.preprocessing import normalize
import tensorflow as tf
from time import time
from typing import List, Tuple
import caiman
from .cnmf import CNMF
from .estimates import Estimates
from .initialization import imblur, initialize_components, hals, downscale
from .oasis import OASIS
from .params import CNMFParams
from .pre_processing import get_noise_fft
from .utilities import update_order, get_file_size, peak_local_max, decimation_matrix
from ... import mmapping
from ...components_evaluation import compute_event_exceptionality
from ...motion_correction import (motion_correct_iteration_fast,
tile_and_correct, high_pass_filter_space,
sliding_window)
from ...utils.utils import save_dict_to_hdf5, load_dict_from_hdf5, parmap, load_graph
from ...utils.stats import pd_solve
from ... import summary_images
try:
cv2.setNumThreads(0)
except Exception:
pass
try:
profile
except NameError:
def profile(a): return a
class OnACID(object):
""" Source extraction of streaming data using online matrix factorization.
The class can be initialized by passing a "params" object for setting up
the relevant parameters and an "Estimates" object for setting an initial
state of the algorithm (optional)
Methods:
initialize_online:
Initialize the online algorithm using a provided method, and prepare
the online object
_prepare_object:
Prepare the online object given a set of estimates
fit_next:
Fit the algorithm on the next data frame
fit_online:
Run the entire online pipeline on a given list of files
"""
def __init__(self, params=None, estimates=None, path=None, dview=None):
if path is None:
self.params = CNMFParams() if params is None else params
self.estimates = Estimates() if estimates is None else estimates
else:
onacid = load_OnlineCNMF(path)
self.params = params if params is not None else onacid.params
            self.estimates = estimates if estimates is not None else onacid.estimates
self.dview = dview
# if params is None or estimates is None:
# raise ValueError("Cannot Specify Estimates and Params While \
# Loading Object From File")
@profile
def _prepare_object(self, Yr, T, new_dims=None, idx_components=None):
init_batch = self.params.get('online', 'init_batch')
old_dims = self.params.get('data', 'dims')
self.is1p = (self.params.get('init', 'method_init') == 'corr_pnr' and
self.params.get('init', 'ring_size_factor') is not None)
if idx_components is None:
idx_components = range(self.estimates.A.shape[-1])
self.estimates.A = self.estimates.A.astype(np.float32)
self.estimates.C = self.estimates.C.astype(np.float32)
if self.estimates.f is not None:
self.estimates.f = self.estimates.f.astype(np.float32)
if self.estimates.b is not None:
self.estimates.b = self.estimates.b.astype(np.float32)
self.estimates.YrA = self.estimates.YrA.astype(np.float32)
self.estimates.select_components(idx_components=idx_components)
self.N = self.estimates.A.shape[-1]
self.M = self.params.get('init', 'nb') + self.N
if not self.params.get('online', 'update_num_comps'):
self.params.set('online', {'expected_comps': self.N})
elif (self.params.get('online', 'expected_comps') <=
self.N + self.params.get('online', 'max_num_added')):
self.params.set('online', {'expected_comps': self.N +
self.params.get('online', 'max_num_added') + 200})
expected_comps = self.params.get('online', 'expected_comps')
if Yr.shape[-1] != self.params.get('online', 'init_batch'):
raise Exception(
'The movie size used for initialization does not match with the minibatch size')
if new_dims is not None:
new_Yr = np.zeros([np.prod(new_dims), init_batch])
for ffrr in range(init_batch):
tmp = cv2.resize(Yr[:, ffrr].reshape(old_dims, order='F'), new_dims[::-1])
print(tmp.shape)
new_Yr[:, ffrr] = tmp.reshape([np.prod(new_dims)], order='F')
Yr = new_Yr
A_new = csc_matrix((np.prod(new_dims), self.estimates.A.shape[-1]),
dtype=np.float32)
for neur in range(self.N):
a = self.estimates.A.tocsc()[:, neur].toarray()
a = a.reshape(old_dims, order='F')
a = cv2.resize(a, new_dims[::-1]).reshape([-1, 1], order='F')
A_new[:, neur] = csc_matrix(a)
self.estimates.A = A_new
if self.estimates.b.size:
self.estimates.b = self.estimates.b.reshape(old_dims, order='F')
self.estimates.b = cv2.resize(
self.estimates.b, new_dims[::-1]).reshape([-1, 1], order='F')
else:
self.estimates.b.shape = (np.prod(new_dims), 0)
if self.is1p:
# self.estimates.b0 is calculated below
# ToDo, but not easy: resize self.estimates.W
raise NotImplementedError('change of dimensions not yet implemented for CNMF-E')
self.estimates.dims = new_dims
else:
self.estimates.dims = old_dims
self.estimates.normalize_components()
self.estimates.A = self.estimates.A.todense()
self.estimates.noisyC = np.zeros(
(self.params.get('init', 'nb') + expected_comps, T), dtype=np.float32)
self.estimates.C_on = np.zeros((expected_comps, T), dtype=np.float32)
self.estimates.noisyC[self.params.get('init', 'nb'):self.M, :self.params.get('online', 'init_batch')] = self.estimates.C + self.estimates.YrA
self.estimates.noisyC[:self.params.get('init', 'nb'), :self.params.get('online', 'init_batch')] = self.estimates.f
if self.params.get('preprocess', 'p'):
# if no parameter for calculating the spike size threshold is given, then use L1 penalty
if self.params.get('temporal', 's_min') is None:
use_L1 = True
else:
use_L1 = False
self.estimates.OASISinstances = [OASIS(
g=gam[0], lam=0 if not use_L1 else l,
s_min=0 if use_L1 else (self.params.get('temporal', 's_min') if self.params.get('temporal', 's_min') > 0 else
(-self.params.get('temporal', 's_min') * sn * np.sqrt(1 - np.sum(gam)))),
b=b,
g2=0 if self.params.get('preprocess', 'p') < 2 else gam[1])
for gam, l, b, sn in zip(self.estimates.g, self.estimates.lam, self.estimates.bl, self.estimates.neurons_sn)]
for i, o in enumerate(self.estimates.OASISinstances):
o.fit(self.estimates.noisyC[i + self.params.get('init', 'nb'), :init_batch])
self.estimates.C_on[i, :init_batch] = o.c
else:
self.estimates.C_on[:self.N, :init_batch] = self.estimates.C
if self.is1p:
ssub_B = self.params.get('init', 'ssub_B') * self.params.get('init', 'ssub')
X = Yr[:, :init_batch] - np.asarray(self.estimates.A.dot(self.estimates.C))
self.estimates.b0 = X.mean(1)
X -= self.estimates.b0[:, None]
if ssub_B > 1:
self.estimates.downscale_matrix = decimation_matrix(self.estimates.dims, ssub_B)
self.estimates.upscale_matrix = self.estimates.downscale_matrix.T
self.estimates.upscale_matrix.data = np.ones_like(self.estimates.upscale_matrix.data)
X = self.estimates.downscale_matrix.dot(X)
if self.params.get('online', 'full_XXt'):
self.estimates.XXt = X.dot(X.T)
else:
self.XXt_mats = []
self.XXt_vecs = []
self.W_ind = []
W = self.estimates.W
for p in range(W.shape[0]):
index = W.indices[W.indptr[p]:W.indptr[p + 1]]
self.W_ind.append(index)
x_i = X[index]
self.XXt_mats.append(x_i.dot(x_i.T))
self.XXt_vecs.append(x_i.dot(X[p].T))
self.estimates.Ab, self.ind_A, self.estimates.CY, self.estimates.CC = init_shapes_and_sufficient_stats(
Yr[:, :init_batch].reshape(self.estimates.dims + (-1,), order='F'),
self.estimates.A, self.estimates.C_on[:self.N, :init_batch],
self.estimates.b, self.estimates.noisyC[:self.params.get('init', 'nb'), :init_batch],
W=self.estimates.W if self.is1p else None, b0=self.estimates.b0 if self.is1p else None,
ssub_B=self.params.get('init', 'ssub_B') * self.params.get('init', 'ssub'),
downscale_matrix=self.estimates.downscale_matrix if (self.is1p and ssub_B > 1) else None,
upscale_matrix=self.estimates.upscale_matrix if (self.is1p and ssub_B > 1) else None)
self.estimates.CY = self.estimates.CY * 1. / self.params.get('online', 'init_batch')
self.estimates.CC = 1 * self.estimates.CC / self.params.get('online', 'init_batch')
logging.info('Expecting {0} components'.format(str(expected_comps)))
self.estimates.CY.resize([expected_comps + self.params.get('init', 'nb'), self.estimates.CY.shape[-1]], refcheck=False)
if self.params.get('online', 'use_dense'):
self.estimates.Ab_dense = np.zeros((self.estimates.CY.shape[-1], expected_comps + self.params.get('init', 'nb')),
dtype=np.float32)
self.estimates.Ab_dense[:, :self.estimates.Ab.shape[1]] = self.estimates.Ab.toarray()
self.estimates.C_on = np.vstack(
[self.estimates.noisyC[:self.params.get('init', 'nb'), :], self.estimates.C_on.astype(np.float32)])
if not self.is1p:
self.params.set('init', {'gSiz': np.add(np.multiply(np.ceil(
self.params.get('init', 'gSig')).astype(np.int), 2), 1)})
self.estimates.Yr_buf = RingBuffer(Yr[:, self.params.get('online', 'init_batch') - self.params.get('online', 'minibatch_shape'):
self.params.get('online', 'init_batch')].T.copy(), self.params.get('online', 'minibatch_shape'))
self.estimates.Yres_buf = RingBuffer(self.estimates.Yr_buf - self.estimates.Ab.dot(
self.estimates.C_on[:self.M, self.params.get('online', 'init_batch') - self.params.get('online', 'minibatch_shape'):
self.params.get('online', 'init_batch')]).T, self.params.get('online', 'minibatch_shape'))
if self.is1p:
estim = self.estimates
d1, d2 = estim.dims
estim.Yres_buf -= estim.b0
if ssub_B == 1:
estim.Atb = estim.Ab.T.dot(estim.W.dot(estim.b0) - estim.b0)
estim.AtW = estim.Ab.T.dot(estim.W)
estim.AtWA = estim.AtW.dot(estim.Ab).toarray()
estim.Yres_buf -= estim.W.dot(estim.Yres_buf.T).T
else:
A_ds = estim.downscale_matrix.dot(estim.Ab)
estim.Atb = estim.Ab.T.dot(estim.upscale_matrix.dot(
estim.W.dot(estim.downscale_matrix.dot(estim.b0))) - estim.b0)
estim.AtW = A_ds.T.dot(estim.W)
estim.AtWA = estim.AtW.dot(A_ds).toarray()
estim.Yres_buf -= estim.upscale_matrix.dot(estim.W.dot(
estim.downscale_matrix.dot(estim.Yres_buf.T))).T
self.estimates.sn = np.array(np.std(self.estimates.Yres_buf,axis=0))
self.estimates.vr = np.array(np.var(self.estimates.Yres_buf,axis=0))
self.estimates.mn = self.estimates.Yres_buf.mean(0)
self.estimates.mean_buff = self.estimates.Yres_buf.mean(0)
self.estimates.ind_new = []
if self.params.get('online', 'use_corr_img'):
self.estimates.rho_buf = None
self.estimates.sv = None
else:
self.estimates.rho_buf = imblur(np.maximum(self.estimates.Yres_buf.T, 0).reshape(
self.estimates.dims + (-1,), order='F'), sig=self.params.get('init', 'gSig'),
siz=self.params.get('init', 'gSiz'), nDimBlur=len(self.estimates.dims))**2
self.estimates.rho_buf = np.reshape(
self.estimates.rho_buf, (np.prod(self.estimates.dims), -1)).T
self.estimates.rho_buf = np.ascontiguousarray(self.estimates.rho_buf)
self.estimates.rho_buf = RingBuffer(self.estimates.rho_buf, self.params.get('online', 'minibatch_shape'))
self.estimates.sv = np.sum(self.estimates.rho_buf.get_last_frames(
min(self.params.get('online', 'init_batch'), self.params.get('online', 'minibatch_shape')) - 1), 0)
self.estimates.AtA = (self.estimates.Ab.T.dot(self.estimates.Ab)).toarray()
self.estimates.AtY_buf = self.estimates.Ab.T.dot(self.estimates.Yr_buf.T)
self.estimates.groups = list(map(list, update_order(self.estimates.Ab)[0]))
self.update_counter = 2**np.linspace(0, 1, self.N, dtype=np.float32)
self.estimates.CC = np.ascontiguousarray(self.estimates.CC)
self.estimates.CY = np.ascontiguousarray(self.estimates.CY)
self.time_neuron_added:List = []
for nneeuu in range(self.N):
self.time_neuron_added.append((nneeuu, self.params.get('online', 'init_batch')))
if self.params.get('online', 'dist_shape_update'):
self.time_spend = 0
self.comp_upd:List = []
# setup per patch classifier
if self.params.get('online', 'path_to_model') is None or self.params.get('online', 'sniper_mode') is False:
loaded_model = None
self.params.set('online', {'sniper_mode': False})
self.tf_in = None
self.tf_out = None
else:
try:
from tensorflow.keras.models import model_from_json
logging.info('Using Keras')
use_keras = True
except(ModuleNotFoundError):
use_keras = False
logging.info('Using Tensorflow')
if use_keras:
path = self.params.get('online', 'path_to_model').split(".")[:-1]
json_path = ".".join(path + ["json"])
model_path = ".".join(path + ["h5"])
json_file = open(json_path, 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
loaded_model.load_weights(model_path)
#opt = tf.keras.optimizers.rmsprop(lr=0.0001, decay=1e-6)
#loaded_model.compile(loss=tf.keras.losses.categorical_crossentropy,
# optimizer=opt, metrics=['accuracy'])
self.tf_in = None
self.tf_out = None
else:
path = self.params.get('online', 'path_to_model').split(".")[:-1]
model_path = '.'.join(path + ['h5', 'pb'])
loaded_model = load_graph(model_path)
self.tf_in = loaded_model.get_tensor_by_name('prefix/conv2d_1_input:0')
self.tf_out = loaded_model.get_tensor_by_name('prefix/output_node0:0')
loaded_model = tf.Session(graph=loaded_model)
self.loaded_model = loaded_model
if self.is1p:
from skimage.morphology import disk
radius = int(round(self.params.get('init', 'ring_size_factor') *
self.params.get('init', 'gSiz')[0] / float(ssub_B)))
ring = disk(radius + 1)
ring[1:-1, 1:-1] -= disk(radius)
self._ringidx = [i - radius - 1 for i in np.nonzero(ring)]
self._dims_B = ((self.estimates.dims[0] - 1) // ssub_B + 1,
(self.estimates.dims[1] - 1) // ssub_B + 1)
def get_indices_of_pixels_on_ring(self, pixel):
pixel = np.unravel_index(pixel, self._dims_B, order='F')
x = pixel[0] + self._ringidx[0]
y = pixel[1] + self._ringidx[1]
inside = (x >= 0) * (x < self._dims_B[0]) * (y >= 0) * (y < self._dims_B[1])
return np.ravel_multi_index((x[inside], y[inside]), self._dims_B, order='F')
self.get_indices_of_pixels_on_ring = get_indices_of_pixels_on_ring.__get__(self)
# generate list of indices of XX' that get accessed
if self.params.get('online', 'full_XXt'):
l = np.prod(self._dims_B)
tmp = np.zeros((l, l), dtype=bool)
for p in range(l):
index = self.get_indices_of_pixels_on_ring(p)
tmp[index[:, None], index] = True
tmp[index, p] = True
self.estimates.XXt_ind = list([np.where(t)[0] for t in tmp])
if self.params.get('online', 'use_corr_img'):
Yres = Yr[:, :init_batch] - self.estimates.Ab.dot(
self.estimates.C_on[:self.M, :init_batch])
if self.is1p:
Yres -= self.estimates.b0[:, None]
if ssub_B == 1:
Yres -= self.estimates.W.dot(Yres)
else:
Yres -= estim.upscale_matrix.dot(estim.W.dot(
estim.downscale_matrix.dot(Yres)))
Yres = Yres.reshape((d1, d2, -1), order='F')
(self.estimates.first_moment, self.estimates.second_moment,
self.estimates.crosscorr, self.estimates.col_ind, self.estimates.row_ind,
self.estimates.num_neigbors, self.estimates.corrM, self.estimates.corr_img) = \
summary_images.prepare_local_correlations(Yres, swap_dim=True, eight_neighbours=False)
self.estimates.max_img = Yres.max(-1)
self.comp_upd = []
self.t_shapes:List = []
self.t_detect:List = []
self.t_motion:List = []
self.t_stat:List = []
return self
@profile
def fit_next(self, t, frame_in, num_iters_hals=3):
"""
This method fits the next frame using the CaImAn online algorithm and
updates the object.
Args
t : int
time measured in number of frames
frame_in : array
flattened array of shape (x * y [ * z],) containing the t-th image.
num_iters_hals: int, optional
maximal number of iterations for HALS (NNLS via blockCD)
"""
t_start = time()
# locally scoped variables for brevity of code and faster look up
nb_ = self.params.get('init', 'nb')
Ab_ = self.estimates.Ab
mbs = self.params.get('online', 'minibatch_shape')
ssub_B = self.params.get('init', 'ssub_B') * self.params.get('init', 'ssub')
d1, d2 = self.estimates.dims
expected_comps = self.params.get('online', 'expected_comps')
frame = frame_in.astype(np.float32)
# print(np.max(1/scipy.sparse.linalg.norm(self.estimates.Ab,axis = 0)))
self.estimates.Yr_buf.append(frame)
if len(self.estimates.ind_new) > 0:
self.estimates.mean_buff = self.estimates.Yres_buf.mean(0)
if (not self.params.get('online', 'simultaneously')) or self.params.get('preprocess', 'p') == 0:
# get noisy fluor value via NNLS (project data on shapes & demix)
C_in = self.estimates.noisyC[:self.M, t - 1].copy()
if self.is1p:
self.estimates.C_on[:self.M, t], self.estimates.noisyC[:self.M, t] = demix1p(
frame, self.estimates.Ab, C_in, self.estimates.AtA, Atb=self.estimates.Atb,
AtW=self.estimates.AtW, AtWA=self.estimates.AtWA, iters=num_iters_hals,
groups=self.estimates.groups, ssub_B=ssub_B,
downscale_matrix=self.estimates.downscale_matrix if ssub_B > 1 else None)
else:
self.estimates.C_on[:self.M, t], self.estimates.noisyC[:self.M, t] = HALS4activity(
frame, self.estimates.Ab, C_in, self.estimates.AtA, iters=num_iters_hals, groups=self.estimates.groups)
if self.params.get('preprocess', 'p'):
# denoise & deconvolve
for i, o in enumerate(self.estimates.OASISinstances):
o.fit_next(self.estimates.noisyC[nb_ + i, t])
self.estimates.C_on[nb_ + i, t - o.get_l_of_last_pool() +
1: t + 1] = o.get_c_of_last_pool()
else:
if self.is1p:
raise NotImplementedError(
'simultaneous demixing and deconvolution not implemented yet for CNMF-E')
# update buffer, initialize C with previous value
self.estimates.C_on[:, t] = self.estimates.C_on[:, t - 1]
self.estimates.noisyC[:, t] = self.estimates.C_on[:, t - 1]
self.estimates.AtY_buf = np.concatenate((self.estimates.AtY_buf[:, 1:], self.estimates.Ab.T.dot(frame)[:, None]), 1) \
if self.params.get('online', 'n_refit') else self.estimates.Ab.T.dot(frame)[:, None]
# demix, denoise & deconvolve
(self.estimates.C_on[:self.M, t + 1 - mbs:t + 1], self.estimates.noisyC[:self.M, t + 1 - mbs:t + 1],
self.estimates.OASISinstances) = demix_and_deconvolve(
self.estimates.C_on[:self.M, t + 1 - mbs:t + 1],
self.estimates.noisyC[:self.M, t + 1 - mbs:t + 1],
self.estimates.AtY_buf, self.estimates.AtA, self.estimates.OASISinstances, iters=num_iters_hals,
n_refit=self.params.get('online', 'n_refit'))
for i, o in enumerate(self.estimates.OASISinstances):
self.estimates.C_on[nb_ + i, t - o.get_l_of_last_pool() + 1: t +
1] = o.get_c_of_last_pool()
#self.estimates.mean_buff = self.estimates.Yres_buf.mean(0)
res_frame = frame - self.estimates.Ab.dot(self.estimates.C_on[:self.M, t])
if self.is1p:
self.estimates.b0 = self.estimates.b0 * (t-1)/t + res_frame/t
res_frame -= self.estimates.b0
res_frame -= (self.estimates.W.dot(res_frame) if ssub_B == 1 else
self.estimates.upscale_matrix.dot(self.estimates.W.dot(
self.estimates.downscale_matrix.dot(res_frame))))
mn_ = self.estimates.mn.copy()
self.estimates.mn = (t-1)/t*self.estimates.mn + res_frame/t
self.estimates.vr = (t-1)/t*self.estimates.vr + (res_frame - mn_)*(res_frame - self.estimates.mn)/t
self.estimates.sn = np.sqrt(self.estimates.vr)
t_new = time()
num_added = 0
if self.params.get('online', 'update_num_comps'):
if self.params.get('online', 'use_corr_img'):
corr_img_mode = 'simple' #'exponential' # 'cumulative'
self.estimates.corr_img = summary_images.update_local_correlations(
t + 1 if corr_img_mode == 'cumulative' else mbs,
res_frame.reshape((1,) + self.estimates.dims, order='F'),
self.estimates.first_moment, self.estimates.second_moment,
self.estimates.crosscorr, self.estimates.col_ind, self.estimates.row_ind,
self.estimates.num_neigbors, self.estimates.corrM,
del_frames=[self.estimates.Yres_buf[self.estimates.Yres_buf.cur]]
if corr_img_mode == 'simple' else None)
self.estimates.mean_buff += (res_frame-self.estimates.Yres_buf[self.estimates.Yres_buf.cur])/self.params.get('online', 'minibatch_shape')
self.estimates.Yres_buf.append(res_frame)
res_frame = np.reshape(res_frame, self.estimates.dims, order='F')
if self.params.get('online', 'use_corr_img'):
self.estimates.max_img = np.max([self.estimates.max_img, res_frame], 0)
else:
rho = imblur(np.maximum(res_frame,0), sig=self.params.get('init', 'gSig'),
siz=self.params.get('init', 'gSiz'),
nDimBlur=len(self.params.get('data', 'dims')))**2
rho = np.reshape(rho, np.prod(self.params.get('data', 'dims')))
self.estimates.rho_buf.append(rho)
# old_max_img = self.estimates.max_img.copy()
if self.params.get('preprocess', 'p') == 1:
g_est = np.mean(self.estimates.g)
elif self.params.get('preprocess', 'p') == 2:
g_est = np.mean(self.estimates.g, 0)
else:
g_est = 0
use_corr = self.params.get('online', 'use_corr_img')
(self.estimates.Ab, Cf_temp, self.estimates.Yres_buf, self.estimates.rho_buf,
self.estimates.CC, self.estimates.CY, self.ind_A, self.estimates.sv,
self.estimates.groups, self.estimates.ind_new, self.ind_new_all,
self.estimates.sv, self.cnn_pos) = update_num_components(
t, self.estimates.sv, self.estimates.Ab, self.estimates.C_on[:self.M, (t - mbs + 1):(t + 1)],
self.estimates.Yres_buf, self.estimates.Yr_buf, self.estimates.rho_buf,
self.params.get('data', 'dims'), self.params.get('init', 'gSig'),
self.params.get('init', 'gSiz'), self.ind_A, self.estimates.CY, self.estimates.CC,
rval_thr=self.params.get('online', 'rval_thr'),
thresh_fitness_delta=self.params.get('online', 'thresh_fitness_delta'),
thresh_fitness_raw=self.params.get('online', 'thresh_fitness_raw'),
thresh_overlap=self.params.get('online', 'thresh_overlap'), groups=self.estimates.groups,
batch_update_suff_stat=self.params.get('online', 'batch_update_suff_stat'),
gnb=self.params.get('init', 'nb'), sn=self.estimates.sn,
g=g_est, s_min=self.params.get('temporal', 's_min'),
Ab_dense=self.estimates.Ab_dense if self.params.get('online', 'use_dense') else None,
oases=self.estimates.OASISinstances if self.params.get('preprocess', 'p') else None,
N_samples_exceptionality=self.params.get('online', 'N_samples_exceptionality'),
max_num_added=self.params.get('online', 'max_num_added'),
min_num_trial=self.params.get('online', 'min_num_trial'),
loaded_model = self.loaded_model, test_both=self.params.get('online', 'test_both'),
thresh_CNN_noisy = self.params.get('online', 'thresh_CNN_noisy'),
sniper_mode=self.params.get('online', 'sniper_mode'),
use_peak_max=self.params.get('online', 'use_peak_max'),
mean_buff=self.estimates.mean_buff,
tf_in=self.tf_in, tf_out=self.tf_out,
ssub_B=ssub_B, W=self.estimates.W if self.is1p else None,
b0=self.estimates.b0 if self.is1p else None,
corr_img=self.estimates.corr_img if use_corr else None,
first_moment=self.estimates.first_moment if use_corr else None,
second_moment=self.estimates.second_moment if use_corr else None,
crosscorr=self.estimates.crosscorr if use_corr else None,
col_ind=self.estimates.col_ind if use_corr else None,
row_ind=self.estimates.row_ind if use_corr else None,
corr_img_mode=corr_img_mode if use_corr else None,
downscale_matrix=self.estimates.downscale_matrix if
(self.is1p and ssub_B > 1) else None,
upscale_matrix=self.estimates.upscale_matrix if
(self.is1p and ssub_B > 1) else None,
max_img=self.estimates.max_img if use_corr else None)
num_added = len(self.ind_A) - self.N
if num_added > 0:
# import matplotlib.pyplot as plt
# plt.figure(figsize=(15, 10))
# plt.subplot(231)
# plt.imshow(self.estimates.corr_img)
# foo = summary_images.update_local_correlations(
# np.inf, np.zeros((0,) + self.estimates.dims, order='F'),
# self.estimates.first_moment, self.estimates.second_moment,
# self.estimates.crosscorr, self.estimates.col_ind, self.estimates.row_ind,
# self.estimates.num_neigbors, self.estimates.corrM)
# plt.subplot(232)
# plt.imshow(foo)
# plt.subplot(233)
# plt.imshow(self.estimates.Ab_dense[:,self.M].reshape(self.estimates.dims, order='F'))
# plt.subplot(234)
# plt.imshow(old_max_img)
# plt.subplot(235)
# plt.imshow(self.estimates.max_img)
# plt.show()
self.N += num_added
self.M += num_added
if self.N + self.params.get('online', 'max_num_added') > expected_comps:
expected_comps += 200
self.params.set('online', {'expected_comps': expected_comps})
self.estimates.CY.resize(
[expected_comps + nb_, self.estimates.CY.shape[-1]])
# refcheck can trigger "ValueError: cannot resize an array that references or is referenced
# by another array in this way. Use the np.resize function or refcheck=False"
# np.resize didn't work here, but refcheck=False seems fine
self.estimates.C_on.resize(
[expected_comps + nb_, self.estimates.C_on.shape[-1]], refcheck=False)
self.estimates.noisyC.resize(
[expected_comps + nb_, self.estimates.C_on.shape[-1]])
if self.params.get('online', 'use_dense'):  # in-place resize is not reliable here, hence reallocate and copy
# self.estimates.Ab_dense.resize([self.estimates.CY.shape[-1], expected_comps+nb_])
self.estimates.Ab_dense = np.zeros((self.estimates.CY.shape[-1], expected_comps + nb_),
dtype=np.float32)
self.estimates.Ab_dense[:, :Ab_.shape[1]] = Ab_.toarray()
logging.info('Increasing number of expected components to: ' +
str(expected_comps))
self.update_counter.resize(self.N, refcheck=False)
self.estimates.noisyC[self.M - num_added:self.M, t - mbs +
1:t + 1] = Cf_temp[self.M - num_added:self.M]
for _ct in range(self.M - num_added, self.M):
self.time_neuron_added.append((_ct - nb_, t))
if self.params.get('preprocess', 'p'):
# N.B. OASISinstances are already updated within update_num_components
self.estimates.C_on[_ct, t - mbs + 1: t +
1] = self.estimates.OASISinstances[_ct - nb_].get_c(mbs)
else:
self.estimates.C_on[_ct, t - mbs + 1: t + 1] = np.maximum(
0, self.estimates.noisyC[_ct, t - mbs + 1: t + 1])
if self.params.get('online', 'simultaneously') and self.params.get('online', 'n_refit'):
self.estimates.AtY_buf = np.concatenate((
self.estimates.AtY_buf, [Ab_.data[Ab_.indptr[_ct]:Ab_.indptr[_ct + 1]].dot(
self.estimates.Yr_buf.T[Ab_.indices[Ab_.indptr[_ct]:Ab_.indptr[_ct + 1]]])]))
# N.B. Ab_dense is already updated within update_num_components as side effect
# self.estimates.AtA = (Ab_.T.dot(Ab_)).toarray()
# faster incremental update of AtA instead of above line:
AtA = self.estimates.AtA
self.estimates.AtA = np.zeros((self.M, self.M), dtype=np.float32)
self.estimates.AtA[:-num_added, :-num_added] = AtA
if self.params.get('online', 'use_dense'):
self.estimates.AtA[:, -num_added:] = self.estimates.Ab.T.dot(
self.estimates.Ab_dense[:, self.M - num_added:self.M])
else:
self.estimates.AtA[:, -num_added:] = self.estimates.Ab.T.dot(
self.estimates.Ab[:, -num_added:]).toarray()
self.estimates.AtA[-num_added:] = self.estimates.AtA[:, -num_added:].T
if self.is1p:
# # update XXt and W: TODO only update necessary pixels not all!
# x = (y - self.Ab.dot(ccf).T - self.b0).T if ssub_B == 1
# else (downscale((y.T - self.Ab.dot(ccf) - self.b0[:, None])
# .reshape(self.dims2 + (-1,), order='F'), (ssub_B, ssub_B, 1))
# .reshape((-1, len(y)), order='F'))
# for p in range(self.W.shape[0]):
# index = self.get_indices_of_pixels_on_ring(p)
# self.W.data[self.W.indptr[p]:self.W.indptr[p + 1]] = \
# np.linalg.inv(self.XXt[index[:, None], index]).dot(self.XXt[index, p])
if ssub_B == 1:
# self.estimates.AtW = Ab_.T.dot(self.estimates.W)
# self.estimates.AtWA = self.estimates.AtW.dot(Ab_).toarray()
# faster incremental update of AtW and AtWA instead of above lines:
csr_append(self.estimates.AtW, Ab_.T[-num_added:].dot(self.estimates.W))
AtWA = self.estimates.AtWA
self.estimates.AtWA = np.zeros((self.M, self.M), dtype=np.float32)
self.estimates.AtWA[:-num_added, :-num_added] = AtWA
self.estimates.AtWA[:, -num_added:] = self.estimates.AtW.dot(
Ab_[:, -num_added:]).toarray()
self.estimates.AtWA[-num_added:] = self.estimates.AtW[-num_added:].dot(
Ab_).toarray()
self.estimates.Atb = self.estimates.AtW.dot(
self.estimates.b0) - Ab_.T.dot(self.estimates.b0)
else:
A_ds = self.estimates.downscale_matrix.dot(self.estimates.Ab)
# self.estimates.AtW = A_ds.T.dot(self.estimates.W)
# self.estimates.AtWA = self.estimates.AtW.dot(A_ds).toarray()
# faster incremental update of AtW and AtWA instead of above lines:
csr_append(self.estimates.AtW, A_ds.T[-num_added:].dot(self.estimates.W))
AtWA = self.estimates.AtWA
self.estimates.AtWA = np.zeros((self.M, self.M), dtype=np.float32)
self.estimates.AtWA[:-num_added, :-num_added] = AtWA
self.estimates.AtWA[:, -num_added:] = self.estimates.AtW.dot(
A_ds[:, -num_added:]).toarray()
self.estimates.AtWA[-num_added:] = self.estimates.AtW[-num_added:].dot(
A_ds).toarray()
self.estimates.Atb = ssub_B**2 * self.estimates.AtW.dot(
self.estimates.downscale_matrix.dot(
self.estimates.b0)) - Ab_.T.dot(self.estimates.b0)
# set the update counter to 0 for components that are overlapping the newly added ones
idx_overlap = self.estimates.AtA[nb_:-num_added, -num_added:].nonzero()[0]
self.update_counter[idx_overlap] = 0
self.t_detect.append(time() - t_new)
t_stat = time()
if self.params.get('online', 'batch_update_suff_stat'):
# faster update using minibatch of frames
min_batch = min(self.params.get('online', 'update_freq'), mbs)
if ((t + 1 - self.params.get('online', 'init_batch')) % min_batch == 0):
ccf = self.estimates.C_on[:self.M, t - min_batch + 1:t + 1]
y = self.estimates.Yr_buf.get_last_frames(min_batch)
if self.is1p: # subtract background
if ssub_B == 1:
x = (y - self.estimates.Ab.dot(ccf).T - self.estimates.b0).T
y -= self.estimates.W.dot(x).T
else:
x = self.estimates.downscale_matrix.dot(
y.T - self.estimates.Ab.dot(ccf) - self.estimates.b0[:, None])
y -= self.estimates.upscale_matrix.dot(self.estimates.W.dot(x)).T
y -= self.estimates.b0
# self.estimates.XXt += x.dot(x.T)
# exploit that we only access some elements of XXt, hence update only these
if self.params.get('online', 'full_XXt'):
XXt = self.estimates.XXt # alias for faster repeated look up in large loop
for i, idx in enumerate(self.estimates.XXt_ind):
XXt[i, idx] += (x[i].dot(x[idx].T)).flatten()
else:
XXt_mats = self.XXt_mats
XXt_vecs = self.XXt_vecs
W = self.estimates.W
for p in range(len(XXt_mats)):
# index = W.indices[W.indptr[p]:W.indptr[p + 1]]
x_i = x[self.W_ind[p]]
XXt_mats[p] += x_i.dot(x_i.T)
XXt_vecs[p] += x_i.dot(x[p].T)
# much faster: exploit that we only access CY[m, ind_pixels], hence update only these
n0 = min_batch
t0 = 0 * self.params.get('online', 'init_batch')
w1 = (t - n0 + t0) * 1. / (t + t0) # (1 - 1./t)#mbs*1. / t
w2 = 1. / (t + t0) # 1.*mbs /t
ccf = np.ascontiguousarray(ccf)
y = np.asfortranarray(y)
for m in range(self.N):
self.estimates.CY[m + nb_, self.ind_A[m]] *= w1
self.estimates.CY[m + nb_, self.ind_A[m]] += w2 * \
ccf[m + nb_].dot(y[:, self.ind_A[m]])
self.estimates.CY[:nb_] = self.estimates.CY[:nb_] * w1 + \
w2 * ccf[:nb_].dot(y) # background
self.estimates.CC = self.estimates.CC * w1 + w2 * ccf.dot(ccf.T)
else:
ccf = self.estimates.C_on[:self.M, t - self.params.get('online', 'minibatch_suff_stat'):t -
self.params.get('online', 'minibatch_suff_stat') + 1]
y = self.estimates.Yr_buf.get_last_frames(self.params.get('online', 'minibatch_suff_stat') + 1)[:1]
if self.is1p: # subtract background
if ssub_B == 1:
x = (y - self.estimates.Ab.dot(ccf).T - self.estimates.b0).T
y -= self.estimates.W.dot(x).T
else:
x = self.estimates.downscale_matrix.dot(
y.T - self.estimates.Ab.dot(ccf) - self.estimates.b0[:, None])
y -= self.estimates.upscale_matrix.dot(self.estimates.W.dot(x)).T
y -= self.estimates.b0
# self.estimates.XXt += x.dot(x.T)
# exploit that we only access some elements of XXt, hence update only these
if self.params.get('online', 'full_XXt'):
XXt = self.estimates.XXt # alias for faster repeated look up in large loop
for i, idx in enumerate(self.estimates.XXt_ind):
XXt[i, idx] += (x[i] * x[idx]).flatten()
else:
XXt_mats = self.XXt_mats
XXt_vecs = self.XXt_vecs
W = self.estimates.W
for p in range(len(XXt_mats)):
# index = W.indices[W.indptr[p]:W.indptr[p + 1]]
x_i = x[self.W_ind[p]]
XXt_mats[p] += np.outer(x_i, x_i)
XXt_vecs[p] += x_i.dot(x[p].T)
# much faster: exploit that we only access CY[m, ind_pixels], hence update only these
ccf = np.ascontiguousarray(ccf)
y = np.asfortranarray(y)
for m in range(self.N):
self.estimates.CY[m + nb_, self.ind_A[m]] *= (1 - 1. / t)
self.estimates.CY[m + nb_, self.ind_A[m]] += ccf[m +
nb_].dot(y[:, self.ind_A[m]]) / t
self.estimates.CY[:nb_] = self.estimates.CY[:nb_] * (1 - 1. / t) + ccf[:nb_].dot(y / t)
self.estimates.CC = self.estimates.CC * (1 - 1. / t) + ccf.dot(ccf.T / t)
self.t_stat.append(time() - t_stat)
# update shapes
t_sh = time()
if not self.params.get('online', 'dist_shape_update'): # bulk shape update
if ((t + 1 - self.params.get('online', 'init_batch')) %
self.params.get('online', 'update_freq') == 0):
logging.info('Updating Shapes')
if self.N > self.params.get('online', 'max_comp_update_shape'):
indicator_components = np.where(self.update_counter <=
self.params.get('online', 'num_times_comp_updated'))[0]
# np.random.choice(self.N,10,False)
self.update_counter[indicator_components] += 1
else:
indicator_components = None
if self.params.get('online', 'use_dense'):
# update dense Ab and sparse Ab simultaneously;
# this is faster than calling update_shapes with sparse Ab only
Ab_, self.ind_A, self.estimates.Ab_dense[:, :self.M] = update_shapes(
self.estimates.CY, self.estimates.CC, self.estimates.Ab, self.ind_A,
indicator_components=indicator_components,
Ab_dense=self.estimates.Ab_dense[:, :self.M],
sn=self.estimates.sn, q=0.5, iters=self.params.get('online', 'iters_shape'))
else:
Ab_, self.ind_A, _ = update_shapes(
self.estimates.CY, self.estimates.CC, Ab_, self.ind_A,
indicator_components=indicator_components, sn=self.estimates.sn,
q=0.5, iters=self.params.get('online', 'iters_shape'))
self.estimates.AtA = (Ab_.T.dot(Ab_)).toarray()
if self.is1p and ((t + 1 - self.params.get('online', 'init_batch')) %
(self.params.get('online', 'W_update_factor') * self.params.get('online', 'update_freq')) == 0):
W = self.estimates.W
# for p in range(W.shape[0]):
# # index = self.get_indices_of_pixels_on_ring(p)
# index = W.indices[W.indptr[p]:W.indptr[p + 1]]
# # for _ in range(3): # update W via coordinate decent
# # for k, i in enumerate(index):
# # self.W.data[self.W.indptr[p] + k] += ((self.XXt[p, i] -
# # self.W.data[self.W.indptr[p]:self.W.indptr[p+1]].dot(self.XXt[index, i])) /
# # self.XXt[i, i])
# # update W using normal equations
# tmp = XXt[index[:, None], index]
# tmp[np.diag_indices(len(tmp))] += np.trace(tmp) * 1e-5
# W.data[W.indptr[p]:W.indptr[p + 1]] = np.linalg.inv(tmp).dot(XXt[index, p])
if self.params.get('online', 'full_XXt'):
XXt = self.estimates.XXt # alias for considerably faster look up in large loop
def process_pixel(p):
# index = W.indices[W.indptr[p]:W.indptr[p + 1]]
index = self.W_ind[p]
tmp = XXt[index[:, None], index]
tmp[np.diag_indices(len(tmp))] += np.trace(tmp) * 1e-5
return pd_solve(tmp, XXt[index, p])
if False: # current_process().name == 'MainProcess':
W.data = np.concatenate(parmap(process_pixel, range(W.shape[0])))
else:
W.data = np.concatenate(list(map(process_pixel, range(W.shape[0]))))
else:
XXt_mats = self.XXt_mats
XXt_vecs = self.XXt_vecs
# def process_pixel2(p):
# #return np.linalg.solve(a[0], a[1])
# return np.linalg.solve(XXt_mats[p], XXt_vecs[p])
# W.data = np.concatenate(list(map(process_pixel2, range(W.shape[0]))))
if self.dview is None:
W.data = np.concatenate(list(map(inv_mat_vec, zip(XXt_mats, XXt_vecs))))
elif 'multiprocessing' in str(type(self.dview)):
W.data = np.concatenate(list(self.dview.imap(inv_mat_vec, zip(XXt_mats, XXt_vecs), chunksize=256)))
else:
W.data = np.concatenate(list(self.dview.map_sync(inv_mat_vec, zip(XXt_mats, XXt_vecs))))
self.dview.results.clear()
#W.data = np.concatenate(parmap(process_pixel2, range(W.shape[0])))
#W.data = np.concatenate(parmap(process_pixel2, zip(XXt_mats, XXt_vecs)))
if ssub_B == 1:
self.estimates.Atb = Ab_.T.dot(W.dot(self.estimates.b0) - self.estimates.b0)
self.estimates.AtW = Ab_.T.dot(W)
self.estimates.AtWA = self.estimates.AtW.dot(Ab_).toarray()
else:
d1, d2 = self.estimates.dims
A_ds = self.estimates.downscale_matrix.dot(self.estimates.Ab)
self.estimates.Atb = Ab_.T.dot(self.estimates.upscale_matrix.dot(W.dot(
self.estimates.downscale_matrix.dot(self.estimates.b0))) - self.estimates.b0)
self.estimates.AtW = A_ds.T.dot(W)
self.estimates.AtWA = self.estimates.AtW.dot(A_ds).toarray()
ind_zero = list(np.where(self.estimates.AtA.diagonal() < 1e-10)[0])
if len(ind_zero) > 0:
ind_zero.sort()
ind_zero = ind_zero[::-1]
ind_keep = list(set(range(Ab_.shape[-1])) - set(ind_zero))
ind_keep.sort()
if self.params.get('online', 'use_dense'):
self.estimates.Ab_dense = np.delete(
self.estimates.Ab_dense, ind_zero, axis=1)
self.estimates.AtA = np.delete(self.estimates.AtA, ind_zero, axis=0)
self.estimates.AtA = np.delete(self.estimates.AtA, ind_zero, axis=1)
self.estimates.CY = np.delete(self.estimates.CY, ind_zero, axis=0)
self.estimates.CC = np.delete(self.estimates.CC, ind_zero, axis=0)
self.estimates.CC = np.delete(self.estimates.CC, ind_zero, axis=1)
self.M -= len(ind_zero)
self.N -= len(ind_zero)
self.estimates.noisyC = np.delete(self.estimates.noisyC, ind_zero, axis=0)
for ii in ind_zero:
del self.estimates.OASISinstances[ii - self.params.get('init', 'nb')]
#del self.ind_A[ii-self.params.init['nb']]
self.estimates.C_on = np.delete(self.estimates.C_on, ind_zero, axis=0)
self.estimates.AtY_buf = np.delete(self.estimates.AtY_buf, ind_zero, axis=0)
#Ab_ = Ab_[:,ind_keep]
Ab_ = csc_matrix(Ab_[:, ind_keep])
#Ab_ = csc_matrix(self.estimates.Ab_dense[:,:self.M])
self.Ab_dense_copy = self.estimates.Ab_dense
self.Ab_copy = Ab_
self.estimates.Ab = Ab_
self.ind_A = list(
[(self.estimates.Ab.indices[self.estimates.Ab.indptr[ii]:self.estimates.Ab.indptr[ii + 1]]) for ii in range(self.params.get('init', 'nb'), self.M)])
self.estimates.groups = list(map(list, update_order(Ab_)[0]))
if self.params.get('online', 'n_refit'):
self.estimates.AtY_buf = Ab_.T.dot(self.estimates.Yr_buf.T)
else: # distributed shape update
self.update_counter *= 2**(-1. / self.params.get('online', 'update_freq'))
# if not num_added:
if (not num_added) and (time() - t_start < 2*self.time_spend / (t - self.params.get('online', 'init_batch') + 1)):
candidates = np.where(self.update_counter <= 1)[0]
if len(candidates):
indicator_components = candidates[:self.N // mbs + 1]
self.comp_upd.append(len(indicator_components))
self.update_counter[indicator_components] += 1
#update_bkgrd = (t % self.params.get('online', 'update_freq') == 0)
update_bkgrd = (t % mbs == 0)
if self.params.get('online', 'use_dense'):
# update dense Ab and sparse Ab simultaneously;
# this is faster than calling update_shapes with sparse Ab only
Ab_, self.ind_A, self.estimates.Ab_dense[:, :self.M] = update_shapes(
self.estimates.CY, self.estimates.CC, self.estimates.Ab, self.ind_A,
indicator_components=indicator_components, update_bkgrd=update_bkgrd,
Ab_dense=self.estimates.Ab_dense[:, :self.M], sn=self.estimates.sn,
q=0.5, iters=self.params.get('online', 'iters_shape'))
if update_bkgrd:
self.estimates.AtA = (Ab_.T.dot(Ab_)).toarray()
else:
indicator_components += nb_
self.estimates.AtA[indicator_components, indicator_components[:, None]] = \
self.estimates.Ab_dense[:, indicator_components].T.dot(
self.estimates.Ab_dense[:, indicator_components])
else:
Ab_, self.ind_A, _ = update_shapes(
self.estimates.CY, self.estimates.CC, Ab_, self.ind_A,
indicator_components=indicator_components, update_bkgrd=update_bkgrd,
q=0.5, iters=self.params.get('online', 'iters_shape'))
self.estimates.AtA = (Ab_.T.dot(Ab_)).toarray()
else:
self.comp_upd.append(0)
self.estimates.Ab = Ab_
else:
self.comp_upd.append(0)
self.time_spend += time() - t_start
self.t_shapes.append(time() - t_sh)
return self
def initialize_online(self, model_LN=None, T=None):
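""" Initializes the online algorithm on the first `init_batch` frames.
Loads the initial batch from the first file, optionally removes the
background predicted by a ring-CNN model (`model_LN`), spatially
downsamples and motion corrects the data if requested, normalizes it,
and then runs the selected initialization method ('bare', 'cnmf' or
'seeded') before preparing the internal buffers via `_prepare_object`.
Args:
    model_LN: optional keras model for ring-CNN background removal
    T: int, optional
        total number of frames (inferred from the files if None)
"""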
fls = self.params.get('data', 'fnames')
opts = self.params.get_group('online')
Y = caiman.load(fls[0], subindices=slice(0, opts['init_batch'],
None), var_name_hdf5=self.params.get('data', 'var_name_hdf5')).astype(np.float32)
if model_LN is not None:
Y = Y - caiman.movie(np.squeeze(model_LN.predict(np.expand_dims(Y, -1))))
Y = np.maximum(Y, 0)
# Downsample if needed
ds_factor = np.maximum(opts['ds_factor'], 1)
if ds_factor > 1:
Y = Y.resize(1./ds_factor, 1./ds_factor)
self.estimates.shifts = [] # store motion shifts here
self.estimates.time_new_comp = []
if self.params.get('online', 'motion_correct'):
max_shifts_online = self.params.get('online', 'max_shifts_online')
if self.params.get('motion', 'gSig_filt') is None:
mc = Y.motion_correct(max_shifts_online, max_shifts_online)
Y = mc[0].astype(np.float32)
else:
Y_filt = np.stack([high_pass_filter_space(yf, self.params.motion['gSig_filt']) for yf in Y], axis=0)
Y_filt = caiman.movie(Y_filt)
mc = Y_filt.motion_correct(max_shifts_online, max_shifts_online)
Y = Y.apply_shifts(mc[1])
if self.params.get('motion', 'pw_rigid'):
n_p = len([(it[0], it[1])
for it in sliding_window(Y[0], self.params.get('motion', 'overlaps'), self.params.get('motion', 'strides'))])
for sh in mc[1]:
self.estimates.shifts.append([tuple(sh) for i in range(n_p)])
else:
self.estimates.shifts.extend(mc[1])
img_min = Y.min()
if self.params.get('online', 'normalize'):
Y -= img_min
img_norm = np.std(Y, axis=0)
img_norm += np.median(img_norm) # normalize data to equalize the FOV
logging.info('Frame size: ' + str(img_norm.shape))
if self.params.get('online', 'normalize'):
Y = Y/img_norm[None, :, :]
if opts['show_movie']:
self.bnd_Y = np.percentile(Y,(0.001,100-0.001))
_, d1, d2 = Y.shape
Yr = Y.to_2D().T # convert data into 2D array
self.img_min = img_min
self.img_norm = img_norm
if self.params.get('online', 'init_method') == 'bare':
init = self.params.get_group('init').copy()
is1p = (init['method_init'] == 'corr_pnr' and init['ring_size_factor'] is not None)
if is1p:
self.estimates.sn, psx = get_noise_fft(
Yr, noise_range=self.params.get('preprocess', 'noise_range'),
noise_method=self.params.get('preprocess', 'noise_method'),
max_num_samples_fft=self.params.get('preprocess', 'max_num_samples_fft'))
for key in ('K', 'nb', 'gSig', 'method_init'):
init.pop(key, None)
tmp = bare_initialization(
Y.transpose(1, 2, 0), init_batch=self.params.get('online', 'init_batch'),
k=self.params.get('init', 'K'), gnb=self.params.get('init', 'nb'),
method_init=self.params.get('init', 'method_init'), sn=self.estimates.sn,
gSig=self.params.get('init', 'gSig'), return_object=False,
options_total=self.params.to_dict(), **init)
if is1p:
(self.estimates.A, self.estimates.b, self.estimates.C, self.estimates.f,
self.estimates.YrA, self.estimates.W, self.estimates.b0) = tmp
else:
(self.estimates.A, self.estimates.b, self.estimates.C, self.estimates.f,
self.estimates.YrA) = tmp
self.estimates.S = np.zeros_like(self.estimates.C)
nr = self.estimates.C.shape[0]
self.estimates.g = np.array([-np.poly([0.9] * max(self.params.get('preprocess', 'p'), 1))[1:]
for gg in np.ones(nr)])
self.estimates.bl = np.zeros(nr)
self.estimates.c1 = np.zeros(nr)
self.estimates.neurons_sn = np.std(self.estimates.YrA, axis=-1)
self.estimates.lam = np.zeros(nr)
elif self.params.get('online', 'init_method') == 'cnmf':
n_processes = cpu_count() - 1 or 1
cnm = CNMF(n_processes=n_processes, params=self.params, dview=self.dview)
cnm.estimates.shifts = self.estimates.shifts
if self.params.get('patch', 'rf') is None:
cnm.dview = None
cnm.fit(np.array(Y))
self.estimates = cnm.estimates
else:
Y.save('init_file.hdf5')
f_new = mmapping.save_memmap(['init_file.hdf5'], base_name='Yr', order='C',
slices=[slice(0, opts['init_batch']), None, None])
Yrm, dims_, T_ = mmapping.load_memmap(f_new)
Y = np.reshape(Yrm.T, [T_] + list(dims_), order='F')
cnm.fit(Y)
self.estimates = cnm.estimates
if self.params.get('online', 'normalize'):
self.estimates.A /= self.img_norm.reshape(-1, order='F')[:, np.newaxis]
self.estimates.b /= self.img_norm.reshape(-1, order='F')[:, np.newaxis]
self.estimates.A = csc_matrix(self.estimates.A)
elif self.params.get('online', 'init_method') == 'seeded':
self.estimates.A, self.estimates.b, self.estimates.C, self.estimates.f, self.estimates.YrA = seeded_initialization(
Y.transpose(1, 2, 0), self.estimates.A, gnb=self.params.get('init', 'nb'), k=self.params.get('init', 'K'),
gSig=self.params.get('init', 'gSig'), return_object=False)
self.estimates.S = np.zeros_like(self.estimates.C)
nr = self.estimates.C.shape[0]
self.estimates.g = np.array([-np.poly([0.9] * max(self.params.get('preprocess', 'p'), 1))[1:]
for gg in np.ones(nr)])
self.estimates.bl = np.zeros(nr)
self.estimates.c1 = np.zeros(nr)
self.estimates.neurons_sn = np.std(self.estimates.YrA, axis=-1)
self.estimates.lam = np.zeros(nr)
else:
raise Exception('Unknown initialization method!')
dims, Ts = get_file_size(fls, var_name_hdf5=self.params.get('data', 'var_name_hdf5'))
dims = Y.shape[1:]
self.params.set('data', {'dims': dims})
T1 = np.array(Ts).sum()*self.params.get('online', 'epochs') if T is None else T
self._prepare_object(Yr, T1)
if opts['show_movie']:
self.bnd_AC = np.percentile(self.estimates.A.dot(self.estimates.C),
(0.001, 100-0.005))
#self.bnd_BG = np.percentile(self.estimates.b.dot(self.estimates.f),
# (0.001, 100-0.001))
return self
def save(self,filename):
"""save object in hdf5 file format
Args:
filename: str
path to the hdf5 file containing the saved object
"""
if '.hdf5' in filename:
# keys_types = [(k, type(v)) for k, v in self.__dict__.items()]
save_dict_to_hdf5(self.__dict__, filename)
else:
raise Exception("Unsupported file extension")
def mc_next(self, t, frame):
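""" Motion corrects the next frame `frame` at time `t` against a template
built from the current estimates (median recent activity projected through
the spatial footprints plus, for 1p data, the reconstructed ring-model
background). Uses piecewise-rigid or rigid registration depending on the
'pw_rigid' motion parameter and appends the estimated shifts to
self.estimates.shifts.
Returns:
    frame_cor: the motion corrected frame
"""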
frame_ = frame.flatten(order='F')
templ = self.estimates.Ab.dot(
np.median(self.estimates.C_on[:self.M, t-51:t-1], 1))
if self.is1p and self.estimates.W is not None:
if self.params.get('init','ssub_B') == 1:
B = self.estimates.W.dot(frame_ - templ - self.estimates.b0) + self.estimates.b0
else:
bc2 = self.estimates.downscale_matrix.dot(frame_ - templ - self.estimates.b0)
B = self.estimates.upscale_matrix.dot(self.estimates.W.dot(bc2))
B += self.estimates.b0
templ += B
templ = templ.reshape(self.params.get('data', 'dims'), order='F')
if self.params.get('online', 'normalize'):
templ *= self.img_norm
if self.is1p:
templ = high_pass_filter_space(templ, self.params.motion['gSig_filt'])
if self.params.get('motion', 'pw_rigid'):
frame_cor, shift, _, xy_grid = tile_and_correct(
frame, templ, self.params.motion['strides'], self.params.motion['overlaps'],
self.params.motion['max_shifts'], newoverlaps=None, newstrides=None,
upsample_factor_grid=4, upsample_factor_fft=10, show_movie=False,
max_deviation_rigid=self.params.motion['max_deviation_rigid'], add_to_movie=0,
shifts_opencv=True, gSig_filt=None, use_cuda=False, border_nan='copy')
else:
if self.is1p:
frame_orig = frame.copy()
frame = high_pass_filter_space(frame, self.params.motion['gSig_filt'])
frame_cor, shift = motion_correct_iteration_fast(
frame, templ, *(self.params.get('online', 'max_shifts_online'),)*2)
if self.is1p:
M = np.float32([[1, 0, shift[1]], [0, 1, shift[0]]])
frame_cor = cv2.warpAffine(frame_orig, M, frame.shape[::-1],
flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_REFLECT)
self.estimates.shifts.append(shift)
return frame_cor
def fit_online(self, **kwargs):
"""Implements the caiman online algorithm on the list of files fls. The
files are taken in alphanumerical order and are assumed to each have
the same number of frames (except the last one that can be shorter).
Caiman online is initialized using the seeded or bare initialization
methods.
Args:
fls: list
list of files to be processed
init_batch: int
number of frames to be processed during initialization
epochs: int
number of passes over the data
motion_correct: bool
flag for performing motion correction
kwargs: dict
additional parameters used to modify self.params.online;
see the 'online' group of self.params for details
Returns:
self (results of caiman online)
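Example (illustrative sketch; the file name and parameter values are
assumptions, see the 'online' parameter group for the full list):
    from caiman.source_extraction.cnmf import params, online_cnmf
    params_dict = {'fnames': ['movie.tif'],   # hypothetical input file
                   'fr': 15, 'decay_time': 0.5,
                   'init_batch': 200, 'init_method': 'bare',
                   'motion_correct': True}
    opts = params.CNMFParams(params_dict=params_dict)
    cnm = online_cnmf.OnACID(params=opts)
    cnm.fit_online()
    cnm.save('results.hdf5')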
"""
self.t_init = -time()
fls = self.params.get('data', 'fnames')
init_batch = self.params.get('online', 'init_batch')
if self.params.get('online', 'ring_CNN'):
logging.info('Using Ring CNN model')
from caiman.utils.nn_models import (fit_NL_model, create_LN_model, quantile_loss, rate_scheduler)
gSig = self.params.get('init', 'gSig')[0]
width = self.params.get('ring_CNN', 'width')
nch = self.params.get('ring_CNN', 'n_channels')
if self.params.get('ring_CNN', 'loss_fn') == 'pct':
loss_fn = quantile_loss(self.params.get('ring_CNN', 'pct'))
else:
loss_fn = self.params.get('ring_CNN', 'loss_fn')
if self.params.get('ring_CNN', 'lr_scheduler') is None:
sch = None
else:
sch = rate_scheduler(*self.params.get('ring_CNN', 'lr_scheduler'))
Y = caiman.base.movies.load(fls[0], subindices=slice(init_batch),
var_name_hdf5=self.params.get('data', 'var_name_hdf5'))
shape = Y.shape[1:] + (1,)
logging.info('Starting background model training.')
model_LN = create_LN_model(Y, shape=shape, n_channels=nch,
lr=self.params.get('ring_CNN', 'lr'), gSig=gSig,
loss=loss_fn, width=width,
use_add=self.params.get('ring_CNN', 'use_add'),
use_bias=self.params.get('ring_CNN', 'use_bias'))
if self.params.get('ring_CNN', 'reuse_model'):
logging.info('Using existing model from {}'.format(self.params.get('ring_CNN', 'path_to_model')))
model_LN.load_weights(self.params.get('ring_CNN', 'path_to_model'))
else:
logging.info('Estimating model from scratch, starting training.')
model_LN, history, path_to_model = fit_NL_model(model_LN, Y,
epochs=self.params.get('ring_CNN', 'max_epochs'),
patience=self.params.get('ring_CNN', 'patience'),
schedule=sch)
logging.info('Training complete. Model saved in {}.'.format(path_to_model))
self.params.set('ring_CNN', {'path_to_model': path_to_model})
else:
model_LN = None
epochs = self.params.get('online', 'epochs')
self.initialize_online(model_LN=model_LN)
self.t_init += time()
extra_files = len(fls) - 1
init_files = 1
t = init_batch
self.Ab_epoch:List = []
t_online = []
ssub_B = self.params.get('init', 'ssub_B') * self.params.get('init', 'ssub')
d1, d2 = self.params.get('data', 'dims')
max_shifts_online = self.params.get('online', 'max_shifts_online')
if extra_files == 0: # check whether there are any additional files
process_files = fls[:init_files]     # only process the initialization file
init_batc_iter = [init_batch]        # frame index where to start reading
else:
process_files = fls[:init_files + extra_files] # additional files
# where to start reading at each file
init_batc_iter = [init_batch] + [0]*extra_files
if self.params.get('online', 'save_online_movie') + self.params.get('online', 'show_movie'):
resize_fact = 2
fourcc = cv2.VideoWriter_fourcc(*self.params.get('online', 'opencv_codec'))
out = cv2.VideoWriter(self.params.get('online', 'movie_name_online'),
fourcc, 30, tuple([int(resize_fact*2*x) for x in self.params.get('data', 'dims')]),
True)
# Iterate through the epochs
for iter in range(epochs):
if iter == epochs - 1 and self.params.get('online', 'stop_detection'):
self.params.set('online', {'update_num_comps': False})
logging.info(f"Searching for new components set to: {self.params.get('online', 'update_num_comps')}")
if iter > 0:
# if not on the first epoch, process all files from the beginning
process_files = fls[:init_files + extra_files]
init_batc_iter = [0] * (extra_files + init_files)
# Go through all files
for file_count, ffll in enumerate(process_files):
logging.warning('Now processing file {}'.format(ffll))
Y_ = caiman.base.movies.load_iter(
ffll, var_name_hdf5=self.params.get('data', 'var_name_hdf5'),
subindices=slice(init_batc_iter[file_count], None, None))
old_comps = self.N # number of existing components
frame_count = -1
while True: # process each file
try:
frame = next(Y_)
if model_LN is not None:
if self.params.get('ring_CNN', 'remove_activity'):
activity = self.estimates.Ab[:,:self.N].dot(self.estimates.C_on[:self.N, t-1]).reshape(self.params.get('data', 'dims'), order='F')
if self.params.get('online', 'normalize'):
activity *= self.img_norm
else:
activity = 0.
# frame = frame.astype(np.float32) - activity
frame = frame - np.squeeze(model_LN.predict(np.expand_dims(np.expand_dims(frame.astype(np.float32) - activity, 0), -1)))
frame = np.maximum(frame, 0)
frame_count += 1
t_frame_start = time()
if np.isnan(np.sum(frame)):
raise Exception('Frame ' + str(frame_count) +
' contains NaN')
if t % 500 == 0:
logging.info('Epoch: ' + str(iter + 1) + '. ' + str(t) +
' frames have been processed in total. ' +
str(self.N - old_comps) +
' new components were added. Total # of components is '
+ str(self.estimates.Ab.shape[-1] - self.params.get('init', 'nb')))
old_comps = self.N
# Downsample and normalize
frame_ = frame.copy().astype(np.float32)
if self.params.get('online', 'ds_factor') > 1:
frame_ = cv2.resize(frame_, self.img_norm.shape[::-1])
if self.params.get('online', 'normalize'):
frame_ -= self.img_min # make data non-negative
# Motion Correction
t_mot = time()
if self.params.get('online', 'motion_correct'): # motion correct
frame_cor = self.mc_next(t, frame_)
else:
templ = None
frame_cor = frame_
self.t_motion.append(time() - t_mot)
if self.params.get('online', 'normalize'):
frame_cor = frame_cor/self.img_norm
# Fit next frame
self.fit_next(t, frame_cor.reshape(-1, order='F'))
# Show
if self.params.get('online', 'show_movie'):
self.t = t
vid_frame = self.create_frame(frame_cor, resize_fact=resize_fact)
if self.params.get('online', 'save_online_movie'):
out.write(vid_frame)
for rp in range(len(self.estimates.ind_new)*2):
out.write(vid_frame)
cv2.imshow('frame', vid_frame)
for rp in range(len(self.estimates.ind_new)*2):
cv2.imshow('frame', vid_frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
t += 1
t_online.append(time() - t_frame_start)
except (StopIteration, RuntimeError):
break
self.Ab_epoch.append(self.estimates.Ab.copy())
if self.params.get('online', 'normalize'):
self.estimates.Ab = csc_matrix(self.estimates.Ab.multiply(
self.img_norm.reshape(-1, order='F')[:, np.newaxis]))
self.estimates.A, self.estimates.b = self.estimates.Ab[:, self.params.get('init', 'nb'):], self.estimates.Ab[:, :self.params.get('init', 'nb')].toarray()
self.estimates.C, self.estimates.f = self.estimates.C_on[self.params.get('init', 'nb'):self.M, t - t //
epochs:t], self.estimates.C_on[:self.params.get('init', 'nb'), t - t // epochs:t]
noisyC = self.estimates.noisyC[self.params.get('init', 'nb'):self.M, t - t // epochs:t]
self.estimates.YrA = noisyC - self.estimates.C
if self.estimates.OASISinstances is not None:
self.estimates.bl = [osi.b for osi in self.estimates.OASISinstances]
self.estimates.S = np.stack([osi.s for osi in self.estimates.OASISinstances])
self.estimates.S = self.estimates.S[:, t - t // epochs:t]
else:
self.estimates.bl = [0] * self.estimates.C.shape[0]
self.estimates.S = np.zeros_like(self.estimates.C)
if self.params.get('online', 'ds_factor') > 1:
dims = frame.shape
self.estimates.A = hstack([coo_matrix(cv2.resize(self.estimates.A[:, i].reshape(self.estimates.dims, order='F').toarray(),
dims[::-1]).reshape(-1, order='F')[:,None]) for i in range(self.N)], format='csc')
if self.estimates.b.shape[-1] > 0:
self.estimates.b = np.concatenate([cv2.resize(self.estimates.b[:, i].reshape(self.estimates.dims, order='F'),
dims[::-1]).reshape(-1, order='F')[:,None] for i in range(self.params.get('init', 'nb'))], axis=1)
else:
self.estimates.b = np.resize(self.estimates.b, (self.estimates.A.shape[0], 0))
if self.estimates.b0 is not None:
b0 = self.estimates.b0.reshape(self.estimates.dims, order='F')
b0 = cv2.resize(b0, dims[::-1])
self.estimates.b0 = b0.reshape((-1, 1), order='F')
self.params.set('data', {'dims': dims})
self.estimates.dims = dims
if self.params.get('online', 'save_online_movie'):
out.release()
if self.params.get('online', 'show_movie'):
cv2.destroyAllWindows()
self.t_online = t_online
self.estimates.C_on = self.estimates.C_on[:self.M]
self.estimates.noisyC = self.estimates.noisyC[:self.M]
return self
def create_frame(self, frame_cor, show_residuals=True, resize_fact=3, transpose=True):
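""" Creates the 2x2 mosaic image (raw data, residual/correlation buffer or
identified components, inferred activity, denoised data) used for
displaying and/or saving the online movie. Pixel intensities are rescaled
using the bounds computed during initialization (self.bnd_Y, self.bnd_AC)
and newly added components are highlighted with rectangles.
"""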
if show_residuals:
caption = 'Corr*PSNR buffer' if self.params.get('online', 'use_corr_img') else 'Mean Residual Buffer'
else:
caption = 'Identified Components'
captions = ['Raw Data', 'Inferred Activity', caption, 'Denoised Data']
self.dims = self.estimates.dims
self.captions = captions
est = self.estimates
gnb = self.M - self.N
A, b = est.Ab[:, gnb:], est.Ab[:, :gnb].toarray()
C, f = est.C_on[gnb:self.M, :], est.C_on[:gnb, :]
# inferred activity due to components (no background)
frame_plot = (frame_cor.copy() - self.bnd_Y[0])/np.diff(self.bnd_Y)
comps_frame = A.dot(C[:, self.t - 1]).reshape(self.dims, order='F')
if self.is1p:
ssub_B = self.params.get('init', 'ssub_B') * self.params.get('init', 'ssub')
if ssub_B == 1:
B = self.estimates.W.dot((frame_cor - comps_frame).flatten(order='F') - self.estimates.b0) + self.estimates.b0
bgkrnd_frame = B.reshape(self.dims, order='F')
else:
bc2 = self.estimates.downscale_matrix.dot(
(frame_cor - comps_frame).flatten(order='F') - self.estimates.b0)
bgkrnd_frame = (self.estimates.b0 + self.estimates.upscale_matrix.dot(
self.estimates.W.dot(bc2))).reshape(self.dims, order='F')
else:
bgkrnd_frame = b.dot(f[:, self.t - 1]).reshape(self.dims, order='F') # denoised frame (components + background)
denoised_frame = comps_frame + bgkrnd_frame
denoised_frame = (denoised_frame.copy() - self.bnd_Y[0])/np.diff(self.bnd_Y)
comps_frame = (comps_frame.copy() - self.bnd_AC[0])/np.diff(self.bnd_AC)
if show_residuals:
if self.params.get('online', 'use_corr_img'):
pnr_img = est.max_img / est.sn.reshape(est.dims, order='F')
pnr_img[pnr_img<2] = 0
all_comps = np.nan_to_num(est.corr_img * pnr_img)
fac = 1. / self.params.get('init', 'min_corr') / self.params.get('init', 'min_pnr')
else:
#all_comps = np.reshape(self.Yres_buf.mean(0), self.dims, order='F')
all_comps = np.reshape(est.mean_buff, self.dims, order='F')
fac = 1. / np.percentile(est.mean_buff, 99.995)
else:
all_comps = np.array(A.sum(-1)).reshape(self.dims, order='F')
fac = 2
#all_comps = (all_comps.copy() - self.bnd_Y[0])/np.diff(self.bnd_Y)
all_comps = np.minimum(np.maximum(all_comps, 0)*fac, 1)
# spatial shapes
frame_comp_1 = cv2.resize(np.concatenate([frame_plot, all_comps * 1.], axis=-1),
(2 * int(self.dims[1] * resize_fact), int(self.dims[0] * resize_fact)))
frame_comp_2 = cv2.resize(np.concatenate([comps_frame, denoised_frame], axis=-1),
(2 * int(self.dims[1] * resize_fact), int(self.dims[0] * resize_fact)))
frame_pn = np.concatenate([frame_comp_1, frame_comp_2], axis=0).T
if transpose:
self.dims = self.dims[::-1]
frame_pn = frame_pn.T
vid_frame = np.repeat(frame_pn[:, :, None], 3, axis=-1)
vid_frame = np.minimum((vid_frame * 255.), 255).astype('u1')
#if show_residuals and est.ind_new:
if est.ind_new:
add_v = int(self.dims[1-transpose]*resize_fact)
for ind_new in est.ind_new:
cv2.rectangle(vid_frame,(int(ind_new[transpose][1]*resize_fact) + transpose*add_v,
int(ind_new[1-transpose][1]*resize_fact) + (1-transpose)*add_v),
(int(ind_new[transpose][0]*resize_fact) + transpose*add_v,
int(ind_new[1-transpose][0]*resize_fact)+ (1-transpose)*add_v), (255,0,255), 2)
cv2.putText(vid_frame, captions[0], (5, 20), fontFace=5, fontScale=0.8, color=(
0, 255, 0), thickness=1)
cv2.putText(vid_frame, captions[1+transpose], (int(
self.dims[0] * resize_fact) + 5, 20), fontFace=5, fontScale=0.8, color=(0, 255, 0), thickness=1)
cv2.putText(vid_frame, captions[2-transpose], (5, int(
self.dims[1] * resize_fact) + 20), fontFace=5, fontScale=0.8, color=(0, 255, 0), thickness=1)
cv2.putText(vid_frame, captions[3], (int(self.dims[0] * resize_fact) + 5, int(
self.dims[1] * resize_fact) + 20), fontFace=5, fontScale=0.8, color=(0, 255, 0), thickness=1)
cv2.putText(vid_frame, 'Frame = ' + str(self.t), (vid_frame.shape[1] // 2 - vid_frame.shape[1] //
10, vid_frame.shape[0] - 20), fontFace=5, fontScale=0.8, color=(0, 255, 255), thickness=1)
if transpose:
self.dims = self.dims[::-1]
return vid_frame
#%%
def bare_initialization(Y, init_batch=1000, k=1, method_init='greedy_roi', gnb=1,
gSig=[5, 5], motion_flag=False, p=1,
return_object=True, **kwargs):
"""
Quick and dirty initialization for OnACID, bypassing CNMF entirely
Args:
Y movie object or np.array
matrix of data
init_batch int
number of frames to process
method_init string
initialization method
k int
number of components to find
gnb int
number of background components
gSig [int,int]
half-size of component
motion_flag bool
also perform motion correction
Output:
cnm_init object
caiman CNMF-like object to initialize OnACID
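Example (illustrative sketch; the file name and parameter values are
assumptions):
    import caiman
    Y = caiman.load('movie.tif')            # movie of shape (T, d1, d2)
    A, b, C, f, YrA = bare_initialization(
        Y[:500].transpose(1, 2, 0), init_batch=500, k=20, gnb=1,
        gSig=[4, 4], return_object=False)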
"""
if Y.ndim == 4: # 3D data
Y = Y[:, :, :, :init_batch]
else:
Y = Y[:, :, :init_batch]
try:
Ain, Cin, b_in, f_in, center = initialize_components(
Y, K=k, gSig=gSig, nb=gnb, method_init=method_init, **kwargs)
Ain = coo_matrix(Ain)
b_in = np.array(b_in)
Yr = np.reshape(Y, (Ain.shape[0], Y.shape[-1]), order='F')
nA = (Ain.power(2).sum(axis=0))
nr = nA.size
YA = spdiags(old_div(1., nA), 0, nr, nr) * \
(Ain.T.dot(Yr) - (Ain.T.dot(b_in)).dot(f_in))
AA = spdiags(old_div(1., nA), 0, nr, nr) * (Ain.T.dot(Ain))
YrA = YA - AA.T.dot(Cin)
except ValueError:
Ain, Cin, b_in, f_in, center, extra_1p = initialize_components(
Y, K=k, gSig=gSig, nb=gnb, method_init=method_init, **kwargs)
Ain = coo_matrix(Ain)
YrA, _, W, b0 = extra_1p[-4:]
if return_object:
cnm_init = caiman.source_extraction.cnmf.cnmf.CNMF(2, k=k, gSig=gSig, Ain=Ain, Cin=Cin, b_in=np.array(
b_in), f_in=f_in, method_init=method_init, p=p, gnb=gnb, **kwargs)
cnm_init.estimates.A, cnm_init.estimates.C, cnm_init.estimates.b, cnm_init.estimates.f, cnm_init.estimates.S,\
cnm_init.estimates.YrA = Ain, Cin, b_in, f_in, np.maximum(np.atleast_2d(Cin), 0), YrA
#cnm_init.g = np.array([-np.poly([0.9]*max(p,1))[1:] for gg in np.ones(k)])
cnm_init.estimates.g = np.array([-np.poly([0.9, 0.5][:max(1, p)])[1:]
for gg in np.ones(k)])
cnm_init.estimates.bl = np.zeros(k)
cnm_init.estimates.c1 = np.zeros(k)
cnm_init.estimates.neurons_sn = np.std(YrA, axis=-1)
cnm_init.estimates.lam = np.zeros(k)
cnm_init.dims = Y.shape[:-1]
cnm_init.params.set('online', {'init_batch': init_batch})
return cnm_init
else:
try:
return Ain, np.array(b_in), Cin, f_in, YrA, W, b0
except NameError:  # W, b0 are only defined in the 1p (ring model) case
return Ain, np.array(b_in), Cin, f_in, YrA
#%%
def seeded_initialization(Y, Ain, dims=None, init_batch=1000, order_init=None, gnb=1, p=1,
return_object=True, **kwargs):
"""
Initialization for OnACID based on a set of user-given binary masks.
Args:
Y movie object or np.array
matrix of data
Ain bool np.array
2d np.array with binary masks
dims tuple
dimensions of FOV
init_batch int
number of frames to process
gnb int
number of background components
order_init: list
order of elements to be initialized using rank-1 NMF restricted to the support of
each component
Output:
cnm_init object
caiman CNMF-like object to initialize OnACID
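Example (illustrative sketch; `masks` is an assumed binary array of
shape (# pixels, # components) and the file name is an assumption):
    import caiman
    Y = caiman.load('movie.tif')            # movie of shape (T, d1, d2)
    A, b, C, f, YrA = seeded_initialization(
        Y[:500].transpose(1, 2, 0), masks, init_batch=500, gnb=1,
        return_object=False)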
"""
if 'ndarray' not in str(type(Ain)):
Ain = Ain.toarray()
if dims is None:
dims = Y.shape[:-1]
px = (np.sum(Ain > 0, axis=1) > 0)
not_px = ~px  # boolean mask of pixels not covered by any component
if 'matrix' in str(type(not_px)):
not_px = np.array(not_px).flatten()
Yr = np.reshape(Y, (Ain.shape[0], Y.shape[-1]), order='F')
model = NMF(n_components=gnb, init='nndsvdar', max_iter=10)
_ = model.fit_transform(np.maximum(Yr[not_px], 0)) # Done to update the model object
f_in = model.components_.squeeze()
f_in = np.atleast_2d(f_in)
Y_resf = np.dot(Yr, f_in.T)
# b_in = np.maximum(Y_resf.dot(np.linalg.inv(f_in.dot(f_in.T))), 0)
b_in = np.maximum(np.linalg.solve(f_in.dot(f_in.T), Y_resf.T), 0).T
# b_in = np.maximum(pd_solve(f_in.dot(f_in.T), Y_resf.T), 0).T
Yr_no_bg = (Yr - b_in.dot(f_in)).astype(np.float32)
Cin = np.zeros([Ain.shape[-1],Yr.shape[-1]], dtype = np.float32)
if order_init is not None: #initialize using rank-1 nmf for each component
model_comp = NMF(n_components=1, init='nndsvdar', max_iter=50)
for count, idx_in in enumerate(order_init):
if count%10 == 0:
print(count)
idx_domain = np.where(Ain[:,idx_in])[0]
Ain[idx_domain,idx_in] = model_comp.fit_transform(\
np.maximum(Yr_no_bg[idx_domain], 0)).squeeze()
Cin[idx_in] = model_comp.components_.squeeze()
Yr_no_bg[idx_domain] -= np.outer(Ain[idx_domain, idx_in],Cin[idx_in])
else:
Ain = normalize(Ain.astype('float32'), axis=0, norm='l1')
Cin = np.maximum(Ain.T.dot(Yr) - (Ain.T.dot(b_in)).dot(f_in), 0)
Ain = HALS4shapes(Yr_no_bg, Ain, Cin, iters=5)
Ain, Cin, b_in, f_in = hals(Yr, Ain, Cin, b_in, f_in, maxIter=8, bSiz=None)
Ain = csc_matrix(Ain)
nA = (Ain.power(2).sum(axis=0))
nr = nA.size
YA = spdiags(old_div(1., nA), 0, nr, nr) * \
(Ain.T.dot(Yr) - (Ain.T.dot(b_in)).dot(f_in))
AA = spdiags(old_div(1., nA), 0, nr, nr) * (Ain.T.dot(Ain))
YrA = YA - AA.T.dot(Cin)
if return_object:
cnm_init = caiman.source_extraction.cnmf.cnmf.CNMF(
2, Ain=Ain, Cin=Cin, b_in=np.array(b_in), f_in=f_in, p=1, **kwargs)
cnm_init.estimates.A, cnm_init.estimates.C, cnm_init.estimates.b, cnm_init.estimates.f, cnm_init.estimates.S, \
cnm_init.estimates.YrA = Ain, Cin, b_in, f_in, np.fmax(np.atleast_2d(Cin), 0), YrA
# cnm_init.g = np.array([[gg] for gg in np.ones(nr)*0.9])
cnm_init.estimates.g = np.array([-np.poly([0.9] * max(p, 1))[1:]
for gg in np.ones(nr)])
cnm_init.estimates.bl = np.zeros(nr)
cnm_init.estimates.c1 = np.zeros(nr)
cnm_init.estimates.neurons_sn = np.std(YrA, axis=-1)
cnm_init.estimates.lam = np.zeros(nr)
cnm_init.dims = Y.shape[:-1]
cnm_init.params.set('online', {'init_batch': init_batch})
return cnm_init
else:
return Ain, np.array(b_in), Cin, f_in, YrA
def HALS4shapes(Yr, A, C, iters=2):
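""" Updates the spatial components A, given fixed temporal traces C, by
running a few HALS (hierarchical alternating least squares) iterations
restricted to the nonzero support of each component.
A minimal illustrative call (shapes are assumptions):
    A = HALS4shapes(Yr, A, C, iters=2)   # Yr: pixels x T, A: pixels x K, C: K x T
"""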
K = A.shape[-1]
ind_A = A > 0
U = C.dot(Yr.T)
V = C.dot(C.T)
V_diag = V.diagonal() + np.finfo(float).eps
for _ in range(iters):
for m in range(K): # neurons
ind_pixels = np.squeeze(ind_A[:, m])
A[ind_pixels, m] = np.clip(A[ind_pixels, m] +
((U[m, ind_pixels] - V[m].dot(A[ind_pixels].T)) /
V_diag[m]), 0, np.inf)
return A
# definitions for demixed time series extraction and denoising/deconvolving
@profile
def HALS4activity(Yr, A, noisyC, AtA=None, iters=5, tol=1e-3, groups=None,
order=None):
"""Solves C = argmin_C ||Yr-AC|| using block-coordinate decent. Can use
groups to update non-overlapping components in parallel or a specified
order.
Args:
Yr : np.array (possibly memory mapped, (x,y,[,z]) x t)
Imaging data reshaped in matrix format
A : scipy.sparse.csc_matrix (or np.array) (x,y,[,z]) x # of components)
Spatial components and background
noisyC : np.array (# of components x t)
Temporal traces (including residuals plus background)
AtA : np.array, optional (# of components x # of components)
A.T.dot(A) Overlap matrix of shapes A.
iters : int, optional
Maximum number of iterations.
tol : float, optional
Change tolerance level
groups : list of sets
grouped components to be updated simultaneously
order : list
Update components in that order (used if nonempty and groups=None)
Returns:
C : np.array (# of components x t)
solution of HALS
noisyC : np.array (# of components x t)
solution of HALS + residuals, i.e., (C + YrA)
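Example (illustrative sketch with random data; shapes and values are
assumptions):
    import numpy as np
    import scipy.sparse
    np.random.seed(0)
    A = scipy.sparse.csc_matrix(np.random.rand(500, 4).astype(np.float32))
    C_true = np.random.rand(4, 10).astype(np.float32)
    Yr = A.dot(C_true)                       # pixels x time
    C0 = np.zeros_like(C_true)
    C, noisyC = HALS4activity(Yr, A, C0.copy(), iters=20)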
"""
AtY = A.T.dot(Yr)
num_iters = 0
C_old = np.zeros_like(noisyC)
C = noisyC.copy()
if AtA is None:
AtA = A.T.dot(A)
AtAd = AtA.diagonal() + np.finfo(np.float32).eps
# faster than np.linalg.norm
def norm(c): return sqrt(c.ravel().dot(c.ravel()))
while (norm(C_old - C) >= tol * norm(C_old)) and (num_iters < iters):
C_old[:] = C
if groups is None:
if order is None:
order = list(range(AtY.shape[0]))
for m in order:
noisyC[m] = C[m] + (AtY[m] - AtA[m].dot(C)) / AtAd[m]
C[m] = np.maximum(noisyC[m], 0)
else:
for m in groups:
noisyC[m] = C[m] + ((AtY[m] - AtA[m].dot(C)).T/AtAd[m]).T
C[m] = np.maximum(noisyC[m], 0)
num_iters += 1
return C, noisyC
def demix1p(y, A, noisyC, AtA, Atb, AtW, AtWA, iters=5, tol=1e-3,
groups=None, downscale_matrix=None, ssub_B=1):
"""
Solve C = argmin_C ||Yr-AC-B|| using block-coordinate descent,
where B = W(Y-AC-b0) + b0 (ring model for 1p data)
Parameters
----------
y : array of float, shape (x*y[*z],)
flattened array of raw data frame
A : sparse matrix of float
neural shapes
noisyC : ndarray of float
Initial value of fluorescence intensities.
AtA : ndarray of float
Overlap matrix of shapes A.
Atb : ndarray of float
Projection of constant background terms on shapes A: A'(Wb0-b0)
AtW : sparse matrix of float, shape (x*y, x*y)
Projection of ring matrix W on shapes A
AtWA : ndarray of float
A'*W*A
iters : int, optional
Maximal number of iterations.
tol : float, optional
Tolerance.
groups: list of lists
groups of components to update in parallel
"""
AtY = A.T.dot(y)
AtWyb = AtW.dot(y if ssub_B == 1 else downscale_matrix.dot(y) *
ssub_B**2) - Atb # Atb is A'(Wb0-b0)
num_iters = 0
C_old = np.zeros_like(noisyC)
C = noisyC.copy()
# faster than np.linalg.norm
def norm(c): return sqrt(c.ravel().dot(c.ravel()))
while (norm(C_old - C) >= tol * norm(C_old)) and (num_iters < iters):
C_old[:] = C
AtB = AtWyb - AtWA.dot(C) # A'B = A'WY - A'WAC - A'(Wb0-b0)
if groups is None:
for m in range(len(AtY)):
noisyC[m] = C[m] + (AtY[m] - AtA[m].dot(C) - AtB[m]) / (AtA[m, m] + np.finfo(C.dtype).eps)
C[m] = max(noisyC[m], 0)
else:
for m in groups:
noisyC[m] = C[m] + (AtY[m] - AtA[m].dot(C) - AtB[m]) / (AtA.diagonal()[m] + np.finfo(C.dtype).eps)
C[m] = np.maximum(noisyC[m], 0)
num_iters += 1
return C, noisyC
@profile
def demix_and_deconvolve(C, noisyC, AtY, AtA, OASISinstances, iters=3, n_refit=0):
"""
Solve C = argmin_C ||Y-AC|| subject to C following AR(p) dynamics
using OASIS within block-coordinate descent.
Newly fits the last elements in buffers C and AtY and possibly refits
earlier elements.
Args:
C : ndarray of float
Buffer containing the denoised fluorescence intensities.
All elements up to and excluding the last one have been denoised in
earlier calls.
noisyC : ndarray of float
Buffer containing the undenoised fluorescence intensities.
AtY : ndarray of float
Buffer containing the projections of data Y on shapes A.
AtA : ndarray of float
Overlap matrix of shapes A.
OASISinstances : list of OASIS objects
Objects for deconvolution and denoising
iters : int, optional
Number of iterations.
n_refit : int, optional
Number of previous OASIS pools to refit
0 fits only the last pool, np.inf refits all pools that lie fully (i.e. start) within the buffer
"""
AtA += np.finfo(float).eps
T = OASISinstances[0].t + 1
len_buffer = C.shape[1]
nb = AtY.shape[0] - len(OASISinstances)
if n_refit == 0:
for i in range(iters):
for m in range(AtY.shape[0]):
noisyC[m, -1] = C[m, -1] + \
(AtY[m, -1] - AtA[m].dot(C[:, -1])) / AtA[m, m]
if m >= nb and i > 0:
n = m - nb
if i == iters - 1: # commit
OASISinstances[n].fit_next(noisyC[m, -1])
l = OASISinstances[n].get_l_of_last_pool()
if l < len_buffer:
C[m, -l:] = OASISinstances[n].get_c_of_last_pool()
else:
C[m] = OASISinstances[n].get_c(len_buffer)
else: # temporary non-committed update of most recent frame
C[m] = OASISinstances[n].fit_next_tmp(
noisyC[m, -1], len_buffer)
else:
# no need to enforce max(c, 0) for the background components
C[m, -1] = np.maximum(noisyC[m, -1], 0)
else:
# N.B. the threshold .1 assumes normalized A (|A|_2=1)
overlap = np.sum(AtA[nb:, nb:] > .1, 0) > 1
def refit(o, c):
# remove last pools
tmp = 0
while tmp < n_refit and o.t - o.get_l_of_last_pool() > T - len_buffer:
o.remove_last_pool()
tmp += 1
# refit last pools
for cc in c[o.t - T + len_buffer:-1]:
o.fit_next(cc)
for i in range(iters):
for m in range(AtY.shape[0]):
noisyC[m] = C[m] + (AtY[m] - AtA[m].dot(C)) / AtA[m, m]
if m >= nb:
n = m - nb
if overlap[n]:
refit(OASISinstances[n], noisyC[m])
if i == iters - 1: # commit
OASISinstances[n].fit_next(noisyC[m, -1])
C[m] = OASISinstances[n].get_c(len_buffer)
else: # temporary non-committed update of most recent frame
C[m] = OASISinstances[n].fit_next_tmp(
noisyC[m, -1], len_buffer)
else:
# no need to enforce max(c, 0) for the background components
C[m] = noisyC[m]
return C, noisyC, OASISinstances
#%% Estimate shapes on small initial batch
def init_shapes_and_sufficient_stats(Y, A, C, b, f, W=None, b0=None, ssub_B=1, bSiz=3,
downscale_matrix=None, upscale_matrix=None):
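""" Initializes the combined spatial footprint matrix Ab (background
components stacked in front of the neural components, unless a ring model
W is used), the per-component pixel support ind_A, and the sufficient
statistics CY = Cf * Yr.T and CC = Cf * Cf.T that are used for the online
shape updates. For the ring model (W is not None) the reconstructed
background is subtracted from Y before computing CY.
Returns:
    Ab, ind_A, CY, CC
"""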
# smooth the components
dims, T = np.shape(Y)[:-1], np.shape(Y)[-1]
K = A.shape[1] # number of neurons
if W is None:
nb = b.shape[1] # number of background components
# if isinstance(bSiz, (int, float)):
# bSiz = [bSiz] * len(dims)
Ab = np.hstack([b, A])
else:
Ab = A
nb = 0
# Ab = scipy.sparse.hstack([A.astype('float32'), b.astype('float32')]).tocsc() might be faster
# closing of shapes to not have holes in index matrix ind_A.
# TODO: do this smarter & faster, e.g. smooth only within each patch!
A_smooth = np.transpose([gaussian_filter(np.array(a).reshape(
dims, order='F'), 0).ravel(order='F') for a in Ab.T])
A_smooth[A_smooth < 1e-2] = 0
# set explicit zeros of Ab to a small value, s.t. ind_A and Ab.indptr match
Ab += 1e-6 * A_smooth
Ab = csc_matrix(Ab)
ind_A = [Ab.indices[Ab.indptr[m]:Ab.indptr[m + 1]]
for m in range(nb, nb + K)]
Cf = np.r_[f.reshape(nb, -1), C] if f.size else C
CY = Cf.dot(np.reshape(Y, (np.prod(dims), T), order='F').T)
if W is not None:
if ssub_B == 1:
CY -= Cf.dot(W.dot(np.reshape(Y, (-1, T), order='F') -
A.dot(C) - b0[:, None]).T + b0)
else:
d1, d2 = dims
B = upscale_matrix.dot(W.dot(downscale_matrix.dot(
np.reshape(Y, (-1, T), order='F') - A.dot(C) - b0[:, None]))).T + b0
CY -= Cf.dot(B)
CC = Cf.dot(Cf.T)
return Ab, ind_A, CY, CC
@profile
def update_shapes(CY, CC, Ab, ind_A, sn=None, q=0.5, indicator_components=None,
Ab_dense=None, update_bkgrd=True, iters=5):
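""" Updates the spatial footprints Ab in place using block-coordinate
descent on the sufficient statistics CY and CC, restricted to the pixel
support ind_A of each component. If `indicator_components` is given, only
those components are updated; background columns are updated only if
`update_bkgrd` is True. If a noise estimate `sn` is provided and q != 0.5,
a soft threshold L = norm.ppf(q) * outer(sqrt(diag(CC)), sn) is subtracted
to sparsify the shapes. A dense copy Ab_dense is kept in sync if provided.
Returns:
    Ab, ind_A, Ab_dense
"""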
D, M = Ab.shape
N = len(ind_A)
nb = M - N
if indicator_components is None:
idx_comp = range(nb, M)
else:
idx_comp = np.where(indicator_components)[0] + nb
if sn is None or q == 0.5: # avoid costly construction of L=np.zeros((M, D), dtype=np.float32)
for _ in range(iters): # it's presumably better to run just 1 iter but update more neurons
if Ab_dense is None:
for m in idx_comp: # neurons
ind_pixels = ind_A[m - nb]
tmp = np.maximum(Ab.data[Ab.indptr[m]:Ab.indptr[m + 1]] +
((CY[m, ind_pixels] - Ab.dot(CC[m])[ind_pixels]) / (CC[m, m] + np.finfo(CC.dtype).eps)), 0)
# normalize
if tmp.dot(tmp) > 0:
tmp *= 1e-3 / \
min(1e-3, sqrt(tmp.dot(tmp)) + np.finfo(float).eps)
tmp = tmp / max(1, sqrt(tmp.dot(tmp)))
Ab.data[Ab.indptr[m]:Ab.indptr[m + 1]] = tmp
ind_A[m - nb] = Ab.indices[slice(Ab.indptr[m], Ab.indptr[m + 1])]
else:
for m in idx_comp: # neurons
ind_pixels = ind_A[m - nb]
tmp = np.maximum(Ab_dense[ind_pixels, m] +
((CY[m, ind_pixels] - Ab_dense[ind_pixels].dot(CC[m])) / (CC[m, m] + np.finfo(CC.dtype).eps)), 0)
# normalize
if tmp.dot(tmp) > 0:
tmp *= 1e-3 / \
min(1e-3, sqrt(tmp.dot(tmp)) + np.finfo(float).eps)
Ab_dense[ind_pixels, m] = tmp / max(1, sqrt(tmp.dot(tmp)))
Ab.data[Ab.indptr[m]:Ab.indptr[m + 1]] = Ab_dense[ind_pixels, m]
ind_A[m - nb] = Ab.indices[slice(Ab.indptr[m], Ab.indptr[m + 1])]
if update_bkgrd:
for m in range(nb): # background
sl = slice(Ab.indptr[m], Ab.indptr[m + 1])
ind_pixels = Ab.indices[sl]
Ab.data[sl] = np.maximum(
Ab.data[sl] + ((CY[m, ind_pixels] - Ab.dot(CC[m])[ind_pixels]) / (CC[m, m] + np.finfo(CC.dtype).eps)), 0)
if Ab_dense is not None:
Ab_dense[ind_pixels, m] = Ab.data[sl]
else:
L = norm.ppf(q)*np.outer(np.sqrt(CC.diagonal()), sn)
L[:nb] = 0
for _ in range(iters): # it's presumably better to run just 1 iter but update more neurons
if Ab_dense is None:
for m in idx_comp: # neurons
ind_pixels = ind_A[m - nb]
tmp = np.maximum(Ab.data[Ab.indptr[m]:Ab.indptr[m + 1]] +
((CY[m, ind_pixels] - L[m, ind_pixels] - Ab.dot(CC[m])[ind_pixels]) / (CC[m, m] + np.finfo(CC.dtype).eps)), 0)
if tmp.dot(tmp) > 0:
tmp *= 1e-3 / \
min(1e-3, sqrt(tmp.dot(tmp)) + np.finfo(float).eps)
tmp = tmp / max(1, sqrt(tmp.dot(tmp)))
Ab.data[Ab.indptr[m]:Ab.indptr[m + 1]] = tmp
ind_A[m -
nb] = Ab.indices[slice(Ab.indptr[m], Ab.indptr[m + 1])]
# N.B. Ab[ind_pixels].dot(CC[m]) is slower for csc matrix due to indexing rows
else:
for m in idx_comp: # neurons
ind_pixels = ind_A[m - nb]
tmp = np.maximum(Ab_dense[ind_pixels, m] + ((CY[m, ind_pixels] - L[m, ind_pixels] -
Ab_dense[ind_pixels].dot(CC[m])) /
(CC[m, m] + np.finfo(CC.dtype).eps)), 0)
# normalize
if tmp.dot(tmp) > 0:
tmp *= 1e-3 / \
min(1e-3, sqrt(tmp.dot(tmp)) + np.finfo(float).eps)
Ab_dense[ind_pixels, m] = tmp / max(1, sqrt(tmp.dot(tmp)))
Ab.data[Ab.indptr[m]:Ab.indptr[m + 1]] = Ab_dense[ind_pixels, m]
ind_A[m - nb] = Ab.indices[slice(Ab.indptr[m], Ab.indptr[m + 1])]
# Ab.data[Ab.indptr[nb]:] = np.concatenate(
# [Ab_dense[ind_A[m - nb], m] for m in range(nb, M)])
# N.B. why does selecting only overlapping neurons help surprisingly little, i.e.
# Ab[ind_pixels][:, overlap[m]].dot(CC[overlap[m], m])
# where overlap[m] are the indices of all neurons overlappping with & including m?
# sparsify ??
if update_bkgrd:
for m in range(nb): # background
sl = slice(Ab.indptr[m], Ab.indptr[m + 1])
ind_pixels = Ab.indices[sl]
Ab.data[sl] = np.maximum(
Ab.data[sl] + ((CY[m, ind_pixels] - Ab.dot(CC[m])[ind_pixels]) / (CC[m, m] + np.finfo(CC.dtype).eps)), 0)
if Ab_dense is not None:
Ab_dense[ind_pixels, m] = Ab.data[sl]
return Ab, ind_A, Ab_dense
class RingBuffer(np.ndarray):
""" implements ring buffer efficiently"""
def __new__(cls, input_array, num_els):
obj = np.asarray(input_array).view(cls)
obj.max_ = num_els
obj.cur = 0
if input_array.shape[0] != num_els:
print([input_array.shape[0], num_els])
raise Exception('The first dimension should equal num_els')
return obj
def __array_finalize__(self, obj):
# see InfoArray.__array_finalize__ for comments
if obj is None:
return
self.max_ = getattr(obj, 'max_', None)
self.cur = getattr(obj, 'cur', None)
def append(self, x):
self[self.cur] = x
self.cur = (self.cur + 1) % self.max_
def get_ordered(self):
return np.concatenate([self[self.cur:], self[:self.cur]], axis=0)
def get_first(self):
return self[self.cur]
def get_last_frames(self, num_frames):
if self.cur >= num_frames:
return self[self.cur - num_frames:self.cur]
else:
return np.concatenate([self[(self.cur - num_frames):], self[:self.cur]], axis=0)
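# Illustrative RingBuffer usage (a minimal sketch; shapes are assumptions):
#     buf = RingBuffer(np.zeros((5, 100), dtype=np.float32), num_els=5)
#     for frame in np.random.rand(12, 100).astype(np.float32):
#         buf.append(frame)                # overwrites the oldest entry
#     last3 = buf.get_last_frames(3)       # the 3 most recently appended frames
#     ordered = buf.get_ordered()          # oldest to newest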
#%%
def csc_append(a, b):
""" Takes in 2 csc_matrices and appends the second one to the right of the first one.
Much faster than scipy.sparse.hstack but assumes the type to be csc and overwrites
the first matrix instead of copying it. The data, indices, and indptr still get copied."""
a.data = np.concatenate((a.data, b.data))
a.indices = np.concatenate((a.indices, b.indices))
a.indptr = np.concatenate((a.indptr, (b.indptr + a.nnz)[1:]))
a._shape = (a.shape[0], a.shape[1] + b.shape[1])
def csr_append(a, b):
""" Takes in 2 csr_matrices and appends the second one below the first one.
Much faster than scipy.sparse.vstack but assumes the type to be csr and overwrites
the first matrix instead of copying it. The data, indices, and indptr still get copied."""
a.data = np.concatenate((a.data, b.data))
a.indices = np.concatenate((a.indices, b.indices))
a.indptr = np.concatenate((a.indptr, (b.indptr + a.nnz)[1:]))
a._shape = (a.shape[0] + b.shape[0], a.shape[1])
def corr(a, b):
"""
faster correlation than np.corrcoef, especially for smaller arrays.
Be aware of side effects (a and b are mean-subtracted in place) and pass a copy if necessary!
"""
a -= a.mean()
b -= b.mean()
return a.dot(b) / sqrt(a.dot(a) * b.dot(b) + np.finfo(float).eps)
def rank1nmf(Ypx, ain, iters=10):
"""
perform a fast rank-1 NMF of the pixels x time patch Ypx, given an initial spatial component ain.
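A minimal sketch with random data (shapes and values are assumptions):
    import numpy as np
    Ypx = np.random.rand(50, 200).astype(np.float32)    # pixels x time patch
    ain0 = np.ones(50, dtype=np.float32) / np.sqrt(50)  # initial unit-norm shape
    ain, cin, cin_res = rank1nmf(Ypx, ain0)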
"""
# cin_old = -1
eps = np.finfo(np.float32).eps
for t in range(iters):
cin_res = ain.dot(Ypx) # / ain.dot(ain)
cin = np.maximum(cin_res, 0)
ain = np.maximum(Ypx.dot(cin), 0)
# ain /= (sqrt(ain.dot(ain)) + np.finfo(np.float32).eps)
if t in (0, iters-1):
ain /= (sqrt(ain.dot(ain)) + eps)
elif t % 2 == 0: # division by squared norm every 2nd iter is faster yet numerically stable
ain /= (ain.dot(ain) + eps)
# nc = cin.dot(cin)
# ain = np.maximum(Ypx.dot(cin.T) / nc, 0)
# tmp = cin - cin_old
# if tmp.dot(tmp) < 1e-6 * nc:
# break
# cin_old = cin.copy()
cin_res = ain.dot(Ypx) # / ain.dot(ain)
cin = np.maximum(cin_res, 0)
return ain, cin, cin_res
#%%
@profile
def get_candidate_components(sv, dims, Yres_buf, min_num_trial=3, gSig=(5, 5),
gHalf=(5, 5), sniper_mode=True, rval_thr=0.85,
patch_size=50, loaded_model=None, test_both=False,
thresh_CNN_noisy=0.5, use_peak_max=False,
thresh_std_peak_resid = 1, mean_buff=None,
tf_in=None, tf_out=None):
"""
Extract new candidate components from the residual buffer and test them
using space correlation or the CNN classifier. The function runs the CNN
classifier in batch mode which can bring speed improvements when
multiple components are considered in each timestep.
"""
Ain = []
Ain_cnn = []
Cin = []
Cin_res = []
idx = []
all_indices = []
ijsig_all = []
cnn_pos:List = []
local_maxima:List = []
Y_patch = []
ksize = tuple([int(3 * i / 2) * 2 + 1 for i in gSig])
compute_corr = test_both
if use_peak_max:
img_select_peaks = sv.reshape(dims).copy()
# plt.subplot(1,3,1)
# plt.cla()
# plt.imshow(img_select_peaks)
img_select_peaks = cv2.GaussianBlur(img_select_peaks , ksize=ksize, sigmaX=gSig[0],
sigmaY=gSig[1], borderType=cv2.BORDER_REPLICATE) \
- cv2.boxFilter(img_select_peaks, ddepth=-1, ksize=ksize, borderType=cv2.BORDER_REPLICATE)
thresh_img_sel = 0 #np.median(img_select_peaks) + thresh_std_peak_resid * np.std(img_select_peaks)
# plt.subplot(1,3,2)
# plt.cla()
# plt.imshow(img_select_peaks*(img_select_peaks>thresh_img_sel))
# plt.pause(.05)
# threshold_abs = np.median(img_select_peaks) + np.std(img_select_peaks)
# img_select_peaks -= np.min(img_select_peaks)
# img_select_peaks /= np.max(img_select_peaks)
# img_select_peaks *= 2**15
# img_select_peaks = img_select_peaks.astype(np.uint16)
# clahe = cv2.createCLAHE(clipLimit=40.0, tileGridSize=(half_crop_cnn[0]//2,half_crop_cnn[0]//2))
# img_select_peaks = clahe.apply(img_select_peaks)
local_maxima = peak_local_max(img_select_peaks,
                                      min_distance=int(np.max(np.array(gSig))),
                                      num_peaks=min_num_trial, threshold_abs=thresh_img_sel, exclude_border=False)
min_num_trial = np.minimum(len(local_maxima),min_num_trial)
for i in range(min_num_trial):
if use_peak_max:
ij = local_maxima[i]
else:
ind = np.argmax(sv)
ij = np.unravel_index(ind, dims, order='C')
local_maxima.append(ij)
ij = [min(max(ij_val, g_val), dim_val-g_val-1)
for ij_val, g_val, dim_val in zip(ij, gHalf, dims)]
ind = np.ravel_multi_index(ij, dims, order='C')
ijSig = [[max(i - g, 0), min(i+g+1, d)] for i, g, d in zip(ij, gHalf, dims)]
ijsig_all.append(ijSig)
indices = np.ravel_multi_index(np.ix_(*[np.arange(ij[0], ij[1])
for ij in ijSig]), dims, order='F').ravel(order='C')
# indices_ = np.ravel_multi_index(np.ix_(*[np.arange(ij[0], ij[1])
# for ij in ijSig]), dims, order='C').ravel(order = 'C')
ain = np.maximum(mean_buff[indices], 0)
if sniper_mode:
half_crop_cnn = tuple([int(np.minimum(gs*2, patch_size/2)) for gs in gSig])
ij_cnn = [min(max(ij_val,g_val),dim_val-g_val-1) for ij_val, g_val, dim_val in zip(ij,half_crop_cnn,dims)]
ijSig_cnn = [[max(i - g, 0), min(i+g+1,d)] for i, g, d in zip(ij_cnn, half_crop_cnn, dims)]
indices_cnn = np.ravel_multi_index(np.ix_(*[np.arange(ij[0], ij[1])
for ij in ijSig_cnn]), dims, order='F').ravel(order = 'C')
ain_cnn = mean_buff[indices_cnn]
else:
compute_corr = True # determine when to compute corr coef
na = ain.dot(ain)
# sv[indices_] /= 1 # 0
if na:
ain /= sqrt(na)
Ain.append(ain)
if compute_corr:
Y_patch.append(Yres_buf.T[indices, :])
else:
all_indices.append(indices)
idx.append(ind)
if sniper_mode:
Ain_cnn.append(ain_cnn)
if sniper_mode & (len(Ain_cnn) > 0):
Ain_cnn = np.stack(Ain_cnn)
Ain2 = Ain_cnn.copy()
Ain2 -= np.median(Ain2,axis=1)[:,None]
Ain2 /= np.std(Ain2,axis=1)[:,None]
Ain2 = np.reshape(Ain2,(-1,) + tuple(np.diff(ijSig_cnn).squeeze()),order= 'F')
Ain2 = np.stack([cv2.resize(ain,(patch_size ,patch_size)) for ain in Ain2])
if tf_in is None:
predictions = loaded_model.predict(Ain2[:,:,:,np.newaxis], batch_size=min_num_trial, verbose=0)
else:
predictions = loaded_model.run(tf_out, feed_dict={tf_in: Ain2[:, :, :, np.newaxis]})
keep_cnn = list(np.where(predictions[:, 0] > thresh_CNN_noisy)[0])
cnn_pos = Ain2[keep_cnn]
else:
keep_cnn = [] # list(range(len(Ain_cnn)))
if compute_corr:
keep_corr = []
for i, (ain, Ypx) in enumerate(zip(Ain, Y_patch)):
ain, cin, cin_res = rank1nmf(Ypx, ain)
Ain[i] = ain
Cin.append(cin)
Cin_res.append(cin_res)
rval = corr(ain.copy(), np.mean(Ypx, -1))
if rval > rval_thr:
keep_corr.append(i)
keep_final:List = list(set().union(keep_cnn, keep_corr))
if len(keep_final) > 0:
Ain = np.stack(Ain)[keep_final]
else:
Ain = []
Cin = [Cin[kp] for kp in keep_final]
Cin_res = [Cin_res[kp] for kp in keep_final]
idx = list(np.array(idx)[keep_final])
else:
Ain = [Ain[kp] for kp in keep_cnn]
Y_patch = [Yres_buf.T[all_indices[kp]] for kp in keep_cnn]
idx = list(np.array(idx)[keep_cnn])
for i, (ain, Ypx) in enumerate(zip(Ain, Y_patch)):
ain, cin, cin_res = rank1nmf(Ypx, ain)
Ain[i] = ain
Cin.append(cin)
Cin_res.append(cin_res)
return Ain, Cin, Cin_res, idx, ijsig_all, cnn_pos, local_maxima
#%%
@profile
def update_num_components(t, sv, Ab, Cf, Yres_buf, Y_buf, rho_buf,
dims, gSig, gSiz, ind_A, CY, CC, groups, oases, gnb=1,
rval_thr=0.875, bSiz=3, robust_std=False,
N_samples_exceptionality=5, remove_baseline=True,
thresh_fitness_delta=-80, thresh_fitness_raw=-20,
thresh_overlap=0.25, batch_update_suff_stat=False,
sn=None, g=None, thresh_s_min=None, s_min=None,
Ab_dense=None, max_num_added=1, min_num_trial=1,
loaded_model=None, thresh_CNN_noisy=0.99,
sniper_mode=False, use_peak_max=False, test_both=False,
mean_buff=None, ssub_B=1, W=None, b0=None,
corr_img=None, first_moment=None, second_moment=None,
crosscorr=None, col_ind=None, row_ind=None, corr_img_mode=None,
max_img=None, downscale_matrix=None, upscale_matrix=None,
tf_in=None, tf_out=None):
"""
Checks for new components in the residual buffer and incorporates them if they pass the acceptance tests
"""
ind_new = []
gHalf = np.array(gSiz) // 2
# number of total components (including background)
M = np.shape(Ab)[-1]
    N = M - gnb  # number of components (without background)
if corr_img is None:
sv -= rho_buf.get_first()
# update variance of residual buffer
sv += rho_buf.get_last_frames(1).squeeze()
sv = np.maximum(sv, 0)
if max_img is not None:
# pnr_img = max_img.ravel(order='F') / np.sqrt(second_moment - first_moment**2)
# pnr_img = max_img / np.sqrt(second_moment - first_moment**2).reshape(dims, order='F')
pnr_img = max_img / sn.reshape(dims, order='F')
pnr_img[pnr_img<2] = 0
Ains, Cins, Cins_res, inds, ijsig_all, cnn_pos, local_max = get_candidate_components(
sv if corr_img is None else corr_img*pnr_img, dims, Yres_buf=Yres_buf,
min_num_trial=min_num_trial, gSig=gSig, gHalf=gHalf,
sniper_mode=sniper_mode, rval_thr=rval_thr, patch_size=50,
loaded_model=loaded_model, thresh_CNN_noisy=thresh_CNN_noisy,
use_peak_max=use_peak_max, test_both=test_both, mean_buff=mean_buff,
tf_in=tf_in, tf_out=tf_out)
ind_new_all = ijsig_all
num_added = 0 # len(inds)
cnt = 0
for ind, ain, cin, cin_res in zip(inds, Ains, Cins, Cins_res):
cnt += 1
ij = np.unravel_index(ind, dims)
ijSig = [[max(i - temp_g, 0), min(i + temp_g + 1, d)] for i, temp_g, d in zip(ij, gHalf, dims)]
indices = np.ravel_multi_index(
np.ix_(*[np.arange(ij[0], ij[1])
for ij in ijSig]), dims, order='F').ravel()
cin_circ = cin.get_ordered()
useOASIS = False # whether to use faster OASIS for cell detection
accepted = True # flag indicating new component has not been rejected yet
if Ab_dense is None:
Ain = np.zeros((np.prod(dims), 1), dtype=np.float32)
Ain[indices, :] = ain[:, None]
ff = np.where((Ab.T.dot(Ain).T > thresh_overlap)
[:, gnb:])[1] + gnb
else:
ff = np.where(Ab_dense[indices, gnb:M].T.dot(
ain).T > thresh_overlap)[0] + gnb
if ff.size > 0:
# accepted = False
cc = [corr(cin_circ.copy(), cins) for cins in Cf[ff, :]]
if np.any(np.array(cc) > .25) and accepted:
accepted = False # reject component as duplicate
if s_min is None:
s_min = 0
# use s_min * noise estimate * sqrt(1-sum(gamma))
elif s_min < 0:
        # the formula has been obtained by running OASIS with s_min=0 and lambda=0 on Gaussian noise.
# e.g. 1 * sigma * sqrt(1-sum(gamma)) corresponds roughly to the root mean square (non-zero) spike size, sqrt(<s^2>)
# 2 * sigma * sqrt(1-sum(gamma)) corresponds roughly to the 95% percentile of (non-zero) spike sizes
# 3 * sigma * sqrt(1-sum(gamma)) corresponds roughly to the 99.7% percentile of (non-zero) spike sizes
s_min = -s_min * sqrt((ain**2).dot(sn[indices]**2)) * sqrt(1 - np.sum(g))
cin_res = cin_res.get_ordered()
if accepted:
if useOASIS:
oas = OASIS(g=g, s_min=s_min,
num_empty_samples=t + 1 - len(cin_res))
for yt in cin_res:
oas.fit_next(yt)
accepted = oas.get_l_of_last_pool() <= t
else:
fitness_delta, erfc_delta, std_rr, _ = compute_event_exceptionality(
np.diff(cin_res)[None, :], robust_std=robust_std, N=N_samples_exceptionality)
if remove_baseline:
num_samps_bl = min(len(cin_res) // 5, 800)
bl = percentile_filter(cin_res, 8, size=num_samps_bl)
else:
bl = 0
fitness_raw, erfc_raw, std_rr, _ = compute_event_exceptionality(
(cin_res - bl)[None, :], robust_std=robust_std,
N=N_samples_exceptionality)
accepted = (fitness_delta < thresh_fitness_delta) or (
fitness_raw < thresh_fitness_raw)
if accepted:
# print('adding component' + str(N + 1) + ' at timestep ' + str(t))
num_added += 1
ind_new.append(ijSig)
if oases is not None:
if not useOASIS:
# lambda from Selesnick's 3*sigma*|K| rule
# use noise estimate from init batch or use std_rr?
# sn_ = sqrt((ain**2).dot(sn[indices]**2)) / sqrt(1 - g**2)
sn_ = std_rr
oas = OASIS(np.ravel(g)[0], 3 * sn_ /
(sqrt(1 - g**2) if np.size(g) == 1 else
sqrt((1 + g[1]) * ((1 - g[1])**2 - g[0]**2) / (1 - g[1])))
if s_min == 0 else 0,
s_min, num_empty_samples=t +
1 - len(cin_res),
g2=0 if np.size(g) == 1 else g[1])
for yt in cin_res:
oas.fit_next(yt)
oases.append(oas)
Ain_csc = csc_matrix((ain, (indices, [0] * len(indices))), (np.prod(dims), 1), dtype=np.float32)
if Ab_dense is None:
groups = update_order(Ab, Ain, groups)[0]
else:
groups = update_order(Ab_dense[indices, :M], ain, groups)[0]
Ab_dense[indices, M] = ain
# faster version of scipy.sparse.hstack
csc_append(Ab, Ain_csc)
ind_A.append(Ab.indices[Ab.indptr[M]:Ab.indptr[M + 1]])
tt = t * 1.
# preallocate memory for speed up?
CC1 = np.hstack([CC, Cf.dot(cin_circ / tt)[:, None]])
CC2 = np.hstack(
[(Cf.dot(cin_circ)).T, cin_circ.dot(cin_circ)]) / tt
CC = np.vstack([CC1, CC2])
Cf = np.vstack([Cf, cin_circ])
if W is not None: # 1p data, subtract background
y = Y_buf.get_ordered()
if ssub_B == 1:
x = y.T - Ab.dot(Cf) - b0[:, None]
y = y[:, indices] - W[indices].dot(x).T - b0[indices]
else:
d1, d2 = dims
x = downscale_matrix.dot(y.T - Ab.dot(Cf) - b0[:, None])
y = y[:, indices] - upscale_matrix.tocsr()[indices].dot(
W).dot(x).T - b0[indices]
CY[M, indices] = cin_circ.dot(y) / tt
else:
CY[M, indices] = cin.dot(Y_buf[:, indices]) / tt
N = N + 1
M = M + 1
if crosscorr is not None:
# TODO: restrict to indices where component is located
if Ab_dense is None:
Ain = np.zeros(np.prod(dims), dtype=np.float32)
Ain[indices] = ain
else:
Ain = Ab_dense[:, M - 1]
if corr_img_mode == 'cumulative':
# first_moment[indices] = 0
# second_moment[indices] = 0
first_moment[Ain > 0] = 0
second_moment[Ain > 0] = 0
crosscorr *= (Ain[row_ind]==0) * (Ain[col_ind]==0)
else:
div = t if corr_img_mode == 'cumulative' else len(cin)
first_moment[indices] -= cin.sum() / div * ain
# (Y-ac')^2 = Y.^2 + (ac'.^2 - 2Y.ac)
second_moment[indices] += (ain**2 * cin.dot(cin) -
2 * cin.dot(Yres_buf[:, indices]) * ain) / div
crosscorr += (Ain[row_ind] * Ain[col_ind] * cin.dot(cin) -
cin.dot(Yres_buf[:, row_ind]) * Ain[col_ind] -
cin.dot(Yres_buf[:, col_ind]) * Ain[row_ind]) / div
max_img[Ain.reshape(dims, order='F') > 0] = 0
# # max_img[[slice(*i) for i in ijSig]] = first_moment[indices].reshape(
# # np.diff(ijSig).ravel(), order='F')
Yres_buf[:, indices] -= np.outer(cin, ain)
if corr_img is None:
# restrict blurring to region where component is located
# update bigger region than neural patch to avoid boundary effects
slices_update = tuple(slice(max(0, ijs[0] - sg // 2), min(d, ijs[1] + sg // 2))
for ijs, sg, d in zip(ijSig, gSiz, dims))
# filter even bigger region to avoid boundary effects
slices_filter = tuple(slice(max(0, ijs[0] - sg), min(d, ijs[1] + sg))
for ijs, sg, d in zip(ijSig, gSiz, dims))
ind_vb = np.ravel_multi_index(
np.ix_(*[np.arange(sl.start, sl.stop)
for sl in slices_update]), dims, order='C').ravel()
if len(dims) == 3:
rho_buf[:, ind_vb] = np.stack([imblur(
vb.reshape(dims, order='F')[slices_filter], sig=gSig, siz=gSiz,
nDimBlur=len(dims))[tuple([slice(
slices_update[i].start - slices_filter[i].start,
slices_update[i].stop - slices_filter[i].start)
for i in range(len(dims))])].ravel() for vb in Yres_buf])**2
else:
# faster than looping over frames:
# transform all frames into one, blur all simultaneously, transform back
Y_filter = Yres_buf.reshape((-1,) + dims, order='F'
)[:, slices_filter[0], slices_filter[1]]
T, d0, d1 = Y_filter.shape
tmp = np.concatenate((Y_filter, np.zeros((T, gHalf[0], d1), dtype=np.float32)),
axis=1).reshape(-1, d1)
cv2.GaussianBlur(tmp, tuple(gSiz), gSig[0], tmp, gSig[1], cv2.BORDER_CONSTANT)
slices = tuple([slice(slices_update[i].start - slices_filter[i].start,
slices_update[i].stop - slices_filter[i].start)
for i in range(len(dims))])
rho_buf[:, ind_vb] = tmp.reshape(T, -1, d1)[
(slice(None),) + slices].reshape(T, -1)**2
sv[ind_vb] = np.sum(rho_buf[:, ind_vb], 0)
return Ab, Cf, Yres_buf, rho_buf, CC, CY, ind_A, sv, groups, ind_new, ind_new_all, sv, cnn_pos
#%% remove components online
def remove_components_online(ind_rem, gnb, Ab, use_dense, Ab_dense, AtA, CY,
CC, M, N, noisyC, OASISinstances, C_on, exp_comps):
"""
    Remove components indexed by ind_rem (indexing starts at zero)
Args:
ind_rem list
indices of components to be removed (starting from zero)
gnb int
number of global background components
Ab csc_matrix
matrix of components + background
use_dense bool
use dense representation
Ab_dense ndarray
"""
ind_rem.sort()
ind_rem = [ind + gnb for ind in ind_rem[::-1]]
ind_keep = list(set(range(Ab.shape[-1])) - set(ind_rem))
ind_keep.sort()
if use_dense:
Ab_dense = np.delete(Ab_dense, ind_rem, axis=1)
else:
Ab_dense = []
AtA = np.delete(AtA, ind_rem, axis=0)
AtA = np.delete(AtA, ind_rem, axis=1)
CY = np.delete(CY, ind_rem, axis=0)
CC = np.delete(CC, ind_rem, axis=0)
CC = np.delete(CC, ind_rem, axis=1)
M -= len(ind_rem)
N -= len(ind_rem)
exp_comps -= len(ind_rem)
noisyC = np.delete(noisyC, ind_rem, axis=0)
for ii in ind_rem:
del OASISinstances[ii - gnb]
C_on = np.delete(C_on, ind_rem, axis=0)
Ab = csc_matrix(Ab[:, ind_keep])
ind_A = list(
[(Ab.indices[Ab.indptr[ii]:Ab.indptr[ii+1]]) for ii in range(gnb, M)])
groups = list(map(list, update_order(Ab)[0]))
return Ab, Ab_dense, CC, CY, M, N, noisyC, OASISinstances, C_on, exp_comps, ind_A, groups, AtA
def initialize_movie_online(Y, K, gSig, rf, stride, base_name,
p=1, merge_thresh=0.95, rval_thr_online=0.9, thresh_fitness_delta_online=-30, thresh_fitness_raw_online=-50,
rval_thr_init=.5, thresh_fitness_delta_init=-20, thresh_fitness_raw_init=-20,
rval_thr_refine=0.95, thresh_fitness_delta_refine=-100, thresh_fitness_raw_refine=-100,
final_frate=10, Npeaks=10, single_thread=True, dview=None, n_processes=None):
"""
Initialize movie using CNMF on minibatch. See CNMF parameters
"""
Yr = Y.to_2D().T
# merging threshold, max correlation allowed
# order of the autoregressive system
base_name = base_name + '.mmap'
fname_new = Y.save(base_name, order='C')
Yr, dims, T = caiman.load_memmap(fname_new)
d1, d2 = dims
images = np.reshape(Yr.T, [T] + list(dims), order='F')
Y = np.reshape(Yr, dims + (T,), order='F')
Cn2 = caiman.local_correlations(Y)
# pl.imshow(Cn2)
#%
#% RUN ALGORITHM ON PATCHES
# pl.close('all')
cnm_init = caiman.source_extraction.cnmf.CNMF(n_processes, method_init='greedy_roi', k=K, gSig=gSig, merge_thresh=merge_thresh,
p=0, dview=dview, Ain=None, rf=rf, stride=stride, method_deconvolution='oasis', skip_refinement=False,
normalize_init=False, options_local_NMF=None,
minibatch_shape=100, minibatch_suff_stat=5,
update_num_comps=True, rval_thr=rval_thr_online, thresh_fitness_delta=thresh_fitness_delta_online, thresh_fitness_raw=thresh_fitness_raw_online,
batch_update_suff_stat=True, max_comp_update_shape=5)
cnm_init = cnm_init.fit(images)
A_tot = cnm_init.A
C_tot = cnm_init.C
YrA_tot = cnm_init.YrA
b_tot = cnm_init.b
f_tot = cnm_init.f
print(('Number of components:' + str(A_tot.shape[-1])))
#%
traces = C_tot + YrA_tot
# traces_a=traces-scipy.ndimage.percentile_filter(traces,8,size=[1,np.shape(traces)[-1]/5])
# traces_b=np.diff(traces,axis=1)
fitness_raw, fitness_delta, erfc_raw, erfc_delta, r_values, significant_samples = caiman.components_evaluation.evaluate_components(
Y, traces, A_tot, C_tot, b_tot, f_tot, final_frate, remove_baseline=True, N=5, robust_std=False, Athresh=0.1, Npeaks=Npeaks, thresh_C=0.3)
idx_components_r = np.where(r_values >= rval_thr_init)[0]
idx_components_raw = np.where(fitness_raw < thresh_fitness_raw_init)[0]
idx_components_delta = np.where(
fitness_delta < thresh_fitness_delta_init)[0]
idx_components = np.union1d(idx_components_r, idx_components_raw)
idx_components = np.union1d(idx_components, idx_components_delta)
idx_components_bad = np.setdiff1d(list(range(len(traces))), idx_components)
print(('Keeping ' + str(len(idx_components)) +
' and discarding ' + str(len(idx_components_bad))))
A_tot = A_tot.tocsc()[:, idx_components]
C_tot = C_tot[idx_components]
#%
cnm_refine = caiman.source_extraction.cnmf.CNMF(n_processes, method_init='greedy_roi', k=A_tot.shape, gSig=gSig, merge_thresh=merge_thresh, rf=None, stride=None,
p=p, dview=dview, Ain=A_tot, Cin=C_tot, f_in=f_tot, method_deconvolution='oasis', skip_refinement=True,
normalize_init=False, options_local_NMF=None,
minibatch_shape=100, minibatch_suff_stat=5,
update_num_comps=True, rval_thr=rval_thr_refine, thresh_fitness_delta=thresh_fitness_delta_refine, thresh_fitness_raw=thresh_fitness_raw_refine,
batch_update_suff_stat=True, max_comp_update_shape=5)
cnm_refine = cnm_refine.fit(images)
#%
A, C, b, f, YrA = cnm_refine.A, cnm_refine.C, cnm_refine.b, cnm_refine.f, cnm_refine.YrA
#%
final_frate = 10
Npeaks = 10
traces = C + YrA
fitness_raw, fitness_delta, erfc_raw, erfc_delta, r_values, significant_samples = \
caiman.components_evaluation.evaluate_components(Y, traces, A, C, b, f, final_frate, remove_baseline=True,
N=5, robust_std=False, Athresh=0.1, Npeaks=Npeaks, thresh_C=0.3)
idx_components_r = np.where(r_values >= rval_thr_refine)[0]
idx_components_raw = np.where(fitness_raw < thresh_fitness_raw_refine)[0]
idx_components_delta = np.where(
fitness_delta < thresh_fitness_delta_refine)[0]
idx_components = np.union1d(idx_components_r, idx_components_raw)
idx_components = np.union1d(idx_components, idx_components_delta)
idx_components_bad = np.setdiff1d(list(range(len(traces))), idx_components)
print(' ***** ')
print((len(traces)))
print((len(idx_components)))
#%
cnm_refine.sn = sn # FIXME: There is no sn in scope here
cnm_refine.idx_components = idx_components
cnm_refine.idx_components_bad = idx_components_bad
cnm_refine.r_values = r_values
cnm_refine.fitness_raw = fitness_raw
cnm_refine.fitness_delta = fitness_delta
cnm_refine.Cn2 = Cn2
#%
# cnm_init.dview = None
# save_object(cnm_init,fls[0][:-4]+ '_DS_' + str(ds)+ '_init.pkl')
return cnm_refine, Cn2, fname_new
def load_OnlineCNMF(filename, dview = None):
"""load object saved with the CNMF save method
Args:
filename: str
hdf5 file name containing the saved object
dview: multiprocessing or ipyparallel object
            useful to set up parallelization in the objects
"""
for key,val in load_dict_from_hdf5(filename).items():
if key == 'params':
prms = CNMFParams()
for subdict in val.keys():
prms.set(subdict, val[subdict])
new_obj = OnACID(params=prms)
for key, val in load_dict_from_hdf5(filename).items():
if key == 'dview':
setattr(new_obj, key, dview)
elif key == 'estimates':
estim = Estimates()
for key_est, val_est in val.items():
setattr(estim, key_est, val_est)
new_obj.estimates = estim
else:
if key not in ['params', 'estimates']:
setattr(new_obj, key, val)
return new_obj
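# Added usage sketch (the file name is hypothetical): reload an OnACID analysis that
# was previously written out with its save method and keep working with its estimates.
#     cnm = load_OnlineCNMF('onacid_results.hdf5')
#     A, C = cnm.estimates.A, cnm.estimates.C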
def inv_mat_vec(A):
return np.linalg.solve(A[0], A[1])
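# Added sketch: inv_mat_vec solves the linear system A x = b for an (A, b) pair packed
# into a single argument, which is convenient when mapping over many such pairs, e.g.
# with multiprocessing; the numbers below are made up.
def _inv_mat_vec_demo():
    import numpy as np
    A = np.array([[2., 0.], [0., 4.]])
    b = np.array([2., 8.])
    return inv_mat_vec((A, b))   # array([1., 2.])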
|
#!/usr/bin/env python
import featurebuilder
import pickle
import numpy
def load_data(filename):
src = open(filename, "r")
X, Y = eval(src.readline())
src.close()
return X, Y
def main():
print "Loading data..."
X, Y = load_data("test_set.py")
f = open("rnode.p", "rb")
rnode = pickle.load(f)
f.close()
rnode.debug = False
print "len(X):", len(X), "len(X[0]):", len(X[0]), "len(Y):", len(Y)
Y = numpy.array(Y, dtype="float64")
print "Shape of Y:", Y.shape
X = numpy.array(X, dtype="float64")
print "Shape of X:", X.shape
dsum = 0
rights = 0
for i in xrange(X.shape[0]):
x = X[i]
x = x.reshape(1, x.size)
y_est = rnode.execute(x, minx = -1.0, maxx = 10.0, miny = -1.0, maxy = 10.0, step = 0.5)
y_true = Y[i]
d = y_true - y_est[0]
dist = numpy.sqrt(numpy.dot(d, d))
dsum += dist
y_est_r = numpy.round(y_est)
got_it_right = numpy.array_equal(y_true, y_est_r[0])
if got_it_right:
rights += 1
print "true:", y_true, "estimate:", y_est, "dist:", dist, "rounded estimate:", y_est_r, "they're equal:", got_it_right
print "Average distance:", dsum/Y.shape[0]
print "Success rate:", float(rights)/Y.shape[0]
if __name__ == "__main__":
main()
|
import uuid
from django.core.exceptions import ValidationError
from django.db import models
class ReplyError(Exception):
def __init__(self, errors_dict):
self.errors_dict = errors_dict
@property
def errors(self):
return self.errors_dict
class Invitation(models.Model):
name = models.CharField(max_length=255)
code = models.CharField(max_length=255, db_index=True)
def __str__(self):
return self.name
def handle_reply(self, guests):
meals_by_id = dict((meal.id, meal) for meal in Meal.objects.all())
errors = {}
valid_guests = []
for guest_data in guests:
guest_id = guest_data.get('id')
try:
guest = self.guests.get(uuid=uuid.UUID(guest_id))
            except (TypeError, ValueError, ValidationError, Guest.DoesNotExist):  # malformed or unknown guest id
errors[guest_id] = {'__all__': 'No guest with that guest id'}
continue
guest.first_name = guest_data.get('first_name', None)
guest.last_name = guest_data.get('last_name', None)
guest.attending = guest_data.get('attending', None)
guest.notes = guest_data.get('notes', '')
meal_id = guest_data.get('meal', None)
guest.meal = meals_by_id.get(meal_id, None)
try:
guest.validate()
except ValidationError as e:
errors[guest_id] = dict(e)
else:
valid_guests.append(guest)
if errors:
raise ReplyError(errors)
else:
for guest in valid_guests:
guest.save()
def toJSON(self):
return {
'name': self.name,
'code': self.code,
'guests': [guest.toJSON() for guest in self.guests.all()]
}
class Meal(models.Model):
name = models.CharField(max_length=255)
def __str__(self):
return self.name
def toJSON(self):
return {
'id': self.id,
'name': self.name
}
class Guest(models.Model):
first_name = models.CharField(max_length=255, blank=True)
last_name = models.CharField(max_length=255, blank=True)
invitation = models.ForeignKey('rsvp.Invitation', related_name='guests')
guest_of = models.ForeignKey('rsvp.Guest', related_name='guests', null=True, blank=True)
meal = models.ForeignKey('rsvp.Meal', null=True, blank=True)
notes = models.CharField(max_length=2048, blank=True)
attending = models.NullBooleanField()
uuid = models.UUIDField(default=uuid.uuid4, editable=False)
def __str__(self):
return "{}, {} {}".format(
self.last_name,
self.first_name,
"(Guest of {})".format(str(self.guest_of)) if self.guest_of else ''
)
def toJSON(self):
return {
'id': str(self.uuid),
'first_name': self.first_name,
'last_name': self.last_name,
'meal': self.meal_id,
'notes': self.notes,
'attending': self.attending,
'guest_of': str(self.guest_of.uuid) if self.guest_of else None
}
def validate(self):
fields = ['first_name', 'last_name', 'attending', 'meal', 'notes']
errors = {}
for field in fields:
validator = getattr(self, 'clean_{}'.format(field), lambda: None)
try:
validator()
except ValidationError as e:
errors[field] = e
if errors:
raise ValidationError(errors)
def clean_first_name(self):
if self.attending and not self.first_name:
raise ValidationError('First name is a required field')
def clean_last_name(self):
if self.attending and not self.last_name:
raise ValidationError('Last name is a required field')
def clean_attending(self):
if not isinstance(self.attending, bool):
raise ValidationError('You must choose Yes or No')
def clean_meal(self):
if self.attending and not self.meal:
raise ValidationError('You must choose a meal if you are attending')
class QuerySet(object):
def primary_guests(self):
return self.filter(guest_of__isnull=True)
|
from math import radians
from CollisionAvoidanceMonitor.transform import Transformation
import os
# Config happens here:
# Colors for each body
colors = [(0.6, 0.6, 0.6), (1, 0, 1), (1, 1, 0), (0, 1, 1), (0, 1, 0), (1, 0.5, 0), (0.2, 0.2, 1), (1, 1, 1)]
# PV prefix
pv_prefix = os.environ["MYPVPREFIX"]
# PV prefix for controlling the system
control_pv = "{}COLLIDE:".format(pv_prefix)
# Define the geometry of the system in mm
# Coordinate origin at arc centre, with nominal beam height
z_stage = dict(name="Z_Stage", size=(1000.0, 1000.0, 630.0), color=colors[0])
rot_stage = dict(name="Rotation", size=(600.0, 600.0, 165.0), color=colors[1])
bot_arc = dict(name="Bottom_Arc", size=(600.0, 600.0, 120.0), color=colors[2])
top_arc = dict(name="Top_Arc", size=(600.0, 600.0, 120.0), color=colors[3])
fine_z = dict(name="Fine_Z", size=(600.0, 600.0, 120.0), color=colors[4])
y_base = dict(name="Y_Stage", size=(900.0, 1200.0, 50.0), color=colors[4])
y_stage = dict(name="Y_Carriage", size=(600.0, 300.0, 20.0), color=colors[5])
x_stage = dict(name="X_Carriage", size=(520.0, 300.0, 20.0), color=colors[6])
sample = dict(name="Sample", size=(250.0, 250.0, 150.0), color=colors[6])
snout = dict(name="Snout", position=(-300, 0, 0), size=(500, 70, 70), color=colors[7])
slits = dict(name="Slits", position=(450, 0, 0), size=(100, 300, 300), color=colors[7])
# Define some variables to describe the geometry
centre_arc = 750.0
beam_ref = 1625.0
# Define some search parameters
coarse = 20.0
fine = 0.5
# Define the oversized-ness of each body - a global value in mm
oversize = coarse / 4
# List of pairs to ignore: every pair [i, j] with 0 <= i <= j <= 8, i.e. collisions within the sample stack itself
ignore = []
for i in range(0, 9):
for j in range(i, 9):
ignore.append([i, j])
def move_everything(axes):
# Z stage
t = Transformation()
size = axes[0] + z_stage['size'][2]
t.translate(z=-beam_ref + size / 2)
yield t, dict(z=size)
# Rotation
t = Transformation()
t.translate(z=-beam_ref + axes[0] + z_stage['size'][2] + rot_stage['size'][2] / 2)
t.rotate(rz=radians(axes[1]))
yield t
# Bottom arc
t = Transformation()
t.translate(z=-centre_arc - (bot_arc['size'][2] / 2 + top_arc['size'][2]))
t.rotate(ry=radians(axes[2]))
t.translate(z=centre_arc + (bot_arc['size'][2] / 2 + top_arc['size'][2]))
t.translate(z=-beam_ref + axes[0] + z_stage['size'][2] + rot_stage['size'][2] + bot_arc['size'][2] / 2)
t.rotate(rz=radians(axes[1]))
yield t
# Top arc
t = Transformation(t)
t.translate(z=+(centre_arc + top_arc['size'][2] / 2), forward=False)
t.rotate(rx=radians(axes[3]), forward=False)
t.translate(z=-(centre_arc + top_arc['size'][2] / 2), forward=False)
t.translate(z=top_arc['size'][2] / 2 + bot_arc['size'][2] / 2, forward=False)
yield t
# Fine Z
u = Transformation(t)
size = axes[4] + fine_z['size'][2]
u.translate(z=size / 2 + top_arc['size'][2] / 2, forward=False)
yield u, dict(z=size)
# Base of Y stage (top of fine Z)
t = Transformation(t)
size = axes[4] + fine_z['size'][2]
t.translate(z=size + top_arc['size'][2] / 2 + y_base['size'][2] / 2, forward=False)
yield t
# Y stage
t = Transformation(t)
t.translate(y=axes[5], z=y_base['size'][2] / 2 + y_stage['size'][2] / 2, forward=False)
yield t
# X stage
t = Transformation(t)
t.translate(x=axes[6], z=y_stage['size'][2] / 2 + x_stage['size'][2] / 2, forward=False)
yield t
# Sample
t = Transformation(t)
t.translate(z=x_stage['size'][2] / 2 + sample['size'][2] / 2, forward=False)
yield t
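# Note (added): the transformations above are yielded in the same order as the first
# nine entries of `geometries` below and consume axes[0]..axes[6]; the snout and slits
# keep their fixed positions and receive no transformation.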
moves = move_everything
# Put them in a list
geometries = [z_stage, rot_stage, bot_arc, top_arc, fine_z, y_base, y_stage, x_stage, sample, snout, slits]
# Attach monitors to readbacks
pvs = ["{}MOT:MTR0101",
"{}MOT:MTR0102",
"{}MOT:MTR0103",
"{}MOT:MTR0104",
"{}MOT:MTR0105",
"{}MOT:MTR0106",
"{}MOT:MTR0107"]
pvs = [pv.format(pv_prefix) for pv in pvs]
hardlimits = [[-220, 100],
[-180.0, 180.0],
[-20, 20.0],
[-20.0, 20.0],
[0.0, 30.0],
[-300, 300],
[-37.5, 37.5]]
|
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import CsHelper
import logging
class CsRoute:
""" Manage routes """
def __init__(self):
self.table_prefix = "Table_"
def get_tablename(self, name):
return self.table_prefix + name
def add_table(self, devicename):
tablenumber = 100 + int(devicename[3:])
tablename = self.get_tablename(devicename)
str = "%s %s" % (tablenumber, tablename)
filename = "/etc/iproute2/rt_tables"
logging.info("Adding route table: " + str + " to " + filename + " if not present ")
if not CsHelper.definedinfile(filename, str):
CsHelper.execute("sudo echo " + str + " >> /etc/iproute2/rt_tables")
        # remove "from all table tablename" if it exists, else it will interfere with
# routing of unintended traffic
if self.findRule("from all lookup " + tablename):
CsHelper.execute("sudo ip rule delete from all table " + tablename)
def flush_table(self, tablename):
CsHelper.execute("ip route flush table %s" % (tablename))
CsHelper.execute("ip route flush cache")
def add_route(self, dev, address):
""" Wrapper method that adds table name and device to route statement """
# ip route add dev eth1 table Table_eth1 10.0.2.0/24
table = self.get_tablename(dev)
logging.info("Adding route: dev " + dev + " table: " +
table + " network: " + address + " if not present")
cmd = "dev %s table %s %s" % (dev, table, address)
cmd = "default via %s table %s proto static" % (address, table)
self.set_route(cmd)
def add_network_route(self, dev, address):
""" Wrapper method that adds table name and device to route statement """
# ip route add dev eth1 table Table_eth1 10.0.2.0/24
table = self.get_tablename(dev)
logging.info("Adding route: dev " + dev + " table: " +
table + " network: " + address + " if not present")
cmd = "throw %s table %s proto static" % (address, table)
self.set_route(cmd)
def set_route(self, cmd, method="add"):
""" Add a route if it is not already defined """
found = False
search = cmd
if "throw" in search:
search = "type " + search
for i in CsHelper.execute("ip route show " + search):
found = True
if not found and method == "add":
logging.info("Add " + cmd)
cmd = "ip route add " + cmd
elif found and method == "delete":
logging.info("Delete " + cmd)
cmd = "ip route delete " + cmd
else:
return
CsHelper.execute(cmd)
def add_defaultroute(self, gateway):
""" Add a default route
:param str gateway
:return: bool
"""
if not gateway:
raise Exception("Gateway cannot be None.")
if self.defaultroute_exists():
return False
else:
cmd = "default via " + gateway
logging.info("Adding default route")
self.set_route(cmd)
return True
def defaultroute_exists(self):
""" Return True if a default route is present
:return: bool
"""
logging.info("Checking if default ipv4 route is present")
route_found = CsHelper.execute("ip -4 route list 0/0")
if len(route_found) > 0:
logging.info("Default route found: " + route_found[0])
return True
else:
logging.warn("No default route found!")
return False
def findRule(self, rule):
for i in CsHelper.execute("ip rule show"):
if rule in i.strip():
return True
return False
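# Added usage sketch (device names and addresses are hypothetical):
#     route = CsRoute()
#     route.add_table("eth1")                          # registers "101 Table_eth1" in rt_tables
#     route.add_network_route("eth1", "10.0.2.0/24")   # throw route for the local subnet
#     route.add_defaultroute("10.0.2.1")               # default route, only added if missing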
|
import os
import csv
import json
import pymongo
from kagen import utils
from kagen.utils import config
from datetime import datetime
logger = utils.get_logger("khan")
def work():
khan = utils.get_conn_khan()
db = utils.get_conn_mongo()
dtf = "%Y-%m-%dT%H:%M:%SZ"
doc = utils.get_response_json(khan, "/api/v1/playlists")
for item in doc:
item["_id"] = item["id"]
for playlist in doc:
playlist["backup_timestamp"] = datetime.strptime(playlist["backup_timestamp"], dtf)
db.playlists.drop()
db.playlists.insert(doc)
logger.info("loaded {} items in playlists collection".format(len(doc)))
doc = utils.get_response_json(khan, "/api/v1/playlists/library")
db.playlists_library.drop()
db.playlists_library.insert(doc)
logger.info("loaded {} items in playlists_library collection".format(len(doc)))
doc = utils.get_response_json(khan, "/api/v1/playlists/library/list")
for playlist in doc:
playlist["_id"] = playlist["id"]
playlist["backup_timestamp"] = datetime.strptime(playlist["backup_timestamp"], dtf)
db.playlists_library_list.drop()
db.playlists_library_list.insert(doc)
logger.info("loaded {} items in playlists_library_list collection".format(len(doc)))
videos = []
ids = []
for playlist in doc:
for video in playlist["videos"]:
video_id = video["id"]
if video_id not in ids:
video["_id"] = video_id
videos.append(video)
ids.append(video_id)
video["date_added"] = datetime.strptime(video["date_added"], dtf)
video["backup_timestamp"] = datetime.strptime(video["backup_timestamp"], dtf)
db.video_list.drop()
db.video_list.insert(videos)
logger.info("loaded {} items in video_list collection".format(len(videos)))
@utils.entry_point
def main():
logger.info("START khan")
work()
logger.info("DONE khan")
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010 Savoir-faire Linux (<http://www.savoirfairelinux.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name": "Supplier id on expense line",
"version": "1.0",
"author": "Savoir-faire Linux,Odoo Community Association (OCA)",
"website": "http://www.savoirfairelinux.com",
"license": "GPL-3 or any later version",
"category": "Human Resources",
"description": """
This module adds the 'supplier_id_accountedge' field to the
hr.employee model.
""",
"depends": ['hr_expense'],
"data": [
'hr_employee_accountedge.xml',
],
'installable': False,
}
|
from __future__ import unicode_literals
import os
import click
from pipes import quote
from .push_file import push_file, pull_file
from stable_world.py_helpers import urlparse, urlunparse
class BaseManager(object):
NAME = None
PROGRAM = None
@classmethod
def enabled(cls):
if cls.PROGRAM is None:
return True
for path in os.getenv('PATH', '').split(os.pathsep):
if os.path.isfile(os.path.join(path, cls.PROGRAM)):
return True
def __init__(self, site_url, urls, bucket, token, dryrun):
self.site_url = site_url
self.bucket = bucket
self.token = token
self.dryrun = dryrun
self.cache_name = self.NAME
self.cache_info = urls[self.NAME]
@property
def config_file(self):
raise NotImplementedError()
@property
def cache_dir(self):
cache_dir = os.path.join('~', '.cache', 'stable.world', self.bucket)
return os.path.expanduser(cache_dir)
def get_base_url(self, basicAuthRequired=False):
site_url = self.site_url
if basicAuthRequired:
site_uri = urlparse(self.site_url)
site_url = urlunparse(site_uri._replace(netloc='{}:{}@{}'.format(
'token',
self.token,
site_uri.netloc
)))
return '%s/cache/%s/%s/' % (site_url, self.bucket, self.cache_name)
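    # Illustrative values (added, hypothetical): with site_url='https://stable.world',
    # bucket='mybucket' and a cache named 'pypi', this returns
    # 'https://stable.world/cache/mybucket/pypi/'; with basicAuthRequired=True the token
    # is embedded as 'https://token:<token>@stable.world/cache/mybucket/pypi/'.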
def use(self):
if not self.dryrun:
push_file(self.config_file)
return self.update_config_file()
@classmethod
def unuse(cls, info):
if not info:
return
for config_file in info.get('config_files', []):
click.echo('Removing {} config file {}'.format(cls.NAME, quote(config_file)))
pull_file(config_file)
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2009-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
import base64
import datetime
import dateutil
import email
import logging
import pytz
import re
import time
import xmlrpclib
from email.message import Message
from openerp import tools
from openerp import SUPERUSER_ID
from openerp.addons.mail.mail_message import decode
from openerp.osv import fields, osv, orm
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
def decode_header(message, header, separator=' '):
return separator.join(map(decode, filter(None, message.get_all(header, []))))
class mail_thread(osv.AbstractModel):
''' mail_thread model is meant to be inherited by any model that needs to
act as a discussion topic on which messages can be attached. Public
methods are prefixed with ``message_`` in order to avoid name
collisions with methods of the models that will inherit from this class.
``mail.thread`` defines fields used to handle and display the
communication history. ``mail.thread`` also manages followers of
inheriting classes. All features and expected behavior are managed
        by mail.thread. Widgets have been designed for the 7.0 and following
versions of OpenERP.
Inheriting classes are not required to implement any method, as the
default implementation will work for any model. However it is common
to override at least the ``message_new`` and ``message_update``
methods (calling ``super``) to add model-specific behavior at
creation and update of a thread when processing incoming emails.
Options:
- _mail_flat_thread: if set to True, all messages without parent_id
are automatically attached to the first message posted on the
            resource. If set to False, the display of Chatter is done using
threads, and no parent_id is automatically set.
'''
_name = 'mail.thread'
_description = 'Email Thread'
_mail_flat_thread = True
# Automatic logging system if mail installed
# _track = {
# 'field': {
# 'module.subtype_xml': lambda self, cr, uid, obj, context=None: obj[state] == done,
# 'module.subtype_xml2': lambda self, cr, uid, obj, context=None: obj[state] != done,
# },
# 'field2': {
# ...
# },
# }
# where
# :param string field: field name
# :param module.subtype_xml: xml_id of a mail.message.subtype (i.e. mail.mt_comment)
# :param obj: is a browse_record
# :param function lambda: returns whether the tracking should record using this subtype
_track = {}
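    # Added hypothetical example of the structure described above: post the
    # 'module.mt_state_done' subtype whenever the tracked 'state' field becomes 'done'.
    # _track = {
    #     'state': {
    #         'module.mt_state_done': lambda self, cr, uid, obj, context=None: obj['state'] == 'done',
    #     },
    # }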
def _get_message_data(self, cr, uid, ids, name, args, context=None):
""" Computes:
- message_unread: has uid unread message for the document
- message_summary: html snippet summarizing the Chatter for kanban views """
res = dict((id, dict(message_unread=False, message_unread_count=0, message_summary=' ')) for id in ids)
user_pid = self.pool.get('res.users').read(cr, uid, uid, ['partner_id'], context=context)['partner_id'][0]
# search for unread messages, directly in SQL to improve performances
cr.execute(""" SELECT m.res_id FROM mail_message m
RIGHT JOIN mail_notification n
ON (n.message_id = m.id AND n.partner_id = %s AND (n.read = False or n.read IS NULL))
WHERE m.model = %s AND m.res_id in %s""",
(user_pid, self._name, tuple(ids),))
for result in cr.fetchall():
res[result[0]]['message_unread'] = True
res[result[0]]['message_unread_count'] += 1
for id in ids:
if res[id]['message_unread_count']:
title = res[id]['message_unread_count'] > 1 and _("You have %d unread messages") % res[id]['message_unread_count'] or _("You have one unread message")
res[id]['message_summary'] = "<span class='oe_kanban_mail_new' title='%s'><span class='oe_e'>9</span> %d %s</span>" % (title, res[id].pop('message_unread_count'), _("New"))
return res
def _get_subscription_data(self, cr, uid, ids, name, args, context=None):
""" Computes:
- message_subtype_data: data about document subtypes: which are
available, which are followed if any """
res = dict((id, dict(message_subtype_data='')) for id in ids)
user_pid = self.pool.get('res.users').read(cr, uid, uid, ['partner_id'], context=context)['partner_id'][0]
# find current model subtypes, add them to a dictionary
subtype_obj = self.pool.get('mail.message.subtype')
subtype_ids = subtype_obj.search(cr, uid, ['|', ('res_model', '=', self._name), ('res_model', '=', False)], context=context)
subtype_dict = dict((subtype.name, dict(default=subtype.default, followed=False, id=subtype.id)) for subtype in subtype_obj.browse(cr, uid, subtype_ids, context=context))
for id in ids:
res[id]['message_subtype_data'] = subtype_dict.copy()
# find the document followers, update the data
fol_obj = self.pool.get('mail.followers')
fol_ids = fol_obj.search(cr, uid, [
('partner_id', '=', user_pid),
('res_id', 'in', ids),
('res_model', '=', self._name),
], context=context)
for fol in fol_obj.browse(cr, uid, fol_ids, context=context):
thread_subtype_dict = res[fol.res_id]['message_subtype_data']
for subtype in fol.subtype_ids:
thread_subtype_dict[subtype.name]['followed'] = True
res[fol.res_id]['message_subtype_data'] = thread_subtype_dict
return res
def _search_message_unread(self, cr, uid, obj=None, name=None, domain=None, context=None):
return [('message_ids.to_read', '=', True)]
def _get_followers(self, cr, uid, ids, name, arg, context=None):
fol_obj = self.pool.get('mail.followers')
fol_ids = fol_obj.search(cr, SUPERUSER_ID, [('res_model', '=', self._name), ('res_id', 'in', ids)])
res = dict((id, dict(message_follower_ids=[], message_is_follower=False)) for id in ids)
user_pid = self.pool.get('res.users').read(cr, uid, uid, ['partner_id'], context=context)['partner_id'][0]
for fol in fol_obj.browse(cr, SUPERUSER_ID, fol_ids):
res[fol.res_id]['message_follower_ids'].append(fol.partner_id.id)
if fol.partner_id.id == user_pid:
res[fol.res_id]['message_is_follower'] = True
return res
def _set_followers(self, cr, uid, id, name, value, arg, context=None):
if not value:
return
partner_obj = self.pool.get('res.partner')
fol_obj = self.pool.get('mail.followers')
# read the old set of followers, and determine the new set of followers
fol_ids = fol_obj.search(cr, SUPERUSER_ID, [('res_model', '=', self._name), ('res_id', '=', id)])
old = set(fol.partner_id.id for fol in fol_obj.browse(cr, SUPERUSER_ID, fol_ids))
new = set(old)
for command in value or []:
if isinstance(command, (int, long)):
new.add(command)
elif command[0] == 0:
new.add(partner_obj.create(cr, uid, command[2], context=context))
elif command[0] == 1:
partner_obj.write(cr, uid, [command[1]], command[2], context=context)
new.add(command[1])
elif command[0] == 2:
partner_obj.unlink(cr, uid, [command[1]], context=context)
new.discard(command[1])
elif command[0] == 3:
new.discard(command[1])
elif command[0] == 4:
new.add(command[1])
elif command[0] == 5:
new.clear()
elif command[0] == 6:
new = set(command[2])
# remove partners that are no longer followers
fol_ids = fol_obj.search(cr, SUPERUSER_ID,
[('res_model', '=', self._name), ('res_id', '=', id), ('partner_id', 'not in', list(new))])
fol_obj.unlink(cr, SUPERUSER_ID, fol_ids)
# add new followers
for partner_id in new - old:
fol_obj.create(cr, SUPERUSER_ID, {'res_model': self._name, 'res_id': id, 'partner_id': partner_id})
def _search_followers(self, cr, uid, obj, name, args, context):
fol_obj = self.pool.get('mail.followers')
res = []
for field, operator, value in args:
assert field == name
fol_ids = fol_obj.search(cr, SUPERUSER_ID, [('res_model', '=', self._name), ('partner_id', operator, value)])
res_ids = [fol.res_id for fol in fol_obj.browse(cr, SUPERUSER_ID, fol_ids)]
res.append(('id', 'in', res_ids))
return res
_columns = {
'message_is_follower': fields.function(_get_followers,
            type='boolean', string='Is a Follower', multi='_get_followers'),
'message_follower_ids': fields.function(_get_followers, fnct_inv=_set_followers,
fnct_search=_search_followers, type='many2many',
obj='res.partner', string='Followers', multi='_get_followers'),
'message_ids': fields.one2many('mail.message', 'res_id',
domain=lambda self: [('model', '=', self._name)],
auto_join=True,
string='Messages',
help="Messages and communication history"),
'message_unread': fields.function(_get_message_data,
fnct_search=_search_message_unread, multi="_get_message_data",
type='boolean', string='Unread Messages',
help="If checked new messages require your attention."),
'message_summary': fields.function(_get_message_data, method=True,
type='text', string='Summary', multi="_get_message_data",
help="Holds the Chatter summary (number of messages, ...). "\
"This summary is directly in html format in order to "\
"be inserted in kanban views."),
}
#------------------------------------------------------
# CRUD overrides for automatic subscription and logging
#------------------------------------------------------
def create(self, cr, uid, values, context=None):
""" Chatter override :
- subscribe uid
- subscribe followers of parent
- log a creation message
"""
if context is None:
context = {}
thread_id = super(mail_thread, self).create(cr, uid, values, context=context)
# subscribe uid unless asked not to
if not context.get('mail_create_nosubscribe'):
self.message_subscribe_users(cr, uid, [thread_id], [uid], context=context)
self.message_auto_subscribe(cr, uid, [thread_id], values.keys(), context=context)
# automatic logging unless asked not to (mainly for various testing purpose)
if not context.get('mail_create_nolog'):
self.message_post(cr, uid, thread_id, body=_('Document created'), context=context)
return thread_id
def write(self, cr, uid, ids, values, context=None):
if isinstance(ids, (int, long)):
ids = [ids]
# Track initial values of tracked fields
tracked_fields = self._get_tracked_fields(cr, uid, values.keys(), context=context)
if tracked_fields:
initial = self.read(cr, uid, ids, tracked_fields.keys(), context=context)
initial_values = dict((item['id'], item) for item in initial)
# Perform write, update followers
result = super(mail_thread, self).write(cr, uid, ids, values, context=context)
self.message_auto_subscribe(cr, uid, ids, values.keys(), context=context)
# Perform the tracking
if tracked_fields:
self.message_track(cr, uid, ids, tracked_fields, initial_values, context=context)
return result
def unlink(self, cr, uid, ids, context=None):
""" Override unlink to delete messages and followers. This cannot be
cascaded, because link is done through (res_model, res_id). """
msg_obj = self.pool.get('mail.message')
fol_obj = self.pool.get('mail.followers')
# delete messages and notifications
msg_ids = msg_obj.search(cr, uid, [('model', '=', self._name), ('res_id', 'in', ids)], context=context)
msg_obj.unlink(cr, uid, msg_ids, context=context)
# delete
res = super(mail_thread, self).unlink(cr, uid, ids, context=context)
# delete followers
fol_ids = fol_obj.search(cr, SUPERUSER_ID, [('res_model', '=', self._name), ('res_id', 'in', ids)], context=context)
fol_obj.unlink(cr, SUPERUSER_ID, fol_ids, context=context)
return res
def copy(self, cr, uid, id, default=None, context=None):
default = default or {}
default['message_ids'] = []
default['message_follower_ids'] = []
return super(mail_thread, self).copy(cr, uid, id, default=default, context=context)
#------------------------------------------------------
# Automatically log tracked fields
#------------------------------------------------------
def _get_tracked_fields(self, cr, uid, updated_fields, context=None):
""" Return a structure of tracked fields for the current model.
:param list updated_fields: modified field names
:return list: a list of (field_name, column_info obj), containing
always tracked fields and modified on_change fields
"""
lst = []
for name, column_info in self._all_columns.items():
visibility = getattr(column_info.column, 'track_visibility', False)
if visibility == 'always' or (visibility == 'onchange' and name in updated_fields) or name in self._track:
lst.append(name)
if not lst:
return lst
return self.fields_get(cr, uid, lst, context=context)
def message_track(self, cr, uid, ids, tracked_fields, initial_values, context=None):
def convert_for_display(value, col_info):
if not value and col_info['type'] == 'boolean':
return 'False'
if not value:
return ''
if col_info['type'] == 'many2one':
return value[1]
if col_info['type'] == 'selection':
return dict(col_info['selection'])[value]
return value
def format_message(message_description, tracked_values):
message = ''
if message_description:
message = '<span>%s</span>' % message_description
for name, change in tracked_values.items():
message += '<div> • <b>%s</b>: ' % change.get('col_info')
if change.get('old_value'):
message += '%s → ' % change.get('old_value')
message += '%s</div>' % change.get('new_value')
return message
if not tracked_fields:
return True
for record in self.read(cr, uid, ids, tracked_fields.keys(), context=context):
initial = initial_values[record['id']]
changes = []
tracked_values = {}
# generate tracked_values data structure: {'col_name': {col_info, new_value, old_value}}
for col_name, col_info in tracked_fields.items():
if record[col_name] == initial[col_name] and getattr(self._all_columns[col_name].column, 'track_visibility', None) == 'always':
tracked_values[col_name] = dict(col_info=col_info['string'],
new_value=convert_for_display(record[col_name], col_info))
elif record[col_name] != initial[col_name]:
if getattr(self._all_columns[col_name].column, 'track_visibility', None) in ['always', 'onchange']:
tracked_values[col_name] = dict(col_info=col_info['string'],
old_value=convert_for_display(initial[col_name], col_info),
new_value=convert_for_display(record[col_name], col_info))
if col_name in tracked_fields:
changes.append(col_name)
if not changes:
continue
# find subtypes and post messages or log if no subtype found
subtypes = []
for field, track_info in self._track.items():
if field not in changes:
continue
for subtype, method in track_info.items():
if method(self, cr, uid, record, context):
subtypes.append(subtype)
posted = False
for subtype in subtypes:
try:
subtype_rec = self.pool.get('ir.model.data').get_object(cr, uid, subtype.split('.')[0], subtype.split('.')[1], context=context)
except ValueError, e:
_logger.debug('subtype %s not found, giving error "%s"' % (subtype, e))
continue
message = format_message(subtype_rec.description if subtype_rec.description else subtype_rec.name, tracked_values)
self.message_post(cr, uid, record['id'], body=message, subtype=subtype, context=context)
posted = True
if not posted:
message = format_message('', tracked_values)
self.message_post(cr, uid, record['id'], body=message, context=context)
return True
#------------------------------------------------------
# mail.message wrappers and tools
#------------------------------------------------------
def _needaction_domain_get(self, cr, uid, context=None):
if self._needaction:
return [('message_unread', '=', True)]
return []
def _garbage_collect_attachments(self, cr, uid, context=None):
""" Garbage collect lost mail attachments. Those are attachments
- linked to res_model 'mail.compose.message', the composer wizard
- with res_id 0, because they were created outside of an existing
wizard (typically user input through Chatter or reports
created on-the-fly by the templates)
- unused since at least one day (create_date and write_date)
"""
limit_date = datetime.datetime.utcnow() - datetime.timedelta(days=1)
limit_date_str = datetime.datetime.strftime(limit_date, tools.DEFAULT_SERVER_DATETIME_FORMAT)
ir_attachment_obj = self.pool.get('ir.attachment')
attach_ids = ir_attachment_obj.search(cr, uid, [
('res_model', '=', 'mail.compose.message'),
('res_id', '=', 0),
('create_date', '<', limit_date_str),
('write_date', '<', limit_date_str),
], context=context)
ir_attachment_obj.unlink(cr, uid, attach_ids, context=context)
return True
#------------------------------------------------------
# Email specific
#------------------------------------------------------
def message_get_reply_to(self, cr, uid, ids, context=None):
if not self._inherits.get('mail.alias'):
return [False for id in ids]
return ["%s@%s" % (record['alias_name'], record['alias_domain'])
if record.get('alias_domain') and record.get('alias_name')
else False
for record in self.read(cr, SUPERUSER_ID, ids, ['alias_name', 'alias_domain'], context=context)]
#------------------------------------------------------
# Mail gateway
#------------------------------------------------------
def message_capable_models(self, cr, uid, context=None):
""" Used by the plugin addon, based for plugin_outlook and others. """
ret_dict = {}
for model_name in self.pool.obj_list():
model = self.pool.get(model_name)
if 'mail.thread' in getattr(model, '_inherit', []):
ret_dict[model_name] = model._description
return ret_dict
def _message_find_partners(self, cr, uid, message, header_fields=['From'], context=None):
""" Find partners related to some header fields of the message.
TDE TODO: merge me with other partner finding methods in 8.0 """
partner_obj = self.pool.get('res.partner')
partner_ids = []
s = ', '.join([decode(message.get(h)) for h in header_fields if message.get(h)])
for email_address in tools.email_split(s):
related_partners = partner_obj.search(cr, uid, [('email', 'ilike', email_address), ('user_ids', '!=', False)], limit=1, context=context)
if not related_partners:
related_partners = partner_obj.search(cr, uid, [('email', 'ilike', email_address)], limit=1, context=context)
partner_ids += related_partners
return partner_ids
def _message_find_user_id(self, cr, uid, message, context=None):
""" TDE TODO: check and maybe merge me with other user finding methods in 8.0 """
from_local_part = tools.email_split(decode(message.get('From')))[0]
        # FP Note: canonicalization required, at a minimum: .lower()
user_ids = self.pool.get('res.users').search(cr, uid, ['|',
('login', '=', from_local_part),
('email', '=', from_local_part)], context=context)
return user_ids[0] if user_ids else uid
def message_route(self, cr, uid, message, model=None, thread_id=None,
custom_values=None, context=None):
"""Attempt to figure out the correct target model, thread_id,
custom_values and user_id to use for an incoming message.
Multiple values may be returned, if a message had multiple
recipients matching existing mail.aliases, for example.
The following heuristics are used, in this order:
1. If the message replies to an existing thread_id, and
properly contains the thread model in the 'In-Reply-To'
header, use this model/thread_id pair, and ignore
custom_value (not needed as no creation will take place)
2. Look for a mail.alias entry matching the message
recipient, and use the corresponding model, thread_id,
custom_values and user_id.
3. Fallback to the ``model``, ``thread_id`` and ``custom_values``
provided.
4. If all the above fails, raise an exception.
:param string message: an email.message instance
:param string model: the fallback model to use if the message
does not match any of the currently configured mail aliases
(may be None if a matching alias is supposed to be present)
:type dict custom_values: optional dictionary of default field values
to pass to ``message_new`` if a new record needs to be created.
Ignored if the thread record already exists, and also if a
matching mail.alias was found (aliases define their own defaults)
:param int thread_id: optional ID of the record/thread from ``model``
to which this mail should be attached. Only used if the message
does not reply to an existing thread and does not match any mail alias.
:return: list of [model, thread_id, custom_values, user_id]
"""
assert isinstance(message, Message), 'message must be an email.message.Message at this point'
message_id = message.get('Message-Id')
email_from = decode_header(message, 'From')
email_to = decode_header(message, 'To')
references = decode_header(message, 'References')
in_reply_to = decode_header(message, 'In-Reply-To')
# 1. Verify if this is a reply to an existing thread
thread_references = references or in_reply_to
ref_match = thread_references and tools.reference_re.search(thread_references)
if ref_match:
thread_id = int(ref_match.group(1))
model = ref_match.group(2) or model
model_pool = self.pool.get(model)
if thread_id and model and model_pool and model_pool.exists(cr, uid, thread_id) \
and hasattr(model_pool, 'message_update'):
_logger.info('Routing mail from %s to %s with Message-Id %s: direct reply to model: %s, thread_id: %s, custom_values: %s, uid: %s',
email_from, email_to, message_id, model, thread_id, custom_values, uid)
return [(model, thread_id, custom_values, uid)]
# Verify whether this is a reply to a private message
if in_reply_to:
message_ids = self.pool.get('mail.message').search(cr, uid, [('message_id', '=', in_reply_to)], limit=1, context=context)
if message_ids:
message = self.pool.get('mail.message').browse(cr, uid, message_ids[0], context=context)
_logger.info('Routing mail from %s to %s with Message-Id %s: direct reply to a private message: %s, custom_values: %s, uid: %s',
email_from, email_to, message_id, message.id, custom_values, uid)
return [(message.model, message.res_id, custom_values, uid)]
# 2. Look for a matching mail.alias entry
# Delivered-To is a safe bet in most modern MTAs, but we have to fallback on To + Cc values
# for all the odd MTAs out there, as there is no standard header for the envelope's `rcpt_to` value.
rcpt_tos = \
','.join([decode_header(message, 'Delivered-To'),
decode_header(message, 'To'),
decode_header(message, 'Cc'),
decode_header(message, 'Resent-To'),
decode_header(message, 'Resent-Cc')])
local_parts = [e.split('@')[0] for e in tools.email_split(rcpt_tos)]
if local_parts:
mail_alias = self.pool.get('mail.alias')
alias_ids = mail_alias.search(cr, uid, [('alias_name', 'in', local_parts)])
if alias_ids:
routes = []
for alias in mail_alias.browse(cr, uid, alias_ids, context=context):
user_id = alias.alias_user_id.id
if not user_id:
# TDE note: this could cause crashes, because there is no guarantee that the user
# that sent the email has the right to create or modify a new document
# Fallback on user_id = uid
# Note: recognized partners will be added as followers anyway
# user_id = self._message_find_user_id(cr, uid, message, context=context)
user_id = uid
_logger.info('No matching user_id for the alias %s', alias.alias_name)
routes.append((alias.alias_model_id.model, alias.alias_force_thread_id, \
eval(alias.alias_defaults), user_id))
_logger.info('Routing mail from %s to %s with Message-Id %s: direct alias match: %r',
email_from, email_to, message_id, routes)
return routes
# 3. Fallback to the provided parameters, if they work
model_pool = self.pool.get(model)
if not thread_id:
# Legacy: fallback to matching [ID] in the Subject
match = tools.res_re.search(decode_header(message, 'Subject'))
thread_id = match and match.group(1)
# Convert into int (bug spotted in 7.0 because of str)
try:
thread_id = int(thread_id)
except:
thread_id = False
assert thread_id and hasattr(model_pool, 'message_update') or hasattr(model_pool, 'message_new'), \
"No possible route found for incoming message from %s to %s (Message-Id %s:)." \
"Create an appropriate mail.alias or force the destination model." % (email_from, email_to, message_id)
if thread_id and not model_pool.exists(cr, uid, thread_id):
_logger.warning('Received mail reply to missing document %s! Ignoring and creating new document instead for Message-Id %s',
thread_id, message_id)
thread_id = None
_logger.info('Routing mail from %s to %s with Message-Id %s: fallback to model:%s, thread_id:%s, custom_values:%s, uid:%s',
email_from, email_to, message_id, model, thread_id, custom_values, uid)
return [(model, thread_id, custom_values, uid)]
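# Illustrative sketch (not part of the original code): a caller such as
# message_process() below consumes the routes returned above. Assuming `cr`,
# `uid` and a parsed email.message.Message `msg_txt` are available; the
# fallback model 'crm.lead' is only an example:
#
#   routes = self.message_route(cr, uid, msg_txt, model='crm.lead')
#   for model, thread_id, custom_values, user_id in routes:
#       # e.g. ('crm.lead', False, {}, 1) -> create a new lead as that user
#       pass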
def message_process(self, cr, uid, model, message, custom_values=None,
save_original=False, strip_attachments=False,
thread_id=None, context=None):
""" Process an incoming RFC2822 email message, relying on
``mail.message.parse()`` for the parsing operation,
and ``message_route()`` to figure out the target model.
Once the target model is known, its ``message_new`` method
is called with the new message (if the thread record did not exist)
or its ``message_update`` method (if it did).
There is a special case where the target model is False: a reply
to a private message. In this case, we skip the message_new /
message_update step, to just post a new message using mail_thread
message_post.
:param string model: the fallback model to use if the message
does not match any of the currently configured mail aliases
(may be None if a matching alias is supposed to be present)
:param message: source of the RFC2822 message
:type message: string or xmlrpclib.Binary
:type dict custom_values: optional dictionary of field values
to pass to ``message_new`` if a new record needs to be created.
Ignored if the thread record already exists, and also if a
matching mail.alias was found (aliases define their own defaults)
:param bool save_original: whether to keep a copy of the original
email source attached to the message after it is imported.
:param bool strip_attachments: whether to strip all attachments
before processing the message, in order to save some space.
:param int thread_id: optional ID of the record/thread from ``model``
to which this mail should be attached. When provided, this
overrides the automatic detection based on the message
headers.
"""
if context is None:
context = {}
# extract message bytes - we are forced to pass the message as binary because
# we don't know its encoding until we parse its headers and hence can't
# convert it to utf-8 for transport between the mailgate script and here.
if isinstance(message, xmlrpclib.Binary):
message = str(message.data)
# Warning: message_from_string doesn't always work correctly on unicode,
# we must use utf-8 strings here :-(
if isinstance(message, unicode):
message = message.encode('utf-8')
msg_txt = email.message_from_string(message)
# parse the message, verify we are not in a loop by checking message_id is not duplicated
msg = self.message_parse(cr, uid, msg_txt, save_original=save_original, context=context)
if strip_attachments:
msg.pop('attachments', None)
if msg.get('message_id'): # should always be True as message_parse generates one if missing
existing_msg_ids = self.pool.get('mail.message').search(cr, SUPERUSER_ID, [
('message_id', '=', msg.get('message_id')),
], context=context)
if existing_msg_ids:
_logger.info('Ignored mail from %s to %s with Message-Id %s: found duplicated Message-Id during processing',
msg.get('from'), msg.get('to'), msg.get('message_id'))
return False
# find possible routes for the message
routes = self.message_route(cr, uid, msg_txt, model,
thread_id, custom_values,
context=context)
# postpone setting msg.partner_ids after message_post, to avoid double notifications
partner_ids = msg.pop('partner_ids', [])
thread_id = False
for model, thread_id, custom_values, user_id in routes:
if self._name == 'mail.thread':
context.update({'thread_model': model})
if model:
model_pool = self.pool.get(model)
assert thread_id and hasattr(model_pool, 'message_update') or hasattr(model_pool, 'message_new'), \
"Undeliverable mail with Message-Id %s, model %s does not accept incoming emails" % \
(msg['message_id'], model)
# disable subscriptions during message_new/update to avoid having the system user running the
# email gateway become a follower of all inbound messages
nosub_ctx = dict(context, mail_create_nosubscribe=True)
if thread_id and hasattr(model_pool, 'message_update'):
model_pool.message_update(cr, user_id, [thread_id], msg, context=nosub_ctx)
else:
nosub_ctx = dict(nosub_ctx, mail_create_nolog=True)
thread_id = model_pool.message_new(cr, user_id, msg, custom_values, context=nosub_ctx)
else:
assert thread_id == 0, "Posting a message without model should be with a null res_id, to create a private message."
model_pool = self.pool.get('mail.thread')
new_msg_id = model_pool.message_post(cr, uid, [thread_id], context=context, subtype='mail.mt_comment', **msg)
if partner_ids:
# postponed after message_post, because this is an external message and we don't want to create
# duplicate emails due to notifications
self.pool.get('mail.message').write(cr, uid, [new_msg_id], {'partner_ids': partner_ids}, context=context)
return thread_id
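# Illustrative usage (assumption, not from the original source): server-side
# code handling the mail gateway would typically feed the raw RFC2822 message
# string to message_process() and let message_route() pick the target;
# `raw_email` and the fallback model 'crm.lead' are examples only.
#
#   thread_obj = self.pool.get('mail.thread')
#   thread_obj.message_process(cr, SUPERUSER_ID, 'crm.lead', raw_email,
#                              save_original=False, strip_attachments=True)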
def message_new(self, cr, uid, msg_dict, custom_values=None, context=None):
"""Called by ``message_process`` when a new message is received
for a given thread model, if the message did not belong to
an existing thread.
The default behavior is to create a new record of the corresponding
model (based on some very basic info extracted from the message).
Additional behavior may be implemented by overriding this method.
:param dict msg_dict: a map containing the email details and
attachments. See ``message_process`` and
``mail.message.parse`` for details.
:param dict custom_values: optional dictionary of additional
field values to pass to create()
when creating the new thread record.
Be careful, these values may override
any other values coming from the message.
:param dict context: if a ``thread_model`` value is present
in the context, its value will be used
to determine the model of the record
to create (instead of the current model).
:rtype: int
:return: the id of the newly created thread object
"""
if context is None:
context = {}
data = {}
if isinstance(custom_values, dict):
data = custom_values.copy()
model = context.get('thread_model') or self._name
model_pool = self.pool.get(model)
fields = model_pool.fields_get(cr, uid, context=context)
if 'name' in fields and not data.get('name'):
data['name'] = msg_dict.get('subject', '')
res_id = model_pool.create(cr, uid, data, context=context)
return res_id
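# Hedged example (assumption): models inheriting mail.thread commonly override
# message_new() to map e-mail fields onto their own columns before delegating
# to this default implementation; `my_model` is a placeholder class name.
#
#   def message_new(self, cr, uid, msg_dict, custom_values=None, context=None):
#       defaults = {'name': msg_dict.get('subject') or _('No Subject'),
#                   'email_from': msg_dict.get('from')}
#       defaults.update(custom_values or {})
#       return super(my_model, self).message_new(cr, uid, msg_dict,
#                                                custom_values=defaults,
#                                                context=context)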
def message_update(self, cr, uid, ids, msg_dict, update_vals=None, context=None):
"""Called by ``message_process`` when a new message is received
for an existing thread. The default behavior is to update the record
with update_vals taken from the incoming email.
Additional behavior may be implemented by overriding this
method.
:param dict msg_dict: a map containing the email details and
attachments. See ``message_process`` and
``mail.message.parse()`` for details.
:param dict update_vals: a dict containing values to update records
given their ids; if the dict is None or is
void, no write operation is performed.
"""
if update_vals:
self.write(cr, uid, ids, update_vals, context=context)
return True
def _message_extract_payload(self, message, save_original=False):
"""Extract body as HTML and attachments from the mail message"""
attachments = []
body = u''
if save_original:
attachments.append(('original_email.eml', message.as_string()))
if not message.is_multipart() or 'text/' in message.get('content-type', ''):
encoding = message.get_content_charset()
body = message.get_payload(decode=True)
body = tools.ustr(body, encoding, errors='replace')
if message.get_content_type() == 'text/plain':
# text/plain -> <pre/>
body = tools.append_content_to_html(u'', body, preserve=True)
else:
alternative = (message.get_content_type() == 'multipart/alternative')
for part in message.walk():
if part.get_content_maintype() == 'multipart':
continue # skip container
filename = part.get_filename() # None if normal part
encoding = part.get_content_charset() # None if attachment
# 1) Explicit Attachments -> attachments
if filename or part.get('content-disposition', '').strip().startswith('attachment'):
attachments.append((filename or 'attachment', part.get_payload(decode=True)))
continue
# 2) text/plain -> <pre/>
if part.get_content_type() == 'text/plain' and (not alternative or not body):
body = tools.append_content_to_html(body, tools.ustr(part.get_payload(decode=True),
encoding, errors='replace'), preserve=True)
# 3) text/html -> raw
elif part.get_content_type() == 'text/html':
html = tools.ustr(part.get_payload(decode=True), encoding, errors='replace')
if alternative:
body = html
else:
body = tools.append_content_to_html(body, html, plaintext=False)
# 4) Anything else -> attachment
else:
attachments.append((filename or 'attachment', part.get_payload(decode=True)))
return body, attachments
def message_parse(self, cr, uid, message, save_original=False, context=None):
"""Parses a string or email.message.Message representing an
RFC-2822 email, and returns a generic dict holding the
message details.
:param message: the message to parse
:type message: email.message.Message | string | unicode
:param bool save_original: whether the returned dict
should include an ``original`` attachment containing
the source of the message
:rtype: dict
:return: A dict with the following structure, where each
field may not be present if missing in original
message::
{ 'message_id': msg_id,
'subject': subject,
'from': from,
'to': to,
'cc': cc,
'body': unified_body,
'attachments': [('file1', 'bytes'),
('file2', 'bytes')]
}
"""
msg_dict = {
'type': 'email',
'author_id': False,
}
if not isinstance(message, Message):
if isinstance(message, unicode):
# Warning: message_from_string doesn't always work correctly on unicode,
# we must use utf-8 strings here :-(
message = message.encode('utf-8')
message = email.message_from_string(message)
message_id = message['message-id']
if not message_id:
# Very unusual situation, but we should be fault-tolerant here
message_id = "<%s@localhost>" % time.time()
_logger.debug('Parsing Message without message-id, generating a random one: %s', message_id)
msg_dict['message_id'] = message_id
if message.get('Subject'):
msg_dict['subject'] = decode(message.get('Subject'))
# Envelope fields not stored in mail.message but made available for message_new()
msg_dict['from'] = decode(message.get('from'))
msg_dict['to'] = decode(message.get('to'))
msg_dict['cc'] = decode(message.get('cc'))
if message.get('From'):
author_ids = self._message_find_partners(cr, uid, message, ['From'], context=context)
if author_ids:
msg_dict['author_id'] = author_ids[0]
msg_dict['email_from'] = decode(message.get('from'))
partner_ids = self._message_find_partners(cr, uid, message, ['To', 'Cc'], context=context)
msg_dict['partner_ids'] = [(4, partner_id) for partner_id in partner_ids]
if message.get('Date'):
try:
date_hdr = decode(message.get('Date'))
parsed_date = dateutil.parser.parse(date_hdr, fuzzy=True)
if parsed_date.utcoffset() is None:
# naive datetime, so we arbitrarily decide to make it
# UTC, there's no better choice. Should not happen,
# as RFC2822 requires timezone offset in Date headers.
stored_date = parsed_date.replace(tzinfo=pytz.utc)
else:
stored_date = parsed_date.astimezone(tz=pytz.utc)
except Exception:
_logger.warning('Failed to parse Date header %r in incoming mail '
'with message-id %r, assuming current date/time.',
message.get('Date'), message_id)
stored_date = datetime.datetime.now()
msg_dict['date'] = stored_date.strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT)
if message.get('In-Reply-To'):
parent_ids = self.pool.get('mail.message').search(cr, uid, [('message_id', '=', decode(message['In-Reply-To']))])
if parent_ids:
msg_dict['parent_id'] = parent_ids[0]
if message.get('References') and 'parent_id' not in msg_dict:
parent_ids = self.pool.get('mail.message').search(cr, uid, [('message_id', 'in',
[x.strip() for x in decode(message['References']).split()])])
if parent_ids:
msg_dict['parent_id'] = parent_ids[0]
msg_dict['body'], msg_dict['attachments'] = self._message_extract_payload(message, save_original=save_original)
return msg_dict
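# Illustrative sketch (assumption, values are made up): a minimal dict produced
# by message_parse() for a simple reply, as consumed by message_post() and
# message_new(); fields that are missing from the e-mail are simply absent.
#
#   {'type': 'email', 'author_id': 7, 'message_id': '<[email protected]>',
#    'subject': 'Re: Quotation', 'from': 'Bob <[email protected]>',
#    'to': '[email protected]', 'email_from': 'Bob <[email protected]>',
#    'partner_ids': [(4, 7)], 'date': '2013-01-01 12:00:00',
#    'parent_id': 12, 'body': '<pre>Thanks!</pre>', 'attachments': []}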
#------------------------------------------------------
# Note specific
#------------------------------------------------------
def log(self, cr, uid, id, message, secondary=False, context=None):
_logger.warning("log() is deprecated. As this module inherit from "\
"mail.thread, the message will be managed by this "\
"module instead of by the res.log mechanism. Please "\
"use mail_thread.message_post() instead of the "\
"now deprecated res.log.")
self.message_post(cr, uid, [id], message, context=context)
def _message_add_suggested_recipient(self, cr, uid, result, obj, partner=None, email=None, reason='', context=None):
""" Called by message_get_suggested_recipients, to add a suggested
recipient in the result dictionary. The form is :
partner_id, partner_name<partner_email> or partner_name, reason """
if email and not partner:
# get partner info from email
partner_info = self.message_get_partner_info_from_emails(cr, uid, [email], context=context, res_id=obj.id)[0]
if partner_info.get('partner_id'):
partner = self.pool.get('res.partner').browse(cr, SUPERUSER_ID, [partner_info.get('partner_id')], context=context)[0]
if email and email in [val[1] for val in result[obj.id]]: # already existing email -> skip
return result
if partner and partner in obj.message_follower_ids: # recipient already in the followers -> skip
return result
if partner and partner in [val[0] for val in result[obj.id]]: # already existing partner ID -> skip
return result
if partner and partner.email: # complete profile: id, name <email>
result[obj.id].append((partner.id, '%s<%s>' % (partner.name, partner.email), reason))
elif partner: # incomplete profile: id, name
result[obj.id].append((partner.id, '%s' % (partner.name), reason))
else: # unknown partner, we are probably managing an email address
result[obj.id].append((False, email, reason))
return result
def message_get_suggested_recipients(self, cr, uid, ids, context=None):
""" Returns suggested recipients for ids. Those are a list of
tuple (partner_id, partner_name, reason), to be managed by Chatter. """
result = dict((res_id, []) for res_id in ids)  # one independent list per id
if self._all_columns.get('user_id'):
for obj in self.browse(cr, SUPERUSER_ID, ids, context=context): # SUPERUSER because of a read on res.users that would crash otherwise
if not obj.user_id or not obj.user_id.partner_id:
continue
self._message_add_suggested_recipient(cr, uid, result, obj, partner=obj.user_id.partner_id, reason=self._all_columns['user_id'].column.string, context=context)
return result
def message_get_partner_info_from_emails(self, cr, uid, emails, link_mail=False, context=None, res_id=None):
""" Wrapper with weird order parameter because of 7.0 fix.
TDE TODO: remove me in 8.0 """
return self.message_find_partner_from_emails(cr, uid, res_id, emails, link_mail=link_mail, context=context)
def message_find_partner_from_emails(self, cr, uid, id, emails, link_mail=False, context=None):
""" Convert a list of emails into a list partner_ids and a list
new_partner_ids. The return value is non conventional because
it is meant to be used by the mail widget.
:return dict: partner_ids and new_partner_ids
TDE TODO: merge me with other partner finding methods in 8.0 """
mail_message_obj = self.pool.get('mail.message')
partner_obj = self.pool.get('res.partner')
result = list()
if id and self._name != 'mail.thread':
obj = self.browse(cr, SUPERUSER_ID, id, context=context)
else:
obj = None
for email in emails:
partner_info = {'full_name': email, 'partner_id': False}
m = re.search(r"((.+?)\s*<)?([^<>]+@[^<>]+)>?", email, re.IGNORECASE | re.DOTALL)
if not m:
continue
email_address = m.group(3)
# first try: check in document's followers
if obj:
for follower in obj.message_follower_ids:
if follower.email == email_address:
partner_info['partner_id'] = follower.id
# second try: check in partners
if not partner_info.get('partner_id'):
ids = partner_obj.search(cr, SUPERUSER_ID, [('email', 'ilike', email_address), ('user_ids', '!=', False)], limit=1, context=context)
if not ids:
ids = partner_obj.search(cr, SUPERUSER_ID, [('email', 'ilike', email_address)], limit=1, context=context)
if ids:
partner_info['partner_id'] = ids[0]
result.append(partner_info)
# link mail with this from mail to the new partner id
if link_mail and partner_info['partner_id']:
message_ids = mail_message_obj.search(cr, SUPERUSER_ID, [
'|',
('email_from', '=', email),
('email_from', 'ilike', '<%s>' % email),
('author_id', '=', False)
], context=context)
if message_ids:
mail_message_obj.write(cr, SUPERUSER_ID, message_ids, {'author_id': partner_info['partner_id']}, context=context)
return result
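# Illustrative sketch (assumption): resolving raw e-mail strings into partner
# info for the Chatter widget; `rec_id` and the partner id 42 are hypothetical.
#
#   self.message_find_partner_from_emails(cr, uid, rec_id,
#       ['Jane Doe <[email protected]>'], link_mail=False, context=context)
#   # -> [{'full_name': 'Jane Doe <[email protected]>', 'partner_id': 42}]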
def message_post(self, cr, uid, thread_id, body='', subject=None, type='notification',
subtype=None, parent_id=False, attachments=None, context=None,
content_subtype='html', **kwargs):
""" Post a new message in an existing thread, returning the new
mail.message ID.
:param int thread_id: thread ID to post into, or list with one ID;
if False/0, mail.message model will also be set as False
:param str body: body of the message, usually raw HTML that will
be sanitized
:param str type: see mail_message.type field
:param str content_subtype: if 'plaintext', convert the body into HTML
:param int parent_id: handle reply to a previous message by adding the
parent partners to the message in case of private discussion
:param list(tuple(str,str)) attachments: list of attachment tuples in the form
``(name, content)``, where content is NOT base64 encoded
Extra keyword arguments will be used as default column values for the
new mail.message record. Special cases:
- attachment_ids: attachments not yet linked to any document; they will be
attached to the related document. Should only be set by Chatter.
:return int: ID of newly created mail.message
"""
if context is None:
context = {}
if attachments is None:
attachments = {}
mail_message = self.pool.get('mail.message')
ir_attachment = self.pool.get('ir.attachment')
assert (not thread_id) or \
isinstance(thread_id, (int, long)) or \
(isinstance(thread_id, (list, tuple)) and len(thread_id) == 1), \
"Invalid thread_id; should be 0, False, an ID or a list with one ID"
if isinstance(thread_id, (list, tuple)):
thread_id = thread_id[0]
# if we're processing a message directly coming from the gateway, the destination model was
# set in the context.
model = False
if thread_id:
model = context.get('thread_model', self._name) if self._name == 'mail.thread' else self._name
if model != self._name:
del context['thread_model']
return self.pool.get(model).message_post(cr, uid, thread_id, body=body, subject=subject, type=type, subtype=subtype, parent_id=parent_id, attachments=attachments, context=context, content_subtype=content_subtype, **kwargs)
# 0: Parse email-from, try to find a better author_id based on document's followers for incoming emails
email_from = kwargs.get('email_from')
if email_from and thread_id and type == 'email' and kwargs.get('author_id'):
email_list = tools.email_split(email_from)
doc = self.browse(cr, uid, thread_id, context=context)
if email_list and doc:
author_ids = self.pool.get('res.partner').search(cr, uid, [
('email', 'ilike', email_list[0]),
('id', 'in', [f.id for f in doc.message_follower_ids])
], limit=1, context=context)
if author_ids:
kwargs['author_id'] = author_ids[0]
author_id = kwargs.get('author_id')
if author_id is None: # keep False values
author_id = self.pool.get('mail.message')._get_default_author(cr, uid, context=context)
# 1: Handle content subtype: if plaintext, convert it into HTML
if content_subtype == 'plaintext':
body = tools.plaintext2html(body)
# 2: Private message: add recipients (recipients and author of parent message) - current author
# + legacy-code management (! we manage only 4 and 6 commands)
partner_ids = set()
kwargs_partner_ids = kwargs.pop('partner_ids', [])
for partner_id in kwargs_partner_ids:
if isinstance(partner_id, (list, tuple)) and partner_id[0] == 4 and len(partner_id) == 2:
partner_ids.add(partner_id[1])
if isinstance(partner_id, (list, tuple)) and partner_id[0] == 6 and len(partner_id) == 3:
partner_ids |= set(partner_id[2])
elif isinstance(partner_id, (int, long)):
partner_ids.add(partner_id)
else:
pass # we do not manage anything else
if parent_id and not model:
parent_message = mail_message.browse(cr, uid, parent_id, context=context)
private_followers = set([partner.id for partner in parent_message.partner_ids])
if parent_message.author_id:
private_followers.add(parent_message.author_id.id)
private_followers -= set([author_id])
partner_ids |= private_followers
# 3. Attachments
# - HACK TDE FIXME: Chatter: attachments linked to the document (not done JS-side), load the message
attachment_ids = kwargs.pop('attachment_ids', []) or [] # because we could receive None (some old code sends None)
if attachment_ids:
filtered_attachment_ids = ir_attachment.search(cr, SUPERUSER_ID, [
('res_model', '=', 'mail.compose.message'),
('create_uid', '=', uid),
('id', 'in', attachment_ids)], context=context)
if filtered_attachment_ids:
ir_attachment.write(cr, SUPERUSER_ID, filtered_attachment_ids, {'res_model': model, 'res_id': thread_id}, context=context)
attachment_ids = [(4, id) for id in attachment_ids]
# Handle attachments parameter, that is a dictionary of attachments
for name, content in attachments:
if isinstance(content, unicode):
content = content.encode('utf-8')
data_attach = {
'name': name,
'datas': base64.b64encode(str(content)),
'datas_fname': name,
'description': name,
'res_model': model,
'res_id': thread_id,
}
attachment_ids.append((0, 0, data_attach))
# 4: mail.message.subtype
subtype_id = False
if subtype:
if '.' not in subtype:
subtype = 'mail.%s' % subtype
ref = self.pool.get('ir.model.data').get_object_reference(cr, uid, *subtype.split('.'))
subtype_id = ref and ref[1] or False
# automatically subscribe recipients if asked to
if context.get('mail_post_autofollow') and thread_id and partner_ids:
partner_to_subscribe = partner_ids
if context.get('mail_post_autofollow_partner_ids'):
partner_to_subscribe = filter(lambda item: item in context.get('mail_post_autofollow_partner_ids'), partner_ids)
self.message_subscribe(cr, uid, [thread_id], list(partner_to_subscribe), context=context)
# _mail_flat_thread: automatically set free messages to the first posted message
if self._mail_flat_thread and not parent_id and thread_id:
message_ids = mail_message.search(cr, uid, ['&', ('res_id', '=', thread_id), ('model', '=', model)], context=context, order="id ASC", limit=1)
parent_id = message_ids and message_ids[0] or False
# we want to set a parent: force to set the parent_id to the oldest ancestor, to avoid having more than 1 level of thread
elif parent_id:
message_ids = mail_message.search(cr, SUPERUSER_ID, [('id', '=', parent_id), ('parent_id', '!=', False)], context=context)
# avoid loops when finding ancestors
processed_list = []
if message_ids:
message = mail_message.browse(cr, SUPERUSER_ID, message_ids[0], context=context)
while (message.parent_id and message.parent_id.id not in processed_list):
processed_list.append(message.parent_id.id)
message = message.parent_id
parent_id = message.id
values = kwargs
values.update({
'author_id': author_id,
'model': model,
'res_id': thread_id or False,
'body': body,
'subject': subject or False,
'type': type,
'parent_id': parent_id,
'attachment_ids': attachment_ids,
'subtype_id': subtype_id,
'partner_ids': [(4, pid) for pid in partner_ids],
})
# Avoid warnings about non-existing fields
for x in ('from', 'to', 'cc'):
values.pop(x, None)
# Create and auto subscribe the author
msg_id = mail_message.create(cr, uid, values, context=context)
message = mail_message.browse(cr, uid, msg_id, context=context)
if message.author_id and thread_id and type != 'notification' and not context.get('mail_create_nosubscribe'):
self.message_subscribe(cr, uid, [thread_id], [message.author_id.id], context=context)
return msg_id
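# Illustrative usage (assumption, values are examples only): posting a comment
# with an in-memory attachment on a record of the inheriting model; `record_id`
# and `pdf_bytes` are placeholders.
#
#   self.message_post(cr, uid, [record_id],
#                     body='<p>Order confirmed</p>', subject='Confirmation',
#                     type='comment', subtype='mail.mt_comment',
#                     attachments=[('order.pdf', pdf_bytes)],
#                     context=context)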
#------------------------------------------------------
# Compatibility methods: do not use
# TDE TODO: remove me in 8.0
#------------------------------------------------------
def message_create_partners_from_emails(self, cr, uid, emails, context=None):
return {'partner_ids': [], 'new_partner_ids': []}
def message_post_user_api(self, cr, uid, thread_id, body='', parent_id=False,
attachment_ids=None, content_subtype='plaintext',
context=None, **kwargs):
return self.message_post(cr, uid, thread_id, body=body, parent_id=parent_id,
attachment_ids=attachment_ids, content_subtype=content_subtype,
context=context, **kwargs)
#------------------------------------------------------
# Followers API
#------------------------------------------------------
def message_get_subscription_data(self, cr, uid, ids, context=None):
""" Wrapper to get subtypes data. """
return self._get_subscription_data(cr, uid, ids, None, None, context=context)
def message_subscribe_users(self, cr, uid, ids, user_ids=None, subtype_ids=None, context=None):
""" Wrapper on message_subscribe, using users. If user_ids is not
provided, subscribe uid instead. """
if user_ids is None:
user_ids = [uid]
partner_ids = [user.partner_id.id for user in self.pool.get('res.users').browse(cr, uid, user_ids, context=context)]
return self.message_subscribe(cr, uid, ids, partner_ids, subtype_ids=subtype_ids, context=context)
def message_subscribe(self, cr, uid, ids, partner_ids, subtype_ids=None, context=None):
""" Add partners to the records followers. """
user_pid = self.pool.get('res.users').read(cr, uid, uid, ['partner_id'], context=context)['partner_id'][0]
if set(partner_ids) == set([user_pid]):
try:
self.check_access_rights(cr, uid, 'read')
except (osv.except_osv, orm.except_orm):
return
else:
self.check_access_rights(cr, uid, 'write')
self.write(cr, SUPERUSER_ID, ids, {'message_follower_ids': [(4, pid) for pid in partner_ids]}, context=context)
# if subtypes are not specified (and not set to a void list), fetch default ones
if subtype_ids is None:
subtype_obj = self.pool.get('mail.message.subtype')
subtype_ids = subtype_obj.search(cr, uid, [('default', '=', True), '|', ('res_model', '=', self._name), ('res_model', '=', False)], context=context)
# update the subscriptions
fol_obj = self.pool.get('mail.followers')
fol_ids = fol_obj.search(cr, SUPERUSER_ID, [('res_model', '=', self._name), ('res_id', 'in', ids), ('partner_id', 'in', partner_ids)], context=context)
fol_obj.write(cr, SUPERUSER_ID, fol_ids, {'subtype_ids': [(6, 0, subtype_ids)]}, context=context)
return True
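# Hedged example (assumption): subscribing a partner with only the standard
# 'mail.mt_comment' subtype instead of the defaults fetched above; `rec_id`
# and `partner_id` are placeholders.
#
#   ref = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'mail', 'mt_comment')
#   self.message_subscribe(cr, uid, [rec_id], [partner_id],
#                          subtype_ids=[ref[1]], context=context)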
def message_unsubscribe_users(self, cr, uid, ids, user_ids=None, context=None):
""" Wrapper on message_subscribe, using users. If user_ids is not
provided, unsubscribe uid instead. """
if user_ids is None:
user_ids = [uid]
partner_ids = [user.partner_id.id for user in self.pool.get('res.users').browse(cr, uid, user_ids, context=context)]
return self.message_unsubscribe(cr, uid, ids, partner_ids, context=context)
def message_unsubscribe(self, cr, uid, ids, partner_ids, context=None):
""" Remove partners from the records followers. """
user_pid = self.pool.get('res.users').read(cr, uid, uid, ['partner_id'], context=context)['partner_id'][0]
if set(partner_ids) == set([user_pid]):
self.check_access_rights(cr, uid, 'read')
else:
self.check_access_rights(cr, uid, 'write')
return self.write(cr, SUPERUSER_ID, ids, {'message_follower_ids': [(3, pid) for pid in partner_ids]}, context=context)
def _message_get_auto_subscribe_fields(self, cr, uid, updated_fields, auto_follow_fields=['user_id'], context=None):
""" Returns the list of relational fields linking to res.users that should
trigger an auto subscribe. The default list checks for the fields
- called 'user_id'
- linking to res.users
- with track_visibility set
In OpenERP V7, this is sufficient for all major addons such as opportunity,
project, issue, recruitment, sale.
Override this method if a custom behavior is needed about fields
that automatically subscribe users.
"""
user_field_lst = []
for name, column_info in self._all_columns.items():
if name in auto_follow_fields and name in updated_fields and getattr(column_info.column, 'track_visibility', False) and column_info.column._obj == 'res.users':
user_field_lst.append(name)
return user_field_lst
def message_auto_subscribe(self, cr, uid, ids, updated_fields, context=None):
"""
1. fetch project subtype related to task (parent_id.res_model = 'project.task')
2. for each project subtype: subscribe the follower to the task
"""
subtype_obj = self.pool.get('mail.message.subtype')
follower_obj = self.pool.get('mail.followers')
# fetch auto_follow_fields
user_field_lst = self._message_get_auto_subscribe_fields(cr, uid, updated_fields, context=context)
# fetch related record subtypes
related_subtype_ids = subtype_obj.search(cr, uid, ['|', ('res_model', '=', False), ('parent_id.res_model', '=', self._name)], context=context)
subtypes = subtype_obj.browse(cr, uid, related_subtype_ids, context=context)
default_subtypes = [subtype for subtype in subtypes if subtype.res_model == False]
related_subtypes = [subtype for subtype in subtypes if subtype.res_model != False]
relation_fields = set([subtype.relation_field for subtype in subtypes if subtype.relation_field != False])
if (not related_subtypes or not any(relation in updated_fields for relation in relation_fields)) and not user_field_lst:
return True
for record in self.browse(cr, uid, ids, context=context):
new_followers = dict()
parent_res_id = False
parent_model = False
for subtype in related_subtypes:
if not subtype.relation_field or not subtype.parent_id:
continue
if not subtype.relation_field in self._columns or not getattr(record, subtype.relation_field, False):
continue
parent_res_id = getattr(record, subtype.relation_field).id
parent_model = subtype.res_model
follower_ids = follower_obj.search(cr, SUPERUSER_ID, [
('res_model', '=', parent_model),
('res_id', '=', parent_res_id),
('subtype_ids', 'in', [subtype.id])
], context=context)
for follower in follower_obj.browse(cr, SUPERUSER_ID, follower_ids, context=context):
new_followers.setdefault(follower.partner_id.id, set()).add(subtype.parent_id.id)
if parent_res_id and parent_model:
for subtype in default_subtypes:
follower_ids = follower_obj.search(cr, SUPERUSER_ID, [
('res_model', '=', parent_model),
('res_id', '=', parent_res_id),
('subtype_ids', 'in', [subtype.id])
], context=context)
for follower in follower_obj.browse(cr, SUPERUSER_ID, follower_ids, context=context):
new_followers.setdefault(follower.partner_id.id, set()).add(subtype.id)
# add followers coming from res.users relational fields that are tracked
user_ids = [getattr(record, name).id for name in user_field_lst if getattr(record, name)]
user_id_partner_ids = [user.partner_id.id for user in self.pool.get('res.users').browse(cr, SUPERUSER_ID, user_ids, context=context)]
for partner_id in user_id_partner_ids:
new_followers.setdefault(partner_id, None)
for pid, subtypes in new_followers.items():
subtypes = list(subtypes) if subtypes is not None else None
self.message_subscribe(cr, uid, [record.id], [pid], subtypes, context=context)
# find the first email message and mark it unread for the auto-subscribed partners so that they get a notification
if user_id_partner_ids:
msg_ids = self.pool.get('mail.message').search(cr, uid, [
('model', '=', self._name),
('res_id', '=', record.id),
('type', '=', 'email')], limit=1, context=context)
if not msg_ids and record.message_ids:
msg_ids = [record.message_ids[-1].id]
if msg_ids:
self.pool.get('mail.notification')._notify(cr, uid, msg_ids[0], partners_to_notify=user_id_partner_ids, context=context)
return True
#------------------------------------------------------
# Thread state
#------------------------------------------------------
def message_mark_as_unread(self, cr, uid, ids, context=None):
""" Set as unread. """
partner_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).partner_id.id
cr.execute('''
UPDATE mail_notification SET
read=false
WHERE
message_id IN (SELECT id from mail_message where res_id=any(%s) and model=%s limit 1) and
partner_id = %s
''', (ids, self._name, partner_id))
return True
def message_mark_as_read(self, cr, uid, ids, context=None):
""" Set as read. """
partner_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).partner_id.id
cr.execute('''
UPDATE mail_notification SET
read=true
WHERE
message_id IN (SELECT id FROM mail_message WHERE res_id=ANY(%s) AND model=%s) AND
partner_id = %s
''', (ids, self._name, partner_id))
return True
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# CoMFoRT: a COntent Management system FOr Researchers and Teachers!
#
# Copyright (C) 2008 Projet2-L3IF ENS Lyon.
#
# Contributors:
# * Jean-Alexandre Angles d'Auriac
# * Gabriel Beaulieu
# * Valentin Blot
# * Pierre Boutillier
# * Nicolas Brunie
# * Aloïs Brunel
# * Vincent Delaitre
# * Antoine Frénoy
# * Mathias Gaunard
# * Guilhem Jaber
# * Timo Jolivet
# * Jonas Lefèvre
# * Bastien Le Gloannec
# * Anne-Laure Mouly
# * Kevin Perrot
# * Jonathan Protzenko
# * Gabriel Renault
# * Philippe Robert
# * Pierre Roux
# * Abdallah Saffidine
# * David Salinas
# * Félix Sipma
# * Alexandra Sourisseau
# * Samuel Vaiter
# * Guillaume Vors
#
# Contact us with : [email protected]
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>."
#
## TODO:
##  - formatting / layout
##  - add photo support
from module_interfaces import *
from conf import confstatic
import conf_general, conf_private
from interface import class_forms
from db import db_manager
import re
class TheModule(ComfortModule, IModuleContentProvider, IModuleDB, IModuleAdminPage):
mode = "both"
def module_init(self):
self.table_prefix = "coord"
pass
def module_admin_info(self):
return "Permet d'ajouter vos coordonnées."
def module_title(self):
return ""
def module_name(self):
return _("Coordonnées")
def module_id(self):
return "Coord"
def init_content(self):
pass
def handle_admin_request(self, params, fields):
try:
form = params["form"]
except:
form=""
## Adding a coord set in the database
# Ajout d'un jeu de coordonnées dans la BDD
if form == "add_item":
if fields.has_key("item_title"):
record = {'f_name' : fields['item_f_name'].value, \
'l_name' : fields['item_l_name'].value, \
'tel1' : fields['item_tel1'].value, \
'tel2' : fields['item_tel2'].value, \
'tel1_text' : fields['item_tel1_text'].value, \
'tel2_text' : fields['item_tel2_text'].value, \
'addr_perso' : fields['item_addr_perso'].value, \
'addr_work' : fields['item_addr_work'].value, \
'mail' : fields['item_mail'].value, \
'fax' : fields['item_fax'].value, \
'labo' : fields['item_labo'].value, \
'title' : fields['item_title'].value, \
'labo_url' : fields['item_labo_url'].value}
table_db = self.db.table(self.table_prefix+"_items")
table_db.insert(record)
return "admin.py?id=Coord"
else:
return "handler.py"
## Deletion of some coords
# effacement d'un jeu de coordonnées
elif form == "del_item":
if fields.has_key("item_id"):
table = self.db.table(self.table_prefix+"_items")
table.delete([("id", "=", fields["item_id"].value)])
return "admin.py?id=Coord"
else:
return "handler.py"
## Coords edition
# edition d'un jeu de coordonnée
elif form == "edit_item":
if fields.has_key("item_id"):
record = {'f_name' : fields['item_f_name'].value, \
'l_name' : fields['item_l_name'].value, \
'tel1' : fields['item_tel1'].value, \
'tel2' : fields['item_tel2'].value, \
'tel1_text' : fields['item_tel1_text'].value, \
'tel2_text' : fields['item_tel2_text'].value, \
'addr_perso' : fields['item_addr_perso'].value, \
'addr_work' : fields['item_addr_work'].value, \
'mail' : fields['item_mail'].value, \
'fax' : fields['item_fax'].value, \
'labo' : fields['item_labo'].value, \
'title' : fields['item_title'].value, \
'labo_url' : fields['item_labo_url'].value}
table_db = self.db.table(self.table_prefix+"_items")
table_db.update(record, [("id", "=", fields["item_id"].value)])
return "admin.py?id=Coord"
else:
return "handler.py"
return "admin.py"
## Form to administrate the coords
# Le formulaire d'administration des coordonnées
def generate_admin_xhtml(self, form_page):
main = 0
## Options list
# Liste des options
try:
if form_page.params["form"] == "add_item":
main = 1
elif form_page.params["form"] == "edit_item":
main = 1
except:
pass
title=_("Module coordonnées")
form_page.page = class_forms.Page(title,conf_general.style)
title_content = form_page.page.add_complex_option_tag("div",[("id","title")])
title_content.add_text(title)
form_page.gen_nav()
## The whole module menu:
# Tout le menu du module:
body_content = form_page.page.add_complex_option_tag("div",[("class","body_class")])
body = body_content.add_complex_tag("fieldset")
body.add_simple_tag("legend",_("Opérations sur les jeux de coordonnées"))
table_f = body.add_table(0,0,5)
## News manager button
# Bouton de gestion des news
form_main = (class_forms.Content("", "")).add_form( "admin.py?id=Coord","post")
form_main.add_input("submit", "submit", _("Gestion générale"))
## Adding news button
# Bouton d'ajout d'une news
form_add = (class_forms.Content("", "")).add_form( "admin.py?id=Coord&form=add_item","post")
form_add.add_input("submit", "submit", _("Ajouter un jeu de coordonnées"))
table_f.add_line([("td", [], form_main), ("td", [], form_add)])
p = form_page.page.add_complex_tag("p")
p.add_br()
# ___________________________________________________________________ #
# #
## Main form ##
# Formulaire principal #
# ___________________________________________________________________ #
if main == 0:
body_content = form_page.page.add_complex_option_tag("div",[("class","body_class")])
body = body_content.add_complex_tag("fieldset")
body.add_simple_tag("legend",_("Coordonnées enregistrées"))
## Retrieve all the coord
# On recupere tous les jeux de coordonnées
table_db = self.db.table(self.table_prefix + "_items")
coord_db = table_db.select(cond = [], order = [("id",True)])
if len(coord_db) == 0:
body.add_simple_tag("h3", _("Il n'y a aucun jeu de coordonnées"))
else:
if len(coord_db) == 1:
body.add_simple_tag("h3", _("Il y a ")+"1"+ _(" jeu de coordonnées"))
else:
body.add_simple_tag("h3", _("Il y a ")+str(len(coord_db))+ _(" jeux de coordonnées"))
table_f = body.add_table(2,0,4)
table_f.add_line([("th",[("align","center")], class_forms.Text(_("Titre"))), \
("th",[("align","center")], class_forms.Text(_("Tel1"))), \
("th",[("align","center")], class_forms.Text(_("Tel2"))),\
("th",[("align","center")], class_forms.Text(_("mail"))),\
("th",[("align","center")], class_forms.Text(_("fax"))),\
("th",[("align","center")], class_forms.Text(_("labo")))\
])
for coord in coord_db:
## The nice buttons on the right
# Les p'tits boutons mignons de la droite
commands = (class_forms.Content("", "")).add_table(0, 0, 2)
form_del = (class_forms.Content("", "")).add_form( "adminvalid.py?id=Coord&form=del_item","post")
form_del.add_input("hidden", "item_id", str(coord["id"]))
form_del.add_input("submit", "submit", _("Supprimer"))
form_edit = (class_forms.Content("", "")).add_form( "admin.py?id=Coord&form=edit_item&item_id=" +str(coord["id"]),"post")
form_edit.add_input("submit", "submit", _("Éditer"))
commands.add_line([("td", [], form_del), ("td", [], form_edit)])
table_f.add_line([("td",[("align","left")], class_forms.Text(coord['title'])), \
("td",[("align","left")], class_forms.Text(coord['tel1'])),\
("td",[("align","left")], class_forms.Text(coord['tel2'])),\
("td",[("align","left")], class_forms.Text(coord['mail'])),\
("td",[("align","left")], class_forms.Text(coord['fax'])),\
("td",[("align","left")], class_forms.Text(coord['labo'])),\
("td",[("align","left")], commands) ])
body2 = body.add_complex_tag("p");
body2.add_br()
# ___________________________________________________________________ #
# #
## Adding coord form ##
# Formulaire d'ajout d'un jeu de coordonnées #
# ___________________________________________________________________ #
elif form_page.params['form'] == "add_item":
body_content = form_page.page.add_complex_option_tag("div",[("class","body_class")])
body = body_content.add_complex_tag("fieldset")
body.add_simple_tag("legend",_("Ajouter un jeu de coordonnées"))
## Adding form
# Le formulaire d'ajout
form = body.add_form("adminvalid.py?id=Coord&form=add_item", "post")
p = form.add_complex_tag("p")
addr_work_t = class_forms.Content("","")
addr_work_t = addr_work_t.add_textarea("item_addr_work",10,50)
addr_perso_t = class_forms.Content("","")
addr_perso_t = addr_perso_t.add_textarea("item_addr_perso",10,50)
p.add_input("hidden","item_tel1_text", "")
p.add_input("hidden","item_tel2_text", "")
table_f = p.add_table(0,0,3)
table_f.add_line([("td",[("align","left")], class_forms.Text(_("Titre du jeu de coordonnées")+" : ")), \
("td",[("align","left")], class_forms.Input("text", "item_title", "")) ])
table_f.add_line([("td",[("align","left")], class_forms.Text(_("Nom")+" : ")), \
("td",[("align","left")], class_forms.Input("text", "item_f_name", "")) ])
table_f.add_line([("td",[("align","left")], class_forms.Text(_("Prenom")+" : ")), \
("td",[("align","left")], class_forms.Input("text", "item_l_name", "")) ])
table_f.add_line([("td",[("align","left")], class_forms.Text(_("Telephone 1")+" : ")), \
("td",[("align","left")], class_forms.Input("text", "item_tel1", "")) ])
table_f.add_line([("td",[("align","left")], class_forms.Text(_("Telephone 2")+" : ")), \
("td",[("align","left")], class_forms.Input("text", "item_tel2","")) ])
table_f.add_line([("td",[("align","left")], class_forms.Text(_("Fax")+" : ")), \
("td",[("align","left")], class_forms.Input("text", "item_fax", "")) ])
table_f.add_line([("td",[("align","left")], class_forms.Text(_("Adresse perso")+" : ")), \
("td",[("align","left")], addr_perso_t) ])
table_f.add_line([("td",[("align","left")], class_forms.Text(_("Adresse travail")+" : ")), \
("td",[("align","left")], addr_work_t) ])
table_f.add_line([("td",[("align","left")], class_forms.Text(_("Mail")+" : ")), \
("td",[("align","left")], class_forms.Input("text", "item_mail", "")) ])
table_f.add_line([("td",[("align","left")], class_forms.Text(_("Laboratoire")+" : ")), \
("td",[("align","left")], class_forms.Input("text", "item_labo", "")) ])
table_f.add_line([("td",[("align","left")], class_forms.Text(_("URL du laboratoire")+" : ")), \
("td",[("align","left")], class_forms.Input("text", "item_labo_url", "")) ])
p.add_br()
p.add_input("submit", "submit", _("Ajouter le jeu de coordonnées"))
p.add_br()
# ___________________________________________________________________ #
# #
## Coord edition form ##
# Formulaire d'édition d'un jeu de coordonnées #
# ___________________________________________________________________ #
elif form_page.params['form'] == "edit_item":
body_content = form_page.page.add_complex_option_tag("div",[("class","body_class")])
body = body_content.add_complex_tag("fieldset")
body.add_simple_tag("legend",_("Édition d'un jeu de coordonnées"))
table_db = self.db.table(self.table_prefix+"_items")
coord_db = table_db.select(cond=[("id", "=", form_page.params["item_id"])], order=[("id",True)]).next()
form = body.add_form("adminvalid.py?id=Coord&form=edit_item", "post")
p = form.add_complex_tag("p")
p.add_input("hidden","item_id",form_page.params["item_id"])
p.add_input("hidden","item_tel1_text", coord_db['tel1_text'])
p.add_input("hidden","item_tel2_text", coord_db['tel2_text'])
addr_work_t = class_forms.Content("","")
addr_work_t = addr_work_t.add_textarea("item_addr_work",10,50)
addr_work_t.add_text(coord_db['addr_work'])
addr_perso_t = class_forms.Content("","")
addr_perso_t = addr_perso_t.add_textarea("item_addr_perso",10,50)
addr_perso_t.add_text(coord_db['addr_perso'])
table_f = p.add_table(0,0,3)
table_f.add_line([("td",[("align","left")], class_forms.Text(_("Titre du jeu de coordonnées")+" : ")), \
("td",[("align","left")], class_forms.Input("text", "item_title", coord_db["title"])) ])
table_f.add_line([("td",[("align","left")], class_forms.Text(_("Nom")+" : ")), \
("td",[("align","left")], class_forms.Input("text", "item_f_name", coord_db["f_name"])) ])
table_f.add_line([("td",[("align","left")], class_forms.Text(_("Prenom")+" : ")), \
("td",[("align","left")], class_forms.Input("text", "item_l_name", coord_db["l_name"])) ])
table_f.add_line([("td",[("align","left")], class_forms.Text(_("Telephone 1")+" : ")), \
("td",[("align","left")], class_forms.Input("text", "item_tel1", coord_db["tel1"])) ])
table_f.add_line([("td",[("align","left")], class_forms.Text(_("Telephone 2")+" : ")), \
("td",[("align","left")], class_forms.Input("text", "item_tel2", coord_db["tel2"])) ])
table_f.add_line([("td",[("align","left")], class_forms.Text(_("Fax")+" : ")), \
("td",[("align","left")], class_forms.Input("text", "item_fax", coord_db["fax"])) ])
table_f.add_line([("td",[("align","left")], class_forms.Text(_("Adresse perso")+" : ")), \
("td",[("align","left")], addr_perso_t) ])
table_f.add_line([("td",[("align","left")], class_forms.Text(_("Adresse travail")+" : ")), \
("td",[("align","left")], addr_work_t) ])
table_f.add_line([("td",[("align","left")], class_forms.Text(_("Mail")+" : ")), \
("td",[("align","left")], class_forms.Input("text", "item_mail", coord_db["mail"])) ])
table_f.add_line([("td",[("align","left")], class_forms.Text(_("Laboratoire")+" : ")), \
("td",[("align","left")], class_forms.Input("text", "item_labo", coord_db["labo"])) ])
table_f.add_line([("td",[("align","left")], class_forms.Text(_("URL du laboratoire")+" : ")), \
("td",[("align","left")], class_forms.Input("text", "item_labo_url", coord_db["labo_url"])) ])
p.add_br()
p.add_input("submit", "submit", _("Éditer le jeu de coordonnées"))
## __ End of the page __
# ___ Bas de la page ___
form_page.gen_nav()
# __________________________________________________________________________#
def generate_content_xml(self, args):
ch = ""
## All the fields are retrieved
# On recupere tous les champs
table_db = self.db.table(self.table_prefix+"_items")
## If a specific coord set is on request
# Si un jeu spécifique de coordonnées est demandé
if args.has_key('coord_id'):
coord_db = table_db.select(cond=[("id", "=", args['coord_id']) ],order=[("id",True)])
else:
coord_db = table_db.select(cond=[], order=[("id", True)])
try:
coord = coord_db.next()
ch += " <para>\n"
ch += " <emphasis role='strong'>"+coord['title']+"</emphasis><sbr/>\n"
ch += " "+coord['f_name']+" "+coord['l_name']+"<sbr/>\n"
## Tel
# Téléphone
if coord['tel1'] != "" and coord['tel1_text'] != "":
ch += " <emphasis role='strong'>"+coord['tel1_text']+"</emphasis> "+coord['tel1']+"<sbr/>\n"
elif coord['tel1'] != "":
ch += " <emphasis role='strong'>Tel:</emphasis> "+coord['tel1']+"<sbr/>\n"
if coord['tel2'] != "" and coord['tel2_text'] != "":
ch += " <emphasis role='strong'>"+coord['tel2_text']+"</emphasis> "+coord['tel2']+"\n"
elif coord['tel2'] != "":
ch += " <emphasis role='strong'>Tel:</emphasis> "+coord['tel2']+"<sbr/>\n"
ch += " <para><ulink url='"+coord['labo_url']+"'><emphasis role='strong'>"+coord['labo']+"</emphasis></ulink></para>\n"
if coord['addr_work'] != "":
ch += " <para><emphasis role='strong'>- Travail -</emphasis><sbr/>"+coord['addr_work'].replace('\n', '<sbr/>\n')+"</para>\n"
if coord['addr_perso'] != "":
ch += " <para><emphasis role='strong'>- Personnel -</emphasis><sbr/>"+coord['addr_perso'].replace('\n', '<sbr/>\n')+"</para>\n"
ch += " <para></para><para><emphasis role='strong'>"+coord['mail']+"</emphasis></para>\n"
ch += " </para>\n"
except:
ch = ""
return ch
def setup_db(self, db):
self.db = db
## Coord table creation
# Création de la table des coordonnées
schema = {'f_name' : 'text', \
'l_name' : 'text', \
'tel1' : 'text', \
'tel2' : 'text', \
'tel1_text' : 'text', \
'tel2_text' : 'text', \
'addr_perso' : 'text', \
'addr_work' : 'text', \
'mail' : 'text', \
'title' : 'text', \
'fax' : 'text', \
'labo' : 'text', \
'labo_url' : 'text'}
try:
self.db.table(self.table_prefix+"_items")
except:
self.db.create(self.table_prefix+"_items", schema)
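## Illustrative sketch (assumption, record values are made up): once the table
## exists, records go through the same db_manager API already used in
## handle_admin_request() above:
##
##   table = self.db.table(self.table_prefix + "_items")
##   table.insert({'f_name': 'Ada', 'l_name': 'Lovelace', 'title': 'Bureau',
##                 'tel1': '', 'tel2': '', 'tel1_text': '', 'tel2_text': '',
##                 'addr_perso': '', 'addr_work': '', 'mail': '[email protected]',
##                 'fax': '', 'labo': 'L3IF', 'labo_url': 'http://example.org'})
##   rows = table.select(cond=[("labo", "=", "L3IF")], order=[("id", True)])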
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# MIT License
#
# Copyright (c) 2018 Miha Purg <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#
#
"""
This module contains the QStruct class for handling Q structure files (PDB, mol2).
Additionally, it implements methods for finding and replacing atom
placeholders (e.g. $1.N$)
"""
from __future__ import absolute_import, unicode_literals, division
from six.moves import map
import re
import logging
from collections import namedtuple
from Qpyl.common import raise_or_log
logger = logging.getLogger(__name__)
PosVector = namedtuple("PosVector", ["x", "y", "z"])
_PLACEHOLDER_RE = re.compile(r"\$\S+\.\S+\$")
_COMMENTS_RE = re.compile(r"[#\!].*")
def find_placeholders(inputstring):
"""Find atom placeholders of the form $514.C3$
It ignores comments (characters following # or !)
See also QStruct.convert_placeholders
"""
tmp = re.sub(_COMMENTS_RE, "", inputstring)
return _PLACEHOLDER_RE.findall(tmp)
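# Minimal usage sketch (assumption, the input string is an example): comments
# after '#' or '!' are stripped before matching, so only the first two
# placeholders are returned here.
#
#   find_placeholders("atom  $1.CA$  $LAST.ID$   ! $3.N$ is commented out")
#   # -> ['$1.CA$', '$LAST.ID$']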
class QStructError(Exception):
pass
class QStruct(object):
"""
Class for processing the structure (coordinates)
Args:
filename (path to the structure file)
filetype (type of structure file: 'pdb' or 'mol2')
ignore_errors (boolean): Optional, default is False.\
If set to True, some non-vital\
exceptions are logged instead.
In contrast to QLib and QPrm, the 'read' methods in this
class should not be called since the object should
contain data from only one structure file.
The structure data is stored in three lists:
atoms, residues, molecules
which contain _StructAtom, _StructResidue and _StructMolecule
objects.
"""
def __init__(self, filename, filetype, ignore_errors=False):
self.ignore_errors = ignore_errors
FILE_TYPES = {'pdb': self._read_pdb,
'mol2': self._read_mol2}
self.filetype = filetype.lower()
if self.filetype not in FILE_TYPES:
raise QStructError("Filetype {} not supported. Use {}"
.format(filetype,
" or ".join(FILE_TYPES)))
self.atoms = []
self.residues = []
self.molecules = []
self.filename = filename
# TODO: some sort of lookup hashes if needed
# run the parser function
FILE_TYPES[self.filetype](filename)
# check if we actually got something
for t in ["atoms", "residues", "molecules"]:
if len(self.__dict__[t]) == 0:
raise QStructError("No {} found, check file '{}' and"
" filetype '{}'".format(t,
self.filename,
self.filetype))
def _read_mol2(self, mol2_file):
"""
Read and parse a mol2 file for coordinates.
Args:
mol2_file (string): name/path of file
"""
molecule = None
residue = None
aindex, old_aindex = None, None
section = None
for line in open(mol2_file, 'r').readlines():
if line.startswith("@<TRIPOS>"):
section = line.replace("@<TRIPOS>", "").strip()
if section == "MOLECULE":
if molecule != None:
self.molecules.append(molecule)
molecule = _StructMolecule(self)
continue
if section == "ATOM":
if aindex != None:
old_aindex = aindex
lf = line.split()
aindex, aname = int(lf[0]), lf[1]
x, y, z = map(float, lf[2:5])
rindex = int(lf[6])
rname = lf[7][0:3].upper()
if old_aindex != None and aindex - old_aindex != 1:
raise_or_log("Bad Mol2 format - atom "
"index {} followed by {}"
.format(old_aindex, aindex),
QStructError, logger, self.ignore_errors)
if not residue or residue.index_struct != rindex:
if residue and rindex - residue.index_struct != 1:
raise_or_log("Bad Mol2 format - residue "
"index {} followed by {}"
.format(residue.index_struct, rindex),
QStructError, logger, self.ignore_errors)
residue = _StructResidue(rindex, rname, molecule, self)
self.residues.append(residue)
molecule.add_residue(residue)
atom = _StructAtom(aindex, aname, x, y, z, residue, self)
self.atoms.append(atom)
residue.add_atom(atom)
# append last one after parsing
if molecule != None and len(molecule.residues) > 0:
self.molecules.append(molecule)
def _read_pdb(self, pdb_file):
"""
Read and parse a PDB file for coordinates.
Args:
pdb_file (string): name/path of file
"""
# make a new _StructMolecule object
molecule = _StructMolecule(self)
# parse the PDB file
residue = None
aindex, old_aindex = None, None
for line in open(pdb_file, 'r').readlines():
if line.startswith("ATOM") or line.startswith("HETATM"):
                if aindex is not None:
old_aindex = aindex
aindex = int(line[6:12])
                if old_aindex is not None and aindex - old_aindex != 1:
raise_or_log("Bad PDB format - atom "
"index {} followed by {}"
.format(old_aindex, aindex),
QStructError, logger, self.ignore_errors)
aname = line[12:17].strip()
rname = line[17:20].strip().upper()
rindex = int(line[22:26])
x, y, z = map(float, (line[30:38], line[38:46], line[46:54]))
if not residue or residue.index_struct != rindex:
if residue and rindex - residue.index_struct != 1:
raise_or_log("Bad PDB format - residue "
"index {} followed by {}"
.format(residue.index_struct, rindex),
QStructError, logger, self.ignore_errors)
residue = _StructResidue(rindex, rname, molecule, self)
self.residues.append(residue)
molecule.add_residue(residue)
if aname in [a.name for a in residue.atoms]:
raise_or_log("Bad PDB format - two atoms with same name "
"({}) in residue {}.{}"
"".format(aname, rname, rindex),
QStructError, logger, self.ignore_errors)
atom = _StructAtom(aindex, aname, x, y, z, residue, self)
self.atoms.append(atom)
residue.add_atom(atom)
elif line.startswith("TER") or line.startswith("GAP"):
self.molecules.append(molecule)
residue = None
molecule = _StructMolecule(self)
        # append the last molecule if the file didn't end with TER/GAP
        if molecule is not None and len(molecule.residues) > 0:
self.molecules.append(molecule)
def convert_placeholders(self, inputstring):
"""Convert atom placeholders ($514.C3$) to indexes.
Placeholders are a combination of the residue id and
atom name, encapsulated in $$ - $RESID.ATOM_NAME$
        In addition, there are some special values:
$LAST.ID$ - id of last atom in the system
Arguments:
inputstring (string): string with placeholders (input file contents)
Returns:
outputstring (string): converted string
"""
id_map = {"{}.{}".format(a.residue.index, a.name): str(a.index)
for a in self.atoms}
last_id = "{}.{}".format(self.atoms[-1].residue.index,
self.atoms[-1].name)
outputstring = ""
for line in inputstring.split("\n"):
comment = ""
if "#" in line:
i = line.index("#")
line, comment = line[:i], line[i:]
c = find_placeholders(line)
for pid in c:
pid = pid.strip("$")
pid2 = pid.replace("LAST.ID", last_id)
try:
padding = (len(pid2)+2 - len(id_map[pid2])) * " "
except KeyError:
raise QStructError("Atom '${}$' does not exist in the pdb "
"structure.".format(pid2))
                line = re.sub(r"\$" + pid + r"\$", id_map[pid2] + padding, line)
outputstring += line + comment + "\n"
return outputstring
class _StructAtom(object):
"""Contains structural information for an atom.
Arguments:
index_struct (int): index as written in pdb or mol2
name (string): atom name
x,y,z (float): coordinates
residue (_StructResidue): parent residue object
structure (_QStruct): parent structure object
Property 'index' (int) is the actual 1-based index of the atom
in the atom list (as opposed to index_struct which was read from
the file). It should correspond to the index in the generated topology.
"""
def __init__(self, index_struct, name, x, y, z, residue, structure):
self.index_struct = int(index_struct)
self.name = name
self.coordinates = PosVector(float(x), float(y), float(z))
self.residue = residue
self.structure = structure
@property
def index(self):
return self.structure.atoms.index(self) + 1
def __repr__(self):
res = self.residue
mol = res.molecule
return "_StructAtom: {}.{}.{}".format(mol.index,
res.index,
self.index)
class _StructResidue(object):
"""Contains structural information for a residue.
Arguments:
index_struct (int): index as written in pdb or mol2
name (string): residue name
molecule (_StructMolecule): parent molecule object
structure (_QStruct): parent structure object
Property 'index' (int) is the actual 1-based index of the residue
in the residue list (as opposed to index_struct which was read from
the file). It should correspond to the index in the generated topology.
"""
def __init__(self, index_struct, name, molecule, structure):
self.atoms = []
self.index_struct = int(index_struct)
self.name = name
self.molecule = molecule
self.structure = structure
@property
def index(self):
return self.structure.residues.index(self) + 1
def add_atom(self, atom):
self.atoms.append(atom)
def __repr__(self):
mol = self.molecule
return "_StructResidue: {}.{}{}".format(mol.index,
self.name,
self.index)
class _StructMolecule(object):
"""Contains structural information for a molecule.
Arguments:
structure (_QStruct): parent structure object
Special property is 'index' (int). It is the actual
    1-based index of the molecule in the molecule list (as it was appended).
    It should correspond to the index in the generated topology.
"""
def __init__(self, structure):
self.residues = []
self.structure = structure
@property
def index(self):
return self.structure.molecules.index(self) + 1
def add_residue(self, residue):
self.residues.append(residue)
def __repr__(self):
return "_StructMolecule: {}".format(self.index)
|
# -*- coding: utf-8 -*-
"""
Diagnostic Plots for Pipeline
Author: Patrick O'Brien
Date last updated: February 2017
"""
# Import statements
from glob import glob
import os
import numpy as np
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
from PyPDF2 import PdfFileMerger
import pandas as pd
##### ------------------------------------------------------------------ #####
# Set up flags table
def setup_flags_table(fwhm_file_name):
arr = np.genfromtxt(fwhm_file_name, dtype=None,delimiter='\t')
data = [ [0 for col in range(num_flags)] for row in range(len(arr))]
flags = pd.DataFrame(data, columns = ['Star', 'Exposure','Date', 'Bias1', 'Bias2', 'BlueFlat1','BlueFlat2', 'RedFlat1', 'RedFlat2', 'BluePoly', 'BlueCut', 'RedCut', 'Littrow', 'ExtFWHM', 'ExtProf', 'FitToBack', 'ProfFWHM', 'ProfPos', 'PeakGauss', 'ResponseBlue', 'ResponseRed', 'WaveFitResBlue', 'WaveFitResRed'])
return flags
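# The flags table has one row per line of the FWHM records file (i.e. per exposure);
# all flag columns start at 0 and the diagnostic functions below set a column to 1
# when the corresponding check fails.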
##### ------------------------------------------------------------------ #####
# function that takes file names and organizes by the unique star name in each
def unique_star_names(seq, idfun=None):
# order preserving
if idfun is None:
def idfun(x): return x
seen = {}
result = []
for item in seq:
marker = idfun(item)
if marker in seen: continue
seen[marker] = 1
result.append(item)
return result
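# Illustrative behaviour (star names are hypothetical):
#   unique_star_names(['gd71', 'gd71', 'wd1056']) returns ['gd71', 'wd1056'],
#   preserving first-seen order, unlike a plain set().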
##### ------------------------------------------------------------------ #####
# Calibrations function
def diagnostic_plots_cals(file_name, flags):
date = str(file_name[10:20])
pp = PdfPages('cal_plots.pdf')
pdfs.append('cal_plots.pdf')
arr = np.genfromtxt(file_name, dtype=None, delimiter=' ')
bias_bs, bias_as, bias_std = [],[],[]
flat_blue_bs, flat_blue_std_bs, flat_blue_as, flat_blue_std_as = [],[],[], []
flat_red_bs, flat_red_std_bs, flat_red_as, flat_red_std_as = [],[],[], []
blue_pix, blue_val, blue_poly = [],[],[]
blue_cut_row100, red_cut_row100, junk_zeros = [],[],[]
littrow_pix, littrow_val, fit_pix_lit, fit_lit, masked_edges = [],[],[],[],[]
com_blue_pix, com_blue_flat, blue_poly_fit = [], [], []
com_red_pix, com_red_flat, red_poly_fit = [], [], []
for m in np.arange(len(arr)):
bias_bs.append(arr[m][0])
bias_as.append(arr[m][1])
bias_std.append(arr[m][2])
flat_blue_bs.append(arr[m][3])
flat_blue_std_bs.append(arr[m][4])
flat_blue_as.append(arr[m][5])
flat_blue_std_as.append(arr[m][6])
flat_red_bs.append(arr[m][7])
flat_red_std_bs.append(arr[m][8])
flat_red_as.append(arr[m][9])
flat_red_std_as.append(arr[m][10])
blue_pix.append(arr[m][11])
blue_val.append(arr[m][12])
blue_poly.append(arr[m][13])
blue_cut_row100.append(arr[m][14])
red_cut_row100.append(arr[m][15])
junk_zeros.append(arr[m][16])
littrow_pix.append(arr[m][17])
littrow_val.append(arr[m][18])
fit_pix_lit.append(arr[m][19])
fit_lit.append(arr[m][20])
masked_edges.append(arr[m][21])
com_blue_pix.append(arr[m][22])
com_blue_flat.append(arr[m][23])
blue_poly_fit.append(arr[m][24])
com_red_pix.append(arr[m][25])
com_red_flat.append(arr[m][26])
red_poly_fit.append(arr[m][27])
bias_bs = np.array(bias_bs)
bias_bs = np.trim_zeros(bias_bs, 'b')
bias_as = np.array(bias_as)
bias_as = np.trim_zeros(bias_as, 'b')
bias_std = np.array(bias_std)
bias_std = np.trim_zeros(bias_std, 'b')
flat_blue_bs = np.array(flat_blue_bs)
flat_blue_bs = np.trim_zeros(flat_blue_bs, 'b')
flat_blue_as = np.array(flat_blue_as)
flat_blue_as = np.trim_zeros(flat_blue_as, 'b')
flat_blue_std_bs = np.array(flat_blue_std_bs)
flat_blue_std_bs = np.trim_zeros(flat_blue_std_bs, 'b')
flat_blue_std_as = np.array(flat_blue_std_as)
flat_blue_std_as = np.trim_zeros(flat_blue_std_as, 'b')
flat_red_bs = np.array(flat_red_bs)
flat_red_bs = np.trim_zeros(flat_red_bs, 'b')
flat_red_as = np.array(flat_red_as)
flat_red_as = np.trim_zeros(flat_red_as, 'b')
flat_red_std_bs = np.array(flat_red_std_bs)
flat_red_std_bs = np.trim_zeros(flat_red_std_bs, 'b')
flat_red_std_as = np.array(flat_red_std_as)
flat_red_std_as = np.trim_zeros(flat_red_std_as, 'b')
blue_pix = np.array(blue_pix)
blue_pix = np.trim_zeros(blue_pix, 'b')
blue_val = np.array(blue_val)
blue_val = np.trim_zeros(blue_val, 'b')
blue_poly = np.array(blue_poly)
blue_poly = np.trim_zeros(blue_poly, 'b')
blue_cut_row100 = np.array(blue_cut_row100)
blue_cut_row100 = np.trim_zeros(blue_cut_row100, 'b')
red_cut_row100 = np.array(red_cut_row100)
red_cut_row100 = np.trim_zeros(red_cut_row100, 'b')
littrow_pix = np.array(littrow_pix)
littrow_pix = np.trim_zeros(littrow_pix, 'b')
littrow_val = np.array(littrow_val)
littrow_val = np.trim_zeros(littrow_val, 'b')
fit_pix_lit = np.array(fit_pix_lit)
fit_pix_lit = np.trim_zeros(fit_pix_lit, 'b')
fit_lit = np.array(fit_lit)
fit_lit = np.trim_zeros(fit_lit, 'b')
com_blue_pix = np.array(com_blue_pix)
com_blue_pix = np.trim_zeros(com_blue_pix, 'b')
com_blue_flat = np.array(com_blue_flat)
com_blue_flat = np.trim_zeros(com_blue_flat, 'b')
blue_poly_fit = np.array(blue_poly_fit)
blue_poly_fit = np.trim_zeros(blue_poly_fit)
com_red_pix = np.array(com_red_pix)
com_red_pix = np.trim_zeros(com_red_pix, 'b')
com_red_flat = np.array(com_red_flat)
com_red_flat = np.trim_zeros(com_red_flat, 'b')
red_poly_fit = np.array(red_poly_fit)
red_poly_fit = np.trim_zeros(red_poly_fit)
edge1 = float(masked_edges[0])
edge2 = float(masked_edges[1])
plt.figure()
plt.errorbar(np.arange(len(bias_bs)), bias_bs, yerr=bias_std, marker="o", linestyle="None")
plt.xlabel('Number')
plt.ylabel('Value')
plt.title('Bias - Before Scaling')
plt.savefig(pp,format='pdf')
plt.close()
plt.figure()
plt.errorbar(np.arange(len(bias_as)), bias_as, yerr=bias_std, marker="o", linestyle="None")
plt.xlabel('Number')
plt.ylabel('Value')
plt.title('Bias - After Scaling')
plt.savefig(pp,format='pdf')
plt.close()
plt.figure()
plt.errorbar(np.arange(len(flat_blue_bs)), flat_blue_bs, yerr=flat_blue_std_bs, marker="o", linestyle="None")
plt.xlabel('Number')
plt.ylabel('Value')
plt.title('Blue Flat - Before Scaling')
plt.savefig(pp,format='pdf')
plt.close()
plt.figure()
plt.errorbar(np.arange(len(flat_blue_as)), flat_blue_as, yerr=flat_blue_std_as, marker="o", linestyle="None")
plt.xlabel('Number')
plt.ylabel('Value')
plt.title('Blue Flat - After Scaling')
plt.savefig(pp,format='pdf')
plt.close()
if len(flat_red_bs) > 0:
plt.figure()
plt.errorbar(np.arange(len(flat_red_bs)), flat_red_bs, yerr=flat_red_std_bs, ecolor='r', marker="o",markerfacecolor='r', linestyle="None")
plt.xlabel('Number')
plt.ylabel('Value')
plt.title('Red Flat - Before Scaling')
plt.savefig(pp,format='pdf')
plt.close()
plt.figure()
plt.errorbar(np.arange(len(flat_red_as)), flat_red_as, yerr=flat_red_std_as, ecolor='r', marker="o",markerfacecolor='r', linestyle="None")
plt.xlabel('Number')
plt.ylabel('Value')
plt.title('Red Flat - After Scaling')
plt.savefig(pp,format='pdf')
plt.close()
plt.figure()
plt.plot(blue_pix, blue_val,'o')
plt.plot(blue_pix, blue_poly,'g')
plt.xlabel('Pixel')
plt.ylabel('Value')
plt.title('Blue Polynomial Littrow Fit Check')
plt.savefig(pp,format='pdf')
plt.close()
plt.figure()
plt.plot(np.arange(len(blue_cut_row100)), blue_cut_row100, 'b')
plt.plot(np.arange(len(blue_cut_row100)), np.ones(len(blue_cut_row100)), 'k--')
plt.xlabel('Pixel')
plt.ylabel('Value')
plt.title('Cut Along Row 100 - Blue')
plt.savefig(pp, format='pdf')
plt.close()
plt.figure()
plt.plot(com_blue_pix, com_blue_flat,'o')
plt.plot(np.arange(len(blue_poly_fit)),blue_poly_fit,'g')
plt.xlabel('Pixel')
plt.ylabel('Value')
plt.title('Blue Polynomial Check')
plt.savefig(pp,format='pdf')
plt.close()
    if len(red_cut_row100) > 0:
plt.figure()
plt.plot(np.arange(len(red_cut_row100)), red_cut_row100, 'r')
plt.plot(np.arange(len(red_cut_row100)), np.ones(len(red_cut_row100)), 'k--')
plt.xlabel('Pixel')
plt.ylabel('Value')
plt.title('Cut Along Row 100 - Red')
plt.savefig(pp, format='pdf')
plt.close()
plt.figure()
plt.plot(com_red_pix, com_red_flat,'ro')
plt.plot(np.arange(len(red_poly_fit)),red_poly_fit,'g')
plt.xlabel('Pixel')
plt.ylabel('Value')
plt.title('Red Polynomial Check')
plt.savefig(pp,format='pdf')
plt.close()
plt.figure()
plt.plot(littrow_pix, littrow_val, 'k-')
plt.plot(fit_pix_lit, fit_lit, 'r-')
plt.axvline(x=masked_edges[0])
plt.axvline(x=masked_edges[1])
plt.xlabel('Pixel')
plt.ylabel('Normalized Flux')
plt.title('Location of Littrow Ghost')
plt.savefig(pp, format='pdf')
plt.close()
pp.close()
###### FLAGS #####
bias1_flag = 0
bias2_flag = 0
blueflat1_flag = 0
blueflat2_flag = 0
redflat1_flag = 0
redflat2_flag = 0
blue_poly_flag = 0
blue_cut_flag = 0
red_cut_flag = 0
littrow_flag = 0
unscaled_bias_std = np.std(bias_bs)
scaled_bias_std = np.std(bias_as)
unscaled_blue_flat_std = np.std(flat_blue_bs)
scaled_blue_flat_std = np.std(flat_blue_as)
if all( (np.mean(bias_bs) - 2*unscaled_bias_std) <= x <= (np.mean(bias_bs) + 2*unscaled_bias_std) for x in bias_bs):
bias1_flag = 0
else:
bias1_flag = 1
if all( (np.mean(bias_as) - 2*scaled_bias_std) <= x <= (np.mean(bias_as) + 2*scaled_bias_std) for x in bias_as):
bias2_flag = 0
else:
bias2_flag = 1
if all( (np.mean(flat_blue_bs) - 2*unscaled_blue_flat_std) <= x <= (np.mean(flat_blue_bs) + 2*unscaled_blue_flat_std) for x in flat_blue_bs):
blueflat1_flag = 0
else:
blueflat1_flag = 1
if all( (np.mean(flat_blue_as) - 2*scaled_blue_flat_std) <= x <= (np.mean(flat_blue_as) + 2*scaled_blue_flat_std) for x in flat_blue_as):
blueflat2_flag = 0
else:
blueflat2_flag = 1
if all( abs((blue_val[x] - blue_poly[x])) < 250 for x in range(len(blue_pix))):
blue_poly_flag = 0
else:
blue_poly_flag = 1
if all( abs((blue_cut_row100[x] - 1.0)) < 0.1 for x in range(len(blue_cut_row100))):
blue_cut_flag = 0
else:
blue_cut_flag = 1
if abs(np.average([edge1, edge2]) - 1400) < 10:
littrow_flag = 0
else:
littrow_flag = 1
flags['Date'] = date
flags['Bias1'] = bias1_flag
flags['Bias2'] = bias2_flag
flags['BlueFlat1'] = blueflat1_flag
flags['BlueFlat2'] = blueflat2_flag
flags['BluePoly'] = blue_poly_flag
flags['BlueCut'] = blue_cut_flag
flags['Littrow'] = littrow_flag
if len(flat_red_bs) > 0:
unscaled_red_flat_std = np.std(flat_red_bs)
scaled_red_flat_std = np.std(flat_red_as)
if all( (np.mean(flat_red_bs) - 2*unscaled_red_flat_std) <= x <= (np.mean(flat_red_bs) + 2*unscaled_red_flat_std) for x in flat_red_bs):
redflat1_flag = 0
else:
redflat1_flag = 1
if all( (np.mean(flat_red_as) - 2*scaled_red_flat_std) <= x <= (np.mean(flat_red_as) + 2*scaled_red_flat_std) for x in flat_red_as):
redflat2_flag = 0
else:
redflat2_flag = 1
if all( abs((red_cut_row100[x] - 1.0)) < 0.1 for x in range(len(red_cut_row100))):
red_cut_flag = 0
else:
red_cut_flag = 1
flags['RedFlat1'] = redflat1_flag
flags['RedFlat2'] = redflat2_flag
flags['RedCut'] = red_cut_flag
##### ------------------------------------------------------------------ #####
# FWHM / Profile Position function
def diagnostic_plots_FWHM(file_name, flags):
date = str(file_name[13:23])
pp = PdfPages('fwhm_plots.pdf')
pdfs.append('fwhm_plots.pdf')
def unique_star_names(seq, idfun=None):
# order preserving
if idfun is None:
def idfun(x): return x
seen = {}
result = []
for item in seq:
marker = idfun(item)
# in old Python versions:
# if seen.has_key(marker)
# but in new ones:
if marker in seen: continue
seen[marker] = 1
result.append(item)
return result
arr = np.genfromtxt(file_name, dtype=None,delimiter='\t')
names, col1, fwhm1, pos1, peak1, col2, fwhm2, pos2, peak2 = [],[],[],[],[],[],[],[],[]
for m in np.arange(len(arr)):
names.append(str(arr[m][0][10:-5]))
col1.append(arr[m][1])
fwhm1.append(arr[m][2])
pos1.append(arr[m][3])
peak1.append(arr[m][4])
col2.append(arr[m][5])
fwhm2.append(arr[m][6])
pos2.append(arr[m][7])
peak2.append(arr[m][8])
fwhm2 = np.array(fwhm2)
col2 = np.array(col2)
pos2 = np.array(pos2)
peak2 = np.array(peak2)
no_duplicates = sorted(list(set(names)))
cat_pts = []
fwhm_pts = []
pos_pts = []
peak_pts = []
for i in range(len(names)):
for j in range(len(no_duplicates)):
if no_duplicates[j] in names[i]:
current_fwhm_array = []
cat_pts.append(j)
fwhm_pts.append(fwhm2[i])
pos_pts.append(pos2[i])
peak_pts.append(peak2[i])
##### FLAGS #####
flags['Star'] = names
for i in range(len(no_duplicates)):
star_name = no_duplicates[i]
num_exposures = names.count(star_name)
current_fwhm_array = []
current_pos_array = []
current_peak_array = []
star_indexes = flags[flags['Star'] == star_name].index.tolist()
for j in range(len(fwhm_pts)):
if cat_pts[j] == i:
current_fwhm_array.append(fwhm_pts[j])
current_pos_array.append(pos_pts[j])
current_peak_array.append(peak_pts[j])
for k in range(num_exposures):
flags.set_value(star_indexes[k], 'Exposure', k)
if abs(current_fwhm_array[k] - np.median(current_fwhm_array)) > fwhm_tol:#*np.std(current_fwhm_array):
flags.set_value(star_indexes[k], 'ProfFWHM', 1)
if abs(current_pos_array[k] - np.median(current_pos_array)) > pos_tol:#*np.std(current_pos_array):
flags.set_value(star_indexes[k], 'ProfPos', 1)
if abs(current_peak_array[k] - np.median(current_peak_array)) > peak_tol:#*np.std(current_peak_array):
flags.set_value(star_indexes[k], 'PeakGauss', 1)
##### ----- #####
x = np.arange(len(no_duplicates))
jitter = 0.1*np.random.rand(len(cat_pts))
cat_pts = cat_pts + jitter
plt.figure()
plt.xticks(x, no_duplicates, rotation=90)
plt.scatter(cat_pts,fwhm_pts)
plt.xlabel('Star')
plt.ylabel('Value')
plt.title('FWHM')
plt.tight_layout()
plt.savefig(pp,format='pdf')
plt.close()
plt.figure()
plt.xticks(x, no_duplicates, rotation=90)
plt.scatter(cat_pts, pos_pts)
plt.xlabel('Star')
plt.ylabel('Value')
plt.title('Profile Position')
plt.tight_layout()
plt.savefig(pp,format='pdf')
plt.close()
plt.figure()
plt.xticks(x, no_duplicates, rotation=90)
plt.scatter(cat_pts, peak_pts)
plt.xlabel('Star')
plt.ylabel('Value')
plt.title('Peak Value of Gaussian')
plt.tight_layout()
plt.savefig(pp,format='pdf')
plt.close()
pp.close()
##### ------------------------------------------------------------------ #####
# Wavelength calibrations function
def diagnostic_plots_wavecal(files, flags):
star_name = str(files[0][8:-21])
pdf_name = 'wavecal_plots_' + star_name + '.pdf'
pp = PdfPages(pdf_name)
pdfs.append(pdf_name)
blue_arr = []
red_arr = []
if len(files) == 1:
with open(files[0], 'r') as f:
first_line = f.readline()
if 'blue' in first_line:
blue_arr = np.genfromtxt(files[0],dtype=None,delimiter=' ')
elif 'red' in first_line:
red_arr = np.genfromtxt(files[0],dtype=None,delimiter=' ')
elif len(files) > 1:
with open(files[0], 'r') as f:
first_line = f.readline()
if 'blue' in first_line:
blue_arr = np.genfromtxt(files[0],dtype=None,delimiter=' ')
red_arr = np.genfromtxt(files[1],dtype=None,delimiter=' ')
elif 'red' in first_line:
blue_arr = np.genfromtxt(files[1],dtype=None,delimiter=' ')
red_arr = np.genfromtxt(files[0],dtype=None,delimiter=' ')
if len(blue_arr) > 0:
wave_fit, res, wave1, flux1, lam_fit, wave2, flux2, line_fit = [],[],[],[],[],[],[],[]
for m in np.arange(len(blue_arr)):
wave_fit.append(blue_arr[m][0])
res.append(blue_arr[m][1])
wave1.append(blue_arr[m][2])
flux1.append(blue_arr[m][3])
lam_fit.append(blue_arr[m][4])
wave2.append(blue_arr[m][5])
flux2.append(blue_arr[m][6])
line_fit.append(blue_arr[m][7])
wave_fit = np.array(wave_fit)
wave_fit = np.trim_zeros(wave_fit, 'b')
res = np.array(res)
res = np.trim_zeros(res, 'b')
wave1 = np.array(wave1)
wave1 = np.trim_zeros(wave1, 'b')
lam_fit = np.array(lam_fit)
lam_fit = np.trim_zeros(lam_fit, 'b')
flux1 = np.array(flux1)
flux1 = np.trim_zeros(flux1, 'b')
wave2 = np.array(wave2)
wave2 = np.trim_zeros(wave2, 'b')
flux2 = np.array(flux2)
flux2 = np.trim_zeros(flux2, 'b')
line_fit = np.array(line_fit)
line_fit = np.trim_zeros(line_fit, 'b')
xmin = np.min(wave_fit)-500
xmax = np.max(wave_fit)+500
x = np.linspace(xmin,xmax,1000)
zeros = np.zeros(len(x))
plt.figure()
plt.scatter(wave_fit,res)
plt.plot(x,zeros,'b--')
plt.xlim(xmin,xmax)
plt.xlabel('Wavelength')
plt.ylabel('Residuals (pixels)')
plt.title('Wavelength Fit Residuals - Blue - ' + star_name)
plt.savefig(pp,format='pdf')
plt.close()
if len(wave2) != 0:
x_line = np.linspace(np.min(wave2),np.max(wave2),len(line_fit))
plt.figure()
plt.plot(wave2,flux2)
plt.plot(x_line,line_fit)
plt.xlabel('Pixels')
plt.ylabel('Flux')
plt.title('Zero Point Offset - Blue - ' + star_name)
plt.savefig(pp,format='pdf')
plt.close()
##### FLAGS #####
star_indexes = flags[flags['Star'] == star_name].index.tolist()
blue_wave_fit_flag = 0
if all(-wave_fit_tol <= x <= wave_fit_tol for x in res):
blue_wave_fit_flag = 0
else:
blue_wave_fit_flag = 1
for k in range(len(star_indexes)):
flags.set_value(star_indexes[k], 'WaveFitResBlue', blue_wave_fit_flag)
### ---------------------------------------------------------------------- ###
if len(red_arr) > 0:
wave_fit, res, wave1, flux1, lam_fit, wave2, flux2, line_fit = [],[],[],[],[],[],[],[]
for m in np.arange(len(red_arr)):
wave_fit.append(red_arr[m][0])
res.append(red_arr[m][1])
wave1.append(red_arr[m][2])
flux1.append(red_arr[m][3])
lam_fit.append(red_arr[m][4])
wave2.append(red_arr[m][5])
flux2.append(red_arr[m][6])
line_fit.append(red_arr[m][7])
wave_fit = np.array(wave_fit)
wave_fit = np.trim_zeros(wave_fit, 'b')
res = np.array(res)
res = np.trim_zeros(res, 'b')
wave1 = np.array(wave1)
wave1 = np.trim_zeros(wave1, 'b')
lam_fit = np.array(lam_fit)
lam_fit = np.trim_zeros(lam_fit, 'b')
flux1 = np.array(flux1)
flux1 = np.trim_zeros(flux1, 'b')
wave2 = np.array(wave2)
wave2 = np.trim_zeros(wave2, 'b')
flux2 = np.array(flux2)
flux2 = np.trim_zeros(flux2, 'b')
line_fit = np.array(line_fit)
line_fit = np.trim_zeros(line_fit, 'b')
xmin = np.min(wave_fit)-500
xmax = np.max(wave_fit)+500
x = np.linspace(xmin,xmax,1000)
zeros = np.zeros(len(x))
plt.figure()
plt.scatter(wave_fit,res,color='red')
plt.plot(x,zeros,'r--')
plt.xlim(xmin,xmax)
plt.xlabel('Wavelength')
plt.ylabel('Residuals (pixels)')
plt.title('Wavelength Fit Residuals - Red - ' + star_name)
plt.savefig(pp,format='pdf')
plt.close()
if len(wave2) != 0:
x_line = np.linspace(np.min(wave2),np.max(wave2),len(line_fit))
plt.figure()
plt.plot(wave2,flux2,'r')
plt.plot(x_line,line_fit,'g')
plt.xlabel('Pixels')
plt.ylabel('Flux')
plt.title('Zero Point Offset - Red - ' + star_name)
plt.savefig(pp,format='pdf')
plt.close()
##### FLAGS #####
star_indexes = flags[flags['Star'] == star_name].index.tolist()
red_wave_fit_flag = 0
if all(-wave_fit_tol <= x <= wave_fit_tol for x in res):
red_wave_fit_flag = 0
else:
red_wave_fit_flag = 1
for k in range(len(star_indexes)):
flags.set_value(star_indexes[k], 'WaveFitResRed', red_wave_fit_flag)
pp.close()
##### ------------------------------------------------------------------ #####
# Continuum function
def diagnostic_plots_continuum(file_name, flags):
star_name = str(file_name[24:-21])
pdf_name = 'modelcal_plots_' + star_name + '.pdf'
pp = PdfPages(pdf_name)
pdfs.append(pdf_name)
arr = np.genfromtxt(file_name, dtype=None, delimiter=' ')
blue_lam, blue_res, blue_masked_lam, blue_masked_res, blue_res_fit, norm_spec_blue = [],[],[],[],[],[]
for m in np.arange(len(arr)):
blue_lam.append(arr[m][0])
blue_res.append(arr[m][1])
blue_masked_lam.append(arr[m][2])
blue_masked_res.append(arr[m][3])
blue_res_fit.append(arr[m][4])
norm_spec_blue.append(arr[m][5])
blue_lam = np.array(blue_lam)
blue_lam = np.trim_zeros(blue_lam, 'b')
blue_res = np.array(blue_res)
blue_res = np.trim_zeros(blue_res, 'b')
blue_masked_lam = np.array(blue_masked_lam)
blue_masked_lam = np.trim_zeros(blue_masked_lam, 'b')
blue_masked_res = np.array(blue_masked_res)
blue_masked_res = np.trim_zeros(blue_masked_res, 'b')
blue_res_fit = np.array(blue_res_fit)
blue_res_fit = np.trim_zeros(blue_res_fit, 'b')
norm_spec_blue = np.array(norm_spec_blue)
norm_spec_blue = np.trim_zeros(norm_spec_blue)
plt.figure()
plt.plot(blue_lam, blue_res)
plt.plot(blue_masked_lam, blue_masked_res,'g.')
plt.plot(blue_lam, blue_res_fit,'r')
plt.xlabel('Wavelength')
plt.ylabel('Response (observed/model)')
plt.title('Response - Blue - ' + star_name)
plt.savefig(pp,format='pdf')
plt.close()
plt.figure()
plt.plot(blue_lam, norm_spec_blue)
plt.xlabel('Wavelength')
plt.title('Continuum Normalized Spectrum - Blue')
plt.savefig(pp,format='pdf')
plt.close()
##### FLAGS #####
star_indexes = flags[flags['Star'] == star_name].index.tolist()
blue_res_flag = 0
for i in range(10,len(blue_masked_res)-10):
wavelength_interval = blue_masked_lam[i-10:i+10]
res_interval = blue_masked_res[i-10:i+10]
fit_interval = blue_res_fit[np.where(blue_lam == wavelength_interval)]
min_res = np.min(res_interval)
max_res = np.max(res_interval)
if all( min_res <= x <= max_res for x in fit_interval):
blue_res_flag += 0
else:
blue_res_flag += 1
if blue_res_flag > 0:
for k in range(len(star_indexes)):
flags.set_value(star_indexes[k], 'ResponseBlue', 1)
### ---------------------------------------------------------------------- ###
if len(arr[0]) > 6:
red_lam, red_res, red_masked_lam, red_masked_res, red_res_fit, norm_spec_red = [],[],[],[],[],[]
for m in np.arange(len(arr)):
red_lam.append(arr[m][6])
red_res.append(arr[m][7])
red_masked_lam.append(arr[m][8])
red_masked_res.append(arr[m][9])
red_res_fit.append(arr[m][10])
norm_spec_red.append(arr[m][11])
red_lam = np.array(red_lam)
red_lam = np.trim_zeros(red_lam, 'b')
red_res = np.array(red_res)
red_res = np.trim_zeros(red_res, 'b')
red_masked_lam = np.array(red_masked_lam)
red_masked_lam = np.trim_zeros(red_masked_lam, 'b')
red_masked_res = np.array(red_masked_res)
red_masked_res = np.trim_zeros(red_masked_res, 'b')
red_res_fit = np.array(red_res_fit)
red_res_fit = np.trim_zeros(red_res_fit, 'b')
norm_spec_red = np.array(norm_spec_red)
norm_spec_red = np.trim_zeros(norm_spec_red, 'b')
plt.figure()
plt.plot(red_lam, red_res)
plt.plot(red_masked_lam, red_masked_res,'g.')
plt.plot(red_lam, red_res_fit,'r')
plt.xlabel('Wavelength')
plt.ylabel('Response (observed/model)')
plt.title('Response - Red - ' + star_name)
plt.savefig(pp,format='pdf')
plt.close()
plt.figure()
plt.plot(red_lam, norm_spec_red,'r')
plt.xlabel('Wavelength')
plt.title('Continuum Normalized Spectrum - Red')
plt.savefig(pp,format='pdf')
plt.close()
##### FLAGS #####
star_indexes = flags[flags['Star'] == star_name].index.tolist()
red_res_flag = 0
        for i in range(10,len(red_masked_res)-10):
            wavelength_interval = red_masked_lam[i-10:i+10]
            res_interval = red_masked_res[i-10:i+10]
            fit_interval = red_res_fit[np.where(red_lam == wavelength_interval)]
min_res = np.min(res_interval)
max_res = np.max(res_interval)
if all( min_res <= x <= max_res for x in fit_interval):
red_res_flag += 0
else:
red_res_flag += 1
if red_res_flag > 0:
for k in range(len(star_indexes)):
flags.set_value(star_indexes[k], 'ResponseRed', 1)
pp.close()
##### ------------------------------------------------------------------ #####
# Extraction function
def diagnostic_plots_extraction(file_name, flags):
star_name = str(file_name)[11:-21]
date = str(file_name)[-20:-10]
pdf_name = 'extraction_plots_' + star_name + '.pdf'
pp = PdfPages(pdf_name)
pdfs.append(pdf_name)
arr = np.genfromtxt(file_name, dtype=None, delimiter=' ')
meas_FWHM, pix_FWHM, fit_FWHM, all_pix = [],[],[],[]
prof_pix, prof_pos, fit_prof_pos = [],[],[]
pix_val_1200, val_1200, pixel_back_fit, val_fit, poly_fit_back = [],[],[],[],[]
for m in np.arange(len(arr)):
meas_FWHM.append(arr[m][0])
pix_FWHM.append(arr[m][1])
fit_FWHM.append(arr[m][2])
all_pix.append(arr[m][3])
prof_pix.append(arr[m][4])
prof_pos.append(arr[m][5])
fit_prof_pos.append(arr[m][6])
pix_val_1200.append(arr[m][7])
val_1200.append(arr[m][8])
pixel_back_fit.append(arr[m][9])
val_fit.append(arr[m][10])
poly_fit_back.append(arr[m][11])
meas_FWHM = np.array(meas_FWHM)
meas_FWHM = np.trim_zeros(meas_FWHM, 'b')
pix_FWHM = np.array(pix_FWHM)
pix_FWHM = np.trim_zeros(pix_FWHM, 'b')
fit_FWHM = np.array(fit_FWHM)
fit_FWHM = np.trim_zeros(fit_FWHM, 'b')
all_pix = np.array(all_pix)
all_pix = np.trim_zeros(all_pix, 'b')
prof_pix = np.array(prof_pix)
prof_pix = np.trim_zeros(prof_pix, 'b')
prof_pos = np.array(prof_pos)
prof_pos = np.trim_zeros(prof_pos, 'b')
fit_prof_pos = np.array(fit_prof_pos)
fit_prof_pos = np.trim_zeros(fit_prof_pos, 'b')
pix_val_1200 = np.array(pix_val_1200)
pix_val_1200 = np.trim_zeros(pix_val_1200, 'b')
val_1200 = np.array(val_1200)
val_1200 = np.trim_zeros(val_1200, 'b')
pixel_back_fit = np.array(pixel_back_fit)
pixel_back_fit = np.trim_zeros(pixel_back_fit, 'b')
val_fit = np.array(val_fit)
val_fit = np.trim_zeros(val_fit, 'b')
poly_fit_back = np.array(poly_fit_back)
poly_fit_back = np.trim_zeros(poly_fit_back, 'b')
plt.figure()
plt.scatter(pix_FWHM,meas_FWHM)
plt.plot(np.arange(len(fit_FWHM)),fit_FWHM)
plt.xlabel('Pixel')
plt.ylabel('FWHM')
plt.title('Extraction FWHM - ' + star_name)
plt.savefig(pp,format='pdf')
plt.close()
plt.figure()
plt.scatter(prof_pix, prof_pos)
plt.plot(np.arange(len(fit_prof_pos)),fit_prof_pos)
plt.xlabel('Pixel')
plt.ylabel('Profile Position')
plt.title('Extraction Profile - ' + star_name)
plt.savefig(pp,format='pdf')
plt.close()
plt.figure()
plt.scatter(pix_val_1200, val_1200, color='k', marker='^')
plt.scatter(pixel_back_fit, val_fit, color='b', marker='^')
plt.plot(pix_val_1200, poly_fit_back, 'b-')
plt.xlabel('Pixel')
plt.title('Fit to Background at Column 1200')
plt.savefig(pp,format='pdf')
plt.close()
pp.close()
##### FLAGS #####
star_indexes = flags[flags['Star'] == star_name].index.tolist()
ext_FWHM_flag = 0
ext_profile_flag = 0
    background_fit_flag = 0
meas_FWHM_std = np.std(meas_FWHM)
max_back_ind = np.argmax(val_1200)
fit_at_max = poly_fit_back[max_back_ind]
avg_poly_fit = np.average(val_fit)
if all( (np.mean(meas_FWHM) - ext_FWHM_num_sigma*meas_FWHM_std) <= x <= (np.mean(meas_FWHM) + ext_FWHM_num_sigma*meas_FWHM_std) for x in meas_FWHM):
ext_FWHM_flag = 0
else:
ext_FWHM_flag = 1
if all( abs(prof_pos[x] - fit_prof_pos[int(prof_pix[x])]) < ext_prof_tol for x in range(len(prof_pos)) ):
ext_profile_flag = 0
else:
ext_profile_flag = 1
if abs(fit_at_max - avg_poly_fit) < background_fit_tol:
background_fit_flag = 0
else:
background_fit_flag = 1
for k in range(len(star_indexes)):
flags.set_value(star_indexes[k], 'ExtFWHM', ext_FWHM_flag)
flags.set_value(star_indexes[k], 'ExtProf', ext_profile_flag)
flags.set_value(star_indexes[k], 'FitToBack', background_fit_flag)
##### ------------------------------------------------------------------ #####
# Final spectra function
def diagnostic_plots_spectra(file_name, flags):
date = str(file_name)[9:19]
pdf_name = 'final_spectra_plots.pdf'
pp = PdfPages(pdf_name)
pdfs.append(pdf_name)
spectra_names = []
with open(file_name, 'r') as f:
first_line = f.readline()
second_line = f.readline()
names = first_line[3:-1] + ' ' + second_line[3:-2]
names = names.split(' ')
for n in names:
spectra_names.append(n[7:-9])
star_count = len(spectra_names)
lam_arrs = [[] for i in range(star_count)]
flux_arrs = [[] for i in range(star_count)]
arr = np.genfromtxt(file_name, dtype=None, delimiter=' ')
for m in np.arange(len(arr)):
lams_to_append = arr[m][0::2]
flux_to_append = arr[m][1::2]
for i in range(len(lam_arrs)):
lam_arrs[i].append(lams_to_append[i])
flux_arrs[i].append(flux_to_append[i])
for i in range(len(spectra_names)):
plt.figure()
plt.plot(lam_arrs[i], flux_arrs[i])
plt.xlabel('Wavelength (Angstroms)')
plt.ylabel('Flux')
plt.title('Final Spectrum - ' + spectra_names[i])
plt.savefig(pp,format='pdf')
plt.close()
pp.close()
def diagnostic_now():
original_date = os.getcwd()[-10::]
##### Flags #####
# thresholds and constants
global pdfs, num_flags, num_sigma, fwhm_tol, pos_tol, peak_tol, ext_FWHM_num_sigma,ext_prof_tol,background_fit_tol,wave_fit_tol
num_flags = 23
num_sigma = 2
fwhm_tol = 1
pos_tol = 5
peak_tol = 500
ext_FWHM_num_sigma = 2
ext_prof_tol = 5
background_fit_tol = 5
wave_fit_tol = 0.15
    pdfs = glob('diagnostic_plots.pdf')  # remove a previously merged output, if any
for f in pdfs:
os.remove(f)
pdfs = []
# Use the FWHM_records file to determine how many total exposures there were for the given date
fwhm_files = glob('FWHM*.txt')
file_name = str(fwhm_files[0])
flags = setup_flags_table(file_name)
##### ------------------------------------------------------------------ #####
# Sort file names by type
cal_files = glob('reduction*.txt')
fwhm_files = glob('FWHM*.txt')
wave_cal_files = glob('wavecal*.txt')
model_cal_files = glob('continuum_normalization*.txt')
extraction_files = glob('extraction_*_*.txt')
spectra_files = glob('flux_fits*.txt')
##### ------------------------------------------------------------------ #####
# Calibrations
for i in range(len(cal_files)): # Repeat copy of data below
file_name = str(cal_files[i])
diagnostic_plots_cals(file_name, flags)
##### ------------------------------------------------------------------ #####
# FWHM
for i in range(len(fwhm_files)): # First line not commented out
file_name = str(fwhm_files[i])
diagnostic_plots_FWHM(file_name, flags)
##### ------------------------------------------------------------------ #####
# Wavelength Calibrations
star_names = []
for i in range(len(wave_cal_files)):
star_names.append(wave_cal_files[i][8:-21])
with open(wave_cal_files[i], 'r') as f:
first_line = f.readline()
unique_names = unique_star_names(star_names)
for sub in unique_names:
file_names = [x for x in wave_cal_files if str(sub) in x]
diagnostic_plots_wavecal(file_names, flags)
##### ------------------------------------------------------------------ #####
# Model Calibrations
star_names = []
for i in range(len(model_cal_files)):
star_names.append(model_cal_files[i][24:-21])
with open(model_cal_files[i], 'r') as f:
first_line = f.readline()
unique_names = unique_star_names(star_names)
for sub in unique_names:
file_name = [x for x in model_cal_files if str(sub) in x]
diagnostic_plots_continuum(file_name[0], flags)
##### ------------------------------------------------------------------ #####
# Extraction
for i in range(len(extraction_files)):
file_name = str(extraction_files[i])
diagnostic_plots_extraction(file_name, flags)
######------------------------------------------------------------------ #####
for i in range(len(spectra_files)):
file_name = str(spectra_files[i])
diagnostic_plots_spectra(file_name, flags)
######------------------------------------------------------------------ #####
# Merge all pdfs of plots
#pdfs = glob('*.pdf')
outfile = PdfFileMerger()
for f in pdfs:
outfile.append(open(f, 'rb'))
os.remove(f)
outfile.write(open('diagnostic_plots.pdf', 'wb'))
flags.to_csv('diagnostics_flags.csv')
#Run from command line
if __name__ == '__main__':
diagnostic_now()
|
#!/usr/bin/env python
import Skype4Py
debug = False
import threading
import time
class BotSkypeinterface(object):
def __init__(self, commands, threading):
self.skype = Skype4Py.Skype(Transport='x11')
if not self.skype.Client.IsRunning:
print 'You need to start skype'
exit()
self.threading = threading
self.skype.FriendlyName = 'Py-in-the-Sky'
self.skype.RegisterEventHandler('MessageStatus', self.getmessage)
self.skype.Attach()
self.commands = commands
self.ops = set((name.strip() for name in open('ops').read().split('\n') if name))
print "attached!" if self.skype.AttachmentStatus == 0 else "Couldn't attach to skype"
def getmessage(self, message, status):
"this method gets attached to skype and called whenever a message comes in"
parsedmessage = self.commands.parse_message(message, self)
snippet = message.Body[1:21]
if parsedmessage: #parsed message returns false if it's not a command
function, args = parsedmessage
t = threading.Thread(target=function, args=args, name=snippet)
t.start_time = time.time()
t.setDaemon(True)
t.start()
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.skype.UnregisterEventHandler('MessageStatus', self.getmessage)
self.commands.write_auth()
del self.skype
if __name__ == '__main__':
from Commands import DefaultCommands as Commands
with open('./allowed', 'r+') as auth:
with BotSkypeinterface(Commands(auth), threading) as Bot:
while True:
time.sleep(10)
|
from point_cloud import wavefront
import numpy as np
from scipy import interpolate
import matplotlib.pyplot as plt
def tan_step(x, a, b, delta):
    """Smooth one-sided step: ~1 for x well below a, ~0 for x well above a,
    with a transition width set by delta (b is unused in this one-sided form)."""
    # two-sided window, kept for reference:
    # out = 1/2 * (np.tanh((x-a)/delta) - np.tanh((x - b)/delta))
    out = 1/2 * (1 - np.tanh((x - a)/delta))
    return out
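# For example, with the a=0.5 and delta=0.05 used below, tan_step(0.0, 0.5, 2, 0.05)
# is ~1.0 and tan_step(1.0, 0.5, 2, 0.05) is ~0.0.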
def bspline():
# x = np.arange(0, 2*np.pi+np.pi/4, 2*np.pi/8)
# y = np.sin(x)
ctr =np.array([[0, 1], [0.05, 1], [0.1, 1], [0.2, 1], [0.5, 0.2], [0.8, 0.0], [1, 0]])
x=ctr[:,0]
y=ctr[:,1]
#x=np.append(x,x[0])
#y=np.append(y,y[0])
tck,u = interpolate.splprep([x,y],k=3, s=0)
u=np.linspace(-10,10,num=50,endpoint=True)
out = interpolate.splev(u,tck)
plt.plot(x, y, 'ro', out[0], out[1], 'b')
plt.legend(['Points', 'Interpolated B-spline', 'True'],loc='best')
a = 0.5 # left right shift
b = 2 # size of transition
delta = 0.05
x = np.linspace(0, 1, 100)
angle = np.linspace(0, np.pi, 100)
tan_out = tan_step(angle/np.max(angle), a, b, delta)
rad_out = wavefront.radius_scale_factor(angle, a=0.3, delta=0.05)
plt.figure()
plt.plot(x,tan_out, 'g')
plt.plot(angle/np.max(angle), rad_out, 'r' )
plt.title('Tanh interpolation')
# plt.axis([0, 1, 0, 1])
plt.show()
|
# coding=utf-8
# Copyright 2021 The Ravens Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Aligning task."""
import os
import numpy as np
from ravens.tasks.task import Task
from ravens.utils import utils
class AlignBoxCorner(Task):
"""Aligning task."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.max_steps = 3
def reset(self, env):
super().reset(env)
# Generate randomly shaped box.
box_size = self.get_random_size(0.05, 0.15, 0.05, 0.15, 0.01, 0.06)
# Add corner.
dimx = (box_size[0] / 2 - 0.025 + 0.0025, box_size[0] / 2 + 0.0025)
dimy = (box_size[1] / 2 + 0.0025, box_size[1] / 2 - 0.025 + 0.0025)
corner_template = 'corner/corner-template.urdf'
replace = {'DIMX': dimx, 'DIMY': dimy}
corner_urdf = self.fill_template(corner_template, replace)
corner_size = (box_size[0], box_size[1], 0)
corner_pose = self.get_random_pose(env, corner_size)
env.add_object(corner_urdf, corner_pose, 'fixed')
os.remove(corner_urdf)
# Add possible placing poses.
theta = utils.quatXYZW_to_eulerXYZ(corner_pose[1])[2]
fip_rot = utils.eulerXYZ_to_quatXYZW((0, 0, theta + np.pi))
pose1 = (corner_pose[0], fip_rot)
alt_x = (box_size[0] / 2) - (box_size[1] / 2)
alt_y = (box_size[1] / 2) - (box_size[0] / 2)
alt_pos = (alt_x, alt_y, 0)
alt_rot0 = utils.eulerXYZ_to_quatXYZW((0, 0, np.pi / 2))
alt_rot1 = utils.eulerXYZ_to_quatXYZW((0, 0, 3 * np.pi / 2))
pose2 = utils.multiply(corner_pose, (alt_pos, alt_rot0))
pose3 = utils.multiply(corner_pose, (alt_pos, alt_rot1))
# Add box.
box_template = 'box/box-template.urdf'
box_urdf = self.fill_template(box_template, {'DIM': box_size})
box_pose = self.get_random_pose(env, box_size)
box_id = env.add_object(box_urdf, box_pose)
os.remove(box_urdf)
self.color_random_brown(box_id)
# Goal: box is aligned with corner (1 of 4 possible poses).
self.goals.append(([(box_id, (2 * np.pi, None))], np.int32([[1, 1, 1, 1]]),
[corner_pose, pose1, pose2, pose3],
False, True, 'pose', None, 1))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__version__ = "$Revision$"
import __builtin__
import gettext
import os
import sys
gettext.install('yaplcide') # this is a dummy to prevent gettext falling down
_dist_folder = os.path.split(sys.path[0])[0]
_beremiz_folder = os.path.join(_dist_folder, "beremiz")
#Ensure that Beremiz things are imported before builtins and libs.
sys.path.insert(1,_beremiz_folder)
from Beremiz import *
class YAPLCIdeLauncher(BeremizIDELauncher):
"""
YAPLC IDE Launcher class
"""
def __init__(self):
BeremizIDELauncher.__init__(self)
self.yaplc_dir = os.path.dirname(os.path.realpath(__file__))
self.splashPath = self.YApath("images", "splash.png")
self.extensions.append(self.YApath("yaplcext.py"))
import features
# Let's import nucleron yaplcconnectors
import yaplcconnectors
import connectors
connectors.connectors.update(yaplcconnectors.connectors)
# Import Nucleron yaplctargets
import yaplctargets
import targets
targets.toolchains.update(yaplctargets.toolchains)
targets.targets.update(yaplctargets.yaplctargets)
features.libraries = [
('Native', 'NativeLib.NativeLibrary')]
features.catalog.append(('yaplcconfig',
_('YAPLC Configuration Node'),
_('Adds template located variables'),
'yaplcconfig.yaplcconfig.YAPLCNodeConfig'))
def YApath(self, *args):
return os.path.join(self.yaplc_dir, *args)
# This is where we start our application
if __name__ == '__main__':
beremiz = YAPLCIdeLauncher()
beremiz.Start()
|
"""
***************************************************************************
DeleteWorkflowAction.py
-------------------------------------
Copyright (C) 2014 TIGER-NET (www.tiger-net.org)
***************************************************************************
* This plugin is part of the Water Observation Information System (WOIS) *
* developed under the TIGER-NET project funded by the European Space *
* Agency as part of the long-term TIGER initiative aiming at promoting *
* the use of Earth Observation (EO) for improved Integrated Water *
* Resources Management (IWRM) in Africa. *
* *
* WOIS is a free software i.e. you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published *
* by the Free Software Foundation, either version 3 of the License, *
* or (at your option) any later version. *
* *
* WOIS is distributed in the hope that it will be useful, but WITHOUT ANY *
* WARRANTY; without even the implied warranty of MERCHANTABILITY or *
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License *
* for more details. *
* *
* You should have received a copy of the GNU General Public License along *
* with this program. If not, see <http://www.gnu.org/licenses/>. *
***************************************************************************
"""
import os
from qgis.PyQt.QtWidgets import QMessageBox
from qgis.core import QgsApplication
from processing_workflow.Workflow import Workflow
from processing.gui.ContextAction import ContextAction
class DeleteWorkflowAction(ContextAction):
def __init__(self):
super().__init__()
self.name = self.tr("Delete workflow", "DeleteWorkflowAction")
def isEnabled(self):
return (isinstance(self.itemData, Workflow) and
"processing_workflow" in self.itemData.provider().id())
def execute(self, alg):
reply = QMessageBox.question(None,
self.tr("Confirmation", "DeleteWorkflowAction"),
self.tr("Are you sure you want to delete this workflow?",
"DeleteWorkflowAction"),
QMessageBox.Yes | QMessageBox.No,
QMessageBox.No)
if reply == QMessageBox.Yes:
providerId = self.itemData.provider().id()
os.remove(self.itemData.descriptionFile)
QgsApplication.processingRegistry().providerById(providerId).refreshAlgorithms()
|
import getpass
import yaml
import os
import logging
#from xcrypto import xcrypto
from xcloud import utils
log = logging.getLogger(__name__)
class YamlLoader(yaml.Loader):
def __init__(self, stream):
self._root = os.path.split(stream.name)[0]
super(YamlLoader, self).__init__(stream)
def include(self, node):
return self._include(node, self._root)
def sysfile(self, node):
return self._include(node, os.environ.get('SYS_PATH', self._root))
def syspath(self, node):
base_path = os.environ.get('SYS_PATH', self._root)
return os.path.join(base_path, self.construct_scalar(node))
def _include(self, node, base_path):
filename = os.path.join(base_path, self.construct_scalar(node))
with open(filename, 'r') as fhd:
y = fhd.read()
return y
def load(self, node):
filename = os.path.join(self._root, self.construct_scalar(node))
with open(filename, 'r') as fhd:
y = yaml.load(fhd, YamlLoader)
return y
def resolve_path(self, node):
filename = os.path.join(self._root, self.construct_scalar(node))
return filename.encode('ascii', 'ignore')
#YamlLoader.add_constructor('!encrypted', YamlLoader.encrypted)
YamlLoader.add_constructor('!file', YamlLoader.include)
YamlLoader.add_constructor('!sysfile', YamlLoader.sysfile)
YamlLoader.add_constructor('!syspath', YamlLoader.syspath)
YamlLoader.add_constructor('!yaml', YamlLoader.load)
YamlLoader.add_constructor('!resolve', YamlLoader.resolve_path)
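# Illustrative YAML using the custom tags registered above (paths are hypothetical):
#   ssh_key: !file keys/deploy.pub      # file contents inlined as a string
#   common:  !yaml common.yaml          # nested YAML parsed with this same loader
#   scripts: !syspath provisioning      # joined against SYS_PATH (or the file's dir)
#   ca_path: !resolve certs/ca.pem      # joined against the including file's dir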
class CloudOptions(dict):
@staticmethod
def create_from_defaults(filename, args):
all_options = []
os.environ['SYS_PATH'] = args.syspath
base_path = os.path.dirname(filename)
with open(filename, 'r') as fhd:
region = yaml.load(fhd, YamlLoader)
defaults = region.get('defaults', {})
defaults['security_groups'] = region.get('security_groups', {})
if args.username:
defaults['username'] = args.username
if args.password:
defaults['password'] = args.password
return defaults
@staticmethod
def create_from_file(filename, args):
all_options = []
os.environ['SYS_PATH'] = args.syspath
base_path = os.path.dirname(filename)
with open(filename, 'r') as fhd:
region = yaml.load(fhd, YamlLoader)
defaults = region.get('defaults', {})
all_files = region.get('files', [])
all_cloud_init = region.get('cloud_init', [])
clusters = region.get('clusters', [])
all_security_groups = region.get('security_groups', {})
if 'password' not in defaults:
if 'OS_PASSWORD' in os.environ:
defaults['password'] = os.environ['OS_PASSWORD']
else:
defaults['password'] = getpass.getpass('Openstack password: ')
env = region.get('env', {})
configs = region.get('configs', {})
for idx in range(0, len(clusters)):
cluster = clusters[idx]
if isinstance(cluster, str):
p = utils.resolve_path(cluster, base_path)
with open(p, 'r') as fhd:
                        clusters[idx] = yaml.load(fhd, YamlLoader)
else:
d = {}
if 'extend' in cluster:
p = utils.resolve_path(cluster['extend'], base_path)
del cluster['extend']
with open(p, 'r') as fhd:
d = yaml.load(fhd, YamlLoader)
clusters[idx] = utils.extend(d, cluster)
region_scripts = region.get('scripts', {})
for cluster in clusters:
cluster = utils.extend(defaults, cluster)
cluster['files'] = all_files + cluster.get('files', [])
cluster['cloud_init'] = all_cloud_init + cluster.get('cloud_init', [])
cluster['scripts'] = dict(region_scripts, **cluster.get('scripts', {}))
cluster['env'] = dict(env, **cluster.get('env', {}))
cluster['configs'] = dict(configs, **cluster.get('configs', {}))
cluster['security_groups'] = all_security_groups
options = CloudOptions(cluster)
if args.username:
options['username'] = args.username
if args.password:
options['password'] = args.password
ssh_key = cluster.get('security', {}).get('ssh_key_name', None)
if ssh_key:
rc = os.system('ssh-add -L | grep %s >/dev/null 2>&1 || ssh-add ~/.ssh/%s >/dev/null 2>&1' % (ssh_key, ssh_key))
if rc != 0:
exit('please ensure %s (~/.ssh/%s) SSH key is loaded into SSH Agent' % (ssh_key, ssh_key))
all_options.append(options)
return all_options
|
# SecuML
# Copyright (C) 2017 ANSSI
#
# SecuML is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# SecuML is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with SecuML. If not, see <http://www.gnu.org/licenses/>.
import json
import os.path as path
from SecuML.core.ActiveLearning.UpdateModel import UpdateModel
from SecuML.experiments.Classification.ClassificationExperiment import ClassificationExperiment
from SecuML.experiments.Classification.RunClassifier import RunClassifier
class UpdateModelExp(UpdateModel):
def __init__(self, iteration):
UpdateModel.__init__(self, iteration)
self.experiment = self.iteration.experiment
def run(self):
models_conf = self.iteration.conf.models_conf
self.models_exp = {}
for k, conf in models_conf.items():
self.models_exp[k] = self.runModel(k, conf)
self.exportModelsExperiments()
def exportModelsExperiments(self):
export_models = {}
for k, exp in self.models_exp.items():
export_models[k] = exp.experiment_id
output_file = path.join(self.iteration.iteration_dir,
'models_experiments.json')
with open(output_file, 'w') as f:
json.dump(export_models, f, indent=2)
def runModel(self, kind, conf):
self.setDatasets(conf)
# Create the experiment
exp = self.experiment
name = 'AL' + str(exp.experiment_id) + '-Iter'
name += str(self.iteration.iteration_number) + '-' + kind
model_exp = ClassificationExperiment(exp.project, exp.dataset, exp.session,
experiment_name=name,
parent=exp.experiment_id)
model_exp.setConf(conf, exp.features_filename,
annotations_id=exp.annotations_id)
model_exp.export()
# Build the model
model = conf.model_class(model_exp.conf, cv_monitoring=True)
model_run = RunClassifier(model, self.datasets, model_exp)
model_run.run()
self.models[kind] = model
# Execution time monitoring
time = model.training_execution_time + model.testing_execution_time
self.times[kind] = time
return model_exp
|
__all__ = [
'main',
]
import sys
from pathlib import Path
from g1.bases import argparses
from g1.bases import oses
from g1.bases.assertions import ASSERT
from g1.texts import columns
from g1.texts.columns import argparses as columns_argparses
from . import models
from . import xar_ops_dirs
_XAR_LIST_COLUMNS = frozenset((
'label',
'version',
'zipapp',
))
_XAR_LIST_DEFAULT_COLUMNS = (
'label',
'version',
'zipapp',
)
_XAR_LIST_STRINGIFIERS = {
'zipapp': lambda active: 'true' if active else 'false',
}
ASSERT.issuperset(_XAR_LIST_COLUMNS, _XAR_LIST_DEFAULT_COLUMNS)
ASSERT.issuperset(_XAR_LIST_COLUMNS, _XAR_LIST_STRINGIFIERS)
@argparses.begin_parser('list', **argparses.make_help_kwargs('list xars'))
@columns_argparses.columnar_arguments(
_XAR_LIST_COLUMNS, _XAR_LIST_DEFAULT_COLUMNS
)
@argparses.end
def cmd_list(args):
columnar = columns.Columnar(
**columns_argparses.make_columnar_kwargs(args),
stringifiers=_XAR_LIST_STRINGIFIERS,
)
with xar_ops_dirs.make_ops_dirs().listing_ops_dirs() as active_ops_dirs:
for ops_dir in active_ops_dirs:
columnar.append({
'label': ops_dir.label,
'version': ops_dir.version,
'zipapp': ops_dir.metadata.is_zipapp(),
})
columnar.sort(lambda row: (row['label'], row['version']))
columnar.output(sys.stdout)
return 0
@argparses.begin_parser(
'install', **argparses.make_help_kwargs('install xar from a bundle')
)
@argparses.argument(
'bundle',
type=Path,
help='provide path to deployment bundle directory',
)
@argparses.end
def cmd_install(args):
oses.assert_root_privilege()
xar_ops_dirs.make_ops_dirs().install(args.bundle)
return 0
@argparses.begin_parser(
'uninstall', **argparses.make_help_kwargs('uninstall xar')
)
@argparses.argument(
'label', type=models.validate_xar_label, help='provide xar label'
)
@argparses.argument(
'version', type=models.validate_xar_version, help='provide xar version'
)
@argparses.end
def cmd_uninstall(args):
oses.assert_root_privilege()
xar_ops_dirs.make_ops_dirs().uninstall(args.label, args.version)
return 0
@argparses.begin_parser('xars', **argparses.make_help_kwargs('manage xars'))
@argparses.begin_subparsers_for_subcmds(dest='command')
@argparses.include(cmd_list)
@argparses.include(cmd_install)
@argparses.include(cmd_uninstall)
@argparses.end
@argparses.end
def main(args):
if args.command == 'list':
return cmd_list(args)
elif args.command == 'install':
return cmd_install(args)
elif args.command == 'uninstall':
return cmd_uninstall(args)
else:
return ASSERT.unreachable('unknown command: {}', args.command)
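# Dispatch sketch (hypothetical wiring): once the decorators above have been used to
# build an argparse parser, main(parser.parse_args(['xars', 'list'])) runs cmd_list,
# and ['xars', 'install', '/path/to/bundle'] routes to cmd_install.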
|
from __future__ import absolute_import, print_function, unicode_literals
from .jstypes import Hook, Interfaces
OBSOLETE_EXTENSION_MANAGER = {
'on_get': 'This interface is part of the obsolete extension manager '
'interface, which is not available in any remotely modern '
'version of Firefox. It should not be referenced in any '
'code.'}
Interfaces.hook({
'nsIExtensionManager': OBSOLETE_EXTENSION_MANAGER,
'nsIUpdateItem': OBSOLETE_EXTENSION_MANAGER,
'nsIInstallLocation': OBSOLETE_EXTENSION_MANAGER,
'nsIAddonInstallListener': OBSOLETE_EXTENSION_MANAGER,
'nsIAddonUpdateCheckListener': OBSOLETE_EXTENSION_MANAGER,
})
# nsIJSON
NSIJSON_DEPRECATED = {
'err_id': ('testcases_javascript_calldefinitions', 'nsIJSON', 'deprec'),
'warning': 'Deprecated nsIJSON methods in use.',
'description':
'The `encode` and `decode` methods in nsIJSON have been '
'deprecated since Gecko 7. You should use the methods in the '
'global JSON object instead. See '
'https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference'
'/Global_Objects/JSON for more information.'}
@Interfaces.hook
class nsIJSON(Hook):
encode = {'on_call': NSIJSON_DEPRECATED}
decode = {'on_call': NSIJSON_DEPRECATED}
# nsIWebBrowserPersist
WEBBROWSERPERSIST_DEPRECATED = {
'err_id': ('testcases_javascript_call_definititions',
'webbrowserpersist'),
'warning': 'nsIWebBrowserPersist should no longer be used',
'description':
'Most nsIWebBrowserPersist methods have been '
'superseded by simpler methods in Downloads.jsm, namely '
'`Downloads.fetch` and `Downloads.createDownload`. See '
'http://mzl.la/downloads-jsm for more information.',
}
@Interfaces.hook
class nsIWebBrowserPersist(Hook):
saveChannel = {'on_call': WEBBROWSERPERSIST_DEPRECATED}
savePrivacyAwareURI = {'on_call': WEBBROWSERPERSIST_DEPRECATED}
@Hook.on_call
def saveURI(this, args, callee):
"""nsIWebBrowserPersist.saveURI requires a valid privacy context as
of Firefox 19."""
if len(args) >= 7:
load_context = args[6]
if load_context.as_primitive() is None:
this.traverser.warning(
err_id=('testcases_javascript_call_definititions',
'webbrowserpersist_saveuri'),
warning=('saveURI should not be called with a null load '
'context'),
description=(
'While nsIWebBrowserPersist.saveURI accepts null '
'in place of a privacy context, this usage is '
'acceptable only when no appropriate load '
'context exists.'))
return WEBBROWSERPERSIST_DEPRECATED
# nsITransferable
@Interfaces.hook
class nsITransferable(Hook):
@Hook.on_call
def init(this, args, callee):
if args and not args[0].as_primitive():
this.traverser.warning(
err_id=('js_entity_values', 'nsITransferable', 'init'),
warning=(
'`nsITransferable.init` should not be called with `null` '
'as its first argument'),
description=(
'Calling `nsITransferable.init()` with a null first '
'argument has the potential to leak data across '
'private browsing mode sessions. `null` is '
'appropriate only when reading data or writing data '
'which is not associated with a particular window.'))
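# Illustrative add-on JavaScript the hook above would warn about:
#   transferable.init(null);  // flagged: a null load context may leak private-browsing data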
|
from .. utils import TranspileTestCase, UnaryOperationTestCase, BinaryOperationTestCase, InplaceOperationTestCase
from unittest import expectedFailure
class DictTests(TranspileTestCase):
def test_setattr(self):
self.assertCodeExecution("""
x = {}
try:
x.attr = 42
except AttributeError as err:
print(err)
""")
def test_getattr(self):
self.assertCodeExecution("""
x = {}
try:
print(x.attr)
except AttributeError as err:
print(err)
""")
def test_creation(self):
# Empty dict
self.assertCodeExecution("""
x = {}
print(x)
""")
self.assertCodeExecution("""
x = {'a': 1}
print(x)
""")
def test_getitem(self):
# Simple existent key
self.assertCodeExecution("""
y = 37
x = {'a': 1, 'b': 2, 'c': y}
print('a' in x)
print('a' not in x)
print(x['a'])
""")
# Simple non-existent key
self.assertCodeExecution("""
x = {'a': 1, 'b': 2}
print('c' in x)
print('c' not in x)
try:
print(x['c'])
except KeyError as err:
print(err)
""")
def test_clear(self):
# Clear a dictionary
self.assertCodeExecution("""
x = {'a': 1, 'b': 2}
print('a' in x)
print(x.clear())
print('a' not in x)
print(x)
""")
# Clear an already empty dict
self.assertCodeExecution("""
x = {}
print('a' not in x)
print(x.clear())
print('a' not in x)
print(x)
""")
def test_builtin_constructor(self):
# Construct a dictionary using the dict builtin
self.assertCodeExecution("""
x = dict()
print(x)
print('a' in x)
# List of tuples
x = dict([('a', 1), ('b', 2)])
print('a' in x)
print(x['a'])
print('c' in x)
# List of lists
x = dict([['a', 3], ['b', 4]])
print('a' in x)
print(x['a'])
print('c' in x)
# Tuple of lists
x = dict((['a', 5], ['b', 6]))
print('a' in x)
print(x['a'])
print('c' in x)
# Tuple of tuples
x = dict((('a', 5), ('b', 6)))
print('a' in x)
print(x['a'])
print('c' in x)
# Test __contains__ throws unhashable exception
try:
print([] in x)
except TypeError as err:
print(err)
try:
print([] not in x)
except TypeError as err:
print(err)
""")
def test_builtin_constructor_kwargs(self):
self.assertCodeExecution("""
d = dict(a=1, b=2)
print('a' in d)
print('b' in d)
print('c' not in d)
print(d['b'])
d = dict(d, b=3)
print('a' in d)
print('b' in d)
print('c' in d)
print(d['b'])
""")
def test_builtin_non_2_tuples(self):
# One of the elements isn't a 2-tuple
self.assertCodeExecution("""
try:
x = dict([('a', 1), ('b', 2, False)])
except ValueError as err:
print(err)
""")
def test_builtin_non_sequence(self):
# One of the elements isn't a sequence
self.assertCodeExecution("""
try:
x = dict([('a', 1), False, ('b', 2)])
except TypeError as err:
print(err)
""")
def test_method_pop(self):
self.assertCodeExecution("""
x = {1: 2, 3: 4, 5: 6}
print(x.pop(1))
print(x.pop(3, 37))
try:
print(x.pop(7))
except KeyError as e:
print("Dict doesn't contain 7")
print(x.pop(7, 42))
print("Done")
""")
def test_method_popitem(self):
self.assertCodeExecution("""
ITEMS = [(1, 2), (3, ("4", 5))]
x = dict(ITEMS)
popped_1 = x.popitem()
print(popped_1 in ITEMS)
popped_2 = x.popitem()
print(popped_2 in ITEMS and popped_2 != popped_1)
# Check for exception
try:
print(x.popitem())
except KeyError as err:
print(err)
""")
def test_method_setdefault(self):
self.assertCodeExecution("""
x = {42: 'Babel'}
print(x.setdefault(42)) # should return Babel
print(x.setdefault(1)) # should return None
print(x[1] == None) # should be True
print(x.setdefault('David', 'Gilmour')) # should return 'Gilmour'
# Check unhashable exceptions
try:
x.setdefault([], 42)
except TypeError as err:
print(err)
""")
def test_method_get(self):
self.assertCodeExecution("""
x = {1: 2}
print(x.get(1))
print(x.get(2))
print(x.get(3,4))
""")
# check for unhashable type errors
self.assertCodeExecution("""
x = {1: 2}
try:
print(x.get([]))
except TypeError as err:
print(err)
try:
print(x.get([], 1))
except TypeError as err:
print(err)
""")
def test_copy(self):
self.assertCodeExecution("""
x = {42: 'Babel'}
y = x.copy()
print(y)
print(x == y)
print(x is not y)
""")
self.assertCodeExecution("""
x = {'42': 'Babel'}
y = x.copy()
print(x['42'] is y['42'])
x['42'] = 'somevalue'
print(x['42'] == y['42'])
print(x == y)
""")
def test_fromkeys(self):
self.assertCodeExecution("""
keys = [1, 2]
print(dict.fromkeys(keys))
""")
# non-iterable error test
self.assertCodeExecution("""
keys = 1
value = 2
try:
print(dict.fromkeys(keys, value))
except TypeError as err:
print(err)
""")
# empty iterable and original dict test
self.assertCodeExecution("""
keys = ()
value = 5
dt = {}
print(dt.fromkeys(keys, value))
print(dt)
""")
# non-hashable error on key test
self.assertCodeExecution("""
keys = [[1], 2]
try:
print(dict.fromkeys(keys))
except TypeError as err:
print(err)
""")
def test_update(self):
self.assertCodeExecution("""
a = {}
a.update([('a', 1), ('b', 2)])
print(sorted(a))
b = {}
b.update({'a': 1, 'b':2})
print(sorted(b))
c = {}
c.update(a=1, b=2)
print(sorted(c))
""")
self.assertCodeExecution("""
try:
a = {}
a.update([('a', 1, 2), ('b',2)])
print('An error should have been raised!')
except ValueError:
print('Received a ValueError as expected')
""")
self.assertCodeExecution("""
try:
a = {}
a.update('1')
print('An error should have been raised')
except ValueError:
print('Received a ValueError as expected')
""")
self.assertCodeExecution("""
try:
a = {}
a.update(1)
print('An error should have been raised')
except TypeError:
print('Received a TypeError as expected')
""")
self.assertCodeExecution("""
try:
a = {}
x = set([1, 2])
a.update(x)
print('An error should have been raised')
except TypeError:
print('Received a TypeError as expected')
""")
@expectedFailure
def test_fromkeys_missing_iterable(self):
self.assertCodeExecution("""
try:
print(dict.fromkeys())
except TypeError as err:
print(err)
""")
class UnaryDictOperationTests(UnaryOperationTestCase, TranspileTestCase):
data_type = 'dict'
class BinaryDictOperationTests(BinaryOperationTestCase, TranspileTestCase):
data_type = 'dict'
not_implemented = [
'test_multiply_bytearray',
'test_subscr_bytearray',
'test_subscr_class',
'test_subscr_complex',
'test_subscr_slice',
]
class InplaceDictOperationTests(InplaceOperationTestCase, TranspileTestCase):
data_type = 'dict'
|
#!/usr/bin/env python
import os
import argparse
import re
from collections import defaultdict
import sys
KNOWN_TYPES = {
"double" : "double",
"int" : "int32",
"size_t" : "uint32",
"float" : "float",
"string" : "string",
"bool" : "bool",
"complex<double>" : "double",
"pair<string, size_t>" : "TStringToUintPair",
"pair<size_t, size_t>" : "TUintToUintPair",
}
VECTOR_RE = re.compile("(?:vector|ActVector)+<(.*)>")
def generateProtos(all_structures, package, dst, imports):
for fname, structures in all_structures.iteritems():
dst_file = fname.split(".")[0] + ".proto"
with open(os.path.join(dst, dst_file), 'w') as f_ptr:
f_ptr.write("package %s;\n" % package)
f_ptr.write("\n")
for imp in imports:
f_ptr.write("import \"{}\";\n".format(imp))
f_ptr.write("\n")
for s in structures:
f_ptr.write("message %s {\n" % s['name'])
i = 1
for f in s['fields']:
if KNOWN_TYPES.get(f[0]) is None:
m = VECTOR_RE.match(f[0])
if m is None:
raise Exception("Can't match {}".format(f[0]))
f_ptr.write(" repeated %s %s = %s;\n" % (KNOWN_TYPES[ m.group(1) ], f[1], str(i)))
if m.group(1).startswith("complex"):
f_ptr.write(" repeated %s %s = %s;\n" % (KNOWN_TYPES[ m.group(1) ], f[1] + "_imag", str(i+1)))
i += 1
else:
f_ptr.write(" required %s %s = %s;\n" % (KNOWN_TYPES[ f[0] ], f[1], str(i)))
i += 1
f_ptr.write("}\n")
f_ptr.write("\n")
def parseSources(src):
structures = defaultdict(list)
for root, dirs, files in os.walk(src):
for f in files:
af = os.path.join(root, f)
generate_proto = False
if af.endswith(".cpp") or af.endswith(".h"):
for l in open(af):
l = l.strip()
l = l.split("//")[0]
if "@GENERATE_PROTO@" in l:
generate_proto = True
struct = {}
curly_counter = 0
continue
if generate_proto:
curly_counter += l.count("{")
curly_counter -= l.count("}")
if len(struct) == 0:
m = re.match("[\W]*(?:class|struct)[\W]+([^ ]+)", l)
if not m:
raise Exception("Can't parse GENERATE_PROTO class or struct")
struct['name'] = m.group(1)
struct['fields'] = []
else:
m = re.match(
"(%s)[\W]+(?!__)([^ ]*);[\W]*$" % "|".join(
KNOWN_TYPES.keys() + [ "(?:vector|ActVector)+<{}>".format(t) for t in KNOWN_TYPES.keys() ]
),
l
)
if m and curly_counter == 1:
struct['fields'].append( (m.group(1), m.group(2)) )
continue
if len(struct) > 0 and curly_counter == 0:
generate_proto = False
structures[f].append(struct)
return structures
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--source-path", help="Path to the sources",
type=str, required=True)
parser.add_argument("-d", "--dest-path", help="Path where to store .proto",
type=str, required=True)
parser.add_argument("-p", "--package", help="Package name, default : %(default)s",
type=str, required=False, default="Protos")
parser.add_argument("-i", "--imports", help="Put imports to all messages (separated by ;)",
type=str, required=False, default=None)
args = parser.parse_args()
structures = parseSources(args.source_path)
imports = []
if args.imports:
imports = [ v.strip() for v in args.imports.split(";") if v.strip() ]
generateProtos(structures, args.package, args.dest_path, imports)
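# Illustrative sketch (assumed input; not produced by running this file): a C++
# header marked for generation, e.g.
#
#   /*@GENERATE_PROTO@*/
#   struct Stat {
#       double mean;
#       vector<size_t> counts;
#   };
#
# would be emitted (with the default --package Protos) roughly as
#
#   package Protos;
#
#   message Stat {
#       required double mean = 1;
#       repeated uint32 counts = 2;
#   }
#
# Scalars from KNOWN_TYPES become `required`, vector/ActVector fields become
# `repeated`, and complex vectors additionally get a `<name>_imag` repeated field.
# The marker is placed in a /* */ comment because text after `//` is stripped
# before the marker check in parseSources.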
|
#!/usr/bin/env python
#---- licence header
###############################################################################
## file : setup.py
##
## description : This file provides Python access to the Pylon SDK.
##
## project : python-pylon
##
## author(s) : S.Blanch-Torn\'e
##
## Copyright (C) : 2015
## CELLS / ALBA Synchrotron,
## 08290 Bellaterra,
## Spain
##
## This file is part of python-pylon.
##
## python-pylon is free software: you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## python-pylon is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with python-pylon. If not, see <http://www.gnu.org/licenses/>.
##
###############################################################################
#import pyximport; pyximport.install()
from pylon.version import version_python_pylon_string
from Cython.Distutils import build_ext
from distutils.core import setup
from distutils.extension import Extension
pylonExtension = Extension('pylon',['pylon/__init__.pyx',
'pylon/Logger.cpp',
'pylon/Factory.cpp',
'pylon/DevInfo.cpp',
'pylon/Camera.cpp',
'pylon/TransportLayer.cpp',
'pylon/GenApiWrap/INode.cpp',
'pylon/GenApiWrap/ICategory.cpp',
'pylon/GenApiWrap/IEnumeration.cpp',
'pylon/PyCallback.cpp'],
language="c++",
extra_compile_args=[#"-static",
#"-fPIC",
#"-std=c++11",
]
)
#FIXME: check how to determine whether C++11 is available to be used
setup(name = 'pylon',
license = "LGPLv3+",
description = "Cython module to provide access to Pylon's SDK.",
version = version_python_pylon_string(),
author = "Sergi Blanch-Torn\'e",
author_email = "[email protected]",
classifiers = ['Development Status :: 1 - Planning',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: '\
'GNU Lesser General Public License v3 or later (LGPLv3+)',
'Operating System :: POSIX',
'Programming Language :: Cython',
'Programming Language :: Python',
'Topic :: Scientific/Engineering :: '\
'Interface Engine/Protocol Translator',
'Topic :: Software Development :: Embedded Systems',
'Topic :: Software Development :: Libraries :: '\
'Python Modules',
'Topic :: Multimedia :: Graphics :: Capture',
'Topic :: Multimedia :: Video :: Capture',
''],
url="https://github.com/srgblnch/python-pylon",
cmdclass = {'build_ext': build_ext},
ext_modules=[pylonExtension],
#install_requires=['cython>=0.20.1'],
)
#for the classifiers review see:
#https://pypi.python.org/pypi?%3Aaction=list_classifiers
#
#Development Status :: 1 - Planning
#Development Status :: 2 - Pre-Alpha
#Development Status :: 3 - Alpha
#Development Status :: 4 - Beta
#Development Status :: 5 - Production/Stable
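#A typical build sketch (assumes Cython and the Pylon SDK headers/libraries are
#available on the build machine):
#
#    python setup.py build_ext --inplace
#    python setup.py install
#
#build_ext is routed through Cython.Distutils.build_ext (see cmdclass above),
#so the .pyx source is cythonized before the C++ extension is compiled.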
|
import ast
from unittest import (
TestCase,
)
from darglint.analysis.return_visitor import (
ReturnVisitor,
)
from .utils import (
reindent,
)
class ReturnsVisitorTests(TestCase):
def assertFound(self, program):
"""Assert that the return was found.
Args:
program: The program to run the analysis on.
Returns:
The visitor, in case you want to do more analysis.
"""
function = ast.parse(reindent(program)).body[0]
visitor = ReturnVisitor()
visitor.visit(function)
self.assertTrue(visitor.returns)
return visitor
def assertNoneFound(self, program):
"""Assert that no return was found.
Args:
program: The program to run the analysis on.
Returns:
The visitor, in case you want to do more analysis.
"""
function = ast.parse(reindent(program)).body[0]
visitor = ReturnVisitor()
visitor.visit(function)
self.assertEqual(visitor.returns, [])
return visitor
def test_no_return(self):
program = r'''
def f():
pass
'''
self.assertNoneFound(program)
def test_nested_no_return(self):
program = r'''
def f():
def g():
pass
'''
self.assertNoneFound(program)
def test_simplest_function(self):
program = r'''
def f():
return 3
'''
self.assertFound(program)
def test_early_return(self):
program = r'''
def f(x):
if x < 0:
return -1
for i in range(x):
if complex_condition(x, i):
return i
'''
self.assertFound(program)
def test_conditional_return(self):
program = r'''
def f():
if MY_GLOBAL:
return 1
else:
return 2
'''
self.assertFound(program)
def test_return_in_context(self):
program = r'''
def f():
with open('/tmp/input', 'r') as fin:
return fin.readlines()
'''
self.assertFound(program)
def test_returns_none(self):
program = r'''
def f():
return
'''
visitor = self.assertFound(program)
self.assertEqual(
visitor.returns[0].value,
None,
)
def test_returns_non_none(self):
program = r'''
def f():
return 3
'''
visitor = self.assertFound(program)
self.assertTrue(
isinstance(visitor.returns[0].value, ast.AST),
)
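# A minimal usage sketch mirroring the helpers above (the program source is
# illustrative):
#
#     import ast
#     from darglint.analysis.return_visitor import ReturnVisitor
#
#     function = ast.parse('def f():\n    return 3\n').body[0]
#     visitor = ReturnVisitor()
#     visitor.visit(function)
#     assert len(visitor.returns) == 1             # one ast.Return node collected
#     assert visitor.returns[0].value is not None  # `return 3` carries a value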
|
#K-Means++ Clustering with Gap Statistic to determine the optimal number of clusters
import sys
import numpy as np
import scipy.io as sio
#import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.svm import SVC
filename = sys.argv[1]
datafile = sio.loadmat(filename)
data = datafile['bow']
sizedata=[len(data), len(data[0])]
disp = []
optimal_ks = []
#Determine the optimal number of clusters k with the gap statistic method
def gap_statistic(data):
sizedata = [len(data),len(data[0])]
SD = []
gap = []
for knum in xrange(1,20):
#Assumes the number of clusters in the data won't exceed 20; adjust the range accordingly if needed
print knum
#Clustering original Data
kmeanspp = KMeans(n_clusters=knum,init = 'k-means++',max_iter = 100,n_jobs = 1)
kmeanspp.fit(data)
dispersion = kmeanspp.inertia_
#Clustering Reference Data
nrefs = 10
refDisp = np.zeros(nrefs)
for nref in xrange(nrefs):
refdata = np.random.random_sample(tuple(sizedata))
refkmeans = KMeans(n_clusters=knum,init='k-means++',max_iter=100,n_jobs=1)
refkmeans.fit(refdata)
refdisp = refkmeans.inertia_
refDisp[nref]=np.log(refdisp)
mean_log_refdisp = np.mean(refDisp)
gap.append(mean_log_refdisp-np.log(dispersion))
sd = (sum([(r-m)**2 for r,m in zip(refDisp,[mean_log_refdisp]*nrefs)])/nrefs)**0.5
SD.append(sd)
    SD = [sd*((1+(1.0/nrefs))**0.5) for sd in SD]  #sqrt(1 + 1/B) correction; 1.0 avoids integer division under Python 2
    opt_k = None
    for i in xrange(len(gap)-1):
        diff = SD[i+1] - (gap[i+1] - gap[i])
        if diff > 0:
            #gap[i] corresponds to knum = i+1, since knum starts at 1 above
            opt_k = i + 1
            break
    if opt_k is not None and opt_k < 20:
        #print opt_k
        return opt_k
    else:
        return 20
        #Returning 20 if no k satisfied the criterion or opt_k reached 20, as I wanted not to search more than 20.
        # Not required if range is larger.
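#The criterion above follows Tibshirani et al.'s gap statistic:
#    Gap(k) = (1/B) * sum_b log(W*_kb) - log(W_k)
#where W_k is the within-cluster dispersion (KMeans inertia) of the data and
#W*_kb that of the b-th uniform reference sample (B = nrefs). The chosen k is
#the smallest one satisfying Gap(k) >= Gap(k+1) - s_{k+1}, with s_k the
#reference standard deviation scaled by sqrt(1 + 1/B).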
ntrials = 50
for ntrial in xrange(ntrials):
print 'ntrial: ',ntrial
optimal_ks.append(gap_statistic(data))
#For plotting the gap statistic measure
#plt.plot(np.linspace(10,19,10,True),gap)
#plt.show()
unique_opt_k = list(set(optimal_ks))
k_count = {}
count_opt_k = 0
second_opt_k = 0
opt_k = 0
for u_o_k in unique_opt_k:
count = optimal_ks.count(u_o_k)
k_count[u_o_k]=count
if count>count_opt_k:
count_opt_k = count
opt_k = u_o_k
elif count==count_opt_k:
second_opt_k = u_o_k
print opt_k
print k_count
#Clustering with the optimal number of clusters k
kmeanspp = KMeans(n_clusters = opt_k,init='k-means++',max_iter=100,n_jobs=1)
kmeanspp.fit(data)
centers = kmeanspp.cluster_centers_
clusterlabels = kmeanspp.labels_
print clusterlabels
mdict = {}
mdict['clusterlabels'] = clusterlabels
sio.savemat('clusterlabels.mat',mdict,format = '4',oned_as = 'column')
print 'dan dana dan done...'
|
# Copyright (c) 2018-2021 Vector 35 Inc
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import ctypes
import struct
# Binary Ninja components
import binaryninja
from binaryninja import _binaryninjacore as core
from binaryninja.enums import MediumLevelILOperation, InstructionTextTokenType, ILBranchDependence, DataFlowQueryOption
from binaryninja import basicblock #required for MediumLevelILBasicBlock argument
from binaryninja import function
from binaryninja import types
from binaryninja import lowlevelil
# 2-3 compatibility
from binaryninja import range
class SSAVariable(object):
def __init__(self, var, version):
self._var = var
self._version = version
def __repr__(self):
return "<ssa %s version %d>" % (repr(self._var), self._version)
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return (self._var, self._version) == (other.var, other.version)
def __ne__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return not (self == other)
def __hash__(self):
return hash((self._var, self._version))
@property
def var(self):
""" """
return self._var
@var.setter
def var(self, value):
self._var = value
@property
def version(self):
""" """
return self._version
@version.setter
def version(self, value):
self._version = value
class MediumLevelILLabel(object):
def __init__(self, handle = None):
if handle is None:
self.handle = (core.BNMediumLevelILLabel * 1)()
core.BNMediumLevelILInitLabel(self.handle)
else:
self.handle = handle
class MediumLevelILOperationAndSize(object):
def __init__(self, operation, size):
self._operation = operation
self._size = size
def __repr__(self):
if self._size == 0:
return "<%s>" % self._operation.name
return "<%s %d>" % (self._operation.name, self._size)
def __eq__(self, other):
if isinstance(other, MediumLevelILOperation):
return other == self._operation
if isinstance(other, self.__class__):
return (other.size, other.operation) == (self._size, self._operation)
return NotImplemented
def __ne__(self, other):
if isinstance(other, MediumLevelILOperation) or isinstance(other, self.__class__):
return not (self == other)
return NotImplemented
def __hash__(self):
return hash((self._operation, self._size))
@property
def operation(self):
""" """
return self._operation
@property
def size(self):
""" """
return self._size
class MediumLevelILInstruction(object):
"""
``class MediumLevelILInstruction`` Medium Level Intermediate Language Instructions are infinite length tree-based
instructions. Tree-based instructions use infix notation with the left hand operand being the destination operand.
Infix notation is thus more natural to read than other notations (e.g. x86 ``mov eax, 0`` vs. MLIL ``eax = 0``).
"""
ILOperations = {
MediumLevelILOperation.MLIL_NOP: [],
MediumLevelILOperation.MLIL_SET_VAR: [("dest", "var"), ("src", "expr")],
MediumLevelILOperation.MLIL_SET_VAR_FIELD: [("dest", "var"), ("offset", "int"), ("src", "expr")],
MediumLevelILOperation.MLIL_SET_VAR_SPLIT: [("high", "var"), ("low", "var"), ("src", "expr")],
MediumLevelILOperation.MLIL_LOAD: [("src", "expr")],
MediumLevelILOperation.MLIL_LOAD_STRUCT: [("src", "expr"), ("offset", "int")],
MediumLevelILOperation.MLIL_STORE: [("dest", "expr"), ("src", "expr")],
MediumLevelILOperation.MLIL_STORE_STRUCT: [("dest", "expr"), ("offset", "int"), ("src", "expr")],
MediumLevelILOperation.MLIL_VAR: [("src", "var")],
MediumLevelILOperation.MLIL_VAR_FIELD: [("src", "var"), ("offset", "int")],
MediumLevelILOperation.MLIL_VAR_SPLIT: [("high", "var"), ("low", "var")],
MediumLevelILOperation.MLIL_ADDRESS_OF: [("src", "var")],
MediumLevelILOperation.MLIL_ADDRESS_OF_FIELD: [("src", "var"), ("offset", "int")],
MediumLevelILOperation.MLIL_CONST: [("constant", "int")],
MediumLevelILOperation.MLIL_CONST_PTR: [("constant", "int")],
MediumLevelILOperation.MLIL_EXTERN_PTR: [("constant", "int"), ("offset", "int")],
MediumLevelILOperation.MLIL_FLOAT_CONST: [("constant", "float")],
MediumLevelILOperation.MLIL_IMPORT: [("constant", "int")],
MediumLevelILOperation.MLIL_ADD: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_ADC: [("left", "expr"), ("right", "expr"), ("carry", "expr")],
MediumLevelILOperation.MLIL_SUB: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_SBB: [("left", "expr"), ("right", "expr"), ("carry", "expr")],
MediumLevelILOperation.MLIL_AND: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_OR: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_XOR: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_LSL: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_LSR: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_ASR: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_ROL: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_RLC: [("left", "expr"), ("right", "expr"), ("carry", "expr")],
MediumLevelILOperation.MLIL_ROR: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_RRC: [("left", "expr"), ("right", "expr"), ("carry", "expr")],
MediumLevelILOperation.MLIL_MUL: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_MULU_DP: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_MULS_DP: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_DIVU: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_DIVU_DP: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_DIVS: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_DIVS_DP: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_MODU: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_MODU_DP: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_MODS: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_MODS_DP: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_NEG: [("src", "expr")],
MediumLevelILOperation.MLIL_NOT: [("src", "expr")],
MediumLevelILOperation.MLIL_SX: [("src", "expr")],
MediumLevelILOperation.MLIL_ZX: [("src", "expr")],
MediumLevelILOperation.MLIL_LOW_PART: [("src", "expr")],
MediumLevelILOperation.MLIL_JUMP: [("dest", "expr")],
MediumLevelILOperation.MLIL_JUMP_TO: [("dest", "expr"), ("targets", "target_map")],
MediumLevelILOperation.MLIL_RET_HINT: [("dest", "expr")],
MediumLevelILOperation.MLIL_CALL: [("output", "var_list"), ("dest", "expr"), ("params", "expr_list")],
MediumLevelILOperation.MLIL_CALL_UNTYPED: [("output", "expr"), ("dest", "expr"), ("params", "expr"), ("stack", "expr")],
MediumLevelILOperation.MLIL_CALL_OUTPUT: [("dest", "var_list")],
MediumLevelILOperation.MLIL_CALL_PARAM: [("src", "var_list")],
MediumLevelILOperation.MLIL_RET: [("src", "expr_list")],
MediumLevelILOperation.MLIL_NORET: [],
MediumLevelILOperation.MLIL_IF: [("condition", "expr"), ("true", "int"), ("false", "int")],
MediumLevelILOperation.MLIL_GOTO: [("dest", "int")],
MediumLevelILOperation.MLIL_CMP_E: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_CMP_NE: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_CMP_SLT: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_CMP_ULT: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_CMP_SLE: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_CMP_ULE: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_CMP_SGE: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_CMP_UGE: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_CMP_SGT: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_CMP_UGT: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_TEST_BIT: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_BOOL_TO_INT: [("src", "expr")],
MediumLevelILOperation.MLIL_ADD_OVERFLOW: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_SYSCALL: [("output", "var_list"), ("params", "expr_list")],
MediumLevelILOperation.MLIL_SYSCALL_UNTYPED: [("output", "expr"), ("params", "expr"), ("stack", "expr")],
MediumLevelILOperation.MLIL_TAILCALL: [("output", "var_list"), ("dest", "expr"), ("params", "expr_list")],
MediumLevelILOperation.MLIL_TAILCALL_UNTYPED: [("output", "expr"), ("dest", "expr"), ("params", "expr"), ("stack", "expr")],
MediumLevelILOperation.MLIL_BP: [],
MediumLevelILOperation.MLIL_TRAP: [("vector", "int")],
MediumLevelILOperation.MLIL_INTRINSIC: [("output", "var_list"), ("intrinsic", "intrinsic"), ("params", "expr_list")],
MediumLevelILOperation.MLIL_INTRINSIC_SSA: [("output", "var_ssa_list"), ("intrinsic", "intrinsic"), ("params", "expr_list")],
MediumLevelILOperation.MLIL_FREE_VAR_SLOT: [("dest", "var")],
MediumLevelILOperation.MLIL_FREE_VAR_SLOT_SSA: [("prev", "var_ssa_dest_and_src")],
MediumLevelILOperation.MLIL_UNDEF: [],
MediumLevelILOperation.MLIL_UNIMPL: [],
MediumLevelILOperation.MLIL_UNIMPL_MEM: [("src", "expr")],
MediumLevelILOperation.MLIL_FADD: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_FSUB: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_FMUL: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_FDIV: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_FSQRT: [("src", "expr")],
MediumLevelILOperation.MLIL_FNEG: [("src", "expr")],
MediumLevelILOperation.MLIL_FABS: [("src", "expr")],
MediumLevelILOperation.MLIL_FLOAT_TO_INT: [("src", "expr")],
MediumLevelILOperation.MLIL_INT_TO_FLOAT: [("src", "expr")],
MediumLevelILOperation.MLIL_FLOAT_CONV: [("src", "expr")],
MediumLevelILOperation.MLIL_ROUND_TO_INT: [("src", "expr")],
MediumLevelILOperation.MLIL_FLOOR: [("src", "expr")],
MediumLevelILOperation.MLIL_CEIL: [("src", "expr")],
MediumLevelILOperation.MLIL_FTRUNC: [("src", "expr")],
MediumLevelILOperation.MLIL_FCMP_E: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_FCMP_NE: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_FCMP_LT: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_FCMP_LE: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_FCMP_GE: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_FCMP_GT: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_FCMP_O: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_FCMP_UO: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_SET_VAR_SSA: [("dest", "var_ssa"), ("src", "expr")],
MediumLevelILOperation.MLIL_SET_VAR_SSA_FIELD: [("prev", "var_ssa_dest_and_src"), ("offset", "int"), ("src", "expr")],
MediumLevelILOperation.MLIL_SET_VAR_SPLIT_SSA: [("high", "var_ssa"), ("low", "var_ssa"), ("src", "expr")],
MediumLevelILOperation.MLIL_SET_VAR_ALIASED: [("prev", "var_ssa_dest_and_src"), ("src", "expr")],
MediumLevelILOperation.MLIL_SET_VAR_ALIASED_FIELD: [("prev", "var_ssa_dest_and_src"), ("offset", "int"), ("src", "expr")],
MediumLevelILOperation.MLIL_VAR_SSA: [("src", "var_ssa")],
MediumLevelILOperation.MLIL_VAR_SSA_FIELD: [("src", "var_ssa"), ("offset", "int")],
MediumLevelILOperation.MLIL_VAR_ALIASED: [("src", "var_ssa")],
MediumLevelILOperation.MLIL_VAR_ALIASED_FIELD: [("src", "var_ssa"), ("offset", "int")],
MediumLevelILOperation.MLIL_VAR_SPLIT_SSA: [("high", "var_ssa"), ("low", "var_ssa")],
MediumLevelILOperation.MLIL_CALL_SSA: [("output", "expr"), ("dest", "expr"), ("params", "expr_list"), ("src_memory", "int")],
MediumLevelILOperation.MLIL_CALL_UNTYPED_SSA: [("output", "expr"), ("dest", "expr"), ("params", "expr"), ("stack", "expr")],
MediumLevelILOperation.MLIL_SYSCALL_SSA: [("output", "expr"), ("params", "expr_list"), ("src_memory", "int")],
MediumLevelILOperation.MLIL_SYSCALL_UNTYPED_SSA: [("output", "expr"), ("params", "expr"), ("stack", "expr")],
MediumLevelILOperation.MLIL_TAILCALL_SSA: [("output", "expr"), ("dest", "expr"), ("params", "expr_list"), ("src_memory", "int")],
MediumLevelILOperation.MLIL_TAILCALL_UNTYPED_SSA: [("output", "expr"), ("dest", "expr"), ("params", "expr"), ("stack", "expr")],
MediumLevelILOperation.MLIL_CALL_OUTPUT_SSA: [("dest_memory", "int"), ("dest", "var_ssa_list")],
MediumLevelILOperation.MLIL_CALL_PARAM_SSA: [("src_memory", "int"), ("src", "var_ssa_list")],
MediumLevelILOperation.MLIL_LOAD_SSA: [("src", "expr"), ("src_memory", "int")],
MediumLevelILOperation.MLIL_LOAD_STRUCT_SSA: [("src", "expr"), ("offset", "int"), ("src_memory", "int")],
MediumLevelILOperation.MLIL_STORE_SSA: [("dest", "expr"), ("dest_memory", "int"), ("src_memory", "int"), ("src", "expr")],
MediumLevelILOperation.MLIL_STORE_STRUCT_SSA: [("dest", "expr"), ("offset", "int"), ("dest_memory", "int"), ("src_memory", "int"), ("src", "expr")],
MediumLevelILOperation.MLIL_VAR_PHI: [("dest", "var_ssa"), ("src", "var_ssa_list")],
MediumLevelILOperation.MLIL_MEM_PHI: [("dest_memory", "int"), ("src_memory", "int_list")]
}
def __init__(self, func, expr_index, instr_index=None):
instr = core.BNGetMediumLevelILByIndex(func.handle, expr_index)
self._function = func
self._expr_index = expr_index
if instr_index is None:
self._instr_index = core.BNGetMediumLevelILInstructionForExpr(func.handle, expr_index)
else:
self._instr_index = instr_index
self._operation = MediumLevelILOperation(instr.operation)
self._size = instr.size
self._address = instr.address
self._source_operand = instr.sourceOperand
operands = MediumLevelILInstruction.ILOperations[instr.operation]
self._operands = []
i = 0
for operand in operands:
name, operand_type = operand
if operand_type == "int":
value = instr.operands[i]
value = (value & ((1 << 63) - 1)) - (value & (1 << 63))
elif operand_type == "float":
if instr.size == 4:
value = struct.unpack("f", struct.pack("I", instr.operands[i] & 0xffffffff))[0]
elif instr.size == 8:
value = struct.unpack("d", struct.pack("Q", instr.operands[i]))[0]
else:
value = instr.operands[i]
elif operand_type == "expr":
value = MediumLevelILInstruction(func, instr.operands[i])
elif operand_type == "intrinsic":
value = lowlevelil.ILIntrinsic(func.arch, instr.operands[i])
elif operand_type == "var":
value = function.Variable.from_identifier(self._function.source_function, instr.operands[i])
elif operand_type == "var_ssa":
var = function.Variable.from_identifier(self._function.source_function, instr.operands[i])
version = instr.operands[i + 1]
i += 1
value = SSAVariable(var, version)
elif operand_type == "var_ssa_dest_and_src":
var = function.Variable.from_identifier(self._function.source_function, instr.operands[i])
dest_version = instr.operands[i + 1]
src_version = instr.operands[i + 2]
i += 2
self._operands.append(SSAVariable(var, dest_version))
#TODO: documentation for dest
self.dest = SSAVariable(var, dest_version)
value = SSAVariable(var, src_version)
elif operand_type == "int_list":
count = ctypes.c_ulonglong()
operand_list = core.BNMediumLevelILGetOperandList(func.handle, self._expr_index, i, count)
value = []
for j in range(count.value):
value.append(operand_list[j])
core.BNMediumLevelILFreeOperandList(operand_list)
elif operand_type == "var_list":
count = ctypes.c_ulonglong()
operand_list = core.BNMediumLevelILGetOperandList(func.handle, self._expr_index, i, count)
i += 1
value = []
for j in range(count.value):
value.append(function.Variable.from_identifier(self._function.source_function, operand_list[j]))
core.BNMediumLevelILFreeOperandList(operand_list)
elif operand_type == "var_ssa_list":
count = ctypes.c_ulonglong()
operand_list = core.BNMediumLevelILGetOperandList(func.handle, self._expr_index, i, count)
i += 1
value = []
for j in range(count.value // 2):
var_id = operand_list[j * 2]
var_version = operand_list[(j * 2) + 1]
value.append(SSAVariable(function.Variable.from_identifier(self._function.source_function,
var_id), var_version))
core.BNMediumLevelILFreeOperandList(operand_list)
elif operand_type == "expr_list":
count = ctypes.c_ulonglong()
operand_list = core.BNMediumLevelILGetOperandList(func.handle, self._expr_index, i, count)
i += 1
value = []
for j in range(count.value):
value.append(MediumLevelILInstruction(func, operand_list[j]))
core.BNMediumLevelILFreeOperandList(operand_list)
elif operand_type == "target_map":
count = ctypes.c_ulonglong()
operand_list = core.BNMediumLevelILGetOperandList(func.handle, self._expr_index, i, count)
i += 1
value = {}
for j in range(count.value // 2):
key = operand_list[j * 2]
target = operand_list[(j * 2) + 1]
value[key] = target
core.BNMediumLevelILFreeOperandList(operand_list)
self._operands.append(value)
self.__dict__[name] = value
i += 1
def __str__(self):
tokens = self.tokens
if tokens is None:
return "invalid"
result = ""
for token in tokens:
result += token.text
return result
def __repr__(self):
return "<il: %s>" % str(self)
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return self._function == other.function and self._expr_index == other.expr_index
def __lt__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return self._function == other.function and self.expr_index < other.expr_index
def __le__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return self._function == other.function and self.expr_index <= other.expr_index
def __gt__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return self._function == other.function and self.expr_index > other.expr_index
def __ge__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return self._function == other.function and self.expr_index >= other.expr_index
def __hash__(self):
return hash((self._instr_index, self._function))
@property
def tokens(self):
"""MLIL tokens (read-only)"""
count = ctypes.c_ulonglong()
tokens = ctypes.POINTER(core.BNInstructionTextToken)()
if ((self._instr_index is not None) and (self._function.source_function is not None) and
(self._expr_index == core.BNGetMediumLevelILIndexForInstruction(self._function.handle, self._instr_index))):
if not core.BNGetMediumLevelILInstructionText(self._function.handle, self._function.source_function.handle,
self._function.arch.handle, self._instr_index, tokens, count):
return None
else:
if not core.BNGetMediumLevelILExprText(self._function.handle, self._function.arch.handle,
self._expr_index, tokens, count):
return None
result = binaryninja.function.InstructionTextToken.get_instruction_lines(tokens, count.value)
core.BNFreeInstructionText(tokens, count.value)
return result
@property
def il_basic_block(self):
"""IL basic block object containing this expression (read-only) (only available on finalized functions)"""
return MediumLevelILBasicBlock(self._function.source_function.view, core.BNGetMediumLevelILBasicBlockForInstruction(self._function.handle, self._instr_index), self._function)
@property
def ssa_form(self):
"""SSA form of expression (read-only)"""
return MediumLevelILInstruction(self._function.ssa_form,
core.BNGetMediumLevelILSSAExprIndex(self._function.handle, self._expr_index))
@property
def non_ssa_form(self):
"""Non-SSA form of expression (read-only)"""
return MediumLevelILInstruction(self._function.non_ssa_form,
core.BNGetMediumLevelILNonSSAExprIndex(self._function.handle, self._expr_index))
@property
def value(self):
"""Value of expression if constant or a known value (read-only)"""
value = core.BNGetMediumLevelILExprValue(self._function.handle, self._expr_index)
result = function.RegisterValue(self._function.arch, value)
return result
@property
def possible_values(self):
"""Possible values of expression using path-sensitive static data flow analysis (read-only)"""
value = core.BNGetMediumLevelILPossibleExprValues(self._function.handle, self._expr_index, None, 0)
result = function.PossibleValueSet(self._function.arch, value)
core.BNFreePossibleValueSet(value)
return result
@property
def branch_dependence(self):
"""Set of branching instructions that must take the true or false path to reach this instruction"""
count = ctypes.c_ulonglong()
deps = core.BNGetAllMediumLevelILBranchDependence(self._function.handle, self._instr_index, count)
result = {}
for i in range(0, count.value):
result[deps[i].branch] = ILBranchDependence(deps[i].dependence)
core.BNFreeILBranchDependenceList(deps)
return result
@property
def low_level_il(self):
"""Low level IL form of this expression"""
expr = self._function.get_low_level_il_expr_index(self._expr_index)
if expr is None:
return None
return lowlevelil.LowLevelILInstruction(self._function.low_level_il.ssa_form, expr)
@property
def llil(self):
"""Alias for low_level_il"""
return self.low_level_il
@property
def llils(self):
exprs = self._function.get_low_level_il_expr_indexes(self.expr_index)
result = []
for expr in exprs:
result.append(lowlevelil.LowLevelILInstruction(self._function.low_level_il.ssa_form, expr))
return result
@property
def high_level_il(self):
"""High level IL form of this expression"""
expr = self._function.get_high_level_il_expr_index(self._expr_index)
if expr is None:
return None
return binaryninja.highlevelil.HighLevelILInstruction(self._function.high_level_il, expr)
@property
def hlil(self):
"""Alias for high_level_il"""
return self.high_level_il
@property
def hlils(self):
exprs = self._function.get_high_level_il_expr_indexes(self.expr_index)
result = []
for expr in exprs:
result.append(binaryninja.highlevelil.HighLevelILInstruction(self._function.high_level_il, expr))
return result
@property
def ssa_memory_version(self):
"""Version of active memory contents in SSA form for this instruction"""
return core.BNGetMediumLevelILSSAMemoryVersionAtILInstruction(self._function.handle, self._instr_index)
@property
def prefix_operands(self):
"""All operands in the expression tree in prefix order"""
result = [MediumLevelILOperationAndSize(self._operation, self._size)]
for operand in self._operands:
if isinstance(operand, MediumLevelILInstruction):
result += operand.prefix_operands
else:
result.append(operand)
return result
@property
def postfix_operands(self):
"""All operands in the expression tree in postfix order"""
result = []
for operand in self._operands:
if isinstance(operand, MediumLevelILInstruction):
result += operand.postfix_operands
else:
result.append(operand)
result.append(MediumLevelILOperationAndSize(self._operation, self._size))
return result
@property
def vars_written(self):
"""List of variables written by instruction"""
if self._operation in [MediumLevelILOperation.MLIL_SET_VAR, MediumLevelILOperation.MLIL_SET_VAR_FIELD,
MediumLevelILOperation.MLIL_SET_VAR_SSA, MediumLevelILOperation.MLIL_SET_VAR_SSA_FIELD,
MediumLevelILOperation.MLIL_SET_VAR_ALIASED, MediumLevelILOperation.MLIL_SET_VAR_ALIASED_FIELD,
MediumLevelILOperation.MLIL_VAR_PHI]:
return [self.dest]
elif self._operation in [MediumLevelILOperation.MLIL_SET_VAR_SPLIT, MediumLevelILOperation.MLIL_SET_VAR_SPLIT_SSA]:
return [self.high, self.low]
elif self._operation in [MediumLevelILOperation.MLIL_CALL, MediumLevelILOperation.MLIL_SYSCALL, MediumLevelILOperation.MLIL_TAILCALL]:
return self.output
elif self._operation in [MediumLevelILOperation.MLIL_CALL_UNTYPED, MediumLevelILOperation.MLIL_SYSCALL_UNTYPED, MediumLevelILOperation.MLIL_TAILCALL_UNTYPED,
MediumLevelILOperation.MLIL_CALL_SSA, MediumLevelILOperation.MLIL_CALL_UNTYPED_SSA,
MediumLevelILOperation.MLIL_SYSCALL_SSA, MediumLevelILOperation.MLIL_SYSCALL_UNTYPED_SSA,
MediumLevelILOperation.MLIL_TAILCALL_SSA, MediumLevelILOperation.MLIL_TAILCALL_UNTYPED_SSA]:
return self.output.vars_written
elif self._operation in [MediumLevelILOperation.MLIL_CALL_OUTPUT, MediumLevelILOperation.MLIL_CALL_OUTPUT_SSA]:
return self.dest
return []
@property
def vars_read(self):
"""List of variables read by instruction"""
if self._operation in [MediumLevelILOperation.MLIL_SET_VAR, MediumLevelILOperation.MLIL_SET_VAR_FIELD,
MediumLevelILOperation.MLIL_SET_VAR_SPLIT, MediumLevelILOperation.MLIL_SET_VAR_SSA,
MediumLevelILOperation.MLIL_SET_VAR_SPLIT_SSA, MediumLevelILOperation.MLIL_SET_VAR_ALIASED]:
return self.src.vars_read
elif self._operation in [MediumLevelILOperation.MLIL_SET_VAR_SSA_FIELD,
MediumLevelILOperation.MLIL_SET_VAR_ALIASED_FIELD]:
return [self.prev] + self.src.vars_read
elif self._operation in [MediumLevelILOperation.MLIL_CALL, MediumLevelILOperation.MLIL_SYSCALL, MediumLevelILOperation.MLIL_TAILCALL,
MediumLevelILOperation.MLIL_CALL_SSA, MediumLevelILOperation.MLIL_SYSCALL_SSA, MediumLevelILOperation.MLIL_TAILCALL_SSA]:
result = []
for param in self.params:
result += param.vars_read
return result
elif self._operation in [MediumLevelILOperation.MLIL_CALL_UNTYPED, MediumLevelILOperation.MLIL_SYSCALL_UNTYPED, MediumLevelILOperation.MLIL_TAILCALL_UNTYPED,
MediumLevelILOperation.MLIL_CALL_UNTYPED_SSA, MediumLevelILOperation.MLIL_SYSCALL_UNTYPED_SSA, MediumLevelILOperation.MLIL_TAILCALL_UNTYPED_SSA]:
return self.params.vars_read
elif self._operation in [MediumLevelILOperation.MLIL_CALL_PARAM, MediumLevelILOperation.MLIL_CALL_PARAM_SSA,
MediumLevelILOperation.MLIL_VAR_PHI]:
return self.src
elif self._operation in [MediumLevelILOperation.MLIL_CALL_OUTPUT, MediumLevelILOperation.MLIL_CALL_OUTPUT_SSA]:
return []
result = []
for operand in self._operands:
if (isinstance(operand, function.Variable)) or (isinstance(operand, SSAVariable)):
result.append(operand)
elif isinstance(operand, MediumLevelILInstruction):
result += operand.vars_read
return result
@property
def expr_type(self):
"""Type of expression"""
result = core.BNGetMediumLevelILExprType(self._function.handle, self._expr_index)
if result.type:
platform = None
if self._function.source_function:
platform = self._function.source_function.platform
return types.Type(result.type, platform = platform, confidence = result.confidence)
return None
def get_possible_values(self, options = []):
option_array = (ctypes.c_int * len(options))()
idx = 0
for option in options:
option_array[idx] = option
idx += 1
value = core.BNGetMediumLevelILPossibleExprValues(self._function.handle, self._expr_index, option_array, len(options))
result = function.PossibleValueSet(self._function.arch, value)
core.BNFreePossibleValueSet(value)
return result
def get_ssa_var_possible_values(self, ssa_var, options = []):
var_data = core.BNVariable()
var_data.type = ssa_var.var.source_type
var_data.index = ssa_var.var.index
var_data.storage = ssa_var.var.storage
option_array = (ctypes.c_int * len(options))()
idx = 0
for option in options:
option_array[idx] = option
idx += 1
value = core.BNGetMediumLevelILPossibleSSAVarValues(self._function.handle, var_data, ssa_var.version,
self._instr_index, option_array, len(options))
result = function.PossibleValueSet(self._function.arch, value)
core.BNFreePossibleValueSet(value)
return result
def get_ssa_var_version(self, var):
var_data = core.BNVariable()
var_data.type = var.source_type
var_data.index = var.index
var_data.storage = var.storage
return core.BNGetMediumLevelILSSAVarVersionAtILInstruction(self._function.handle, var_data, self._instr_index)
def get_var_for_reg(self, reg):
reg = self._function.arch.get_reg_index(reg)
result = core.BNGetMediumLevelILVariableForRegisterAtInstruction(self._function.handle, reg, self._instr_index)
return function.Variable(self._function.source_function, result.type, result.index, result.storage)
def get_var_for_flag(self, flag):
flag = self._function.arch.get_flag_index(flag)
result = core.BNGetMediumLevelILVariableForFlagAtInstruction(self._function.handle, flag, self._instr_index)
return function.Variable(self._function.source_function, result.type, result.index, result.storage)
def get_var_for_stack_location(self, offset):
result = core.BNGetMediumLevelILVariableForStackLocationAtInstruction(self._function.handle, offset, self._instr_index)
return function.Variable(self._function.source_function, result.type, result.index, result.storage)
def get_reg_value(self, reg):
reg = self._function.arch.get_reg_index(reg)
value = core.BNGetMediumLevelILRegisterValueAtInstruction(self._function.handle, reg, self._instr_index)
result = function.RegisterValue(self._function.arch, value)
return result
def get_reg_value_after(self, reg):
reg = self._function.arch.get_reg_index(reg)
value = core.BNGetMediumLevelILRegisterValueAfterInstruction(self._function.handle, reg, self._instr_index)
result = function.RegisterValue(self._function.arch, value)
return result
def get_possible_reg_values(self, reg, options = []):
reg = self._function.arch.get_reg_index(reg)
option_array = (ctypes.c_int * len(options))()
idx = 0
for option in options:
option_array[idx] = option
idx += 1
value = core.BNGetMediumLevelILPossibleRegisterValuesAtInstruction(self._function.handle, reg, self._instr_index,
option_array, len(options))
result = function.PossibleValueSet(self._function.arch, value)
core.BNFreePossibleValueSet(value)
return result
def get_possible_reg_values_after(self, reg, options = []):
reg = self._function.arch.get_reg_index(reg)
option_array = (ctypes.c_int * len(options))()
idx = 0
for option in options:
option_array[idx] = option
idx += 1
value = core.BNGetMediumLevelILPossibleRegisterValuesAfterInstruction(self._function.handle, reg, self._instr_index,
option_array, len(options))
result = function.PossibleValueSet(self._function.arch, value)
core.BNFreePossibleValueSet(value)
return result
def get_flag_value(self, flag):
flag = self._function.arch.get_flag_index(flag)
value = core.BNGetMediumLevelILFlagValueAtInstruction(self._function.handle, flag, self._instr_index)
result = function.RegisterValue(self._function.arch, value)
return result
def get_flag_value_after(self, flag):
flag = self._function.arch.get_flag_index(flag)
value = core.BNGetMediumLevelILFlagValueAfterInstruction(self._function.handle, flag, self._instr_index)
result = function.RegisterValue(self._function.arch, value)
return result
def get_possible_flag_values(self, flag, options = []):
flag = self._function.arch.get_flag_index(flag)
option_array = (ctypes.c_int * len(options))()
idx = 0
for option in options:
option_array[idx] = option
idx += 1
value = core.BNGetMediumLevelILPossibleFlagValuesAtInstruction(self._function.handle, flag, self._instr_index,
option_array, len(options))
result = function.PossibleValueSet(self._function.arch, value)
core.BNFreePossibleValueSet(value)
return result
def get_possible_flag_values_after(self, flag, options = []):
flag = self._function.arch.get_flag_index(flag)
option_array = (ctypes.c_int * len(options))()
idx = 0
for option in options:
option_array[idx] = option
idx += 1
value = core.BNGetMediumLevelILPossibleFlagValuesAfterInstruction(self._function.handle, flag, self._instr_index,
option_array, len(options))
result = function.PossibleValueSet(self._function.arch, value)
core.BNFreePossibleValueSet(value)
return result
def get_stack_contents(self, offset, size):
value = core.BNGetMediumLevelILStackContentsAtInstruction(self._function.handle, offset, size, self._instr_index)
result = function.RegisterValue(self._function.arch, value)
return result
def get_stack_contents_after(self, offset, size):
value = core.BNGetMediumLevelILStackContentsAfterInstruction(self._function.handle, offset, size, self._instr_index)
result = function.RegisterValue(self._function.arch, value)
return result
def get_possible_stack_contents(self, offset, size, options = []):
option_array = (ctypes.c_int * len(options))()
idx = 0
for option in options:
option_array[idx] = option
idx += 1
value = core.BNGetMediumLevelILPossibleStackContentsAtInstruction(self._function.handle, offset, size, self._instr_index,
option_array, len(options))
result = function.PossibleValueSet(self._function.arch, value)
core.BNFreePossibleValueSet(value)
return result
def get_possible_stack_contents_after(self, offset, size, options = []):
option_array = (ctypes.c_int * len(options))()
idx = 0
for option in options:
option_array[idx] = option
idx += 1
value = core.BNGetMediumLevelILPossibleStackContentsAfterInstruction(self._function.handle, offset, size, self._instr_index,
option_array, len(options))
result = function.PossibleValueSet(self._function.arch, value)
core.BNFreePossibleValueSet(value)
return result
def get_branch_dependence(self, branch_instr):
return ILBranchDependence(core.BNGetMediumLevelILBranchDependence(self._function.handle, self._instr_index, branch_instr))
@property
def function(self):
""" """
return self._function
@property
def expr_index(self):
""" """
return self._expr_index
@property
def instr_index(self):
""" """
return self._instr_index
@property
def operation(self):
""" """
return self._operation
@property
def size(self):
""" """
return self._size
@property
def address(self):
""" """
return self._address
@property
def source_operand(self):
""" """
return self._source_operand
@property
def operands(self):
""" """
return self._operands
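# A minimal read-only usage sketch (assumes an analyzed Binary Ninja Function
# object `func` obtained elsewhere; names outside this module are illustrative):
#
#     mlil = func.medium_level_il
#     for block in mlil:                  # MediumLevelILBasicBlock objects
#         for instr in block:             # MediumLevelILInstruction objects
#             print(instr.instr_index, instr.operation, str(instr))
#             print(instr.vars_read, instr.vars_written)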
class MediumLevelILExpr(object):
"""
``class MediumLevelILExpr`` holds the index of an IL expression.
.. note:: This class shouldn't be instantiated directly. Rather the helper members of MediumLevelILFunction should be \
used instead.
"""
def __init__(self, index):
self._index = index
@property
def index(self):
""" """
return self._index
@index.setter
def index(self, value):
self._index = value
class MediumLevelILFunction(object):
"""
``class MediumLevelILFunction`` contains the list of MediumLevelILExpr objects that make up a binaryninja.function. MediumLevelILExpr
objects can be added to the MediumLevelILFunction by calling :func:`append` and passing the result of the various class
methods which return MediumLevelILExpr objects.
"""
def __init__(self, arch = None, handle = None, source_func = None):
self._arch = arch
self._source_function = source_func
if handle is not None:
self.handle = core.handle_of_type(handle, core.BNMediumLevelILFunction)
if self._source_function is None:
self._source_function = binaryninja.function.Function(handle = core.BNGetMediumLevelILOwnerFunction(self.handle))
if self._arch is None:
self._arch = self._source_function.arch
else:
if self._source_function is None:
self.handle = None
raise ValueError("IL functions must be created with an associated function")
if self._arch is None:
self._arch = self._source_function.arch
func_handle = self._source_function.handle
self.handle = core.BNCreateMediumLevelILFunction(arch.handle, func_handle)
def __del__(self):
if self.handle is not None:
core.BNFreeMediumLevelILFunction(self.handle)
def __repr__(self):
arch = self.source_function.arch
if arch:
return "<mlil func: %s@%#x>" % (arch.name, self.source_function.start)
else:
return "<mlil func: %#x>" % self.source_function.start
def __len__(self):
return int(core.BNGetMediumLevelILInstructionCount(self.handle))
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return ctypes.addressof(self.handle.contents) == ctypes.addressof(other.handle.contents)
def __ne__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return not (self == other)
def __hash__(self):
return hash(('MLIL', self._source_function))
def __getitem__(self, i):
if isinstance(i, slice) or isinstance(i, tuple):
raise IndexError("expected integer instruction index")
if isinstance(i, MediumLevelILExpr):
return MediumLevelILInstruction(self, i.index)
# for backwards compatibility
if isinstance(i, MediumLevelILInstruction):
return i
if i < -len(self) or i >= len(self):
raise IndexError("index out of range")
if i < 0:
i = len(self) + i
return MediumLevelILInstruction(self, core.BNGetMediumLevelILIndexForInstruction(self.handle, i), i)
def __setitem__(self, i, j):
raise IndexError("instruction modification not implemented")
def __iter__(self):
count = ctypes.c_ulonglong()
blocks = core.BNGetMediumLevelILBasicBlockList(self.handle, count)
view = None
if self._source_function is not None:
view = self._source_function.view
try:
for i in range(0, count.value):
yield MediumLevelILBasicBlock(view, core.BNNewBasicBlockReference(blocks[i]), self)
finally:
core.BNFreeBasicBlockList(blocks, count.value)
@property
def current_address(self):
"""Current IL Address (read/write)"""
return core.BNMediumLevelILGetCurrentAddress(self.handle)
@current_address.setter
def current_address(self, value):
core.BNMediumLevelILSetCurrentAddress(self.handle, self._arch.handle, value)
def set_current_address(self, value, arch = None):
if arch is None:
arch = self._arch
core.BNMediumLevelILSetCurrentAddress(self.handle, arch.handle, value)
@property
def basic_blocks(self):
"""list of MediumLevelILBasicBlock objects (read-only)"""
count = ctypes.c_ulonglong()
blocks = core.BNGetMediumLevelILBasicBlockList(self.handle, count)
result = []
view = None
if self._source_function is not None:
view = self._source_function.view
for i in range(0, count.value):
result.append(MediumLevelILBasicBlock(view, core.BNNewBasicBlockReference(blocks[i]), self))
core.BNFreeBasicBlockList(blocks, count.value)
return result
@property
def instructions(self):
"""A generator of mlil instructions of the current function"""
for block in self.basic_blocks:
for i in block:
yield i
@property
def ssa_form(self):
"""Medium level IL in SSA form (read-only)"""
result = core.BNGetMediumLevelILSSAForm(self.handle)
if not result:
return None
return MediumLevelILFunction(self._arch, result, self._source_function)
@property
def non_ssa_form(self):
"""Medium level IL in non-SSA (default) form (read-only)"""
result = core.BNGetMediumLevelILNonSSAForm(self.handle)
if not result:
return None
return MediumLevelILFunction(self._arch, result, self._source_function)
@property
def low_level_il(self):
"""Low level IL for this function"""
result = core.BNGetLowLevelILForMediumLevelIL(self.handle)
if not result:
return None
return lowlevelil.LowLevelILFunction(self._arch, result, self._source_function)
@property
def llil(self):
"""Alias for low_level_il"""
return self.low_level_il
@property
def high_level_il(self):
"""High level IL for this medium level IL."""
result = core.BNGetHighLevelILForMediumLevelIL(self.handle)
if not result:
return None
return binaryninja.highlevelil.HighLevelILFunction(self._arch, result, self._source_function)
@property
def hlil(self):
return self.high_level_il
def get_instruction_start(self, addr, arch = None):
if arch is None:
arch = self._arch
result = core.BNMediumLevelILGetInstructionStart(self.handle, arch.handle, addr)
if result >= core.BNGetMediumLevelILInstructionCount(self.handle):
return None
return result
def expr(self, operation, a = 0, b = 0, c = 0, d = 0, e = 0, size = 0):
if isinstance(operation, str):
operation = MediumLevelILOperation[operation]
elif isinstance(operation, MediumLevelILOperation):
operation = operation.value
return MediumLevelILExpr(core.BNMediumLevelILAddExpr(self.handle, operation, size, a, b, c, d, e))
def append(self, expr):
"""
``append`` adds the MediumLevelILExpr ``expr`` to the current MediumLevelILFunction.
:param MediumLevelILExpr expr: the MediumLevelILExpr to add to the current MediumLevelILFunction
:return: number of MediumLevelILExpr in the current function
:rtype: int
"""
return core.BNMediumLevelILAddInstruction(self.handle, expr.index)
def goto(self, label):
"""
``goto`` returns a goto expression which jumps to the provided MediumLevelILLabel.
:param MediumLevelILLabel label: Label to jump to
:return: the MediumLevelILExpr that jumps to the provided label
:rtype: MediumLevelILExpr
"""
return MediumLevelILExpr(core.BNMediumLevelILGoto(self.handle, label.handle))
def if_expr(self, operand, t, f):
"""
``if_expr`` returns an ``if`` expression that evaluates the condition ``operand`` and jumps to the
MediumLevelILLabel ``t`` when the condition is non-zero, or to ``f`` when it is zero.
:param MediumLevelILExpr operand: comparison expression to evaluate.
:param MediumLevelILLabel t: Label for the true branch
:param MediumLevelILLabel f: Label for the false branch
:return: the MediumLevelILExpr for the if expression
:rtype: MediumLevelILExpr
"""
return MediumLevelILExpr(core.BNMediumLevelILIf(self.handle, operand.index, t.handle, f.handle))
def mark_label(self, label):
"""
``mark_label`` assigns a MediumLevelILLabel to the current IL address.
:param MediumLevelILLabel label:
:rtype: None
"""
core.BNMediumLevelILMarkLabel(self.handle, label.handle)
def add_label_list(self, labels):
"""
``add_label_list`` returns a label list expression for the given list of MediumLevelILLabel objects.
:param labels: the list of MediumLevelILLabel to get a label list expression from
:type labels: list(MediumLevelILLabel)
:return: the label list expression
:rtype: MediumLevelILExpr
"""
label_list = (ctypes.POINTER(core.BNMediumLevelILLabel) * len(labels))()
for i in range(len(labels)):
label_list[i] = labels[i].handle
return MediumLevelILExpr(core.BNMediumLevelILAddLabelList(self.handle, label_list, len(labels)))
def add_operand_list(self, operands):
"""
``add_operand_list`` returns an operand list expression for the given list of integer operands.
:param operands: list of operand numbers
:type operands: list(int)
:return: an operand list expression
:rtype: MediumLevelILExpr
"""
operand_list = (ctypes.c_ulonglong * len(operands))()
for i in range(len(operands)):
operand_list[i] = operands[i]
return MediumLevelILExpr(core.BNMediumLevelILAddOperandList(self.handle, operand_list, len(operands)))
def finalize(self):
"""
``finalize`` ends the function and computes the list of basic blocks.
:rtype: None
"""
core.BNFinalizeMediumLevelILFunction(self.handle)
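	# Illustrative sketch (commented out; not part of the upstream module): how the
	# builder methods above are typically composed when constructing IL by hand.
	# Here ``il`` stands for a MediumLevelILFunction being populated; the direct
	# MediumLevelILLabel() construction and the MLIL_CONST operand layout are
	# assumptions modelled on the analogous low level IL API.
	#
	#   t, f, done = MediumLevelILLabel(), MediumLevelILLabel(), MediumLevelILLabel()
	#   cond = il.expr(MediumLevelILOperation.MLIL_CONST, 1, size=4)  # placeholder condition
	#   il.append(il.if_expr(cond, t, f))  # branch on the condition expression
	#   il.mark_label(t)                   # true branch starts at the current IL address
	#   il.append(il.goto(done))           # jump over the false branch
	#   il.mark_label(f)                   # false branch
	#   il.mark_label(done)
	#   il.finalize()                      # compute the list of basic blocks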
def get_ssa_instruction_index(self, instr):
return core.BNGetMediumLevelILSSAInstructionIndex(self.handle, instr)
def get_non_ssa_instruction_index(self, instr):
return core.BNGetMediumLevelILNonSSAInstructionIndex(self.handle, instr)
def get_ssa_var_definition(self, ssa_var):
var_data = core.BNVariable()
var_data.type = ssa_var.var.source_type
var_data.index = ssa_var.var.index
var_data.storage = ssa_var.var.storage
result = core.BNGetMediumLevelILSSAVarDefinition(self.handle, var_data, ssa_var.version)
if result >= core.BNGetMediumLevelILInstructionCount(self.handle):
return None
return self[result]
def get_ssa_memory_definition(self, version):
result = core.BNGetMediumLevelILSSAMemoryDefinition(self.handle, version)
if result >= core.BNGetMediumLevelILInstructionCount(self.handle):
return None
return self[result]
def get_ssa_var_uses(self, ssa_var):
count = ctypes.c_ulonglong()
var_data = core.BNVariable()
var_data.type = ssa_var.var.source_type
var_data.index = ssa_var.var.index
var_data.storage = ssa_var.var.storage
instrs = core.BNGetMediumLevelILSSAVarUses(self.handle, var_data, ssa_var.version, count)
result = []
for i in range(0, count.value):
result.append(self[instrs[i]])
core.BNFreeILInstructionList(instrs)
return result
def get_ssa_memory_uses(self, version):
count = ctypes.c_ulonglong()
instrs = core.BNGetMediumLevelILSSAMemoryUses(self.handle, version, count)
result = []
for i in range(0, count.value):
result.append(self[instrs[i]])
core.BNFreeILInstructionList(instrs)
return result
def is_ssa_var_live(self, ssa_var):
"""
``is_ssa_var_live`` determines if ``ssa_var`` is live at any point in the function
:param SSAVariable ssa_var: the SSA variable to query
:return: whether the variable is live at any point in the function
:rtype: bool
"""
var_data = core.BNVariable()
var_data.type = ssa_var.var.source_type
var_data.index = ssa_var.var.index
var_data.storage = ssa_var.var.storage
return core.BNIsMediumLevelILSSAVarLive(self.handle, var_data, ssa_var.version)
def get_var_definitions(self, var):
count = ctypes.c_ulonglong()
var_data = core.BNVariable()
var_data.type = var.source_type
var_data.index = var.index
var_data.storage = var.storage
instrs = core.BNGetMediumLevelILVariableDefinitions(self.handle, var_data, count)
result = []
for i in range(0, count.value):
result.append(self[instrs[i]])
core.BNFreeILInstructionList(instrs)
return result
def get_var_uses(self, var):
count = ctypes.c_ulonglong()
var_data = core.BNVariable()
var_data.type = var.source_type
var_data.index = var.index
var_data.storage = var.storage
instrs = core.BNGetMediumLevelILVariableUses(self.handle, var_data, count)
result = []
for i in range(0, count.value):
result.append(self[instrs[i]])
core.BNFreeILInstructionList(instrs)
return result
def get_ssa_var_value(self, ssa_var):
var_data = core.BNVariable()
var_data.type = ssa_var.var.source_type
var_data.index = ssa_var.var.index
var_data.storage = ssa_var.var.storage
value = core.BNGetMediumLevelILSSAVarValue(self.handle, var_data, ssa_var.version)
result = function.RegisterValue(self._arch, value)
return result
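	# Illustrative sketch (commented out; not part of the upstream module): combining
	# the SSA query helpers above. ``ssa_func`` is assumed to be this function's
	# ssa_form and ``ssa_var`` an SSAVariable taken from one of its instructions.
	#
	#   definition = ssa_func.get_ssa_var_definition(ssa_var)  # defining instruction or None
	#   uses = ssa_func.get_ssa_var_uses(ssa_var)              # instructions reading this version
	#   value = ssa_func.get_ssa_var_value(ssa_var)            # dataflow value as a RegisterValue
	#   live = ssa_func.is_ssa_var_live(ssa_var)               # liveness anywhere in the function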
def get_low_level_il_instruction_index(self, instr):
low_il = self.low_level_il
if low_il is None:
return None
low_il = low_il.ssa_form
if low_il is None:
return None
result = core.BNGetLowLevelILInstructionIndex(self.handle, instr)
if result >= core.BNGetLowLevelILInstructionCount(low_il.handle):
return None
return result
def get_low_level_il_expr_index(self, expr):
low_il = self.low_level_il
if low_il is None:
return None
low_il = low_il.ssa_form
if low_il is None:
return None
result = core.BNGetLowLevelILExprIndex(self.handle, expr)
if result >= core.BNGetLowLevelILExprCount(low_il.handle):
return None
return result
def get_low_level_il_expr_indexes(self, expr):
count = ctypes.c_ulonglong()
exprs = core.BNGetLowLevelILExprIndexes(self.handle, expr, count)
result = []
for i in range(0, count.value):
result.append(exprs[i])
core.BNFreeILInstructionList(exprs)
return result
def get_high_level_il_instruction_index(self, instr):
high_il = self.high_level_il
if high_il is None:
return None
result = core.BNGetHighLevelILInstructionIndex(self.handle, instr)
if result >= core.BNGetHighLevelILInstructionCount(high_il.handle):
return None
return result
def get_high_level_il_expr_index(self, expr):
high_il = self.high_level_il
if high_il is None:
return None
result = core.BNGetHighLevelILExprIndex(self.handle, expr)
if result >= core.BNGetHighLevelILExprCount(high_il.handle):
return None
return result
def get_high_level_il_expr_indexes(self, expr):
count = ctypes.c_ulonglong()
exprs = core.BNGetHighLevelILExprIndexes(self.handle, expr, count)
result = []
for i in range(0, count.value):
result.append(exprs[i])
core.BNFreeILInstructionList(exprs)
return result
def create_graph(self, settings = None):
if settings is not None:
settings_obj = settings.handle
else:
settings_obj = None
return binaryninja.flowgraph.CoreFlowGraph(core.BNCreateMediumLevelILFunctionGraph(self.handle, settings_obj))
@property
def arch(self):
""" """
return self._arch
@arch.setter
def arch(self, value):
self._arch = value
@property
def source_function(self):
""" """
return self._source_function
@source_function.setter
def source_function(self, value):
self._source_function = value
class MediumLevelILBasicBlock(basicblock.BasicBlock):
def __init__(self, view, handle, owner):
super(MediumLevelILBasicBlock, self).__init__(handle, view)
self.il_function = owner
def __repr__(self):
arch = self.arch
if arch:
return "<mlil block: %s@%d-%d>" % (arch.name, self.start, self.end)
else:
return "<mlil block: %d-%d>" % (self.start, self.end)
def __iter__(self):
for idx in range(self.start, self.end):
yield self.il_function[idx]
def __getitem__(self, idx):
size = self.end - self.start
		if idx >= size or idx < -size:
raise IndexError("list index is out of range")
if idx >= 0:
return self.il_function[idx + self.start]
else:
return self.il_function[self.end + idx]
def __hash__(self):
return hash((self.start, self.end, self.il_function))
def __contains__(self, instruction):
if type(instruction) != MediumLevelILInstruction or instruction.il_basic_block != self:
return False
if instruction.instr_index >= self.start and instruction.instr_index <= self.end:
return True
else:
return False
def _create_instance(self, handle, view):
"""Internal method by super to instantiate child instances"""
return MediumLevelILBasicBlock(view, handle, self.il_function)
@property
def il_function(self):
""" """
return self._il_function
@il_function.setter
def il_function(self, value):
self._il_function = value
|
#!/usr/bin/python3 -u
# A script to install the alive script and its cron job
import subprocess
import argparse
import re
import os
import shutil
import pwd
from subprocess import call
parser = argparse.ArgumentParser(description='A script to install the alive script and cron job')
parser.add_argument('--url', help='The url where notify that this server is alive', required=True)
parser.add_argument('--user', help='The user for pastafari', required=True)
parser.add_argument('--pub_key', help='The pub key used in pastafari user', required=True)
args = parser.parse_args()
url=args.url
check_url = re.compile(
r'^(?:http|ftp)s?://' # http:// or https://
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' #domain...
r'localhost|' #localhost...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
r'(?::\d+)?' # optional port
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
if check_url.match(args.url):
# Create users
if call("sudo useradd -m -s /bin/sh %s" % args.user, shell=True) > 0:
print('Error, cannot add a new user')
exit(1)
else:
print('Added user')
if call("sudo mkdir -p /home/"+args.user+"/.ssh && sudo chown "+args.user+":"+args.user+" /home/"+args.user+"/.ssh && sudo chmod 700 /home/"+args.user+"/.ssh", shell=True) > 0:
print('Error, cannot add ssh directory')
exit(1)
else:
print('Added ssh directory')
if call("sudo cp "+args.pub_key+" /home/"+args.user+"/.ssh/authorized_keys && sudo chown "+args.user+":"+args.user+" /home/"+args.user+"/.ssh/authorized_keys && sudo chmod 600 /home/"+args.user+"/.ssh/authorized_keys", shell=True) > 0:
        print('Error, cannot add pub key to user')
exit(1)
else:
print('Added pub key to user')
# Edit alive cron
with open('modules/pastafari/scripts/monit/debian_wheezy/files/crontab/alive') as f:
alive_cron=f.read()
with open('modules/pastafari/scripts/monit/debian_wheezy/files/crontab/alive', 'w') as f:
alive_cron=alive_cron.replace('/home/spanel/modules/pastafari/scripts/monit/debian_wheezy/files/get_info.py', '/usr/local/bin/get_info.py')
f.write(alive_cron)
# Edit get_info.py
with open('modules/pastafari/scripts/monit/debian_wheezy/files/get_info.py') as f:
get_info=f.read()
with open('/usr/local/bin/get_info.py', 'w') as f:
get_info=get_info.replace("http://url/to/server/token/ip", args.url)
f.write(get_info)
os.chmod('/usr/local/bin/get_info.py', 0o700)
user_passwd=pwd.getpwnam(args.user)
os.chown('/usr/local/bin/get_info.py', user_passwd[2], user_passwd[3])
#shutil.chown('/usr/local/bin/get_info.py', args.user, args.user)
# Edit get_updates.py
with open('modules/pastafari/scripts/monit/debian_wheezy/files/get_updates.py') as f:
get_updates=f.read()
with open('/etc/cron.daily/get_updates.py', 'w') as f:
url_updates=args.url.replace('/getinfo/', '/getupdates/')
get_updates=get_updates.replace("http://url/to/server/token/ip", url_updates)
f.write(get_updates)
os.chmod('/etc/cron.daily/get_updates.py', 0o700)
# Edit sudo file
with open('modules/pastafari/scripts/monit/debian_wheezy/files/sudoers.d/spanel') as f:
sudoers=f.read()
with open('/etc/sudoers.d/spanel', 'w') as f:
sudoers=sudoers.replace("spanel", args.user)
f.write(sudoers)
# Copy cron alive to /etc/cron.d/
if call("sudo cp modules/pastafari/scripts/monit/debian_wheezy/files/crontab/alive /etc/cron.d/alive", shell=True) > 0:
print('Error, cannot install crontab alive file in cron.d')
exit(1)
else:
        print('Added crontab alive file in cron.d')
print('Script installed successfully')
    # Copy upgrade script to /home/<user>/bin
if call("mkdir /home/"+args.user+"/bin/ && cp modules/pastafari/scripts/standard/debian_wheezy/upgrade.sh /home/"+args.user+"/bin/ && chown -R "+args.user+":"+args.user+" /home/"+args.user+"/bin/", shell=True) > 0:
        print('Error, cannot install upgrade.sh in /home/'+args.user+'/bin/')
exit(1)
else:
        print('Added /home/'+args.user+'/bin/upgrade.sh')
print('Script installed successfully')
# Making first call to site
if subprocess.call('/usr/local/bin/get_info.py', shell=True) > 0:
print('Error')
exit(1)
else:
print('Your server should be up in your panel...')
exit(0)
else:
    print('Error installing the module: the provided url is not valid')
exit(1)
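# Example invocation (illustrative only; the panel URL, token, user name and key
# path below are hypothetical placeholders, not values from this repository):
#   sudo ./install.py --url https://panel.example.com/getinfo/TOKEN/192.0.2.10 \
#       --user pastafari --pub_key /root/pastafari_id_rsa.pub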
|
# -*- coding: utf-8 -*-
import os,math
from qgis.core import NULL
from mole import oeq_global
from mole.project import config
from mole.extensions import OeQExtension
from mole.stat_corr import rb_contemporary_window_uvalue_by_building_age_lookup
from mole.stat_corr import nrb_contemporary_window_uvalue_by_building_age_lookup
def calculation(self=None, parameters={},feature = None):
from math import floor, ceil
from PyQt4.QtCore import QVariant
return {'WN_UE': {'type': QVariant.Double, 'value': 1.3}}
extension = OeQExtension(
extension_id=__name__,
category='Evaluation',
subcategory='U-Values EnEV',
extension_name='Window Quality (U_Value, EnEV)',
layer_name= 'U Window EnEV',
extension_filepath=os.path.join(__file__),
colortable = os.path.join(os.path.splitext(__file__)[0] + '.qml'),
field_id='WN_UE',
source_type='none',
par_in=[],
sourcelayer_name=config.data_layer_name,
targetlayer_name=config.data_layer_name,
active=True,
show_results=['WN_UE'],
description=u"Calculate the EnEV U-Value of the Building's windows",
evaluation_method=calculation)
extension.registerExtension(default=True)
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class sslglobal_sslpolicy_binding(base_resource) :
""" Binding class showing the sslpolicy that can be bound to sslglobal.
"""
def __init__(self) :
self._policyname = ""
self._type = ""
self._priority = 0
self._gotopriorityexpression = ""
self._invoke = False
self._labeltype = ""
self._labelname = ""
self.___count = 0
@property
def priority(self) :
"""The priority of the policy binding.
"""
try :
return self._priority
except Exception as e:
raise e
@priority.setter
def priority(self, priority) :
"""The priority of the policy binding.
"""
try :
self._priority = priority
except Exception as e:
raise e
@property
def policyname(self) :
"""The name for the SSL policy.
"""
try :
return self._policyname
except Exception as e:
raise e
@policyname.setter
def policyname(self, policyname) :
"""The name for the SSL policy.
"""
try :
self._policyname = policyname
except Exception as e:
raise e
@property
def labelname(self) :
"""Name of the virtual server or user-defined policy label to invoke if the policy evaluates to TRUE.
"""
try :
return self._labelname
except Exception as e:
raise e
@labelname.setter
def labelname(self, labelname) :
"""Name of the virtual server or user-defined policy label to invoke if the policy evaluates to TRUE.
"""
try :
self._labelname = labelname
except Exception as e:
raise e
@property
def gotopriorityexpression(self) :
"""Expression or other value specifying the next policy to be evaluated if the current policy evaluates to TRUE. Specify one of the following values:
* NEXT - Evaluate the policy with the next higher priority number.
* END - End policy evaluation.
* USE_INVOCATION_RESULT - Applicable if this policy invokes another policy label. If the final goto in the invoked policy label has a value of END, the evaluation stops. If the final goto is anything other than END, the current policy label performs a NEXT.
* A default syntax expression that evaluates to a number.
If you specify an expression, the number to which it evaluates determines the next policy to evaluate, as follows:
* If the expression evaluates to a higher numbered priority, the policy with that priority is evaluated next.
* If the expression evaluates to the priority of the current policy, the policy with the next higher numbered priority is evaluated next.
* If the expression evaluates to a number that is larger than the largest numbered priority, policy evaluation ends.
An UNDEF event is triggered if:
* The expression is invalid.
* The expression evaluates to a priority number that is numerically lower than the current policy's priority.
* The expression evaluates to a priority number that is between the current policy's priority number (say, 30) and the highest priority number (say, 100), but does not match any configured priority number (for example, the expression evaluates to the number 85). This example assumes that the priority number increments by 10 for every successive policy, and therefore a priority number of 85 does not exist in the policy label.
"""
try :
return self._gotopriorityexpression
except Exception as e:
raise e
@gotopriorityexpression.setter
def gotopriorityexpression(self, gotopriorityexpression) :
"""Expression or other value specifying the next policy to be evaluated if the current policy evaluates to TRUE. Specify one of the following values:
* NEXT - Evaluate the policy with the next higher priority number.
* END - End policy evaluation.
* USE_INVOCATION_RESULT - Applicable if this policy invokes another policy label. If the final goto in the invoked policy label has a value of END, the evaluation stops. If the final goto is anything other than END, the current policy label performs a NEXT.
* A default syntax expression that evaluates to a number.
If you specify an expression, the number to which it evaluates determines the next policy to evaluate, as follows:
* If the expression evaluates to a higher numbered priority, the policy with that priority is evaluated next.
* If the expression evaluates to the priority of the current policy, the policy with the next higher numbered priority is evaluated next.
* If the expression evaluates to a number that is larger than the largest numbered priority, policy evaluation ends.
An UNDEF event is triggered if:
* The expression is invalid.
* The expression evaluates to a priority number that is numerically lower than the current policy's priority.
* The expression evaluates to a priority number that is between the current policy's priority number (say, 30) and the highest priority number (say, 100), but does not match any configured priority number (for example, the expression evaluates to the number 85). This example assumes that the priority number increments by 10 for every successive policy, and therefore a priority number of 85 does not exist in the policy label.
"""
try :
self._gotopriorityexpression = gotopriorityexpression
except Exception as e:
raise e
@property
def invoke(self) :
"""Invoke policies bound to a virtual server, service, or policy label. After the invoked policies are evaluated, the flow returns to the policy with the next priority.
"""
try :
return self._invoke
except Exception as e:
raise e
@invoke.setter
def invoke(self, invoke) :
"""Invoke policies bound to a virtual server, service, or policy label. After the invoked policies are evaluated, the flow returns to the policy with the next priority.
"""
try :
self._invoke = invoke
except Exception as e:
raise e
@property
def type(self) :
"""Global bind point to which the policy is bound.<br/>Possible values = CONTROL_OVERRIDE, CONTROL_DEFAULT, DATA_OVERRIDE, DATA_DEFAULT.
"""
try :
return self._type
except Exception as e:
raise e
@type.setter
def type(self, type) :
"""Global bind point to which the policy is bound.<br/>Possible values = CONTROL_OVERRIDE, CONTROL_DEFAULT, DATA_OVERRIDE, DATA_DEFAULT
"""
try :
self._type = type
except Exception as e:
raise e
@property
def labeltype(self) :
"""Type of policy label to invoke. Specify virtual server for a policy label associated with a virtual server, or policy label for a user-defined policy label.
"""
try :
return self._labeltype
except Exception as e:
raise e
@labeltype.setter
def labeltype(self, labeltype) :
"""Type of policy label to invoke. Specify virtual server for a policy label associated with a virtual server, or policy label for a user-defined policy label.
"""
try :
self._labeltype = labeltype
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(sslglobal_sslpolicy_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.sslglobal_sslpolicy_binding
except Exception as e :
raise e
def _get_object_name(self) :
""" Returns the value of object identifier argument
"""
try :
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
try :
if resource and type(resource) is not list :
updateresource = sslglobal_sslpolicy_binding()
updateresource.policyname = resource.policyname
updateresource.gotopriorityexpression = resource.gotopriorityexpression
updateresource.type = resource.type
updateresource.invoke = resource.invoke
updateresource.labeltype = resource.labeltype
updateresource.labelname = resource.labelname
return updateresource.update_resource(client)
else :
if resource and len(resource) > 0 :
updateresources = [sslglobal_sslpolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].policyname = resource[i].policyname
updateresources[i].gotopriorityexpression = resource[i].gotopriorityexpression
updateresources[i].type = resource[i].type
updateresources[i].invoke = resource[i].invoke
updateresources[i].labeltype = resource[i].labeltype
updateresources[i].labelname = resource[i].labelname
return cls.update_bulk_request(client, updateresources)
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
try :
if resource and type(resource) is not list :
deleteresource = sslglobal_sslpolicy_binding()
deleteresource.policyname = resource.policyname
deleteresource.type = resource.type
return deleteresource.delete_resource(client)
else :
if resource and len(resource) > 0 :
deleteresources = [sslglobal_sslpolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].policyname = resource[i].policyname
deleteresources[i].type = resource[i].type
return cls.delete_bulk_request(client, deleteresources)
except Exception as e :
raise e
@classmethod
def get(cls, service) :
""" Use this API to fetch a sslglobal_sslpolicy_binding resources.
"""
try :
obj = sslglobal_sslpolicy_binding()
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, filter_) :
""" Use this API to fetch filtered set of sslglobal_sslpolicy_binding resources.
		Filter string should be in JSON format, e.g.: "port:80,servicetype:HTTP".
"""
try :
obj = sslglobal_sslpolicy_binding()
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service) :
""" Use this API to count sslglobal_sslpolicy_binding resources configued on NetScaler.
"""
try :
obj = sslglobal_sslpolicy_binding()
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, filter_) :
""" Use this API to count the filtered set of sslglobal_sslpolicy_binding resources.
		Filter string should be in JSON format, e.g.: "port:80,servicetype:HTTP".
"""
try :
obj = sslglobal_sslpolicy_binding()
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class Type:
CONTROL_OVERRIDE = "CONTROL_OVERRIDE"
CONTROL_DEFAULT = "CONTROL_DEFAULT"
DATA_OVERRIDE = "DATA_OVERRIDE"
DATA_DEFAULT = "DATA_DEFAULT"
class Labeltype:
vserver = "vserver"
service = "service"
policylabel = "policylabel"
class sslglobal_sslpolicy_binding_response(base_response) :
def __init__(self, length=1) :
self.sslglobal_sslpolicy_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.sslglobal_sslpolicy_binding = [sslglobal_sslpolicy_binding() for _ in range(length)]
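# Illustrative usage sketch (not part of the generated SDK file). It assumes the
# sibling nitro_service module with login()/logout() methods; the NetScaler
# address and credentials below are placeholders.
def _example_bind_ssl_policy_globally():
	from nssrc.com.citrix.netscaler.nitro.service.nitro_service import nitro_service
	client = nitro_service("192.0.2.10", "HTTP")
	client.login("nsroot", "password")
	binding = sslglobal_sslpolicy_binding()
	binding.policyname = "ssl_pol_example"
	binding.type = sslglobal_sslpolicy_binding.Type.CONTROL_DEFAULT
	binding.gotopriorityexpression = "END"
	# Note: add() copies policyname, type, invoke, label and goto fields, but not
	# priority, so the binding priority is left to the NetScaler default here.
	sslglobal_sslpolicy_binding.add(client, binding)
	client.logout()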
|
import requests
from talent_curator import app
GOOGLE_DRIVE_API_URI = 'https://www.googleapis.com/drive/v2/files/'
logger = app.logger
HEADERS = {
'Authorization': "Bearer {access_token}",
'Content-type': "application/json",
}
class GoogleDriveAPI(object):
def get_document(self, access_token, document_id):
headers = self.build_headers(access_token=access_token)
r = requests.get(GOOGLE_DRIVE_API_URI + document_id, headers=headers)
file_resource = None
if r.status_code == requests.codes.ok:
file_resource = r.json
logger.debug("File found: %s", file_resource)
else:
logger.error("Failed to find document: %s", r.reason)
logger.error("Full response %s", r.text)
return file_resource
def search(self, access_token, query):
headers = self.build_headers(access_token=access_token)
query_string = {'q': query}
r = requests.get(GOOGLE_DRIVE_API_URI, headers=headers, params=query_string)
if r.status_code != requests.codes.ok:
return None
logger.debug("Response %s" % r.text)
results_list = r.json['items']
return results_list
def children(self, access_token, folder_id):
headers = self.build_headers(access_token=access_token)
r = requests.get(GOOGLE_DRIVE_API_URI + folder_id + '/children', headers=headers, params={'maxResults': 5})
logger.debug("Response %s" % r.json['items'])
if r.status_code != requests.codes.ok:
return None
return r.json['items']
def build_headers(self, *args, **kwargs):
headers = {}
for key, val in HEADERS.iteritems():
headers[key] = val.format(**kwargs)
return headers
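# Illustrative usage (not part of the module): fetch a file's metadata with a
# previously obtained OAuth access token. The token and document id below are
# hypothetical placeholders.
def _example_fetch_document():
    api = GoogleDriveAPI()
    doc = api.get_document(access_token="ya29.EXAMPLE_TOKEN", document_id="0BExampleFileId")
    if doc is not None:
        logger.debug("Fetched document titled %s", doc.get('title'))
    return doc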
|
# This file is part of TRS (http://math.kompiler.org)
#
# TRS is free software: you can redistribute it and/or modify it under the
# terms of the GNU Affero General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# TRS is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License
# along with TRS. If not, see <http://www.gnu.org/licenses/>.
from itertools import combinations, product, ifilterfalse
from .utils import least_common_multiple, partition, is_numeric_node, \
evals_to_numeric
from ..node import ExpressionNode as N, ExpressionLeaf as L, Scope, OP_DIV, \
OP_ADD, OP_MUL, negate
from ..possibilities import Possibility as P, MESSAGES
from ..translate import _
from .negation import negate_polynome
def match_constant_division(node):
"""
a / 0 -> Division by zero
a / 1 -> a
0 / a -> 0
a / a -> 1
"""
assert node.is_op(OP_DIV)
p = []
nominator, denominator = node
# a / 0
if denominator == 0:
raise ZeroDivisionError('Division by zero: %s.' % node)
# a / 1
if denominator == 1:
p.append(P(node, division_by_one, (nominator,)))
# 0 / a
if nominator == 0:
p.append(P(node, division_of_zero, (denominator,)))
# a / a
if nominator == denominator:
p.append(P(node, division_by_self, (nominator,)))
return p
def division_by_one(root, args):
"""
a / 1 -> a
"""
return args[0].negate(root.negated)
MESSAGES[division_by_one] = _('Division by `1` yields the nominator.')
def division_of_zero(root, args):
"""
0 / a -> 0
"""
return L(0, negated=root.negated)
MESSAGES[division_of_zero] = _('Division of `0` by {1} reduces to `0`.')
def division_by_self(root, args):
"""
a / a -> 1
"""
return L(1, negated=root.negated)
MESSAGES[division_by_self] = _('Division of {1} by itself reduces to `1`.')
def match_add_fractions(node):
"""
a / b + c / b and a, c in Z -> (a + c) / b
a / b + c / d and a, b, c, d in Z -> a' / e + c' / e # e = lcm(b, d)
# | e = b * d
a / b + c and a, b, c in Z -> a / b + (bc) / b # =>* (a + bc) / b
"""
assert node.is_op(OP_ADD)
p = []
scope = Scope(node)
fractions, others = partition(lambda n: n.is_op(OP_DIV), scope)
numerics = filter(is_numeric_node, others)
for ab, cd in combinations(fractions, 2):
a, b = ab
c, d = cd
if b == d:
# Equal denominators, add nominators to create a single fraction
p.append(P(node, add_nominators, (scope, ab, cd)))
elif all(map(is_numeric_node, (a, b, c, d))):
# Denominators are both numeric, rewrite both fractions to the
# least common multiple of their denominators. Later, the
# nominators will be added
lcm = least_common_multiple(b.value, d.value)
p.append(P(node, equalize_denominators, (scope, ab, cd, lcm)))
# Also, add the (non-recommended) possibility to multiply the
# denominators. Do this only if the multiplication is not equal to
# the least common multiple, to avoid duplicate possibilities
mult = b.value * d.value
if mult != lcm:
p.append(P(node, equalize_denominators, (scope, ab, cd, mult)))
for ab, c in product(fractions, numerics):
a, b = ab
if a.is_numeric() and b.is_numeric():
# Fraction of constants added to a constant -> create a single
# constant fraction
p.append(P(node, constant_to_fraction, (scope, ab, c)))
return p
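# Worked example (illustrative): for 1 / 4 + 1 / 6 the denominators are numeric and
# unequal, so equalize_denominators rewrites both fractions to lcm(4, 6) = 12, giving
# 3 / 12 + 2 / 12, after which add_nominators produces 5 / 12. The non-recommended
# alternative multiplies the denominators (4 * 6 = 24): 6 / 24 + 4 / 24.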
def add_nominators(root, args):
"""
a / b + c / b and a, c in Z -> (a + c) / b
"""
scope, ab, cb = args
a, b = ab
c = cb[0]
# Replace the left node with the new expression, transfer fraction
# negations to nominators
scope.replace(ab, (a.negate(ab.negated) + c.negate(cb.negated)) / b)
scope.remove(cb)
return scope.as_nary_node()
MESSAGES[add_nominators] = \
_('Add the nominators of {2} and {3} to create a single fraction.')
def equalize_denominators(root, args):
"""
a / b + c / d and a, b, c, d in Z -> a' / e + c' / e
"""
scope, denom = args[::3]
for fraction in args[1:3]:
n, d = fraction
mult = denom / d.value
if mult != 1:
if n.is_numeric():
nom = L(n.value * mult)
else:
nom = L(mult) * n
scope.replace(fraction, negate(nom / L(d.value * mult),
fraction.negated))
return scope.as_nary_node()
MESSAGES[equalize_denominators] = \
_('Equalize the denominators of divisions {2} and {3} to {4}.')
def constant_to_fraction(root, args):
"""
a / b + c and a, b, c in Z -> a / b + (bc) / b # =>* (a + bc) / b
"""
scope, ab, c = args
b = ab[1]
scope.replace(c, b * c / b)
return scope.as_nary_node()
MESSAGES[constant_to_fraction] = \
_('Rewrite constant {3} to a fraction to be able to add it to {2}.')
def match_multiply_fractions(node):
"""
a / b * c / d -> (ac) / (bd)
a / b * c and (eval(c) in Z or eval(a / b) not in Z) -> (ac) / b
"""
assert node.is_op(OP_MUL)
p = []
scope = Scope(node)
fractions, others = partition(lambda n: n.is_op(OP_DIV), scope)
for ab, cd in combinations(fractions, 2):
p.append(P(node, multiply_fractions, (scope, ab, cd)))
for ab, c in product(fractions, others):
if evals_to_numeric(c) or not evals_to_numeric(ab):
p.append(P(node, multiply_with_fraction, (scope, ab, c)))
return p
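# Worked example (illustrative): 2 / 3 * (4 / 5) matches multiply_fractions and
# becomes (2 * 4) / (3 * 5) = 8 / 15, while x / 3 * 2 matches multiply_with_fraction
# (the non-fraction operand is numeric) and becomes (x * 2) / 3.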
def multiply_fractions(root, args):
"""
a / b * (c / d) -> ac / (bd)
"""
scope, ab, cd = args
a, b = ab
c, d = cd
scope.replace(ab, (a * c / (b * d)).negate(ab.negated + cd.negated))
scope.remove(cd)
return scope.as_nary_node()
MESSAGES[multiply_fractions] = _('Multiply fractions {2} and {3}.')
def multiply_with_fraction(root, args):
"""
a / b * c and (eval(c) in Z or eval(a / b) not in Z) -> (ac) / b
"""
scope, ab, c = args
a, b = ab
if scope.index(ab) < scope.index(c):
nominator = a * c
else:
nominator = c * a
scope.replace(ab, negate(nominator / b, ab.negated))
scope.remove(c)
return scope.as_nary_node()
MESSAGES[multiply_with_fraction] = \
_('Multiply {3} with the nominator of fraction {2}.')
def match_divide_fractions(node):
"""
Reduce divisions of fractions to a single fraction.
Examples:
a / b / c -> a / (bc)
a / (b / c) -> ac / b
Note that:
    a / b / (c / d) =>* ad / (bc)
"""
assert node.is_op(OP_DIV)
nom, denom = node
p = []
if nom.is_op(OP_DIV):
p.append(P(node, divide_fraction, tuple(nom) + (denom,)))
if denom.is_op(OP_DIV):
p.append(P(node, divide_by_fraction, (nom,) + tuple(denom)))
return p
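# Worked example (illustrative): (3 / 4) / 2 matches divide_fraction and becomes
# 3 / (4 * 2) = 3 / 8, while 3 / (2 / 5) matches divide_by_fraction and becomes
# (3 * 5) / 2 = 15 / 2.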
def divide_fraction(root, args):
"""
a / b / c -> a / (bc)
"""
(a, b), c = root
return negate(a / (b * c), root.negated)
MESSAGES[divide_fraction] = \
_('Move {3} to denominator of fraction `{1} / {2}`.')
def divide_by_fraction(root, args):
"""
a / (b / c) -> ac / b
"""
a, bc = root
b, c = bc
return negate(a * c / b, root.negated + bc.negated)
MESSAGES[divide_by_fraction] = \
_('Move {3} to the nominator of fraction `{1} / {2}`.')
def is_power_combination(a, b):
"""
Check if two nodes are powers that can be combined in a fraction, for
example:
a and a^2
a^2 and a^2
a^2 and a
"""
if a.is_power():
a = a[0]
if b.is_power():
b = b[0]
return a == b
def mult_scope(node):
"""
    Get the multiplication scope of a node that may or may not be a
multiplication itself.
"""
if node.is_op(OP_MUL):
return Scope(node)
return Scope(N(OP_MUL, node))
def remove_from_mult_scope(scope, node):
if len(scope) == 1:
scope.replace(node, L(1))
else:
scope.remove(node)
return scope.as_nary_node()
def match_extract_fraction_terms(node):
"""
Divide nominator and denominator by the same part. If the same root of a
power appears in both nominator and denominator, also extract it so that it
can be reduced to a single power by power division rules.
Examples:
ab / (ac) -> a / a * (c / e) # =>* c / e
a ^ b * c / (a ^ d * e) -> a ^ b / a ^ d * (c / e) # -> a^(b - d)(c / e)
ac / b and eval(c) not in Z and eval(a / b) in Z -> a / b * c
"""
assert node.is_op(OP_DIV)
n_scope, d_scope = map(mult_scope, node)
p = []
nominator, denominator = node
# ac / b
for n in ifilterfalse(evals_to_numeric, n_scope):
a_scope = mult_scope(nominator)
#a = remove_from_mult_scope(a_scope, n)
if len(a_scope) == 1:
a = L(1)
else:
a = a_scope.all_except(n)
if evals_to_numeric(a / denominator):
p.append(P(node, extract_nominator_term, (a, n)))
if len(n_scope) == 1 and len(d_scope) == 1:
return p
# a ^ b * c / (a ^ d * e)
for n, d in product(n_scope, d_scope):
if n == d:
handler = divide_fraction_by_term
elif is_power_combination(n, d):
handler = extract_fraction_terms
else:
continue # pragma: nocover
p.append(P(node, handler, (n_scope, d_scope, n, d)))
return p
def extract_nominator_term(root, args):
"""
ac / b and eval(c) not in Z and eval(a / b) in Z -> a / b * c
"""
a, c = args
return negate(a / root[1] * c, root.negated)
MESSAGES[extract_nominator_term] = \
_('Extract {2} from the nominator of fraction {0}.')
def extract_fraction_terms(root, args):
"""
a ^ b * c / (a ^ d * e) -> a ^ b / a ^ d * (c / e)
"""
n_scope, d_scope, n, d = args
div = n / d * (remove_from_mult_scope(n_scope, n) \
/ remove_from_mult_scope(d_scope, d))
return negate(div, root.negated)
MESSAGES[extract_fraction_terms] = _('Extract `{3} / {4}` from fraction {0}.')
def divide_fraction_by_term(root, args):
"""
ab / a -> b
a / (ba) -> 1 / b
a * c / (ae) -> c / e
"""
n_scope, d_scope, n, d = args
nom = remove_from_mult_scope(n_scope, n)
d_scope.remove(d)
if not len(d_scope):
return negate(nom, root.negated)
return negate(nom / d_scope.as_nary_node(), root.negated)
MESSAGES[divide_fraction_by_term] = \
_('Divide nominator and denominator of {0} by {2}.')
def match_division_in_denominator(node):
"""
a / (b / c + d) -> (ca) / (c(b / c + d))
"""
assert node.is_op(OP_DIV)
denom = node[1]
if not denom.is_op(OP_ADD):
return []
return [P(node, multiply_with_term, (n[1],))
for n in Scope(denom) if n.is_op(OP_DIV)]
def multiply_with_term(root, args):
"""
a / (b / c + d) -> (ca) / (c(b / c + d))
"""
c = args[0]
nom, denom = root
return negate(c * nom / (c * denom), root.negated)
MESSAGES[multiply_with_term] = \
_('Multiply nominator and denominator of {0} with {1}.')
def match_combine_fractions(node):
"""
a/b + c/d -> ad/(bd) + bc/(bd) # -> (ad + bc)/(bd)
"""
assert node.is_op(OP_ADD)
scope = Scope(node)
fractions = [n for n in scope if n.is_op(OP_DIV)]
p = []
for left, right in combinations(fractions, 2):
p.append(P(node, combine_fractions, (scope, left, right)))
return p
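# Worked example (illustrative): 1 / 2 + 1 / 3 is rewritten by combine_fractions to
# (1 * 3) / (2 * 3) + (2 * 1) / (2 * 3) = 3 / 6 + 2 / 6, which later reduces to 5 / 6.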
def combine_fractions(root, args):
"""
a/b + c/d -> ad/(bd) + bc/(bd)
"""
scope, ab, cd = args
(a, b), (c, d) = ab, cd
a = negate(a, ab.negated)
d = negate(d, cd.negated)
scope.replace(ab, a * d / (b * d) + b * c / (b * d))
scope.remove(cd)
return scope.as_nary_node()
MESSAGES[combine_fractions] = _('Combine fraction {2} and {3}.')
def match_remove_division_negation(node):
"""
-a / (-b + c) -> a / (--b - c)
"""
assert node.is_op(OP_DIV)
nom, denom = node
if node.negated:
if nom.is_op(OP_ADD) and any([n.negated for n in Scope(nom)]):
return [P(node, remove_division_negation, (True, nom))]
if denom.is_op(OP_ADD) and any([n.negated for n in Scope(denom)]):
return [P(node, remove_division_negation, (False, denom))]
return []
def remove_division_negation(root, args):
"""
-a / (-b + c) -> a / (--b - c)
"""
nom, denom = root
if args[0]:
nom = negate_polynome(nom, ())
else:
denom = negate_polynome(denom, ())
return negate(nom / denom, root.negated - 1)
MESSAGES[remove_division_negation] = \
_('Move negation from fraction {0} to polynome {2}.')
def match_fraction_in_division(node):
"""
(1 / a * b) / c -> b / (ac)
c / (1 / a * b) -> (ac) / b
"""
assert node.is_op(OP_DIV)
nom, denom = node
p = []
if nom.is_op(OP_MUL):
scope = Scope(nom)
for n in scope:
if n.is_op(OP_DIV) and n[0] == 1:
p.append(P(node, fraction_in_division, (True, scope, n)))
if denom.is_op(OP_MUL):
scope = Scope(denom)
for n in scope:
if n.is_op(OP_DIV) and n[0] == 1:
p.append(P(node, fraction_in_division, (False, scope, n)))
return p
def fraction_in_division(root, args):
"""
(1 / a * b) / c -> b / (ac)
c / (1 / a * b) -> (ac) / b
"""
is_nominator, scope, fraction = args
nom, denom = root
if fraction.negated or fraction[0].negated:
scope.replace(fraction, fraction[0].negate(fraction.negated))
else:
scope.remove(fraction)
if is_nominator:
nom = scope.as_nary_node()
denom = fraction[1] * denom
else:
nom = fraction[1] * nom
denom = scope.as_nary_node()
return negate(nom / denom, root.negated)
MESSAGES[fraction_in_division] = \
_('Multiply both sides of fraction {0} with {3[1]}.')
|