#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
The pyspark program.
This module will be run by spark-submit for PySparkTask jobs.
The first argument is a path to the pickled instance of the PySparkTask;
the remaining arguments are those returned by PySparkTask.app_options().
"""
from __future__ import print_function
import abc
try:
import cPickle as pickle
except ImportError:
import pickle
import logging
import sys
import os
from luigi import configuration
from luigi import six
# this prevents the modules in the directory of this script from shadowing global packages
sys.path.append(sys.path.pop(0))
@six.add_metaclass(abc.ABCMeta)
class _SparkEntryPoint(object):
def __init__(self, conf):
self.conf = conf
@abc.abstractmethod
def __enter__(self):
pass
@abc.abstractmethod
def __exit__(self, exc_type, exc_val, exc_tb):
pass
class SparkContextEntryPoint(_SparkEntryPoint):
sc = None
def __enter__(self):
from pyspark import SparkContext
self.sc = SparkContext(conf=self.conf)
return self.sc, self.sc
def __exit__(self, exc_type, exc_val, exc_tb):
self.sc.stop()
class SparkSessionEntryPoint(_SparkEntryPoint):
spark = None
def _check_major_spark_version(self):
from pyspark import __version__ as spark_version
major_version = int(spark_version.split('.')[0])
if major_version < 2:
raise RuntimeError(
'''
Apache Spark {} does not support SparkSession entrypoint.
Try to set 'pyspark_runner.use_spark_session' to 'False' and switch to old-style syntax
'''.format(spark_version)
)
def __enter__(self):
self._check_major_spark_version()
from pyspark.sql import SparkSession
self.spark = SparkSession \
.builder \
.config(conf=self.conf) \
.enableHiveSupport() \
.getOrCreate()
return self.spark, self.spark.sparkContext
def __exit__(self, exc_type, exc_val, exc_tb):
self.spark.stop()
class AbstractPySparkRunner(object):
_entry_point_class = None
def __init__(self, job, *args):
        # Append the job directory to sys.path to enable dynamic import
# of the module in which the class resides on unpickling
sys.path.append(os.path.dirname(job))
with open(job, "rb") as fd:
self.job = pickle.load(fd)
self.args = args
def run(self):
from pyspark import SparkConf
conf = SparkConf()
self.job.setup(conf)
with self._entry_point_class(conf=conf) as (entry_point, sc):
self.job.setup_remote(sc)
self.job.main(entry_point, *self.args)
def _pyspark_runner_with(name, entry_point_class):
return type(name, (AbstractPySparkRunner,), {'_entry_point_class': entry_point_class})
PySparkRunner = _pyspark_runner_with('PySparkRunner', SparkContextEntryPoint)
PySparkSessionRunner = _pyspark_runner_with('PySparkSessionRunner', SparkSessionEntryPoint)
def _use_spark_session():
return bool(configuration.get_config().get('pyspark_runner', "use_spark_session", False))
def _get_runner_class():
if _use_spark_session():
return PySparkSessionRunner
return PySparkRunner
if __name__ == '__main__':
logging.basicConfig(level=logging.WARN)
_get_runner_class()(*sys.argv[1:]).run()
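# --- Illustrative usage sketch (not part of the original module) ---
# Per the module docstring, spark-submit launches this script with the pickled
# PySparkTask path first and PySparkTask.app_options() after it, e.g. (the path
# and options below are hypothetical):
#
#   spark-submit pyspark_runner.py /tmp/PySparkTask.pickle 2015-01-01
#
# which ends up calling _get_runner_class()(*sys.argv[1:]).run(). The
# _pyspark_runner_with() factory above is simply shorthand for an explicit subclass:
#
#   class MyRunner(AbstractPySparkRunner):
#       _entry_point_class = SparkContextEntryPoint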
|
from django.utils.translation import gettext_lazy
from cradmin_legacy.viewhelpers.listbuilder.itemvalue import TitleDescription
from cradmin_legacy.viewhelpers.listbuilder.lists import RowList
from cradmin_legacy.viewhelpers.listbuilder.itemframe import DefaultSpacingItemFrame
class WithResultValueRenderable(TitleDescription):
"""
    For rendering the number of results in list filter views.
"""
template_name = 'devilry_cradmin/devilry_listfilter/utils/devilry_with_result_value_renderable.django.html'
def get_object_name_singular(self, num_matches):
"""
String representation of the objects listed in singular form.
"""
return 'object'
def get_object_name_plural(self, num_matches):
"""
String representation of the objects listed in plural form.
"""
return 'objects'
def get_title(self):
num_matches = self.kwargs['num_matches']
if num_matches == 1:
object_name = self.get_object_name_singular(num_matches=num_matches)
else:
object_name = self.get_object_name_plural(num_matches=num_matches)
return gettext_lazy('Found %(result_num)s of %(total_num)s %(object_name)s') % {
'result_num': self.kwargs['num_matches'],
'total_num': self.kwargs['num_total'],
'object_name': object_name
}
def get_base_css_classes_list(self):
"""
        Overridden to return an empty list, so that the css classes the superclasses
        would otherwise add (such as ``cradmin-legacy-listbuilder-itemvalue-titledescription``)
        are not applied.
"""
return []
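# Illustrative sketch (not part of the original module): concrete views typically
# subclass WithResultValueRenderable and override the object-name hooks above so the
# "Found X of Y <objects>" title reads naturally. The class below is hypothetical.
class AssignmentResultValueRenderable(WithResultValueRenderable):
    def get_object_name_singular(self, num_matches):
        return 'assignment'

    def get_object_name_plural(self, num_matches):
        return 'assignments'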
class RowListWithMatchResults(RowList):
"""
Extends the default RowList with rendering of filter hit count and
total object count.
"""
match_result_value_renderable = WithResultValueRenderable
match_result_frame_renderable = DefaultSpacingItemFrame
def append_results_renderable(self):
result_info_renderable = self.match_result_value_renderable(
value=None,
num_matches=self.num_matches,
num_total=self.num_total
)
self.renderable_list.insert(0, self.match_result_frame_renderable(inneritem=result_info_renderable))
def __init__(self, num_matches, num_total, page):
self.num_matches = num_matches
self.num_total = num_total
self.page = page
super(RowListWithMatchResults, self).__init__()
if page == 1:
self.append_results_renderable()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2018-12-06 23:24
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('news', '0003_remove_news_url_desc'),
]
operations = [
migrations.CreateModel(
name='Route',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255)),
('description', models.TextField(blank=True, null=True)),
('year', models.PositiveSmallIntegerField()),
('gpsies', models.CharField(blank=True, max_length=255, null=True)),
('ranking', models.FloatField(default=0)),
('level', models.PositiveIntegerField()),
('season', models.CharField(blank=True, max_length=255, null=True)),
('bike_type', models.CharField(blank=True, max_length=255, null=True)),
('duration', models.DurationField(default=0)),
('distance', models.PositiveSmallIntegerField()),
('photo_google', models.CharField(blank=True, max_length=255, null=True)),
('comments', models.ManyToManyField(blank=True, to='news.Comments')),
],
options={
'ordering': ['level'],
},
),
]
|
import sys
'''
Parses the FlyBase version of the myProteinNet.
Replaces PubMed IDs with "NA"
'''
def main():
infile = 'InteractionsDetectionMethods.tsv'
evidencefile = 'mi.owl'
## parse evidence file into a key of (MI,name) pairs.
evmap = {}
mi=None
with open(evidencefile) as fin:
for line in fin:
if 'id: MI:' in line:
mi = line.strip().split(' ')[1]
elif 'name: ' in line and mi:
name = '-'.join(line.strip().split(' ')[1:])
evmap[mi]=name
mi = None
print '%d MI terms mapped.' % (len(evmap))
## generate dictionary of (key,value) pairs, where key is a tuple
## that represents an edge and the value is a set of evidence names.
rows = {}
with open(infile) as fin:
for line in fin:
row = line.strip().split()
e = tuple([row[0],row[1]])
ev = evmap.get(row[2],row[2])
if e not in rows:
rows[e] = set()
rows[e].add(ev)
print '%d edges read' % (len(rows))
outfile = 'myProteinNet-flybase.txt'
out = open(outfile,'w')
out.write('#FlyBase1\tFlyBase2\tPubMedIDs\tEvidence\n')
for e in rows:
out.write('%s\t%s\tNA\t%s\n' % (e[0],e[1],';'.join(rows[e])))
out.close()
print 'wrote to %s' % (outfile)
if __name__ == '__main__':
main()
|
# -*- coding: utf-8 -*-
"""
Write a program to find the nth super ugly number.
Super ugly numbers are positive numbers whose prime factors are all in the given prime list primes of size k. For
example, [1, 2, 4, 7, 8, 13, 14, 16, 19, 26, 28, 32] is the sequence of the first 12 super ugly numbers given primes =
[2, 7, 13, 19] of size 4.
Note:
(1) 1 is a super ugly number for any given primes.
(2) The given numbers in primes are in ascending order.
(3) 0 < k ≤ 100, 0 < n ≤ 10^6, 0 < primes[i] < 1000.
"""
import heapq
from collections import deque
import sys
__author__ = 'Daniel'
class Solution(object):
def nthSuperUglyNumber(self, n, primes):
"""
DP O(kn)
:type n: int
:type primes: List[int]
:rtype: int
"""
k = len(primes)
ret = [sys.maxint for _ in xrange(n)]
ret[0] = 1
# for each prime, a pointer pointing to the value of next unused number in the result
idxes = [0 for _ in xrange(k)]
for i in xrange(1, n):
for j in xrange(k):
ret[i] = min(ret[i], primes[j]*ret[idxes[j]])
for j in xrange(k):
if ret[i] == primes[j]*ret[idxes[j]]:
idxes[j] += 1
return ret[n-1]
class QueueWrapper(object):
def __init__(self, idx, q):
self.idx = idx
self.q = q
def __cmp__(self, other):
return self.q[0] - other.q[0]
class SolutionHeap(object):
def nthSuperUglyNumber(self, n, primes):
"""
O(k lg k) + O(nk)
:type n: int
:type primes: List[int]
:rtype: int
"""
ret = 1
h = [QueueWrapper(i, deque([v])) for i, v in enumerate(primes)]
dic = {e.idx: e for e in h}
heapq.heapify(h)
for _ in xrange(n-1):
mini = heapq.heappop(h)
ret = mini.q.popleft()
for i in xrange(mini.idx, len(primes)):
dic[i].q.append(ret*primes[i])
heapq.heappush(h, mini)
return ret
if __name__ == "__main__":
assert Solution().nthSuperUglyNumber(12, [2, 7, 13, 19]) == 32
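# Worked example (illustrative): with primes = [2, 7, 13, 19] the DP in Solution
# fills ret = [1, 2, 4, 7, 8, 13, 14, 16, 19, 26, 28, 32] for n = 12, so the answer
# is 32. Each step takes the smallest primes[j] * ret[idxes[j]] and then advances
# every pointer j that produced that minimum, which is how duplicates are skipped.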
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
from libml.data import DataSet, augment_svhn
from libml.data_pair import stack_augment
DATASETS = {}
DATASETS.update(
[DataSet.creator('svhn500', 0, label, valid, [augment_svhn, stack_augment(augment_svhn)], do_memoize=False)
for label, valid in itertools.product([27, 38, 77, 156, 355, 671, 867], [1, 5000])])
DATASETS.update(
[DataSet.creator('svhn300', 0, label, valid, [augment_svhn, stack_augment(augment_svhn)], do_memoize=False)
for label, valid in itertools.product([96, 185, 353, 710, 1415, 2631, 3523], [1, 5000])])
DATASETS.update(
[DataSet.creator('svhn200', 0, label, valid, [augment_svhn, stack_augment(augment_svhn)], do_memoize=False)
for label, valid in itertools.product([56, 81, 109, 138, 266, 525, 1059, 2171, 4029, 5371], [1, 5000])])
DATASETS.update(
[DataSet.creator('svhn200s150', 0, label, valid, [augment_svhn, stack_augment(augment_svhn)], do_memoize=False)
for label, valid in itertools.product([145, 286, 558, 1082, 2172, 4078, 5488], [1, 5000])])
|
# Amara, universalsubtitles.org
#
# Copyright (C) 2013 Participatory Culture Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see
# http://www.gnu.org/licenses/agpl-3.0.html.
from django import forms
from comments.models import Comment
from django.contrib.contenttypes.models import ContentType
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
COMMENT_MAX_LENGTH = getattr(settings,'COMMENT_MAX_LENGTH',3000)
class CommentForm(forms.ModelForm):
honeypot = forms.CharField(required=False, widget=forms.HiddenInput)
content = forms.CharField(label='Comment', max_length=COMMENT_MAX_LENGTH,
widget=forms.Textarea(attrs={
'placeholder': _("Write a comment"),
'rows': 3,
}))
class Meta:
model = Comment
fields = ('content', 'reply_to', 'object_pk', 'content_type')
def __init__(self, obj, *args, **kwargs):
if obj:
ct = ContentType.objects.get_for_model(obj)
kwargs.setdefault('initial', {})
kwargs['initial']['object_pk'] = obj.pk
kwargs['initial']['content_type'] = ct.pk
super(CommentForm, self).__init__(*args, **kwargs)
self.fields['reply_to'].widget = forms.HiddenInput()
self.fields['object_pk'].widget = forms.HiddenInput()
self.fields['content_type'].widget = forms.HiddenInput()
def clean(self):
reply_to = self.cleaned_data.get('reply_to')
content_type = self.cleaned_data.get('content_type')
object_pk = self.cleaned_data.get('object_pk')
if reply_to and content_type and object_pk:
if not reply_to.content_type == content_type and not reply_to.object_pk == object_pk:
                raise forms.ValidationError(_(u'You can only reply to comments on the same object'))
return self.cleaned_data
def clean_honeypot(self):
"""Check that nothing's been entered into the honeypot."""
value = self.cleaned_data["honeypot"]
if value:
raise forms.ValidationError(self.fields["honeypot"].label)
return value
def save(self, user, commit=True):
obj = super(CommentForm, self).save(False)
obj.user = user
if commit:
obj.save()
return obj
def get_errors(self):
from django.utils.encoding import force_unicode
output = {}
for key, value in self.errors.items():
            output[key] = '\n'.join([force_unicode(i) for i in value])
return output
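# Illustrative usage sketch (not part of the original module), e.g. inside a view;
# the names `video` and `request` below are hypothetical:
#
#   form = CommentForm(video, data=request.POST)
#   if form.is_valid():
#       comment = form.save(user=request.user)
#   else:
#       errors = form.get_errors()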
|
import signal
import sys
from checker import Checker
def process_input(sentence):
sentence = checker.words(sentence)
length = len(sentence)
for index, word in enumerate(sentence):
if index == 0:
pre = ''
else:
pre = sentence[index - 1]
if index == length - 1:
post = ''
else:
post = sentence[index + 1]
proposals = checker.correct(pre, word, post)
print('{0}: {1}'.format(word, proposals))
def signal_handler(signal_handled, frame):
exiting()
def exiting(initial_jump=True):
bye_bye = 'Bye bye!'
if initial_jump:
bye_bye = '\n{0}'.format(bye_bye)
print(bye_bye)
sys.exit(0)
if __name__ == '__main__':
signal.signal(signal.SIGINT, signal_handler)
print('Welcome to a simple spell checker.\n'
'You can finish it with \'Ctrl+c\'.\n'
'Please wait while the program is loading...')
checker = Checker()
while True:
user_input = input('Type a sentence to check: ')
if user_input == 'exit':
exiting(False)
else:
process_input(user_input)
|
from urllib.request import urlopen
from bs4 import BeautifulSoup
import time
import csv
# Now we add ability to write out a CSV file
# import the csv library
# open new file for writing
csvfile = open("mls_players.csv", 'w', newline='', encoding='utf-8')
c = csv.writer(csvfile)
# write the header row in the CSV
c.writerow(['title', 'team', 'position', 'birthday', 'birthplace', 'twitter'])
html = urlopen("http://www.mlssoccer.com/players")
bsObj = BeautifulSoup(html, "html.parser")
player_list = []
# player links are on multiple pages -- get the next page URL
def get_next_page(html, bsObj):
next_page = bsObj.find( "a", {"title":"Go to next page"} )
if next_page and ('href' in next_page.attrs):
partial = str(next_page.attrs['href'])
new_url = "http://www.mlssoccer.com" + partial
html = urlopen(new_url)
bsObj = BeautifulSoup(html, "html.parser")
get_player_pages(html, bsObj)
else:
print("Done collecting URLs ...")
# run this on each page to get player detail page links
def get_player_pages(html, bsObj):
global player_list
tag_list = bsObj.findAll( "a", {"class":"row_link"} )
for tag in tag_list:
if 'href' in tag.attrs:
player_list.append(str(tag.attrs['href']))
time.sleep(1)
# disable for testing - use only first page URLs
# get_next_page(html, bsObj)
def get_player_details(player_list):
for player in player_list:
new_url = "http://www.mlssoccer.com" + player
html = urlopen(new_url)
bsObj = BeautifulSoup(html, "html.parser")
bsObj.span.decompose()
player_details = []
title = bsObj.find( "div", {"class":"title"} )
team = bsObj.find( "div", {"class":"club"} )
position = bsObj.find( "span", {"class":"position"} )
birthday = bsObj.find( "div", {"class":"age"} )
# <div class="age"><span class="category">Age:</span>
# 23 (10/21/1992)</div>
birthplace = bsObj.find( "div", {"class":"hometown"} )
# <div class="hometown"><span class="category">Birthplace:</span>
# Barranquilla, Colombia</div>
twitter = bsObj.find( "div", {"class":"twitter_handle"} )
player_details = [title, team, position, birthday, birthplace,
twitter]
# new empty list for CSV rows
row = []
for detail in player_details:
try:
# write a new item into the list, row
row.append( detail.get_text() )
except AttributeError:
# write a new item into the list, row
row.append( "None" )
# write a new row in the CSV, by writing the list
c.writerow( row )
# delay program for 1 second
time.sleep(1)
get_player_pages(html, bsObj)
get_player_details(player_list)
# close CSV file
csvfile.close()
|
import json
import decimal
import collections
from cxio.cx_constants import CxConstants
from cxio.element_maker import ElementMaker
class CxWriter(object):
""" This is to write CX data to a output stream (a file-like object).
"""
def __init__(self, out):
""" Creates a new CxWriter for writing to "out".
:param out: object
A file-like object to write to
"""
if out is None:
raise AssertionError('output stream must not be none')
self.__out = out
self.__pre_meta_data = []
self.__post_meta_data = []
self.__aspect_element_counts = {}
self.__started = False
self.__ended = False
self.__fragment_started = False
self.__first = True
self.__in_fragment = False
self.__pretty_formatting = True
def set_pretty_formatting(self, pretty_formatting=True):
""" This allows to turn "pretty" formatting on/off.
"Pretty" formatting is on by default.
:param pretty_formatting: boolean
"""
self.__pretty_formatting = pretty_formatting
def add_pre_meta_data(self, pre_meta_data):
""" To add pre meta data, to be written prior to the aspect elements.
:param pre_meta_data: list
A list of Elements representing pre-meta data
"""
if pre_meta_data is None:
raise AssertionError('pre meta data must not be none')
if self.__ended:
raise IOError('already ended')
if self.__started:
raise IOError('already started')
self.__add_meta_data(self.__pre_meta_data, pre_meta_data)
def add_post_meta_data(self, post_meta_data):
""" To add post meta data, to be written after the aspect elements.
:param post_meta_data: list
A list of Elements representing post-meta data
"""
if post_meta_data is None:
raise AssertionError('post meta data must not be none')
if self.__ended:
raise IOError('already ended')
self.__add_meta_data(self.__post_meta_data, post_meta_data)
def start(self):
""" To start writing CX formatted data.
"""
if self.__ended:
raise IOError('already ended')
if self.__started:
raise IOError('already started')
self.__started = True
self.__out.write('[')
self.__write_number_verification_element()
if len(self.__pre_meta_data) > 0:
self.__write_meta_data(self.__pre_meta_data)
def end(self, success=True, error_msg=''):
""" To end writing CX formatted data.
"""
if self.__ended:
raise IOError('already ended')
if not self.__started:
raise IOError('not started')
if self.__fragment_started:
raise IOError('fragment not ended')
if len(self.__post_meta_data) > 0:
self.__write_meta_data(self.__post_meta_data)
self.__write_status_element(success, error_msg)
self.__ended = True
self.__started = False
if self.__pretty_formatting:
self.__out.write('\n')
self.__out.write(']')
def start_aspect_fragment(self, aspect_name):
""" To start writing a aspect fragment (list of aspect elements of the same category).
:param aspect_name: String
The aspect name
"""
if aspect_name is None:
raise AssertionError('aspect name data must not be none')
if self.__ended:
raise IOError('already ended')
if not self.__started:
raise IOError('not started')
if self.__fragment_started:
raise IOError('fragment already started')
self.__fragment_started = True
if self.__first:
self.__first = False
else:
self.__out.write(',')
if self.__pretty_formatting:
self.__out.write('\n { "')
else:
self.__out.write('{"')
self.__out.write(aspect_name)
if self.__pretty_formatting:
self.__out.write('": [\n ')
else:
self.__out.write('":[')
def end_aspect_fragment(self):
""" To end writing a aspect fragment (list of aspect elements of the same category).
"""
if self.__ended:
raise IOError('already ended')
if not self.__fragment_started:
raise IOError('fragment not started')
self.__fragment_started = False
if self.__pretty_formatting:
self.__out.write(' ]\n }')
else:
self.__out.write(']}')
self.__in_fragment = False
def write_aspect_element(self, element):
""" To write one aspect element to a aspect fragment.
:param element: AspectElement
The aspect element to be written
"""
if self.__ended:
raise IOError('already ended')
if not self.__fragment_started:
raise IOError('fragment not started')
if self.__in_fragment is True:
if self.__pretty_formatting:
self.__out.write(',\n ')
else:
self.__out.write(',')
self.__out.write(self.__aspect_element_to_json(element))
self.__in_fragment = True
my_name = element.get_name()
if my_name not in self.__aspect_element_counts:
self.__aspect_element_counts[my_name] = 1
else:
self.__aspect_element_counts[my_name] += 1
def get_aspect_element_counts(self):
""" Returns a dictionary containing aspect element counts written out so far.
:return:dict
"""
return self.__aspect_element_counts
def write_aspect_fragment(self, aspect_element_list):
""" Convenience method to write a list of aspect elements ("aspect fragment").
:param aspect_element_list: list
The list of AspectElement (of the same category) to be written out.
"""
if len(aspect_element_list) > 0:
name = aspect_element_list[0].get_name()
self.start_aspect_fragment(name)
for aspect_element in aspect_element_list:
if not name == aspect_element.get_name():
raise ValueError('"' + str(name) + '" different from "' + str(aspect_element.get_name() + '"'))
self.write_aspect_element(aspect_element)
self.end_aspect_fragment()
def write_single_aspect_fragment(self, aspect_element):
""" Convenience method to write a single aspect element as "aspect fragment".
        Not recommended; use write_aspect_fragment whenever more than
        one aspect element of the same category needs to be written.
:param aspect_element: AspectElement a single aspect element
"""
name = aspect_element.get_name()
self.start_aspect_fragment(name)
self.write_aspect_element(aspect_element)
self.end_aspect_fragment()
def __write_number_verification_element(self):
e = ElementMaker.create_number_verification_element()
if self.__pretty_formatting:
self.__out.write('\n { "')
else:
self.__out.write('{"')
self.__out.write(e.get_name())
self.__out.write('": ')
self.__out.write(self.__aspect_element_to_json(e))
self.__out.write(' },')
def __write_status_element(self, success=True, error_msg=''):
e = ElementMaker.create_status_element(success,error_msg)
if self.__pretty_formatting:
self.__out.write(',\n { "')
else:
self.__out.write(',{"')
self.__out.write(e.get_name())
self.__out.write('": ')
self.__out.write(self.__aspect_element_to_json(e))
self.__out.write(' }')
def __write_meta_data(self, meta_data):
self.start_aspect_fragment(CxConstants.META_DATA)
for e in meta_data:
self.write_aspect_element(e)
self.end_aspect_fragment()
@staticmethod
def __aspect_element_to_json(aspect_element):
return json.dumps(aspect_element.get_data(), cls=DecimalEncoder)
@staticmethod
def __add_meta_data(meta_data, add_me):
if isinstance(add_me, collections.Iterable):
meta_data.extend(add_me)
else:
meta_data.append(add_me)
class DecimalEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, decimal.Decimal):
return float(o)
return super(DecimalEncoder, self).default(o)
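# --- Illustrative usage sketch (not part of the original module) ---
# A typical write cycle, assuming `elements` is a list of cxio AspectElement
# objects that all belong to the same aspect (their construction is not shown
# above and is an assumption here):
#
#   import io
#   out = io.StringIO()
#   writer = CxWriter(out)
#   writer.start()
#   writer.write_aspect_fragment(elements)
#   writer.end(success=True)
#   cx_json = out.getvalue()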
|
"""Remote helper class for communicating with juju machines."""
import abc
import logging
import os
import subprocess
import sys
import zlib
import winrm
import jujupy
import utility
__metaclass__ = type
def _remote_for_series(series):
"""Give an appropriate remote class based on machine series."""
if series is not None and series.startswith("win"):
return WinRmRemote
return SSHRemote
def remote_from_unit(client, unit, series=None, status=None):
"""Create remote instance given a juju client and a unit."""
if series is None:
if status is None:
status = client.get_status()
machine = status.get_unit(unit).get("machine")
if machine is not None:
series = status.status["machines"].get(machine, {}).get("series")
remotecls = _remote_for_series(series)
return remotecls(client, unit, None, series=series, status=status)
def remote_from_address(address, series=None):
"""Create remote instance given an address"""
remotecls = _remote_for_series(series)
return remotecls(None, None, address, series=series)
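# Illustrative usage sketch (not part of the original module): the factories above
# pick the remote class from the machine series, e.g. (addresses are hypothetical):
#
#   remote = remote_from_address("10.0.0.5", series="trusty")       # -> SSHRemote
#   remote = remote_from_address("10.0.0.6", series="win2012hvr2")  # -> WinRmRemote
#   contents = remote.cat("/var/log/cloud-init.log")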
class _Remote:
"""_Remote represents a juju machine to access over the network."""
__metaclass__ = abc.ABCMeta
def __init__(self, client, unit, address, series=None, status=None):
if address is None and (client is None or unit is None):
raise ValueError("Remote needs either address or client and unit")
self.client = client
self.unit = unit
self.use_juju_ssh = unit is not None
self.address = address
self.series = series
self.status = status
def __repr__(self):
params = []
if self.client is not None:
params.append("env=" + repr(self.client.env.environment))
if self.unit is not None:
params.append("unit=" + repr(self.unit))
if self.address is not None:
params.append("addr=" + repr(self.address))
return "<{} {}>".format(self.__class__.__name__, " ".join(params))
@abc.abstractmethod
def cat(self, filename):
"""
Get the contents of filename from the remote machine.
        Environment variables in the filename will be expanded according
        to platform-specific rules.
"""
@abc.abstractmethod
def copy(self, destination_dir, source_globs):
"""Copy files from the remote machine."""
def is_windows(self):
"""Returns True if remote machine is running windows."""
return self.series and self.series.startswith("win")
def get_address(self):
"""Gives the address of the remote machine."""
self._ensure_address()
return self.address
def update_address(self, address):
"""Change address of remote machine."""
self.address = address
def _get_status(self):
if self.status is None:
self.status = self.client.get_status()
return self.status
def _ensure_address(self):
if self.address:
return
if self.client is None:
raise ValueError("No address or client supplied")
status = self._get_status()
unit = status.get_unit(self.unit)
if 'public-address' not in unit:
raise ValueError("No public address for unit: {!r} {!r}".format(
self.unit, unit))
self.address = unit['public-address']
def _default_is_command_error(err):
"""
    Whether to treat the error as an issue with the remote command rather than with ssh.
    This is a conservative default; remote commands may return a variety of
    other return codes. However, as the fallback to the local ssh binary will
    repeat the command, those problems will be exposed later anyway.
"""
return err.returncode == 1
def _no_platform_ssh():
"""True if no openssh binary is available on this platform."""
return sys.platform == "win32"
class SSHRemote(_Remote):
"""SSHRemote represents a juju machine to access using ssh."""
_ssh_opts = [
"-o", "User ubuntu",
"-o", "UserKnownHostsFile /dev/null",
"-o", "StrictHostKeyChecking no",
"-o", "PasswordAuthentication no",
]
# Limit each operation over SSH to 2 minutes by default
timeout = 120
def run(self, command_args, is_command_error=_default_is_command_error):
"""
Run a command on the remote machine.
If the remote instance has a juju unit run will default to using the
juju ssh command. Otherwise, or if that fails, it will fall back to
using ssh directly.
The command_args param is a string or list of arguments to be invoked
on the remote machine. A string must be given if special shell
characters are used.
The is_command_error param is a function that takes an instance of
CalledProcessError and returns whether that error comes from the
command being run rather than ssh itself. This can be used to skip the
fallback to native ssh behaviour when running commands that may fail.
"""
if not isinstance(command_args, (list, tuple)):
command_args = [command_args]
if self.use_juju_ssh:
logging.debug('juju ssh {}'.format(self.unit))
try:
return self.client.get_juju_output(
"ssh", self.unit, *command_args, timeout=self.timeout)
except subprocess.CalledProcessError as e:
logging.warning(
"juju ssh to {!r} failed, returncode: {} output: {!r}"
" stderr: {!r}".format(
self.unit, e.returncode, e.output,
getattr(e, "stderr", None)))
# Don't fallback to calling ssh directly if command really
# failed or if there is likely to be no usable ssh client.
if is_command_error(e) or _no_platform_ssh():
raise
self.use_juju_ssh = False
self._ensure_address()
args = ["ssh"]
args.extend(self._ssh_opts)
args.append(self.address)
args.extend(command_args)
logging.debug(' '.join(utility.quote(i) for i in args))
return self._run_subprocess(args)
def copy(self, destination_dir, source_globs):
"""Copy files from the remote machine."""
self._ensure_address()
args = ["scp", "-rC"]
args.extend(self._ssh_opts)
address = utility.as_literal_address(self.address)
args.extend(["{}:{}".format(address, f) for f in source_globs])
args.append(destination_dir)
self._run_subprocess(args)
def cat(self, filename):
"""
Get the contents of filename from the remote machine.
Tildes and environment variables in the form $TMP will be expanded.
"""
return self.run(["cat", filename])
def _run_subprocess(self, command):
if self.timeout:
command = jujupy.get_timeout_prefix(self.timeout) + tuple(command)
return subprocess.check_output(command, stdin=subprocess.PIPE)
class _SSLSession(winrm.Session):
def __init__(self, target, auth, transport="ssl"):
key, cert = auth
self.url = self._build_url(target, transport)
self.protocol = winrm.Protocol(self.url, transport=transport,
cert_key_pem=key, cert_pem=cert)
_ps_copy_script = """\
$ErrorActionPreference = "Stop"
function OutputEncodedFile {
param([String]$filename, [IO.Stream]$instream)
$trans = New-Object Security.Cryptography.ToBase64Transform
$out = [Console]::OpenStandardOutput()
$bs = New-Object Security.Cryptography.CryptoStream($out, $trans,
[Security.Cryptography.CryptoStreamMode]::Write)
$zs = New-Object IO.Compression.DeflateStream($bs,
[IO.Compression.CompressionMode]::Compress)
[Console]::Out.Write($filename + "|")
try {
$instream.CopyTo($zs)
} finally {
$zs.close()
$bs.close()
[Console]::Out.Write("`n")
}
}
function GatherFiles {
param([String[]]$patterns)
ForEach ($pattern in $patterns) {
$path = [Environment]::ExpandEnvironmentVariables($pattern)
ForEach ($file in Get-Item -path $path) {
try {
$in = New-Object IO.FileStream($file, [IO.FileMode]::Open,
[IO.FileAccess]::Read, [IO.FileShare]"ReadWrite,Delete")
OutputEncodedFile -filename $file.name -instream $in
} catch {
$utf8 = New-Object Text.UTF8Encoding($False)
$errstream = New-Object IO.MemoryStream(
$utf8.GetBytes($_.Exception), $False)
$errfilename = $file.name + ".copyerror"
OutputEncodedFile -filename $errfilename -instream $errstream
}
}
}
}
try {
GatherFiles -patterns @(%s)
} catch {
Write-Error $_.Exception
exit 1
}
"""
class WinRmRemote(_Remote):
"""WinRmRemote represents a juju machine to access using winrm."""
def __init__(self, *args, **kwargs):
super(WinRmRemote, self).__init__(*args, **kwargs)
self._ensure_address()
self.use_juju_ssh = False
self.certs = utility.get_winrm_certs()
self.session = _SSLSession(self.address, self.certs)
def update_address(self, address):
"""Change address of remote machine, refreshes the winrm session."""
self.address = address
self.session = _SSLSession(self.address, self.certs)
_escape = staticmethod(subprocess.list2cmdline)
def run_cmd(self, cmd_list):
"""Run cmd and arguments given as a list returning response object."""
if isinstance(cmd_list, basestring):
raise ValueError("run_cmd requires a list not a string")
# pywinrm does not correctly escape arguments, fix up by escaping cmd
# and giving args as a list of a single pre-escaped string.
cmd = self._escape(cmd_list[:1])
args = [self._escape(cmd_list[1:])]
return self.session.run_cmd(cmd, args)
def run_ps(self, script):
"""Run string of powershell returning response object."""
return self.session.run_ps(script)
def cat(self, filename):
"""
Get the contents of filename from the remote machine.
        Backslashes will be treated as directory separators. Environment
variables in the form %TMP% will be expanded.
"""
result = self.session.run_cmd("type", [self._escape([filename])])
if result.status_code:
logging.warning("winrm cat failed %r", result)
return result.std_out
# TODO(gz): Unlike SSHRemote.copy this only supports copying files, not
# directories and their content. Both the powershell script and
# the unpacking method will need updating to support that.
def copy(self, destination_dir, source_globs):
"""Copy files from the remote machine."""
# Encode globs into script to run on remote machine and return result.
script = _ps_copy_script % ",".join(s.join('""') for s in source_globs)
result = self.run_ps(script)
if result.status_code:
logging.warning("winrm copy stderr:\n%s", result.std_err)
raise subprocess.CalledProcessError(result.status_code,
"powershell", result)
self._encoded_copy_to_dir(destination_dir, result.std_out)
@staticmethod
def _encoded_copy_to_dir(destination_dir, output):
"""Write remote files from powershell script to disk.
The given output from the powershell script is one line per file, with
the filename first, then a pipe, then the base64 encoded deflated file
contents. This method reverses that process and creates the files in
the given destination_dir.
"""
start = 0
while True:
end = output.find("\n", start)
if end == -1:
break
mid = output.find("|", start, end)
if mid == -1:
if not output[start:end].rstrip("\r\n"):
break
raise ValueError("missing filename in encoded copy data")
filename = output[start:mid]
if "/" in filename:
# Just defense against path traversal bugs, should never reach.
raise ValueError("path not filename {!r}".format(filename))
with open(os.path.join(destination_dir, filename), "wb") as f:
f.write(zlib.decompress(output[mid + 1:end].decode("base64"),
-zlib.MAX_WBITS))
start = end + 1
|
from misago.models import ForumRole
from misago.utils.translation import ugettext_lazy as _
def load():
role = ForumRole()
role.name = _('Full Access').message
role.permissions = {
'can_see_forum': True,
'can_see_forum_contents': True,
'can_read_threads': 2,
'can_start_threads': 2,
'can_edit_own_threads': True,
'can_soft_delete_own_threads': True,
'can_write_posts': 2,
'can_edit_own_posts': True,
'can_soft_delete_own_posts': True,
'can_upvote_posts': True,
'can_downvote_posts': True,
'can_see_posts_scores': 2,
'can_see_votes': True,
'can_make_polls': True,
'can_vote_in_polls': True,
'can_see_poll_votes': True,
'can_edit_polls': 0,
'can_delete_polls': 2,
'can_upload_attachments': True,
'can_download_attachments': True,
'attachment_size': 5000,
'attachment_limit': 12,
'can_approve': True,
'can_change_prefixes': True,
'can_see_changelog': True,
'can_pin_threads': 2,
'can_edit_threads_posts': True,
'can_move_threads_posts': True,
'can_close_threads': True,
'can_protect_posts': True,
'can_delete_threads': 2,
'can_delete_posts': 2,
'can_delete_attachments': True,
'can_see_deleted_checkpoints': True,
'can_delete_checkpoints': 2,
}
role.save(force_insert=True)
role = ForumRole()
role.name = _('Standard Access and Upload').message
role.permissions = {
'can_see_forum': True,
'can_see_forum_contents': True,
'can_read_threads': 2,
'can_start_threads': 2,
'can_edit_own_threads': True,
'can_write_posts': 2,
'can_edit_own_posts': True,
'can_soft_delete_own_posts': True,
'can_upvote_posts': True,
'can_downvote_posts': True,
'can_see_posts_scores': 2,
'can_make_polls': True,
'can_vote_in_polls': True,
'can_edit_polls': 30,
'can_delete_polls': 1,
'can_upload_attachments': True,
'can_download_attachments': True,
'attachment_size': 500,
'attachment_limit': 4,
}
role.save(force_insert=True)
role = ForumRole()
role.name = _('Standard Access').message
role.permissions = {
'can_see_forum': True,
'can_see_forum_contents': True,
'can_read_threads': 2,
'can_start_threads': 2,
'can_edit_own_threads': True,
'can_write_posts': 2,
'can_edit_own_posts': True,
'can_soft_delete_own_posts': True,
'can_upvote_posts': True,
'can_downvote_posts': True,
'can_see_posts_scores': 2,
'can_make_polls': True,
'can_vote_in_polls': True,
'can_edit_polls': 30,
'can_delete_polls': 1,
'can_download_attachments': True,
}
role.save(force_insert=True)
role = ForumRole()
role.name = _('Read and Download').message
role.permissions = {
'can_see_forum': True,
'can_see_forum_contents': True,
'can_read_threads': 2,
'can_download_attachments': True,
'can_see_posts_scores': 2,
}
role.save(force_insert=True)
role = ForumRole()
role.name = _('Threads list only').message
role.permissions = {
'can_see_forum': True,
'can_see_forum_contents': True,
}
role.save(force_insert=True)
role = ForumRole()
role.name = _('Read only').message
role.permissions = {
'can_see_forum': True,
'can_see_forum_contents': True,
'can_read_threads': 2,
'can_see_posts_scores': 2,
}
role.save(force_insert=True)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This program is free software. It comes without any warranty, to
# the extent permitted by applicable law. You can redistribute it
# and/or modify it under the terms of the Do What The Fuck You Want
# To Public License, Version 2, as published by Sam Hocevar. See
# http://sam.zoy.org/wtfpl/COPYING for more details.
#
# Localize.py - Incremental localization on XCode projects
# João Moreno 2009
# http://joaomoreno.com/
from sys import argv
from codecs import open
from re import compile
from copy import copy
import os
re_translation = compile(r'^"(.+)" = "(.+)";$')
re_comment_single = compile(r'^/(/.*|\*.*\*/)$')
re_comment_start = compile(r'^/\*.*$')
re_comment_end = compile(r'^.*\*/$')
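# Illustrative note (not part of the original script): re_translation matches the
# standard Localizable.strings entry line, while the comment regexes track the
# /* ... */ block that precedes it, e.g.:
#
#   /* Title of the cancel button */
#   "Cancel" = "Annuler";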
def print_help():
print u"""Usage: merge.py merged_file old_file new_file
Xcode localizable strings merger script. João Moreno 2009."""
class LocalizedString():
def __init__(self, comments, translation):
self.comments, self.translation = comments, translation
self.key, self.value = re_translation.match(self.translation).groups()
def __unicode__(self):
return u'%s%s\n' % (u''.join(self.comments), self.translation)
class LocalizedFile():
def __init__(self, fname=None, auto_read=False):
self.fname = fname
self.strings = []
self.strings_d = {}
if auto_read:
self.read_from_file(fname)
def read_from_file(self, fname=None):
fname = self.fname if fname == None else fname
try:
f = open(fname, encoding='utf_16', mode='r')
except:
print 'File %s does not exist.' % fname
exit(-1)
line = f.readline()
while line and line == u'\n':
line = f.readline()
while line:
comments = [line]
if not re_comment_single.match(line):
while line and not re_comment_end.match(line):
line = f.readline()
comments.append(line)
line = f.readline()
if line and re_translation.match(line):
translation = line
else:
raise Exception('invalid file: %s' % line)
line = f.readline()
while line and line == u'\n':
line = f.readline()
string = LocalizedString(comments, translation)
self.strings.append(string)
self.strings_d[string.key] = string
f.close()
def save_to_file(self, fname=None):
fname = self.fname if fname == None else fname
try:
f = open(fname, encoding='utf_16', mode='w')
except:
print 'Couldn\'t open file %s.' % fname
exit(-1)
for string in self.strings:
f.write(string.__unicode__())
f.close()
def merge_with(self, new):
merged = LocalizedFile()
for string in new.strings:
if self.strings_d.has_key(string.key):
new_string = copy(self.strings_d[string.key])
new_string.comments = string.comments
string = new_string
merged.strings.append(string)
merged.strings_d[string.key] = string
return merged
def merge(merged_fname, old_fname, new_fname):
try:
old = LocalizedFile(old_fname, auto_read=True)
new = LocalizedFile(new_fname, auto_read=True)
except Exception as e:
print 'Error: input files have invalid format. old: %s, new: %s' % (old_fname, new_fname)
print e
merged = old.merge_with(new)
merged.save_to_file(merged_fname)
STRINGS_FILE = 'Localizable.strings'
def localize(path, language, include_pods_and_frameworks):
if "Scripts" in path:
print "Must run script from the root folder"
quit()
os.chdir(path)
language = os.path.join(path, language)
original = merged = language + os.path.sep + STRINGS_FILE
old = original + '.old'
new = original + '.new'
# TODO: This is super ugly, we have to come up with a better way of doing it
if include_pods_and_frameworks:
find_cmd = 'find . ../Pods/WordPress* ../Pods/WPMediaPicker ../WordPressComStatsiOS/WordPressComStatsiOS ../WordPressShared/WordPressShared ../WordPressKit/WordPressKit -name "*.m" -o -name "*.swift" | grep -v Vendor'
else:
find_cmd = 'find . -name "*.m" -o -name "*.swift" | grep -v Vendor'
filelist = os.popen(find_cmd).read().replace("\n", " ")
if os.path.isfile(original):
os.rename(original, old)
os.system('genstrings -q -o "%s" %s' % (language, filelist))
os.rename(original, new)
merge(merged, old, new)
os.remove(new)
os.remove(old)
else:
os.system('genstrings -q -o "%s" %s' % (language, filelist))
if __name__ == '__main__':
basedir = os.getcwd()
localize(os.path.join(basedir, 'WordPress'), 'Resources/en.lproj', True)
localize(os.path.join(basedir, 'WordPress', 'WordPressTodayWidget'), 'Base.lproj', False)
localize(os.path.join(basedir, 'WordPress', 'WordPressShareExtension'), 'Base.lproj', False)
|
## $Id: controllers.py,v 1.4 2003/09/07 21:40:15 riq Exp $
##
## Tenes Empanadas Graciela
##
## Copyright (C) 2000,2003 Ricardo Quesada
##
## Author: Ricardo Calixto Quesada <[email protected]>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; only version 2 of the License
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
import pygame
from pygame.locals import *
from events import *
class Controller( object ):
def __init__( self ):
self._eventMgr = None
def getEvents( self ):
        raise NotImplementedError
def update( self ):
        raise NotImplementedError
def setEventMgr( self, evMgr ):
self._eventMgr = evMgr
class FPSController( Controller ):
    """Frames per second controller"""
    def __init__( self ):
        super( FPSController, self ).__init__()
        self._clock = pygame.time.Clock()
    def update( self ):
        """Cap the frame rate at 20 fps"""
        # reuse a single Clock; creating a new one on every call would not limit the frame rate
        self._clock.tick(20)
class PygameController( Controller ):
def getEvents( self ):
evts = []
for event in pygame.event.get():
if event.type == QUIT:
evts.append( QuitEvent() )
elif event.type == MOUSEMOTION:
evts.append( MouseMotionEvent( pygame.mouse.get_pos() ) )
elif event.type == MOUSEBUTTONUP:
evts.append( MouseButtonUp( pygame.mouse.get_pressed() ) )
elif event.type == MOUSEBUTTONDOWN:
evts.append( MouseButtonDown( pygame.mouse.get_pressed() ) )
return evts
|
# coding: utf-8
import netCDF4
import numpy as np
import matplotlib.pyplot as plt
import datetime as dt
import pandas as pd
import romspy
Chl2C_m = 0.0535 # [mg_Chl/mg_C]
PhyCN = 6.625 # [mole_C/mole_N]
C = 12.01  # [g_C/mole_C]
sta_JST = 'seconds since 1968-05-23 09:00:00 GMT'
obs_JST = 'days since 1968-05-23 09:00:00 GMT'
molN = u' [mmol N m$^{-3}$]'
molP = u' [mmol P m$^{-3}$]'
molO2 = u' [mmol O2 m$^{-3}$]'
def calculate_depth(nc, t, station):
s_rho = len(nc.dimensions['s_rho'])
zeta = nc.variables['zeta'][t,station-1]
h = nc.variables['h'][station-1]
Cs_r = nc.variables['Cs_r'][:]
depth = np.ndarray(shape=[s_rho])
depth = (h + zeta) * Cs_r
return depth
def read_sta(stafile, dtime, station, varnames, tunit=sta_JST):
nc = netCDF4.Dataset(stafile, 'r')
ocean_time = nc.variables['ocean_time'][:]
time = netCDF4.date2num(dtime, tunit)
t = np.where(ocean_time == time)[0][0]
var = {}
for name in varnames:
if name == 'TN':
NH4 = nc.variables['NH4'][t,station-1,:]
NO3 = nc.variables['NO3'][t,station-1,:]
phyt = nc.variables['phytoplankton'][t,station-1,:]
zoop = nc.variables['zooplankton'][t,station-1,:]
LDeN = nc.variables['LdetritusN'][t,station-1,:]
SDeN = nc.variables['SdetritusN'][t,station-1,:]
var[name] = NH4 + NO3 + phyt + zoop + LDeN + SDeN
elif name == 'DIN':
NH4 = nc.variables['NH4'][t,station-1,:]
NO3 = nc.variables['NO3'][t,station-1,:]
var[name] = NH4 + NO3
elif name == 'DetritusN':
LDeN = nc.variables['LdetritusN'][t,station-1,:]
SDeN = nc.variables['SdetritusN'][t,station-1,:]
var[name] = LDeN + SDeN
elif name == 'TP':
PO4 = nc.variables['PO4'][t,station-1,:]
LDeP = nc.variables['LdetritusP'][t,station-1,:]
SDeP = nc.variables['SdetritusP'][t,station-1,:]
var[name] = PO4 + LDeP + SDeP
elif name == 'DetritusP':
LDeP = nc.variables['LdetritusP'][t,station-1,:]
SDeP = nc.variables['SdetritusP'][t,station-1,:]
var[name] = LDeP + SDeP
else:
var[name] = nc.variables[name][t,station-1,:]
print stafile
# print netCDF4.num2date(ocean_time[0], tunit), '-', netCDF4.num2date(ocean_time[-1], tunit)
return var, calculate_depth(nc, t, station)
def read_obs(obsfile, dtime, station):
nc = netCDF4.Dataset(obsfile, 'r')
time = netCDF4.date2num(dtime, obs_JST)
obs_time = nc.variables['obs_time'][:]
print obsfile
# print netCDF4.num2date(obs_time[0], obs_JST), '-', netCDF4.num2date(obs_time[-1], obs_JST)
index = np.where(obs_time == time)[0]
obs_station = nc.variables['obs_station'][index]
obs_type = nc.variables['obs_type'][index]
obs_depth = nc.variables['obs_depth'][index]
obs_value = nc.variables['obs_value'][index]
data={'station':obs_station, 'depth':obs_depth, 'type':obs_type, 'value':obs_value}
df = pd.DataFrame(data)
df = df[df.station==station]
return df
def plot_sta(varname, var, depth, ax, c, label):
v = var[varname]
line = {'Free': '--', 'Assi': '-'}
ax.plot(v, depth, line[label], c=c[varname], label=label)
# ax.tick_params(labelleft='off')
def plot_obs(varname, station, obs, ax):
varid = {'temp':6, 'salt':7, 'chlorophyll':10, 'oxygen':15}
if varname in varid.keys():
var = obs[obs.type == varid[varname]]
elif varname == 'phytoplankton':
var = obs[obs.type == varid['chlorophyll']]
var.value = var.value * 2.18 / (Chl2C_m * PhyCN * C)
else:
return
if varname == 'oxygen':
T = obs[obs.type == varid['temp']]
S = obs[obs.type == varid['salt']]
T = np.asarray(T.value)
S = np.asarray(S.value)
O2p = np.asarray(var.value)
var.value = O2p * romspy.O2_saturation(T, S) / 100.0
ax.plot(var.value, var.depth, 'o', mec='k', mfc='w', mew=1, label='Obs')
def fennelP(dtime, station, freefile=None, assifile=None, obsfile=None, pngfile=None):
#varnames = ['temp', 'salt', 'chlorophyll', 'NO3', 'NH4', 'PO4', 'oxygen']
#varnames = ['temp', 'salt', 'chlorophyll', 'TN', 'TP', 'PP', 'oxygen']
#varnames = ['temp', 'salt', 'chlorophyll', 'PO4', 'LdetritusP', 'SdetritusP', 'oxygen']
varnames = ['temp', 'salt', 'chlorophyll', 'oxygen', 'DIN', 'DetritusN', 'PO4', 'DetritusP']
colors = ['c', 'k', 'g', 'r', 'r', 'm', 'm', 'b']
c = {name:c for name, c in zip(varnames, colors)}
# read
if freefile is not None:
fvar, fdepth = read_sta(freefile, dtime, station, varnames)
if assifile is not None:
avar, adepth = read_sta(assifile, dtime, station, varnames)
if obsfile is not None:
obs = read_obs(obsfile, dtime, station)
# plot
#fig, ax = plt.subplots(1, len(varnames), figsize=[20,3])
fig, ax = plt.subplots(2, 4, figsize=[20,10])
ax1 = [ax[i][j] for i in range(2) for j in range(4)]
for i, varname in enumerate(varnames):
if freefile is not None:
plot_sta(varname, fvar, fdepth, ax1[i], c, 'Free')
if assifile is not None:
plot_sta(varname, avar, adepth, ax1[i], c, 'Assi')
if obsfile is not None:
plot_obs(varname, station, obs, ax1[i])
ax1[i].grid()
ax1[i].set_ylim(-14,0)
# settings
ax1[0].set_xlabel('Temperature [degC]')
ax1[1].set_xlabel('Salinity')
ax1[2].set_xlabel('Chlorophyll [mg m$^{-3}$]')
ax1[3].set_xlabel('Oxygen'+molO2)
ax1[4].set_xlabel('DIN'+molN)
ax1[5].set_xlabel('DetritusN'+molN)
ax1[6].set_xlabel('PO4'+molP)
ax1[7].set_xlabel('DetritusP'+molP)
ax1[0].tick_params(labelleft='on')
ax1[0].set_xlim(15,33)
ax1[1].set_xlim(15,33)
ax1[2].set_xlim(0,10.0)
ax1[3].set_xlim(0,500.0)
ax1[4].set_xlim(0,5.0)
ax1[5].set_xlim(0,5.0)
ax1[6].set_xlim(0,1.0)
ax1[7].set_xlim(0,1.0)
# output
fig.suptitle('Sta.'+str(station)+dtime.strftime(' %Y-%m-%d %H:%M'), fontsize=10)
if pngfile is not None:
strtime = dtime.strftime('%m%d%H')
fig.savefig(pngfile.format(station, strtime), bbox_inches='tight', dpi=300)
else:
return ax
def npzd(dtime, station, freefile=None, assifile=None, obsfile=None, pngfile=None, tunit=sta_JST):
varnames = ['temp', 'salt', 'phytoplankton', 'NO3', 'zooplankton', 'detritus']
colors = ['c', 'k', 'g', 'r', 'b', 'm', 'c']
c = {name:c for name, c in zip(varnames, colors)}
if freefile is not None:
fvar, fdepth = read_sta(freefile, dtime, station, varnames, tunit)
if assifile is not None:
avar, adepth = read_sta(assifile, dtime, station, varnames, tunit)
if obsfile is not None:
obs = read_obs(obsfile, dtime, station)
fig, ax = plt.subplots(1, len(varnames), figsize=[15,3])
for i, name in enumerate(varnames):
if name == 'NO3':
fvar[name] = fvar[name] * 10.0
if freefile is not None:
plot_sta(name, fvar, fdepth, ax[i], c, 'Free')
if assifile is not None:
plot_sta(name, avar, adepth, ax[i], c, 'Assi')
if obsfile is not None:
plot_obs(name, station, obs, ax[i])
ax[i].grid()
ax[i].set_ylim(-14,0)
ax[0].tick_params(labelleft='on')
ax[0].set_xlabel('Temperature [degC]')
ax[1].set_xlabel('Salinity')
ax[2].set_xlabel('Phytoplankton'+molN)
ax[3].set_xlabel('NO3'+molN)
ax[5].set_xlabel('Detritus'+molN)
ax[4].set_xlabel('Zooplankton'+molN)
ax[0].set_xlim(23,33)
ax[1].set_xlim(23,33)
ax[2].set_xlim(0,6.0)
ax[3].set_xlim(0,6.0)
ax[4].set_xlim(0,6.0)
ax[5].set_xlim(0,6.0)
fig.suptitle('Sta.'+str(station)+dtime.strftime(' %Y-%m-%d %H:%M'))
if pngfile is not None:
strtime = dtime.strftime('%Y%m%d_%H%M')
fig.savefig(pngfile.format(station, strtime), bbox_inches='tight', dpi=300)
else:
return ax
if __name__ == '__main__':
dtime = dt.datetime(2012,1,10,0)
station = 12
freefile = '/Users/teruhisa/Dropbox/Data/OB500_fennelP/NL03/ob500_sta.nc'
obsfile = '/Users/teruhisa/Dropbox/Data/ob500_obs_2012_obweb-2.nc'
#pngfile = '/Users/teruhisa/Dropbox/Data/OB500_fennelP/NL03/profiles_{}_{}.png'
pngfile = 'test.png'
fennelP(dtime, station, freefile=freefile, obsfile=obsfile, pngfile=pngfile)
|
# Opus/UrbanSim urban simulation software.
# Copyright (C) 2010-2011 University of California, Berkeley, 2005-2009 University of Washington
# See opus_core/LICENSE
import os
import sys
import pickle
from numpy.random import seed, randint
from opus_core.logger import logger
from opus_core.misc import write_to_text_file
from opus_core.misc import get_config_from_opus_path
from opus_core.services.run_server.generic_option_group import GenericOptionGroup
from opus_core.services.run_server.run_manager import insert_auto_generated_cache_directory_if_needed
from opus_core.configurations.xml_configuration import XMLConfiguration
from opus_core.services.run_server.run_manager import RunManager
class StartRunSetOptionGroup(GenericOptionGroup):
"""Class for starting multiple runs.
"""
def __init__(self):
GenericOptionGroup.__init__(self, usage="python %prog [options]",
description="Starts running a set of runs.")
self.parser.add_option("-r", "--pickled-resource-file", dest="pickled_resource_file", default=None,
help="Opus path to pickled configuration file.")
self.parser.add_option("-c", "--configuration-path", dest="configuration_path", default=None,
help="Opus path to Python module defining run_configuration.")
self.parser.add_option("-x", "--xml-configuration", dest="xml_configuration", default=None,
help="file name of xml configuration (must also provide a scenario name using -s)")
self.parser.add_option("-s", "--scenario_name", dest="scenario_name", default=None,
help="name of the scenario to run")
self.parser.add_option("--directory-to-cache", dest="existing_cache_to_copy", default=None,
action="store",
help="Directory containing data to put in new cache.")
self.parser.add_option("--years-to-cache", dest="years_to_cache",
default=None, action="store",
help="List of years of data to take from the directory-to-cache (default is all years).")
if __name__ == "__main__":
try: import wingdbstub
except: pass
option_group = StartRunSetOptionGroup()
parser = option_group.parser
(options, args) = parser.parse_args()
run_manager = RunManager(option_group.get_services_database_configuration(options))
if options.pickled_resource_file is not None:
f = file(options.pickled_resource_file, 'r')
try:
config = pickle.load(f)
finally:
f.close()
elif options.configuration_path is not None:
opus_path = options.configuration_path
try:
config = get_config_from_opus_path(opus_path)
except ImportError:
# TODO: Once all fully-specified configurations are stored as classes,
# get rid of this use.
import_stmt = 'from %s import run_configuration as config' % opus_path
exec(import_stmt)
insert_auto_generated_cache_directory_if_needed(config)
elif options.xml_configuration is not None:
if options.scenario_name is None:
parser.print_help()
sys.exit(1)
config = XMLConfiguration(options.xml_configuration).get_run_configuration(options.scenario_name)
insert_auto_generated_cache_directory_if_needed(config)
else:
parser.print_help()
sys.exit(1)
if options.existing_cache_to_copy is not None:
config['creating_baseyear_cache_configuration'].cache_from_database = False
config['creating_baseyear_cache_configuration'].baseyear_cache.existing_cache_to_copy = options.existing_cache_to_copy
if options.years_to_cache is not None:
config['creating_baseyear_cache_configuration'].baseyear_cache.years_to_cache = eval(options.years_to_cache)
number_of_runs = config.get("number_of_runs", 1)
number_of_runs_in_parallel = min(config.get("parallel_runs", 1), number_of_runs)
# generate seeds for multiple runs
root_seed = config.get("seed", None)
seed(root_seed)
# generate different seed for each run (each seed contains 1 number)
seed_array = randint(1,2**30, number_of_runs)
list_of_cache_directories = []
for irun in range(number_of_runs):
config['seed']= (seed_array[irun],)
this_config = config.copy()
if ((irun + 1) % number_of_runs_in_parallel) == 0:
run_in_background = False
else:
run_in_background = True
run_manager.setup_new_run(cache_directory = this_config['cache_directory'],
configuration = this_config)
run_manager.run_run(this_config, run_as_multiprocess=False,
run_in_background=run_in_background)
if irun == 0:
# log file for the multiple runs will be located in the first cache
first_cache_directory = this_config['cache_directory']
log_file = os.path.join(first_cache_directory, 'multiple_runs.log')
logger.enable_file_logging(log_file)
logger.log_status("Multiple runs: %s replications" % number_of_runs)
logger.log_status("root random seed = %s" % str(root_seed))
else:
logger.enable_file_logging(log_file, verbose=False)
logger.log_status("Run %s: %s" % (irun+1, this_config['cache_directory']))
logger.disable_file_logging(log_file)
list_of_cache_directories.append(this_config['cache_directory'])
write_to_text_file(os.path.join(first_cache_directory,"cache_directories"),
list_of_cache_directories)
|
from .base_section import BaseSection
from mtools.util import OrderedDict
from mtools.util.print_table import print_table
class RsStateSection(BaseSection):
"""
RsStateSection class.
This section determines if there were any Replica Set state changes in
    the log file and prints out the times and information about the state changes
found.
"""
name = "rsstate"
def __init__(self, mloginfo):
BaseSection.__init__(self, mloginfo)
        # add the --rsstate flag to the argparser
helptext = 'outputs information about every detected RS state change'
self.mloginfo.argparser_sectiongroup.add_argument('--rsstate',
action='store_true',
help=helptext)
@property
def active(self):
"""Return boolean if this section is active."""
return self.mloginfo.args['rsstate']
def run(self):
"""Run this section and print out information."""
titles = ['date', 'host', 'state/message']
table_rows = []
for host, state, logevent in self.mloginfo.logfile.rs_state:
stats = OrderedDict()
stats['date'] = logevent.datetime.strftime("%b %d %H:%M:%S")
stats['host'] = host
stats['state/message'] = state
table_rows.append(stats)
print_table(table_rows, titles, uppercase_headers=False)
if len(self.mloginfo.logfile.rs_state) == 0:
print(" no rs state changes found")
|
""" Algorithms for clustering : Meanshift, Affinity propagation and spectral
clustering.
"""
# Author: Alexandre Gramfort [email protected]
# Gael Varoquaux [email protected]
# License: BSD
import numpy as np
import warnings
from ..base import BaseEstimator, ClusterMixin
from ..utils import as_float_array
from ..metrics import euclidean_distances
def affinity_propagation(S, preference=None, convergence_iter=15, max_iter=200,
damping=0.5, copy=True, verbose=False):
"""Perform Affinity Propagation Clustering of data
Parameters
----------
S: array [n_samples, n_samples]
Matrix of similarities between points
preference: array [n_samples,] or float, optional, default: None
Preferences for each point - points with larger values of
preferences are more likely to be chosen as exemplars. The number of
exemplars, i.e. of clusters, is influenced by the input preferences
value. If the preferences are not passed as arguments, they will be
set to the median of the input similarities (resulting in a moderate
        number of clusters). For a smaller number of clusters, this can be set
to the minimum value of the similarities.
convergence_iter: int, optional, default: 15
Number of iterations with no change in the number
of estimated clusters that stops the convergence.
max_iter: int, optional, default: 200
Maximum number of iterations
    damping: float, optional, default: 0.5
Damping factor between 0.5 and 1.
copy: boolean, optional, default: True
If copy is False, the affinity matrix is modified inplace by the
algorithm, for memory efficiency
verbose: boolean, optional, default: False
The verbosity level
Returns
-------
cluster_centers_indices: array [n_clusters]
index of clusters centers
labels : array [n_samples]
cluster labels for each point
Notes
-----
See examples/plot_affinity_propagation.py for an example.
References
----------
Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages
Between Data Points", Science Feb. 2007
"""
S = as_float_array(S, copy=copy)
n_samples = S.shape[0]
if S.shape[0] != S.shape[1]:
raise ValueError("S must be a square array (shape=%s)" % repr(S.shape))
if preference is None:
preference = np.median(S)
if damping < 0.5 or damping >= 1:
raise ValueError('damping must be >= 0.5 and < 1')
random_state = np.random.RandomState(0)
# Place preference on the diagonal of S
S.flat[::(n_samples + 1)] = preference
A = np.zeros((n_samples, n_samples))
R = np.zeros((n_samples, n_samples)) # Initialize messages
# Remove degeneracies
S += ((np.finfo(np.double).eps * S + np.finfo(np.double).tiny * 100) *
random_state.randn(n_samples, n_samples))
# Execute parallel affinity propagation updates
e = np.zeros((n_samples, convergence_iter))
ind = np.arange(n_samples)
for it in range(max_iter):
# Compute responsibilities
Rold = R.copy()
AS = A + S
I = np.argmax(AS, axis=1)
Y = AS[np.arange(n_samples), I] # np.max(AS, axis=1)
AS[ind, I[ind]] = - np.finfo(np.double).max
Y2 = np.max(AS, axis=1)
R = S - Y[:, np.newaxis]
R[ind, I[ind]] = S[ind, I[ind]] - Y2[ind]
R = (1 - damping) * R + damping * Rold # Damping
# Compute availabilities
Aold = A
Rp = np.maximum(R, 0)
Rp.flat[::n_samples + 1] = R.flat[::n_samples + 1]
A = np.sum(Rp, axis=0)[np.newaxis, :] - Rp
dA = np.diag(A)
A = np.minimum(A, 0)
A.flat[::n_samples + 1] = dA
A = (1 - damping) * A + damping * Aold # Damping
# Check for convergence
E = (np.diag(A) + np.diag(R)) > 0
e[:, it % convergence_iter] = E
K = np.sum(E, axis=0)
if it >= convergence_iter:
se = np.sum(e, axis=1)
unconverged = (np.sum((se == convergence_iter) + (se == 0))
!= n_samples)
if (not unconverged and (K > 0)) or (it == max_iter):
if verbose:
print("Converged after %d iterations." % it)
break
else:
if verbose:
print("Did not converge")
I = np.where(np.diag(A + R) > 0)[0]
K = I.size # Identify exemplars
if K > 0:
c = np.argmax(S[:, I], axis=1)
c[I] = np.arange(K) # Identify clusters
# Refine the final set of exemplars and clusters and return results
for k in range(K):
ii = np.where(c == k)[0]
j = np.argmax(np.sum(S[ii[:, np.newaxis], ii], axis=0))
I[k] = ii[j]
c = np.argmax(S[:, I], axis=1)
c[I] = np.arange(K)
labels = I[c]
# Reduce labels to a sorted, gapless, list
cluster_centers_indices = np.unique(labels)
labels = np.searchsorted(cluster_centers_indices, labels)
else:
labels = np.empty((n_samples, 1))
cluster_centers_indices = None
labels.fill(np.nan)
return cluster_centers_indices, labels
###############################################################################
class AffinityPropagation(BaseEstimator, ClusterMixin):
"""Perform Affinity Propagation Clustering of data
Parameters
----------
damping: float, optional, default: 0.5
Damping factor between 0.5 and 1.
convergence_iter: int, optional, default: 15
Number of iterations with no change in the number
of estimated clusters that stops the convergence.
max_iter: int, optional, default: 200
Maximum number of iterations
copy: boolean, optional, default: True
Make a copy of input data.
preference: array [n_samples,] or float, optional, default: None
Preferences for each point - points with larger values of
preferences are more likely to be chosen as exemplars. The number
        of exemplars, i.e. of clusters, is influenced by the input
preferences value. If the preferences are not passed as arguments,
they will be set to the median of the input similarities.
affinity: string, optional, default=``euclidean``
Which affinity to use. At the moment ``precomputed`` and
``euclidean`` are supported. ``euclidean`` uses the
negative squared euclidean distance between points.
verbose: boolean, optional, default: False
Whether to be verbose.
Attributes
----------
`cluster_centers_indices_` : array, [n_clusters]
Indices of cluster centers
`labels_` : array, [n_samples]
Labels of each point
`affinity_matrix_` : array-like, [n_samples, n_samples]
Stores the affinity matrix used in ``fit``.
Notes
-----
See examples/plot_affinity_propagation.py for an example.
The algorithmic complexity of affinity propagation is quadratic
in the number of points.
References
----------
Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages
Between Data Points", Science Feb. 2007
"""
def __init__(self, damping=.5, max_iter=200, convergence_iter=15,
copy=True, preference=None, affinity='euclidean',
verbose=False):
self.damping = damping
self.max_iter = max_iter
self.convergence_iter = convergence_iter
self.copy = copy
self.verbose = verbose
self.preference = preference
self.affinity = affinity
@property
def _pairwise(self):
        return self.affinity == "precomputed"
def fit(self, X):
""" Create affinity matrix from negative euclidean distances, then
apply affinity propagation clustering.
Parameters
----------
X: array [n_samples, n_features] or [n_samples, n_samples]
Data matrix or, if affinity is ``precomputed``, matrix of
similarities / affinities.
"""
if X.shape[0] == X.shape[1] and not self._pairwise:
warnings.warn("The API of AffinityPropagation has changed."
"Now ``fit`` constructs an affinity matrix from the"
" data. To use a custom affinity matrix, set "
"``affinity=precomputed``.")
        if self.affinity == "precomputed":
            self.affinity_matrix_ = X
        elif self.affinity == "euclidean":
self.affinity_matrix_ = -euclidean_distances(X, squared=True)
else:
raise ValueError("Affinity must be 'precomputed' or "
"'euclidean'. Got %s instead"
% str(self.affinity))
self.cluster_centers_indices_, self.labels_ = affinity_propagation(
self.affinity_matrix_, self.preference, max_iter=self.max_iter,
convergence_iter=self.convergence_iter, damping=self.damping,
copy=self.copy, verbose=self.verbose)
return self
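# -----------------------------------------------------------------------------
# Minimal usage sketch (illustration only, left as a comment; assumes the
# module is imported as part of scikit-learn so the relative imports resolve).
# It clusters four 2-D points with the functional API defined above:
#
#     import numpy as np
#     from sklearn.metrics import euclidean_distances
#
#     X = np.array([[0.0, 0.0], [0.1, 0.0], [5.0, 5.0], [5.1, 5.0]])
#     S = -euclidean_distances(X, squared=True)  # negative squared distances
#     centers, labels = affinity_propagation(S, verbose=True)
#     # centers -> indices of the chosen exemplars, labels -> label per sample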
|
######################################################################
# Copyright (C) 2013,2014 Jaakko Luttinen
#
# This file is licensed under Version 3.0 of the GNU General Public
# License. See LICENSE for a text of the license.
######################################################################
######################################################################
# This file is part of BayesPy.
#
# BayesPy is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# BayesPy is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with BayesPy. If not, see <http://www.gnu.org/licenses/>.
######################################################################
"""
Unit tests for `dot` module.
"""
import unittest
import numpy as np
import scipy
from numpy import testing
from ..dot import Dot, SumMultiply
from ..gaussian import Gaussian, GaussianARD
from ...vmp import VB
from bayespy.utils import utils
from bayespy.utils import linalg
from bayespy.utils import random
from bayespy.utils.utils import TestCase
class TestSumMultiply(TestCase):
def test_parent_validity(self):
"""
Test that the parent nodes are validated properly in SumMultiply
"""
V = GaussianARD(1, 1)
X = Gaussian(np.ones(1), np.identity(1))
Y = Gaussian(np.ones(3), np.identity(3))
Z = Gaussian(np.ones(5), np.identity(5))
A = SumMultiply(X, ['i'])
self.assertEqual(A.dims, ((), ()))
A = SumMultiply('i', X)
self.assertEqual(A.dims, ((), ()))
A = SumMultiply(X, ['i'], ['i'])
self.assertEqual(A.dims, ((1,), (1,1)))
A = SumMultiply('i->i', X)
self.assertEqual(A.dims, ((1,), (1,1)))
A = SumMultiply(X, ['i'], Y, ['j'], ['i','j'])
self.assertEqual(A.dims, ((1,3), (1,3,1,3)))
A = SumMultiply('i,j->ij', X, Y)
self.assertEqual(A.dims, ((1,3), (1,3,1,3)))
A = SumMultiply(V, [], X, ['i'], Y, ['i'], [])
self.assertEqual(A.dims, ((), ()))
A = SumMultiply(',i,i->', V, X, Y)
self.assertEqual(A.dims, ((), ()))
# Error: not enough inputs
self.assertRaises(ValueError,
SumMultiply)
self.assertRaises(ValueError,
SumMultiply,
X)
# Error: too many keys
self.assertRaises(ValueError,
SumMultiply,
Y,
['i', 'j'])
self.assertRaises(ValueError,
SumMultiply,
'ij',
Y)
# Error: not broadcastable
self.assertRaises(ValueError,
SumMultiply,
Y,
['i'],
Z,
['i'])
self.assertRaises(ValueError,
SumMultiply,
'i,i',
Y,
Z)
# Error: output key not in inputs
self.assertRaises(ValueError,
SumMultiply,
X,
['i'],
['j'])
self.assertRaises(ValueError,
SumMultiply,
'i->j',
X)
# Error: non-unique input keys
self.assertRaises(ValueError,
SumMultiply,
X,
['i','i'])
self.assertRaises(ValueError,
SumMultiply,
'ii',
X)
# Error: non-unique output keys
self.assertRaises(ValueError,
SumMultiply,
X,
['i'],
['i','i'])
self.assertRaises(ValueError,
SumMultiply,
'i->ii',
X)
# String has too many '->'
self.assertRaises(ValueError,
SumMultiply,
'i->i->i',
X)
# String has too many input nodes
self.assertRaises(ValueError,
SumMultiply,
'i,i->i',
X)
def test_message_to_child(self):
"""
Test the message from SumMultiply to its children.
"""
def compare_moments(u0, u1, *args):
Y = SumMultiply(*args)
u_Y = Y.get_moments()
self.assertAllClose(u_Y[0], u0)
self.assertAllClose(u_Y[1], u1)
# Test constant parent
y = np.random.randn(2,3,4)
compare_moments(y,
linalg.outer(y, y, ndim=2),
'ij->ij',
y)
# Do nothing for 2-D array
Y = GaussianARD(np.random.randn(5,2,3),
np.random.rand(5,2,3),
plates=(5,),
shape=(2,3))
y = Y.get_moments()
compare_moments(y[0],
y[1],
'ij->ij',
Y)
compare_moments(y[0],
y[1],
Y,
[0,1],
[0,1])
# Sum over the rows of a matrix
Y = GaussianARD(np.random.randn(5,2,3),
np.random.rand(5,2,3),
plates=(5,),
shape=(2,3))
y = Y.get_moments()
mu = np.einsum('...ij->...j', y[0])
cov = np.einsum('...ijkl->...jl', y[1])
compare_moments(mu,
cov,
'ij->j',
Y)
compare_moments(mu,
cov,
Y,
[0,1],
[1])
# Inner product of three vectors
X1 = GaussianARD(np.random.randn(2),
np.random.rand(2),
plates=(),
shape=(2,))
x1 = X1.get_moments()
X2 = GaussianARD(np.random.randn(6,1,2),
np.random.rand(6,1,2),
plates=(6,1),
shape=(2,))
x2 = X2.get_moments()
X3 = GaussianARD(np.random.randn(7,6,5,2),
np.random.rand(7,6,5,2),
plates=(7,6,5),
shape=(2,))
x3 = X3.get_moments()
mu = np.einsum('...i,...i,...i->...', x1[0], x2[0], x3[0])
cov = np.einsum('...ij,...ij,...ij->...', x1[1], x2[1], x3[1])
compare_moments(mu,
cov,
'i,i,i',
X1,
X2,
X3)
compare_moments(mu,
cov,
'i,i,i->',
X1,
X2,
X3)
compare_moments(mu,
cov,
X1,
[9],
X2,
[9],
X3,
[9])
compare_moments(mu,
cov,
X1,
[9],
X2,
[9],
X3,
[9],
[])
# Outer product of two vectors
X1 = GaussianARD(np.random.randn(2),
np.random.rand(2),
plates=(5,),
shape=(2,))
x1 = X1.get_moments()
X2 = GaussianARD(np.random.randn(6,1,2),
np.random.rand(6,1,2),
plates=(6,1),
shape=(2,))
x2 = X2.get_moments()
mu = np.einsum('...i,...j->...ij', x1[0], x2[0])
cov = np.einsum('...ik,...jl->...ijkl', x1[1], x2[1])
compare_moments(mu,
cov,
'i,j->ij',
X1,
X2)
compare_moments(mu,
cov,
X1,
[9],
X2,
[7],
[9,7])
# Matrix product
Y1 = GaussianARD(np.random.randn(3,2),
np.random.rand(3,2),
plates=(),
shape=(3,2))
y1 = Y1.get_moments()
Y2 = GaussianARD(np.random.randn(5,2,3),
np.random.rand(5,2,3),
plates=(5,),
shape=(2,3))
y2 = Y2.get_moments()
mu = np.einsum('...ik,...kj->...ij', y1[0], y2[0])
cov = np.einsum('...ikjl,...kmln->...imjn', y1[1], y2[1])
compare_moments(mu,
cov,
'ik,kj->ij',
Y1,
Y2)
compare_moments(mu,
cov,
Y1,
['i','k'],
Y2,
['k','j'],
['i','j'])
# Trace of a matrix product
Y1 = GaussianARD(np.random.randn(3,2),
np.random.rand(3,2),
plates=(),
shape=(3,2))
y1 = Y1.get_moments()
Y2 = GaussianARD(np.random.randn(5,2,3),
np.random.rand(5,2,3),
plates=(5,),
shape=(2,3))
y2 = Y2.get_moments()
mu = np.einsum('...ij,...ji->...', y1[0], y2[0])
cov = np.einsum('...ikjl,...kilj->...', y1[1], y2[1])
compare_moments(mu,
cov,
'ij,ji',
Y1,
Y2)
compare_moments(mu,
cov,
'ij,ji->',
Y1,
Y2)
compare_moments(mu,
cov,
Y1,
['i','j'],
Y2,
['j','i'])
compare_moments(mu,
cov,
Y1,
['i','j'],
Y2,
['j','i'],
[])
# Vector-matrix-vector product
X1 = GaussianARD(np.random.randn(3),
np.random.rand(3),
plates=(),
shape=(3,))
x1 = X1.get_moments()
X2 = GaussianARD(np.random.randn(6,1,2),
np.random.rand(6,1,2),
plates=(6,1),
shape=(2,))
x2 = X2.get_moments()
Y = GaussianARD(np.random.randn(3,2),
np.random.rand(3,2),
plates=(),
shape=(3,2))
y = Y.get_moments()
mu = np.einsum('...i,...ij,...j->...', x1[0], y[0], x2[0])
cov = np.einsum('...ia,...ijab,...jb->...', x1[1], y[1], x2[1])
compare_moments(mu,
cov,
'i,ij,j',
X1,
Y,
X2)
compare_moments(mu,
cov,
X1,
[1],
Y,
[1,2],
X2,
[2])
# Complex sum-product of 0-D, 1-D, 2-D and 3-D arrays
V = GaussianARD(np.random.randn(7,6,5),
np.random.rand(7,6,5),
plates=(7,6,5),
shape=())
v = V.get_moments()
X = GaussianARD(np.random.randn(6,1,2),
np.random.rand(6,1,2),
plates=(6,1),
shape=(2,))
x = X.get_moments()
Y = GaussianARD(np.random.randn(3,4),
np.random.rand(3,4),
plates=(5,),
shape=(3,4))
y = Y.get_moments()
Z = GaussianARD(np.random.randn(4,2,3),
np.random.rand(4,2,3),
plates=(6,5),
shape=(4,2,3))
z = Z.get_moments()
mu = np.einsum('...,...i,...kj,...jik->...k', v[0], x[0], y[0], z[0])
cov = np.einsum('...,...ia,...kjcb,...jikbac->...kc', v[1], x[1], y[1], z[1])
compare_moments(mu,
cov,
',i,kj,jik->k',
V,
X,
Y,
Z)
compare_moments(mu,
cov,
V,
[],
X,
['i'],
Y,
['k','j'],
Z,
['j','i','k'],
['k'])
pass
def test_message_to_parent(self):
"""
Test the message from SumMultiply node to its parents.
"""
data = 2
tau = 3
def check_message(true_m0, true_m1, parent, *args, F=None):
if F is None:
A = SumMultiply(*args)
B = GaussianARD(A, tau)
B.observe(data*np.ones(A.plates + A.dims[0]))
else:
A = F
(A_m0, A_m1) = A._message_to_parent(parent)
self.assertAllClose(true_m0, A_m0)
self.assertAllClose(true_m1, A_m1)
pass
# Check: different message to each of multiple parents
X1 = GaussianARD(np.random.randn(2),
np.random.rand(2))
x1 = X1.get_moments()
X2 = GaussianARD(np.random.randn(2),
np.random.rand(2))
x2 = X2.get_moments()
m0 = tau * data * x2[0]
m1 = -0.5 * tau * x2[1] * np.identity(2)
check_message(m0, m1, 0,
'i,i->i',
X1,
X2)
check_message(m0, m1, 0,
X1,
[9],
X2,
[9],
[9])
m0 = tau * data * x1[0]
m1 = -0.5 * tau * x1[1] * np.identity(2)
check_message(m0, m1, 1,
'i,i->i',
X1,
X2)
check_message(m0, m1, 1,
X1,
[9],
X2,
[9],
[9])
# Check: key not in output
X1 = GaussianARD(np.random.randn(2),
np.random.rand(2))
x1 = X1.get_moments()
m0 = tau * data * np.ones(2)
m1 = -0.5 * tau * np.ones((2,2))
check_message(m0, m1, 0,
'i',
X1)
check_message(m0, m1, 0,
'i->',
X1)
check_message(m0, m1, 0,
X1,
[9])
check_message(m0, m1, 0,
X1,
[9],
[])
# Check: key not in some input
X1 = GaussianARD(np.random.randn(),
np.random.rand())
x1 = X1.get_moments()
X2 = GaussianARD(np.random.randn(2),
np.random.rand(2))
x2 = X2.get_moments()
m0 = tau * data * np.sum(x2[0], axis=-1)
m1 = -0.5 * tau * np.sum(x2[1] * np.identity(2),
axis=(-1,-2))
check_message(m0, m1, 0,
',i->i',
X1,
X2)
check_message(m0, m1, 0,
X1,
[],
X2,
[9],
[9])
m0 = tau * data * x1[0] * np.ones(2)
m1 = -0.5 * tau * x1[1] * np.identity(2)
check_message(m0, m1, 1,
',i->i',
X1,
X2)
check_message(m0, m1, 1,
X1,
[],
X2,
[9],
[9])
# Check: keys in different order
Y1 = GaussianARD(np.random.randn(3,2),
np.random.rand(3,2))
y1 = Y1.get_moments()
Y2 = GaussianARD(np.random.randn(2,3),
np.random.rand(2,3))
y2 = Y2.get_moments()
m0 = tau * data * y2[0].T
m1 = -0.5 * tau * np.einsum('ijlk->jikl', y2[1] * utils.identity(2,3))
check_message(m0, m1, 0,
'ij,ji->ij',
Y1,
Y2)
check_message(m0, m1, 0,
Y1,
['i','j'],
Y2,
['j','i'],
['i','j'])
m0 = tau * data * y1[0].T
m1 = -0.5 * tau * np.einsum('ijlk->jikl', y1[1] * utils.identity(3,2))
check_message(m0, m1, 1,
'ij,ji->ij',
Y1,
Y2)
check_message(m0, m1, 1,
Y1,
['i','j'],
Y2,
['j','i'],
['i','j'])
# Check: plates when different dimensionality
X1 = GaussianARD(np.random.randn(5),
np.random.rand(5),
shape=(),
plates=(5,))
x1 = X1.get_moments()
X2 = GaussianARD(np.random.randn(5,3),
np.random.rand(5,3),
shape=(3,),
plates=(5,))
x2 = X2.get_moments()
m0 = tau * data * np.sum(np.ones((5,3)) * x2[0], axis=-1)
m1 = -0.5 * tau * np.sum(x2[1] * utils.identity(3), axis=(-1,-2))
check_message(m0, m1, 0,
',i->i',
X1,
X2)
check_message(m0, m1, 0,
X1,
[],
X2,
['i'],
['i'])
m0 = tau * data * x1[0][:,np.newaxis] * np.ones((5,3))
m1 = -0.5 * tau * x1[1][:,np.newaxis,np.newaxis] * utils.identity(3)
check_message(m0, m1, 1,
',i->i',
X1,
X2)
check_message(m0, m1, 1,
X1,
[],
X2,
['i'],
['i'])
# Check: other parent's moments broadcasts over plates when node has the
# same plates
X1 = GaussianARD(np.random.randn(5,4,3),
np.random.rand(5,4,3),
shape=(3,),
plates=(5,4))
x1 = X1.get_moments()
X2 = GaussianARD(np.random.randn(3),
np.random.rand(3),
shape=(3,),
plates=(5,4))
x2 = X2.get_moments()
m0 = tau * data * np.ones((5,4,3)) * x2[0]
m1 = -0.5 * tau * x2[1] * utils.identity(3)
check_message(m0, m1, 0,
'i,i->i',
X1,
X2)
check_message(m0, m1, 0,
X1,
['i'],
X2,
['i'],
['i'])
# Check: other parent's moments broadcasts over plates when node does
# not have that plate
X1 = GaussianARD(np.random.randn(3),
np.random.rand(3),
shape=(3,),
plates=())
x1 = X1.get_moments()
X2 = GaussianARD(np.random.randn(3),
np.random.rand(3),
shape=(3,),
plates=(5,4))
x2 = X2.get_moments()
m0 = tau * data * np.sum(np.ones((5,4,3)) * x2[0], axis=(0,1))
m1 = -0.5 * tau * np.sum(np.ones((5,4,1,1))
* utils.identity(3)
* x2[1],
axis=(0,1))
check_message(m0, m1, 0,
'i,i->i',
X1,
X2)
check_message(m0, m1, 0,
X1,
['i'],
X2,
['i'],
['i'])
# Check: other parent's moments broadcasts over plates when the node
# only broadcasts that plate
X1 = GaussianARD(np.random.randn(3),
np.random.rand(3),
shape=(3,),
plates=(1,1))
x1 = X1.get_moments()
X2 = GaussianARD(np.random.randn(3),
np.random.rand(3),
shape=(3,),
plates=(5,4))
x2 = X2.get_moments()
m0 = tau * data * np.sum(np.ones((5,4,3)) * x2[0], axis=(0,1), keepdims=True)
m1 = -0.5 * tau * np.sum(np.ones((5,4,1,1))
* utils.identity(3)
* x2[1],
axis=(0,1),
keepdims=True)
check_message(m0, m1, 0,
'i,i->i',
X1,
X2)
check_message(m0, m1, 0,
X1,
['i'],
X2,
['i'],
['i'])
# Check: broadcasted dimensions
X1 = GaussianARD(np.random.randn(1,1),
np.random.rand(1,1))
x1 = X1.get_moments()
X2 = GaussianARD(np.random.randn(3,2),
np.random.rand(3,2))
x2 = X2.get_moments()
m0 = tau * data * np.sum(np.ones((3,2)) * x2[0],
keepdims=True)
m1 = -0.5 * tau * np.sum(utils.identity(3,2) * x2[1],
keepdims=True)
check_message(m0, m1, 0,
'ij,ij->ij',
X1,
X2)
check_message(m0, m1, 0,
X1,
[0,1],
X2,
[0,1],
[0,1])
m0 = tau * data * np.ones((3,2)) * x1[0]
m1 = -0.5 * tau * utils.identity(3,2) * x1[1]
check_message(m0, m1, 1,
'ij,ij->ij',
X1,
X2)
check_message(m0, m1, 1,
X1,
[0,1],
X2,
[0,1],
[0,1])
# Check: non-ARD observations
X1 = GaussianARD(np.random.randn(2),
np.random.rand(2))
x1 = X1.get_moments()
Lambda = np.array([[2, 1.5], [1.5, 2]])
F = SumMultiply('i->i', X1)
Y = Gaussian(F, Lambda)
y = np.random.randn(2)
Y.observe(y)
m0 = np.dot(Lambda, y)
m1 = -0.5 * Lambda
check_message(m0, m1, 0,
'i->i',
X1,
F=F)
check_message(m0, m1, 0,
X1,
['i'],
['i'],
F=F)
# Check: mask with same shape
X1 = GaussianARD(np.random.randn(3,2),
np.random.rand(3,2),
shape=(2,),
plates=(3,))
x1 = X1.get_moments()
mask = np.array([True, False, True])
F = SumMultiply('i->i', X1)
Y = GaussianARD(F, tau)
Y.observe(data*np.ones((3,2)), mask=mask)
m0 = tau * data * mask[:,np.newaxis] * np.ones(2)
m1 = -0.5 * tau * mask[:,np.newaxis,np.newaxis] * np.identity(2)
check_message(m0, m1, 0,
'i->i',
X1,
F=F)
check_message(m0, m1, 0,
X1,
['i'],
['i'],
F=F)
# Check: mask larger
X1 = GaussianARD(np.random.randn(2),
np.random.rand(2),
shape=(2,),
plates=())
x1 = X1.get_moments()
X2 = GaussianARD(np.random.randn(3,2),
np.random.rand(3,2),
shape=(2,),
plates=(3,))
x2 = X2.get_moments()
mask = np.array([True, False, True])
F = SumMultiply('i,i->i', X1, X2)
Y = GaussianARD(F, tau,
plates=(3,))
Y.observe(data*np.ones((3,2)), mask=mask)
m0 = tau * data * np.sum(mask[:,np.newaxis] * x2[0], axis=0)
m1 = -0.5 * tau * np.sum(mask[:,np.newaxis,np.newaxis]
* x2[1]
* np.identity(2),
axis=0)
check_message(m0, m1, 0,
'i,i->i',
X1,
X2,
F=F)
check_message(m0, m1, 0,
X1,
['i'],
X2,
['i'],
['i'],
F=F)
# Check: mask for broadcasted plate
X1 = GaussianARD(np.random.randn(2),
np.random.rand(2),
plates=(1,))
x1 = X1.get_moments()
X2 = GaussianARD(np.random.randn(2),
np.random.rand(2),
plates=(3,))
x2 = X2.get_moments()
mask = np.array([True, False, True])
F = SumMultiply('i,i->i', X1, X2)
Y = GaussianARD(F, tau,
plates=(3,))
Y.observe(data*np.ones((3,2)), mask=mask)
m0 = tau * data * np.sum(mask[:,np.newaxis] * x2[0],
axis=0,
keepdims=True)
m1 = -0.5 * tau * np.sum(mask[:,np.newaxis,np.newaxis]
* x2[1]
* np.identity(2),
axis=0,
keepdims=True)
check_message(m0, m1, 0,
'i->i',
X1,
F=F)
check_message(m0, m1, 0,
X1,
['i'],
['i'],
F=F)
pass
def check_performance(scale=1e2):
"""
Tests that the implementation of SumMultiply is efficient.
This is not a unit test (not run automatically), but rather a
performance test, which you may run to test the performance of the
        node. A naive implementation of SumMultiply will run out of memory in
        some cases; this method checks that those cases are handled
        efficiently.
"""
# Check: Broadcasted plates are computed efficiently
# (bad implementation will take a long time to run)
s = scale
X1 = GaussianARD(np.random.randn(s,s),
np.random.rand(s,s),
shape=(s,),
plates=(s,))
X2 = GaussianARD(np.random.randn(s,1,s),
np.random.rand(s,1,s),
shape=(s,),
plates=(s,1))
F = SumMultiply('i,i', X1, X2)
Y = GaussianARD(F, 1)
Y.observe(np.ones((s,s)))
try:
F._message_to_parent(1)
        except Exception as e:
            print(e)
            print('SOMETHING BAD HAPPENED')
# Check: Broadcasted dimensions are computed efficiently
# (bad implementation will run out of memory)
pass
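# Note: check_performance() above intentionally does not follow the test_*
# naming convention, so unittest discovery skips it. A hedged sketch of
# running it by hand from an interactive session:
#
#     TestSumMultiply().check_performance(scale=1e2)
#
# Large scale values make a naive SumMultiply implementation exhaust memory,
# while the intended implementation should finish quickly.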
|
# coding: utf-8
# In[2]:
from __future__ import division
import logging
import os
import xml.etree.ElementTree as ET
from senpy.plugins import EmotionPlugin, SenpyPlugin
from senpy.models import Results, EmotionSet, Entry, Emotion
logger = logging.getLogger(__name__)
# added packages
import codecs, csv, re, nltk
import numpy as np
import math, itertools
from drevicko.twitter_regexes import cleanString, setupRegexes, tweetPreprocessor
import preprocess_twitter
from collections import defaultdict
from stop_words import get_stop_words
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.externals import joblib
from sklearn.svm import LinearSVR, SVR
from nltk.tokenize import TweetTokenizer
import nltk.tokenize.casual as casual
import gzip
from datetime import datetime
os.environ['KERAS_BACKEND']='theano'
from keras.preprocessing import sequence
from keras.preprocessing.text import Tokenizer
from keras.models import load_model, model_from_json
class wassaRegression(EmotionPlugin):
def __init__(self, info, *args, **kwargs):
super(wassaRegression, self).__init__(info, *args, **kwargs)
self.name = info['name']
self.id = info['module']
self._info = info
local_path = os.path.dirname(os.path.abspath(__file__))
self._maxlen = 55
self.WORD_FREQUENCY_TRESHOLD = 2
self._savedModelPath = local_path + "/classifiers/LSTM/wassaRegression"
self._path_wordembeddings = os.path.dirname(local_path) + '/glove.twitter.27B.100d.txt.gz'
self._paths_ngramizer = local_path + '/wassa_ngramizer.dump'
self._paths_svr = local_path + "/classifiers/SVR"
self._paths_linearsvr = local_path + "/classifiers/LinearSVR"
self.extension_classifier = '.dump'
self._paths_word_freq = os.path.join(os.path.dirname(__file__), 'wordFrequencies.dump')
# self._emoNames = ['sadness', 'disgust', 'surprise', 'anger', 'fear', 'joy']
self._emoNames = ['anger','fear','joy','sadness']
def activate(self, *args, **kwargs):
np.random.seed(1337)
# st = datetime.now()
# self._wordFrequencies = self._load_unique_tokens(filename = self._paths_word_freq)
# logger.info("{} {}".format(datetime.now() - st, "loaded _wordFrequencies"))
self._wassaRegressionDLModels = {emo:self._load_model_emo_and_weights(self._savedModelPath, emo) for emo in self._emoNames}
st = datetime.now()
self._stop_words = get_stop_words('en')
logger.info("{} {}".format(datetime.now() - st, "loaded _stop_words"))
st = datetime.now()
self._ngramizer = joblib.load(self._paths_ngramizer)
logger.info("{} {}".format(datetime.now() - st, "loaded _ngramizers"))
self._wassaRegressionSVMmodels = {
#'LinearSVR': self._load_classifier(PATH=self._paths_linearsvr, ESTIMATOR='LinearSVR' ),
'SVR': self._load_classifier(PATH=self._paths_svr, ESTIMATOR='SVR')
}
st = datetime.now()
self._Dictionary, self._Indices = self._load_original_vectors(
filename = self._path_wordembeddings,
sep = ' ',
wordFrequencies = None,#self._wordFrequencies,
zipped = True) # leave wordFrequencies=None for loading the entire WE file
logger.info("{} {}".format(datetime.now() - st, "loaded _wordEmbeddings"))
logger.info("wassaRegression plugin is ready to go!")
def deactivate(self, *args, **kwargs):
try:
logger.info("wassaRegression plugin is being deactivated...")
except Exception:
print("Exception in logger while reporting deactivation of wassaRegression")
# CUSTOM FUNCTIONS
def _text_preprocessor(self, text):
text = preprocess_twitter.tokenize(text)
text = casual.reduce_lengthening(text)
text = cleanString(setupRegexes('twitterProAna'),text)
text = ' '.join([span for notentity,span in tweetPreprocessor(text, ("urls", "users", "lists")) if notentity])
text = text.replace('\t','')
text = text.replace('< ','<').replace(' >','>')
text = text.replace('):', '<sadface>').replace('(:', '<smile>')
text = text.replace(" 't", "t").replace("#", "")
return ' '.join(text.split())
def tokenise_tweet(text):
text = preprocess_twitter.tokenize(text)
text = preprocess_tweet(text)
return ' '.join(text.split())
def _load_original_vectors(self, filename = 'glove.27B.100d.txt', sep = ' ', wordFrequencies = None, zipped = False):
def __read_file(f):
Dictionary, Indices = {},{}
i = 1
for line in f:
line_d = line.decode('utf-8').split(sep)
token = line_d[0]
token_vector = np.array(line_d[1:], dtype = 'float32')
                if wordFrequencies is not None:
if token in wordFrequencies:
Dictionary[token] = token_vector
Indices.update({token:i})
i+=1
else:
Dictionary[token] = token_vector
Indices.update({token:i})
i+=1
return(Dictionary, Indices)
if zipped:
with gzip.open(filename, 'rb') as f:
return(__read_file(f))
else:
with open(filename, 'rb') as f:
return(__read_file(f))
# ===== SVR
def _tweetToNgramVector(self, text):
return self._ngramizer.transform([text,text]).toarray()[0]
def _tweetToWordVectors(self, tweet, fixedLength=False):
output = []
if fixedLength:
for i in range(100):
output.append(blankVector)
for i,token in enumerate(tweet.split()):
if token in self._Dictionary:
output[i] = self._Dictionary[token]
else:
for i,token in enumerate(tweet.lower().split()):
if token in self._Dictionary:
output.append(self._Dictionary[token])
return output
def _ModWordVectors(self, x, mod=True):
if len(x) == 0:
if mod:
return np.zeros(self.EMBEDDINGS_DIM*3, dtype='float32')
else:
return np.zeros(self.EMBEDDINGS_DIM, dtype='float32')
m = np.matrix(x)
if mod:
xMean = np.array(m.mean(0))[0]
xMin = np.array(m.min(0))[0]
xMax = np.array(m.max(0))[0]
xX = np.concatenate((xMean,xMin,xMax))
return xX
else:
return np.array(m.mean(0))[0]
def _bindTwoVectors(self, x0, x1):
return np.array(list(itertools.chain(x0,x1)),dtype='float32')
def _bind_vectors(self, x):
return np.concatenate(x)
def _load_classifier(self, PATH, ESTIMATOR):
models = []
st = datetime.now()
for EMOTION in self._emoNames:
filename = os.path.join(PATH, EMOTION + self.extension_classifier)
st = datetime.now()
m = joblib.load(filename)
logger.info("{} loaded _wassaRegression.{}.{}".format(datetime.now() - st, ESTIMATOR, EMOTION))
models.append( m )
return models
def _load_unique_tokens(self, filename = 'wordFrequencies.dump'):
return joblib.load(filename)
def _convert_text_to_vector(self, text, text_input):
ngramVector = self._tweetToNgramVector(text)
embeddingsVector = self._ModWordVectors(self._tweetToWordVectors(text))
X = np.asarray( self._bind_vectors((ngramVector, embeddingsVector)) ).reshape(1,-1)
return X
# ===== LSTM
def _load_model_emo_and_weights(self, filename, emo):
st = datetime.now()
with open(filename+'.'+emo+'.json', 'r') as json_file:
loaded_model_json = json_file.read()
loaded_model = model_from_json(loaded_model_json)
loaded_model.load_weights(filename+'.'+emo+'.h5')
logger.info("{} {}".format(datetime.now() - st, "loaded _wassaRegression.LSTM."+emo))
return loaded_model
def _lists_to_vectors(self, text):
train_sequences = [self._text_to_sequence(text)]
X = sequence.pad_sequences(train_sequences, maxlen=self._maxlen)
return X
def _text_to_sequence(self,text):
train_sequence = []
for token in text.split():
try:
train_sequence.append(self._Indices[token])
            except KeyError:
train_sequence.append(0)
train_sequence.extend([0]*( self._maxlen-len(train_sequence)) )
return np.array(train_sequence)
def _extract_features(self, X):
feature_set = {}
for emo in self._emoNames:
feature_set.update({emo:self._wassaRegressionDLModels[emo].predict(X)[0][0]})
return feature_set
def _extract_features_svr(self, X):
feature_set = {
emo: float(clf.predict(X)[0]) for emo,clf in zip(self._emoNames, self._wassaRegressionSVMmodels['SVR'])}
return feature_set
# ANALYZE
def analyse(self, **params):
logger.debug("wassaRegression LSTM Analysing with params {}".format(params))
st = datetime.now()
text_input = params.get("input", None)
text = self._text_preprocessor(text_input)
self.ESTIMATOR = params.get("estimator", 'LSTM')
if self.ESTIMATOR == 'LSTM':
X_lstm = self._lists_to_vectors(text = text)
feature_text = self._extract_features(X_lstm)
elif self.ESTIMATOR == 'averaged':
X_lstm = self._lists_to_vectors(text = text)
X_svr = self._convert_text_to_vector(text=text, text_input=text_input)
feature_text_lstm = self._extract_features(X_lstm)
feature_text_svr = self._extract_features_svr(X_svr)
feature_text = {emo:np.mean([feature_text_lstm[emo], feature_text_svr[emo]]) for emo in self._emoNames}
else:
X_svr = self._convert_text_to_vector(text=text, text_input=text_input)
feature_text = self._extract_features_svr(X_svr)
logger.info("{} {}".format(datetime.now() - st, "string analysed"))
response = Results()
entry = Entry()
entry.nif__isString = text_input
emotionSet = EmotionSet()
emotionSet.id = "Emotions"
emotionSet.onyx__maxIntensityValue = float(100.0)
emotion1 = Emotion()
for dimension in ['V','A','D']:
weights = [feature_text[i] for i in feature_text]
if not all(v == 0 for v in weights):
value = np.average([self.centroids[i][dimension] for i in feature_text], weights=weights)
else:
value = 5.0
emotion1[self.centroid_mappings[dimension]] = value
emotionSet.onyx__hasEmotion.append(emotion1)
for i in feature_text:
emotionSet.onyx__hasEmotion.append(Emotion(onyx__hasEmotionCategory=self.wnaffect_mappings[i],
onyx__hasEmotionIntensity=float(feature_text[i])*emotionSet.onyx__maxIntensityValue))
entry.emotions = [emotionSet,]
response.entries.append(entry)
return response
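# Hedged usage sketch (assumes a senpy plugin `info` dict plus the classifier,
# ngramizer and word-embedding files referenced in __init__ are present on
# disk):
#
#     plugin = wassaRegression(info)
#     plugin.activate()
#     results = plugin.analyse(input="I love this!", estimator="LSTM")
#
# `estimator` may also be 'averaged' (mean of the LSTM and SVR scores); any
# other value falls back to the SVR-only path in analyse().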
|
import re
from django import forms
from django.core.exceptions import ValidationError
from django.forms import RadioSelect
from .models import Presentation, Slide
class PresentationBaseForm(forms.ModelForm):
subject = forms.CharField(max_length=50)
markdown = forms.CharField(widget=forms.HiddenInput(), required=True)
is_public = forms.BooleanField(initial=True,
required=False,
widget=RadioSelect(choices=[
(True, 'public'), (False, 'private')]))
def clean_tags(self):
tags = self.cleaned_data['tags']
if len(tags) > 20:
raise ValidationError("Too much tags")
for tag in tags:
if len(tag) > 16:
raise ValidationError("Too long tag")
return tags
def add_slide_list(self, presentation):
markdown = self.cleaned_data.get('markdown')
slide_list = re.split("={5,}", markdown)
Slide.objects.filter(presentation=presentation).delete()
for order_num, slide in enumerate(slide_list):
Slide.objects.create(
presentation=presentation,
slide_order=order_num,
markdown=slide
)
    class Meta:
model = Presentation
fields = ['subject', 'markdown', 'is_public', "tags"]
class PresentationCreateForm(PresentationBaseForm):
def save(self, commit=True):
instance = super(PresentationCreateForm, self).save(commit=False)
instance.author = self.author
instance.save()
self.add_slide_list(instance)
return instance
class PresentationUpdateForm(PresentationBaseForm):
def save(self, commit=True):
instance = super(PresentationUpdateForm, self).save(commit=False)
instance.subject = self.cleaned_data.get('subject')
instance.is_public = self.cleaned_data.get('is_public')
instance.author = self.author
instance.save()
self.add_slide_list(instance)
return instance
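# Worked example of the slide split used in add_slide_list: re.split("={5,}",
# markdown) cuts the text on any run of five or more '=' characters, e.g.
#
#     re.split("={5,}", "first slide\n=====\nsecond slide")
#     # -> ['first slide\n', '\nsecond slide']
#
# so each fragment becomes one Slide row with its original order preserved.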
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Worker command"""
import os
import signal
import subprocess
import sys
import daemon
from daemon.pidfile import TimeoutPIDLockFile
from airflow import settings
from airflow.configuration import conf
from airflow.utils import cli as cli_utils
from airflow.utils.cli import setup_locations, setup_logging, sigint_handler
def _serve_logs(env, skip_serve_logs=False):
"""Starts serve_logs sub-process"""
if skip_serve_logs is False:
sub_proc = subprocess.Popen(['airflow', 'serve_logs'], env=env, close_fds=True)
return sub_proc
return None
@cli_utils.action_logging
def worker(args):
"""Starts Airflow Celery worker"""
env = os.environ.copy()
env['AIRFLOW_HOME'] = settings.AIRFLOW_HOME
if not settings.validate_session():
print("Worker exiting... database connection precheck failed! ")
sys.exit(1)
# Celery worker
from airflow.executors.celery_executor import app as celery_app
from celery.bin import worker # pylint: disable=redefined-outer-name
autoscale = args.autoscale
skip_serve_logs = args.skip_serve_logs
if autoscale is None and conf.has_option("celery", "worker_autoscale"):
autoscale = conf.get("celery", "worker_autoscale")
worker = worker.worker(app=celery_app) # pylint: disable=redefined-outer-name
options = {
'optimization': 'fair',
'O': 'fair',
'queues': args.queues,
'concurrency': args.concurrency,
'autoscale': autoscale,
'hostname': args.celery_hostname,
'loglevel': conf.get('core', 'LOGGING_LEVEL'),
}
if conf.has_option("celery", "pool"):
options["pool"] = conf.get("celery", "pool")
if args.daemon:
pid, stdout, stderr, log_file = setup_locations("worker",
args.pid,
args.stdout,
args.stderr,
args.log_file)
handle = setup_logging(log_file)
stdout = open(stdout, 'w+')
stderr = open(stderr, 'w+')
ctx = daemon.DaemonContext(
pidfile=TimeoutPIDLockFile(pid, -1),
files_preserve=[handle],
stdout=stdout,
stderr=stderr,
)
with ctx:
sub_proc = _serve_logs(env, skip_serve_logs)
worker.run(**options)
stdout.close()
stderr.close()
else:
signal.signal(signal.SIGINT, sigint_handler)
signal.signal(signal.SIGTERM, sigint_handler)
sub_proc = _serve_logs(env, skip_serve_logs)
worker.run(**options)
if sub_proc:
sub_proc.kill()
|
# -*- coding: utf-8 -*-
from pymongo import MongoClient
from operator import itemgetter
import jieba
client = MongoClient()
data = []
i = 0
for x in client.News.News.find():
try:
#fop = open('news_article/news_article_%d' % i, 'w')
#fop.write(x['content'].encode('utf8'))
data.append(x)
i += 1
    except (KeyError, TypeError):
continue
"""
def list2freqdict(mylist):
mydict=dict()
for ch in mylist:
mydict[ch]=mydict.get(ch,0)+1
return mydict
def list2bigram(mylist):
return [mylist[i:i+2] for i in range(0,len(mylist)-1)]
def list2trigram(mylist):
return [mylist[i:i+3] for i in range(0,len(mylist)-2)]
def list2fourgram(mylist):
return [mylist[i:i+4] for i in range(0,len(mylist)-3)]
def bigram2freqdict(mybigram):
mydict=dict()
for (ch1,ch2) in mybigram:
mydict[(ch1,ch2)]=mydict.get((ch1,ch2),0)+1
return mydict
def trigram2freqdict(mytrigram):
mydict=dict()
for (ch1,ch2,ch3) in mytrigram:
mydict[(ch1,ch2,ch3)]=mydict.get((ch1,ch2,ch3),0)+1
return mydict
def fourgram2freqdict(myfourgram):
mydict=dict()
for (ch1,ch2,ch3,ch4) in myfourgram:
mydict[(ch1,ch2,ch3,ch4)]=mydict.get((ch1,ch2,ch3,ch4),0)+1
return mydict
"""
def freq2report(freqlist):
chs=str()
print('Char(s)\tCount')
print('=============')
for (token,num) in freqlist:
for ch in token:
chs=chs+ch
print chs.encode('utf8') + '\t' + str(num)
chs=''
return
"""
sentence=reduce(lambda x,y: x + y['content'], data, u'')#data[0]['content']#u'吃葡萄不吐葡萄皮,不吃葡萄倒吐葡萄皮。'
chlist=[ch for ch in sentence]
chfreqdict=list2freqdict(chlist)
chbigram=list2bigram(chlist)
chtrigram=list2trigram(chlist)
chfourgram=list2fourgram(chlist)
bigramfreqdict=bigram2freqdict(chbigram)
trigramfreqdict=trigram2freqdict(chtrigram)
fourgramfreqdict=fourgram2freqdict(chfourgram)
chfreqsorted=sorted(chfreqdict.items(), key=itemgetter(1), reverse=True)
bigramfreqsorted=sorted(bigramfreqdict.items(), key=itemgetter(1), reverse=True)
trigramfreqsorted=sorted(trigramfreqdict.items(), key=itemgetter(1), reverse=True)
fourgramfreqsorted=sorted(fourgramfreqdict.items(), key=itemgetter(1), reverse=True)
freq2report(chfreqsorted[:10])
freq2report(bigramfreqsorted[:10])
freq2report(trigramfreqsorted[:10])
freq2report(fourgramfreqsorted[:10])
"""
from scipy.linalg import norm
import tfidf
def fast_cosine_sim(a, b):
if len(b) < len(a):
a, b = b, a
up = 0
for key, a_value in a.iteritems():
b_value = b.get(key, 0)
up += a_value * b_value
if up == 0:
return 0
return up / norm(a.values()) / norm(b.values())
jieba.set_dictionary('dict.txt.big')
article_num = 2#len(data)
table = tfidf.tfidf()
words_table = dict()
#doc_count = []
for i in xrange(0, article_num):
x = data[i]
try:
words = jieba.cut(x['content'], cut_all=False)
words = [word for word in words]
table.addDocument(i, words)
words_table[i] = words
#word_count = dict()
#for word in words:
# word_count[word] = word_count.get(word, 0) + 1
#doc_count.append(word_count)
#print reduce(lambda x,y: x + '\n' + y, map(lambda (k,v): k + '\t' + str(v), word_count.items()))
except:
continue
print reduce(lambda x, y: x + ' ' + y, words_table[1])
#scores = dict()
#for i in xrange(0, article_num):
# scores[i] = fast_cosine_sim(doc_count[0], doc_count[i])
#scoresorted=sorted(scores.items(), key=itemgetter(1), reverse=True)
#print reduce(lambda x,y: x + '\n' + y, map(lambda (k,v): str(k) + '\t' + str(v), scoresorted))
"""
select = [1, 384]
words_vector = []
for i in select:
try:
words_vector += words_table[i]
except:
continue
"""
tfidfsorted=sorted(table.similarities([u'復興',u'澎湖',u'航空',u'空難',u'颱風',u'家屬',u'班機',u'飛機',u'罹難者',u'天氣']), key=itemgetter(1), reverse=True)
print reduce(lambda x,y: x + '\n' + y, map(lambda (k,v): str(k) + '\t' + str(v), tfidfsorted))
#sentence=reduce(lambda x,y: x + y['content'], data, u'')#data[0]['content']#u'吃葡萄不吐葡萄皮,不吃葡萄倒吐葡萄皮。'
#words = jieba.cut(sentence, cut_all=False)
#print "Output 精確模式 Full Mode:"
#for word in words:
# print word
|
#/*##########################################################################
# Copyright (C) 2004-2012 European Synchrotron Radiation Facility
#
# This file is part of the PyMca X-ray Fluorescence Toolkit developed at
# the ESRF by the Software group.
#
# This toolkit is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# PyMca is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# PyMca; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# PyMca follows the dual licensing model of Riverbank's PyQt and cannot be
# used as a free plugin for a non-free program.
#
# Please contact the ESRF industrial unit ([email protected]) if this license
# is a problem for you.
#############################################################################*/
import sys
import os
import numpy
from PyMca import DataObject
from PyMca import PyMcaIOHelper
DEBUG = 0
SOURCE_TYPE = "EdfFileStack"
class AifiraMap(DataObject.DataObject):
def __init__(self, filename):
DataObject.DataObject.__init__(self)
if sys.platform == 'win32':
fid = open(filename, 'rb')
else:
fid = open(filename, 'r')
self.sourceName = [filename]
self.data = PyMcaIOHelper.readAifira(fid).astype(numpy.float)
nrows, ncols, nChannels = self.data.shape
self.nSpectra = nrows * ncols
fid.close()
#fill the header
self.header = []
self.nRows = nrows
#arrange as an EDF Stack
self.info = {}
self.__nFiles = self.nSpectra / self.nRows
self.__nImagesPerFile = 1
shape = self.data.shape
for i in range(len(shape)):
key = 'Dim_%d' % (i + 1,)
self.info[key] = shape[i]
self.info["SourceType"] = SOURCE_TYPE
self.info["SourceName"] = self.sourceName
self.info["Size"] = self.__nFiles * self.__nImagesPerFile
self.info["NumberOfFiles"] = self.__nFiles * 1
self.info["FileIndex"] = 0
self.info["McaCalib"] = [0.0, 1.0, 0.0]
self.info["Channel0"] = 0.0
def main():
global DEBUG
filename = None
if len(sys.argv) > 1:
filename = sys.argv[1]
elif os.path.exists("./AIFIRA/010737.DAT"):
filename = "./AIFIRA/010737.DAT"
if filename is not None:
DEBUG = 1
AifiraMap(filename)
else:
print("Please supply input filename")
if __name__ == "__main__":
main()
|
import pynmea2
from PyQt4.QtCore import QObject, pyqtSignal, QDate, QDateTime, QTime, Qt
from qgis.core import (QgsGPSDetector, QgsGPSConnectionRegistry, QgsPoint, \
QgsCoordinateTransform, QgsCoordinateReferenceSystem, \
QgsGPSInformation, QgsCsException)
from roam.utils import log, info
NMEA_FIX_BAD = 1
NMEA_FIX_2D = 2
NMEA_FIX_3D = 3
KNOTS_TO_KM = 1.852
def safe_float(value):
try:
return float(value)
except (TypeError, ValueError):
return 0.0
def safe_int(value):
try:
return int(value)
except (TypeError, ValueError):
return 0
class GPSService(QObject):
gpsfixed = pyqtSignal(bool, object)
gpsfailed = pyqtSignal()
gpsdisconnected = pyqtSignal()
# Connect to listen to GPS status updates.
gpsposition = pyqtSignal(QgsPoint, object)
firstfix = pyqtSignal(QgsPoint, object)
def __init__(self):
super(GPSService, self).__init__()
self.isConnected = False
self._currentport = None
self.postion = None
self.elevation = None
self.info = QgsGPSInformation()
self.wgs84CRS = QgsCoordinateReferenceSystem(4326)
self.crs = None
def gpsinfo(self, attribute):
"""
        Return information about the GPS position.
:param attribute:
:return:
"""
if attribute.lower() == 'x':
return self.postion.x()
if attribute.lower() == 'y':
return self.postion.y()
if attribute.lower() == 'z':
return self.elevation
if attribute.lower() == 'portname':
return self.currentport
else:
return getattr(self.info, attribute)
@property
def currentport(self):
return 'scan' if not self._currentport else self._currentport
@currentport.setter
def currentport(self, value):
self._currentport = value
def connectGPS(self, portname):
if not self.isConnected:
self._currentport = portname
if portname == 'scan' or portname == '':
log("Auto scanning for GPS port")
portname = ''
self.detector = QgsGPSDetector(portname)
log("Connecting to:{}".format(portname or 'scan'))
self.detector.detected.connect(self._gpsfound)
self.detector.detectionFailed.connect(self.gpsfailed)
self.isConnectFailed = False
self.detector.advance()
def disconnectGPS(self):
if self.isConnected:
self.gpsConn.nmeaSentenceReceived.disconnect(self.parse_data)
# self.gpsConn.stateChanged.disconnect(self.gpsStateChanged)
self.gpsConn.close()
log("GPS disconnect")
self.isConnected = False
self.postion = None
QgsGPSConnectionRegistry.instance().unregisterConnection(self.gpsConn)
self.gpsdisconnected.emit()
def _gpsfound(self, gpsConnection):
log("GPS found")
self.gpsConn = gpsConnection
self.gpsConn.nmeaSentenceReceived.connect(self.parse_data)
# self.gpsConn.stateChanged.connect(self.gpsStateChanged)
self.isConnected = True
QgsGPSConnectionRegistry.instance().registerConnection(self.gpsConn)
def parse_data(self, datastring):
try:
data = pynmea2.parse(datastring)
except AttributeError as er:
log(er.message)
return
except pynmea2.SentenceTypeError as er:
log(er.message)
return
mappings = {"RMC": self.extract_rmc,
"GGA": self.extract_gga,
"GSV": self.extract_gsv,
"VTG": self.extract_vtg,
"GSA": self.extract_gsa}
try:
mappings[data.sentence_type](data)
self.gpsStateChanged(self.info)
except KeyError:
return
except AttributeError:
return
def extract_vtg(self, data):
self.info.speed = safe_float(data.spd_over_grnd_kmph)
return self.info
def extract_gsa(self, data):
self.info.hdop = safe_float(data.hdop)
self.info.pdop = safe_float(data.pdop)
self.info.vdop = safe_float(data.vdop)
self.info.fixMode = data.mode
self.info.fixType = safe_int(data.mode_fix_type)
return self.info
def extract_gsv(self, data):
pass
def extract_gga(self, data):
self.info.latitude = data.latitude
self.info.longitude = data.longitude
self.info.elevation = safe_float(data.altitude)
self.info.quality = safe_int(data.gps_qual)
self.info.satellitesUsed = safe_int(data.num_sats)
return self.info
def extract_rmc(self, data):
self.info.latitude = data.latitude
self.info.longitude = data.longitude
self.info.speed = KNOTS_TO_KM * safe_float(data.spd_over_grnd)
self.info.status = data.data_validity
self.info.direction = safe_float(data.true_course)
if data.datestamp and data.timestamp:
date = QDate(data.datestamp.year, data.datestamp.month, data.datestamp.day)
time = QTime(data.timestamp.hour, data.timestamp.minute, data.timestamp.second)
dt = QDateTime()
self.info.utcDateTime.setTimeSpec(Qt.UTC)
self.info.utcDateTime.setDate(date)
self.info.utcDateTime.setTime(time)
return self.info
def gpsStateChanged(self, gpsInfo):
if gpsInfo.fixType == NMEA_FIX_BAD or gpsInfo.status == 0 or gpsInfo.quality == 0:
self.gpsfixed.emit(False, gpsInfo)
return
        elif gpsInfo.fixType in (NMEA_FIX_3D, NMEA_FIX_2D):
self.gpsfixed.emit(True, gpsInfo)
map_pos = QgsPoint(gpsInfo.longitude, gpsInfo.latitude)
if self.crs:
transform = QgsCoordinateTransform(self.wgs84CRS, self.crs)
try:
map_pos = transform.transform(map_pos)
except QgsCsException:
log("Transform exception")
return
if self.postion is None:
self.firstfix.emit(map_pos, gpsInfo)
self.info = gpsInfo
self.gpsposition.emit(map_pos, gpsInfo)
self.postion = map_pos
self.elevation = gpsInfo.elevation
GPS = GPSService()
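# Hedged usage sketch (assumes a running Qt event loop and an NMEA source on
# the chosen port; 'scan' lets QgsGPSDetector probe the available ports):
#
#     def on_position(point, info):
#         log("fix at {} hdop={}".format(point.toString(), info.hdop))
#
#     GPS.gpsposition.connect(on_position)
#     GPS.crs = QgsCoordinateReferenceSystem(28356)  # optional output CRS
#     GPS.connectGPS('scan')
#
# The firstfix signal fires once on the first valid position, and
# gpsdisconnected fires after disconnectGPS().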
|
import os
from xml.dom import minidom
def get_data(api, api_nonunique, method_dom_objs, signature):
tmp_dict = {}
for inner_method_domobj in method_dom_objs.childNodes:
tmp_dict_2 = {}
node_name = inner_method_domobj.localName
if node_name:
tmp_dict[node_name] = {}
tmp_dict_2[node_name] = {}
for element in signature[node_name]:
level = []
if inner_method_domobj.hasAttribute(element):
level.append(inner_method_domobj.getAttribute(element))
if inner_method_domobj.hasChildNodes():
for ii in inner_method_domobj.childNodes:
if ii.localName and ii.hasAttribute(element):
level.append(ii.getAttribute(element))
if node_name == 'response':
tmp_dict[node_name][element] = level
tmp_dict_2[node_name][element] = level
else:
tmp_dict[node_name][element] = inner_method_domobj.getAttribute(element)
tmp_dict_2[node_name][element] = inner_method_domobj.getAttribute(element)
if tmp_dict_2:
api_nonunique.append(tmp_dict_2)
if tmp_dict:
api.append(tmp_dict)
return api, api_nonunique
def xml2dict(xml, signature):
"""
For parsing api.xml
TODO: SZ: Make recursive!
"""
api = []
api_nonunique = []
try:
if os.path.exists(xml):
xmldoc = minidom.parse(xml)
# SZ: Huge hack. FIX!
            for method_dom_objs in xmldoc.childNodes[0].childNodes:
                api, api_nonunique = get_data(api, api_nonunique, method_dom_objs, signature)
else:
xmldoc = minidom.parseString(xml)
api, api_nonunique = get_data(api, api_nonunique, xmldoc.childNodes[0], signature)
except:
return False
return api, api_nonunique
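# Hedged usage sketch: `signature` maps a node's localName to the attribute
# names to collect from it (for 'response' nodes, matching attributes of the
# child nodes are gathered into lists). The node names below are illustrative
# only:
#
#     signature = {'request': ['name'], 'response': ['name', 'type']}
#     result = xml2dict('api.xml', signature)
#     if result is not False:
#         api, api_nonunique = result
#
# Any parse failure makes xml2dict return False instead of the two lists.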
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Keyword.text'
db.alter_column(u'dictionary_keyword', 'text', self.gf('django.db.models.fields.CharField')(unique=True, max_length=100))
def backwards(self, orm):
# Changing field 'Keyword.text'
db.alter_column(u'dictionary_keyword', 'text', self.gf('django.db.models.fields.CharField')(max_length=50, unique=True))
models = {
u'dictionary.definition': {
'Meta': {'ordering': "['gloss']", 'object_name': 'Definition'},
'count': ('django.db.models.fields.IntegerField', [], {}),
'gloss': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['dictionary.Gloss']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'role': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'text': ('django.db.models.fields.TextField', [], {})
},
u'dictionary.dialect': {
'Meta': {'ordering': "['language', 'name']", 'object_name': 'Dialect'},
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['dictionary.Language']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
u'dictionary.gloss': {
'Meta': {'ordering': "['idgloss']", 'object_name': 'Gloss'},
'StemSN': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'annotation_idgloss': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'aslgloss': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'asloantf': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'asltf': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'blend': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'blendtf': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'bslgloss': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'bslloantf': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'bsltf': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'compound': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'comptf': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'dialect': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['dictionary.Dialect']", 'symmetrical': 'False'}),
'domhndsh': ('django.db.models.fields.CharField', [], {'max_length': '5', 'blank': 'True'}),
'final_domhndsh': ('django.db.models.fields.CharField', [], {'max_length': '5', 'blank': 'True'}),
'final_loc': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'final_palm_orientation': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'final_relative_orientation': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'final_secondary_loc': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'final_subhndsh': ('django.db.models.fields.CharField', [], {'max_length': '5', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'idgloss': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'inWeb': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'initial_palm_orientation': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'initial_relative_orientation': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'initial_secondary_loc': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'inittext': ('django.db.models.fields.CharField', [], {'max_length': "'50'", 'blank': 'True'}),
'isNew': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'language': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['dictionary.Language']", 'symmetrical': 'False'}),
'locprim': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'locsecond': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'morph': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'sedefinetf': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'segloss': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'sense': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'sn': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'null': 'True', 'blank': 'True'}),
'subhndsh': ('django.db.models.fields.CharField', [], {'max_length': '5', 'null': 'True', 'blank': 'True'})
},
u'dictionary.keyword': {
'Meta': {'ordering': "['text']", 'object_name': 'Keyword'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'text': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
},
u'dictionary.language': {
'Meta': {'ordering': "['name']", 'object_name': 'Language'},
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
u'dictionary.relation': {
'Meta': {'ordering': "['source']", 'object_name': 'Relation'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'role': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'relation_sources'", 'to': u"orm['dictionary.Gloss']"}),
'target': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'relation_targets'", 'to': u"orm['dictionary.Gloss']"})
},
u'dictionary.translation': {
'Meta': {'ordering': "['gloss', 'index']", 'object_name': 'Translation'},
'gloss': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['dictionary.Gloss']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index': ('django.db.models.fields.IntegerField', [], {}),
'translation': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['dictionary.Keyword']"})
}
}
complete_apps = ['dictionary']
|
# -*- coding: utf-8 -*-
# ===========================================================================
# eXe
# Copyright 2013, Pedro Peña Pérez, Open Phoenix IT
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# ===========================================================================
import sys
import os
from exe.engine.path import Path
from exe import globals as G
class LocationButtons(object):
def __init__(self):
if sys.platform[:3] == "win":
self.names_map = {0: x_('Desktop'),
5: x_('My Documents'),
40: x_('Home Folder')}
elif sys.platform[:6] == "darwin":
self.names_map = {'kDesktopFolderType': x_('Desktop'),
'kDocumentsFolderType': x_('Documents'),
'kCurrentUserFolderType': x_('Home Folder')}
else:
self.names_map = {'DESKTOP': x_('Desktop'),
'DOCUMENTS': x_('Documents'),
'HOME': x_('Home Folder')}
self.compute()
def compute(self):
self.buttons = []
for key, value in self.names_map.items():
try:
button = {'xtype': 'button', 'text': _(value),
'location': self.MapDir(key)}
self.buttons.append(button)
except:
pass
def updateText(self):
i = 0
for value in self.names_map.values():
button = self.buttons[i]
button['text'] = _(value)
i = i + 1
def MapDir(self, code):
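        # Resolve a platform-specific folder identifier (a CSIDL integer on
        # Windows, a Carbon folder-type name on macOS, an XDG name elsewhere)
        # into an absolute path, falling back to HOME-based defaults on failure.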
if sys.platform[:3] == "win":
try:
from ctypes import WinDLL, create_unicode_buffer
dll = WinDLL('shell32')
result = create_unicode_buffer(260)
resource = dll.SHGetFolderPathW(None, code, None, 0, result)
if resource != 0:
raise Exception
else:
path = result.value
except:
if code == 0:
path = os.environ['HOMEPATH']
else:
raise
elif sys.platform[:6] == "darwin":
try:
from Carbon import Folder, Folders
folderref = Folder.FSFindFolder(Folders.kUserDomain,
getattr(Folders, code), False)
path = folderref.as_pathname()
except:
if code == 'kCurrentUserFolderType':
path = os.environ['HOME']
else:
raise
else:
try:
XDG_USER_DIR_CMD = 'xdg-user-dir'
import subprocess
p = subprocess.Popen([XDG_USER_DIR_CMD, code],
stdout=subprocess.PIPE)
path, _ = p.communicate()
path = path.rstrip('\n')
except:
if code == 'HOME':
path = os.environ['HOME']
elif G.application.snap and code == 'DESKTOP':
path = os.environ.get('XDG_DESKTOP_DIR')
if not path:
raise Exception
elif G.application.snap and code == 'DOCUMENTS':
path = os.environ.get('XDG_DOCUMENTS_DIR')
if not path:
raise Exception
return Path(path).abspath()
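# Illustrative usage sketch (not part of the original module):
#     buttons = LocationButtons().buttons
#     # -> [{'xtype': 'button', 'text': <localized label>, 'location': <abs path>}, ...]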
|
from gh.base import Command
from gh.util import get_issue_number
class IssueReopenCommand(Command):
name = 'issue.reopen'
usage = '%prog [options] issue.reopen [#]number'
summary = 'Reopen an issue'
subcommands = {}
def run(self, options, args):
self.get_repo(options)
opts, args = self.parser.parse_args(args)
if opts.help:
self.help()
number = get_issue_number(
args, self.parser, 'issue.reopen requires a valid number'
)
if number is None:
return self.FAILURE
return self.reopen_issue(number)
def reopen_issue(self, number):
self.login()
user, repo = self.repository
issue = self.gh.issue(user, repo, number)
if not issue:
print("Reopening issue failed.")
return self.FAILURE
if issue.reopen():
print("Issue reopened successfully.")
return self.SUCCESS
else:
print("Reopening issue failed.")
return self.FAILURE
IssueReopenCommand()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
import datetime
import os
import sys
import stat
import json
from library import templexer
import argparse
import ConfigParser
configread = ConfigParser.ConfigParser()
# sys.path.append("./library")
def hoge(arg):
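    # Strip surrounding "{}();" characters, split the remainder on ":" and print
    # the resulting fields (registered below as the handler for the HOGE command).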
arg = arg.strip("{}();")
arg = arg.split(":")
print arg
templexer.add_command("HOGE", hoge)
if __name__ == "__main__":
class MyParser(object):
def __init__(self):
self._parser = argparse.ArgumentParser(description="""
templexer test script
""")
self._parser.add_argument('--device', '-D', help='device number',
default=1)
self._parser.add_argument('--config', '-C', help='config file',
default="default.conf")
self._parser.add_argument('--script', '-S', help='script file',
default="script.txt")
self.args = self._parser.parse_args(namespace=self)
parser = MyParser()
_device = parser.args.device
_config = parser.args.config
_script = parser.args.script
configread.read(_config)
# print configread.items("script")
scr = configread.get("script", "source")
templexer.parse(scr)
# with open(_script, "rb") as _file:
# script = _file.read()
# script = script.upper()
# # configread.read(script)
# # print script
# # print templexer.callback
# # print isinstance(script, basestring)
|
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
import time
from oslo_config import cfg
import six
from rally import exceptions
from rally.plugins.openstack import scenario
from rally.plugins.openstack.wrappers import network as network_wrapper
from rally.task import atomic
from rally.task import utils
NOVA_BENCHMARK_OPTS = []
option_names_and_defaults = [
# action, prepoll delay, timeout, poll interval
("start", 0, 300, 1),
("stop", 0, 300, 2),
("boot", 1, 300, 1),
("delete", 2, 300, 2),
("reboot", 2, 300, 2),
("rebuild", 1, 300, 1),
("rescue", 2, 300, 2),
("unrescue", 2, 300, 2),
("suspend", 2, 300, 2),
("resume", 2, 300, 2),
("pause", 2, 300, 2),
("unpause", 2, 300, 2),
("shelve", 2, 300, 2),
("unshelve", 2, 300, 2),
("image_create", 0, 300, 2),
("image_delete", 0, 300, 2),
("resize", 2, 400, 5),
("resize_confirm", 0, 200, 2),
("resize_revert", 0, 200, 2),
("live_migrate", 1, 400, 2),
("migrate", 1, 400, 2),
]
for action, prepoll, timeout, poll in option_names_and_defaults:
NOVA_BENCHMARK_OPTS.extend([
cfg.FloatOpt(
"nova_server_%s_prepoll_delay" % action,
default=float(prepoll),
help="Time to sleep after %s before polling for status" % action
),
cfg.FloatOpt(
"nova_server_%s_timeout" % action,
default=float(timeout),
help="Server %s timeout" % action
),
cfg.FloatOpt(
"nova_server_%s_poll_interval" % action,
default=float(poll),
help="Server %s poll interval" % action
)
])
NOVA_BENCHMARK_OPTS.extend([
cfg.FloatOpt(
"nova_detach_volume_timeout",
default=float(200),
help="Nova volume detach timeout"),
cfg.FloatOpt(
"nova_detach_volume_poll_interval",
default=float(2),
help="Nova volume detach poll interval")
])
CONF = cfg.CONF
benchmark_group = cfg.OptGroup(name="benchmark",
title="benchmark options")
CONF.register_group(benchmark_group)
CONF.register_opts(NOVA_BENCHMARK_OPTS, group=benchmark_group)
class NovaScenario(scenario.OpenStackScenario):
"""Base class for Nova scenarios with basic atomic actions."""
@atomic.action_timer("nova.list_servers")
def _list_servers(self, detailed=True):
"""Returns user servers list."""
return self.clients("nova").servers.list(detailed)
def _pick_random_nic(self):
"""Choose one network from existing ones."""
ctxt = self.context
nets = [net["id"]
for net in ctxt.get("tenant", {}).get("networks", [])]
if nets:
# NOTE(amaretskiy): Balance servers among networks.
net_idx = self.context["iteration"] % len(nets)
return [{"net-id": nets[net_idx]}]
@atomic.action_timer("nova.boot_server")
def _boot_server(self, image_id, flavor_id,
auto_assign_nic=False, name=None, **kwargs):
"""Boot a server.
Returns when the server is actually booted and in "ACTIVE" state.
If multiple networks created by Network context are present, the first
network found that isn't associated with a floating IP pool is used.
:param image_id: int, image ID for server creation
:param flavor_id: int, flavor ID for server creation
:param auto_assign_nic: bool, whether or not to auto assign NICs
:param name: str, server name
:param kwargs: other optional parameters to initialize the server
:returns: nova Server instance
"""
server_name = name or self._generate_random_name()
secgroup = self.context.get("user", {}).get("secgroup")
if secgroup:
if "security_groups" not in kwargs:
kwargs["security_groups"] = [secgroup["name"]]
elif secgroup["name"] not in kwargs["security_groups"]:
kwargs["security_groups"].append(secgroup["name"])
if auto_assign_nic and not kwargs.get("nics", False):
nic = self._pick_random_nic()
if nic:
kwargs["nics"] = nic
server = self.clients("nova").servers.create(
server_name, image_id, flavor_id, **kwargs)
time.sleep(CONF.benchmark.nova_server_boot_prepoll_delay)
server = utils.wait_for(
server,
is_ready=utils.resource_is("ACTIVE"),
update_resource=utils.get_from_manager(),
timeout=CONF.benchmark.nova_server_boot_timeout,
check_interval=CONF.benchmark.nova_server_boot_poll_interval
)
return server
def _do_server_reboot(self, server, reboottype):
server.reboot(reboot_type=reboottype)
time.sleep(CONF.benchmark.nova_server_reboot_prepoll_delay)
utils.wait_for(
server, is_ready=utils.resource_is("ACTIVE"),
update_resource=utils.get_from_manager(),
timeout=CONF.benchmark.nova_server_reboot_timeout,
check_interval=CONF.benchmark.nova_server_reboot_poll_interval
)
@atomic.action_timer("nova.soft_reboot_server")
def _soft_reboot_server(self, server):
"""Reboot a server with soft reboot.
        A soft reboot will be issued on the given server, after which this
        method waits for the server to become active.
:param server: The server to reboot.
"""
self._do_server_reboot(server, "SOFT")
@atomic.action_timer("nova.reboot_server")
def _reboot_server(self, server):
"""Reboot a server with hard reboot.
        A hard reboot will be issued on the given server, after which this
        method waits for the server to become active.
:param server: The server to reboot.
"""
self._do_server_reboot(server, "HARD")
@atomic.action_timer("nova.rebuild_server")
def _rebuild_server(self, server, image, **kwargs):
"""Rebuild a server with a new image.
:param server: The server to rebuild.
:param image: The new image to rebuild the server with.
:param kwargs: Optional additional arguments to pass to the rebuild
"""
server.rebuild(image, **kwargs)
time.sleep(CONF.benchmark.nova_server_rebuild_prepoll_delay)
utils.wait_for(
server,
is_ready=utils.resource_is("ACTIVE"),
update_resource=utils.get_from_manager(),
timeout=CONF.benchmark.nova_server_rebuild_timeout,
check_interval=CONF.benchmark.nova_server_rebuild_poll_interval
)
@atomic.action_timer("nova.start_server")
def _start_server(self, server):
"""Start the given server.
        A start will be issued for the given server, after which this
        method waits for it to become ACTIVE.
:param server: The server to start and wait to become ACTIVE.
"""
server.start()
utils.wait_for(
server, is_ready=utils.resource_is("ACTIVE"),
update_resource=utils.get_from_manager(),
timeout=CONF.benchmark.nova_server_start_timeout,
check_interval=CONF.benchmark.nova_server_start_poll_interval
)
@atomic.action_timer("nova.stop_server")
def _stop_server(self, server):
"""Stop the given server.
Issues a stop on the given server and waits for the server
to become SHUTOFF.
:param server: The server to stop.
"""
server.stop()
utils.wait_for(
server, is_ready=utils.resource_is("SHUTOFF"),
update_resource=utils.get_from_manager(),
timeout=CONF.benchmark.nova_server_stop_timeout,
check_interval=CONF.benchmark.nova_server_stop_poll_interval
)
@atomic.action_timer("nova.rescue_server")
def _rescue_server(self, server):
"""Rescue the given server.
        Returns when the server is actually rescued and is in the "RESCUE"
        state.
:param server: Server object
"""
server.rescue()
time.sleep(CONF.benchmark.nova_server_rescue_prepoll_delay)
utils.wait_for(
server, is_ready=utils.resource_is("RESCUE"),
update_resource=utils.get_from_manager(),
timeout=CONF.benchmark.nova_server_rescue_timeout,
check_interval=CONF.benchmark.nova_server_rescue_poll_interval
)
@atomic.action_timer("nova.unrescue_server")
def _unrescue_server(self, server):
"""Unrescue the given server.
        Returns when the server is unrescued and back in the ACTIVE state.
:param server: Server object
"""
server.unrescue()
time.sleep(CONF.benchmark.nova_server_unrescue_prepoll_delay)
utils.wait_for(
server, is_ready=utils.resource_is("ACTIVE"),
update_resource=utils.get_from_manager(),
timeout=CONF.benchmark.nova_server_unrescue_timeout,
check_interval=CONF.benchmark.nova_server_unrescue_poll_interval
)
@atomic.action_timer("nova.suspend_server")
def _suspend_server(self, server):
"""Suspends the given server.
Returns when the server is actually suspended and is in the "Suspended"
state.
:param server: Server object
"""
server.suspend()
time.sleep(CONF.benchmark.nova_server_suspend_prepoll_delay)
utils.wait_for(
server, is_ready=utils.resource_is("SUSPENDED"),
update_resource=utils.get_from_manager(),
timeout=CONF.benchmark.nova_server_suspend_timeout,
check_interval=CONF.benchmark.nova_server_suspend_poll_interval
)
@atomic.action_timer("nova.resume_server")
def _resume_server(self, server):
"""Resumes the suspended server.
Returns when the server is actually resumed and is in the "ACTIVE"
state.
:param server: Server object
"""
server.resume()
time.sleep(CONF.benchmark.nova_server_resume_prepoll_delay)
utils.wait_for(
server, is_ready=utils.resource_is("ACTIVE"),
update_resource=utils.get_from_manager(),
timeout=CONF.benchmark.nova_server_resume_timeout,
check_interval=CONF.benchmark.nova_server_resume_poll_interval
)
@atomic.action_timer("nova.pause_server")
def _pause_server(self, server):
"""Pause the live server.
Returns when the server is actually paused and is in the "PAUSED"
state.
:param server: Server object
"""
server.pause()
time.sleep(CONF.benchmark.nova_server_pause_prepoll_delay)
utils.wait_for(
server, is_ready=utils.resource_is("PAUSED"),
update_resource=utils.get_from_manager(),
timeout=CONF.benchmark.nova_server_pause_timeout,
check_interval=CONF.benchmark.nova_server_pause_poll_interval
)
@atomic.action_timer("nova.unpause_server")
def _unpause_server(self, server):
"""Unpause the paused server.
Returns when the server is actually unpaused and is in the "ACTIVE"
state.
:param server: Server object
"""
server.unpause()
time.sleep(CONF.benchmark.nova_server_unpause_prepoll_delay)
utils.wait_for(
server, is_ready=utils.resource_is("ACTIVE"),
update_resource=utils.get_from_manager(),
timeout=CONF.benchmark.nova_server_unpause_timeout,
check_interval=CONF.benchmark.nova_server_unpause_poll_interval
)
@atomic.action_timer("nova.shelve_server")
def _shelve_server(self, server):
"""Shelve the given server.
Returns when the server is actually shelved and is in the
"SHELVED_OFFLOADED" state.
:param server: Server object
"""
server.shelve()
time.sleep(CONF.benchmark.nova_server_shelve_prepoll_delay)
utils.wait_for(
server, is_ready=utils.resource_is("SHELVED_OFFLOADED"),
update_resource=utils.get_from_manager(),
timeout=CONF.benchmark.nova_server_shelve_timeout,
check_interval=CONF.benchmark.nova_server_shelve_poll_interval
)
@atomic.action_timer("nova.unshelve_server")
def _unshelve_server(self, server):
"""Unshelve the given server.
Returns when the server is unshelved and is in the "ACTIVE" state.
:param server: Server object
"""
server.unshelve()
time.sleep(CONF.benchmark.nova_server_unshelve_prepoll_delay)
utils.wait_for(
server, is_ready=utils.resource_is("ACTIVE"),
update_resource=utils.get_from_manager(),
timeout=CONF.benchmark.nova_server_unshelve_timeout,
check_interval=CONF.benchmark.nova_server_unshelve_poll_interval
)
def _delete_server(self, server, force=False):
"""Delete the given server.
Returns when the server is actually deleted.
:param server: Server object
:param force: If True, force_delete will be used instead of delete.
"""
atomic_name = ("nova.%sdelete_server") % (force and "force_" or "")
with atomic.ActionTimer(self, atomic_name):
if force:
server.force_delete()
else:
server.delete()
utils.wait_for_status(
server,
ready_statuses=["deleted"],
check_deletion=True,
update_resource=utils.get_from_manager(),
timeout=CONF.benchmark.nova_server_delete_timeout,
check_interval=CONF.benchmark.nova_server_delete_poll_interval
)
def _delete_servers(self, servers, force=False):
"""Delete multiple servers.
:param servers: A list of servers to delete
:param force: If True, force_delete will be used instead of delete.
"""
atomic_name = ("nova.%sdelete_servers") % (force and "force_" or "")
with atomic.ActionTimer(self, atomic_name):
for server in servers:
if force:
server.force_delete()
else:
server.delete()
for server in servers:
utils.wait_for_status(
server,
ready_statuses=["deleted"],
check_deletion=True,
update_resource=utils.get_from_manager(),
timeout=CONF.benchmark.nova_server_delete_timeout,
                    check_interval=CONF.benchmark.nova_server_delete_poll_interval
)
@atomic.action_timer("nova.delete_image")
def _delete_image(self, image):
"""Delete the given image.
Returns when the image is actually deleted.
:param image: Image object
"""
image.delete()
check_interval = CONF.benchmark.nova_server_image_delete_poll_interval
utils.wait_for_status(
image,
ready_statuses=["deleted"],
check_deletion=True,
update_resource=utils.get_from_manager(),
timeout=CONF.benchmark.nova_server_image_delete_timeout,
check_interval=check_interval
)
@atomic.action_timer("nova.create_image")
def _create_image(self, server):
"""Create an image from the given server
Uses the server name to name the created image. Returns when the image
is actually created and is in the "Active" state.
:param server: Server object for which the image will be created
:returns: Created image object
"""
image_uuid = self.clients("nova").servers.create_image(server,
server.name)
image = self.clients("nova").images.get(image_uuid)
check_interval = CONF.benchmark.nova_server_image_create_poll_interval
image = utils.wait_for(
image,
is_ready=utils.resource_is("ACTIVE"),
update_resource=utils.get_from_manager(),
timeout=CONF.benchmark.nova_server_image_create_timeout,
check_interval=check_interval
)
return image
@atomic.action_timer("nova.list_images")
def _list_images(self, detailed=False, **kwargs):
"""List all images.
:param detailed: True if the image listing
should contain detailed information
:param kwargs: Optional additional arguments for image listing
:returns: Image list
"""
return self.clients("nova").images.list(detailed, **kwargs)
@atomic.action_timer("nova.create_keypair")
def _create_keypair(self, **kwargs):
"""Create a keypair
:returns: Created keypair name
"""
keypair_name = self._generate_random_name(prefix="rally_keypair_")
keypair = self.clients("nova").keypairs.create(keypair_name, **kwargs)
return keypair.name
@atomic.action_timer("nova.list_keypairs")
def _list_keypairs(self):
"""Return user keypairs list."""
return self.clients("nova").keypairs.list()
@atomic.action_timer("nova.delete_keypair")
def _delete_keypair(self, keypair_name):
"""Delete keypair
:param keypair_name: The keypair name to delete.
"""
self.clients("nova").keypairs.delete(keypair_name)
@atomic.action_timer("nova.boot_servers")
def _boot_servers(self, image_id, flavor_id, requests, name_prefix=None,
instances_amount=1, auto_assign_nic=False, **kwargs):
"""Boot multiple servers.
Returns when all the servers are actually booted and are in the
"Active" state.
:param image_id: ID of the image to be used for server creation
:param flavor_id: ID of the flavor to be used for server creation
:param requests: Number of booting requests to perform
:param name_prefix: The prefix to use while naming the created servers.
The rest of the server names will be '_<number>'
:param instances_amount: Number of instances to boot per each request
:param auto_assign_nic: bool, whether or not to auto assign NICs
:param kwargs: other optional parameters to initialize the servers
:returns: List of created server objects
"""
if not name_prefix:
name_prefix = self._generate_random_name()
if auto_assign_nic and not kwargs.get("nics", False):
nic = self._pick_random_nic()
if nic:
kwargs["nics"] = nic
for i in range(requests):
self.clients("nova").servers.create("%s_%d" % (name_prefix, i),
image_id, flavor_id,
min_count=instances_amount,
max_count=instances_amount,
**kwargs)
# NOTE(msdubov): Nova python client returns only one server even when
# min_count > 1, so we have to rediscover all the
# created servers manually.
servers = filter(lambda server: server.name.startswith(name_prefix),
self.clients("nova").servers.list())
time.sleep(CONF.benchmark.nova_server_boot_prepoll_delay)
servers = [utils.wait_for(
server,
is_ready=utils.resource_is("ACTIVE"),
            update_resource=utils.get_from_manager(),
timeout=CONF.benchmark.nova_server_boot_timeout,
check_interval=CONF.benchmark.nova_server_boot_poll_interval
) for server in servers]
return servers
def _associate_floating_ip(self, server, address, fixed_address=None,
atomic_action=True):
"""Add floating IP to an instance
:param server: The :class:`Server` to add an IP to.
:param address: The ip address or FloatingIP to add to the instance
:param fixed_address: The fixedIP address the FloatingIP is to be
associated with (optional)
:param atomic_action: True if this is an atomic action (optional)
"""
if atomic_action:
with atomic.ActionTimer(self, "nova.associate_floating_ip"):
server.add_floating_ip(address, fixed_address=fixed_address)
utils.wait_for(
server,
is_ready=self.check_ip_address(address),
update_resource=utils.get_from_manager()
)
else:
server.add_floating_ip(address, fixed_address=fixed_address)
utils.wait_for(
server,
is_ready=self.check_ip_address(address),
update_resource=utils.get_from_manager()
)
# Update server data
server.addresses = server.manager.get(server.id).addresses
def _dissociate_floating_ip(self, server, address, atomic_action=True):
"""Remove floating IP from an instance
        :param server: The :class:`Server` to remove an IP from.
:param address: The ip address or FloatingIP to remove
:param atomic_action: True if this is an atomic action (optional)
"""
if atomic_action:
with atomic.ActionTimer(self, "nova.dissociate_floating_ip"):
server.remove_floating_ip(address)
utils.wait_for(
server,
is_ready=self.check_ip_address(address, must_exist=False),
update_resource=utils.get_from_manager()
)
else:
server.remove_floating_ip(address)
utils.wait_for(
server,
is_ready=self.check_ip_address(address, must_exist=False),
update_resource=utils.get_from_manager()
)
# Update server data
server.addresses = server.manager.get(server.id).addresses
@staticmethod
def check_ip_address(address, must_exist=True):
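        # Build a predicate for utils.wait_for(): it returns ``must_exist`` when
        # the address is present on the server and ``not must_exist`` otherwise,
        # so the same helper can wait for an IP to appear or to disappear.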
ip_to_check = getattr(address, "ip", address)
def _check_addr(resource):
for network, addr_list in resource.addresses.items():
for addr in addr_list:
if ip_to_check == addr["addr"]:
return must_exist
return not must_exist
return _check_addr
@atomic.action_timer("nova.list_networks")
def _list_networks(self):
"""Return user networks list."""
return self.clients("nova").networks.list()
@atomic.action_timer("nova.resize")
def _resize(self, server, flavor):
server.resize(flavor)
utils.wait_for(
server,
is_ready=utils.resource_is("VERIFY_RESIZE"),
update_resource=utils.get_from_manager(),
timeout=CONF.benchmark.nova_server_resize_timeout,
check_interval=CONF.benchmark.nova_server_resize_poll_interval
)
@atomic.action_timer("nova.resize_confirm")
def _resize_confirm(self, server, status="ACTIVE"):
server.confirm_resize()
utils.wait_for(
server,
is_ready=utils.resource_is(status),
update_resource=utils.get_from_manager(),
timeout=CONF.benchmark.nova_server_resize_confirm_timeout,
check_interval=(
CONF.benchmark.nova_server_resize_confirm_poll_interval)
)
@atomic.action_timer("nova.resize_revert")
def _resize_revert(self, server, status="ACTIVE"):
server.revert_resize()
utils.wait_for(
server,
is_ready=utils.resource_is(status),
update_resource=utils.get_from_manager(),
timeout=CONF.benchmark.nova_server_resize_revert_timeout,
check_interval=(
CONF.benchmark.nova_server_resize_revert_poll_interval)
)
@atomic.action_timer("nova.attach_volume")
def _attach_volume(self, server, volume, device=None):
server_id = server.id
volume_id = volume.id
self.clients("nova").volumes.create_server_volume(server_id,
volume_id,
device)
utils.wait_for(
volume,
is_ready=utils.resource_is("in-use"),
update_resource=utils.get_from_manager(),
timeout=CONF.benchmark.nova_server_resize_revert_timeout,
check_interval=(
CONF.benchmark.nova_server_resize_revert_poll_interval)
)
@atomic.action_timer("nova.detach_volume")
def _detach_volume(self, server, volume):
server_id = server.id
volume_id = volume.id
self.clients("nova").volumes.delete_server_volume(server_id,
volume_id)
utils.wait_for(
volume,
is_ready=utils.resource_is("available"),
update_resource=utils.get_from_manager(),
timeout=CONF.benchmark.nova_detach_volume_timeout,
check_interval=CONF.benchmark.nova_detach_volume_poll_interval
)
@atomic.action_timer("nova.live_migrate")
def _live_migrate(self, server, target_host, block_migration=False,
disk_over_commit=False, skip_host_check=False):
"""Run live migration of the given server.
:param server: Server object
:param target_host: Specifies the target compute node to migrate
:param block_migration: Specifies the migration type
:param disk_over_commit: Specifies whether to overcommit migrated
instance or not
:param skip_host_check: Specifies whether to verify the targeted host
availability
"""
server_admin = self.admin_clients("nova").servers.get(server.id)
host_pre_migrate = getattr(server_admin, "OS-EXT-SRV-ATTR:host")
server_admin.live_migrate(target_host,
block_migration=block_migration,
disk_over_commit=disk_over_commit)
utils.wait_for(
server,
is_ready=utils.resource_is("ACTIVE"),
update_resource=utils.get_from_manager(),
timeout=CONF.benchmark.nova_server_live_migrate_timeout,
check_interval=(
CONF.benchmark.nova_server_live_migrate_poll_interval)
)
server_admin = self.admin_clients("nova").servers.get(server.id)
if (host_pre_migrate == getattr(server_admin, "OS-EXT-SRV-ATTR:host")
and not skip_host_check):
raise exceptions.LiveMigrateException(
"Migration complete but instance did not change host: %s" %
host_pre_migrate)
@atomic.action_timer("nova.find_host_to_migrate")
def _find_host_to_migrate(self, server):
"""Find a compute node for live migration.
:param server: Server object
"""
server_admin = self.admin_clients("nova").servers.get(server.id)
host = getattr(server_admin, "OS-EXT-SRV-ATTR:host")
az_name = getattr(server_admin, "OS-EXT-AZ:availability_zone")
az = None
for a in self.admin_clients("nova").availability_zones.list():
if az_name == a.zoneName:
az = a
break
try:
new_host = random.choice(
[key for key, value in six.iteritems(az.hosts)
if key != host and
value["nova-compute"]["available"] is True])
return new_host
except IndexError:
raise exceptions.InvalidHostException(
"No valid host found to migrate")
@atomic.action_timer("nova.migrate")
def _migrate(self, server, skip_host_check=False):
"""Run migration of the given server.
:param server: Server object
:param skip_host_check: Specifies whether to verify the targeted host
availability
"""
server_admin = self.admin_clients("nova").servers.get(server.id)
host_pre_migrate = getattr(server_admin, "OS-EXT-SRV-ATTR:host")
server_admin.migrate()
utils.wait_for(
server,
is_ready=utils.resource_is("VERIFY_RESIZE"),
update_resource=utils.get_from_manager(),
timeout=CONF.benchmark.nova_server_migrate_timeout,
check_interval=(
CONF.benchmark.nova_server_migrate_poll_interval)
)
if not skip_host_check:
server_admin = self.admin_clients("nova").servers.get(server.id)
host_after_migrate = getattr(server_admin, "OS-EXT-SRV-ATTR:host")
if host_pre_migrate == host_after_migrate:
raise exceptions.MigrateException(
"Migration complete but instance did not change host: %s" %
host_pre_migrate)
def _create_security_groups(self, security_group_count):
security_groups = []
with atomic.ActionTimer(self, "nova.create_%s_security_groups" %
security_group_count):
for i in range(security_group_count):
sg_name = self._generate_random_name()
sg = self.clients("nova").security_groups.create(sg_name,
sg_name)
security_groups.append(sg)
return security_groups
def _create_rules_for_security_group(self, security_groups,
rules_per_security_group,
ip_protocol="tcp", cidr="0.0.0.0/0"):
action_name = ("nova.create_%s_rules" % (rules_per_security_group *
len(security_groups)))
with atomic.ActionTimer(self, action_name):
for i in range(len(security_groups)):
for j in range(rules_per_security_group):
self.clients("nova").security_group_rules.create(
security_groups[i].id,
from_port=(i * rules_per_security_group + j + 1),
to_port=(i * rules_per_security_group + j + 1),
ip_protocol=ip_protocol,
cidr=cidr)
def _update_security_groups(self, security_groups):
"""Update a list of security groups
:param security_groups: list, security_groups that are to be updated
"""
with atomic.ActionTimer(self, "nova.update_%s_security_groups" %
len(security_groups)):
for sec_group in security_groups:
sg_new_name = self._generate_random_name()
sg_new_desc = self._generate_random_name()
self.clients("nova").security_groups.update(sec_group.id,
sg_new_name,
sg_new_desc)
def _delete_security_groups(self, security_group):
with atomic.ActionTimer(self, "nova.delete_%s_security_groups" %
len(security_group)):
for sg in security_group:
self.clients("nova").security_groups.delete(sg.id)
def _list_security_groups(self):
"""Return security groups list."""
with atomic.ActionTimer(self, "nova.list_security_groups"):
return self.clients("nova").security_groups.list()
@atomic.action_timer("nova.list_floating_ips_bulk")
def _list_floating_ips_bulk(self):
"""List all floating IPs."""
return self.admin_clients("nova").floating_ips_bulk.list()
@atomic.action_timer("nova.create_floating_ips_bulk")
def _create_floating_ips_bulk(self, ip_range, **kwargs):
"""Create floating IPs by range."""
ip_range = network_wrapper.generate_cidr(start_cidr=ip_range)
pool_name = self._generate_random_name(prefix="rally_fip_pool_")
return self.admin_clients("nova").floating_ips_bulk.create(
ip_range=ip_range, pool=pool_name, **kwargs)
@atomic.action_timer("nova.delete_floating_ips_bulk")
def _delete_floating_ips_bulk(self, ip_range):
"""Delete floating IPs by range."""
return self.admin_clients("nova").floating_ips_bulk.delete(ip_range)
@atomic.action_timer("nova.list_hypervisors")
def _list_hypervisors(self, detailed=True):
"""List hypervisors."""
return self.admin_clients("nova").hypervisors.list(detailed)
@atomic.action_timer("nova.lock_server")
def _lock_server(self, server):
"""Lock the given server.
:param server: Server to lock
"""
server.lock()
@atomic.action_timer("nova.unlock_server")
def _unlock_server(self, server):
"""Unlock the given server.
:param server: Server to unlock
"""
server.unlock()
@atomic.action_timer("nova.create_network")
def _create_network(self, ip_range, **kwargs):
"""Create nova network.
:param ip_range: IP range in CIDR notation to create
"""
net_label = self._generate_random_name(prefix="rally_novanet")
ip_range = network_wrapper.generate_cidr(start_cidr=ip_range)
return self.admin_clients("nova").networks.create(
label=net_label, cidr=ip_range, **kwargs)
@atomic.action_timer("nova.delete_network")
def _delete_network(self, net_id):
"""Delete nova network.
:param net_id: The nova-network ID to delete
"""
return self.admin_clients("nova").networks.delete(net_id)
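# A minimal sketch (an illustrative assumption, not part of this module) of how
# a scenario plugin might compose the atomic actions defined above:
class _ExampleServerScenario(NovaScenario):
    """Illustrative only: boot a server, then delete it."""
    def boot_and_delete_example(self, image, flavor, **kwargs):
        server = self._boot_server(image, flavor, **kwargs)
        self._delete_server(server)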
|
# -*- coding: utf-8 -*-
# This file is part of pygal
#
# A python svg graph plotting library
# Copyright © 2012-2014 Kozea
#
# This library is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with pygal. If not, see <http://www.gnu.org/licenses/>.
"""
Stacked Bar chart
"""
from __future__ import division
from pygal.graph.bar import Bar
from pygal.util import compute_scale, swap, ident
from pygal.adapters import none_to_zero
class StackedBar(Bar):
"""Stacked Bar graph"""
_adapters = [none_to_zero]
def _get_separated_values(self, secondary=False):
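        # For every x position, sum the stacked values above self.zero and those
        # below it separately, so positive and negative stacks can be scaled and
        # cumulated independently.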
series = self.secondary_series if secondary else self.series
transposed = list(zip(*[serie.values for serie in series]))
positive_vals = [sum([
val for val in vals
if val is not None and val >= self.zero])
for vals in transposed]
negative_vals = [sum([
val
for val in vals
if val is not None and val < self.zero])
for vals in transposed]
return positive_vals, negative_vals
def _compute_box(self, positive_vals, negative_vals):
self._box.ymin = negative_vals and min(min(negative_vals), self.zero)
self._box.ymax = positive_vals and max(max(positive_vals), self.zero)
def _compute(self):
positive_vals, negative_vals = self._get_separated_values()
self._compute_box(positive_vals, negative_vals)
if self.logarithmic:
positive_vals = list(filter(lambda x: x > 0, positive_vals))
negative_vals = list(filter(lambda x: x > 0, negative_vals))
positive_vals = positive_vals or [self.zero]
negative_vals = negative_vals or [self.zero]
x_pos = [
x / self._len for x in range(self._len + 1)
] if self._len > 1 else [0, 1] # Center if only one value
self._points(x_pos)
y_pos = compute_scale(
self._box.ymin, self._box.ymax, self.logarithmic, self.order_min
) if not self.y_labels else list(map(float, self.y_labels))
self._x_ranges = zip(x_pos, x_pos[1:])
self._x_labels = self.x_labels and list(zip(self.x_labels, [
sum(x_range) / 2 for x_range in self._x_ranges]))
self._y_labels = list(zip(map(self._format, y_pos), y_pos))
self.negative_cumulation = [0] * self._len
self.positive_cumulation = [0] * self._len
if self.secondary_series:
positive_vals, negative_vals = self._get_separated_values(True)
self.secondary_negative_cumulation = [0] * self._len
self.secondary_positive_cumulation = [0] * self._len
# In case of pyramids
sum_ = lambda x: sum(x) if isinstance(x, tuple) else x
self._secondary_min = (negative_vals and min(
sum_(min(negative_vals)), self.zero)) or self.zero
self._secondary_max = (positive_vals and max(
sum_(max(positive_vals)), self.zero)) or self.zero
def _bar(self, parent, x, y, index, i, zero, shift=False, secondary=False):
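        # Stack this bar on the running total for column ``i``: the previous
        # cumulated value becomes the bar's base, and the current value is added
        # to the positive or negative accumulator as appropriate.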
if secondary:
cumulation = (self.secondary_negative_cumulation
if y < self.zero else
self.secondary_positive_cumulation)
else:
cumulation = (self.negative_cumulation
if y < self.zero else
self.positive_cumulation)
zero = cumulation[i]
cumulation[i] = zero + y
if zero == 0:
zero = self.zero
y -= self.zero
y += zero
width = (self.view.x(1) - self.view.x(0)) / self._len
x, y = self.view((x, y))
series_margin = width * self._series_margin
x += series_margin
width -= 2 * series_margin
if self.secondary_series:
width /= 2
x += int(secondary) * width
serie_margin = width * self._serie_margin
x += serie_margin
width -= 2 * serie_margin
height = self.view.y(zero) - y
r = self.rounded_bars * 1 if self.rounded_bars else 0
self.svg.transposable_node(
parent, 'rect',
x=x, y=y, rx=r, ry=r, width=width, height=height,
class_='rect reactive tooltip-trigger')
transpose = swap if self.horizontal else ident
return transpose((x + width / 2, y + height / 2))
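# A minimal usage sketch (assumption; based on pygal's public API):
#     import pygal
#     chart = pygal.StackedBar()
#     chart.add('Series A', [1, 2, 3])
#     chart.add('Series B', [3, 2, 1])
#     chart.render_to_file('stacked_bar.svg')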
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
# Create your models here.
class Course(models.Model):
"""
Represents a course at a school, made unique by its course code.
Courses persist across semesters and years. Their presence in a semester or year
is indicated by the existence of sections assigned to that course for that semester
    or year. This is why a course does not have fields like professor, since those vary
    across sections and semesters.
The course model maintains only attributes which tend not to vary across semesters
or years.
A course has many :obj:`Section` which a student can enroll in.
Attributes:
code (:obj:`CharField`): the course code without indication of section (E.g. EN.600.100)
name (:obj:`CharField`): the general name of the course (E.g. Calculus I)
        description (:obj:`TextField`): the explanation of the content of the course
department (:obj:`CharField`): department offering course (e.g. Computer Science)
level (:obj:`CharField`): indicator of level of course (e.g. 100, 200, Upper, Lower, Grad)
"""
code = models.CharField(max_length=20, primary_key=True)
name = models.CharField(max_length=250)
description = models.TextField(default='')
department = models.CharField(max_length=250, default='', null=True)
level = models.CharField(max_length=500, default='', null=True)
def __str__(self):
return self.code + ": " + self.name
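# Illustrative usage sketch (assumption; standard Django ORM calls):
#     Course.objects.filter(department="Computer Science")
#     Course.objects.get(pk="EN.600.100")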
|
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
from sqlalchemy.ext.declarative import declared_attr
from ggrc import db
from ggrc.models.associationproxy import association_proxy
from ggrc.models import mixins
from ggrc.models.deferred import deferred
from ggrc.models.object_owner import Ownable
from ggrc.models.reflection import PublishOnly
from ggrc.models.relationship import Relatable
from ggrc.models.track_object_state import HasObjectState
class Risk(HasObjectState, mixins.CustomAttributable, mixins.Stateful,
Relatable, mixins.Described, Ownable,
mixins.WithContact, mixins.Titled, mixins.Timeboxed,
mixins.Slugged, mixins.Noted, mixins.Hyperlinked, mixins.Base,
db.Model):
__tablename__ = 'risks'
VALID_STATES = [
'Draft',
'Final',
'Effective',
'Ineffective',
'Launched',
'Not Launched',
'In Scope',
'Not in Scope',
'Deprecated',
]
# Overriding mixin to make mandatory
@declared_attr
def description(cls):
return deferred(db.Column(db.Text, nullable=False), cls.__name__)
risk_objects = db.relationship(
'RiskObject', backref='risk', cascade='all, delete-orphan')
objects = association_proxy('risk_objects', 'object', 'RiskObject')
_publish_attrs = [
'risk_objects',
PublishOnly('objects'),
]
_aliases = {
"contact": {
"display_name": "Contact",
"filter_by": "_filter_by_contact",
},
"secondary_contact": None,
}
|
import copy
import io
import pytest
import typing
import argparse
from mitmproxy import options
from mitmproxy import optmanager
from mitmproxy import exceptions
class TO(optmanager.OptManager):
def __init__(self):
super().__init__()
self.add_option("one", typing.Optional[int], None, "help")
self.add_option("two", typing.Optional[int], 2, "help")
self.add_option("bool", bool, False, "help")
self.add_option("required_int", int, 2, "help")
class TD(optmanager.OptManager):
def __init__(self):
super().__init__()
self.add_option("one", str, "done", "help")
self.add_option("two", str, "dtwo", "help")
class TD2(TD):
def __init__(self):
super().__init__()
self.add_option("three", str, "dthree", "help")
self.add_option("four", str, "dfour", "help")
class TM(optmanager.OptManager):
def __init__(self):
super().__init__()
self.add_option("two", typing.Sequence[str], ["foo"], "help")
self.add_option("one", typing.Optional[str], None, "help")
def test_defaults():
o = TD2()
defaults = {
"one": "done",
"two": "dtwo",
"three": "dthree",
"four": "dfour",
}
for k, v in defaults.items():
assert o.default(k) == v
assert not o.has_changed("one")
newvals = dict(
one="xone",
two="xtwo",
three="xthree",
four="xfour",
)
o.update(**newvals)
assert o.has_changed("one")
for k, v in newvals.items():
assert v == getattr(o, k)
o.reset()
assert not o.has_changed("one")
for k in o.keys():
assert not o.has_changed(k)
def test_required_int():
o = TO()
with pytest.raises(exceptions.OptionsError):
o.parse_setval(o._options["required_int"], None, None)
def test_deepcopy():
o = TD()
copy.deepcopy(o)
def test_options():
o = TO()
assert o.keys() == {"bool", "one", "two", "required_int"}
assert o.one is None
assert o.two == 2
o.one = 1
assert o.one == 1
with pytest.raises(TypeError):
TO(nonexistent = "value")
with pytest.raises(Exception, match="Unknown options"):
o.nonexistent = "value"
with pytest.raises(Exception, match="Unknown options"):
o.update(nonexistent = "value")
assert o.update_known(nonexistent = "value") == {"nonexistent": "value"}
rec = []
def sub(opts, updated):
rec.append(copy.copy(opts))
o.changed.connect(sub)
o.one = 90
assert len(rec) == 1
assert rec[-1].one == 90
o.update(one=3)
assert len(rec) == 2
assert rec[-1].one == 3
def test_setter():
o = TO()
f = o.setter("two")
f(99)
assert o.two == 99
with pytest.raises(Exception, match="No such option"):
o.setter("nonexistent")
def test_toggler():
o = TO()
f = o.toggler("bool")
assert o.bool is False
f()
assert o.bool is True
f()
assert o.bool is False
with pytest.raises(Exception, match="No such option"):
o.toggler("nonexistent")
with pytest.raises(Exception, match="boolean options"):
o.toggler("one")
class Rec():
def __init__(self):
self.called = None
def __call__(self, *args, **kwargs):
self.called = (args, kwargs)
def test_subscribe():
o = TO()
r = Rec()
# pytest.raises keeps a reference here that interferes with the cleanup test
# further down.
try:
o.subscribe(r, ["unknown"])
except exceptions.OptionsError:
pass
else:
raise AssertionError
assert len(o.changed.receivers) == 0
o.subscribe(r, ["two"])
o.one = 2
assert not r.called
o.two = 3
assert r.called
assert len(o.changed.receivers) == 1
del r
o.two = 4
assert len(o.changed.receivers) == 0
class binder:
def __init__(self):
self.o = TO()
self.called = False
self.o.subscribe(self.bound, ["two"])
def bound(self, *args, **kwargs):
self.called = True
t = binder()
t.o.one = 3
assert not t.called
t.o.two = 3
assert t.called
def test_rollback():
o = TO()
rec = []
def sub(opts, updated):
rec.append(copy.copy(opts))
recerr = []
def errsub(opts, **kwargs):
recerr.append(kwargs)
def err(opts, updated):
if opts.one == 10:
raise exceptions.OptionsError()
if opts.bool is True:
raise exceptions.OptionsError()
o.changed.connect(sub)
o.changed.connect(err)
o.errored.connect(errsub)
assert o.one is None
with pytest.raises(exceptions.OptionsError):
o.one = 10
assert o.one is None
with pytest.raises(exceptions.OptionsError):
o.bool = True
assert o.bool is False
assert isinstance(recerr[0]["exc"], exceptions.OptionsError)
assert o.one is None
assert o.bool is False
assert len(rec) == 4
assert rec[0].one == 10
assert rec[1].one is None
assert rec[2].bool is True
assert rec[3].bool is False
with pytest.raises(exceptions.OptionsError):
with o.rollback({"one"}, reraise=True):
raise exceptions.OptionsError()
def test_simple():
assert repr(TO())
assert "one" in TO()
def test_items():
assert TO().items()
def test_serialize():
def serialize(opts: optmanager.OptManager, text: str, defaults: bool = False) -> str:
buf = io.StringIO()
optmanager.serialize(opts, buf, text, defaults)
return buf.getvalue()
o = TD2()
o.three = "set"
assert "dfour" in serialize(o, "", defaults=True)
data = serialize(o, "")
assert "dfour" not in data
o2 = TD2()
optmanager.load(o2, data)
assert o2 == o
assert not o == 42
t = """
unknown: foo
"""
data = serialize(o, t)
o2 = TD2()
optmanager.load(o2, data)
assert o2 == o
t = "invalid: foo\ninvalid"
with pytest.raises(Exception, match="Config error"):
optmanager.load(o2, t)
t = "invalid"
with pytest.raises(Exception, match="Config error"):
optmanager.load(o2, t)
t = "# a comment"
optmanager.load(o2, t)
optmanager.load(o2, "foobar: '123'")
assert o2.deferred == {"foobar": "123"}
t = ""
optmanager.load(o2, t)
optmanager.load(o2, "foobar: '123'")
assert o2.deferred == {"foobar": "123"}
def test_serialize_defaults():
o = options.Options()
buf = io.StringIO()
optmanager.serialize(o, buf, "", defaults=True)
assert buf.getvalue()
def test_saving(tmpdir):
o = TD2()
o.three = "set"
dst = str(tmpdir.join("conf"))
optmanager.save(o, dst, defaults=True)
o2 = TD2()
optmanager.load_paths(o2, dst)
o2.three = "foo"
optmanager.save(o2, dst, defaults=True)
optmanager.load_paths(o, dst)
assert o.three == "foo"
with open(dst, 'a') as f:
f.write("foobar: '123'")
optmanager.load_paths(o, dst)
assert o.deferred == {"foobar": "123"}
with open(dst, 'a') as f:
f.write("'''")
with pytest.raises(exceptions.OptionsError):
optmanager.load_paths(o, dst)
with open(dst, 'wb') as f:
f.write(b"\x01\x02\x03")
with pytest.raises(exceptions.OptionsError):
optmanager.load_paths(o, dst)
with pytest.raises(exceptions.OptionsError):
optmanager.save(o, dst)
with open(dst, 'wb') as f:
f.write(b"\xff\xff\xff")
with pytest.raises(exceptions.OptionsError):
optmanager.load_paths(o, dst)
with pytest.raises(exceptions.OptionsError):
optmanager.save(o, dst)
def test_merge():
m = TM()
m.merge(dict(one="two"))
assert m.one == "two"
m.merge(dict(one=None))
assert m.one == "two"
m.merge(dict(two=["bar"]))
assert m.two == ["foo", "bar"]
def test_option():
o = optmanager._Option("test", int, 1, "help", None)
assert o.current() == 1
with pytest.raises(TypeError):
o.set("foo")
with pytest.raises(TypeError):
optmanager._Option("test", str, 1, "help", None)
o2 = optmanager._Option("test", int, 1, "help", None)
assert o2 == o
o2.set(5)
assert o2 != o
def test_dump_defaults():
o = TTypes()
buf = io.StringIO()
optmanager.dump_defaults(o, buf)
assert buf.getvalue()
def test_dump_dicts():
o = options.Options()
assert optmanager.dump_dicts(o)
assert optmanager.dump_dicts(o, ['http2', 'listen_port'])
class TTypes(optmanager.OptManager):
def __init__(self):
super().__init__()
self.add_option("str", str, "str", "help")
self.add_option("choices", str, "foo", "help", ["foo", "bar", "baz"])
self.add_option("optstr", typing.Optional[str], "optstr", "help")
self.add_option("bool", bool, False, "help")
self.add_option("bool_on", bool, True, "help")
self.add_option("int", int, 0, "help")
self.add_option("optint", typing.Optional[int], 0, "help")
self.add_option("seqstr", typing.Sequence[str], [], "help")
self.add_option("unknown", float, 0.0, "help")
def test_make_parser():
parser = argparse.ArgumentParser()
opts = TTypes()
opts.make_parser(parser, "str", short="a")
opts.make_parser(parser, "bool", short="b")
opts.make_parser(parser, "int", short="c")
opts.make_parser(parser, "seqstr", short="d")
opts.make_parser(parser, "bool_on", short="e")
with pytest.raises(ValueError):
opts.make_parser(parser, "unknown")
    # Nonexistent options are ignored
opts.make_parser(parser, "nonexistentxxx")
def test_set():
opts = TTypes()
opts.set("str=foo")
assert opts.str == "foo"
with pytest.raises(TypeError):
opts.set("str")
opts.set("optstr=foo")
assert opts.optstr == "foo"
opts.set("optstr")
assert opts.optstr is None
opts.set("bool=false")
assert opts.bool is False
opts.set("bool")
assert opts.bool is True
opts.set("bool=true")
assert opts.bool is True
with pytest.raises(exceptions.OptionsError):
opts.set("bool=wobble")
opts.set("bool=toggle")
assert opts.bool is False
opts.set("bool=toggle")
assert opts.bool is True
opts.set("int=1")
assert opts.int == 1
with pytest.raises(exceptions.OptionsError):
opts.set("int=wobble")
opts.set("optint")
assert opts.optint is None
assert opts.seqstr == []
opts.set("seqstr=foo")
assert opts.seqstr == ["foo"]
opts.set("seqstr=bar")
assert opts.seqstr == ["foo", "bar"]
opts.set("seqstr")
assert opts.seqstr == []
opts.set(*('seqstr=foo', 'seqstr=bar'))
assert opts.seqstr == ["foo", "bar"]
with pytest.raises(exceptions.OptionsError):
opts.set("deferredoption=wobble")
opts.set("deferredoption=wobble", defer=True)
assert "deferredoption" in opts.deferred
opts.process_deferred()
assert "deferredoption" in opts.deferred
opts.add_option("deferredoption", str, "default", "help")
opts.process_deferred()
assert "deferredoption" not in opts.deferred
assert opts.deferredoption == "wobble"
opts.set(*('deferredsequenceoption=a', 'deferredsequenceoption=b'), defer=True)
assert "deferredsequenceoption" in opts.deferred
opts.process_deferred()
assert "deferredsequenceoption" in opts.deferred
opts.add_option("deferredsequenceoption", typing.Sequence[str], [], "help")
opts.process_deferred()
assert "deferredsequenceoption" not in opts.deferred
assert opts.deferredsequenceoption == ["a", "b"]
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 22 16:15:28 2016
@author: login
"""
from scipy.io import loadmat
import pandas as pd
import numpy as np
import sys, os
from sklearn.decomposition import PCA, LatentDirichletAllocation
from sklearn.preprocessing import StandardScaler
from copy import deepcopy
from scipy import stats as ss
import matplotlib.pyplot as plt
from rarefy import rarefy_table
def rarefyOTUtables(otu_df, meta_df, min_size=None):
rare_table = rarefy_table(otu_df, min_size)
samps1, samps2 = otu_df.shape[0], rare_table.shape[0]
if samps1 != samps2:
print "\t{} samples dropped".format((samps1-samps2))
aft4 = set(list(rare_table.index))
bee4 = set(list(otu_df.index))
samps_dropped = list(bee4.symmetric_difference(aft4))
rt_m = meta_df.copy().drop(samps_dropped)
else:
rt_m = meta_df.copy()
rt_m.ix[:, otu_df.columns] = rare_table
return rare_table, rt_m
def simpleProportions(otu_df, meta_df):
sp_otu = otu_df.divide(otu_df.sum(axis=1), axis='rows')
sp_m = meta_df.copy()
sp_m.ix[:, otu_df.columns] = sp_otu
return sp_otu, sp_m
def standardScaled(otu_df, meta_df):
data_ = otu_df.copy().values
new_data_ = StandardScaler().fit_transform(data_)
ss_otu = pd.DataFrame(new_data_, index=otu_df.index, columns=otu_df.columns)
ss_m = meta_df.copy()
ss_m.ix[:, ss_otu.columns] = ss_otu.ix[:, ss_otu.columns]
return ss_otu, ss_m
def seq_to_taxa(seq_list, all_taxa, taxa_series):
"""
    Accepts a list of `seq` labels, the `all_taxa` set, and the `taxa_series` map.
    Returns a vector of taxonomic occupancy of that set.
"""
def split_seq(seq):
return taxa_series[seq].split(";")
long_taxa_arr = np.array(map(split_seq, seq_list))
axis, counts = np.unique(long_taxa_arr, return_counts=True)
this_taxa_ser = pd.Series(data=np.zeros((len(all_taxa),)),
index=list(all_taxa))
for s, n in zip(axis, counts):
this_taxa_ser[s] = n
return this_taxa_ser
def score_clusters(test_cluster_dict, all_taxa, taxa_series, test_labels):
print "Scoring clusters"
top_level = np.array(test_cluster_dict.keys()).max()
bottom_level=np.array(test_cluster_dict.keys()).min()
iteration_order = np.arange(bottom_level, top_level)[::-1]
p_values = np.ones(iteration_order.shape)
child_clusts = np.zeros(iteration_order.shape)
parent_clusts = np.zeros(iteration_order.shape)
for idx, clust_n in enumerate(iteration_order):
this_clust = test_cluster_dict[clust_n]
this_labels = [test_labels[int(i)] for i in this_clust]
this_clust_set = set(this_clust)
this_taxa_ser = seq_to_taxa(this_labels, all_taxa, taxa_series)
higher_tree_levels = np.arange(clust_n+1,top_level+1)
for clust_m in higher_tree_levels:
higher_clust = set(test_cluster_dict[clust_m])
if this_clust_set.issubset(higher_clust):
parent_clust = [test_labels[i] for i in list(higher_clust)]
break
else:
pass
higher_taxa_ser = seq_to_taxa(parent_clust, all_taxa, taxa_series)
parent_clusts[idx] = clust_m
child_clusts[idx] = clust_n
p_values[idx] = Ftest_pvalue(this_taxa_ser.values, higher_taxa_ser.values)
cluster_df_data = np.vstack((parent_clusts,
child_clusts,
p_values)).T
clust_df_cols = ['parent', 'child', 'p-val']
cluster_df = pd.DataFrame(data=cluster_df_data, columns=clust_df_cols)
print "Analyzed clusters"
return cluster_df
def Ftest_pvalue(d1,d2):
"""takes two vectors and performs an F-test, returning the p value"""
df1 = len(d1) - 1
df2 = len(d2) - 1
F = np.var(d1) / np.var(d2)
p_value = ss.f.cdf(F, df1, df2)
return p_value
def append_depths(df, depths_vector):
"""
    This takes a df with a series of surface concentrations, i.e. it has a depth
    column that is 0 in all rows. It then:
    1. copies the df as provided to create a template
    2. iterates over depths_vector, pulling out each depth sequentially
    3. copies the template
    4. modifies the copied template to the particular depth
    5. appends the modified copy to the originally provided df
    The purpose of this is to use regression-based methods to see if the
    concentration at the surface is somehow related to the distributions of
    microbes throughout the water column.
"""
n_rows, _ = df.shape
multiples = len(depths_vector)
expected_rows_n = n_rows*multiples
new_depth_template = df.copy()
for d in depths_vector:
if d != 0:
this_depth = new_depth_template.copy()
this_depth['depth'] = np.ones((new_depth_template.shape[0], ))*d
df = df.append(this_depth)
print "Returning df of shape {}, expecting {} rows".format(df.shape,
expected_rows_n)
return df
def removeZeroCols(df):
return (df.T[(df != 0).any()]).T
def parseBiosample(df):
"""
This function accepts a df with samples in rows and OTUs in columns
and parses the biosample key
"""
biosamples = list(df.index)
dates_, primers_, kits_, replicates_, depths_ = [], [], [], [], []
for bs in biosamples:
if bs[:2] == "SB":
clipped = bs[2:]
else:
sys.exit("Non-SB start to biosample")
if 'TAWMD' in clipped:
date, rest = clipped.split("TAWMD")
elif "TAWWD" in clipped:
date, rest = clipped.split("TAWWD")
else:
sys.exit("Bridge Sequence not Detected")
if "VV4T" in rest:
primer = "VV4"
depth, rest2 = rest.split("VV4T")
elif "VV4V5T" in rest:
primer = "V4V5"
depth, rest2 = rest.split("VV4V5T")
else:
sys.exit("Primer Sequence not Detected")
if rest2[0] == "M":
kit = "MolBio"
elif rest2[0] == "Q":
kit = "Qiagen"
elif rest2[:4] == "filt" and rest2[4] == "M":
kit = "MolBio"
elif rest2[:2] == "NA":
kit = "NA"
else:
print clipped
print rest2[0]
sys.exit("kit type not detected")
if rest2[-2] == "R":
replicate = rest2[-1]
else:
sys.exit("replicate signifier not detected")
if depth == '015':
depth = '01.5'
dates_.append(date)
primers_.append(primer)
kits_.append(kit)
replicates_.append(replicate)
depths_.append(depth)
df['date'] = dates_
df['primers'] = primers_
df['kit'] = kits_
df['replicates'] = replicates_
df['depth'] = depths_
return df
def add_quadrants(dF):
depth_list = list(dF.depth)
depth_ns = np.array([float(n) for n in depth_list])
quad_ns = np.array([" "]*len(depth_ns))
quad_ns[depth_ns < 5] = "Q1"
quad_ns[(depth_ns > 4) & (depth_ns < 11)]= "Q2"
quad_ns[(depth_ns > 10) & (depth_ns < 17)]= "Q3"
quad_ns[depth_ns > 16]= "Q4"
dF["Quadrants"] = quad_ns
return dF.copy()
def numericEncodings(df_orig, metadata_cols, verbose=True):
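    # Encode categorical metadata in place: selected string columns become small
    # integers, dates become days since the earliest date, depths become floats.
    # Returns the encoded frame and a {column: {code: original_value}} mapping.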
df = df_orig.copy()
print "Changing {} metadata columns".format(len(metadata_cols))
unq_metadata = {i:{} for i in metadata_cols}
int_vals = ['Forward read length', 'Index read length', 'Quadrants',
'Sequencing Date', 'Sequencing platform', 'kit', 'primers',
'replicates']
dontTransform = ['Coverage', 'TotalSeqs', 'BadBarcodes', 'GoodBarcodes',
'seasonality']
for md, unqs in unq_metadata.items():
if md in int_vals:
if verbose:
print "\n", md
for num, unq in enumerate(np.unique(df[md])):
num+=1
unqs[num] = unq
if verbose == True:
print "Encoding", unq, "as", num
bool_ = df[md] == unq
df.ix[bool_, md] = num
elif md in dontTransform:
pass
elif md == 'date' or md == 'Date':
earliest = df[md].min()
earliest = pd.to_datetime(earliest)
for unq in np.unique(df[md]):
this_day = pd.to_datetime(unq)
td = (this_day - earliest).days
unqs[td] = unq
if verbose == True:
print "Encoding", unq, "as", td, "(days since first day)"
bool_ = df[md] == unq
df.ix[bool_, md] = td
elif md == 'depth' or md == 'Depth':
for unq in np.unique(df[md]):
unqs[unq] = float(unq)
if verbose == True:
print "Encoding", unq, "as", float(unq)
bool_ = df[md] == unq
df.ix[bool_, md] = float(unq)
else:
sys.exit("Illegal var type detected")
for i in metadata_cols:
df[[i]] = df[[i]].apply(pd.to_numeric)
df2 = df.copy()
return df2, unq_metadata
from itertools import groupby
def listRepGroups(df):
# make a list of the index
dfindex = list(df.index)
# create a list for each grouping and a list of already matched samples
rep_groups, consumed = [], []
# Start with one index
for first in dfindex:
this_group = []
# Check the entire list for members that match
if first not in consumed:
for second in dfindex:
# If a sample wasn't already consumed, and isn't the exact same sample
if second not in consumed and first != second:
# check for a match
if first.split("VV")[0] == second.split("VV")[0]:
# if detected, add to the already consumed list
consumed.append(second)
this_group.append(second)
if len(this_group) != 0:
this_group.append(first)
this_group.sort()
rep_groups.append(this_group)
rep_groups.sort()
unq_rep_groups = list(rg for rg,_ in groupby(rep_groups))
print len(unq_rep_groups), "groups of replicates detected"
return unq_rep_groups
def KLD(x,y):
a = np.array(x, dtype=float) + 0.000001
b = np.array(y, dtype=float) + 0.000001
return (a*np.log(a/b)).sum()
def rootJSD(x,y):
a = np.array(x, dtype=float)
b = np.array(y, dtype=float)
return np.sqrt(0.5 * KLD(a, (a+b)/2) + 0.5 * KLD(b, (a+b)/2))
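# Worked example (hypothetical distributions): rootJSD([0.5, 0.5], [0.5, 0.5])
# is ~0, while rootJSD([1.0, 0.0], [0.0, 1.0]) approaches sqrt(ln 2) ~ 0.83,
# the upper bound of the square-root Jensen-Shannon distance (natural log).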
def distant_replicate_covariates(broken_groups, df, df_m, metadata):
# create a dictionary with the combined keys of (mdata_cat + mdata_val)
# for each instance add the average distance for an individual
super_meta_dict = {}
m_only_df = df_m.ix[:, metadata]
# measure average distance to all other replicates
for grp in broken_groups:
dists = JensenShannonDiv_Sqrt(df.ix[grp, :])
for row, idx in zip(range(dists.shape[0]), dists.index):
nz_row = dists.ix[row, :].values
this_ird = nz_row[nz_row !=0].mean()
# add to a particular column & value key
for col in m_only_df.columns:
if col != 'replicates' and col !='depth':
this_val = m_only_df.ix[idx, col]
jk = str(col) + "_" + str(this_val)
if not super_meta_dict.has_key(jk):
super_meta_dict[jk] = [this_ird]
else:
super_meta_dict[jk].append(this_ird)
meta_dict_u = {}
for k, v in super_meta_dict.items():
meta_dict_u[k] = np.array(v)
return meta_dict_u
def plot_interreplicate_distances(df_otus, rep_groups, fnum):
""""
plot distribution of inter-replicate distances
"""
all_dists = []
for idx, group in enumerate(rep_groups):
this_grps = df_otus.ix[group, :]
dist_dat = JensenShannonDiv_Sqrt(this_grps)
for a_d in dist_dat.values.flatten():
if a_d != 0:
all_dists.append(a_d)
plt.figure(fnum, figsize=(8,6))
plt.clf()
plt.hist(all_dists, bins=50)
plt.tick_params(labelsize=14)
plt.xlabel("root JS distance (inter-replicate)", fontsize=14)
plt.ylabel("N", fontsize=14)
return None
def JensenShannonDiv_Sqrt(df_otu):
ps_df = df_otu.copy()
ps_n_df = ps_df.divide(ps_df.sum(axis=1), axis=0)
shape_sq = len(ps_n_df.index)
dist_dat = np.zeros((shape_sq, shape_sq))
for r_idx, r in enumerate(ps_n_df.index):
for c_idx, c in enumerate(ps_n_df.index):
x_ = ps_n_df.ix[r, :].values
y_ = ps_n_df.ix[c, :].values
dist_dat[r_idx, c_idx] = rootJSD(x_, y_)
dist_mat = pd.DataFrame(index=ps_n_df.index, columns=ps_n_df.index,
data=dist_dat)
return dist_mat
def centeredLogRatio(otu_table, otu_table_m):
from scipy.stats.mstats import gmean
noZeros = otu_table.copy().replace(0, np.nan)
geomeans = np.repeat(np.nan, repeats = noZeros.shape[0])
for i in range(0, noZeros.shape[0]):
geomeans[i] = gmean(noZeros.ix[i, :].dropna())
clr_table = np.log(noZeros.divide(geomeans, axis=0))
clr_table.replace(np.nan, 0, inplace=True)
clr_table_m = otu_table_m.copy()
clr_table_m.ix[:, otu_table.columns] = clr_table
return clr_table, clr_table_m
def softmax(x):
"""Compute softmax values for each sets of scores in x."""
return np.exp(x) / np.sum(np.exp(x), axis=0)
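# Illustration (hypothetical scores): softmax(np.array([0.0, 0.0])) -> array([0.5, 0.5]);
# larger scores receive exponentially more of the probability mass.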
def ReplicateReport(df, df_otus, rep_groups, verbose=True, metric="JSD"):
print "REPLICATE REPORT"
in_rep_distances, worst_reps = [], []
broken_groups = 0
rep_groups_mutable = deepcopy(rep_groups)
for idx, group in enumerate(rep_groups_mutable):
if verbose:
print "Group {}".format(idx)
this_grps = df_otus.ix[group, :]
dist_mat = JensenShannonDiv_Sqrt(this_grps)
# Returns a symmetric matrix with 0s on the diagonal
# So we pull out all unique excluding those on the diagonal
for a_d in np.unique(dist_mat.values):
if a_d != 0:
in_rep_distances.append(a_d)
if verbose == True:
print dist_mat
most_distant = dist_mat.max().max()
if verbose:
print "Most Distant: {}".format(most_distant)
if most_distant > 0.3:
broken_groups+=1
while most_distant > 0.3:
# find one of the pair that are most divergent
bad_reps_bool = (dist_mat.max() == dist_mat.max().max())
bad_means = dist_mat[bad_reps_bool].mean(axis=1)
worst_rep = bad_means.argmax()
worst_reps.append(worst_rep)
if verbose:
print "\tdropping {}".format(worst_rep)
group.remove(worst_rep)
this_grps = df_otus.ix[group, :]
dist_mat = JensenShannonDiv_Sqrt(this_grps)
most_distant = dist_mat.max().max()
if verbose:
print "\tmost distant now: {}".format(most_distant)
return worst_reps, broken_groups
def originate_rep_groupings(final_rep_groups):
final_rep_dict = []
for g in final_rep_groups:
this_dict = {mem:(idx+1) for idx, mem in enumerate(g)}
final_rep_dict.append(this_dict)
return final_rep_dict
def matchXandYbyIndex(clr_x, model_proc_df):
"""
This fxn drops rows in the design matrix & response vector
according to index equivalency.
"""
# drop all rows in x that don't match to y
x_bool = clr_x.index.isin(model_proc_df.index)
x_1 = clr_x[x_bool]
# drop all values in y that don't match to x_1
y_bool = model_proc_df.index.isin(x_1.index)
y_1 = model_proc_df[y_bool]
print "X matrix shape {} reduced to {} rows".format(clr_x.shape,
(x_bool).sum())
print "Y matrix shape {} reduced to {} rows".format(model_proc_df.shape,
(y_bool).sum())
return x_1.values, y_1.values
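# Illustration (hypothetical indices): if clr_x is indexed by ['s1', 's2', 's3']
# and model_proc_df by ['s2', 's3', 's4'], only the shared rows 's2' and 's3'
# survive, and the two returned arrays are row-aligned.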
from sklearn.metrics import r2_score
from sklearn.linear_model import RidgeCV
from sklearn.feature_selection import f_regression
from sklearn.model_selection import train_test_split
def lasso_ridge_ftest(shared_x, baton_twirler):
"""performs 3 tests on a given set of y vectors and returns scores,
coefficients and selected features"""
metrics_ = ['r2-ridge', 'r2-parsi', 'parsi-n', 'df', 'df-parsi']
score_matrix = np.zeros( (len(baton_twirler.columns), len(metrics_)))
score_df = pd.DataFrame(data = score_matrix,
index = baton_twirler.columns,
columns = metrics_)
lm_coeff_mat1 = np.zeros( (len(baton_twirler.columns), len(shared_x.columns)))
lm_coeff_mat2 = np.zeros( (len(baton_twirler.columns), len(shared_x.columns)))
ridge_coeff_df = pd.DataFrame(data = lm_coeff_mat1,
index = baton_twirler.columns,
columns = shared_x.columns)
parsi_coeff_df = pd.DataFrame(data = lm_coeff_mat2,
index = baton_twirler.columns,
columns = shared_x.columns)
select_features = {}
for baton in baton_twirler.columns:
print "Testing fit for {}".format(baton)
y_vector = baton_twirler.ix[:, baton]
if y_vector.isnull().sum() > 0:
some_nans = float(y_vector.isnull().sum())
print "NaNs detected ({} of vector) and dropped".format(some_nans/len(y_vector))
y_vector = y_vector.dropna()
X_matched, y_matched_us = matchXandYbyIndex(shared_x, y_vector)
ss2 = StandardScaler()
y_matched = ss2.fit_transform(y_matched_us.reshape(-1,1))
# select only most important features for building model
F_values, p_values = f_regression(X_matched, y_matched)
p_val_series = pd.Series(index=shared_x.columns, data=p_values)
## Bonferroni Correction
sig_thresh = 0.05 / float(len(p_values))
these_features = set(list(p_val_series[p_val_series < sig_thresh].index))
select_features[baton] = these_features
parsi_n = len(these_features)
not_sig_feats = set(shared_x.columns).symmetric_difference(these_features)
# make a sparser design matrix
if len(these_features) > 1:
parsi_x = shared_x.drop(not_sig_feats, axis=1)
else:
parsi_x = shared_x.copy()
X_matched_p, _ = matchXandYbyIndex(parsi_x, y_vector)
df_normal = X_matched.shape[0]*X_matched.shape[1] - 1
df_parsi = X_matched_p.shape[0]*X_matched_p.shape[1] - 1
fold_scores = np.zeros((2, 10))
# score both models
for design_mat, axis_ in zip([X_matched, X_matched_p], [0,1]):
# cross validation is for scoring purposes
for fold in range(10):
                # NOTE: seed varies per fold so the ten train/test splits differ
                X_train, X_test, y_train, y_test = train_test_split(design_mat,
                                                                    y_matched,
                                                                    test_size=0.3,
                                                                    random_state=fold)
                # maybe only use significant predictors to shrink
ridge_model = RidgeCV()
ridge_model.fit(X_train, y_train)
y_predicted_ridge = ridge_model.predict(X_test)
fold_scores[axis_, fold] = r2_score(y_test, y_predicted_ridge)
# take the average scores
if fold_scores[0, :].mean() < -1.0:
r2_ridge = -1.0
else:
r2_ridge = fold_scores[0, :].mean()
if fold_scores[1, :].mean() < -1.0:
r2_parsi = -1.0
else:
r2_parsi = fold_scores[1, :].mean()
scores = [r2_ridge , r2_parsi, parsi_n, df_normal, df_parsi]
score_df.ix[baton, :] = np.array(scores)
parsi_model, ridge_model = RidgeCV(), RidgeCV()
ridge_model.fit(X_matched, y_matched)
parsi_model.fit(X_matched_p, y_matched)
ridge_coeff_df.ix[baton, :] = ridge_model.coef_
parsi_coeff_df.ix[baton, parsi_x.columns] = parsi_model.coef_
full_results = [ridge_coeff_df, parsi_coeff_df, score_df, select_features]
return full_results
def prettify_date_string(time_stamp):
return str(time_stamp).split(" ")[0]
def dropBadReps(less_diverse_reps, rep_groups):
"""
1. Unpack a copy of current groups
2. Create a variable to hold groups that pass
    3. If a flagged (low-quality) rep is in this group, remove it
4. If replicate groups have >1 member, add it to new group list
5. If not, drop it from group list
6. Return newly assembled list of lists
"""
old_groups = deepcopy(rep_groups)
new_rep_groups, broken_groups = [], []
for g in old_groups:
for l in less_diverse_reps:
if l in g:
g.remove(l)
if len(g) > 1:
new_rep_groups.append(g)
else:
broken_groups.append(g)
return new_rep_groups, broken_groups
import matplotlib.pyplot as plt
import seaborn as sns
def plotHeatmap(df, fignum):
plt.figure(fignum, figsize=(12,9))
ax = sns.heatmap(df)
ylab = ax.get_ylabel(); xlab = ax.get_xlabel();
ax.set_xlabel(xlab, fontsize=16); ax.set_ylabel(ylab, fontsize=16)
for item in ax.get_yticklabels():
item.set_rotation(0)
item.set_fontsize(14)
for item in ax.get_xticklabels():
item.set_rotation(90)
item.set_fontsize(14)
#plt.savefig('seabornPandas.png', dpi=100)
plt.tight_layout()
plt.show()
from collections import OrderedDict
def plotTimeCourse(df, path):
date_tracker = []
new_idx = []
date_set = OrderedDict()
for i, idx in enumerate(df.index):
print "{}. {}".format(i, str(idx[0]).split(" ")[0])
date = str(idx[0]).split(" ")[0]
date_set[date] = None
new_idx.append(int(idx[1]))
dates_ = list(date_set.keys())
date_nums = range(1,len(dates_)+1)
date_dict = {i:(float(j)/10) for i, j in zip(dates_, date_nums)}
for idx in df.index:
date = str(idx[0]).split(" ")[0]
date_tracker.append(7.3+date_dict[date])
df.index = range(df.index.shape[0])
#df['dateTracker'] = date_tracker
ax = df.plot(figsize=(14, 6), fontsize=14, legend=False)
ax.set_ylabel("log2(counts)", fontsize=14)
plt.tight_layout()
plt.show()
plt.savefig(path)
return None
import ecopy as ep
def beta_wrapper(df, var_key):
print "\n", var_key
brayDist = ep.distance(df, method='bray')
groups = list(df[var_key])
rand_groups = list(np.random.choice(np.unique(groups),
size=np.array(groups).shape))
ep.beta_dispersion(brayDist, groups, test='anova',
center='median', scores=False)
ep.beta_dispersion(brayDist, rand_groups, test='anova',
center='median', scores=False)
return brayDist
from sklearn.feature_selection import f_regression
from sklearn import manifold
def decomposeNcorrelate(df_numerical, df_w_mdata, metadata_cols, transformed,
method, dists=None):
if method == "nMDS" and (dists is None):
sys.exit("nMDS requires sample distances to be precomputed")
rv = df_numerical.shape[0]
if transformed:
X_std2 = df_numerical.values.T
else:
X_std2 = StandardScaler().fit_transform(df_numerical.values).T
rows_n, cols_n = X_std2.shape
print "\nPerforming {}".format(method)
if method == "PCA":
decomp = PCA(n_components=100, random_state=42)
decomp.fit(X_std2)
no1 = decomp.explained_variance_ratio_[0]
no2 = decomp.explained_variance_ratio_[1]
print "Top two components explain {} and {} of variance.".format(no1, no2)
components_ = decomp.components_
elif method == "nMDS":
decomp = manifold.MDS(n_components=100, metric=True, max_iter=3000,
eps=1e-12, dissimilarity="precomputed",
random_state=42)
emb_coords = decomp.fit_transform(dists)
components_ = emb_coords.T
else:
sys.exit("Illegal Decomposition method {} provided".format(method))
all_cors, p_comp_n, exp_vars, corr_ps = [], [], [], []
all_pvals, p_comp_nF, exp_vars2 = [], [], []
for mdata in metadata_cols:
md_arr = np.array(df_w_mdata[mdata])
raw_corrs = [ss.pearsonr(components_[i, :], md_arr) for i in range(100)]
corrs, c_pvals = zip(*raw_corrs)
if not np.all(np.isfinite(md_arr)):
print "Replacing {} not finite # with 0".format((~np.isfinite(md_arr)).sum())
md_arr[~np.isfinite(md_arr)] = 0
pvals = [f_regression(components_[i, :].reshape(rv, 1), md_arr)[1][0] for i in range(100)]
all_pvals.append(np.array(pvals).min())
all_cors.append(np.array(corrs).max())
pca_comp_no = np.argmax(np.array(corrs))
corr_ps.append(np.array(c_pvals)[pca_comp_no])
pca_comp_no2 = np.argmin(np.array(pvals))
p_comp_n.append(pca_comp_no+1)
p_comp_nF.append(pca_comp_no2+1)
if method == "PCA":
exp_vars.append(decomp.explained_variance_ratio_[pca_comp_no])
exp_vars2.append(decomp.explained_variance_ratio_[pca_comp_no2])
elif method == "nMDS":
exp_vars.append(np.nan)
exp_vars2.append(np.nan)
data_ = np.vstack((all_cors, p_comp_n, exp_vars, corr_ps)).T
data_2 = np.vstack((all_pvals, p_comp_nF, exp_vars2)).T
colset = ['Correlation', 'Component', 'Explained Variance', 'P-value']
colset2 = ['Pvalue', 'Component_F', 'Explained Variance_F']
to_return = pd.DataFrame(data=data_, index=metadata_cols, columns=colset)
f_to_return = pd.DataFrame(data=data_2, index=metadata_cols, columns=colset2)
f_to_return.sort_values(['Component_F', 'Pvalue'],
ascending=[True, True],
inplace=True)
to_return.sort_values(['Component', 'Correlation'],
ascending=[True, False],
inplace=True)
final_return = to_return[to_return.Correlation.notnull()]
final_f_return = f_to_return[f_to_return.Pvalue.notnull()]
high_val_cols = ['depth', 'date']
sig_score = sum(-1*np.log10(final_f_return.ix[high_val_cols, :].Pvalue.values))
print "\tSignificance score: {}".format(sig_score)
print "\tVar Exp (Depth): {:.3f}".format(final_f_return.ix['depth', 'Explained Variance_F'])
print "\tVar Exp (Date): {:.3f}".format(final_f_return.ix['date', 'Explained Variance_F'])
return final_return, final_f_return
def readChemData(chem_path, units, ftype, plotbool=False):
print os.path.basename(chem_path)
print units
if ftype == 'depth_profile':
site_chem_spec_df = pd.read_csv(chem_path, index_col=0,
parse_dates=True,
infer_datetime_format=True)
new_idx = []
for i in site_chem_spec_df.index:
new_idx.append(pd.Period(i, 'M'))
site_chem_spec_df.index = new_idx
elif ftype == 'surface_measure':
chem_spec_csv = pd.read_csv(chem_path, sep=",", index_col=0)
print "Null Values: {}".format(chem_spec_csv.isnull().sum().sum())
print "Database Shape: {}".format(chem_spec_csv.shape)
new_cols = [pd.Period(i) for i in chem_spec_csv.columns]
chem_spec_csv.columns = new_cols
site_chem_spec_df = chem_spec_csv.T.interpolate()
else:
sys.exit("invalid ftype")
if plotbool == True:
site_chem_spec_df.plot()
return site_chem_spec_df
def plotCommonTaxa(taxa_series):
taxa_list = list(taxa_series.values)
taxa_decomposition = []
for tax_str in taxa_list:
this_tl = tax_str.split(";")
clean_list = [i for i in this_tl if i[-2:] != "__" ]
taxa_decomposition += clean_list
unq_taxa = np.unique(taxa_decomposition, return_counts=True)
dfindex, dfdata = unq_taxa[0], unq_taxa[1]
taxa_df = pd.DataFrame(data=dfdata, index=dfindex, columns=['count'])
taxa_df.sort_values('count', ascending=False, inplace=True)
rtaxa_df = taxa_df.divide(taxa_df.sum(), axis=1)
total_called = []
t_levels_s = ['k', 'p', 'c', 'o', 'f', 'g', 's']
    t_levels = ['kingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species']
rtaxa_df = taxa_df.divide(taxa_df.sum(), axis=1)
for tL in t_levels_s:
posHits = [i for i in rtaxa_df.index if i[0] == tL]
subdf = rtaxa_df.ix[posHits, :]
print tL, subdf.sum()
total_called.append(subdf.sum())
t_level_rel = np.array(total_called)
width = 0.35
ind = np.arange(len(t_levels))
print "total pct%", t_level_rel.sum()
fig, ax = plt.subplots(1, 1)
ax.bar(ind + width, t_level_rel,width)
ax.set_xticks(ind + width)
ax.set_xticklabels(t_levels)
def inferTaxaLevel(taxa_series):
addSemicolon = lambda x: x+";"
taxa_edit = taxa_series.copy().apply(addSemicolon)
taxa_dict = taxa_edit.to_dict()
taxa_frame = taxa_edit.copy().to_frame()
taxa_depth = np.zeros(taxa_series.shape)
taxa_frame["Taxa Depth"] = taxa_depth
for seq, ts in taxa_dict.items():
taxa_frame.ix[seq, "Taxa Depth"] = 7 - ts.count("_;")
return taxa_frame
def analyze_alpha_diversity(decoder, derepped_otus_m, valPairs, var_type):
skippable = ['replicates', 'Index read length', 'Sequencing platform',
'Forward read length', 'Quadrants' ]
ad_cats = []
ad_cols = {"Mean":[], "Median":[], "Std":[], "N":[]}
for var in decoder.keys():
codes = decoder[var].keys()
for code in codes:
if var == 'date':
ad_cats.append(str(decoder[var][code]).split("T")[0]+" ("+var+")")
elif var in skippable:
pass
else:
ad_cats.append(str(decoder[var][code])+" ("+var+")")
if var in skippable:
pass
else:
try:
sub_bool = derepped_otus_m[var] == code
except TypeError:
sub_bool = derepped_otus_m[var] == float(code)
subdf = derepped_otus_m[sub_bool]
ad_cols["Median"].append(np.median(subdf.ix[:, var_type]))
ad_cols["Std"].append(subdf.ix[:, var_type].std())
ad_cols["N"].append(subdf.shape[0])
ad_cols["Mean"].append(subdf.ix[:, var_type].mean())
for idx, vp in enumerate(valPairs):
kitT, primT = vp[0], vp[1]
bool1 = derepped_otus_m.primers == primT
bool2 = derepped_otus_m.kit == kitT
subdf2 = derepped_otus_m[bool1 & bool2]
if idx == 2:
primer_outgroup = list(subdf2.index)
ad_cats.append(str(decoder['primers'][primT])+" & "+str(decoder['kit'][kitT]))
ad_cols["Median"].append(np.median(subdf2.ix[:, var_type]))
ad_cols["Std"].append(subdf2.ix[:, var_type].std())
ad_cols["N"].append(subdf2.shape[0])
ad_cols["Mean"].append(subdf2.ix[:, var_type].mean())
alpha_df = pd.DataFrame(data=ad_cols, index=ad_cats)
return alpha_df, primer_outgroup
def alpha_diversity(dereplicated_otus, derepped_m, metrics):
derepped_otus_m = derepped_m.copy()
row_sum = dereplicated_otus.copy().sum(axis=1)
row_rel = dereplicated_otus.copy().divide(row_sum, axis=0).astype('float64')
if 'enspie' in metrics:
enspie_sq = row_rel.apply(np.square)
enspie_dom = enspie_sq.sum(axis=1)
enspie_ = enspie_dom**-1
derepped_otus_m['enspie'] = enspie_
if 'shannon' in metrics:
entrop = lambda n: n*np.log(n)
shannon_ = row_rel.replace({ 0 : np.nan }).applymap(entrop).T.sum()*-1.0
derepped_otus_m['shannon'] = shannon_.apply(np.exp)
if 'chao1' in metrics:
total_s = (dereplicated_otus > 0).T.sum()
singletons = (dereplicated_otus == 1).T.sum()
doubletons = (dereplicated_otus == 2).T.sum()
numerator = singletons.multiply(singletons-1)
denominator = 2*(doubletons+1)
chao1_ = total_s + numerator.divide(denominator, axis=0)
derepped_otus_m['chao1'] = chao1_
return dereplicated_otus, derepped_otus_m
def plotCountTotalsByMetadata(df_m, decoder, mList, mVar, fignum):
## Get Normalized Count Totals
# drop everything but the key grouping variable and sum
mList.remove(mVar)
counts_Depth = df_m.drop(mList, axis=1)
depthGroup = counts_Depth.groupby([mVar]).sum()
# find the number of samples per grouping
(_, n_per_depth) = np.unique(df_m[mVar].values, return_counts=True)
# average the total counts per group by the number of samples
mean_counts_per_group = depthGroup.T.sum().divide(n_per_depth)
## Get Standard Deviation of Count Totals
# Drop depths & sum each sample before grouping
just_counts = counts_Depth.drop([mVar], axis=1)
depthSum = just_counts.T.sum().to_frame()
# Convert Series into DataFrame, add col names, and modify dtype
depthSum[mVar] = df_m[mVar].values
depthSum.columns = ['counts', mVar]
depthSum = depthSum.applymap(pd.to_numeric)
# group each sum by depth and flatten by std
depthStd = depthSum.groupby([mVar]).std()
# convert labels for display
if mVar == 'date':
decoded_labs = [str(decoder[mVar][i]).split("T")[0] for i in list(np.unique(df_m[mVar].values))]
else:
decoded_labs = [str(decoder[mVar][i]) for i in list(np.unique(df_m[mVar].values))]
# Setup Plot
width = 0.35; ind = np.arange(len(n_per_depth));
plt.figure(fignum, figsize=(14, 8))
ax = plt.gca()
ax.bar(ind + width, mean_counts_per_group, width,
yerr=depthStd.values.flatten())
ax.set_xticks(ind + width)
ax.set_xticklabels(decoded_labs)
ax.set_xticklabels(ax.xaxis.get_majorticklabels(), rotation=45)
ax.set_xlim(0.0, float(len(list(np.unique(df_m[mVar].values)))))
ax.set_xlabel(mVar.capitalize()+" (m)")
ax.set_ylabel("Average Total OTUs (n)")
# Add back metadata variable
mList.append(mVar)
def replicateAlphaDiv(df, metric, rep_groups):
enspie_cv = []
enspie_1 = []
enspie_2 = []
for g in rep_groups:
this_grp = df.ix[g, metric]
enspie_cv.append(this_grp.std() / this_grp.mean())
justTwo = list(np.random.choice(g, size=2, replace=False))
enspie_1.append(df.ix[justTwo[0], metric])
enspie_2.append(df.ix[justTwo[1], metric])
enspie_1 = np.array(enspie_1)
enspie_2 = np.array(enspie_2)
return (enspie_cv, enspie_1, enspie_2)
from sklearn.model_selection import train_test_split
def lda_tuner(ingroup_otu, best_models):
best_score = -1*np.inf
dtp_series = [0.0001, 0.001, 0.01, 0.1, 0.2]
twp_series = [0.0001, 0.001, 0.01, 0.1, 0.2]
topic_series = [3]
X = ingroup_otu.values
eval_counter = 0
for topics in topic_series:
for dtp in dtp_series:
for twp in twp_series:
eval_counter +=1
X_train, X_test = train_test_split(X, test_size=0.5)
lda = LatentDirichletAllocation(n_topics=topics,
doc_topic_prior=dtp,
topic_word_prior=twp,
learning_method='batch',
random_state=42,
max_iter=20)
lda.fit(X_train)
this_score = lda.score(X_test)
this_perplexity = lda.perplexity(X_test)
if this_score > best_score:
best_score = this_score
print "New Max Likelihood: {}".format(best_score)
print "#{}: n:{}, dtp:{}, twp:{}, score:{}, perp:{}".format(eval_counter,
topics, dtp, twp,
this_score, this_perplexity)
best_models.append({'n': topics, 'dtp': dtp, 'twp': twp,
'score': this_score, 'perp': this_perplexity})
if (dtp == dtp_series[-1]) and (twp == twp_series[-1]):
eval_counter +=1
X_train, X_test = train_test_split(X, test_size=0.5)
lda = LatentDirichletAllocation(n_topics=topics,
doc_topic_prior=1./topics,
topic_word_prior=1./topics,
learning_method='batch',
random_state=42,
max_iter=20)
lda.fit(X_train)
this_score = lda.score(X_test)
this_perplexity = lda.perplexity(X_test)
if this_score > best_score:
best_score = this_score
print "New Max Likelihood: {}".format(best_score)
print "#{}: n:{}, dtp:{}, twp:{}, score:{} perp: {}".format(eval_counter,
topics,
(1./topics),
(1./topics),
this_score,
this_perplexity)
best_models.append({'n': topics, 'dtp': (1./topics),
'twp': (1./topics), 'score': this_score,
'perp': this_perplexity})
return best_models
def collapseBdiversity(dist_mat, raw_data_m, metaData_var, verbose=False):
metaOptions = np.unique(raw_data_m.ix[:, metaData_var])
n_ = len(metaOptions)
metaDistance = np.full((n_, n_), np.nan)
metaDeviation = np.full((n_, n_), np.nan)
for r_i, r in enumerate(metaOptions):
for c_i, c in enumerate(metaOptions):
dist_copy = dist_mat.copy()
dist_copy[metaData_var] = raw_data_m.ix[:, metaData_var]
dist_filt_1 = dist_copy[dist_copy[metaData_var] == r]
dist_filt_1.drop([metaData_var], axis=1, inplace=True)
dist_filt_t = dist_filt_1.T
dist_filt_t[metaData_var] = raw_data_m.ix[:, metaData_var]
dist_filt_2 = dist_filt_t[dist_filt_t[metaData_var] == c]
dist_filt = dist_filt_2.drop([metaData_var], axis=1)
dist_filt_flat = dist_filt.values.flatten()
dist_filt_nz = dist_filt_flat[dist_filt_flat != 0]
mD = np.median(dist_filt_nz)
mDev = dist_filt_nz.std()
if verbose:
print "{} versus {} metadistance".format(r, c)
print "\t {} ({})".format(mD, mDev)
metaDistance[r_i,c_i] = mD
metaDeviation[r_i,c_i] = mDev
return metaDistance, metaDeviation
"""
http://qiime.org/scripts/make_otu_network.html
http://qiime.org/scripts/differential_abundance.html
# alternative to random forest classifier
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
clf = GaussianProcessClassifier(1.0 * RBF(1.0), warm_start=True,
n_jobs=-1)
X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=.4, random_state=42)
clf.fit(X_train, y_train)
score = clf.score(X_test, y_test)
"""
import subprocess as sp
from sklearn import tree
def write_tree_to_png(fname_no_ext, rf):
tree.export_graphviz(rf, out_file=fname_no_ext+".dot")
base_cmd = "dot -Tpng {}.dot > {}.png".format(fname_no_ext, fname_no_ext)
p = sp.Popen(base_cmd, cwd=os.getcwd(), shell=True, stderr=sp.PIPE, stdout=sp.PIPE)
stdout, stderr = p.communicate()
return stdout
def bz2wrapper(fpath):
if fpath.endswith("bz2"):
print "Unzipping"
base_cmd = "bzip2 -d {}".format(fpath)
else:
base_cmd = "bzip2 -z {}".format(fpath)
p = sp.Popen(base_cmd, cwd=os.getcwd(), shell=True, stderr=sp.PIPE, stdout=sp.PIPE)
stdout, stderr = p.communicate()
return stdout
def DESeqRlog(df_otus, df_m, saved_file=False):
shared_otus = df_otus.T.copy()
if not saved_file:
path = "/Users/login/Desktop/vst_temp.csv"
path2 = "/Users/login/Desktop/date_temp.csv"
wrapper_file = os.path.join(os.getcwd(), "DESeqwrapper.R")
base_cmd = "Rscript DESeqwrapper.R"
if not os.path.exists(wrapper_file):
sys.exit("Accessory script missing")
# direct export path
to_transform = path
arg_1 = os.path.dirname(to_transform)
arg_2 = to_transform.split("/")[-1]
arg_3 = os.path.basename(path2)
# export data to disk
date_series = df_m.ix[:, 'Sequencing Date']
date_series.to_csv(path2)
shared_otus.to_csv(to_transform)
# communicate with the world
cmd = " ".join([base_cmd, arg_1, arg_2, arg_3])
p = sp.Popen(cmd, cwd=os.getcwd(), shell=True, stderr=sp.PIPE, stdout=sp.PIPE)
stdout, stderr = p.communicate()
if "Execution halted" in stderr:
sys.exit("R wrapper failed")
to_read_back = os.path.join(arg_1, arg_2.split(".")[0]+"_vst.csv")
rlog_otus = pd.read_csv(to_read_back, index_col = 0)
dropped_otus = len(rlog_otus.columns) - len(shared_otus.columns)
if dropped_otus > 0:
print "{} OTUS dropped".format(dropped_otus)
for i, j in zip(rlog_otus.index, shared_otus.index):
assert i == j
for temps in [to_read_back, to_transform]:
os.remove(temps)
else:
saved_file = os.path.join(os.getcwd(), "rlog_saved.csv")
rlog_otus = pd.read_csv(saved_file, index_col = 0).T
shared_otus = shared_otus.T
dropped_otus = len(rlog_otus.columns) - len(shared_otus.columns)
if dropped_otus > 0:
print "{} OTUS dropped".format(dropped_otus)
for i, j in zip(rlog_otus.index, shared_otus.index):
assert i == j
# return only those columns not in both dfs
mdata_cols = set(df_otus).symmetric_difference(set(df_m.columns))
# copy transformed matrix
rlog_m = rlog_otus.copy()
# add back metadata
for mc in mdata_cols:
rlog_m[mc] = df_m.ix[:, mc]
return rlog_otus, rlog_m
def edgeRtmm(df_otus, df_m):
"""
Uses edgeR's variance stabilizing transformation to transform sequence counts
    into OTU abundances. Involves adding and subtracting pseudocounts.
"""
path = "/Users/login/Desktop/vst_temp.csv"
print "Performing TMM Transform"
wrapper_file = os.path.join(os.getcwd(), "edgeRwrapper_tmm.R")
base_cmd = "Rscript edgeRwrapper_tmm.R"
if not os.path.exists(wrapper_file):
sys.exit("Accessory script missing")
# direct export path
to_transform = path
shared_otus = df_otus.copy()
arg_1 = os.path.dirname(to_transform)
arg_2 = to_transform.split("/")[-1]
# export data to disk
shared_otus.to_csv(to_transform, sep=",")
cmd = " ".join([base_cmd, arg_1, arg_2])
p = sp.Popen(cmd, cwd=os.getcwd(), shell=True, stderr=sp.PIPE, stdout=sp.PIPE)
stdout, stderr = p.communicate()
if "Execution halted" in stderr:
sys.exit("R wrapper failed")
to_read_back = os.path.join(arg_1, arg_2.split(".")[0]+"_vst.csv")
shared_vst = pd.read_csv(to_read_back, index_col = 0)
vs_T_otus = shared_vst.copy()
dropped_otus = len(shared_vst.columns) - len(shared_otus.columns)
if dropped_otus > 0:
print "{} OTUS dropped".format(dropped_otus)
for i, j in zip(shared_vst.index, shared_otus.index):
assert i == j
for temps in [to_read_back, to_transform]:
os.remove(temps)
# return only those columns not in both dfs
mdata_cols = set(df_otus).symmetric_difference(set(df_m.columns))
# copy transformed matrix
vs_T_m = vs_T_otus.copy()
# add back metadata
for mc in mdata_cols:
vs_T_m[mc] = df_m.ix[:, mc]
return vs_T_otus, vs_T_m
def importratesandconcs_mod(path_):
assert os.path.exists(path_)
conc_idxs = {0 : "O", 1 : "C", 2 : "N+", 3 : "N-", 4 : "S+", 5 : "S-",
6 : "Fe+", 7 : "Fe-", 8 : "CH4", 9 : "Null"}
rate_idxs = { 0 : "Iron Oxidation (oxygen)",
1 : "Ammonia Oxidation (oxygen)",
2 : "Sulfur Oxidation (oxygen)",
3 : "Iron Oxidation (nitrate)",
4 : "Sulfur Oxidation (nitrate)",
5 : "Methane Oxidation (oxygen)",
6 : "Methane Oxidation (nitrate)",
7 : "Methane Oxidation (sulfate)",
8 : "Aerobic Heterotrophy",
9 : "Denitrification",
10 : "Iron Reduction",
11 : "Sulfate Reduction",
12 : "Methanogenesis" }
mat = loadmat(path_)
concs_ = mat['concs_history']
rates_ = mat['rates_history']
conc_idxs_inv = {v: k for k, v in conc_idxs.iteritems()}
rate_idxs_inv = {v: k for k, v in rate_idxs.iteritems()}
rate_dict, conc_dict = {}, {}
outputs = [conc_dict, rate_dict]
inputs = [concs_, rates_]
translators = [conc_idxs_inv, rate_idxs_inv]
for o_dict, i_arr, t_dict in zip(outputs, inputs, translators):
for name, idx_z in t_dict.items():
o_dict[name] = i_arr[:, :, idx_z]
return (rate_dict, conc_dict)
from scipy.interpolate import interp1d
def time_scale_modeled_chem_data(rate_dict, conc_dict, n_days, start_date, end_date):
"""
1. Create a date index for the new dataframes
2. Create new dictionaries to hold the new dataframes
3. Unload each DF one at a time
4. Interpolate each depth vector along new axis
5. Load into new numpy array
6. Assign date index & numpy array to new dataframe object
7. Reload new dataframe into new dictionary, accessible by name string
    8. Merge all rate and concentration series into one DataFrame and return it
"""
dr = pd.date_range(start_date, end_date)
assert len(dr) == n_days
new_rate_dict, new_conc_dict = {}, {}
for a_dict, new_dict in zip([rate_dict, conc_dict], [new_rate_dict, new_conc_dict]):
for a_spec in a_dict.keys():
this_df = a_dict[a_spec]
depths, columns = this_df.shape
n_slices = columns
assert n_slices < n_days
idx = np.arange(n_slices)
new_interval = max(idx) / float(n_days)
new_columns = np.arange(idx.min(), idx.max(), new_interval)
new_df_data = np.zeros((depths, len(new_columns)))
for depth in xrange(depths):
a_vector = this_df.ix[depth, :].values
f2 = interp1d(idx, a_vector, kind='cubic')
new_df_data[depth, :] = f2(new_columns)
new_df = pd.DataFrame(data=new_df_data.T, columns=np.arange(6,6+depths),
index=dr)
new_dict[a_spec] = new_df.T.unstack()
rate_cols = sorted(new_rate_dict.keys())
conc_cols = sorted(new_conc_dict.keys())
all_cols = rate_cols + conc_cols
full_idx = new_rate_dict[rate_cols[0]].index
full_df = pd.DataFrame(index=full_idx, columns=all_cols)
for name in all_cols:
if name in rate_cols:
full_df.ix[:, name] = new_rate_dict[name]
elif name in conc_cols:
full_df.ix[:, name] = new_conc_dict[name]
return full_df
def preheim_date_parser(date_str):
date_part = date_str.split("_")[1]
new_str = date_part[2:4] + "-" + date_part[4:6] + "-" + date_part[0:2]
return pd.to_datetime(new_str)
def importratesandconcs_obs(chem_dir):
assert os.path.exists(chem_dir)
obs_conc_f_dict = { "Chloride" : "Cl_mg_ClL-1.txt",
"Dissolved Oxygen" : "DO.txt",
"Nitrate" : "NO3_mg_NL-1.txt",
"Conductance" : "SCP.txt",
"Sulfate" : "SO4_mg_SL-1.txt",
"Temperature" : "TEMP.txt" }
obs_conc_df_dict = {}
for name, fname in obs_conc_f_dict.items():
this_path = os.path.join(chem_dir, fname)
this_df = pd.read_csv(this_path, sep="\t", index_col=0, dtype=float)
this_df.columns = map(preheim_date_parser, this_df.columns)
if name == "Temperature" or name == "Conductance":
this_df.ix[1, '2012-11-12'] = this_df.ix[2, '2012-11-02']
this_df.interpolate(axis=0, inplace=True)
surf_null_mask = this_df.ix[0, :].isnull().values
this_df.ix[0, surf_null_mask] = this_df.ix[1, surf_null_mask]
this_df = this_df.T
if name == "Dissolved Oxygen":
idx_to_drop = this_df.index[5]
this_df.drop(idx_to_drop, axis=0, inplace=True)
# new_cols = list(this_df.columns)
# new_cols.reverse()
# this_df.columns = new_cols
this_df.columns = [int(i) for i in this_df.columns]
print "Total Null Vals in {}: {}".format(name, this_df.isnull().sum().sum())
obs_conc_df_dict[name] = this_df.T.unstack()
conc_cols = sorted(obs_conc_df_dict.keys())
full_idx = obs_conc_df_dict[conc_cols[0]].index
obs_conc_df = pd.DataFrame(index=full_idx, columns=conc_cols)
for name in conc_cols:
obs_conc_df.ix[:, name] = obs_conc_df_dict[name]
return obs_conc_df
def extract_linkages(row_clusters, labels):
clusters = {}
for row in xrange(row_clusters.shape[0]):
cluster_n = row + len(labels)
# which clusters / labels are present in this row
glob1, glob2 = row_clusters[row, 0], row_clusters[row, 1]
# if this is a cluster, pull the cluster
this_clust = []
for glob in [glob1, glob2]:
if glob > (len(labels)-1):
this_clust += clusters[glob]
# if it isn't, add the label to this cluster
else:
this_clust.append(glob)
clusters[cluster_n] = this_clust
real_clusters = {}
for n in clusters.keys():
if len(clusters[n]) > 2 or len(clusters[n]) < len(labels):
real_clusters[n-len(labels)] = clusters[n]
realest = { idx : real_clusters[i] for idx, i in enumerate(real_clusters.keys())}
return realest
|
from praw.exceptions import ClientException, PRAWException
from praw.models import Comment, Submission
from prawcore import BadRequest
import mock
import pytest
from ... import IntegrationTest
class TestComment(IntegrationTest):
def test_attributes(self):
with self.recorder.use_cassette("TestComment.test_attributes"):
comment = Comment(self.reddit, "cklhv0f")
assert comment.author == "bboe"
assert comment.body.startswith("Yes it does.")
assert not comment.is_root
assert comment.submission == "2gmzqe"
@mock.patch("time.sleep", return_value=None)
def test_block(self, _):
self.reddit.read_only = False
with self.recorder.use_cassette("TestComment.test_block"):
comment = None
for item in self.reddit.inbox.submission_replies():
if item.author and item.author != pytest.placeholders.username:
comment = item
break
else:
assert False, "no comment found"
comment.block()
def test_clear_vote(self):
self.reddit.read_only = False
with self.recorder.use_cassette("TestComment.test_clear_vote"):
Comment(self.reddit, "d1680wu").clear_vote()
@mock.patch("time.sleep", return_value=None)
def test_delete(self, _):
self.reddit.read_only = False
with self.recorder.use_cassette("TestComment.test_delete"):
comment = Comment(self.reddit, "d1616q2")
comment.delete()
assert comment.author is None
assert comment.body == "[deleted]"
def test_disable_inbox_replies(self):
self.reddit.read_only = False
comment = Comment(self.reddit, "dcc9snh")
with self.recorder.use_cassette(
"TestComment.test_disable_inbox_replies"
):
comment.disable_inbox_replies()
def test_downvote(self):
self.reddit.read_only = False
with self.recorder.use_cassette("TestComment.test_downvote"):
Comment(self.reddit, "d1680wu").downvote()
@mock.patch("time.sleep", return_value=None)
def test_edit(self, _):
self.reddit.read_only = False
with self.recorder.use_cassette("TestComment.test_edit"):
comment = Comment(self.reddit, "d1616q2")
comment.edit("New text")
assert comment.body == "New text"
def test_enable_inbox_replies(self):
self.reddit.read_only = False
comment = Comment(self.reddit, "dcc9snh")
with self.recorder.use_cassette(
"TestComment.test_enable_inbox_replies"
):
comment.enable_inbox_replies()
def test_gild__no_creddits(self):
self.reddit.read_only = False
with self.recorder.use_cassette("TestComment.test_gild__no_creddits"):
with pytest.raises(BadRequest) as excinfo:
Comment(self.reddit, "d1616q2").gild()
reason = excinfo.value.response.json()["reason"]
assert "INSUFFICIENT_CREDDITS" == reason
def test_invalid(self):
with self.recorder.use_cassette("TestComment.test_invalid"):
with pytest.raises(PRAWException) as excinfo:
Comment(self.reddit, "0").body
assert excinfo.value.args[0].startswith(
"No data returned for comment"
)
@mock.patch("time.sleep", return_value=None)
def test_mark_read(self, _):
self.reddit.read_only = False
with self.recorder.use_cassette("TestComment.test_mark_read"):
comment = next(self.reddit.inbox.unread())
assert isinstance(comment, Comment)
comment.mark_read()
@mock.patch("time.sleep", return_value=None)
def test_mark_unread(self, _):
self.reddit.read_only = False
with self.recorder.use_cassette("TestComment.test_mark_unread"):
comment = next(self.reddit.inbox.comment_replies())
comment.mark_unread()
def test_parent__comment(self):
comment = Comment(self.reddit, "cklhv0f")
with self.recorder.use_cassette("TestComment.test_parent__comment"):
parent = comment.parent()
parent.refresh()
assert comment in parent.replies
assert isinstance(parent, Comment)
assert parent.fullname == comment.parent_id
def test_parent__chain(self):
comment = Comment(self.reddit, "dkk4qjd")
counter = 0
with self.recorder.use_cassette("TestComment.test_parent__chain"):
comment.refresh()
parent = comment.parent()
while parent != comment.submission:
if counter % 9 == 0:
parent.refresh()
counter += 1
parent = parent.parent()
def test_parent__comment_from_forest(self):
submission = self.reddit.submission("2gmzqe")
with self.recorder.use_cassette(
"TestComment.test_parent__comment_from_forest"
):
comment = submission.comments[0].replies[0]
parent = comment.parent()
assert comment in parent.replies
assert isinstance(parent, Comment)
assert parent.fullname == comment.parent_id
@mock.patch("time.sleep", return_value=None)
def test_parent__from_replies(self, _):
self.reddit.read_only = False
with self.recorder.use_cassette("TestComment.parent__from_replies"):
comment = next(self.reddit.inbox.comment_replies())
parent = comment.parent()
assert isinstance(parent, Comment)
assert parent.fullname == comment.parent_id
def test_parent__submission(self):
comment = Comment(self.reddit, "cklfmye")
with self.recorder.use_cassette("TestComment.test_parent__submission"):
parent = comment.parent()
assert comment in parent.comments
assert isinstance(parent, Submission)
assert parent.fullname == comment.parent_id
def test_refresh(self):
with self.recorder.use_cassette("TestComment.test_refresh"):
comment = Comment(self.reddit, "d81vwef").refresh()
assert len(comment.replies) > 0
def test_refresh__raises_exception(self):
with self.recorder.use_cassette(
"TestComment.test_refresh__raises_exception"
):
with pytest.raises(ClientException) as excinfo:
Comment(self.reddit, "d81vwef").refresh()
assert (
"This comment does not appear to be in the comment tree",
) == excinfo.value.args
def test_refresh__twice(self):
with self.recorder.use_cassette("TestComment.test_refresh__twice"):
Comment(self.reddit, "d81vwef").refresh().refresh()
def test_refresh__deleted_comment(self):
with self.recorder.use_cassette(
"TestComment.test_refresh__deleted_comment"
):
with pytest.raises(ClientException) as excinfo:
Comment(self.reddit, "d7ltvl0").refresh()
assert (
"This comment does not appear to be in the comment tree",
) == excinfo.value.args
def test_refresh__removed_comment(self):
with self.recorder.use_cassette(
"TestComment.test_refresh__removed_comment"
):
with pytest.raises(ClientException) as excinfo:
Comment(self.reddit, "dma3mi5").refresh()
assert (
"This comment does not appear to be in the comment tree",
) == excinfo.value.args
def test_refresh__with_reply_sort_and_limit(self):
with self.recorder.use_cassette(
"TestComment.test_refresh__with_reply_sort_and_limit"
):
comment = Comment(self.reddit, "e4j4830")
comment.reply_limit = 4
comment.reply_sort = "new"
comment.refresh()
replies = comment.replies
last_created = float("inf")
for reply in replies:
if isinstance(reply, Comment):
if reply.created_utc > last_created:
assert False, "sort order incorrect"
last_created = reply.created_utc
assert len(comment.replies) == 3
def test_reply(self):
self.reddit.read_only = False
with self.recorder.use_cassette("TestComment.test_reply"):
parent_comment = Comment(self.reddit, "d1616q2")
comment = parent_comment.reply("Comment reply")
assert comment.author == self.reddit.config.username
assert comment.body == "Comment reply"
assert not comment.is_root
assert comment.parent_id == parent_comment.fullname
def test_report(self):
self.reddit.read_only = False
with self.recorder.use_cassette("TestComment.test_report"):
Comment(self.reddit, "d0335z3").report("custom")
def test_save(self):
self.reddit.read_only = False
with self.recorder.use_cassette("TestComment.test_save"):
Comment(self.reddit, "d1680wu").save("foo")
def test_unsave(self):
self.reddit.read_only = False
with self.recorder.use_cassette("TestComment.test_unsave"):
Comment(self.reddit, "d1680wu").unsave()
def test_upvote(self):
self.reddit.read_only = False
with self.recorder.use_cassette("TestComment.test_upvote"):
Comment(self.reddit, "d1680wu").upvote()
class TestCommentModeration(IntegrationTest):
def test_approve(self):
self.reddit.read_only = False
with self.recorder.use_cassette("TestCommentModeration.test_approve"):
Comment(self.reddit, "da2g5y6").mod.approve()
def test_distinguish(self):
self.reddit.read_only = False
with self.recorder.use_cassette(
"TestCommentModeration.test_distinguish"
):
Comment(self.reddit, "da2g5y6").mod.distinguish()
@mock.patch("time.sleep", return_value=None)
def test_distinguish__sticky(self, _):
self.reddit.read_only = False
with self.recorder.use_cassette(
"TestCommentModeration.test_distinguish__sticky"
):
Comment(self.reddit, "da2g5y6").mod.distinguish(sticky=True)
def test_ignore_reports(self):
self.reddit.read_only = False
with self.recorder.use_cassette(
"TestCommentModeration.test_ignore_reports"
):
self.reddit.comment("da2g5y6").mod.ignore_reports()
def test_lock(self):
self.reddit.read_only = False
with self.recorder.use_cassette("TestCommentModeration.test_lock"):
Comment(self.reddit, "da2g6ne").mod.lock()
def test_remove(self):
self.reddit.read_only = False
with self.recorder.use_cassette("TestCommentModeration.test_remove"):
self.reddit.comment("da2g5y6").mod.remove(spam=True)
def test_unlock(self):
self.reddit.read_only = False
with self.recorder.use_cassette("TestCommentModeration.test_unlock"):
Comment(self.reddit, "da2g6ne").mod.unlock()
@mock.patch("time.sleep", return_value=None)
def test_send_removal_message(self, _):
self.reddit.read_only = False
with self.recorder.use_cassette(
"TestCommentModeration.test_send_removal_message"
):
comment = self.reddit.comment("edu698v")
mod = comment.mod
mod.remove()
message = "message"
res = [
mod.send_removal_message(message, "title", type)
for type in ("public", "private", "private_exposed")
]
assert isinstance(res[0], Comment)
assert res[0].parent_id == "t1_" + comment.id
assert res[0].body == message
assert res[1] is None
assert res[2] is None
def test_undistinguish(self):
self.reddit.read_only = False
with self.recorder.use_cassette(
"TestCommentModeration.test_undistinguish"
):
self.reddit.comment("da2g5y6").mod.undistinguish()
def test_unignore_reports(self):
self.reddit.read_only = False
with self.recorder.use_cassette(
"TestCommentModeration.test_unignore_reports"
):
self.reddit.comment("da2g5y6").mod.unignore_reports()
|
"""
A simple web spider with several recursion strategies.
Many command options taken from wget;
some ideas from http://ericholscher.com/projects/django-test-utils/
"""
import collections
import httplib2
import logging
import lxml.html
import patternize
import posixpath
import pprint
import random
import re
import sys
import time
import urlparse
logger = logging.getLogger('spydey')
try:
import fabulous.color
fab = True
except ImportError:
fab = False
if fab and sys.stderr.isatty():
colorizer = fabulous.color
else:
class _noop_colorizer:
def __getattr__(self, *name):
return lambda s: s
colorizer = _noop_colorizer()
PROFILE_REPORT_SIZE=20
queuetypes = {}
class FifoUrlQueue(object):
"""
A URL queue is responsible for storing a queue of unique URLs,
removing duplicates, and deciding (via the pop() method) which
URL to visit next.
This base class pops URLs in FIFO order, so it does a
breadth-first traversal of the site.
"""
# This could subclass list, but I want to limit the API to a
# subset of list's API.
def __init__(self, opts):
self.urls = collections.deque()
self.known_urls = set()
self.referrers = {} # first referrer only.
self.opts = opts
def __len__(self):
return len(self.urls)
def append(self, url, referrer=None):
if url not in self.known_urls:
self.urls.append(url)
self.known_urls.add(url)
self.referrers[url] = referrer
def extend(self, urls, referrer=None):
# Is there a more efficient way to do this
# while still only inserting each url once?
for url in urls:
self.append(url, referrer=referrer)
def pop(self):
return self.urls.popleft()
queuetypes['breadth-first'] = FifoUrlQueue
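# Illustration (hypothetical opts and URLs): FIFO order means breadth-first, e.g.
#   q = FifoUrlQueue(opts); q.extend(['http://x/a', 'http://x/a/b'])
#   q.pop()  # -> 'http://x/a', the earliest URL seen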
class RandomizingUrlQueue(FifoUrlQueue):
"""A URL Queue that pops URLs off the queue in random order.
This turns out to not feel very random in behavior, because often
the URL space is dominated by a few similar patterns, so we have a
high likelihood of spending a lot of time on similar leaf nodes.
"""
def __init__(self, opts):
self.urls = []
self.known_urls = set()
self.referrers = {}
self.opts = opts
def pop(self):
i = random.randint(0, len(self.urls) -1)
logger.info('Randomly popping %d of %d' % (i, len(self.urls)))
# This is O(N), a dict keyed by ints might be a better storage.
return self.urls.pop(i)
queuetypes['random'] = RandomizingUrlQueue
class DepthFirstQueue(FifoUrlQueue):
"""
Depth-first traversal. Since we don't have a site map to follow,
we're not walking a tree, but rather a (probably cyclic) directed
graph. So we use a LIFO queue in typical depth-first fashion, but
also, to get far away from the root as fast as possible, new links
are appended in order of the number of path elements in the URL.
In practice this means we quickly walk to the end of a branch and
then spend a lot of time on similar leaf nodes before exploring
another branch.
"""
def extend(self, urls, referrer=None):
urls.sort(key=lambda s: s.count('/'))
return FifoUrlQueue.extend(self, urls, referrer)
def pop(self):
return self.urls.pop()
queuetypes['depth-first'] = DepthFirstQueue
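# Illustration (hypothetical URLs): new links are queued shallow-to-deep and
# popped LIFO, so the deepest known URL is visited next, e.g. after
#   q.extend(['/a', '/a/b', '/a/b/c']), q.pop() returns '/a/b/c'.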
class HybridTraverseQueue(DepthFirstQueue):
"""
Alternate between depth-first and breadth-first traversal
behavior.
"""
def __init__(self, opts):
super(HybridTraverseQueue, self).__init__(opts)
self.next = self.urls.pop
def pop(self):
if self.next == self.urls.pop:
self.next = self.urls.popleft
logger.debug('next: left')
else:
self.next = self.urls.pop
logger.debug('next: right')
popped = self.next()
return popped
queuetypes['hybrid'] = HybridTraverseQueue
class PatternPrioritizingUrlQueue(RandomizingUrlQueue):
"""
An attempt at discovering different sections of a website quickly.
We classify links with a primitive pattern-recognition algorithm, and
prefer links whose patterns we haven't seen before.
Classification uses a heuristic: the first part of the path,
followed by the rest of the path converted into regex patterns.
Whenever there are no high-priority URLs -- those whose patterns
we haven't seen yet -- we fall back to RandomizingUrlQueue
behavior, and pick a random URL from the remaining low-priority
URLs.
"""
def __init__(self, opts):
super(PatternPrioritizingUrlQueue, self).__init__(opts)
self.priority_urls = collections.deque()
self.known_patterns = {}
self.referrers = {}
self.popped_some = False
def make_pattern(self, s):
path = urlparse.urlparse(s).path.strip('/')
if not path:
return ''
parts = posixpath.normpath(path).split('/')
parts = parts[:1] + [patternize.patternize(p) for p in parts[1:]]
return '/'.join(parts)
def append(self, url, referrer=None):
if url in self.known_urls:
return
self.known_urls.add(url)
self.referrers[url] = referrer
new_pattern = self.make_pattern(url)
if new_pattern in self.known_patterns:
# put it in the low priority pile.
self.urls.append(url)
self.known_patterns[new_pattern] += 1
else:
logger.debug(colorizer.red('NEW PATTERN!') + new_pattern)
self.priority_urls.append(url)
self.known_patterns[new_pattern] = 1
def extend(self, urls, referrer=None):
# We actually want to visit the shallowest new-patterned URLs first.
urls = set(urls)
urls = sorted(urls, key=lambda s: s.count('/'), reverse=True)
for url in urls:
self.append(url, referrer)
def pop(self):
logger.debug(colorizer.green('LENGTH: known URLs: %d; new pattern queue: %d; old pattern queue: %d' % (len(self.known_urls), len(self.priority_urls), len(self.urls))))
if self.priority_urls:
self.popped_some = True
return self.priority_urls.pop()
if self.opts.max_requests == -1 and self.popped_some:
logger.info("Stopping iteration because we're out of new patterns")
raise StopIteration()
return RandomizingUrlQueue.pop(self)
def __len__(self):
return len(self.urls) + len(self.priority_urls)
queuetypes['pattern'] = PatternPrioritizingUrlQueue
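# Illustration (assuming patternize.patternize collapses dates/slugs to generic
# patterns): '/blog/2013/05/first-post' and '/blog/2014/01/other-post' share a
# pattern, so only the first gets priority, while '/about' introduces a new
# pattern and jumps to the front of the queue.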
class Spider(object):
"""A simple web spider that doesn't yet do much beyond offer
pluggable traversal strategies, and report HTTP status for each
visited URL.
"""
def __init__(self, url, opts):
self.opts = opts
self.base_url = url
self.domain = urlparse.urlparse(url).netloc
self.queue = queuetypes[opts.traversal](opts)
self.queue.append(url)
self.http = httplib2.Http(timeout=opts.timeout or None)
# We do our own redirect handling.
self.http.follow_redirects = False
self.fetchcount = 0
self.reject = [(s, re.compile(s)) for s in (self.opts.reject or [])]
self.accept = [(s, re.compile(s)) for s in (self.opts.accept or [])]
self.slowest_urls = []
def sleep(self):
"""Maybe wait before doing next download.
"""
if self.opts.wait is not None:
time.sleep(self.opts.wait)
elif self.opts.random_wait is not None:
time.sleep(random.uniform(0, 2 * self.opts.random_wait))
def fetch_one(self, url):
"""Fetch a single URL.
"""
if self.opts.max_requests > 0 and self.fetchcount >= self.opts.max_requests:
logger.info("Stopping after %d requests." % self.fetchcount)
raise StopIteration()
if self.opts.profile:
start = time.time()
(response, content) = self.http.request(url)
if self.opts.profile:
elapsed = time.time() - start
self.slowest_urls.append((elapsed, url))
self.slowest_urls.sort(reverse=True)
self.slowest_urls = self.slowest_urls[:PROFILE_REPORT_SIZE]
else:
logger.debug('fetched %r' % url)
elapsed = None
self.fetchcount += 1
self.handle_result(url, response, content, elapsed)
return (response, content, elapsed)
def handle_result(self, url, response, data, elapsed):
# TODO: options to store downloads, report different things, etc.
status = response['status']
if int(status) < 300:
status = colorizer.green(status)
level = logging.INFO
elif int(status) < 400:
status = colorizer.cyan(status)
level = logging.INFO
elif int(status) == 404:
status = colorizer.magenta(status)
level = logging.WARN
else:
status = colorizer.red(status)
level = logging.ERROR
status = colorizer.bold(status)
msg = '%d. %s %s' % (self.fetchcount, status, colorizer.blue(url))
if self.opts.profile:
msg = '%s (%0.3f secs)' % (msg, elapsed)
if self.opts.log_referrer:
msg = '%s (from %s)' % (msg, self.queue.referrers.get(url, None))
logger.log(level, msg)
def crawl(self):
while self.queue:
try:
url = self.queue.pop()
try:
response, data, elapsed = self.fetch_one(url)
except AttributeError:
# httplib2 bug: socket is None, means no connection.
logger.error("Failure connecting to %s" % url)
continue
except StopIteration:
break
# Might be following a redirect. Need to fix our idea of
# URL since we use that to fix relative links...
redirect_count = 0
while response.has_key('location') and (300 <= response.status < 400):
if redirect_count >= self.opts.max_redirect:
logger.info("Stopping redirects after %d" % redirect_count)
break
redirect_count += 1
newurl = response['location']
logger.debug('redirected from %r to %r' % (url, newurl))
if not self.allow_link(newurl):
logger.info("Not following redirect to disallowed link %s"
% newurl)
break
try:
response, data, elapsed = self.fetch_one(newurl)
except StopIteration:
break
url = newurl
if self.opts.stop_on_error and response.status >= 400:
logger.warn("Bailing out on first HTTP error")
break
if self.opts.recursive:
urls = self.get_urls(url, response, data)
self.queue.extend(urls, referrer=url)
# fabulous doesn't deal well w/ this:
#logger.debug("Adding new URLs from %r:\n%s" % (
# url, pprint.pformat(urls, indent=2)))
self.sleep()
if isinstance(self.queue, PatternPrioritizingUrlQueue) and self.opts.stats:
print "\nPattern count summary:"
patterns = [(v, k) for (k, v) in self.queue.known_patterns.items()]
patterns = sorted(patterns)
pprint.pprint([(k, v) for (v, k) in patterns])
print
if self.opts.profile:
print "\nSlowest %d URLs:" % PROFILE_REPORT_SIZE
pprint.pprint(self.slowest_urls)
print
def allow_link(self, link):
"""Patterns to explicitly accept or reject.
"""
# Check base URL if we're not spanning across hosts.
if not self.opts.span_hosts:
parsed_link = urlparse.urlsplit(link, allow_fragments=False)
if parsed_link.netloc != self.domain:
logger.debug("Skipping %r from foreign domain" % link)
return False
if self.accept:
skip = True
else:
skip = False
for pattern, regex in self.accept:
if regex.search(link):
logger.debug("Allowing %r, matches accept pattern %r" % (link, pattern))
skip = False
break
for pattern, regex in self.reject:
if regex.search(link):
logger.debug("Skipping %r, matches reject pattern %r" % (link, pattern))
skip = True
break
return not skip
def filter_links(self, links):
# Assumes links are absolute, and are tuples as returned by iterlinks().
for (el, attr, link, pos) in links:
# Discard fragment name, eg http://foo/#bar -> http://foo/
            (scheme, netloc, path, query, fragment) = urlparse.urlsplit(
                link, allow_fragments=False)
fragment = ''
# For some reason, sometimes the fragment ends up in the path.
path = path.split('#', 1)[0]
link = urlparse.urlunsplit((scheme, netloc, path, query, fragment))
# We could stand to do some other normalization here, eg.
# strip trailing slashes from the path - but that breaks
# referrer logging on a site that redirects 'foo' to 'foo/'.
if not self.allow_link(link):
continue
if el.tag == 'a':
if self.opts.no_parent:
# Only applies to pages, not js, stylesheets or
# other resources.
if not link.startswith(self.base_url):
logger.debug("Skipping parent or sibling %r" % link)
continue
yield link
elif el.tag == 'form' and attr == 'action':
# Unless we can guess how to fill out the form,
# following these would make no sense at all.
continue
elif self.opts.page_requisites:
logger.debug("getting page req. %r from (%r, %r)" % (link, el, attr))
yield link
else:
logger.debug("Skipping %r from (%r, %r)" % (link, el, attr))
continue
def get_urls(self, url, response, data):
logger.debug("getting more urls from %s..." % url)
if data.strip() and is_html(response):
tree = lxml.html.document_fromstring(data)
tree.make_links_absolute(url, resolve_base_href=True)
links = self.filter_links(tree.iterlinks())
return list(links)
else:
# TODO: parse resource links from CSS.
return []
def is_html(response):
return response.get('content-type', '').lower().startswith('text/html')
def get_optparser(argv=None):
if argv is None:
import sys
argv = sys.argv[1:]
from optparse import OptionParser
parser = OptionParser(usage="usage: %prog [options] URL")
parser.add_option("-r", "--recursive", action="store_true", default=False,
help="Recur into subdirectories")
parser.add_option('-p', '--page-requisites', action="store_true",
default=False,
help="Get all images, etc. needed to display HTML page.")
parser.add_option('--no-parent', action="store_true", default=False,
help="Don't ascend to the parent directory.")
parser.add_option('-R', '--reject', action="append",
help="Regex for filenames to reject. May be given multiple times.")
parser.add_option('-A', '--accept', action="append",
help="Regex for filenames to accept. May be given multiple times.")
parser.add_option('-t', '--traversal', '--traverse', action="store",
default="breadth-first",
choices=sorted(queuetypes.keys()),
help="Recursive traversal strategy. Choices are: %s"
% ', '.join(sorted(queuetypes.keys())))
parser.add_option("-H", "--span-hosts", action="store_true", default=False,
help="Go to foreign hosts when recursive.")
parser.add_option("-w", "--wait", default=None, type=float,
help="Wait SECONDS between retrievals.")
parser.add_option("--random-wait", default=None, type=float,
help="Wait from 0...2*WAIT secs between retrievals.")
parser.add_option("--loglevel", default='INFO', help="Log level.")
parser.add_option("--log-referrer", "--log-referer",
action="store_true", default=False,
help="Log referrer URL for each request.")
parser.add_option("--transient-log", default=False, action="store_true",
help="Use Fabulous transient logging config.")
parser.add_option("--max-redirect", default=20, type=int,
help="Maximum number of redirections to follow for a resource.")
parser.add_option("--max-requests", default=0, type=int,
help="Maximum number of requests to make before exiting. (-1 used with --traversal=pattern means exit when out of new patterns)")
parser.add_option("--stop-on-error", default=False, action="store_true",
help="Stop after the first HTTP error (response code 400 or greater).")
parser.add_option("-T", "--timeout", default=30, type=int,
help="Set the network timeout in seconds. 0 means no timeout.")
parser.add_option("-P", "--profile", default=False, action="store_true",
help="Print the time to download each resource, and a summary of the %d slowest at the end." % PROFILE_REPORT_SIZE)
parser.add_option("--stats", default=False, action="store_true",
help="Print a summary of traversal patterns, if --traversal=pattern")
parser.add_option("-v", "--version", default=False, action="store_true",
help="Print version information and exit.")
return parser
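# Illustrative invocation (a sketch; the installed console-script name is assumed
# to be `spydey`, matching the pkg_resources requirement used in main() below):
#
#   spydey -r -p --no-parent --traversal=breadth-first --wait=1 http://example.com/
#
# This crawls example.com recursively, fetches page requisites, never ascends
# above the start URL, and sleeps one second between requests.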
def main():
"""
Many command-line options were deliberately copied from wget.
"""
parser = get_optparser()
(options, args) = parser.parse_args()
loglevel = getattr(logging, options.loglevel.upper(), logging.INFO)
if options.version:
# Hopefully this can't find a different installed version?
import pkg_resources
requirement = pkg_resources.Requirement.parse('spydey')
me = pkg_resources.working_set.find(requirement)
print me.project_name, me.version
return
if len(args) != 1:
parser.error("incorrect number of arguments")
url = args.pop(0)
spider = Spider(url, options)
if options.transient_log and fab:
import fabulous.logs
fabulous.logs.basicConfig(level=loglevel)
else:
logging.basicConfig(level=loglevel)
return spider.crawl()
if __name__ == '__main__':
import sys
sys.exit(main())
|
# (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.compat.six import iteritems, string_types
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.module_utils._text import to_native
from ansible.parsing.mod_args import ModuleArgsParser
from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleMapping, AnsibleUnicode
from ansible.plugins import lookup_loader
from ansible.playbook.attribute import FieldAttribute
from ansible.playbook.base import Base
from ansible.playbook.become import Become
from ansible.playbook.block import Block
from ansible.playbook.conditional import Conditional
from ansible.playbook.loop_control import LoopControl
from ansible.playbook.role import Role
from ansible.playbook.taggable import Taggable
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
__all__ = ['Task']
class Task(Base, Conditional, Taggable, Become):
"""
A task is a language feature that represents a call to a module, with given arguments and other parameters.
A handler is a subclass of a task.
Usage:
Task.load(datastructure) -> Task
Task.something(...)
"""
# =================================================================================
# ATTRIBUTES
# load_<attribute_name> and
# validate_<attribute_name>
# will be used if defined
# might be possible to define others
_args = FieldAttribute(isa='dict', default=dict())
_action = FieldAttribute(isa='string')
_any_errors_fatal = FieldAttribute(isa='bool')
_async = FieldAttribute(isa='int', default=0)
_changed_when = FieldAttribute(isa='list', default=[])
_delay = FieldAttribute(isa='int', default=5)
_delegate_to = FieldAttribute(isa='string')
_delegate_facts = FieldAttribute(isa='bool', default=False)
_failed_when = FieldAttribute(isa='list', default=[])
_loop = FieldAttribute(isa='string', private=True, inherit=False)
_loop_args = FieldAttribute(isa='list', private=True, inherit=False)
_loop_control = FieldAttribute(isa='class', class_type=LoopControl, inherit=False)
_name = FieldAttribute(isa='string', default='')
_notify = FieldAttribute(isa='list')
_poll = FieldAttribute(isa='int')
_register = FieldAttribute(isa='string')
_retries = FieldAttribute(isa='int')
_until = FieldAttribute(isa='list', default=[])
def __init__(self, block=None, role=None, task_include=None):
''' constructs a task; without the Task.load classmethod, it will be pretty blank '''
self._role = role
self._parent = None
if task_include:
self._parent = task_include
else:
self._parent = block
super(Task, self).__init__()
def get_path(self):
''' return the absolute path of the task with its line number '''
path = ""
if hasattr(self, '_ds') and hasattr(self._ds, '_data_source') and hasattr(self._ds, '_line_number'):
path = "%s:%s" % (self._ds._data_source, self._ds._line_number)
return path
def get_name(self):
''' return the name of the task '''
if self._role and self.name and ("%s : " % self._role._role_name) not in self.name:
return "%s : %s" % (self._role.get_name(), self.name)
elif self.name:
return self.name
else:
if self._role:
return "%s : %s" % (self._role.get_name(), self.action)
else:
return "%s" % (self.action,)
def _merge_kv(self, ds):
if ds is None:
return ""
elif isinstance(ds, string_types):
return ds
elif isinstance(ds, dict):
buf = ""
for (k,v) in iteritems(ds):
if k.startswith('_'):
continue
buf = buf + "%s=%s " % (k,v)
buf = buf.strip()
return buf
@staticmethod
def load(data, block=None, role=None, task_include=None, variable_manager=None, loader=None):
t = Task(block=block, role=role, task_include=task_include)
return t.load_data(data, variable_manager=variable_manager, loader=loader)
def __repr__(self):
''' returns a human readable representation of the task '''
if self.get_name() == 'meta':
return "TASK: meta (%s)" % self.args['_raw_params']
else:
return "TASK: %s" % self.get_name()
def _preprocess_loop(self, ds, new_ds, k, v):
''' take a lookup plugin name and store it correctly '''
loop_name = k.replace("with_", "")
if new_ds.get('loop') is not None:
raise AnsibleError("duplicate loop in task: %s" % loop_name, obj=ds)
if v is None:
raise AnsibleError("you must specify a value when using %s" % k, obj=ds)
new_ds['loop'] = loop_name
new_ds['loop_args'] = v
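# Example of the translation performed above (a sketch): a task containing
#
#   with_items: ['a', 'b']
#
# ends up in the cleaned datastructure as loop='items' and loop_args=['a', 'b'],
# because the 'with_' prefix is stripped to obtain the lookup plugin name.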
def preprocess_data(self, ds):
'''
tasks have especially complex arguments, so they need pre-processing.
keep it short.
'''
assert isinstance(ds, dict)
# the new, cleaned datastructure, which will have legacy
# items reduced to a standard structure suitable for the
# attributes of the task class
new_ds = AnsibleMapping()
if isinstance(ds, AnsibleBaseYAMLObject):
new_ds.ansible_pos = ds.ansible_pos
# use the args parsing class to determine the action, args,
# and the delegate_to value from the various possible forms
# supported as legacy
args_parser = ModuleArgsParser(task_ds=ds)
try:
(action, args, delegate_to) = args_parser.parse()
except AnsibleParserError as e:
raise AnsibleParserError(to_native(e), obj=ds)
# the command/shell/script modules used to support the `cmd` arg,
# which corresponds to what we now call _raw_params, so move that
# value over to _raw_params (assuming it is empty)
if action in ('command', 'shell', 'script'):
if 'cmd' in args:
if args.get('_raw_params', '') != '':
raise AnsibleError("The 'cmd' argument cannot be used when other raw parameters are specified."
" Please put everything in one or the other place.", obj=ds)
args['_raw_params'] = args.pop('cmd')
new_ds['action'] = action
new_ds['args'] = args
new_ds['delegate_to'] = delegate_to
# we handle any 'vars' specified in the ds here, as we may
# be adding things to them below (special handling for includes).
# When that deprecated feature is removed, this can be too.
if 'vars' in ds:
# _load_vars is defined in Base, and is used to load a dictionary
# or list of dictionaries in a standard way
new_ds['vars'] = self._load_vars(None, ds.get('vars'))
else:
new_ds['vars'] = dict()
for (k,v) in iteritems(ds):
if k in ('action', 'local_action', 'args', 'delegate_to') or k == action or k == 'shell':
# we don't want to re-assign these values, which were
# determined by the ModuleArgsParser() above
continue
elif k.replace("with_", "") in lookup_loader:
self._preprocess_loop(ds, new_ds, k, v)
else:
# pre-2.0 syntax allowed variables for include statements at the
# top level of the task, so we move those into the 'vars' dictionary
# here, and show a deprecation message as we will remove this at
# some point in the future.
if action == 'include' and k not in self._valid_attrs and k not in self.DEPRECATED_ATTRIBUTES:
display.deprecated("Specifying include variables at the top-level of the task is deprecated."
" Please see:\nhttp://docs.ansible.com/ansible/playbooks_roles.html#task-include-files-and-encouraging-reuse\n\n"
" for currently supported syntax regarding included files and variables")
new_ds['vars'][k] = v
else:
new_ds[k] = v
return super(Task, self).preprocess_data(new_ds)
def _load_loop_control(self, attr, ds):
if not isinstance(ds, dict):
raise AnsibleParserError(
"the `loop_control` value must be specified as a dictionary and cannot "
"be a variable itself (though it can contain variables)",
obj=ds,
)
return LoopControl.load(data=ds, variable_manager=self._variable_manager, loader=self._loader)
def post_validate(self, templar):
'''
Override of base class post_validate, to also do final validation on
the block and task include (if any) to which this task belongs.
'''
if self._parent:
self._parent.post_validate(templar)
super(Task, self).post_validate(templar)
def _post_validate_loop_args(self, attr, value, templar):
'''
Override post validation for the loop args field, which is templated
specially in the TaskExecutor class when evaluating loops.
'''
return value
def _post_validate_environment(self, attr, value, templar):
'''
Override post validation of vars on the play, as we don't want to
template these too early.
'''
if value is None:
return dict()
elif isinstance(value, list):
if len(value) == 1:
return templar.template(value[0], convert_bare=True)
else:
env = dict()
for env_item in value:
if isinstance(env_item, (string_types, AnsibleUnicode)) and env_item in templar._available_variables.keys():
env[env_item] = templar.template(env_item, convert_bare=False)
elif isinstance(value, dict):
env = dict()
for env_item in value:
if isinstance(env_item, (string_types, AnsibleUnicode)) and env_item in templar._available_variables.keys():
env[env_item] = templar.template(value[env_item], convert_bare=False)
# at this point it should be a simple string
return templar.template(value, convert_bare=True)
def _post_validate_changed_when(self, attr, value, templar):
'''
changed_when is evaluated after the execution of the task is complete,
and should not be templated during the regular post_validate step.
'''
return value
def _post_validate_failed_when(self, attr, value, templar):
'''
failed_when is evaluated after the execution of the task is complete,
and should not be templated during the regular post_validate step.
'''
return value
def _post_validate_until(self, attr, value, templar):
'''
until is evaluated after the execution of the task is complete,
and should not be templated during the regular post_validate step.
'''
return value
def get_vars(self):
all_vars = dict()
if self._parent:
all_vars.update(self._parent.get_vars())
all_vars.update(self.vars)
if 'tags' in all_vars:
del all_vars['tags']
if 'when' in all_vars:
del all_vars['when']
return all_vars
def get_include_params(self):
all_vars = dict()
if self._parent:
all_vars.update(self._parent.get_include_params())
if self.action in ('include', 'include_role'):
all_vars.update(self.vars)
return all_vars
def copy(self, exclude_parent=False, exclude_tasks=False):
new_me = super(Task, self).copy()
new_me._parent = None
if self._parent and not exclude_parent:
new_me._parent = self._parent.copy(exclude_tasks=exclude_tasks)
new_me._role = None
if self._role:
new_me._role = self._role
return new_me
def serialize(self):
data = super(Task, self).serialize()
if not self._squashed and not self._finalized:
if self._parent:
data['parent'] = self._parent.serialize()
data['parent_type'] = self._parent.__class__.__name__
if self._role:
data['role'] = self._role.serialize()
return data
def deserialize(self, data):
# import is here to avoid import loops
from ansible.playbook.task_include import TaskInclude
from ansible.playbook.handler_task_include import HandlerTaskInclude
parent_data = data.get('parent', None)
if parent_data:
parent_type = data.get('parent_type')
if parent_type == 'Block':
p = Block()
elif parent_type == 'TaskInclude':
p = TaskInclude()
elif parent_type == 'HandlerTaskInclude':
p = HandlerTaskInclude()
p.deserialize(parent_data)
self._parent = p
del data['parent']
role_data = data.get('role')
if role_data:
r = Role()
r.deserialize(role_data)
self._role = r
del data['role']
super(Task, self).deserialize(data)
def set_loader(self, loader):
'''
Sets the loader on this object and recursively on parent, child objects.
This is used primarily after the Task has been serialized/deserialized, which
does not preserve the loader.
'''
self._loader = loader
if self._parent:
self._parent.set_loader(loader)
def _get_parent_attribute(self, attr, extend=False):
'''
Generic logic to get the attribute or parent attribute for a task value.
'''
value = None
try:
value = self._attributes[attr]
if self._parent and (value is None or extend):
parent_value = getattr(self._parent, attr, None)
if extend:
value = self._extend_value(value, parent_value)
else:
value = parent_value
except KeyError:
pass
return value
def _get_attr_environment(self):
'''
Override for the 'environment' getattr fetcher, used from Base.
'''
return self._get_parent_attribute('environment', extend=True)
def get_dep_chain(self):
if self._parent:
return self._parent.get_dep_chain()
else:
return None
def get_search_path(self):
'''
Return the list of paths you should search for files, in order.
This follows role/playbook dependency chain.
'''
path_stack = []
dep_chain = self.get_dep_chain()
# inside role: add the dependency chain from current to dependant
if dep_chain:
path_stack.extend(reversed([x._role_path for x in dep_chain]))
# add path of task itself, unless it is already in the list
task_dir = os.path.dirname(self.get_path())
if task_dir not in path_stack:
path_stack.append(task_dir)
return path_stack
def all_parents_static(self):
if self._parent:
return self._parent.all_parents_static()
return True
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Humanizing functions for numbers."""
import math
import re
from fractions import Fraction
from . import compat
from .i18n import gettext as _
from .i18n import ngettext
from .i18n import ngettext_noop as NS_
from .i18n import pgettext as P_
from .i18n import thousands_separator
def ordinal(value):
"""Converts an integer to its ordinal as a string.
For example, 1 is "1st", 2 is "2nd", 3 is "3rd", etc. Works for any integer or
anything `int()` will turn into an integer. Any other value will have nothing
done to it.
Examples:
```pycon
>>> ordinal(1)
'1st'
>>> ordinal(1002)
'1002nd'
>>> ordinal(103)
'103rd'
>>> ordinal(4)
'4th'
>>> ordinal(12)
'12th'
>>> ordinal(101)
'101st'
>>> ordinal(111)
'111th'
>>> ordinal("something else")
'something else'
>>> ordinal(None) is None
True
```
Args:
value (int, str, float): Integer to convert.
Returns:
str: Ordinal string.
"""
try:
value = int(value)
except (TypeError, ValueError):
return value
t = (
P_("0", "th"),
P_("1", "st"),
P_("2", "nd"),
P_("3", "rd"),
P_("4", "th"),
P_("5", "th"),
P_("6", "th"),
P_("7", "th"),
P_("8", "th"),
P_("9", "th"),
)
if value % 100 in (11, 12, 13): # special case
return "%d%s" % (value, t[0])
return "%d%s" % (value, t[value % 10])
def intcomma(value, ndigits=None):
"""Converts an integer to a string containing commas every three digits.
For example, 3000 becomes "3,000" and 45000 becomes "45,000". To maintain some
compatibility with Django's `intcomma`, this function also accepts floats.
Examples:
```pycon
>>> intcomma(100)
'100'
>>> intcomma("1000")
'1,000'
>>> intcomma(1_000_000)
'1,000,000'
>>> intcomma(1_234_567.25)
'1,234,567.25'
>>> intcomma(1234.5454545, 2)
'1,234.55'
>>> intcomma(14308.40, 1)
'14,308.4'
>>> intcomma(None) is None
True
```
Args:
value (int, float, str): Integer or float to convert.
ndigits (int, None): Digits of precision for rounding after the decimal point.
Returns:
str: string containing commas every three digits.
"""
sep = thousands_separator()
try:
if isinstance(value, compat.string_types):
float(value.replace(sep, ""))
else:
float(value)
except (TypeError, ValueError):
return value
if ndigits:
orig = "{0:.{1}f}".format(value, ndigits)
else:
orig = str(value)
new = re.sub(r"^(-?\d+)(\d{3})", r"\g<1>%s\g<2>" % sep, orig)
if orig == new:
return new
else:
return intcomma(new)
powers = [10 ** x for x in (3, 6, 9, 12, 15, 18, 21, 24, 27, 30, 33, 100)]
human_powers = (
NS_("thousand", "thousand"),
NS_("million", "million"),
NS_("billion", "billion"),
NS_("trillion", "trillion"),
NS_("quadrillion", "quadrillion"),
NS_("quintillion", "quintillion"),
NS_("sextillion", "sextillion"),
NS_("septillion", "septillion"),
NS_("octillion", "octillion"),
NS_("nonillion", "nonillion"),
NS_("decillion", "decillion"),
NS_("googol", "googol"),
)
def intword(value, format="%.1f"):
"""Converts a large integer to a friendly text representation.
Works best for numbers over 1 million. For example, 1_000_000 becomes "1.0 million",
1200000 becomes "1.2 million" and "1_200_000_000" becomes "1.2 billion". Supports up
to decillion (33 digits) and googol (100 digits).
Examples:
```pycon
>>> intword("100")
'100'
>>> intword("12400")
'12.4 thousand'
>>> intword("1000000")
'1.0 million'
>>> intword(1_200_000_000)
'1.2 billion'
>>> intword(8100000000000000000000000000000000)
'8.1 decillion'
>>> intword(None) is None
True
>>> intword("1234000", "%0.3f")
'1.234 million'
```
Args:
value (int, float, str): Integer to convert.
format (str): To change the number of decimal or general format of the number
portion.
Returns:
str: Friendly text representation as a string, unless the value passed could not
be coaxed into an `int`.
"""
try:
value = int(value)
except (TypeError, ValueError):
return value
if value < powers[0]:
return str(value)
for ordinal, power in enumerate(powers[1:], 1):
if value < power:
chopped = value / float(powers[ordinal - 1])
if float(format % chopped) == float(10 ** 3):
chopped = value / float(powers[ordinal])
singular, plural = human_powers[ordinal]
return (
" ".join([format, ngettext(singular, plural, math.ceil(chopped))])
) % chopped
else:
singular, plural = human_powers[ordinal - 1]
return (
" ".join([format, ngettext(singular, plural, math.ceil(chopped))])
) % chopped
return str(value)
def apnumber(value):
"""Converts an integer to Associated Press style.
Examples:
```pycon
>>> apnumber(0)
'zero'
>>> apnumber(5)
'five'
>>> apnumber(10)
'10'
>>> apnumber("7")
'seven'
>>> apnumber("foo")
'foo'
>>> apnumber(None) is None
True
```
Args:
value (int, float, str): Integer to convert.
Returns:
str: For numbers 0-9, the number spelled out. Otherwise, the number. This always
returns a string unless the value was not `int`-able, unlike the Django filter.
"""
try:
value = int(value)
except (TypeError, ValueError):
return value
if not 0 <= value < 10:
return str(value)
return (
_("zero"),
_("one"),
_("two"),
_("three"),
_("four"),
_("five"),
_("six"),
_("seven"),
_("eight"),
_("nine"),
)[value]
def fractional(value):
"""Convert to fractional number.
There will be some cases where one might not want to show ugly decimal places for
floats and decimals.
This function returns a human-readable fractional number in the form of fractions
and mixed fractions.
Pass in a string, an integer, or a float, and this function returns:
* a string representation of a fraction
* or a whole number
* or a mixed fraction
Examples:
```pycon
>>> fractional(0.3)
'3/10'
>>> fractional(1.3)
'1 3/10'
>>> fractional(float(1/3))
'1/3'
>>> fractional(1)
'1'
>>> fractional("ten")
'ten'
>>> fractional(None) is None
True
```
Args:
value (int, float, str): Integer to convert.
Returns:
str: Fractional number as a string.
"""
try:
number = float(value)
except (TypeError, ValueError):
return value
whole_number = int(number)
frac = Fraction(number - whole_number).limit_denominator(1000)
numerator = frac._numerator
denominator = frac._denominator
if whole_number and not numerator and denominator == 1:
# this means that an integer was passed in
# (or variants of that integer like 1.0000)
return "%.0f" % whole_number
elif not whole_number:
return "{:.0f}/{:.0f}".format(numerator, denominator)
else:
return "{:.0f} {:.0f}/{:.0f}".format(whole_number, numerator, denominator)
def scientific(value, precision=2):
"""Return number in string scientific notation z.wq x 10ⁿ.
Examples:
```pycon
>>> scientific(float(0.3))
'3.00 x 10⁻¹'
>>> scientific(int(500))
'5.00 x 10²'
>>> scientific(-1000)
'1.00 x 10⁻³'
>>> scientific(1000, 1)
'1.0 x 10³'
>>> scientific(1000, 3)
'1.000 x 10³'
>>> scientific("99")
'9.90 x 10¹'
>>> scientific("foo")
'foo'
>>> scientific(None) is None
True
```
Args:
value (int, float, str): Input number.
precision (int): Number of decimal places for the first part of the number.
Returns:
str: Number in scientific notation z.wq x 10ⁿ.
"""
exponents = {
"0": "⁰",
"1": "¹",
"2": "²",
"3": "³",
"4": "⁴",
"5": "⁵",
"6": "⁶",
"7": "⁷",
"8": "⁸",
"9": "⁹",
"+": "⁺",
"-": "⁻",
}
negative = False
try:
if "-" in str(value):
value = str(value).replace("-", "")
negative = True
if isinstance(value, compat.string_types):
value = float(value)
fmt = "{:.%se}" % str(int(precision))
n = fmt.format(value)
except (ValueError, TypeError):
return value
part1, part2 = n.split("e")
if "-0" in part2:
part2 = part2.replace("-0", "-")
if "+0" in part2:
part2 = part2.replace("+0", "")
new_part2 = []
if negative:
new_part2.append(exponents["-"])
for char in part2:
new_part2.append(exponents[char])
final_str = part1 + " x 10" + "".join(new_part2)
return final_str
def clamp(value, format="{:}", floor=None, ceil=None, floor_token="<", ceil_token=">"):
"""Returns number with the specified format, clamped between floor and ceil.
If the number is larger than ceil or smaller than floor, then the respective limit
will be returned, formatted and prepended with a token specifying as such.
Examples:
```pycon
>>> clamp(123.456)
'123.456'
>>> clamp(0.0001, floor=0.01)
'<0.01'
>>> clamp(0.99, format="{:.0%}", ceil=0.99)
'99%'
>>> clamp(0.999, format="{:.0%}", ceil=0.99)
'>99%'
>>> clamp(1, format=intword, floor=1e6, floor_token="under ")
'under 1.0 million'
>>> clamp(None) is None
True
```
Args:
value (int, float): Input number.
format (str OR callable): Can either be a formatting string, or a callable
function that receives the value and returns a string.
floor (int, float): Smallest value before clamping.
ceil (int, float): Largest value before clamping.
floor_token (str): If value is smaller than floor, token will be prepended
to output.
ceil_token (str): If value is larger than ceil, token will be prepended
to output.
Returns:
str: Formatted number. The output is clamped between the indicated floor and
ceil. If the number is larger than ceil or smaller than floor, the output will
be prepended with a token indicating as such.
"""
if value is None:
return None
if floor is not None and value < floor:
value = floor
token = floor_token
elif ceil is not None and value > ceil:
value = ceil
token = ceil_token
else:
token = ""
if isinstance(format, compat.string_types):
return token + format.format(value)
elif callable(format):
return token + format(value)
else:
raise ValueError(
"Invalid format. Must be either a valid formatting string, or a function "
"that accepts value and returns a string."
)
|
import os
import sys
import tempfile
from fabric.api import run, sudo, env, local, hide, settings
from fabric.contrib.files import append, exists
from fabric.operations import put
from fabric.tasks import Task
from fab_deploy.functions import random_password
from fab_deploy.base import postgres as base_postgres
class RHMixin(object):
binary_path = '/usr/pgsql-9.1/bin/'
def _get_data_dir(self, db_version):
return os.path.join('/var/lib/pgsql', '%s' % db_version, 'data')
def _get_config_dir(self, db_version, data_dir):
return data_dir
def _install_package(self, db_version):
sudo("rpm -U --replacepkgs http://yum.postgresql.org/9.1/redhat/rhel-6-x86_64/pgdg-redhat91-9.1-5.noarch.rpm")
pk_version = db_version.replace('.', '')
sudo("yum -y install postgresql%s-server" % pk_version)
sudo("yum -y install postgresql%s-contrib" % pk_version)
postgres_conf = os.path.join(self._get_data_dir(db_version), 'postgresql.conf')
if not exists(postgres_conf, use_sudo=True):
sudo("service postgresql-%s initdb" % db_version)
def _restart_db_server(self, db_version):
sudo('service postgresql-%s restart' % db_version)
def _stop_db_server(self, db_version):
sudo('service postgresql-%s stop' % db_version)
def _start_db_server(self, db_version):
sudo('service postgresql-%s start' % db_version)
class PostgresInstall(RHMixin, base_postgres.PostgresInstall):
"""
Install postgresql on server.
This task gets executed inside other tasks, including
setup.db_server, setup.slave_db and setup.dev_server.
It installs the postgresql package and sets up the access policy in pg_hba.conf:
enable postgres access from localhost without a password;
enable all other user access from other machines with a password;
set up a few parameters related to streaming replication;
make the database server listen on all interfaces ('*');
create a database user with a password.
"""
name = 'master_setup'
db_version = '9.1'
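# Invocation sketch (hedged: the host string and the task namespace are
# hypothetical and depend on how this module is wired into the project's
# fabfile):
#
#   fab -H [email protected] master_setup
#
# which runs the RedHat-specific package install above and then the access
# policy setup inherited from fab_deploy.base.postgres.PostgresInstall.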
class SlaveSetup(RHMixin, base_postgres.SlaveSetup):
"""
Set up master-slave streaming replication: slave node
"""
name = 'slave_setup'
|
# -*- coding: utf-8 -*-
#The MIT License (MIT)
#
#Copyright (c) 2015,2018 Sami Salkosuo
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
#
#list-command
#
from ..utils.utils import *
from ..utils.functions import *
from .SuperCommand import *
from ..globals import *
from ..globals import GlobalVariables
class ListCommand(SuperCommand):
def __init__(self,cmd_handler):
super().__init__(cmd_handler)
def parseCommandArgs(self,userInputList):
cmd_parser = ThrowingArgumentParser(prog="list",description='Print all accounts or all that match given start of name.')
cmd_parser.add_argument('name', metavar='NAME', type=str, nargs='?',
help='Start of name.')
(self.cmd_args,self.help_text)=parseCommandArgs(cmd_parser,userInputList)
def execute(self):
loadAccounts()
arg=""
if self.cmd_args.name:
arg=self.cmd_args.name
rows=executeSelect([COLUMN_NAME,COLUMN_ID,COLUMN_URL,COLUMN_USERNAME,COLUMN_EMAIL,COLUMN_PASSWORD,COLUMN_COMMENT],arg)
printAccountRows(rows)
|
class SessionHelper:
def __init__(self,app):
self.app = app
def log_in(self, username, password):
wd = self.app.wd
self.app.navigation.open_home_page()
wd.find_element_by_id("content").click()
wd.find_element_by_name("user").click()
wd.find_element_by_name("user").clear()
wd.find_element_by_name("user").send_keys(username)
wd.find_element_by_name("pass").click()
wd.find_element_by_name("pass").clear()
wd.find_element_by_name("pass").send_keys(password)
wd.find_element_by_css_selector('input[type="submit"]').click()
def log_out(self):
wd = self.app.wd
wd.find_element_by_link_text("Logout").click()
def ensure_log_out(self):
if self.is_logged_in():
self.log_out()
def is_logged_in(self):
wd = self.app.wd
return len(wd.find_elements_by_link_text("Logout")) > 0
def ensure_log_in(self, username, password):
if self.is_logged_in():
if self.is_logged_in_as(username):
return
else:
self.log_out()
self.log_in(username,password)
def is_logged_in_as(self,username):
# wd = self.app.wd
return self.get_logged_user() == username
def get_logged_user(self):
wd = self.app.wd
return wd.find_element_by_xpath("//form/b").text[1:-1]
|
#!/usr/bin/python -d
#
# Copyright (C) 2016 Reinhard Fleissner
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
"""Create mesh"""
__author__="Reinhard Fleissner"
__date__ ="$29.08.2014 18:21:40$"
import numpy as np
import macro as mc
class CalcMesh(object):
"""Create mesh.
"""
def __init__( self, nodRaw, proRaw, nodReach, nnC, length,
nodLBL=None, nodRBL=None, nodLBO=None, nodRBO=None, nnL=None, nnR=None):
"""Constructor for load case.
Keyword arguments:
nodRaw -- raw nodes
proRaw -- raw profiles
...
"""
# inputs
self.nodRaw = nodRaw
self.proRaw = proRaw
self.nodReach = nodReach
self.nodLBL = nodLBL
self.nodRBL = nodRBL
self.nodLBO = nodLBO
self.nodRBO = nodRBO
self.nnL = nnL
if nnL is not None:
self.nnL = int(nnL)
self.nnC = int(nnC)
self.nnR = nnR
if nnR is not None:
self.nnR = int(nnR)
self.length = length
# results
self.nnS = {}
self.LBLInterp = {}
self.RBLInterp = {}
self.LBOInterp = {}
self.RBOInterp = {}
self.proArranged = {}
self.nodInterp = {}
self.proInterp = {}
self.nodMesh = {}
self.proMesh = {}
self.mesh = {}
def determineFlowDirection(self):
profilecounter = 1
direction = {}
# loop over reach nodes
for nID_reach in range(len(self.nodReach)):
nID_reach += 1
# determine flow direction of reach segments
if nID_reach <= len(self.nodReach)-1:
xa = self.nodReach[nID_reach][0]
xe = self.nodReach[nID_reach+1][0]
ya = self.nodReach[nID_reach][1]
ye = self.nodReach[nID_reach+1][1]
dx = xa - xe
dy = ya - ye
if dx >=0.0 and dy >= 0.0 and abs(dx) <= abs(dy):
direction[nID_reach] = 'S'
elif dx <=0.0 and dy >= 0.0 and abs(dx) <= abs(dy):
direction[nID_reach] = 'S'
elif dx <=0.0 and dy >= 0.0 and abs(dx) >= abs(dy):
direction[nID_reach] = 'E'
elif dx <=0.0 and dy <= 0.0 and abs(dx) >= abs(dy):
direction[nID_reach] = 'E'
elif dx <=0.0 and dy <= 0.0 and abs(dx) <= abs(dy):
direction[nID_reach] = 'N'
elif dx >=0.0 and dy <= 0.0 and abs(dx) <= abs(dy):
direction[nID_reach] = 'N'
elif dx >=0.0 and dy <= 0.0 and abs(dx) >= abs(dy):
direction[nID_reach] = 'W'
elif dx >=0.0 and dy >= 0.0 and abs(dx) >= abs(dy):
direction[nID_reach] = 'W'
else:
direction[nID_reach] = direction[nID_reach-1]
# determine closest profile node to current reach node
closestnode = mc.getClosestNode(self.nodReach[nID_reach], self.nodRaw.keys(), self.nodRaw)
# determine the profile that contains the closest profile node
for pID_raw in self.proRaw:
for nID_raw in range(len(self.proRaw[pID_raw])):
if closestnode == self.proRaw[pID_raw][nID_raw]:
startnode = self.proRaw[pID_raw][0]
endnode = self.proRaw[pID_raw][-1]
if direction[profilecounter] == 'N':
if self.nodRaw[startnode][0] > self.nodRaw[endnode][0]:
self.proRaw[pID_raw].reverse()
elif direction[profilecounter] == 'E':
if self.nodRaw[startnode][1] < self.nodRaw[endnode][1]:
self.proRaw[pID_raw].reverse()
elif direction[profilecounter] == 'S':
if self.nodRaw[startnode][0] < self.nodRaw[endnode][0]:
self.proRaw[pID_raw].reverse()
elif direction[profilecounter] == 'W':
if self.nodRaw[startnode][1] > self.nodRaw[endnode][1]:
self.proRaw[pID_raw].reverse()
self.proArranged[profilecounter] = self.proRaw[pID_raw]
profilecounter += 1
break
info = "\nFlow direction:\n"
for pID_Arranged in direction:
info += ' - Profile {0}:\t{1}\n'.format(pID_Arranged, direction[pID_Arranged])
return info
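# Worked example of the dx/dy classification above: a reach segment from
# (0, 0) to (3, 1) gives dx = -3 and dy = -1; both are <= 0 and abs(dx) >= abs(dy),
# so the segment is classified 'E' (flow towards increasing x).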
def normalizeProfiles(self):
def normalize(nID_a, nID_b, nodes):
nID_i = 0
nID_j = 0
up = False
if nID_a < nID_b:
nID_i = nID_a
nID_j = nID_b
up = True
else:
nID_i = nID_b
nID_j = nID_a
up = False
nodesTempNormalized = {}
for i in range(nID_j - nID_i + 1):
nID = 0
if up is True:
nID = nID_i + i
else:
nID = nID_j - i
nID = nID_i + i
nodesTempNormalized[nID] = nodes[nID][0:3]
return nodesTempNormalized
def getXYZ(_range, nodes, profiles, id):
x = []
y = []
z = []
for i in range(_range):
x.append(nodes[profiles[pID][i+id]][0])
y.append(nodes[profiles[pID][i+id]][1])
z.append(nodes[profiles[pID][i+id]][2])
return x, y, z
nodesNormalized = {}
startkey = 1
# loop over arranged profiles
for pID in self.proArranged:
nIDs = self.proArranged[pID]
if self.nnL is not None and self.nnR is None:
leftNode = mc.getClosestNodeFromIntersection(self.nodLBL, self.proArranged[pID], self.nodRaw)
# left
tempNodesLeft = normalize(nIDs[0], nIDs[leftNode], self.nodRaw)
nodesNormalized.update(tempNodesLeft)
x, y, z = getXYZ(leftNode+1, nodesNormalized, self.proArranged, 0)
tempNodesLeftInterp = mc.interpolateXYZ(x, y, z, self.nnL, startkey)
startkey += len(tempNodesLeftInterp)-1
self.nodInterp.update(tempNodesLeftInterp)
# channel
tempNodesChannel = normalize(nIDs[leftNode], nIDs[-1], self.nodRaw)
nodesNormalized.update(tempNodesChannel)
x, y, z = getXYZ(len(self.proArranged[pID])-leftNode, nodesNormalized, self.proArranged, leftNode)
tempNodesChannelInterp = mc.interpolateXYZ(x, y, z, self.nnC, startkey)
startkey += len(tempNodesChannelInterp)
self.nodInterp.update(tempNodesChannelInterp)
self.proInterp[pID] = range(startkey-self.nnL-self.nnC+1, startkey)
elif self.nnL is None and self.nnR is not None:
rightNode = mc.getClosestNodeFromIntersection(self.nodRBL, self.proArranged[pID], self.nodRaw)
# channel
tempNodesChannel = normalize(nIDs[0], nIDs[rightNode], self.nodRaw)
nodesNormalized.update(tempNodesChannel)
x, y, z = getXYZ(rightNode+1, nodesNormalized, self.proArranged, 0)
tempNodesChannelInterp = mc.interpolateXYZ(x, y, z, self.nnC, startkey)
startkey += len(tempNodesChannelInterp)-1
self.nodInterp.update(tempNodesChannelInterp)
# right
tempNodesRight = normalize(nIDs[rightNode], nIDs[-1], self.nodRaw)
nodesNormalized.update(tempNodesRight)
x, y, z = getXYZ(len(self.proArranged[pID])-rightNode, nodesNormalized, self.proArranged, rightNode)
tempNodesRightInterp = mc.interpolateXYZ(x, y, z, self.nnR, startkey)
startkey += len(tempNodesRightInterp)
self.nodInterp.update(tempNodesRightInterp)
self.proInterp[pID] = range(startkey-self.nnC-self.nnR+1, startkey)
elif self.nnL is not None and self.nnR is not None:
leftNode = mc.getClosestNodeFromIntersection(self.nodLBL, self.proArranged[pID], self.nodRaw)
rightNode = mc.getClosestNodeFromIntersection(self.nodRBL, self.proArranged[pID], self.nodRaw)
# left
tempNodesLeft = normalize(nIDs[0], nIDs[leftNode], self.nodRaw)
nodesNormalized.update(tempNodesLeft)
x, y, z = getXYZ(leftNode+1, nodesNormalized, self.proArranged, 0)
tempNodesLeftInterp = mc.interpolateXYZ(x, y, z, self.nnL, startkey)
startkey += len(tempNodesLeftInterp)-1
self.nodInterp.update(tempNodesLeftInterp)
# channel
tempNodesChannel = normalize(nIDs[leftNode], nIDs[rightNode], self.nodRaw)
nodesNormalized.update(tempNodesChannel)
x, y, z = getXYZ(rightNode-leftNode+1, nodesNormalized, self.proArranged, leftNode)
tempNodesChannelInterp = mc.interpolateXYZ(x, y, z, self.nnC, startkey)
startkey += len(tempNodesChannelInterp)-1
self.nodInterp.update(tempNodesChannelInterp)
# right
tempNodesRight = normalize(nIDs[rightNode], nIDs[-1], self.nodRaw)
nodesNormalized.update(tempNodesRight)
x, y, z = getXYZ(len(self.proArranged[pID])-rightNode, nodesNormalized, self.proArranged, rightNode)
tempNodesRightInterp = mc.interpolateXYZ(x, y, z, self.nnR, startkey)
startkey += len(tempNodesRightInterp)
self.nodInterp.update(tempNodesRightInterp)
self.proInterp[pID] = range(startkey-self.nnL-self.nnC-self.nnR+2, startkey)
else:
# channel
tempNodesChannel = normalize(nIDs[0], nIDs[-1], self.nodRaw)
nodesNormalized.update(tempNodesChannel)
x, y, z = getXYZ(len(self.proArranged[pID]), nodesNormalized, self.proArranged, 0)
tempNodesChannelInterp = mc.interpolateXYZ(x, y, z, self.nnC, startkey)
startkey += len(tempNodesChannelInterp)
self.nodInterp.update(tempNodesChannelInterp)
self.proInterp[pID] = range(startkey-self.nnC, startkey)
return "\nProfiles normalized."
def getNumberOfSegmentNodes(self, node_i, node_j, length):
tempLength = np.linalg.norm(np.subtract(node_j, node_i))
num = int(tempLength / length)+1
if num < 2:
return 2
else:
return num
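# Worked example for the node count above: a segment of length 10.0 with a target
# spacing of 3.0 yields int(10.0 / 3.0) + 1 = 4 nodes; segments shorter than the
# spacing are clamped to the minimum of 2 nodes.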
def interpolateChannel(self):
nodecounter = 0
profilecounter = 0
if self.nodLBO is None:
self.nodLBO = self.getNodesLeft(self.nodInterp, self.proInterp)
if self.nodRBO is None:
self.nodRBO = self.getNodesRight(self.nodInterp, self.proInterp)
for i in range(len(self.proInterp)-1):
pID = i+1
tempLeftBreaklineInterp = {}
tempRightBreaklineInterp = {}
tempLeftBoundaryInterp = {}
tempRightBoundaryInterp = {}
tempLeftBoundary = {}
tempLeftBreakline = {}
tempRightBreakline = {}
tempRightBoundary = {}
id_i = 0
id_j = 0
if self.nnL is None:
id_i = 0
id_j = self.nnC-1
else:
id_i = self.nnL-1
id_j = self.nnL+self.nnC-2
if self.nnR is None:
id_j = 0
else:
id_j = self.nnR-1
if self.nnL is None and self.nnR is None:
id_i = 0
id_j = -1
nodeCount = self.getNumberOfSegmentNodes(self.nodReach[pID], self.nodReach[pID+1], self.length)
self.nnS[pID] = nodeCount
tempLeftBoundary = mc.getPartOfNodeString(self.nodLBO,
self.nodInterp[self.proInterp[pID][0]],
self.nodInterp[self.proInterp[pID+1][0]]
)
tempRightBoundary = mc.getPartOfNodeString(self.nodRBO,
self.nodInterp[self.proInterp[pID][-1]],
self.nodInterp[self.proInterp[pID+1][-1]]
)
# left boundary
zVec = []
if len(tempLeftBoundary[1]) == 2:
# boundary is *.i2s, interpolate z values linear between profiles
zVec = np.linspace(self.nodInterp[self.proInterp[pID][0]][2], self.nodInterp[self.proInterp[pID+1][0]][2], num=nodeCount)
tempLeftBoundaryInterp = mc.interpolateNodeString2d(tempLeftBoundary, nodeCount)
for key in tempLeftBoundaryInterp:
tempLeftBoundaryInterp[key][2] = zVec[key-1]
else:
# boundary is *.i3s
tempLeftBoundaryInterp = mc.interpolateNodeString3d(tempLeftBoundary, nodeCount)
# right boundary
zVec = []
if len(tempRightBoundary[1]) == 2:
zVec = np.linspace(self.nodInterp[self.proInterp[pID][-1]][2], self.nodInterp[self.proInterp[pID+1][-1]][2], num=nodeCount)
tempRightBoundaryInterp = mc.interpolateNodeString2d(tempRightBoundary, nodeCount)
for key in tempRightBoundaryInterp:
tempRightBoundaryInterp[key][2] = zVec[key-1]
else:
tempRightBoundaryInterp = mc.interpolateNodeString3d(tempRightBoundary, nodeCount)
# apply temporary interpolated left boundary (between two profiles) to total left boundary
for nID in range(len(tempLeftBoundaryInterp)-1):
self.LBOInterp[len(self.LBOInterp)+1] = tempLeftBoundaryInterp[nID+1]
if pID == len(self.proInterp)-1:
self.LBOInterp[len(self.LBOInterp)+1] = tempLeftBoundaryInterp[len(tempLeftBoundaryInterp)]
# apply temporary interpolated right boundary (between two profiles) to total right boundary
for nID in range(len(tempRightBoundaryInterp)-1):
self.RBOInterp[len(self.RBOInterp)+1] = tempRightBoundaryInterp[nID+1]
if pID == len(self.proInterp)-1:
self.RBOInterp[len(self.RBOInterp)+1] = tempRightBoundaryInterp[len(tempRightBoundaryInterp)]
if self.nnL is None and self.nnR is None:
pass
else:
if self.nnL is not None:
tempLeftBreakline = mc.getPartOfNodeString(self.nodLBL,
self.nodInterp[self.proInterp[pID][id_i]],
self.nodInterp[self.proInterp[pID+1][id_i]]
)
zVec = []
if len(tempLeftBreakline[1]) == 2:
zVec = np.linspace(self.nodInterp[self.proInterp[pID][id_i]][2], self.nodInterp[self.proInterp[pID+1][id_i]][2], num=nodeCount)
tempLeftBreaklineInterp = mc.interpolateNodeString2d(tempLeftBreakline, nodeCount)
for key in tempLeftBreaklineInterp:
tempLeftBreaklineInterp[key][2] = zVec[key-1]
else:
tempLeftBreaklineInterp = mc.interpolateNodeString3d(tempLeftBreakline, nodeCount)
for nID in range(len(tempLeftBreaklineInterp)-1):
self.LBLInterp[len(self.LBLInterp)+1] = tempLeftBreaklineInterp[nID+1]
if pID == len(self.proInterp)-1:
self.LBLInterp[len(self.LBLInterp)+1] = tempLeftBreaklineInterp[len(tempLeftBreaklineInterp)]
if self.nnR is not None:
tempRightBreakline = mc.getPartOfNodeString(self.nodRBL,
self.nodInterp[self.proInterp[pID][id_j]],
self.nodInterp[self.proInterp[pID+1][id_j]]
)
zVec = []
if len(tempRightBreakline[1]) == 2:
zVec = np.linspace(self.nodInterp[self.proInterp[pID][id_j]][2], self.nodInterp[self.proInterp[pID+1][id_j]][2], num=nodeCount)
tempRightBreaklineInterp = mc.interpolateNodeString2d(tempRightBreakline, nodeCount)
for key in tempRightBreaklineInterp:
tempRightBreaklineInterp[key][2] = zVec[key-1]
else:
tempRightBreaklineInterp = mc.interpolateNodeString3d(tempRightBreakline, nodeCount)
for nID in range(len(tempRightBreaklineInterp)-1):
self.RBLInterp[len(self.RBLInterp)+1] = tempRightBreaklineInterp[nID+1]
if pID == len(self.proInterp)-1:
self.RBLInterp[len(self.RBLInterp)+1] = tempRightBreaklineInterp[len(tempRightBreaklineInterp)]
# interpolate channel from left boundary over left and right breakline to right boundary
elementsInSegment = 0
if i == (len(self.proInterp)-2):
elementsInSegment = len(tempLeftBoundaryInterp)
else:
elementsInSegment = len(tempLeftBoundaryInterp)-1
## ToDo: interpolate channel depending on element width
for j in range(elementsInSegment):
nID = j+1
xx = []
yy = []
if self.nnL is not None and self.nnR is None:
tempLeft = {}
tempLeft[1] = tempLeftBoundaryInterp[nID]
tempLeft[2] = tempLeftBreaklineInterp[nID]
nodesInterpLeft = mc.interpolateNodeString2d(tempLeft, self.nnL)
xleft, yleft = mc.getXY(nodesInterpLeft)
tempChannel = {}
tempChannel[1] = tempLeftBreaklineInterp[nID]
tempChannel[2] = tempRightBoundaryInterp[nID]
nodesInterpChannel = mc.interpolateNodeString2d(tempChannel, self.nnC)
xchannel, ychannel = mc.getXY(nodesInterpChannel)
xx = np.concatenate([xleft[0:-1], xchannel])
yy = np.concatenate([yleft[0:-1], ychannel])
elif self.nnL is None and self.nnR is not None:
tempChannel = {}
tempChannel[1] = tempLeftBoundaryInterp[nID]
tempChannel[2] = tempRightBreaklineInterp[nID]
nodesInterpChannel = mc.interpolateNodeString2d(tempChannel, self.nnC)
xchannel, ychannel = mc.getXY(nodesInterpChannel)
tempRight = {}
tempRight[1] = tempRightBreaklineInterp[nID]
tempRight[2] = tempRightBoundaryInterp[nID]
nodesInterpRight = mc.interpolateNodeString2d(tempRight, self.nnR)
xright, yright = mc.getXY(nodesInterpRight)
xx = np.concatenate([xchannel[0:-1], xright])
yy = np.concatenate([ychannel[0:-1], yright])
elif self.nnL is not None and self.nnR is not None:
tempLeft = {}
tempLeft[1] = tempLeftBoundaryInterp[nID]
tempLeft[2] = tempLeftBreaklineInterp[nID]
nodesInterpLeft = mc.interpolateNodeString2d(tempLeft, self.nnL)
xleft, yleft = mc.getXY(nodesInterpLeft)
tempChannel = {}
tempChannel[1] = tempLeftBreaklineInterp[nID]
tempChannel[2] = tempRightBreaklineInterp[nID]
nodesInterpChannel = mc.interpolateNodeString2d(tempChannel, self.nnC)
xchannel, ychannel = mc.getXY(nodesInterpChannel)
tempRight = {}
tempRight[1] = tempRightBreaklineInterp[nID]
tempRight[2] = tempRightBoundaryInterp[nID]
nodesInterpRight = mc.interpolateNodeString2d(tempRight, self.nnR)
xright, yright = mc.getXY(nodesInterpRight)
xx = np.concatenate([xleft[0:-1], xchannel[0:-1], xright])
yy = np.concatenate([yleft[0:-1], ychannel[0:-1], yright])
else:
temp = {}
temp[1] = tempLeftBoundaryInterp[nID]
temp[2] = tempRightBoundaryInterp[nID]
tempNodesInterp = mc.interpolateNodeString2d(temp, self.nnC)
xx, yy = mc.getXY(tempNodesInterp)
zz = np.zeros(len(xx))
nodes = mc.getNodeString3d(xx, yy, zz, nodecounter+1)
self.nodMesh.update(nodes)
profilecounter = profilecounter + 1
numberOfNodes = 0
if self.nnL is None and self.nnR is not None:
numberOfNodes = self.nnC+self.nnR-1
elif self.nnL is not None and self.nnR is None:
numberOfNodes = self.nnL+self.nnC-1
elif self.nnL is None and self.nnR is None:
numberOfNodes = self.nnC
else:
numberOfNodes = self.nnL+self.nnC+self.nnR-2
self.proMesh[profilecounter] = np.arange(nodecounter+1, nodecounter+numberOfNodes+1, 1)
nodecounter = nodecounter + len(xx)
return "\nChannel nodes interpolated."
def interpolateElevation(self):
for i in range(len(self.proInterp[1])):
zVec = []
for j in range(len(self.proInterp)-1):
pID = j+1
nNodes = self.nnS[pID]
zi = self.nodInterp[self.proInterp[pID][i]][2]
zj = self.nodInterp[self.proInterp[pID+1][i]][2]
zVec = np.append(zVec, np.linspace(zi, zj, num=nNodes)[0:-1])
zVec = np.append(zVec, zj)
for k in range(len(zVec)):
id = k*len(self.proMesh[1])+1+i
self.nodMesh[id][2] = zVec[k]
return "\nElevation interpolated."
def interpolateElevationCorrection(self):
for pID in self.proMesh:
dzVec = list()
if self.nnL is not None and self.nnR is None:
dzLi = self.LBOInterp[pID][2] - self.nodMesh[(pID-1)*len(self.proMesh[1])+1][2]
dzLj = self.LBLInterp[pID][2] - self.nodMesh[(pID-1)*len(self.proMesh[1])+self.nnL][2]
dzLvec = dzLi + np.linspace(0.0, dzLj - dzLi, num=self.nnL)
dzCi = dzLj
dzCj = self.RBOInterp[pID][2] - self.nodMesh[(pID-1)*len(self.proMesh[1])+self.nnL+self.nnC-1][2]
dzCvec = dzCi + np.linspace(0.0, dzCj - dzCi, num=self.nnC)
dzVec = np.concatenate([dzLvec[0:-1], dzCvec])
for i in range(len(dzVec)):
id = (pID-1)*(self.nnL+self.nnC-1)+i+1
self.nodMesh[id][2] += dzVec[i]
elif self.nnL is None and self.nnR is not None:
dzCi = self.LBOInterp[pID][2] - self.nodMesh[(pID-1)*len(self.proMesh[1])+1][2]
dzCj = self.RBLInterp[pID][2] - self.nodMesh[(pID-1)*len(self.proMesh[1])+self.nnC][2]
dzCvec = dzCi + np.linspace(0.0, dzCj - dzCi, num=self.nnC)
dzRi = dzCj
dzRj = self.RBOInterp[pID][2] - self.nodMesh[(pID-1)*len(self.proMesh[1])+self.nnC+self.nnR-1][2]
dzRvec = dzRi + np.linspace(0.0, dzRj - dzRi, num=self.nnR)
dzVec = np.concatenate([dzCvec[0:-1], dzRvec])
for i in range(len(dzVec)):
id = (pID-1)*(self.nnC+self.nnR-1)+i+1
self.nodMesh[id][2] += dzVec[i]
elif self.nnL is not None and self.nnR is not None:
dzLi = self.LBOInterp[pID][2] - self.nodMesh[(pID-1)*len(self.proMesh[1])+1][2]
dzLj = self.LBLInterp[pID][2] - self.nodMesh[(pID-1)*len(self.proMesh[1])+self.nnL][2]
dzLvec = dzLi + np.linspace(0.0, dzLj - dzLi, num=self.nnL)
dzCi = dzLj
dzCj = self.RBLInterp[pID][2] - self.nodMesh[(pID-1)*len(self.proMesh[1])+self.nnL+self.nnC-1][2]
dzCvec = dzCi + np.linspace(0.0, dzCj - dzCi, num=self.nnC)
dzRi = dzCj
dzRj = self.RBOInterp[pID][2] - self.nodMesh[(pID-1)*len(self.proMesh[1])+self.nnL+self.nnC+self.nnR-2][2]
dzRvec = dzRi + np.linspace(0.0, dzRj - dzRi, num=self.nnR)
dzVec = np.concatenate([dzLvec[0:-1], dzCvec[0:-1], dzRvec])
for i in range(len(dzVec)):
id = (pID-1)*(self.nnL+self.nnC+self.nnR-2)+i+1
self.nodMesh[id][2] += dzVec[i]
else:
dzCi = self.LBOInterp[pID][2] - self.nodMesh[(pID-1)*len(self.proMesh[1])+1][2]
dzCj = self.RBOInterp[pID][2] - self.nodMesh[(pID-1)*len(self.proMesh[1])+self.nnC][2]
dzVec = dzCi + np.linspace(0.0, dzCj - dzCi, num=self.nnC)
for i in range(len(dzVec)):
id = (pID-1)*(self.nnC)+i+1
self.nodMesh[id][2] += dzVec[i]
return "\nElevation correction interpolated."
def createMesh(self):
info = "\n\nChannel mesh:\n"
eID = 1
for pID in range(len(self.proMesh)-1):
pID += 1
for nID in range(len(self.proMesh[pID])-1):
a1 = self.proMesh[pID][nID]
a2 = self.proMesh[pID][nID+1]
b1 = self.proMesh[pID+1][nID]
b2 = self.proMesh[pID+1][nID+1]
d1 = np.linalg.norm(np.subtract(self.nodMesh[b1], self.nodMesh[a2]))
d2 = np.linalg.norm(np.subtract(self.nodMesh[b2], self.nodMesh[a1]))
if d1 < d2:
self.mesh[eID] = [a1, a2, b1]
eID += 1
self.mesh[eID] = [b1, a2, b2]
eID += 1
else:
self.mesh[eID] = [b1, a1, b2]
eID += 1
self.mesh[eID] = [b2, a1, a2]
eID += 1
info += " - Nodes:\t{0}\n".format(len(self.nodMesh))
info += " - Elements:\t{0}\n".format(eID-1)
return info
def getNodesRow(self, nodes, profiles, row):
nodesRow = {}
for i in range(len(profiles)):
pID = i+1
nodesRow[pID] = nodes[profiles[pID][row]]
return nodesRow
def getNodesLeft(self, nodes, profiles):
nodesLeft = {}
for i in range(len(profiles)):
pID = i+1
nodesLeft[pID] = nodes[profiles[pID][0]]
return nodesLeft
def getNodesRight(self, nodes, profiles):
nodesRight = {}
for i in range(len(profiles)):
pID = i+1
nodesRight[pID] = nodes[profiles[pID][-1]]
return nodesRight
def getNodeIDsLeft(self, profiles):
nodes = []
for pID in range(len(profiles)):
nodes.append(profiles[pID+1][0])
return nodes
def getNodeIDsRight(self, profiles):
nodes = []
for pID in range(len(profiles)):
nodes.append(profiles[pID+1][-1])
return nodes
def getNodesOutline(self, profiles):
nodes = []
nodes.extend(self.getNodeIDsLeft(profiles))
nodes.extend(profiles[len(profiles)][1:])
right = self.getNodeIDsRight(profiles)[:]
right.reverse()
nodes.extend(right[1:])
up = profiles[1][:]
up = up[::-1]
nodes.extend(up[1:])
return nodes
|
#!/usr/bin/env python3
# _*_coding:utf-8_*_
import hashlib
import tornado.web
from tornado import gen
from admin.handler.baseHandler import BaseHandler
class AdminLoginHandler(BaseHandler):
def get(self, *args, **kwargs):
self.render("admin/sys_login.html")
@gen.coroutine
def post(self, *args, **kwargs):
username = self.get_argument("username", default="")
password = self.get_argument("password", default="")
if username != "" and password != "":
salt = hashlib.md5(username.encode('utf-8')).hexdigest()
hash_password = hashlib.sha256((password + salt).encode('utf-8')).hexdigest()
query = {"username": username, "password": hash_password}
show = {"_id": 0, "username": 1, "password": 1}
user = yield self.db.sys_user.find_one(query, show)
if user:
self.set_secure_cookie("user", username)
self.redirect("/admin")
else:
self.redirect("/login")
else:
self.redirect("/login")
class AdminLogoutHandler(BaseHandler):
@tornado.web.authenticated
def get(self, *args, **kwargs):
self.set_secure_cookie("user", "")
self.redirect("/login")
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# === tests.urls ----------------------------------------------------------===
# This file is part of django-pgpm. django-pgpm is copyright © 2012, RokuSigma
# Inc. and contributors. See AUTHORS and LICENSE for more details.
#
# django-pgpm is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# django-pgpm is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with django-pgpm. If not, see <http://www.gnu.org/licenses/>.
# ===----------------------------------------------------------------------===
# Import the default test urls provided by django_patterns.
from django_patterns.test.project.urls import *
# ===----------------------------------------------------------------------===
# End of File
# ===----------------------------------------------------------------------===
|
#!/usr/bin/python
#
# Urwid web (CGI/Asynchronous Javascript) display module
# Copyright (C) 2004-2007 Ian Ward
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Urwid web site: http://excess.org/urwid/
"""
Urwid web application display module
"""
import os
import sys
import signal
import random
import select
import socket
import glob
from urwid import util
_js_code = r"""
// Urwid web (CGI/Asynchronous Javascript) display module
// Copyright (C) 2004-2005 Ian Ward
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
//
// Urwid web site: http://excess.org/urwid/
colours = new Object();
colours = {
'0': "black",
'1': "#c00000",
'2': "green",
'3': "#804000",
'4': "#0000c0",
'5': "#c000c0",
'6': "teal",
'7': "silver",
'8': "gray",
'9': "#ff6060",
'A': "lime",
'B': "yellow",
'C': "#8080ff",
'D': "#ff40ff",
'E': "aqua",
'F': "white"
};
keycodes = new Object();
keycodes = {
8: "backspace", 9: "tab", 13: "enter", 27: "esc",
33: "page up", 34: "page down", 35: "end", 36: "home",
37: "left", 38: "up", 39: "right", 40: "down",
45: "insert", 46: "delete",
112: "f1", 113: "f2", 114: "f3", 115: "f4",
116: "f5", 117: "f6", 118: "f7", 119: "f8",
120: "f9", 121: "f10", 122: "f11", 123: "f12"
};
var conn = null;
var char_width = null;
var char_height = null;
var screen_x = null;
var screen_y = null;
var urwid_id = null;
var send_conn = null;
var send_queue_max = 32;
var send_queue = new Array(send_queue_max);
var send_queue_in = 0;
var send_queue_out = 0;
var check_font_delay = 1000;
var send_more_delay = 100;
var poll_again_delay = 500;
var document_location = null;
var update_method = "multipart";
var sending = false;
var lastkeydown = null;
function setup_connection() {
if (window.XMLHttpRequest) {
conn = new XMLHttpRequest();
} else if (window.ActiveXObject) {
conn = new ActiveXObject("Microsoft.XMLHTTP");
}
if (conn == null) {
set_status("Connection Failed");
alert( "Can't figure out how to send request." );
return;
}
try{
conn.multipart = true;
}catch(e){
update_method = "polling";
}
conn.onreadystatechange = handle_recv;
conn.open("POST", document_location, true);
conn.setRequestHeader("X-Urwid-Method",update_method);
conn.setRequestHeader("Content-type","text/plain");
conn.send("window resize " +screen_x+" "+screen_y+"\n");
}
function do_poll() {
if (urwid_id == null){
alert("that's unpossible!");
return;
}
if (window.XMLHttpRequest) {
conn = new XMLHttpRequest();
} else if (window.ActiveXObject) {
conn = new ActiveXObject("Microsoft.XMLHTTP");
}
conn.onreadystatechange = handle_recv;
conn.open("POST", document_location, true);
conn.setRequestHeader("X-Urwid-Method","polling");
conn.setRequestHeader("X-Urwid-ID",urwid_id);
conn.setRequestHeader("Content-type","text/plain");
conn.send("eh?");
}
function handle_recv() {
if( ! conn ){ return;}
if( conn.readyState != 4) {
return;
}
if( conn.status == 404 && urwid_id != null) {
set_status("Connection Closed");
return;
}
if( conn.status == 403 && update_method == "polling" ) {
set_status("Server Refused Connection");
alert("This server does not allow polling clients.\n\n" +
"Please use a web browser with multipart support " +
"such as Mozilla Firefox");
return;
}
if( conn.status == 503 ) {
set_status("Connection Failed");
alert("The server has reached its maximum number of "+
"connections.\n\nPlease try again later.");
return;
}
if( conn.status != 200) {
set_status("Connection Failed");
alert("Error from server: "+conn.statusText);
return;
}
if( urwid_id == null ){
urwid_id = conn.getResponseHeader("X-Urwid-ID");
if( send_queue_in != send_queue_out ){
// keys waiting
do_send();
}
if(update_method=="polling"){
set_status("Polling");
}else if(update_method=="multipart"){
set_status("Connected");
}
}
if( conn.responseText == "" ){
if(update_method=="polling"){
poll_again();
}
return; // keepalive
}
if( conn.responseText == "Z" ){
set_status("Connection Closed");
update_method = null;
return;
}
var text = document.getElementById('text');
var last_screen = Array(text.childNodes.length);
for( var i=0; i<text.childNodes.length; i++ ){
last_screen[i] = text.childNodes[i];
}
var frags = conn.responseText.split("\n");
var ln = document.createElement('span');
var k = 0;
for( var i=0; i<frags.length; i++ ){
var f = frags[i];
if( f == "" ){
var br = document.getElementById('br').cloneNode(true);
ln.appendChild( br );
if( text.childNodes.length > k ){
text.replaceChild(ln, text.childNodes[k]);
}else{
text.appendChild(ln);
}
k = k+1;
ln = document.createElement('span');
}else if( f.charAt(0) == "<" ){
line_number = parseInt(f.substr(1));
if( line_number == k ){
k = k +1;
continue;
}
var clone = last_screen[line_number].cloneNode(true);
if( text.childNodes.length > k ){
text.replaceChild(clone, text.childNodes[k]);
}else{
text.appendChild(clone);
}
k = k+1;
}else{
var span=make_span(f.substr(2),f.charAt(0),f.charAt(1));
ln.appendChild( span );
}
}
for( var i=k; i < text.childNodes.length; i++ ){
text.removeChild(last_screen[i]);
}
if(update_method=="polling"){
poll_again();
}
}
function poll_again(){
if(conn.status == 200){
setTimeout("do_poll();",poll_again_delay);
}
}
function load_web_display(){
if( document.documentURI ){
document_location = document.documentURI;
}else{
document_location = document.location;
}
document.onkeypress = body_keypress;
document.onkeydown = body_keydown;
document.onresize = body_resize;
body_resize();
send_queue_out = send_queue_in; // don't queue the first resize
set_status("Connecting");
setup_connection();
setTimeout("check_fontsize();",check_font_delay);
}
function set_status( status ){
var s = document.getElementById('status');
var t = document.createTextNode(status);
s.replaceChild(t, s.firstChild);
}
function make_span(s, fg, bg){
d = document.createElement('span');
d.style.backgroundColor = colours[bg];
d.style.color = colours[fg];
d.appendChild(document.createTextNode(s));
return d;
}
function body_keydown(e){
if (conn == null){
return;
}
if (!e) var e = window.event;
if (e.keyCode) code = e.keyCode;
else if (e.which) code = e.which;
var mod = "";
var key;
if( e.ctrlKey ){ mod = "ctrl " + mod; }
if( e.altKey || e.metaKey ){ mod = "meta " + mod; }
if( e.shiftKey && e.charCode == 0 ){ mod = "shift " + mod; }
key = keycodes[code];
if( key != undefined ){
lastkeydown = key;
send_key( mod + key );
stop_key_event(e);
return false;
}
}
function body_keypress(e){
if (conn == null){
return;
}
if (!e) var e = window.event;
if (e.keyCode) code = e.keyCode;
else if (e.which) code = e.which;
var mod = "";
var key;
if( e.ctrlKey ){ mod = "ctrl " + mod; }
if( e.altKey || e.metaKey ){ mod = "meta " + mod; }
if( e.shiftKey && e.charCode == 0 ){ mod = "shift " + mod; }
if( e.charCode != null && e.charCode != 0 ){
key = String.fromCharCode(e.charCode);
}else if( e.charCode == null ){
key = String.fromCharCode(code);
}else{
key = keycodes[code];
if( key == undefined || lastkeydown == key ){
lastkeydown = null;
stop_key_event(e);
return false;
}
}
send_key( mod + key );
stop_key_event(e);
return false;
}
function stop_key_event(e){
e.cancelBubble = true;
if( e.stopPropagation ){
e.stopPropagation();
}
if( e.preventDefault ){
e.preventDefault();
}
}
function send_key( key ){
if( (send_queue_in+1)%send_queue_max == send_queue_out ){
// buffer overrun
return;
}
send_queue[send_queue_in] = key;
send_queue_in = (send_queue_in+1)%send_queue_max;
if( urwid_id != null ){
        if (send_conn == undefined || send_conn.readyState != 4 ){
send_more();
return;
}
do_send();
}
}
function do_send() {
if( ! urwid_id ){ return; }
if( ! update_method ){ return; } // connection closed
if( send_queue_in == send_queue_out ){ return; }
if( sending ){
//var queue_delta = send_queue_in - send_queue_out;
//if( queue_delta < 0 ){ queue_delta += send_queue_max; }
//set_status("Sending (queued "+queue_delta+")");
return;
}
try{
sending = true;
//set_status("starting send");
if( send_conn == null ){
if (window.XMLHttpRequest) {
send_conn = new XMLHttpRequest();
} else if (window.ActiveXObject) {
send_conn = new ActiveXObject("Microsoft.XMLHTTP");
}
}else if( send_conn.status != 200) {
alert("Error from server: "+send_conn.statusText);
return;
}else if(send_conn.readyState != 4 ){
alert("not ready on send connection");
return;
}
} catch(e) {
alert(e);
sending = false;
return;
}
send_conn.open("POST", document_location, true);
send_conn.onreadystatechange = send_handle_recv;
send_conn.setRequestHeader("Content-type","text/plain");
send_conn.setRequestHeader("X-Urwid-ID",urwid_id);
var tmp_send_queue_in = send_queue_in;
var out = null;
if( send_queue_out > tmp_send_queue_in ){
out = send_queue.slice(send_queue_out).join("\n")
if( tmp_send_queue_in > 0 ){
out += "\n" + send_queue.slice(0,tmp_send_queue_in).join("\n");
}
}else{
out = send_queue.slice(send_queue_out,
tmp_send_queue_in).join("\n");
}
send_queue_out = tmp_send_queue_in;
//set_status("Sending");
send_conn.send( out +"\n" );
}
function send_handle_recv() {
if( send_conn.readyState != 4) {
return;
}
if( send_conn.status == 404) {
set_status("Connection Closed");
update_method = null;
return;
}
if( send_conn.status != 200) {
alert("Error from server: "+send_conn.statusText);
return;
}
sending = false;
if( send_queue_out != send_queue_in ){
send_more();
}
}
function send_more(){
setTimeout("do_send();",send_more_delay);
}
function check_fontsize(){
body_resize()
setTimeout("check_fontsize();",check_font_delay);
}
function body_resize(){
var t = document.getElementById('testchar');
var t2 = document.getElementById('testchar2');
var text = document.getElementById('text');
var window_width;
var window_height;
if (window.innerHeight) {
window_width = window.innerWidth;
window_height = window.innerHeight;
}else{
window_width = document.documentElement.clientWidth;
window_height = document.documentElement.clientHeight;
//var z = "CI:"; for(var i in bod){z = z + " " + i;} alert(z);
}
char_width = t.offsetLeft / 44;
var avail_width = window_width-18;
var avail_width_mod = avail_width % char_width;
var x_size = (avail_width - avail_width_mod)/char_width;
char_height = t2.offsetTop - t.offsetTop;
var avail_height = window_height-text.offsetTop-10;
var avail_height_mod = avail_height % char_height;
var y_size = (avail_height - avail_height_mod)/char_height;
text.style.width = x_size*char_width+"px";
text.style.height = y_size*char_height+"px";
if( screen_x != x_size || screen_y != y_size ){
send_key("window resize "+x_size+" "+y_size);
}
screen_x = x_size;
screen_y = y_size;
}
"""
ALARM_DELAY = 60
POLL_CONNECT = 3
MAX_COLS = 200
MAX_ROWS = 100
MAX_READ = 4096
BUF_SZ = 16384
_code_colours = {
'black': "0",
'dark red': "1",
'dark green': "2",
'brown': "3",
'dark blue': "4",
'dark magenta': "5",
'dark cyan': "6",
'light gray': "7",
'dark gray': "8",
'light red': "9",
'light green': "A",
'yellow': "B",
'light blue': "C",
'light magenta': "D",
'light cyan': "E",
'white': "F",
}
# replace control characters with ?'s
_trans_table = "?" * 32 + "".join([chr(x) for x in range(32, 256)])
_css_style = """
body { margin: 8px 8px 8px 8px; border: 0;
color: black; background-color: silver;
font-family: fixed; overflow: hidden; }
form { margin: 0 0 8px 0; }
#text { position: relative;
background-color: silver;
width: 100%; height: 100%;
margin: 3px 0 0 0; border: 1px solid #999; }
#page { position: relative; width: 100%;height: 100%;}
"""
# HTML Initial Page
_html_page = [
"""<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
"http://www.w3.org/TR/html4/loose.dtd">
<html>
<head>
<title>Urwid Web Display - ""","""</title>
<style type="text/css">
""" + _css_style + """
</style>
</head>
<body id="body" onload="load_web_display()">
<div style="position:absolute; visibility:hidden;">
<br id="br"\>
<pre>The quick brown fox jumps over the lazy dog.<span id="testchar">X</span>
<span id="testchar2">Y</span></pre>
</div>
Urwid Web Display - <b>""","""</b> -
Status: <span id="status">Set up</span>
<script type="text/javascript">
//<![CDATA[
""" + _js_code +"""
//]]>
</script>
<pre id="text"></pre>
</body>
</html>
"""]
class Screen:
def __init__(self):
self.palette = {}
self.has_color = True
self._started = False
started = property(lambda self: self._started)
def register_palette( self, l ):
"""Register a list of palette entries.
l -- list of (name, foreground, background) or
(name, same_as_other_name) palette entries.
calls self.register_palette_entry for each item in l
"""
for item in l:
if len(item) in (3,4):
self.register_palette_entry( *item )
continue
assert len(item) == 2, "Invalid register_palette usage"
name, like_name = item
if not self.palette.has_key(like_name):
raise Exception("palette entry '%s' doesn't exist"%like_name)
self.palette[name] = self.palette[like_name]
def register_palette_entry( self, name, foreground, background,
mono=None):
"""Register a single palette entry.
name -- new entry/attribute name
foreground -- foreground colour
background -- background colour
mono -- monochrome terminal attribute
See curses_display.register_palette_entry for more info.
"""
if foreground == "default":
foreground = "black"
if background == "default":
background = "light gray"
self.palette[name] = (foreground, background, mono)
def set_mouse_tracking(self):
"""Not yet implemented"""
pass
def tty_signal_keys(self, *args, **vargs):
"""Do nothing."""
pass
def start(self):
"""
This function reads the initial screen size, generates a
unique id and handles cleanup when fn exits.
web_display.set_preferences(..) must be called before calling
this function for the preferences to take effect
"""
global _prefs
assert not self._started
client_init = sys.stdin.read(50)
assert client_init.startswith("window resize "),client_init
ignore1,ignore2,x,y = client_init.split(" ",3)
x = int(x)
y = int(y)
self._set_screen_size( x, y )
self.last_screen = {}
self.last_screen_width = 0
self.update_method = os.environ["HTTP_X_URWID_METHOD"]
assert self.update_method in ("multipart","polling")
if self.update_method == "polling" and not _prefs.allow_polling:
sys.stdout.write("Status: 403 Forbidden\r\n\r\n")
sys.exit(0)
clients = glob.glob(os.path.join(_prefs.pipe_dir,"urwid*.in"))
if len(clients) >= _prefs.max_clients:
            sys.stdout.write("Status: 503 Server Busy\r\n\r\n")
sys.exit(0)
urwid_id = "%09d%09d"%(random.randrange(10**9),
random.randrange(10**9))
self.pipe_name = os.path.join(_prefs.pipe_dir,"urwid"+urwid_id)
os.mkfifo(self.pipe_name+".in",0600)
signal.signal(signal.SIGTERM,self._cleanup_pipe)
self.input_fd = os.open(self.pipe_name+".in",
os.O_NONBLOCK | os.O_RDONLY)
self.input_tail = ""
self.content_head = ("Content-type: "
"multipart/x-mixed-replace;boundary=ZZ\r\n"
"X-Urwid-ID: "+urwid_id+"\r\n"
"\r\n\r\n"
"--ZZ\r\n")
if self.update_method=="polling":
self.content_head = (
"Content-type: text/plain\r\n"
"X-Urwid-ID: "+urwid_id+"\r\n"
"\r\n\r\n")
signal.signal(signal.SIGALRM,self._handle_alarm)
signal.alarm( ALARM_DELAY )
self._started = True
def stop(self):
"""
Restore settings and clean up.
"""
assert self._started
# XXX which exceptions does this actually raise? EnvironmentError?
try:
self._close_connection()
except Exception:
pass
signal.signal(signal.SIGTERM,signal.SIG_DFL)
self._cleanup_pipe()
self._started = False
def set_input_timeouts(self, *args):
pass
def run_wrapper(self,fn):
"""
Run the application main loop, calling start() first
and stop() on exit.
"""
try:
self.start()
return fn()
finally:
self.stop()
def _close_connection(self):
if self.update_method == "polling child":
self.server_socket.settimeout(0)
socket, addr = self.server_socket.accept()
socket.sendall("Z")
socket.close()
if self.update_method == "multipart":
sys.stdout.write("\r\nZ"
"\r\n--ZZ--\r\n")
sys.stdout.flush()
def _cleanup_pipe(self, *args):
if not self.pipe_name: return
# XXX which exceptions does this actually raise? EnvironmentError?
try:
os.remove(self.pipe_name+".in")
os.remove(self.pipe_name+".update")
except Exception:
pass
def _set_screen_size(self, cols, rows ):
"""Set the screen size (within max size)."""
if cols > MAX_COLS:
cols = MAX_COLS
if rows > MAX_ROWS:
rows = MAX_ROWS
self.screen_size = cols, rows
def draw_screen(self, (cols, rows), r ):
"""Send a screen update to the client."""
if cols != self.last_screen_width:
self.last_screen = {}
sendq = [self.content_head]
if self.update_method == "polling":
send = sendq.append
elif self.update_method == "polling child":
signal.alarm( 0 )
try:
s, addr = self.server_socket.accept()
except socket.timeout:
sys.exit(0)
send = s.sendall
else:
signal.alarm( 0 )
send = sendq.append
send("\r\n")
self.content_head = ""
assert r.rows() == rows
if r.cursor is not None:
cx, cy = r.cursor
else:
cx = cy = None
new_screen = {}
y = -1
for row in r.content():
y += 1
row = list(row)
l = []
sig = tuple(row)
if y == cy: sig = sig + (cx,)
new_screen[sig] = new_screen.get(sig,[]) + [y]
old_line_numbers = self.last_screen.get(sig, None)
if old_line_numbers is not None:
if y in old_line_numbers:
old_line = y
else:
old_line = old_line_numbers[0]
send( "<%d\n"%old_line )
continue
col = 0
for (a, cs, run) in row:
run = run.translate(_trans_table)
if a is None:
fg,bg,mono = "black", "light gray", None
else:
fg,bg,mono = self.palette[a]
if y == cy and col <= cx:
run_width = util.calc_width(run, 0,
len(run))
if col+run_width > cx:
l.append(code_span(run, fg, bg,
cx-col))
else:
l.append(code_span(run, fg, bg))
col += run_width
else:
l.append(code_span(run, fg, bg))
send("".join(l)+"\n")
self.last_screen = new_screen
self.last_screen_width = cols
if self.update_method == "polling":
sys.stdout.write("".join(sendq))
sys.stdout.flush()
sys.stdout.close()
self._fork_child()
elif self.update_method == "polling child":
s.close()
else: # update_method == "multipart"
send("\r\n--ZZ\r\n")
sys.stdout.write("".join(sendq))
sys.stdout.flush()
signal.alarm( ALARM_DELAY )
def clear(self):
"""
Force the screen to be completely repainted on the next
call to draw_screen().
(does nothing for web_display)
"""
pass
def _fork_child(self):
"""
Fork a child to run CGI disconnected for polling update method.
Force parent process to exit.
"""
daemonize( self.pipe_name +".err" )
self.input_fd = os.open(self.pipe_name+".in",
os.O_NONBLOCK | os.O_RDONLY)
self.update_method = "polling child"
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s.bind( self.pipe_name+".update" )
s.listen(1)
s.settimeout(POLL_CONNECT)
self.server_socket = s
def _handle_alarm(self, sig, frame):
assert self.update_method in ("multipart","polling child")
if self.update_method == "polling child":
# send empty update
try:
s, addr = self.server_socket.accept()
s.close()
except socket.timeout:
sys.exit(0)
else:
# send empty update
sys.stdout.write("\r\n\r\n--ZZ\r\n")
sys.stdout.flush()
signal.alarm( ALARM_DELAY )
def get_cols_rows(self):
"""Return the screen size."""
return self.screen_size
def get_input(self, raw_keys=False):
"""Return pending input as a list."""
l = []
resized = False
try:
iready,oready,eready = select.select(
[self.input_fd],[],[],0.5)
except select.error, e:
# return on interruptions
if e.args[0] == 4:
if raw_keys:
return [],[]
return []
raise
if not iready:
if raw_keys:
return [],[]
return []
keydata = os.read(self.input_fd, MAX_READ)
os.close(self.input_fd)
self.input_fd = os.open(self.pipe_name+".in",
os.O_NONBLOCK | os.O_RDONLY)
#sys.stderr.write( repr((keydata,self.input_tail))+"\n" )
keys = keydata.split("\n")
keys[0] = self.input_tail + keys[0]
self.input_tail = keys[-1]
for k in keys[:-1]:
if k.startswith("window resize "):
ign1,ign2,x,y = k.split(" ",3)
x = int(x)
y = int(y)
self._set_screen_size(x, y)
resized = True
else:
l.append(k)
if resized:
l.append("window resize")
if raw_keys:
return l, []
return l
def code_span( s, fg, bg, cursor = -1):
code_fg = _code_colours[ fg ]
code_bg = _code_colours[ bg ]
if cursor >= 0:
c_off, _ign = util.calc_text_pos(s, 0, len(s), cursor)
c2_off = util.move_next_char(s, c_off, len(s))
return ( code_fg + code_bg + s[:c_off] + "\n" +
code_bg + code_fg + s[c_off:c2_off] + "\n" +
code_fg + code_bg + s[c2_off:] + "\n")
else:
return code_fg + code_bg + s + "\n"
def html_escape(text):
"""Escape text so that it will be displayed safely within HTML"""
    text = text.replace('&','&')
    text = text.replace('<','<')
    text = text.replace('>','>')
return text
def is_web_request():
"""
Return True if this is a CGI web request.
"""
return os.environ.has_key('REQUEST_METHOD')
def handle_short_request():
"""
Handle short requests such as passing keystrokes to the application
    or sending the initial html page. If this function returns True, it
    recognised and handled a short request, and the calling script should
    immediately exit.
web_display.set_preferences(..) should be called before calling this
function for the preferences to take effect
"""
global _prefs
if not is_web_request():
return False
if os.environ['REQUEST_METHOD'] == "GET":
# Initial request, send the HTML and javascript.
sys.stdout.write("Content-type: text/html\r\n\r\n" +
html_escape(_prefs.app_name).join(_html_page))
return True
if os.environ['REQUEST_METHOD'] != "POST":
# Don't know what to do with head requests etc.
return False
if not os.environ.has_key('HTTP_X_URWID_ID'):
# If no urwid id, then the application should be started.
return False
urwid_id = os.environ['HTTP_X_URWID_ID']
if len(urwid_id)>20:
#invalid. handle by ignoring
#assert 0, "urwid id too long!"
sys.stdout.write("Status: 414 URI Too Long\r\n\r\n")
return True
for c in urwid_id:
if c not in "0123456789":
            # invalid. handle by ignoring
#assert 0, "invalid chars in id!"
sys.stdout.write("Status: 403 Forbidden\r\n\r\n")
return True
if os.environ.get('HTTP_X_URWID_METHOD',None) == "polling":
# this is a screen update request
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
s.connect( os.path.join(_prefs.pipe_dir,
"urwid"+urwid_id+".update") )
data = "Content-type: text/plain\r\n\r\n"+s.recv(BUF_SZ)
while data:
sys.stdout.write(data)
data = s.recv(BUF_SZ)
return True
except socket.error:
sys.stdout.write("Status: 404 Not Found\r\n\r\n")
return True
# this is a keyboard input request
try:
fd = os.open((os.path.join(_prefs.pipe_dir,
"urwid"+urwid_id+".in")), os.O_WRONLY)
except OSError:
sys.stdout.write("Status: 404 Not Found\r\n\r\n")
return True
# FIXME: use the correct encoding based on the request
keydata = sys.stdin.read(MAX_READ)
os.write(fd,keydata.encode('ascii'))
os.close(fd)
sys.stdout.write("Content-type: text/plain\r\n\r\n")
return True
class _Preferences:
app_name = "Unnamed Application"
pipe_dir = "/tmp"
allow_polling = True
max_clients = 20
_prefs = _Preferences()
def set_preferences( app_name, pipe_dir="/tmp", allow_polling=True,
max_clients=20 ):
"""
Set web_display preferences.
app_name -- application name to appear in html interface
pipe_dir -- directory for input pipes, daemon update sockets
and daemon error logs
allow_polling -- allow creation of daemon processes for
browsers without multipart support
max_clients -- maximum concurrent client connections. This
pool is shared by all urwid applications
using the same pipe_dir
"""
global _prefs
_prefs.app_name = app_name
_prefs.pipe_dir = pipe_dir
_prefs.allow_polling = allow_polling
_prefs.max_clients = max_clients
class ErrorLog:
def __init__(self, errfile ):
self.errfile = errfile
def write(self, err):
open(self.errfile,"a").write(err)
def daemonize( errfile ):
"""
Detach process and become a daemon.
"""
pid = os.fork()
if pid:
os._exit(0)
os.setsid()
signal.signal(signal.SIGHUP, signal.SIG_IGN)
os.umask(0)
pid = os.fork()
if pid:
os._exit(0)
os.chdir("/")
for fd in range(0,20):
try:
os.close(fd)
except OSError:
pass
sys.stdin = open("/dev/null","r")
sys.stdout = open("/dev/null","w")
sys.stderr = ErrorLog( errfile )
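# Hedged illustration (not part of the original module): code_span() prefixes
# each run of text with two palette code characters (foreground, then
# background) taken from _code_colours; the make_span() JavaScript above maps
# those characters back to CSS colours on the client. For example:
#
#     >>> code_span("hello", "white", "black")
#     'F0hello\n'
#
# A run containing the cursor is split into three such coded spans separated
# by newlines, with the middle span's colour codes swapped so the client can
# invert the colours under the cursor cell.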
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ApplicationGatewayBackendHealthHttpSettings(Model):
"""Application gateway BackendHealthHttp settings.
:param backend_http_settings: Reference of an
ApplicationGatewayBackendHttpSettings resource.
:type backend_http_settings:
~azure.mgmt.network.v2017_03_01.models.ApplicationGatewayBackendHttpSettings
:param servers: List of ApplicationGatewayBackendHealthServer resources.
:type servers:
list[~azure.mgmt.network.v2017_03_01.models.ApplicationGatewayBackendHealthServer]
"""
_attribute_map = {
'backend_http_settings': {'key': 'backendHttpSettings', 'type': 'ApplicationGatewayBackendHttpSettings'},
'servers': {'key': 'servers', 'type': '[ApplicationGatewayBackendHealthServer]'},
}
def __init__(self, backend_http_settings=None, servers=None):
super(ApplicationGatewayBackendHealthHttpSettings, self).__init__()
self.backend_http_settings = backend_http_settings
self.servers = servers
|
# -----------------------------------------------------------------------------
# Copyright (c) 2012 - 2021, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
# -----------------------------------------------------------------------------
""" Simplify linking to Bokeh Github resources.
This module provides roles that can be used to easily reference information from
various sources in the Bokeh project structure:
``:bokeh-commit:`` : link to a specific commit
``:bokeh-issue:`` : link to an issue
``:bokeh-minpy:`` : provide the minimum supported Python version
``:bokeh-pull:`` : link to a pull request
``:bokeh-requires:`` : list the install requires from setup.py
``:bokeh-tree:`` : (versioned) link to a source tree URL
Examples
--------
The following code:
.. code-block:: rest
The repo history shows that :bokeh-commit:`bf19bcb` was made in
    :bokeh-pull:`1698`, which closed :bokeh-issue:`1694`. This included
updating all of the files in the :bokeh-tree:`examples` subdirectory.
yields the output:
The repo history shows that :bokeh-commit:`bf19bcb` was made in
    :bokeh-pull:`1698`, which closed :bokeh-issue:`1694`. This included
updating all of the files in the :bokeh-tree:`examples` subdirectory.
"""
# -----------------------------------------------------------------------------
# Boilerplate
# -----------------------------------------------------------------------------
from __future__ import annotations
import logging # isort:skip
log = logging.getLogger(__name__)
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
# Standard library imports
import importlib.machinery
import os
import types
from os.path import abspath, join, pardir
# External imports
from docutils import nodes, utils
from docutils.parsers.rst.roles import set_classes
# -----------------------------------------------------------------------------
# Globals and constants
# -----------------------------------------------------------------------------
__all__ = (
"bokeh_commit",
"bokeh_issue",
"bokeh_minpy",
"bokeh_pull",
"bokeh_requires",
"bokeh_tree",
"setup",
)
BOKEH_GH = "https://github.com/bokeh/bokeh"
# need REPO top (i.e. one up from where sphinx *runs*)
TOP_PATH = abspath(join(os.curdir, pardir))
# -----------------------------------------------------------------------------
# General API
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Dev API
# -----------------------------------------------------------------------------
def bokeh_commit(name, rawtext, text, lineno, inliner, options=None, content=None):
    """Link to a Bokeh Github commit.
Returns 2 part tuple containing list of nodes to insert into the
document and a list of system messages. Both are allowed to be
empty.
"""
app = inliner.document.settings.env.app
node = _make_gh_link_node(app, rawtext, "commit", "commit ", "commit", text, options)
return [node], []
def bokeh_issue(name, rawtext, text, lineno, inliner, options=None, content=None):
"""Link to a Bokeh Github issue.
Returns 2 part tuple containing list of nodes to insert into the
document and a list of system messages. Both are allowed to be
empty.
"""
app = inliner.document.settings.env.app
try:
issue_num = int(text)
if issue_num <= 0:
raise ValueError
except ValueError:
msg = inliner.reporter.error(f"Github issue number must be a number greater than or equal to 1; {text!r} is invalid.", line=lineno)
prb = inliner.problematic(rawtext, rawtext, msg)
return [prb], [msg]
node = _make_gh_link_node(app, rawtext, "issue", "#", "issues", str(issue_num), options)
return [node], []
def bokeh_minpy(name, rawtext, text, lineno, inliner, options=None, content=None):
"""Provide the minimum supported Python version from setup.py.
Returns 2 part tuple containing list of nodes to insert into the
document and a list of system messages. Both are allowed to be
empty.
"""
loader = importlib.machinery.SourceFileLoader("setup", join(TOP_PATH, "_setup_support.py"))
setup = types.ModuleType(loader.name)
loader.exec_module(setup)
node = nodes.Text(".".join(str(x) for x in setup.MIN_PYTHON_VERSION))
return [node], []
def bokeh_pull(name, rawtext, text, lineno, inliner, options=None, content=None):
    """Link to a Bokeh Github pull request.
Returns 2 part tuple containing list of nodes to insert into the
document and a list of system messages. Both are allowed to be
empty.
"""
app = inliner.document.settings.env.app
try:
issue_num = int(text)
if issue_num <= 0:
raise ValueError
except ValueError:
msg = inliner.reporter.error(f"Github pull request number must be a number greater than or equal to 1; {text!r} is invalid.", line=lineno)
prb = inliner.problematic(rawtext, rawtext, msg)
return [prb], [msg]
node = _make_gh_link_node(app, rawtext, "pull", "pull request ", "pull", str(issue_num), options)
return [node], []
def bokeh_requires(name, rawtext, text, lineno, inliner, options=None, content=None):
    """List the install requirements from setup.py.
Returns 2 part tuple containing list of nodes to insert into the
document and a list of system messages. Both are allowed to be
empty.
"""
loader = importlib.machinery.SourceFileLoader("setup", join(TOP_PATH, "_setup_support.py"))
setup = types.ModuleType(loader.name)
loader.exec_module(setup)
node = nodes.bullet_list()
for dep in setup.INSTALL_REQUIRES:
node += nodes.list_item("", nodes.Text(dep))
return [node], []
def bokeh_tree(name, rawtext, text, lineno, inliner, options=None, content=None):
"""Link to a URL in the Bokeh GitHub tree, pointing to appropriate tags
for releases, or to main otherwise.
The link text is simply the URL path supplied, so typical usage might
look like:
.. code-block:: none
All of the examples are located in the :bokeh-tree:`examples`
subdirectory of your Bokeh checkout.
Returns 2 part tuple containing list of nodes to insert into the
document and a list of system messages. Both are allowed to be
empty.
"""
app = inliner.document.settings.env.app
tag = app.env.config["version"]
if "-" in tag:
tag = "main"
url = f"{BOKEH_GH}/tree/{tag}/{text}"
options = options or {}
set_classes(options)
node = nodes.reference(rawtext, text, refuri=url, **options)
return [node], []
def setup(app):
""" Required Sphinx extension setup function. """
app.add_role("bokeh-commit", bokeh_commit)
app.add_role("bokeh-issue", bokeh_issue)
app.add_role("bokeh-minpy", bokeh_minpy)
app.add_role("bokeh-pull", bokeh_pull)
app.add_role("bokeh-requires", bokeh_requires)
app.add_role("bokeh-tree", bokeh_tree)
# -----------------------------------------------------------------------------
# Private API
# -----------------------------------------------------------------------------
def _make_gh_link_node(app, rawtext, role, kind, api_type, id, options=None):
"""Return a link to a Bokeh Github resource.
Args:
app (Sphinx app) : current app
rawtext (str) : text being replaced with link node.
role (str) : role name
kind (str) : resource type (issue, pull, etc.)
api_type (str) : type for api link
id : (str) : id of the resource to link to
options (dict) : options dictionary passed to role function
"""
url = f"{BOKEH_GH}/{api_type}/{id}"
options = options or {}
set_classes(options)
node = nodes.reference(rawtext, f"{kind}{utils.unescape(id)}", refuri=url, **options)
return node
# -----------------------------------------------------------------------------
# Code
# -----------------------------------------------------------------------------
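# Hedged usage note (not part of this file): like any Sphinx extension, the
# module is activated by adding its import path to the ``extensions`` list in
# conf.py -- the dotted path below is a placeholder and depends on where this
# file lives in the package:
#
#     extensions = [
#         "bokeh.sphinxext.bokeh_github",  # hypothetical module path
#     ]
#
# after which roles such as :bokeh-issue:`1694` or :bokeh-tree:`examples`
# become available in the project's .rst files.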
|
#!/usr/bin/env python2
# This library implements the Bluetooth protocol
# for the Mega Electronics Ltd. Faros device.
#
# Copyright 2014
# Andreas Henelius <[email protected]>
# Finnish Institute of Occupational Health
#
# This code is released under the MIT License
# http://opensource.org/licenses/mit-license.php
#
# Please see the file LICENSE for details.
import serial
from construct import *
def get_firmware(ser):
ser.write(bytes("\\wbainf\r"))
res = ser.read(9).decode("UTF-8").strip()
return(res)
def get_firmware_build_date(ser):
ser.write(bytes("\\wbaind\r"))
res = ser.read(9).decode("UTF-8").strip()
return(res)
def set_ecg_fs(ser, fs):
fs_map = {1000: "\\wbafs1\r",
500: "\\wbafs2\r",
250: "\\wbafs4\r",
125: "\\wbafs8\r",
100: "\\wbafst\r"}
ser.write(bytes(fs_map[int(fs)]))
def set_acc_fs(ser, fs):
fs_map = {100: "\\wbaas1\r",
50: "\\wbaas2\r",
40: "\\wbaas3\r",
25: "\\wbafs4\r",
20: "\\wbaast\r"}
ser.write(bytes(fs_map[int(fs)]))
def set_ecg_res(ser, res):
res_map = {"0.20": "\\wbasg0\r",
"1.00": "\\wbasg1\r"}
ser.write(bytes(res_map[str(res)]))
def set_acc_res(ser, res):
res_map = {"0.25": "\\wbaar0\r",
"1.00": "\\wbaar1\r"}
ser.write(bytes(res_map[str(res)]))
def set_ecg_hpf(ser, f):
res_map = {1: "\\wbash0\r",
10: "\\wbash1\r"}
ser.write(bytes(res_map[int(f)]))
def start_measurement(ser, data_format):
ser.write(bytes("\\wbaom" + str(data_format) + "\r"))
res = ser.read(7).decode("UTF-8").strip()
return(res)
def stop_measurement(ser, state):
if state == "idle":
command = "\\wbaoms\r"
if state == "power_off":
command = "\\wbaom0\r"
for i in range(6):
try:
ser.write(bytes(command))
ser.flushInput()
res = ser.read(6).decode("UTF-8").strip()
if (res == "wbaack"):
break
except UnicodeDecodeError:
pass
def connect(port):
return serial.Serial(port)
def get_packet_size(fs_ecg, fs_acc):
k = str(fs_ecg) + "_" + str(fs_acc)
packet_size_map = {"1000_40" : 70,
"500_100" : 94,
"500_20" : 70,
"250_100" : 124,
"250_50" : 94,
"250_20" : 76,
"125_100" : 184,
"125_50" : 124,
"125_25" : 94,
"125_20" : 88,
"100_100" : 214,
"100_20" : 94}
return(packet_size_map[k])
def get_packet_format(packetsize):
N = (packetsize - 64) / 2
packet_format = Struct('packet_format',
Bytes('start_1', 1),
Bytes('start_2', 1),
ULInt8('protocol_version_major'),
ULInt8('protocol_version_minor'),
BitStruct("packet_number",
BitField("pn", 24, swapped = True)),
BitStruct("flag",
BitField("battery_2", 1),
BitField("battery_1", 1),
BitField("rr_error", 1),
BitField("dummy_3", 1),
BitField("adaptation_failed", 1),
BitField("dummy_2", 1),
BitField("dummy_1", 1),
BitField("rr_in_packet", 1)),
Array(25, SLInt16("ECG")),
UBInt16("rr"),
UBInt32("rr_absolute"),
Array(N, SLInt16("acc")))
return packet_format
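# Hedged usage sketch (not part of the original library). The serial port
# name and the data_format value passed to start_measurement() are
# assumptions for illustration; only functions defined above are used.
if __name__ == "__main__":
    ser = connect("/dev/rfcomm0")          # assumed RFCOMM device node
    print(get_firmware(ser))
    # configure 500 Hz ECG and 100 Hz accelerometer sampling
    set_ecg_fs(ser, 500)
    set_acc_fs(ser, 100)
    start_measurement(ser, 1)              # data_format=1 is an assumption
    packet_size = get_packet_size(500, 100)
    packet_format = get_packet_format(packet_size)
    try:
        # read and decode a handful of packets
        for _ in range(10):
            raw = ser.read(packet_size)
            packet = packet_format.parse(raw)
            print("packet %d: %r" % (packet.packet_number.pn, packet.ECG[:5]))
    finally:
        stop_measurement(ser, "idle")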
|
"""Figure extractor and label generator
Read a single PDF file and write the extracted data and labels
to a directory with the following structure:
/json
- filename_figno.json
/img
- filename_figno.png
- filename_figno_2x.png (200 DPI)
- filename_figno_3x.png (300 DPI)
- filename_figno_4x.png (400 DPI)
/text-masked
- filename_figno_box.png
- filename_figno_mask.png
Usage:
label_gen.py read-s3 S3-IN-BUCKET S3-FILE S3-OUT-BUCKET S3-PATH [--use-ramdisk] [--debug] [--dbg-image]
label_gen.py read FILE PATH [--debug] [--dbg-image]
label_gen.py (-h | --help)
label_gen.py --version
Options:
--use-ramdisk Store temporary files in /tmp/ram/.
--debug Write debug output.
--dbg-image Create a debug label.
-h --help Show this screen.
--version Show version.
"""
import tempfile
import shutil
import subprocess
import os
import json
import logging
from docopt import docopt
from boto.s3.connection import S3Connection
from boto.s3.key import Key
import config
import render
import label_image
DEBUG = False
def create_dir(directory):
if not os.path.exists(directory):
os.makedirs(directory)
def run_local(pdf_file, path, debug_image, flat):
filepath = os.path.abspath(pdf_file)
outpath = os.path.abspath(path)
ident = os.path.splitext(os.path.basename(pdf_file))[0]
if flat:
# cheaper because we don't need separate directories
json_path = outpath
img_path = outpath
label_path = outpath
else:
json_path = os.path.join(outpath, 'json')
img_path = os.path.join(outpath, 'img')
label_path = os.path.join(outpath, 'text-masked')
# create directories, if needed
create_dir(json_path)
create_dir(img_path)
create_dir(label_path)
outident_json = os.path.join(json_path, ident)
# generate the json for figures
logging.debug('Run pdffigures {}'.format(filepath))
DEVNULL = open(os.devnull, 'w')
subprocess.call(['pdffigures/pdffigures', '-j',
outident_json, filepath], stdout=DEVNULL, stderr=DEVNULL)
json_files = []
img_files = []
label_files = []
logging.debug("Finished. Now look for the JSON and generate labels.")
    # pdffigures now generates only a single JSON file, we need one file per figure
# https://github.com/allenai/pdffigures/commit/8ffcaceab3fdc97ec489c58e87191b7e12c0134a
json_file = '{}.json'.format(outident_json)
if os.path.isfile(json_file):
with open(json_file) as fh:
figures = json.load(fh)
logging.debug('Found {} figures'.format(len(figures)))
for index, figure in enumerate(figures):
chart_json = '{}-Figure-{}.json'.format(outident_json, index)
json_files.append(chart_json)
with open(chart_json, 'w') as jfh:
json.dump(figure, jfh)
def image_path(factor):
ext = '' if factor == 1 else '-{}x'.format(factor)
name = '{}-Figure-{}{}.png'.format(ident, index, ext)
return os.path.join(img_path, name)
# render image with different resolutions
for factor in [1, 2]:
image_file = image_path(factor)
logging.debug('Render image {} from {}'.format(
image_file, filepath))
render.render_chart(filepath, figure['Page']-1,
figure['ImageBB'],
int(factor*100), image_file)
img_files.append(image_file)
# labeled image
output = os.path.join(
label_path, '{}-Figure-{}-label.png'.format(
ident, index, factor))
dbg_output = None
if debug_image:
dbg_output = os.path.join(
label_path, '{}-Figure-{}-dbg.png'.format(
ident, index, factor))
logging.debug('generate label {}'.format(output))
if label_image.gen_labeled_image(
figure, image_path(1), output, dbg_output, DEBUG):
# yes, a labeled file was generated
label_files.append(output)
if dbg_output:
label_files.append(dbg_output)
# remove the one json file with data for all figures
os.remove(json_file)
return json_files, img_files, label_files
def run_s3(in_bucket_name, filename, out_bucket_name, path, ramtemp, debug_image):
conn = S3Connection(config.access_key, config.secret_key, is_secure=False)
in_bucket = conn.get_bucket(in_bucket_name)
out_bucket = conn.get_bucket(out_bucket_name)
dirpath = tempfile.mkdtemp(dir='/tmp/ram/' if ramtemp else None)
logging.debug('Temp directory in {}'.format(dirpath))
try:
# copy into temp
key = Key(in_bucket, filename)
target = os.path.join(dirpath, os.path.basename(filename))
key.get_contents_to_filename(target)
# run algos
files = run_local(target, dirpath, debug_image, True)
# write files back to s3
for f in files[0]:
key = Key(out_bucket, os.path.join(path, 'json', os.path.basename(f)))
key.set_contents_from_filename(f)
for f in files[1]:
key = Key(out_bucket, os.path.join(path, 'img', os.path.basename(f)))
key.set_contents_from_filename(f)
for f in files[2]:
key = Key(out_bucket, os.path.join(
path, 'text-masked', os.path.basename(f)))
key.set_contents_from_filename(f)
finally:
shutil.rmtree(dirpath)
if __name__ == '__main__':
arguments = docopt(__doc__, version='Extractor 1.0')
if arguments['--debug']:
DEBUG = True
logging.basicConfig(level=logging.DEBUG)
logging.getLogger("boto").setLevel(logging.WARNING)
if arguments['read-s3']:
run_s3(arguments['S3-IN-BUCKET'], arguments['S3-FILE'],
arguments['S3-OUT-BUCKET'], arguments['S3-PATH'],
arguments['--use-ramdisk'], arguments['--dbg-image'])
elif arguments['read']:
run_local(arguments['FILE'], arguments['PATH'],
arguments['--dbg-image'], False)
|
# Wasp: Discrete Design with Grasshopper plug-in (GPL) initiated by Andrea Rossi
#
# This file is part of Wasp.
#
# Copyright (c) 2017, Andrea Rossi <[email protected]>
# Wasp is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published
# by the Free Software Foundation; either version 3 of the License,
# or (at your option) any later version.
#
# Wasp is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Wasp; If not, see <http://www.gnu.org/licenses/>.
#
# @license GPL-3.0 <https://www.gnu.org/licenses/gpl.html>
#
# Significant parts of Wasp have been developed by Andrea Rossi
# as part of research on digital materials and discrete design at:
# DDU Digital Design Unit - Prof. Oliver Tessmann
# Technische Universität Darmstadt
#########################################################################
## COMPONENT INFO ##
#########################################################################
"""
Extract information from a Connection.
-
Provided by Wasp 0.5
Args:
CONN: Connection to deconstruct
Returns:
PLN: Connection plane
ID: Connection ID
        PART: Part to which the connection belongs
T: Connection type
"""
ghenv.Component.Name = "Wasp_Deconstruct Connection"
ghenv.Component.NickName = 'DeConn'
ghenv.Component.Message = 'v0.5.004'
ghenv.Component.IconDisplayMode = ghenv.Component.IconDisplayMode.application
ghenv.Component.Category = "Wasp"
ghenv.Component.SubCategory = "1 | Elements"
try: ghenv.Component.AdditionalHelpFromDocStrings = "1"
except: pass
import sys
import Rhino.Geometry as rg
import Grasshopper as gh
## add Wasp install directory to system path
wasp_loaded = False
ghcompfolder = gh.Folders.DefaultAssemblyFolder
if ghcompfolder not in sys.path:
sys.path.append(ghcompfolder)
try:
from wasp import __version__
wasp_loaded = True
except:
msg = "Cannot import Wasp. Is the wasp folder available in " + ghcompfolder + "?"
ghenv.Component.AddRuntimeMessage(gh.Kernel.GH_RuntimeMessageLevel.Error, msg)
## if Wasp is installed correctly, load the classes required by the component
if wasp_loaded:
from wasp.core import Connection
def main(connection):
check_data = True
##check inputs
if connection is None:
check_data = False
msg = "No connection provided"
ghenv.Component.AddRuntimeMessage(gh.Kernel.GH_RuntimeMessageLevel.Warning, msg)
if check_data:
return connection.pln, connection.id, connection.part, connection.type
else:
return -1
result = main(CONN)
if result != -1:
PLN = result[0]
ID = result[1]
PART = result[2]
T = result[3]
|
#
# This file is part of pysnmp software.
#
# Copyright (c) 2005-2017, Ilya Etingof <[email protected]>
# License: http://pysnmp.sf.net/license.html
#
from bisect import bisect
class OrderedDict(dict):
"""Ordered dictionary used for indices"""
def __init__(self, **kwargs):
self.__keys = []
self.__dirty = True
super(OrderedDict, self).__init__()
if kwargs:
self.update(kwargs)
def __setitem__(self, key, value):
if key not in self:
self.__keys.append(key)
super(OrderedDict, self).__setitem__(key, value)
self.__dirty = True
def __repr__(self):
if self.__dirty:
self.__order()
return super(OrderedDict, self).__repr__()
def __str__(self):
if self.__dirty:
self.__order()
return super(OrderedDict, self).__str__()
def __delitem__(self, key):
if super(OrderedDict, self).__contains__(key):
self.__keys.remove(key)
super(OrderedDict, self).__delitem__(key)
self.__dirty = True
__delattr__ = __delitem__
def clear(self):
super(OrderedDict, self).clear()
self.__keys = []
self.__dirty = True
def keys(self):
if self.__dirty:
self.__order()
return list(self.__keys)
def values(self):
if self.__dirty:
self.__order()
return [self[k] for k in self.__keys]
def items(self):
if self.__dirty:
self.__order()
return [(k, self[k]) for k in self.__keys]
def update(self, d):
[self.__setitem__(k, v) for k, v in d.items()]
def sortingFun(self, keys):
keys.sort()
def __order(self):
self.sortingFun(self.__keys)
d = {}
for k in self.__keys:
d[len(k)] = 1
l = list(d.keys())
l.sort(reverse=True)
self.__keysLens = tuple(l)
self.__dirty = False
def nextKey(self, key):
keys = list(self.keys())
if key in self:
nextIdx = keys.index(key) + 1
else:
nextIdx = bisect(keys, key)
if nextIdx < len(keys):
return keys[nextIdx]
else:
raise KeyError(key)
def getKeysLens(self):
if self.__dirty:
self.__order()
return self.__keysLens
class OidOrderedDict(OrderedDict):
"""OID-ordered dictionary used for indices"""
def __init__(self, **kwargs):
self.__keysCache = {}
OrderedDict.__init__(self, **kwargs)
def __setitem__(self, key, value):
if key not in self.__keysCache:
if isinstance(key, tuple):
self.__keysCache[key] = key
else:
self.__keysCache[key] = [int(x) for x in key.split('.') if x]
OrderedDict.__setitem__(self, key, value)
def __delitem__(self, key):
if key in self.__keysCache:
del self.__keysCache[key]
OrderedDict.__delitem__(self, key)
__delattr__ = __delitem__
def sortingFun(self, keys):
keys.sort(key=lambda k, d=self.__keysCache: d[k])
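if __name__ == '__main__':
    # Hedged usage sketch (not part of the original module): OidOrderedDict
    # orders keys numerically by their OID components rather than as plain
    # strings, and nextKey() walks that order.
    d = OidOrderedDict()
    d['1.3.6.1.2'] = 'b'
    d['1.3.6.1.10'] = 'c'
    d['1.3.6.1.1'] = 'a'
    print(d.keys())                  # ['1.3.6.1.1', '1.3.6.1.2', '1.3.6.1.10']
    print(d.nextKey('1.3.6.1.2'))    # '1.3.6.1.10'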
|
from django.shortcuts import get_object_or_404
from django.utils.decorators import method_decorator
from django.http import HttpResponse, HttpResponseForbidden
from django.contrib.auth.decorators import login_required, user_passes_test
from django.views.generic.base import View
from revuo.models import NewsItem, BlogItem, Publication
from .utils import editor_test
import json
class ItemPublish(View):
categories = {'N': NewsItem, 'B': BlogItem, 'P': Publication}
@method_decorator(login_required)
@method_decorator(user_passes_test(editor_test))
def get(self, request, category, item_id):
if request.is_ajax():
item_class = self.categories[category]
item = get_object_or_404(item_class, id=int(item_id), deleted=False)
item.authorize()
item.save()
result = {'msg': 'Item Published'}
else:
return HttpResponseForbidden("FORBIDDEN")
return HttpResponse(json.dumps(result), content_type='application/json')
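# Hedged wiring sketch (assumption, not part of this file): the view expects a
# one-letter category key ('N', 'B' or 'P') and a numeric item id, so a URL
# pattern dispatching to it could look like the following in urls.py.
#
#     from django.conf.urls import url
#     from .views import ItemPublish
#
#     urlpatterns = [
#         url(r'^publish/(?P<category>[NBP])/(?P<item_id>\d+)/$',
#             ItemPublish.as_view(), name='item_publish'),
#     ]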
|
#!/usr/bin/env python
__all__ = ['yixia_miaopai_download']
from ..common import *
#----------------------------------------------------------------------
def yixia_miaopai_download_by_scid(scid, output_dir = '.', merge = True, info_only = False):
""""""
headers = {
'User-Agent': 'Mozilla/5.0 (iPad; CPU OS 6_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/6.0 Mobile/10A5376e Safari/8536.25',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Cache-Control': 'max-age=0',
}
html = get_content('http://m.miaopai.com/show/channel/' + scid, headers)
title = match1(html, r'<title>(\w+)')
video_url = match1(html, r'<div class="vid_img" data-url=\'(.+)\'')
type, ext, size = url_info(video_url)
print_info(site_info, title, type, size)
if not info_only:
download_urls([video_url], title, ext, size, output_dir, merge=merge)
#----------------------------------------------------------------------
def yixia_miaopai_download(url, output_dir = '.', merge = True, info_only = False, **kwargs):
"""wrapper"""
if re.match(r'http://www.miaopai.com/show/channel/\w+', url):
scid = match1(url, r'http://www.miaopai.com/show/channel/(\w+)')
elif re.match(r'http://www.miaopai.com/show/\w+', url):
scid = match1(url, r'http://www.miaopai.com/show/(\w+)')
elif re.match(r'http://m.miaopai.com/show/channel/\w+', url):
scid = match1(url, r'http://m.miaopai.com/show/channel/(\w+)')
else:
pass
yixia_miaopai_download_by_scid(scid, output_dir, merge, info_only)
site_info = "Yixia MiaoPai"
download = yixia_miaopai_download
download_playlist = playlist_not_supported('yixia_miaopai')
|
#!/usr/bin/env python
# Copyright 2014 Open Connectome Project (http://openconnecto.me)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#graphml_headers.py
# Created by Disa Mhembere on 2015-09-10.
# Email: [email protected]
import argparse
import re
class Container(object):
    """ This is a shell for an igraph container object """
def __init__(self, attrs=[]):
self.attrs = attrs
def attribute_names(self,):
return self.attrs
def __repr__(self):
return "{0}".format(self.attrs)
class VertexContainer(Container):
pass
class EdgeContainer(Container):
pass
class GraphContainer(object):
""" This is a shell for an igraph graph object """
def __init__(self, g_attrs={}, v_attrs=VertexContainer(), e_attrs=EdgeContainer()):
self.attrs = g_attrs
self.vs = v_attrs
self.es = e_attrs
def __getitem__(self, var):
return self.attrs.__getitem__(var)
def vcount(self,):
return self.attrs["vcount"]
def ecount(self,):
return self.attrs["ecount"]
def attributes(self,):
return self.attrs.keys()
def __repr__(self,):
return "\nGraph Container:\nGraph: {0}\nVertex: {1}\nEdges: {2}".\
format(self.attrs, self.vs, self.es)
def read_graphml_headers(fn):
f = open(fn, "rb")
g_attrs = {}
e_attrs = []
v_attrs = []
g_key_patt = re.compile("g_\w*")
while True:
line = f.readline().strip()
if line.startswith("<node"):
break # No more metadata
elif line.startswith("<key"):
attr = line.split("\"")[1]
if attr.startswith("v_"):
v_attrs.append(attr[2:])
elif attr.startswith("e_"):
e_attrs.append(attr[2:])
elif line.startswith("<data"): # These are graph attributes
lsplit = line.split(">")
m = re.search(g_key_patt, lsplit[0])
key = m.string[m.span()[0]:m.span()[1]][2:] # skip the `g_`
g_attrs[key] = lsplit[1].split("<")[0]
# Fail on graphs without these attrs
if not g_attrs.has_key("vcount") and not g_attrs.has_key("ecount"):
        raise AttributeError("Expected graph attributes vcount & ecount")
return GraphContainer(g_attrs, VertexContainer(v_attrs), EdgeContainer(e_attrs))
def test():
parser = argparse.ArgumentParser(description="Partial read of a graphml graph for attrs")
parser.add_argument("graphfn", action="store", help="The graph filename")
result = parser.parse_args()
g = read_graphml_headers(result.graphfn)
print g
if __name__ == "__main__":
test()
|
#!/usr/bin/python
#-*- coding: utf-8 -*-
###########################################################
# © 2011 Daniel 'grindhold' Brendle and Team
#
# This file is part of Skarphed.
#
# Skarphed is free software: you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public License
# as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later
# version.
#
# Skarphed is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with Skarphed.
# If not, see http://www.gnu.org/licenses/.
###########################################################
import pygtk
pygtk.require("2.0")
import gtk
import os
class ModulePage(gtk.VBox):
def __init__(self, parent, module):
self.par = parent
gtk.VBox.__init__(self)
self.moduleId = module.getLocalId()
path = os.path.realpath(__file__)
path = path.replace("module.pyc","")
self._path = path.replace("module.py","")
self.builder = gtk.Builder()
self.builder.add_from_file(self._path+"module.glade")
self.content = self.builder.get_object("module")
self.add(self.content)
def render(self):
pass
def getPar(self):
return self.par
def getApplication(self):
return self.par.getApplication()
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import binascii
import os
import pytest
from cryptography.hazmat.primitives.ciphers import algorithms, modes
from .utils import generate_aead_test, generate_encrypt_test
from ...utils import load_nist_vectors
@pytest.mark.supported(
only_if=lambda backend: backend.cipher_supported(
algorithms.AES("\x00" * 16), modes.CBC("\x00" * 16)
),
skip_message="Does not support AES CBC",
)
@pytest.mark.cipher
class TestAESModeCBC(object):
test_CBC = generate_encrypt_test(
load_nist_vectors,
os.path.join("ciphers", "AES", "CBC"),
[
"CBCGFSbox128.rsp",
"CBCGFSbox192.rsp",
"CBCGFSbox256.rsp",
"CBCKeySbox128.rsp",
"CBCKeySbox192.rsp",
"CBCKeySbox256.rsp",
"CBCVarKey128.rsp",
"CBCVarKey192.rsp",
"CBCVarKey256.rsp",
"CBCVarTxt128.rsp",
"CBCVarTxt192.rsp",
"CBCVarTxt256.rsp",
"CBCMMT128.rsp",
"CBCMMT192.rsp",
"CBCMMT256.rsp",
],
lambda key, **kwargs: algorithms.AES(binascii.unhexlify(key)),
lambda iv, **kwargs: modes.CBC(binascii.unhexlify(iv)),
)
@pytest.mark.supported(
only_if=lambda backend: backend.cipher_supported(
algorithms.AES("\x00" * 16), modes.ECB()
),
skip_message="Does not support AES ECB",
)
@pytest.mark.cipher
class TestAESModeECB(object):
test_ECB = generate_encrypt_test(
load_nist_vectors,
os.path.join("ciphers", "AES", "ECB"),
[
"ECBGFSbox128.rsp",
"ECBGFSbox192.rsp",
"ECBGFSbox256.rsp",
"ECBKeySbox128.rsp",
"ECBKeySbox192.rsp",
"ECBKeySbox256.rsp",
"ECBVarKey128.rsp",
"ECBVarKey192.rsp",
"ECBVarKey256.rsp",
"ECBVarTxt128.rsp",
"ECBVarTxt192.rsp",
"ECBVarTxt256.rsp",
"ECBMMT128.rsp",
"ECBMMT192.rsp",
"ECBMMT256.rsp",
],
lambda key, **kwargs: algorithms.AES(binascii.unhexlify(key)),
lambda **kwargs: modes.ECB(),
)
@pytest.mark.supported(
only_if=lambda backend: backend.cipher_supported(
algorithms.AES("\x00" * 16), modes.OFB("\x00" * 16)
),
skip_message="Does not support AES OFB",
)
@pytest.mark.cipher
class TestAESModeOFB(object):
test_OFB = generate_encrypt_test(
load_nist_vectors,
os.path.join("ciphers", "AES", "OFB"),
[
"OFBGFSbox128.rsp",
"OFBGFSbox192.rsp",
"OFBGFSbox256.rsp",
"OFBKeySbox128.rsp",
"OFBKeySbox192.rsp",
"OFBKeySbox256.rsp",
"OFBVarKey128.rsp",
"OFBVarKey192.rsp",
"OFBVarKey256.rsp",
"OFBVarTxt128.rsp",
"OFBVarTxt192.rsp",
"OFBVarTxt256.rsp",
"OFBMMT128.rsp",
"OFBMMT192.rsp",
"OFBMMT256.rsp",
],
lambda key, **kwargs: algorithms.AES(binascii.unhexlify(key)),
lambda iv, **kwargs: modes.OFB(binascii.unhexlify(iv)),
)
@pytest.mark.supported(
only_if=lambda backend: backend.cipher_supported(
algorithms.AES("\x00" * 16), modes.CFB("\x00" * 16)
),
skip_message="Does not support AES CFB",
)
@pytest.mark.cipher
class TestAESModeCFB(object):
test_CFB = generate_encrypt_test(
load_nist_vectors,
os.path.join("ciphers", "AES", "CFB"),
[
"CFB128GFSbox128.rsp",
"CFB128GFSbox192.rsp",
"CFB128GFSbox256.rsp",
"CFB128KeySbox128.rsp",
"CFB128KeySbox192.rsp",
"CFB128KeySbox256.rsp",
"CFB128VarKey128.rsp",
"CFB128VarKey192.rsp",
"CFB128VarKey256.rsp",
"CFB128VarTxt128.rsp",
"CFB128VarTxt192.rsp",
"CFB128VarTxt256.rsp",
"CFB128MMT128.rsp",
"CFB128MMT192.rsp",
"CFB128MMT256.rsp",
],
lambda key, **kwargs: algorithms.AES(binascii.unhexlify(key)),
lambda iv, **kwargs: modes.CFB(binascii.unhexlify(iv)),
)
@pytest.mark.supported(
only_if=lambda backend: backend.cipher_supported(
algorithms.AES("\x00" * 16), modes.CTR("\x00" * 16)
),
skip_message="Does not support AES CTR",
)
@pytest.mark.cipher
class TestAESModeCTR(object):
test_CTR = generate_encrypt_test(
load_nist_vectors,
os.path.join("ciphers", "AES", "CTR"),
["aes-128-ctr.txt", "aes-192-ctr.txt", "aes-256-ctr.txt"],
lambda key, **kwargs: algorithms.AES(binascii.unhexlify(key)),
lambda iv, **kwargs: modes.CTR(binascii.unhexlify(iv)),
)
@pytest.mark.supported(
only_if=lambda backend: backend.cipher_supported(
algorithms.AES("\x00" * 16), modes.GCM("\x00" * 12)
),
skip_message="Does not support AES GCM",
)
@pytest.mark.cipher
class TestAESModeGCM(object):
test_GCM = generate_aead_test(
load_nist_vectors,
os.path.join("ciphers", "AES", "GCM"),
[
"gcmDecrypt128.rsp",
"gcmDecrypt192.rsp",
"gcmDecrypt256.rsp",
"gcmEncryptExtIV128.rsp",
"gcmEncryptExtIV192.rsp",
"gcmEncryptExtIV256.rsp",
],
lambda key: algorithms.AES(key),
lambda iv, tag: modes.GCM(iv, tag),
)
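# Hedged illustration (not part of the original test module): the generated
# tests above run NIST vector files through a backend's cipher contexts. A
# stand-alone round trip with the same algorithms/modes objects would look
# roughly like this (key, iv and plaintext are arbitrary examples, not NIST
# vectors):
#
#     from cryptography.hazmat.backends import default_backend
#     from cryptography.hazmat.primitives.ciphers import Cipher
#
#     key = b"\x00" * 16
#     iv = b"\x00" * 16
#     enc = Cipher(algorithms.AES(key), modes.CBC(iv), default_backend()).encryptor()
#     ct = enc.update(b"a secret message") + enc.finalize()
#     dec = Cipher(algorithms.AES(key), modes.CBC(iv), default_backend()).decryptor()
#     assert dec.update(ct) + dec.finalize() == b"a secret message"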
|
from pandasdmx.source import add_source, list_sources, sources
def test_list_sources():
source_ids = list_sources()
assert len(source_ids) >= 16
# Listed alphabetically
assert source_ids[0] == "ABS"
assert source_ids[-1] == "WB_WDI"
def test_source_support():
# Implicitly supported endpoint
assert sources["ILO"].supports["categoryscheme"]
# Specifically unsupported endpoint
assert not sources["ESTAT"].supports["categoryscheme"]
# Explicitly supported structure-specific data
assert sources["INEGI"].supports["structure-specific data"]
def test_add_source():
profile = """{
"id": "FOO",
"name": "Demo source",
"url": "https://example.org/sdmx"
}"""
add_source(profile)
# JSON sources do not support metadata endpoints, by default
profile2 = """{
"id": "BAR",
"data_content_type": "JSON",
"name": "Demo source",
"url": "https://example.org/sdmx"
}"""
add_source(profile2)
assert not sources["BAR"].supports["datastructure"]
|
#!/usr/bin/python
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A program to check load level and report to Load Balancer.
Periodically checks load level and reports the result to AppEngine.
The file is passed to Compute Engine instances as a start up script.
At that time, template variables are filled with real values.
"""
import os
import socket
import subprocess
import time
import urllib
class CpuUsageFetcher(object):
"""Class to get CPU usage information of the system and report."""
NUM_PLAYERS_FILE = '/grits/num_players'
REGISTER_URL = 'http://{{ hostname }}/register'
UPDATE_URL = 'http://{{ hostname }}/load'
def __init__(self):
self.hostname = socket.gethostname()
self.prev_idle = 0
self.prev_total = 0
def Register(self):
"""Registers this Compute Engine instance to AppEngine load balancer."""
urllib.urlopen(self.REGISTER_URL,
data=urllib.urlencode({'name': self.hostname}))
def _GetNumPlayers(self):
try:
return int(open(self.NUM_PLAYERS_FILE).read())
except IOError:
return 0
def Check(self):
"""Checks CPU usage and reports to AppEngine load balancer."""
# 8 Players are the max.
load_level = int(12.5 * self._GetNumPlayers())
# Send POST request to /load.
urllib.urlopen(self.UPDATE_URL,
urllib.urlencode({'name': self.hostname,
'load': load_level}))
class GameSetUp(object):
"""Class to retrieve game server start up script and execute it."""
GAME_SETUP_AND_START_URL = 'http://{{ hostname }}/setup-and-start-game'
def Start(self):
response = urllib.urlopen(self.GAME_SETUP_AND_START_URL)
local_script_file = 'startup-and-start'
f = open(local_script_file, 'w')
f.write(response.read())
f.close()
os.chmod(local_script_file, 0700)
subprocess.call('./' + local_script_file)
def main():
GameSetUp().Start()
cpu_fetcher = CpuUsageFetcher()
cpu_fetcher.Register()
time.sleep(5)
while True:
cpu_fetcher.Check()
time.sleep(5)
if __name__ == '__main__':
main()
|
#! /usr/bin/env python
import os
from argparse import ArgumentParser, FileType
from csv import DictReader, DictWriter
def parse_args():
parser = ArgumentParser(
description="Takes a list of names and a list of salutations and puts them together"
)
parser.add_argument(
'names_csv',
type=FileType(),
help='CSV with one column called "name"'
)
parser.add_argument(
'regularized_salutations_csv',
type=FileType(),
help='CSV with one column called "regularized_salutation"'
)
parser.add_argument('salutations_csv', type=FileType('w'))
return parser.parse_args()
def main():
args = parse_args()
names_reader = DictReader(args.names_csv)
regularized_salutation_reader = DictReader(args.regularized_salutations_csv)
writer = DictWriter(args.salutations_csv,
['salutation'],
lineterminator=os.linesep)
writer.writeheader()
names = [row["name"] for row in names_reader]
salutations = [row["regularized_salutation"] for row in regularized_salutation_reader]
for i in range(min(len(names), len(salutations))):
writer.writerow(dict(salutation="{}, {}".format(salutations[i], names[i])))
if __name__ == "__main__":
main()
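# Illustration only (made-up data, not from the original script): if names_csv
# has a "name" column containing "Ada" and regularized_salutations_csv has a
# "regularized_salutation" column containing "Dr.", the output CSV will hold a
# single data row "Dr., Ada" under the "salutation" header.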
|
'''Term class hierarchy.'''
# pylint: disable-msg=W0142
from aterm import types
from aterm import compare
from aterm import hash
from aterm import write
from aterm import lists
class Term(object):
'''Base class for all terms.
Terms are non-modifiable. Changes are carried out by returning another term
instance.
'''
# NOTE: most methods defer the execution to visitors
__slots__ = ['factory']
def __init__(self, factory):
self.factory = factory
# XXX: this has a large impact on performance
if __debug__ and False:
def __setattr__(self, name, value):
'''Prevent modification of term attributes.'''
# TODO: implement this with a metaclass
try:
object.__getattribute__(self, name)
except AttributeError:
object.__setattr__(self, name, value)
else:
raise AttributeError("attempt to modify read-only term attribute '%s'" % name)
def __delattr__(self, name):
'''Prevent deletion of term attributes.'''
raise AttributeError("attempt to delete read-only term attribute '%s'" % name)
def getType(self):
'''Gets the type of this term.'''
return self.type
def getHash(self):
'''Generate a hash value for this term.'''
return hash.fullHash(self)
def getStructuralHash(self):
'''Generate a hash value for this term.
Annotations are not taken into account.
'''
return hash.structuralHash(self)
__hash__ = getStructuralHash
def isEquivalent(self, other):
'''Checks for structural equivalence of this term against another term.'''
return compare.isEquivalent(self, other)
def isEqual(self, other):
'''Checks equality of this term against another term. Note that for two
terms to be equal, any annotations they might have must be equal as
well.'''
return compare.isEqual(self, other)
def __eq__(self, other):
if not isinstance(other, Term):
# TODO: produce a warning
return False
return compare.isEquivalent(self, other)
def __ne__(self, other):
return not self.__eq__(other)
def rmatch(self, other):
'''Matches this term against a string pattern.'''
return self.factory.match(other, self)
def accept(self, visitor, *args, **kargs):
'''Accept a visitor.'''
raise NotImplementedError
def writeToTextFile(self, fp):
'''Write this term to a file object.'''
writer = write.TextWriter(fp)
writer.visit(self)
def __str__(self):
'''Get the string representation of this term.'''
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
fp = StringIO()
self.writeToTextFile(fp)
return fp.getvalue()
def __repr__(self):
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
fp = StringIO()
writer = write.AbbrevTextWriter(fp, 3)
try:
writer.visit(self)
except Exception:
fp.write('...<error>')
return '<Term %s>' % (fp.getvalue(),)
class Lit(Term):
'''Base class for literal terms.'''
__slots__ = ['value']
def __init__(self, factory, value):
Term.__init__(self, factory)
self.value = value
def getValue(self):
return self.value
class Integer(Lit):
'''Integer literal term.'''
__slots__ = []
type = types.INT
def __init__(self, factory, value):
if not isinstance(value, (int, long)):
raise TypeError('value is not an integer', value)
Lit.__init__(self, factory, value)
def __int__(self):
return int(self.value)
def accept(self, visitor, *args, **kargs):
return visitor.visitInt(self, *args, **kargs)
class Real(Lit):
'''Real literal term.'''
__slots__ = []
type = types.REAL
def __init__(self, factory, value):
if not isinstance(value, float):
raise TypeError('value is not a float', value)
Lit.__init__(self, factory, value)
def __float__(self):
return float(self.value)
def accept(self, visitor, *args, **kargs):
return visitor.visitReal(self, *args, **kargs)
class Str(Lit):
'''String literal term.'''
__slots__ = []
type = types.STR
def __init__(self, factory, value):
if not isinstance(value, str):
raise TypeError('value is not a string', value)
Lit.__init__(self, factory, value)
def accept(self, visitor, *args, **kargs):
return visitor.visitStr(self, *args, **kargs)
class List(Term):
'''Base class for list terms.'''
__slots__ = []
# Python's list compatibility methods
def __nonzero__(self):
return not lists.empty(self)
def __len__(self):
return lists.length(self)
def __getitem__(self, index):
return lists.item(self, index)
def __iter__(self):
return lists.Iter(self)
def insert(self, index, element):
return lists.insert(self, index, element)
def append(self, element):
return lists.append(self, element)
def extend(self, other):
return lists.extend(self, other)
def reverse(self):
return lists.reverse(self)
def accept(self, visitor, *args, **kargs):
return visitor.visitList(self, *args, **kargs)
class Nil(List):
'''Empty list term.'''
__slots__ = []
type = types.NIL
def __init__(self, factory):
List.__init__(self, factory)
def accept(self, visitor, *args, **kargs):
return visitor.visitNil(self, *args, **kargs)
class Cons(List):
'''Concatenated list term.'''
__slots__ = ['head', 'tail']
type = types.CONS
def __init__(self, factory, head, tail):
List.__init__(self, factory)
if not isinstance(head, Term):
raise TypeError("head is not a term", head)
self.head = head
if not isinstance(tail, List):
raise TypeError("tail is not a list term", tail)
self.tail = tail
def accept(self, visitor, *args, **kargs):
return visitor.visitCons(self, *args, **kargs)
class Appl(Term):
'''Application term.'''
__slots__ = ['name', 'args', 'annotations']
type = types.APPL
def __init__(self, factory, name, args, annotations):
Term.__init__(self, factory)
if not isinstance(name, basestring):
raise TypeError("name is not a string", name)
self.name = name
self.args = tuple(args)
for arg in self.args:
if not isinstance(arg, Term):
raise TypeError("arg is not a term", arg)
if not isinstance(annotations, List):
raise TypeError("annotations is not a list", annotations)
self.annotations = annotations
def getArity(self):
return len(self.args)
def setAnnotations(self, annotations):
'''Return a copy of this term with the given annotations.'''
return self.factory.makeAppl(self.name, self.args, annotations)
def removeAnnotations(self):
'''Return a copy of this term with all annotations removed.'''
return self.factory.makeAppl(self.name, self.args)
def accept(self, visitor, *args, **kargs):
return visitor.visitAppl(self, *args, **kargs)
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
#NOTE: this example requires PyAudio because it uses the Microphone class
import sys
import speech_recognition as sr
import subprocess, shlex
import os
if len(sys.argv) > 1:
language = sys.argv[1]
if len(sys.argv) > 2:
region = sys.argv[2]
# obtain audio from the microphone
r = sr.Recognizer()
while True:
with sr.Microphone() as source:
r.adjust_for_ambient_noise(source) # listen for 1 second to calibrate the energy threshold for ambient noise levels
print("Say something!")
audio = r.listen(source)
# recognize speech using Google Speech Recognition
try:
# for testing purposes, we're just using the default API key
# to use another API key, use `r.recognize_google(audio, key="GOOGLE_SPEECH_RECOGNITION_API_KEY")`
# instead of `r.recognize_google(audio)`
print "Google Speech Recognition thinks you said " + r.recognize_google(audio)
text2 = r.recognize_google(audio)
text = text2.encode('utf-8')
langspeaker = ''
try:
language
except NameError:
language = 'en'
try:
region
except NameError:
region = 'US'
if (language == 'de') or (language == 'en') or (language == 'es') or (language == 'it') or (language == 'fr'):
langspeaker = 'pico'
print "Language: %s" % language
bashCommand = 'googletranslate "%s" "%s"' % (text, language)
args = shlex.split(bashCommand)
output = subprocess.Popen(args, stdout=subprocess.PIPE).stdout.read()
print output
if (langspeaker == 'pico'):
os.popen('pico2wave -l=%s-%s -w=/tmp/test.wav "%s"' % (language, region, output))
os.popen('aplay /tmp/test.wav')
os.popen('rm /tmp/test.wav')
else:
os.popen('/home/linus/get_voices.sh "%s" "%s"' % (language, output))
except sr.UnknownValueError:
print("Google Speech Recognition could not understand audio")
except sr.RequestError as e:
print("Could not request results from Google Speech Recognition service; {0}".format(e))
except KeyboardInterrupt:
print("A translator by Linus Claußnitzer")
|
"""
AutoCanary | https://firstlook.org/code/autocanary
Copyright (c) 2015 First Look Media
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import feedparser
# Avoid feedparser python3 bug, see https://github.com/manolomartinez/greg/issues/47
feedparser.PREFERRED_XML_PARSERS.remove('drv_libxml2')
from . import common
config = {
'feed_url': 'https://en.wikinews.org/w/index.php?title=Special:NewsFeed&feed=rss&categories=Published¬categories=No%20publish|Archived|AutoArchived|disputed&namespace=0&count=5&ordermethod=categoryadd&stablepages=only',
'headline_bullet': u"\u2022"
}
class Headlines(object):
def __init__(self):
self.enabled = False
self.have_headlines = False
self.headlines_str = None
def fetch_headlines(self):
# --- feed.entries is empty list on fail.
feed = feedparser.parse(config['feed_url'])
# --- available keys: summary_detail published_parsed links title
# comments summary guidislink title_detail link published id
entry_data = list(map(lambda x: (x.title,), feed.entries))
headlines = list(map(lambda x: "{} {}".format(config['headline_bullet'], x[0]), entry_data))
if len(headlines) == 0:
self.have_headlines = False
common.alert("Couldn't fetch headlines.")
else:
self.have_headlines = True
self.store_headlines(headlines)
def store_headlines(self, headlines):
self.headlines_str = '\n'.join(headlines)
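# Minimal usage sketch (illustrative; assumes the surrounding AutoCanary app,
# which provides common.alert for error reporting):
#
#   headlines = Headlines()
#   headlines.fetch_headlines()
#   if headlines.have_headlines:
#       print(headlines.headlines_str)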
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-05-23 13:25
from __future__ import unicode_literals
import bluebottle.bb_projects.fields
from django.db import migrations, models
import django_extensions.db.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='DonationJournal',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('amount', bluebottle.bb_projects.fields.MoneyField(decimal_places=2, max_digits=12, verbose_name='amount')),
('user_reference', models.CharField(blank=True, max_length=100, verbose_name=b'user reference')),
('description', models.CharField(blank=True, max_length=400)),
('date', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='Created')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='OrderPaymentJournal',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('amount', bluebottle.bb_projects.fields.MoneyField(decimal_places=2, max_digits=12, verbose_name='amount')),
('user_reference', models.CharField(blank=True, editable=False, max_length=100, verbose_name=b'user reference')),
('description', models.CharField(blank=True, max_length=400)),
('date', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='Created')),
],
),
migrations.CreateModel(
name='OrganizationPayoutJournal',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('amount', bluebottle.bb_projects.fields.MoneyField(decimal_places=2, max_digits=12, verbose_name='amount')),
('user_reference', models.CharField(blank=True, max_length=100, verbose_name=b'user reference')),
('description', models.CharField(blank=True, max_length=400)),
('date', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='Created')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='ProjectPayoutJournal',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('amount', bluebottle.bb_projects.fields.MoneyField(decimal_places=2, max_digits=12, verbose_name='amount')),
('user_reference', models.CharField(blank=True, max_length=100, verbose_name=b'user reference')),
('description', models.CharField(blank=True, max_length=400)),
('date', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='Created')),
],
options={
'abstract': False,
},
),
]
|
import threading
import time
from core.lib.jobs.base import BaseJob
__author__ = 'Federico Schmidt'
class TestJob(BaseJob):
def __init__(self, ret_val=0, *args, **kwargs):
super(TestJob, self).__init__(*args, **kwargs)
self.ret_val = ret_val
def run(self):
return self.ret_val
class ReaderThread(threading.Thread):
def __init__(self, rwlock, id, run_order):
super(ReaderThread, self).__init__()
self.rwlock = rwlock
self.id = id
self.run_order = run_order
def run(self):
with self.rwlock.reader():
time.sleep(0.5)
self.run_order.append(self.id)
class WriterThread(threading.Thread):
def __init__(self, rwlock, id, run_order, priority=0):
super(WriterThread, self).__init__()
self.rwlock = rwlock
self.id = id
self.run_order = run_order
self.priority = priority
def run(self):
with self.rwlock.writer(self.priority):
time.sleep(1)
self.run_order.append(self.id)
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'configuredialog.ui'
#
# Created: Sat May 31 23:14:20 2014
# by: pyside-uic 0.2.14 running on PySide 1.1.2
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore, QtGui
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(418, 391)
self.gridLayout = QtGui.QGridLayout(Dialog)
self.gridLayout.setObjectName("gridLayout")
self.buttonBox = QtGui.QDialogButtonBox(Dialog)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
self.buttonBox.setObjectName("buttonBox")
self.gridLayout.addWidget(self.buttonBox, 1, 0, 1, 1)
self.configGroupBox = QtGui.QGroupBox(Dialog)
self.configGroupBox.setTitle("")
self.configGroupBox.setObjectName("configGroupBox")
self.formLayout = QtGui.QFormLayout(self.configGroupBox)
self.formLayout.setFieldGrowthPolicy(QtGui.QFormLayout.AllNonFixedFieldsGrow)
self.formLayout.setObjectName("formLayout")
self.label0 = QtGui.QLabel(self.configGroupBox)
self.label0.setObjectName("label0")
self.formLayout.setWidget(0, QtGui.QFormLayout.LabelRole, self.label0)
self.lineEdit0 = QtGui.QLineEdit(self.configGroupBox)
self.lineEdit0.setObjectName("lineEdit0")
self.formLayout.setWidget(0, QtGui.QFormLayout.FieldRole, self.lineEdit0)
self.label_2 = QtGui.QLabel(self.configGroupBox)
self.label_2.setObjectName("label_2")
self.formLayout.setWidget(1, QtGui.QFormLayout.LabelRole, self.label_2)
self.lineEditFHC = QtGui.QLineEdit(self.configGroupBox)
self.lineEditFHC.setObjectName("lineEditFHC")
self.formLayout.setWidget(1, QtGui.QFormLayout.FieldRole, self.lineEditFHC)
self.label_3 = QtGui.QLabel(self.configGroupBox)
self.label_3.setObjectName("label_3")
self.formLayout.setWidget(2, QtGui.QFormLayout.LabelRole, self.label_3)
self.lineEditMEC = QtGui.QLineEdit(self.configGroupBox)
self.lineEditMEC.setObjectName("lineEditMEC")
self.formLayout.setWidget(2, QtGui.QFormLayout.FieldRole, self.lineEditMEC)
self.label_4 = QtGui.QLabel(self.configGroupBox)
self.label_4.setObjectName("label_4")
self.formLayout.setWidget(3, QtGui.QFormLayout.LabelRole, self.label_4)
self.lineEditLEC = QtGui.QLineEdit(self.configGroupBox)
self.lineEditLEC.setObjectName("lineEditLEC")
self.formLayout.setWidget(3, QtGui.QFormLayout.FieldRole, self.lineEditLEC)
self.label_5 = QtGui.QLabel(self.configGroupBox)
self.label_5.setObjectName("label_5")
self.formLayout.setWidget(4, QtGui.QFormLayout.LabelRole, self.label_5)
self.lineEditFGT = QtGui.QLineEdit(self.configGroupBox)
self.lineEditFGT.setObjectName("lineEditFGT")
self.formLayout.setWidget(4, QtGui.QFormLayout.FieldRole, self.lineEditFGT)
self.label_7 = QtGui.QLabel(self.configGroupBox)
self.label_7.setObjectName("label_7")
self.formLayout.setWidget(5, QtGui.QFormLayout.LabelRole, self.label_7)
self.checkBoxGUI = QtGui.QCheckBox(self.configGroupBox)
self.checkBoxGUI.setText("")
self.checkBoxGUI.setObjectName("checkBoxGUI")
self.formLayout.setWidget(5, QtGui.QFormLayout.FieldRole, self.checkBoxGUI)
self.gridLayout.addWidget(self.configGroupBox, 0, 0, 1, 1)
self.retranslateUi(Dialog)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL("accepted()"), Dialog.accept)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL("rejected()"), Dialog.reject)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
Dialog.setWindowTitle(QtGui.QApplication.translate("Dialog", "ConfigureDialog", None, QtGui.QApplication.UnicodeUTF8))
self.label0.setText(QtGui.QApplication.translate("Dialog", "identifier: ", None, QtGui.QApplication.UnicodeUTF8))
self.label_2.setText(QtGui.QApplication.translate("Dialog", "Femoral Head Centre:", None, QtGui.QApplication.UnicodeUTF8))
self.label_3.setText(QtGui.QApplication.translate("Dialog", "Medial Epicondyle:", None, QtGui.QApplication.UnicodeUTF8))
self.label_4.setText(QtGui.QApplication.translate("Dialog", "Lateral Epicondyle:", None, QtGui.QApplication.UnicodeUTF8))
self.label_5.setText(QtGui.QApplication.translate("Dialog", "Greater Trochanter:", None, QtGui.QApplication.UnicodeUTF8))
self.label_7.setText(QtGui.QApplication.translate("Dialog", "GUI:", None, QtGui.QApplication.UnicodeUTF8))
|
from beprof import curve
from beprof import functions
import numpy as np
import logging
__author__ = 'grzanka'
logger = logging.getLogger(__name__)
class Profile(curve.Curve):
"""
General profile characterized by rising and falling edge.
"""
def __new__(cls, input_array, axis=None, **meta):
logger.info('Creating Profile object, metadata is: %s', meta)
# input_array shape control provided in Curve class
new = super(Profile, cls).__new__(cls, input_array, **meta)
if axis is None:
new.axis = getattr(input_array, 'axis', None)
else:
new.axis = axis
return new
def __array_finalize__(self, obj):
if obj is None:
return
self.metadata = getattr(obj, 'metadata', {})
def x_at_y(self, y, reverse=False):
"""
Calculates the inverse profile - for a given y, returns x such that f(x) = y.
If the given y is not found in self.y, interpolation is used.
By default the first result looking from the left is returned;
if the reverse argument is set to True,
the lookup is done from the right. If y is outside the range of self.y
then np.nan is returned.
Use inverse lookup to get x-coordinate of first point:
>>> float(Profile([[0.0, 5.0], [0.1, 10.0], [0.2, 20.0], [0.3, 10.0]])\
.x_at_y(5.))
0.0
Use inverse lookup to get x-coordinate of second point,
looking from left:
>>> float(Profile([[0.0, 5.0], [0.1, 10.0], [0.2, 20.0], [0.3, 10.0]])\
.x_at_y(10.))
0.1
Use inverse lookup to get x-coordinate of fourth point,
looking from right:
>>> float(Profile([[0.0, 5.0], [0.1, 10.0], [0.2, 20.0], [0.3, 10.0]])\
.x_at_y(10., reverse=True))
0.3
Use interpolation between first two points:
>>> float(Profile([[0.0, 5.0], [0.1, 10.0], [0.2, 20.0], [0.3, 10.0]])\
.x_at_y(7.5))
0.05
Looking for y below self.y range:
>>> float(Profile([[0.0, 5.0], [0.1, 10.0], [0.2, 20.0], [0.3, 10.0]])\
.x_at_y(2.0))
nan
Looking for y above self.y range:
>>> float(Profile([[0.0, 5.0], [0.1, 10.0], [0.2, 20.0], [0.3, 10.0]])\
.x_at_y(22.0))
nan
:param y: reference value
:param reverse: boolean value - direction of lookup
:return: x value corresponding to given y or NaN if not found
"""
logger.info('Running %(name)s.x_at_y(y=%(y)s, reverse=%(rev)s)',
{"name": self.__class__, "y": y, "rev": reverse})
# positive or negative direction handles
x_handle, y_handle = self.x, self.y
if reverse:
x_handle, y_handle = self.x[::-1], self.y[::-1]
# find the index of first value in self.y greater or equal than y
cond = y_handle >= y
ind = np.argmax(cond)
# two boundary conditions where x cannot be found:
# A) y > max(self.y)
# B) y < min(self.y)
# A) if y > max(self.y) then condition self.y >= y
# will never be satisfied
# np.argmax( cond ) will be equal 0 and cond[ind] will be False
if not cond[ind]:
return np.nan
# B) if y < min(self.y) then condition self.y >= y
# will be satisfied on first item
# np.argmax(cond) will be equal 0,
# to exclude situation that y_handle[0] = y
# we also check if y < y_handle[0]
if ind == 0 and y < y_handle[0]:
return np.nan
# use lookup if y in self.y:
if cond[ind] and y_handle[ind] == y:
return x_handle[ind]
# alternatively - pure python implementation
# return x_handle[ind] - \
# ((x_handle[ind] - x_handle[ind - 1]) / \
# (y_handle[ind] - y_handle[ind - 1])) * \
# (y_handle[ind] - y)
# use interpolation
sl = slice(ind - 1, ind + 1)
return np.interp(y, y_handle[sl], x_handle[sl])
def width(self, level):
"""
Width of the profile at a given level.
:param level: y value at which the width is measured
:return: difference between the rightmost and leftmost x at that level
"""
return self.x_at_y(level, reverse=True) - self.x_at_y(level)
@property
def fwhm(self):
"""
Full width at half-maximum.
:return: width at the level equal to half of the maximum y value
"""
return self.width(0.5 * np.max(self.y))
def normalize(self, dt, allow_cast=True):
"""
Normalize to 1 over the [-dt, +dt] area. If allow_cast is set
to True, the division may be done out of place and casting may occur.
If in-place division is not possible and allow_cast is False,
an exception is raised.
>>> a = Profile([[0, 0], [1, 5], [2, 10], [3, 5], [4, 0]])
>>> a.normalize(1, allow_cast=True)
>>> print(a.y)
[ 0. 2. 4. 2. 0.]
:param dt:
:param allow_cast:
"""
if dt <= 0:
raise ValueError("Expected positive input")
logger.info('Running %(name)s.normalize(dt=%(dt)s)', {"name": self.__class__, "dt": dt})
try:
ave = np.average(self.y[np.fabs(self.x) <= dt])
except RuntimeWarning as e:
logger.error('in normalize(). self class is %(name)s, dt=%(dt)s', {"name": self.__class__, "dt": dt})
raise Exception("Scaling factor error: {0}".format(e))
try:
self.y /= ave
except TypeError as e:
logger.warning("Division in place is impossible: %s", e)
if allow_cast:
self.y = self.y / ave
else:
logger.error("Division in place impossible - allow_cast flag set to True should help")
raise
def __str__(self):
logger.info('Running %s.__str__', self.__class__)
ret = curve.Curve.__str__(self)
ret += "\nFWHM = {:2.3f}".format(self.fwhm)
return ret
def main():
print('\nProfile')
p = Profile([[0, 0], [1, 1], [2, 2], [3, 1]], some='exemplary', meta='data')
print(p)
print(type(p))
print("X:", p.x)
print("Y:", p.y)
print('M: ', p.metadata)
p2 = Profile([[1.5, 1], [2.5, 1], [3.5, 2], [4, 1]])
print("X:", p2.x)
print("Y:", p2.y)
print('M: ', p2.metadata)
b = curve.Curve([[0.5, 1], [1.5, 1], [2, 1], [2.5, 1]], negative='one')
print('\na: \n')
print('X: ', b.x)
print('Y: ', b.y)
print('M: ', b.metadata)
diff = functions.subtract(p, b)
print('type(diff): ', type(diff))
print("X:", diff.x)
print("Y:", diff.y)
print('M: ', diff.metadata)
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
import cPickle
import sys
from TestHost import TestHost
def hostList():
"""Modify this list to reflect the STAF hosts available in your local network"""
list = []
list.append(TestHost('patrick', ['x86.linux-dev', 'x86.linux-release'], ['RTS', 'HDE', 'SRC'], 6500))
list.append(TestHost('patrick-winxp', ['x86.win32-release'], ['HDE', 'RTS']))
list.append(TestHost('xlrw04.testnet.ptnl', ['x86.linux-dev', 'x86.linux-release']))
list.append(TestHost('xlrw05.testnet.ptnl', ['x86.win32-dev', 'x86.win32-release']))
list.append(TestHost('xlrw06.testnet.ptnl', ['x86.linux-dev']))
list.append(TestHost('xlrw07.testnet.ptnl', ['x86.linux-dev']))
list.append(TestHost('xlrw08.testnet.ptnl', ['x86.linux-dev']))
list.append(TestHost('ssso09.testnet.ptnl', ['sparc.solaris8-dev', 'sparc.solaris8-release']))
list.append(TestHost('sssx17.testnet.ptnl', ['sparc.solaris10_studio12-dev', 'sparc.solaris10_studio12-release']))
list.append(TestHost('perf4.perfnet.ptnl', ['x86_64.linux-dev', 'x86_64.linux-release']))
return list
if __name__ == '__main__':
if (len(sys.argv) != 2):
print 'Usage: %s [fileName]' % sys.argv[0]
sys.exit(1)
list = hostList()
output = open(sys.argv[1], 'wb')
cPickle.dump(list, output)
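# For reference (illustrative, not part of the original script): the pickled
# host list written above can be restored later with
#
#   hosts = cPickle.load(open(fileName, 'rb'))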
|
#######################################################################
# This file is part of redminelib.
#
# Copyright (C) 2011 Will Kahn-Greene
#
# redminelib is distributed under the MIT license. See the file
# COPYING for distribution details.
#######################################################################
from redminelib.redmine import RedmineScraper
from redminelib.tests import get_testdata
import os
from nose.tools import eq_
def test_716():
rs = RedmineScraper("")
data = open(os.path.join(get_testdata(), "716.html")).read()
issue = rs.parse_issue(data)
# extracted
eq_(issue["id"], "716")
eq_(issue["title"], u'Apache FCGI documentation In Manual')
eq_(issue["author"], u"Sam Kleinman")
eq_(issue["creation-date"], "12/20/2011 10:23 am")
eq_(issue["last-updated-date"], "12/22/2011 06:29 pm")
eq_(issue["description"], u'')
# details table
eq_(issue["priority"], "Normal")
eq_(issue["status"], "New")
eq_(issue["start-date"], "12/20/2011")
eq_(issue["due-date"], "")
eq_(issue["assigned-to"], "Sam Kleinman")
eq_(issue["progress"], "0%")
eq_(issue["category"], "Documentation")
eq_(issue["fixed-version"], "-")
# history
eq_(len(issue["history"]), 1)
hist1 = issue["history"][0]
eq_(hist1["date"], "12/22/2011 06:29 pm")
eq_(hist1["author"], "Blaise Alleyne")
props = hist1["properties"]
eq_(len(props), 0)
|
'''
Generates "pprldistr2009_RLB.pickle.gz" that is used to plot, in the background, the results of the 2009 algorithms
'''
# datapath = "../../data-archive/data/gecco-bbob-1-24/2009/data"
datapaths = ["../../data-archive/data/gecco-bbob-1-24/2009/data", "../../data-archive/data/gecco-bbob-noisy/2009/data"]
savepath = "cocopp/pprldistr2009_hardestRLB.pickle"
import pickle
import cocopp as bb
import numpy as np
data = {}
for datapath in datapaths:
print "loading data from", datapath
data2009 = bb.load(datapath)
Algs = data2009.dictByAlg()
target_runlengths_in_table = [0.5, 1.2, 3, 10, 50]
targets = bb.pproc.RunlengthBasedTargetValues(target_runlengths_in_table,
force_different_targets_factor = 10 ** -0.2)
for alg in Algs:
curAlg = Algs[alg].dictByFunc()
algname = curAlg[curAlg.keys()[0]][0].algId
if not data.has_key(algname):
data[algname] = {}
for func in curAlg:
data[algname][func] = {}
funcdata = curAlg[func].dictByDim()
for dim in funcdata:
data[algname][func][dim] = [[]]
curtarget = targets((func, dim))[-1]
data[algname][func][dim][0].append(curtarget) # record hardest target
datum = funcdata[dim][0]
y = datum.detEvals([curtarget])[0]
data[algname][func][dim][0].append(y)
x = y[np.isnan(y) == False]
bb.pprldistr.plotECDF(x[np.isfinite(x)] / float(dim), len(y))
print algname, "done"
with open(savepath, "w") as f:
pickle.dump(data, f)
"""
G.detEvals([targets((G.funcId,G.dim))[-1]])
x=hstack(G.detEvals([targets((G.funcId,G.dim))[-1]]))
y=x[isnan(list(x))==False]
"""
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Defines the unit tests for the
:mod:`colour_demosaicing.bayer.demosaicing.malvar2004` module.
"""
import numpy as np
import os
import unittest
from colour import read_image
from colour_demosaicing import TESTS_RESOURCES_DIRECTORY
from colour_demosaicing.bayer import demosaicing_CFA_Bayer_Malvar2004
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2015-2021 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '[email protected]'
__status__ = 'Production'
__all__ = ['BAYER_DIRECTORY', 'TestDemosaicing_CFA_Bayer_Malvar2004']
BAYER_DIRECTORY = os.path.join(TESTS_RESOURCES_DIRECTORY, 'colour_demosaicing',
'bayer')
class TestDemosaicing_CFA_Bayer_Malvar2004(unittest.TestCase):
"""
Defines :func:`colour_demosaicing.bayer.demosaicing.malvar2004.\
demosaicing_CFA_Bayer_Malvar2004` definition unit tests methods.
"""
def test_demosaicing_CFA_Bayer_Malvar2004(self):
"""
Tests :func:`colour_demosaicing.bayer.demosaicing.malvar2004.\
demosaicing_CFA_Bayer_Malvar2004` definition.
"""
for pattern in ('RGGB', 'BGGR', 'GRBG', 'GBRG'):
CFA = os.path.join(BAYER_DIRECTORY, 'Lighthouse_CFA_{0}.exr')
RGB = os.path.join(BAYER_DIRECTORY,
'Lighthouse_Malvar2004_{0}.exr')
np.testing.assert_almost_equal(
demosaicing_CFA_Bayer_Malvar2004(
read_image(str(CFA.format(pattern)))[..., 0], pattern),
read_image(str(RGB.format(pattern))),
decimal=7)
if __name__ == '__main__':
unittest.main()
|
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import (
compat_chr,
compat_ord,
compat_urllib_parse_unquote,
)
from ..utils import (
int_or_none,
parse_iso8601,
)
class BeegIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?beeg\.com/(?P<id>\d+)'
_TEST = {
'url': 'http://beeg.com/5416503',
'md5': '46c384def73b33dbc581262e5ee67cef',
'info_dict': {
'id': '5416503',
'ext': 'mp4',
'title': 'Sultry Striptease',
'description': 'md5:d22219c09da287c14bed3d6c37ce4bc2',
'timestamp': 1391813355,
'upload_date': '20140207',
'duration': 383,
'tags': list,
'age_limit': 18,
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
video = self._download_json(
'http://beeg.com/api/v5/video/%s' % video_id, video_id)
def split(o, e):
def cut(s, x):
n.append(s[:x])
return s[x:]
n = []
r = len(o) % e
if r > 0:
o = cut(o, r)
while len(o) > e:
o = cut(o, e)
n.append(o)
return n
def decrypt_key(key):
# Reverse engineered from http://static.beeg.com/cpl/1105.js
a = '5ShMcIQlssOd7zChAIOlmeTZDaUxULbJRnywYaiB'
e = compat_urllib_parse_unquote(key)
o = ''.join([
compat_chr(compat_ord(e[n]) - compat_ord(a[n % len(a)]) % 21)
for n in range(len(e))])
return ''.join(split(o, 3)[::-1])
def decrypt_url(encrypted_url):
encrypted_url = self._proto_relative_url(
encrypted_url.replace('{DATA_MARKERS}', ''), 'http:')
key = self._search_regex(
r'/key=(.*?)%2Cend=', encrypted_url, 'key', default=None)
if not key:
return encrypted_url
return encrypted_url.replace(key, decrypt_key(key))
formats = []
for format_id, video_url in video.items():
if not video_url:
continue
height = self._search_regex(
r'^(\d+)[pP]$', format_id, 'height', default=None)
if not height:
continue
formats.append({
'url': decrypt_url(video_url),
'format_id': format_id,
'height': int(height),
})
self._sort_formats(formats)
title = video['title']
video_id = video.get('id') or video_id
display_id = video.get('code')
description = video.get('desc')
timestamp = parse_iso8601(video.get('date'), ' ')
duration = int_or_none(video.get('duration'))
tags = [tag.strip() for tag in video['tags'].split(',')] if video.get('tags') else None
return {
'id': video_id,
'display_id': display_id,
'title': title,
'description': description,
'timestamp': timestamp,
'duration': duration,
'tags': tags,
'formats': formats,
'age_limit': 18,
}
|
from __future__ import print_function
import gzip
import time
import logging
import resource
import socket
import os
CWD = os.path.dirname(__file__)
DATA_FILE = os.path.join(CWD, '17monipdb.dat')
# logging.basicConfig(level=logging.DEBUG)
with gzip.open(os.path.join(CWD, 'ips.gz'), 'rb') as f:
testdata = [l.strip().decode('latin1') for l in f]
class BenchBase(object):
pass
class Bench_official(BenchBase):
def setup(self):
import ipip
self.db = ipip.IP
self.db.load(DATA_FILE)
def run(self):
db = self.db
for ip in testdata:
db.find(ip)
class Bench_lxyu(BenchBase):
def setup(self):
import IP
self.db = IP.IPv4Database(DATA_FILE)
def run(self):
db = self.db
for ip in testdata:
db.find(ip)
class Bench_pyipip(BenchBase):
def setup(self):
from pyipip import IPIPDatabase
self.db = IPIPDatabase(DATA_FILE)
def run(self):
db = self.db
for ip in testdata:
db.lookup(ip)
def main():
N = 3
for b in BenchBase.__subclasses__():
n = b.__name__.split('_')[1]
c = b()
c.setup()
c.run() # warm-up
s = time.time()
for _ in range(N):
c.run()
e = time.time() - s
print(n, '%.2f' % (N*len(testdata) / e))
if __name__ == '__main__':
main()
|
# coding: utf8
"""
Django settings for RunningCause project.
"""
import os
from os.path import dirname, abspath
import dj_database_url
from django.utils.translation import ugettext_lazy as _
from django.contrib.messages import constants as messages_constants
ADMINS = (
('Kraen Hansen', '[email protected]'),
)
MANAGERS = (
('Kraen Hansen', '[email protected]'),
)
MESSAGE_TAGS = {
messages_constants.ERROR: 'danger',
}
PROJECT_DIR = dirname(dirname(abspath(__file__)))
BASE_DIR = dirname(PROJECT_DIR)
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
SECRET_KEY = os.getenv('DJANGO_SECRET_KEY', '')
RUNKEEPER_CLIENT_ID = os.getenv('RUNKEEPER_CLIENT_ID', '')
RUNKEEPER_CLIENT_SECRET = os.getenv('RUNKEEPER_CLIENT_SECRET')
STRIPE_PUBLIC_KEY = os.environ.get('STRIPE_PUBLIC_KEY', '')
STRIPE_SECRET_KEY = os.environ.get('STRIPE_SECRET_KEY', '')
COURRIERS_MAILCHIMP_API_KEY = os.environ.get('MAILCHIMP_API_KEY', '')
COURRIERS_MAILCHIMP_LIST = '2640511eac'
DEBUG = False
TEMPLATE_DEBUG = DEBUG
# Application definition
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.humanize',
'django.contrib.admin',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'widget_tweaks',
'django_redis',
'allauth',
'allauth.account',
# 'allauth.socialaccount',
'django_extensions',
'bootstrap3',
'rosetta',
'courriers',
'cloudinary',
'profile',
'runs',
'sponsorship',
'challenges',
'invitations',
'tools',
'pages',
'payments',
'common',
)
MIDDLEWARE_CLASSES = (
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'RunningCause.middleware.RedirectFromCnamesMiddleware',
)
AUTHENTICATION_BACKENDS = (
# Needed to login by username in Django admin, regardless of `allauth`
"django.contrib.auth.backends.ModelBackend",
# `allauth` specific authentication methods, such as login by e-mail
"allauth.account.auth_backends.AuthenticationBackend"
)
AUTH_USER_MODEL = "profile.User"
LOGIN_REDIRECT_URL = '/profile/sign_in_landing'
LOGIN_URL = '/profile/signuporlogin'
LOGOUT_REDIRECT_URL = '/'
ACCOUNT_ADAPTER = 'RunningCause.allauth.adaptor.AccountAdapter'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_USERNAME_REQUIRED = True
ACCOUNT_SIGNUP_PASSWORD_VERIFICATION = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_PASSWORD_MIN_LENGTH = 8
ACCOUNT_SESSION_REMEMBER = None
USER_MODEL_USERNAME_FIELD = 'username'
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
ACCOUNT_SIGNUP_FORM_CLASS = 'profile.forms.SignupForm'
EMAIL_SUBJECT_PREFIX = ''
ACCOUNT_EMAIL_SUBJECT_PREFIX = EMAIL_SUBJECT_PREFIX
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_USE_TLS = True
EMAIL_HOST = os.getenv('MAILGUN_SMTP_SERVER')
EMAIL_HOST_USER = os.getenv('MAILGUN_SMTP_LOGIN')
EMAIL_HOST_PASSWORD = os.getenv('MAILGUN_SMTP_PASSWORD')
EMAIL_PORT = os.getenv('MAILGUN_SMTP_PORT')
DEFAULT_FROM_EMAIL = 'Masanga Runners <[email protected]>'
SOCIALACCOUNT_QUERY_EMAIL = False
COURRIERS_BACKEND_CLASS = 'courriers.backends.mailchimp.MailchimpBackend'
COURRIERS_DEFAULT_FROM_NAME = 'masanga'
SOCIALACCOUNT_PROVIDERS = {
# 'facebook': {
# 'SCOPE': ['email', 'publish_stream'],
# 'METHOD': 'js_sdk'
# }
}
ROOT_URLCONF = 'RunningCause.urls'
WSGI_APPLICATION = 'RunningCause.wsgi.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
if os.getenv('DATABASE_URL'):
DATABASES['default'] = dj_database_url.config(default=os.environ.get('DATABASE_URL'))
REDIS_URL = os.getenv('REDIS_URL', 'redis://127.0.0.1:6379')
SESSION_ENGINE = "django.contrib.sessions.backends.cache"
SESSION_CACHE_ALIAS = "default"
CACHES = {
'default': {
'BACKEND': 'django_redis.cache.RedisCache',
'LOCATION': REDIS_URL,
'OPTIONS': {
'CLIENT_CLASS': 'django_redis.client.DefaultClient',
'SOCKET_CONNECT_TIMEOUT': 5, # in seconds
'SOCKET_TIMEOUT': 5, # in seconds
}
}
}
LANGUAGES = (
('en', _('English')),
('da', _('Danish')),
)
LANGUAGE_CODE = 'da-dk'
SITE_ID = 1
SITE_DOMAIN = 'runners.masanga.dk'
BASE_URL = 'http://' + SITE_DOMAIN
APP_URL = os.getenv('APP_URL')
TIME_ZONE = 'Europe/Copenhagen'
USE_I18N = True
USE_L10N = True
USE_TZ = True
LOCALE_PATHS = (
os.path.join(BASE_DIR, 'locale'),
)
TEMPLATE_DIRS = [
os.path.join(os.path.dirname(__file__), 'templates'),
]
TEMPLATE_CONTEXT_PROCESSORS = (
"django.core.context_processors.request",
"django.core.context_processors.i18n",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
"django.template.context_processors.static",
"django.template.context_processors.media",
"RunningCause.context_processors.base_url",
)
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(PROJECT_DIR, 'templates'),
],
'OPTIONS': {
'context_processors': TEMPLATE_CONTEXT_PROCESSORS,
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
],
},
},
]
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Allow all host headers
ALLOWED_HOSTS = [
SITE_DOMAIN,
'www.' + SITE_DOMAIN,
]
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(PROJECT_DIR, 'static'),
)
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'default': {
'format': '%(asctime)s [%(name)s:%(lineno)s] %(levelname)s - %(message)s',
},
'simple': {
'format': '%(levelname)s %(message)s',
},
},
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse',
}
},
'handlers': {
'null': {
'level': 'DEBUG',
'class': 'logging.NullHandler',
},
'console': {
'level': 'INFO',
'class': 'logging.StreamHandler',
'formatter': 'default',
},
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler',
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
'django.db': {
'handlers': ['console', ],
'level': 'INFO',
},
'RunningCause': {
'handlers': ['console', ],
'level': 'DEBUG',
},
'invitations': {
'handlers': ['console', ],
'level': 'DEBUG',
},
'pages': {
'handlers': ['console', ],
'level': 'DEBUG',
},
'payments': {
'handlers': ['console', ],
'level': 'DEBUG',
},
'profile': {
'handlers': ['console', ],
'level': 'DEBUG',
},
'runs': {
'handlers': ['console', ],
'level': 'DEBUG',
},
'sponsorship': {
'handlers': ['console', ],
'level': 'DEBUG',
},
'tools': {
'handlers': ['console', ],
'level': 'DEBUG',
},
'challenges': {
'handlers': ['console', ],
'level': 'DEBUG',
},
'celery': {
'handlers': ['console', ],
'level': 'INFO',
},
}
}
# Cloudinary settings
CLOUDINARY = {
'cloud_name': 'masanga-runners',
'api_key': '183456335578974',
'api_secret': os.environ.get('CLOUDINARY_API_SECRET', '')
}
|
'''
exercise 7.0
re-do the SPS file to include the shot elevation static correction
'''
import numpy as np
#our formatting string
sps_format = 'S{0:>10}{1:>10d}{2:>4}{3:>4d}{4:16}{5:10.1f}{6:10.1f}{7:6.1f}\n'
#read in elevations and create lookup table
elevations = {}
with open("3981_interped_elevations.csv", 'r') as f:
for line in f:
sp, z = line.split(',')
elevations[int(sp)] = float(z) * 0.3048
with open("3981.s01", 'w') as output:
with open("pos_39-81_utm.txt", 'r') as f:
for line in f:
#put the stuff we need into variables
sp, x, y = int(line.split()[0]), float(line.split()[1]), float(line.split()[2])
z = elevations[sp]
sp *= 4
#exercise 7 modification
datum = 100.0
replacement = 1000.0
depth = 75.0 * 0.3048 #convert dynamite ft to m
static = 1000 * (z - depth - datum)/replacement #ms
static *= -1.0 #correction, not error
#write it out
#print sps_format.format('3981',sp,"", int(static), "",x,y, z)
output.write(sps_format.format('3981',sp,"", int(static), "",x,y, z))
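#worked example (illustrative numbers only): for z = 130.0 m,
#depth = 75 ft = 22.86 m, datum = 100 m and replacement = 1000 m/s:
# 1000 * (130.0 - 22.86 - 100.0) / 1000.0 = 7.14 ms, negated to -7.14 ms,
#so int(static) = -7 would be written to the SPS record for that shot.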
|
import json
from copy import deepcopy as copy
from text import TextTemplate
TextTemplate.get_text = lambda self: self.get_message()['text']
TEXT_CHARACTER_LIMIT = 640
template = {
'template_type': 'button',
'value': {
'attachment': {
'type': 'template',
'payload': {
'template_type': 'button',
'text': '',
'buttons': []
}
}
}
}
class ButtonTemplate:
def __init__(self, text=''):
self.template = copy(template['value'])
self.text = text
def add_web_url(self, title='', url=''):
web_url_button = {}
web_url_button['type'] = 'web_url'
web_url_button['title'] = title
web_url_button['url'] = url
self.template['attachment']['payload']['buttons'].append(web_url_button)
def add_postback(self, title='', payload=''):
postback_button = {}
postback_button['type'] = 'postback'
postback_button['title'] = title
postback_button['payload'] = json.dumps(payload)
self.template['attachment']['payload']['buttons'].append(postback_button)
def set_text(self, text=''):
self.text = text
def get_message(self):
self.template['attachment']['payload']['text'] = self.text
return self.template
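# Minimal usage sketch (illustrative only; the title, URL and payload below are made up):
if __name__ == '__main__':
    demo = ButtonTemplate('What would you like to do?')
    demo.add_web_url('Visit website', 'https://example.com')
    demo.add_postback('Get started', {'intent': 'start'})
    print(demo.get_message())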
|
from __future__ import print_function
from builtins import range
import logging
from collections import MutableMapping
from future.utils import viewitems
from miasm.expression.expression import ExprOp, ExprId, ExprLoc, ExprInt, \
ExprMem, ExprCompose, ExprSlice, ExprCond
from miasm.expression.simplifications import expr_simp_explicit
from miasm.ir.ir import AssignBlock
log = logging.getLogger("symbexec")
console_handler = logging.StreamHandler()
console_handler.setFormatter(logging.Formatter("[%(levelname)-8s]: %(message)s"))
log.addHandler(console_handler)
log.setLevel(logging.INFO)
def get_block(lifter, ircfg, mdis, addr):
"""Get IRBlock at address @addr"""
loc_key = ircfg.get_or_create_loc_key(addr)
if not loc_key in ircfg.blocks:
offset = mdis.loc_db.get_location_offset(loc_key)
block = mdis.dis_block(offset)
lifter.add_asmblock_to_ircfg(block, ircfg)
irblock = ircfg.get_block(loc_key)
if irblock is None:
raise LookupError('No block found at that address: %s' % lifter.loc_db.pretty_str(loc_key))
return irblock
class StateEngine(object):
"""Stores an Engine state"""
def merge(self, other):
"""Generate a new state, representing the merge of self and @other
@other: a StateEngine instance"""
raise NotImplementedError("Abstract method")
class SymbolicState(StateEngine):
"""Stores a SymbolicExecutionEngine state"""
def __init__(self, dct):
self._symbols = frozenset(viewitems(dct))
def __hash__(self):
return hash((self.__class__, self._symbols))
def __eq__(self, other):
if self is other:
return True
if self.__class__ != other.__class__:
return False
return self.symbols == other.symbols
def __ne__(self, other):
return not self == other
def __iter__(self):
for dst, src in self._symbols:
yield dst, src
def iteritems(self):
"""Iterate on stored memory/values"""
return self.__iter__()
def merge(self, other):
"""Merge two symbolic states
Only equal expressions are kept in both states
@other: second symbolic state
"""
symb_a = self.symbols
symb_b = other.symbols
intersection = set(symb_a).intersection(set(symb_b))
out = {}
for dst in intersection:
if symb_a[dst] == symb_b[dst]:
out[dst] = symb_a[dst]
return self.__class__(out)
@property
def symbols(self):
"""Return the dictionary of known symbols"""
return dict(self._symbols)
INTERNAL_INTBASE_NAME = "__INTERNAL_INTBASE__"
def get_expr_base_offset(expr):
"""Return a couple representing the symbolic/concrete part of an addition
expression.
If there is no symbolic part, ExprId(INTERNAL_INTBASE_NAME) is used
If there is not concrete part, 0 is used
@expr: Expression instance
"""
if expr.is_int():
internal_intbase = ExprId(INTERNAL_INTBASE_NAME, expr.size)
return internal_intbase, int(expr)
if not expr.is_op('+'):
return expr, 0
if expr.args[-1].is_int():
args, offset = expr.args[:-1], int(expr.args[-1])
if len(args) == 1:
return args[0], offset
return ExprOp('+', *args), offset
return expr, 0
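# Illustration (not part of the original module): for an expression such as
#   ExprId('RAX', 64) + ExprInt(0x10, 64)
# get_expr_base_offset returns the couple (ExprId('RAX', 64), 0x10).
# A plain ExprInt yields the internal integer base as the symbolic part, and
# any non-addition expression is returned unchanged with an offset of 0.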
class MemArray(MutableMapping):
"""Link between base and its (offset, Expr)
Given an expression (say *base*), this structure will store every memory
content relatively to an integer offset from *base*.
The value associated to a given offset is a description of the slice of a
stored expression. The slice size depends on the configuration of the
MemArray. For example, for a slice size of 8 bits, the assignment:
- @32[EAX+0x10] = EBX
will store for the base EAX:
- 0x10: (EBX, 0)
- 0x11: (EBX, 1)
- 0x12: (EBX, 2)
- 0x13: (EBX, 3)
If the *base* is EAX+EBX, this structure can store the following contents:
- @32[EAX+EBX]
- @8[EAX+EBX+0x100]
But not:
- @32[EAX+0x10] (which is stored in another MemArray based on EAX)
- @32[EAX+EBX+ECX] (which is stored in another MemArray based on
EAX+EBX+ECX)
"""
def __init__(self, base, expr_simp=expr_simp_explicit):
self._base = base
self.expr_simp = expr_simp
self._mask = int(base.mask)
self._offset_to_expr = {}
@property
def base(self):
"""Expression representing the symbolic base address"""
return self._base
@property
def mask(self):
"""Mask offset"""
return self._mask
def __contains__(self, offset):
return offset in self._offset_to_expr
def __getitem__(self, offset):
assert 0 <= offset <= self._mask
return self._offset_to_expr.__getitem__(offset)
def __setitem__(self, offset, value):
raise RuntimeError("Use write api to update keys")
def __delitem__(self, offset):
assert 0 <= offset <= self._mask
return self._offset_to_expr.__delitem__(offset)
def __iter__(self):
for offset, _ in viewitems(self._offset_to_expr):
yield offset
def __len__(self):
return len(self._offset_to_expr)
def __repr__(self):
out = []
out.append("Base: %s" % self.base)
for offset, (index, value) in sorted(viewitems(self._offset_to_expr)):
out.append("%16X %d %s" % (offset, index, value))
return '\n'.join(out)
def copy(self):
"""Copy object instance"""
obj = MemArray(self.base, self.expr_simp)
obj._offset_to_expr = self._offset_to_expr.copy()
return obj
@staticmethod
def offset_to_ptr(base, offset):
"""
Return an expression representing the @base + @offset
@base: symbolic base address
@offset: relative offset integer to the @base address
"""
if base.is_id(INTERNAL_INTBASE_NAME):
ptr = ExprInt(offset, base.size)
elif offset == 0:
ptr = base
else:
ptr = base + ExprInt(offset, base.size)
return ptr.canonize()
def read(self, offset, size):
"""
Return memory at @offset with @size as an Expr list
@offset: integer (in bytes)
@size: integer (in bits), byte aligned
Consider the following state:
- 0x10: (EBX, 0)
- 0x11: (EBX, 1)
- 0x12: (EBX, 2)
- 0x13: (EBX, 3)
A read at 0x10 of 32 bits should return: EBX
"""
assert size % 8 == 0
# Parts is (Expr's offset, size, Expr)
parts = []
for index in range(size // 8):
# Wrap read:
# @32[EAX+0xFFFFFFFF] is ok and will read at 0xFFFFFFFF, 0, 1, 2
request_offset = (offset + index) & self._mask
if request_offset in self._offset_to_expr:
# Known memory portion
off, data = self._offset_to_expr[request_offset]
parts.append((off, 1, data))
continue
# Unknown memory portion
ptr = self.offset_to_ptr(self.base, request_offset)
data = ExprMem(ptr, 8)
parts.append((0, 1, data))
# Group similar data
# XXX TODO: only little endian here
index = 0
while index + 1 < len(parts):
off_a, size_a, data_a = parts[index]
off_b, size_b, data_b = parts[index+1]
if data_a == data_b and off_a + size_a == off_b:
# Read consecutive bytes of a variable
# [(0, 8, x), (1, 8, x)] => (0, 16, x)
parts[index:index+2] = [(off_a, size_a + size_b, data_a)]
continue
if data_a.is_int() and data_b.is_int():
# Read integer parts
# [(0, 8, 0x11223344), (1, 8, 0x55667788)] => (0, 16, 0x7744)
int1 = self.expr_simp(data_a[off_a*8:(off_a+size_a)*8])
int2 = self.expr_simp(data_b[off_b*8:(off_b+size_b)*8])
assert int1.is_int() and int2.is_int()
int1, int2 = int(int1), int(int2)
result = ExprInt((int2 << (size_a * 8)) | int1, (size_a + size_b) * 8)
parts[index:index+2] = [(0, size_a + size_b, result)]
continue
if data_a.is_mem() and data_b.is_mem():
# Read consecutive bytes of a memory variable
ptr_base_a, ptr_offset_a = get_expr_base_offset(data_a.ptr)
ptr_base_b, ptr_offset_b = get_expr_base_offset(data_b.ptr)
if ptr_base_a != ptr_base_b:
index += 1
continue
if (ptr_offset_a + off_a + size_a) & self._mask == (ptr_offset_b + off_b) & self._mask:
assert size_a <= data_a.size // 8 - off_a
assert size_b <= data_b.size // 8 - off_b
# Successive comparable symbolic pointers
# [(0, 8, @8[ptr]), (0, 8, @8[ptr+1])] => (0, 16, @16[ptr])
ptr = self.offset_to_ptr(ptr_base_a, (ptr_offset_a + off_a) & self._mask)
data = ExprMem(ptr, (size_a + size_b) * 8)
parts[index:index+2] = [(0, size_a + size_b, data)]
continue
index += 1
# Slice data
read_mem = []
for off, bytesize, data in parts:
if data.size // 8 != bytesize:
data = data[off * 8: (off + bytesize) * 8]
read_mem.append(data)
return read_mem
def write(self, offset, expr):
"""
Write @expr at @offset
@offset: integer (in bytes)
@expr: Expr instance value
"""
assert expr.size % 8 == 0
assert offset <= self._mask
for index in range(expr.size // 8):
# Wrap write:
# @32[EAX+0xFFFFFFFF] is ok and will write at 0xFFFFFFFF, 0, 1, 2
request_offset = (offset + index) & self._mask
# XXX TODO: only little endian here
self._offset_to_expr[request_offset] = (index, expr)
tmp = self.expr_simp(expr[index * 8: (index + 1) * 8])
# Special case: Simplify slice of pointer (simplification is ok
# here, as we won't store the simplified expression)
if tmp.is_slice() and tmp.arg.is_mem() and tmp.start % 8 == 0:
new_ptr = self.expr_simp(
tmp.arg.ptr + ExprInt(tmp.start // 8, tmp.arg.ptr.size)
)
tmp = ExprMem(new_ptr, tmp.stop - tmp.start)
# Test if write to original value
if tmp.is_mem():
src_ptr, src_off = get_expr_base_offset(tmp.ptr)
if src_ptr == self.base and src_off == request_offset:
del self._offset_to_expr[request_offset]
def _get_variable_parts(self, index, known_offsets, forward=True):
"""
Find consecutive memory parts representing the same variable. The part
starts at offset known_offsets[@index] and search is in offset direction
determined by @forward
Return the number of consecutive parts of the same variable.
@index: index of the memory offset in known_offsets
@known_offsets: sorted offsets
@forward: Search in offset growing direction if True, else in reverse
order
"""
offset = known_offsets[index]
value_byte_index, value = self._offset_to_expr[offset]
assert value.size % 8 == 0
if forward:
start, end, step = value_byte_index + 1, value.size // 8, 1
else:
start, end, step = value_byte_index - 1, -1, -1
partnum = 1
for value_offset in range(start, end, step):
offset += step
# Check if next part is in known_offsets
next_index = index + step * partnum
if not 0 <= next_index < len(known_offsets):
break
offset_next = known_offsets[next_index]
if offset_next != offset:
break
# Check if next part is a part of the searched value
byte_index, value_next = self._offset_to_expr[offset_next]
if byte_index != value_offset:
break
if value != value_next:
break
partnum += 1
return partnum
def _build_value_at_offset(self, value, offset, start, length):
"""
Return a couple. The first element is the memory Expression representing
the value at @offset, the second is its value. The value is truncated
at byte @start with @length
@value: Expression to truncate
@offset: offset in bytes of the variable (integer)
@start: value's byte offset (integer)
@length: length in bytes (integer)
"""
ptr = self.offset_to_ptr(self.base, offset)
size = length * 8
if start == 0 and size == value.size:
result = value
else:
result = self.expr_simp(value[start * 8: start * 8 + size])
return ExprMem(ptr, size), result
def memory(self):
"""
Iterate on stored memory/values
The goal here is to group entities.
Consider the following state:
EAX + 0x10 = (0, EDX)
EAX + 0x11 = (1, EDX)
EAX + 0x12 = (2, EDX)
EAX + 0x13 = (3, EDX)
The function should return:
@32[EAX + 0x10] = EDX
"""
if not self._offset_to_expr:
return
known_offsets = sorted(self._offset_to_expr)
index = 0
# Test if the first element is the continuation of the last byte. If
# yes, merge and output it first.
min_int = 0
max_int = (1 << self.base.size) - 1
limit_index = len(known_offsets)
first_element = None
# Special case where a variable spreads on max_int/min_int
if known_offsets[0] == min_int and known_offsets[-1] == max_int:
min_offset, max_offset = known_offsets[0], known_offsets[-1]
min_byte_index, min_value = self._offset_to_expr[min_offset]
max_byte_index, max_value = self._offset_to_expr[max_offset]
if min_value == max_value and max_byte_index + 1 == min_byte_index:
# Look for current variable start
partnum_before = self._get_variable_parts(len(known_offsets) - 1, known_offsets, False)
# Look for current variable end
partnum_after = self._get_variable_parts(0, known_offsets)
partnum = partnum_before + partnum_after
offset = known_offsets[-partnum_before]
index_value, value = self._offset_to_expr[offset]
mem, result = self._build_value_at_offset(value, offset, index_value, partnum)
first_element = mem, result
index = partnum_after
limit_index = len(known_offsets) - partnum_before
# Special cases are done, walk and merge variables
while index < limit_index:
offset = known_offsets[index]
index_value, value = self._offset_to_expr[offset]
partnum = self._get_variable_parts(index, known_offsets)
mem, result = self._build_value_at_offset(value, offset, index_value, partnum)
yield mem, result
index += partnum
if first_element is not None:
yield first_element
def dump(self):
"""Display MemArray content"""
for mem, value in self.memory():
print("%s = %s" % (mem, value))
class MemSparse(object):
"""Link a symbolic memory pointer to its MemArray.
For each symbolic memory object, this object will extract the memory pointer
*ptr*. It then splits *ptr* into a symbolic and an integer part. For
example, the memory @[ESP+4] will give ESP+4 for *ptr*. *ptr* is then split
into its base ESP and its offset 4. Each symbolic base address uses a
different MemArray.
Example:
- @32[EAX+EBX]
- @8[EAX+EBX+0x100]
Will be stored in the same MemArray with a EAX+EBX base
"""
def __init__(self, addrsize, expr_simp=expr_simp_explicit):
"""
@addrsize: size (in bits) of the addresses manipulated by the MemSparse
@expr_simp: an ExpressionSimplifier instance
"""
self.addrsize = addrsize
self.expr_simp = expr_simp
self.base_to_memarray = {}
def __contains__(self, expr):
"""
Return True if the whole @expr is present
For partial check, use 'contains_partial'
"""
if not expr.is_mem():
return False
ptr = expr.ptr
base, offset = get_expr_base_offset(ptr)
memarray = self.base_to_memarray.get(base, None)
if memarray is None:
return False
for i in range(expr.size // 8):
if offset + i not in memarray:
return False
return True
def contains_partial(self, expr):
"""
Return True if a part of @expr is present in memory
"""
if not expr.is_mem():
return False
ptr = expr.ptr
base, offset = get_expr_base_offset(ptr)
memarray = self.base_to_memarray.get(base, None)
if memarray is None:
return False
for i in range(expr.size // 8):
if offset + i in memarray:
return True
return False
def clear(self):
"""Reset the current object content"""
self.base_to_memarray.clear()
def copy(self):
"""Copy the current object instance"""
base_to_memarray = {}
for base, memarray in viewitems(self.base_to_memarray):
base_to_memarray[base] = memarray.copy()
obj = MemSparse(self.addrsize, self.expr_simp)
obj.base_to_memarray = base_to_memarray
return obj
def __delitem__(self, expr):
"""
Delete a value @expr *fully* present in memory
For partial delete, use delete_partial
"""
ptr = expr.ptr
base, offset = get_expr_base_offset(ptr)
memarray = self.base_to_memarray.get(base, None)
if memarray is None:
raise KeyError
# Check if whole entity is in the MemArray before deleting it
for i in range(expr.size // 8):
if (offset + i) & memarray.mask not in memarray:
raise KeyError
for i in range(expr.size // 8):
del memarray[(offset + i) & memarray.mask]
def delete_partial(self, expr):
"""
Delete @expr from memory. Skip parts of @expr which are not present in
memory.
"""
ptr = expr.ptr
base, offset = get_expr_base_offset(ptr)
memarray = self.base_to_memarray.get(base, None)
if memarray is None:
raise KeyError
        # Delete only the parts of the entity that are present in the MemArray
for i in range(expr.size // 8):
real_offset = (offset + i) & memarray.mask
if real_offset in memarray:
del memarray[real_offset]
def read(self, ptr, size):
"""
Return the value associated with the Expr at address @ptr
@ptr: Expr representing the memory address
@size: memory size (in bits), byte aligned
"""
assert size % 8 == 0
base, offset = get_expr_base_offset(ptr)
memarray = self.base_to_memarray.get(base, None)
if memarray is not None:
mems = memarray.read(offset, size)
ret = mems[0] if len(mems) == 1 else ExprCompose(*mems)
else:
ret = ExprMem(ptr, size)
return ret
def write(self, ptr, expr):
"""
Update the corresponding Expr @expr at address @ptr
@ptr: Expr representing the memory address
@expr: Expr instance
"""
assert ptr.size == self.addrsize
base, offset = get_expr_base_offset(ptr)
memarray = self.base_to_memarray.get(base, None)
if memarray is None:
memarray = MemArray(base, self.expr_simp)
self.base_to_memarray[base] = memarray
memarray.write(offset, expr)
def iteritems(self):
"""Iterate on stored memory variables and their values."""
for _, memarray in viewitems(self.base_to_memarray):
for mem, value in memarray.memory():
yield mem, value
def items(self):
"""Return stored memory variables and their values."""
return list(self.iteritems())
def dump(self):
"""Display MemSparse content"""
for mem, value in viewitems(self):
print("%s = %s" % (mem, value))
def __repr__(self):
out = []
for _, memarray in sorted(viewitems(self.base_to_memarray)):
out.append(repr(memarray))
return '\n'.join(out)
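# MemSparse groups memory accesses by their symbolic base, as described in the
# class docstring above. The function below is an illustrative sketch, not
# part of the original module: it writes through one pointer expression, then
# shows the difference between a full and a partial membership test. It
# assumes the miasm expression classes are importable from
# 'miasm.expression.expression'.
def _example_memsparse_usage():
    """Sketch: store @32[ESP + 4] = EAX and query it back."""
    from miasm.expression.expression import ExprId, ExprInt, ExprMem
    mem_store = MemSparse(32)
    ptr = ExprId('ESP', 32) + ExprInt(4, 32)
    mem_store.write(ptr, ExprId('EAX', 32))
    full = ExprMem(ptr, 32)
    overlap = ExprMem(ExprId('ESP', 32) + ExprInt(6, 32), 32)
    assert full in mem_store                    # all four bytes are stored
    assert mem_store.contains_partial(overlap)  # only two bytes overlap
    assert overlap not in mem_store
    # read() recomposes the stored bytes; here it should give back EAX.
    return mem_store.read(ptr, 32)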
class SymbolMngr(object):
"""Symbolic store manager (IDs and MEMs)"""
def __init__(self, init=None, addrsize=None, expr_simp=expr_simp_explicit):
assert addrsize is not None
if init is None:
init = {}
self.addrsize = addrsize
self.expr_simp = expr_simp
self.symbols_id = {}
self.symbols_mem = MemSparse(addrsize, expr_simp)
self.mask = (1 << addrsize) - 1
for expr, value in viewitems(init):
self.write(expr, value)
def __contains__(self, expr):
if expr.is_id():
return self.symbols_id.__contains__(expr)
if expr.is_mem():
return self.symbols_mem.__contains__(expr)
return False
def __getitem__(self, expr):
return self.read(expr)
def __setitem__(self, expr, value):
self.write(expr, value)
def __delitem__(self, expr):
if expr.is_id():
del self.symbols_id[expr]
elif expr.is_mem():
del self.symbols_mem[expr]
else:
raise TypeError("Bad source expr")
def copy(self):
"""Copy object instance"""
obj = SymbolMngr(self, addrsize=self.addrsize, expr_simp=self.expr_simp)
return obj
def clear(self):
"""Forget every variables values"""
self.symbols_id.clear()
self.symbols_mem.clear()
def read(self, src):
"""
Return the value corresponding to Expr @src
@src: ExprId or ExprMem instance
"""
if src.is_id():
return self.symbols_id.get(src, src)
elif src.is_mem():
# Only byte aligned accesses are supported for now
assert src.size % 8 == 0
return self.symbols_mem.read(src.ptr, src.size)
else:
raise TypeError("Bad source expr")
def write(self, dst, src):
"""
Update @dst with @src expression
@dst: ExprId or ExprMem instance
@src: Expression instance
"""
assert dst.size == src.size
if dst.is_id():
if dst == src:
if dst in self.symbols_id:
del self.symbols_id[dst]
else:
self.symbols_id[dst] = src
elif dst.is_mem():
# Only byte aligned accesses are supported for now
assert dst.size % 8 == 0
self.symbols_mem.write(dst.ptr, src)
else:
raise TypeError("Bad destination expr")
def dump(self, ids=True, mems=True):
"""Display memory content"""
if ids:
for variable, value in self.ids():
print('%s = %s' % (variable, value))
if mems:
for mem, value in self.memory():
print('%s = %s' % (mem, value))
def __repr__(self):
out = []
for variable, value in viewitems(self):
out.append('%s = %s' % (variable, value))
return "\n".join(out)
def iteritems(self):
"""ExprId/ExprMem iteritems of the current state"""
for variable, value in self.ids():
yield variable, value
for variable, value in self.memory():
yield variable, value
def items(self):
"""Return variables/values of the current state"""
return list(self.iteritems())
def __iter__(self):
for expr, _ in self.iteritems():
yield expr
def ids(self):
"""Iterate on variables and their values."""
for expr, value in viewitems(self.symbols_id):
yield expr, value
def memory(self):
"""Iterate on memory variables and their values."""
for mem, value in viewitems(self.symbols_mem):
yield mem, value
def keys(self):
"""Variables of the current state"""
return list(self)
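# SymbolMngr keeps identifiers and memory cells in two separate stores but
# exposes them through a single dict-like interface. The function below is an
# illustrative sketch, not part of the original module; it assumes the miasm
# expression classes are importable from 'miasm.expression.expression'.
def _example_symbolmngr_usage():
    """Sketch: assign an ExprId and an ExprMem, then read them back."""
    from miasm.expression.expression import ExprId, ExprInt, ExprMem
    symbols = SymbolMngr(addrsize=32)
    eax, ebx = ExprId('EAX', 32), ExprId('EBX', 32)
    symbols[eax] = ebx                      # identifier assignment
    mem = ExprMem(ExprId('ESP', 32) + ExprInt(4, 32), 32)
    symbols[mem] = ExprInt(0x1234, 32)      # byte-aligned memory assignment
    assert symbols[eax] == ebx
    stored = symbols[mem]                   # the stored value, possibly recomposed
    # Writing a variable to itself removes its binding (see write() above).
    symbols[eax] = eax
    assert eax not in symbols
    return stored, list(symbols.items())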
def merge_ptr_read(known, ptrs):
"""
Merge common memory parts in a multiple byte memory.
@ptrs: memory bytes list
    @known: booleans associated with @ptrs, indicating for each memory part
    whether it is present in the store
"""
assert known
out = []
known.append(None)
ptrs.append(None)
last, value, size = known[0], ptrs[0], 8
for index, part in enumerate(known[1:], 1):
if part == last:
size += 8
else:
out.append((last, value, size))
last, value, size = part, ptrs[index], 8
return out
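# merge_ptr_read() collapses runs of consecutive bytes sharing the same
# "known" flag into (known, first_byte, size_in_bits) tuples. The function
# below is an illustrative sketch, not part of the original module; any
# objects can stand in for the per-byte memory expressions, since the
# function only groups them by their flag.
def _example_merge_ptr_read():
    """Sketch: two known bytes followed by two unknown bytes."""
    known = [True, True, False, False]
    ptrs = ['byte0', 'byte1', 'byte2', 'byte3']
    # Expected result: [(True, 'byte0', 16), (False, 'byte2', 16)]
    return merge_ptr_read(known, ptrs)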
class SymbolicExecutionEngine(object):
"""
Symbolic execution engine
Allow IR code emulation in symbolic domain
Examples:
from miasm.ir.symbexec import SymbolicExecutionEngine
from miasm.ir.ir import AssignBlock
lifter = Lifter_X86_32()
init_state = {
lifter.arch.regs.EAX: lifter.arch.regs.EBX,
ExprMem(id_x+ExprInt(0x10, 32), 32): id_a,
}
sb_exec = SymbolicExecutionEngine(lifter, init_state)
>>> sb_exec.dump()
        EAX = EBX
        @32[x + 0x10] = a
        >>> sb_exec.dump(mems=False)
        EAX = EBX
        >>> print(sb_exec.eval_expr(lifter.arch.regs.EAX + lifter.arch.regs.ECX))
        EBX + ECX
Inspecting state:
- dump
- modified
State manipulation:
- '.state' (rw)
Evaluation (read only):
- eval_expr
- eval_assignblk
Evaluation with state update:
- eval_updt_expr
- eval_updt_assignblk
- eval_updt_irblock
Start a symbolic execution based on provisioned '.lifter' blocks:
- run_block_at
- run_at
"""
StateEngine = SymbolicState
def __init__(self, lifter, state=None,
sb_expr_simp=expr_simp_explicit):
self.expr_to_visitor = {
ExprInt: self.eval_exprint,
ExprId: self.eval_exprid,
ExprLoc: self.eval_exprloc,
ExprMem: self.eval_exprmem,
ExprSlice: self.eval_exprslice,
ExprCond: self.eval_exprcond,
ExprOp: self.eval_exprop,
ExprCompose: self.eval_exprcompose,
}
if state is None:
state = {}
self.symbols = SymbolMngr(addrsize=lifter.addrsize, expr_simp=sb_expr_simp)
for dst, src in viewitems(state):
self.symbols.write(dst, src)
self.lifter = lifter
self.expr_simp = sb_expr_simp
@property
def ir_arch(self):
warnings.warn('DEPRECATION WARNING: use ".lifter" instead of ".ir_arch"')
return self.lifter
def get_state(self):
"""Return the current state of the SymbolicEngine"""
state = self.StateEngine(dict(self.symbols))
return state
def set_state(self, state):
"""Restaure the @state of the engine
@state: StateEngine instance
"""
self.symbols = SymbolMngr(addrsize=self.lifter.addrsize, expr_simp=self.expr_simp)
for dst, src in viewitems(dict(state)):
self.symbols[dst] = src
state = property(get_state, set_state)
def eval_expr_visitor(self, expr, cache=None):
"""
[DEV]: Override to change the behavior of an Expr evaluation.
This function recursively applies 'eval_expr*' to @expr.
        This function uses @cache to speed up the re-evaluation of expressions.
"""
if cache is None:
cache = {}
ret = cache.get(expr, None)
if ret is not None:
return ret
new_expr = self.expr_simp(expr)
ret = cache.get(new_expr, None)
if ret is not None:
return ret
func = self.expr_to_visitor.get(new_expr.__class__, None)
if func is None:
raise TypeError("Unknown expr type")
ret = func(new_expr, cache=cache)
ret = self.expr_simp(ret)
assert ret is not None
cache[expr] = ret
cache[new_expr] = ret
return ret
def eval_exprint(self, expr, **kwargs):
"""[DEV]: Evaluate an ExprInt using the current state"""
return expr
def eval_exprid(self, expr, **kwargs):
"""[DEV]: Evaluate an ExprId using the current state"""
ret = self.symbols.read(expr)
return ret
def eval_exprloc(self, expr, **kwargs):
"""[DEV]: Evaluate an ExprLoc using the current state"""
offset = self.lifter.loc_db.get_location_offset(expr.loc_key)
if offset is not None:
ret = ExprInt(offset, expr.size)
else:
ret = expr
return ret
def eval_exprmem(self, expr, **kwargs):
"""[DEV]: Evaluate an ExprMem using the current state
        This function first evaluates the memory pointer value.
        Override 'mem_read' to modify the effective memory accesses.
"""
ptr = self.eval_expr_visitor(expr.ptr, **kwargs)
mem = ExprMem(ptr, expr.size)
ret = self.mem_read(mem)
return ret
def eval_exprcond(self, expr, **kwargs):
"""[DEV]: Evaluate an ExprCond using the current state"""
cond = self.eval_expr_visitor(expr.cond, **kwargs)
src1 = self.eval_expr_visitor(expr.src1, **kwargs)
src2 = self.eval_expr_visitor(expr.src2, **kwargs)
ret = ExprCond(cond, src1, src2)
return ret
def eval_exprslice(self, expr, **kwargs):
"""[DEV]: Evaluate an ExprSlice using the current state"""
arg = self.eval_expr_visitor(expr.arg, **kwargs)
ret = ExprSlice(arg, expr.start, expr.stop)
return ret
def eval_exprop(self, expr, **kwargs):
"""[DEV]: Evaluate an ExprOp using the current state"""
args = []
for oarg in expr.args:
arg = self.eval_expr_visitor(oarg, **kwargs)
args.append(arg)
ret = ExprOp(expr.op, *args)
return ret
def eval_exprcompose(self, expr, **kwargs):
"""[DEV]: Evaluate an ExprCompose using the current state"""
args = []
for arg in expr.args:
args.append(self.eval_expr_visitor(arg, **kwargs))
ret = ExprCompose(*args)
return ret
def eval_expr(self, expr, eval_cache=None):
"""
Evaluate @expr
@expr: Expression instance to evaluate
        @eval_cache: None or a dictionary linking variables to their values
"""
if eval_cache is None:
eval_cache = {}
ret = self.eval_expr_visitor(expr, cache=eval_cache)
assert ret is not None
return ret
def modified(self, init_state=None, ids=True, mems=True):
"""
Return the modified variables.
@init_state: a base dictionary linking variables to their initial values
to diff. Can be None.
        @ids: include modified ids
        @mems: include modified memory
"""
if init_state is None:
init_state = {}
if ids:
for variable, value in viewitems(self.symbols.symbols_id):
if variable in init_state and init_state[variable] == value:
continue
yield variable, value
if mems:
for mem, value in self.symbols.memory():
if mem in init_state and init_state[mem] == value:
continue
yield mem, value
def dump(self, ids=True, mems=True):
"""
        Display modified variables
@ids: display modified ids
@mems: display modified memory
"""
for variable, value in self.modified(None, ids, mems):
print("%-18s" % variable, "=", "%s" % value)
def eval_assignblk(self, assignblk):
"""
Evaluate AssignBlock using the current state
Returns a dictionary containing modified keys associated to their values
@assignblk: AssignBlock instance
"""
pool_out = {}
eval_cache = {}
for dst, src in viewitems(assignblk):
src = self.eval_expr(src, eval_cache)
if dst.is_mem():
ptr = self.eval_expr(dst.ptr, eval_cache)
# Test if mem lookup is known
tmp = ExprMem(ptr, dst.size)
pool_out[tmp] = src
elif dst.is_id():
pool_out[dst] = src
else:
raise ValueError("Unknown destination type", str(dst))
return pool_out
def apply_change(self, dst, src):
"""
        Apply @dst = @src on the current state WITHOUT evaluating either side
@dst: Expr, destination
@src: Expr, source
"""
if dst.is_mem():
self.mem_write(dst, src)
else:
self.symbols.write(dst, src)
def eval_updt_assignblk(self, assignblk):
"""
Apply an AssignBlock on the current state
@assignblk: AssignBlock instance
"""
mem_dst = []
dst_src = self.eval_assignblk(assignblk)
for dst, src in viewitems(dst_src):
self.apply_change(dst, src)
if dst.is_mem():
mem_dst.append(dst)
return mem_dst
def eval_updt_irblock(self, irb, step=False):
"""
Symbolic execution of the @irb on the current state
@irb: irbloc instance
@step: display intermediate steps
"""
for assignblk in irb:
if step:
print('Instr', assignblk.instr)
print('Assignblk:')
print(assignblk)
print('_' * 80)
self.eval_updt_assignblk(assignblk)
if step:
self.dump(mems=False)
self.dump(ids=False)
print('_' * 80)
dst = self.eval_expr(self.lifter.IRDst)
return dst
def run_block_at(self, ircfg, addr, step=False):
"""
Symbolic execution of the block at @addr
@addr: address to execute (int or ExprInt or label)
@step: display intermediate steps
"""
irblock = ircfg.get_block(addr)
if irblock is not None:
addr = self.eval_updt_irblock(irblock, step=step)
return addr
def run_at(self, ircfg, addr, lbl_stop=None, step=False):
"""
Symbolic execution starting at @addr
@addr: address to execute (int or ExprInt or label)
@lbl_stop: LocKey to stop execution on
@step: display intermediate steps
"""
while True:
irblock = ircfg.get_block(addr)
if irblock is None:
break
if irblock.loc_key == lbl_stop:
break
addr = self.eval_updt_irblock(irblock, step=step)
return addr
def del_mem_above_stack(self, stack_ptr):
"""
        Remove all stored memory values with the following properties:
* pointer based on initial stack value
* pointer below current stack pointer
"""
stack_ptr = self.eval_expr(stack_ptr)
base, stk_offset = get_expr_base_offset(stack_ptr)
memarray = self.symbols.symbols_mem.base_to_memarray.get(base, None)
if memarray:
to_del = set()
for offset in memarray:
if ((offset - stk_offset) & int(stack_ptr.mask)) >> (stack_ptr.size - 1) != 0:
to_del.add(offset)
for offset in to_del:
del memarray[offset]
def eval_updt_expr(self, expr):
"""
        Evaluate @expr and apply side effects if needed (i.e. if @expr is an
        assignment). Return the evaluated value.
"""
# Update value if needed
if expr.is_assign():
ret = self.eval_expr(expr.src)
self.eval_updt_assignblk(AssignBlock([expr]))
else:
ret = self.eval_expr(expr)
return ret
def mem_read(self, expr):
"""
[DEV]: Override to modify the effective memory reads
Read symbolic value at ExprMem @expr
@expr: ExprMem
"""
return self.symbols.read(expr)
def mem_write(self, dst, src):
"""
[DEV]: Override to modify the effective memory writes
Write symbolic value @src at ExprMem @dst
@dst: destination ExprMem
@src: source Expression
"""
self.symbols.write(dst, src)
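# mem_read() and mem_write() above are the documented [DEV] hooks for
# customising how the engine touches memory. The subclass below is an
# illustrative sketch, not part of the original module: it records every
# symbolic memory access before delegating to the default behaviour.
# Instantiation still requires a lifter, exactly as for
# SymbolicExecutionEngine itself.
class _TracingSymbolicExecutionEngine(SymbolicExecutionEngine):
    """Sketch: log symbolic memory reads and writes."""
    def __init__(self, lifter, state=None, sb_expr_simp=expr_simp_explicit):
        super(_TracingSymbolicExecutionEngine, self).__init__(lifter, state, sb_expr_simp)
        self.accesses = []
    def mem_read(self, expr):
        # Record the read, then fall back to the default symbolic lookup.
        self.accesses.append(('read', expr))
        return super(_TracingSymbolicExecutionEngine, self).mem_read(expr)
    def mem_write(self, dst, src):
        # Record the write, then store it in the symbolic memory as usual.
        self.accesses.append(('write', dst, src))
        super(_TracingSymbolicExecutionEngine, self).mem_write(dst, src)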
|
import datetime
from storm.references import Reference
from storm.store import AutoReload
from stoqlib.database.properties import (UnicodeCol, BoolCol, PercentCol,
IntCol, QuantityCol, PriceCol, DateTimeCol)
from stoqlib.migration.domainv1 import Domain
from stoqlib.migration.parameter import get_parameter
class LoginUser(Domain):
__storm_table__ = 'login_user'
person_id = IntCol()
class Branch(Domain):
__storm_table__ = 'branch'
class Person(Domain):
__storm_table__ = 'person'
Person.login_user = Reference(Person.id, LoginUser.person_id, on_remote=True)
class PaymentMethod(Domain):
__storm_table__ = 'payment_method'
method_name = UnicodeCol()
is_active = BoolCol(default=True)
daily_penalty = PercentCol(default=0)
interest = PercentCol(default=0)
payment_day = IntCol(default=None)
closing_day = IntCol(default=None)
max_installments = IntCol(default=1)
destination_account_id = IntCol()
class SaleItem(Domain):
__storm_table__ = 'sale_item'
quantity = QuantityCol()
sale_id = IntCol()
class ReturnedSaleItem(Domain):
__storm_table__ = 'returned_sale_item'
quantity = QuantityCol(default=0)
price = PriceCol()
sale_item_id = IntCol()
sale_item = Reference(sale_item_id, SaleItem.id)
sellable_id = IntCol()
returned_sale_id = IntCol()
class Sale(Domain):
__storm_table__ = 'sale'
return_date = DateTimeCol(default=None)
branch_id = IntCol()
branch = Reference(branch_id, Branch.id)
def get_items(self):
return self.store.find(SaleItem, sale_id=self.id).order_by(SaleItem.id)
class ReturnedSale(Domain):
__storm_table__ = 'returned_sale'
identifier = IntCol(default=AutoReload)
return_date = DateTimeCol(default_factory=datetime.datetime.now)
invoice_number = IntCol(default=None)
reason = UnicodeCol(default=u'')
sale_id = IntCol()
sale = Reference(sale_id, Sale.id)
new_sale_id = IntCol()
new_sale = Reference(new_sale_id, Sale.id)
responsible_id = IntCol()
responsible = Reference(responsible_id, LoginUser.id)
branch_id = IntCol()
branch = Reference(branch_id, Branch.id)
def apply_patch(store):
store.execute("""
CREATE TABLE returned_sale (
id serial NOT NULL PRIMARY KEY,
te_created_id bigint UNIQUE REFERENCES transaction_entry(id),
te_modified_id bigint UNIQUE REFERENCES transaction_entry(id),
identifier serial NOT NULL,
return_date timestamp,
reason text,
invoice_number integer CONSTRAINT valid_invoice_number
CHECK (invoice_number > 0 AND invoice_number <= 999999999)
DEFAULT NULL UNIQUE,
responsible_id bigint REFERENCES login_user(id) ON UPDATE CASCADE,
branch_id bigint REFERENCES branch(id) ON UPDATE CASCADE,
sale_id bigint REFERENCES sale(id) ON UPDATE CASCADE,
new_sale_id bigint UNIQUE REFERENCES sale(id) ON UPDATE CASCADE
);
CREATE TABLE returned_sale_item (
id serial NOT NULL PRIMARY KEY,
te_created_id bigint UNIQUE REFERENCES transaction_entry(id),
te_modified_id bigint UNIQUE REFERENCES transaction_entry(id),
quantity numeric(20, 3) CONSTRAINT positive_quantity
CHECK (quantity >= 0),
price numeric(20, 2) CONSTRAINT positive_price
CHECK (price >= 0),
sellable_id bigint REFERENCES sellable(id) ON UPDATE CASCADE,
sale_item_id bigint REFERENCES sale_item(id) ON UPDATE CASCADE,
returned_sale_id bigint REFERENCES returned_sale(id) ON UPDATE CASCADE
);
""")
# Migrate all renegotiation_data to returned_sale
invoice_numbers = set()
for sale_id, person_id, invoice_number, reason, penalty in store.execute(
"""SELECT sale_id, responsible_id, invoice_number, reason, penalty_value
FROM renegotiation_data;""").get_all():
sale = Sale.get(sale_id, store)
person = Person.get(person_id, store)
if invoice_number is not None:
# invoice_number can be duplicated, since it wasn't unique before
# First come, first served. Others will have no invoice number
if invoice_number in invoice_numbers:
invoice_number = None
invoice_numbers.add(invoice_number)
returned_sale = ReturnedSale(
store=store,
return_date=sale.return_date,
invoice_number=invoice_number,
responsible=person.login_user,
reason=reason,
branch=sale.branch,
sale=sale,
)
for sale_item in sale.get_items():
ReturnedSaleItem(
store=store,
sale_item=sale_item,
returned_sale_id=returned_sale.id,
quantity=sale_item.quantity,
)
store.execute("DROP TABLE renegotiation_data;")
account = int(get_parameter(store, u'IMBALANCE_ACCOUNT'))
    # Only do this if IMBALANCE_ACCOUNT is already registered. Otherwise, the
    # database is brand new and the payment method will be created later.
if account:
# Register the new payment method, 'trade'
method = store.find(PaymentMethod, method_name=u'trade').one()
if not method:
PaymentMethod(store=store,
method_name=u'trade',
destination_account_id=account,
max_installments=12)
|
# lint-amnesty, pylint: disable=django-not-configured
"""Tests for course home page date summary blocks."""
from datetime import datetime, timedelta
import crum
import ddt
import waffle # lint-amnesty, pylint: disable=invalid-django-waffle-import
from django.contrib.messages.middleware import MessageMiddleware
from django.test import RequestFactory
from django.urls import reverse
from edx_toggles.toggles.testutils import override_waffle_flag
from freezegun import freeze_time
from pytz import utc
from common.djangoapps.course_modes.models import CourseMode
from common.djangoapps.course_modes.tests.factories import CourseModeFactory
from lms.djangoapps.commerce.models import CommerceConfiguration
from lms.djangoapps.course_home_api.toggles import COURSE_HOME_USE_LEGACY_FRONTEND
from lms.djangoapps.courseware.courses import get_course_date_blocks
from lms.djangoapps.courseware.date_summary import (
CertificateAvailableDate,
CourseAssignmentDate,
CourseEndDate,
CourseExpiredDate,
CourseStartDate,
TodaysDate,
VerificationDeadlineDate,
VerifiedUpgradeDeadlineDate
)
from lms.djangoapps.courseware.models import (
CourseDynamicUpgradeDeadlineConfiguration,
DynamicUpgradeDeadlineConfiguration,
OrgDynamicUpgradeDeadlineConfiguration
)
from lms.djangoapps.verify_student.models import VerificationDeadline
from lms.djangoapps.verify_student.services import IDVerificationService
from lms.djangoapps.verify_student.tests.factories import SoftwareSecurePhotoVerificationFactory
from openedx.core.djangoapps.content.course_overviews.models import CourseOverview
from openedx.core.djangoapps.content.course_overviews.tests.factories import CourseOverviewFactory
from openedx.core.djangoapps.self_paced.models import SelfPacedConfiguration
from openedx.core.djangoapps.user_api.preferences.api import set_user_preference
from openedx.features.course_duration_limits.models import CourseDurationLimitConfig
from openedx.features.course_experience import (
DISABLE_UNIFIED_COURSE_TAB_FLAG,
RELATIVE_DATES_FLAG,
UPGRADE_DEADLINE_MESSAGE,
CourseHomeMessages
)
from common.djangoapps.student.tests.factories import TEST_PASSWORD, CourseEnrollmentFactory, UserFactory
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.tests.django_utils import TEST_DATA_SPLIT_MODULESTORE, SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
@ddt.ddt
class CourseDateSummaryTest(SharedModuleStoreTestCase):
"""Tests for course date summary blocks."""
MODULESTORE = TEST_DATA_SPLIT_MODULESTORE
def setUp(self):
super().setUp()
SelfPacedConfiguration.objects.create(enable_course_home_improvements=True)
def make_request(self, user):
""" Creates a request """
request = RequestFactory().request()
request.user = user
self.addCleanup(crum.set_current_request, None)
crum.set_current_request(request)
return request
def test_course_info_feature_flag(self):
SelfPacedConfiguration(enable_course_home_improvements=False).save()
course = create_course_run()
user = create_user()
CourseEnrollmentFactory(course_id=course.id, user=user, mode=CourseMode.VERIFIED)
self.client.login(username=user.username, password=TEST_PASSWORD)
url = reverse('info', args=(course.id,))
response = self.client.get(url)
self.assertNotContains(response, 'date-summary', status_code=302)
@override_waffle_flag(COURSE_HOME_USE_LEGACY_FRONTEND, active=True)
def test_course_home_logged_out(self):
course = create_course_run()
url = reverse('openedx.course_experience.course_home', args=(course.id,))
response = self.client.get(url)
assert 200 == response.status_code
# Tests for which blocks are enabled
def assert_block_types(self, course, user, expected_blocks):
"""Assert that the enabled block types for this course are as expected."""
blocks = get_course_date_blocks(course, user)
assert len(blocks) == len(expected_blocks)
assert {type(b) for b in blocks} == set(expected_blocks)
@ddt.data(
# Verified enrollment with no photo-verification before course start
({}, {}, (CourseEndDate, CourseStartDate, TodaysDate, VerificationDeadlineDate)),
# Verified enrollment with `approved` photo-verification after course end
({'days_till_start': -10,
'days_till_end': -5,
'days_till_upgrade_deadline': -6,
'days_till_verification_deadline': -5,
},
{'verification_status': 'approved'},
(TodaysDate, CourseEndDate)),
# Verified enrollment with `expired` photo-verification during course run
({'days_till_start': -10},
{'verification_status': 'expired'},
(TodaysDate, CourseEndDate, VerificationDeadlineDate)),
# Verified enrollment with `approved` photo-verification during course run
({'days_till_start': -10},
{'verification_status': 'approved'},
(TodaysDate, CourseEndDate)),
# Verified enrollment with *NO* course end date
({'days_till_end': None},
{},
(CourseStartDate, TodaysDate, VerificationDeadlineDate)),
# Verified enrollment with no photo-verification during course run
({'days_till_start': -1},
{},
(TodaysDate, CourseEndDate, VerificationDeadlineDate)),
# Verification approved
({'days_till_start': -10,
'days_till_upgrade_deadline': -1,
'days_till_verification_deadline': 1,
},
{'verification_status': 'approved'},
(TodaysDate, CourseEndDate)),
# After upgrade deadline
({'days_till_start': -10,
'days_till_upgrade_deadline': -1},
{},
(TodaysDate, CourseEndDate, VerificationDeadlineDate)),
# After verification deadline
({'days_till_start': -10,
'days_till_upgrade_deadline': -2,
'days_till_verification_deadline': -1},
{},
(TodaysDate, CourseEndDate, VerificationDeadlineDate)),
)
@ddt.unpack
def test_enabled_block_types(self, course_kwargs, user_kwargs, expected_blocks):
course = create_course_run(**course_kwargs)
user = create_user(**user_kwargs)
CourseEnrollmentFactory(course_id=course.id, user=user, mode=CourseMode.VERIFIED)
self.assert_block_types(course, user, expected_blocks)
@override_waffle_flag(RELATIVE_DATES_FLAG, active=True)
def test_enabled_block_types_with_assignments(self): # pylint: disable=too-many-statements
"""
Creates a course with multiple subsections to test all of the different
cases for assignment dates showing up. Mocks out calling the edx-when
service and then validates the correct data is set and returned.
"""
course = create_course_run(days_till_start=-100)
user = create_user()
request = self.make_request(user)
CourseEnrollmentFactory(course_id=course.id, user=user, mode=CourseMode.VERIFIED)
now = datetime.now(utc)
assignment_title_html = ['<a href=', '</a>']
with self.store.bulk_operations(course.id):
section = ItemFactory.create(category='chapter', parent_location=course.location)
ItemFactory.create(
category='sequential',
display_name='Released',
parent_location=section.location,
start=now - timedelta(days=1),
due=now + timedelta(days=6),
graded=True,
format='Homework',
)
ItemFactory.create(
category='sequential',
display_name='Not released',
parent_location=section.location,
start=now + timedelta(days=1),
due=now + timedelta(days=7),
graded=True,
format='Homework',
)
ItemFactory.create(
category='sequential',
display_name='Third nearest assignment',
parent_location=section.location,
start=now + timedelta(days=1),
due=now + timedelta(days=8),
graded=True,
format='Exam',
)
ItemFactory.create(
category='sequential',
display_name='Past due date',
parent_location=section.location,
start=now - timedelta(days=14),
due=now - timedelta(days=7),
graded=True,
format='Exam',
)
ItemFactory.create(
category='sequential',
display_name='Not returned since we do not get non-graded subsections',
parent_location=section.location,
start=now + timedelta(days=1),
due=now - timedelta(days=7),
graded=False,
)
ItemFactory.create(
category='sequential',
display_name='No start date',
parent_location=section.location,
start=None,
due=now + timedelta(days=9),
graded=True,
format='Speech',
)
ItemFactory.create(
category='sequential',
# Setting display name to None should set the assignment title to 'Assignment'
display_name=None,
parent_location=section.location,
start=now - timedelta(days=14),
due=now + timedelta(days=10),
graded=True,
format=None,
)
dummy_subsection = ItemFactory.create(category='sequential', graded=True, due=now + timedelta(days=11))
# We are deleting this subsection right after creating it because we need to pass in a real
# location object (dummy_subsection.location), but do not want this to exist inside of the modulestore
with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, course.id):
self.store.delete_item(dummy_subsection.location, user.id)
# Standard widget case where we restrict the number of assignments.
expected_blocks = (
TodaysDate, CourseAssignmentDate, CourseAssignmentDate, CourseEndDate, VerificationDeadlineDate
)
blocks = get_course_date_blocks(course, user, request, num_assignments=2)
assert len(blocks) == len(expected_blocks)
assert {type(b) for b in blocks} == set(expected_blocks)
assignment_blocks = filter( # pylint: disable=filter-builtin-not-iterating
lambda b: isinstance(b, CourseAssignmentDate), blocks
)
for assignment in assignment_blocks:
assignment_title = str(assignment.title_html) or str(assignment.title)
assert assignment_title != 'Third nearest assignment'
assert assignment_title != 'Past due date'
assert assignment_title != 'Not returned since we do not get non-graded subsections'
# checking if it is _in_ the title instead of being the title since released assignments
# are actually links. Unreleased assignments are just the string of the title.
if 'Released' in assignment_title:
for html_tag in assignment_title_html:
assert html_tag in assignment_title
elif assignment_title == 'Not released':
for html_tag in assignment_title_html:
assert html_tag not in assignment_title
# No restrictions on number of assignments to return
expected_blocks = (
CourseStartDate, TodaysDate, CourseAssignmentDate, CourseAssignmentDate, CourseAssignmentDate,
CourseAssignmentDate, CourseAssignmentDate, CourseAssignmentDate, CourseEndDate,
VerificationDeadlineDate
)
blocks = get_course_date_blocks(course, user, request, include_past_dates=True)
assert len(blocks) == len(expected_blocks)
assert {type(b) for b in blocks} == set(expected_blocks)
assignment_blocks = filter( # pylint: disable=filter-builtin-not-iterating
lambda b: isinstance(b, CourseAssignmentDate), blocks
)
for assignment in assignment_blocks:
assignment_title = str(assignment.title_html) or str(assignment.title)
assert assignment_title != 'Not returned since we do not get non-graded subsections'
assignment_type = str(assignment.assignment_type)
# checking if it is _in_ the title instead of being the title since released assignments
# are actually links. Unreleased assignments are just the string of the title.
# also checking that the assignment type is returned for graded subsections
if 'Released' in assignment_title:
assert assignment_type == 'Homework'
for html_tag in assignment_title_html:
assert html_tag in assignment_title
elif assignment_title == 'Not released':
assert assignment_type == 'Homework'
for html_tag in assignment_title_html:
assert html_tag not in assignment_title
elif assignment_title == 'Third nearest assignment':
assert assignment_type == 'Exam'
# It's still not released
for html_tag in assignment_title_html:
assert html_tag not in assignment_title
elif 'Past due date' in assignment_title:
assert now > assignment.date
assert assignment_type == 'Exam'
for html_tag in assignment_title_html:
assert html_tag in assignment_title
elif 'No start date' == assignment_title:
assert assignment_type == 'Speech'
# Can't determine if it is released so it does not get a link
for html_tag in assignment_title_html:
assert html_tag not in assignment_title
# This is the item with no display name where we set one ourselves.
elif 'Assignment' in assignment_title:
assert assignment_type is None
                # Released (start date in the past), so it does get a link
for html_tag in assignment_title_html:
assert html_tag in assignment_title
@override_waffle_flag(RELATIVE_DATES_FLAG, active=True)
@ddt.data(
([], 3),
([{
'due': None,
'start': None,
'name': 'student-training',
'examples': [
{
'answer': ['Replace this text with your own sample response...'],
'options_selected': [
{'option': 'Fair', 'criterion': 'Ideas'},
{'option': 'Good', 'criterion': 'Content'}
]
}, {
'answer': ['Replace this text with another sample response...'],
'options_selected': [
{'option': 'Poor', 'criterion': 'Ideas'},
{'option': 'Good', 'criterion': 'Content'}
]
}
]
}, {
'due': '2029-01-01T00:00:00+00:00',
'start': '2001-01-01T00:00:00+00:00',
'must_be_graded_by': 3,
'name': 'peer-assessment',
'must_grade': 5
}, {
'due': '2029-01-01T00:00:00+00:00',
'start': '2001-01-01T00:00:00+00:00',
'name': 'self-assessment'
}], 5)
)
@ddt.unpack
def test_dates_with_openassessments(self, rubric_assessments, date_block_count):
course = create_self_paced_course_run(days_till_start=-1, org_id='TestOrg')
user = create_user()
request = self.make_request(user)
CourseEnrollmentFactory(course_id=course.id, user=user, mode=CourseMode.VERIFIED)
now = datetime.now(utc)
chapter = ItemFactory.create(
parent=course,
category="chapter",
graded=True,
)
section = ItemFactory.create(
parent=chapter,
category="sequential",
)
vertical = ItemFactory.create(
parent=section,
category="vertical",
)
ItemFactory.create(
parent=vertical,
category="openassessment",
rubric_assessments=rubric_assessments,
submission_start=(now + timedelta(days=1)).isoformat(),
submission_end=(now + timedelta(days=7)).isoformat(),
)
blocks = get_course_date_blocks(course, user, request, include_past_dates=True)
assert len(blocks) == date_block_count
@override_waffle_flag(RELATIVE_DATES_FLAG, active=True)
def test_enabled_block_types_with_expired_course(self):
course = create_course_run(days_till_start=-100)
user = create_user()
self.make_request(user)
# These two lines are to trigger the course expired block to be rendered
CourseEnrollmentFactory(course_id=course.id, user=user, mode=CourseMode.AUDIT)
CourseDurationLimitConfig.objects.create(enabled=True, enabled_as_of=datetime(2018, 1, 1, tzinfo=utc))
expected_blocks = (
TodaysDate, CourseEndDate, CourseExpiredDate, VerifiedUpgradeDeadlineDate
)
self.assert_block_types(course, user, expected_blocks)
@ddt.data(
# Course not started
({}, (CourseStartDate, TodaysDate, CourseEndDate)),
# Course active
({'days_till_start': -1}, (TodaysDate, CourseEndDate)),
# Course ended
({'days_till_start': -10, 'days_till_end': -5},
(TodaysDate, CourseEndDate)),
)
@ddt.unpack
def test_enabled_block_types_without_enrollment(self, course_kwargs, expected_blocks):
course = create_course_run(**course_kwargs)
user = create_user()
self.assert_block_types(course, user, expected_blocks)
def test_enabled_block_types_with_non_upgradeable_course_run(self):
course = create_course_run(days_till_start=-10, days_till_verification_deadline=None)
user = create_user()
CourseMode.objects.get(course_id=course.id, mode_slug=CourseMode.VERIFIED).delete()
CourseEnrollmentFactory(course_id=course.id, user=user, mode=CourseMode.AUDIT)
self.assert_block_types(course, user, (TodaysDate, CourseEndDate))
def test_todays_date_block(self):
"""
        Test that today's date block renders correctly and displays the
        correct time, accounting for daylight savings.
"""
with freeze_time('2015-01-02'):
course = create_course_run()
user = create_user()
block = TodaysDate(course, user)
assert block.is_enabled
assert block.is_allowed
assert block.date == datetime.now(utc)
assert block.title == 'current_datetime'
@ddt.data(
'info',
'openedx.course_experience.course_home',
)
@override_waffle_flag(COURSE_HOME_USE_LEGACY_FRONTEND, active=True)
@override_waffle_flag(DISABLE_UNIFIED_COURSE_TAB_FLAG, active=False)
def test_todays_date_no_timezone(self, url_name):
with freeze_time('2015-01-02'):
course = create_course_run()
user = create_user()
self.client.login(username=user.username, password=TEST_PASSWORD)
html_elements = [
'<h3 class="hd hd-6 handouts-header">Upcoming Dates</h3>',
'<div class="date-summary',
'<p class="hd hd-6 date localized-datetime"',
'data-timezone="None"'
]
url = reverse(url_name, args=(course.id,))
response = self.client.get(url, follow=True)
for html in html_elements:
self.assertContains(response, html)
@ddt.data(
'info',
'openedx.course_experience.course_home',
)
@override_waffle_flag(COURSE_HOME_USE_LEGACY_FRONTEND, active=True)
@override_waffle_flag(DISABLE_UNIFIED_COURSE_TAB_FLAG, active=False)
def test_todays_date_timezone(self, url_name):
with freeze_time('2015-01-02'):
course = create_course_run()
user = create_user()
self.client.login(username=user.username, password=TEST_PASSWORD)
set_user_preference(user, 'time_zone', 'America/Los_Angeles')
url = reverse(url_name, args=(course.id,))
response = self.client.get(url, follow=True)
html_elements = [
'<h3 class="hd hd-6 handouts-header">Upcoming Dates</h3>',
'<div class="date-summary',
'<p class="hd hd-6 date localized-datetime"',
'data-timezone="America/Los_Angeles"'
]
for html in html_elements:
self.assertContains(response, html)
## Tests Course Start Date
def test_course_start_date(self):
course = create_course_run()
user = create_user()
block = CourseStartDate(course, user)
assert block.date == course.start
@ddt.data(
'info',
'openedx.course_experience.course_home',
)
@override_waffle_flag(COURSE_HOME_USE_LEGACY_FRONTEND, active=True)
@override_waffle_flag(DISABLE_UNIFIED_COURSE_TAB_FLAG, active=False)
def test_start_date_render(self, url_name):
with freeze_time('2015-01-02'):
course = create_course_run()
user = create_user()
self.client.login(username=user.username, password=TEST_PASSWORD)
url = reverse(url_name, args=(course.id,))
response = self.client.get(url, follow=True)
html_elements = [
'data-datetime="2015-01-03 00:00:00+00:00"'
]
for html in html_elements:
self.assertContains(response, html)
@ddt.data(
'info',
'openedx.course_experience.course_home',
)
@override_waffle_flag(COURSE_HOME_USE_LEGACY_FRONTEND, active=True)
@override_waffle_flag(DISABLE_UNIFIED_COURSE_TAB_FLAG, active=False)
def test_start_date_render_time_zone(self, url_name):
with freeze_time('2015-01-02'):
course = create_course_run()
user = create_user()
self.client.login(username=user.username, password=TEST_PASSWORD)
set_user_preference(user, 'time_zone', 'America/Los_Angeles')
url = reverse(url_name, args=(course.id,))
response = self.client.get(url, follow=True)
html_elements = [
'data-datetime="2015-01-03 00:00:00+00:00"',
'data-timezone="America/Los_Angeles"'
]
for html in html_elements:
self.assertContains(response, html)
## Tests Course End Date Block
def test_course_end_date_for_certificate_eligible_mode(self):
course = create_course_run(days_till_start=-1)
user = create_user()
CourseEnrollmentFactory(course_id=course.id, user=user, mode=CourseMode.VERIFIED)
block = CourseEndDate(course, user)
assert block.description == ('After this date, the course will be archived, which means you can review the '
'course content but can no longer participate in graded assignments or work '
'towards earning a certificate.')
def test_course_end_date_for_non_certificate_eligible_mode(self):
course = create_course_run(days_till_start=-1)
user = create_user()
CourseEnrollmentFactory(course_id=course.id, user=user, mode=CourseMode.AUDIT)
block = CourseEndDate(course, user)
assert block.description == 'After the course ends, the course content will be archived and no longer active.'
assert block.title == 'Course ends'
def test_course_end_date_after_course(self):
course = create_course_run(days_till_start=-2, days_till_end=-1)
user = create_user()
CourseEnrollmentFactory(course_id=course.id, user=user, mode=CourseMode.VERIFIED)
block = CourseEndDate(course, user)
assert block.description ==\
'This course is archived, which means you can review course content but it is no longer active.'
assert block.title == 'Course ends'
@ddt.data(300, 400)
@override_waffle_flag(RELATIVE_DATES_FLAG, active=True)
def test_course_end_date_self_paced(self, days_till_end):
"""
In self-paced courses, the end date will only show up if the learner
views the course within 365 days of the course end date.
"""
now = datetime.now(utc)
course = CourseFactory.create(
start=now + timedelta(days=-7), end=now + timedelta(days=days_till_end), self_paced=True)
user = create_user()
block = CourseEndDate(course, user)
assert block.title == 'Course ends'
        if days_till_end < 365:
assert block.date == course.end
else:
assert block.date is None
assert block.description == ''
def test_ecommerce_checkout_redirect(self):
"""Verify the block link redirects to ecommerce checkout if it's enabled."""
sku = 'TESTSKU'
configuration = CommerceConfiguration.objects.create(checkout_on_ecommerce_service=True)
course = create_course_run()
user = create_user()
course_mode = CourseMode.objects.get(course_id=course.id, mode_slug=CourseMode.VERIFIED)
course_mode.sku = sku
course_mode.save()
CourseEnrollmentFactory(course_id=course.id, user=user, mode=CourseMode.VERIFIED)
block = VerifiedUpgradeDeadlineDate(course, user)
assert block.link == f'{configuration.basket_checkout_page}?sku={sku}'
## CertificateAvailableDate
@waffle.testutils.override_switch('certificates.auto_certificate_generation', True)
def test_no_certificate_available_date(self):
course = create_course_run(days_till_start=-1)
user = create_user()
CourseEnrollmentFactory(course_id=course.id, user=user, mode=CourseMode.AUDIT)
block = CertificateAvailableDate(course, user)
assert block.date is None
assert not block.is_allowed
## CertificateAvailableDate
@waffle.testutils.override_switch('certificates.auto_certificate_generation', True)
def test_no_certificate_available_date_for_self_paced(self):
course = create_self_paced_course_run()
verified_user = create_user()
CourseEnrollmentFactory(course_id=course.id, user=verified_user, mode=CourseMode.VERIFIED)
course.certificate_available_date = datetime.now(utc) + timedelta(days=7)
course.save()
block = CertificateAvailableDate(course, verified_user)
assert block.date is not None
assert not block.is_allowed
def test_no_certificate_available_date_for_audit_course(self):
"""
Tests that Certificate Available Date is not visible in the course "Important Course Dates" section
if the course only has audit mode.
"""
course = create_course_run()
audit_user = create_user()
# Enroll learner in the audit mode and verify the course only has 1 mode (audit)
CourseEnrollmentFactory(course_id=course.id, user=audit_user, mode=CourseMode.AUDIT)
CourseMode.objects.get(course_id=course.id, mode_slug=CourseMode.VERIFIED).delete()
all_course_modes = CourseMode.modes_for_course(course.id)
assert len(all_course_modes) == 1
assert all_course_modes[0].slug == CourseMode.AUDIT
course.certificate_available_date = datetime.now(utc) + timedelta(days=7)
course.save()
# Verify Certificate Available Date is not enabled for learner.
block = CertificateAvailableDate(course, audit_user)
assert not block.is_allowed
assert block.date is not None
@waffle.testutils.override_switch('certificates.auto_certificate_generation', True)
def test_certificate_available_date_defined(self):
course = create_course_run()
audit_user = create_user()
CourseEnrollmentFactory(course_id=course.id, user=audit_user, mode=CourseMode.AUDIT)
verified_user = create_user()
CourseEnrollmentFactory(course_id=course.id, user=verified_user, mode=CourseMode.VERIFIED)
course.certificate_available_date = datetime.now(utc) + timedelta(days=7)
enable_course_certificates(course)
expected_blocks = [
CourseEndDate, CourseStartDate, TodaysDate, VerificationDeadlineDate, CertificateAvailableDate
]
self.assert_block_types(course, verified_user, expected_blocks)
for block in (CertificateAvailableDate(course, audit_user), CertificateAvailableDate(course, verified_user)):
assert course.certificate_available_date is not None
assert block.date == course.certificate_available_date
assert block.is_allowed
## VerificationDeadlineDate
def test_no_verification_deadline(self):
course = create_course_run(days_till_start=-1, days_till_verification_deadline=None)
user = create_user()
CourseEnrollmentFactory(course_id=course.id, user=user, mode=CourseMode.VERIFIED)
block = VerificationDeadlineDate(course, user)
assert block.date is None
assert block.is_allowed
def test_no_verified_enrollment(self):
course = create_course_run(days_till_start=-1)
user = create_user()
CourseEnrollmentFactory(course_id=course.id, user=user, mode=CourseMode.AUDIT)
block = VerificationDeadlineDate(course, user)
assert not block.is_allowed
def test_verification_deadline_date_upcoming(self):
with freeze_time('2015-01-02'):
course = create_course_run(days_till_start=-1)
user = create_user()
CourseEnrollmentFactory(course_id=course.id, user=user, mode=CourseMode.VERIFIED)
block = VerificationDeadlineDate(course, user)
assert block.css_class == 'verification-deadline-upcoming'
assert block.title == 'Verification Deadline'
assert block.date == (datetime.now(utc) + timedelta(days=14))
assert block.description ==\
'You must successfully complete verification before this date to qualify for a Verified Certificate.'
assert block.link_text == 'Verify My Identity'
assert block.link == IDVerificationService.get_verify_location(course.id)
def test_verification_deadline_date_retry(self):
with freeze_time('2015-01-02'):
course = create_course_run(days_till_start=-1)
user = create_user(verification_status='denied')
CourseEnrollmentFactory(course_id=course.id, user=user, mode=CourseMode.VERIFIED)
block = VerificationDeadlineDate(course, user)
assert block.css_class == 'verification-deadline-retry'
assert block.title == 'Verification Deadline'
assert block.date == (datetime.now(utc) + timedelta(days=14))
assert block.description ==\
'You must successfully complete verification before this date to qualify for a Verified Certificate.'
assert block.link_text == 'Retry Verification'
assert block.link == IDVerificationService.get_verify_location()
def test_verification_deadline_date_denied(self):
with freeze_time('2015-01-02'):
course = create_course_run(days_till_start=-10, days_till_verification_deadline=-1)
user = create_user(verification_status='denied')
CourseEnrollmentFactory(course_id=course.id, user=user, mode=CourseMode.VERIFIED)
block = VerificationDeadlineDate(course, user)
assert block.css_class == 'verification-deadline-passed'
assert block.title == 'Missed Verification Deadline'
assert block.date == (datetime.now(utc) + timedelta(days=(- 1)))
assert block.description == "Unfortunately you missed this course's deadline for a successful verification."
assert block.link_text == 'Learn More'
assert block.link == ''
@ddt.data(
(-1, '1 day ago - {date}'),
(1, 'in 1 day - {date}')
)
@ddt.unpack
def test_render_date_string_past(self, delta, expected_date_string):
with freeze_time('2015-01-02'):
course = create_course_run(days_till_start=-10, days_till_verification_deadline=delta)
user = create_user(verification_status='denied')
CourseEnrollmentFactory(course_id=course.id, user=user, mode=CourseMode.VERIFIED)
block = VerificationDeadlineDate(course, user)
assert block.relative_datestring == expected_date_string
@ddt.data(
('info', True),
('info', False),
('openedx.course_experience.course_home', True),
('openedx.course_experience.course_home', False),
)
@ddt.unpack
@override_waffle_flag(DISABLE_UNIFIED_COURSE_TAB_FLAG, active=False)
@override_waffle_flag(RELATIVE_DATES_FLAG, active=True)
def test_dates_tab_link_render(self, url_name, legacy_active):
""" The dates tab link should only show for enrolled or staff users """
course = create_course_run()
html_elements = [
'class="dates-tab-link"',
'View all course dates</a>',
]
        # The URL should change based on whether the legacy frontend or the MFE is active.
if legacy_active:
html_elements.append('/courses/' + str(course.id) + '/dates')
else:
html_elements.append('/course/' + str(course.id) + '/dates')
url = reverse(url_name, args=(course.id,))
def assert_html_elements(assert_function, user):
self.client.login(username=user.username, password=TEST_PASSWORD)
with override_waffle_flag(COURSE_HOME_USE_LEGACY_FRONTEND, active=legacy_active):
response = self.client.get(url, follow=True)
if legacy_active or user.is_staff:
for html in html_elements:
assert_function(response, html)
else:
assert 404 == response.status_code
self.client.logout()
with freeze_time('2015-01-02'):
unenrolled_user = create_user()
assert_html_elements(self.assertNotContains, unenrolled_user)
staff_user = create_user()
staff_user.is_staff = True
staff_user.save()
assert_html_elements(self.assertContains, staff_user)
enrolled_user = create_user()
CourseEnrollmentFactory(course_id=course.id, user=enrolled_user, mode=CourseMode.VERIFIED)
assert_html_elements(self.assertContains, enrolled_user)
@ddt.ddt
class TestDateAlerts(SharedModuleStoreTestCase):
"""
Unit tests for date alerts.
"""
def setUp(self):
super().setUp()
with freeze_time('2017-07-01 09:00:00'):
self.course = create_course_run(days_till_start=0)
self.course.certificate_available_date = self.course.start + timedelta(days=21)
enable_course_certificates(self.course)
self.enrollment = CourseEnrollmentFactory(course_id=self.course.id, mode=CourseMode.AUDIT)
self.request = RequestFactory().request()
self.request.session = {}
self.request.user = self.enrollment.user
MessageMiddleware().process_request(self.request)
@ddt.data(
['2017-01-01 09:00:00', 'in 6 months on <span class="date localized-datetime" data-format="shortDate"'],
['2017-06-17 09:00:00', 'in 2 weeks on <span class="date localized-datetime" data-format="shortDate"'],
['2017-06-30 10:00:00', 'in 1 day at <span class="date localized-datetime" data-format="shortTime"'],
['2017-07-01 08:00:00', 'in 1 hour at <span class="date localized-datetime" data-format="shortTime"'],
['2017-07-01 08:55:00', 'in 5 minutes at <span class="date localized-datetime" data-format="shortTime"'],
['2017-07-01 09:00:00', None],
['2017-08-01 09:00:00', None],
)
@ddt.unpack
def test_start_date_alert(self, current_time, expected_message_html):
"""
Verify that course start date alerts are registered.
"""
with freeze_time(current_time):
block = CourseStartDate(self.course, self.request.user)
block.register_alerts(self.request, self.course)
messages = list(CourseHomeMessages.user_messages(self.request))
if expected_message_html:
assert len(messages) == 1
assert expected_message_html in messages[0].message_html
else:
assert len(messages) == 0
@ddt.data(
['2017-06-30 09:00:00', None],
['2017-07-01 09:00:00', 'in 2 weeks on <span class="date localized-datetime" data-format="shortDate"'],
['2017-07-14 10:00:00', 'in 1 day at <span class="date localized-datetime" data-format="shortTime"'],
['2017-07-15 08:00:00', 'in 1 hour at <span class="date localized-datetime" data-format="shortTime"'],
['2017-07-15 08:55:00', 'in 5 minutes at <span class="date localized-datetime" data-format="shortTime"'],
['2017-07-15 09:00:00', None],
['2017-08-15 09:00:00', None],
)
@ddt.unpack
def test_end_date_alert(self, current_time, expected_message_html):
"""
Verify that course end date alerts are registered.
"""
with freeze_time(current_time):
block = CourseEndDate(self.course, self.request.user)
block.register_alerts(self.request, self.course)
messages = list(CourseHomeMessages.user_messages(self.request))
if expected_message_html:
assert len(messages) == 1
assert expected_message_html in messages[0].message_html
else:
assert len(messages) == 0
@ddt.data(
['2017-06-20 09:00:00', None],
        ['2017-06-21 09:00:00', 'Don’t forget, you have 2 weeks left to upgrade to a Verified Certificate.'],
        ['2017-07-04 10:00:00', 'Don’t forget, you have 1 day left to upgrade to a Verified Certificate.'],
        ['2017-07-05 08:00:00', 'Don’t forget, you have 1 hour left to upgrade to a Verified Certificate.'],
        ['2017-07-05 08:55:00', 'Don’t forget, you have 5 minutes left to upgrade to a Verified Certificate.'],
['2017-07-05 09:00:00', None],
['2017-08-05 09:00:00', None],
)
@ddt.unpack
@override_waffle_flag(UPGRADE_DEADLINE_MESSAGE, active=True)
def test_verified_upgrade_deadline_alert(self, current_time, expected_message_html):
"""
Verify the verified upgrade deadline alerts.
"""
with freeze_time(current_time):
block = VerifiedUpgradeDeadlineDate(self.course, self.request.user)
block.register_alerts(self.request, self.course)
messages = list(CourseHomeMessages.user_messages(self.request))
if expected_message_html:
assert len(messages) == 1
assert expected_message_html in messages[0].message_html
else:
assert len(messages) == 0
@ddt.data(
['2017-07-15 08:00:00', None],
['2017-07-15 09:00:00', 'If you have earned a certificate, you will be able to access it 1 week from now.'],
['2017-07-21 09:00:00', 'If you have earned a certificate, you will be able to access it 1 day from now.'],
['2017-07-22 08:00:00', 'If you have earned a certificate, you will be able to access it 1 hour from now.'],
['2017-07-22 09:00:00', None],
['2017-07-23 09:00:00', None],
)
@ddt.unpack
@waffle.testutils.override_switch('certificates.auto_certificate_generation', True)
def test_certificate_availability_alert(self, current_time, expected_message_html):
"""
        Verify that certificate availability alerts are registered.
"""
with freeze_time(current_time):
block = CertificateAvailableDate(self.course, self.request.user)
block.register_alerts(self.request, self.course)
messages = list(CourseHomeMessages.user_messages(self.request))
if expected_message_html:
assert len(messages) == 1
assert expected_message_html in messages[0].message_html
else:
assert len(messages) == 0
@ddt.ddt
class TestScheduleOverrides(SharedModuleStoreTestCase):
""" Tests for Schedule Overrides """
def test_date_with_self_paced_with_enrollment_before_course_start(self):
""" Enrolling before a course begins should result in the upgrade deadline being set relative to the
course start date. """
global_config = DynamicUpgradeDeadlineConfiguration.objects.create(enabled=True)
course = create_self_paced_course_run(days_till_start=3)
overview = CourseOverview.get_from_id(course.id)
expected = overview.start + timedelta(days=global_config.deadline_days)
enrollment = CourseEnrollmentFactory(course_id=course.id, mode=CourseMode.AUDIT)
block = VerifiedUpgradeDeadlineDate(course, enrollment.user)
assert block.date == expected
self._check_text(block)
def _check_text(self, upgrade_date_summary):
""" Validates the text on an upgrade_date_summary """
assert upgrade_date_summary.title == 'Upgrade to Verified Certificate'
assert upgrade_date_summary.description ==\
"Don't miss the opportunity to highlight your new knowledge and skills by earning a verified" \
" certificate."
assert upgrade_date_summary.relative_datestring == 'by {date}'
def test_date_with_self_paced_with_enrollment_after_course_start(self):
""" Enrolling after a course begins should result in the upgrade deadline being set relative to the
enrollment date.
Additionally, OrgDynamicUpgradeDeadlineConfiguration should override the number of days until the deadline,
and CourseDynamicUpgradeDeadlineConfiguration should override the org-level override.
"""
global_config = DynamicUpgradeDeadlineConfiguration.objects.create(enabled=True)
course = create_self_paced_course_run(days_till_start=-1, org_id='TestOrg')
enrollment = CourseEnrollmentFactory(course_id=course.id, mode=CourseMode.AUDIT)
block = VerifiedUpgradeDeadlineDate(course, enrollment.user)
expected = enrollment.created + timedelta(days=global_config.deadline_days)
assert block.date == expected
# Orgs should be able to override the deadline
org_config = OrgDynamicUpgradeDeadlineConfiguration.objects.create(
enabled=True, org_id=course.org, deadline_days=4
)
enrollment = CourseEnrollmentFactory(course_id=course.id, mode=CourseMode.AUDIT)
block = VerifiedUpgradeDeadlineDate(course, enrollment.user)
expected = enrollment.created + timedelta(days=org_config.deadline_days)
assert block.date == expected
# Courses should be able to override the deadline (and the org-level override)
course_config = CourseDynamicUpgradeDeadlineConfiguration.objects.create(
enabled=True, course_id=course.id, deadline_days=3
)
enrollment = CourseEnrollmentFactory(course_id=course.id, mode=CourseMode.AUDIT)
block = VerifiedUpgradeDeadlineDate(course, enrollment.user)
expected = enrollment.created + timedelta(days=course_config.deadline_days)
assert block.date == expected
def test_date_with_self_paced_without_dynamic_upgrade_deadline(self):
""" Disabling the dynamic upgrade deadline functionality should result in the verified mode's
expiration date being returned. """
DynamicUpgradeDeadlineConfiguration.objects.create(enabled=False)
course = create_self_paced_course_run()
expected = CourseMode.objects.get(course_id=course.id, mode_slug=CourseMode.VERIFIED).expiration_datetime
enrollment = CourseEnrollmentFactory(course_id=course.id, mode=CourseMode.AUDIT)
block = VerifiedUpgradeDeadlineDate(course, enrollment.user)
assert block.date == expected
def test_date_with_existing_schedule(self):
""" If a schedule is created while deadlines are disabled, they shouldn't magically appear once the feature is
turned on. """
course = create_self_paced_course_run(days_till_start=-1)
DynamicUpgradeDeadlineConfiguration.objects.create(enabled=False)
course_config = CourseDynamicUpgradeDeadlineConfiguration.objects.create(enabled=False, course_id=course.id)
enrollment = CourseEnrollmentFactory(course_id=course.id, mode=CourseMode.AUDIT)
# The enrollment has a schedule, but the upgrade deadline should be None
assert enrollment.schedule.upgrade_deadline is None
block = VerifiedUpgradeDeadlineDate(course, enrollment.user)
expected = CourseMode.objects.get(course_id=course.id, mode_slug=CourseMode.VERIFIED).expiration_datetime
assert block.date == expected
# Now if we turn on the feature for this course, this existing enrollment should be unaffected
course_config.enabled = True
course_config.save()
block = VerifiedUpgradeDeadlineDate(course, enrollment.user)
assert block.date == expected
@ddt.data(
# (enroll before configs, org enabled, org opt-out, course enabled, course opt-out, expected dynamic deadline)
(False, False, False, False, False, True),
(False, False, False, False, True, True),
(False, False, False, True, False, True),
(False, False, False, True, True, False),
(False, False, True, False, False, True),
(False, False, True, False, True, True),
(False, False, True, True, False, True),
(False, False, True, True, True, False),
(False, True, False, False, False, True),
(False, True, False, False, True, True),
(False, True, False, True, False, True),
(False, True, False, True, True, False), # course-level overrides org-level
(False, True, True, False, False, False),
(False, True, True, False, True, False),
(False, True, True, True, False, True), # course-level overrides org-level
(False, True, True, True, True, False),
(True, False, False, False, False, True),
(True, False, False, False, True, True),
(True, False, False, True, False, True),
(True, False, False, True, True, False),
(True, False, True, False, False, True),
(True, False, True, False, True, True),
(True, False, True, True, False, True),
(True, False, True, True, True, False),
(True, True, False, False, False, True),
(True, True, False, False, True, True),
(True, True, False, True, False, True),
(True, True, False, True, True, False), # course-level overrides org-level
(True, True, True, False, False, False),
(True, True, True, False, True, False),
(True, True, True, True, False, True), # course-level overrides org-level
(True, True, True, True, True, False),
)
@ddt.unpack
def test_date_with_org_and_course_config_overrides(self, enroll_first, org_config_enabled, org_config_opt_out,
course_config_enabled, course_config_opt_out,
expected_dynamic_deadline):
""" Runs through every combination of org-level plus course-level DynamicUpgradeDeadlineConfiguration enabled
and opt-out states to verify that course-level overrides the org-level config. """
course = create_self_paced_course_run(days_till_start=-1, org_id='TestOrg')
DynamicUpgradeDeadlineConfiguration.objects.create(enabled=True)
if enroll_first:
course_overview = CourseOverviewFactory.create(self_paced=True)
enrollment = CourseEnrollmentFactory(course_id=course.id, mode=CourseMode.AUDIT, course=course_overview)
OrgDynamicUpgradeDeadlineConfiguration.objects.create(
enabled=org_config_enabled, opt_out=org_config_opt_out, org_id=course.id.org
)
CourseDynamicUpgradeDeadlineConfiguration.objects.create(
enabled=course_config_enabled, opt_out=course_config_opt_out, course_id=course.id
)
if not enroll_first:
course_overview = CourseOverviewFactory.create(self_paced=True)
enrollment = CourseEnrollmentFactory(course_id=course.id, mode=CourseMode.AUDIT, course=course_overview)
# The enrollment has a schedule, and the upgrade_deadline is set when expected_dynamic_deadline is True
if not enroll_first:
assert (enrollment.schedule.upgrade_deadline is not None) == expected_dynamic_deadline
# The CourseEnrollment.upgrade_deadline property method is checking the configs
assert (enrollment.dynamic_upgrade_deadline is not None) == expected_dynamic_deadline
def create_user(verification_status=None):
""" Create a new User instance.
Arguments:
verification_status (str): User's verification status. If this value is set an instance of
SoftwareSecurePhotoVerification will be created for the user with the specified status.
"""
user = UserFactory()
if verification_status is not None:
SoftwareSecurePhotoVerificationFactory.create(user=user, status=verification_status)
return user
def create_course_run(
days_till_start=1, days_till_end=14, days_till_upgrade_deadline=4, days_till_verification_deadline=14,
):
""" Create a new course run and course modes.
All date-related arguments are relative to the current date-time (now) unless otherwise specified.
Both audit and verified `CourseMode` objects will be created for the course run.
Arguments:
days_till_end (int): Number of days until the course ends.
days_till_start (int): Number of days until the course starts.
days_till_upgrade_deadline (int): Number of days until the course run's upgrade deadline.
days_till_verification_deadline (int): Number of days until the course run's verification deadline. If this
value is set to `None`, no verification deadline will be created.
"""
now = datetime.now(utc)
course = CourseFactory.create(start=now + timedelta(days=days_till_start))
course.end = None
if days_till_end is not None:
course.end = now + timedelta(days=days_till_end)
CourseModeFactory(course_id=course.id, mode_slug=CourseMode.AUDIT)
CourseModeFactory(
course_id=course.id,
mode_slug=CourseMode.VERIFIED,
expiration_datetime=now + timedelta(days=days_till_upgrade_deadline)
)
if days_till_verification_deadline is not None:
VerificationDeadline.objects.create(
course_key=course.id,
deadline=now + timedelta(days=days_till_verification_deadline)
)
return course
def create_self_paced_course_run(days_till_start=1, org_id=None):
""" Create a new course run and course modes.
All date-related arguments are relative to the current date-time (now) unless otherwise specified.
Both audit and verified `CourseMode` objects will be created for the course run.
Arguments:
days_till_start (int): Number of days until the course starts.
org_id (string): String org id to assign the course to (default: None; use CourseFactory default)
"""
now = datetime.now(utc)
course = CourseFactory.create(start=now + timedelta(days=days_till_start), self_paced=True,
org=org_id if org_id else 'TestedX')
CourseModeFactory(
course_id=course.id,
mode_slug=CourseMode.AUDIT
)
CourseModeFactory(
course_id=course.id,
mode_slug=CourseMode.VERIFIED,
expiration_datetime=now + timedelta(days=100)
)
return course
def enable_course_certificates(course):
"""
Enable course certificate configuration.
"""
course.certificates = {
'certificates': [{
'course_title': 'Test',
'name': '',
'is_active': True,
}]
}
course.save()
|
# encoding: utf-8
"""
A lightweight Traits like module.
This is designed to provide a lightweight, simple, pure Python version of
many of the capabilities of enthought.traits. This includes:
* Validation
* Type specification with defaults
* Static and dynamic notification
* Basic predefined types
* An API that is similar to enthought.traits
We don't support:
* Delegation
* Automatic GUI generation
* A full set of trait types. Most importantly, we don't provide container
traits (list, dict, tuple) that can trigger notifications if their
contents change.
* API compatibility with enthought.traits
There are also some important differences in our design:
* enthought.traits does not validate default values. We do.
We chose to create this module because we need these capabilities, but
we need them to be pure Python so they work in all Python implementations,
including Jython and IronPython.
Inheritance diagram:
.. inheritance-diagram:: IPython.utils.traitlets
:parts: 3
Authors:
* Brian Granger
* Enthought, Inc. Some of the code in this file comes from enthought.traits
and is licensed under the BSD license. Also, many of the ideas also come
from enthought.traits even though our implementation is very different.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import inspect
import re
import sys
import types
from types import FunctionType
try:
from types import ClassType, InstanceType
ClassTypes = (ClassType, type)
except:
ClassTypes = (type,)
from .importstring import import_item
from . import py3compat
from .py3compat import iteritems
SequenceTypes = (list, tuple, set, frozenset)
#-----------------------------------------------------------------------------
# Basic classes
#-----------------------------------------------------------------------------
class NoDefaultSpecified ( object ): pass
NoDefaultSpecified = NoDefaultSpecified()
class Undefined ( object ): pass
Undefined = Undefined()
class TraitError(Exception):
pass
#-----------------------------------------------------------------------------
# Utilities
#-----------------------------------------------------------------------------
def class_of ( object ):
""" Returns a string containing the class name of an object with the
correct indefinite article ('a' or 'an') preceding it (e.g., 'an Image',
'a PlotValue').
"""
if isinstance( object, py3compat.string_types ):
return add_article( object )
return add_article( object.__class__.__name__ )
def add_article ( name ):
""" Returns a string containing the correct indefinite article ('a' or 'an')
prefixed to the specified string.
"""
if name[:1].lower() in 'aeiou':
return 'an ' + name
return 'a ' + name
def repr_type(obj):
""" Return a string representation of a value and its type for readable
error messages.
"""
the_type = type(obj)
if (not py3compat.PY3) and the_type is InstanceType:
# Old-style class.
the_type = obj.__class__
msg = '%r %r' % (obj, the_type)
return msg
def is_trait(t):
""" Returns whether the given value is an instance or subclass of TraitType.
"""
return (isinstance(t, TraitType) or
(isinstance(t, type) and issubclass(t, TraitType)))
def parse_notifier_name(name):
"""Convert the name argument to a list of names.
Examples
--------
>>> parse_notifier_name('a')
['a']
>>> parse_notifier_name(['a','b'])
['a', 'b']
>>> parse_notifier_name(None)
['anytrait']
"""
if isinstance(name, str):
return [name]
elif name is None:
return ['anytrait']
elif isinstance(name, (list, tuple)):
for n in name:
assert isinstance(n, str), "names must be strings"
return name
class _SimpleTest:
def __init__ ( self, value ): self.value = value
def __call__ ( self, test ):
return test == self.value
def __repr__(self):
return "<SimpleTest(%r)" % self.value
def __str__(self):
return self.__repr__()
def getmembers(object, predicate=None):
"""A safe version of inspect.getmembers that handles missing attributes.
This is useful when there are descriptor based attributes that for
some reason raise AttributeError even though they exist. This happens
in zope.interface with the __provides__ attribute.
"""
results = []
for key in dir(object):
try:
value = getattr(object, key)
except AttributeError:
pass
else:
if not predicate or predicate(value):
results.append((key, value))
results.sort()
return results
#-----------------------------------------------------------------------------
# Base TraitType for all traits
#-----------------------------------------------------------------------------
class TraitType(object):
"""A base class for all trait descriptors.
Notes
-----
Our implementation of traits is based on Python's descriptor
protocol. This class is the base class for all such descriptors. The
only magic we use is a custom metaclass for the main :class:`HasTraits`
class that does the following:
1. Sets the :attr:`name` attribute of every :class:`TraitType`
instance in the class dict to the name of the attribute.
2. Sets the :attr:`this_class` attribute of every :class:`TraitType`
instance in the class dict to the *class* that declared the trait.
This is used by the :class:`This` trait to allow subclasses to
accept superclasses for :class:`This` values.
"""
metadata = {}
default_value = Undefined
info_text = 'any value'
def __init__(self, default_value=NoDefaultSpecified, **metadata):
"""Create a TraitType.
"""
if default_value is not NoDefaultSpecified:
self.default_value = default_value
if len(metadata) > 0:
if len(self.metadata) > 0:
self._metadata = self.metadata.copy()
self._metadata.update(metadata)
else:
self._metadata = metadata
else:
self._metadata = self.metadata
self.init()
def init(self):
pass
def get_default_value(self):
"""Create a new instance of the default value."""
return self.default_value
def instance_init(self, obj):
"""This is called by :meth:`HasTraits.__new__` to finish init'ing.
Some stages of initialization must be delayed until the parent
:class:`HasTraits` instance has been created. This method is
called in :meth:`HasTraits.__new__` after the instance has been
created.
This method triggers the creation and validation of default values,
and also things like the resolution of class names given as strings in
:class:`Type` and :class:`Instance`.
Parameters
----------
obj : :class:`HasTraits` instance
The parent :class:`HasTraits` instance that has just been
created.
"""
self.set_default_value(obj)
def set_default_value(self, obj):
"""Set the default value on a per instance basis.
This method is called by :meth:`instance_init` to create and
validate the default value. The creation and validation of
default values must be delayed until the parent :class:`HasTraits`
class has been instantiated.
"""
# Check for a deferred initializer defined in the same class as the
# trait declaration or above.
mro = type(obj).mro()
meth_name = '_%s_default' % self.name
for cls in mro[:mro.index(self.this_class)+1]:
if meth_name in cls.__dict__:
break
else:
# We didn't find one. Do static initialization.
dv = self.get_default_value()
newdv = self._validate(obj, dv)
obj._trait_values[self.name] = newdv
return
# Complete the dynamic initialization.
obj._trait_dyn_inits[self.name] = cls.__dict__[meth_name]
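# Note: the deferred initializer looked up above follows the '_<name>_default'
# naming convention. Illustrative sketch (not part of this module): a subclass
# defining
#     def _value_default(self):
#         return expensive_computation()
# has that method registered in _trait_dyn_inits here and called lazily by
# __get__ on first access.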
def __get__(self, obj, cls=None):
"""Get the value of the trait by self.name for the instance.
Default values are instantiated when :meth:`HasTraits.__new__`
is called. Thus by the time this method gets called either the
default value or a user defined value (they called :meth:`__set__`)
is in the :class:`HasTraits` instance.
"""
if obj is None:
return self
else:
try:
value = obj._trait_values[self.name]
except KeyError:
# Check for a dynamic initializer.
if self.name in obj._trait_dyn_inits:
value = obj._trait_dyn_inits[self.name](obj)
# FIXME: Do we really validate here?
value = self._validate(obj, value)
obj._trait_values[self.name] = value
return value
else:
raise TraitError('Unexpected error in TraitType: '
'both default value and dynamic initializer are '
'absent.')
except Exception:
# HasTraits should call set_default_value to populate
# this. So this should never be reached.
raise TraitError('Unexpected error in TraitType: '
'default value not set properly')
else:
return value
def __set__(self, obj, value):
new_value = self._validate(obj, value)
old_value = self.__get__(obj)
obj._trait_values[self.name] = new_value
if old_value != new_value:
obj._notify_trait(self.name, old_value, new_value)
def _validate(self, obj, value):
if hasattr(self, 'validate'):
return self.validate(obj, value)
elif hasattr(self, 'is_valid_for'):
valid = self.is_valid_for(value)
if valid:
return value
else:
raise TraitError('invalid value for type: %r' % value)
elif hasattr(self, 'value_for'):
return self.value_for(value)
else:
return value
def info(self):
return self.info_text
def error(self, obj, value):
if obj is not None:
e = "The '%s' trait of %s instance must be %s, but a value of %s was specified." \
% (self.name, class_of(obj),
self.info(), repr_type(value))
else:
e = "The '%s' trait must be %s, but a value of %r was specified." \
% (self.name, self.info(), repr_type(value))
raise TraitError(e)
def get_metadata(self, key):
return getattr(self, '_metadata', {}).get(key, None)
def set_metadata(self, key, value):
getattr(self, '_metadata', {})[key] = value
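# A custom trait type typically only sets class-level default_value/info_text
# and defines a validate() method, which _validate() above will pick up.
# Illustrative sketch (not part of this module's API; see Int below for the
# real pattern):
#
#     class PositiveInt(TraitType):
#         default_value = 1
#         info_text = 'a positive int'
#         def validate(self, obj, value):
#             if isinstance(value, int) and value > 0:
#                 return value
#             self.error(obj, value)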
#-----------------------------------------------------------------------------
# The HasTraits implementation
#-----------------------------------------------------------------------------
class MetaHasTraits(type):
"""A metaclass for HasTraits.
This metaclass makes sure that any TraitType class attributes are
instantiated and sets their name attribute.
"""
def __new__(mcls, name, bases, classdict):
"""Create the HasTraits class.
This instantiates all TraitTypes in the class dict and sets their
:attr:`name` attribute.
"""
# print "MetaHasTraitlets (mcls, name): ", mcls, name
# print "MetaHasTraitlets (bases): ", bases
# print "MetaHasTraitlets (classdict): ", classdict
for k,v in iteritems(classdict):
if isinstance(v, TraitType):
v.name = k
elif inspect.isclass(v):
if issubclass(v, TraitType):
vinst = v()
vinst.name = k
classdict[k] = vinst
return super(MetaHasTraits, mcls).__new__(mcls, name, bases, classdict)
def __init__(cls, name, bases, classdict):
"""Finish initializing the HasTraits class.
This sets the :attr:`this_class` attribute of each TraitType in the
class dict to the newly created class ``cls``.
"""
for k, v in iteritems(classdict):
if isinstance(v, TraitType):
v.this_class = cls
super(MetaHasTraits, cls).__init__(name, bases, classdict)
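# Typical usage sketch (illustrative only; Int and Unicode are defined further
# below in this module):
#
#     class Foo(HasTraits):
#         count = Int(0)
#         name = Unicode(u'anonymous')
#         def _count_changed(self, name, old, new):
#             print('count changed from %r to %r' % (old, new))
#
#     f = Foo(count=3)   # keyword args go through setattr, so they validate
#     f.count = 'x'      # raises TraitError: 'x' is not an int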
class HasTraits(py3compat.with_metaclass(MetaHasTraits, object)):
def __new__(cls, *args, **kw):
# This is needed because in Python 2.6 object.__new__ only accepts
# the cls argument.
new_meth = super(HasTraits, cls).__new__
if new_meth is object.__new__:
inst = new_meth(cls)
else:
inst = new_meth(cls, **kw)
inst._trait_values = {}
inst._trait_notifiers = {}
inst._trait_dyn_inits = {}
# Here we tell all the TraitType instances to set their default
# values on the instance.
for key in dir(cls):
# Some descriptors raise AttributeError like zope.interface's
# __provides__ attributes even though they exist. This causes
# AttributeErrors even though they are listed in dir(cls).
try:
value = getattr(cls, key)
except AttributeError:
pass
else:
if isinstance(value, TraitType):
value.instance_init(inst)
return inst
def __init__(self, *args, **kw):
# Allow trait values to be set using keyword arguments.
# We need to use setattr for this to trigger validation and
# notifications.
for key, value in iteritems(kw):
setattr(self, key, value)
def _notify_trait(self, name, old_value, new_value):
# First dynamic ones
callables = []
callables.extend(self._trait_notifiers.get(name,[]))
callables.extend(self._trait_notifiers.get('anytrait',[]))
# Now static ones
try:
cb = getattr(self, '_%s_changed' % name)
except:
pass
else:
callables.append(cb)
# Call them all now
for c in callables:
# Traits catches and logs errors here. I allow them to raise
if callable(c):
argspec = inspect.getargspec(c)
nargs = len(argspec[0])
# Bound methods have an additional 'self' argument
# I don't know how to treat unbound methods, but they
# can't really be used for callbacks.
if isinstance(c, types.MethodType):
offset = -1
else:
offset = 0
if nargs + offset == 0:
c()
elif nargs + offset == 1:
c(name)
elif nargs + offset == 2:
c(name, new_value)
elif nargs + offset == 3:
c(name, old_value, new_value)
else:
raise TraitError('a trait changed callback '
'must have 0-3 arguments.')
else:
raise TraitError('a trait changed callback '
'must be callable.')
def _add_notifiers(self, handler, name):
if name not in self._trait_notifiers:
nlist = []
self._trait_notifiers[name] = nlist
else:
nlist = self._trait_notifiers[name]
if handler not in nlist:
nlist.append(handler)
def _remove_notifiers(self, handler, name):
if name in self._trait_notifiers:
nlist = self._trait_notifiers[name]
try:
index = nlist.index(handler)
except ValueError:
pass
else:
del nlist[index]
def on_trait_change(self, handler, name=None, remove=False):
"""Setup a handler to be called when a trait changes.
This is used to setup dynamic notifications of trait changes.
Static handlers can be created by creating methods on a HasTraits
subclass with the naming convention '_[traitname]_changed'. Thus,
to create static handler for the trait 'a', create the method
_a_changed(self, name, old, new) (fewer arguments can be used, see
below).
Parameters
----------
handler : callable
A callable that is called when a trait changes. Its
signature can be handler(), handler(name), handler(name, new)
or handler(name, old, new).
name : list, str, None
If None, the handler will apply to all traits. If a list
of str, handler will apply to all names in the list. If a
str, the handler will apply just to that name.
remove : bool
If False (the default), then install the handler. If True
then uninstall it.
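Examples
--------
A minimal sketch (``obj`` is a :class:`HasTraits` instance; the trait and
handler names are illustrative)::
def log_change(name, old, new):
print(name, old, new)
obj.on_trait_change(log_change, 'count')                # install
obj.on_trait_change(log_change, 'count', remove=True)   # uninstall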
"""
if remove:
names = parse_notifier_name(name)
for n in names:
self._remove_notifiers(handler, n)
else:
names = parse_notifier_name(name)
for n in names:
self._add_notifiers(handler, n)
@classmethod
def class_trait_names(cls, **metadata):
"""Get a list of all the names of this classes traits.
This method is just like the :meth:`trait_names` method, but is unbound.
"""
return cls.class_traits(**metadata).keys()
@classmethod
def class_traits(cls, **metadata):
"""Get a list of all the traits of this class.
This method is just like the :meth:`traits` method, but is unbound.
The TraitTypes returned don't know anything about the values
that the various HasTraits instances are holding.
This follows the same algorithm as traits does and does not allow
for any simple way of specifying merely that a metadata name
exists, but has any value. This is because get_metadata returns
None if a metadata key doesn't exist.
"""
traits = dict([memb for memb in getmembers(cls) if \
isinstance(memb[1], TraitType)])
if len(metadata) == 0:
return traits
for meta_name, meta_eval in metadata.items():
if type(meta_eval) is not FunctionType:
metadata[meta_name] = _SimpleTest(meta_eval)
result = {}
for name, trait in traits.items():
for meta_name, meta_eval in metadata.items():
if not meta_eval(trait.get_metadata(meta_name)):
break
else:
result[name] = trait
return result
def trait_names(self, **metadata):
"""Get a list of all the names of this classes traits."""
return self.traits(**metadata).keys()
def traits(self, **metadata):
"""Get a list of all the traits of this class.
The TraitTypes returned don't know anything about the values
that the various HasTraits instances are holding.
This follows the same algorithm as traits does and does not allow
for any simple way of specifying merely that a metadata name
exists, but has any value. This is because get_metadata returns
None if a metadata key doesn't exist.
"""
traits = dict([memb for memb in getmembers(self.__class__) if \
isinstance(memb[1], TraitType)])
if len(metadata) == 0:
return traits
for meta_name, meta_eval in metadata.items():
if type(meta_eval) is not FunctionType:
metadata[meta_name] = _SimpleTest(meta_eval)
result = {}
for name, trait in traits.items():
for meta_name, meta_eval in metadata.items():
if not meta_eval(trait.get_metadata(meta_name)):
break
else:
result[name] = trait
return result
def trait_metadata(self, traitname, key):
"""Get metadata values for trait by key."""
try:
trait = getattr(self.__class__, traitname)
except AttributeError:
raise TraitError("Class %s does not have a trait named %s" %
(self.__class__.__name__, traitname))
else:
return trait.get_metadata(key)
#-----------------------------------------------------------------------------
# Actual TraitTypes implementations/subclasses
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# TraitTypes subclasses for handling classes and instances of classes
#-----------------------------------------------------------------------------
class ClassBasedTraitType(TraitType):
"""A trait with error reporting for Type, Instance and This."""
def error(self, obj, value):
kind = type(value)
if (not py3compat.PY3) and kind is InstanceType:
msg = 'class %s' % value.__class__.__name__
else:
msg = '%s (i.e. %s)' % ( str( kind )[1:-1], repr( value ) )
if obj is not None:
e = "The '%s' trait of %s instance must be %s, but a value of %s was specified." \
% (self.name, class_of(obj),
self.info(), msg)
else:
e = "The '%s' trait must be %s, but a value of %r was specified." \
% (self.name, self.info(), msg)
raise TraitError(e)
class Type(ClassBasedTraitType):
"""A trait whose value must be a subclass of a specified class."""
def __init__ (self, default_value=None, klass=None, allow_none=True, **metadata ):
"""Construct a Type trait
A Type trait specifies that its values must be subclasses of
a particular class.
If only ``default_value`` is given, it is used for the ``klass`` as
well.
Parameters
----------
default_value : class, str or None
The default value must be a subclass of klass. If a str,
the str must be a fully specified class name, like 'foo.bar.Bah'.
The string is resolved into a real class when the parent
:class:`HasTraits` class is instantiated.
klass : class, str, None
Values of this trait must be a subclass of klass. The klass
may be specified in a string like: 'foo.bar.MyClass'.
The string is resolved into a real class when the parent
:class:`HasTraits` class is instantiated.
allow_none : boolean
Indicates whether None is allowed as an assignable value. Even if
``False``, the default value may be ``None``.
"""
if default_value is None:
if klass is None:
klass = object
elif klass is None:
klass = default_value
if not (inspect.isclass(klass) or isinstance(klass, py3compat.string_types)):
raise TraitError("A Type trait must specify a class.")
self.klass = klass
self._allow_none = allow_none
super(Type, self).__init__(default_value, **metadata)
def validate(self, obj, value):
"""Validates that the value is a valid object instance."""
try:
if issubclass(value, self.klass):
return value
except:
if (value is None) and (self._allow_none):
return value
self.error(obj, value)
def info(self):
""" Returns a description of the trait."""
if isinstance(self.klass, py3compat.string_types):
klass = self.klass
else:
klass = self.klass.__name__
result = 'a subclass of ' + klass
if self._allow_none:
return result + ' or None'
return result
def instance_init(self, obj):
self._resolve_classes()
super(Type, self).instance_init(obj)
def _resolve_classes(self):
if isinstance(self.klass, py3compat.string_types):
self.klass = import_item(self.klass)
if isinstance(self.default_value, py3compat.string_types):
self.default_value = import_item(self.default_value)
def get_default_value(self):
return self.default_value
class DefaultValueGenerator(object):
"""A class for generating new default value instances."""
def __init__(self, *args, **kw):
self.args = args
self.kw = kw
def generate(self, klass):
return klass(*self.args, **self.kw)
class Instance(ClassBasedTraitType):
"""A trait whose value must be an instance of a specified class.
The value can also be an instance of a subclass of the specified class.
"""
def __init__(self, klass=None, args=None, kw=None,
allow_none=True, **metadata ):
"""Construct an Instance trait.
This trait allows values that are instances of a particular
class or its subclasses. Our implementation is quite different
from that of enthought.traits as we don't allow instances to be used
for klass and we handle the ``args`` and ``kw`` arguments differently.
Parameters
----------
klass : class, str
The class that forms the basis for the trait. Class names
can also be specified as strings, like 'foo.bar.Bar'.
args : tuple
Positional arguments for generating the default value.
kw : dict
Keyword arguments for generating the default value.
allow_none : bool
Indicates whether None is allowed as a value.
Default Value
-------------
If both ``args`` and ``kw`` are None, then the default value is None.
If ``args`` is a tuple and ``kw`` is a dict, then the default is
created as ``klass(*args, **kw)``. If either ``args`` or ``kw`` is
None (but not both), it is replaced by ``()`` or ``{}``.
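For example (illustrative), ``Instance(Foo, args=(1,), kw={'a': 2})`` would
produce a per-instance default of ``Foo(1, a=2)``, generated lazily via
``DefaultValueGenerator`` when the parent HasTraits instance is created.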
"""
self._allow_none = allow_none
if (klass is None) or (not (inspect.isclass(klass) or isinstance(klass, py3compat.string_types))):
raise TraitError('The klass argument must be a class;'
' you gave: %r' % klass)
self.klass = klass
# self.klass is a class, so handle default_value
if args is None and kw is None:
default_value = None
else:
if args is None:
# kw is not None
args = ()
elif kw is None:
# args is not None
kw = {}
if not isinstance(kw, dict):
raise TraitError("The 'kw' argument must be a dict or None.")
if not isinstance(args, tuple):
raise TraitError("The 'args' argument must be a tuple or None.")
default_value = DefaultValueGenerator(*args, **kw)
super(Instance, self).__init__(default_value, **metadata)
def validate(self, obj, value):
if value is None:
if self._allow_none:
return value
self.error(obj, value)
if isinstance(value, self.klass):
return value
else:
self.error(obj, value)
def info(self):
if isinstance(self.klass, py3compat.string_types):
klass = self.klass
else:
klass = self.klass.__name__
result = class_of(klass)
if self._allow_none:
return result + ' or None'
return result
def instance_init(self, obj):
self._resolve_classes()
super(Instance, self).instance_init(obj)
def _resolve_classes(self):
if isinstance(self.klass, py3compat.string_types):
self.klass = import_item(self.klass)
def get_default_value(self):
"""Instantiate a default value instance.
This is called when the containing HasTraits classes'
:meth:`__new__` method is called to ensure that a unique instance
is created for each HasTraits instance.
"""
dv = self.default_value
if isinstance(dv, DefaultValueGenerator):
return dv.generate(self.klass)
else:
return dv
class This(ClassBasedTraitType):
"""A trait for instances of the class containing this trait.
Because of how and when class bodies are executed, the ``This``
trait can only have a default value of None. Because of this, and because we
always validate default values, ``allow_none`` is *always* true.
"""
info_text = 'an instance of the same type as the receiver or None'
def __init__(self, **metadata):
super(This, self).__init__(None, **metadata)
def validate(self, obj, value):
# What if value is a superclass of obj.__class__? This is
# complicated if it was the superclass that defined the This
# trait.
if isinstance(value, self.this_class) or (value is None):
return value
else:
self.error(obj, value)
#-----------------------------------------------------------------------------
# Basic TraitTypes implementations/subclasses
#-----------------------------------------------------------------------------
class Any(TraitType):
default_value = None
info_text = 'any value'
class Int(TraitType):
"""An int trait."""
default_value = 0
info_text = 'an int'
def validate(self, obj, value):
if isinstance(value, int):
return value
self.error(obj, value)
class CInt(Int):
"""A casting version of the int trait."""
def validate(self, obj, value):
try:
return int(value)
except:
self.error(obj, value)
if py3compat.PY3:
Long, CLong = Int, CInt
Integer = Int
else:
class Long(TraitType):
"""A long integer trait."""
default_value = 0
info_text = 'a long'
def validate(self, obj, value):
if isinstance(value, long):
return value
if isinstance(value, int):
return long(value)
self.error(obj, value)
class CLong(Long):
"""A casting version of the long integer trait."""
def validate(self, obj, value):
try:
return long(value)
except:
self.error(obj, value)
class Integer(TraitType):
"""An integer trait.
Longs that are unnecessary (<= sys.maxint) are cast to ints."""
default_value = 0
info_text = 'an integer'
def validate(self, obj, value):
if isinstance(value, int):
return value
if isinstance(value, long):
# downcast longs that fit in int:
# note that int(n > sys.maxint) returns a long, so
# we don't need a condition on this cast
return int(value)
if sys.platform == "cli":
from System import Int64
if isinstance(value, Int64):
return int(value)
self.error(obj, value)
class Float(TraitType):
"""A float trait."""
default_value = 0.0
info_text = 'a float'
def validate(self, obj, value):
if isinstance(value, float):
return value
if isinstance(value, int):
return float(value)
self.error(obj, value)
class CFloat(Float):
"""A casting version of the float trait."""
def validate(self, obj, value):
try:
return float(value)
except:
self.error(obj, value)
class Complex(TraitType):
"""A trait for complex numbers."""
default_value = 0.0 + 0.0j
info_text = 'a complex number'
def validate(self, obj, value):
if isinstance(value, complex):
return value
if isinstance(value, (float, int)):
return complex(value)
self.error(obj, value)
class CComplex(Complex):
"""A casting version of the complex number trait."""
def validate (self, obj, value):
try:
return complex(value)
except:
self.error(obj, value)
# We should always be explicit about whether we're using bytes or unicode, both
# for Python 3 conversion and for reliable unicode behaviour on Python 2. So
# we don't have a Str type.
class Bytes(TraitType):
"""A trait for byte strings."""
default_value = b''
info_text = 'a bytes object'
def validate(self, obj, value):
if isinstance(value, bytes):
return value
self.error(obj, value)
class CBytes(Bytes):
"""A casting version of the byte string trait."""
def validate(self, obj, value):
try:
return bytes(value)
except:
self.error(obj, value)
class Unicode(TraitType):
"""A trait for unicode strings."""
default_value = u''
info_text = 'a unicode string'
def validate(self, obj, value):
if isinstance(value, py3compat.unicode_type):
return value
if isinstance(value, bytes):
return py3compat.unicode_type(value)
self.error(obj, value)
class CUnicode(Unicode):
"""A casting version of the unicode trait."""
def validate(self, obj, value):
try:
return py3compat.unicode_type(value)
except:
self.error(obj, value)
class ObjectName(TraitType):
"""A string holding a valid object name in this version of Python.
This does not check that the name exists in any scope."""
info_text = "a valid object identifier in Python"
if py3compat.PY3:
# Python 3:
coerce_str = staticmethod(lambda _,s: s)
else:
# Python 2:
def coerce_str(self, obj, value):
"In Python 2, coerce ascii-only unicode to str"
if isinstance(value, unicode):
try:
return str(value)
except UnicodeEncodeError:
self.error(obj, value)
return value
def validate(self, obj, value):
value = self.coerce_str(obj, value)
if isinstance(value, str) and py3compat.isidentifier(value):
return value
self.error(obj, value)
class DottedObjectName(ObjectName):
"""A string holding a valid dotted object name in Python, such as A.b3._c"""
def validate(self, obj, value):
value = self.coerce_str(obj, value)
if isinstance(value, str) and py3compat.isidentifier(value, dotted=True):
return value
self.error(obj, value)
class Bool(TraitType):
"""A boolean (True, False) trait."""
default_value = False
info_text = 'a boolean'
def validate(self, obj, value):
if isinstance(value, bool):
return value
self.error(obj, value)
class CBool(Bool):
"""A casting version of the boolean trait."""
def validate(self, obj, value):
try:
return bool(value)
except:
self.error(obj, value)
class Enum(TraitType):
"""An enum that whose value must be in a given sequence."""
def __init__(self, values, default_value=None, allow_none=True, **metadata):
self.values = values
self._allow_none = allow_none
super(Enum, self).__init__(default_value, **metadata)
def validate(self, obj, value):
if value is None:
if self._allow_none:
return value
if value in self.values:
return value
self.error(obj, value)
def info(self):
""" Returns a description of the trait."""
result = 'any of ' + repr(self.values)
if self._allow_none:
return result + ' or None'
return result
class CaselessStrEnum(Enum):
"""An enum of strings that are caseless in validate."""
def validate(self, obj, value):
if value is None:
if self._allow_none:
return value
if not isinstance(value, py3compat.string_types):
self.error(obj, value)
for v in self.values:
if v.lower() == value.lower():
return v
self.error(obj, value)
class Container(Instance):
"""An instance of a container (list, set, etc.)
To be subclassed by overriding klass.
"""
klass = None
_valid_defaults = SequenceTypes
_trait = None
def __init__(self, trait=None, default_value=None, allow_none=True,
**metadata):
"""Create a container trait type from a list, set, or tuple.
The default value is created by doing ``List(default_value)``,
which creates a copy of the ``default_value``.
``trait`` can be specified, which restricts the type of elements
in the container to that TraitType.
If only one arg is given and it is not a Trait, it is taken as
``default_value``:
``c = List([1,2,3])``
Parameters
----------
trait : TraitType [ optional ]
the type for restricting the contents of the Container. If unspecified,
types are not checked.
default_value : SequenceType [ optional ]
The default value for the Trait. Must be list/tuple/set, and
will be cast to the container type.
allow_none : Bool [ default True ]
Whether to allow the value to be None
**metadata : any
further keys for extensions to the Trait (e.g. config)
"""
# allow List([values]):
if default_value is None and not is_trait(trait):
default_value = trait
trait = None
if default_value is None:
args = ()
elif isinstance(default_value, self._valid_defaults):
args = (default_value,)
else:
raise TypeError('default value of %s was %s' %(self.__class__.__name__, default_value))
if is_trait(trait):
self._trait = trait() if isinstance(trait, type) else trait
self._trait.name = 'element'
elif trait is not None:
raise TypeError("`trait` must be a Trait or None, got %s"%repr_type(trait))
super(Container,self).__init__(klass=self.klass, args=args,
allow_none=allow_none, **metadata)
def element_error(self, obj, element, validator):
e = "Element of the '%s' trait of %s instance must be %s, but a value of %s was specified." \
% (self.name, class_of(obj), validator.info(), repr_type(element))
raise TraitError(e)
def validate(self, obj, value):
value = super(Container, self).validate(obj, value)
if value is None:
return value
value = self.validate_elements(obj, value)
return value
def validate_elements(self, obj, value):
validated = []
if self._trait is None or isinstance(self._trait, Any):
return value
for v in value:
try:
v = self._trait.validate(obj, v)
except TraitError:
self.element_error(obj, v, self._trait)
else:
validated.append(v)
return self.klass(validated)
class List(Container):
"""An instance of a Python list."""
klass = list
def __init__(self, trait=None, default_value=None, minlen=0, maxlen=sys.maxsize,
allow_none=True, **metadata):
"""Create a List trait type from a list, set, or tuple.
The default value is created by doing ``List(default_value)``,
which creates a copy of the ``default_value``.
``trait`` can be specified, which restricts the type of elements
in the container to that TraitType.
If only one arg is given and it is not a Trait, it is taken as
``default_value``:
``c = List([1,2,3])``
Parameters
----------
trait : TraitType [ optional ]
the type for restricting the contents of the Container. If unspecified,
types are not checked.
default_value : SequenceType [ optional ]
The default value for the Trait. Must be list/tuple/set, and
will be cast to the container type.
minlen : Int [ default 0 ]
The minimum length of the input list
maxlen : Int [ default sys.maxsize ]
The maximum length of the input list
allow_none : Bool [ default True ]
Whether to allow the value to be None
**metadata : any
further keys for extensions to the Trait (e.g. config)
"""
self._minlen = minlen
self._maxlen = maxlen
super(List, self).__init__(trait=trait, default_value=default_value,
allow_none=allow_none, **metadata)
def length_error(self, obj, value):
e = "The '%s' trait of %s instance must be of length %i <= L <= %i, but a value of %s was specified." \
% (self.name, class_of(obj), self._minlen, self._maxlen, value)
raise TraitError(e)
def validate_elements(self, obj, value):
length = len(value)
if length < self._minlen or length > self._maxlen:
self.length_error(obj, value)
return super(List, self).validate_elements(obj, value)
class Set(Container):
"""An instance of a Python set."""
klass = set
class Tuple(Container):
"""An instance of a Python tuple."""
klass = tuple
def __init__(self, *traits, **metadata):
"""Tuple(*traits, default_value=None, allow_none=True, **medatata)
Create a tuple from a list, set, or tuple.
Create a fixed-type tuple with Traits:
``t = Tuple(Int, Str, CStr)``
would be length 3, with Int,Str,CStr for each element.
If only one arg is given and it is not a Trait, it is taken as
default_value:
``t = Tuple((1,2,3))``
Otherwise, ``default_value`` *must* be specified by keyword.
Parameters
----------
*traits : TraitTypes [ optional ]
the types for restricting the contents of the Tuple. If unspecified,
types are not checked. If specified, then each positional argument
corresponds to an element of the tuple. Tuples defined with traits
are of fixed length.
default_value : SequenceType [ optional ]
The default value for the Tuple. Must be list/tuple/set, and
will be cast to a tuple. If `traits` are specified, the
`default_value` must conform to the shape and type they specify.
allow_none : Bool [ default True ]
Whether to allow the value to be None
**metadata : any
further keys for extensions to the Trait (e.g. config)
"""
default_value = metadata.pop('default_value', None)
allow_none = metadata.pop('allow_none', True)
# allow Tuple((values,)):
if len(traits) == 1 and default_value is None and not is_trait(traits[0]):
default_value = traits[0]
traits = ()
if default_value is None:
args = ()
elif isinstance(default_value, self._valid_defaults):
args = (default_value,)
else:
raise TypeError('default value of %s was %s' %(self.__class__.__name__, default_value))
self._traits = []
for trait in traits:
t = trait() if isinstance(trait, type) else trait
t.name = 'element'
self._traits.append(t)
if self._traits and default_value is None:
# don't allow default to be an empty container if length is specified
args = None
super(Container,self).__init__(klass=self.klass, args=args,
allow_none=allow_none, **metadata)
def validate_elements(self, obj, value):
if not self._traits:
# nothing to validate
return value
if len(value) != len(self._traits):
e = "The '%s' trait of %s instance requires %i elements, but a value of %s was specified." \
% (self.name, class_of(obj), len(self._traits), repr_type(value))
raise TraitError(e)
validated = []
for t,v in zip(self._traits, value):
try:
v = t.validate(obj, v)
except TraitError:
self.element_error(obj, v, t)
else:
validated.append(v)
return tuple(validated)
class Dict(Instance):
"""An instance of a Python dict."""
def __init__(self, default_value=None, allow_none=True, **metadata):
"""Create a dict trait type from a dict.
The default value is created by doing ``dict(default_value)``,
which creates a copy of the ``default_value``.
"""
if default_value is None:
args = ((),)
elif isinstance(default_value, dict):
args = (default_value,)
elif isinstance(default_value, SequenceTypes):
args = (default_value,)
else:
raise TypeError('default value of Dict was %s' % default_value)
super(Dict,self).__init__(klass=dict, args=args,
allow_none=allow_none, **metadata)
class TCPAddress(TraitType):
"""A trait for an (ip, port) tuple.
This allows for both IPv4 IP addresses as well as hostnames.
"""
default_value = ('127.0.0.1', 0)
info_text = 'an (ip, port) tuple'
def validate(self, obj, value):
if isinstance(value, tuple):
if len(value) == 2:
if isinstance(value[0], py3compat.string_types) and isinstance(value[1], int):
port = value[1]
if port >= 0 and port <= 65535:
return value
self.error(obj, value)
class CRegExp(TraitType):
"""A casting compiled regular expression trait.
Accepts both strings and compiled regular expressions. The resulting
attribute will be a compiled regular expression."""
info_text = 'a regular expression'
def validate(self, obj, value):
try:
return re.compile(value)
except:
self.error(obj, value)
|
# coding=utf-8
# pylint: disable-msg=E1101,W0612
import pytest
from collections import Counter, defaultdict, OrderedDict
import numpy as np
import pandas as pd
from pandas import (Index, Series, DataFrame, isna)
from pandas.compat import lrange
from pandas import compat
from pandas.util.testing import assert_series_equal, assert_frame_equal
import pandas.util.testing as tm
from .common import TestData
class TestSeriesApply(TestData):
def test_apply(self):
with np.errstate(all='ignore'):
tm.assert_series_equal(self.ts.apply(np.sqrt), np.sqrt(self.ts))
# element-wise apply
import math
tm.assert_series_equal(self.ts.apply(math.exp), np.exp(self.ts))
# empty series
s = Series(dtype=object, name='foo', index=pd.Index([], name='bar'))
rs = s.apply(lambda x: x)
tm.assert_series_equal(s, rs)
# check all metadata (GH 9322)
assert s is not rs
assert s.index is rs.index
assert s.dtype == rs.dtype
assert s.name == rs.name
# index but no data
s = Series(index=[1, 2, 3])
rs = s.apply(lambda x: x)
tm.assert_series_equal(s, rs)
def test_apply_same_length_inference_bug(self):
s = Series([1, 2])
f = lambda x: (x, x + 1)
result = s.apply(f)
expected = s.map(f)
assert_series_equal(result, expected)
s = Series([1, 2, 3])
result = s.apply(f)
expected = s.map(f)
assert_series_equal(result, expected)
def test_apply_dont_convert_dtype(self):
s = Series(np.random.randn(10))
f = lambda x: x if x > 0 else np.nan
result = s.apply(f, convert_dtype=False)
assert result.dtype == object
def test_with_string_args(self):
for arg in ['sum', 'mean', 'min', 'max', 'std']:
result = self.ts.apply(arg)
expected = getattr(self.ts, arg)()
assert result == expected
def test_apply_args(self):
s = Series(['foo,bar'])
result = s.apply(str.split, args=(',', ))
assert result[0] == ['foo', 'bar']
assert isinstance(result[0], list)
def test_series_map_box_timestamps(self):
# GH#2689, GH#2627
ser = Series(pd.date_range('1/1/2000', periods=10))
def func(x):
return (x.hour, x.day, x.month)
# it works!
ser.map(func)
ser.apply(func)
def test_apply_box(self):
# ufunc will not be boxed. Same test cases as the test_map_box
vals = [pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02')]
s = pd.Series(vals)
assert s.dtype == 'datetime64[ns]'
# boxed value must be Timestamp instance
res = s.apply(lambda x: '{0}_{1}_{2}'.format(x.__class__.__name__,
x.day, x.tz))
exp = pd.Series(['Timestamp_1_None', 'Timestamp_2_None'])
tm.assert_series_equal(res, exp)
vals = [pd.Timestamp('2011-01-01', tz='US/Eastern'),
pd.Timestamp('2011-01-02', tz='US/Eastern')]
s = pd.Series(vals)
assert s.dtype == 'datetime64[ns, US/Eastern]'
res = s.apply(lambda x: '{0}_{1}_{2}'.format(x.__class__.__name__,
x.day, x.tz))
exp = pd.Series(['Timestamp_1_US/Eastern', 'Timestamp_2_US/Eastern'])
tm.assert_series_equal(res, exp)
# timedelta
vals = [pd.Timedelta('1 days'), pd.Timedelta('2 days')]
s = pd.Series(vals)
assert s.dtype == 'timedelta64[ns]'
res = s.apply(lambda x: '{0}_{1}'.format(x.__class__.__name__, x.days))
exp = pd.Series(['Timedelta_1', 'Timedelta_2'])
tm.assert_series_equal(res, exp)
# period (object dtype, not boxed)
vals = [pd.Period('2011-01-01', freq='M'),
pd.Period('2011-01-02', freq='M')]
s = pd.Series(vals)
assert s.dtype == 'object'
res = s.apply(lambda x: '{0}_{1}'.format(x.__class__.__name__,
x.freqstr))
exp = pd.Series(['Period_M', 'Period_M'])
tm.assert_series_equal(res, exp)
def test_apply_datetimetz(self):
values = pd.date_range('2011-01-01', '2011-01-02',
freq='H').tz_localize('Asia/Tokyo')
s = pd.Series(values, name='XX')
result = s.apply(lambda x: x + pd.offsets.Day())
exp_values = pd.date_range('2011-01-02', '2011-01-03',
freq='H').tz_localize('Asia/Tokyo')
exp = pd.Series(exp_values, name='XX')
tm.assert_series_equal(result, exp)
# change dtype
# GH 14506 : Returned dtype changed from int32 to int64
result = s.apply(lambda x: x.hour)
exp = pd.Series(list(range(24)) + [0], name='XX', dtype=np.int64)
tm.assert_series_equal(result, exp)
# not vectorized
def f(x):
if not isinstance(x, pd.Timestamp):
raise ValueError
return str(x.tz)
result = s.map(f)
exp = pd.Series(['Asia/Tokyo'] * 25, name='XX')
tm.assert_series_equal(result, exp)
def test_apply_dict_depr(self):
tsdf = pd.DataFrame(np.random.randn(10, 3),
columns=['A', 'B', 'C'],
index=pd.date_range('1/1/2000', periods=10))
with tm.assert_produces_warning(FutureWarning):
tsdf.A.agg({'foo': ['sum', 'mean']})
class TestSeriesAggregate(TestData):
def test_transform(self):
# transforming functions
with np.errstate(all='ignore'):
f_sqrt = np.sqrt(self.series)
f_abs = np.abs(self.series)
# ufunc
result = self.series.transform(np.sqrt)
expected = f_sqrt.copy()
assert_series_equal(result, expected)
result = self.series.apply(np.sqrt)
assert_series_equal(result, expected)
# list-like
result = self.series.transform([np.sqrt])
expected = f_sqrt.to_frame().copy()
expected.columns = ['sqrt']
assert_frame_equal(result, expected)
result = self.series.transform([np.sqrt])
assert_frame_equal(result, expected)
result = self.series.transform(['sqrt'])
assert_frame_equal(result, expected)
# multiple items in list
# these are in the order as if we are applying both functions per
# series and then concatting
expected = pd.concat([f_sqrt, f_abs], axis=1)
expected.columns = ['sqrt', 'absolute']
result = self.series.apply([np.sqrt, np.abs])
assert_frame_equal(result, expected)
result = self.series.transform(['sqrt', 'abs'])
expected.columns = ['sqrt', 'abs']
assert_frame_equal(result, expected)
# dict, provide renaming
expected = pd.concat([f_sqrt, f_abs], axis=1)
expected.columns = ['foo', 'bar']
expected = expected.unstack().rename('series')
result = self.series.apply({'foo': np.sqrt, 'bar': np.abs})
assert_series_equal(result.reindex_like(expected), expected)
def test_transform_and_agg_error(self):
# we are trying to transform with an aggregator
def f():
self.series.transform(['min', 'max'])
pytest.raises(ValueError, f)
def f():
with np.errstate(all='ignore'):
self.series.agg(['sqrt', 'max'])
pytest.raises(ValueError, f)
def f():
with np.errstate(all='ignore'):
self.series.transform(['sqrt', 'max'])
pytest.raises(ValueError, f)
def f():
with np.errstate(all='ignore'):
self.series.agg({'foo': np.sqrt, 'bar': 'sum'})
pytest.raises(ValueError, f)
def test_demo(self):
# demonstration tests
s = Series(range(6), dtype='int64', name='series')
result = s.agg(['min', 'max'])
expected = Series([0, 5], index=['min', 'max'], name='series')
tm.assert_series_equal(result, expected)
result = s.agg({'foo': 'min'})
expected = Series([0], index=['foo'], name='series')
tm.assert_series_equal(result, expected)
# nested renaming
with tm.assert_produces_warning(FutureWarning):
result = s.agg({'foo': ['min', 'max']})
expected = DataFrame(
{'foo': [0, 5]},
index=['min', 'max']).unstack().rename('series')
tm.assert_series_equal(result, expected)
def test_multiple_aggregators_with_dict_api(self):
s = Series(range(6), dtype='int64', name='series')
# nested renaming
with tm.assert_produces_warning(FutureWarning):
result = s.agg({'foo': ['min', 'max'], 'bar': ['sum', 'mean']})
expected = DataFrame(
{'foo': [5.0, np.nan, 0.0, np.nan],
'bar': [np.nan, 2.5, np.nan, 15.0]},
columns=['foo', 'bar'],
index=['max', 'mean',
'min', 'sum']).unstack().rename('series')
tm.assert_series_equal(result.reindex_like(expected), expected)
def test_agg_apply_evaluate_lambdas_the_same(self):
# test that we are evaluating row-by-row first
# before vectorized evaluation
result = self.series.apply(lambda x: str(x))
expected = self.series.agg(lambda x: str(x))
tm.assert_series_equal(result, expected)
result = self.series.apply(str)
expected = self.series.agg(str)
tm.assert_series_equal(result, expected)
def test_with_nested_series(self):
# GH 2316
# .agg with a reducer and a transform, what to do
result = self.ts.apply(lambda x: Series(
[x, x ** 2], index=['x', 'x^2']))
expected = DataFrame({'x': self.ts, 'x^2': self.ts ** 2})
tm.assert_frame_equal(result, expected)
result = self.ts.agg(lambda x: Series(
[x, x ** 2], index=['x', 'x^2']))
tm.assert_frame_equal(result, expected)
def test_replicate_describe(self):
# this also tests a result set that is all scalars
expected = self.series.describe()
result = self.series.apply(OrderedDict(
[('count', 'count'),
('mean', 'mean'),
('std', 'std'),
('min', 'min'),
('25%', lambda x: x.quantile(0.25)),
('50%', 'median'),
('75%', lambda x: x.quantile(0.75)),
('max', 'max')]))
assert_series_equal(result, expected)
def test_reduce(self):
# reductions with named functions
result = self.series.agg(['sum', 'mean'])
expected = Series([self.series.sum(),
self.series.mean()],
['sum', 'mean'],
name=self.series.name)
assert_series_equal(result, expected)
def test_non_callable_aggregates(self):
# test agg using non-callable series attributes
s = Series([1, 2, None])
# Calling agg w/ just a string arg same as calling s.arg
result = s.agg('size')
expected = s.size
assert result == expected
# test when mixed w/ callable reducers
result = s.agg(['size', 'count', 'mean'])
expected = Series(OrderedDict([('size', 3.0),
('count', 2.0),
('mean', 1.5)]))
assert_series_equal(result[expected.index], expected)
class TestSeriesMap(TestData):
def test_map(self):
index, data = tm.getMixedTypeDict()
source = Series(data['B'], index=data['C'])
target = Series(data['C'][:4], index=data['D'][:4])
merged = target.map(source)
for k, v in compat.iteritems(merged):
assert v == source[target[k]]
# input could be a dict
merged = target.map(source.to_dict())
for k, v in compat.iteritems(merged):
assert v == source[target[k]]
# function
result = self.ts.map(lambda x: x * 2)
tm.assert_series_equal(result, self.ts * 2)
# GH 10324
a = Series([1, 2, 3, 4])
b = Series(["even", "odd", "even", "odd"], dtype="category")
c = Series(["even", "odd", "even", "odd"])
exp = Series(["odd", "even", "odd", np.nan], dtype="category")
tm.assert_series_equal(a.map(b), exp)
exp = Series(["odd", "even", "odd", np.nan])
tm.assert_series_equal(a.map(c), exp)
a = Series(['a', 'b', 'c', 'd'])
b = Series([1, 2, 3, 4],
index=pd.CategoricalIndex(['b', 'c', 'd', 'e']))
c = Series([1, 2, 3, 4], index=Index(['b', 'c', 'd', 'e']))
exp = Series([np.nan, 1, 2, 3])
tm.assert_series_equal(a.map(b), exp)
exp = Series([np.nan, 1, 2, 3])
tm.assert_series_equal(a.map(c), exp)
a = Series(['a', 'b', 'c', 'd'])
b = Series(['B', 'C', 'D', 'E'], dtype='category',
index=pd.CategoricalIndex(['b', 'c', 'd', 'e']))
c = Series(['B', 'C', 'D', 'E'], index=Index(['b', 'c', 'd', 'e']))
exp = Series(pd.Categorical([np.nan, 'B', 'C', 'D'],
categories=['B', 'C', 'D', 'E']))
tm.assert_series_equal(a.map(b), exp)
exp = Series([np.nan, 'B', 'C', 'D'])
tm.assert_series_equal(a.map(c), exp)
@pytest.mark.parametrize("index", tm.all_index_generator(10))
def test_map_empty(self, index):
s = Series(index)
result = s.map({})
expected = pd.Series(np.nan, index=s.index)
tm.assert_series_equal(result, expected)
def test_map_compat(self):
# related GH 8024
s = Series([True, True, False], index=[1, 2, 3])
result = s.map({True: 'foo', False: 'bar'})
expected = Series(['foo', 'foo', 'bar'], index=[1, 2, 3])
assert_series_equal(result, expected)
def test_map_int(self):
left = Series({'a': 1., 'b': 2., 'c': 3., 'd': 4})
right = Series({1: 11, 2: 22, 3: 33})
assert left.dtype == np.float_
assert issubclass(right.dtype.type, np.integer)
merged = left.map(right)
assert merged.dtype == np.float_
assert isna(merged['d'])
assert not isna(merged['c'])
def test_map_type_inference(self):
s = Series(lrange(3))
s2 = s.map(lambda x: np.where(x == 0, 0, 1))
assert issubclass(s2.dtype.type, np.integer)
def test_map_decimal(self):
from decimal import Decimal
result = self.series.map(lambda x: Decimal(str(x)))
assert result.dtype == np.object_
assert isinstance(result[0], Decimal)
def test_map_na_exclusion(self):
s = Series([1.5, np.nan, 3, np.nan, 5])
result = s.map(lambda x: x * 2, na_action='ignore')
exp = s * 2
assert_series_equal(result, exp)
def test_map_dict_with_tuple_keys(self):
"""
Due to new MultiIndex-ing behaviour in v0.14.0,
dicts with tuple keys passed to map were being
converted to a multi-index, preventing tuple values
from being mapped properly.
"""
# GH 18496
df = pd.DataFrame({'a': [(1, ), (2, ), (3, 4), (5, 6)]})
label_mappings = {(1, ): 'A', (2, ): 'B', (3, 4): 'A', (5, 6): 'B'}
df['labels'] = df['a'].map(label_mappings)
df['expected_labels'] = pd.Series(['A', 'B', 'A', 'B'], index=df.index)
# All labels should be filled now
tm.assert_series_equal(df['labels'], df['expected_labels'],
check_names=False)
def test_map_counter(self):
s = Series(['a', 'b', 'c'], index=[1, 2, 3])
counter = Counter()
counter['b'] = 5
counter['c'] += 1
result = s.map(counter)
expected = Series([0, 5, 1], index=[1, 2, 3])
assert_series_equal(result, expected)
def test_map_defaultdict(self):
s = Series([1, 2, 3], index=['a', 'b', 'c'])
default_dict = defaultdict(lambda: 'blank')
default_dict[1] = 'stuff'
result = s.map(default_dict)
expected = Series(['stuff', 'blank', 'blank'], index=['a', 'b', 'c'])
assert_series_equal(result, expected)
def test_map_dict_subclass_with_missing(self):
"""
Test Series.map with a dictionary subclass that defines __missing__,
i.e. sets a default value (GH #15999).
"""
class DictWithMissing(dict):
def __missing__(self, key):
return 'missing'
s = Series([1, 2, 3])
dictionary = DictWithMissing({3: 'three'})
result = s.map(dictionary)
expected = Series(['missing', 'missing', 'three'])
assert_series_equal(result, expected)
def test_map_dict_subclass_without_missing(self):
class DictWithoutMissing(dict):
pass
s = Series([1, 2, 3])
dictionary = DictWithoutMissing({3: 'three'})
result = s.map(dictionary)
expected = Series([np.nan, np.nan, 'three'])
assert_series_equal(result, expected)
def test_map_box(self):
vals = [pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02')]
s = pd.Series(vals)
assert s.dtype == 'datetime64[ns]'
# boxed value must be Timestamp instance
res = s.map(lambda x: '{0}_{1}_{2}'.format(x.__class__.__name__,
x.day, x.tz))
exp = pd.Series(['Timestamp_1_None', 'Timestamp_2_None'])
tm.assert_series_equal(res, exp)
vals = [pd.Timestamp('2011-01-01', tz='US/Eastern'),
pd.Timestamp('2011-01-02', tz='US/Eastern')]
s = pd.Series(vals)
assert s.dtype == 'datetime64[ns, US/Eastern]'
res = s.map(lambda x: '{0}_{1}_{2}'.format(x.__class__.__name__,
x.day, x.tz))
exp = pd.Series(['Timestamp_1_US/Eastern', 'Timestamp_2_US/Eastern'])
tm.assert_series_equal(res, exp)
# timedelta
vals = [pd.Timedelta('1 days'), pd.Timedelta('2 days')]
s = pd.Series(vals)
assert s.dtype == 'timedelta64[ns]'
res = s.map(lambda x: '{0}_{1}'.format(x.__class__.__name__, x.days))
exp = pd.Series(['Timedelta_1', 'Timedelta_2'])
tm.assert_series_equal(res, exp)
# period (object dtype, not boxed)
vals = [pd.Period('2011-01-01', freq='M'),
pd.Period('2011-01-02', freq='M')]
s = pd.Series(vals)
assert s.dtype == 'object'
res = s.map(lambda x: '{0}_{1}'.format(x.__class__.__name__,
x.freqstr))
exp = pd.Series(['Period_M', 'Period_M'])
tm.assert_series_equal(res, exp)
def test_map_categorical(self):
values = pd.Categorical(list('ABBABCD'), categories=list('DCBA'),
ordered=True)
s = pd.Series(values, name='XX', index=list('abcdefg'))
result = s.map(lambda x: x.lower())
exp_values = pd.Categorical(list('abbabcd'), categories=list('dcba'),
ordered=True)
exp = pd.Series(exp_values, name='XX', index=list('abcdefg'))
tm.assert_series_equal(result, exp)
tm.assert_categorical_equal(result.values, exp_values)
result = s.map(lambda x: 'A')
exp = pd.Series(['A'] * 7, name='XX', index=list('abcdefg'))
tm.assert_series_equal(result, exp)
assert result.dtype == np.object
with pytest.raises(NotImplementedError):
s.map(lambda x: x, na_action='ignore')
def test_map_datetimetz(self):
values = pd.date_range('2011-01-01', '2011-01-02',
freq='H').tz_localize('Asia/Tokyo')
s = pd.Series(values, name='XX')
# keep tz
result = s.map(lambda x: x + pd.offsets.Day())
exp_values = pd.date_range('2011-01-02', '2011-01-03',
freq='H').tz_localize('Asia/Tokyo')
exp = pd.Series(exp_values, name='XX')
tm.assert_series_equal(result, exp)
# change dtype
# GH 14506 : Returned dtype changed from int32 to int64
result = s.map(lambda x: x.hour)
exp = pd.Series(list(range(24)) + [0], name='XX', dtype=np.int64)
tm.assert_series_equal(result, exp)
with pytest.raises(NotImplementedError):
s.map(lambda x: x, na_action='ignore')
# not vectorized
def f(x):
if not isinstance(x, pd.Timestamp):
raise ValueError
return str(x.tz)
result = s.map(f)
exp = pd.Series(['Asia/Tokyo'] * 25, name='XX')
tm.assert_series_equal(result, exp)
@pytest.mark.parametrize("vals,mapping,exp", [
(list('abc'), {np.nan: 'not NaN'}, [np.nan] * 3 + ['not NaN']),
(list('abc'), {'a': 'a letter'}, ['a letter'] + [np.nan] * 3),
(list(range(3)), {0: 42}, [42] + [np.nan] * 3)])
def test_map_missing_mixed(self, vals, mapping, exp):
# GH20495
s = pd.Series(vals + [np.nan])
result = s.map(mapping)
tm.assert_series_equal(result, pd.Series(exp))
|
import sys
from itertools import groupby
def fasta_iter(fasta_name):
fh = open(fasta_name)
faiter = (x[1] for x in groupby(fh, lambda line: line[0] == ">"))
for header in faiter:
        # Entire header line; add .split()[0] to keep just the first column
        headerStr = next(header)[1:].strip()
        seq = "".join(s.strip() for s in next(faiter))
yield (headerStr, seq)
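# Illustrative usage sketch (hypothetical file name, not part of this pipeline):
# given an "example.fasta" containing ">seq1\nATG\nGCC", fasta_iter yields
# ("seq1", "ATGGCC"):
#
#     for name, seq in fasta_iter("example.fasta"):
#         print(name, len(seq))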
untrimmed = sys.argv[1]
column_file = sys.argv[2]
nucleotide = sys.argv[3]
output1 = sys.argv[1].split(".")[0] + ".codon.aln"
cut = ""
longIsoform_CDS_combined = {}
sequence_iterator = fasta_iter(nucleotide)
with open(column_file) as f:
for line in f:
if "#ColumnsMap" in line:
cut += line.strip().split("#ColumnsMap")[1]
print(cut)
cut = cut.split(',')
cut = list(map(int, cut))
for ff in sequence_iterator:
headerStr, seq = ff
GeneID = headerStr
if GeneID not in longIsoform_CDS_combined:
longIsoform_CDS_combined[GeneID] = seq
# Open output
# print(len(longIsoform_CDS_combined))
with open(output1, "w") as out:
# Get column cut file
# Get corresponding untrimmed Alignments, as original, line by line
line1 = True
first_line = True
with open(untrimmed) as f:
for line in f:
if line1:
line1 = False
continue
row = line.strip().split()
original = row[1] # cds
header = row[0]
            # Skip records whose header has no matching CDS sequence;
            # otherwise `sequence` would be undefined below.
            try:
                sequence = longIsoform_CDS_combined[header]  # original
            except KeyError:
                continue
CodonPos = {}
position = 0
codon = ""
number = 1
for i in sequence:
codon += i
if position % 3 == 2:
CodonPos[number] = codon
number += 1
position += 1
if position % 3 == 0:
codon = ""
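            # CodonPos now maps the 1-based amino-acid position to its codon;
            # walk the aligned protein and keep codons for the alignment
            # columns listed in `cut`.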
aaPos = 0
firstAA = True
alnPos = 0
prot = ""
trimmed = ""
for i in original:
if i != "-":
aaPos += 1
if alnPos in cut:
prot += i
if i != "-":
# print(aaPos,CodonPos[aaPos])
trimmed += CodonPos[aaPos]
else:
trimmed += "---"
alnPos += 1
num_lines = sum(1 for line in open(untrimmed))
out.write(">" + header + "\n")
out.write(trimmed + "\n")
|
# -*- coding: utf-8 -*-
### CONNECTS TO SELECTED EC2 INSTANCES VIA SSH AND RUNS UPDATE COMMANDS ON THEM
### If an instance has tag NoUpdate=1, then it's not listed and not updated
### CONFIGURATION
REGION = 'eu-west-1'
PROFILE_NAME = 'InstanceLister'
TERMINAL = 'xfce4-terminal -H'
SSH_CERTIFICATE = ['/home/lorenzo/.certificates/david-1.pem', '/home/lorenzo/.certificates/david-1-pocket-sell-app.pem']
UPDATE_COMMAND = ' && '.join([
'sudo apt-get update',
'sudo apt-get upgrade',
'sudo apt-get clean',
'sudo apt-get autoclean',
'sudo apt-get autoremove',
'if [ -f /var/run/reboot-required ]; then echo; echo "=== SYSTEM REBOOT REQUIRED! ==="; read -p "Do you want to reboot now (y|n)? " -r; if [[ $REPLY =~ ^[Yy] ]]; then sudo shutdown -r now; fi; echo; fi',
'bash'
])
###
import os
import subprocess
from boto import ec2
from colorama import Fore, Back, Style
execfile(os.path.abspath(os.path.dirname(__file__)) + '/commons.py')
printTitle('EC2 INSTANCES UPDATE', 'Runs apt-get to update instances, excluding ones with tag NoUpdate=1')
print
connection = ec2.connect_to_region(REGION, profile_name = PROFILE_NAME)
# Ask which instances
instances = []
for instance in connection.get_only_instances():
if instance.state == 'running' and instance.tags.get('NoUpdate', '0') != '1' and instance.platform != 'windows':
instance_name = instance.tags.get('Name', '(no name)')
if questionYN('Do you want to update instance ' + Style.BRIGHT + '{}' + Style.RESET_ALL + ' ({})', instance_name, instance.id):
instances.append({ 'instance': instance, 'instance_name': instance_name })
if len(instances) > 0:
# Escape command
UPDATE_COMMAND = UPDATE_COMMAND.replace("\\", "\\\\").replace("'", "'\\''").replace('"', '\\"')
# SSH certificates
certificates = (SSH_CERTIFICATE if isinstance(SSH_CERTIFICATE, list) else [SSH_CERTIFICATE])
certificates_command = ' '.join(['-i "' + path + '"' for path in certificates])
# Start SSH on instances
for instance in instances:
instance_dns = instance['instance'].public_dns_name
instance_ip = instance['instance'].ip_address.strip()
terminal_title = instance['instance_name'] + ' UPDATE (' + (instance_ip or instance_dns) + ')'
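        # Open a terminal window running ssh against the instance and execute
        # the shell-escaped update command; fall back to the public DNS name
        # when no public IP is available.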
subprocess.call(TERMINAL + ' --title="' + terminal_title + '" --command=\'ssh -t ' + certificates_command + ' ubuntu@' + (instance_ip or instance_dns) + ' "' + UPDATE_COMMAND + '"\'', shell=True)
|
###############################################################################
# Copyright (C) 2008 Johann Haarhoff <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of Version 2 of the GNU General Public License as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
###############################################################################
#
# Originally written:
# 2008 Johann Haarhoff, <[email protected]>
# Modifications:
#
###############################################################################
#my modules
from xmlwriter import * #AUTO_REMOVED by make
class Icon():
def __init__(self,href=''):
self._href = href
def getIcon(self):
return self._href
def setIcon(self,href):
self._href = href
def toKML(self,out,indentstr = '\t'):
kmlwriter = BetterXMLWriter(out,indentstr)
kmlwriter.openElement("Icon")
kmlwriter.openElement("href")
kmlwriter.addData(str(self._href))
kmlwriter.closeLast()
kmlwriter.closeLast()
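# Illustrative sketch of the XML produced by Icon.toKML (assuming
# BetterXMLWriter emits plain nested elements; href value is hypothetical):
#
#     <Icon>
#         <href>http://example.com/icon.png</href>
#     </Icon>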
class hotspot():
def __init__(self,x=0,y=0,xunits="pixels",yunits="pixels"):
self._x = x
self._y = y
self._xunits = xunits
self._yunits = yunits
def getX(self):
return self._x
def setX(self,x):
self._x = x
def getY(self):
return self._y
def setY(self,y):
self._y = y
def getXunits(self):
return self._xunits
def setXunits(self,x):
		self._xunits = x
def getYunits(self):
return self._yunits
def setYunits(self,y):
		self._yunits = y
def toKML(self,out,indentstr = '\t'):
kmlwriter = BetterXMLWriter(out,indentstr)
kmlwriter.openElement("hotspot",{"x":str(self._x),"y":str(self._y),"xunits":str(self._xunits),"yunits":str(self._yunits)})
class ColorStyle():
def __init__(self,color="",colorMode=""):
self._color = color
self._colorMode = colorMode
def getColor(self):
return self._color
def setColor(self,color):
self._color = color
def getColorMode(self):
return self._colorMode
def setColorMode(self,colorMode):
self._colorMode = colorMode
class IconStyle(ColorStyle):
def __init__(self,color="",colorMode="",scale="",heading="",theIcon=Icon(),thehotspot=hotspot()):
self._color = color
self._colorMode = colorMode
self._scale = scale
self._heading = heading
self._Icon = theIcon
self._hotspot = thehotspot
def getScale(self):
return self._scale
def setScale(self,scale):
self._scale = scale
def getIcon(self):
return self._Icon
def setIcon(self,theIcon):
self._Icon = theIcon
def getHeading(self):
return self._heading
def setHeading(self,heading):
		self._heading = heading
def getHotspot(self):
return self._hotspot
def setHotspot(self,thehotspot):
self._hotspot = thehotspot
def toKML(self,out,indentstr = '\t'):
kmlwriter = BetterXMLWriter(out,indentstr)
kmlwriter.openElement("IconStyle")
kmlwriter.openElement("color")
kmlwriter.addData(str(self._color))
kmlwriter.closeLast()
kmlwriter.openElement("colorMode")
kmlwriter.addData(str(self._colorMode))
kmlwriter.closeLast()
kmlwriter.openElement("scale")
kmlwriter.addData(str(self._scale))
kmlwriter.closeLast()
kmlwriter.openElement("heading")
kmlwriter.addData(str(self._heading))
kmlwriter.closeLast()
self._Icon.toKML(out,indentstr)
self._hotspot.toKML(out,indentstr)
kmlwriter.closeLast()
class LabelStyle(ColorStyle):
def __init__(self,color="",colorMode="",scale=""):
self._color = color
self._colorMode = colorMode
self._scale = scale
def getScale(self):
return self._scale
def setScale(self,scale):
self._scale = scale
def toKML(self,out,indentstr = '\t'):
kmlwriter = BetterXMLWriter(out,indentstr)
kmlwriter.openElement("LabelStyle")
kmlwriter.openElement("color")
kmlwriter.addData(str(self._color))
kmlwriter.closeLast()
kmlwriter.openElement("colorMode")
kmlwriter.addData(str(self._colorMode))
kmlwriter.closeLast()
kmlwriter.openElement("scale")
kmlwriter.addData(str(self._scale))
kmlwriter.closeLast()
kmlwriter.closeLast()
class LineStyle(ColorStyle):
def __init__(self,color="",colorMode="",width=""):
self._color = color
self._colorMode = colorMode
self._width = width
def getWidth(self):
return self._width
def setWidth(self,width):
self._width = width
def toKML(self,out,indentstr = '\t'):
kmlwriter = BetterXMLWriter(out,indentstr)
kmlwriter.openElement("LineStyle")
kmlwriter.openElement("color")
kmlwriter.addData(str(self._color))
kmlwriter.closeLast()
kmlwriter.openElement("colorMode")
kmlwriter.addData(str(self._colorMode))
kmlwriter.closeLast()
kmlwriter.openElement("width")
kmlwriter.addData(str(self._width))
kmlwriter.closeLast()
kmlwriter.closeLast()
class PolyStyle(ColorStyle):
def __init__(self,color="",colorMode="",fill="",outline=""):
self._color = color
self._colorMode = colorMode
self._fill = fill
self._outline = outline
def getFill(self):
return self._fill
def setFill(self,fill):
self._fill = fill
def getOutline(self):
return self._outline
def setOutline(self,outline):
self._outline = outline
def toKML(self,out,indentstr = '\t'):
kmlwriter = BetterXMLWriter(out,indentstr)
kmlwriter.openElement("PolyStyle")
kmlwriter.openElement("color")
kmlwriter.addData(str(self._color))
kmlwriter.closeLast()
kmlwriter.openElement("colorMode")
kmlwriter.addData(str(self._colorMode))
kmlwriter.closeLast()
kmlwriter.openElement("fill")
kmlwriter.addData(str(self._fill))
kmlwriter.closeLast()
kmlwriter.openElement("outline")
kmlwriter.addData(str(self._outline))
kmlwriter.closeLast()
kmlwriter.closeLast()
|
"""Python Compatibility Utilities."""
from __future__ import absolute_import, unicode_literals
import numbers
import sys
from functools import wraps
from contextlib import contextmanager
try:
from importlib import metadata as importlib_metadata
except ImportError:
import importlib_metadata
from kombu.five import reraise
try:
from io import UnsupportedOperation
FILENO_ERRORS = (AttributeError, ValueError, UnsupportedOperation)
except ImportError: # pragma: no cover
# Py2
FILENO_ERRORS = (AttributeError, ValueError) # noqa
try:
from billiard.util import register_after_fork
except ImportError: # pragma: no cover
try:
from multiprocessing.util import register_after_fork # noqa
except ImportError:
register_after_fork = None # noqa
try:
from typing import NamedTuple
except ImportError:
import collections
def NamedTuple(name, fields):
"""Typed version of collections.namedtuple."""
return collections.namedtuple(name, [k for k, _ in fields])
_environment = None
def coro(gen):
"""Decorator to mark generator as co-routine."""
@wraps(gen)
def wind_up(*args, **kwargs):
it = gen(*args, **kwargs)
next(it)
return it
return wind_up
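# Illustrative sketch of how ``coro`` is typically used (hypothetical
# consumer, not part of this module): the decorator primes the generator
# so callers can ``send()`` to it immediately.
#
#     @coro
#     def adder(results):
#         while True:
#             x, y = (yield)
#             results.append(x + y)
#
#     acc = []
#     add = adder(acc)
#     add.send((2, 3))   # acc == [5]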
def _detect_environment():
# ## -eventlet-
if 'eventlet' in sys.modules:
try:
from eventlet.patcher import is_monkey_patched as is_eventlet
import socket
if is_eventlet(socket):
return 'eventlet'
except ImportError:
pass
# ## -gevent-
if 'gevent' in sys.modules:
try:
from gevent import socket as _gsocket
import socket
if socket.socket is _gsocket.socket:
return 'gevent'
except ImportError:
pass
return 'default'
def detect_environment():
"""Detect the current environment: default, eventlet, or gevent."""
global _environment
if _environment is None:
_environment = _detect_environment()
return _environment
def entrypoints(namespace):
"""Return setuptools entrypoints for namespace."""
return (
(ep, ep.load())
for ep in importlib_metadata.entry_points().get(namespace, [])
)
def fileno(f):
"""Get fileno from file-like object."""
if isinstance(f, numbers.Integral):
return f
return f.fileno()
def maybe_fileno(f):
"""Get object fileno, or :const:`None` if not defined."""
try:
return fileno(f)
except FILENO_ERRORS:
pass
@contextmanager
def nested(*managers): # pragma: no cover
"""Nest context managers."""
# flake8: noqa
exits = []
vars = []
exc = (None, None, None)
try:
try:
for mgr in managers:
exit = mgr.__exit__
enter = mgr.__enter__
vars.append(enter())
exits.append(exit)
yield vars
except:
exc = sys.exc_info()
finally:
while exits:
exit = exits.pop()
try:
if exit(*exc):
exc = (None, None, None)
except:
exc = sys.exc_info()
if exc != (None, None, None):
# Don't rely on sys.exc_info() still containing
# the right information. Another exception may
# have been raised and caught by an exit method
reraise(exc[0], exc[1], exc[2])
finally:
del(exc)
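# Illustrative sketch (hypothetical file names): ``nested`` mirrors the old
# ``contextlib.nested`` behaviour of entering several context managers at once:
#
#     with nested(open('a.txt'), open('b.txt')) as (a, b):
#         pass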
|
import logging
import re
from django.conf import settings
from django.contrib.sessions.middleware import SessionMiddleware
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse
from django.http.response import HttpResponse
from django.utils.translation import ugettext as _
from oscar.core.loading import get_class
from rest_framework import exceptions
from rest_framework import authentication
from oscarapi.basket.operations import (
request_contains_basket,
store_basket_in_session,
get_basket
)
from oscarapi.utils import (
get_domain,
session_id_from_parsed_session_uri,
get_session
)
from oscarapi import models
BasketMiddleware = get_class('basket.middleware', 'BasketMiddleware')
logger = logging.getLogger(__name__)
HTTP_SESSION_ID_REGEX = re.compile(
r'^SID:(?P<type>(?:ANON|AUTH)):(?P<realm>.*?):(?P<session_id>.+?)(?:[-:][0-9a-fA-F]+){0,2}$')
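# The optional trailing group strips up to two "-HEX" / ":HEX" suffixes that
# some clients append to the session id (see the doctests below).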
def parse_session_id(request):
"""
Parse a session id from the request.
>>> class request:
... META = {'HTTP_SESSION_ID': None}
>>>
>>> request.META['HTTP_SESSION_ID'] = 'SID:ANON:example.com:987171879'
>>> sorted(parse_session_id(request).items())
[('realm', 'example.com'), ('session_id', '987171879'), ('type', 'ANON')]
>>>
>>> request.META['HTTP_SESSION_ID'] = 'SID:AUTH:example.com:987171879'
>>> sorted(parse_session_id(request).items())
[('realm', 'example.com'), ('session_id', '987171879'), ('type', 'AUTH')]
>>>
>>> request.META['HTTP_SESSION_ID'] = 'SID:ANON:example.com:987171879-16EF'
>>> sorted(parse_session_id(request).items())
[('realm', 'example.com'), ('session_id', '987171879'), ('type', 'ANON')]
>>>
>>> request.META['HTTP_SESSION_ID'] = 'SID:ANON:example.com:98717-16EF:100'
>>> sorted(parse_session_id(request).items())
[('realm', 'example.com'), ('session_id', '98717'), ('type', 'ANON')]
>>>
>>> request.META['HTTP_SESSION_ID'] = 'SID:ANON::987171879'
>>> sorted(parse_session_id(request).items())
[('realm', ''), ('session_id', '987171879'), ('type', 'ANON')]
>>>
>>> request.META['HTTP_SESSION_ID'] = 'SID:ANON:example.com:923-thread1'
>>> sorted(parse_session_id(request).items())
[('realm', 'example.com'), ('session_id', '923-thread1'), ('type', 'ANON')]
>>>
>>> request.META['HTTP_SESSION_ID'] = 'SID:BULLSHIT:example.com:987171879'
>>> parse_session_id(request)
>>> request.META['HTTP_SESSION_ID'] = 'ENTIREGABRBAGE'
>>> parse_session_id(request)
>>> request.META['HTTP_SESSION_ID'] = 'SID:ANON:987171879'
>>> parse_session_id(request)
"""
unparsed_session_id = request.META.get('HTTP_SESSION_ID', None)
if unparsed_session_id is not None:
parsed_session_id = HTTP_SESSION_ID_REGEX.match(unparsed_session_id)
if parsed_session_id is not None:
return parsed_session_id.groupdict()
return None
def start_or_resume(session_id, session_type):
if session_type == 'ANON':
return get_session(session_id, raise_on_create=False)
return get_session(session_id, raise_on_create=True)
class IsApiRequest(object):
@staticmethod
def is_api_request(request):
path = request.path.lower()
api_root = reverse('api-root').lower()
return path.startswith(api_root)
class HeaderSessionMiddleware(SessionMiddleware, IsApiRequest):
"""
Implement session through headers:
http://www.w3.org/TR/WD-session-id
TODO:
Implement gateway protection, with permission options for usage of
header sessions. With that in place the api can be used for both trusted
and non trusted clients, see README.rst.
"""
def process_request(self, request):
"""
Parse the session id from the 'Session-Id: ' header when using the api.
"""
if self.is_api_request(request):
try:
parsed_session_uri = parse_session_id(request)
if parsed_session_uri is not None:
domain = get_domain(request)
if parsed_session_uri['realm'] != domain:
raise exceptions.NotAcceptable(
_('Can not accept cookie with realm %s on realm %s') % (
parsed_session_uri['realm'],
domain
)
)
session_id = session_id_from_parsed_session_uri(
parsed_session_uri)
request.session = start_or_resume(
session_id, session_type=parsed_session_uri['type'])
request.parsed_session_uri = parsed_session_uri
# since the session id is assigned by the CLIENT, there is
# no point in having csrf_protection. Session id's read
# from cookies, still need csrf!
request.csrf_processing_done = True
return None
except exceptions.APIException as e:
response = HttpResponse('{"reason": "%s"}' % e.detail,
content_type='application/json')
response.status_code = e.status_code
return response
return super(HeaderSessionMiddleware, self).process_request(request)
def process_response(self, request, response):
"""
Add the 'Session-Id: ' header when using the api.
"""
if self.is_api_request(request) \
and getattr(request, 'session', None) is not None \
and hasattr(request, 'parsed_session_uri'):
session_key = request.session.session_key
parsed_session_key = session_id_from_parsed_session_uri(
request.parsed_session_uri)
assert(session_key == parsed_session_key), \
'%s is not equal to %s' % (session_key, parsed_session_key)
response['Session-Id'] = \
'SID:%(type)s:%(realm)s:%(session_id)s' % (
request.parsed_session_uri)
return super(HeaderSessionMiddleware, self).process_response(
request, response)
class ApiGatewayMiddleWare(IsApiRequest):
"""
Protect the api gateway with a token.
"""
def process_request(self, request):
if self.is_api_request(request):
key = authentication.get_authorization_header(request)
if models.ApiKey.objects.filter(key=key).exists():
return None
logger.error('Invalid credentials provided for %s:%s by %s' % (
request.method,
request.path,
request.META.get('REMOTE_ADDR', '<unknown>')
))
raise PermissionDenied()
return None
class ApiBasketMiddleWare(BasketMiddleware, IsApiRequest):
"""
Use this middleware instead of Oscar's basket middleware if you
    want to mix the api with regular oscar views.
Oscar uses a cookie based session to store baskets for anonymous users, but
oscarapi can not do that, because we don't want to put the burden
of managing a cookie jar on oscarapi clients that are not websites.
"""
def process_request(self, request):
super(ApiBasketMiddleWare, self).process_request(request)
if self.is_api_request(request):
# we should make sure that any cookie baskets are turned into
# session baskets, since oscarapi uses only baskets from the
# session.
cookie_key = self.get_cookie_key(request)
basket = self.get_cookie_basket(
cookie_key,
request,
Exception("get_cookie_basket doesn't use the manager argument")
)
if basket is not None:
if request_contains_basket(request, basket):
pass
else:
store_basket_in_session(basket, request.session)
def process_response(self, request, response):
if self.is_api_request(request) and hasattr(request, 'user') and request.session:
# at this point we are sure a basket can be found in the session
# (if the session hasn't been destroyed by logging out),
# because it is enforced in process_request.
# We just have to make sure it is stored as a cookie, because it
# could have been created by oscarapi.
cookie_key = self.get_cookie_key(request)
basket = get_basket(request)
cookie = self.get_basket_hash(basket.id)
# Delete any surplus cookies
cookies_to_delete = getattr(request, 'cookies_to_delete', [])
            for cookie_to_delete in cookies_to_delete:
                response.delete_cookie(cookie_to_delete)
if not request.user.is_authenticated():
response.set_cookie(
cookie_key, cookie,
max_age=settings.OSCAR_BASKET_COOKIE_LIFETIME,
secure=settings.OSCAR_BASKET_COOKIE_SECURE, httponly=True)
return response
else:
return super(
ApiBasketMiddleWare, self).process_response(request, response)
|
#!/usr/bin/env python
# $Id: WebInputMixin.py,v 1.1 2006-09-06 09:50:10 skyostil Exp $
"""Provides helpers for Template.webInput(), a method for importing web
transaction variables in bulk. See the docstring of webInput for full details.
Meta-Data
================================================================================
Author: Mike Orr <[email protected]>
License: This software is released for unlimited distribution under the
terms of the MIT license. See the LICENSE file.
Version: $Revision: 1.1 $
Start Date: 2002/03/17
Last Revision Date: $Date: 2006-09-06 09:50:10 $
"""
__author__ = "Mike Orr <[email protected]>"
__revision__ = "$Revision: 1.1 $"[11:-2]
from Cheetah.Utils.Misc import useOrRaise
class NonNumericInputError(ValueError): pass
##################################################
## PRIVATE FUNCTIONS AND CLASSES
class _Converter:
"""A container object for info about type converters.
.name, string, name of this converter (for error messages).
.func, function, factory function.
.default, value to use or raise if the real value is missing.
.error, value to use or raise if .func() raises an exception.
"""
def __init__(self, name, func, default, error):
self.name = name
self.func = func
self.default = default
self.error = error
def _lookup(name, func, multi, converters):
"""Look up a Webware field/cookie/value/session value. Return
'(realName, value)' where 'realName' is like 'name' but with any
    conversion suffix stripped off. Applies numeric conversion and
single vs multi values according to the comments in the source.
"""
# Step 1 -- split off the conversion suffix from 'name'; e.g. "height:int".
# If there's no colon, the suffix is "". 'longName' is the name with the
# suffix, 'shortName' is without.
# XXX This implementation assumes "height:" means "height".
colon = name.find(':')
if colon != -1:
longName = name
shortName, ext = name[:colon], name[colon+1:]
else:
longName = shortName = name
ext = ''
# Step 2 -- look up the values by calling 'func'.
if longName != shortName:
values = func(longName, None) or func(shortName, None)
else:
values = func(shortName, None)
# 'values' is a list of strings, a string or None.
# Step 3 -- Coerce 'values' to a list of zero, one or more strings.
if values is None:
values = []
elif isinstance(values, str):
values = [values]
# Step 4 -- Find a _Converter object or raise TypeError.
try:
converter = converters[ext]
except KeyError:
fmt = "'%s' is not a valid converter name in '%s'"
tup = (ext, longName)
raise TypeError(fmt % tup)
# Step 5 -- if there's a converter func, run it on each element.
# If the converter raises an exception, use or raise 'converter.error'.
if converter.func is not None:
tmp = values[:]
values = []
for elm in tmp:
try:
elm = converter.func(elm)
except (TypeError, ValueError):
tup = converter.name, elm
errmsg = "%s '%s' contains invalid characters" % tup
elm = useOrRaise(converter.error, errmsg)
values.append(elm)
# 'values' is now a list of strings, ints or floats.
# Step 6 -- If we're supposed to return a multi value, return the list
# as is. If we're supposed to return a single value and the list is
# empty, return or raise 'converter.default'. Otherwise, return the
# first element in the list and ignore any additional values.
if multi:
return shortName, values
if len(values) == 0:
return shortName, useOrRaise(converter.default)
return shortName, values[0]
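# Illustrative sketch (hypothetical field function and converter table, not
# part of this module):
#
#     fields = {'height': '72'}
#     converters = {'': _Converter('string', None, '', ''),
#                   'int': _Converter('int', int, 0, 0)}
#     _lookup('height:int', lambda name, default: fields.get(name, default),
#             False, converters)
#     # -> ('height', 72)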
# vim: sw=4 ts=4 expandtab
|
from django import forms
from django.utils.translation import ugettext_lazy as _
from ..models.courses import CourseRegistration
from ..models.events import EventRegistration
from ..models.orderables import OrderableRegistration
from ..models.refundrequest import RefundRequest
from ..models.transaction import Transaction
from ..utils import comma_separated, currency, first_upper
from .fields import ReadonlyField
from .form import FormMixin
class RefundRequestBaseForm(FormMixin, forms.ModelForm):
def __init__(self, registration, *args, **kwargs):
super().__init__(*args, **kwargs)
self.registration = registration
self.readonly_fields = [
ReadonlyField(label=first_upper(registration.subject.subject_type.name), value=registration.subject.name)
]
if registration.subject.registration_type_participants:
if len(registration.all_participants) > 1:
label = _("Participants")
else:
label = _("Participant")
self.readonly_fields.append(
ReadonlyField(label=label, value=comma_separated(registration.all_participants))
)
elif registration.subject.registration_type_groups:
self.readonly_fields.append(ReadonlyField(label=_("Contact person"), value=registration.group.full_name))
if registration.group.name:
self.readonly_fields.append(ReadonlyField(label=_("Group name"), value=registration.group.name))
self.readonly_fields.append(
ReadonlyField(label=_("Overpaid amount"), value=currency(registration.payment_status.overpaid))
)
class RefundRequestForm(RefundRequestBaseForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.instance.registration = self.registration
self.instance.requested_by_id = self.registration.user_id
class Meta:
model = RefundRequest
fields = ["bank_account"]
class PaymentTransferForm(RefundRequestBaseForm):
instance: Transaction
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
valid_target_registration_ids = [
registration.id
for Registration in (CourseRegistration, EventRegistration, OrderableRegistration)
for registration in Registration.objects.filter(user_id=self.registration.user_id)
if registration.payment_status.amount_due
]
registration_choices = self.fields["target_registration"].widget.choices
registration_choices.queryset = registration_choices.queryset.filter(id__in=valid_target_registration_ids)
self.instance.source_registration = self.registration
self.instance.accounted_by_id = self.registration.user_id
self.instance.transaction_type = Transaction.TRANSFER
def clean(self):
self.cleaned_data = super().clean()
target_registration = self.cleaned_data.get("target_registration")
if target_registration:
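            # Transfer at most what was overpaid on the source registration
            # and at most what is still due on the target registration.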
self.instance.amount = min(
self.registration.payment_status.overpaid,
target_registration.payment_status.amount_due,
)
return self.cleaned_data
class Meta:
model = Transaction
fields = ["target_registration"]
class DonationForm(RefundRequestBaseForm):
instance: Transaction
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.instance.source_registration = self.registration
self.instance.accounted_by_id = self.registration.user_id
self.instance.transaction_type = Transaction.DONATION_TRANSFER
self.instance.amount = self.registration.payment_status.overpaid
class Meta:
model = Transaction
fields = []
|
"""
Views and functions for serving static files. These are only to be used
during development, and SHOULD NOT be used in a production setting.
"""
from __future__ import unicode_literals
import mimetypes
import os
import stat
import posixpath
import re
from django.http import (Http404, HttpResponse, HttpResponseRedirect,
HttpResponseNotModified, StreamingHttpResponse)
from django.template import loader, Template, Context, TemplateDoesNotExist
from django.utils.http import http_date, parse_http_date
from django.utils.six.moves.urllib.parse import unquote
from django.utils.translation import ugettext as _, ugettext_lazy
STREAM_CHUNK_SIZE = 4096
def serve(request, path, document_root=None, show_indexes=False):
"""
Serve static files below a given point in the directory structure.
To use, put a URL pattern such as::
(r'^(?P<path>.*)$', 'django.views.static.serve', {'document_root': '/path/to/my/files/'})
in your URLconf. You must provide the ``document_root`` param. You may
also set ``show_indexes`` to ``True`` if you'd like to serve a basic index
of the directory. This index view will use the template hardcoded below,
but if you'd like to override it, you can create a template called
``static/directory_index.html``.
"""
path = posixpath.normpath(unquote(path))
path = path.lstrip('/')
newpath = ''
for part in path.split('/'):
if not part:
# Strip empty path components.
continue
drive, part = os.path.splitdrive(part)
head, part = os.path.split(part)
if part in (os.curdir, os.pardir):
# Strip '.' and '..' in path.
continue
newpath = os.path.join(newpath, part).replace('\\', '/')
if newpath and path != newpath:
return HttpResponseRedirect(newpath)
fullpath = os.path.join(document_root, newpath)
if os.path.isdir(fullpath):
if show_indexes:
return directory_index(newpath, fullpath)
raise Http404(_("Directory indexes are not allowed here."))
if not os.path.exists(fullpath):
raise Http404(_('"%(path)s" does not exist') % {'path': fullpath})
# Respect the If-Modified-Since header.
statobj = os.stat(fullpath)
if not was_modified_since(request.META.get('HTTP_IF_MODIFIED_SINCE'),
statobj.st_mtime, statobj.st_size):
return HttpResponseNotModified()
content_type, encoding = mimetypes.guess_type(fullpath)
content_type = content_type or 'application/octet-stream'
f = open(fullpath, 'rb')
response = StreamingHttpResponse(iter(lambda: f.read(STREAM_CHUNK_SIZE), b''),
content_type=content_type)
response["Last-Modified"] = http_date(statobj.st_mtime)
if stat.S_ISREG(statobj.st_mode):
response["Content-Length"] = statobj.st_size
if encoding:
response["Content-Encoding"] = encoding
return response
DEFAULT_DIRECTORY_INDEX_TEMPLATE = """
{% load i18n %}
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="Content-type" content="text/html; charset=utf-8" />
<meta http-equiv="Content-Language" content="en-us" />
<meta name="robots" content="NONE,NOARCHIVE" />
<title>{% blocktrans %}Index of {{ directory }}{% endblocktrans %}</title>
</head>
<body>
<h1>{% blocktrans %}Index of {{ directory }}{% endblocktrans %}</h1>
<ul>
{% ifnotequal directory "/" %}
<li><a href="../">../</a></li>
{% endifnotequal %}
{% for f in file_list %}
<li><a href="{{ f|urlencode }}">{{ f }}</a></li>
{% endfor %}
</ul>
</body>
</html>
"""
template_translatable = ugettext_lazy("Index of %(directory)s")
def directory_index(path, fullpath):
try:
t = loader.select_template(['static/directory_index.html',
'static/directory_index'])
except TemplateDoesNotExist:
t = Template(DEFAULT_DIRECTORY_INDEX_TEMPLATE, name='Default directory index template')
files = []
for f in os.listdir(fullpath):
if not f.startswith('.'):
if os.path.isdir(os.path.join(fullpath, f)):
f += '/'
files.append(f)
c = Context({
'directory': path + '/',
'file_list': files,
})
return HttpResponse(t.render(c))
def was_modified_since(header=None, mtime=0, size=0):
"""
Was something modified since the user last downloaded it?
header
This is the value of the If-Modified-Since header. If this is None,
I'll just return True.
mtime
This is the modification time of the item we're talking about.
size
This is the size of the item we're talking about.
"""
try:
if header is None:
raise ValueError
matches = re.match(r"^([^;]+)(; length=([0-9]+))?$", header,
re.IGNORECASE)
header_mtime = parse_http_date(matches.group(1))
header_len = matches.group(3)
if header_len and int(header_len) != size:
raise ValueError
if int(mtime) > header_mtime:
raise ValueError
except (AttributeError, ValueError, OverflowError):
return True
return False
|
'''
Created on 27-07-2012
@author: jurek
'''
from hra_math.utils.utils import print_import_error
try:
from hra_core.introspection import create_class_object_with_suffix
from hra_core.introspection import get_method_arguments_count
from hra_math.time_domain.poincare_plot.filters.filter_utils import expand_to_real_filters_names # @IgnorePep8
from hra_math.time_domain.poincare_plot.filters.filter_utils import get_package_for_filter # @IgnorePep8
except ImportError as error:
print_import_error(__name__, error)
class FilterManager(object):
def __init__(self, _shift=1, _excluded_annotations=None, _filters=None):
self.__filters__ = []
self.__shift__ = _shift
if _filters is not None:
for _filter in _filters:
if _excluded_annotations and hasattr(_filter,
'excluded_annotations'):
_filter.excluded_annotations = _excluded_annotations
self.addFilter(_filter)
def run_filters(self, _data_vector):
"""
method which runs all filters as objects or as methods;
when a method has two parameters it returns two values also
when has a one parameter it returns one value
when none parameters none value has to be returned
"""
data_vector = _data_vector
for _filter in self.__filters__:
if _filter.arg_count == -1:
data_vector = _filter.filter(data_vector)
else:
data_vector = _filter(data_vector)
return data_vector
def addFilter(self, _filter_object_or_handler_or_names):
"""
filter entity could be passed as filter object itself, handler method
or a name of filter class
"""
arg_count = get_method_arguments_count(_filter_object_or_handler_or_names) # @IgnorePep8
# filter as a string
if isinstance(_filter_object_or_handler_or_names, str):
for filter_name in expand_to_real_filters_names(
_filter_object_or_handler_or_names):
                if filter_name is None:
return
                _module = get_package_for_filter(filter_name)
                filter_object = create_class_object_with_suffix(
                    _module,
filter_name,
_suffix='Filter')
filter_object = filter_object(_shift=self.__shift__)
filter_object.arg_count = -1
self.__filters__.append(filter_object)
# filter as a function
elif arg_count > -1:
filter_method = _filter_object_or_handler_or_names
filter_method.arg_count = arg_count
self.__filters__.append(filter_method)
# filter as an object
else:
filter_object = _filter_object_or_handler_or_names
filter_object.arg_count = -1
self.__filters__.append(filter_object)
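# Illustrative sketch (hypothetical filter name): a filter can be added as a
# class-name string, a plain function, or a ready-made filter object:
#
#     manager = FilterManager(_shift=1, _filters=['Annotation'])
#     manager.addFilter(lambda data_vector: data_vector)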
|
import sys
import os
import stat
import shutil
import unicodedata
import posixpath
if sys.version_info >= (3,):
from urllib.parse import quote as url_quote
unicode = str
else:
from urllib import quote as url_quote
__all__ = ['check_call', 'check_output', 'rmtree',
'b', 'posix', 'fsdecode', 'hfs_quote', 'compose', 'decompose']
try:
from subprocess import check_call
except ImportError:
# BBB for Python < 2.5
def check_call(*popenargs, **kwargs):
from subprocess import call
from subprocess import CalledProcessError
retcode = call(*popenargs, **kwargs)
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
if retcode:
raise CalledProcessError(retcode, cmd)
return retcode
try:
from subprocess import check_output
except ImportError:
# BBB for Python < 2.7
def check_output(*popenargs, **kwargs):
from subprocess import PIPE
from subprocess import Popen
from subprocess import CalledProcessError
if 'stdout' in kwargs:
raise ValueError(
'stdout argument not allowed, it will be overridden.')
process = Popen(stdout=PIPE, *popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise CalledProcessError(retcode, cmd)
return output
# Windows cannot delete read-only Git objects
def rmtree(path):
if sys.platform == 'win32':
def onerror(func, path, excinfo):
os.chmod(path, stat.S_IWRITE)
func(path)
shutil.rmtree(path, False, onerror)
else:
shutil.rmtree(path, False)
# Fake byte literals for Python < 2.6
def b(s, encoding='utf-8'):
if sys.version_info >= (3,):
return s.encode(encoding)
return s
# Convert path to POSIX path on Windows
def posix(path):
if sys.platform == 'win32':
return path.replace(os.sep, posixpath.sep)
return path
# Decode path from fs encoding under Python 3
def fsdecode(path):
if sys.version_info >= (3,):
if not isinstance(path, str):
if sys.platform == 'win32':
errors = 'strict'
else:
errors = 'surrogateescape'
return path.decode(sys.getfilesystemencoding(), errors)
return path
# HFS Plus quotes unknown bytes like so: %F6
def hfs_quote(path):
if isinstance(path, unicode):
raise TypeError('bytes are required')
try:
path.decode('utf-8')
except UnicodeDecodeError:
path = url_quote(path) # Not UTF-8
if sys.version_info >= (3,):
path = path.encode('ascii')
return path
# HFS Plus uses decomposed UTF-8
def compose(path):
if isinstance(path, unicode):
return unicodedata.normalize('NFC', path)
try:
path = path.decode('utf-8')
path = unicodedata.normalize('NFC', path)
path = path.encode('utf-8')
except UnicodeError:
pass # Not UTF-8
return path
# HFS Plus uses decomposed UTF-8
def decompose(path):
if isinstance(path, unicode):
return unicodedata.normalize('NFD', path)
try:
path = path.decode('utf-8')
path = unicodedata.normalize('NFD', path)
path = path.encode('utf-8')
except UnicodeError:
pass # Not UTF-8
return path
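# Illustrative sketch: NFC "é" (b'\xc3\xa9') decomposes to the NFD form
# b'e\xcc\x81' used by HFS Plus; compose() performs the reverse mapping.
#
#     decompose(b'\xc3\xa9')   # -> b'e\xcc\x81'
#     compose(b'e\xcc\x81')    # -> b'\xc3\xa9'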
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Update encrypted deploy password in Travis config file
"""
from __future__ import print_function
import base64
import json
import os
from getpass import getpass
import warnings
try:
import yaml
from cryptography.hazmat.primitives.serialization import load_pem_public_key
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric.padding import PKCS1v15
except ImportError:
warnings.warn('Please install cryptography and pyyaml: pip install cryptography pyyaml')
try:
from urllib import urlopen
except ImportError:
from urllib.request import urlopen
GITHUB_REPO = 'dryobates/proj1'
TRAVIS_CONFIG_FILE = os.path.join(
os.path.dirname(os.path.abspath(__file__)), '.travis.yml')
def load_key(pubkey):
"""Load public RSA key, with work-around for keys using
incorrect header/footer format.
Read more about RSA encryption with cryptography:
https://cryptography.io/latest/hazmat/primitives/asymmetric/rsa/
"""
try:
return load_pem_public_key(pubkey.encode(), default_backend())
except ValueError:
# workaround for https://github.com/travis-ci/travis-api/issues/196
pubkey = pubkey.replace('BEGIN RSA', 'BEGIN').replace('END RSA', 'END')
return load_pem_public_key(pubkey.encode(), default_backend())
def encrypt(pubkey, password):
"""Encrypt password using given RSA public key and encode it with base64.
The encrypted password can only be decrypted by someone with the
private key (in this case, only Travis).
"""
key = load_key(pubkey)
encrypted_password = key.encrypt(password, PKCS1v15())
return base64.b64encode(encrypted_password)
def fetch_public_key(repo):
"""Download RSA public key Travis will use for this repo.
Travis API docs: http://docs.travis-ci.com/api/#repository-keys
"""
keyurl = 'https://api.travis-ci.org/repos/{0}/key'.format(repo)
data = json.loads(urlopen(keyurl).read().decode())
if 'key' not in data:
errmsg = "Could not find public key for repo: {}.\n".format(repo)
errmsg += "Have you already added your GitHub repo to Travis?"
raise ValueError(errmsg)
return data['key']
def prepend_line(filepath, line):
"""Rewrite a file adding a line to its beginning.
"""
with open(filepath) as f:
lines = f.readlines()
lines.insert(0, line)
with open(filepath, 'w') as f:
f.writelines(lines)
def load_yaml_config(filepath):
with open(filepath) as f:
return yaml.load(f)
def save_yaml_config(filepath, config):
with open(filepath, 'w') as f:
yaml.dump(config, f, default_flow_style=False)
def update_travis_deploy_password(encrypted_password):
"""Update the deploy section of the .travis.yml file
to use the given encrypted password.
"""
config = load_yaml_config(TRAVIS_CONFIG_FILE)
config['deploy']['password'] = dict(secure=encrypted_password)
save_yaml_config(TRAVIS_CONFIG_FILE, config)
line = ('# This file was autogenerated and will overwrite'
' each time you run travis_pypi_setup.py\n')
prepend_line(TRAVIS_CONFIG_FILE, line)
def main(args):
public_key = fetch_public_key(args.repo)
password = args.password or getpass('PyPI password: ')
update_travis_deploy_password(encrypt(public_key, password.encode()))
print("Wrote encrypted password to .travis.yml -- you're ready to deploy")
if '__main__' == __name__:
import argparse
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--repo', default=GITHUB_REPO,
help='GitHub repo (default: %s)' % GITHUB_REPO)
parser.add_argument('--password',
help='PyPI password (will prompt if not provided)')
args = parser.parse_args()
main(args)
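# Typical invocation (the password is prompted for when not given):
#     python travis_pypi_setup.py --repo dryobates/proj1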
|
"""
verktyg_server.tests.test_sockets
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright:
(c) 2017 by Ben Mather.
:license:
BSD, see LICENSE for more details.
"""
import socket
import ssl
import unittest
import threading
from verktyg_server import make_inet_socket
from verktyg_server import make_adhoc_ssl_context
def _find_open_port(interface='localhost'):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Disable re-use of the port by other processes to avoid race condition.
# This won't prevent re-use of the socket by the current process.
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 0)
sock.bind((interface, 0))
port = sock.getsockname()[1]
sock.close()
return port
class InetSocketsTestCase(unittest.TestCase):
def test_make_inet_socket_hostname(self):
# TODO this is a bit racy.
port = _find_open_port('localhost')
# Call `make_inet_socket` to open a listening socket.
sock = make_inet_socket('localhost', port=port)
self.addCleanup(sock.close)
# Check that basic socket attributes match what we would expect.
self.assertIsInstance(sock, socket.socket)
actual_addr, actual_port = sock.getsockname()
self.assertEqual(actual_addr, '127.0.0.1')
self.assertEqual(actual_port, port)
actual_timeout = sock.gettimeout()
self.assertEqual(actual_timeout, None)
self.assertEqual(sock.family, socket.AF_INET)
self.assertEqual(sock.type, socket.SOCK_STREAM)
self.assertEqual(sock.proto, 0)
reuse_addr = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
self.assertEqual(reuse_addr, 1)
        # Everything after here is just us prodding the socket to make sure
# that it behaves as expected.
# Try to connect to it and send some stuff.
client_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(client_sock.close)
client_sock.connect(('127.0.0.1', port))
client_sock.sendall(b'hello!')
# Accept the incoming connection and read the data.
sock.listen(1)
server_conn, addr = sock.accept()
self.addCleanup(server_conn.close)
self.assertEqual(server_conn.recv(6), b'hello!')
def test_make_inet_socket_ssl(self):
port = _find_open_port('localhost')
ssl_context = make_adhoc_ssl_context()
sock = make_inet_socket(
'localhost', port=port, ssl_context=ssl_context,
)
self.addCleanup(sock.close)
# Check that basic socket attributes match what we would expect.
self.assertIsInstance(sock, ssl.SSLSocket)
actual_addr, actual_port = sock.getsockname()
self.assertEqual(actual_addr, '127.0.0.1')
self.assertEqual(actual_port, port)
actual_timeout = sock.gettimeout()
self.assertEqual(actual_timeout, None)
self.assertEqual(sock.family, socket.AF_INET)
self.assertEqual(sock.type, socket.SOCK_STREAM)
self.assertEqual(sock.proto, 0)
reuse_addr = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
self.assertEqual(reuse_addr, 1)
        # Everything after here is just us prodding the socket to make sure
# that it behaves as expected.
        # Unlike the two other inet socket examples, we can't rely on the
        # socket buffer to interleave client and server calls, because the
        # connection requires a TLS handshake that is hidden inside python.
def _client_thread():
client_ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLS)
client_ssl_context.verify_mode = ssl.CERT_NONE
client_ssl_context.check_hostname = False
client_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client_sock = client_ssl_context.wrap_socket(client_sock)
# TODO there is a lot more validation that we should do here.
with client_sock:
client_sock.connect(('127.0.0.1', port))
client_sock.sendall(b'hello!')
client_thread = threading.Thread(target=_client_thread)
client_thread.start()
self.addCleanup(client_thread.join)
# Accept the incoming connection and read the data.
sock.listen(1)
server_conn, addr = sock.accept()
self.addCleanup(server_conn.close)
self.assertEqual(server_conn.recv(6), b'hello!')
def test_make_inet_socket_ipv6(self):
# TODO this is a bit racy.
port = _find_open_port('::1')
# Call `make_inet_socket` to open a listening socket.
sock = make_inet_socket('::1', port=port)
self.addCleanup(sock.close)
# Check that basic socket attributes match what we would expect.
self.assertIsInstance(sock, socket.socket)
actual_addr, actual_port, _flow_info, _scope_id = sock.getsockname()
self.assertEqual(actual_addr, '::1')
self.assertEqual(actual_port, port)
actual_timeout = sock.gettimeout()
self.assertEqual(actual_timeout, None)
self.assertEqual(sock.family, socket.AF_INET6)
self.assertEqual(sock.type, socket.SOCK_STREAM)
self.assertEqual(sock.proto, 0)
reuse_addr = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
self.assertEqual(reuse_addr, 1)
        # Everything after here is just us prodding the socket to make sure
# that it behaves as expected.
# Try to connect to it and send some stuff.
client_sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
self.addCleanup(client_sock.close)
client_sock.connect(('::1', port))
client_sock.sendall(b'hello!')
# Accept the incoming connection and read the data.
sock.listen(1)
server_conn, addr = sock.accept()
self.addCleanup(server_conn.close)
self.assertEqual(server_conn.recv(6), b'hello!')
|
# Copyright 2014, 2015 SAP SE.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: //www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import re
import binascii
def allhexlify(data):
"""Hexlify given data into a string representation with hex values for all chars
Input like
'ab\x04ce'
becomes
'\x61\x62\x04\x63\x65'
"""
hx = binascii.hexlify(data)
return ''.join([r'\x' + o for o in re.findall('..', hx)])
def humanhexlify(data, n=-1):
"""Hexlify given data with 1 space char btw hex values for easier reading for humans
:param data: binary data to hexlify
:param n: If n is a positive integer then shorten the output of this function to n hexlified bytes.
Input like
'ab\x04ce'
becomes
'61 62 04 63 65'
With n=3 input like
data='ab\x04ce', n=3
becomes
'61 62 04 ...'
"""
tail = ' ...' if 0 < n < len(data) else ''
if tail:
data = data[:n]
hx = binascii.hexlify(data)
return ' '.join(re.findall('..', hx)) + tail
def dehexlify(hx):
"""Revert human hexlification - remove white spaces from hex string and convert into real values
Input like
'61 62 04 63 65'
becomes
'ab\x04ce'
"""
return binascii.unhexlify(hx.replace(' ', ''))
|
# Copyright (C) 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Memory backend for burrow.'''
import time
import burrow.backend
class Backend(burrow.backend.Backend):
'''This backend stores all data using native Python data
structures. It uses a linked list of objects to store data
(accounts, queues, and messages) with a dictionary as a secondary
index into this list. This is required so we can have O(1) appends,
deletes, and lookups by id, along with easy traversal starting
anywhere in the list.'''
def __init__(self, config):
super(Backend, self).__init__(config)
self.accounts = Accounts()
def delete_accounts(self, filters=None):
if filters is None or len(filters) == 0:
self.accounts.reset()
return
detail = self._get_detail(filters)
for account in self.accounts.iter(filters):
self.accounts.delete(account.id)
if detail is not None:
yield account.detail(detail)
def get_accounts(self, filters=None):
detail = self._get_detail(filters, 'id')
for account in self.accounts.iter(filters):
if detail is not None:
yield account.detail(detail)
def delete_queues(self, account, filters=None):
account = self.accounts.get(account)
if filters is None or len(filters) == 0:
account.queues.reset()
else:
detail = self._get_detail(filters)
for queue in account.queues.iter(filters):
account.queues.delete(queue.id)
if detail is not None:
yield queue.detail(detail)
if account.queues.count() == 0:
self.accounts.delete(account.id)
def get_queues(self, account, filters=None):
account = self.accounts.get(account)
detail = self._get_detail(filters, 'id')
for queue in account.queues.iter(filters):
if detail is not None:
yield queue.detail(detail)
@burrow.backend.wait_without_attributes
def delete_messages(self, account, queue, filters=None):
account, queue = self.accounts.get_queue(account, queue)
detail = self._get_message_detail(filters)
for message in queue.messages.iter(filters):
queue.messages.delete(message.id)
if detail is not None:
yield message.detail(detail)
if queue.messages.count() == 0:
self.accounts.delete_queue(account.id, queue.id)
@burrow.backend.wait_without_attributes
def get_messages(self, account, queue, filters=None):
account, queue = self.accounts.get_queue(account, queue)
detail = self._get_message_detail(filters, 'all')
for message in queue.messages.iter(filters):
if detail is not None:
yield message.detail(detail)
@burrow.backend.wait_with_attributes
def update_messages(self, account, queue, attributes, filters=None):
account, queue = self.accounts.get_queue(account, queue)
notify = False
ttl, hide = self._get_attributes(attributes)
detail = self._get_message_detail(filters)
for message in queue.messages.iter(filters):
if ttl is not None:
message.ttl = ttl
if hide is not None:
message.hide = hide
if hide == 0:
notify = True
if detail is not None:
yield message.detail(detail)
if notify:
self.notify(account.id, queue.id)
def create_message(self, account, queue, message, body, attributes=None):
account, queue = self.accounts.get_queue(account, queue, True)
ttl, hide = self._get_attributes(attributes, ttl=0, hide=0)
try:
message = queue.messages.get(message)
created = False
except burrow.NotFound:
message = queue.messages.get(message, True)
created = True
message.ttl = ttl
message.hide = hide
message.body = body
if created or hide == 0:
self.notify(account.id, queue.id)
return created
def delete_message(self, account, queue, message, filters=None):
account, queue = self.accounts.get_queue(account, queue)
message = queue.messages.get(message)
detail = self._get_message_detail(filters)
queue.messages.delete(message.id)
if queue.messages.count() == 0:
self.accounts.delete_queue(account.id, queue.id)
return message.detail(detail)
def get_message(self, account, queue, message, filters=None):
account, queue = self.accounts.get_queue(account, queue)
message = queue.messages.get(message)
detail = self._get_message_detail(filters, 'all')
return message.detail(detail)
def update_message(self, account, queue, message, attributes,
filters=None):
account, queue = self.accounts.get_queue(account, queue)
message = queue.messages.get(message)
ttl, hide = self._get_attributes(attributes)
detail = self._get_message_detail(filters)
if ttl is not None:
message.ttl = ttl
if hide is not None:
message.hide = hide
if hide == 0:
self.notify(account.id, queue.id)
return message.detail(detail)
def clean(self):
now = int(time.time())
for account in self.accounts.iter():
for queue in account.queues.iter():
notify = False
for message in queue.messages.iter(dict(match_hidden=True)):
if 0 < message.ttl <= now:
queue.messages.delete(message.id)
elif 0 < message.hide <= now:
message.hide = 0
notify = True
if notify:
self.notify(account.id, queue.id)
if queue.messages.count() == 0:
self.accounts.delete_queue(account.id, queue.id)
class Item(object):
    '''Object to represent elements in an indexed linked list.'''
def __init__(self, id=None):
self.id = id
self.next = None
self.prev = None
def detail(self, detail):
'''Format detail response for this item.'''
if detail == 'id':
return self.id
elif detail == 'all':
return dict(id=self.id)
return None
class IndexedList(object):
'''Class for managing an indexed linked list.'''
item_class = Item
def __init__(self):
self.first = None
self.last = None
self.index = {}
def add(self, item):
'''Add a new item to the list.'''
if self.first is None:
self.first = item
if self.last is not None:
item.prev = self.last
self.last.next = item
self.last = item
self.index[item.id] = item
return item
def count(self):
'''Return a count of the number of items in the list.'''
return len(self.index)
def delete(self, id):
'''Delete an item from the list by id.'''
item = self.index.pop(id)
if item.next is not None:
item.next.prev = item.prev
if item.prev is not None:
item.prev.next = item.next
if self.first == item:
self.first = item.next
if self.last == item:
self.last = item.prev
def get(self, id, create=False):
'''Get an item from the list by id.'''
if id in self.index:
return self.index[id]
elif create:
return self.add(self.item_class(id))
raise burrow.NotFound(self.item_class.__name__ + " not found")
def iter(self, filters=None):
'''Iterate through all items in the list, possibly filtered.'''
if filters is None:
marker = None
limit = None
else:
marker = filters.get('marker', None)
limit = filters.get('limit', None)
if marker is not None and marker in self.index:
item = self.index[marker].next
else:
item = self.first
if item is None:
raise burrow.NotFound(self.item_class.__name__ + " not found")
while item is not None:
yield item
if limit:
limit -= 1
if limit == 0:
break
item = item.next
def reset(self):
'''Remove all items in the list.'''
if self.count() == 0:
raise burrow.NotFound(self.item_class.__name__ + " not found")
self.first = None
self.last = None
self.index.clear()
class Account(Item):
'''A type of item representing an account.'''
def __init__(self, id=None):
super(Account, self).__init__(id)
self.queues = Queues()
class Accounts(IndexedList):
'''A type of list representing an account list.'''
item_class = Account
def delete_queue(self, account, queue):
'''Delete a queue within the given account.'''
account = self.get(account)
if account is not None:
account.queues.delete(queue)
if account.queues.count() == 0:
self.delete(account.id)
def get_queue(self, account, queue, create=False):
        '''Get a queue within the given account.'''
if account in self.index:
account = self.index[account]
elif create:
account = self.add(Account(account))
else:
raise burrow.NotFound('Account not found')
return account, account.queues.get(queue, create)
class Queue(Item):
'''A type of item representing a queue.'''
def __init__(self, id=None):
super(Queue, self).__init__(id)
self.messages = Messages()
class Queues(IndexedList):
'''A type of list representing a queue list.'''
item_class = Queue
class Message(Item):
'''A type of item representing a message.'''
def __init__(self, id=None):
super(Message, self).__init__(id)
self.ttl = 0
self.hide = 0
self.body = None
def detail(self, detail=None):
if detail == 'id':
return self.id
elif detail == 'body':
return self.body
ttl = self.ttl
if ttl > 0:
ttl -= int(time.time())
hide = self.hide
if hide > 0:
hide -= int(time.time())
if detail == 'attributes':
return dict(id=self.id, ttl=ttl, hide=hide)
elif detail == 'all':
return dict(id=self.id, ttl=ttl, hide=hide, body=self.body)
return None
class Messages(IndexedList):
'''A type of list representing a message list.'''
item_class = Message
def iter(self, filters=None):
if filters is None:
marker = None
limit = None
match_hidden = False
else:
marker = filters.get('marker', None)
limit = filters.get('limit', None)
match_hidden = filters.get('match_hidden', False)
if marker is not None and marker in self.index:
item = self.index[marker].next
else:
item = self.first
count = 0
while item is not None:
if match_hidden or item.hide == 0:
count += 1
yield item
if limit:
limit -= 1
if limit == 0:
break
item = item.next
if count == 0:
raise burrow.NotFound('Message not found')
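# Hedged usage sketch (not part of the original backend): a small illustration
# of the Messages container defined above, showing how the 'marker' and 'limit'
# filters page through the list. Names and values are illustrative.
def _example_message_listing():
    '''Illustrative only: list message ids after a marker, bounded by a limit.'''
    messages = Messages()
    for i in range(5):
        messages.get('msg%d' % i, True)  # create=True appends a new Message
    # Start scanning after 'msg1' and visit at most two items -> ['msg2', 'msg3'].
    return [m.id for m in messages.iter(dict(marker='msg1', limit=2))]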
|
# Main demo script. (see the __main__ section of the code)
# Import the relevant tools
import time # to measure performance
import numpy as np # standard array library
import torch
from torch.autograd import Variable
import torch.optim as optim
import matplotlib.pyplot as plt
# No need for a ~/.theanorc file anymore !
use_cuda = torch.cuda.is_available()
dtype = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor
dtypeint = torch.cuda.LongTensor if use_cuda else torch.LongTensor
from input_output import GridData, DisplayShoot
from shooting import _Hqp, _HamiltonianShooting, _HamiltonianCarrying
from data_attachment import _data_attachment
from curve import Curve
# Cost function and derivatives =================================================================
def _cost( q,p, xt_measure, connec, params ) :
"""
Returns a total cost, sum of a small regularization term and the data attachment.
.. math ::
        C(q_0, p_0) = 0.01 \cdot H(q_0, p_0) + 1 \cdot A(q_1, x_t)
Needless to say, the weights can be tuned according to the signal-to-noise ratio.
"""
s,r = params # Deformation scale, Attachment scale
q1 = _HamiltonianShooting(q,p,s)[0] # Geodesic shooting from q0 to q1
    # To compute a data attachment cost, we need to turn the set of vertices 'q1' into a measure.
q1_measure = Curve._vertices_to_measure( q1, connec )
attach_info = _data_attachment( q1_measure, xt_measure, r )
return [ .01* _Hqp(q, p, s) + 1* attach_info[0] , attach_info[1] ]
# The discrete backward scheme is automatically computed :
def _dcost_p( q,p, xt_measure, connec, params ) :
"The gradients of C wrt. p_0 is automatically computed."
return torch.autograd.grad( _cost(q,p, xt_measure, connec, params)[0] , p)
#================================================================================================
def VisualizationRoutine(Q0, params) :
def ShootingVisualization(q,p,grid) :
return _HamiltonianCarrying(q, p, grid, params[0])
return ShootingVisualization
def perform_matching( Q0, Xt, params, scale_momentum = 1, scale_attach = 1) :
"Performs a matching from the source Q0 to the target Xt, returns the optimal momentum P0."
(Xt_x, Xt_mu) = Xt.to_measure() # Transform the target into a measure once and for all
connec = torch.from_numpy(Q0.connectivity).type(dtypeint) ;
# Declaration of variable types -------------------------------------------------------------
# Cost is a function of 6 parameters :
# The source 'q', the starting momentum 'p',
# the target points 'xt_x', the target weights 'xt_mu',
# the deformation scale 'sigma_def', the attachment scale 'sigma_att'.
q0 = Variable(torch.from_numpy( Q0.points ).type(dtype), requires_grad=True)
p0 = Variable(torch.from_numpy( 0.*Q0.points ).type(dtype), requires_grad=True )
Xt_x = Variable(torch.from_numpy( Xt_x ).type(dtype), requires_grad=False)
Xt_mu = Variable(torch.from_numpy( Xt_mu ).type(dtype), requires_grad=False)
    # Unlike the original Theano version, no compilation step is needed here:
    # the 'dtype' defined above selects CPU or GPU execution at runtime.
def Cost(q,p, xt_x,xt_mu) :
return _cost( q,p, (xt_x,xt_mu), connec, params )
# Display pre-computing ---------------------------------------------------------------------
g0,cgrid = GridData() ; G0 = Curve(g0, cgrid )
g0 = Variable( torch.from_numpy( g0 ).type(dtype), requires_grad = False )
# Given q0, p0 and grid points grid0 , outputs (q1,p1,grid1) after the flow
# of the geodesic equations from t=0 to t=1 :
ShootingVisualization = VisualizationRoutine(q0, params)
# L-BFGS minimization -----------------------------------------------------------------------
from scipy.optimize import minimize
def matching_problem(p0) :
"Energy minimized in the variable 'p0'."
[c, info] = Cost(q0, p0, Xt_x, Xt_mu)
matching_problem.Info = info
if (matching_problem.it % 20 == 0):# and (c.data.cpu().numpy()[0] < matching_problem.bestc):
matching_problem.bestc = c.data.cpu().numpy()[0]
q1,p1,g1 = ShootingVisualization(q0, p0, g0)
q1 = q1.data.cpu().numpy()
p1 = p1.data.cpu().numpy()
g1 = g1.data.cpu().numpy()
Q1 = Curve(q1, connec) ; G1 = Curve(g1, cgrid )
DisplayShoot( Q0, G0, p0.data.cpu().numpy(),
Q1, G1, Xt, info.data.cpu().numpy(),
matching_problem.it, scale_momentum, scale_attach)
print('Iteration : ', matching_problem.it, ', cost : ', c.data.cpu().numpy(),
' info : ', info.data.cpu().numpy().shape)
matching_problem.it += 1
return c
matching_problem.bestc = np.inf ; matching_problem.it = 0 ; matching_problem.Info = None
optimizer = torch.optim.LBFGS(
[p0],
max_iter = 1000,
tolerance_change = .000001,
history_size = 10)
#optimizer = torch.optim.Adam(
# [p0])
time1 = time.time()
def closure():
optimizer.zero_grad()
c = matching_problem(p0)
c.backward()
return c
for it in range(100) :
optimizer.step(closure)
time2 = time.time()
return p0, matching_problem.Info
def matching_demo(source_file, target_file, params, scale_mom = 1, scale_att = 1) :
Q0 = Curve.from_file('data/' + source_file) # Load source...
Xt = Curve.from_file('data/' + target_file) # and target.
# Compute the optimal shooting momentum :
p0, info = perform_matching( Q0, Xt, params, scale_mom, scale_att)
if __name__ == '__main__' :
plt.ion()
plt.show()
# N.B. : this minimalistic toolbox showcases the Hamiltonian shooting theory...
# To get good-looking matching results on a consistent basis, you should
# use a data attachment term which takes into account the orientation of curve
# elements, such as "currents" and "varifold" kernel-formulas, not implemented here.
#matching_demo('australopithecus.vtk','sapiens.vtk', (.05,.2), scale_mom = .1,scale_att = .1)
matching_demo('amoeba_1.png', 'amoeba_2.png', (.1,0), scale_mom = .1,scale_att = 0)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
sphinx-autopackage-script
This script parses a directory tree looking for python modules and packages and
creates ReST files appropriately to create code documentation with Sphinx.
It also creates a modules index (named modules.<suffix>).
"""
# Copyright 2008 Société des arts technologiques (SAT), http://www.sat.qc.ca/
# Copyright 2010 Thomas Waldmann <tw AT waldmann-edv DOT de>
# All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import optparse
__builtins__.__openravepy_import_examples__ = 1 # necessary for openrave
# automodule options
OPTIONS = ['members',
'undoc-members',
# 'inherited-members', # disabled because there's a bug in sphinx
'show-inheritance',
]
INIT = '__init__.py'
def makename(package, module):
"""Join package and module with a dot."""
# Both package and module can be None/empty.
if package:
name = package
if module:
name += '.' + module
else:
name = module
return name
def write_file(name, text, opts):
"""Write the output file for module/package <name>."""
if opts.dryrun:
return
fname = os.path.join(opts.destdir, "%s.%s" % (name, opts.suffix))
if not opts.force and os.path.isfile(fname):
        print('File %s already exists, skipping.' % fname)
else:
        print('Creating file %s.' % fname)
f = open(fname, 'w')
f.write(text)
f.close()
def format_heading(level, text):
"""Create a heading of <level> [1, 2 or 3 supported]."""
underlining = ['=', '-', '~', ][level-1] * len(text)
return '%s\n%s\n\n' % (text, underlining)
def format_directive(module, package=None):
"""Create the automodule directive and add the options."""
directive = '.. automodule:: %s\n' % makename(package, module)
for option in OPTIONS:
directive += ' :%s:\n' % option
return directive
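# Hedged illustration (not part of the original script): for hypothetical
# arguments module='utils' and package='mypkg', format_directive() above
# returns ReST of the form:
#
#   .. automodule:: mypkg.utils
#       :members:
#       :undoc-members:
#       :show-inheritance: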
def create_module_file(package, module, opts):
"""Build the text of the file and write the file."""
text = format_heading(1, '%s Module' % module)
text += format_heading(2, ':mod:`%s` Module' % module)
text += format_directive(module, package)
write_file(makename(package, module), text, opts)
def create_package_file(root, master_package, subroot, py_files, opts, subs):
"""Build the text of the file and write the file."""
package = os.path.split(root)[-1]
text = ".. _package-%s:\n\n"%package
# this adds too many headers since __init__ also adds a Package header
#text += format_heading(1, '%s Package' %package)
file_links = []
# add each package's module
for py_file in py_files:
if shall_skip(os.path.join(root, py_file)):
continue
is_package = py_file == INIT
py_file = os.path.splitext(py_file)[0]
py_path = makename(subroot, py_file)
if is_package:
heading = ':mod:`%s` Package' % package
else:
heading = ':mod:`%s` Module' % py_file
filetext = format_heading(2, heading)
filetext += format_directive(is_package and subroot or py_path, master_package)
filetext += '\n'
if opts.sepfiles and not is_package:
file_links.append(py_path)
write_file(py_path, filetext, opts)
else:
text += filetext
if len(file_links) > 0:
text += '.. toctree::\n\n'
for py_path in file_links:
text += ' %s\n' % (py_path)
text += '\n'
# build a list of directories that are packages (they contain an INIT file)
subs = [sub for sub in subs if os.path.isfile(os.path.join(root, sub, INIT))]
    # if there are package directories, add a TOC for these subpackages
if subs:
text += '.. toctree::\n\n'
for sub in subs:
text += ' %s.%s\n' % (makename(master_package, subroot), sub)
text += '\n'
write_file(makename(master_package, subroot), text, opts)
def create_modules_toc_file(master_package, modules, opts, name='modules'):
"""
Create the module's index.
"""
text = format_heading(1, '%s Modules' % opts.header)
text += '.. toctree::\n'
text += ' :maxdepth: %s\n\n' % opts.maxdepth
modules.sort()
prev_module = ''
for module in modules:
        # skip the module if it is a submodule of the previous entry
if module.startswith(prev_module + '.'):
continue
prev_module = module
text += ' %s\n' % module
write_file(name, text, opts)
def shall_skip(module):
"""
Check if we want to skip this module.
"""
# skip it, if there is nothing (or just \n or \r\n) in the file
return os.path.getsize(module) < 3
def recurse_tree(path, excludes, opts):
"""
Look for every file in the directory tree and create the corresponding
ReST files.
"""
# use absolute path for root, as relative paths like '../../foo' cause
# 'if "/." in root ...' to filter out *all* modules otherwise
path = os.path.abspath(path)
    # check if the base directory is a package and get its name
if INIT in os.listdir(path):
package_name = path.split(os.path.sep)[-1]
else:
package_name = None
toc = []
tree = os.walk(path, False)
for root, subs, files in tree:
# keep only the Python script files
py_files = sorted([f for f in files if os.path.splitext(f)[1] in ['.so','.py']])
if INIT in py_files:
py_files.remove(INIT)
py_files.insert(0, INIT)
# remove hidden ('.') and private ('_') directories
subs = sorted([sub for sub in subs if sub[0] not in ['.', '_']])
# check if there are valid files to process
# TODO: could add check for windows hidden files
if "/." in root or "/_" in root \
or not py_files \
or is_excluded(root, excludes):
continue
if INIT in py_files:
# we are in package ...
if (# ... with subpackage(s)
subs
or
# ... with some module(s)
len(py_files) > 1
or
# ... with a not-to-be-skipped INIT file
not shall_skip(os.path.join(root, INIT))
):
subroot = root[len(path):].lstrip(os.path.sep).replace(os.path.sep, '.')
create_package_file(root, package_name, subroot, py_files, opts, subs)
toc.append(makename(package_name, subroot))
elif root == path:
# if we are at the root level, we don't require it to be a package
for py_file in py_files:
if not shall_skip(os.path.join(path, py_file)):
module = os.path.splitext(py_file)[0]
create_module_file(package_name, module, opts)
toc.append(makename(package_name, module))
# create the module's index
if not opts.notoc:
create_modules_toc_file(package_name, toc, opts)
def normalize_excludes(rootpath, excludes):
"""
Normalize the excluded directory list:
* must be either an absolute path or start with rootpath,
* otherwise it is joined with rootpath
* with trailing slash
"""
sep = os.path.sep
f_excludes = []
for exclude in excludes:
if not os.path.isabs(exclude) and not exclude.startswith(rootpath):
exclude = os.path.join(rootpath, exclude)
if not exclude.endswith(sep):
exclude += sep
f_excludes.append(exclude)
return f_excludes
def is_excluded(root, excludes):
"""
Check if the directory is in the exclude list.
    Note: by having trailing slashes, we avoid common prefix issues, e.g. an
    exclude of "foo" accidentally also excluding "foobar".
"""
sep = os.path.sep
if not root.endswith(sep):
root += sep
for exclude in excludes:
if root.startswith(exclude):
return True
return False
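# Hedged sketch (not part of the original script): a small check, with
# hypothetical POSIX paths, of how the trailing separator added by
# normalize_excludes keeps an exclude of 'foo' from also matching 'foobar'.
def _exclude_examples():
    """Illustrative only."""
    excludes = normalize_excludes('/proj', ['foo'])   # -> ['/proj/foo/']
    assert is_excluded('/proj/foo/sub', excludes)     # inside the excluded dir
    assert not is_excluded('/proj/foobar', excludes)  # shares only a prefix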
def main():
"""
Parse and check the command line arguments.
"""
parser = optparse.OptionParser(usage="""usage: %prog [options] <package path> [exclude paths, ...]
Note: By default this script will not overwrite already created files.""")
parser.add_option("-n", "--doc-header", action="store", dest="header", help="Documentation Header (default=Project)", default="Project")
parser.add_option("-d", "--dest-dir", action="store", dest="destdir", help="Output destination directory", default="")
parser.add_option("-s", "--suffix", action="store", dest="suffix", help="module suffix (default=txt)", default="txt")
parser.add_option("-m", "--maxdepth", action="store", dest="maxdepth", help="Maximum depth of submodules to show in the TOC (default=4)", type="int", default=4)
parser.add_option("-r", "--dry-run", action="store_true", dest="dryrun", help="Run the script without creating the files")
parser.add_option("-f", "--force", action="store_true", dest="force", help="Overwrite all the files")
parser.add_option("-t", "--no-toc", action="store_true", dest="notoc", help="Don't create the table of content file")
parser.add_option("--sep-files", action="store_true", dest="sepfiles", help="Create separate files for each individual file in a package")
(opts, args) = parser.parse_args()
if not args:
parser.error("package path is required.")
else:
rootpath, excludes = args[0], args[1:]
if os.path.isdir(rootpath):
# check if the output destination is a valid directory
if opts.destdir and os.path.isdir(opts.destdir):
excludes = normalize_excludes(rootpath, excludes)
recurse_tree(rootpath, excludes, opts)
else:
                print('%s is not a valid output destination directory.' % opts.destdir)
else:
            print('%s is not a valid directory.' % rootpath)
if __name__ == '__main__':
main()
|
class RoutingTable(object):
UTORRENT_NODE_ID = b'\xeb\xff6isQ\xffJ\xec)\xcd\xba\xab\xf2\xfb\xe3F|\xc2g'
UTORRENT_NODE_IP = '82.221.103.244' # 'router.utorrent.com',
UTORRENT_NODE_PORT = 6881
UTORRENT_NODE_ADDR = (UTORRENT_NODE_IP, UTORRENT_NODE_PORT)
BITTORRENT_NODE_ID = b'2\xf5NisQ\xffJ\xec)\xcd\xba\xab\xf2\xfb\xe3F|\xc2g'
BITTORRENT_NODE_IP = '67.215.246.10' # 'router.bittorrent.com'
BITTORRENT_NODE_PORT = 6881
BITTORRENT_NODE_ADDR = (BITTORRENT_NODE_IP, BITTORRENT_NODE_PORT)
TRANSMISSION_NODE_ID = b'\x8d\xb1S*D+\xb3\xf8\xc4b\xd7\xeb\x1c\xad%\xdeXC\xe5\xd8'
TRANSMISSION_NODE_IP = '91.121.59.153' # 'dht.transmissionbt.com'
TRANSMISSION_NODE_PORT = 6881
TRANSMISSION_NODE_ADDR = (TRANSMISSION_NODE_IP, TRANSMISSION_NODE_PORT)
INITIAL_ROUTING_TABLE = {UTORRENT_NODE_ID: UTORRENT_NODE_ADDR,
BITTORRENT_NODE_ID: BITTORRENT_NODE_ADDR,
TRANSMISSION_NODE_ID: TRANSMISSION_NODE_ADDR}
@staticmethod
def save(routing_table: dict, path):
with open(path, 'wb') as f:
f.write(str(routing_table).encode())
@staticmethod
def load(path):
with open(path, 'rb') as f:
return eval(f.read())
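# Hedged sketch (not part of the original class): RoutingTable.load() calls
# eval() on the file contents, which will execute arbitrary code if the file
# has been tampered with. Since save() writes the plain repr() of a dict of
# bytes -> (host, port) tuples, ast.literal_eval can parse the same data while
# only accepting Python literals. The function name below is illustrative.
def load_routing_table_safely(path):
    """Parse a routing table written by RoutingTable.save without using eval()."""
    import ast
    with open(path, 'rb') as f:
        return ast.literal_eval(f.read().decode())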
|
# -*- coding: utf-8 -*-
import ujson as json
import pandas as pd
import numpy as np
import re
import smtplib
import os
import io
import email
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.mime.application import MIMEApplication
from jinja2 import Template
import pdfkit
import urllib
from agri_med_backend.constants import *
from agri_med_backend import cfg
from agri_med_backend import util
from agri_med_backend.util import Error
def upload_data_handler(params):
    # name
    name = params.get('name', '')
    # address
    address = params.get('address', '')
    # email
    email_address = params.get('email', '')
    # crop
    crop = params.get('crop', '')
    # variety
    variety = params.get('variety', '')
    # previous crop
    before = params.get('before', '')
    # days since planting
    day = params.get('day', '')
    # days since disease onset
    sick_day = params.get('sickDay', '')
    # planted area
    acre = params.get('acre', '')
    # diseased area
    sick_acre = params.get('sickAcre', '')
    # additional notes
    comment = params.get('comment', '')
    # whole-field view photos
    whole_view = params.get('wholeView', [])
    # single-plant view photos
    single_view = params.get('singleView', [])
    # affected-part photos
    feature_view = params.get('featureView', [])
    # root photos
    root_view = params.get('rootView', [])
content, filename = _format_email(name, address, email_address, crop, variety, before, day, sick_day, acre, sick_acre, comment, whole_view, single_view, feature_view, root_view)
error = _send_email(content, filename, email_address)
if error:
return {'success': False, 'errorMsg': error}
return {'success': True, 'errorMsg': ''}
def _format_email(name, address, email_address, crop, variety, before, day, sick_day, acre, sick_acre, comment, whole_view, single_view, feature_view, root_view):
    with io.open('templates/template.html', 'r', encoding='utf-8') as f:
        the_str = f.read()
    the_template = Template(the_str)
the_map = {
'name': name,
'address': address,
'email': email_address,
'crop': crop,
'variety': variety,
'before': before,
'day': day,
'sick_day': sick_day,
'acre': acre,
'sick_acre': sick_acre,
'comment': comment,
'whole_view': whole_view,
'single_view': single_view,
'feature_view': feature_view,
'root_view': root_view,
}
content = the_template.render(the_map).strip()
filename = cfg.config.get('pdf_dir', '/data/agri_med/pdf') + '/' + util.gen_random_string() + '.pdf'
pdfkit.from_string(content, filename)
return content, filename
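# Hedged note (not in the original module): pdfkit.from_string shells out to
# the wkhtmltopdf binary, which must be installed. If it is not on PATH, a
# configuration object can be passed explicitly; the binary path below is
# illustrative.
def _render_pdf_with_explicit_binary(content, filename,
                                     wkhtmltopdf_path='/usr/local/bin/wkhtmltopdf'):
    '''Illustrative only: render the report through an explicit wkhtmltopdf path.'''
    pdf_config = pdfkit.configuration(wkhtmltopdf=wkhtmltopdf_path)
    pdfkit.from_string(content, filename, configuration=pdf_config)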
def _send_email(content, filename, email_address, mail_list_cfg='default_mail_list'):
    if not content:
        return None
    error = None
    title = '[醫農] 您所回報的植物病蟲害資訊已準備好了'
    msg = MIMEMultipart()
    msg['From'] = '[email protected]'
    msg['To'] = email_address
    msg['Subject'] = title
    # Attach the rendered report as the message body.
    msg.attach(MIMEText(content, 'html', 'utf-8'))
    # Attach the generated PDF. The original code created an unused MIMEBase
    # part and never added any payload; attaching the report and its PDF is an
    # assumed intent.
    with open(filename, 'rb') as f:
        pdf_part = MIMEApplication(f.read(), _subtype='pdf')
    pdf_part.add_header('Content-Disposition', 'attachment', filename=os.path.basename(filename))
    msg.attach(pdf_part)
    try:
        s = smtplib.SMTP(cfg.config.get('smtp_host', 'localhost'))
        s.sendmail(msg['From'], cfg.config.get(mail_list_cfg, ['[email protected]']), msg.as_string())
        s.quit()
    except Exception as e:
        error = Error(S_ERR, 'failed to send email: e: %s' % (e))
        cfg.logger.error(error)
    return error
|