#!/usr/bin/python
# Display memory/swap/cpu usage and system uptime on Linux
# Copyright (C) 2015 Davide Madrisan <davide.madrisan.gmail.com>
from __future__ import division
import glob, os, platform, socket, sys
from datetime import timedelta
__author__ = "Davide Madrisan"
__copyright__ = "Copyright 2015 Davide Madrisan"
__license__ = "GPLv3"
__version__ = "3"
__email__ = "davide.madrisan.gmail.com"
__status__ = "stable"
def _kernel_version():
release = platform.release()
if not release: return None
item = release.split('.')
majVersion = int(item[0])
minVersion = int(item[1])
patchVersion = int(item[2].split('-')[0])
return (((majVersion) << 16) + ((minVersion) << 8) + (patchVersion))
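# Illustrative sketch (not part of the original script): on a hypothetical
# '2.6.32-431.el6.x86_64' kernel, platform.release() yields major=2, minor=6,
# patch=32, so _kernel_version() returns (2 << 16) + (6 << 8) + 32 == 132640.
# The same packing explains the 132635 constant (kernel 2.6.27) used in check_memory().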
def _readfile(filename, abort_on_error=True, header=False):
if not os.path.isfile(filename):
if abort_on_error:
die(1, 'No such file: ' + filename)
else:
warning('No such file: ' + filename)
return None
fd = open(filename, 'r')
try:
if header:
content = fd.readlines()[1:]
else:
content = fd.readlines()
except:
die(1, 'Error reading the file ' + filename)
return content
def _perc(value, ratio, complement=False):
percentage = 100 * value / ratio
if complement:
return 100 - percentage
else:
return percentage
def _sizeof_fmt(num, factor=1024.0, skip=1, suffix='B'):
units = ['', 'k','m','g','t']
for unit in units[skip:]:
if abs(num) < factor:
return "%3.1f%s%s" % (num, unit, suffix)
num /= factor
return "%.1f%s%s" % (num, 'p', suffix)
def _cpu_count_logical():
"""Return the number of logical CPUs in the system."""
try:
# get the number of online cores
return os.sysconf("SC_NPROCESSORS_ONLN")
except ValueError:
# as a second fallback we try to parse /proc/cpuinfo
num = 0
f = open('/proc/cpuinfo', 'rb')
try:
for line in f:
if line.lower().startswith('processor'):
num += 1
except:
pass
return num
def _cpu_offline():
"""Return the number of CPU offline"""
PATH_SYS_SYSTEM = "/sys/devices/system"
PATH_SYS_CPU = PATH_SYS_SYSTEM + "/cpu"
# note that .../cpu0/online may not exist
fonline = glob.glob(PATH_SYS_CPU + '/cpu*/online')
num = 0
for f in fonline:
fp = open(f)
try:
online = fp.readline().strip()
if online == '0': num += 1  # sysfs 'online' contains '0' when the CPU is offline
except:
pass
return num
def check_cpu():
"""Return Total CPU MHz, current utilization and number of logical CPU"""
CPUMzTotal = CPUUtilization = 0
cpu_physical_id = {}
cpuinfo = _readfile('/proc/cpuinfo')
for line in cpuinfo:
cols = line.split(':')
if cols[0].strip() == 'cpu MHz':
CPUMzTotal += int(cols[1].split('.')[0])
elif cols[0].strip() == 'physical id':
cpu_physical_id[cols[1].strip()] = 'cpu id'
CPUsockets = len(cpu_physical_id)
cpustat = _readfile('/proc/stat')
for line in cpustat:
cols = line.split()
if cols[0] == 'cpu':
(User, Nice, Sys, Idle, IOWait, IRQ, SoftIRQ, Steal) = (
int(cols[i]) for i in range(1,9))
UserTot = User + Nice
SystemTot = Sys + IRQ + SoftIRQ
Ratio = UserTot + SystemTot + Idle + IOWait + Steal
CPUUtilization = _perc(Idle, Ratio, complement=True)
CPUs = _cpu_count_logical()
CPUsOffline = _cpu_offline()
return (CPUMzTotal, CPUUtilization, CPUsockets, CPUs, CPUsOffline)
def check_memory():
"""Return Total Memory, Memory Used and percent Utilization"""
MemAvailable = None
MemHugePagesTotal = MemAnonHugePages = 0
MemHugePageSize = 0
meminfo = _readfile('/proc/meminfo')
for line in meminfo:
cols = line.split()
if cols[0] == 'Active(file):' : MemActiveFile = int(cols[1])
elif cols[0] == 'MemAvailable:' : MemAvailable = int(cols[1])
elif cols[0] == 'Cached:' : MemCached = int(cols[1])
elif cols[0] == 'MemFree:' : MemFree = int(cols[1])
elif cols[0] == 'Inactive(file):' : MemInactiveFile = int(cols[1])
elif cols[0] == 'MemTotal:' : MemTotal = int(cols[1])
elif cols[0] == 'SReclaimable:' : MemSlabReclaimable = int(cols[1])
elif cols[0] == 'Hugepagesize:' : MemHugePageSize = int(cols[1])
elif cols[0] == 'HugePages_Total:': MemHugePagesTotal = int(cols[1])
elif cols[0] == 'HugePages_Free:' : MemHugePagesFree = int(cols[1])
elif cols[0] == 'AnonHugePages:' : MemAnonHugePages = int(cols[1])
if not MemAvailable:
kernelVersion = _kernel_version()
if kernelVersion < 132635: # 2.6.27
MemAvailable = MemFree
else:
MemMinFree = int(_readfile('/proc/sys/vm/min_free_kbytes')[0])
MemWatermarkLow = MemMinFree * 5 / 4
MemAvailable = MemFree \
- MemWatermarkLow + MemInactiveFile + MemActiveFile \
- min((MemInactiveFile + MemActiveFile) / 2, MemWatermarkLow) \
+ MemSlabReclaimable \
- min(MemSlabReclaimable / 2, MemWatermarkLow)
if MemAvailable < 0: MemAvailable = 0
MemUsed = MemTotal - MemFree - MemCached
MemUsedPerc = _perc(MemAvailable, MemTotal, complement=True)
if not MemHugePagesTotal:
MemHugePagesTotal = MemHugePagesUsage = MemHugePagesUsagePerc = 0
else:
MemHugePagesUsage = MemHugePagesTotal - MemHugePagesFree
MemHugePagesUsagePerc = (
_perc(MemHugePagesUsage, MemHugePagesTotal))
return (MemTotal, MemUsed, MemUsedPerc, MemAvailable,
MemHugePagesTotal, MemHugePagesUsage, MemHugePagesUsagePerc,
MemAnonHugePages, MemHugePageSize)
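# Worked example of the MemAvailable fallback above (hypothetical numbers, not from
# the original script): with MemFree=100000, Inactive(file)=50000, Active(file)=30000,
# SReclaimable=20000 and min_free_kbytes=16000 (all kB), the low watermark is
# 16000 * 5 / 4 = 20000 and the estimate becomes
# 100000 - 20000 + 80000 - min(40000, 20000) + 20000 - min(10000, 20000) = 150000 kB.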
def check_swap():
"""Return Total and Used Swap in bytes and percent Utilization"""
# example:
# Filename Type Size Used Priority
# /dev/dm-0 partition 8388604 11512 -1
swapinfo = _readfile('/proc/swaps', abort_on_error=False, header=True)
SwapTotal = SwapUsed = SwapUsedPerc = 0
if swapinfo:
for line in swapinfo:
cols = line.rstrip().split()
if not cols[0].startswith('/'):
continue
SwapTotal += int(cols[2])
SwapUsed += int(cols[3])
SwapUsedPerc = _perc(SwapUsed, SwapTotal)
return (SwapTotal, SwapUsed, SwapUsedPerc)
def check_uptime():
uptime = _readfile('/proc/uptime')
uptime_secs = float(uptime[0].split()[0])
updays = int(uptime_secs / (60 * 60 * 24))
upminutes = int(uptime_secs / 60)
uphours = int(upminutes / 60) % 24
upminutes = upminutes % 60
return (str(timedelta(seconds = uptime_secs)), updays, uphours, upminutes)
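# Illustrative example (not from the original script): for an uptime of 93784.5
# seconds (1 day, 2h 3m 4.5s), check_uptime() returns
# ('1 day, 2:03:04.500000', 1, 2, 3).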
def die(exitcode, message):
"Print error and exit with errorcode"
sys.stderr.write('pyoocs: Fatal error: %s\n' % message)
sys.exit(exitcode)
def warning(message):
"Print a warning message"
sys.stderr.write('Warning: %s\n' % message)
def main():
# CSVOUTPUT=1 ./syscheck.py --> Output in CSV Format
EnvCSVOutput = os.environ.get('CSVOUTPUT', '')
# Hostname and FQDN
Hostname = socket.gethostname()
FQDN = socket.getfqdn()
# CPU utilization
CPUMzTotal, CPUUtilization, CPUsockets, CPUs, CPUsOffline = check_cpu()
# Memory and Huge Memory utilization
(MemTotal, MemUsed, MemoryUsedPerc, MemAvailable,
MemHugePagesTotal, MemHugePagesUsage,
MemHugePagesUsagePerc, MemAnonHugePages,
MemHugePageSize) = check_memory()
# Swap utilization
SwapTotal, SwapUsed, SwapUsedPerc = check_swap()
# System Uptime
SystemUptime, UpDays, UpHours, UpMinutes = check_uptime()
if EnvCSVOutput:
print "Hostname,FQDN,\
CPU Total (MHz),CPU Utilization,CPU Sockets,CPUs,Offline CPUs,\
Memory Total (kB),Memory Used (%%),Mem Available (kB),\
Total Huge Pages,Huge Pages Usage (%%),Anonymous Huge Pages (kB),\
Total Swap (kB),Swap Usage (%%),Uptime (days)\n\
%s,%s,%d,%.2f,%d,%d,%d,%d,%.2f,%d,%d,%.2f,%d,%d,%.2f,%s" % (
Hostname, FQDN,
CPUMzTotal, CPUUtilization, CPUsockets, CPUs, CPUsOffline,
MemTotal, MemoryUsedPerc, MemAvailable,
MemHugePagesTotal, MemHugePagesUsagePerc, MemAnonHugePages,
SwapTotal, SwapUsedPerc, UpDays)
else:
print " Hostname : %s (%s)" % (Hostname, FQDN)
print " CPU Tot/Used : %s / %.2f%%" %(
_sizeof_fmt(CPUMzTotal, skip=2, suffix='Hz'), CPUUtilization)
print " CPU Architecture : %d socket(s) / %d CPU(s) / "\
"%d offline" % (CPUsockets, CPUs, CPUsOffline)
print "Memory Tot/Used/Available : %s / %.2f%% / %s" % (
_sizeof_fmt(MemTotal), MemoryUsedPerc, _sizeof_fmt(MemAvailable))
print " Huge Pages Tot/Used : %d / %.2f%% (HugePageSize: %s)" % (
MemHugePagesTotal, MemHugePagesUsagePerc,
_sizeof_fmt(MemHugePageSize))
print " Anonymous Huge Pages : %s" % _sizeof_fmt(MemAnonHugePages)
print " Swap Tot/Used : %s / %.2f%%" % (
_sizeof_fmt(SwapTotal), SwapUsedPerc)
print " System uptime : %s" % SystemUptime
if __name__ == '__main__':
exitcode = 0
try:
main()
except KeyboardInterrupt:
die(3, 'Exiting on user request')
sys.exit(exitcode)
# vim:ts=4:sw=4:et
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Script to delete files that are also present on Wikimedia Commons.
Do not run this script on Wikimedia Commons itself. It works based on
a given array of templates defined below.
Files are downloaded and compared. If the files match, it can be deleted on
the source wiki. If multiple versions of the file exist, the script will not
delete. If the SHA1 comparison is not equal, the script will not delete.
A sysop account on the local wiki is required if you want all features of
this script to work properly.
This script understands various command-line arguments:
-always run automatically, do not ask any questions. All files
that qualify for deletion are deleted. Reduced screen
output.
-replace replace links if the files are equal and the file names
differ
-replacealways replace links if the files are equal and the file names
differ without asking for confirmation
-replaceloose Do loose replacements. This will replace all occurrences
of the name of the image (and not just explicit image
syntax). This should work to catch all instances of the
file, including where it is used as a template parameter
or in galleries. However, it can also make more
mistakes.
-replaceonly Use this if you do not have a local sysop account, but do
wish to replace links from the NowCommons template.
-hash Use the hash to identify images that are the same. This
does not always work, so the bot opens two browser tabs to
let the user check whether the images are equal.
-- Example --
python nowcommons.py -replaceonly -hash -replace -replaceloose -replacealways
-- Known issues --
Please fix these if you are capable and motivated:
- if a file marked nowcommons is not present on Wikimedia Commons, the bot
will exit.
"""
#
# (C) Wikipedian, 2006-2007
# (C) Siebrand Mazeland, 2007-2008
# (C) xqt, 2010-2014
# (C) Pywikibot team, 2006-2014
#
# Distributed under the terms of the MIT license.
#
from __future__ import unicode_literals
__version__ = '$Id$'
#
import sys
import re
import webbrowser
import pywikibot
from pywikibot import i18n, Bot
from pywikibot import pagegenerators as pg
import image
from imagetransfer import nowCommonsMessage
nowCommons = {
'_default': [
u'NowCommons'
],
'ar': [
u'الآن كومنز',
u'الآن كومونز',
],
'de': [
u'NowCommons',
u'NC',
u'NCT',
u'Nowcommons',
u'NowCommons/Mängel',
u'NowCommons-Überprüft',
],
'en': [
u'NowCommons',
u'Ncd',
],
'eo': [
u'Nun en komunejo',
u'NowCommons',
],
'fa': [
u'موجود در انبار',
u'NowCommons',
],
'fr': [
u'Image sur Commons',
u'DoublonCommons',
u'Déjà sur Commons',
u'Maintenant sur commons',
u'Désormais sur Commons',
u'NC',
u'NowCommons',
u'Nowcommons',
u'Sharedupload',
u'Sur Commons',
u'Sur Commons2',
],
'he': [
u'גם בוויקישיתוף'
],
'hu': [
u'Azonnali-commons',
u'NowCommons',
u'Nowcommons',
u'NC'
],
'ia': [
u'OraInCommons'
],
'it': [
u'NowCommons',
],
'ja': [
u'NowCommons',
],
'ko': [
u'NowCommons',
u'공용중복',
u'공용 중복',
u'Nowcommons',
],
'nds-nl': [
u'NoenCommons',
u'NowCommons',
],
'nl': [
u'NuCommons',
u'Nucommons',
u'NowCommons',
u'Nowcommons',
u'NCT',
u'Nct',
],
'ro': [
u'NowCommons'
],
'ru': [
u'NowCommons',
u'NCT',
u'Nowcommons',
u'Now Commons',
u'Db-commons',
u'Перенесено на Викисклад',
u'На Викискладе',
],
'zh': [
u'NowCommons',
u'Nowcommons',
u'NCT',
],
}
namespaceInTemplate = [
'en',
'ia',
'it',
'ja',
'ko',
'lt',
'ro',
'zh',
]
# On it.wikipedia, images whose names contain 'stemma' or 'stub' must not be deleted
# (and there are many of them). If your project has images like that, add the words
# commonly used in their names here so they are skipped.
word_to_skip = {
'en': [],
'it': ['stemma', 'stub', 'hill40 '],
}
class NowCommonsDeleteBot(Bot):
"""Bot to delete migrated files."""
def __init__(self, **kwargs):
self.availableOptions.update({
'replace': False,
'replacealways': False,
'replaceloose': False,
'replaceonly': False,
'use_hash': False,
})
super(NowCommonsDeleteBot, self).__init__(**kwargs)
self.site = pywikibot.Site()
if repr(self.site) == 'commons:commons':
sys.exit('Do not run this bot on Commons!')
def ncTemplates(self):
if self.site.lang in nowCommons:
return nowCommons[self.site.lang]
else:
return nowCommons['_default']
@property
def nc_templates(self):
"""A set of now commons template Page instances."""
if not hasattr(self, '_nc_templates'):
self._nc_templates = set(pywikibot.Page(self.site, title, ns=10)
for title in self.ncTemplates())
return self._nc_templates
def useHashGenerator(self):
# https://toolserver.org/~multichill/nowcommons.php?language=it&page=2&filter=
lang = self.site.lang
num_page = 0
word_to_skip_translated = i18n.translate(self.site, word_to_skip)
images_processed = list()
while 1:
url = ('https://toolserver.org/~multichill/nowcommons.php?'
'language=%s&page=%s&filter=') % (lang, num_page)
HTML_text = self.site.getUrl(url, no_hostname=True)
reg = r'<[Aa] href="(?P<urllocal>.*?)">(?P<imagelocal>.*?)</[Aa]> +?</td><td>\n\s*?'
reg += r'<[Aa] href="(?P<urlcommons>http[s]?://commons.wikimedia.org/.*?)" \
>Image:(?P<imagecommons>.*?)</[Aa]> +?</td><td>'
regex = re.compile(reg, re.UNICODE)
found_something = False
change_page = True
for x in regex.finditer(HTML_text):
found_something = True
image_local = x.group('imagelocal')
image_commons = x.group('imagecommons')
if image_local in images_processed:
continue
change_page = False
images_processed.append(image_local)
# Skip images whose title contains one of the skip words (useful for it.wiki)
image_to_skip = False
for word in word_to_skip_translated:
if word.lower() in image_local.lower():
image_to_skip = True
if image_to_skip:
continue
url_local = x.group('urllocal')
url_commons = x.group('urlcommons')
pywikibot.output(u"\n\n>>> \03{lightpurple}%s\03{default} <<<"
% image_local)
pywikibot.output(u'Local: %s\nCommons: %s\n'
% (url_local, url_commons))
webbrowser.open(url_local, 0, 1)
webbrowser.open(url_commons, 0, 1)
if image_local.split('Image:')[1] == image_commons:
choice = pywikibot.input_yn(
u'The local and the commons images have the same name, '
'continue?', default=False, automatic_quit=False)
else:
choice = pywikibot.input_yn(
u'Are the two images equal?',
default=False, automatic_quit=False)
if choice:
yield [image_local, image_commons]
else:
continue
# The page is dynamically updated, so we may not need to change it
if change_page:
num_page += 1
# If no image was found, there are no more to process; break.
if not found_something:
break
def getPageGenerator(self):
if self.getOption('use_hash'):
gen = self.useHashGenerator()
else:
gens = [t.getReferences(follow_redirects=True, namespaces=[6],
onlyTemplateInclusion=True)
for t in self.nc_templates]
gen = pg.CombinedPageGenerator(gens)
gen = pg.DuplicateFilterPageGenerator(gen)
gen = pg.PreloadingGenerator(gen)
return gen
def findFilenameOnCommons(self, localImagePage):
filenameOnCommons = None
for templateName, params in localImagePage.templatesWithParams():
if templateName in self.nc_templates:
if params == []:
filenameOnCommons = localImagePage.title(withNamespace=False)
elif self.site.lang in namespaceInTemplate:
skip = False
filenameOnCommons = None
for par in params:
val = par.split('=')
if len(val) == 1 and not skip:
filenameOnCommons = par[par.index(':') + 1:]
break
if val[0].strip() == '1':
filenameOnCommons = val[1].strip()[val[1].strip().index(':') + 1:]
break
skip = True
if not filenameOnCommons:
filenameOnCommons = localImagePage.title(withNamespace=False)
else:
val = params[0].split('=')
if len(val) == 1:
filenameOnCommons = params[0].strip()
else:
filenameOnCommons = val[1].strip()
return filenameOnCommons
def run(self):
commons = pywikibot.Site('commons', 'commons')
comment = i18n.translate(self.site, nowCommonsMessage, fallback=True)
for page in self.getPageGenerator():
if self.getOption('use_hash'):
# the local page title includes the namespace; the commons image name does not
images_list = page # 0 -> local image, 1 -> commons image
page = pywikibot.Page(self.site, images_list[0])
else:
# If use_hash is true, we have already printed this before; no need to repeat it
self.current_page = page
try:
localImagePage = pywikibot.FilePage(self.site, page.title())
if localImagePage.fileIsShared():
pywikibot.output(u'File is already on Commons.')
continue
sha1 = localImagePage.latest_file_info.sha1
if self.getOption('use_hash'):
filenameOnCommons = images_list[1]
else:
filenameOnCommons = self.findFilenameOnCommons(
localImagePage)
if not filenameOnCommons and not self.getOption('use_hash'):
pywikibot.output(u'NowCommons template not found.')
continue
commonsImagePage = pywikibot.FilePage(commons, 'Image:%s'
% filenameOnCommons)
if localImagePage.title(withNamespace=False) == \
commonsImagePage.title(withNamespace=False) and self.getOption('use_hash'):
pywikibot.output(
u'The local and the commons images have the same name')
if localImagePage.title(withNamespace=False) != \
commonsImagePage.title(withNamespace=False):
usingPages = list(localImagePage.usingPages())
if usingPages and usingPages != [localImagePage]:
pywikibot.output(
u'\"\03{lightred}%s\03{default}\" is still used in %i pages.'
% (localImagePage.title(withNamespace=False),
len(usingPages)))
if self.getOption('replace') is True:
pywikibot.output(
u'Replacing \"\03{lightred}%s\03{default}\" by \
\"\03{lightgreen}%s\03{default}\".'
% (localImagePage.title(withNamespace=False),
commonsImagePage.title(withNamespace=False)))
oImageRobot = image.ImageRobot(
pg.FileLinksGenerator(localImagePage),
localImagePage.title(withNamespace=False),
commonsImagePage.title(withNamespace=False),
'', self.getOption('replacealways'),
self.getOption('replaceloose'))
oImageRobot.run()
# If the image is used with the urlname the
# previous function won't work
if len(list(pywikibot.FilePage(self.site,
page.title()).usingPages())) > 0 and \
self.getOption('replaceloose'):
oImageRobot = image.ImageRobot(
pg.FileLinksGenerator(
localImagePage),
localImagePage.title(
withNamespace=False, asUrl=True),
commonsImagePage.title(
withNamespace=False),
'', self.getOption('replacealways'),
self.getOption('replaceloose'))
oImageRobot.run()
# refresh because we want the updated list
usingPages = len(list(pywikibot.FilePage(
self.site, page.title()).usingPages()))
if usingPages > 0 and self.getOption('use_hash'):
# the input call is only a pause: the user just presses Enter to confirm
pywikibot.input(
u'There are still %s pages using this image; please remove it '
u'from them manually, then press Enter to continue.'
% usingPages)
else:
pywikibot.output(u'Please change them manually.')
continue
else:
pywikibot.output(
u'No page is using \"\03{lightgreen}%s\03{default}\" anymore.'
% localImagePage.title(withNamespace=False))
commonsText = commonsImagePage.get()
if self.getOption('replaceonly') is False:
if sha1 == commonsImagePage.latest_file_info.sha1:
pywikibot.output(
u'The image is identical to the one on Commons.')
if len(localImagePage.getFileVersionHistory()) > 1 and not self.getOption('use_hash'):
pywikibot.output(
u"This image has a version history. Please "
u"delete it manually after making sure that the "
u"old versions are not worth keeping.")
continue
if self.getOption('always') is False:
pywikibot.output(
u'\n\n>>>> Description on \03{lightpurple}%s\03{default} <<<<\n'
% page.title())
pywikibot.output(localImagePage.get())
pywikibot.output(
u'\n\n>>>> Description on \03{lightpurple}%s\03{default} <<<<\n'
% commonsImagePage.title())
pywikibot.output(commonsText)
if pywikibot.input_yn(
u'Does the description on Commons contain '
'all required source and license\n'
'information?',
default=False, automatic_quit=False):
localImagePage.delete(
'%s [[:commons:Image:%s]]'
% (comment, filenameOnCommons), prompt=False)
else:
localImagePage.delete(
comment + ' [[:commons:Image:%s]]'
% filenameOnCommons, prompt=False)
else:
pywikibot.output(
u'The image is not identical to the one on Commons.')
except (pywikibot.NoPage, pywikibot.IsRedirectPage) as e:
pywikibot.output(u'%s' % e[0])
continue
def main(*args):
"""
Process command line arguments and invoke bot.
If args is an empty list, sys.argv is used.
@param args: command line arguments
@type args: list of unicode
"""
options = {}
for arg in pywikibot.handle_args(args):
if arg.startswith('-') and \
arg[1:] in ('always', 'replace', 'replaceloose', 'replaceonly'):
options[arg[1:]] = True
elif arg == '-replacealways':
options['replace'] = True
options['replacealways'] = True
elif arg == '-hash':
options['use_hash'] = True
elif arg == '-autonomous':
pywikibot.warning(u"The '-autonomous' argument is DEPRECATED,"
u" use '-always' instead.")
options['always'] = True
bot = NowCommonsDeleteBot(**options)
bot.run()
if __name__ == "__main__":
main()
from __future__ import absolute_import
from django.core.cache import cache
from rest_framework.response import Response
from sentry import http
from sentry.api.bases.project import ProjectEndpoint
from sentry.api.exceptions import ResourceDoesNotExist
from sentry.models import ProjectKey, ProjectKeyStatus
DOC_URL = 'https://docs.getsentry.com/hosted/_wizards/{platform}.json'
PLATFORMS = set([
'python',
'python-bottle',
'python-celery',
'python-django',
'python-flask',
'python-pylons',
'python-pyramid',
'python-tornado',
])
def replace_keys(html, project_key):
if project_key is None:
return html
html = html.replace('___DSN___', project_key.dsn_private)
html = html.replace('___PUBLIC_DSN___', project_key.dsn_public)
html = html.replace('___PUBLIC_KEY___', project_key.public_key)
html = html.replace('___SECRET_KEY___', project_key.secret_key)
html = html.replace('___PROJECT_ID___', str(project_key.project_id))
return html
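# Illustrative sketch (not part of Sentry): given a hypothetical project_key whose
# dsn_public is 'https://abc@sentry.example.com/42',
#   replace_keys('Raven.config("___PUBLIC_DSN___")', project_key)
# returns 'Raven.config("https://abc@sentry.example.com/42")'; the remaining
# placeholders are substituted the same way from the key's other attributes.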
class ProjectPlatformDocsEndpoint(ProjectEndpoint):
def get(self, request, project, platform):
if platform not in PLATFORMS:
raise ResourceDoesNotExist
cache_key = 'docs:{}'.format(platform)
result = cache.get(cache_key)
if result is None:
session = http.build_session()
result = session.get(DOC_URL.format(platform=platform)).json()
cache.set(cache_key, result, 3600)
try:
project_key = ProjectKey.objects.filter(
project=project,
roles=ProjectKey.roles.store,
status=ProjectKeyStatus.ACTIVE
)[0]
except IndexError:
project_key = None
return Response({
'name': result['name'],
'html': replace_keys(result['body'], project_key),
'sdk': result['client_lib'],
'isFramework': result['is_framework'],
'link': result['doc_link'],
})
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
File: gcexport.py
Original author: Kyle Krafka (https://github.com/kjkjava/)
Date: April 28, 2015
Fork author: Michael P (https://github.com/moderation/)
Date: February 21, 2016
Fork author: Peter Steiner (https://github.com/pe-st/)
Date: June 2017
Date: March 2020 - Python3 support by Thomas Th. (https://github.com/telemaxx/)
Description: Use this script to export your fitness data from Garmin Connect.
See README.md for more information, CHANGELOG.md for a history of the changes
Activity & event types:
https://connect.garmin.com/modern/main/js/properties/event_types/event_types.properties
https://connect.garmin.com/modern/main/js/properties/activity_types/activity_types.properties
"""
# this avoids different pylint behaviour for python 2 and 3
from __future__ import print_function
from datetime import datetime, timedelta, tzinfo
from getpass import getpass
from math import floor
from platform import python_version
from subprocess import call
from timeit import default_timer as timer
import argparse
import csv
import io
import json
import logging
import os
import os.path
import re
import string
import sys
import unicodedata
import zipfile
python3 = sys.version_info.major == 3
if python3:
import http.cookiejar
import urllib.error
import urllib.parse
import urllib.request
import urllib
from urllib.parse import urlencode
from urllib.request import Request, HTTPError, URLError
COOKIE_JAR = http.cookiejar.CookieJar()
OPENER = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(COOKIE_JAR), urllib.request.HTTPSHandler(debuglevel=0))
else:
import cookielib
import urllib2
from urllib import urlencode
from urllib2 import Request, HTTPError, URLError
COOKIE_JAR = cookielib.CookieJar()
OPENER = urllib2.build_opener(urllib2.HTTPCookieProcessor(COOKIE_JAR), urllib2.HTTPSHandler(debuglevel=0))
SCRIPT_VERSION = '3.0.2'
# this is almost the datetime format Garmin used in the activity-search-service
# JSON 'display' fields (Garmin didn't zero-pad the date and the hour, but %d and %H do)
ALMOST_RFC_1123 = "%a, %d %b %Y %H:%M"
# used by sanitize_filename()
VALID_FILENAME_CHARS = "-_.() %s%s" % (string.ascii_letters, string.digits)
# map the numeric parentTypeId to its name for the CSV output
PARENT_TYPE_ID = {
1: 'running',
2: 'cycling',
3: 'hiking',
4: 'other',
9: 'walking',
17: 'any',
26: 'swimming',
29: 'fitness_equipment',
71: 'motorcycling',
83: 'transition',
144: 'diving',
149: 'yoga',
165: 'winter_sports'
}
# typeId values using pace instead of speed
USES_PACE = {1, 3, 9} # running, hiking, walking
# Maximum number of activities you can request at once.
# Used to be 100 and enforced by Garmin for older endpoints; for the current endpoint 'URL_GC_LIST'
# the limit is not known (I have less than 1000 activities and could get them all in one go)
LIMIT_MAXIMUM = 1000
MAX_TRIES = 3
CSV_TEMPLATE = os.path.join(os.path.dirname(os.path.realpath(__file__)), "csv_header_default.properties")
WEBHOST = "https://connect.garmin.com"
REDIRECT = "https://connect.garmin.com/modern/"
BASE_URL = "https://connect.garmin.com/en-US/signin"
SSO = "https://sso.garmin.com/sso"
CSS = "https://static.garmincdn.com/com.garmin.connect/ui/css/gauth-custom-v1.2-min.css"
DATA = {
'service': REDIRECT,
'webhost': WEBHOST,
'source': BASE_URL,
'redirectAfterAccountLoginUrl': REDIRECT,
'redirectAfterAccountCreationUrl': REDIRECT,
'gauthHost': SSO,
'locale': 'en_US',
'id': 'gauth-widget',
'cssUrl': CSS,
'clientId': 'GarminConnect',
'rememberMeShown': 'true',
'rememberMeChecked': 'false',
'createAccountShown': 'true',
'openCreateAccount': 'false',
'displayNameShown': 'false',
'consumeServiceTicket': 'false',
'initialFocus': 'true',
'embedWidget': 'false',
'generateExtraServiceTicket': 'true',
'generateTwoExtraServiceTickets': 'false',
'generateNoServiceTicket': 'false',
'globalOptInShown': 'true',
'globalOptInChecked': 'false',
'mobile': 'false',
'connectLegalTerms': 'true',
'locationPromptShown': 'true',
'showPassword': 'true'
}
# URLs for various services.
URL_GC_LOGIN = 'https://sso.garmin.com/sso/signin?' + urlencode(DATA)
URL_GC_POST_AUTH = 'https://connect.garmin.com/modern/activities?'
URL_GC_PROFILE = 'https://connect.garmin.com/modern/profile'
URL_GC_USERSTATS = 'https://connect.garmin.com/modern/proxy/userstats-service/statistics/'
URL_GC_LIST = 'https://connect.garmin.com/modern/proxy/activitylist-service/activities/search/activities?'
URL_GC_ACTIVITY = 'https://connect.garmin.com/modern/proxy/activity-service/activity/'
URL_GC_DEVICE = 'https://connect.garmin.com/modern/proxy/device-service/deviceservice/app-info/'
URL_GC_GEAR = 'https://connect.garmin.com/modern/proxy/gear-service/gear/filterGear?activityId='
URL_GC_ACT_PROPS = 'https://connect.garmin.com/modern/main/js/properties/activity_types/activity_types.properties'
URL_GC_EVT_PROPS = 'https://connect.garmin.com/modern/main/js/properties/event_types/event_types.properties'
URL_GC_GPX_ACTIVITY = 'https://connect.garmin.com/modern/proxy/download-service/export/gpx/activity/'
URL_GC_TCX_ACTIVITY = 'https://connect.garmin.com/modern/proxy/download-service/export/tcx/activity/'
URL_GC_ORIGINAL_ACTIVITY = 'http://connect.garmin.com/proxy/download-service/files/activity/'
def resolve_path(directory, subdir, time):
"""
Replace time variables and return the changed path. Supported placeholders are {YYYY} and {MM}
:param directory: export root directory
:param subdir: subdirectory, can have place holders.
:param time: date-time-string
:return: updated directory path string
"""
ret = os.path.join(directory, subdir)
if re.compile(".*{YYYY}.*").match(ret):
ret = ret.replace("{YYYY}", time[0:4])
if re.compile(".*{MM}.*").match(ret):
ret = ret.replace("{MM}", time[5:7])
return ret
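# Illustrative example (not from the original script), assuming a POSIX path separator:
#   resolve_path('./export', '{YYYY}/{MM}', '2018-07-04 09:30:00')
# returns './export/2018/07' (time[0:4] -> '2018', time[5:7] -> '07').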
def hhmmss_from_seconds(sec):
"""Helper function that converts seconds to HH:MM:SS time format."""
if isinstance(sec, (float, int)):
formatted_time = str(timedelta(seconds=int(sec))).zfill(8)
else:
formatted_time = "0.000"
return formatted_time
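# Illustrative examples (not from the original script):
#   hhmmss_from_seconds(3750)  -> '01:02:30'  (str(timedelta) zero-padded to 8 chars)
#   hhmmss_from_seconds(None)  -> '0.000'     (non-numeric input falls back to this marker)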
def kmh_from_mps(mps):
"""Helper function that converts meters per second (mps) to km/h."""
return str(mps * 3.6)
def sanitize_filename(name, max_length=0):
"""
Remove or replace characters that are unsafe for filename
"""
# inspired by https://stackoverflow.com/a/698714/3686
cleaned_filename = unicodedata.normalize('NFKD', name) if name else ''
stripped_filename = ''.join(c for c in cleaned_filename if c in VALID_FILENAME_CHARS).replace(' ', '_')
return stripped_filename[:max_length] if max_length > 0 else stripped_filename
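# Illustrative examples (not from the original script), using a hypothetical name:
#   sanitize_filename(u'Morning Run: Zürich!')      -> 'Morning_Run_Zurich'
#   sanitize_filename(u'Morning Run: Zürich!', 10)  -> 'Morning_Ru'
# NFKD decomposes 'ü' into 'u' plus a combining mark that is then filtered out.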
def write_to_file(filename, content, mode='w', file_time=None):
"""
Helper function that persists content to a file.
:param filename: name of the file to write
:param content: content to write; with Python 2 always of type 'str',
with Python 3 it can be 'bytes' or 'str'. If it's
'bytes' and the mode 'w', it will be converted/decoded
:param mode: 'w' or 'wb'
:param file_time: if given use as timestamp for the file written
"""
if mode == 'w':
write_file = io.open(filename, mode, encoding="utf-8")
if isinstance(content, bytes):
content = content.decode("utf-8")
elif mode == 'wb':
write_file = io.open(filename, mode)
else:
raise Exception('Unsupported file mode: ', mode)
write_file.write(content)
write_file.close()
if file_time:
os.utime(filename, (file_time, file_time))
def http_req(url, post=None, headers=None):
"""
Helper function that makes the HTTP requests.
:param url: URL for the request
:param post: dictionary of POST parameters
:param headers: dictionary of headers
:return: response body (type 'str' with Python 2, type 'bytes' with Python 3)
"""
request = Request(url)
# Tell Garmin we're some supported browser.
request.add_header('User-Agent', 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, \
like Gecko) Chrome/54.0.2816.0 Safari/537.36')
if headers:
if python3:
for header_key, header_value in headers.items():
request.add_header(header_key, header_value)
else:
for header_key, header_value in headers.iteritems():
request.add_header(header_key, header_value)
if post:
post = urlencode(post) # Convert dictionary to POST parameter string.
if python3:
post = post.encode("utf-8")
start_time = timer()
try:
response = OPENER.open(request, data=post)
except URLError as ex:
if hasattr(ex, 'reason'):
logging.error('Failed to reach url %s, error: %s', url, ex)
raise
else:
raise
logging.debug('Got %s in %s s from %s', response.getcode(), timer() - start_time, url)
# N.B. urllib2 will follow any 302 redirects.
# print(response.getcode())
if response.getcode() == 204:
# 204 = no content, e.g. for activities without GPS coordinates there is no GPX download.
# Write an empty file to prevent redownloading it.
logging.info('Got 204 for %s, returning empty response', url)
return b''
elif response.getcode() != 200:
raise Exception('Bad return code (' + str(response.getcode()) + ') for: ' + url)
return response.read()
def http_req_as_string(url, post=None, headers=None):
"""Helper function that makes the HTTP requests, returning a string instead of bytes."""
if python3:
return http_req(url, post, headers).decode()
else:
return http_req(url, post, headers)
# idea stolen from https://stackoverflow.com/a/31852401/3686
def load_properties(multiline, separator='=', comment_char='#', keys=None):
"""
Read a multiline string of properties (key/value pair separated by *separator*) into a dict
:param multiline: input string of properties
:param separator: separator between key and value
:param comment_char: lines starting with this char are considered comments, not key/value pairs
:param keys: list to append the keys to
:return:
"""
props = {}
for line in multiline.splitlines():
stripped_line = line.strip()
if stripped_line and not stripped_line.startswith(comment_char):
key_value = stripped_line.split(separator)
key = key_value[0].strip()
value = separator.join(key_value[1:]).strip().strip('"')
props[key] = value
if keys is not None:
keys.append(key)
return props
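# Illustrative example (not from the original script), with a hypothetical properties string:
#   load_properties('duration = "Duration (h:m:s)"\n# comment line\nsteps=Steps')
# returns {'duration': 'Duration (h:m:s)', 'steps': 'Steps'}; when a list is passed
# via 'keys', the keys are appended to it in file order.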
def value_if_found_else_key(some_dict, key):
"""Lookup a value in some_dict and use the key itself as fallback"""
return some_dict.get(key, key)
def present(element, act):
"""Return True if act[element] is valid and not None"""
if not act:
return False
elif element not in act:
return False
return act[element]
def absent_or_null(element, act):
"""Return False only if act[element] is valid and not None"""
if not act:
return True
elif element not in act:
return True
elif act[element]:
return False
return True
def from_activities_or_detail(element, act, detail, detail_container):
"""Return detail[detail_container][element] if valid and act[element] (or None) otherwise"""
if absent_or_null(detail_container, detail) or absent_or_null(element, detail[detail_container]):
return None if absent_or_null(element, act) else act[element]
return detail[detail_container][element]
def trunc6(some_float):
"""Return the given float as string formatted with six digit precision"""
return "{0:12.6f}".format(floor(some_float * 1000000) / 1000000).lstrip()
# A class building tzinfo objects for fixed-offset time zones.
# (copied from https://docs.python.org/2/library/datetime.html)
class FixedOffset(tzinfo):
"""Fixed offset in minutes east from UTC."""
def __init__(self, offset, name):
super(FixedOffset, self).__init__()
self.__offset = timedelta(minutes=offset)
self.__name = name
def utcoffset(self, dt):
del dt # unused
return self.__offset
def tzname(self, dt):
del dt # unused
return self.__name
def dst(self, dt):
del dt # unused
return timedelta(0)
def offset_date_time(time_local, time_gmt):
"""
Build an 'aware' datetime from two 'naive' datetime objects (that is timestamps
as present in the activitylist-service.json), using the time difference as offset.
"""
local_dt = datetime.strptime(time_local, "%Y-%m-%d %H:%M:%S")
gmt_dt = datetime.strptime(time_gmt, "%Y-%m-%d %H:%M:%S")
offset = local_dt - gmt_dt
offset_tz = FixedOffset(offset.seconds // 60, "LCL")
return local_dt.replace(tzinfo=offset_tz)
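# Illustrative example (not from the original script): with a local time one hour
# ahead of GMT,
#   offset_date_time('2020-03-15 10:00:00', '2020-03-15 09:00:00').isoformat()
# yields '2020-03-15T10:00:00+01:00'.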
def pace_or_speed_raw(type_id, parent_type_id, mps):
"""Convert speed (m/s) to speed (km/h) or pace (min/km) depending on type and parent type"""
kmh = 3.6 * mps
if (type_id in USES_PACE) or (parent_type_id in USES_PACE):
return 60 / kmh
return kmh
def pace_or_speed_formatted(type_id, parent_type_id, mps):
"""
Convert speed (m/s) to string: speed (km/h as x.x) or
pace (min/km as MM:SS), depending on type and parent type
"""
kmh = 3.6 * mps
if (type_id in USES_PACE) or (parent_type_id in USES_PACE):
# format seconds per kilometer as MM:SS, see https://stackoverflow.com/a/27751293
return '{0:02d}:{1:02d}'.format(*divmod(int(round(3600 / kmh)), 60))
return "{0:.1f}".format(round(kmh, 1))
class CsvFilter(object):
"""Collects, filters and writes CSV."""
def __init__(self, csv_file, csv_header_properties):
self.__csv_file = csv_file
with open(csv_header_properties, 'r') as prop:
csv_header_props = prop.read()
self.__csv_columns = []
self.__csv_headers = load_properties(csv_header_props, keys=self.__csv_columns)
self.__csv_field_names = []
for column in self.__csv_columns:
self.__csv_field_names.append(self.__csv_headers[column])
self.__writer = csv.DictWriter(self.__csv_file, fieldnames=self.__csv_field_names, quoting=csv.QUOTE_ALL)
self.__current_row = {}
def write_header(self):
"""Write the active column names as CSV header"""
self.__writer.writeheader()
def write_row(self):
"""Write the prepared CSV record"""
self.__writer.writerow(self.__current_row)
self.__current_row = {}
def set_column(self, name, value):
"""
Store a column value (if the column is active) into
the record prepared for the next write_row call
"""
if value and name in self.__csv_columns:
if python3:
self.__current_row[self.__csv_headers[name]] = value
else:
# must encode in UTF-8 because the Python 2 'csv' module doesn't support unicode
self.__current_row[self.__csv_headers[name]] = value.encode('utf8')
def is_column_active(self, name):
"""Return True if the column is present in the header template"""
return name in self.__csv_columns
def parse_arguments(argv):
"""
Setup the argument parser and parse the command line arguments.
"""
current_date = datetime.now().strftime('%Y-%m-%d')
activities_directory = './' + current_date + '_garmin_connect_export'
parser = argparse.ArgumentParser(description='Garmin Connect Exporter')
parser.add_argument('--version', action='version', version='%(prog)s ' + SCRIPT_VERSION,
help='print version and exit')
parser.add_argument('-v', '--verbosity', action='count', default=0,
help='increase output verbosity')
parser.add_argument('--username',
help='your Garmin Connect username or email address (otherwise, you will be prompted)')
parser.add_argument('--password',
help='your Garmin Connect password (otherwise, you will be prompted)')
parser.add_argument('-c', '--count', default='1',
help='number of recent activities to download, or \'all\' (default: 1)')
parser.add_argument('-e', '--external',
help='path to external program to pass the CSV file to')
parser.add_argument('-a', '--args',
help='additional arguments to pass to external program')
parser.add_argument('-f', '--format', choices=['gpx', 'tcx', 'original', 'json'], default='gpx',
help="export format; can be 'gpx', 'tcx', 'original' or 'json' (default: 'gpx')")
parser.add_argument('-d', '--directory', default=activities_directory,
help='the directory to export to (default: \'./YYYY-MM-DD_garmin_connect_export\')')
parser.add_argument('-s', "--subdir",
help="the subdirectory for activity files (tcx, gpx etc.), supported placeholders are {YYYY} and {MM}"
" (default: export directory)" )
parser.add_argument('-u', '--unzip', action='store_true',
help='if downloading ZIP files (format: \'original\'), unzip the file and remove the ZIP file')
parser.add_argument('-ot', '--originaltime', action='store_true',
help='will set downloaded (and possibly unzipped) file time to the activity start time')
parser.add_argument('--desc', type=int, nargs='?', const=0, default=None,
help='append the activity\'s description to the file name of the download; limit size if number is given')
parser.add_argument('-t', '--template', default=CSV_TEMPLATE,
help='template file with desired columns for CSV output')
parser.add_argument('-fp', '--fileprefix', action='count', default=0,
help="set the local time as activity file name prefix")
parser.add_argument('-sa', '--start_activity_no', type=int, default=1,
help="give index for first activity to import, i.e. skipping the newest activites")
return parser.parse_args(argv[1:])
def login_to_garmin_connect(args):
"""
Perform all HTTP requests to login to Garmin Connect.
"""
if python3:
username = args.username if args.username else input('Username: ')
else:
username = args.username if args.username else raw_input('Username: ')
password = args.password if args.password else getpass()
logging.debug("Login params: %s", urlencode(DATA))
# Initially, we need to get a valid session cookie, so we pull the login page.
print('Connecting to Garmin Connect...', end='')
logging.info('Connecting to %s', URL_GC_LOGIN)
connect_response = http_req_as_string(URL_GC_LOGIN)
# write_to_file('connect_response.html', connect_response, 'w')
for cookie in COOKIE_JAR:
logging.debug("Cookie %s : %s", cookie.name, cookie.value)
print(' Done.')
# Now we'll actually login.
# Fields that are passed in a typical Garmin login.
post_data = {
'username': username,
'password': password,
'embed': 'false',
'rememberme': 'on'
}
headers = {
'referer': URL_GC_LOGIN
}
print('Requesting Login ticket...', end='')
login_response = http_req_as_string(URL_GC_LOGIN + '#', post_data, headers)
for cookie in COOKIE_JAR:
logging.debug("Cookie %s : %s", cookie.name, cookie.value)
# write_to_file('login-response.html', login_response, 'w')
# extract the ticket from the login response
pattern = re.compile(r".*\?ticket=([-\w]+)\";.*", re.MULTILINE | re.DOTALL)
match = pattern.match(login_response)
if not match:
raise Exception('Couldn\'t find ticket in the login response. Cannot log in. '
'Did you enter the correct username and password?')
login_ticket = match.group(1)
print(' Done. Ticket=', login_ticket, sep='')
print("Authenticating...", end='')
logging.info('Authentication URL %s', URL_GC_POST_AUTH + 'ticket=' + login_ticket)
http_req(URL_GC_POST_AUTH + 'ticket=' + login_ticket)
print(' Done.')
def csv_write_record(csv_filter, extract, actvty, details, activity_type_name, event_type_name):
"""
Write out the given data as a CSV record
"""
type_id = 4 if absent_or_null('activityType', actvty) else actvty['activityType']['typeId']
parent_type_id = 4 if absent_or_null('activityType', actvty) else actvty['activityType']['parentTypeId']
if present(parent_type_id, PARENT_TYPE_ID):
parent_type_key = PARENT_TYPE_ID[parent_type_id]
else:
parent_type_key = None
logging.warning("Unknown parentType %s, please tell script author", str(parent_type_id))
# get some values from details if present, from actvty otherwise
start_latitude = from_activities_or_detail('startLatitude', actvty, details, 'summaryDTO')
start_longitude = from_activities_or_detail('startLongitude', actvty, details, 'summaryDTO')
end_latitude = from_activities_or_detail('endLatitude', actvty, details, 'summaryDTO')
end_longitude = from_activities_or_detail('endLongitude', actvty, details, 'summaryDTO')
csv_filter.set_column('id', str(actvty['activityId']))
csv_filter.set_column('url', 'https://connect.garmin.com/modern/activity/' + str(actvty['activityId']))
csv_filter.set_column('activityName', actvty['activityName'] if present('activityName', actvty) else None)
csv_filter.set_column('description', actvty['description'] if present('description', actvty) else None)
csv_filter.set_column('startTimeIso', extract['start_time_with_offset'].isoformat())
csv_filter.set_column('startTime1123', extract['start_time_with_offset'].strftime(ALMOST_RFC_1123))
csv_filter.set_column('startTimeMillis', str(actvty['beginTimestamp']) if present('beginTimestamp', actvty) else None)
csv_filter.set_column('startTimeRaw', details['summaryDTO']['startTimeLocal'] if present('startTimeLocal', details['summaryDTO']) else None)
csv_filter.set_column('endTimeIso', extract['end_time_with_offset'].isoformat() if extract['end_time_with_offset'] else None)
csv_filter.set_column('endTime1123', extract['end_time_with_offset'].strftime(ALMOST_RFC_1123) if extract['end_time_with_offset'] else None)
csv_filter.set_column('endTimeMillis', str(actvty['beginTimestamp'] + extract['elapsed_seconds'] * 1000) if present('beginTimestamp', actvty) else None)
csv_filter.set_column('durationRaw', str(round(actvty['duration'], 3)) if present('duration', actvty) else None)
csv_filter.set_column('duration', hhmmss_from_seconds(round(actvty['duration'])) if present('duration', actvty) else None)
csv_filter.set_column('elapsedDurationRaw', str(round(extract['elapsed_duration'], 3)) if extract['elapsed_duration'] else None)
csv_filter.set_column('elapsedDuration', hhmmss_from_seconds(round(extract['elapsed_duration'])) if extract['elapsed_duration'] else None)
csv_filter.set_column('movingDurationRaw', str(round(details['summaryDTO']['movingDuration'], 3)) if present('movingDuration', details['summaryDTO']) else None)
csv_filter.set_column('movingDuration', hhmmss_from_seconds(round(details['summaryDTO']['movingDuration'])) if present('movingDuration', details['summaryDTO']) else None)
csv_filter.set_column('distanceRaw', "{0:.5f}".format(actvty['distance'] / 1000) if present('distance', actvty) else None)
csv_filter.set_column('averageSpeedRaw', kmh_from_mps(details['summaryDTO']['averageSpeed']) if present('averageSpeed', details['summaryDTO']) else None)
csv_filter.set_column('averageSpeedPaceRaw', trunc6(pace_or_speed_raw(type_id, parent_type_id, actvty['averageSpeed'])) if present('averageSpeed', actvty) else None)
csv_filter.set_column('averageSpeedPace', pace_or_speed_formatted(type_id, parent_type_id, actvty['averageSpeed']) if present('averageSpeed', actvty) else None)
csv_filter.set_column('averageMovingSpeedRaw', kmh_from_mps(details['summaryDTO']['averageMovingSpeed']) if present('averageMovingSpeed', details['summaryDTO']) else None)
csv_filter.set_column('averageMovingSpeedPaceRaw', trunc6(pace_or_speed_raw(type_id, parent_type_id, details['summaryDTO']['averageMovingSpeed'])) if present('averageMovingSpeed', details['summaryDTO']) else None)
csv_filter.set_column('averageMovingSpeedPace', pace_or_speed_formatted(type_id, parent_type_id, details['summaryDTO']['averageMovingSpeed']) if present('averageMovingSpeed', details['summaryDTO']) else None)
csv_filter.set_column('maxSpeedRaw', kmh_from_mps(details['summaryDTO']['maxSpeed']) if present('maxSpeed', details['summaryDTO']) else None)
csv_filter.set_column('maxSpeedPaceRaw', trunc6(pace_or_speed_raw(type_id, parent_type_id, details['summaryDTO']['maxSpeed'])) if present('maxSpeed', details['summaryDTO']) else None)
csv_filter.set_column('maxSpeedPace', pace_or_speed_formatted(type_id, parent_type_id, details['summaryDTO']['maxSpeed']) if present('maxSpeed', details['summaryDTO']) else None)
csv_filter.set_column('elevationLoss', str(round(details['summaryDTO']['elevationLoss'], 2)) if present('elevationLoss', details['summaryDTO']) else None)
csv_filter.set_column('elevationLossUncorr', str(round(details['summaryDTO']['elevationLoss'], 2)) if not actvty['elevationCorrected'] and present('elevationLoss', details['summaryDTO']) else None)
csv_filter.set_column('elevationLossCorr', str(round(details['summaryDTO']['elevationLoss'], 2)) if actvty['elevationCorrected'] and present('elevationLoss', details['summaryDTO']) else None)
csv_filter.set_column('elevationGain', str(round(details['summaryDTO']['elevationGain'], 2)) if present('elevationGain', details['summaryDTO']) else None)
csv_filter.set_column('elevationGainUncorr', str(round(details['summaryDTO']['elevationGain'], 2)) if not actvty['elevationCorrected'] and present('elevationGain', details['summaryDTO']) else None)
csv_filter.set_column('elevationGainCorr', str(round(details['summaryDTO']['elevationGain'], 2)) if actvty['elevationCorrected'] and present('elevationGain', details['summaryDTO']) else None)
csv_filter.set_column('minElevation', str(round(details['summaryDTO']['minElevation'], 2)) if present('minElevation', details['summaryDTO']) else None)
csv_filter.set_column('minElevationUncorr', str(round(details['summaryDTO']['minElevation'], 2)) if not actvty['elevationCorrected'] and present('minElevation', details['summaryDTO']) else None)
csv_filter.set_column('minElevationCorr', str(round(details['summaryDTO']['minElevation'], 2)) if actvty['elevationCorrected'] and present('minElevation', details['summaryDTO']) else None)
csv_filter.set_column('maxElevation', str(round(details['summaryDTO']['maxElevation'], 2)) if present('maxElevation', details['summaryDTO']) else None)
csv_filter.set_column('maxElevationUncorr', str(round(details['summaryDTO']['maxElevation'], 2)) if not actvty['elevationCorrected'] and present('maxElevation', details['summaryDTO']) else None)
csv_filter.set_column('maxElevationCorr', str(round(details['summaryDTO']['maxElevation'], 2)) if actvty['elevationCorrected'] and present('maxElevation', details['summaryDTO']) else None)
csv_filter.set_column('elevationCorrected', 'true' if actvty['elevationCorrected'] else 'false')
# csv_record += empty_record # no minimum heart rate in JSON
csv_filter.set_column('maxHRRaw', str(details['summaryDTO']['maxHR']) if present('maxHR', details['summaryDTO']) else None)
csv_filter.set_column('maxHR', "{0:.0f}".format(actvty['maxHR']) if present('maxHR', actvty) else None)
csv_filter.set_column('averageHRRaw', str(details['summaryDTO']['averageHR']) if present('averageHR', details['summaryDTO']) else None)
csv_filter.set_column('averageHR', "{0:.0f}".format(actvty['averageHR']) if present('averageHR', actvty) else None)
csv_filter.set_column('caloriesRaw', str(details['summaryDTO']['calories']) if present('calories', details['summaryDTO']) else None)
csv_filter.set_column('calories', "{0:.0f}".format(details['summaryDTO']['calories']) if present('calories', details['summaryDTO']) else None)
csv_filter.set_column('vo2max', str(actvty['vO2MaxValue']) if present('vO2MaxValue', actvty) else None)
csv_filter.set_column('aerobicEffect', str(round(details['summaryDTO']['trainingEffect'], 2)) if present('trainingEffect', details['summaryDTO']) else None)
csv_filter.set_column('anaerobicEffect', str(round(details['summaryDTO']['anaerobicTrainingEffect'], 2)) if present('anaerobicTrainingEffect', details['summaryDTO']) else None)
csv_filter.set_column('averageRunCadence', str(round(details['summaryDTO']['averageRunCadence'], 2)) if present('averageRunCadence', details['summaryDTO']) else None)
csv_filter.set_column('maxRunCadence', str(details['summaryDTO']['maxRunCadence']) if present('maxRunCadence', details['summaryDTO']) else None)
csv_filter.set_column('strideLength', str(round(details['summaryDTO']['strideLength'], 2)) if present('strideLength', details['summaryDTO']) else None)
csv_filter.set_column('steps', str(actvty['steps']) if present('steps', actvty) else None)
csv_filter.set_column('averageCadence', str(actvty['averageBikingCadenceInRevPerMinute']) if present('averageBikingCadenceInRevPerMinute', actvty) else None)
csv_filter.set_column('maxCadence', str(actvty['maxBikingCadenceInRevPerMinute']) if present('maxBikingCadenceInRevPerMinute', actvty) else None)
csv_filter.set_column('strokes', str(actvty['strokes']) if present('strokes', actvty) else None)
csv_filter.set_column('averageTemperature', str(details['summaryDTO']['averageTemperature']) if present('averageTemperature', details['summaryDTO']) else None)
csv_filter.set_column('minTemperature', str(details['summaryDTO']['minTemperature']) if present('minTemperature', details['summaryDTO']) else None)
csv_filter.set_column('maxTemperature', str(details['summaryDTO']['maxTemperature']) if present('maxTemperature', details['summaryDTO']) else None)
csv_filter.set_column('device', extract['device'] if extract['device'] else None)
csv_filter.set_column('gear', extract['gear'] if extract['gear'] else None)
csv_filter.set_column('activityTypeKey', actvty['activityType']['typeKey'].title() if present('typeKey', actvty['activityType']) else None)
csv_filter.set_column('activityType', value_if_found_else_key(activity_type_name, 'activity_type_' + actvty['activityType']['typeKey']) if present('activityType', actvty) else None)
csv_filter.set_column('activityParent', value_if_found_else_key(activity_type_name, 'activity_type_' + parent_type_key) if parent_type_key else None)
csv_filter.set_column('eventTypeKey', actvty['eventType']['typeKey'].title() if present('typeKey', actvty['eventType']) else None)
csv_filter.set_column('eventType', value_if_found_else_key(event_type_name, actvty['eventType']['typeKey']) if present('eventType', actvty) else None)
csv_filter.set_column('privacy', details['accessControlRuleDTO']['typeKey'] if present('typeKey', details['accessControlRuleDTO']) else None)
csv_filter.set_column('fileFormat', details['metadataDTO']['fileFormat']['formatKey'] if present('fileFormat', details['metadataDTO']) and present('formatKey', details['metadataDTO']['fileFormat']) else None)
csv_filter.set_column('tz', details['timeZoneUnitDTO']['timeZone'] if present('timeZone', details['timeZoneUnitDTO']) else None)
csv_filter.set_column('tzOffset', extract['start_time_with_offset'].isoformat()[-6:])
csv_filter.set_column('locationName', details['locationName'] if present('locationName', details) else None)
csv_filter.set_column('startLatitudeRaw', str(start_latitude) if start_latitude else None)
csv_filter.set_column('startLatitude', trunc6(start_latitude) if start_latitude else None)
csv_filter.set_column('startLongitudeRaw', str(start_longitude) if start_longitude else None)
csv_filter.set_column('startLongitude', trunc6(start_longitude) if start_longitude else None)
csv_filter.set_column('endLatitudeRaw', str(end_latitude) if end_latitude else None)
csv_filter.set_column('endLatitude', trunc6(end_latitude) if end_latitude else None)
csv_filter.set_column('endLongitudeRaw', str(end_longitude) if end_longitude else None)
csv_filter.set_column('endLongitude', trunc6(end_longitude) if end_longitude else None)
csv_filter.set_column('sampleCount', str(extract['samples']['metricsCount']) if present('metricsCount', extract['samples']) else None)
csv_filter.write_row()
def extract_device(device_dict, details, start_time_seconds, args, http_caller, file_writer):
"""
Try to get the device details (and cache them, as they're used for multiple activities)
:param device_dict: cache (dict) of already known devices
:param details: dict with the details of an activity, should contain a device ID
:param args: command-line arguments (for the file_writer callback)
:param http_caller: callback to perform the HTTP call for downloading the device details
:param file_writer: callback that saves the device details in a file
:return: string with the device name
"""
if not present('metadataDTO', details):
logging.warning("no metadataDTO")
return None
metadata = details['metadataDTO']
device_app_inst_id = metadata['deviceApplicationInstallationId'] if present('deviceApplicationInstallationId', metadata) else None
if device_app_inst_id:
if device_app_inst_id not in device_dict:
# observed from my stock of activities:
# details['metadataDTO']['deviceMetaDataDTO']['deviceId'] == null -> device unknown
# details['metadataDTO']['deviceMetaDataDTO']['deviceId'] == '0' -> device unknown
# details['metadataDTO']['deviceMetaDataDTO']['deviceId'] == 'someid' -> device known
device_dict[device_app_inst_id] = None
device_meta = metadata['deviceMetaDataDTO'] if present('deviceMetaDataDTO', metadata) else None
device_id = device_meta['deviceId'] if present('deviceId', device_meta) else None
if 'deviceId' not in device_meta or device_id and device_id != '0':
device_json = http_caller(URL_GC_DEVICE + str(device_app_inst_id))
file_writer(os.path.join(args.directory, 'device_' + str(device_app_inst_id) + '.json'),
device_json, 'w',
start_time_seconds)
if not device_json:
logging.warning("Device Details %s are empty", device_app_inst_id)
device_dict[device_app_inst_id] = "device-id:" + str(device_app_inst_id)
else:
device_details = json.loads(device_json)
if present('productDisplayName', device_details):
device_dict[device_app_inst_id] = device_details['productDisplayName'] + ' ' \
+ device_details['versionString']
else:
logging.warning("Device details %s incomplete", device_app_inst_id)
return device_dict[device_app_inst_id]
return None
def load_gear(activity_id, args):
"""Retrieve the gear/equipment for an activity"""
try:
gear_json = http_req_as_string(URL_GC_GEAR + activity_id)
gear = json.loads(gear_json)
if gear:
del args # keep 'args' argument in case you need to uncomment write_to_file
# write_to_file(join(args.directory, 'activity_' + activity_id + '-gear.json'),
# gear_json, 'w')
gear_display_name = gear[0]['displayName'] if present('displayName', gear[0]) else None
gear_model = gear[0]['customMakeModel'] if present('customMakeModel', gear[0]) else None
logging.debug("Gear for %s = %s/%s", activity_id, gear_display_name, gear_model)
return gear_display_name if gear_display_name else gear_model
return None
except HTTPError:
pass # don't abort just for missing gear...
# logging.info("Unable to get gear for %d", activity_id)
# logging.exception(e)
def export_data_file(activity_id, activity_details, args, file_time, append_desc, start_time_locale):
"""
Write the data of the activity to a file, depending on the chosen data format
"""
# Time dependent subdirectory for activity files, e.g. '{YYYY}
if args.subdir is not None:
directory = resolve_path(args.directory, args.subdir, start_time_locale)
# export activities to root directory
else:
directory = args.directory
if not os.path.isdir(directory):
os.makedirs(directory)
# timestamp as prefix for filename
if args.fileprefix > 0:
prefix = "{}-".format(start_time_locale.replace("-", "").replace(":", "").replace(" ", "-"))
else:
prefix = ""
fit_filename = None
if args.format == 'gpx':
data_filename = os.path.join(directory, prefix + 'activity_' + activity_id + append_desc + '.gpx')
download_url = URL_GC_GPX_ACTIVITY + activity_id + '?full=true'
file_mode = 'w'
elif args.format == 'tcx':
data_filename = os.path.join(directory, prefix + 'activity_' + activity_id + append_desc + '.tcx')
download_url = URL_GC_TCX_ACTIVITY + activity_id + '?full=true'
file_mode = 'w'
elif args.format == 'original':
data_filename = os.path.join(directory, prefix + 'activity_' + activity_id + append_desc + '.zip')
# TODO not all 'original' files are in FIT format, some are GPX or TCX...
fit_filename = os.path.join(directory, prefix + 'activity_' + activity_id + append_desc + '.fit')
download_url = URL_GC_ORIGINAL_ACTIVITY + activity_id
file_mode = 'wb'
elif args.format == 'json':
data_filename = os.path.join(directory, prefix + 'activity_' + activity_id + append_desc + '.json')
file_mode = 'w'
else:
raise Exception('Unrecognized format.')
if os.path.isfile(data_filename):
logging.debug('Data file for %s already exists', activity_id)
print('\tData file already exists; skipping...')
return
# Regardless of unzip setting, don't redownload if the ZIP or FIT file exists.
if args.format == 'original' and os.path.isfile(fit_filename):
logging.debug('Original data file for %s already exists', activity_id)
print('\tFIT data file already exists; skipping...')
return
if args.format != 'json':
# Download the data file from Garmin Connect. If the download fails (e.g., due to timeout),
# this script will die, but nothing will have been written to disk about this activity, so
# just running it again should pick up where it left off.
try:
data = http_req(download_url)
except HTTPError as ex:
# Handle expected (though unfortunate) error codes; die on unexpected ones.
if ex.code == 500 and args.format == 'tcx':
# Garmin will give an internal server error (HTTP 500) when downloading TCX files
# if the original was a manual GPX upload. Writing an empty file prevents this file
# from being redownloaded, similar to the way GPX files are saved even when there
# are no tracks. One could be generated here, but that's a bit much. Use the GPX
# format if you want actual data in every file, as I believe Garmin provides a GPX
# file for every activity.
logging.info('Writing empty file since Garmin did not generate a TCX file for this activity...')
data = ''
elif ex.code == 404 and args.format == 'original':
# For manual activities (i.e., entered online without a file upload), there is
# no original file. Write an empty file to prevent redownloading it.
logging.info('Writing empty file since there was no original activity data...')
data = ''
else:
logging.info('Got %s for %s', ex.code, download_url)
raise Exception('Failed. Got an HTTP error ' + str(ex.code) + ' for ' + download_url)
else:
data = activity_details
# Persist file
write_to_file(data_filename, data, file_mode, file_time)
if args.format == 'original':
# Even a manually uploaded GPX file is zipped, but we'll validate the extension.
if args.unzip and data_filename[-3:].lower() == 'zip':
logging.debug('Unzipping and removing original file, size is %s', os.stat(data_filename).st_size)
if os.stat(data_filename).st_size > 0:
zip_file = open(data_filename, 'rb')
zip_obj = zipfile.ZipFile(zip_file)
for name in zip_obj.namelist():
unzipped_name = zip_obj.extract(name, directory)
# prepend 'activity_' and append the description to the base name
name_base, name_ext = os.path.splitext(name)
# sometimes in 2020 Garmin added '_ACTIVITY' to the name in the ZIP. Remove it...
# note that 'new_name' should match 'fit_filename' elsewhere in this script to
# avoid downloading the same files again
name_base = name_base.replace('_ACTIVITY', '')
new_name = os.path.join(directory, prefix + 'activity_' + name_base + append_desc + name_ext)
logging.debug('renaming %s to %s', unzipped_name, new_name)
os.rename(unzipped_name, new_name)
if file_time:
os.utime(new_name, (file_time, file_time))
zip_file.close()
else:
print('\tSkipping 0Kb zip file.')
os.remove(data_filename)
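# Illustration (added): the data file name produced for each --format value, mirroring the
# mapping inside export_data_file() above (the 'original' format additionally checks for a
# matching .fit file). '_example_data_filename' is a hypothetical helper for documentation
# only and is not called anywhere in this script.
def _example_data_filename(fmt, activity_id='123456789', prefix='', append_desc=''):
    extension = {'gpx': '.gpx', 'tcx': '.tcx', 'original': '.zip', 'json': '.json'}[fmt]
    return prefix + 'activity_' + activity_id + append_desc + extension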
def setup_logging():
"""Setup logging"""
logging.basicConfig(
filename='gcexport.log',
level=logging.DEBUG,
format='%(asctime)s [%(levelname)-7.7s] %(message)s'
)
# set up logging to console
console = logging.StreamHandler()
console.setLevel(logging.WARN)
formatter = logging.Formatter('[%(levelname)s] %(message)s')
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
def logging_verbosity(verbosity):
"""Adapt logging verbosity, separately for logfile and console output"""
logger = logging.getLogger()
for handler in logger.handlers:
if isinstance(handler, logging.FileHandler):
# this is the logfile handler
level = logging.DEBUG if verbosity > 0 else logging.INFO
handler.setLevel(level)
logging.info('New logfile level: %s', logging.getLevelName(level))
elif isinstance(handler, logging.StreamHandler):
# this is the console handler
level = logging.DEBUG if verbosity > 1 else (logging.INFO if verbosity > 0 else logging.WARN)
handler.setLevel(level)
logging.debug('New console log level: %s', logging.getLevelName(level))
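# Illustration (added): the levels that logging_verbosity() above applies per --verbosity
# count, assuming the two handlers installed by setup_logging(). '_expected_log_levels' is
# a hypothetical helper for documentation only; the script never calls it.
def _expected_log_levels(verbosity):
    logfile_level = logging.DEBUG if verbosity > 0 else logging.INFO
    console_level = logging.DEBUG if verbosity > 1 else (logging.INFO if verbosity > 0 else logging.WARN)
    return logfile_level, console_level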
def main(argv):
"""
Main entry point for gcexport.py
"""
setup_logging()
logging.info("Starting %s version %s, using Python version %s", argv[0], SCRIPT_VERSION, python_version())
args = parse_arguments(argv)
logging_verbosity(args.verbosity)
print('Welcome to Garmin Connect Exporter!')
# Create directory for data files.
if os.path.isdir(args.directory):
logging.warning("Output directory %s already exists. "
"Will skip already-downloaded files and append to the CSV file.",
args.directory)
login_to_garmin_connect(args)
# We should be logged in now.
if not os.path.isdir(args.directory):
os.mkdir(args.directory)
csv_filename = args.directory + '/activities.csv'
csv_existed = os.path.isfile(csv_filename)
if python3:
csv_file = open(csv_filename, mode='a', encoding='utf-8')
else:
csv_file = open(csv_filename, 'a')
csv_filter = CsvFilter(csv_file, args.template)
# Write header to CSV file
if not csv_existed:
csv_filter.write_header()
if args.count == 'all':
# If the user wants to download all activities, query the userstats
# on the profile page to know how many are available
print('Getting display name...', end='')
logging.info('Profile page %s', URL_GC_PROFILE)
profile_page = http_req_as_string(URL_GC_PROFILE)
# write_to_file(args.directory + '/profile.html', profile_page, 'w')
# extract the display name from the profile page, it should be in there as
# \"displayName\":\"John.Doe\"
pattern = re.compile(r".*\\\"displayName\\\":\\\"([-.\w]+)\\\".*", re.MULTILINE | re.DOTALL)
match = pattern.match(profile_page)
if not match:
raise Exception('Did not find the display name in the profile page.')
display_name = match.group(1)
print(' Done. displayName=', display_name, sep='')
print('Fetching user stats...', end='')
logging.info('Userstats page %s', URL_GC_USERSTATS + display_name)
result = http_req_as_string(URL_GC_USERSTATS + display_name)
print(' Done.')
# Persist JSON
write_to_file(args.directory + '/userstats.json', result, 'w')
# Modify total_to_download based on how many activities the server reports.
json_results = json.loads(result)
total_to_download = int(json_results['userMetrics'][0]['totalActivities'])
else:
total_to_download = int(args.count)
total_downloaded = 0
device_dict = dict()
# load some dictionaries with lookup data from REST services
activity_type_props = http_req_as_string(URL_GC_ACT_PROPS)
# write_to_file(args.directory + '/activity_types.properties', activity_type_props, 'w')
activity_type_name = load_properties(activity_type_props)
event_type_props = http_req_as_string(URL_GC_EVT_PROPS)
# write_to_file(args.directory + '/event_types.properties', event_type_props, 'w')
event_type_name = load_properties(event_type_props)
# This while loop will download data from the server in multiple chunks, if necessary.
while total_downloaded < total_to_download:
# Maximum chunk size 'LIMIT_MAXIMUM' ... 400 return status if over maximum. So download
# maximum or whatever remains if less than maximum.
# As of 2018-03-06 I get return status 500 if over maximum
if total_to_download - total_downloaded > LIMIT_MAXIMUM:
num_to_download = LIMIT_MAXIMUM
else:
num_to_download = total_to_download - total_downloaded
search_params = {'start': total_downloaded, 'limit': num_to_download}
# Query Garmin Connect
print('Querying list of activities ', total_downloaded + 1,
'..', total_downloaded + num_to_download,
'...', sep='', end='')
logging.info('Activity list URL %s', URL_GC_LIST + urlencode(search_params))
result = http_req_as_string(URL_GC_LIST + urlencode(search_params))
print(' Done.')
# Persist JSON activities list
current_index = total_downloaded + 1
activities_list_filename = '/activities-' \
+ str(current_index) + '-' \
+ str(total_downloaded + num_to_download) + '.json'
write_to_file(args.directory + activities_list_filename, result, 'w')
activities = json.loads(result)
if len(activities) != num_to_download:
logging.warning('Expected %s activities, got %s.', num_to_download, len(activities))
# Process each activity.
for actvty in activities:
if args.start_activity_no and current_index < args.start_activity_no:
# Display which entry we're skipping.
print('Skipping Garmin Connect activity ', end='')
print('(', current_index, '/', total_to_download, ') ', sep='', end='')
print('[', actvty['activityId'], ']', sep='')
else:
# Display which entry we're working on.
print('Garmin Connect activity ', end='')
print('(', current_index, '/', total_to_download, ') ', sep='', end='')
print('[', actvty['activityId'], '] ', sep='', end='')
print(actvty['activityName'])
# Retrieve also the detail data from the activity (the one displayed on
# the https://connect.garmin.com/modern/activity/xxx page), because some
# data are missing from 'a' (or are even different, e.g. for my activities
# 86497297 or 86516281)
activity_details = None
details = None
tries = MAX_TRIES
while tries > 0:
activity_details = http_req_as_string(URL_GC_ACTIVITY + str(actvty['activityId']))
details = json.loads(activity_details)
# I observed a failure to get a complete JSON detail in about 5-10 calls out of 1000
# retrying then statistically gets a better JSON ;-)
if details['summaryDTO']:
tries = 0
else:
logging.info("Retrying activity details download %s", URL_GC_ACTIVITY + str(actvty['activityId']))
tries -= 1
if tries == 0:
raise Exception('Didn\'t get "summaryDTO" after ' + str(MAX_TRIES) + ' tries for ' + str(actvty['activityId']))
extract = {}
extract['start_time_with_offset'] = offset_date_time(actvty['startTimeLocal'], actvty['startTimeGMT'])
elapsed_duration = details['summaryDTO']['elapsedDuration'] if 'summaryDTO' in details and 'elapsedDuration' in details['summaryDTO'] else None
extract['elapsed_duration'] = elapsed_duration if elapsed_duration else actvty['duration']
extract['elapsed_seconds'] = int(round(extract['elapsed_duration']))
extract['end_time_with_offset'] = extract['start_time_with_offset'] + timedelta(seconds=extract['elapsed_seconds'])
print('\t', extract['start_time_with_offset'].isoformat(), ', ', sep='', end='')
print(hhmmss_from_seconds(extract['elapsed_seconds']), ', ', sep='', end='')
if 'distance' in actvty and isinstance(actvty['distance'], float):
print("{0:.3f}".format(actvty['distance'] / 1000), 'km', sep='')
else:
print('0.000 km')
if args.desc is not None:
append_desc = '_' + sanitize_filename(actvty['activityName'], args.desc)
else:
append_desc = ''
if args.originaltime:
start_time_seconds = actvty['beginTimestamp'] // 1000 if present('beginTimestamp', actvty) else None
else:
start_time_seconds = None
extract['device'] = extract_device(device_dict, details, start_time_seconds, args, http_req_as_string, write_to_file)
# try to get the JSON with all the samples (not all activities have it...),
# but only if it's really needed for the CSV output
extract['samples'] = None
if csv_filter.is_column_active('sampleCount'):
try:
# TODO implement retries here, I have observed temporary failures
activity_measurements = http_req_as_string(URL_GC_ACTIVITY + str(actvty['activityId']) + "/details")
write_to_file(args.directory + '/activity_' + str(actvty['activityId']) + '_samples.json',
activity_measurements, 'w',
start_time_seconds)
samples = json.loads(activity_measurements)
extract['samples'] = samples
except HTTPError:
pass # don't abort just for missing samples...
# logging.info("Unable to get samples for %d", actvty['activityId'])
# logging.exception(e)
extract['gear'] = None
if csv_filter.is_column_active('gear'):
extract['gear'] = load_gear(str(actvty['activityId']), args)
# Write stats to CSV.
csv_write_record(csv_filter, extract, actvty, details, activity_type_name, event_type_name)
export_data_file(str(actvty['activityId']), activity_details, args, start_time_seconds, append_desc,
actvty['startTimeLocal'])
current_index += 1
# End for loop for activities of chunk
total_downloaded += num_to_download
# End while loop for multiple chunks.
csv_file.close()
if args.external:
print('Open CSV output.')
print(csv_filename)
call([args.external, "--" + args.args, csv_filename])
print('Done!')
if __name__ == "__main__":
try:
main(sys.argv)
except KeyboardInterrupt:
print('Interrupted')
sys.exit(0)
|
#!/usr/bin/env python
"""blinky.py:
Starting point of blinky
"""
__author__ = "Dilawar Singh"
__copyright__ = "Copyright 2015, Dilawar Singh and NCBS Bangalore"
__credits__ = ["NCBS Bangalore"]
__license__ = "GNU GPL"
__version__ = "1.0.0"
__maintainer__ = "Dilawar Singh"
__email__ = "[email protected]"
__status__ = "Development"
import extract
import webcam
import pylab
import numpy as np
def main(args):
# Extract video first
data = webcam.video2csv(args)
if len(data) == 0:
print('[WARN] Could not load data. Quitting.')
return None
edgyBlinks = extract.find_blinks_using_edge(data)
outfile = "%s_blinks_using_edges.csv" % args['video_device']
print("[INFO] Writing to outfile %s" % outfile)
np.savetxt(outfile, np.array(edgyBlinks).T, delimiter=","
, header = "time,blinks")
pixalBlinks = extract.find_blinks_using_pixals(data)
outfile = "%s_blinks_using_pixals.csv" % args['video_device']
print("[INFO] Writing to outfile %s" % outfile)
np.savetxt(outfile, np.array(pixalBlinks).T, delimiter=","
, header = "time,blinks")
if __name__ == '__main__':
import argparse
# Argument parser.
description = '''Detect eye-blinks in recording (optimized for mouse eye)'''
parser = argparse.ArgumentParser(description=description)
class Args: pass
args = Args()
parser.add_argument('--video-device', '-f'
, required = False
, default = 0
, help = 'Path of the video file or camera index. default camera 0'
)
parser.add_argument('--bbox', '-b'
, required = False
, nargs = '+'
, type = int
, help = 'Bounding box : topx topy width height'
)
parser.parse_args(namespace=args)
main(vars(args))
|
# -*- coding: utf-8 -*-
"""
A Kodi plugin for ESPN Player
"""
import sys
import os
import urllib
import urlparse
import re
from datetime import datetime
from resources.lib.espnlib import espnlib
import xbmc
import xbmcaddon
import xbmcvfs
import xbmcgui
import xbmcplugin
addon = xbmcaddon.Addon()
addon_path = xbmc.translatePath(addon.getAddonInfo('path'))
addon_profile = xbmc.translatePath(addon.getAddonInfo('profile'))
language = addon.getLocalizedString
logging_prefix = '[%s-%s]' % (addon.getAddonInfo('id'), addon.getAddonInfo('version'))
if not xbmcvfs.exists(addon_profile):
xbmcvfs.mkdir(addon_profile)
_url = sys.argv[0] # get the plugin url in plugin:// notation
_handle = int(sys.argv[1]) # get the plugin handle as an integer number
username = addon.getSetting('email')
password = addon.getSetting('password')
cookie_file = os.path.join(addon_profile, 'cookie_file')
if addon.getSetting('debug') == 'false':
debug = False
else:
debug = True
if addon.getSetting('verify_ssl') == 'false':
verify_ssl = False
else:
verify_ssl = True
espn = espnlib(cookie_file, debug, verify_ssl)
def addon_log(string):
if debug:
xbmc.log('%s: %s' % (logging_prefix, string))
def services_menu():
services = espn.get_services()
if len(services) == 1:
# list main menu directly if one service is found
main_menu(services.values()[0])
else:
for name, service in services.items():
parameters = {'action': 'main_menu', 'service': service}
add_item(name, parameters)
xbmcplugin.endOfDirectory(_handle)
def main_menu(service):
listing = []
items = [language(30018), language(30016), language(30017), language(30019)]
for item in items:
if item == language(30018):
parameters = {'action': 'list_today', 'service': service}
elif item == language(30019):
parameters = {'action': 'list_channels', 'service': service}
else:
if item == language(30016):
day = 'upcoming'
else:
day = 'archive'
parameters = {'action': 'list_dates', 'service': service, 'day': day}
add_item(item, parameters)
xbmcplugin.endOfDirectory(_handle)
def list_today(service):
now = datetime.now()
date_today = now.date()
items = [language(30015), language(30016), language(30017)]
for item in items:
if item == language(30015):
parameters = {'action': 'list_games', 'filter_games': 'inplay', 'service': service, 'filter_date': 'false'}
else:
if item == language(30016):
game_type = 'upcoming'
else:
game_type = 'archive'
parameters = {'action': 'list_games', 'service': service, 'filter_date': date_today,
'filter_games': game_type}
add_item(item, parameters)
xbmcplugin.endOfDirectory(_handle)
def list_dates(service, day):
dates = espn.get_gamedates(service, day)
for date in dates:
title = date.strftime('%Y-%m-%d')
parameters = {'action': 'list_games', 'service': service, 'filter_date': date, 'filter_games': 'false'}
add_item(title, parameters)
xbmcplugin.endOfDirectory(_handle)
def list_games(service, filter_date, filter_games):
items = []
if filter_date == 'false':
filter_date = False
if filter_games == 'false':
filter_games = False
games = espn.get_games(service, filter_date=filter_date, filter_games=filter_games)
for game in games:
team_names = True
game_datetime = espn.parse_datetime(game['dateTimeGMT'], localize=True)
time = game_datetime.strftime('%H:%M')
time_and_date = game_datetime.strftime('%Y-%m-%d %H:%M')
category = game['sportId']
try:
home_team = '%s' % (game['homeTeam']['name'])
away_team = '%s' % (game['awayTeam']['name'])
except KeyError:
# try to extract team names from full title
teampattern = re.search(r'(.+)( vs. )(.+)( \()', game['name'])
if teampattern:
home_team = teampattern.group(3)
away_team = teampattern.group(1)
else:
team_names = False
if 'availablePrograms' not in game:
playable = False
parameters = {'action': 'null'}
else:
playable = True
parameters = {'action': 'play_video', 'airingId': game['statsId']}
if team_names:
title = '[B]%s[/B] vs. [B]%s[/B]' % (away_team, home_team)
list_title = '[B]%s[/B] %s: [B]%s[/B] vs. [B]%s[/B]' % (coloring(time, 'time'), coloring(category, 'cat'), away_team, home_team)
else:
title = '[B]%s[/B]' % game['name']
list_title = '[B]%s[/B] %s: [B]%s[/B]' % (coloring(time, 'time'), coloring(category, 'cat'), game['name'])
game_image = game['image'].split('.jpg')[0] + '.jpg'
art = {
'thumb': game_image,
'fanart': game_image,
'cover': game_image,
}
info = {
'title': title,
'genre': category,
'plot': game['name']
}
items = add_item(list_title, parameters, items=items, playable=playable, folder=False, set_art=art,
set_info=info)
xbmcplugin.addDirectoryItems(_handle, items, len(items))
xbmcplugin.endOfDirectory(_handle)
def coloring(text, meaning):
"""Return the text wrapped in appropriate color markup."""
if meaning == 'cat':
color = 'FF0DF214'
elif meaning == 'time':
color = 'FFF16C00'
colored_text = '[COLOR=%s]%s[/COLOR]' % (color, text)
return colored_text
def list_channels(service):
"""List all channels from the returned dict."""
channels = espn.get_channels(service)
for name, channel_id in channels.items():
listitem = xbmcgui.ListItem(label=name)
listitem.setProperty('IsPlayable', 'true')
art = {'thumb': 'http://a.espncdn.com/prod/assets/watchespn/appletv/images/channels-carousel/%s.png' % channel_id}
# airingId is seoName for live channels
parameters = {'action': 'play_channel', 'airingId': channel_id, 'channel': channel_id}
add_item(name, parameters, playable=True, set_art=art)
xbmcplugin.endOfDirectory(_handle)
def play_video(airingId, channel=None):
try:
espn.login(username, password)
except espn.LoginFailure:
addon_log('login failed')
dialog = xbmcgui.Dialog()
dialog.ok(language(30005),
language(30006))
if channel:
stream_url = espn.get_stream_url(airingId, channel)
else:
stream_url = espn.get_stream_url(airingId)
if stream_url['bitrates']:
bitrate = select_bitrate(stream_url['bitrates'].keys())
if bitrate:
play_url = stream_url['bitrates'][bitrate]
playitem = xbmcgui.ListItem(path=play_url)
playitem.setProperty('IsPlayable', 'true')
xbmcplugin.setResolvedUrl(_handle, True, listitem=playitem)
else:
dialog = xbmcgui.Dialog()
dialog.ok(language(30005), language(30013))
def ask_bitrate(bitrates):
"""Presents a dialog for user to select from a list of bitrates.
Returns the value of the selected bitrate."""
options = []
for bitrate in bitrates:
options.append(bitrate + ' Kbps')
dialog = xbmcgui.Dialog()
ret = dialog.select(language(30010), options)
if ret > -1:
return bitrates[ret]
def select_bitrate(manifest_bitrates=None):
"""Returns a bitrate while honoring the user's preference."""
bitrate_setting = int(addon.getSetting('preferred_bitrate'))
if bitrate_setting == 0:
preferred_bitrate = 'highest'
elif bitrate_setting == 1:
preferred_bitrate = 'limit'
else:
preferred_bitrate = 'ask'
manifest_bitrates.sort(key=int, reverse=True)
if preferred_bitrate == 'highest':
return manifest_bitrates[0]
elif preferred_bitrate == 'limit':
allowed_bitrates = []
max_bitrate_allowed = int(addon.getSetting('max_bitrate_allowed'))
for bitrate in manifest_bitrates:
if max_bitrate_allowed >= int(bitrate):
allowed_bitrates.append(str(bitrate))
if allowed_bitrates:
return allowed_bitrates[0]
else:
return ask_bitrate(manifest_bitrates)
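# Illustration (added): a standalone sketch of the selection rule used by select_bitrate()
# above, decoupled from the add-on settings. '_pick_bitrate' and its arguments are
# hypothetical and unused by the add-on.
def _pick_bitrate(manifest_bitrates, preference='highest', max_allowed=None):
    bitrates = sorted(manifest_bitrates, key=int, reverse=True)
    if preference == 'highest':
        return bitrates[0]
    if preference == 'limit' and max_allowed is not None:
        allowed = [b for b in bitrates if int(b) <= int(max_allowed)]
        if allowed:
            return allowed[0]
    return None  # caller falls back to asking the user, as select_bitrate() does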
def add_item(title, parameters, items=False, folder=True, playable=False, set_info=False, set_art=False,
watched=False, set_content=False):
listitem = xbmcgui.ListItem(label=title)
if playable:
listitem.setProperty('IsPlayable', 'true')
folder = False
if set_art:
listitem.setArt(set_art)
else:
listitem.setArt({'icon': os.path.join(addon_path, 'icon.png')})
listitem.setArt({'fanart': os.path.join(addon_path, 'fanart.jpg')})
if set_info:
listitem.setInfo('video', set_info)
if not watched:
listitem.addStreamInfo('video', {'duration': 0})
if set_content:
xbmcplugin.setContent(_handle, set_content)
listitem.setContentLookup(False) # allows sending custom headers/cookies to ffmpeg
recursive_url = _url + '?' + urllib.urlencode(parameters)
if items is False:
xbmcplugin.addDirectoryItem(_handle, recursive_url, listitem, folder)
else:
items.append((recursive_url, listitem, folder))
return items
def router(paramstring):
"""Router function that calls other functions depending on the provided paramstring."""
params = dict(urlparse.parse_qsl(paramstring))
if params:
if params['action'] == 'main_menu':
main_menu(params['service'])
elif params['action'] == 'list_channels':
list_channels(params['service'])
elif params['action'] == 'list_games':
list_games(params['service'], params['filter_date'], params['filter_games'])
addon_log(params)
elif params['action'] == 'play_video':
play_video(params['airingId'])
elif params['action'] == 'play_channel':
play_video(params['airingId'], params['channel'])
elif params['action'] == 'list_dates':
list_dates(params['service'], params['day'])
elif params['action'] == 'list_today':
list_today(params['service'])
else:
try:
espn.login(username, password)
services_menu()
except espn.LoginFailure:
addon_log('login failed')
dialog = xbmcgui.Dialog()
dialog.ok(language(30005),
language(30006))
sys.exit(0)
if __name__ == '__main__':
router(sys.argv[2][1:]) # trim the leading '?' from the plugin call paramstring
|
# This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
import os
import posixpath
import re
import tarfile
from collections import defaultdict
from datetime import date, datetime
from io import BytesIO
from operator import itemgetter
from uuid import uuid4
import click
import dateutil.parser
import yaml
from flask import current_app
from sqlalchemy import inspect
from terminaltables import AsciiTable
import indico
from indico.core.config import config
from indico.core.db import db
from indico.core.db.sqlalchemy.principals import PrincipalType
from indico.core.db.sqlalchemy.util.models import get_all_models
from indico.core.storage.backend import get_storage
from indico.modules.events import Event, EventLogKind, EventLogRealm
from indico.modules.events.contributions import Contribution
from indico.modules.events.contributions.models.principals import ContributionPrincipal
from indico.modules.events.models.persons import EventPerson
from indico.modules.events.models.principals import EventPrincipal
from indico.modules.events.registration.models.registrations import Registration
from indico.modules.events.sessions import Session
from indico.modules.events.sessions.models.principals import SessionPrincipal
from indico.modules.users import User
from indico.modules.users.util import get_user_by_email
from indico.util.console import cformat
from indico.util.date_time import now_utc
from indico.util.string import strict_str
_notset = object()
def export_event(event, target_file):
"""Export the specified event with all its data to a file.
:param event: the `Event` to export
:param target_file: a file object to write the data to
"""
exporter = EventExporter(event, target_file)
exporter.serialize()
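# Illustration (added): a minimal sketch of calling export_event(), assuming an application
# context and database session are available; the event id and target path are hypothetical.
def _example_export_event(event_id=123, path='/tmp/event-export.tar'):
    event = Event.get(event_id)
    with open(path, 'wb') as f:
        export_event(event, f)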
def import_event(source_file, category_id=0, create_users=None, verbose=False, force=False):
"""Import a previously-exported event.
It is up to the caller of this function to commit the transaction.
:param source_file: An open file object containing the exported event.
:param category_id: ID of the category in which to create the event.
:param create_users: Whether to create missing users or skip them.
If set to None, an interactive prompt is shown
when such users are encountered.
:param verbose: Whether to enable verbose output.
:param force: Whether to ignore version conflicts.
:return: The imported event.
"""
importer = EventImporter(source_file, category_id, create_users, verbose, force)
return importer.deserialize()
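# Illustration (added): a minimal sketch of importing the archive written by export_event();
# as the docstring notes, committing the transaction is up to the caller. The path and
# arguments are hypothetical.
def _example_import_event(path='/tmp/event-export.tar'):
    with open(path, 'rb') as f:
        event = import_event(f, category_id=0, create_users=True, verbose=True)
    db.session.commit()
    return event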
def _model_to_table(name):
"""Resolve a model name to a full table name (unless it's already one)."""
return getattr(db.m, name).__table__.fullname if name[0].isupper() else name
def _make_globals(**extra):
"""
Build a globals dict for the exec/eval environment that contains
all the models and whatever extra data is needed.
"""
globals_ = {cls.__name__: cls for cls in get_all_models() if hasattr(cls, '__table__')}
globals_.update(extra)
return globals_
def _exec_custom(code, **extra):
"""Execute a custom code snippet and return all non-underscored values."""
globals_ = _make_globals(**extra)
locals_ = {}
exec(code, globals_, locals_)
return {str(k): v for k, v in locals_.items() if k[0] != '_'}
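# Illustration (added): _exec_custom() runs the snippet with the model classes plus any extra
# names in scope and returns the resulting locals whose names do not start with an underscore,
# e.g. the hypothetical call below returns {'doubled': 42}. Not used anywhere in this module.
def _example_exec_custom():
    return _exec_custom('doubled = VALUE * 2\n_scratch = None', VALUE=21)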
def _resolve_col(col):
"""Resolve a string or object to a column.
:param col: A string containing a Python expression, a model
attribute or a Column instance.
"""
attr = eval(col, _make_globals()) if isinstance(col, str) else col
if isinstance(attr, db.Column):
return attr
assert len(attr.prop.columns) == 1
return attr.prop.columns[0]
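# Illustration (added): both calls below resolve to the same Column object backing Event.id,
# one from a string expression evaluated against the model globals and one from the model
# attribute itself. Hypothetical helper, unused elsewhere.
def _example_resolve_col():
    return _resolve_col('Event.id'), _resolve_col(Event.id)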
def _get_single_fk(col):
"""Get the single-column FK constraint of the specified column."""
# find the column-specific FK, not some compound fk containing this column
fks = [x for x in col.foreign_keys if len(x.constraint.columns) == 1]
assert len(fks) == 1
return fks[0]
def _get_pk(table):
"""Get the single column that is the table's PK."""
pks = list(inspect(table).primary_key.columns.values())
assert len(pks) == 1
return pks[0]
def _has_single_pk(table):
"""Check if the table has a single PK."""
return len(list(inspect(table).primary_key.columns.values())) == 1
def _get_inserted_pk(result):
"""Get the single PK value inserted by a query."""
assert len(result.inserted_primary_key) == 1
return result.inserted_primary_key[0]
class EventExporter:
def __init__(self, event, target_file):
self.event = event
self.target_file = target_file
self.archive = tarfile.open(mode='w|', fileobj=self.target_file)
self.id_map = defaultdict(dict)
self.used_uuids = set()
self.seen_rows = set()
self.fk_map = self._get_reverse_fk_map()
self.spec = self._load_spec()
self.users = {}
def _add_file(self, name, size, data):
if isinstance(data, bytes):
data = BytesIO(data)
elif isinstance(data, str):
data = BytesIO(data.encode())
info = tarfile.TarInfo(name)
info.size = size
self.archive.addfile(info, data)
def serialize(self):
metadata = {
'timestamp': now_utc(),
'indico_version': indico.__version__,
'objects': list(self._serialize_objects(Event.__table__, Event.id == self.event.id)),
'users': self.users
}
yaml_data = yaml.dump(metadata, indent=2)
self._add_file('data.yaml', len(yaml_data), yaml_data)
def _load_spec(self):
def _process_tablespec(tablename, tablespec):
tablespec.setdefault('cols', {})
tablespec.setdefault('fks', {})
tablespec.setdefault('fks_out', {})
tablespec.setdefault('skipif', None)
tablespec.setdefault('order', None)
tablespec.setdefault('allow_duplicates', False)
fks = {}
for fk_name in tablespec['fks']:
col = _resolve_col(fk_name)
fk = _get_single_fk(col)
fks.setdefault(fk.column.name, []).append(col)
tablespec['fks'] = fks
tablespec['fks_out'] = {fk: _get_single_fk(db.metadata.tables[tablename].c[fk]).column
for fk in tablespec['fks_out']}
return tablespec
with open(os.path.join(current_app.root_path, 'modules', 'events', 'export.yaml')) as f:
spec = yaml.safe_load(f)
return {_model_to_table(k): _process_tablespec(_model_to_table(k), v) for k, v in spec['export'].items()}
def _get_reverse_fk_map(self):
"""Build a mapping between columns and incoming FKs."""
legacy_tables = {'events.legacy_contribution_id_map', 'events.legacy_subcontribution_id_map',
'attachments.legacy_attachment_id_map', 'event_registration.legacy_registration_map',
'events.legacy_session_block_id_map', 'events.legacy_image_id_map',
'events.legacy_session_id_map', 'events.legacy_page_id_map', 'categories.legacy_id_map',
'events.legacy_id_map', 'attachments.legacy_folder_id_map'}
fk_targets = defaultdict(set)
for name, table in db.metadata.tables.items():
if name in legacy_tables:
continue
for column in table.columns:
for fk in column.foreign_keys:
fk_targets[fk.target_fullname].add(fk.parent)
return dict(fk_targets)
def _get_uuid(self):
uuid = str(uuid4())
if uuid in self.used_uuids:
# VERY unlikely but just in case...
return self._get_uuid()
self.used_uuids.add(uuid)
return uuid
def _make_idref(self, column, value, incoming=False, target_column=None):
"""Generate a ID reference.
When generating an incoming ID reference, `column` must be a PK
and point to the column that is referenced by FKs. In this case
the `value` is ignored since it will be auto-generated by the db
when the new row is inserted.
Otherwise, exactly one of `column` or `target_column` must be set.
`column` is the column in the current table that has a FK referencing
some other column.
`target_column` is already the column that is referenced by a FK
in the current table.
"""
assert (column is None) != (target_column is None)
if value is None:
return None
if incoming:
assert column.primary_key
assert target_column is None
fullname = f'{column.table.fullname}.{column.name}'
type_ = 'idref_set'
else:
if target_column is not None:
fullname = f'{target_column.table.fullname}.{target_column.name}'
else:
fk = _get_single_fk(column)
fullname = fk.target_fullname
target_column = fk.column
if target_column is User.__table__.c.id and value is not None:
type_ = 'userref'
else:
type_ = 'idref'
uuid = self.id_map[fullname].setdefault(value, self._get_uuid())
if type_ == 'userref' and uuid not in self.users:
user = User.get(value)
self.users[uuid] = None if user.is_system else {
'first_name': user.first_name,
'last_name': user.last_name,
'title': user._title,
'affiliation': user.affiliation,
'phone': user.phone,
'address': user.address,
'email': user.email,
'all_emails': list(user.all_emails)
}
return type_, uuid
def _make_value(self, value):
"""Convert values that need extra handling."""
if isinstance(value, (date, datetime)):
# YAML loses timezone information for datetime objects so
# we serialize/deserialize it manually
return type(value).__name__, value.isoformat()
elif isinstance(value, bytes) and len(value) > 1000:
# bytestrings (usually binary data, e.g. an event logo) go into
# separate files - YAML handles them well (base64) but it just
# makes the YAML file larger, which is kind of ugly
uuid = self._get_uuid()
self._add_file(uuid, len(value), value)
return 'binary', uuid
elif isinstance(value, tuple):
# XXX: We don't expect any columns to have tuple data, but
# if we do we need to convert them to `('tuple', value)`
# since we expect any tuple to indicate `(type, value)`
# instead of a plain value that can be used directly
raise ValueError('tuples not handled')
else:
return value
def _process_file(self, data):
"""Copy a file from storage into the export archive."""
if data.get('storage_file_id') is None:
return
assert '__file__' not in data # only one file per row allowed
storage_backend = data.pop('storage_backend')
storage_file_id = data.pop('storage_file_id')
filename = data.pop('filename')
content_type = data.pop('content_type')
size = data.pop('size')
md5 = data.pop('md5')
uuid = self._get_uuid()
with get_storage(storage_backend).open(storage_file_id) as f:
self._add_file(uuid, size, f)
data['__file__'] = ('file', {'uuid': uuid, 'filename': filename, 'content_type': content_type, 'size': size,
'md5': md5})
def _serialize_objects(self, table, filter_):
spec = self.spec[table.fullname]
query = db.session.query(table).filter(filter_)
if spec['order']:
# Use a custom order instead of whatever the DB uses by default.
# This is mainly needed for self-referential FKs and CHECK
# constraints that require certain objects to be exported before
# the ones referencing them
order = eval(spec['order'], _make_globals())
if not isinstance(order, tuple):
order = (order,)
query = query.order_by(*order)
query = query.order_by(*table.primary_key.columns)
cascaded = []
for row in query:
if spec['skipif'] and eval(spec['skipif'], _make_globals(ROW=row)):
continue
rowdict = row._asdict()
pk = tuple(v for k, v in rowdict.items() if table.c[k].primary_key)
if (table.fullname, pk) in self.seen_rows:
if spec['allow_duplicates']:
continue
else:
raise Exception('Trying to serialize already-serialized row')
self.seen_rows.add((table.fullname, pk))
data = {}
for col, value in rowdict.items():
col = str(col) # col names are `quoted_name` objects
col_fullname = f'{table.fullname}.{col}'
col_custom = spec['cols'].get(col, _notset)
colspec = table.c[col]
if col_custom is None:
# column is explicitly excluded
continue
elif col_custom is not _notset:
# column has custom code to process its value (and possibly name)
if value is not None:
def _get_event_idref():
key = f'{Event.__table__.fullname}.{Event.id.name}'
assert key in self.id_map
return 'idref', self.id_map[key][self.event.id]
def _make_id_ref(target, id_):
return self._make_idref(None, id_, target_column=_resolve_col(target))
data.update(_exec_custom(col_custom, VALUE=value, MAKE_EVENT_REF=_get_event_idref,
MAKE_ID_REF=_make_id_ref))
elif col_fullname in self.fk_map:
# an FK references this column -> generate a uuid
data[col] = self._make_idref(colspec, value, incoming=colspec.primary_key)
elif colspec.foreign_keys:
# column is an FK
data[col] = self._make_idref(colspec, value)
elif colspec.primary_key:
# column is a PK with no incoming FKs -> no need to track the ID
pass
else:
# not an fk
data.setdefault(col, self._make_value(value))
self._process_file(data)
# export objects referenced in outgoing FKs before the row
# itself as the FK column might not be nullable
for col, fk in spec['fks_out'].items():
value = rowdict[col]
yield from self._serialize_objects(fk.table, value == fk)
yield table.fullname, data
# serialize objects referencing the current row, but don't export them yet
for col, fks in spec['fks'].items():
value = rowdict[col]
cascaded += [x for fk in fks for x in self._serialize_objects(fk.table, value == fk)]
# we only add incoming fks after being done with all objects in case one
# of the referenced objects references another object from the current table
# that has not been serialized yet (e.g. abstract reviews proposing as duplicate)
yield from cascaded
class EventImporter:
def __init__(self, source_file, category_id=0, create_users=None, verbose=False, force=False):
self.source_file = source_file
self.category_id = category_id
self.create_users = create_users
self.verbose = verbose
self.force = force
self.archive = tarfile.open(fileobj=source_file)
self.data = yaml.unsafe_load(self.archive.extractfile('data.yaml'))
self.id_map = {}
self.user_map = {}
self.event_id = None
self.system_user_id = User.get_system_user().id
self.spec = self._load_spec()
self.deferred_idrefs = defaultdict(set)
def _load_spec(self):
def _resolve_col_name(col):
colspec = _resolve_col(col)
return f'{colspec.table.fullname}.{colspec.name}'
def _process_format(fmt, _re=re.compile(r'<([^>]+)>')):
fmt = _re.sub(r'%{reset}%{cyan}\1%{reset}%{blue!}', fmt)
return cformat('- %{blue!}' + fmt)
with open(os.path.join(current_app.root_path, 'modules', 'events', 'export.yaml')) as f:
spec = yaml.safe_load(f)
spec = spec['import']
spec['defaults'] = {_model_to_table(k): v for k, v in spec.get('defaults', {}).items()}
spec['custom'] = {_model_to_table(k): v for k, v in spec.get('custom', {}).items()}
spec['missing_users'] = {_resolve_col_name(k): v for k, v in spec.get('missing_users', {}).items()}
spec['verbose'] = {_model_to_table(k): _process_format(v) for k, v in spec.get('verbose', {}).items()}
return spec
def _load_users(self, data):
if not data['users']:
return
missing = {}
for uuid, userdata in data['users'].items():
if userdata is None:
self.user_map[uuid] = self.system_user_id
continue
user = (User.query
.filter(User.all_emails.in_(userdata['all_emails']),
~User.is_deleted)
.first())
if user is None:
missing[uuid] = userdata
else:
self.user_map[uuid] = user.id
if missing:
click.secho('The following users from the import data could not be mapped to existing users:', fg='yellow')
table_data = [['First Name', 'Last Name', 'Email', 'Affiliation']]
for userdata in sorted(missing.values(), key=itemgetter('first_name', 'last_name', 'email')):
table_data.append([userdata['first_name'], userdata['last_name'], userdata['email'],
userdata['affiliation']])
table = AsciiTable(table_data)
click.echo(table.table)
if self.create_users is None:
click.echo('Do you want to create these users now?')
click.echo('If you choose to not create them, the behavior depends on where the user would be used:')
click.echo('- If the user is not required, it will be omitted.')
click.echo('- If a user is required but using the system user will not cause any problems or look '
'weird, the system user will be used.')
click.echo('- In case neither is possible, e.g. in abstract reviews or ACLs, these objects will '
'be skipped altogether!')
create_users = click.confirm('Create the missing users?', default=True)
else:
create_users = self.create_users
if create_users:
click.secho('Creating missing users', fg='magenta')
for uuid, userdata in missing.items():
user = User(first_name=userdata['first_name'],
last_name=userdata['last_name'],
email=userdata['email'],
secondary_emails=set(userdata['all_emails']) - {userdata['email']},
address=userdata['address'],
phone=userdata['phone'],
affiliation=userdata['affiliation'],
title=userdata['title'],
is_pending=True)
db.session.add(user)
db.session.flush()
self.user_map[uuid] = user.id
if self.verbose:
click.echo(cformat("- %{cyan}User%{blue!} '{}' ({})").format(user.full_name, user.email))
else:
click.secho('Skipping missing users', fg='magenta')
def deserialize(self):
if not self.force and self.data['indico_version'] != indico.__version__:
click.secho('Version mismatch: trying to import event exported with {} to version {}'
.format(self.data['indico_version'], indico.__version__), fg='red')
return None
self._load_users(self.data)
# we need the event first since it generates the event id, which may be needed
# in case of outgoing FKs on the event model
objects = sorted(self.data['objects'], key=lambda x: x[0] != 'events.events')
for tablename, tabledata in objects:
self._deserialize_object(db.metadata.tables[tablename], tabledata)
if self.deferred_idrefs:
# Any reference to an ID that was exported needs to be replaced
# with an actual ID at some point - either immediately (if the
# referenced row was already imported) or later (usually in case
# of circular dependencies where one of the IDs is not available
# when the row is inserted).
click.secho('BUG: Not all deferred idrefs have been consumed', fg='red')
for uuid, values in self.deferred_idrefs.items():
click.secho(f'{uuid}:', fg='yellow', bold=True)
for table, col, pk_value in values:
click.secho(f' - {table.fullname}.{col} ({pk_value})', fg='yellow')
raise Exception('Not all deferred idrefs have been consumed')
event = Event.get(self.event_id)
event.log(EventLogRealm.event, EventLogKind.other, 'Event', 'Event imported from another Indico instance')
self._associate_users_by_email(event)
db.session.flush()
return event
def _associate_users_by_email(self, event):
# link objects to users by email where possible
# event principals
emails = [p.email for p in EventPrincipal.query.with_parent(event).filter_by(type=PrincipalType.email)]
for user in User.query.filter(~User.is_deleted, User.all_emails.in_(emails)):
EventPrincipal.replace_email_with_user(user, 'event')
# session principals
query = (SessionPrincipal.query
.filter(SessionPrincipal.session.has(Session.event == event),
SessionPrincipal.type == PrincipalType.email))
emails = [p.email for p in query]
for user in User.query.filter(~User.is_deleted, User.all_emails.in_(emails)):
SessionPrincipal.replace_email_with_user(user, 'session')
# contribution principals
query = (ContributionPrincipal.query
.filter(ContributionPrincipal.contribution.has(Contribution.event == event),
ContributionPrincipal.type == PrincipalType.email))
emails = [p.email for p in query]
for user in User.query.filter(~User.is_deleted, User.all_emails.in_(emails)):
ContributionPrincipal.replace_email_with_user(user, 'contribution')
# event persons
query = EventPerson.query.with_parent(event).filter(EventPerson.user_id.is_(None), EventPerson.email != '')
for person in query:
person.user = get_user_by_email(person.email)
# registrations
for registration in Registration.query.with_parent(event).filter(Registration.user_id.is_(None)):
registration.user = get_user_by_email(registration.email)
def _convert_value(self, colspec, value):
if not isinstance(value, tuple):
return value
type_, value = value
if type_ == 'datetime':
return dateutil.parser.parse(value)
elif type_ == 'date':
return dateutil.parser.parse(value).date()
elif type_ == 'binary':
return self.archive.extractfile(value).read()
elif type_ == 'idref':
try:
rv = self.id_map[value]
except KeyError:
raise IdRefDeferred(value)
if rv is None:
raise MissingUserCascaded
return rv
elif type_ == 'userref':
try:
return self.user_map[value]
except KeyError:
mode = self.spec['missing_users'][f'{colspec.table.fullname}.{colspec.name}']
if mode == 'system':
return self.system_user_id
elif mode == 'none':
return None
elif mode == 'skip':
raise MissingUser(self.data['users'][value], skip=True)
else:
raise MissingUser(self.data['users'][value], run=mode)
else:
raise ValueError('unknown type: ' + type_)
def _get_file_storage_path(self, id_, filename):
# we use a generic path to store all imported files since we
# are on the table level here and thus cannot use relationships
# and the original models' logic to construct paths
path_segments = ['event', strict_str(self.event_id), 'imported']
filename = f'{id_}-{filename}'
path = posixpath.join(*(path_segments + [filename]))
return path
def _process_file(self, id_, data):
storage_backend = config.ATTACHMENT_STORAGE
storage = get_storage(storage_backend)
extracted = self.archive.extractfile(data['uuid'])
path = self._get_file_storage_path(id_, data['filename'])
storage_file_id, md5 = storage.save(path, data['content_type'], data['filename'], extracted)
assert data['size'] == storage.getsize(storage_file_id)
if data['md5']:
assert data['md5'] == md5
return {
'storage_backend': storage_backend,
'storage_file_id': storage_file_id,
'content_type': data['content_type'],
'filename': data['filename'],
'size': data['size'],
'md5': md5
}
def _deserialize_object(self, table, data):
is_event = (table == Event.__table__)
import_defaults = self.spec['defaults'].get(table.fullname, {})
import_custom = self.spec['custom'].get(table.fullname, {})
set_idref = None
file_data = None
insert_values = dict(import_defaults)
deferred_idrefs = {}
missing_user_skip = False
missing_user_exec = set()
if is_event:
# the exported data may contain only one event
assert self.event_id is None
insert_values['category_id'] = self.category_id
for col, value in data.items():
if isinstance(value, tuple):
if value[0] == 'idref_set':
assert set_idref is None
set_idref = value[1]
continue
elif value[0] == 'file':
# import files later in case we end up skipping the column due to a missing user
assert file_data is None
file_data = value[1]
continue
colspec = table.c[col]
if col in import_custom:
# custom python code to process the imported value
def _resolve_id_ref(value):
return self._convert_value(colspec, value)
rv = _exec_custom(import_custom[col], VALUE=value, RESOLVE_ID_REF=_resolve_id_ref)
assert list(rv.keys()) == [col]
insert_values[col] = rv[col]
continue
try:
insert_values[col] = self._convert_value(colspec, value)
except IdRefDeferred as exc:
deferred_idrefs[col] = exc.uuid
except MissingUser as exc:
if exc.skip:
click.secho(f'! Skipping row in {table.fullname} due to missing user ({exc.username})',
fg='yellow')
missing_user_skip = True
else:
missing_user_exec.add(exc.run)
except MissingUserCascaded:
click.secho('! Skipping row in {} as parent row was skipped due to a missing user'
.format(table.fullname), fg='yellow')
missing_user_skip = True
if missing_user_skip:
# skipped row due to a missing user? mark it as skipped so
# anything referencing it will also be skipped
if set_idref is not None:
self.id_map[set_idref] = None
return
elif missing_user_exec:
# run custom code to deal with missing users
for code in missing_user_exec:
insert_values.update(_exec_custom(code))
if file_data is not None:
if _has_single_pk(table):
# restore a file from the import archive and save it in storage
pk_name = _get_pk(table).name
assert pk_name not in insert_values
# get an ID early since we use it in the filename
stmt = db.func.nextval(db.func.pg_get_serial_sequence(table.fullname, pk_name))
insert_values[pk_name] = pk_value = db.session.query(stmt).scalar()
insert_values.update(self._process_file(pk_value, file_data))
else:
insert_values.update(self._process_file(str(uuid4()), file_data))
if self.verbose and table.fullname in self.spec['verbose']:
fmt = self.spec['verbose'][table.fullname]
click.echo(fmt.format(**insert_values))
res = db.session.execute(table.insert(), insert_values)
if set_idref is not None:
# if a column was marked as having incoming FKs, store
# the ID so the reference can be resolved to the ID
self._set_idref(set_idref, _get_inserted_pk(res))
if is_event:
self.event_id = _get_inserted_pk(res)
for col, uuid in deferred_idrefs.items():
# store all the data needed to resolve a deferred ID reference
# later once the ID is available
self.deferred_idrefs[uuid].add((table, col, _get_inserted_pk(res)))
def _set_idref(self, uuid, id_):
self.id_map[uuid] = id_
# update all the previously-deferred ID references
for table, col, pk_value in self.deferred_idrefs.pop(uuid, ()):
pk = _get_pk(table)
db.session.execute(table.update().where(pk == pk_value).values({col: id_}))
class IdRefDeferred(Exception):
def __init__(self, uuid):
self.uuid = uuid
class MissingUser(Exception):
def __init__(self, userdata, skip=False, run=None):
self.skip = skip
self.run = run
assert self.skip != bool(self.run)
self.userdata = userdata
@property
def username(self):
return '{} {} <{}>'.format(self.userdata['first_name'], self.userdata['last_name'], self.userdata['email'])
class MissingUserCascaded(Exception):
pass
|
# -*- coding:utf-8 -*-
"""
Trading data interface
Created on 2014/07/31
@author: Jimmy Liu
@group : waditu
@contact: [email protected]
"""
from __future__ import division
import time
import json
import lxml.html
from lxml import etree
import pandas as pd
import numpy as np
from tushare.stock import cons as ct
import re
from pandas.compat import StringIO
from tushare.util import dateu as du
try:
from urllib.request import urlopen, Request
except ImportError:
from urllib2 import urlopen, Request
def get_hist_data(code=None, start=None, end=None,
ktype='D', retry_count=3,
pause=0.001):
"""
Get historical trading data for an individual stock
Parameters
------
code:string
stock code, e.g. 600848
start:string
start date, format YYYY-MM-DD; if empty, data starts from the earliest date the API provides
end:string
end date, format YYYY-MM-DD; if empty, data runs up to the most recent trading day
ktype:string
data type: D=daily k-line, W=weekly, M=monthly, 5=5-minute, 15=15-minute, 30=30-minute, 60=60-minute bars; default is D
retry_count : int, default 3
number of retries in case of network or similar problems
pause : int, default 0.001
seconds to pause between repeated requests, to avoid problems caused by requesting too frequently
return
-------
DataFrame
columns: date, open, high, close, low, volume, price change, pct change, 5-/10-/20-day moving average price, 5-/10-/20-day moving average volume, turnover rate
"""
symbol = _code_to_symbol(code)
url = ''
if ktype.upper() in ct.K_LABELS:
url = ct.DAY_PRICE_URL%(ct.P_TYPE['http'], ct.DOMAINS['ifeng'],
ct.K_TYPE[ktype.upper()], symbol)
elif ktype in ct.K_MIN_LABELS:
url = ct.DAY_PRICE_MIN_URL%(ct.P_TYPE['http'], ct.DOMAINS['ifeng'],
symbol, ktype)
else:
raise TypeError('ktype input error.')
for _ in range(retry_count):
time.sleep(pause)
try:
request = Request(url)
lines = urlopen(request, timeout = 10).read()
if len(lines) < 15: #no data
return None
except Exception as e:
print(e)
else:
js = json.loads(lines.decode('utf-8') if ct.PY3 else lines)
cols = []
if (code in ct.INDEX_LABELS) & (ktype.upper() in ct.K_LABELS):
cols = ct.INX_DAY_PRICE_COLUMNS
else:
cols = ct.DAY_PRICE_COLUMNS
if len(js['record'][0]) == 14:
cols = ct.INX_DAY_PRICE_COLUMNS
df = pd.DataFrame(js['record'], columns=cols)
if ktype.upper() in ['D', 'W', 'M']:
df = df.applymap(lambda x: x.replace(u',', u''))
for col in cols[1:]:
df[col] = df[col].astype(float)
if start is not None:
df = df[df.date >= start]
if end is not None:
df = df[df.date <= end]
if (code in ct.INDEX_LABELS) & (ktype in ct.K_MIN_LABELS):
df = df.drop('turnover', axis=1)
df = df.set_index('date')
return df
raise IOError(ct.NETWORK_URL_ERROR_MSG)
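# Illustration (added): minimal usage sketches for get_hist_data() above, using the docstring's
# example code 600848; dates are hypothetical. Wrapped in an unused function so that no network
# request is made at import time.
def _example_get_hist_data():
    daily = get_hist_data('600848')                                  # daily k-line, full history
    weekly = get_hist_data('600848', start='2014-01-01', ktype='W')  # weekly bars since 2014
    minutes5 = get_hist_data('600848', ktype='5')                    # 5-minute bars
    return daily, weekly, minutes5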
def _parsing_dayprice_json(pageNum=1):
"""
Parse one page of the current day's quote data (JSON format)
Parameters
------
pageNum: page number
return
-------
DataFrame with the day's trading data for all stocks
"""
ct._write_console()
request = Request(ct.SINA_DAY_PRICE_URL%(ct.P_TYPE['http'], ct.DOMAINS['vsf'],
ct.PAGES['jv'], pageNum))
text = urlopen(request, timeout=10).read()
if text == 'null':
return None
reg = re.compile(r'\,(.*?)\:')
text = reg.sub(r',"\1":', text.decode('gbk') if ct.PY3 else text)
text = text.replace('"{symbol', '{"symbol')
text = text.replace('{symbol', '{"symbol"')
if ct.PY3:
jstr = json.dumps(text)
else:
jstr = json.dumps(text, encoding='GBK')
js = json.loads(jstr)
df = pd.DataFrame(pd.read_json(js, dtype={'code':object}),
columns=ct.DAY_TRADING_COLUMNS)
df = df.drop('symbol', axis=1)
df = df.ix[df.volume > 0]
return df
def get_tick_data(code=None, date=None, retry_count=3, pause=0.001):
"""
Get tick-by-tick trade data
Parameters
------
code:string
stock code, e.g. 600848
date:string
date, format YYYY-MM-DD
retry_count : int, default 3
number of retries in case of network or similar problems
pause : int, default 0.001
seconds to pause between repeated requests, to avoid problems caused by requesting too frequently
return
-------
DataFrame with the day's tick data for the stock
columns: trade time, price, price change, volume (lots), amount (CNY), trade type (buy/sell/neutral)
"""
if code is None or len(code)!=6 or date is None:
return None
symbol = _code_to_symbol(code)
for _ in range(retry_count):
time.sleep(pause)
try:
request = Request(ct.TICK_PRICE_URL % (ct.P_TYPE['http'], ct.DOMAINS['sf'], ct.PAGES['dl'],
date, symbol))
lines = urlopen(request, timeout=10).read()
lines = lines.decode('GBK')
if len(lines) < 100:
return None
df = pd.read_table(StringIO(lines), names=ct.TICK_COLUMNS,
skiprows=[0])
except Exception as e:
print(e)
else:
return df
raise IOError(ct.NETWORK_URL_ERROR_MSG)
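# Illustration (added): minimal usage sketch for get_tick_data() above; the code follows the
# docstring's example and the date is hypothetical. Wrapped in an unused function so that no
# network request is made at import time.
def _example_get_tick_data():
    return get_tick_data('600848', date='2014-01-09')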
def get_today_ticks(code=None, retry_count=3, pause=0.001):
"""
Get today's detailed tick-by-tick data
Parameters
------
code:string
stock code, e.g. 600848
retry_count : int, default 3
number of retries in case of network or similar problems
pause : int, default 0.001
seconds to pause between repeated requests, to avoid problems caused by requesting too frequently
return
-------
DataFrame with today's tick data for the stock
columns: trade time, price, price change, volume (lots), amount (CNY), trade type (buy/sell/neutral)
"""
if code is None or len(code)!=6 :
return None
symbol = _code_to_symbol(code)
date = du.today()
try:
request = Request(ct.TODAY_TICKS_PAGE_URL % (ct.P_TYPE['http'], ct.DOMAINS['vsf'],
ct.PAGES['jv'], date,
symbol))
data_str = urlopen(request, timeout=10).read()
data_str = data_str.decode('GBK')
data_str = data_str[1:-1]
data_str = eval(data_str, type('Dummy', (dict,),
dict(__getitem__ = lambda s, n:n))())
data_str = json.dumps(data_str)
data_str = json.loads(data_str)
pages = len(data_str['detailPages'])
data = pd.DataFrame()
ct._write_head()
for pNo in range(1, pages):
data = data.append(_today_ticks(symbol, date, pNo,
retry_count, pause), ignore_index=True)
except Exception as er:
print(str(er))
return data
def _today_ticks(symbol, tdate, pageNo, retry_count, pause):
ct._write_console()
for _ in range(retry_count):
time.sleep(pause)
try:
html = lxml.html.parse(ct.TODAY_TICKS_URL % (ct.P_TYPE['http'],
ct.DOMAINS['vsf'], ct.PAGES['t_ticks'],
symbol, tdate, pageNo
))
res = html.xpath('//table[@id=\"datatbl\"]/tbody/tr')
if ct.PY3:
sarr = [etree.tostring(node).decode('utf-8') for node in res]
else:
sarr = [etree.tostring(node) for node in res]
sarr = ''.join(sarr)
sarr = '<table>%s</table>'%sarr
sarr = sarr.replace('--', '0')
df = pd.read_html(StringIO(sarr), parse_dates=False)[0]
df.columns = ct.TODAY_TICK_COLUMNS
df['pchange'] = df['pchange'].map(lambda x : x.replace('%', ''))
except Exception as e:
print(e)
else:
return df
raise IOError(ct.NETWORK_URL_ERROR_MSG)
def get_today_all():
"""
Fetch, in one call, the trading data of all stocks for the most recent trading day
return
-------
DataFrame
columns: code, name, pct change, current price, open, high, low, previous close, volume, turnover rate
"""
ct._write_head()
df = _parsing_dayprice_json(1)
if df is not None:
for i in range(2, ct.PAGE_NUM[0]):
newdf = _parsing_dayprice_json(i)
df = df.append(newdf, ignore_index=True)
return df
def get_realtime_quotes(symbols=None):
"""
Get real-time quote data
Useful for tracking trading activity (this call's result minus the previous call's data)
Parameters
------
symbols : string, array-like object (list, tuple, Series).
return
-------
DataFrame with real-time quote data
columns: 0: name, stock name
1: open, today's opening price
2: pre_close, previous closing price
3: price, current price
4: high, today's high
5: low, today's low
6: bid, best bid price ("buy one")
7: ask, best ask price ("sell one")
8: volumn, traded volume (you may need volumn/100)
9: amount, traded amount (CNY)
10: b1_v, bid-1 volume (lots)
11: b1_p, bid-1 price
12: b2_v, bid-2 volume
13: b2_p, bid-2 price
14: b3_v, bid-3 volume
15: b3_p, bid-3 price
16: b4_v, bid-4 volume
17: b4_p, bid-4 price
18: b5_v, bid-5 volume
19: b5_p, bid-5 price
20: a1_v, ask-1 volume (lots)
21: a1_p, ask-1 price
...
30: date, date
31: time, time
"""
symbols_list = ''
if isinstance(symbols, list) or isinstance(symbols, set) or isinstance(symbols, tuple) or isinstance(symbols, pd.Series):
for code in symbols:
symbols_list += _code_to_symbol(code) + ','
else:
symbols_list = _code_to_symbol(symbols)
symbols_list = symbols_list[:-1] if len(symbols_list) > 8 else symbols_list
request = Request(ct.LIVE_DATA_URL%(ct.P_TYPE['http'], ct.DOMAINS['sinahq'],
_random(), symbols_list))
text = urlopen(request,timeout=10).read()
text = text.decode('GBK')
reg = re.compile(r'\="(.*?)\";')
data = reg.findall(text)
regSym = re.compile(r'(?:sh|sz)(.*?)\=')
syms = regSym.findall(text)
data_list = []
syms_list = []
for index, row in enumerate(data):
if len(row)>1:
data_list.append([astr for astr in row.split(',')])
syms_list.append(syms[index])
if len(syms_list) == 0:
return None
df = pd.DataFrame(data_list, columns=ct.LIVE_DATA_COLS)
df = df.drop('s', axis=1)
df['code'] = syms_list
ls = [cls for cls in df.columns if '_v' in cls]
for txt in ls:
df[txt] = df[txt].map(lambda x : x[:-2])
return df
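# Illustration (added): minimal usage sketches for get_realtime_quotes() above, for a single
# code and for a list of codes (the isinstance check above also accepts tuples, sets and
# pandas Series). Wrapped in an unused function so that no network request is made at import.
def _example_get_realtime_quotes():
    single = get_realtime_quotes('600848')
    several = get_realtime_quotes(['600848', '000001'])
    return single, several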
def get_h_data(code, start=None, end=None, autype='qfq',
index=False, retry_count=3, pause=0.001):
'''
Get price-adjusted historical data
Parameters
------
code:string
stock code, e.g. 600848
start:string
start date, format YYYY-MM-DD; if empty, defaults to one year ago today
end:string
end date, format YYYY-MM-DD; if empty, defaults to today
autype:string
adjustment type: qfq = forward-adjusted, hfq = backward-adjusted, None = unadjusted; default is qfq
retry_count : int, default 3
number of retries in case of network or similar problems
pause : int, default 0.001
seconds to pause between repeated requests, to avoid problems caused by requesting too frequently
return
-------
DataFrame
date trading date (index)
open opening price
high highest price
close closing price
low lowest price
volume traded volume
amount traded amount
'''
start = du.today_last_year() if start is None else start
end = du.today() if end is None else end
qs = du.get_quarts(start, end)
qt = qs[0]
ct._write_head()
data = _parse_fq_data(_get_index_url(index, code, qt), index,
retry_count, pause)
if len(qs)>1:
for d in range(1, len(qs)):
qt = qs[d]
ct._write_console()
df = _parse_fq_data(_get_index_url(index, code, qt), index,
retry_count, pause)
data = data.append(df, ignore_index=True)
if len(data) == 0 or len(data[(data.date>=start)&(data.date<=end)]) == 0:
return None
data = data.drop_duplicates('date')
if index:
data = data[(data.date>=start) & (data.date<=end)]
data = data.set_index('date')
data = data.sort_index(ascending=False)
return data
if autype == 'hfq':
data = data.drop('factor', axis=1)
data = data[(data.date>=start) & (data.date<=end)]
for label in ['open', 'high', 'close', 'low']:
data[label] = data[label].map(ct.FORMAT)
data[label] = data[label].astype(float)
data = data.set_index('date')
data = data.sort_index(ascending = False)
return data
else:
if autype == 'qfq':
data = data.drop('factor', axis=1)
df = _parase_fq_factor(code, start, end)
df = df.drop_duplicates('date')
df = df.sort('date', ascending=False)
frow = df.head(1)
rt = get_realtime_quotes(code)
if rt is None:
return None
if ((float(rt['high']) == 0) & (float(rt['low']) == 0)):
preClose = float(rt['pre_close'])
else:
if du.is_holiday(du.today()):
preClose = float(rt['price'])
else:
print(du.get_hour())
print((du.get_hour() > 9) & (du.get_hour() < 18) )
if (du.get_hour() > 9) & (du.get_hour() < 18):
preClose = float(rt['pre_close'])
else:
preClose = float(rt['price'])
rate = float(frow['factor']) / preClose
data = data[(data.date >= start) & (data.date <= end)]
for label in ['open', 'high', 'low', 'close']:
data[label] = data[label] / rate
data[label] = data[label].map(ct.FORMAT)
data[label] = data[label].astype(float)
data = data.set_index('date')
data = data.sort_index(ascending = False)
return data
else:
for label in ['open', 'high', 'close', 'low']:
data[label] = data[label] / data['factor']
data = data.drop('factor', axis=1)
data = data[(data.date>=start) & (data.date<=end)]
for label in ['open', 'high', 'close', 'low']:
data[label] = data[label].map(ct.FORMAT)
data = data.set_index('date')
data = data.sort_index(ascending=False)
data = data.astype(float)
return data
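# Hedged usage sketch (illustrative only, requires network access): the three
# adjustment modes accepted by get_h_data(). The code and dates are examples.
def _example_get_h_data():
    qfq = get_h_data('600848')                # forward adjusted (default)
    hfq = get_h_data('600848', autype='hfq')  # backward adjusted
    raw = get_h_data('600848', start='2015-01-05', end='2015-03-16', autype=None)  # unadjusted
    return qfq, hfq, raw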
def _parase_fq_factor(code, start, end):
symbol = _code_to_symbol(code)
request = Request(ct.HIST_FQ_FACTOR_URL%(ct.P_TYPE['http'],
ct.DOMAINS['vsf'], symbol))
text = urlopen(request, timeout=10).read()
text = text[1:len(text)-1]
text = text.decode('utf-8') if ct.PY3 else text
text = text.replace('{_', '{"')
text = text.replace('total', '"total"')
text = text.replace('data', '"data"')
text = text.replace(':"', '":"')
text = text.replace('",_', '","')
text = text.replace('_', '-')
text = json.loads(text)
df = pd.DataFrame({'date':list(text['data'].keys()), 'factor':list(text['data'].values())})
df['date'] = df['date'].map(_fun_except) # for null case
if df['date'].dtypes == np.object:
df['date'] = df['date'].astype(np.datetime64)
df = df.drop_duplicates('date')
df['factor'] = df['factor'].astype(float)
return df
def _fun_except(x):
if len(x) > 10:
return x[-10:]
else:
return x
def _parse_fq_data(url, index, retry_count, pause):
for _ in range(retry_count):
time.sleep(pause)
try:
request = Request(url)
text = urlopen(request, timeout=10).read()
text = text.decode('GBK')
html = lxml.html.parse(StringIO(text))
res = html.xpath('//table[@id=\"FundHoldSharesTable\"]')
if ct.PY3:
sarr = [etree.tostring(node).decode('utf-8') for node in res]
else:
sarr = [etree.tostring(node) for node in res]
sarr = ''.join(sarr)
df = pd.read_html(sarr, skiprows = [0, 1])[0]
if len(df) == 0:
return pd.DataFrame()
if index:
df.columns = ct.HIST_FQ_COLS[0:7]
else:
df.columns = ct.HIST_FQ_COLS
if df['date'].dtypes == np.object:
df['date'] = df['date'].astype(np.datetime64)
df = df.drop_duplicates('date')
except Exception as e:
print(e)
else:
return df
raise IOError(ct.NETWORK_URL_ERROR_MSG)
def get_index():
"""
    Fetch quotes for the major market indices.
    return
    -------
      DataFrame
          code: index code
          name: index name
          change: percentage change
          open: opening price
          preclose: previous closing price
          close: closing price
          high: highest price
          low: lowest price
          volume: traded volume (lots)
          amount: traded amount (in 100 million CNY)
"""
request = Request(ct.INDEX_HQ_URL%(ct.P_TYPE['http'],
ct.DOMAINS['sinahq']))
text = urlopen(request, timeout=10).read()
text = text.decode('GBK')
text = text.replace('var hq_str_sh', '').replace('var hq_str_sz', '')
text = text.replace('";', '').replace('"', '').replace('=', ',')
text = '%s%s'%(ct.INDEX_HEADER, text)
df = pd.read_csv(StringIO(text), sep=',', thousands=',')
df['change'] = (df['close'] / df['preclose'] - 1 ) * 100
df['amount'] = df['amount'] / 100000000
df['change'] = df['change'].map(ct.FORMAT)
df['amount'] = df['amount'].map(ct.FORMAT)
df = df[ct.INDEX_COLS]
df['code'] = df['code'].map(lambda x:str(x).zfill(6))
df['change'] = df['change'].astype(float)
df['amount'] = df['amount'].astype(float)
return df
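# Hedged usage sketch (illustrative, requires network access): get_index() returns
# one row per index, and the zero-padded `code` column is the usual key for
# selecting a single index.
def _example_get_index():
    df = get_index()
    return df[df.code == '000001']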
def _get_index_url(index, code, qt):
if index:
url = ct.HIST_INDEX_URL%(ct.P_TYPE['http'], ct.DOMAINS['vsf'],
code, qt[0], qt[1])
else:
url = ct.HIST_FQ_URL%(ct.P_TYPE['http'], ct.DOMAINS['vsf'],
code, qt[0], qt[1])
return url
def get_hists(symbols, start=None, end=None,
ktype='D', retry_count=3,
pause=0.001):
"""
    Fetch historical quotes for a batch of symbols; see get_hist_data for the parameters and the returned data type.
"""
df = pd.DataFrame()
if isinstance(symbols, list) or isinstance(symbols, set) or isinstance(symbols, tuple) or isinstance(symbols, pd.Series):
for symbol in symbols:
data = get_hist_data(symbol, start=start, end=end,
ktype=ktype, retry_count=retry_count,
pause=pause)
data['code'] = symbol
df = df.append(data, ignore_index=True)
return df
else:
return None
def _random(n=13):
from random import randint
start = 10**(n-1)
end = (10**n)-1
return str(randint(start, end))
def _code_to_symbol(code):
"""
        Build the exchange-prefixed symbol (sh/sz) for a stock code.
"""
if code in ct.INDEX_LABELS:
return ct.INDEX_LIST[code]
else:
if len(code) != 6 :
return ''
else:
return 'sh%s'%code if code[:1] in ['5', '6'] else 'sz%s'%code
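# Hedged sketch of the prefix rule implemented above: six-digit codes starting
# with '5' or '6' map to Shanghai ('sh...'), other six-digit codes to Shenzhen
# ('sz...'), anything else to '' (index labels are looked up in ct.INDEX_LIST).
def _example_code_to_symbol():
    return (_code_to_symbol('600848'),  # -> 'sh600848'
            _code_to_symbol('000001'),  # -> 'sz000001' (unless listed in ct.INDEX_LABELS)
            _code_to_symbol('12345'))   # -> '' (not a six-digit code)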
|
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import sys
import oslo_utils.strutils as strutils
from glance import i18n
try:
import dns # NOQA
except ImportError:
dnspython_installed = False
else:
dnspython_installed = True
def fix_greendns_ipv6():
if dnspython_installed:
# All of this is because if dnspython is present in your environment
# then eventlet monkeypatches socket.getaddrinfo() with an
# implementation which doesn't work for IPv6. What we're checking here
# is that the magic environment variable was set when the import
# happened.
nogreendns = 'EVENTLET_NO_GREENDNS'
flag = os.environ.get(nogreendns, '')
if 'eventlet' in sys.modules and not strutils.bool_from_string(flag):
msg = i18n._("It appears that the eventlet module has been "
"imported prior to setting %s='yes'. It is currently "
"necessary to disable eventlet.greendns "
"if using ipv6 since eventlet.greendns currently "
"breaks with ipv6 addresses. Please ensure that "
"eventlet is not imported prior to this being set.")
raise ImportError(msg % nogreendns)
os.environ[nogreendns] = 'yes'
i18n.enable_lazy()
fix_greendns_ipv6()
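# Hedged sketch (illustrative, not executed here) of the ordering that
# fix_greendns_ipv6() enforces: the flag must be set before anything in the
# process imports eventlet, e.g. at the very top of an entry point:
#
#     import os
#     os.environ.setdefault('EVENTLET_NO_GREENDNS', 'yes')
#     import eventlet  # only safe to import after the flag is set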
|
import csv
import numpy as np
from sklearn.svm import SVR
import matplotlib.pyplot as plot
def get_data(filename):
dates = []
prices = []
with open(filename, 'r') as csvFile:
csv_file_reader = csv.reader(csvFile)
next(csv_file_reader)
for row in csv_file_reader:
dates.append(int(row[0].split('-')[0]))
prices.append(float(row[1]))
return np.array(dates), np.array(prices)
def predict_prices(dates, prices, x):
    dates = dates.reshape(len(dates), 1)
svr_lin = SVR(kernel='linear', C=1e3)
svr_poly = SVR(kernel='poly', C=1e3, degree=2)
svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1)
svr_lin.fit(dates, prices)
svr_poly.fit(dates, prices)
svr_rbf.fit(dates, prices)
    plot.scatter(dates, prices, edgecolors='black', label='Data')
    plot.plot(dates, svr_rbf.predict(dates), label='RBF model')
    plot.plot(dates, svr_lin.predict(dates), label='Linear model')
    plot.plot(dates, svr_poly.predict(dates), label='Polynomial model')
plot.xlabel('Date')
plot.ylabel('Price')
plot.title('Support Vector Regression')
plot.legend()
plot.show()
    x = np.array(x).reshape(-1, 1)
    return svr_rbf.predict(x)[0], svr_lin.predict(x)[0], svr_poly.predict(x)[0]
dates, prices = get_data('./data_files/aapl.csv')
print("\n", "Dates: ", dates.size, "\n", "Prices: ", prices.size)
predicted_price = predict_prices(dates, prices, 29)
print(predicted_price)
|
import string
import warnings
import numpy as np
import pandas.util.testing as tm
from pandas import (DataFrame, Series, MultiIndex, date_range, period_range,
isnull, NaT)
from .pandas_vb_common import setup # noqa
class GetNumericData(object):
goal_time = 0.2
def setup(self):
self.df = DataFrame(np.random.randn(10000, 25))
self.df['foo'] = 'bar'
self.df['bar'] = 'baz'
with warnings.catch_warnings(record=True):
self.df = self.df.consolidate()
def time_frame_get_numeric_data(self):
self.df._get_numeric_data()
class Lookup(object):
goal_time = 0.2
def setup(self):
self.df = DataFrame(np.random.randn(10000, 8),
columns=list('abcdefgh'))
self.df['foo'] = 'bar'
self.row_labels = list(self.df.index[::10])[:900]
self.col_labels = list(self.df.columns) * 100
self.row_labels_all = np.array(
list(self.df.index) * len(self.df.columns), dtype='object')
self.col_labels_all = np.array(
list(self.df.columns) * len(self.df.index), dtype='object')
def time_frame_fancy_lookup(self):
self.df.lookup(self.row_labels, self.col_labels)
def time_frame_fancy_lookup_all(self):
self.df.lookup(self.row_labels_all, self.col_labels_all)
class Reindex(object):
goal_time = 0.2
def setup(self):
N = 10**3
self.df = DataFrame(np.random.randn(N * 10, N))
self.idx = np.arange(4 * N, 7 * N)
self.df2 = DataFrame(
{c: {0: np.random.randint(0, 2, N).astype(np.bool_),
1: np.random.randint(0, N, N).astype(np.int16),
2: np.random.randint(0, N, N).astype(np.int32),
3: np.random.randint(0, N, N).astype(np.int64)}
[np.random.randint(0, 4)] for c in range(N)})
def time_reindex_axis0(self):
self.df.reindex(self.idx)
def time_reindex_axis1(self):
self.df.reindex(columns=self.idx)
def time_reindex_both_axes(self):
self.df.reindex(index=self.idx, columns=self.idx)
def time_reindex_both_axes_ix(self):
self.df.ix[self.idx, self.idx]
def time_reindex_upcast(self):
self.df2.reindex(np.random.permutation(range(1200)))
class Iteration(object):
goal_time = 0.2
def setup(self):
N = 1000
self.df = DataFrame(np.random.randn(N * 10, N))
self.df2 = DataFrame(np.random.randn(N * 50, 10))
self.df3 = DataFrame(np.random.randn(N, 5 * N),
columns=['C' + str(c) for c in range(N * 5)])
def time_iteritems(self):
# (monitor no-copying behaviour)
if hasattr(self.df, '_item_cache'):
self.df._item_cache.clear()
for name, col in self.df.iteritems():
pass
def time_iteritems_cached(self):
for name, col in self.df.iteritems():
pass
def time_iteritems_indexing(self):
for col in self.df3:
self.df3[col]
def time_itertuples(self):
for row in self.df2.itertuples():
pass
def time_iterrows(self):
for row in self.df.iterrows():
pass
class ToString(object):
goal_time = 0.2
def setup(self):
self.df = DataFrame(np.random.randn(100, 10))
def time_to_string_floats(self):
self.df.to_string()
class ToHTML(object):
goal_time = 0.2
def setup(self):
nrows = 500
self.df2 = DataFrame(np.random.randn(nrows, 10))
self.df2[0] = period_range('2000', periods=nrows)
self.df2[1] = range(nrows)
def time_to_html_mixed(self):
self.df2.to_html()
class Repr(object):
goal_time = 0.2
def setup(self):
nrows = 10000
data = np.random.randn(nrows, 10)
arrays = np.tile(np.random.randn(3, int(nrows / 100)), 100)
idx = MultiIndex.from_arrays(arrays)
self.df3 = DataFrame(data, index=idx)
self.df4 = DataFrame(data, index=np.random.randn(nrows))
self.df_tall = DataFrame(np.random.randn(nrows, 10))
self.df_wide = DataFrame(np.random.randn(10, nrows))
def time_html_repr_trunc_mi(self):
self.df3._repr_html_()
def time_html_repr_trunc_si(self):
self.df4._repr_html_()
def time_repr_tall(self):
repr(self.df_tall)
def time_frame_repr_wide(self):
repr(self.df_wide)
class MaskBool(object):
goal_time = 0.2
def setup(self):
data = np.random.randn(1000, 500)
df = DataFrame(data)
df = df.where(df > 0)
self.bools = df > 0
self.mask = isnull(df)
def time_frame_mask_bools(self):
self.bools.mask(self.mask)
def time_frame_mask_floats(self):
self.bools.astype(float).mask(self.mask)
class Isnull(object):
goal_time = 0.2
def setup(self):
N = 10**3
self.df_no_null = DataFrame(np.random.randn(N, N))
sample = np.array([np.nan, 1.0])
data = np.random.choice(sample, (N, N))
self.df = DataFrame(data)
sample = np.array(list(string.ascii_letters + string.whitespace))
data = np.random.choice(sample, (N, N))
self.df_strings = DataFrame(data)
sample = np.array([NaT, np.nan, None, np.datetime64('NaT'),
np.timedelta64('NaT'), 0, 1, 2.0, '', 'abcd'])
data = np.random.choice(sample, (N, N))
self.df_obj = DataFrame(data)
def time_isnull_floats_no_null(self):
isnull(self.df_no_null)
def time_isnull(self):
isnull(self.df)
def time_isnull_strngs(self):
isnull(self.df_strings)
def time_isnull_obj(self):
isnull(self.df_obj)
class Fillna(object):
goal_time = 0.2
params = ([True, False], ['pad', 'bfill'])
param_names = ['inplace', 'method']
def setup(self, inplace, method):
values = np.random.randn(10000, 100)
values[::2] = np.nan
self.df = DataFrame(values)
def time_frame_fillna(self, inplace, method):
self.df.fillna(inplace=inplace, method=method)
class Dropna(object):
goal_time = 0.2
params = (['all', 'any'], [0, 1])
param_names = ['how', 'axis']
def setup(self, how, axis):
self.df = DataFrame(np.random.randn(10000, 1000))
self.df.ix[50:1000, 20:50] = np.nan
self.df.ix[2000:3000] = np.nan
self.df.ix[:, 60:70] = np.nan
self.df_mixed = self.df.copy()
self.df_mixed['foo'] = 'bar'
def time_dropna(self, how, axis):
self.df.dropna(how=how, axis=axis)
def time_dropna_axis_mixed_dtypes(self, how, axis):
self.df_mixed.dropna(how=how, axis=axis)
class Count(object):
goal_time = 0.2
params = [0, 1]
param_names = ['axis']
def setup(self, axis):
self.df = DataFrame(np.random.randn(10000, 1000))
self.df.ix[50:1000, 20:50] = np.nan
self.df.ix[2000:3000] = np.nan
self.df.ix[:, 60:70] = np.nan
self.df_mixed = self.df.copy()
self.df_mixed['foo'] = 'bar'
self.df.index = MultiIndex.from_arrays([self.df.index, self.df.index])
self.df.columns = MultiIndex.from_arrays([self.df.columns,
self.df.columns])
self.df_mixed.index = MultiIndex.from_arrays([self.df_mixed.index,
self.df_mixed.index])
self.df_mixed.columns = MultiIndex.from_arrays([self.df_mixed.columns,
self.df_mixed.columns])
def time_count_level_multi(self, axis):
self.df.count(axis=axis, level=1)
def time_count_level_mixed_dtypes_multi(self, axis):
self.df_mixed.count(axis=axis, level=1)
class Apply(object):
goal_time = 0.2
def setup(self):
self.df = DataFrame(np.random.randn(1000, 100))
self.s = Series(np.arange(1028.0))
self.df2 = DataFrame({i: self.s for i in range(1028)})
self.df3 = DataFrame(np.random.randn(1000, 3), columns=list('ABC'))
def time_apply_user_func(self):
self.df2.apply(lambda x: np.corrcoef(x, self.s)[(0, 1)])
def time_apply_axis_1(self):
self.df.apply(lambda x: x + 1, axis=1)
def time_apply_lambda_mean(self):
self.df.apply(lambda x: x.mean())
def time_apply_np_mean(self):
self.df.apply(np.mean)
def time_apply_pass_thru(self):
self.df.apply(lambda x: x)
def time_apply_ref_by_name(self):
self.df3.apply(lambda x: x['A'] + x['B'], axis=1)
class Dtypes(object):
goal_time = 0.2
def setup(self):
self.df = DataFrame(np.random.randn(1000, 1000))
def time_frame_dtypes(self):
self.df.dtypes
class Equals(object):
goal_time = 0.2
def setup(self):
N = 10**3
self.float_df = DataFrame(np.random.randn(N, N))
self.float_df_nan = self.float_df.copy()
self.float_df_nan.iloc[-1, -1] = np.nan
self.object_df = DataFrame('foo', index=range(N), columns=range(N))
self.object_df_nan = self.object_df.copy()
self.object_df_nan.iloc[-1, -1] = np.nan
self.nonunique_cols = self.object_df.copy()
self.nonunique_cols.columns = ['A'] * len(self.nonunique_cols.columns)
self.nonunique_cols_nan = self.nonunique_cols.copy()
self.nonunique_cols_nan.iloc[-1, -1] = np.nan
def time_frame_float_equal(self):
self.float_df.equals(self.float_df)
def time_frame_float_unequal(self):
self.float_df.equals(self.float_df_nan)
def time_frame_nonunique_equal(self):
self.nonunique_cols.equals(self.nonunique_cols)
def time_frame_nonunique_unequal(self):
self.nonunique_cols.equals(self.nonunique_cols_nan)
def time_frame_object_equal(self):
self.object_df.equals(self.object_df)
def time_frame_object_unequal(self):
self.object_df.equals(self.object_df_nan)
class Interpolate(object):
goal_time = 0.2
params = [None, 'infer']
param_names = ['downcast']
def setup(self, downcast):
N = 10000
# this is the worst case, where every column has NaNs.
self.df = DataFrame(np.random.randn(N, 100))
self.df.values[::2] = np.nan
self.df2 = DataFrame({'A': np.arange(0, N),
'B': np.random.randint(0, 100, N),
'C': np.random.randn(N),
'D': np.random.randn(N)})
self.df2.loc[1::5, 'A'] = np.nan
self.df2.loc[1::5, 'C'] = np.nan
def time_interpolate(self, downcast):
self.df.interpolate(downcast=downcast)
def time_interpolate_some_good(self, downcast):
self.df2.interpolate(downcast=downcast)
class Shift(object):
# frame shift speedup issue-5609
goal_time = 0.2
params = [0, 1]
param_names = ['axis']
def setup(self, axis):
self.df = DataFrame(np.random.rand(10000, 500))
def time_shift(self, axis):
self.df.shift(1, axis=axis)
class Nunique(object):
def setup(self):
self.df = DataFrame(np.random.randn(10000, 1000))
def time_frame_nunique(self):
self.df.nunique()
class Duplicated(object):
goal_time = 0.2
def setup(self):
n = (1 << 20)
t = date_range('2015-01-01', freq='S', periods=(n // 64))
xs = np.random.randn(n // 64).round(2)
self.df = DataFrame({'a': np.random.randint(-1 << 8, 1 << 8, n),
'b': np.random.choice(t, n),
'c': np.random.choice(xs, n)})
self.df2 = DataFrame(np.random.randn(1000, 100).astype(str)).T
def time_frame_duplicated(self):
self.df.duplicated()
def time_frame_duplicated_wide(self):
self.df2.duplicated()
class XS(object):
goal_time = 0.2
params = [0, 1]
param_names = ['axis']
def setup(self, axis):
self.N = 10**4
self.df = DataFrame(np.random.randn(self.N, self.N))
def time_frame_xs(self, axis):
self.df.xs(self.N / 2, axis=axis)
class SortValues(object):
goal_time = 0.2
params = [True, False]
param_names = ['ascending']
def setup(self, ascending):
self.df = DataFrame(np.random.randn(1000000, 2), columns=list('AB'))
def time_frame_sort_values(self, ascending):
self.df.sort_values(by='A', ascending=ascending)
class SortIndexByColumns(object):
goal_time = 0.2
def setup(self):
N = 10000
K = 10
self.df = DataFrame({'key1': tm.makeStringIndex(N).values.repeat(K),
'key2': tm.makeStringIndex(N).values.repeat(K),
'value': np.random.randn(N * K)})
def time_frame_sort_values_by_columns(self):
self.df.sort_values(by=['key1', 'key2'])
class Quantile(object):
goal_time = 0.2
params = [0, 1]
param_names = ['axis']
def setup(self, axis):
self.df = DataFrame(np.random.randn(1000, 3), columns=list('ABC'))
def time_frame_quantile(self, axis):
self.df.quantile([0.1, 0.5], axis=axis)
class GetDtypeCounts(object):
# 2807
goal_time = 0.2
def setup(self):
self.df = DataFrame(np.random.randn(10, 10000))
def time_frame_get_dtype_counts(self):
self.df.get_dtype_counts()
def time_info(self):
self.df.info()
class NSort(object):
goal_time = 0.2
params = ['first', 'last', 'all']
param_names = ['keep']
def setup(self, keep):
self.df = DataFrame(np.random.randn(1000, 3), columns=list('ABC'))
def time_nlargest(self, keep):
self.df.nlargest(100, 'A', keep=keep)
def time_nsmallest(self, keep):
self.df.nsmallest(100, 'A', keep=keep)
class Describe(object):
goal_time = 0.2
def setup(self):
self.df = DataFrame({
'a': np.random.randint(0, 100, int(1e6)),
'b': np.random.randint(0, 100, int(1e6)),
'c': np.random.randint(0, 100, int(1e6))
})
def time_series_describe(self):
self.df['a'].describe()
def time_dataframe_describe(self):
self.df.describe()
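# Hedged sketch (not part of the original suite) of the asv benchmark convention
# used throughout this file: `setup` builds the fixtures, every `time_*` method is
# timed, and `params`/`param_names` fan each timing out over the listed values.
class ExampleRound(object):
    goal_time = 0.2
    params = [0, 2]
    param_names = ['decimals']
    def setup(self, decimals):
        self.df = DataFrame(np.random.randn(10000, 10))
    def time_frame_round(self, decimals):
        self.df.round(decimals)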
|
# -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <[email protected]> #
# Copyright 2012 Zearin <[email protected]> #
# Copyright 2013 AKFish <[email protected]> #
# Copyright 2013 Vincent Jacques <[email protected]> #
# #
# This file is part of PyGithub. #
# http://pygithub.github.io/PyGithub/v1/index.html #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import sys
import datetime
from operator import itemgetter
import GithubException
import Consts
atLeastPython3 = sys.hexversion >= 0x03000000
class _NotSetType:
def __repr__(self):
return "NotSet"
value = None
NotSet = _NotSetType()
class _ValuedAttribute:
def __init__(self, value):
self.value = value
class _BadAttribute:
def __init__(self, value, expectedType, exception=None):
self.__value = value
self.__expectedType = expectedType
self.__exception = exception
@property
def value(self):
raise GithubException.BadAttributeException(self.__value, self.__expectedType, self.__exception)
class GithubObject(object):
"""
Base class for all classes representing objects returned by the API.
"""
'''
A global debug flag to enable header validation by requester for all objects
'''
CHECK_AFTER_INIT_FLAG = False
@classmethod
def setCheckAfterInitFlag(cls, flag):
cls.CHECK_AFTER_INIT_FLAG = flag
def __init__(self, requester, headers, attributes, completed):
self._requester = requester
self._initAttributes()
self._storeAndUseAttributes(headers, attributes)
# Ask requester to do some checking, for debug and test purpose
# Since it's most handy to access and kinda all-knowing
if self.CHECK_AFTER_INIT_FLAG: # pragma no branch (Flag always set in tests)
requester.check_me(self)
def _storeAndUseAttributes(self, headers, attributes):
# Make sure headers are assigned before calling _useAttributes
# (Some derived classes will use headers in _useAttributes)
self._headers = headers
self._rawData = attributes
self._useAttributes(attributes)
@property
def raw_data(self):
"""
:type: dict
"""
self._completeIfNeeded()
return self._rawData
@property
def raw_headers(self):
"""
:type: dict
"""
self._completeIfNeeded()
return self._headers
@staticmethod
def _parentUrl(url):
return "/".join(url.split("/")[: -1])
@staticmethod
def __makeSimpleAttribute(value, type):
if value is None or isinstance(value, type):
return _ValuedAttribute(value)
else:
return _BadAttribute(value, type)
@staticmethod
def __makeSimpleListAttribute(value, type):
if isinstance(value, list) and all(isinstance(element, type) for element in value):
return _ValuedAttribute(value)
else:
return _BadAttribute(value, [type])
@staticmethod
def __makeTransformedAttribute(value, type, transform):
if value is None:
return _ValuedAttribute(None)
elif isinstance(value, type):
try:
return _ValuedAttribute(transform(value))
except Exception, e:
return _BadAttribute(value, type, e)
else:
return _BadAttribute(value, type)
@staticmethod
def _makeStringAttribute(value):
return GithubObject.__makeSimpleAttribute(value, (str, unicode))
@staticmethod
def _makeIntAttribute(value):
return GithubObject.__makeSimpleAttribute(value, (int, long))
@staticmethod
def _makeBoolAttribute(value):
return GithubObject.__makeSimpleAttribute(value, bool)
@staticmethod
def _makeDictAttribute(value):
return GithubObject.__makeSimpleAttribute(value, dict)
@staticmethod
def _makeTimestampAttribute(value):
return GithubObject.__makeTransformedAttribute(value, (int, long), datetime.datetime.utcfromtimestamp)
@staticmethod
def _makeDatetimeAttribute(value):
def parseDatetime(s):
if len(s) == 24: # pragma no branch (This branch was used only when creating a download)
            # The Downloads API has been removed. I'm keeping this branch because I have no means
# to check if it's really useless now.
return datetime.datetime.strptime(s, "%Y-%m-%dT%H:%M:%S.000Z") # pragma no cover (This branch was used only when creating a download)
elif len(s) == 25:
return datetime.datetime.strptime(s[:19], "%Y-%m-%dT%H:%M:%S") + (1 if s[19] == '-' else -1) * datetime.timedelta(hours=int(s[20:22]), minutes=int(s[23:25]))
else:
return datetime.datetime.strptime(s, "%Y-%m-%dT%H:%M:%SZ")
return GithubObject.__makeTransformedAttribute(value, (str, unicode), parseDatetime)
def _makeClassAttribute(self, klass, value):
return GithubObject.__makeTransformedAttribute(value, dict, lambda value: klass(self._requester, self._headers, value, completed=False))
@staticmethod
def _makeListOfStringsAttribute(value):
return GithubObject.__makeSimpleListAttribute(value, (str, unicode))
@staticmethod
def _makeListOfIntsAttribute(value):
return GithubObject.__makeSimpleListAttribute(value, int)
@staticmethod
def _makeListOfListOfStringsAttribute(value):
return GithubObject.__makeSimpleListAttribute(value, list)
def _makeListOfClassesAttribute(self, klass, value):
if isinstance(value, list) and all(isinstance(element, dict) for element in value):
return _ValuedAttribute([klass(self._requester, self._headers, element, completed=False) for element in value])
else:
return _BadAttribute(value, [dict])
def _makeDictOfStringsToClassesAttribute(self, klass, value):
if isinstance(value, dict) and all(isinstance(key, (str, unicode)) and isinstance(element, dict) for key, element in value.iteritems()):
return _ValuedAttribute(dict((key, klass(self._requester, self._headers, element, completed=False)) for key, element in value.iteritems()))
else:
return _BadAttribute(value, {(str, unicode): dict})
@property
def etag(self):
'''
:type: str
'''
return self._headers.get(Consts.RES_ETAG)
@property
def last_modified(self):
'''
:type: str
'''
return self._headers.get(Consts.RES_LAST_MODIFED)
def get__repr__(self, params):
"""
Converts the object to a nicely printable string.
"""
def format_params(params):
if atLeastPython3:
items = params.items()
else:
items = list(params.items())
for k, v in sorted(items, key=itemgetter(0), reverse=True):
yield '{k}="{v}"'.format(k=k, v=v) if isinstance(v, (str, unicode)) else '{k}={v}'.format(k=k, v=v)
return '{class_name}({params})'.format(
class_name=self.__class__.__name__,
params=", ".join(list(format_params(params)))
)
class NonCompletableGithubObject(GithubObject):
def _completeIfNeeded(self):
pass
class CompletableGithubObject(GithubObject):
def __init__(self, requester, headers, attributes, completed):
GithubObject.__init__(self, requester, headers, attributes, completed)
self.__completed = completed
def __eq__(self, other):
return other.__class__ is self.__class__ and other._url.value == self._url.value
def __ne__(self, other):
return not self == other
def _completeIfNotSet(self, value):
if value is NotSet:
self._completeIfNeeded()
def _completeIfNeeded(self):
if not self.__completed:
self.__complete()
def __complete(self):
headers, data = self._requester.requestJsonAndCheck(
"GET",
self._url.value
)
self._storeAndUseAttributes(headers, data)
self.__completed = True
def update(self):
'''
Check and update the object with conditional request
:rtype: Boolean value indicating whether the object is changed
'''
conditionalRequestHeader = dict()
if self.etag is not None:
conditionalRequestHeader[Consts.REQ_IF_NONE_MATCH] = self.etag
if self.last_modified is not None:
conditionalRequestHeader[Consts.REQ_IF_MODIFIED_SINCE] = self.last_modified
status, responseHeaders, output = self._requester.requestJson(
"GET",
self._url.value,
headers=conditionalRequestHeader
)
if status == 304:
return False
else:
headers, data = self._requester._Requester__check(status, responseHeaders, output)
self._storeAndUseAttributes(headers, data)
self.__completed = True
return True
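# Hedged sketch (not part of PyGithub) of how a concrete class built on the
# helpers above is usually wired: `login` and `id` are illustrative attribute
# names, not a real PyGithub class.
class _ExampleGithubObject(NonCompletableGithubObject):
    def _initAttributes(self):
        self._login = NotSet
        self._id = NotSet
    def _useAttributes(self, attributes):
        if "login" in attributes:  # pragma no branch
            self._login = self._makeStringAttribute(attributes["login"])
        if "id" in attributes:  # pragma no branch
            self._id = self._makeIntAttribute(attributes["id"])
    @property
    def login(self):
        """
        :type: string
        """
        return self._login.value
    @property
    def id(self):
        """
        :type: integer
        """
        return self._id.value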
|
import itertools
import falcon
import hug
import json
from planszownik.spiders import DEFAULT_REDIS_EXCEPTIONS_KEY
from rest import *
from planszownik import SpiderMode
api = hug.API(__name__)
@hug.get('/')
def base(request, response):
"""Home page listing endpoints
:param response:
:param request:
"""
handler = api.http.documentation_404()
handler(request, response)
response.status = falcon.HTTP_200
ret = json.loads(response.data.decode())
del ret['404']
return ret['documentation']['handlers']
@hug.get('/queues')
def input_queues():
"""
:return: Dictionary of modes with domains and corresponding
    queues for crawling and dupefiltering, in the form:
{
mode:
{
domain:
{
crawl: 'crawl:queue',
dupefilter: 'dupefilter:queue'
requests: 'requests:queue'
}
}
}
"""
body = iterate_spiders_with_call_to(map_domains_with_queues)
return body
@hug.get('/items')
def items_queues():
"""
    :return: Dictionary of queues that produce crawled items
"""
queues_set = set(for_each_spider(spiders, items_queue))
body = {'queues': list(queues_set)}
return body
@hug.get('/modes')
def crawl_modes():
"""
:return: List of modes that spiders may run in
"""
body = list(SpiderMode.get_modes())
return body
@hug.get('/domains')
def domains_supported():
"""
:return: List of supported domains
"""
body = for_each_spider(spiders, get_domains)
body = set(itertools.chain.from_iterable(body))
return list(body)
@hug.get('/exceptions')
def exceptions():
"""
:return: List of exceptions queues
"""
queue = settings.get('REDIS_EXCEPTIONS_KEY', DEFAULT_REDIS_EXCEPTIONS_KEY)
return {'queues': [queue]}
@hug.not_found()
def not_found_handler():
return ''
def iterate_spiders_with_call_to(function):
def callz(mode):
filtered_spiders = filter_spiders_by_mode(spiders, mode)
domain_queue_list = for_each_spider(filtered_spiders, function)
return dict(itertools.chain.from_iterable(domain_queue_list))
return for_each_spider_mode(callz)
|
from copy import copy
import warnings
from django.utils.deprecation import RemovedInDjango20Warning
# Hard-coded processor for easier use of CSRF protection.
_builtin_context_processors = ('django.template.context_processors.csrf',)
_current_app_undefined = object()
class ContextPopException(Exception):
"pop() has been called more times than push()"
pass
class ContextDict(dict):
def __init__(self, context, *args, **kwargs):
super(ContextDict, self).__init__(*args, **kwargs)
context.dicts.append(self)
self.context = context
def __enter__(self):
return self
def __exit__(self, *args, **kwargs):
self.context.pop()
class BaseContext(object):
def __init__(self, dict_=None):
self._reset_dicts(dict_)
def _reset_dicts(self, value=None):
builtins = {'True': True, 'False': False, 'None': None}
self.dicts = [builtins]
if value is not None:
self.dicts.append(value)
def __copy__(self):
duplicate = copy(super(BaseContext, self))
duplicate.dicts = self.dicts[:]
return duplicate
def __repr__(self):
return repr(self.dicts)
def __iter__(self):
for d in reversed(self.dicts):
yield d
def push(self, *args, **kwargs):
return ContextDict(self, *args, **kwargs)
def pop(self):
if len(self.dicts) == 1:
raise ContextPopException
return self.dicts.pop()
def __setitem__(self, key, value):
"Set a variable in the current context"
self.dicts[-1][key] = value
def __getitem__(self, key):
"Get a variable's value, starting at the current context and going upward"
for d in reversed(self.dicts):
if key in d:
return d[key]
raise KeyError(key)
def __delitem__(self, key):
"Delete a variable from the current context"
del self.dicts[-1][key]
def has_key(self, key):
for d in self.dicts:
if key in d:
return True
return False
def __contains__(self, key):
return self.has_key(key)
def get(self, key, otherwise=None):
for d in reversed(self.dicts):
if key in d:
return d[key]
return otherwise
def new(self, values=None):
"""
Returns a new context with the same properties, but with only the
values given in 'values' stored.
"""
new_context = copy(self)
new_context._reset_dicts(values)
return new_context
def flatten(self):
"""
Returns self.dicts as one dictionary
"""
flat = {}
for d in self.dicts:
flat.update(d)
return flat
def __eq__(self, other):
"""
        Compares two contexts by comparing their 'dicts' attributes.
"""
if isinstance(other, BaseContext):
# because dictionaries can be put in different order
# we have to flatten them like in templates
return self.flatten() == other.flatten()
# if it's not comparable return false
return False
class Context(BaseContext):
"A stack container for variable context"
def __init__(self, dict_=None, autoescape=True,
current_app=_current_app_undefined,
use_l10n=None, use_tz=None, engine=None):
if current_app is not _current_app_undefined:
warnings.warn(
"The current_app argument of Context is deprecated. Use "
"RequestContext and set the current_app attribute of its "
"request instead.", RemovedInDjango20Warning, stacklevel=2)
self.autoescape = autoescape
self._current_app = current_app
self.use_l10n = use_l10n
self.use_tz = use_tz
self.engine = engine
self.render_context = RenderContext()
super(Context, self).__init__(dict_)
@property
def current_app(self):
return None if self._current_app is _current_app_undefined else self._current_app
def __copy__(self):
duplicate = super(Context, self).__copy__()
duplicate.render_context = copy(self.render_context)
return duplicate
def update(self, other_dict):
"Pushes other_dict to the stack of dictionaries in the Context"
if not hasattr(other_dict, '__getitem__'):
raise TypeError('other_dict must be a mapping (dictionary-like) object.')
self.dicts.append(other_dict)
return other_dict
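# Hedged sketch (not part of Django) of the stack behaviour implemented above:
# push() returns a ContextDict that pops itself when used as a context manager,
# and lookups walk the dicts from the most recent one outwards.
def _example_context_stack():
    c = Context({'a': 1})
    with c.push(a=2):
        inner = c['a']   # -> 2, found in the innermost dict
    outer = c['a']       # -> 1 again after the automatic pop
    return inner, outer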
class RenderContext(BaseContext):
"""
A stack container for storing Template state.
RenderContext simplifies the implementation of template Nodes by providing a
safe place to store state between invocations of a node's `render` method.
The RenderContext also provides scoping rules that are more sensible for
'template local' variables. The render context stack is pushed before each
template is rendered, creating a fresh scope with nothing in it. Name
resolution fails if a variable is not found at the top of the RequestContext
stack. Thus, variables are local to a specific template and don't affect the
rendering of other templates as they would if they were stored in the normal
template context.
"""
def __iter__(self):
for d in self.dicts[-1]:
yield d
def has_key(self, key):
return key in self.dicts[-1]
def get(self, key, otherwise=None):
return self.dicts[-1].get(key, otherwise)
def __getitem__(self, key):
return self.dicts[-1][key]
class RequestContext(Context):
"""
This subclass of template.Context automatically populates itself using
the processors defined in the engine's configuration.
Additional processors can be specified as a list of callables
using the "processors" keyword argument.
"""
def __init__(self, request, dict_=None, processors=None,
current_app=_current_app_undefined,
use_l10n=None, use_tz=None, engine=None):
# current_app isn't passed here to avoid triggering the deprecation
# warning in Context.__init__.
super(RequestContext, self).__init__(
dict_, use_l10n=use_l10n, use_tz=use_tz, engine=engine)
if current_app is not _current_app_undefined:
warnings.warn(
"The current_app argument of RequestContext is deprecated. "
"Set the current_app attribute of its request instead.",
RemovedInDjango20Warning, stacklevel=2)
self._current_app = current_app
self.request = request
self._processors = () if processors is None else tuple(processors)
self._processors_index = len(self.dicts)
self.update({}) # placeholder for context processors output
self.engine = engine # re-run the setter in case engine is not None
@property
def engine(self):
return self._engine
@engine.setter
def engine(self, engine):
self._engine = engine
if hasattr(self, '_processors_index'):
if engine is None:
# Unset context processors.
self.dicts[self._processors_index] = {}
else:
# Set context processors for this engine.
updates = {}
for processor in engine.template_context_processors + self._processors:
updates.update(processor(self.request))
self.dicts[self._processors_index] = updates
def new(self, values=None):
new_context = super(RequestContext, self).new(values)
# This is for backwards-compatibility: RequestContexts created via
# Context.new don't include values from context processors.
del new_context._processors_index
return new_context
|
#! /usr/bin/env python
import sys
import math
import pango
import cairo
import pangocairo
RADIUS = 150
def draw_text(cr):
N_WORDS = 10
FONT = "Sans Bold 27"
# Center coordinates on the middle of the region we are drawing
cr.translate(RADIUS, RADIUS);
# Create a PangoLayout, set the font and text */
layout = cr.create_layout()
layout.set_text("Text")
layout.set_font_description(pango.FontDescription(FONT))
# Draw the layout N_WORDS times in a circle
for i in range(N_WORDS):
angle = (360 * i) / N_WORDS;
cr.save()
# Gradient from red at angle == 60 to blue at angle == 300
red = (1 + math.cos((angle - 60)*math.pi/180))/2
cr.set_source_rgb(red, 0, 1 - red)
cr.rotate(angle*math.pi/180)
# Inform Pango to re-layout the text with the new transformation */
cr.update_layout(layout)
width, height = layout.get_size()
cr.move_to(-width/pango.SCALE/2, -RADIUS)
cr.show_layout(layout)
cr.restore()
def main(argv):
if len(argv) != 2:
print >> sys.stderr, "Usage: cairosimple OUTPUT_BASENAME\n"
return 1
filename = argv[1]
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 2*RADIUS, 2*RADIUS)
cr = pangocairo.CairoContext(cairo.Context(surface))
cr.set_source_rgb(1.0, 1.0, 1.0)
cr.rectangle(0, 0, 2*RADIUS, 2*RADIUS)
cr.fill()
draw_text(cr)
surface.write_to_png(filename + ".png")
## output also a PDF file
surface = cairo.PDFSurface(filename + ".pdf", 2*RADIUS, 2*RADIUS)
cr = pangocairo.CairoContext(cairo.Context(surface))
draw_text(cr)
cr.show_page()
surface.finish()
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
'''
Created on 2012.08.02.
@author: pcsaba
'''
from django.core.management.base import BaseCommand, CommandError
import argparse
import traceback
import sys
from django.utils.encoding import smart_str
from xadrpy.management.libs import is_application_installed
from xadrpy import conf
class MyHelpFormatter(argparse.HelpFormatter):
def __init__(self, prog,
indent_increment=2,
max_help_position=40,
width=120):
super(MyHelpFormatter, self).__init__(prog, indent_increment=2, max_help_position=40, width=120)
def test_func(**kwargs):
print kwargs
class Command(BaseCommand):
description = "xadrpy console tools"
prog = "manage.py xcmd"
need_subcommands = True
subcommands_title = "Subcommands"
subcommands_description = None
subcommands_metavar = "subcommand"
shift = 2
language_code = "en-us"
def __init__(self):
BaseCommand.__init__(self)
self.parser = argparse.ArgumentParser(description='xadrpy\n console tools',
prog="manage.py xcmd",
#usage="manage.py xcmd [options] subcommand",
formatter_class=MyHelpFormatter)
self.init_default_arguments()
self.subcommands = None
if self.need_subcommands:
self.subcommands = self.parser.add_subparsers(title=self.subcommands_title,
description=self.subcommands_description,
metavar=self.subcommands_metavar)
self.init_subcommands()
def init_default_arguments(self):
self.parser.add_argument("-v","--verbosity", action="store", metavar="VERBOSITY", choices=[0,1,2,3], type=int, help="Verbosity level; 0=minimal output, 1=normal output, 2=verbose output, 3=very verbose output")
self.parser.add_argument("--settings", help='The Python path to a settings module, e.g. "myproject.settings.main". If this isn\'t provided, the DJANGO_SETTINGS_MODULE environment variable will be used.')
self.parser.add_argument("--pythonpath", help='A directory to add to the Python path, e.g. "/home/djangoprojects/myproject".')
self.parser.add_argument("--traceback", action="store_true", help='Print traceback on exception')
def init_subcommands(self):
#self.add_subcommand(test_func, "themes.collect", help="collect themes", description="collecting themes")
from xadrpy.management.libs import GeneralCommands
general = GeneralCommands(self)
general.register()
if is_application_installed("xadrpy.core.preferences"):
from xadrpy.core.preferences.libs import PrefsCommands
commands = PrefsCommands(self)
commands.register()
general.add_commands(commands, "preferences")
if is_application_installed("xadrpy.core.router"):
from xadrpy.core.router.libs import RouterCommands
commands = RouterCommands(self)
commands.register()
general.add_commands(commands, "router")
if is_application_installed("xadrpy.contrib.plugins"):
from xadrpy.contrib.plugins.libs import PluginsCommands
commands = PluginsCommands(self)
commands.register()
general.add_commands(commands, "plugins")
if is_application_installed("xadrpy.contrib.themes"):
from xadrpy.contrib.themes.libs import ThemesCommands
commands = ThemesCommands(self)
commands.register()
general.add_commands(commands, "themes")
if is_application_installed("xadrpy.contrib.entries"):
from xadrpy.contrib.entries import EntriesCommands
commands = EntriesCommands(self)
commands.register()
general.add_commands(commands, "entries")
def print_header(self):
self.stdout.write("xadrpy %s - django toolkit\n" % conf.VERSION)
self.stdout.write("Author Csaba Palankai <[email protected]>\n")
def add_subcommand(self, subcommand, name, help=None, description=None, epilog=None, prog=None, usage=None):
parser = self.subcommands.add_parser(name, help=help, description=description, epilog=epilog, prog=prog, usage=usage)
parser.set_defaults(subcommand=subcommand)
return parser
def run_from_argv(self, argv):
namespace = self.parser.parse_args(argv[self.shift:])
kwargs = namespace.__dict__.copy()
kwargs.pop(self.subcommands_metavar)
kwargs.pop("settings")
kwargs.pop("pythonpath")
kwargs.pop("traceback")
if 'verbosity' in kwargs and kwargs['verbosity']==None:
kwargs.pop("verbosity")
show_traceback = kwargs.get('traceback', False)
saved_lang = None
if self.can_import_settings:
try:
from django.utils import translation
saved_lang = translation.get_language()
translation.activate(self.language_code)
except ImportError, e:
# If settings should be available, but aren't,
# raise the error and quit.
if show_traceback:
traceback.print_exc()
else:
sys.stderr.write(smart_str(self.style.ERROR('Error: %s\n' % e)))
sys.exit(1)
try:
self.stdout = kwargs.get('stdout', sys.stdout)
self.stderr = kwargs.get('stderr', sys.stderr)
if self.requires_model_validation:
self.validate()
output = namespace.subcommand(**kwargs)
if output:
if self.output_transaction:
# This needs to be imported here, because it relies on
# settings.
from django.db import connections, DEFAULT_DB_ALIAS
connection = connections[kwargs.get('database', DEFAULT_DB_ALIAS)]
if connection.ops.start_transaction_sql():
self.stdout.write(self.style.SQL_KEYWORD(connection.ops.start_transaction_sql()) + '\n')
self.stdout.write(output)
if self.output_transaction:
self.stdout.write('\n' + self.style.SQL_KEYWORD("COMMIT;") + '\n')
except CommandError, e:
if show_traceback:
traceback.print_exc()
else:
self.stderr.write(smart_str(self.style.ERROR('Error: %s\n' % e)))
sys.exit(1)
if saved_lang is not None:
translation.activate(saved_lang)
|
# -*- coding: utf-8 -*-
# Copyright 2007-2020 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import copy
import itertools
from distutils.version import LooseVersion
import textwrap
import traits.api as t
import matplotlib.pyplot as plt
import matplotlib as mpl
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.backend_bases import key_press_handler
import warnings
import numpy as np
import logging
from functools import partial
import hyperspy as hs
from hyperspy.defaults_parser import preferences
_logger = logging.getLogger(__name__)
def contrast_stretching(data, vmin=None, vmax=None):
"""Estimate bounds of the data to display.
Parameters
----------
data: numpy array
vmin, vmax: scalar, str, None
If str, formatted as 'xth', use this value to calculate the percentage
of pixels that are left out of the lower and upper bounds.
For example, for a vmin of '1th', 1% of the lowest will be ignored to
estimate the minimum value. Similarly, for a vmax value of '1th', 1%
of the highest value will be ignored in the estimation of the maximum
value. See :py:func:`numpy.percentile` for more explanation.
If None, use the percentiles value set in the preferences.
        If float or integer, keep this value as the bound.
Returns
-------
vmin, vmax: scalar
The low and high bounds.
Raises
------
ValueError
        if the value of `vmin` or `vmax` is out of the valid range for percentile
calculation (in case of string values).
"""
def _parse_value(value, value_name):
if value is None:
if value_name == "vmin":
value = f'{preferences.Plot.saturated_pixels / 2}th'
elif value_name == "vmax":
value = f'{100 - preferences.Plot.saturated_pixels / 2}th'
if isinstance(value, str):
value = float(value.split("th")[0])
if not 0 <= value <= 100:
raise ValueError(f"{value_name} must be in the range[0, 100].")
return value
if np.ma.is_masked(data):
        # If there is a mask, compress the data to remove the masked data
data = np.ma.masked_less_equal(data, 0).compressed()
    # If vmin, vmax are float or int, we keep the value; if not, we calculate
    # the percentile value
if not isinstance(vmin, (float, int)):
vmin = np.nanpercentile(data, _parse_value(vmin, 'vmin'))
if not isinstance(vmax, (float, int)):
vmax = np.nanpercentile(data, _parse_value(vmax, 'vmax'))
return vmin, vmax
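# Hedged usage sketch (illustrative only): percentile-style string bounds versus
# explicit numeric bounds for contrast_stretching(). The random array is a
# stand-in for image data.
def _example_contrast_stretching():
    data = np.random.randn(256, 256)
    lo, hi = contrast_stretching(data, vmin='1th', vmax='99th')  # 1st / 99th percentiles
    fixed = contrast_stretching(data, vmin=-1.0, vmax=1.0)       # numeric values pass through
    return (lo, hi), fixed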
MPL_DIVERGING_COLORMAPS = [
"BrBG",
"bwr",
"coolwarm",
"PiYG",
"PRGn",
"PuOr",
"RdBu",
"RdGy",
"RdYIBu",
"RdYIGn",
"seismic",
"Spectral", ]
# Add reversed colormaps
MPL_DIVERGING_COLORMAPS += [cmap + "_r" for cmap in MPL_DIVERGING_COLORMAPS]
def centre_colormap_values(vmin, vmax):
"""Calculate vmin and vmax to set the colormap midpoint to zero.
Parameters
----------
vmin, vmax : scalar
The range of data to display.
Returns
-------
cvmin, cvmax : scalar
The values to obtain a centre colormap.
"""
absmax = max(abs(vmin), abs(vmax))
return -absmax, absmax
def create_figure(window_title=None,
_on_figure_window_close=None,
disable_xyscale_keys=False,
**kwargs):
"""Create a matplotlib figure.
This function adds the possibility to execute another function
when the figure is closed and to easily set the window title. Any
keyword argument is passed to the plt.figure function.
Parameters
----------
window_title : {None, string}, optional
_on_figure_window_close : {None, function}, optional
disable_xyscale_keys : bool, optional
Disable the `k`, `l` and `L` shortcuts which toggle the x or y axis
between linear and log scale. Default False.
Returns
-------
fig : plt.figure
"""
fig = plt.figure(**kwargs)
if window_title is not None:
# remove non-alphanumeric characters to prevent file saving problems
# This is a workaround for:
# https://github.com/matplotlib/matplotlib/issues/9056
reserved_characters = r'<>"/\|?*'
for c in reserved_characters:
window_title = window_title.replace(c, '')
window_title = window_title.replace('\n', ' ')
window_title = window_title.replace(':', ' -')
fig.canvas.set_window_title(window_title)
if disable_xyscale_keys and hasattr(fig.canvas, 'toolbar'):
# hack the `key_press_handler` to disable the `k`, `l`, `L` shortcuts
manager = fig.canvas.manager
fig.canvas.mpl_disconnect(manager.key_press_handler_id)
manager.key_press_handler_id = manager.canvas.mpl_connect(
'key_press_event',
lambda event: key_press_handler_custom(event, manager.canvas))
if _on_figure_window_close is not None:
on_figure_window_close(fig, _on_figure_window_close)
return fig
def key_press_handler_custom(event, canvas):
if event.key not in ['k', 'l', 'L']:
key_press_handler(event, canvas, canvas.manager.toolbar)
def on_figure_window_close(figure, function):
"""Connects a close figure signal to a given function.
Parameters
----------
figure : mpl figure instance
function : function
"""
def function_wrapper(evt):
function()
figure.canvas.mpl_connect('close_event', function_wrapper)
def plot_RGB_map(im_list, normalization='single', dont_plot=False):
"""Plot 2 or 3 maps in RGB.
Parameters
----------
im_list : list of Signal2D instances
normalization : {'single', 'global'}, optional
dont_plot : bool, optional
Default False.
Returns
-------
array: RGB matrix
"""
# from widgets import cursors
height, width = im_list[0].data.shape[:2]
rgb = np.zeros((height, width, 3))
rgb[:, :, 0] = im_list[0].data.squeeze()
rgb[:, :, 1] = im_list[1].data.squeeze()
if len(im_list) == 3:
rgb[:, :, 2] = im_list[2].data.squeeze()
if normalization == 'single':
for i in range(len(im_list)):
rgb[:, :, i] /= rgb[:, :, i].max()
elif normalization == 'global':
rgb /= rgb.max()
rgb = rgb.clip(0, rgb.max())
if not dont_plot:
figure = plt.figure()
ax = figure.add_subplot(111)
ax.frameon = False
ax.set_axis_off()
ax.imshow(rgb, interpolation='nearest')
# cursors.set_mpl_ax(ax)
figure.canvas.draw_idle()
else:
return rgb
def subplot_parameters(fig):
"""Returns a list of the subplot parameters of a mpl figure.
Parameters
----------
fig : mpl figure
Returns
-------
tuple : (left, bottom, right, top, wspace, hspace)
"""
wspace = fig.subplotpars.wspace
hspace = fig.subplotpars.hspace
left = fig.subplotpars.left
right = fig.subplotpars.right
top = fig.subplotpars.top
bottom = fig.subplotpars.bottom
return left, bottom, right, top, wspace, hspace
class ColorCycle:
_color_cycle = [mpl.colors.colorConverter.to_rgba(color) for color
in ('b', 'g', 'r', 'c', 'm', 'y', 'k')]
def __init__(self):
self.color_cycle = copy.copy(self._color_cycle)
def __call__(self):
if not self.color_cycle:
self.color_cycle = copy.copy(self._color_cycle)
return self.color_cycle.pop(0)
def plot_signals(signal_list, sync=True, navigator="auto",
navigator_list=None, **kwargs):
"""Plot several signals at the same time.
Parameters
----------
signal_list : list of BaseSignal instances
If sync is set to True, the signals must have the
same navigation shape, but not necessarily the same signal shape.
sync : {True, False}, default "True", optional
If True: the signals will share navigation, all the signals
must have the same navigation shape for this to work, but not
necessarily the same signal shape.
navigator : {"auto", None, "spectrum", "slider", BaseSignal}, optional,
default "auto"
See signal.plot docstring for full description.
navigator_list : {None, list of navigator arguments}, optional, default None
Set different navigator options for the signals. Must use valid
navigator arguments: "auto", None, "spectrum", "slider", or a
HyperSpy Signal. The list must have the same size as signal_list.
If None, the argument specified in navigator will be used.
**kwargs
Any extra keyword arguments are passed to each signal `plot` method.
Example
-------
>>> s_cl = hs.load("coreloss.dm3")
>>> s_ll = hs.load("lowloss.dm3")
>>> hs.plot.plot_signals([s_cl, s_ll])
Specifying the navigator:
>>> s_cl = hs.load("coreloss.dm3")
>>> s_ll = hs.load("lowloss.dm3")
>>> hs.plot.plot_signals([s_cl, s_ll], navigator="slider")
Specifying the navigator for each signal:
>>> s_cl = hs.load("coreloss.dm3")
>>> s_ll = hs.load("lowloss.dm3")
>>> s_edx = hs.load("edx.dm3")
>>> s_adf = hs.load("adf.dm3")
>>> hs.plot.plot_signals(
[s_cl, s_ll, s_edx], navigator_list=["slider",None,s_adf])
"""
import hyperspy.signal
if navigator_list:
if not (len(signal_list) == len(navigator_list)):
raise ValueError(
"signal_list and navigator_list must"
" have the same size")
if sync:
axes_manager_list = []
for signal in signal_list:
axes_manager_list.append(signal.axes_manager)
if not navigator_list:
navigator_list = []
if navigator is None:
navigator_list.extend([None] * len(signal_list))
elif isinstance(navigator, hyperspy.signal.BaseSignal):
navigator_list.append(navigator)
navigator_list.extend([None] * (len(signal_list) - 1))
elif navigator == "slider":
navigator_list.append("slider")
navigator_list.extend([None] * (len(signal_list) - 1))
elif navigator == "spectrum":
navigator_list.extend(["spectrum"] * len(signal_list))
elif navigator == "auto":
navigator_list.extend(["auto"] * len(signal_list))
else:
raise ValueError(
"navigator must be one of \"spectrum\",\"auto\","
" \"slider\", None, a Signal instance")
# Check to see if the spectra have the same navigational shapes
temp_shape_first = axes_manager_list[0].navigation_shape
for i, axes_manager in enumerate(axes_manager_list):
temp_shape = axes_manager.navigation_shape
if not (temp_shape_first == temp_shape):
raise ValueError(
"The spectra do not have the same navigation shape")
axes_manager_list[i] = axes_manager.deepcopy()
if i > 0:
for axis0, axisn in zip(axes_manager_list[0].navigation_axes,
axes_manager_list[i].navigation_axes):
axes_manager_list[i]._axes[axisn.index_in_array] = axis0
del axes_manager
for signal, navigator, axes_manager in zip(signal_list,
navigator_list,
axes_manager_list):
signal.plot(axes_manager=axes_manager,
navigator=navigator,
**kwargs)
# If sync is False
else:
if not navigator_list:
navigator_list = []
navigator_list.extend([navigator] * len(signal_list))
for signal, navigator in zip(signal_list, navigator_list):
signal.plot(navigator=navigator,
**kwargs)
def _make_heatmap_subplot(spectra, **plot_kwargs):
from hyperspy._signals.signal2d import Signal2D
im = Signal2D(spectra.data, axes=spectra.axes_manager._get_axes_dicts())
im.metadata.General.title = spectra.metadata.General.title
im.plot(**plot_kwargs)
return im._plot.signal_plot.ax
def set_xaxis_lims(mpl_ax, hs_axis):
"""
Set the matplotlib axis limits to match that of a HyperSpy axis.
Parameters
----------
mpl_ax : :class:`matplotlib.axis.Axis`
The ``matplotlib`` axis to change.
hs_axis : :class:`~hyperspy.axes.DataAxis`
The data axis that contains the values which control the scaling.
"""
x_axis_lower_lim = hs_axis.axis[0]
x_axis_upper_lim = hs_axis.axis[-1]
mpl_ax.set_xlim(x_axis_lower_lim, x_axis_upper_lim)
def _make_overlap_plot(spectra, ax, color="blue", line_style='-'):
if isinstance(color, str):
color = [color] * len(spectra)
if isinstance(line_style, str):
line_style = [line_style] * len(spectra)
for spectrum_index, (spectrum, color, line_style) in enumerate(
zip(spectra, color, line_style)):
x_axis = spectrum.axes_manager.signal_axes[0]
spectrum = _transpose_if_required(spectrum, 1)
ax.plot(x_axis.axis, spectrum.data, color=color, ls=line_style)
set_xaxis_lims(ax, x_axis)
_set_spectrum_xlabel(spectra if isinstance(spectra, hs.signals.BaseSignal)
else spectra[-1], ax)
ax.set_ylabel('Intensity')
ax.autoscale(tight=True)
def _make_cascade_subplot(
spectra, ax, color="blue", line_style='-', padding=1):
max_value = 0
for spectrum in spectra:
spectrum_yrange = (np.nanmax(spectrum.data) -
np.nanmin(spectrum.data))
if spectrum_yrange > max_value:
max_value = spectrum_yrange
if isinstance(color, str):
color = [color] * len(spectra)
if isinstance(line_style, str):
line_style = [line_style] * len(spectra)
for spectrum_index, (spectrum, color, line_style) in enumerate(
zip(spectra, color, line_style)):
x_axis = spectrum.axes_manager.signal_axes[0]
spectrum = _transpose_if_required(spectrum, 1)
data_to_plot = ((spectrum.data - spectrum.data.min()) /
float(max_value) + spectrum_index * padding)
ax.plot(x_axis.axis, data_to_plot, color=color, ls=line_style)
set_xaxis_lims(ax, x_axis)
_set_spectrum_xlabel(spectra if isinstance(spectra, hs.signals.BaseSignal)
else spectra[-1], ax)
ax.set_yticks([])
ax.autoscale(tight=True)
def _plot_spectrum(spectrum, ax, color="blue", line_style='-'):
x_axis = spectrum.axes_manager.signal_axes[0]
ax.plot(x_axis.axis, spectrum.data, color=color, ls=line_style)
set_xaxis_lims(ax, x_axis)
def _set_spectrum_xlabel(spectrum, ax):
x_axis = spectrum.axes_manager.signal_axes[0]
ax.set_xlabel("%s (%s)" % (x_axis.name, x_axis.units))
def _transpose_if_required(signal, expected_dimension):
# EDS profiles or maps have signal dimension = 0 and navigation dimension
# 1 or 2. For convenience, transpose the signal if possible
if (signal.axes_manager.signal_dimension == 0 and
signal.axes_manager.navigation_dimension == expected_dimension):
return signal.T
else:
return signal
def plot_images(images,
cmap=None,
no_nans=False,
per_row=3,
label='auto',
labelwrap=30,
suptitle=None,
suptitle_fontsize=18,
colorbar='multi',
centre_colormap="auto",
scalebar=None,
scalebar_color='white',
axes_decor='all',
padding=None,
tight_layout=False,
aspect='auto',
min_asp=0.1,
namefrac_thresh=0.4,
fig=None,
vmin=None,
vmax=None,
**kwargs):
"""Plot multiple images as sub-images in one figure.
Parameters
----------
images : list of Signal2D or BaseSignal
`images` should be a list of Signals to plot. For `BaseSignal` with
        navigation dimension 2 and signal dimension 0, the signal will be
        transposed to form a `Signal2D`.
Multi-dimensional images will have each plane plotted as a separate
image.
If any of the signal shapes is not suitable, a ValueError will be
raised.
cmap : {matplotlib colormap, list, ``'mpl_colors'``}, optional
The colormap used for the images, by default uses the setting
``color map signal`` from the plot preferences. A list of colormaps can
also be provided, and the images will cycle through them. Optionally,
the value ``'mpl_colors'`` will cause the cmap to loop through the
default ``matplotlib`` colors (to match with the default output of the
:py:func:`~.drawing.utils.plot_spectra` method).
Note: if using more than one colormap, using the ``'single'``
option for ``colorbar`` is disallowed.
no_nans : bool, optional
If True, set nans to zero for plotting.
per_row : int, optional
The number of plots in each row.
label : {None, str, list of str}, optional
Control the title labeling of the plotted images.
If None, no titles will be shown.
If 'auto' (default), function will try to determine suitable titles
using Signal2D titles, falling back to the 'titles' option if no good
short titles are detected.
Works best if all images to be plotted have the same beginning
to their titles.
If 'titles', the title from each image's `metadata.General.title`
will be used.
If any other single str, images will be labeled in sequence using
that str as a prefix.
If a list of str, the list elements will be used to determine the
labels (repeated, if necessary).
labelwrap : int, optional
Integer specifying the number of characters that will be used on
one line.
If the function returns an unexpected blank figure, lower this
value to reduce overlap of the labels between figures.
suptitle : str, optional
Title to use at the top of the figure. If called with label='auto',
this parameter will override the automatically determined title.
suptitle_fontsize : int, optional
Font size to use for super title at top of figure.
colorbar : {'multi', None, 'single'}
Controls the type of colorbars that are plotted.
If None, no colorbar is plotted.
If 'multi' (default), individual colorbars are plotted for each
(non-RGB) image
If 'single', all (non-RGB) images are plotted on the same scale,
and one colorbar is shown for all
centre_colormap : {"auto", True, False}
If True, the centre of the color scheme is set to zero. This is
particularly useful when using diverging color schemes. If "auto"
(default), diverging color schemes are automatically centred.
scalebar : {None, 'all', list of ints}, optional
If None (or False), no scalebars will be added to the images.
If 'all', scalebars will be added to all images.
If list of ints, scalebars will be added to each image specified.
scalebar_color : str, optional
A valid MPL color string; will be used as the scalebar color.
axes_decor : {'all', 'ticks', 'off', None}, optional
Controls how the axes are displayed on each image; default is 'all'.
If 'all', both ticks and axis labels will be shown.
        If 'ticks', axis labels will be hidden, but ticks and tick labels
        will be shown.
        If 'off', all decorations and the frame will be disabled.
        If None, axis and tick labels will be hidden, but ticks and the
        frame will remain.
padding : {None, dict}, optional
This parameter controls the spacing between images.
If None, default options will be used.
Otherwise, supply a dictionary with the spacing options as
keywords and desired values as values.
Values should be supplied as used in
:py:func:`matplotlib.pyplot.subplots_adjust`,
and can be 'left', 'bottom', 'right', 'top', 'wspace' (width) and
'hspace' (height).
tight_layout : bool, optional
If true, hyperspy will attempt to improve image placement in
figure using matplotlib's tight_layout.
If false, repositioning images inside the figure will be left as
an exercise for the user.
aspect : {str, numeric}, optional
If 'auto', aspect ratio is auto determined, subject to min_asp.
If 'square', image will be forced onto square display.
If 'equal', aspect ratio of 1 will be enforced.
If float (or int/long), given value will be used.
min_asp : float, optional
Minimum aspect ratio to be used when plotting images.
namefrac_thresh : float, optional
Threshold to use for auto-labeling. This parameter controls how
much of the titles must be the same for the auto-shortening of
labels to activate. Can vary from 0 to 1. Smaller values
encourage shortening of titles by auto-labeling, while larger
        values will require more overlap in titles before activating the
auto-label code.
fig : mpl figure, optional
If set, the images will be plotted to an existing MPL figure
vmin, vmax: scalar, str, None
If str, formatted as 'xth', use this value to calculate the percentage
of pixels that are left out of the lower and upper bounds.
For example, for a vmin of '1th', 1% of the lowest will be ignored to
estimate the minimum value. Similarly, for a vmax value of '1th', 1%
of the highest value will be ignored in the estimation of the maximum
        value. The percentage must be in the range [0, 100].
See :py:func:`numpy.percentile` for more explanation.
        If None, use the percentile values set in the preferences.
        If float or integer, use the given value directly as the bound.
**kwargs, optional
Additional keyword arguments passed to matplotlib.imshow()
Returns
-------
axes_list : list
A list of subplot axes that hold the images.
See Also
--------
plot_spectra : Plotting of multiple spectra
plot_signals : Plotting of multiple signals
plot_histograms : Compare signal histograms
Notes
-----
`interpolation` is a useful parameter to provide as a keyword
argument to control how the space between pixels is interpolated. A
value of ``'nearest'`` will cause no interpolation between pixels.
`tight_layout` is known to be quite brittle, so an option is provided
to disable it. Turn this option off if output is not as expected,
or try adjusting `label`, `labelwrap`, or `per_row`.
"""
def __check_single_colorbar(cbar):
if cbar == 'single':
raise ValueError('Cannot use a single colorbar with multiple '
'colormaps. Please check for compatible '
'arguments.')
from hyperspy.drawing.widgets import ScaleBar
from hyperspy.misc import rgb_tools
from hyperspy.signal import BaseSignal
# Check that we have a hyperspy signal
im = [images] if not isinstance(images, (list, tuple)) else images
for image in im:
if not isinstance(image, BaseSignal):
raise ValueError("`images` must be a list of image signals or a "
"multi-dimensional signal."
" " + repr(type(images)) + " was given.")
# For list of EDS maps, transpose the BaseSignal
if isinstance(images, (list, tuple)):
images = [_transpose_if_required(image, 2) for image in images]
# If input is >= 1D signal (e.g. for multi-dimensional plotting),
# copy it and put it in a list so labeling works out as (x,y) when plotting
if isinstance(images,
BaseSignal) and images.axes_manager.navigation_dimension > 0:
images = [images._deepcopy_with_new_data(images.data)]
n = 0
for i, sig in enumerate(images):
if sig.axes_manager.signal_dimension != 2:
raise ValueError("This method only plots signals that are images. "
"The signal dimension must be equal to 2. "
"The signal at position " + repr(i) +
" was " + repr(sig) + ".")
# increment n by the navigation size, or by 1 if the navigation size is
# <= 0
n += (sig.axes_manager.navigation_size
if sig.axes_manager.navigation_size > 0
else 1)
# If no cmap given, get default colormap from pyplot:
if cmap is None:
cmap = [preferences.Plot.cmap_signal]
elif cmap == 'mpl_colors':
for n_color, c in enumerate(mpl.rcParams['axes.prop_cycle']):
make_cmap(colors=['#000000', c['color']],
name='mpl{}'.format(n_color))
cmap = ['mpl{}'.format(i) for i in
range(len(mpl.rcParams['axes.prop_cycle']))]
__check_single_colorbar(colorbar)
# cmap is list, tuple, or something else iterable (but not string):
elif hasattr(cmap, '__iter__') and not isinstance(cmap, str):
try:
cmap = [c.name for c in cmap] # convert colormap to string
except AttributeError:
cmap = [c for c in cmap] # c should be string if not colormap
__check_single_colorbar(colorbar)
elif isinstance(cmap, mpl.colors.Colormap):
cmap = [cmap.name] # convert single colormap to list with string
elif isinstance(cmap, str):
cmap = [cmap] # cmap is single string, so make it a list
else:
# Didn't understand cmap input, so raise error
raise ValueError('The provided cmap value was not understood. Please '
'check input values.')
# If any of the cmaps given are diverging, and auto-centering, set the
# appropriate flag:
if centre_colormap == "auto":
centre_colormaps = []
for c in cmap:
if c in MPL_DIVERGING_COLORMAPS:
centre_colormaps.append(True)
else:
centre_colormaps.append(False)
# if it was True, just convert to list
elif centre_colormap:
centre_colormaps = [True]
# likewise for false
elif not centre_colormap:
centre_colormaps = [False]
# finally, convert lists to cycle generators for adaptive length:
centre_colormaps = itertools.cycle(centre_colormaps)
cmap = itertools.cycle(cmap)
# Sort out the labeling:
div_num = 0
all_match = False
shared_titles = False
user_labels = False
if label is None:
pass
elif label == 'auto':
# Use some heuristics to try to get base string of similar titles
label_list = [x.metadata.General.title for x in images]
# Find the shortest common string between the image titles
# and pull that out as the base title for the sequence of images
# array in which to store arrays
res = np.zeros((len(label_list), len(label_list[0]) + 1))
res[:, 0] = 1
# j iterates the strings
for j in range(len(label_list)):
# i iterates length of substring test
for i in range(1, len(label_list[0]) + 1):
# stores whether or not characters in title match
res[j, i] = label_list[0][:i] in label_list[j]
# sum up the results (1 is True, 0 is False) and create
# a substring based on the minimum value (this will be
# the "smallest common string" between all the titles
if res.all():
basename = label_list[0]
div_num = len(label_list[0])
all_match = True
else:
div_num = int(min(np.sum(res, 1)))
basename = label_list[0][:div_num - 1]
all_match = False
# trim off any '(' or ' ' characters at end of basename
if div_num > 1:
while True:
if basename[len(basename) - 1] == '(':
basename = basename[:-1]
elif basename[len(basename) - 1] == ' ':
basename = basename[:-1]
else:
break
# namefrac is ratio of length of basename to the image name
# if it is high (e.g. over 0.5), we can assume that all images
# share the same base
if len(label_list[0]) > 0:
namefrac = float(len(basename)) / len(label_list[0])
else:
# If label_list[0] is empty, it means there was probably no
# title set originally, so nothing to share
namefrac = 0
if namefrac > namefrac_thresh:
# there was a significant overlap of label beginnings
shared_titles = True
# only use new suptitle if one isn't specified already
if suptitle is None:
suptitle = basename
else:
# there was not much overlap, so default back to 'titles' mode
shared_titles = False
label = 'titles'
div_num = 0
elif label == 'titles':
# Set label_list to each image's pre-defined title
label_list = [x.metadata.General.title for x in images]
elif isinstance(label, str):
# Set label_list to an indexed list, based off of label
label_list = [label + " " + repr(num) for num in range(n)]
elif isinstance(label, list) and all(
isinstance(x, str) for x in label):
label_list = label
user_labels = True
# If list of labels is longer than the number of images, just use the
# first n elements
if len(label_list) > n:
del label_list[n:]
if len(label_list) < n:
label_list *= (n // len(label_list)) + 1
del label_list[n:]
else:
raise ValueError("Did not understand input of labels.")
# Determine appropriate number of images per row
rows = int(np.ceil(n / float(per_row)))
if n < per_row:
per_row = n
# Set overall figure size and define figure (if not pre-existing)
if fig is None:
k = max(plt.rcParams['figure.figsize']) / max(per_row, rows)
f = plt.figure(figsize=(tuple(k * i for i in (per_row, rows))))
else:
f = fig
# Initialize list to hold subplot axes
axes_list = []
# Initialize list of rgb tags
isrgb = [False] * len(images)
# Check to see if there are any rgb images in list
# and tag them using the isrgb list
for i, img in enumerate(images):
if rgb_tools.is_rgbx(img.data):
isrgb[i] = True
# Determine how many non-rgb images there are
non_rgb = list(itertools.compress(images, [not j for j in isrgb]))
if len(non_rgb) == 0 and colorbar is not None:
colorbar = None
warnings.warn("Sorry, colorbar is not implemented for RGB images.")
# Check if we need to add a scalebar for some of the images
if isinstance(scalebar, list) and all(isinstance(x, int)
for x in scalebar):
scalelist = True
else:
scalelist = False
def check_list_length(arg, arg_name):
if isinstance(arg, (list, tuple)):
if len(arg) != n:
_logger.warning(f'The provided {arg_name} values are ignored '
'because the length of the list does not '
'match the number of images')
arg = [None] * n
return arg
# Find global min and max values of all the non-rgb images for use with
# 'single' scalebar, otherwise define this value later.
if colorbar == 'single':
# check that vmin and vmax are not list
if any([isinstance(v, (tuple, list)) for v in [vmin, vmax]]):
            _logger.warning('The provided vmin or vmax values are ignored '
                            'because they need to be a scalar or a str '
                            'to be compatible with a single colorbar. '
                            'The default values are used instead.')
vmin, vmax = None, None
vmin_max = np.array(
[contrast_stretching(i.data, vmin, vmax) for i in non_rgb])
_vmin, _vmax = vmin_max[:, 0].min(), vmin_max[:, 1].max()
if next(centre_colormaps):
_vmin, _vmax = centre_colormap_values(_vmin, _vmax)
else:
vmin = check_list_length(vmin, "vmin")
vmax = check_list_length(vmax, "vmax")
idx = 0
ax_im_list = [0] * len(isrgb)
# Replot: create a list to store references to the images
replot_ims = []
# Loop through each image, adding subplot for each one
for i, ims in enumerate(images):
# Get handles for the signal axes and axes_manager
axes_manager = ims.axes_manager
if axes_manager.navigation_dimension > 0:
ims = ims._deepcopy_with_new_data(ims.data)
for j, im in enumerate(ims):
ax = f.add_subplot(rows, per_row, idx + 1)
axes_list.append(ax)
data = im.data
centre = next(centre_colormaps) # get next value for centreing
# Enable RGB plotting
if rgb_tools.is_rgbx(data):
data = rgb_tools.rgbx2regular_array(data, plot_friendly=True)
_vmin, _vmax = None, None
elif colorbar != 'single':
_vmin = vmin[idx] if isinstance(vmin, (tuple, list)) else vmin
_vmax = vmax[idx] if isinstance(vmax, (tuple, list)) else vmax
_vmin, _vmax = contrast_stretching(data, _vmin, _vmax)
if centre:
_vmin, _vmax = centre_colormap_values(_vmin, _vmax)
# Remove NaNs (if requested)
if no_nans:
data = np.nan_to_num(data)
# Get handles for the signal axes and axes_manager
axes_manager = im.axes_manager
axes = axes_manager.signal_axes
# Set dimensions of images
xaxis = axes[0]
yaxis = axes[1]
extent = (
xaxis.low_value,
xaxis.high_value,
yaxis.high_value,
yaxis.low_value,
)
if not isinstance(aspect, (int, float)) and aspect not in [
'auto', 'square', 'equal']:
_logger.warning("Did not understand aspect ratio input. "
"Using 'auto' as default.")
aspect = 'auto'
if aspect == 'auto':
if float(yaxis.size) / xaxis.size < min_asp:
factor = min_asp * float(xaxis.size) / yaxis.size
elif float(yaxis.size) / xaxis.size > min_asp ** -1:
factor = min_asp ** -1 * float(xaxis.size) / yaxis.size
else:
factor = 1
asp = np.abs(factor * float(xaxis.scale) / yaxis.scale)
elif aspect == 'square':
asp = abs(extent[1] - extent[0]) / abs(extent[3] - extent[2])
elif aspect == 'equal':
asp = 1
elif isinstance(aspect, (int, float)):
asp = aspect
            if 'interpolation' not in kwargs:
kwargs['interpolation'] = 'nearest'
# Plot image data, using _vmin and _vmax to set bounds,
# or allowing them to be set automatically if using individual
# colorbars
kwargs.update({'cmap':next(cmap), 'extent':extent, 'aspect':asp})
axes_im = ax.imshow(data, vmin=_vmin, vmax=_vmax, **kwargs)
ax_im_list[i] = axes_im
# If an axis trait is undefined, shut off :
if (xaxis.units == t.Undefined or yaxis.units == t.Undefined or
xaxis.name == t.Undefined or yaxis.name == t.Undefined):
if axes_decor == 'all':
_logger.warning(
'Axes labels were requested, but one '
'or both of the '
'axes units and/or name are undefined. '
'Axes decorations have been set to '
'\'ticks\' instead.')
axes_decor = 'ticks'
# If all traits are defined, set labels as appropriate:
else:
ax.set_xlabel(axes[0].name + " axis (" + axes[0].units + ")")
ax.set_ylabel(axes[1].name + " axis (" + axes[1].units + ")")
if label:
if all_match:
title = ''
elif shared_titles:
title = label_list[i][div_num - 1:]
else:
if len(ims) == n:
# This is true if we are plotting just 1
# multi-dimensional Signal2D
title = label_list[idx]
elif user_labels:
title = label_list[idx]
else:
title = label_list[i]
if ims.axes_manager.navigation_size > 1 and not user_labels:
title += " %s" % str(ims.axes_manager.indices)
ax.set_title(textwrap.fill(title, labelwrap))
# Set axes decorations based on user input
set_axes_decor(ax, axes_decor)
# If using independent colorbars, add them
if colorbar == 'multi' and not isrgb[i]:
div = make_axes_locatable(ax)
cax = div.append_axes("right", size="5%", pad=0.05)
plt.colorbar(axes_im, cax=cax)
# Add scalebars as necessary
if (scalelist and idx in scalebar) or scalebar == 'all':
ax.scalebar = ScaleBar(
ax=ax,
units=axes[0].units,
color=scalebar_color,
)
# Replot: store references to the images
replot_ims.append(im)
idx += 1
# If using a single colorbar, add it, and do tight_layout, ensuring that
# a colorbar is only added based off of non-rgb Images:
if colorbar == 'single':
foundim = None
for i in range(len(isrgb)):
if (not isrgb[i]) and foundim is None:
foundim = i
if foundim is not None:
f.subplots_adjust(right=0.8)
cbar_ax = f.add_axes([0.9, 0.1, 0.03, 0.8])
f.colorbar(ax_im_list[foundim], cax=cbar_ax)
if tight_layout:
# tight_layout, leaving room for the colorbar
plt.tight_layout(rect=[0, 0, 0.9, 1])
elif tight_layout:
plt.tight_layout()
elif tight_layout:
plt.tight_layout()
# Set top bounds for shared titles and add suptitle
if suptitle:
f.subplots_adjust(top=0.85)
f.suptitle(suptitle, fontsize=suptitle_fontsize)
# If we want to plot scalebars, loop through the list of axes and add them
if scalebar is None or scalebar is False:
# Do nothing if no scalebars are called for
pass
elif scalebar == 'all':
# scalebars were taken care of in the plotting loop
pass
elif scalelist:
# scalebars were taken care of in the plotting loop
pass
else:
raise ValueError("Did not understand scalebar input. Must be None, "
"'all', or list of ints.")
# Adjust subplot spacing according to user's specification
if padding is not None:
plt.subplots_adjust(**padding)
# Replot: connect function
def on_dblclick(event):
# On the event of a double click, replot the selected subplot
if not event.inaxes:
return
if not event.dblclick:
return
subplots = [axi for axi in f.axes if isinstance(axi, mpl.axes.Subplot)]
inx = list(subplots).index(event.inaxes)
im = replot_ims[inx]
# Use some of the info in the subplot
cm = subplots[inx].images[0].get_cmap()
clim = subplots[inx].images[0].get_clim()
sbar = False
if (scalelist and inx in scalebar) or scalebar == 'all':
sbar = True
im.plot(colorbar=bool(colorbar),
vmin=clim[0],
vmax=clim[1],
no_nans=no_nans,
aspect=asp,
scalebar=sbar,
scalebar_color=scalebar_color,
cmap=cm)
f.canvas.mpl_connect('button_press_event', on_dblclick)
return axes_list
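# Illustrative usage sketch (not part of the original module): plot a few
# randomly generated Signal2D images on a shared colorbar scale. The helper
# name, the data and the titles are made up for the example; ``hyperspy.api``
# is imported lazily to avoid a circular import at module load time.
def _example_plot_images_usage():
    import hyperspy.api as hs
    imgs = [hs.signals.Signal2D(np.random.random((64, 64))) for _ in range(4)]
    for i, im in enumerate(imgs):
        im.metadata.General.title = 'Random image %d' % i
    # 'single' puts all (non-RGB) images on one common colorbar scale;
    # axes_decor='off' avoids warnings about undefined axis names/units.
    return plot_images(imgs, per_row=2, colorbar='single', axes_decor='off')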
def set_axes_decor(ax, axes_decor):
if axes_decor == 'off':
ax.axis('off')
elif axes_decor == 'ticks':
ax.set_xlabel('')
ax.set_ylabel('')
elif axes_decor == 'all':
pass
elif axes_decor is None:
ax.set_xlabel('')
ax.set_ylabel('')
ax.set_xticklabels([])
ax.set_yticklabels([])
def make_cmap(colors, name='my_colormap', position=None,
bit=False, register=True):
"""
Create a matplotlib colormap with customized colors, optionally registering
it with matplotlib for simplified use.
Adapted from Chris Slocum's code at:
https://github.com/CSlocumWX/custom_colormap/blob/master/custom_colormaps.py
and used under the terms of that code's BSD-3 license
Parameters
----------
colors : iterable
list of either tuples containing rgb values, or html strings
Colors should be arranged so that the first color is the lowest
value for the colorbar and the last is the highest.
name : str
name of colormap to use when registering with matplotlib
position : {None, iterable}, optional
list containing the values (from [0,1]) that dictate the position
of each color within the colormap. If None (default), the colors
will be equally-spaced within the colorbar.
bit : bool, optional
True if RGB colors are given in 8-bit [0 to 255] or False if given
in arithmetic basis [0 to 1] (default).
register : bool, optional
        Switch to control whether or not to register the custom colormap
with matplotlib in order to enable use by just the name string.
"""
bit_rgb = np.linspace(0, 1, 256)
if position is None:
position = np.linspace(0, 1, len(colors))
else:
if len(position) != len(colors):
raise ValueError("Position length must be the same as colors")
elif position[0] != 0 or position[-1] != 1:
raise ValueError("Position must start with 0 and end with 1")
cdict = {'red': [], 'green': [], 'blue': []}
for pos, color in zip(position, colors):
if isinstance(color, str):
color = mpl.colors.to_rgb(color)
elif bit:
color = (bit_rgb[color[0]],
bit_rgb[color[1]],
bit_rgb[color[2]])
cdict['red'].append((pos, color[0], color[0]))
cdict['green'].append((pos, color[1], color[1]))
cdict['blue'].append((pos, color[2], color[2]))
cmap = mpl.colors.LinearSegmentedColormap(name, cdict, 256)
if register:
mpl.cm.register_cmap(name, cmap)
return cmap
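# Illustrative sketch (not part of the original module): build and register a
# simple black-to-red colormap with ``make_cmap`` and refer to it by name when
# plotting. The colormap name 'black_red' is arbitrary.
def _example_make_cmap_usage():
    import hyperspy.api as hs
    make_cmap(colors=['#000000', '#ff0000'], name='black_red')
    img = hs.signals.Signal2D(np.random.random((32, 32)))
    # Because register=True (the default), the registered name alone is enough.
    return plot_images([img], cmap='black_red', axes_decor='off')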
def plot_spectra(
spectra,
style='overlap',
color=None,
line_style=None,
padding=1.,
legend=None,
legend_picking=True,
legend_loc='upper right',
fig=None,
ax=None,
auto_update=None,
**kwargs):
"""Plot several spectra in the same figure.
Parameters
----------
spectra : list of Signal1D or BaseSignal
Ordered spectra list of signal to plot. If `style` is "cascade" or
"mosaic", the spectra can have different size and axes. For `BaseSignal`
        with navigation dimension 1 and signal dimension 0, the signal will be
        transposed to form a `Signal1D`.
style : {'overlap', 'cascade', 'mosaic', 'heatmap'}
The style of the plot.
color : {None, matplotlib color, list of colors}, optional
Sets the color of the lines of the plots (no action on 'heatmap').
For a list, if its length is less than the number of spectra to plot,
the colors will be cycled. If `None`, use default matplotlib color
cycle.
line_style: {None, matplotlib line style, list of line_styles}, optional
Sets the line style of the plots (no action on 'heatmap').
        The main line styles are '-', '--', 'steps', '-.' and ':'.
        For a list, if its length is less than the number of
        spectra to plot, line_style will be cycled.
        If `None`, continuous lines ('-') are used.
padding : float, optional, default 1.0
Option for "cascade". 1 guarantees that there is no overlapping.
However, in many cases, a value between 0 and 1 can produce a tighter
plot without overlapping. Negative values have the same effect but
reverse the order of the spectra without reversing the order of the
colors.
legend: {None, list of str, 'auto'}, optional
        If a list of strings, it is used as the legend for "cascade" or as
        the subplot titles for "mosaic". If 'auto', the title of each
        spectrum (metadata.General.title) is used.
legend_picking: bool, optional
If True (default), a spectrum can be toggled on and off by clicking on
the legended line.
legend_loc : {str, int}, optional
This parameter controls where the legend is placed on the figure;
see the pyplot.legend docstring for valid values
fig : {None, matplotlib figure}, optional
If None, a default figure will be created. Specifying fig will
not work for the 'heatmap' style.
ax : {none, matplotlib ax (subplot)}, optional
If None, a default ax will be created. Will not work for 'mosaic'
or 'heatmap' style.
auto_update : bool or None
If True, the plot will update when the data are changed. Only supported
        with style='overlap' and a list of signals with navigation dimension 0.
If None (default), update the plot only for style='overlap'.
**kwargs, optional
Keywords arguments passed to :py:func:`matplotlib.pyplot.figure` or
:py:func:`matplotlib.pyplot.subplots` if style='mosaic'.
Has no effect on 'heatmap' style.
Example
-------
>>> s = hs.load("some_spectra")
>>> hs.plot.plot_spectra(s, style='cascade', color='red', padding=0.5)
To save the plot as a png-file
>>> hs.plot.plot_spectra(s).figure.savefig("test.png")
Returns
-------
ax: matplotlib axes or list of matplotlib axes
An array is returned when `style` is "mosaic".
"""
import hyperspy.signal
def _reverse_legend(ax_, legend_loc_):
"""
Reverse the ordering of a matplotlib legend (to be more consistent
with the default ordering of plots in the 'cascade' and 'overlap'
        styles).
Parameters
----------
ax_: matplotlib axes
legend_loc_: {str, int}
This parameter controls where the legend is placed on the
figure; see the pyplot.legend docstring for valid values.
"""
l = ax_.get_legend()
labels = [lb.get_text() for lb in list(l.get_texts())]
handles = l.legendHandles
ax_.legend(handles[::-1], labels[::-1], loc=legend_loc_)
    # Before v1.3, the default style was read from the preferences.
if style == "default":
style = "overlap"
if color is not None:
if isinstance(color, str):
color = itertools.cycle([color])
elif hasattr(color, "__iter__"):
color = itertools.cycle(color)
else:
raise ValueError("Color must be None, a valid matplotlib color "
"string, or a list of valid matplotlib colors.")
else:
color = itertools.cycle(
plt.rcParams['axes.prop_cycle'].by_key()["color"])
if line_style is not None:
if isinstance(line_style, str):
line_style = itertools.cycle([line_style])
elif hasattr(line_style, "__iter__"):
line_style = itertools.cycle(line_style)
else:
raise ValueError("line_style must be None, a valid matplotlib "
"line_style string or a list of valid matplotlib "
"line_style.")
else:
line_style = ['-'] * len(spectra)
if legend is not None:
if isinstance(legend, str):
if legend == 'auto':
legend = [spec.metadata.General.title for spec in spectra]
else:
raise ValueError("legend must be None, 'auto' or a list of "
"strings.")
if style == 'overlap':
if fig is None:
fig = plt.figure(**kwargs)
if ax is None:
ax = fig.add_subplot(111)
_make_overlap_plot(spectra,
ax,
color=color,
line_style=line_style,)
if legend is not None:
ax.legend(legend, loc=legend_loc)
_reverse_legend(ax, legend_loc)
if legend_picking is True:
animate_legend(fig=fig, ax=ax)
elif style == 'cascade':
if fig is None:
fig = plt.figure(**kwargs)
if ax is None:
ax = fig.add_subplot(111)
_make_cascade_subplot(spectra,
ax,
color=color,
line_style=line_style,
padding=padding)
if legend is not None:
plt.legend(legend, loc=legend_loc)
_reverse_legend(ax, legend_loc)
elif style == 'mosaic':
default_fsize = plt.rcParams["figure.figsize"]
figsize = (default_fsize[0], default_fsize[1] * len(spectra))
fig, subplots = plt.subplots(
len(spectra), 1, figsize=figsize, **kwargs)
if legend is None:
legend = [legend] * len(spectra)
for spectrum, ax, color, line_style, legend in zip(
spectra, subplots, color, line_style, legend):
spectrum = _transpose_if_required(spectrum, 1)
_plot_spectrum(spectrum, ax, color=color, line_style=line_style)
ax.set_ylabel('Intensity')
if legend is not None:
ax.set_title(legend)
if not isinstance(spectra, hyperspy.signal.BaseSignal):
_set_spectrum_xlabel(spectrum, ax)
if isinstance(spectra, hyperspy.signal.BaseSignal):
_set_spectrum_xlabel(spectrum, ax)
fig.tight_layout()
elif style == 'heatmap':
if not isinstance(spectra, hyperspy.signal.BaseSignal):
import hyperspy.utils
spectra = [_transpose_if_required(spectrum, 1) for spectrum in
spectra]
spectra = hyperspy.utils.stack(spectra)
with spectra.unfolded():
ax = _make_heatmap_subplot(spectra)
ax.set_ylabel('Spectra')
ax = ax if style != "mosaic" else subplots
def update_line(spectrum, line):
x_axis = spectrum.axes_manager[-1].axis
line.set_data(x_axis, spectrum.data)
fig = line.get_figure()
ax = fig.get_axes()[0]
# `relim` needs to be called before `autoscale_view`
ax.relim()
ax.autoscale_view()
fig.canvas.draw()
if auto_update is None and style == 'overlap':
auto_update = True
if auto_update:
if style != 'overlap':
raise ValueError("auto_update=True is only supported with "
"style='overlap'.")
for spectrum, line in zip(spectra, ax.get_lines()):
f = partial(update_line, spectrum, line)
spectrum.events.data_changed.connect(f, [])
# disconnect event when closing figure
disconnect = partial(spectrum.events.data_changed.disconnect, f)
on_figure_window_close(fig, disconnect)
return ax
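# Illustrative sketch (not part of the original module): a 'cascade' plot of a
# few random Signal1D spectra with an automatic legend built from their titles.
def _example_plot_spectra_usage():
    import hyperspy.api as hs
    spectra = [hs.signals.Signal1D(np.random.random(100) + i) for i in range(3)]
    for i, s in enumerate(spectra):
        s.metadata.General.title = 'spectrum %d' % i
    # padding < 1 tightens the cascade; legend='auto' uses the signal titles.
    return plot_spectra(spectra, style='cascade', padding=0.9, legend='auto')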
def animate_legend(fig=None, ax=None):
"""Animate the legend of a figure.
A spectrum can be toggled on and off by clicking on the line in the legend.
Parameters
----------
fig: {None, matplotlib.figure}, optional
If None pick the current figure using "plt.gcf".
ax: {None, matplotlib.axes}, optional
If None pick the current axes using "plt.gca".
Note
----
    Code inspired by legend_picking.py in the matplotlib gallery.
"""
if fig is None:
fig = plt.gcf()
if ax is None:
ax = plt.gca()
lines = ax.lines[::-1]
lined = dict()
leg = ax.get_legend()
for legline, origline in zip(leg.get_lines(), lines):
legline.set_pickradius(5) # 5 pts tolerance
legline.set_picker(True)
lined[legline] = origline
def onpick(event):
# on the pick event, find the orig line corresponding to the
# legend proxy line, and toggle the visibility
legline = event.artist
if legline.axes == ax:
origline = lined[legline]
vis = not origline.get_visible()
origline.set_visible(vis)
# Change the alpha on the line in the legend so we can see what lines
# have been toggled
if vis:
legline.set_alpha(1.0)
else:
legline.set_alpha(0.2)
fig.canvas.draw_idle()
fig.canvas.mpl_connect('pick_event', onpick)
def plot_histograms(signal_list,
bins='fd',
range_bins=None,
color=None,
line_style=None,
legend='auto',
fig=None,
**kwargs):
"""Plot the histogram of every signal in the list in one figure.
This function creates a histogram for each signal and plots the list with
the `utils.plot.plot_spectra` function.
Parameters
----------
signal_list : iterable
        Ordered list of signals whose histograms will be computed and
        plotted together (using the 'overlap' style of `plot_spectra`).
bins : {int, list, str}, optional
If bins is a string, then it must be one of:
'knuth' : use Knuth's rule to determine bins,
'scott' : use Scott's rule to determine bins,
        'fd' : use the Freedman-Diaconis rule to determine bins,
'blocks' : use bayesian blocks for dynamic bin widths.
range_bins : {None, tuple}, optional
The minimum and maximum range for the histogram. If not specified,
it will be (x.min(), x.max()).
color : {None, valid matplotlib color, list of colors}, optional
Sets the color of the lines of the plots. For a list, if its length is
less than the number of spectra to plot, the colors will be cycled.
If `None`, use default matplotlib color cycle.
line_style: {None, valid matplotlib line style, list of line styles},
optional
The main line styles are '-','--','steps','-.',':'.
For a list, if its length is less than the number of
spectra to plot, line_style will be cycled.
        If `None`, continuous lines ('-') are used.
legend: {None, list of str, 'auto'}, optional
        Display a legend. If 'auto', the title of each spectrum
(metadata.General.title) is used.
legend_picking: bool, optional
If true, a spectrum can be toggled on and off by clicking on
the line in the legend.
fig : {None, matplotlib figure}, optional
If None, a default figure will be created.
**kwargs
        other keyword arguments (such as `weights` and `density`) are described in
:py:func:`numpy.histogram`.
Example
-------
Histograms of two random chi-square distributions.
>>> img = hs.signals.Signal2D(np.random.chisquare(1,[10,10,100]))
>>> img2 = hs.signals.Signal2D(np.random.chisquare(2,[10,10,100]))
>>> hs.plot.plot_histograms([img,img2],legend=['hist1','hist2'])
Returns
-------
    ax: matplotlib axes
        The axes of the 'overlap' plot created by `plot_spectra`.
"""
hists = []
for obj in signal_list:
hists.append(obj.get_histogram(bins=bins,
range_bins=range_bins, **kwargs))
if line_style is None:
line_style = 'steps'
return plot_spectra(hists, style='overlap', color=color,
line_style=line_style, legend=legend, fig=fig)
def picker_kwargs(value, kwargs=None):
    # Use None as the default to avoid sharing a mutable dict between calls.
    if kwargs is None:
        kwargs = {}
# picker is deprecated in favor of pickradius
if LooseVersion(mpl.__version__) >= LooseVersion("3.3.0"):
kwargs.update({'pickradius': value, 'picker':True})
else:
kwargs['picker'] = value
return kwargs
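# Illustrative sketch (not part of the original module): the helper above is
# meant to be splatted into an artist call so that the pick radius is passed
# with whichever keyword the running matplotlib version expects.
def _example_picker_kwargs_usage():
    fig, ax = plt.subplots()
    line, = ax.plot(np.arange(10), np.arange(10), **picker_kwargs(5))
    return line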
|
"""
Search for PEAR/PECL packages to include in the blueprint.
"""
import logging
import re
import subprocess
from blueprint import util
def php(b, r):
logging.info('searching for PEAR/PECL packages')
# Precompile a pattern for parsing the output of `{pear,pecl} list`.
pattern = re.compile(r'^([0-9a-zA-Z_]+)\s+([0-9][0-9a-zA-Z\.-]*)\s')
# PEAR packages are managed by `php-pear` (obviously). PECL packages
# are managed by `php5-dev` because they require development headers
# (less obvious but still makes sense).
if util.lsb_release_codename() is None:
pecl_manager = 'php-devel'
else:
pecl_manager = 'php5-dev'
for manager, progname in (('php-pear', 'pear'),
(pecl_manager, 'pecl')):
try:
p = subprocess.Popen([progname, 'list'],
close_fds=True, stdout=subprocess.PIPE)
except OSError:
continue
for line in p.stdout:
match = pattern.match(line)
if match is None:
continue
package, version = match.group(1), match.group(2)
if not r.ignore_package(manager, package):
b.add_package(manager, package, version)
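# Illustrative sketch (not part of the original module): the same pattern used
# in php() applied to a made-up `pear list` output line, to show which groups
# hold the package name and the version.
def _example_parse_pear_line():
    sample = 'Archive_Tar 1.4.0 stable'
    pattern = re.compile(r'^([0-9a-zA-Z_]+)\s+([0-9][0-9a-zA-Z\.-]*)\s')
    match = pattern.match(sample)
    return match.group(1), match.group(2) # ('Archive_Tar', '1.4.0')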
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
VectorSplit.py
---------------------
Date : September 2014
Copyright : (C) 2014 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from builtins import str
__author__ = 'Alexander Bruy'
__date__ = 'September 2014'
__copyright__ = '(C) 2014, Alexander Bruy'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from qgis.PyQt.QtGui import QIcon
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.parameters import ParameterVector
from processing.core.parameters import ParameterTableField
from processing.core.outputs import OutputDirectory
from processing.tools import dataobjects, vector
from processing.tools.system import mkdir
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
class VectorSplit(GeoAlgorithm):
INPUT = 'INPUT'
FIELD = 'FIELD'
OUTPUT = 'OUTPUT'
def getIcon(self):
return QIcon(os.path.join(pluginPath, 'images', 'ftools', 'split_layer.png'))
def defineCharacteristics(self):
self.name, self.i18n_name = self.trAlgorithm('Split vector layer')
self.group, self.i18n_group = self.trAlgorithm('Vector general tools')
self.addParameter(ParameterVector(self.INPUT,
self.tr('Input layer')))
self.addParameter(ParameterTableField(self.FIELD,
self.tr('Unique ID field'), self.INPUT))
self.addOutput(OutputDirectory(self.OUTPUT, self.tr('Output directory')))
def processAlgorithm(self, progress):
layer = dataobjects.getObjectFromUri(
self.getParameterValue(self.INPUT))
fieldName = self.getParameterValue(self.FIELD)
directory = self.getOutputValue(self.OUTPUT)
mkdir(directory)
fieldIndex = layer.fields().lookupField(fieldName)
uniqueValues = vector.uniqueValues(layer, fieldIndex)
baseName = os.path.join(directory, '{0}_{1}'.format(layer.name(), fieldName))
fields = layer.fields()
crs = layer.crs()
geomType = layer.wkbType()
total = 100.0 / len(uniqueValues)
for current, i in enumerate(uniqueValues):
fName = u'{0}_{1}.shp'.format(baseName, str(i).strip())
writer = vector.VectorWriter(fName, None, fields, geomType, crs)
for f in vector.features(layer):
if f[fieldName] == i:
writer.addFeature(f)
del writer
progress.setPercentage(int(current * total))
|
# Copyright 2018 SUSE Linux GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
import horizon
from openstack_dashboard.api import keystone
class ApplicationCredentialsPanel(horizon.Panel):
name = _("Application Credentials")
slug = 'application_credentials'
policy_rules = (('identity', 'identity:list_application_credentials'),)
@staticmethod
def can_register():
return keystone.VERSIONS.active >= 3
def can_access(self, context):
request = context['request']
keystone_version = keystone.get_identity_api_version(request)
return keystone_version >= (3, 10)
|
import re
import requests
import datetime
import dateutil
import dateutil.parser
import logging
from plugins.packagetracker.provider import Package
__author__ = "reeen"
# Bring API https://developer.bring.com/api/tracking/
class BringPackage(Package):
API_URL = "https://tracking.bring.com/api/v2/tracking.json"
@classmethod
def get_type(cls):
return "Bring"
@staticmethod
def strip_tags(text):
clean = re.compile("<.*?>")
return re.sub(clean, "", text)
@staticmethod
def create_event(event):
e = BringPackage.Event()
e.datetime = dateutil.parser.isoparse(event["dateIso"])
        e.description = f"{event['city']}: {BringPackage.strip_tags(event['description'])}"
return e
@classmethod
def is_package(cls, package_id):
data = cls._get_data(package_id)
if not "error" in data["consignmentSet"][0]:
return True
return False
@classmethod
def _get_url(cls, package_id):
return BringPackage.API_URL + "?q=" + package_id
@classmethod
def _get_data(cls, package_id):
try:
return requests.get(
BringPackage._get_url(package_id),
# More headers are listed as required, but that is only for the registered API end-point
headers={
"X-Bring-Client-URL": "https://github.com/Tigge/platinumshrimp",
},
).json()
except ValueError as e:
logging.exception("Exception while getting package")
return {}
def __init__(self, package_id):
super().__init__(package_id)
self.last_updated = datetime.datetime(1970, 1, 1, tzinfo=datetime.timezone.utc)
def update(self):
data = self._get_data(self.id)
# Note: will only look at first consignment and package in data
try:
self.consignor = data["consignmentSet"][0]["senderName"]
self.consignee = data["consignmentSet"][0]["recipientAddress"]["postalCode"]
self.consignee += (
" " + data["consignmentSet"][0]["recipientAddress"]["city"]
)
last_updated = self.last_updated
for bring_event in data["consignmentSet"][0]["packageSet"][0]["eventSet"]:
event = self.create_event(bring_event)
if event.datetime > last_updated:
last_updated = event.datetime
if event.datetime > self.last_updated:
self.on_event(event)
self.last_updated = last_updated
except Exception as e:
logging.exception("Exception while updating package")
logging.debug("Data: %r", data)
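# Illustrative sketch (not part of the original module): typical use of the
# class above. The tracking number is made up and calling this helper performs
# real requests against the Bring API; on_event() is presumably provided by
# the Package base class.
def _example_bring_usage(package_id="TESTPACKAGE123"):
    if not BringPackage.is_package(package_id):
        return None
    package = BringPackage(package_id)
    package.update() # on_event() fires once per event newer than last_updated
    return package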
|
#!/usr/bin/env python
### THIS MAY OCCUPY ~10-50GB OF /tmp SPACE PER JOB
import glob,os
import sys,getopt
import gzip
import numpy as np
from collections import defaultdict
from fastq_reader import Fastq_Reader
def max_log_lik_ratio(s,bkg,h1_prob=0.8,thresh1=3.84,thresh2=np.inf):
LLR = [(None,None)]
read_match_sum = s[-1]
del s[-1]
v1 = read_match_sum*h1_prob*(1-h1_prob)
m1 = read_match_sum*h1_prob
for k,sect_sum in s.items():
if sect_sum > read_match_sum*bkg[k]:
v2 = read_match_sum*bkg[k]*(1-bkg[k])
m2 = read_match_sum*bkg[k]
llr = np.log(v2**.5/v1**.5) + .5*((sect_sum-m2)**2/v2 - (sect_sum-m1)**2/v1)
LLR.append((llr,k))
LLR.sort(reverse=True)
K = []
if LLR[0][0] > thresh1:
K.append(LLR[0][1])
for llr,k in LLR[1:]:
if llr > thresh2:
K.append(k)
else:
break
return K
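# Added note (not in the original script): the score above is a log-likelihood
# ratio between two normal approximations evaluated at x = sect_sum. With
# n = s[-1] total matches, H1 has mean m1 = n*h1_prob and variance
# v1 = n*h1_prob*(1-h1_prob); the background hypothesis for cluster k has
# mean m2 = n*bkg[k] and variance v2 = n*bkg[k]*(1-bkg[k]). Then
# log[N(x; m1, v1) / N(x; m2, v2)] = log(sqrt(v2)/sqrt(v1))
#     + 0.5*((x - m2)**2/v2 - (x - m1)**2/v1),
# which is exactly the per-cluster expression computed above. thresh1 (3.84,
# the 95% cutoff of a chi-square with 1 degree of freedom) gates the best
# cluster; thresh2 optionally admits further clusters.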
help_message = 'usage example: python write_partition_parts.py -r 1 -i /project/home/hashed_reads/ -o /project/home/cluster_vectors/ -t /tmp/dir/'
if __name__ == "__main__":
try:
opts, args = getopt.getopt(sys.argv[1:],'hr:i:o:t:',["--filerank=","inputdir=","outputdir=","tmpdir="])
except:
print help_message
sys.exit(2)
for opt, arg in opts:
if opt in ('-h','--help'):
print help_message
sys.exit()
elif opt in ('-r',"--filerank"):
fr = int(arg)-1
elif opt in ('-i','--inputdir'):
inputdir = arg
if inputdir[-1] != '/':
inputdir += '/'
elif opt in ('-o','--outputdir'):
outputdir = arg
if outputdir[-1] != '/':
outputdir += '/'
elif opt in ('-t','--tmpdir'):
tmpdir = arg
if tmpdir[-1] != '/':
tmpdir += '/'
hashobject = Fastq_Reader(inputdir,outputdir)
cp = np.load(hashobject.output_path+'cluster_probs.npy')
cluster_probs = dict(enumerate(cp))
Hashq_Files = glob.glob(os.path.join(hashobject.input_path,'*.hashq.*'))
Hashq_Files = [fp for fp in Hashq_Files if '.tmp' not in fp]
Hashq_Files.sort()
infile = Hashq_Files[fr]
outpart = infile[-6:-3]
sample_id = infile[infile.rfind('/')+1:infile.index('.hashq')]
tmpdir += str(fr) + '/'
os.system('mkdir '+tmpdir)
G = [open('%s%s.%s.cols.%d' % (tmpdir,sample_id,outpart,i),'w') for i in range(0,2**hashobject.hash_size,2**hashobject.hash_size/50)]
f = gzip.open(infile)
r_id = 0
for a in hashobject.hash_read_generator(f):
for x in a[2]:
G[int(x*50/2**hashobject.hash_size)].write('%d\t%d\n' % (x,r_id))
r_id += 1
R = r_id
f.close()
for g in G:
g.close()
if R < 50:
print 'Fewer than 50 reads...doing nothing'
else:
ClusterFile = open(hashobject.output_path+'cluster_cols.npy')
ValueFile = open(hashobject.output_path+'cluster_vals.npy')
G = [open('%s%s.%s.ids.%d' % (tmpdir,sample_id,outpart,i),'w') for i in range(0,R,R/50)]
# If sharing ClusterFile among many jobs is not practical, we may aggregate jobs below by 1/50 ClusterFile fractions across samples (so each job reads 1 fraction)
for i in range(0,2**hashobject.hash_size,2**hashobject.hash_size/50):
os.system('sort -nk 1 %s%s.%s.cols.%d -o %s%s.%s.cols.%d' % (tmpdir,sample_id,outpart,i,tmpdir,sample_id,outpart,i))
f = open('%s%s.%s.cols.%d' % (tmpdir,sample_id,outpart,i))
ColId = np.fromfile(f,dtype=np.int64,sep='\t')
f.close()
os.system('rm %s%s.%s.cols.%d' % (tmpdir,sample_id,outpart,i))
C = np.fromfile(ClusterFile,dtype=np.int16,count=5*min(2**hashobject.hash_size/50,2**hashobject.hash_size-i))
V = np.fromfile(ValueFile,dtype=np.float32,count=min(2**hashobject.hash_size/50,2**hashobject.hash_size-i))
c0 = None
outlines = [[] for _ in G]
for j in range(0,len(ColId),2):
col,id = ColId[j:j+2]
if col != c0:
ci = col % (2**hashobject.hash_size/50)
c = C[ci*5:(ci+1)*5]
c = c[np.nonzero(c)[0]] - 1
c0 = col
if len(c) > 0:
v = V[ci]
newline = '%d\t%f' % (id,v)
for x in c:
newline += '\t%d' % (x)
outlines[id*50/R].append(newline+'\n')
for g,l in zip(G,outlines):
g.writelines(l)
del C
del V
ClusterFile.close()
ValueFile.close()
for g in G:
g.close()
for i in range(0,R,R/50):
os.system('sort -nk 1 %s%s.%s.ids.%d -o %s%s.%s.ids.%d' % (tmpdir,sample_id,outpart,i,tmpdir,sample_id,outpart,i))
f = gzip.open(infile)
r_id = 0
G = iter(open('%s%s.%s.ids.%d' % (tmpdir,sample_id,outpart,i)) for i in range(0,R,R/50))
g = G.next()
id_vals = np.fromstring(g.readline(),sep='\t')
EOF = False
CF = {}
reads_written = 0
unique_reads_written = 0
for a in hashobject.hash_read_generator(f):
while id_vals[0] < r_id:
id_vals = np.fromstring(g.readline(),sep='\t')
if id_vals[0] == -1:
try:
g = G.next()
id_vals = np.fromstring(g.readline(),sep='\t')
except:
EOF = True
if EOF:
break
D = defaultdict(float)
while id_vals[0] == r_id:
D[-1] += id_vals[1]
for clust in id_vals[2:]:
D[clust] += id_vals[1]
try:
id_vals = np.fromstring(g.readline(),sep='\t')
except:
break
#best_clust = max_log_lik_ratio(D,cluster_probs)
#if best_clust != None:
best_clusts = max_log_lik_ratio(D,cluster_probs)
for best_clust in best_clusts:
if best_clust not in CF:
try:
CF[best_clust] = open('%s%d/%s.fastq.%s' % (hashobject.output_path,best_clust,sample_id,outpart),'a')
except:
os.system('mkdir %s%d/' % (hashobject.output_path,best_clust))
CF[best_clust] = open('%s%d/%s.fastq.%s' % (hashobject.output_path,best_clust,sample_id,outpart),'a')
CF[best_clust].write(a[0]+'\n')
reads_written += 1
if len(best_clusts) > 0:
unique_reads_written += 1
if len(CF) > 200:
for cfv in CF.values():
cfv.close()
CF = {}
r_id += 1
for f in CF.values():
f.close()
os.system('rm -rf '+tmpdir)
print 'total reads written:',reads_written
print 'unique reads written:',unique_reads_written
|
"""
Qualityscore class
"""
import data
import logging
from galaxy.datatypes.sniff import *
from galaxy import util
log = logging.getLogger(__name__)
class QualityScoreSOLiD ( data.Text ):
"""
until we know more about quality score formats
"""
file_ext = "qualsolid"
def set_peek( self, dataset, line_count=None ):
if not dataset.dataset.purged:
dataset.peek = data.get_file_peek( dataset.file_name )
if line_count is None:
dataset.blurb = data.nice_size( dataset.get_size() )
else:
dataset.blurb = "%s lines, SOLiD Quality score file" % util.commaify( str( line_count ) )
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
try:
return dataset.peek
except:
return "SOLiD Quality score file (%s)" % ( data.nice_size( dataset.get_size() ) )
def sniff( self, filename ):
"""
>>> fname = get_test_fname( 'sequence.fasta' )
>>> QualityScoreSOLiD().sniff( fname )
False
>>> fname = get_test_fname( 'sequence.qualsolid' )
>>> QualityScoreSOLiD().sniff( fname )
True
"""
try:
fh = open( filename )
readlen = None
goodblock = 0
while True:
line = fh.readline()
if not line:
if goodblock > 0:
return True
else:
break #EOF
line = line.strip()
if line and not line.startswith( '#' ): #first non-empty non-comment line
if line.startswith( '>' ):
line = fh.readline().strip()
if line == '' or line.startswith( '>' ):
break
try:
[ int( x ) for x in line.split() ]
if not(readlen):
readlen = len(line.split())
assert len(line.split()) == readlen #SOLiD reads should be of the same length
except:
break
goodblock += 1
if goodblock > 10:
return True
else:
break #we found a non-empty line, but it's not a header
except:
pass
return False
class QualityScore454 ( data.Text ):
"""
until we know more about quality score formats
"""
file_ext = "qual454"
def set_peek( self, dataset, line_count=None ):
if not dataset.dataset.purged:
dataset.peek = data.get_file_peek( dataset.file_name )
if line_count is None:
dataset.blurb = data.nice_size( dataset.get_size() )
else:
dataset.blurb = "%s lines, 454 Quality score file" % util.commaify( str( line_count ) )
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
try:
return dataset.peek
except:
return "454 Quality score file (%s)" % ( data.nice_size( dataset.get_size() ) )
def sniff( self, filename ):
"""
>>> fname = get_test_fname( 'sequence.fasta' )
>>> QualityScore454().sniff( fname )
False
>>> fname = get_test_fname( 'sequence.qual454' )
>>> QualityScore454().sniff( fname )
True
"""
try:
fh = open( filename )
while True:
line = fh.readline()
if not line:
break #EOF
line = line.strip()
if line and not line.startswith( '#' ): #first non-empty non-comment line
if line.startswith( '>' ):
line = fh.readline().strip()
if line == '' or line.startswith( '>' ):
break
try:
[ int( x ) for x in line.split() ]
except:
break
return True
else:
break #we found a non-empty line, but it's not a header
except:
pass
return False
class QualityScoreSolexa ( data.Text ):
"""
until we know more about quality score formats
"""
file_ext = "qualsolexa"
def set_peek( self, dataset, line_count=None ):
if not dataset.dataset.purged:
dataset.peek = data.get_file_peek( dataset.file_name )
if line_count is None:
dataset.blurb = data.nice_size( dataset.get_size() )
else:
dataset.blurb = "%s lines, Solexa Quality score file" % util.commaify( str( line_count ) )
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
try:
return dataset.peek
except:
return "Solexa Quality score file (%s)" % ( data.nice_size( dataset.get_size() ) )
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=unidiomatic-typecheck
"""Prototype decorator for defining legacy-graph-mode functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import weakref
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function
from tensorflow.python.eager import lift_to_graph
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
class VariableHolder(object):
"""Holds variables for a python function."""
def __init__(self, fn):
self._fn = fn
self._variables = []
def variable_creator_scope(self, next_creator, **kwargs):
"""Creates variables & adds them to collections to match legacy code."""
v = next_creator(**kwargs)
self._variables.append(v)
collections = kwargs.get("collections")
trainable = v.trainable
if collections is None:
collections = [ops.GraphKeys.GLOBAL_VARIABLES]
if trainable and ops.GraphKeys.TRAINABLE_VARIABLES not in collections:
collections = list(collections) + [ops.GraphKeys.TRAINABLE_VARIABLES]
ops.add_to_collections(collections, v)
return v
def __call__(self, *args, **kwargs):
with variable_scope.variable_creator_scope(self.variable_creator_scope):
return self._fn(*args, **kwargs)
# TODO(allenl): make this trackable
class WrappedFunction(function.ConcreteFunction):
"""Wraps a tf V1 piece of code in a function."""
def __init__(self, fn_graph, variable_holder, attrs=None, signature=None):
super(WrappedFunction, self).__init__(
fn_graph, attrs=attrs, signature=signature)
self._variable_holder = variable_holder
if ops.executing_eagerly_outside_functions():
# TODO(allenl): Make this work in 1.x?
self._lift_unlifted_variables()
def _lift_unlifted_variables(self):
"""Finds resource variables and lifts them into the outer context.
When we import a GraphDef inside a wrap_function, no Python graph building
code runs. This means we get VarHandleOps which create variable resources,
but no corresponding Python objects. Leaving them like this works but gives
the user no way to interact with or modify the variables outside the graph.
This method searches for variables and lifts them out as regular variable
objects when possible, indicating to the FuncGraph that they are captures.
"""
with self.graph.as_default():
collection_variables = (
ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
+ ops.get_collection(ops.GraphKeys.LOCAL_VARIABLES))
existing_captures = set(self.graph.internal_captures)
lifted_variables = {}
for old_variable in collection_variables:
if (old_variable._in_graph_mode # pylint: disable=protected-access
and isinstance(old_variable,
resource_variable_ops.ResourceVariable)):
if old_variable.handle in existing_captures:
continue
new_variable = def_function.UnliftedInitializerVariable(
array_ops.placeholder(
name="unused_{}_initializer".format(old_variable.op.name),
shape=old_variable.shape,
dtype=old_variable.dtype),
name=old_variable.op.name,
trainable=old_variable.trainable)
self.graph.captures[new_variable.handle] = old_variable.handle
existing_captures.add(old_variable.handle)
lifted_variables[old_variable] = new_variable
# pylint: disable=protected-access
self._variable_holder._variables.append(new_variable)
self.graph._weak_variables.append(weakref.ref(new_variable))
# pylint: enable=protected-access
# Update the graph's collections, partly for the user and partly so this
# function is idempotent when it runs again in prune() calls.
for collection_name in [ops.GraphKeys.GLOBAL_VARIABLES,
ops.GraphKeys.LOCAL_VARIABLES]:
mutable_collection = ops.get_collection_ref(collection_name)
for index, current in enumerate(mutable_collection):
mutable_collection[index] = lifted_variables.get(current, current)
def prune(self, feeds, fetches):
flat_feeds, flat_fetches = nest.flatten(feeds), nest.flatten(fetches)
for f in flat_feeds:
if not isinstance(f, ops.Tensor):
raise ValueError("Feeds must be tensors.")
# Ignoring all feeds that are captures allows prune to be called
# using wrapped_func.inputs even when it uses variables
internal_captures = self.graph.internal_captures
flat_feeds = [f for f in flat_feeds
if f not in internal_captures]
tensor_fetches = []
operation_fetches = []
for f in flat_fetches:
if isinstance(f, ops.Tensor):
tensor_fetches.append(f)
elif isinstance(f, ops.Operation):
operation_fetches.append(f)
else:
raise ValueError("Fetches must be tensors or operations.")
for f in flat_feeds + flat_fetches:
if f.graph is not self._func_graph:
raise ValueError(
"Can only prune function whose feeds and fetches "
"are from this graph (%s). Tensor %s from graph %s" % (
self._func_graph, f, f.graph))
with self._func_graph.as_default():
pruned_graph = func_graph.FuncGraph("pruned")
with ops.control_dependencies(operation_fetches):
if tensor_fetches:
identity_fetches = array_ops.identity_n(tensor_fetches)
sink_tensor = identity_fetches[0]
else:
identity_fetches = []
sink_tensor = array_ops.zeros([])
lift_map = lift_to_graph.lift_to_graph(
[sink_tensor], pruned_graph, sources=flat_feeds + internal_captures)
for original_fetch, identity_fetch in zip(
tensor_fetches, identity_fetches):
lift_map[original_fetch] = lift_map[identity_fetch]
pruned_graph.outputs.extend(
lift_map[x] for x in flat_fetches if isinstance(x, ops.Tensor))
if not tensor_fetches:
pruned_graph.outputs.append(lift_map[sink_tensor])
for external_capture, internal_capture in self.graph.captures.items():
pruned_graph.captures[external_capture] = lift_map[internal_capture]
pruned_graph.inputs.extend(lift_map[x] for x in flat_feeds)
pruned_graph.inputs.extend(pruned_graph.captures.values())
pruned_graph.variables = self.graph.variables
def _structured_output_mapping(fetched):
lifted = lift_map[fetched]
if isinstance(lifted, ops.Operation):
return None
return lifted
pruned_graph.structured_outputs = nest.map_structure(
_structured_output_mapping, fetches)
pruned_fn = WrappedFunction(
pruned_graph, variable_holder=self._variable_holder)
pruned_fn._num_positional_args = len(flat_feeds) # pylint: disable=protected-access
pruned_fn._arg_keywords = [] # pylint: disable=protected-access
return pruned_fn
@tf_export(v1=["wrap_function"])
def wrap_function(fn, signature, name=None):
"""Wraps the TF 1.x function fn into a graph function.
The python function `fn` will be called once with symbolic arguments specified
in the `signature`, traced, and turned into a graph function. Any variables
created by `fn` will be owned by the object returned by `wrap_function`. The
resulting graph function can be called with tensors which match the
signature.
```python
def f(x, do_add):
v = tf.Variable(5.0)
if do_add:
op = v.assign_add(x)
else:
op = v.assign_sub(x)
with tf.control_dependencies([op]):
return v.read_value()
f_add = tf.compat.v1.wrap_function(f, [tf.TensorSpec((), tf.float32), True])
assert float(f_add(1.0)) == 6.0
assert float(f_add(1.0)) == 7.0
# Can call tf.compat.v1.wrap_function again to get a new trace, a new set
# of variables, and possibly different non-template arguments.
f_sub= tf.compat.v1.wrap_function(f, [tf.TensorSpec((), tf.float32), False])
assert float(f_sub(1.0)) == 4.0
assert float(f_sub(1.0)) == 3.0
```
Both `tf.compat.v1.wrap_function` and `tf.function` create a callable
TensorFlow graph. But while `tf.function` runs all stateful operations
(e.g. `tf.print`) and sequences operations to provide the same semantics as
eager execution, `wrap_function` is closer to the behavior of `session.run` in
TensorFlow 1.x. It will not run any operations unless they are required to
compute the function's outputs, either through a data dependency or a control
dependency. Nor will it sequence operations.
Unlike `tf.function`, `wrap_function` will only trace the Python function
once. As with placeholders in TF 1.x, shapes and dtypes must be provided to
`wrap_function`'s `signature` argument.
Since it is only traced once, variables and state may be created inside the
function and owned by the function wrapper object.
Args:
fn: python function to be wrapped
signature: the placeholder and python arguments to be passed to the
wrapped function
name: Optional. The name of the function.
Returns:
the wrapped graph function.
"""
holder = VariableHolder(fn)
func_graph_name = "wrapped_function"
if name is not None:
func_graph_name = "wrapped_function_" + name
return WrappedFunction(
func_graph.func_graph_from_py_func(
func_graph_name,
holder,
args=None, kwargs=None, signature=signature,
add_control_dependencies=False,
collections={}),
variable_holder=holder,
signature=signature)
def function_from_graph_def(graph_def, inputs, outputs):
"""Creates a ConcreteFunction from a GraphDef.
Args:
graph_def: A GraphDef to make a function out of.
inputs: A Tensor name or nested structure of names in `graph_def` which
should be inputs to the function.
outputs: A Tensor name or nested structure of names in `graph_def` which
should be outputs of the function.
Returns:
A ConcreteFunction.
"""
def _imports_graph_def():
importer.import_graph_def(graph_def, name="")
wrapped_import = wrap_function(_imports_graph_def, [])
import_graph = wrapped_import.graph
return wrapped_import.prune(
nest.map_structure(import_graph.as_graph_element, inputs),
nest.map_structure(import_graph.as_graph_element, outputs))
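# Usage sketch (not part of the original module): turning a serialized
# GraphDef into a callable. Shown as comments because it needs an external
# model file; the file name "frozen_graph.pb" and the tensor names "x:0" and
# "y:0" are hypothetical placeholders (assumes `import tensorflow as tf`).
#
#   graph_def = tf.compat.v1.GraphDef()
#   with open("frozen_graph.pb", "rb") as f:
#     graph_def.ParseFromString(f.read())
#   fn = function_from_graph_def(graph_def, inputs="x:0", outputs="y:0")
#   y = fn(tf.constant(1.0))  # feeds are passed positionally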
|
from django.db import models
import datetime
from librehatti.voucher.models import VoucherId
from librehatti.voucher.models import FinancialSession
from librehatti.bills.models import QuotedOrder
from librehatti.catalog.models import PurchaseOrder, Category
import simplejson
class SuspenseOrder(models.Model):
voucher = models.IntegerField()
purchase_order = models.ForeignKey(PurchaseOrder)
session_id = models.ForeignKey(FinancialSession)
distance_estimated = models.IntegerField(default=0)
is_cleared = models.BooleanField(default=False)
def __unicode__(self):
return '%s' % (self.id)
class SuspenseClearance(models.Model):
session = models.ForeignKey(FinancialSession)
voucher_no = models.IntegerField()
    work_charge = models.IntegerField(blank=True, null=True)
labour_charge = models.IntegerField(blank=True, null=True)
car_taxi_charge = models.IntegerField(blank=True, null=True)
boring_charge_external = models.IntegerField(blank=True, null=True)
boring_charge_internal = models.IntegerField(blank=True, null=True)
lab_testing_staff = models.CharField(max_length=200)
field_testing_staff = models.CharField(max_length=200)
test_date = models.CharField(max_length=600)
clear_date = models.DateField(default=datetime.date.today)
class Department(models.Model):
title = models.CharField(max_length=50)
address = models.CharField(max_length=150)
phone = models.CharField(max_length=20, blank=True)
dean = models.CharField(max_length=50, blank=True)
def __unicode__(self):
return self.title
class StaffPosition(models.Model):
position = models.CharField(max_length=50)
rank = models.IntegerField()
def __unicode__(self):
return self.position
class Staff(models.Model):
department = models.ForeignKey(Department)
code = models.CharField(max_length=5)
name = models.CharField(max_length=50)
daily_ta_da = models.IntegerField(blank=True)
position = models.ForeignKey(StaffPosition)
lab = models.ForeignKey(Category)
    email = models.EmailField(blank=True)
class Meta:
verbose_name_plural = "Staff"
def __unicode__(self):
return self.name
class TaDa(models.Model):
date_of_generation = models.DateField(default=datetime.date.today)
voucher_no = models.IntegerField()
session = models.IntegerField()
    departure_time_from_tcc = models.TimeField()
arrival_time_at_site = models.TimeField()
departure_time_from_site = models.TimeField()
arrival_time_at_tcc = models.TimeField()
tada_amount = models.IntegerField()
start_test_date = models.DateField()
end_test_date = models.DateField()
    source_site = models.CharField(max_length=100, default='GNDEC, Ludhiana')
    testing_site = models.CharField(max_length=100)
testing_staff = models.CharField(max_length=100)
def __unicode__(self):
        return '%s' % (self.voucher_no)
class QuotedSuspenseOrder(models.Model):
quoted_order = models.ForeignKey('bills.QuotedOrder')
distance_estimated = models.IntegerField(default=0)
is_cleared = models.BooleanField(default=False)
def __unicode__(self):
return '%s' % (self.id)
class Vehicle(models.Model):
vehicle_id = models.CharField(max_length=20)
vehicle_no = models.CharField(max_length=20)
vehicle_name = models.CharField(max_length=20)
def __unicode__(self):
return '%s' % (self.vehicle_no)
class Transport(models.Model):
vehicle = models.ForeignKey(Vehicle)
kilometer = models.CharField(max_length=500)
rate = models.FloatField(default=10.0)
date_of_generation = models.DateField()
date = models.CharField(blank=True, max_length=600)
total = models.IntegerField()
voucher_no = models.IntegerField()
session = models.ForeignKey(FinancialSession)
'''def save(self, *args, **kwargs):
# Now decode the kilometers
jsonkilometer = simplejson.loads(self.kilometer)
total_km = 0;
#calculate the total kms
for km in jsonkilometer:
total_km += float(km)
# Now calculate the total and save it in model
self.total = total_km * self.rate
super(Transport, self).save(*args, **kwargs)
'''
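    # Worked example of the commented-out save() above (sketch): with
    # kilometer = '[12.5, 30, 7.5]' and rate = 10.0, the decoded distances
    # sum to 50.0 km, so total would be stored as 500.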
class Meta:
verbose_name_plural = "Transport"
def __unicode__(self):
return '%s' % (self.vehicle)
|
import user
import requests
import re
import course
import os
import pickle
from bs4 import BeautifulSoup
class mycourses:
def __init__(self):
self.__mycourses_domain = "https://mycourses2.mcgill.ca"
self.__mycourses_index_page = "https://mycourses2.mcgill.ca/"
self.__shibboleth_domain = "https://shibboleth.mcgill.ca"
self.__shibboleth_login_page = "https://mycourses2.mcgill.ca/Shibboleth.sso/Login?entityID=https://shibboleth.mcgill.ca/idp/shibboleth&target=https%3A%2F%2Fmycourses2.mcgill.ca%2Fd2l%2FshibbolethSSO%2Flogin.d2l"
self.__useragent = "Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_0 like Mac OS X; en-us) AppleWebKit/532.9 (KHTML, like Gecko) Version/4.0.5 Mobile/8A293 Safari/6531.22.7"
self.__request_header = {'user-agent':self.__useragent }
self.session = requests.Session()
self.session.headers.update(self.__request_header)
        self.__course_match_pattern = re.compile(r"(\w{4,6}) (\d{4}) - (\w{3,4})-(\d{3})-(\d{3}) - (.+)")
self.__course_id_match_pattern = re.compile("""\\\/home\\\/(\d+)""")
self.__datadir = os.environ['HOME']+"/.libmycourses"
if not os.path.exists(self.__datadir):
os.mkdir(self.__datadir)
def login(self, user):
self.user = user
self.__do_login()
if self.loginsuccess:
            with open(self.__datadir+"/"+self.user.username, 'wb') as f:
                pickle.dump(self.session, f)
def __parse(self,source):
res = BeautifulSoup(source)
courses_containing_nodes = res.find_all("li", class_="d2l-itemlist-simple d2l-itemlist-arrow d2l-itemlist-short")
ids = self.__course_id_match_pattern.finditer(source)
for course_containing_node in courses_containing_nodes:
strings = course_containing_node.stripped_strings
m = self.__course_match_pattern.match(strings.next())
if m != None:
c = course.course(m.group(1),m.group(2),m.group(3),m.group(4),m.group(5),m.group(6),ids.next().group(1), self.session)
self.user.courses.append(c)
def __do_login(self):
# try loading previous session
try:
            with open(self.__datadir+"/"+self.user.username, 'rb') as f:
self.session = pickle.load(f)
except:
pass
r = self.session.get("https://mycourses2.mcgill.ca/d2l/m/home")
if "Home - myCourses" in r.text:
self.loginsuccess = True
self.__parse(r.text)
return
# first get the index page of mycourses
r = self.session.get(self.__mycourses_index_page)
# then go to shibboleth login page
r = self.session.get(self.__shibboleth_login_page)
# make login payload data
payload = {'j_username': self.user.username,
'j_password': self.user.password}
r = self.session.post(self.__shibboleth_domain + '/idp/Authn/UserPassword', data=payload)
res = BeautifulSoup(r.text)
# continue button must be pressed manually
continue_form_url = "https://mycourses2.mcgill.ca/Shibboleth.sso/SAML2/POST"
# use beautiful soup to find RelayState and SAMLResponse
try:
RelayState = res.find(attrs={"name": "RelayState"})['value']
SAMLResponse = res.find(attrs={"name": "SAMLResponse"})['value']
except:
self.loginsuccess = False
raise LoginError("Cannot retrieve SAMLResponse, username and password are probably wrong")
# build new payload
payload = {'RelayState': RelayState,
'SAMLResponse': SAMLResponse}
r = self.session.post(continue_form_url, data=payload)
r = self.session.get("https://mycourses2.mcgill.ca/d2l/lp/auth/login/ProcessLoginActions.d2l")
result = r.text
if not "Home - myCourses" in result:
self.loginsuccess = False
raise LoginError("Cannot complete final login step")
self.loginsuccess = True
self.__parse(result)
class LoginError(Exception):
def __init__(self,value):
self.value = value
def __str__(self):
return repr(self.value)
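# Usage sketch (not part of the original module): the `user.user(...)`
# constructor below is an assumption based on the `import user` above --
# adapt it to the actual user class exposed by that module.
#
#   u = user.user("mcgill_username", "mcgill_password")
#   mc = mycourses()
#   mc.login(u)          # raises LoginError on bad credentials
#   for c in u.courses:  # populated while parsing the mobile home page
#       print c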
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import os.path
from collections import OrderedDict
from contextlib import contextmanager
from copy import deepcopy
from tempfile import mkdtemp
from django.conf import settings
from django.core.handlers.base import BaseHandler
from django.http import SimpleCookie
from django.test import RequestFactory, TestCase, TransactionTestCase
from django.utils.functional import SimpleLazyObject
from django.utils.six import StringIO
from .utils import UserLoginContext, create_user, get_user_model, reload_urls, temp_dir
try:
from unittest.mock import patch
except ImportError:
from mock import patch
class BaseTestCaseMixin(object):
"""
Utils mixin that provides some helper methods to setup and interact with
Django testing framework.
"""
request_factory = None
user = None
user_staff = None
user_normal = None
site_1 = None
languages = None
_login_context = None
image_name = 'test_image.jpg'
#: Username for auto-generated superuser
_admin_user_username = 'admin'
#: Password for auto-generated superuser
_admin_user_password = 'admin'
#: Email for auto-generated superuser
_admin_user_email = '[email protected]'
#: Username for auto-generated staff user
_staff_user_username = 'staff'
#: Password for auto-generated staff user
_staff_user_password = 'staff'
#: Email for auto-generated staff user
_staff_user_email = '[email protected]'
#: Username for auto-generated non-staff user
_user_user_username = 'normal'
#: Password for auto-generated non-staff user
_user_user_password = 'normal'
#: Email for auto-generated non-staff user
_user_user_email = '[email protected]'
_pages_data = ()
"""
List of pages data for the different languages.
Each item of the list is a dictionary containing the attributes
(as accepted by ``cms.api.create_page``) of the page to be created.
The first language will be created with ``cms.api.create_page`` the following
languages using ``cms.api.create_title``
Example:
Single page created in en, fr, it languages::
_pages_data = (
{
'en': {'title': 'Page title', 'template': 'page.html', 'publish': True},
'fr': {'title': 'Titre', 'publish': True},
'it': {'title': 'Titolo pagina', 'publish': False}
},
)
"""
@classmethod
def setUpClass(cls):
from django.contrib.sites.models import Site
cls.request_factory = RequestFactory()
cls.user = create_user(
cls._admin_user_username, cls._admin_user_email, cls._admin_user_password,
is_staff=True, is_superuser=True
)
cls.user_staff = create_user(
cls._staff_user_username, cls._staff_user_email, cls._staff_user_password,
is_staff=True, is_superuser=False
)
cls.user_normal = create_user(
cls._user_user_username, cls._user_user_email, cls._user_user_password,
is_staff=False, is_superuser=False
)
cls.site_1 = Site.objects.all().first()
try:
from cms.utils import get_language_list
cls.languages = get_language_list()
except ImportError:
cls.languages = [x[0] for x in settings.LANGUAGES]
super(BaseTestCaseMixin, cls).setUpClass()
@classmethod
def tearDownClass(cls):
super(BaseTestCaseMixin, cls).tearDownClass()
User = get_user_model()
User.objects.all().delete()
@contextmanager
def temp_dir(self):
"""
Context manager to operate on a temporary directory
"""
        with temp_dir() as temporary_dir:
            yield temporary_dir
def reload_model(self, obj):
"""
Reload a models instance from database
:param obj: model instance to reload
:return: the reloaded model instance
"""
return obj.__class__.objects.get(pk=obj.pk)
@staticmethod
def reload_urlconf(urlconf=None):
reload_urls(settings, urlconf)
def login_user_context(self, user, password=None):
"""
Context manager to make logged in requests
:param user: user username
:param password: user password (if omitted, username is used)
"""
return UserLoginContext(self, user, password)
def create_user(self, username, email, password, is_staff=False, is_superuser=False,
base_cms_permissions=False, permissions=None):
"""
Creates a user with the given properties
:param username: Username
:param email: Email
:param password: password
:param is_staff: Staff status
:param is_superuser: Superuser status
:param base_cms_permissions: Base django CMS permissions
:param permissions: Other permissions
:return: User instance
"""
return create_user(username, email, password, is_staff, is_superuser, base_cms_permissions,
permissions)
def get_pages_data(self):
"""
Construct a list of pages in the different languages available for the
project. Default implementation is to return the :py:attr:`_pages_data`
attribute
:return: list of pages data
"""
return self._pages_data
def get_pages(self):
"""
Create pages using self._pages_data and self.languages
:return: list of created pages
"""
return self.create_pages(self._pages_data, self.languages)
@staticmethod
def create_pages(source, languages):
"""
Build pages according to the pages data provided by :py:meth:`get_pages_data`
and returns the list of the draft version of each
"""
from cms.api import create_page, create_title
pages = OrderedDict()
has_apphook = False
home_set = False
for page_data in source:
main_data = deepcopy(page_data[languages[0]])
if 'publish' in main_data:
main_data['published'] = main_data.pop('publish')
main_data['language'] = languages[0]
if main_data.get('parent', None):
main_data['parent'] = pages[main_data['parent']]
page = create_page(**main_data)
has_apphook = has_apphook or 'apphook' in main_data
for lang in languages[1:]:
if lang in page_data:
publish = False
title_data = deepcopy(page_data[lang])
if 'publish' in title_data:
publish = title_data.pop('publish')
if 'published' in title_data:
publish = title_data.pop('published')
title_data['language'] = lang
title_data['page'] = page
create_title(**title_data)
if publish:
page.publish(lang)
if (
not home_set and hasattr(page, 'set_as_homepage') and
main_data.get('published', False)
):
page.set_as_homepage()
home_set = True
page = page.get_draft_object()
pages[page.get_slug(languages[0])] = page
if has_apphook:
reload_urls(settings, cms_apps=True)
return list(pages.values())
def get_content_renderer(self, request):
"""
        Returns the plugin renderer. Only for django CMS 3.4+
:param request: request instance
:return: ContentRenderer instance
"""
from cms.plugin_rendering import ContentRenderer
return ContentRenderer(request)
def get_plugin_context(self, page, lang, plugin, edit=False):
"""
Returns a context suitable for CMSPlugin.render_plugin / render_placeholder
:param page: Page object
:param lang: Current language
:param plugin: Plugin instance
:param edit: Enable edit mode for rendering
:return: PluginContext instance
"""
from cms.plugin_rendering import PluginContext
from sekizai.context_processors import sekizai
request = self.get_page_request(page, self.user, lang=lang, edit=edit)
context = {
'request': request
}
renderer = self.get_content_renderer(request)
if renderer:
context['cms_content_renderer'] = renderer
context.update(sekizai(request))
return PluginContext(context, plugin, plugin.placeholder)
def render_plugin(self, page, lang, plugin, edit=False):
"""
Renders a single plugin using CMSPlugin.render_plugin
:param page: Page object
:param lang: Current language
:param plugin: Plugin instance
:param edit: Enable edit mode for rendering
:return: Rendered plugin
"""
context = self.get_plugin_context(page, lang, plugin, edit)
content_renderer = context['cms_content_renderer']
rendered = content_renderer.render_plugin(
instance=plugin,
context=context,
placeholder=plugin.placeholder,
)
return rendered
def _prepare_request(self, request, page, user, lang, use_middlewares, use_toolbar=False,
secure=False):
from django.contrib.auth.models import AnonymousUser
from importlib import import_module
engine = import_module(settings.SESSION_ENGINE)
request.current_page = SimpleLazyObject(lambda: page)
if not user:
if self._login_context:
user = self._login_context.user
else:
user = AnonymousUser()
if user.is_authenticated:
session_key = user._meta.pk.value_to_string(user)
else:
session_key = 'session_key'
request.user = user
request._cached_user = user
request.session = engine.SessionStore(session_key)
if secure:
request.environ['SERVER_PORT'] = str('443')
request.environ['wsgi.url_scheme'] = str('https')
request.cookies = SimpleCookie()
request.errors = StringIO()
request.LANGUAGE_CODE = lang
if request.method == 'POST':
request._dont_enforce_csrf_checks = True
# Let's use middleware in case requested, otherwise just use CMS toolbar if needed
if use_middlewares:
self._apply_middlewares(request)
elif use_toolbar:
from cms.middleware.toolbar import ToolbarMiddleware
mid = ToolbarMiddleware()
mid.process_request(request)
return request
def _apply_middlewares(self, request):
handler = BaseHandler()
from django.utils.module_loading import import_string
for middleware_path in reversed(settings.MIDDLEWARE):
middleware = import_string(middleware_path)
mw_instance = middleware(handler)
if hasattr(mw_instance, 'process_request'):
mw_instance.process_request(request)
def request(
self, path, method='get', data=None, page=None, lang='', user=None,
use_middlewares=False, secure=False, use_toolbar=False
):
"""
Create a request for the given parameters.
Request will be enriched with:
* session
* cookies
* user (Anonymous if :param:user is `None`)
* django CMS toolbar (is set)
* current_page (if provided)
:param path: request path
:type path: str
:param method: HTTP verb to use
:type method: str
:param data: payload to pass to the underlying :py:class:`RequestFactory` method
:type data: dict
:param page: current page object
:type page: cms.models.Page
:param lang: request language
:type lang: str
:param user: current user
:type user: :py:class:`django.contrib.auth.AbstractUser`
:param use_middlewares: pass the request through configured middlewares
:type use_middlewares: bool
:param secure: create HTTPS request
:type secure: bool
:param use_toolbar: add django CMS toolbar
        :type use_toolbar: bool
:return: request
"""
request = getattr(RequestFactory(), method)(path, data=data, secure=secure)
return self._prepare_request(
request, page, user, lang, use_middlewares, secure=secure, use_toolbar=use_toolbar
)
def get_request(
self, page, lang, user=None, path=None, use_middlewares=False, secure=False,
use_toolbar=False
):
"""
Create a GET request for the given page and language
:param page: current page object
:param lang: request language
:param user: current user
:param path: path (if different from the current page path)
:param use_middlewares: pass the request through configured middlewares.
:param secure: create HTTPS request
:param use_toolbar: add django CMS toolbar
:return: request
"""
path = path or page and page.get_absolute_url(lang)
return self.request(
path, method='get', data={}, page=page, lang=lang, user=user,
use_middlewares=use_middlewares, secure=secure, use_toolbar=use_toolbar
)
def post_request(self, page, lang, data, user=None, path=None, use_middlewares=False,
secure=False, use_toolbar=False):
"""
Create a POST request for the given page and language with CSRF disabled
:param page: current page object
:param lang: request language
:param data: POST payload
:param user: current user
:param path: path (if different from the current page path)
:param use_middlewares: pass the request through configured middlewares.
:param secure: create HTTPS request
:param use_toolbar: add django CMS toolbar
:return: request
"""
path = path or page and page.get_absolute_url(lang)
return self.request(
path, method='post', data=data, page=page, lang=lang, user=user,
use_middlewares=use_middlewares, secure=secure, use_toolbar=use_toolbar
)
def get_page_request(self, page, user, path=None, edit=False, lang='en',
use_middlewares=False, secure=False):
"""
Create a GET request for the given page suitable for use the
django CMS toolbar
This method requires django CMS installed to work. It will raise an ImportError otherwise;
not a big deal as this method makes sense only in a django CMS environment
:param page: current page object
:param user: current user
:param path: path (if different from the current page path)
:param edit: whether enabling editing mode
:param lang: request language
:param use_middlewares: pass the request through configured middlewares.
:param secure: create HTTPS request
:return: request
"""
from cms.utils.conf import get_cms_setting
edit_on = get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')
path = path or page and page.get_absolute_url(lang)
if edit:
path = '{0}?{1}'.format(path, edit_on)
request = self.request_factory.get(path, secure=secure)
return self._prepare_request(request, page, user, lang, use_middlewares, use_toolbar=True,
secure=secure)
@staticmethod
def create_image(mode='RGB', size=(800, 600)):
"""
Create a random image suitable for saving as DjangoFile
:param mode: color mode
:param size: tuple of width, height
:return: image object
It requires Pillow installed in the environment to work
"""
from PIL import Image as PilImage, ImageDraw
image = PilImage.new(mode, size)
draw = ImageDraw.Draw(image)
x_bit, y_bit = size[0] // 10, size[1] // 10
draw.rectangle((x_bit, y_bit * 2, x_bit * 7, y_bit * 3), 'red')
draw.rectangle((x_bit * 2, y_bit, x_bit * 3, y_bit * 8), 'red')
return image
def create_django_image_obj(self): # pragma: no cover
return self.create_django_image_object()
def create_django_image_object(self):
"""
Create a django image file object suitable for FileField
It also sets the following attributes:
* ``self.image_name``: the image base name
* ``self.filename``: the complete image path
:return: django file object
It requires Pillow installed in the environment to work
"""
img_obj, self.filename = self.create_django_image()
self.image_name = img_obj.name
return img_obj
@staticmethod
def create_django_image():
"""
Create a django image file object suitable for FileField
It also sets the following attributes:
* ``self.image_name``: the image base name
* ``self.filename``: the complete image path
:return: (django file object, path to file image)
It requires Pillow installed in the environment to work
"""
from django.core.files import File as DjangoFile
img = BaseTestCase.create_image()
image_name = 'test_file.jpg'
if settings.FILE_UPLOAD_TEMP_DIR:
tmp_dir = settings.FILE_UPLOAD_TEMP_DIR
else:
tmp_dir = mkdtemp()
filename = os.path.join(tmp_dir, image_name)
img.save(filename, 'JPEG')
return DjangoFile(open(filename, 'rb'), name=image_name), filename
def create_filer_image_object(self):
"""
Create a filer image object suitable for FilerImageField
It also sets the following attributes:
* ``self.image_name``: the image base name
* ``self.filename``: the complete image path
* ``self.filer_image``: the filer image object
:return: filer image object
It requires Pillow and django-filer installed in the environment to work
"""
self.filer_image = self.create_filer_image(self.user, self.image_name)
return self.filer_image
@staticmethod
def create_filer_image(user, image_name):
"""
Create a filer image object suitable for FilerImageField
It also sets the following attributes:
* ``self.image_name``: the image base name
* ``self.filename``: the complete image path
* ``self.filer_image``: the filer image object
:param user: image owner
:param image_name: image name
:return: filer image object
It requires Pillow and django-filer installed in the environment to work
"""
from filer.models import Image
file_obj, filename = BaseTestCase.create_django_image()
filer_image = Image.objects.create(
owner=user, file=file_obj, original_filename=image_name
)
return filer_image
@contextmanager
def captured_output(self):
"""
Context manager that patches stdout / stderr with StringIO and return the instances.
Use it to test output
:return: stdout, stderr wrappers
"""
with patch('sys.stdout', new_callable=StringIO) as out:
with patch('sys.stderr', new_callable=StringIO) as err:
yield out, err
class BaseTestCase(BaseTestCaseMixin, TestCase):
"""
Base class that implements :py:class:`BaseTestCaseMixin` and
:py:class:`django.tests.TestCase`
"""
class BaseTransactionTestCase(BaseTestCaseMixin, TransactionTestCase):
"""
Base class that implements :py:class:`BaseTestCaseMixin` and
:py:class:`django.tests.TransactionTestCase`
"""
|
#!/usr/bin/env python3
import os
import sys
import time
import random
import requests
import webbrowser
import signal
def signal_handler(signal, frame):
print('👇 You pressed Ctrl+C! Skipping getting Feedly article.')
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
try:
FEEDLY_TOKEN = os.environ["FEEDLY_TOKEN"]
except KeyError:
print("🙅♂️ No 'FEEDLY_TOKEN' environment found! Cannot continue.")
sys.exit(1)
FEEDLY_URL = "https://cloud.feedly.com/v3"
request = requests.Session()
request.headers.update(
{"Content-Type": "application/json", "Authorization": f"OAuth {FEEDLY_TOKEN}"}
)
try:
response = request.get(FEEDLY_URL + "/profile")
if not response.ok:
print(
f'🙅♂️ {response.json()["errorMessage"].title()}.\n'
'To refresh "FEEDLY_TOKEN" go to https://feedly.com/v3/auth/dev.\n'
"Don't forget to run `$ update_feedly_token \"FEEDLY_TOKEN\"` after 👍"
)
sys.exit(1)
except Exception as e:
    print(f'🙅♂️ Something went wrong: {str(e)}')
sys.exit(1)
stream_id = f'user/{response.json()["id"]}/tag/global.saved'
continuation = ""
articles = []
while True:
# Query feedly API to get all Saved articles, and parse all Article ids, storing them in `articles` variable.
try:
response = request.get(
FEEDLY_URL
+ f"/streams/contents?streamId={stream_id}&continuation={continuation}"
)
items = response.json()["items"]
articles.extend([item.get("id") for item in items])
except:
# Might have hit the threshold of requests. Stop fetching and just use the ones we already have
break
# Then check if is there any more pages left, if not, then break out of the while loop.
try:
continuation = response.json()["continuation"]
print(f'\r{random.choice(["🤔", "👍", "⚡️", "🥴", "👏"])}', end='', flush=True)
except KeyError:
break
# Print a message and sleep for a second so we can read it, then open the page(s) in a web browser.
print(f'\n✅ Found {len(articles)} articles in "Saved for later"!')
time.sleep(1)
# Get article count if given, get only 1 otherwise
count = int(sys.argv[1] if len(sys.argv) > 1 else 1)
for _ in range(count):
if articles:
article = random.choice(articles)
webbrowser.open(f"https://feedly.com/i/entry/{article}")
articles.remove(article)
|
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from extensions.rich_text_components import base
class Image(base.BaseRichTextComponent):
"""A rich-text component representing an inline image."""
name = 'Image'
category = 'Basic Input'
description = 'An image.'
frontend_name = 'image'
tooltip = 'Insert image'
_customization_arg_specs = [{
'name': 'filepath',
'description': (
'The name of the image file. (Allowed extensions: gif, jpeg, jpg, '
'png.)'),
'schema': {
'type': 'custom',
'obj_type': 'Filepath',
},
'default_value': '',
}, {
'name': 'alt',
'description': 'Alternative text (for screen readers)',
'schema': {
'type': 'unicode',
},
'default_value': '',
}]
icon_data_url = (
'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAA'
'ABGdBTUEAAK/INwWK6QAAABl0RVh0%0AU29mdHdhcmUAQWRvYmUgSW1hZ2VSZWFkeXHJZ'
'TwAAAHwSURBVDjLpZM9a1RBFIafM/fevfcmC7uQ%0AjWEjUZKAYBHEVEb/gIWFjVVSWEj'
'6gI0/wt8gprPQykIsTP5BQLAIhBVBzRf52Gw22bk7c8YiZslu%0AgggZppuZ55z3nfdIC'
'IHrrBhg%2BePaa1WZPyk0s%2B6KWwM1khiyhDcvns4uxQAaZOHJo4nRLMtEJPpn%0AxY6'
'Cd10%2BfNl4DpwBTqymaZrJ8uoBHfZoyTqTYzvkSRMXlP2jnG8bFYbCXWJGePlsEq8iPQ'
'mFA2Mi%0AjEBhtpis7ZCWftC0LZx3xGnK1ESd741hqqUaqgMeAChgjGDDLqXkgMPTJtZ3'
'KJzDhTZpmtK2OSO5%0AIRB6xvQDRAhOsb5Lx1lOu5ZCHV4B6RLUExvh4s%2BZntHhDJAx'
'Sqs9TCDBqsc6j0iJdqtMuTROFBkI%0AcllCCGcSytFNfm1tU8k2GRo2pOI43h9ie6tOvT'
'JFbORyDsJFQHKD8fw%2BP9dWqJZ/I96TdEa5Nb1A%0AOavjVfti0dfB%2Bt4iXhWvyh27'
'y9zEbRRobG7z6fgVeqSoKvB5oIMQEODx7FLvIJo55KS9R7b5ldrD%0AReajpC%2BZ5z7G'
'AHJFXn1exedVbG36ijwOmJgl0kS7lXtjD0DkLyqc70uPnSuIIwk9QCmWd%2B9XGnOF%0A'
'DzP/M5xxBInhLYBcd5z/AAZv2pOvFcS/AAAAAElFTkSuQmCC%0A'
)
|
#
# Copyright (c) 2010-2014, MIT Probabilistic Computing Project
#
# Lead Developers: Dan Lovell and Jay Baxter
# Authors: Dan Lovell, Baxter Eaves, Jay Baxter, Vikash Mansinghka
# Research Leads: Vikash Mansinghka, Patrick Shafto
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import itertools
import collections
import numpy
import crosscat.cython_code.State as State
import crosscat.EngineTemplate as EngineTemplate
import crosscat.utils.sample_utils as su
import crosscat.utils.general_utils as gu
import crosscat.utils.inference_utils as iu
# for default_diagnostic_func_dict below
import crosscat.utils.diagnostic_utils
class LocalEngine(EngineTemplate.EngineTemplate):
"""A simple interface to the Cython-wrapped C++ engine
LocalEngine holds no state other than a seed generator.
Methods use resources on the local machine.
"""
def __init__(self, seed=None):
"""Initialize a LocalEngine
This is really just setting the initial seed to be used for
initializing CrossCat states. Seeds are generated sequentially
"""
super(LocalEngine, self).__init__(seed=seed)
self.mapper = map
self.do_initialize = _do_initialize_tuple
self.do_analyze = _do_analyze_tuple
self.do_insert = _do_insert_tuple
return
def get_initialize_arg_tuples(self, M_c, M_r, T, initialization,
row_initialization, n_chains,
ROW_CRP_ALPHA_GRID,
COLUMN_CRP_ALPHA_GRID,
S_GRID, MU_GRID,
N_GRID,
):
seeds = [self.get_next_seed() for seed_idx in range(n_chains)]
arg_tuples = itertools.izip(
seeds,
itertools.cycle([M_c]),
itertools.cycle([M_r]),
itertools.cycle([T]),
itertools.cycle([initialization]),
itertools.cycle([row_initialization]),
itertools.cycle([ROW_CRP_ALPHA_GRID]),
itertools.cycle([COLUMN_CRP_ALPHA_GRID]),
itertools.cycle([S_GRID]),
itertools.cycle([MU_GRID]),
itertools.cycle([N_GRID]),
)
return arg_tuples
def initialize(self, M_c, M_r, T, initialization='from_the_prior',
row_initialization=-1, n_chains=1,
ROW_CRP_ALPHA_GRID=(),
COLUMN_CRP_ALPHA_GRID=(),
S_GRID=(), MU_GRID=(),
N_GRID=31,
# subsample=False,
# subsample_proportion=None,
# subsample_rows_list=None,
):
"""Sample a latent state from prior
:param M_c: The column metadata
:type M_c: dict
:param M_r: The row metadata
:type M_r: dict
:param T: The data table in mapped representation (all floats, generated
by data_utils.read_data_objects)
:type T: list of lists
:returns: X_L, X_D -- the latent state
"""
# FIXME: why is M_r passed?
arg_tuples = self.get_initialize_arg_tuples(
M_c, M_r, T, initialization,
row_initialization, n_chains,
ROW_CRP_ALPHA_GRID, COLUMN_CRP_ALPHA_GRID,
S_GRID, MU_GRID,
N_GRID,
)
chain_tuples = self.mapper(self.do_initialize, arg_tuples)
X_L_list, X_D_list = zip(*chain_tuples)
if n_chains == 1:
X_L_list, X_D_list = X_L_list[0], X_D_list[0]
return X_L_list, X_D_list
def get_insert_arg_tuples(self, M_c, T, X_L_list, X_D_list, new_rows, N_GRID, CT_KERNEL):
arg_tuples = itertools.izip(
itertools.cycle([M_c]),
itertools.cycle([T]),
X_L_list, X_D_list,
itertools.cycle([new_rows]),
itertools.cycle([N_GRID]),
itertools.cycle([CT_KERNEL]),
)
return arg_tuples
def insert(self, M_c, T, X_L_list, X_D_list, new_rows=None, N_GRID=31, CT_KERNEL=0):
"""
Insert mutates the data T.
"""
if new_rows is None:
raise ValueError("new_row must exist")
if not isinstance(new_rows, list):
raise TypeError('new_rows must be list of lists')
if not isinstance(new_rows[0], list):
raise TypeError('new_rows must be list of lists')
X_L_list, X_D_list, was_multistate = su.ensure_multistate(X_L_list, X_D_list)
# get insert arg tuples
arg_tuples = self.get_insert_arg_tuples(M_c, T, X_L_list, X_D_list, new_rows, N_GRID,
CT_KERNEL)
chain_tuples = self.mapper(self.do_insert, arg_tuples)
X_L_list, X_D_list = zip(*chain_tuples)
if not was_multistate:
X_L_list, X_D_list = X_L_list[0], X_D_list[0]
T.extend(new_rows)
ret_tuple = X_L_list, X_D_list, T
return ret_tuple
def get_analyze_arg_tuples(self, M_c, T, X_L_list, X_D_list, kernel_list,
n_steps, c, r, max_iterations, max_time, diagnostic_func_dict,
every_N, ROW_CRP_ALPHA_GRID, COLUMN_CRP_ALPHA_GRID,
S_GRID, MU_GRID, N_GRID, do_timing, CT_KERNEL):
n_chains = len(X_L_list)
seeds = [self.get_next_seed() for seed_idx in range(n_chains)]
arg_tuples = itertools.izip(
seeds,
X_L_list, X_D_list,
itertools.cycle([M_c]),
itertools.cycle([T]),
itertools.cycle([kernel_list]),
itertools.cycle([n_steps]),
itertools.cycle([c]),
itertools.cycle([r]),
itertools.cycle([max_iterations]),
itertools.cycle([max_time]),
itertools.cycle([diagnostic_func_dict]),
itertools.cycle([every_N]),
itertools.cycle([ROW_CRP_ALPHA_GRID]),
itertools.cycle([COLUMN_CRP_ALPHA_GRID]),
itertools.cycle([S_GRID]),
itertools.cycle([MU_GRID]),
itertools.cycle([N_GRID]),
itertools.cycle([do_timing]),
itertools.cycle([CT_KERNEL]),
)
return arg_tuples
def analyze(self, M_c, T, X_L, X_D, kernel_list=(), n_steps=1, c=(), r=(),
max_iterations=-1, max_time=-1, do_diagnostics=False,
diagnostics_every_N=1,
ROW_CRP_ALPHA_GRID=(),
COLUMN_CRP_ALPHA_GRID=(),
S_GRID=(), MU_GRID=(),
N_GRID=31,
do_timing=False,
CT_KERNEL=0,
):
"""Evolve the latent state by running MCMC transition kernels
:param M_c: The column metadata
:type M_c: dict
:param T: The data table in mapped representation (all floats, generated
by data_utils.read_data_objects)
:param X_L: the latent variables associated with the latent state
:type X_L: dict
:param X_D: the particular cluster assignments of each row in each view
:type X_D: list of lists
:param kernel_list: names of the MCMC transition kernels to run
:type kernel_list: list of strings
:param n_steps: the number of times to run each MCMC transition kernel
:type n_steps: int
:param c: the (global) column indices to run MCMC transition kernels on
:type c: list of ints
:param r: the (global) row indices to run MCMC transition kernels on
:type r: list of ints
        :param max_iterations: the maximum number of times to run each MCMC
transition kernel. Applicable only if
max_time != -1.
:type max_iterations: int
:param max_time: the maximum amount of time (seconds) to run MCMC
transition kernels for before stopping to return
progress
:type max_time: float
:returns: X_L, X_D -- the evolved latent state
"""
if n_steps <= 0:
raise ValueError("You must do at least one analyze step.")
if CT_KERNEL not in [0, 1]:
raise ValueError("CT_KERNEL must be 0 (Gibbs) or 1 (MH)")
if do_timing:
# diagnostics and timing are exclusive
do_diagnostics = False
diagnostic_func_dict, reprocess_diagnostics_func = do_diagnostics_to_func_dict(
do_diagnostics)
X_L_list, X_D_list, was_multistate = su.ensure_multistate(X_L, X_D)
arg_tuples = self.get_analyze_arg_tuples(M_c, T, X_L_list, X_D_list,
kernel_list, n_steps, c, r,
max_iterations, max_time,
diagnostic_func_dict, diagnostics_every_N,
ROW_CRP_ALPHA_GRID,
COLUMN_CRP_ALPHA_GRID,
S_GRID, MU_GRID,
N_GRID,
do_timing,
CT_KERNEL,
)
chain_tuples = self.mapper(self.do_analyze, arg_tuples)
X_L_list, X_D_list, diagnostics_dict_list = zip(*chain_tuples)
if do_timing:
timing_list = diagnostics_dict_list
if not was_multistate:
X_L_list, X_D_list = X_L_list[0], X_D_list[0]
ret_tuple = X_L_list, X_D_list
#
if diagnostic_func_dict is not None:
diagnostics_dict = munge_diagnostics(diagnostics_dict_list)
if reprocess_diagnostics_func is not None:
diagnostics_dict = reprocess_diagnostics_func(diagnostics_dict)
ret_tuple = ret_tuple + (diagnostics_dict, )
if do_timing:
ret_tuple = ret_tuple + (timing_list, )
return ret_tuple
def _sample_and_insert(self, M_c, T, X_L, X_D, matching_row_indices):
p_State = State.p_State(M_c, T, X_L, X_D)
draws = []
for matching_row_idx in matching_row_indices:
random_seed = self.get_next_seed()
draw = p_State.get_draw(matching_row_idx, random_seed)
p_State.insert_row(draw, matching_row_idx)
draws.append(draw)
T.append(draw)
X_L, X_D = p_State.get_X_L(), p_State.get_X_D()
return draws, T, X_L, X_D
def sample_and_insert(self, M_c, T, X_L, X_D, matching_row_idx):
matching_row_indices = gu.ensure_listlike(matching_row_idx)
if len(matching_row_indices) == 0:
matching_row_indices = range(len(T))
pass
was_single_row = len(matching_row_indices) == 1
draws, T, X_L, X_D = self._sample_and_insert(M_c, T, X_L, X_D, matching_row_indices)
if was_single_row:
draws = draws[0]
pass
return draws, T, X_L, X_D
def simple_predictive_sample(self, M_c, X_L, X_D, Y, Q, n=1):
"""Sample values from the predictive distribution of the given latent state
:param M_c: The column metadata
:type M_c: dict
:param X_L: the latent variables associated with the latent state
:type X_L: dict
:param X_D: the particular cluster assignments of each row in each view
:type X_D: list of lists
:param Y: A list of constraints to apply when sampling. Each constraint
is a triplet of (r, d, v): r is the row index, d is the column
index and v is the value of the constraint
:type Y: list of lists
:param Q: A list of values to sample. Each value is doublet of (r, d):
r is the row index, d is the column index
:type Q: list of lists
:param n: the number of samples to draw
:type n: int
:returns: list of floats -- samples in the same order specified by Q
"""
get_next_seed = self.get_next_seed
samples = _do_simple_predictive_sample(
M_c, X_L, X_D, Y, Q, n, get_next_seed)
return samples
def simple_predictive_probability(self, M_c, X_L, X_D, Y, Q):
"""Calculate the probability of a cell taking a value given a latent state
:param M_c: The column metadata
:type M_c: dict
:param X_L: the latent variables associated with the latent state
:type X_L: dict
:param X_D: the particular cluster assignments of each row in each view
:type X_D: list of lists
:param Y: A list of constraints to apply when sampling. Each constraint
is a triplet of (r, d, v): r is the row index, d is the column
index and v is the value of the constraint
:type Y: list of lists
:param Q: A list of values to sample. Each value is triplet of (r, d, v):
r is the row index, d is the column index, and v is the value at
which the density is evaluated.
:type Q: list of lists
:returns: list of floats -- probabilities of the values specified by Q
"""
return su.simple_predictive_probability(M_c, X_L, X_D, Y, Q)
def simple_predictive_probability_multistate(self, M_c, X_L_list, X_D_list, Y, Q):
"""Calculate the probability of a cell taking a value given a latent state
:param M_c: The column metadata
:type M_c: dict
:param X_L_list: list of the latent variables associated with the latent state
:type X_L_list: list of dict
:param X_D_list: list of the particular cluster assignments of each row in each view
:type X_D_list: list of list of lists
:param Y: A list of constraints to apply when sampling. Each constraint
is a triplet of (r,d,v): r is the row index, d is the column
index and v is the value of the constraint
:type Y: list of lists
:param Q: A list of values to sample. Each value is triplet of (r,d,v):
r is the row index, d is the column index, and v is the value at
which the density is evaluated.
:type Q: list of lists
:returns: list of floats -- probabilities of the values specified by Q
"""
return su.simple_predictive_probability_multistate(M_c, X_L_list, X_D_list, Y, Q)
def mutual_information(self, M_c, X_L_list, X_D_list, Q, n_samples=1000):
"""
Return the estimated mutual information for each pair of columns on Q given
the set of samples.
:param M_c: The column metadata
:type M_c: dict
:param X_L_list: list of the latent variables associated with the latent state
:type X_L_list: list of dict
:param X_D_list: list of the particular cluster assignments of each row in each view
:type X_D_list: list of list of lists
:param Q: List of tuples where each tuple contains the two column indexes to compare
:type Q: list of two-tuples of ints
:param n_samples: the number of simple predictive samples to use
:type n_samples: int
:returns: list of list, where each sublist is a set of MIs and Linfoots from each crosscat
sample.
"""
return iu.mutual_information(M_c, X_L_list, X_D_list, Q, n_samples)
def row_structural_typicality(self, X_L_list, X_D_list, row_id):
"""
Returns the typicality (opposite of anomalousness) of the given row.
:param X_L_list: list of the latent variables associated with the latent state
:type X_L_list: list of dict
:param X_D_list: list of the particular cluster assignments of each row in each view
:type X_D_list: list of list of lists
:param row_id: id of the target row
:type row_id: int
:returns: float, the typicality, from 0 to 1
"""
return su.row_structural_typicality(X_L_list, X_D_list, row_id)
def column_structural_typicality(self, X_L_list, col_id):
"""
Returns the typicality (opposite of anomalousness) of the given column.
:param X_L_list: list of the latent variables associated with the latent state
:type X_L_list: list of dict
:param col_id: id of the target col
:type col_id: int
:returns: float, the typicality, from 0 to 1
"""
return su.column_structural_typicality(X_L_list, col_id)
def similarity(self, M_c, X_L_list, X_D_list, given_row_id, target_row_id, target_columns=None):
"""Computes the similarity of the given row to the target row, averaged over all the
column indexes given by target_columns.
:param M_c: The column metadata
:type M_c: dict
:param X_L: list of the latent variables associated with the latent state
:type X_L: list of dicts
:param X_D: list of the particular cluster assignments of each row in each view
:type X_D: list of list of lists
:param given_row_id: the id of one of the rows to measure similarity between
:type given_row_id: int
:param target_row_id: the id of the other row to measure similarity between
:type target_row_id: int
:param target_columns: the columns to average the similarity over. defaults to all columns.
:type target_columns: int, string, or list of ints
:returns: float
"""
return su.similarity(M_c, X_L_list, X_D_list, given_row_id, target_row_id, target_columns)
def impute(self, M_c, X_L, X_D, Y, Q, n):
"""Impute values from the predictive distribution of the given latent state
:param M_c: The column metadata
:type M_c: dict
:param X_L: the latent variables associated with the latent state
:type X_L: dict
:param X_D: the particular cluster assignments of each row in each view
:type X_D: list of lists
:param Y: A list of constraints to apply when sampling. Each constraint
is a triplet of (r,d,v): r is the row index, d is the column
index and v is the value of the constraint
:type Y: list of lists
:param Q: A list of values to sample. Each value is doublet of (r, d):
r is the row index, d is the column index
:type Q: list of lists
:param n: the number of samples to use in the imputation
:type n: int
:returns: list of floats -- imputed values in the same order as
specified by Q
"""
e = su.impute(M_c, X_L, X_D, Y, Q, n, self.get_next_seed)
return e
def impute_and_confidence(self, M_c, X_L, X_D, Y, Q, n):
"""Impute values and confidence of the value from the predictive
distribution of the given latent state
:param M_c: The column metadata
:type M_c: dict
:param X_L: the latent variables associated with the latent state
:type X_L: dict
:param X_D: the particular cluster assignments of each row in each view
:type X_D: list of lists
:param Y: A list of constraints to apply when sampling. Each constraint
is a triplet of (r, d, v): r is the row index, d is the column
index and v is the value of the constraint
:type Y: list of lists
:param Q: A list of values to sample. Each value is doublet of (r, d):
r is the row index, d is the column index
:type Q: list of lists
:param n: the number of samples to use in the imputation
:type n: int
:returns: list of lists -- list of (value, confidence) tuples in the
same order as specified by Q
"""
if isinstance(X_L, (list, tuple)):
assert isinstance(X_D, (list, tuple))
# TODO: multistate impute doesn't exist yet
# e,confidence = su.impute_and_confidence_multistate(M_c, X_L, X_D, Y, Q, n,
# self.get_next_seed)
e, confidence = su.impute_and_confidence(
M_c, X_L, X_D, Y, Q, n, self.get_next_seed)
else:
e, confidence = su.impute_and_confidence(
M_c, X_L, X_D, Y, Q, n, self.get_next_seed)
return (e, confidence)
def ensure_col_dep_constraints(self, M_c, M_r, T, X_L, X_D, dep_constraints,
max_rejections=100):
"""Ensures dependencey or indepdendency between columns.
dep_constraints is a list of where each entry is an (int, int, bool) tuple
where the first two entries are column indices and the third entry
describes whether the columns are to be dependent (True) or independent
(False).
Behavior Notes:
        ensure_col_dep_constraints will add col_ensure enforcement to the
metadata (top level of X_L); unensure_col will remove it. Calling
ensure_col_dep_constraints twice will replace the first ensure.
This operation destroys the existing X_L and X_D metadata; the user
should be aware that it will clobber any existing analyses.
Implementation Notes:
        Initialization is implemented via rejection (by repeatedly initializing
        states and throwing out the ones that do not adhere to dep_constraints).
        This means that if the constraints in dep_constraints are complex or
        impossible to satisfy, the rejection algorithm may fail.
The returned metadata looks like this:
>>> dep_constraints
[(1, 2, True), (2, 5, True), (1, 5, True), (1, 3, False)]
>>> X_L['col_ensure']
{
"dependent" :
{
1 : [2, 5],
2 : [1, 5],
5 : [1, 2]
},
"independent" :
{
1 : [3],
3 : [1]
}
"""
X_L_list, X_D_list, was_multistate = su.ensure_multistate(X_L, X_D)
if was_multistate:
num_states = len(X_L_list)
else:
num_states = 1
col_ensure_md = dict()
col_ensure_md[True] = dict()
col_ensure_md[False] = dict()
for col1, col2, dependent in dep_constraints:
if col1 == col2:
raise ValueError("Cannot specify same columns in dependence"\
" constraints.")
if str(col1) in col_ensure_md[dependent]:
col_ensure_md[dependent][str(col1)].append(col2)
else:
col_ensure_md[dependent][str(col1)] = [col2]
            if str(col2) in col_ensure_md[dependent]:
col_ensure_md[dependent][str(col2)].append(col1)
else:
col_ensure_md[dependent][str(col2)] = [col1]
def assert_dep_constraints(X_L, X_D, dep_constraints):
for col1, col2, dep in dep_constraints:
if not self.assert_col_dep_constraints(X_L, X_D, col1, col2,
dep, True):
return False
return True
X_L_out = []
X_D_out = []
for _ in range(num_states):
counter = 0
X_L_i, X_D_i = self.initialize(M_c, M_r, T)
while not assert_dep_constraints(X_L_i, X_D_i, dep_constraints):
if counter > max_rejections:
raise RuntimeError("Could not ranomly generate a partition"\
" that satisfies the constraints in dep_constraints.")
counter += 1
X_L_i, X_D_i = self.initialize(M_c, M_r, T)
X_L_i['col_ensure'] = dict()
X_L_i['col_ensure']['dependent'] = col_ensure_md[True]
X_L_i['col_ensure']['independent'] = col_ensure_md[False]
X_D_out.append(X_D_i)
X_L_out.append(X_L_i)
if was_multistate:
return X_L_out, X_D_out
else:
return X_L_out[0], X_D_out[0]
def ensure_row_dep_constraint(self, M_c, T, X_L, X_D, row1, row2,
dependent=True, wrt=None, max_iter=100, force=False):
"""Ensures dependencey or indepdendency between rows with respect to
(wrt) columns."""
X_L_list, X_D_list, was_multistate = su.ensure_multistate(X_L, X_D)
if force:
raise NotImplementedError
else:
            kernel_list = ('row_partition_assignments',)
for i, (X_L_i, X_D_i) in enumerate(zip(X_L_list, X_D_list)):
iters = 0
X_L_tmp = copy.deepcopy(X_L_i)
X_D_tmp = copy.deepcopy(X_D_i)
while not self.assert_row(X_L_tmp, X_D_tmp, row1, row2,
dependent=dependent, wrt=wrt):
if iters >= max_iter:
raise RuntimeError('Maximum ensure iterations reached.')
res = self.analyze(M_c, T, X_L_i, X_D_i, kernel_list=kernel_list,
n_steps=1, r=(row1,))
X_L_tmp = res[0]
X_D_tmp = res[1]
iters += 1
X_L_list[i] = X_L_tmp
X_D_list[i] = X_D_tmp
if was_multistate:
return X_L_list, X_D_list
else:
return X_L_list[0], X_D_list[0]
def assert_col_dep_constraints(self, X_L, X_D, col1, col2, dependent=True,
single_bool=False):
# TODO: X_D is not used for anything other than ensure_multistate.
# I should probably edit ensure_multistate to take X_L or X_D using
# keyword arguments.
X_L_list, _, was_multistate = su.ensure_multistate(X_L, X_D)
model_assertions = []
assertion = True
for X_L_i in X_L_list:
assg = X_L_i['column_partition']['assignments']
assertion = (assg[col1] == assg[col2]) == dependent
if single_bool and not assertion:
return False
model_assertions.append(assertion)
if single_bool:
return True
if was_multistate:
return model_assertions
else:
return model_assertions[0]
def assert_row(self, X_L, X_D, row1, row2, dependent=True, wrt=None):
X_L_list, X_D_list, was_multistate = su.ensure_multistate(X_L, X_D)
if wrt is None:
num_cols = len(X_L_list[0]['column_partition']['assignments'])
wrt = range(num_cols)
else:
if not isinstance(wrt, list):
raise TypeError('wrt must be a list')
model_assertions = []
for X_L_i, X_D_i in zip(X_L_list, X_D_list):
view_assg = X_L_i['column_partition']['assignments']
views_wrt = list(set([view_assg[col] for col in wrt]))
model_assertion = True
for view in views_wrt:
if (X_D_i[view][row1] == X_D_i[view][row2]) != dependent:
model_assertion = False
break
model_assertions.append(model_assertion)
if was_multistate:
return model_assertions
else:
return model_assertions[0]
pass
def do_diagnostics_to_func_dict(do_diagnostics):
diagnostic_func_dict = None
reprocess_diagnostics_func = None
if do_diagnostics:
if isinstance(do_diagnostics, (dict,)):
diagnostic_func_dict = do_diagnostics
else:
diagnostic_func_dict = dict(default_diagnostic_func_dict)
if 'reprocess_diagnostics_func' in diagnostic_func_dict:
reprocess_diagnostics_func = diagnostic_func_dict.pop(
'reprocess_diagnostics_func')
return diagnostic_func_dict, reprocess_diagnostics_func
def get_value_in_each_dict(key, dict_list):
return numpy.array([dict_i[key] for dict_i in dict_list]).T
def munge_diagnostics(diagnostics_dict_list):
# all dicts should have the same keys
diagnostic_names = diagnostics_dict_list[0].keys()
diagnostics_dict = {
diagnostic_name: get_value_in_each_dict(diagnostic_name, diagnostics_dict_list)
for diagnostic_name in diagnostic_names
}
return diagnostics_dict
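# Example (values hypothetical): with two chains, each having collected the
# 'num_views' diagnostic twice,
#   munge_diagnostics([{'num_views': [2, 3]}, {'num_views': [2, 2]}])
# returns {'num_views': array([[2, 2], [3, 2]])}, where rows index the
# collection points and columns index the chains.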
# switched ordering so args that change come first
# FIXME: change LocalEngine.initialize to match ordering here
def _do_initialize(SEED, M_c, M_r, T, initialization, row_initialization,
ROW_CRP_ALPHA_GRID, COLUMN_CRP_ALPHA_GRID,
S_GRID, MU_GRID,
N_GRID,
):
p_State = State.p_State(M_c, T, initialization=initialization,
row_initialization=row_initialization, SEED=SEED,
ROW_CRP_ALPHA_GRID=ROW_CRP_ALPHA_GRID,
COLUMN_CRP_ALPHA_GRID=COLUMN_CRP_ALPHA_GRID,
S_GRID=S_GRID,
MU_GRID=MU_GRID,
N_GRID=N_GRID,
)
X_L = p_State.get_X_L()
X_D = p_State.get_X_D()
return X_L, X_D
def _do_initialize_tuple(arg_tuple):
return _do_initialize(*arg_tuple)
def _do_insert_tuple(arg_tuple):
return _do_insert(*arg_tuple)
def _do_insert(M_c, T, X_L, X_D, new_rows, N_GRID, CT_KERNEL):
p_State = State.p_State(M_c, T, X_L=X_L, X_D=X_D,
N_GRID=N_GRID,
CT_KERNEL=CT_KERNEL)
row_idx = len(T)
for row_data in new_rows:
p_State.insert_row(row_data, row_idx)
p_State.transition(which_transitions=['row_partition_assignments'], r=[row_idx])
row_idx += 1
X_L_prime = p_State.get_X_L()
X_D_prime = p_State.get_X_D()
return X_L_prime, X_D_prime
# switched ordering so args that change come first
# FIXME: change LocalEngine.analyze to match ordering here
def _do_analyze(SEED, X_L, X_D, M_c, T, kernel_list, n_steps, c, r,
max_iterations, max_time,
ROW_CRP_ALPHA_GRID, COLUMN_CRP_ALPHA_GRID,
S_GRID, MU_GRID,
N_GRID,
CT_KERNEL,
):
p_State = State.p_State(M_c, T, X_L, X_D, SEED=SEED,
ROW_CRP_ALPHA_GRID=ROW_CRP_ALPHA_GRID,
COLUMN_CRP_ALPHA_GRID=COLUMN_CRP_ALPHA_GRID,
S_GRID=S_GRID,
MU_GRID=MU_GRID,
N_GRID=N_GRID,
CT_KERNEL=CT_KERNEL
)
p_State.transition(kernel_list, n_steps, c, r,
max_iterations, max_time)
X_L_prime = p_State.get_X_L()
X_D_prime = p_State.get_X_D()
return X_L_prime, X_D_prime
def _do_analyze_tuple(arg_tuple):
return _do_analyze_with_diagnostic(*arg_tuple)
def get_child_n_steps_list(n_steps, every_N):
if every_N is None:
# results in one block of size n_steps
every_N = n_steps
missing_endpoint = numpy.arange(0, n_steps, every_N)
with_endpoint = numpy.append(missing_endpoint, n_steps)
child_n_steps_list = numpy.diff(with_endpoint)
return child_n_steps_list.tolist()
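# Example: get_child_n_steps_list(10, 3) -> [3, 3, 3, 1]; diagnostics are
# collected after each of these blocks of transitions.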
none_summary = lambda p_State: None
# switched ordering so args that change come first
# FIXME: change LocalEngine.analyze to match ordering here
def _do_analyze_with_diagnostic(SEED, X_L, X_D, M_c, T, kernel_list, n_steps, c, r,
max_iterations, max_time, diagnostic_func_dict, every_N,
ROW_CRP_ALPHA_GRID, COLUMN_CRP_ALPHA_GRID,
S_GRID, MU_GRID,
N_GRID,
do_timing,
CT_KERNEL,
):
diagnostics_dict = collections.defaultdict(list)
if diagnostic_func_dict is None:
diagnostic_func_dict = dict()
every_N = None
child_n_steps_list = get_child_n_steps_list(n_steps, every_N)
# import ipdb; ipdb.set_trace()
p_State = State.p_State(M_c, T, X_L, X_D, SEED=SEED,
ROW_CRP_ALPHA_GRID=ROW_CRP_ALPHA_GRID,
COLUMN_CRP_ALPHA_GRID=COLUMN_CRP_ALPHA_GRID,
S_GRID=S_GRID,
MU_GRID=MU_GRID,
N_GRID=N_GRID,
CT_KERNEL=CT_KERNEL,
)
with gu.Timer('all transitions', verbose=False) as timer:
for child_n_steps in child_n_steps_list:
p_State.transition(kernel_list, child_n_steps, c, r,
max_iterations, max_time)
for diagnostic_name, diagnostic_func in diagnostic_func_dict.iteritems():
diagnostic_value = diagnostic_func(p_State)
diagnostics_dict[diagnostic_name].append(diagnostic_value)
pass
pass
pass
X_L_prime = p_State.get_X_L()
X_D_prime = p_State.get_X_D()
#
if do_timing:
# diagnostics and timing are exclusive
diagnostics_dict = timer.elapsed_secs
pass
return X_L_prime, X_D_prime, diagnostics_dict
def _do_simple_predictive_sample(M_c, X_L, X_D, Y, Q, n, get_next_seed):
is_multistate = su.get_is_multistate(X_L, X_D)
if is_multistate:
samples = su.simple_predictive_sample_multistate(M_c, X_L, X_D, Y, Q,
get_next_seed, n)
else:
samples = su.simple_predictive_sample(M_c, X_L, X_D, Y, Q,
get_next_seed, n)
return samples
default_diagnostic_func_dict = dict(
# fully qualify path b/c dview.sync_imports can't deal with 'as'
# imports
logscore=crosscat.utils.diagnostic_utils.get_logscore,
num_views=crosscat.utils.diagnostic_utils.get_num_views,
column_crp_alpha=crosscat.utils.diagnostic_utils.get_column_crp_alpha,
    # any outputs required by reprocess_diagnostics_func must be generated
# as well
column_partition_assignments=crosscat.utils.diagnostic_utils.get_column_partition_assignments,
reprocess_diagnostics_func=crosscat.utils.diagnostic_utils.default_reprocess_diagnostics_func,
)
if __name__ == '__main__':
import crosscat.tests.timing_test_utils as ttu
import crosscat.utils.data_utils as du
import crosscat.utils.convergence_test_utils as ctu
# settings
gen_seed = 0
inf_seed = 0
num_clusters = 4
num_cols = 32
num_rows = 400
num_views = 2
n_steps = 1
n_times = 5
n_chains = 3
n_test = 100
CT_KERNEL = 1
# generate some data
T, M_r, M_c, data_inverse_permutation_indices = du.gen_factorial_data_objects(
gen_seed, num_clusters, num_cols, num_rows, num_views,
max_mean=100, max_std=1, send_data_inverse_permutation_indices=True)
view_assignment_truth, X_D_truth = ctu.truth_from_permute_indices(
data_inverse_permutation_indices, num_rows, num_cols, num_views, num_clusters)
# run some tests
engine = LocalEngine(seed=inf_seed)
multi_state_ARIs = []
multi_state_mean_test_lls = []
X_L_list, X_D_list = engine.initialize(M_c, M_r, T, n_chains=n_chains)
multi_state_ARIs.append(
ctu.get_column_ARIs(X_L_list, view_assignment_truth))
for time_i in range(n_times):
X_L_list, X_D_list = engine.analyze(
M_c, T, X_L_list, X_D_list, n_steps=n_steps, CT_KERNEL=CT_KERNEL)
multi_state_ARIs.append(
ctu.get_column_ARIs(X_L_list, view_assignment_truth))
# multi_state_mean_test_lls.append(
# ctu.calc_mean_test_log_likelihoods(M_c, T,
# X_L_list, X_D_list, T_test))
X_L_list, X_D_list, diagnostics_dict = engine.analyze(
M_c, T, X_L_list, X_D_list,
n_steps=n_steps, do_diagnostics=True)
# print results
ct_kernel_name = 'UNKNOWN'
if CT_KERNEL == 0:
ct_kernel_name = 'GIBBS'
elif CT_KERNEL == 1:
ct_kernel_name = 'METROPOLIS'
print 'Running with %s CT_KERNEL' % (ct_kernel_name)
print 'generative_mean_test_log_likelihood'
# print generative_mean_test_log_likelihood
#
print 'multi_state_mean_test_lls:'
print multi_state_mean_test_lls
#
print 'multi_state_ARIs:'
print multi_state_ARIs
|
size(535, 140)
# BitBop -- a fun demonstration of path.contains.
#
# The textpath command returns a BezierPath of the text that can
# be manipulated or, as demonstrated here, queried using path.contains.
# A grid is generated and everywhere a point in the path is encountered,
# a random square is drawn.
background(0.8, 0.7, 0)
fill(0.1, 0.1, 0.2)
# Set the font and create the text path.
font("Verdana", 100)
align(CENTER)
tp = textpath("NodeBox", 0, 100, width=WIDTH)
#tp.draw() # Draws the underlying path
# Here are the variables that influence the composition:
resx = 100 # The horizontal resolution
resy = 100 # The vertical resolution
rx = 5.0 # The horizontal randomness each point has
ry = 5.0 # The vertical randomness each point has
dotsize = 6.0 # The maximum size of one dot.
dx = WIDTH / float(resx) # The width each dot covers
dy = HEIGHT / float(resy) # The height each dot covers
# We create a grid of the specified resolution.
# Each x,y coordinate is a measuring point where
# we check if it falls within the path.
for x, y in grid(resx, resy):
sz = random(dotsize)
# Create the point that will be checked
px = x*dx-sz
py = y*dy-sz
    # Only do something if the point falls within the path.
    # You could add an "else" statement that draws something in the
    # empty positions.
if tp.contains(px, py):
# Change the color for each point -- try it out!
# fill(0, 0, random(), random())
oval(px+random(-rx, rx),
py+random(-ry, ry),
sz, sz)
|
# -*- coding: utf-8 -*-
"""
This file contains the Qudi hardware dummy for fast counting devices.
Qudi is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Qudi is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Qudi. If not, see <http://www.gnu.org/licenses/>.
Copyright (c) the Qudi Developers. See the COPYRIGHT.txt file at the
top-level directory of this distribution and at <https://github.com/Ulm-IQO/qudi/>
"""
import time
import os
import numpy as np
from core.base import Base
from interface.fast_counter_interface import FastCounterInterface
class InterfaceImplementationError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class FastCounterDummy(Base, FastCounterInterface):
"""This is the Interface class to define the controls for the simple
microwave hardware.
"""
_modclass = 'fastcounterinterface'
_modtype = 'hardware'
def __init__(self, config, **kwargs):
super().__init__(config=config, **kwargs)
self.log.info('The following configuration was found.')
# checking for the right configuration
for key in config.keys():
self.log.info('{0}: {1}'.format(key,config[key]))
if 'gated' in config.keys():
self._gated = config['gated']
else:
self._gated = False
self.log.warning('No parameter "gated" was specified in the '
'config. The default configuration gated={0} will be '
'taken instead.'.format(self._gated))
if 'load_trace' in config.keys():
self.trace_path = config['load_trace']
else:
self.trace_path = os.path.join(
self.get_main_dir(),
'tools',
'FastComTec_demo_timetrace.asc')
def on_activate(self):
""" Initialisation performed during activation of the module.
"""
self.statusvar = 0
self._binwidth = 1
self._gate_length_bins = 8192
return
def on_deactivate(self):
""" Deinitialisation performed during deactivation of the module.
"""
self.statusvar = -1
return
def get_constraints(self):
""" Retrieve the hardware constrains from the Fast counting device.
@return dict: dict with keys being the constraint names as string and
                      items are the definition for the constraints.
The keys of the returned dictionary are the str name for the constraints
(which are set in this method).
NO OTHER KEYS SHOULD BE INVENTED!
If you are not sure about the meaning, look in other hardware files to
get an impression. If still additional constraints are needed, then they
have to be added to all files containing this interface.
The items of the keys are again dictionaries which have the generic
dictionary form:
{'min': <value>,
'max': <value>,
'step': <value>,
'unit': '<value>'}
Only the key 'hardware_binwidth_list' differs, since they
contain the list of possible binwidths.
If the constraints cannot be set in the fast counting hardware then
write just zero to each key of the generic dicts.
Note that there is a difference between float input (0.0) and
integer input (0), because some logic modules might rely on that
distinction.
ALL THE PRESENT KEYS OF THE CONSTRAINTS DICT MUST BE ASSIGNED!
"""
constraints = dict()
        # the unit of these entries is seconds per bin. In order to get the
        # current binwidth in seconds, use the get_binwidth method.
constraints['hardware_binwidth_list'] = [1/950e6, 2/950e6, 4/950e6, 8/950e6]
return constraints
def configure(self, bin_width_s, record_length_s, number_of_gates = 0):
""" Configuration of the fast counter.
@param float bin_width_s: Length of a single time bin in the time trace
histogram in seconds.
@param float record_length_s: Total length of the timetrace/each single
gate in seconds.
@param int number_of_gates: optional, number of gates in the pulse
sequence. Ignore for not gated counter.
@return tuple(binwidth_s, gate_length_s, number_of_gates):
binwidth_s: float the actual set binwidth in seconds
gate_length_s: the actual set gate length in seconds
                    number_of_gates: the number of gates which are accepted
"""
self._binwidth = int(np.rint(bin_width_s * 1e9 * 950 / 1000))
self._gate_length_bins = int(np.rint(record_length_s / bin_width_s))
actual_binwidth = self._binwidth * 1000 / 950e9
actual_length = self._gate_length_bins * actual_binwidth
self.statusvar = 1
return actual_binwidth, actual_length, number_of_gates
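    # Worked example (illustrative, not part of the original source): with the
    # 950 MHz clock used throughout this dummy, bin_width_s = 2/950e6 gives
    # _binwidth = int(np.rint(2/950e6 * 1e9 * 950 / 1000)) = 2 and an actual
    # binwidth of 2 * 1000 / 950e9 ~= 2.105e-9 s.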
def get_status(self):
""" Receives the current status of the Fast Counter and outputs it as
return value.
0 = unconfigured
1 = idle
2 = running
3 = paused
-1 = error state
"""
return self.statusvar
def start_measure(self):
time.sleep(1)
self.statusvar = 2
try:
self._count_data = np.loadtxt(self.trace_path)
except:
return -1
return 0
def pause_measure(self):
""" Pauses the current measurement.
Fast counter must be initially in the run state to make it pause.
"""
time.sleep(1)
self.statusvar = 3
return 0
def stop_measure(self):
""" Stop the fast counter. """
time.sleep(1)
self.statusvar = 1
return 0
def continue_measure(self):
""" Continues the current measurement.
If fast counter is in pause state, then fast counter will be continued.
"""
self.statusvar = 2
return 0
def is_gated(self):
""" Check the gated counting possibility.
@return bool: Boolean value indicates if the fast counter is a gated
counter (TRUE) or not (FALSE).
"""
return self._gated
def get_binwidth(self):
""" Returns the width of a single timebin in the timetrace in seconds.
@return float: current length of a single bin in seconds (seconds/bin)
"""
width_in_seconds = self._binwidth * 1/950e6
return width_in_seconds
def get_data_trace(self):
""" Polls the current timetrace data from the fast counter.
Return value is a numpy array (dtype = int64).
        The binning, specified by calling configure() beforehand, must be
taken care of in this hardware class. A possible overflow of the
histogram bins must be caught here and taken care of.
If the counter is NOT GATED it will return a 1D-numpy-array with
returnarray[timebin_index]
If the counter is GATED it will return a 2D-numpy-array with
returnarray[gate_index, timebin_index]
"""
# include an artificial waiting time
time.sleep(0.5)
return self._count_data
def get_frequency(self):
freq = 950.
time.sleep(0.5)
return freq
|
# encoding: utf-8
# TODO: make abstract class for all models/managers
# to prevent code coping of common methods (for example _predict method)
from PyQt4.QtCore import *
import copy
import numpy as np
from processing.molusce.algorithms.dataprovider import Raster, ProviderError
from processing.molusce.algorithms.models.mlp.model import MLP, sigmoid
from processing.molusce.algorithms.models.sampler.sampler import Sampler
from processing.molusce.algorithms.models.correlation.model import DependenceCoef
class MlpManagerError(Exception):
'''Base class for exceptions in this module.'''
def __init__(self, msg):
self.msg = msg
class MlpManager(QObject):
    '''This class gets the data extracted from the UI,
    passes it to the multi-layer perceptron, then gets and stores the result.
'''
updateGraph = pyqtSignal(float, float) # Train error, val. error
updateMinValErr = pyqtSignal(float) # Min validation error
updateDeltaRMS = pyqtSignal(float) # Delta of RMS: min(valError) - currentValError
updateKappa = pyqtSignal(float) # Kappa value
processFinished = pyqtSignal()
processInterrupted = pyqtSignal()
logMessage = pyqtSignal(str)
errorReport = pyqtSignal(str)
rangeChanged = pyqtSignal(str, int)
updateProgress = pyqtSignal()
def __init__(self, ns=0, MLP=None):
QObject.__init__(self)
self.MLP = MLP
self.interrupted = False
self.layers = None
if self.MLP:
self.layers = self.getMlpTopology()
self.ns = ns # Neighbourhood size of training rasters.
self.data = None # Training data
self.catlist = None # List of unique output values of the output raster
self.train_error = None # Error on training set
self.val_error = None # Error on validation set
self.minValError = None # The minimum error that is achieved on the validation set
        self.valKappa = 0               # Kappa on the validation set
self.sampler = None # Sampler
# Results of the MLP prediction
self.prediction = None # Raster of the MLP prediction results
self.confidence = None # Raster of the MLP results confidence (1 = the maximum confidence, 0 = the least confidence)
        self.transitionPotentials = None # Dictionary of transition potential maps: {category1: map1, category2: map2, ...}
# Outputs of the activation function for small and big numbers
self.sigmax, self.sigmin = sigmoid(100), sigmoid(-100) # Max and Min of the sigmoid function
self.sigrange = self.sigmax - self.sigmin # Range of the sigmoid
def computeMlpError(self, sample):
'''Get MLP error on the sample'''
input = np.hstack( (sample['state'], sample['factors']) )
out = self.getOutput( input )
err = ((sample['output'] - out)**2).sum()/len(out)
return err
def computePerformance(self, train_indexes, val_ind):
'''Check errors of training and validation sets
@param train_indexes Tuple that contains indexes of the first and last elements of the training set.
@param val_ind Tuple that contains indexes of the first and last elements of the validation set.
'''
train_error = 0
train_sampl = train_indexes[1] - train_indexes[0] # Count of training samples
for i in range(train_indexes[0], train_indexes[1]):
train_error = train_error + self.computeMlpError(sample = self.data[i])
self.setTrainError(train_error/train_sampl)
if val_ind:
val_error = 0
val_sampl = val_ind[1] - val_ind[0]
answers = np.ma.zeros(val_sampl)
out = np.ma.zeros(val_sampl)
for i in xrange(val_ind[0], val_ind[1]):
sample = self.data[i]
val_error = val_error + self.computeMlpError(sample = self.data[i])
input = np.hstack( (sample['state'],sample['factors']) )
output = self.getOutput(input)
out[i-val_ind[0]] = self.outCategory(output)
answers[i-val_ind[0]] = self.outCategory(sample['output'])
self.setValError(val_error/val_sampl)
depCoef = DependenceCoef(out, answers, expand=True)
self.valKappa = depCoef.kappa(mode=None)
def copyWeights(self):
'''Deep copy of the MLP weights'''
return copy.deepcopy(self.MLP.weights)
def createMlp(self, state, factors, output, hidden_layers):
'''
@param state Raster of the current state (categories) values.
@param factors List of the factor rasters (predicting variables).
        @param output Raster that contains categories to predict.
        @param hidden_layers List of neuron counts in hidden layers.
        (The neighbourhood size self.ns set in the constructor is used as well.)
'''
if output.getBandsCount() != 1:
raise MlpManagerError('Output layer must have one band!')
input_neurons = 0
for raster in factors:
input_neurons = input_neurons+ raster.getNeighbourhoodSize(self.ns)
        # state raster contains categories. We need to use n-1 dummy variables (where n = number of categories)
input_neurons = input_neurons + (len(state.getBandGradation(1))-1) * state.getNeighbourhoodSize(self.ns)
# Output category's (neuron) list and count
self.catlist = output.getBandGradation(1)
categories = len(self.catlist)
# set neuron counts in the MLP layers
self.layers = hidden_layers
self.layers.insert(0, input_neurons)
self.layers.append(categories)
self.MLP = MLP(*self.layers)
def getConfidence(self):
return self.confidence
def getInputVectLen(self):
'''Length of input data vector of the MLP'''
shape = self.getMlpTopology()
return shape[0]
def getOutput(self, input_vector):
out = self.MLP.propagate_forward( input_vector )
return out
def getOutputVectLen(self):
        '''Length of the output data vector of the MLP'''
shape = self.getMlpTopology()
return shape[-1]
def getOutputVector(self, val):
'''Convert a number val into vector,
for example, let self.catlist = [1, 3, 4] then
if val = 1, result = [ 1, -1, -1]
if val = 3, result = [-1, 1, -1]
if val = 4, result = [-1, -1, 1]
where -1 is minimum of the sigmoid, 1 is max of the sigmoid
'''
size = self.getOutputVectLen()
res = np.ones(size) * (self.sigmin)
ind = np.where(self.catlist==val)
res[ind] = self.sigmax
return res
def getMinValError(self):
return self.minValError
def getMlpTopology(self):
return self.MLP.shape
def getKappa(self):
return self.valKappa
def getPrediction(self, state, factors, calcTransitions=False):
self._predict(state, factors, calcTransitions)
return self.prediction
def getTrainError(self):
return self.train_error
def getTransitionPotentials(self):
return self.transitionPotentials
def getValError(self):
return self.val_error
def outCategory(self, out_vector):
# Get index of the biggest output value as the result
biggest = max(out_vector)
res = list(out_vector).index(biggest)
res = self.catlist[res]
return res
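    # Example (illustrative, not part of the original source): with
    # catlist = [1, 3, 4] and out_vector = [-0.9, 0.8, -0.7], the largest
    # output is at index 1, so outCategory returns category 3.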
def outputConfidence(self, output, scale=True):
'''
Return confidence (difference between 2 biggest values) of the MLP output.
        @param output: The output of the MLP
@param scale: If True, then scale the confidence to int [0, 1, ..., 100] percent
'''
out_scl = self.scaleOutput(output, percent=scale)
out_scl.sort()
return out_scl[-1] - out_scl[-2]
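    # Example (illustrative): if the scaled outputs are [5, 20, 75] percent,
    # outputConfidence returns 75 - 20 = 55.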
def outputTransitions(self, output, scale=True):
'''
        Return transition potentials of the outputs scaled to [0,1] or 1-100
@param output: The output of MLP
@param scale: If True, then scale the transitions to int ([0, 1, ..., 100]) percent
'''
out_scl = self.scaleOutput(output, percent=scale)
result = {}
for r, v in enumerate(out_scl):
cat = self.catlist[r]
result[cat] = v
return result
def scaleOutput(self, output, percent=True):
'''
Scale the output to range [0,1] or 1-100
@param output: Output of a MLP
@param percent: If True, then scale the output to int [0, 1, ..., 100] percent
'''
res = 1.0 * (output - self.sigmin) / self.sigrange
if percent:
res = [ int(100 * x) for x in res]
return res
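    # Example (illustrative): an output equal to sigmax maps to 1.0 (100 when
    # percent=True) and an output equal to sigmin maps to 0.0, since
    # res = (output - sigmin) / (sigmax - sigmin).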
def _predict(self, state, factors, calcTransitions=False):
'''
Calculate output and confidence rasters using MLP model and input rasters
@param state Raster of the current state (categories) values.
@param factors List of the factor rasters (predicting variables).
'''
try:
self.rangeChanged.emit(self.tr("Initialize model %p%"), 1)
geodata = state.getGeodata()
rows, cols = geodata['ySize'], geodata['xSize']
for r in factors:
if not state.geoDataMatch(r):
raise MlpManagerError('Geometries of the input rasters are different!')
self.transitionPotentials = None # Reset tr.potentials if they exist
# Normalize factors before prediction:
for f in factors:
f.normalize(mode = 'mean')
predicted_band = np.zeros([rows, cols], dtype=np.uint8)
confidence_band = np.zeros([rows, cols], dtype=np.uint8)
if calcTransitions:
self.transitionPotentials = {}
for cat in self.catlist:
self.transitionPotentials[cat] = np.zeros([rows, cols], dtype=np.uint8)
self.sampler = Sampler(state, factors, ns=self.ns)
mask = state.getBand(1).mask.copy()
if mask.shape == ():
mask = np.zeros([rows, cols], dtype=np.bool)
self.updateProgress.emit()
self.rangeChanged.emit(self.tr("Prediction %p%"), rows)
for i in xrange(rows):
for j in xrange(cols):
if not mask[i,j]:
input = self.sampler.get_inputs(state, i,j)
                    if input is not None:
out = self.getOutput(input)
res = self.outCategory(out)
predicted_band[i, j] = res
confidence = self.outputConfidence(out)
confidence_band[i, j] = confidence
if calcTransitions:
potentials = self.outputTransitions(out)
for cat in self.catlist:
map = self.transitionPotentials[cat]
map[i, j] = potentials[cat]
else: # Input sample is incomplete => mask this pixel
mask[i, j] = True
self.updateProgress.emit()
predicted_bands = [np.ma.array(data = predicted_band, mask = mask, dtype=np.uint8)]
confidence_bands = [np.ma.array(data = confidence_band, mask = mask, dtype=np.uint8)]
self.prediction = Raster()
self.prediction.create(predicted_bands, geodata)
self.confidence = Raster()
self.confidence.create(confidence_bands, geodata)
if calcTransitions:
for cat in self.catlist:
band = [np.ma.array(data=self.transitionPotentials[cat], mask=mask, dtype=np.uint8)]
self.transitionPotentials[cat] = Raster()
self.transitionPotentials[cat].create(band, geodata)
except MemoryError:
self.errorReport.emit(self.tr("The system out of memory during ANN prediction"))
raise
except:
self.errorReport.emit(self.tr("An unknown error occurs during ANN prediction"))
raise
def readMlp(self):
pass
def resetErrors(self):
self.val_error = np.finfo(np.float).max
self.train_error = np.finfo(np.float).max
def resetMlp(self):
self.MLP.reset()
self.resetErrors()
def saveMlp(self):
pass
def saveSamples(self, fileName):
self.sampler.saveSamples(fileName)
def setMlpWeights(self, w):
'''Set weights of the MLP'''
self.MLP.weights = w
def setTrainingData(self, state, factors, output, shuffle=True, mode='All', samples=None):
'''
@param state Raster of the current state (categories) values.
@param factors List of the factor rasters (predicting variables).
@param output Raster that contains categories to predict.
@param shuffle Perform random shuffle.
@param mode Type of sampling method:
All Get all pixels
            Random          Get random samples. Count of samples in the data = samples.
Stratified Undersampling of major categories and/or oversampling of minor categories.
        @param samples Sample count of the training data (not used in 'All' mode).
'''
if not self.MLP:
raise MlpManagerError('You must create a MLP before!')
# Normalize factors before sampling:
for f in factors:
f.normalize(mode = 'mean')
self.sampler = Sampler(state, factors, output, self.ns)
self.sampler.setTrainingData(state=state, output=output, shuffle=shuffle, mode=mode, samples=samples)
outputVecLen = self.getOutputVectLen()
stateVecLen = self.sampler.stateVecLen
factorVectLen = self.sampler.factorVectLen
size = len(self.sampler.data)
self.data = np.zeros(size, dtype=[('coords', float, 2), ('state', float, stateVecLen), ('factors', float, factorVectLen), ('output', float, outputVecLen)])
self.data['coords'] = self.sampler.data['coords']
self.data['state'] = self.sampler.data['state']
self.data['factors'] = self.sampler.data['factors']
self.data['output'] = [self.getOutputVector(sample['output']) for sample in self.sampler.data]
def setTrainError(self, error):
self.train_error = error
def setValError(self, error):
self.val_error = error
def setEpochs(self, epochs):
self.epochs = epochs
def setValPercent(self, value=20):
self.valPercent = value
def setLRate(self, value=0.1):
self.lrate = value
def setMomentum(self, value=0.01):
self.momentum = value
def setContinueTrain(self, value=False):
self.continueTrain = value
def startTrain(self):
self.train(self.epochs, self.valPercent, self.lrate, self.momentum, self.continueTrain)
def stopTrain(self):
self.interrupted = True
def train(self, epochs, valPercent=20, lrate=0.1, momentum=0.01, continue_train=False):
'''Perform the training procedure on the MLP and save the best neural net
        @param epochs Max iteration count.
@param valPercent Percent of the validation set.
@param lrate Learning rate.
@param momentum Learning momentum.
        @param continue_train If False, start a new training cycle (reset the weights and the training and validation errors). If True, continue training.
'''
try:
samples_count = len(self.data)
val_sampl_count = samples_count*valPercent/100
            apply_validation = val_sampl_count > 0 # Use or not use validation set
train_sampl_count = samples_count - val_sampl_count
# Set first train_sampl_count as training set, the other as validation set
train_indexes = (0, train_sampl_count)
val_indexes = (train_sampl_count, samples_count) if apply_validation else None
if not continue_train: self.resetMlp()
self.minValError = self.getValError() # The minimum error that is achieved on the validation set
last_train_err = self.getTrainError()
            best_weights = self.copyWeights() # The MLP weights at which the minimum validation error was achieved
self.rangeChanged.emit(self.tr("Train model %p%"), epochs)
for epoch in range(epochs):
self.trainEpoch(train_indexes, lrate, momentum)
self.computePerformance(train_indexes, val_indexes)
self.updateGraph.emit(self.getTrainError(), self.getValError())
self.updateDeltaRMS.emit(self.getMinValError() - self.getValError())
self.updateKappa.emit(self.getKappa())
QCoreApplication.processEvents()
if self.interrupted:
self.processInterrupted.emit()
break
last_train_err = self.getTrainError()
self.setTrainError(last_train_err)
if apply_validation and (self.getValError() < self.getMinValError()):
self.minValError = self.getValError()
best_weights = self.copyWeights()
self.updateMinValErr.emit(self.getMinValError())
self.updateProgress.emit()
self.setMlpWeights(best_weights)
except MemoryError:
self.errorReport.emit(self.tr("The system out of memory during ANN training"))
raise
except:
self.errorReport.emit(self.tr("An unknown error occurs during ANN trainig"))
raise
finally:
self.processFinished.emit()
def trainEpoch(self, train_indexes, lrate=0.1, momentum=0.01):
'''Perform a training epoch on the MLP
        @param train_indexes Tuple of the min&max indexes of training samples in the samples data.
@param lrate Learning rate.
@param momentum Learning momentum.
'''
train_sampl = train_indexes[1] - train_indexes[0]
for i in range(train_sampl):
n = np.random.randint( *train_indexes )
sample = self.data[n]
input = np.hstack( (sample['state'],sample['factors']) )
self.getOutput( input ) # Forward propagation
self.MLP.propagate_backward( sample['output'], lrate, momentum )
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
"""
Created on Apr 25, 2012
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "[email protected]"
__date__ = "Apr 25, 2012"
import random
from pymatgen.core.lattice import Lattice
from pymatgen.util.coord import *
from pymatgen.util.testing import PymatgenTest
class CoordUtilsTest(PymatgenTest):
def test_get_linear_interpolated_value(self):
xvals = [0, 1, 2, 3, 4, 5]
yvals = [3, 6, 7, 8, 10, 12]
self.assertEqual(get_linear_interpolated_value(xvals, yvals, 3.6), 9.2)
self.assertRaises(ValueError, get_linear_interpolated_value, xvals,
yvals, 6)
def test_in_coord_list(self):
coords = [[0, 0, 0], [0.5, 0.5, 0.5]]
test_coord = [0.1, 0.1, 0.1]
self.assertFalse(in_coord_list(coords, test_coord))
self.assertTrue(in_coord_list(coords, test_coord, atol=0.15))
self.assertFalse(in_coord_list([0.99, 0.99, 0.99], test_coord,
atol=0.15))
def test_is_coord_subset(self):
c1 = [0,0,0]
c2 = [0,1.2,-1]
c3 = [3,2,1]
c4 = [3-9e-9, 2-9e-9, 1-9e-9]
self.assertTrue(is_coord_subset([c1, c2, c3], [c1, c4, c2]))
self.assertTrue(is_coord_subset([c1], [c2, c1]))
self.assertTrue(is_coord_subset([c1, c2], [c2, c1]))
self.assertFalse(is_coord_subset([c1, c2], [c2, c3]))
self.assertFalse(is_coord_subset([c1, c2], [c2]))
def test_coord_list_mapping(self):
c1 = [0,.124,0]
c2 = [0,1.2,-1]
c3 = [3,2,1]
a = np.array([c1, c2])
b = np.array([c3, c2, c1])
inds = coord_list_mapping(a, b)
self.assertTrue(np.allclose(a, b[inds]))
self.assertRaises(Exception, coord_list_mapping, [c1,c2], [c2,c3])
self.assertRaises(Exception, coord_list_mapping, [c2], [c2,c2])
def test_coord_list_mapping_pbc(self):
c1 = [0.1, 0.2, 0.3]
c2 = [0.2, 0.3, 0.3]
c3 = [0.5, 0.3, 0.6]
c4 = [1.5, -0.7, -1.4]
a = np.array([c1, c3, c2])
b = np.array([c4, c2, c1])
inds = coord_list_mapping_pbc(a, b)
diff = a - b[inds]
diff -= np.round(diff)
self.assertTrue(np.allclose(diff, 0))
self.assertRaises(Exception, coord_list_mapping_pbc, [c1,c2], [c2,c3])
self.assertRaises(Exception, coord_list_mapping_pbc, [c2], [c2,c2])
def test_find_in_coord_list(self):
coords = [[0, 0, 0], [0.5, 0.5, 0.5]]
test_coord = [0.1, 0.1, 0.1]
self.assertFalse(find_in_coord_list(coords, test_coord))
self.assertEqual(find_in_coord_list(coords, test_coord, atol=0.15)[0],
0)
self.assertFalse(find_in_coord_list([0.99, 0.99, 0.99], test_coord,
atol=0.15))
coords = [[0, 0, 0], [0.5, 0.5, 0.5], [0.1, 0.1, 0.1]]
self.assertArrayEqual(find_in_coord_list(coords, test_coord,
atol=0.15), [0, 2])
def test_all_distances(self):
coords1 = [[0, 0, 0], [0.5, 0.5, 0.5]]
coords2 = [[1, 2, -1], [1, 0, 0], [1, 0, 0]]
result = [[2.44948974, 1, 1], [2.17944947, 0.8660254, 0.8660254]]
self.assertArrayAlmostEqual(all_distances(coords1, coords2), result, 4)
def test_pbc_diff(self):
self.assertArrayAlmostEqual(pbc_diff([0.1, 0.1, 0.1], [0.3, 0.5, 0.9]),
[-0.2, -0.4, 0.2])
self.assertArrayAlmostEqual(pbc_diff([0.9, 0.1, 1.01],
[0.3, 0.5, 0.9]),
[-0.4, -0.4, 0.11])
self.assertArrayAlmostEqual(pbc_diff([0.1, 0.6, 1.01],
[0.6, 0.1, 0.9]),
[-0.5, 0.5, 0.11])
self.assertArrayAlmostEqual(pbc_diff([100.1, 0.2, 0.3],
[0123123.4, 0.5, 502312.6]),
[-0.3, -0.3, -0.3])
def test_in_coord_list_pbc(self):
coords = [[0, 0, 0], [0.5, 0.5, 0.5]]
test_coord = [0.1, 0.1, 0.1]
self.assertFalse(in_coord_list_pbc(coords, test_coord))
self.assertTrue(in_coord_list_pbc(coords, test_coord, atol=0.15))
test_coord = [0.99, 0.99, 0.99]
self.assertFalse(in_coord_list_pbc(coords, test_coord, atol=0.01))
def test_find_in_coord_list_pbc(self):
coords = [[0, 0, 0], [0.5, 0.5, 0.5]]
test_coord = [0.1, 0.1, 0.1]
self.assertFalse(find_in_coord_list_pbc(coords, test_coord))
self.assertEqual(find_in_coord_list_pbc(coords, test_coord,
atol=0.15)[0], 0)
test_coord = [0.99, 0.99, 0.99]
self.assertEqual(
find_in_coord_list_pbc(coords, test_coord, atol=0.02)[0], 0)
test_coord = [-0.499, -0.499, -0.499]
self.assertEqual(
find_in_coord_list_pbc(coords, test_coord, atol=0.01)[0], 1)
def test_is_coord_subset_pbc(self):
c1 = [0, 0, 0]
c2 = [0, 1.2, -1]
c3 = [2.3, 0, 1]
c4 = [1.3-9e-9, -1-9e-9, 1-9e-9]
self.assertTrue(is_coord_subset_pbc([c1, c2, c3], [c1, c4, c2]))
self.assertTrue(is_coord_subset_pbc([c1], [c2, c1]))
self.assertTrue(is_coord_subset_pbc([c1, c2], [c2, c1]))
self.assertFalse(is_coord_subset_pbc([c1, c2], [c2, c3]))
self.assertFalse(is_coord_subset_pbc([c1, c2], [c2]))
# test tolerances
c5 = [0.1, 0.1, 0.2]
atol1 = [0.25, 0.15, 0.15]
atol2 = [0.15, 0.15, 0.25]
self.assertFalse(is_coord_subset_pbc([c1], [c5], atol1))
self.assertTrue(is_coord_subset_pbc([c1], [c5], atol2))
# test mask
mask1 = [[True]]
self.assertFalse(is_coord_subset_pbc([c1], [c5], atol2, mask1))
mask2 = [[True, False]]
self.assertTrue(is_coord_subset_pbc([c1], [c2, c1], mask=mask2))
self.assertFalse(is_coord_subset_pbc([c1], [c1, c2], mask=mask2))
mask3 = [[False, True]]
self.assertFalse(is_coord_subset_pbc([c1], [c2, c1], mask=mask3))
self.assertTrue(is_coord_subset_pbc([c1], [c1, c2], mask=mask3))
def test_lattice_points_in_supercell(self):
supercell = np.array([[1, 3, 5], [-3, 2, 3], [-5, 3, 1]])
points = lattice_points_in_supercell(supercell)
self.assertAlmostEqual(len(points), abs(np.linalg.det(supercell)))
self.assertGreaterEqual(np.min(points), -1e-10)
self.assertLessEqual(np.max(points), 1-1e-10)
supercell = np.array([[-5, -5, -3], [0, -4, -2], [0, -5, -2]])
points = lattice_points_in_supercell(supercell)
self.assertAlmostEqual(len(points), abs(np.linalg.det(supercell)))
self.assertGreaterEqual(np.min(points), -1e-10)
self.assertLessEqual(np.max(points), 1-1e-10)
def test_barycentric(self):
#2d test
simplex1 = np.array([[0.3, 0.1], [0.2, -1.2], [1.3, 2.3]])
pts1 = np.array([[0.6, 0.1], [1.3, 2.3], [0.5, 0.5], [.7, 1]])
output1 = barycentric_coords(pts1, simplex1)
#do back conversion to cartesian
o_dot_s = np.sum(output1[:, :, None] * simplex1[None, :, :], axis=1)
self.assertTrue(np.allclose(pts1, o_dot_s))
#do 3d tests
simplex2 = np.array([[0, 0, 1], [0, 1, 0], [1, 0, 0], [0, 0, 0]])
pts2 = np.array([[0, 0, 1], [0, 0.5, 0.5], [1./3, 1./3, 1./3]])
output2 = barycentric_coords(pts2, simplex2)
self.assertTrue(np.allclose(output2[1], [0.5, 0.5, 0, 0]))
#do back conversion to cartesian
o_dot_s = np.sum(output2[:, :, None] * simplex2[None, :, :], axis=1)
self.assertTrue(np.allclose(pts2, o_dot_s))
#test single point
self.assertTrue(np.allclose(output2[2],
barycentric_coords(pts2[2], simplex2)))
def test_pbc_shortest_vectors(self):
fcoords = np.array([[0.3, 0.3, 0.5],
[0.1, 0.1, 0.3],
[0.9, 0.9, 0.8],
[0.1, 0.0, 0.5],
[0.9, 0.7, 0.0]])
lattice = Lattice.from_lengths_and_angles([8, 8, 4],
[90, 76, 58])
expected = np.array([[0.000, 3.015, 4.072, 3.519, 3.245],
[3.015, 0.000, 3.207, 1.131, 4.453],
[4.072, 3.207, 0.000, 2.251, 1.788],
[3.519, 1.131, 2.251, 0.000, 3.852]])
vectors = pbc_shortest_vectors(lattice, fcoords[:-1], fcoords)
dists = np.sum(vectors**2, axis = -1)**0.5
self.assertArrayAlmostEqual(dists, expected, 3)
#now try with small loop threshold
from pymatgen.util import coord
prev_threshold = coord.LOOP_THRESHOLD
coord.LOOP_THRESHOLD = 0
vectors = pbc_shortest_vectors(lattice, fcoords[:-1], fcoords)
dists = np.sum(vectors**2, axis = -1)**0.5
self.assertArrayAlmostEqual(dists, expected, 3)
coord.LOOP_THRESHOLD = prev_threshold
def test_get_angle(self):
v1 = (1, 0, 0)
v2 = (1, 1, 1)
self.assertAlmostEqual(get_angle(v1, v2), 54.7356103172)
self.assertAlmostEqual(get_angle(v1, v2, units="radians"),
0.9553166181245092)
class SimplexTest(PymatgenTest):
def setUp(self):
coords = []
coords.append([0, 0, 0])
coords.append([0, 1, 0])
coords.append([0, 0, 1])
coords.append([1, 0, 0])
self.simplex = Simplex(coords)
def test_equal(self):
c2 = list(self.simplex.coords)
random.shuffle(c2)
self.assertEqual(Simplex(c2), self.simplex)
def test_in_simplex(self):
self.assertTrue(self.simplex.in_simplex([0.1, 0.1, 0.1]))
self.assertFalse(self.simplex.in_simplex([0.6, 0.6, 0.6]))
for i in range(10):
coord = np.random.random_sample(size=3) / 3
self.assertTrue(self.simplex.in_simplex(coord))
def test_2dtriangle(self):
s = Simplex([[0, 1], [1, 1], [1, 0]])
self.assertArrayAlmostEqual(s.bary_coords([0.5, 0.5]),
[0.5, 0, 0.5])
self.assertArrayAlmostEqual(s.bary_coords([0.5, 1]), [0.5, 0.5, 0])
self.assertArrayAlmostEqual(s.bary_coords([0.5, 0.75]), [0.5, 0.25, 0.25])
self.assertArrayAlmostEqual(s.bary_coords([0.75, 0.75]), [0.25, 0.5, 0.25])
s = Simplex([[1, 1], [1, 0]])
self.assertRaises(ValueError, s.bary_coords, [0.5, 0.5])
def test_volume(self):
        # Should be the volume of a right tetrahedron.
self.assertAlmostEqual(self.simplex.volume, 1/6)
def test_str(self):
self.assertTrue(str(self.simplex).startswith("3-simplex in 4D space"))
self.assertTrue(repr(self.simplex).startswith("3-simplex in 4D space"))
def test_bary_coords(self):
s = Simplex([[0, 2], [3, 1], [1, 0]])
point = [0.7, 0.5]
bc = s.bary_coords(point)
self.assertArrayAlmostEqual(bc, [0.26, -0.02, 0.76])
new_point = s.point_from_bary_coords(bc)
self.assertArrayAlmostEqual(point, new_point)
def test_intersection(self):
# simple test, with 2 intersections at faces
s = Simplex([[0, 2], [3, 1], [1, 0]])
point1 = [0.7, 0.5]
point2 = [0.5, 0.7]
intersections = s.line_intersection(point1, point2)
expected = np.array([[1.13333333, 0.06666667],
[ 0.8, 0.4]])
self.assertArrayAlmostEqual(intersections, expected)
# intersection through point and face
point1 = [0, 2] # simplex point
point2 = [1, 1] # inside simplex
expected = np.array([[1.66666667, 0.33333333],
[0, 2]])
intersections = s.line_intersection(point1, point2)
self.assertArrayAlmostEqual(intersections, expected)
# intersection through point only
point1 = [0, 2] # simplex point
point2 = [0.5, 0.7]
expected = np.array([[0, 2]])
intersections = s.line_intersection(point1, point2)
self.assertArrayAlmostEqual(intersections, expected)
# 3d intersection through edge and face
point1 = [0.5, 0, 0] # edge point
point2 = [0.5, 0.5, 0.5] # in simplex
expected = np.array([[ 0.5, 0.25, 0.25],
[ 0.5, 0. , 0. ]])
intersections = self.simplex.line_intersection(point1, point2)
self.assertArrayAlmostEqual(intersections, expected)
# 3d intersection through edge only
point1 = [0.5, 0, 0] # edge point
point2 = [0.5, 0.5, -0.5] # outside simplex
expected = np.array([[0.5, 0., 0.]])
intersections = self.simplex.line_intersection(point1, point2)
self.assertArrayAlmostEqual(intersections, expected)
# coplanar to face (no intersection)
point1 = [-1, 2]
point2 = [0, 0]
expected = np.array([])
intersections = s.line_intersection(point1, point2)
self.assertArrayAlmostEqual(intersections, expected)
# coplanar to face (with intersection line)
point1 = [0, 2] # simplex point
point2 = [1, 0]
expected = np.array([[1, 0],
[0, 2]])
intersections = s.line_intersection(point1, point2)
self.assertArrayAlmostEqual(intersections, expected)
# coplanar to face (with intersection points)
point1 = [0.1, 2]
point2 = [1.1, 0]
expected = np.array([[1.08, 0.04],
[0.12, 1.96]])
intersections = s.line_intersection(point1, point2)
self.assertArrayAlmostEqual(intersections, expected)
if __name__ == "__main__":
import unittest
unittest.main()
|
#!/usr/bin/env python
# coding=utf-8
from __future__ import print_function # In case we're running with python2
import argparse
import os
import requests
import re
import sys
import pystache
import random
import shutil
FORMATIONS = [
([u'ailier droit', u'ailier gauche', u'arrière droit',
u'arrière gauche', u'attaquant', u'attaquant',
u'défenseur central', u'défenseur central',
u'gardien', u'milieu de terrain', u'milieu de terrain'], 442),
([u'arrière droit', u'arrière gauche', u'attaquant', u'attaquant',
u'attaquant', u'défenseur central', u'défenseur central', u'gardien',
u'milieu de terrain', u'milieu de terrain', u'milieu de terrain'], 433),
([u'ailier droit', u'ailier gauche', u'arrière droit', u'arrière gauche',
u'attaquant', u'défenseur central', u'défenseur central', u'gardien',
u'milieu de terrain', u'milieu de terrain', u'milieu de terrain'], 451),
([u'arrière droit', u'arrière gauche', u'attaquant', u'défenseur central',
u'défenseur central', u'défenseur central', u'gardien',
u'milieu de terrain', u'milieu de terrain', u'milieu droit',
u'milieu gauche'], 541)
]
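# Each entry above pairs the sorted (French) position names of an
# eleven-player line-up with its formation code (442, 433, 451 or 541);
# the __main__ block below compares the sorted positions read from the
# spreadsheet against these lists to pick the formation.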
class Sheet():
def __init__(self, key):
self.__endpoint = 'https://spreadsheets.google.com'
self.__key = key
self.__data = list()
try:
path = '/feeds/worksheets/{key}/public/basic?alt=json'.format(
key=key)
for entry in self.__requestData(path)['feed']['entry']:
if entry['title']['$t'] != 'Feuille 1':
continue
path = '/feeds/list/{key}/{sheetId}/public/values?alt=json'\
.format(key=key,
sheetId=entry['link'][len(entry['link']) - 1]
['href'].split('/').pop())
self.__setData(self.__formatData([
{key[4:]: value['$t']
for key, value in entry.items()
if key[:4] == 'gsx$'}
for entry in self.__requestData(path)['feed']['entry']]))
except requests.exceptions.RequestException as e:
print(e, file=sys.stderr)
sys.exit(1)
def __requestData(self, path):
r = requests.get(self.__endpoint + path)
if r.status_code == 200:
return r.json()
raise requests.exceptions.RequestException(
"Seems we can't find {0}".format(self.__key))
def __setData(self, data):
self.__data = data
def __formatData(self, data):
def getOrFalse(d, k):
return len(d[k]) > 0 and dict(value=d[k].encode('utf-8')) or False
def addNBSPs(s):
            # French typography: use a non-breaking space before '?', ':' and '!'
            for char in ['?', ':', '!']:
                s = s.replace(' {0}'.format(char), u'\u00a0{0}'.format(char))
return s
return [dict(
id=int(d['id']),
firstname=d['prenom'].encode('utf-8'),
lastname=d['nom'].encode('utf-8'),
place=d['poste'].encode('utf-8'),
team=d['equipe'].encode('utf-8'),
description=addNBSPs(d['description']).encode('utf-8'),
picture=d['photo'].encode('utf-8')
) for d in data]
def getData(self):
return self.__data
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('key', metavar='key', type=str)
parser.add_argument('--dest', type=str)
parser.add_argument('--src', type=str)
args = parser.parse_args()
srcDir = os.path.dirname(os.path.realpath(__file__))
destDir = os.path.join(srcDir, 'dist')
srcDir = os.path.join(srcDir, 'src')
if args.src is not None:
srcDir = os.path.realpath(args.src)
if args.dest is not None:
destDir = os.path.realpath(args.dest)
if not os.path.isdir(destDir):
os.mkdir(destDir)
print('Writing {0}...'.format(os.path.join(destDir, 'index.html')))
with open(os.path.join(destDir, 'index.html'), 'w') as f:
with open(os.path.join(srcDir, 'template.html'), 'r') as template:
data = Sheet(args.key).getData()
formation = 442
places = sorted([d['place'].decode('utf-8').lower() for d in data])
for FORMATION in FORMATIONS:
intersect = [x for x, y in zip(FORMATION[0], places) if x == y]
if len(intersect) == len(places):
formation = FORMATION[1]
break
style = ''
with open(os.path.join(srcDir, 'style.css')) as styleF:
style = styleF.read()
script = ''
with open(os.path.join(srcDir, 'script.js')) as scriptF:
script = scriptF.read()
data = dict(joueurs=data,
formation='f{0}'.format(str(formation)),
style=style,
script=script)
f.write(pystache.render(template.read(), data))
print('\t[OK]')
|
from __future__ import print_function
from datetime import datetime
from django.utils import timezone
from gc_apps.gis_shapefiles.models import ShapefileInfo, WorldMapShapefileLayerInfo
from gc_apps.gis_tabular.models import TabularFileInfo
from gc_apps.gis_tabular.models import WorldMapLatLngInfo, WorldMapJoinLayerInfo
from django.core.mail import send_mail
#from django.template.loader import render_to_string
from django.conf import settings
from msg_util import msg, msgt
DEFAULT_STALE_THREE_HOURS = 3 * 60 * 60 # 3 hours, in seconds
STALE_AGE_TWO_DAYS = 2 * 24 * 60 * 60 # 48 hours, in seconds
DEFAULT_STALE_AGE = DEFAULT_STALE_THREE_HOURS
class StaleDataRemover(object):
"""Convenience class for removing old objects"""
def __init__(self):
self.num_objects_checked = 0
self.num_objects_removed = 0
self.message_lines = []
def add_message_line(self, mline, prepend=False):
if mline:
msg(mline)
if prepend:
self.message_lines.insert(0, mline)
else:
self.message_lines.append(mline)
def check_for_stale_objects(self, MODEL_CLASS, stale_age_in_seconds):
"""
        Iterate over all objects of MODEL_CLASS (e.g. WorldMapLatLngInfo), remove the stale ones, and count how many were checked and removed
"""
current_time = timezone.now()
self.num_objects_checked = 0
self.num_objects_removed = 0
for obj_info in MODEL_CLASS.objects.all():
self.num_objects_checked += 1
if self.remove_if_stale(obj_info, stale_age_in_seconds, current_time):
self.num_objects_removed += 1
def remove_stale_worldmap_data(self, stale_age_in_seconds=DEFAULT_STALE_AGE):
"""
        Remove old map data, i.e. the objects storing WorldMap links
"""
msgt("Remove stale WorldMap data")
for CLASS_TYPE in (WorldMapLatLngInfo, WorldMapJoinLayerInfo, WorldMapShapefileLayerInfo):
self.add_message_line('checking: %s' % CLASS_TYPE.__name__)
self.check_for_stale_objects(CLASS_TYPE, stale_age_in_seconds)
self.add_message_line("# of WorldMap objects Checked: %s" % self.num_objects_checked)
self.add_message_line("# of WorldMap objects Removed: %s" % self.num_objects_removed)
def remove_stale_dataverse_data(self, stale_age_in_seconds=STALE_AGE_TWO_DAYS):
"""
Here we're removing the metadata and files from dataverse
"""
msgt("Remove stale Dataverse data")
for CLASS_TYPE in (TabularFileInfo, ShapefileInfo):
self.add_message_line('checking: %s' % CLASS_TYPE.__name__)
self.check_for_stale_objects(CLASS_TYPE, stale_age_in_seconds)
self.add_message_line("# of Dataverse objects Checked: %s" % self.num_objects_checked)
self.add_message_line("# of Dataverse objects Removed: %s" % self.num_objects_removed)
def remove_if_stale(self, info_object, stale_age_in_seconds, current_time=None):
"""
If the object has a "modified" timestamp
older than "stale_age_in_seconds", then delete it
"""
assert hasattr(info_object, 'modified'),\
'The info_object must have "modified" date'
# Get the current time, if not already given
#
if not current_time:
current_time = timezone.now()
# Pull the modification time, setting timezone info to None
#
mod_time = info_object.modified
if hasattr(mod_time, 'tzinfo'):
mod_time = mod_time.replace(tzinfo=None)
# Is this object beyond its time limit?
#
time_diff = (current_time - mod_time).total_seconds()
if time_diff > stale_age_in_seconds:
# Yes! delete it
msg(' > Removing: %s' % info_object)
info_object.delete()
return True
else:
return False
def send_email_notice(self):
msgt('Send email notice!')
subject = 'GeoConnect: Clear stale data (%s)' % timezone.now()
self.add_message_line('This is an email notice from Geoconnect',\
prepend=True)
self.add_message_line('-' *30, prepend=True)
self.add_message_line('-' *30)
self.add_message_line('(end of message)')
if len(settings.ADMINS)==0:
msg('No one to email! (no one in settings.ADMINS)')
return
to_addresses = map(lambda x: x[1], settings.ADMINS)
#email_msg = render_to_string('task_scripts/prune_scratch_directories_email.txt', d)
#msg(subject)
#msg(email_msg)
from_email = to_addresses[0]
email_msg = '\n'.join(self.message_lines)
send_mail(subject, email_msg, from_email, to_addresses, fail_silently=False)
msg('email sent to: %s' % to_addresses)
"""
from gc_apps.geo_utils.stale_data_remover import StaleDataRemover
sdr = StaleDataRemover()
sdr.remove_stale_worldmap_data()
sdr.send_email_notice()
#sdr.remove_stale_dataverse_data()
test_email = '[email protected]'
send_mail('test geoconnect', 'did it work', test_email, [test_email])
"""
|
import datetime
def _format_from_ms(t):
if t >= 1 * 60 * 60 * 1000:
return str(datetime.timedelta(seconds=(t / 1000)))
else:
mins = (t / 1000) / 60
secs = (t / 1000) % 60
return '{:02d}:{:02d}'.format(mins, secs)
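# Example (illustrative, not part of the original source):
# _format_from_ms(272000) returns '04:32' under Python 2 integer division,
# while durations of an hour or more fall back to datetime.timedelta
# formatting, e.g. _format_from_ms(3665000) -> '1:01:05'.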
class Album(object):
class Track(object):
@staticmethod
def init_from_spotify(sp):
self = Album.Track()
self.name = sp['name']
self.track_number = sp['track_number']
self.duration_ms = sp['duration_ms']
self.spotify_id = sp['id']
self.play_link = "/play/{}/{}".format(sp['type'], sp['id'])
self.duration_str = _format_from_ms(self.duration_ms)
return self
@staticmethod
def init_from_spotify(sp):
self = Album()
self.name = sp['name']
self.spotify_id = sp['id']
self.release_date = sp['release_date']
self.play_link = "/play/{}/{}".format(sp['type'], sp['id'])
if len(sp['images']) == 0:
self.image_uri = None
else:
self.image_uri = sp['images'][len(sp['images']) / 2]['url']
self.tracks = []
if sp.has_key('tracks'):
for track in sp['tracks']['items']:
self.tracks.append(Album.Track.init_from_spotify(track))
self.duration_ms = reduce(lambda x,y: x+y, [x.duration_ms for x in self.tracks])
self.duration_str = _format_from_ms(self.duration_ms)
return self
|
"""Forest of trees-based ensemble methods
Those methods include random forests and extremely randomized trees.
The module structure is the following:
- The ``BaseForest`` base class implements a common ``fit`` method for all
the estimators in the module. The ``fit`` method of the base ``Forest``
class calls the ``fit`` method of each sub-estimator on random samples
(with replacement, a.k.a. bootstrap) of the training set.
The init of the sub-estimator is further delegated to the
``BaseEnsemble`` constructor.
- The ``ForestClassifier`` and ``ForestRegressor`` base classes further
implement the prediction logic by computing an average of the predicted
outcomes of the sub-estimators.
- The ``RandomForestClassifier`` and ``RandomForestRegressor`` derived
classes provide the user with concrete implementations of
the forest ensemble method using classical, deterministic
``DecisionTreeClassifier`` and ``DecisionTreeRegressor`` as
sub-estimator implementations.
- The ``ExtraTreesClassifier`` and ``ExtraTreesRegressor`` derived
classes provide the user with concrete implementations of the
forest ensemble method using the extremely randomized trees
``ExtraTreeClassifier`` and ``ExtraTreeRegressor`` as
sub-estimator implementations.
Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <[email protected]>
# Brian Holt <[email protected]>
# Joly Arnaud <[email protected]>
# Fares Hedayati <[email protected]>
#
# License: BSD 3 clause
from __future__ import division
import warnings
from warnings import warn
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from scipy.sparse import hstack as sparse_hstack
from ..base import ClassifierMixin, RegressorMixin
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..metrics import r2_score
from ..preprocessing import OneHotEncoder
from ..tree import (DecisionTreeClassifier, DecisionTreeRegressor,
ExtraTreeClassifier, ExtraTreeRegressor)
from ..tree._tree import DTYPE, DOUBLE
from ..utils import check_random_state, check_array, compute_sample_weight
from ..exceptions import DataConversionWarning, NotFittedError
from .base import BaseEnsemble, _partition_estimators
from ..utils.fixes import bincount, parallel_helper
from ..utils.multiclass import check_classification_targets
__all__ = ["RandomForestClassifier",
"RandomForestRegressor",
"ExtraTreesClassifier",
"ExtraTreesRegressor",
"RandomTreesEmbedding"]
MAX_INT = np.iinfo(np.int32).max
def _generate_sample_indices(random_state, n_samples):
"""Private function used to _parallel_build_trees function."""
random_instance = check_random_state(random_state)
sample_indices = random_instance.randint(0, n_samples, n_samples)
return sample_indices
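# Illustrative note: the indices above form a bootstrap sample (n_samples
# draws with replacement); repeated indices later become integer sample
# weights in _parallel_build_trees via bincount.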
def _generate_unsampled_indices(random_state, n_samples):
"""Private function used to forest._set_oob_score function."""
sample_indices = _generate_sample_indices(random_state, n_samples)
sample_counts = bincount(sample_indices, minlength=n_samples)
unsampled_mask = sample_counts == 0
indices_range = np.arange(n_samples)
unsampled_indices = indices_range[unsampled_mask]
return unsampled_indices
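# Illustrative note: the unsampled ("out-of-bag") indices are those that never
# appear in the bootstrap sample; on average roughly 1/e (~37%) of the samples
# are out of bag for each tree.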
def _parallel_build_trees(tree, forest, X, y, sample_weight, tree_idx, n_trees,
verbose=0, class_weight=None):
"""Private function used to fit a single tree in parallel."""
if verbose > 1:
print("building tree %d of %d" % (tree_idx + 1, n_trees))
if forest.bootstrap:
n_samples = X.shape[0]
if sample_weight is None:
curr_sample_weight = np.ones((n_samples,), dtype=np.float64)
else:
curr_sample_weight = sample_weight.copy()
indices = _generate_sample_indices(tree.random_state, n_samples)
sample_counts = bincount(indices, minlength=n_samples)
curr_sample_weight *= sample_counts
if class_weight == 'subsample':
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
curr_sample_weight *= compute_sample_weight('auto', y, indices)
elif class_weight == 'balanced_subsample':
curr_sample_weight *= compute_sample_weight('balanced', y, indices)
tree.fit(X, y, sample_weight=curr_sample_weight, check_input=False)
else:
tree.fit(X, y, sample_weight=sample_weight, check_input=False)
return tree
class BaseForest(six.with_metaclass(ABCMeta, BaseEnsemble,
_LearntSelectorMixin)):
"""Base class for forests of trees.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(BaseForest, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params)
self.bootstrap = bootstrap
self.oob_score = oob_score
self.n_jobs = n_jobs
self.random_state = random_state
self.verbose = verbose
self.warm_start = warm_start
self.class_weight = class_weight
def apply(self, X):
"""Apply trees in the forest to X, return leaf indices.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
X_leaves : array_like, shape = [n_samples, n_estimators]
For each datapoint x in X and for each tree in the forest,
return the index of the leaf x ends up in.
"""
X = self._validate_X_predict(X)
results = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(parallel_helper)(tree, 'apply', X, check_input=False)
for tree in self.estimators_)
return np.array(results).T
def decision_path(self, X):
"""Return the decision path in the forest
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
indicator : sparse csr array, shape = [n_samples, n_nodes]
            Return a node indicator matrix where non-zero elements
            indicate that the sample goes through the corresponding nodes.
n_nodes_ptr : array of size (n_estimators + 1, )
The columns from indicator[n_nodes_ptr[i]:n_nodes_ptr[i+1]]
gives the indicator value for the i-th estimator.
"""
X = self._validate_X_predict(X)
indicators = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(parallel_helper)(tree, 'decision_path', X,
check_input=False)
for tree in self.estimators_)
n_nodes = [0]
n_nodes.extend([i.shape[1] for i in indicators])
n_nodes_ptr = np.array(n_nodes).cumsum()
return sparse_hstack(indicators).tocsr(), n_nodes_ptr
def fit(self, X, y, sample_weight=None):
"""Build a forest of trees from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
Returns
-------
self : object
Returns self.
"""
# Validate or convert input data
X = check_array(X, accept_sparse="csc", dtype=DTYPE)
y = check_array(y, accept_sparse='csc', ensure_2d=False, dtype=None)
if issparse(X):
            # Pre-sort indices to avoid having each individual tree of the
            # ensemble sort the indices.
X.sort_indices()
# Remap output
n_samples, self.n_features_ = X.shape
y = np.atleast_1d(y)
if y.ndim == 2 and y.shape[1] == 1:
warn("A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples,), for example using ravel().",
DataConversionWarning, stacklevel=2)
if y.ndim == 1:
            # reshape is necessary to preserve the data contiguity, unlike
            # [:, np.newaxis], which does not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
y, expanded_class_weight = self._validate_y_class_weight(y)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Check parameters
self._validate_estimator()
if not self.bootstrap and self.oob_score:
raise ValueError("Out of bag estimation only available"
" if bootstrap=True")
random_state = check_random_state(self.random_state)
if not self.warm_start:
# Free allocated memory, if any
self.estimators_ = []
n_more_estimators = self.n_estimators - len(self.estimators_)
if n_more_estimators < 0:
raise ValueError('n_estimators=%d must be larger or equal to '
'len(estimators_)=%d when warm_start==True'
% (self.n_estimators, len(self.estimators_)))
elif n_more_estimators == 0:
warn("Warm-start fitting without increasing n_estimators does not "
"fit new trees.")
else:
if self.warm_start and len(self.estimators_) > 0:
# We draw from the random state to get the random state we
# would have got if we hadn't used a warm_start.
random_state.randint(MAX_INT, size=len(self.estimators_))
trees = []
for i in range(n_more_estimators):
tree = self._make_estimator(append=False)
tree.set_params(random_state=random_state.randint(MAX_INT))
trees.append(tree)
# Parallel loop: we use the threading backend as the Cython code
# for fitting the trees is internally releasing the Python GIL
# making threading always more efficient than multiprocessing in
# that case.
trees = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_build_trees)(
t, self, X, y, sample_weight, i, len(trees),
verbose=self.verbose, class_weight=self.class_weight)
for i, t in enumerate(trees))
# Collect newly grown trees
self.estimators_.extend(trees)
if self.oob_score:
self._set_oob_score(X, y)
# Decapsulate classes_ attributes
if hasattr(self, "classes_") and self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
@abstractmethod
def _set_oob_score(self, X, y):
"""Calculate out of bag predictions and score."""
def _validate_y_class_weight(self, y):
# Default implementation
return y, None
def _validate_X_predict(self, X):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
return self.estimators_[0]._validate_X_predict(X, check_input=True)
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, "
"call `fit` before `feature_importances_`.")
all_importances = Parallel(n_jobs=self.n_jobs,
backend="threading")(
delayed(getattr)(tree, 'feature_importances_')
for tree in self.estimators_)
return sum(all_importances) / len(self.estimators_)
class ForestClassifier(six.with_metaclass(ABCMeta, BaseForest,
ClassifierMixin)):
"""Base class for forest of trees-based classifiers.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(ForestClassifier, self).__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
def _set_oob_score(self, X, y):
"""Compute out-of-bag score"""
X = check_array(X, dtype=DTYPE, accept_sparse='csr')
n_classes_ = self.n_classes_
n_samples = y.shape[0]
oob_decision_function = []
oob_score = 0.0
predictions = []
for k in range(self.n_outputs_):
predictions.append(np.zeros((n_samples, n_classes_[k])))
for estimator in self.estimators_:
unsampled_indices = _generate_unsampled_indices(
estimator.random_state, n_samples)
p_estimator = estimator.predict_proba(X[unsampled_indices, :],
check_input=False)
if self.n_outputs_ == 1:
p_estimator = [p_estimator]
for k in range(self.n_outputs_):
predictions[k][unsampled_indices, :] += p_estimator[k]
for k in range(self.n_outputs_):
if (predictions[k].sum(axis=1) == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates.")
decision = (predictions[k] /
predictions[k].sum(axis=1)[:, np.newaxis])
oob_decision_function.append(decision)
oob_score += np.mean(y[:, k] ==
np.argmax(predictions[k], axis=1), axis=0)
if self.n_outputs_ == 1:
self.oob_decision_function_ = oob_decision_function[0]
else:
self.oob_decision_function_ = oob_decision_function
self.oob_score_ = oob_score / self.n_outputs_
def _validate_y_class_weight(self, y):
check_classification_targets(y)
y = np.copy(y)
expanded_class_weight = None
if self.class_weight is not None:
y_original = np.copy(y)
self.classes_ = []
self.n_classes_ = []
y_store_unique_indices = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_store_unique_indices[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_store_unique_indices
if self.class_weight is not None:
valid_presets = ('auto', 'balanced', 'subsample', 'balanced_subsample')
if isinstance(self.class_weight, six.string_types):
if self.class_weight not in valid_presets:
raise ValueError('Valid presets for class_weight include '
'"balanced" and "balanced_subsample". Given "%s".'
% self.class_weight)
if self.class_weight == "subsample":
warn("class_weight='subsample' is deprecated in 0.17 and"
"will be removed in 0.19. It was replaced by "
"class_weight='balanced_subsample' using the balanced"
"strategy.", DeprecationWarning)
if self.warm_start:
warn('class_weight presets "balanced" or "balanced_subsample" are '
'not recommended for warm_start if the fitted data '
'differs from the full dataset. In order to use '
'"balanced" weights, use compute_class_weight("balanced", '
'classes, y). In place of y you can use a large '
'enough sample of the full training set target to '
'properly estimate the class frequency '
'distributions. Pass the resulting weights as the '
'class_weight parameter.')
if (self.class_weight not in ['subsample', 'balanced_subsample'] or
not self.bootstrap):
if self.class_weight == 'subsample':
class_weight = 'auto'
elif self.class_weight == "balanced_subsample":
class_weight = "balanced"
else:
class_weight = self.class_weight
with warnings.catch_warnings():
if class_weight == "auto":
warnings.simplefilter('ignore', DeprecationWarning)
expanded_class_weight = compute_sample_weight(class_weight,
y_original)
return y, expanded_class_weight
def predict(self, X):
"""Predict class for X.
The predicted class of an input sample is a vote by the trees in
the forest, weighted by their probability estimates. That is,
the predicted class is the one with highest mean probability
estimate across the trees.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
n_samples = proba[0].shape[0]
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(np.argmax(proba[k],
axis=1),
axis=0)
return predictions
def predict_proba(self, X):
"""Predict class probabilities for X.
The predicted class probabilities of an input sample are computed as
the mean predicted class probabilities of the trees in the forest. The
class probability of a single tree is the fraction of samples of the same
class in a leaf.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
# Check data
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
# Parallel loop
all_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose,
backend="threading")(
delayed(parallel_helper)(e, 'predict_proba', X,
check_input=False)
for e in self.estimators_)
# Reduce
proba = all_proba[0]
if self.n_outputs_ == 1:
for j in range(1, len(all_proba)):
proba += all_proba[j]
proba /= len(self.estimators_)
else:
for j in range(1, len(all_proba)):
for k in range(self.n_outputs_):
proba[k] += all_proba[j][k]
for k in range(self.n_outputs_):
proba[k] /= self.n_estimators
return proba
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
The predicted class log-probabilities of an input sample are computed as
the log of the mean predicted class probabilities of the trees in the
forest.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
class ForestRegressor(six.with_metaclass(ABCMeta, BaseForest, RegressorMixin)):
"""Base class for forest of trees-based regressors.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(ForestRegressor, self).__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
def predict(self, X):
"""Predict regression target for X.
The predicted regression target of an input sample is computed as the
mean predicted regression targets of the trees in the forest.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted values.
"""
# Check data
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
# Parallel loop
all_y_hat = Parallel(n_jobs=n_jobs, verbose=self.verbose,
backend="threading")(
delayed(parallel_helper)(e, 'predict', X, check_input=False)
for e in self.estimators_)
# Reduce
y_hat = sum(all_y_hat) / len(self.estimators_)
return y_hat
def _set_oob_score(self, X, y):
"""Compute out-of-bag scores"""
X = check_array(X, dtype=DTYPE, accept_sparse='csr')
n_samples = y.shape[0]
predictions = np.zeros((n_samples, self.n_outputs_))
n_predictions = np.zeros((n_samples, self.n_outputs_))
for estimator in self.estimators_:
unsampled_indices = _generate_unsampled_indices(
estimator.random_state, n_samples)
p_estimator = estimator.predict(
X[unsampled_indices, :], check_input=False)
if self.n_outputs_ == 1:
p_estimator = p_estimator[:, np.newaxis]
predictions[unsampled_indices, :] += p_estimator
n_predictions[unsampled_indices, :] += 1
if (n_predictions == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates.")
n_predictions[n_predictions == 0] = 1
predictions /= n_predictions
self.oob_prediction_ = predictions
if self.n_outputs_ == 1:
self.oob_prediction_ = \
self.oob_prediction_.reshape((n_samples, ))
self.oob_score_ = 0.0
for k in range(self.n_outputs_):
self.oob_score_ += r2_score(y[:, k],
predictions[:, k])
self.oob_score_ /= self.n_outputs_
class RandomForestClassifier(ForestClassifier):
"""A random forest classifier.
A random forest is a meta estimator that fits a number of decision tree
classifiers on various sub-samples of the dataset and uses averaging to
improve the predictive accuracy and control over-fitting.
The sub-sample size is always the same as the original
input sample size but the samples are drawn with replacement if
`bootstrap=True` (default).
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)` (same as "auto").
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
oob_score : bool, optional (default=False)
Whether to use out-of-bag samples to estimate
the generalization accuracy.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
class_weight : dict, list of dicts, "balanced",
"balanced_subsample" or None, optional (default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
The "balanced_subsample" mode is the same as "balanced" except that
weights are computed based on the bootstrap sample for every tree
grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeClassifier, ExtraTreesClassifier
"""
def __init__(self,
n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_split=1e-7,
bootstrap=True,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(RandomForestClassifier, self).__init__(
base_estimator=DecisionTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "min_impurity_split",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
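# Editor's illustrative sketch (not part of the original module): a minimal
# end-to-end use of the classifier defined above on synthetic data. It assumes
# sklearn.datasets is importable in this environment; X_demo/y_demo and
# demo_clf are hypothetical names.
if __name__ == "__main__":
    from sklearn.datasets import make_classification
    X_demo, y_demo = make_classification(n_samples=200, n_features=8,
                                         n_informative=4, random_state=0)
    demo_clf = RandomForestClassifier(n_estimators=25, oob_score=True,
                                      random_state=0)
    demo_clf.fit(X_demo, y_demo)
    print(demo_clf.predict(X_demo[:3]))        # predicted class labels
    print(demo_clf.predict_proba(X_demo[:3]))  # tree-averaged class probabilities
    print(demo_clf.oob_score_)                 # out-of-bag accuracy estimate
    print(demo_clf.feature_importances_)       # averaged per-tree importances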
class RandomForestRegressor(ForestRegressor):
"""A random forest regressor.
A random forest is a meta estimator that fits a number of classifying
decision trees on various sub-samples of the dataset and uses averaging
to improve the predictive accuracy and control over-fitting.
The sub-sample size is always the same as the original
input sample size but the samples are drawn with replacement if
`bootstrap=True` (default).
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. Supported criteria
are "mse" for the mean squared error, which is equal to variance
reduction as feature selection criterion, and "mae" for the mean
absolute error.
.. versionadded:: 0.18
Mean Absolute Error (MAE) criterion.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
oob_score : bool, optional (default=False)
Whether to use out-of-bag samples to estimate
the R^2 on unseen data.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeRegressor, ExtraTreesRegressor
"""
def __init__(self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_split=1e-7,
bootstrap=True,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(RandomForestRegressor, self).__init__(
base_estimator=DecisionTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "min_impurity_split",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
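# Editor's illustrative sketch (not part of the original module): the regressor
# averages per-tree predictions, and with oob_score=True it exposes the
# out-of-bag R^2 after fitting. Synthetic data via sklearn.datasets is an
# assumption of this example; demo names are hypothetical.
if __name__ == "__main__":
    from sklearn.datasets import make_regression
    X_demo, y_demo = make_regression(n_samples=200, n_features=8, random_state=0)
    demo_reg = RandomForestRegressor(n_estimators=25, oob_score=True,
                                     random_state=0)
    demo_reg.fit(X_demo, y_demo)
    print(demo_reg.predict(X_demo[:3]))   # mean of the 25 per-tree predictions
    print(demo_reg.oob_score_)            # out-of-bag R^2 estimate
    print(demo_reg.oob_prediction_[:3])   # out-of-bag prediction per sample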
class ExtraTreesClassifier(ForestClassifier):
"""An extra-trees classifier.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
of the dataset and uses averaging to improve the predictive accuracy
and control over-fitting.
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
oob_score : bool, optional (default=False)
Whether to use out-of-bag samples to estimate
the generalization accuracy.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
class_weight : dict, list of dicts, "balanced", "balanced_subsample" or None, optional (default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
The "balanced_subsample" mode is the same as "balanced" except that weights are
computed based on the bootstrap sample for every tree grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeClassifier : Base classifier for this ensemble.
RandomForestClassifier : Ensemble Classifier based on trees with optimal
splits.
"""
def __init__(self,
n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_split=1e-7,
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(ExtraTreesClassifier, self).__init__(
base_estimator=ExtraTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "min_impurity_split",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
class ExtraTreesRegressor(ForestRegressor):
"""An extra-trees regressor.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
of the dataset and uses averaging to improve the predictive accuracy
and control over-fitting.
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. Supported criteria
are "mse" for the mean squared error, which is equal to variance
reduction as feature selection criterion, and "mae" for the mean
absolute error.
.. versionadded:: 0.18
Mean Absolute Error (MAE) criterion.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
oob_score : bool, optional (default=False)
Whether to use out-of-bag samples to estimate the R^2 on unseen data.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features.
n_outputs_ : int
The number of outputs.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeRegressor : Base estimator for this ensemble.
RandomForestRegressor : Ensemble regressor using trees with optimal splits.
"""
def __init__(self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_split=1e-7,
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(ExtraTreesRegressor, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "min_impurity_split",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
class RandomTreesEmbedding(BaseForest):
"""An ensemble of totally random trees.
An unsupervised transformation of a dataset to a high-dimensional
sparse representation. A datapoint is coded according to which leaf of
each tree it is sorted into. Using a one-hot encoding of the leaves,
this leads to a binary coding with as many ones as there are trees in
the forest.
The dimensionality of the resulting representation is
``n_out <= n_estimators * max_leaf_nodes``. If ``max_leaf_nodes == None``,
the number of leaf nodes is at most ``n_estimators * 2 ** max_depth``.
Read more in the :ref:`User Guide <random_trees_embedding>`.
Parameters
----------
n_estimators : integer, optional (default=10)
Number of trees in the forest.
max_depth : integer, optional (default=5)
The maximum depth of each tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` is the minimum
number of samples for each split.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` is the minimum
number of samples for each node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
sparse_output : bool, optional (default=True)
Whether to return a sparse CSR matrix (the default behavior), or a dense
array compatible with dense pipeline operators.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
.. [2] Moosmann, F. and Triggs, B. and Jurie, F. "Fast discriminative
visual codebooks using randomized clustering forests"
NIPS 2007
"""
def __init__(self,
n_estimators=10,
max_depth=5,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_leaf_nodes=None,
min_impurity_split=1e-7,
sparse_output=True,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(RandomTreesEmbedding, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "min_impurity_split",
"random_state"),
bootstrap=False,
oob_score=False,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = 'mse'
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = 1
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
self.sparse_output = sparse_output
def _set_oob_score(self, X, y):
raise NotImplementedError("OOB score not supported by tree embedding")
def fit(self, X, y=None, sample_weight=None):
"""Fit estimator.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
The input samples. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csc_matrix`` for maximum efficiency.
Returns
-------
self : object
Returns self.
"""
self.fit_transform(X, y, sample_weight=sample_weight)
return self
def fit_transform(self, X, y=None, sample_weight=None):
"""Fit estimator and transform dataset.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Input data used to build forests. Use ``dtype=np.float32`` for
maximum efficiency.
Returns
-------
X_transformed : sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
# ensure_2d=False because there are actually unit tests checking that we
# fail for 1d input.
X = check_array(X, accept_sparse=['csc'], ensure_2d=False)
if issparse(X):
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
rnd = check_random_state(self.random_state)
y = rnd.uniform(size=X.shape[0])
super(RandomTreesEmbedding, self).fit(X, y,
sample_weight=sample_weight)
self.one_hot_encoder_ = OneHotEncoder(sparse=self.sparse_output)
return self.one_hot_encoder_.fit_transform(self.apply(X))
def transform(self, X):
"""Transform dataset.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Input data to be transformed. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csr_matrix`` for maximum efficiency.
Returns
-------
X_transformed : sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
return self.one_hot_encoder_.transform(self.apply(X))
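# Editor's illustrative sketch (not part of the original module): each sample
# is mapped to the one-hot encoding of the leaf it reaches in every tree, so
# the transformed matrix has exactly one non-zero per (sample, tree) pair.
# The demo data and names below are hypothetical.
if __name__ == "__main__":
    import numpy as np
    X_demo = np.random.RandomState(0).rand(50, 4)
    demo_embedder = RandomTreesEmbedding(n_estimators=5, max_depth=3,
                                         random_state=0)
    X_sparse = demo_embedder.fit_transform(X_demo)
    # at most n_estimators * 2 ** max_depth = 40 columns, 5 non-zeros per row
    print(X_sparse.shape, X_sparse.nnz)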
|
from numbers import Number
import torch
from torch.distributions import constraints
from torch.distributions.dirichlet import Dirichlet
from torch.distributions.exp_family import ExponentialFamily
from torch.distributions.utils import broadcast_all
class Beta(ExponentialFamily):
r"""
Beta distribution parameterized by :attr:`concentration1` and :attr:`concentration0`.
Example::
>>> m = Beta(torch.tensor([0.5]), torch.tensor([0.5]))
>>> m.sample() # Beta distributed with concentration concentration1 and concentration0
tensor([ 0.1046])
Args:
concentration1 (float or Tensor): 1st concentration parameter of the distribution
(often referred to as alpha)
concentration0 (float or Tensor): 2nd concentration parameter of the distribution
(often referred to as beta)
"""
arg_constraints = {'concentration1': constraints.positive, 'concentration0': constraints.positive}
support = constraints.unit_interval
has_rsample = True
def __init__(self, concentration1, concentration0, validate_args=None):
if isinstance(concentration1, Number) and isinstance(concentration0, Number):
concentration1_concentration0 = torch.tensor([float(concentration1), float(concentration0)])
else:
concentration1, concentration0 = broadcast_all(concentration1, concentration0)
concentration1_concentration0 = torch.stack([concentration1, concentration0], -1)
self._dirichlet = Dirichlet(concentration1_concentration0)
super(Beta, self).__init__(self._dirichlet._batch_shape, validate_args=validate_args)
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(Beta, _instance)
batch_shape = torch.Size(batch_shape)
new._dirichlet = self._dirichlet.expand(batch_shape)
super(Beta, new).__init__(batch_shape, validate_args=False)
new._validate_args = self._validate_args
return new
@property
def mean(self):
return self.concentration1 / (self.concentration1 + self.concentration0)
@property
def variance(self):
total = self.concentration1 + self.concentration0
return (self.concentration1 * self.concentration0 /
(total.pow(2) * (total + 1)))
def rsample(self, sample_shape=()):
value = self._dirichlet.rsample(sample_shape).select(-1, 0)
if isinstance(value, Number):
value = self._dirichlet.concentration.new_tensor(value)
return value
def log_prob(self, value):
if self._validate_args:
self._validate_sample(value)
heads_tails = torch.stack([value, 1.0 - value], -1)
return self._dirichlet.log_prob(heads_tails)
def entropy(self):
return self._dirichlet.entropy()
@property
def concentration1(self):
result = self._dirichlet.concentration[..., 0]
if isinstance(result, Number):
return torch.tensor([result])
else:
return result
@property
def concentration0(self):
result = self._dirichlet.concentration[..., 1]
if isinstance(result, Number):
return torch.tensor([result])
else:
return result
@property
def _natural_params(self):
return (self.concentration1, self.concentration0)
def _log_normalizer(self, x, y):
return torch.lgamma(x) + torch.lgamma(y) - torch.lgamma(x + y)
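# Editor's illustrative sketch (not part of the original module): rsample()
# draws a reparameterized (differentiable) value in (0, 1) via the underlying
# two-dimensional Dirichlet, and log_prob() evaluates the Beta density there.
if __name__ == "__main__":
    m = Beta(torch.tensor([2.0]), torch.tensor([5.0]))
    x = m.rsample()
    print(x, m.log_prob(x))      # sample and its log-density
    print(m.mean, m.variance)    # 2/7 and 2*5/(7**2 * 8)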
|
__author__ = 'Wayne'
from django import forms
from django.contrib.auth import authenticate
from messenger.models import Officer
from django.utils.safestring import mark_safe
class LoginForm(forms.Form):
username = forms.CharField(max_length=100, label='User ID')
password = forms.CharField(widget=forms.PasswordInput())
def clean(self):
user = self.login()
if not user or not user.is_active:
raise forms.ValidationError('Sorry, that login was invalid. Please try again.')
return self.cleaned_data
def login(self):
username = self.cleaned_data.get('username')
password = self.cleaned_data.get('password')
user = authenticate(username=username, password=password)
return user
class MessageForm(forms.Form):
recipients = forms.ModelMultipleChoiceField(Officer.objects.all(),
help_text='You can select more than one person.',
widget=forms.CheckboxSelectMultiple)
content = forms.CharField(widget=forms.Textarea, help_text='Be as detailed or brief as you want. '
'Messages are always anonymous.',
label='Message')
password = forms.CharField(widget=forms.PasswordInput, help_text=mark_safe('This is optional, but will allow you to'
' continue an anonymous conversation. '
'<a href="/about">More information</a>.'),
required=False)
password_again = forms.CharField(widget=forms.PasswordInput, required=False,
help_text='Type your password again to make sure you have it right.')
def clean(self):
cleaned_data = super(MessageForm, self).clean()
password = cleaned_data.get('password')
password_again = cleaned_data.get('password_again')
if password:
if password_again:
if password != password_again:
# Only do something if both passwords do not match.
msg = "Passwords must match."
self._errors["password"] = self.error_class([msg])
self._errors["password_again"] = self.error_class([msg])
# These fields are no longer valid. Remove them from the
# cleaned data.
del cleaned_data["password"]
del cleaned_data["password_again"]
else:
# Only reached when a password was given but not repeated.
msg = "Please type your password twice to ensure correctness."
self._errors["password"] = self.error_class([msg])
self._errors["password_again"] = self.error_class([msg])
# These fields are no longer valid. Remove them from the
# cleaned data.
del cleaned_data["password"]
del cleaned_data["password_again"]
return cleaned_data
class ReplyForm(forms.Form):
content = forms.CharField(widget=forms.Textarea, help_text='Reply to this conversation. People who can view this '
'conversation will be able to see your message.',
label='Message')
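# Editor's note (illustrative; assumes a configured Django project with the
# messenger app installed, and `officer` is a hypothetical saved Officer).
# The mismatched-password path in MessageForm.clean() can be exercised as:
#
#   form = MessageForm({'recipients': [officer.pk], 'content': 'hello',
#                       'password': 'secret', 'password_again': 'oops'})
#   form.is_valid()            # False: clean() flags both password fields
#   form.errors['password']    # ["Passwords must match."]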
|
# Copyright 2015 Tianchuan Du University of Delaware
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
# WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
# MERCHANTABLITY OR NON-INFRINGEMENT.
# See the Apache 2 License for the specific language governing permissions and
# limitations under the License.
import cPickle
import gzip
import numpy
import os
import sys
import theano
from theano.tensor.shared_randomstreams import RandomStreams
import time
import theano.tensor as T
class HiddenLayer(object):
def __init__(self, rng, input, n_in, n_out, W=None, b=None,
activation=T.tanh, do_maxout = False, pool_size = 1):
""" Class for hidden layer """
self.input = input
self.n_in = n_in
self.n_out = n_out
self.activation = activation
self.type = 'fc'
if W is None:
W_values = numpy.asarray(rng.uniform(
low=-numpy.sqrt(6. / (n_in + n_out)),
high=numpy.sqrt(6. / (n_in + n_out)),
size=(n_in, n_out)), dtype=theano.config.floatX)
if self.activation == theano.tensor.nnet.sigmoid:
W_values *= 4
W = theano.shared(value=W_values, name='W', borrow=True)
if b is None:
b_values = numpy.zeros((n_out,), dtype=theano.config.floatX)
b = theano.shared(value=b_values, name='b', borrow=True)
self.W = W
self.b = b
self.delta_W = theano.shared(value = numpy.zeros((n_in,n_out),
dtype=theano.config.floatX), name='delta_W')
self.delta_b = theano.shared(value = numpy.zeros_like(self.b.get_value(borrow=True),
dtype=theano.config.floatX), name='delta_b')
lin_output = T.dot(input, self.W) + self.b
if do_maxout:
self.last_start = n_out - pool_size
self.tmp_output = lin_output[:,0:self.last_start+1:pool_size]
for i in range(1, pool_size):
cur = lin_output[:,i:self.last_start+i+1:pool_size]
self.tmp_output = T.maximum(cur, self.tmp_output)
self.output = self.activation(self.tmp_output)
else:
self.output = (lin_output if self.activation is None
else self.activation(lin_output))
# parameters of the model
self.params = [self.W, self.b]
self.delta_params = [self.delta_W, self.delta_b]
def _dropout_from_layer(theano_rng, hid_out, p):
""" p is the factor for dropping a unit """
# p=1-p because 1's indicate keep and p is prob of dropping
return theano_rng.binomial(n=1, p=1-p, size=hid_out.shape,
dtype=theano.config.floatX) * hid_out
class DropoutHiddenLayer(HiddenLayer):
def __init__(self, rng, input, n_in, n_out,
W=None, b=None, activation=T.tanh, do_maxout = False, pool_size = 1, dropout_factor=0.5):
super(DropoutHiddenLayer, self).__init__(
rng=rng, input=input, n_in=n_in, n_out=n_out, W=W, b=b,
activation=activation, do_maxout = do_maxout, pool_size = pool_size)
self.theano_rng = RandomStreams(rng.randint(2 ** 30))
self.dropout_output = _dropout_from_layer(theano_rng = self.theano_rng,
hid_out = self.output, p=dropout_factor)
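# Editor's sketch (illustrative; assumes Theano is installed in this module's
# Python 2 environment). It builds a dropout layer on a symbolic matrix and
# compiles a function returning its stochastically masked activations:
#
#   rng = numpy.random.RandomState(1234)
#   x = T.matrix('x')
#   layer = DropoutHiddenLayer(rng=rng, input=x, n_in=784, n_out=256,
#                              activation=T.nnet.sigmoid, dropout_factor=0.5)
#   f = theano.function([x], layer.dropout_output)
#   out = f(numpy.random.rand(10, 784).astype(theano.config.floatX))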
|
'''
Generate a pbs script for job submission, submit the job, be happy
'''
import glob
import sys
import os
import shutil
import time
def return_template(output_direc, ms_name, model_name, mask_name,
script_name="HI_single_channel_clean.py"):
# Emailing
#PBS -m bea
#PBS -M [email protected]
template = \
'''
#!/bin/bash
#PBS -S /bin/bash
#PBS -l pmem=1000m
#PBS -l nodes=1:ppn=12
#PBS -l walltime=7:00:00
#PBS -l epilogue=/home/ekoch/code_repos/simscript/epilogue.sh
source /home/ekoch/.bashrc
cd X1
echo "Starting at: `date`"
casa-4.4 --nologger --logfile X5 -c SCRIPT X2 X3 X4
echo "Exited with code $? at: `date`"
'''
template = template.strip()
template = template.replace("X1", output_direc)
template = template.replace("X2", ms_name)
template = template.replace("X3", model_name)
template = template.replace("X4", mask_name)
template = template.replace("SCRIPT", script_name)
# Create log file name
logfile = ms_name.rstrip(".ms") + ".log"
template = template.replace("X5", logfile)
return template
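# Editor's note (illustrative; the paths below are hypothetical): for
#   return_template("/scratch/chan_900",
#                   "14B-088_HI_LSRK.ms.contsub_channel_900.ms",
#                   "model_900.image", "mask_900.image")
# the X1..X5/SCRIPT placeholders are substituted so the submitted job cd's into
# /scratch/chan_900 and runs
#   casa-4.4 --nologger --logfile 14B-088_HI_LSRK.ms.contsub_channel_900.log \
#       -c HI_single_channel_clean.py <ms> <model> <mask>
# Caveat: rstrip(".ms") strips trailing '.', 'm' and 's' *characters*, not the
# literal suffix; it is safe for the digit-terminated channel names used here
# but would over-strip a name such as "foo_ms.ms".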
def drop_last(ms_list):
'''
CASA is generally still writing to the final (highest-numbered) MS in the
folder, so skip it.
'''
max_num = 0
for ms in ms_list:
if int(ms.split("_")[-1][:-3]) > max_num:
max_num_ms = ms
max_num = int(ms.split("_")[-1][:-3])
ms_list.remove(max_num_ms)
return ms_list
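# Editor's example (hypothetical file names): drop_last() removes only the
# highest-numbered channel MS, which CASA may still be writing:
#   drop_last(["x_channel_10.ms", "x_channel_12.ms", "x_channel_11.ms"])
#   -> ["x_channel_10.ms", "x_channel_11.ms"]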
# Set the directory to look in.
ms_channel = "/home/ekoch/m33/14B-088/14B-088_channel_ms/"
model_channel_name = "/home/ekoch/m33/14B-088/model_channels/M33_14B-088_HI_" \
"model_channel_{}.image"
mask_channel_name = "/home/ekoch/m33/14B-088/mask_channels/M33_14B-088_HI_" \
"mask_channel_{}.image"
output_direc = "/home/ekoch/m33/14B-088/single_channels/"
# Name of script to run. Default is to use the natural weighting script
script_name = "/home/ekoch/code_repos/VLA_Lband/14B-088/HI/imaging/" \
"HI_single_channel_clean.py"
# script_name = "/home/ekoch/code_repos/VLA_Lband/14B-088/HI/imaging/" \
# "HI_single_channel_clean_robust.py"
# Use mask and model? Disable when continuing to clean.
use_mask_model = True
# Set the mode to use. Continuously checking for new splits, or a set number.
sub_mode = sys.argv[1]
if sub_mode == "continuous":
pass
elif sub_mode == "range":
try:
start = int(sys.argv[2])
stop = int(sys.argv[3])
except IndexError:
raise IndexError("Must provide a start and stop when using "
"'range' mode.")
else:
raise TypeError("sub_mode must be 'continuous' or 'range'.")
while True:
# Run channels in given range
if sub_mode == "range":
channel_ms = []
for chan in xrange(start, stop + 1):
channel_path = \
os.path.join(ms_channel,
"14B-088_HI_LSRK.ms.contsub_channel_{}.ms".format(chan))
channel_ms.append(channel_path)
elif sub_mode == "continuous":
channel_ms = glob.glob(os.path.join(ms_channel, "*channel*.ms"))
channel_ms = drop_last(channel_ms)
# If there aren't any more split ms in the path, break and exit
if len(channel_ms) == 0:
print("No more MSs found in the directory. Exiting.")
break
# Now loop through the existing channel ms
for chan in channel_ms:
chan_num = int(chan.split("_")[-1][:-3])
# adjust for numbering offset
mod_mask_num = chan_num - 670
channel_direc = os.path.join(output_direc,
"channel_{}".format(chan_num))
# Check if that channel has been imaged already
# if os.path.isdir(channel_direc):
# print("Already imaged "+str(chan_num)+". Skipping")
# continue
if not os.path.exists(channel_direc):
os.mkdir(channel_direc)
# Names of the clean inputs in the channel_ms folder. Defined here to
# check if they exist before moving.
base_ms_name = os.path.basename(chan.rstrip("/"))
chan_ms = os.path.join(channel_direc, base_ms_name)
base_model_name = \
os.path.basename(model_channel_name.format(mod_mask_num))
model_name = os.path.join(channel_direc, base_model_name)
base_mask_name = \
os.path.basename(mask_channel_name.format(mod_mask_num))
mask_name = os.path.join(channel_direc, base_mask_name)
# Now move the mask, model, and channel ms into the folder, if they
# aren't there already
if not os.path.exists(chan_ms):
shutil.move(chan, channel_direc)
if not os.path.exists(model_name):
shutil.move(model_channel_name.format(mod_mask_num), channel_direc)
if not os.path.exists(mask_name):
shutil.move(mask_channel_name.format(mod_mask_num), channel_direc)
chan_template = return_template(channel_direc, chan_ms,
model_name, mask_name,
script_name=script_name)
# Write to file
sub_file = os.path.join(channel_direc,
"channel_{}.sub".format(chan_num))
if not os.path.exists(sub_file):
with open(sub_file, 'w') as f:
f.write(chan_template)
# Now submit!
old_direc = os.getcwd()
os.chdir(channel_direc) # Switch to directory so log files are there
os.system("qsub " + sub_file)
os.chdir(old_direc)
# Temporary stopper
break
# Wait an hour, then check again for new channel ms
time.sleep(3600)
|
# ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License
# Version 1.1 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS"
# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
# License for the specific language governing rights and limitations
# under the License.
#
# The Original Code is Komodo code.
#
# The Initial Developer of the Original Code is ActiveState Software Inc.
# Portions created by ActiveState Software Inc are Copyright (C) 2000-2007
# ActiveState Software Inc. All Rights Reserved.
#
# Contributor(s):
# ActiveState Software Inc
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
import os
import sys
import time
import types
if sys.platform != "win32" or sys.version_info[:2] >= (3, 0):
import signal # used by kill() method on Linux/Mac
import logging
import threading
import warnings
#-------- Globals -----------#
log = logging.getLogger("process")
# log.setLevel(logging.DEBUG)
try:
from subprocess32 import Popen, PIPE
except ImportError:
# Not available on Windows - fallback to using regular subprocess module.
from subprocess import Popen, PIPE
if sys.platform != "win32" or sys.version_info[:2] >= (3, 0):
log.warn(
"Could not import subprocess32 module, falling back to subprocess module")
CREATE_NEW_CONSOLE = 0x10 # same as win32process.CREATE_NEW_CONSOLE
CREATE_NEW_PROCESS_GROUP = 0x200 # same as win32process.CREATE_NEW_PROCESS_GROUP
CREATE_NO_WINDOW = 0x8000000 # same as win32process.CREATE_NO_WINDOW
CTRL_BREAK_EVENT = 1 # same as win32con.CTRL_BREAK_EVENT
WAIT_TIMEOUT = 258 # same as win32event.WAIT_TIMEOUT
#-------- Classes -----------#
# XXX - TODO: Work out what exceptions are raised by SubProcess and turn
# them into ProcessError?
class ProcessError(Exception):
def __init__(self, msg, errno=-1):
Exception.__init__(self, msg)
self.errno = errno
# Check if this is Windows NT and above.
if sys.platform == "win32" and sys.getwindowsversion()[3] == 2 and sys.version_info[:2] < (3, 0):
import winprocess
from subprocess import pywintypes, list2cmdline, STARTUPINFO
try:
# These subprocess variables have moved around between Python versions.
from subprocess import (SW_HIDE,
STARTF_USESTDHANDLES, STARTF_USESHOWWINDOW,
GetVersion, CreateProcess, TerminateProcess)
except ImportError:
import subprocess
SW_HIDE = subprocess._subprocess.SW_HIDE
STARTF_USESTDHANDLES = subprocess._subprocess.STARTF_USESTDHANDLES
STARTF_USESHOWWINDOW = subprocess._subprocess.STARTF_USESHOWWINDOW
GetVersion = subprocess._subprocess.GetVersion
CreateProcess = subprocess._subprocess.CreateProcess
TerminateProcess = subprocess._subprocess.TerminateProcess
# This fix is for killing child processes on windows, based on:
# http://www.microsoft.com/msj/0698/win320698.aspx
# It works by creating a uniquely named job object that will contain our
# process(es), starts the process in a suspended state, maps the process
# to a specific job object, resumes the process, from now on every child
# it will create will be assigned to the same job object. We can then
    # later terminate this job object (and all of its child processes).
#
# This code is based upon Benjamin Smedberg's killableprocess, see:
# http://benjamin.smedbergs.us/blog/2006-12-11/killableprocesspy/
class WindowsKillablePopen(Popen):
_job = None
def _execute_child(self, args, executable, preexec_fn, close_fds,
cwd, env, universal_newlines,
startupinfo, creationflags, shell,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite):
"""Execute program (MS Windows version)"""
if not isinstance(args, str):
args = list2cmdline(args)
# Process startup details
if startupinfo is None:
startupinfo = STARTUPINFO()
if None not in (p2cread, c2pwrite, errwrite):
startupinfo.dwFlags |= STARTF_USESTDHANDLES
startupinfo.hStdInput = p2cread
startupinfo.hStdOutput = c2pwrite
startupinfo.hStdError = errwrite
if shell:
startupinfo.dwFlags |= STARTF_USESHOWWINDOW
startupinfo.wShowWindow = SW_HIDE
comspec = os.environ.get("COMSPEC", "cmd.exe")
args = comspec + " /c " + args
if (GetVersion() >= 0x80000000 or
os.path.basename(comspec).lower() == "command.com"):
# Win9x, or using command.com on NT. We need to
# use the w9xpopen intermediate program. For more
# information, see KB Q150956
# (http://web.archive.org/web/20011105084002/http://support.microsoft.com/support/kb/articles/Q150/9/56.asp)
w9xpopen = self._find_w9xpopen()
args = '"%s" %s' % (w9xpopen, args)
# Not passing CREATE_NEW_CONSOLE has been known to
# cause random failures on win9x. Specifically a
# dialog: "Your program accessed mem currently in
# use at xxx" and a hopeful warning about the
                # stability of your system. Cost is Ctrl+C won't
# kill children.
creationflags |= CREATE_NEW_CONSOLE
# We create a new job for this process, so that we can kill
# the process and any sub-processes
self._job = winprocess.CreateJobObject()
creationflags |= winprocess.CREATE_SUSPENDED
# Vista will launch Komodo in a job object itself, so we need
# to specify that the created process is not part of the Komodo
# job object, but instead specify that it will be using a
# separate breakaway job object, bug 83001.
creationflags |= winprocess.CREATE_BREAKAWAY_FROM_JOB
# Start the process
try:
hp, ht, pid, tid = CreateProcess(executable, args,
# no special security
None, None,
int(not close_fds),
creationflags,
env,
cwd,
startupinfo)
except pywintypes.error as e:
# Translate pywintypes.error to WindowsError, which is
# a subclass of OSError. FIXME: We should really
                # translate errno using _sys_errlist (or similar), but
# how can this be done from Python?
raise WindowsError(*e.args)
except WindowsError:
log.error(
"process.py: can't execute %r (%s)", executable, args)
raise
# Retain the process handle, but close the thread handle
self._child_created = True
self._handle = hp
self.pid = pid
if self._job:
# Resume the thread.
winprocess.AssignProcessToJobObject(self._job, int(hp))
winprocess.ResumeThread(int(ht))
ht.Close()
# Child is launched. Close the parent's copy of those pipe
# handles that only the child should have open. You need
# to make sure that no handles to the write end of the
# output pipe are maintained in this process or else the
# pipe will not close when the child process exits and the
# ReadFile will hang.
if p2cread is not None:
p2cread.Close()
if c2pwrite is not None:
c2pwrite.Close()
if errwrite is not None:
errwrite.Close()
def terminate(self):
"""Terminates the process"""
if self._job:
winprocess.TerminateJobObject(self._job, 127)
self.returncode = 127
else:
# Cannot call the parent class, as there is no terminate method
# defined at the class level (it's added upon instantiation),
# so this is a copy of subprocess.Popen.terminate() code.
TerminateProcess(self._handle, 1)
kill = terminate
# Use our own killable process instead of the regular Popen.
Popen = WindowsKillablePopen
class ProcessOpen(Popen):
def __init__(self, cmd, cwd=None, env=None, flags=None,
stdin=PIPE, stdout=PIPE, stderr=PIPE,
universal_newlines=True):
"""Create a child process.
"cmd" is the command to run, either a list of arguments or a string.
"cwd" is a working directory in which to start the child process.
"env" is an environment dictionary for the child.
"flags" are system-specific process creation flags. On Windows
this can be a bitwise-OR of any of the win32process.CREATE_*
constants (Note: win32process.CREATE_NEW_PROCESS_GROUP is always
OR'd in). On Unix, this is currently ignored.
"stdin", "stdout", "stderr" can be used to specify file objects
to handle read (stdout/stderr) and write (stdin) events from/to
the child. By default a file handle will be created for each
io channel automatically, unless set explicitly to None. When set
to None, the parent io handles will be used, which can mean the
output is redirected to Komodo's log files.
"universal_newlines": On by default (the opposite of subprocess).
"""
self._child_created = False
self.__use_killpg = False
auto_piped_stdin = False
preexec_fn = None
shell = False
if not isinstance(cmd, (list, tuple)):
            # The cmd is already formatted and ready for the shell. Otherwise
# subprocess.Popen will treat this as simply one command with
# no arguments, resulting in an unknown command.
shell = True
if sys.platform.startswith("win"):
# On Windows, cmd requires some special handling of multiple quoted
# arguments, as this is what cmd will do:
# See if the first character is a quote character and if so,
# strip the leading character and remove the last quote character
# on the command line, preserving any text after the last quote
# character.
if cmd and shell and cmd.count('"') > 2:
if not cmd.startswith('""') or not cmd.endswith('""'):
# Needs to be a re-quoted with additional double quotes.
# http://bugs.activestate.com/show_bug.cgi?id=75467
cmd = '"%s"' % (cmd, )
if sys.version_info[:2] < (3, 0):
# XXX - subprocess needs to be updated to use the wide string API.
# subprocess uses a Windows API that does not accept unicode, so
# we need to convert all the environment variables to strings
# before we make the call. Temporary fix to bug:
# http://bugs.activestate.com/show_bug.cgi?id=72311
if env:
encoding = sys.getfilesystemencoding()
_enc_env = {}
for key, value in env.items():
try:
_enc_env[key.encode(encoding)] = value.encode(encoding)
except (UnicodeEncodeError, UnicodeDecodeError):
# Could not encode it, warn we are dropping it.
log.warn("Could not encode environment variable %r "
"so removing it", key)
env = _enc_env
if flags is None:
flags = CREATE_NO_WINDOW
# If we don't have standard handles to pass to the child process
# (e.g. we don't have a console app), then
# `subprocess.GetStdHandle(...)` will return None. `subprocess.py`
# handles that (http://bugs.python.org/issue1124861)
#
# However, if Komodo is started from the command line, then
# the shell's stdin handle is inherited, i.e. in subprocess.py:
# p2cread = GetStdHandle(STD_INPUT_HANDLE) # p2cread == 3
# A few lines later this leads to:
# Traceback (most recent call last):
# ...
# File "...\lib\mozilla\python\komodo\process.py", line 130, in __init__
# creationflags=flags)
# File "...\lib\python\lib\subprocess.py", line 588, in __init__
# errread, errwrite) = self._get_handles(stdin, stdout, stderr)
# File "...\lib\python\lib\subprocess.py", line 709, in _get_handles
# p2cread = self._make_inheritable(p2cread)
# File "...\lib\python\lib\subprocess.py", line 773, in _make_inheritable
# DUPLICATE_SAME_ACCESS)
# WindowsError: [Error 6] The handle is invalid
#
# I suspect this indicates that the stdin handle inherited by
# the subsystem:windows komodo.exe process is invalid -- perhaps
# because of mis-used of the Windows API for passing that handle
# through. The same error can be demonstrated in PythonWin:
# from _subprocess import *
# from subprocess import *
# h = GetStdHandle(STD_INPUT_HANDLE)
# p = Popen("python -c '1'")
            # p._make_inheritable(h)
#
# I don't understand why the inherited stdin is invalid for
# `DuplicateHandle`, but here is how we are working around this:
# If we detect the condition where this can fail, then work around
# it by setting the handle to `subprocess.PIPE`, resulting in
# a different and workable code path.
if self._needToHackAroundStdHandles() \
and not (flags & CREATE_NEW_CONSOLE):
if self._checkFileObjInheritable(sys.stdin, "STD_INPUT_HANDLE"):
stdin = PIPE
auto_piped_stdin = True
if self._checkFileObjInheritable(sys.stdout, "STD_OUTPUT_HANDLE"):
stdout = PIPE
if self._checkFileObjInheritable(sys.stderr, "STD_ERROR_HANDLE"):
stderr = PIPE
else:
# Set flags to 0, subprocess raises an exception otherwise.
flags = 0
            # Set a preexec function; this will make the sub-process create its
# own session and process group - bug 80651, bug 85693.
preexec_fn = os.setsid
            # Mark as requiring process-group killing. This will allow us to
# later kill both the spawned shell and the sub-process in one go
# (see the kill method) - bug 85693.
self.__use_killpg = True
# Internal attributes.
self.__cmd = cmd
self.__retval = None
self.__hasTerminated = threading.Condition()
# Launch the process.
# print "Process: %r in %r" % (cmd, cwd)
Popen.__init__(self, cmd, cwd=cwd, env=env, shell=shell,
stdin=stdin, stdout=stdout, stderr=stderr,
preexec_fn=preexec_fn,
universal_newlines=universal_newlines,
creationflags=flags)
if auto_piped_stdin:
self.stdin.close()
__needToHackAroundStdHandles = None
@classmethod
def _needToHackAroundStdHandles(cls):
if cls.__needToHackAroundStdHandles is None:
if sys.platform != "win32" or sys.version_info[:2] >= (3, 0):
cls.__needToHackAroundStdHandles = False
else:
from _subprocess import GetStdHandle, STD_INPUT_HANDLE
stdin_handle = GetStdHandle(STD_INPUT_HANDLE)
if stdin_handle is not None:
cls.__needToHackAroundStdHandles = True
if stdin_handle != 3:
log.warn("`GetStdHandle(STD_INPUT_HANDLE)` != 3: "
"something has changed w.r.t. std handle "
"inheritance in Komodo that may affect "
"subprocess launching")
else:
cls.__needToHackAroundStdHandles = False
return cls.__needToHackAroundStdHandles
@classmethod
def _checkFileObjInheritable(cls, fileobj, handle_name):
"""Check if a given file-like object (or whatever else subprocess.Popen
takes as a handle/stream) can be correctly inherited by a child process.
This just duplicates the code in subprocess.Popen._get_handles to make
        sure we go down the correct code path; this is to catch some
        non-standard corner cases."""
        import subprocess
        import _subprocess
        import msvcrt
        new_handle = None
        try:
            if fileobj is None:
                handle = _subprocess.GetStdHandle(getattr(_subprocess,
                                                          handle_name))
                if handle is None:
                    return True  # No need to check things we create
            elif fileobj == subprocess.PIPE:
                return True  # No need to check things we create
            elif isinstance(fileobj, int):
                handle = msvcrt.get_osfhandle(fileobj)
            else:
                # Assuming file-like object
                handle = msvcrt.get_osfhandle(fileobj.fileno())
            # Duplicate the handle the way subprocess.Popen._make_inheritable
            # does; this is a classmethod, so there is no instance whose
            # _make_inheritable we could call here.
            new_handle = _subprocess.DuplicateHandle(
                _subprocess.GetCurrentProcess(), handle,
                _subprocess.GetCurrentProcess(), 0, 1,
                _subprocess.DUPLICATE_SAME_ACCESS)
return True
except:
return False
        finally:
            if new_handle is not None:
                # Duplicated handles close themselves when garbage collected,
                # but be explicit about releasing them here.
                new_handle.Close()
# Override the returncode handler (used by subprocess.py), this is so
# we can notify any listeners when the process has finished.
def _getReturncode(self):
return self.__returncode
def _setReturncode(self, value):
self.__returncode = value
if value is not None:
# Notify that the process is done.
self.__hasTerminated.acquire()
self.__hasTerminated.notifyAll()
self.__hasTerminated.release()
returncode = property(fget=_getReturncode, fset=_setReturncode)
# Setup the retval handler. This is a readonly wrapper around returncode.
def _getRetval(self):
# Ensure the returncode is set by subprocess if the process is
# finished.
self.poll()
return self.returncode
retval = property(fget=_getRetval)
def wait(self, timeout=None):
"""Wait for the started process to complete.
"timeout" is a floating point number of seconds after
which to timeout. Default is None, which is to never timeout.
        If the wait times out it will raise a ProcessError. Otherwise it
will return the child's exit value. Note that in the case of a timeout,
the process is still running. Use kill() to forcibly stop the process.
"""
if timeout is None or timeout < 0:
# Use the parent call.
try:
return Popen.wait(self)
except OSError as ex:
# If the process has already ended, that is fine. This is
# possible when wait is called from a different thread.
if ex.errno != 10: # No child process
raise
return self.returncode
# We poll for the retval, as we cannot rely on self.__hasTerminated
# to be called, as there are some code paths that do not trigger it.
# The accuracy of this wait call is between 0.1 and 1 second.
time_now = time.time()
time_end = time_now + timeout
# These values will be used to incrementally increase the wait period
# of the polling check, starting from the end of the list and working
# towards the front. This is to avoid waiting for a long period on
# processes that finish quickly, see bug 80794.
time_wait_values = [1.0, 0.5, 0.2, 0.1]
while time_now < time_end:
result = self.poll()
if result is not None:
return result
# We use hasTerminated here to get a faster notification.
self.__hasTerminated.acquire()
if time_wait_values:
wait_period = time_wait_values.pop()
self.__hasTerminated.wait(wait_period)
self.__hasTerminated.release()
time_now = time.time()
# last chance
result = self.poll()
if result is not None:
return result
raise ProcessError("Process timeout: waited %d seconds, "
"process not yet finished." % (timeout,),
WAIT_TIMEOUT)
# For backward compatibility with older process.py
def close(self):
pass
# For backward compatibility with older process.py
def kill(self, exitCode=-1, gracePeriod=None, sig=None):
"""Kill process.
"exitCode" this sets what the process return value will be.
"gracePeriod" [deprecated, not supported]
"sig" (Unix only) is the signal to use to kill the process. Defaults
to signal.SIGKILL. See os.kill() for more information.
"""
if gracePeriod is not None:
import warnings
warnings.warn("process.kill() gracePeriod is no longer used",
DeprecationWarning)
# Need to ensure stdin is closed, makes it easier to end the process.
if self.stdin is not None:
self.stdin.close()
if sys.platform.startswith("win"):
# TODO: 1) It would be nice if we could give the process(es) a
# chance to exit gracefully first, rather than having to
# resort to a hard kill.
# 2) May need to send a WM_CLOSE event in the case of a GUI
# application, like the older process.py was doing.
Popen.kill(self)
else:
if sig is None:
sig = signal.SIGKILL
try:
if self.__use_killpg:
os.killpg(self.pid, sig)
else:
os.kill(self.pid, sig)
            except OSError as ex:
                # Errno 3 (ESRCH, "No such process") means the process has
                # already exited, which is fine; re-raise anything else.
                if ex.errno != 3:
                    raise
self.returncode = exitCode
class AbortableProcessHelper(object):
"""A helper class that is able to run a process and have the process be
killed/aborted (possibly by another thread) if it is still running.
"""
STATUS_INITIALIZED = 0 # Ready to run.
STATUS_RUNNING = 1 # A process is running.
STATUS_FINISHED_NORMALLY = 2 # The command/process finished normally.
STATUS_ABORTED = 3 # The command/process was aborted.
def __init__(self):
self._process = None
self._process_status = self.STATUS_INITIALIZED
self._process_status_lock = threading.Lock()
def ProcessOpen(self, *args, **kwargs):
"""Create a new process and return it."""
self._process_status_lock.acquire()
try:
self._process_status = self.STATUS_RUNNING
self._process = ProcessOpen(*args, **kwargs)
return self._process
finally:
self._process_status_lock.release()
def ProcessDone(self):
"""Mark the process as being completed, does not need to be aborted."""
self._process_status_lock.acquire()
try:
self._process = None
self._process_status = self.STATUS_FINISHED_NORMALLY
finally:
self._process_status_lock.release()
def ProcessAbort(self):
"""Kill the process if it is still running."""
self._process_status_lock.acquire()
try:
self._process_status = self.STATUS_ABORTED
if self._process:
self._process.kill()
self._process = None
finally:
self._process_status_lock.release()
## Deprecated process classes ##
class Process(ProcessOpen):
def __init__(self, *args, **kwargs):
warnings.warn("'process.%s' is now deprecated. Please use 'process.ProcessOpen'." %
(self.__class__.__name__))
ProcessOpen.__init__(self, *args, **kwargs)
class ProcessProxy(Process):
pass
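
# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original Komodo module): run a short
# command through ProcessOpen, wait for it with a timeout, and read its
# output. The command line below is purely illustrative.
if __name__ == "__main__":
    p = ProcessOpen([sys.executable, "-c", "print('hello')"])
    try:
        retval = p.wait(timeout=10)
        print("exit code: %s" % retval)
        print("stdout: %r" % p.stdout.read())
    except ProcessError:
        # The command did not finish within the timeout; stop it forcibly.
        p.kill()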
|
from django.conf import settings
from django.shortcuts import HttpResponse, Http404
from django.template.response import TemplateResponse
from django.core.files.storage import default_storage
from visual_translations.models import VisualTranslationsEUCommunity
def visual_translation_map(request, term):
dirs, files = default_storage.listdir('visual_translations/{}/'.format(term))
time = request.GET.dict().get("t", None)
if time is not None and time not in dirs:
raise Http404("Visual translation with t={} not found or not ready".format(time))
elif time is None:
time = str(max((int(dir) for dir in dirs)))
locales_info = [
{
"locale": "{}_{}".format(language, country),
"small_image_file": "visual_translations/{}/{}/S_{}_{}.jpg".format(term, time, language, country),
"large_image_file": "visual_translations/{}/{}/L_{}_{}.jpg".format(term, time, language, country),
"xlarge_image_file": "visual_translations/{}/{}/XL_{}_{}.jpg".format(term, time, language, country),
"grid": {
"width": grid["cell_width"] * grid["columns"],
"height": grid["cell_height"] * grid["rows"],
"width_xl": grid["cell_width"] * grid["columns"] * factor,
"height_xl": grid["cell_height"] * grid["rows"] * factor,
"width_2": int(grid["cell_width"] * grid["columns"] / 2),
"height_2": int(grid["cell_height"] * grid["rows"] / 2),
"width_20": int(grid["cell_width"] * grid["columns"] / 20),
"height_20": int(grid["cell_height"] * grid["rows"] / 20)
}
}
for language, country, grid, factor in VisualTranslationsEUCommunity.LOCALES
]
context = {
"region_topo_json": "visual_translations/geo/europe.topo.json",
"locales": locales_info,
}
return TemplateResponse(request, "visual_translations/map.html", context=context)
def visual_translations_controller(request):
context = {
"words": ["pension", "peace", "women", "immigrants", "cowshed", "leave"]
}
return TemplateResponse(request, "visual_translations/controller.html", context=context)
def web_sockets_broadcast(request, message):
if not settings.USE_WEBSOCKETS:
return HttpResponse('Websockets not enabled in bootstrap.py')
try:
from ws4redis.publisher import RedisPublisher
from ws4redis.redis_store import RedisMessage
except ImportError:
return HttpResponse('Websockets package ws4redis not installed')
redis_publisher = RedisPublisher(facility='visual-translations-map', broadcast=True)
message = RedisMessage(message)
redis_publisher.publish_message(message)
return HttpResponse("Broadcast: {}".format(message))
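
# Hedged sketch (assumption): these views would typically be exposed through a
# urls.py along the following lines; the patterns and regexes below are
# illustrative only and are not taken from this project.
#
#     from django.conf.urls import url
#     from visual_translations import views
#
#     urlpatterns = [
#         url(r'^map/(?P<term>[\w-]+)/$', views.visual_translation_map),
#         url(r'^controller/$', views.visual_translations_controller),
#         url(r'^broadcast/(?P<message>.+)/$', views.web_sockets_broadcast),
#     ]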
|
import numpy as np
import os
import subprocess
# from time import sleep
# Generates two-atom xyz files on a grid of orientations (24 radial distances
# x 72 rotation angles = 1728 configurations), along with an input .scan file
# and a parameters.dat for each, to be given as input to mechAFM.
def makeIt(output_folder = "randomRotateOutput/"):
fileNumber = 0
    # The COM of each xyz file sits at the origin; one atom is placed at a
    # point determined by i and j: i selects the distance from the origin
    # (0.6 to 2.9 in steps of 0.1, i.e. 24 points) and j selects the angular
    # orientation (72 rotations, 5 degrees apart).
    for i in range(24):
        for j in range(72):
            distanceFromOrigin = 0.6 + i*0.1
            angularOrientation = 5*j*np.pi/180.0  # in radians
x = distanceFromOrigin * np.cos(angularOrientation)
y = distanceFromOrigin * np.sin(angularOrientation)
xyzOut = '''2
C %s %s 0.0
H %s %s 0.0''' % (x, y, -x, -y)
scanOut = '''xyzfile %s
paramfile parameters.dat
tipatom T
dummyatom X
units kcal/mol
minterm f
etol 0.001
ftol 0.001
dt 0.001
maxsteps 50000
minimiser FIRE
integrator midpoint
coulomb off
rigidgrid off
flexible off
area 8.0 8.0
center 4.0 4.0
zhigh 10.0
zlow 6.0
dx 0.2
dy 0.2
dz 0.1
bufsize 10000
gzip off
statistics on''' % (str(fileNumber) + ".xyz")
parametersContent = '''# Parameters for a system from a paper
# name | epsilon (kcal/mol) | sigma (A) | mass (amu) | charge (e)
atom C 0.07000 3.55000 12.01100 0.00000
atom H 0.03350 2.42000 1.00800 0.00000
atom O 0.11080 2.98504 15.99940 0.00000
atom N 0.19200 3.31988 14.00670 0.00000
atom S 0.43560 3.63599 32.06500 0.00000
atom F 0.11080 2.90789 18.99840 0.00000
atom B 0.10500 3.63000 10.81000 0.00000
atom X 0.07000 3.55000 12.01100 0.02100
atom T 0.19200 3.15000 15.99900 -0.02100
# Boron parameters guessed from Baowan & Hill, IET Micro & Nano Letters 2:46 (2007)
# Carbon, oxygen and hydrogen parameters from original CHARMM force field
# Pair style to overwrite and default LJ-mixing
# atom1 | atom2 | pair_style | parameters (eps,sig for LJ; De,a,re for Morse)
# pair_ovwrt C T morse 1 2 3
pair_ovwrt X T lj 20.0000 3.5500
# Tip harmonic constraint
# force constant (kcal/mol) | distance (A)
harm 0.72000 0.00
# Additional parameters for making the molecules flexible
# We need to know the topology, so list the possible bonds and their expected length
# atom1 | atom2 | exp. length (A)
# topobond C C 1.430
# topobond C H 1.095
# topobond C B 1.534
# bonds are assumed harmonic and in their equilibrium position (in the xyz file)
# force constant (kcal/mol)
bond 25.000
# angles are assumed harmonic and in their equilibrium position (in the xyz file)
# force constant (kcal/mol)
angle 0.2500
# dihedrals are assumed harmonic and in their equilibrium position (in the xyz file)
# force constant (kcal/mol)
dihedral 0.2500
# substrate support using a 10-4 wall potential
# epsilon (kcal/mol) | sigma (A) | lambda (A) | r_cut (A) | lateral constant (kcal/mol)
substrate 0.100 3.0 3.0 7.5 0.01'''
os.makedirs(output_folder + str(fileNumber))
xyzFile = open(output_folder + str(fileNumber) + "/" + str(fileNumber) + ".xyz", "w+")
xyzFile.write(xyzOut)
scanFile = open(output_folder + str(fileNumber) + "/" + str(fileNumber) + ".scan", "w+")
scanFile.write(scanOut)
paraFile = open(output_folder + str(fileNumber) + "/" + "parameters.dat", "w+")
paraFile.write(parametersContent)
xyzFile.close()
scanFile.close()
paraFile.close()
print("done with file number " + str(fileNumber))
fileNumber += 1
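
if __name__ == "__main__":
    # Example invocation (a sketch, not part of the original script): generate
    # the full 24 x 72 grid of inputs under "randomRotateOutput/". Note that
    # this creates 1728 directories, so point output_folder somewhere
    # disposable.
    makeIt(output_folder="randomRotateOutput/")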
|
from __future__ import print_function
from builtins import map
from past.builtins import basestring
from builtins import enumerate, int, list, range, zip
import operator
import re
import numpy as np
import pandas as pd
import sportsref
PLAYER_RE = r'\w{0,7}\d{2}'
HM_LINEUP_COLS = ['hm_player{}'.format(i) for i in range(1, 6)]
AW_LINEUP_COLS = ['aw_player{}'.format(i) for i in range(1, 6)]
ALL_LINEUP_COLS = AW_LINEUP_COLS + HM_LINEUP_COLS
def sparse_lineup_cols(df):
regex = '{}_in'.format(PLAYER_RE)
return [c for c in df.columns if re.match(regex, c)]
def parse_play(boxscore_id, details, is_hm):
"""Parse play details from a play-by-play string describing a play.
Assuming valid input, this function returns structured data in a dictionary
describing the play. If the play detail string was invalid, this function
returns None.
:param boxscore_id: the boxscore ID of the play
:param details: detail string for the play
:param is_hm: bool indicating whether the offense is at home
    :returns: dictionary of play attributes or None if invalid
:rtype: dictionary or None
"""
# if input isn't a string, return None
if not details or not isinstance(details, basestring):
return None
bs = sportsref.nba.BoxScore(boxscore_id)
aw, hm = bs.away(), bs.home()
season = sportsref.nba.Season(bs.season())
hm_roster = set(bs.basic_stats().query('is_home == True').player_id.values)
p = {}
p['detail'] = details
p['home'] = hm
p['away'] = aw
p['is_home_play'] = is_hm
# parsing field goal attempts
shotRE = (r'(?P<shooter>{0}) (?P<is_fgm>makes|misses) '
              r'(?P<is_three>2|3)\-pt shot').format(PLAYER_RE)
distRE = r' (?:from (?P<shot_dist>\d+) ft|at rim)'
assistRE = r' \(assist by (?P<assister>{0})\)'.format(PLAYER_RE)
blockRE = r' \(block by (?P<blocker>{0})\)'.format(PLAYER_RE)
shotRE = r'{0}{1}(?:{2}|{3})?'.format(shotRE, distRE, assistRE, blockRE)
m = re.match(shotRE, details, re.IGNORECASE)
if m:
p['is_fga'] = True
p.update(m.groupdict())
p['shot_dist'] = p['shot_dist'] if p['shot_dist'] is not None else 0
p['shot_dist'] = int(p['shot_dist'])
p['is_fgm'] = p['is_fgm'] == 'makes'
p['is_three'] = p['is_three'] == '3'
p['is_assist'] = pd.notnull(p.get('assister'))
p['is_block'] = pd.notnull(p.get('blocker'))
shooter_home = p['shooter'] in hm_roster
p['off_team'] = hm if shooter_home else aw
p['def_team'] = aw if shooter_home else hm
return p
# parsing jump balls
jumpRE = ((r'Jump ball: (?P<away_jumper>{0}) vs\. (?P<home_jumper>{0})'
r'(?: \((?P<gains_poss>{0}) gains possession\))?')
.format(PLAYER_RE))
m = re.match(jumpRE, details, re.IGNORECASE)
if m:
p['is_jump_ball'] = True
p.update(m.groupdict())
return p
# parsing rebounds
rebRE = (r'(?P<is_oreb>Offensive|Defensive) rebound'
r' by (?P<rebounder>{0}|Team)').format(PLAYER_RE)
m = re.match(rebRE, details, re.I)
if m:
p['is_reb'] = True
p.update(m.groupdict())
p['is_oreb'] = p['is_oreb'].lower() == 'offensive'
p['is_dreb'] = not p['is_oreb']
if p['rebounder'] == 'Team':
p['reb_team'], other = (hm, aw) if is_hm else (aw, hm)
else:
reb_home = p['rebounder'] in hm_roster
p['reb_team'], other = (hm, aw) if reb_home else (aw, hm)
p['off_team'] = p['reb_team'] if p['is_oreb'] else other
p['def_team'] = p['reb_team'] if p['is_dreb'] else other
return p
# parsing free throws
ftRE = (r'(?P<ft_shooter>{}) (?P<is_ftm>makes|misses) '
r'(?P<is_tech_fta>technical )?(?P<is_flag_fta>flagrant )?'
r'(?P<is_clearpath_fta>clear path )?free throw'
r'(?: (?P<fta_num>\d+) of (?P<tot_fta>\d+))?').format(PLAYER_RE)
m = re.match(ftRE, details, re.I)
if m:
p['is_fta'] = True
p.update(m.groupdict())
p['is_ftm'] = p['is_ftm'] == 'makes'
p['is_tech_fta'] = bool(p['is_tech_fta'])
p['is_flag_fta'] = bool(p['is_flag_fta'])
p['is_clearpath_fta'] = bool(p['is_clearpath_fta'])
p['is_pf_fta'] = not p['is_tech_fta']
if p['tot_fta']:
p['tot_fta'] = int(p['tot_fta'])
if p['fta_num']:
p['fta_num'] = int(p['fta_num'])
ft_home = p['ft_shooter'] in hm_roster
p['fta_team'] = hm if ft_home else aw
if not p['is_tech_fta']:
p['off_team'] = hm if ft_home else aw
p['def_team'] = aw if ft_home else hm
return p
# parsing substitutions
subRE = (r'(?P<sub_in>{0}) enters the game for '
r'(?P<sub_out>{0})').format(PLAYER_RE)
m = re.match(subRE, details, re.I)
if m:
p['is_sub'] = True
p.update(m.groupdict())
sub_home = p['sub_in'] in hm_roster or p['sub_out'] in hm_roster
p['sub_team'] = hm if sub_home else aw
return p
# parsing turnovers
toReasons = (r'(?P<to_type>[^;]+)(?:; steal by '
r'(?P<stealer>{0}))?').format(PLAYER_RE)
toRE = (r'Turnover by (?P<to_by>{}|Team) '
r'\((?:{})\)').format(PLAYER_RE, toReasons)
m = re.match(toRE, details, re.I)
if m:
p['is_to'] = True
p.update(m.groupdict())
p['to_type'] = p['to_type'].lower()
if p['to_type'] == 'offensive foul':
return None
p['is_steal'] = pd.notnull(p['stealer'])
p['is_travel'] = p['to_type'] == 'traveling'
p['is_shot_clock_viol'] = p['to_type'] == 'shot clock'
p['is_oob'] = p['to_type'] == 'step out of bounds'
p['is_three_sec_viol'] = p['to_type'] == '3 sec'
p['is_backcourt_viol'] = p['to_type'] == 'back court'
p['is_off_goaltend'] = p['to_type'] == 'offensive goaltending'
p['is_double_dribble'] = p['to_type'] == 'dbl dribble'
p['is_discont_dribble'] = p['to_type'] == 'discontinued dribble'
p['is_carry'] = p['to_type'] == 'palming'
if p['to_by'] == 'Team':
p['off_team'] = hm if is_hm else aw
p['def_team'] = aw if is_hm else hm
else:
to_home = p['to_by'] in hm_roster
p['off_team'] = hm if to_home else aw
p['def_team'] = aw if to_home else hm
return p
# parsing shooting fouls
shotFoulRE = (r'Shooting(?P<is_block_foul> block)? foul by (?P<fouler>{0})'
r'(?: \(drawn by (?P<drew_foul>{0})\))?').format(PLAYER_RE)
m = re.match(shotFoulRE, details, re.I)
if m:
p['is_pf'] = True
p['is_shot_foul'] = True
p.update(m.groupdict())
p['is_block_foul'] = bool(p['is_block_foul'])
foul_on_home = p['fouler'] in hm_roster
p['off_team'] = aw if foul_on_home else hm
p['def_team'] = hm if foul_on_home else aw
p['foul_team'] = p['def_team']
return p
# parsing offensive fouls
offFoulRE = (r'Offensive(?P<is_charge> charge)? foul '
r'by (?P<to_by>{0})'
r'(?: \(drawn by (?P<drew_foul>{0})\))?').format(PLAYER_RE)
m = re.match(offFoulRE, details, re.I)
if m:
p['is_pf'] = True
p['is_off_foul'] = True
p['is_to'] = True
p['to_type'] = 'offensive foul'
p.update(m.groupdict())
p['is_charge'] = bool(p['is_charge'])
p['fouler'] = p['to_by']
foul_on_home = p['fouler'] in hm_roster
p['off_team'] = hm if foul_on_home else aw
p['def_team'] = aw if foul_on_home else hm
p['foul_team'] = p['off_team']
return p
# parsing personal fouls
foulRE = (r'Personal (?P<is_take_foul>take )?(?P<is_block_foul>block )?'
r'foul by (?P<fouler>{0})(?: \(drawn by '
r'(?P<drew_foul>{0})\))?').format(PLAYER_RE)
m = re.match(foulRE, details, re.I)
if m:
p['is_pf'] = True
p.update(m.groupdict())
p['is_take_foul'] = bool(p['is_take_foul'])
p['is_block_foul'] = bool(p['is_block_foul'])
foul_on_home = p['fouler'] in hm_roster
p['off_team'] = aw if foul_on_home else hm
p['def_team'] = hm if foul_on_home else aw
p['foul_team'] = p['def_team']
return p
# TODO: parsing double personal fouls
# double_foul_re = (r'Double personal foul by (?P<fouler1>{0}) and '
# r'(?P<fouler2>{0})').format(PLAYER_RE)
# m = re.match(double_Foul_re, details, re.I)
# if m:
# p['is_pf'] = True
# p.update(m.groupdict())
# p['off_team'] =
# parsing loose ball fouls
looseBallRE = (r'Loose ball foul by (?P<fouler>{0})'
r'(?: \(drawn by (?P<drew_foul>{0})\))?').format(PLAYER_RE)
m = re.match(looseBallRE, details, re.I)
if m:
p['is_pf'] = True
p['is_loose_ball_foul'] = True
p.update(m.groupdict())
foul_home = p['fouler'] in hm_roster
p['foul_team'] = hm if foul_home else aw
return p
# parsing punching fouls
# TODO
# parsing away from play fouls
awayFromBallRE = ((r'Away from play foul by (?P<fouler>{0})'
r'(?: \(drawn by (?P<drew_foul>{0})\))?')
.format(PLAYER_RE))
m = re.match(awayFromBallRE, details, re.I)
if m:
p['is_pf'] = True
p['is_away_from_play_foul'] = True
p.update(m.groupdict())
foul_on_home = p['fouler'] in hm_roster
# TODO: figure out who had the ball based on previous play
p['foul_team'] = hm if foul_on_home else aw
return p
# parsing inbound fouls
inboundRE = (r'Inbound foul by (?P<fouler>{0})'
r'(?: \(drawn by (?P<drew_foul>{0})\))?').format(PLAYER_RE)
m = re.match(inboundRE, details, re.I)
if m:
p['is_pf'] = True
p['is_inbound_foul'] = True
p.update(m.groupdict())
foul_on_home = p['fouler'] in hm_roster
p['off_team'] = aw if foul_on_home else hm
p['def_team'] = hm if foul_on_home else aw
p['foul_team'] = p['def_team']
return p
# parsing flagrant fouls
flagrantRE = (r'Flagrant foul type (?P<flag_type>1|2) by (?P<fouler>{0})'
r'(?: \(drawn by (?P<drew_foul>{0})\))?').format(PLAYER_RE)
m = re.match(flagrantRE, details, re.I)
if m:
p['is_pf'] = True
p['is_flagrant'] = True
p.update(m.groupdict())
foul_on_home = p['fouler'] in hm_roster
p['foul_team'] = hm if foul_on_home else aw
return p
# parsing clear path fouls
clearPathRE = (r'Clear path foul by (?P<fouler>{0})'
r'(?: \(drawn by (?P<drew_foul>{0})\))?').format(PLAYER_RE)
m = re.match(clearPathRE, details, re.I)
if m:
p['is_pf'] = True
p['is_clear_path_foul'] = True
p.update(m.groupdict())
foul_on_home = p['fouler'] in hm_roster
p['off_team'] = aw if foul_on_home else hm
p['def_team'] = hm if foul_on_home else aw
p['foul_team'] = p['def_team']
return p
# parsing timeouts
timeoutRE = r'(?P<timeout_team>.*?) (?:full )?timeout'
m = re.match(timeoutRE, details, re.I)
if m:
p['is_timeout'] = True
p.update(m.groupdict())
isOfficialTO = p['timeout_team'].lower() == 'official'
name_to_id = season.team_names_to_ids()
p['timeout_team'] = (
'Official' if isOfficialTO else
name_to_id.get(hm, name_to_id.get(aw, p['timeout_team']))
)
return p
# parsing technical fouls
techRE = (r'(?P<is_hanging>Hanging )?'
r'(?P<is_taunting>Taunting )?'
r'(?P<is_ill_def>Ill def )?'
r'(?P<is_delay>Delay )?'
r'(?P<is_unsport>Non unsport )?'
r'tech(?:nical)? foul by '
r'(?P<tech_fouler>{0}|Team)').format(PLAYER_RE)
m = re.match(techRE, details, re.I)
if m:
p['is_tech_foul'] = True
p.update(m.groupdict())
p['is_hanging'] = bool(p['is_hanging'])
p['is_taunting'] = bool(p['is_taunting'])
p['is_ill_def'] = bool(p['is_ill_def'])
p['is_delay'] = bool(p['is_delay'])
p['is_unsport'] = bool(p['is_unsport'])
foul_on_home = p['tech_fouler'] in hm_roster
p['foul_team'] = hm if foul_on_home else aw
return p
# parsing ejections
ejectRE = r'(?P<ejectee>{0}|Team) ejected from game'.format(PLAYER_RE)
m = re.match(ejectRE, details, re.I)
if m:
p['is_ejection'] = True
p.update(m.groupdict())
if p['ejectee'] == 'Team':
p['ejectee_team'] = hm if is_hm else aw
else:
eject_home = p['ejectee'] in hm_roster
p['ejectee_team'] = hm if eject_home else aw
return p
# parsing defensive 3 seconds techs
def3TechRE = (r'(?:Def 3 sec tech foul|Defensive three seconds)'
r' by (?P<tech_fouler>{})').format(PLAYER_RE)
m = re.match(def3TechRE, details, re.I)
if m:
p['is_tech_foul'] = True
p['is_def_three_secs'] = True
p.update(m.groupdict())
foul_on_home = p['tech_fouler'] in hm_roster
p['off_team'] = aw if foul_on_home else hm
p['def_team'] = hm if foul_on_home else aw
p['foul_team'] = p['def_team']
return p
# parsing violations
violRE = (r'Violation by (?P<violator>{0}|Team) '
r'\((?P<viol_type>.*)\)').format(PLAYER_RE)
m = re.match(violRE, details, re.I)
if m:
p['is_viol'] = True
p.update(m.groupdict())
if p['viol_type'] == 'kicked_ball':
p['is_to'] = True
p['to_by'] = p['violator']
if p['violator'] == 'Team':
p['viol_team'] = hm if is_hm else aw
else:
viol_home = p['violator'] in hm_roster
p['viol_team'] = hm if viol_home else aw
return p
p['is_error'] = True
return p
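# Worked example (hedged; parse_play fetches the box score, so this is shown
# only as a comment, and the player/boxscore IDs are placeholders): for a
# detail string such as
#     "curryst01 makes 3-pt shot from 26 ft (assist by greendr01)"
# parse_play() would return a dict containing, among other keys,
#     is_fga=True, is_fgm=True, is_three=True, shot_dist=26,
#     shooter='curryst01', assister='greendr01', is_assist=True,
# with 'off_team'/'def_team' resolved from the box score rosters.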
def clean_features(df):
"""Fixes up columns of the passed DataFrame, such as casting T/F columns to
boolean and filling in NaNs for team and opp.
:param df: DataFrame of play-by-play data.
:returns: Dataframe with cleaned columns.
"""
df = pd.DataFrame(df)
bool_vals = set([True, False, None, np.nan])
sparse_cols = sparse_lineup_cols(df)
for col in df:
# make indicator columns boolean type (and fill in NaNs)
if set(df[col].unique()[:5]) <= bool_vals:
df[col] = (df[col] == True)
# fill NaN's in sparse lineup columns to 0
elif col in sparse_cols:
df[col] = df[col].fillna(0)
# fix free throw columns on technicals
df.loc[df.is_tech_fta, ['fta_num', 'tot_fta']] = 1
# fill in NaN's/fix off_team and def_team columns
df.off_team.fillna(method='bfill', inplace=True)
df.def_team.fillna(method='bfill', inplace=True)
df.off_team.fillna(method='ffill', inplace=True)
df.def_team.fillna(method='ffill', inplace=True)
return df
def clean_multigame_features(df):
"""TODO: Docstring for clean_multigame_features.
:df: TODO
:returns: TODO
"""
df = pd.DataFrame(df)
if df.index.value_counts().max() > 1:
df.reset_index(drop=True, inplace=True)
df = clean_features(df)
# if it's many games in one DataFrame, make poss_id and play_id unique
for col in ('play_id', 'poss_id'):
diffs = df[col].diff().fillna(0)
if (diffs < 0).any():
new_col = np.cumsum(diffs.astype(bool))
df.eval('{} = @new_col'.format(col), inplace=True)
return df
def get_period_starters(df):
"""TODO
"""
def players_from_play(play):
"""Figures out what players are in the game based on the players
        mentioned in a play. Returns away and home players as two lists.
:param play: A dictionary representing a parsed play.
:returns: (aw_players, hm_players)
:rtype: tuple of lists
"""
# if it's a tech FT from between periods, don't count this play
if (
play['clock_time'] == '12:00.0' and
(play.get('is_tech_foul') or play.get('is_tech_fta'))
):
return [], []
stats = sportsref.nba.BoxScore(play['boxscore_id']).basic_stats()
home_grouped = stats.groupby('is_home')
hm_roster = set(home_grouped.player_id.get_group(True).values)
aw_roster = set(home_grouped.player_id.get_group(False).values)
player_keys = [
'assister', 'away_jumper', 'blocker', 'drew_foul', 'fouler',
'ft_shooter', 'gains_poss', 'home_jumper', 'rebounder', 'shooter',
'stealer', 'sub_in', 'sub_out', 'to_by'
]
players = [p for p in play[player_keys] if pd.notnull(p)]
aw_players = [p for p in players if p in aw_roster]
hm_players = [p for p in players if p in hm_roster]
return aw_players, hm_players
# create a mapping { quarter => (away_starters, home_starters) }
n_periods = df.quarter.nunique()
period_starters = [(set(), set()) for _ in range(n_periods)]
# fill out this mapping quarter by quarter
for qtr, qtr_grp in df.groupby(df.quarter):
aw_starters, hm_starters = period_starters[qtr-1]
exclude = set()
# loop through sets of plays that happen at the "same time"
for label, time_grp in qtr_grp.groupby(qtr_grp.secs_elapsed):
# first, if they sub in and weren't already starters, exclude them
sub_ins = set(time_grp.sub_in.dropna().values)
exclude.update(sub_ins - aw_starters - hm_starters)
# second, figure out new starters from each play at this time
for i, row in time_grp.iterrows():
aw_players, hm_players = players_from_play(row)
# update overall sets for the quarter
aw_starters.update(aw_players)
hm_starters.update(hm_players)
# remove excluded (subbed-in) players
hm_starters -= exclude
aw_starters -= exclude
# check whether we have found all starters
if len(hm_starters) > 5 or len(aw_starters) > 5:
import ipdb
ipdb.set_trace()
if len(hm_starters) >= 5 and len(aw_starters) >= 5:
break
if len(hm_starters) != 5 or len(aw_starters) != 5:
print('WARNING: wrong number of starters for a team in Q{} of {}'
.format(qtr, df.boxscore_id.iloc[0]))
return period_starters
def get_sparse_lineups(df):
"""TODO: Docstring for get_sparse_lineups.
:param df: TODO
:returns: TODO
"""
# get the lineup data using get_dense_lineups if necessary
if (set(ALL_LINEUP_COLS) - set(df.columns)):
lineup_df = get_dense_lineups(df)
else:
lineup_df = df[ALL_LINEUP_COLS]
# create the sparse representation
hm_lineups = lineup_df[HM_LINEUP_COLS].values
aw_lineups = lineup_df[AW_LINEUP_COLS].values
# +1 for home, -1 for away
hm_df = pd.DataFrame([
{'{}_in'.format(player_id): 1 for player_id in lineup}
for lineup in hm_lineups
], dtype=int)
aw_df = pd.DataFrame([
{'{}_in'.format(player_id): -1 for player_id in lineup}
for lineup in aw_lineups
], dtype=int)
sparse_df = pd.concat((hm_df, aw_df), axis=1).fillna(0)
return sparse_df
def get_dense_lineups(df):
"""Returns a new DataFrame based on the one it is passed. Specifically, it
adds five columns for each team (ten total), where each column has the ID
of a player on the court during the play.
This information is figured out sequentially from the game's substitution
data in the passed DataFrame, so the DataFrame passed as an argument must
be from a specific BoxScore (rather than a DataFrame of non-consecutive
plays). That is, the DataFrame must be of the form returned by
:func:`nba.BoxScore.pbp <nba.BoxScore.pbp>`.
.. note:: Note that the lineups reflect the teams in the game when the play
happened, not after the play. For example, if a play is a substitution,
        the lineups for that play will be the lineups before the substitution
occurs.
:param df: A DataFrame of a game's play-by-play data.
:returns: A DataFrame with additional lineup columns.
"""
# TODO: add this precondition to documentation
assert df['boxscore_id'].nunique() == 1
def lineup_dict(aw_lineup, hm_lineup):
"""Returns a dictionary of lineups to be converted to columns.
Specifically, the columns are 'aw_player1' through 'aw_player5' and
'hm_player1' through 'hm_player5'.
:param aw_lineup: The away team's current lineup.
:param hm_lineup: The home team's current lineup.
:returns: A dictionary of lineups.
"""
return {
'{}_player{}'.format(tm, i+1): player
for tm, lineup in zip(['aw', 'hm'], [aw_lineup, hm_lineup])
for i, player in enumerate(lineup)
}
def handle_sub(row, aw_lineup, hm_lineup):
"""Modifies the aw_lineup and hm_lineup lists based on the substitution
that takes place in the given row."""
assert row['is_sub']
sub_lineup = hm_lineup if row['sub_team'] == row['home'] else aw_lineup
try:
# make the sub
idx = sub_lineup.index(row['sub_out'])
sub_lineup[idx] = row['sub_in']
except ValueError:
# if the sub was double-entered and it's already been executed...
if (
row['sub_in'] in sub_lineup
and row['sub_out'] not in sub_lineup
):
return aw_lineup, hm_lineup
# otherwise, let's print and pretend this never happened
print('ERROR IN SUB IN {}, Q{}, {}: {}'
.format(row['boxscore_id'], row['quarter'],
row['clock_time'], row['detail']))
raise
return aw_lineup, hm_lineup
per_starters = get_period_starters(df)
cur_qtr = 0
aw_lineup, hm_lineup = [], []
df = df.reset_index(drop=True)
lineups = [{} for _ in range(df.shape[0])]
# loop through select plays to determine lineups
sub_or_per_start = df.is_sub | df.quarter.diff().astype(bool)
for i, row in df.loc[sub_or_per_start].iterrows():
if row['quarter'] > cur_qtr:
# first row in a quarter
assert row['quarter'] == cur_qtr + 1
# first, finish up the last quarter's lineups
if cur_qtr > 0 and not df.loc[i-1, 'is_sub']:
lineups[i-1] = lineup_dict(aw_lineup, hm_lineup)
# then, move on to the quarter, and enter the starting lineups
cur_qtr += 1
aw_lineup, hm_lineup = map(list, per_starters[cur_qtr-1])
lineups[i] = lineup_dict(aw_lineup, hm_lineup)
# if the first play in the quarter is a sub, handle that
if row['is_sub']:
aw_lineup, hm_lineup = handle_sub(row, aw_lineup, hm_lineup)
else:
# during the quarter
# update lineups first then change lineups based on subs
lineups[i] = lineup_dict(aw_lineup, hm_lineup)
if row['is_sub']:
aw_lineup, hm_lineup = handle_sub(row, aw_lineup, hm_lineup)
# create and clean DataFrame
lineup_df = pd.DataFrame(lineups)
if lineup_df.iloc[-1].isnull().all():
lineup_df.iloc[-1] = lineup_dict(aw_lineup, hm_lineup)
lineup_df = lineup_df.groupby(df.quarter).fillna(method='bfill')
# fill in NaN's based on minutes played
bool_mat = lineup_df.isnull()
mask = bool_mat.any(axis=1)
if mask.any():
        bs = sportsref.nba.BoxScore(df.boxscore_id.iloc[0])
        # first, get the true minutes played from the box score
        stats = bs.basic_stats()
true_mp = pd.Series(
stats.query('mp > 0')[['player_id', 'mp']]
.set_index('player_id').to_dict()['mp']
) * 60
# next, calculate minutes played based on the lineup data
calc_mp = pd.Series(
{p: (df.secs_elapsed.diff() *
[p in row for row in lineup_df.values]).sum()
for p in stats.query('mp > 0').player_id.values})
# finally, figure which players are missing minutes
diff = true_mp - calc_mp
players_missing = diff.loc[diff.abs() >= 150]
hm_roster = bs.basic_stats().query('is_home == True').player_id.values
missing_df = pd.DataFrame(
{'secs': players_missing.values,
'is_home': players_missing.index.isin(hm_roster)},
index=players_missing.index
)
if missing_df.empty:
# TODO: log this as a warning (or error?)
print('There are NaNs in the lineup data, but no players were '
'found to be missing significant minutes')
else:
# import ipdb
# ipdb.set_trace()
for is_home, group in missing_df.groupby('is_home'):
player_id = group.index.item()
tm_cols = (sportsref.nba.pbp.HM_LINEUP_COLS if is_home else
sportsref.nba.pbp.AW_LINEUP_COLS)
row_mask = lineup_df[tm_cols].isnull().any(axis=1)
lineup_df.loc[row_mask, tm_cols] = (
lineup_df.loc[row_mask, tm_cols].fillna(player_id).values
)
return lineup_df
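
if __name__ == '__main__':
    # Tiny self-contained check (a sketch, not part of the library): the
    # sparse lineup helpers operate on "<player_id>_in" columns, so a toy
    # frame is enough to show which columns sparse_lineup_cols() picks up.
    _demo = pd.DataFrame({'curryst01_in': [1, 0], 'jamesle01_in': [-1, 0],
                          'quarter': [1, 1]})
    print(sparse_lineup_cols(_demo))  # the two player columns, not 'quarter'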
|
# Copyright 2012-2014 Brian May
#
# This file is part of python-tldap.
#
# python-tldap is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# python-tldap is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with python-tldap If not, see <http://www.gnu.org/licenses/>.
"""
This module contains a ``modifyModlist`` function adapted from
:py:mod:`ldap:ldap.modlist`.
"""
import ldap3
import ldap3.utils.conv
import tldap.helpers
from distutils.version import LooseVersion
def list_dict(l, case_insensitive=0):
"""
return a dictionary with all items of l being the keys of the dictionary
    If argument case_insensitive is non-zero, a case-insensitive dictionary
    (tldap.helpers.CaseInsensitiveDict) will be used for string keys
"""
if case_insensitive:
d = tldap.helpers.CaseInsensitiveDict()
else:
d = {}
for i in l:
d[i] = None
return d
if LooseVersion(getattr(ldap3, '__version__', "0")) < LooseVersion("0.9.6"):
def escape_list(bytes_list):
assert isinstance(bytes_list, list)
return [
ldap3.utils.conv.escape_bytes(bytes_value)
for bytes_value in bytes_list
]
else:
def escape_list(bytes_list):
assert isinstance(bytes_list, list)
return bytes_list
def addModlist(entry, ignore_attr_types=None):
"""Build modify list for call of method LDAPObject.add()"""
ignore_attr_types = list_dict(map(str.lower, (ignore_attr_types or [])))
modlist = {}
for attrtype in entry.keys():
if attrtype.lower() in ignore_attr_types:
# This attribute type is ignored
continue
for value in entry[attrtype]:
assert value is not None
if len(entry[attrtype]) > 0:
modlist[attrtype] = escape_list(entry[attrtype])
return modlist # addModlist()
def modifyModlist(
old_entry, new_entry, ignore_attr_types=None, ignore_oldexistent=0):
"""
Build differential modify list for calling LDAPObject.modify()/modify_s()
:param old_entry:
Dictionary holding the old entry
:param new_entry:
Dictionary holding what the new entry should be
:param ignore_attr_types:
List of attribute type names to be ignored completely
:param ignore_oldexistent:
If non-zero attribute type names which are in old_entry
but are not found in new_entry at all are not deleted.
This is handy for situations where your application
sets attribute value to '' for deleting an attribute.
In most cases leave zero.
:return: List of tuples suitable for
:py:meth:`ldap:ldap.LDAPObject.modify`.
This function is the same as :py:func:`ldap:ldap.modlist.modifyModlist`
except for the following changes:
    * MOD_DELETE/MOD_ADD used in preference to MOD_REPLACE when updating
an existing value.
"""
ignore_attr_types = list_dict(map(str.lower, (ignore_attr_types or [])))
modlist = {}
attrtype_lower_map = {}
for a in old_entry.keys():
attrtype_lower_map[a.lower()] = a
for attrtype in new_entry.keys():
attrtype_lower = attrtype.lower()
if attrtype_lower in ignore_attr_types:
# This attribute type is ignored
continue
        # Filter out None values
        new_value = list(filter(lambda x: x is not None, new_entry[attrtype]))
if attrtype_lower in attrtype_lower_map:
old_value = old_entry.get(attrtype_lower_map[attrtype_lower], [])
old_value = list(filter(lambda x: x is not None, old_value))
del attrtype_lower_map[attrtype_lower]
else:
old_value = []
if not old_value and new_value:
# Add a new attribute to entry
modlist[attrtype] = (ldap3.MODIFY_ADD, escape_list(new_value))
elif old_value and new_value:
# Replace existing attribute
old_value_dict = list_dict(old_value)
new_value_dict = list_dict(new_value)
delete_values = []
for v in old_value:
if v not in new_value_dict:
delete_values.append(v)
add_values = []
for v in new_value:
if v not in old_value_dict:
add_values.append(v)
if len(delete_values) > 0 or len(add_values) > 0:
modlist[attrtype] = (
ldap3.MODIFY_REPLACE, escape_list(new_value))
elif old_value and not new_value:
# Completely delete an existing attribute
modlist[attrtype] = (ldap3.MODIFY_DELETE, [])
if not ignore_oldexistent:
# Remove all attributes of old_entry which are not present
# in new_entry at all
for a in attrtype_lower_map.keys():
if a in ignore_attr_types:
# This attribute type is ignored
continue
attrtype = attrtype_lower_map[a]
modlist[attrtype] = (ldap3.MODIFY_DELETE, [])
return modlist # modifyModlist()
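
if __name__ == '__main__':
    # Minimal sketch (not part of the library): show the shape of the result
    # for a single changed attribute; the entry values are illustrative only.
    old = {'cn': [b'Alice'], 'mail': [b'alice@old.example']}
    new = {'cn': [b'Alice'], 'mail': [b'alice@new.example']}
    # Expect something like {'mail': (ldap3.MODIFY_REPLACE, [...])}; 'cn' is
    # unchanged, so it does not appear in the modlist at all.
    print(modifyModlist(old, new))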
|
from pymongo import MongoClient
class Connection(object):
def __init__(self):
super().__init__()
class MongoConnection(Connection):
"""Connection object for connecting to the mongodb database and retrieving data."""
def __init__(self, db, mongo_options={}):
super().__init__()
self.client = MongoClient(**mongo_options)
self.db = self.client[db]
    def query(self, d):
        # Note: this pops 'collection' out of the caller's dict, and returns
        # None when none of the recognised keys ('id', 'ids', 'all') is given.
        coll_name = d.pop('collection', None)
if coll_name is None:
raise Exception('Collection param not found in query.')
coll = self.db[coll_name]
if 'id' in d:
return coll.find_one(d)
if 'ids' in d.keys():
return list(coll.find({'id':{'$in':d['ids']}}))
if 'all' in d and d['all']:
return coll.find()
def __enter__(self):
"""For use with the "with" statement. Will create an open db connection.
:return: Client connection.
"""
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""For use with the "with" statement. Will disconnect from db connection.
:param exc_type:
:param exc_val:
:param exc_tb:
:return:
"""
self.client.close()
if __name__ == '__main__':
    # Example usage (the database and collection names here are placeholders):
    # requires a MongoDB server reachable with the default client options.
    with MongoConnection('test') as conn:
        print(conn.query({'collection': 'items', 'all': True}))
|
#!/usr/bin/env python
"""Script for testing the performance of pickling/unpickling.
This will pickle/unpickle several real world-representative objects a few
thousand times. The methodology below was chosen for was chosen to be similar
to real-world scenarios which operate on single objects at a time. Note that if
we did something like
pickle.dumps([dict(some_dict) for _ in xrange(10000)])
this isn't equivalent to dumping the dict 10000 times: pickle uses a
highly-efficient encoding for the n-1 following copies.
"""
__author__ = "[email protected] (Collin Winter)"
# Python imports
import datetime
import gc
import optparse
import random
import sys
import time
# Local imports
import util
gc.disable() # Minimize jitter.
DICT = {
'ads_flags': 0L,
'age': 18,
'birthday': datetime.date(1980, 5, 7),
'bulletin_count': 0L,
'comment_count': 0L,
'country': 'BR',
'encrypted_id': 'G9urXXAJwjE',
'favorite_count': 9L,
'first_name': '',
'flags': 412317970704L,
'friend_count': 0L,
'gender': 'm',
'gender_for_display': 'Male',
'id': 302935349L,
'is_custom_profile_icon': 0L,
'last_name': '',
'locale_preference': 'pt_BR',
'member': 0L,
'tags': ['a', 'b', 'c', 'd', 'e', 'f', 'g'],
'profile_foo_id': 827119638L,
'secure_encrypted_id': 'Z_xxx2dYx3t4YAdnmfgyKw',
'session_number': 2L,
'signup_id': '201-19225-223',
'status': 'A',
'theme': 1,
'time_created': 1225237014L,
'time_updated': 1233134493L,
'unread_message_count': 0L,
'user_group': '0',
'username': 'collinwinter',
'play_count': 9L,
'view_count': 7L,
'zip': ''}
TUPLE = ([265867233L, 265868503L, 265252341L, 265243910L, 265879514L,
266219766L, 266021701L, 265843726L, 265592821L, 265246784L,
265853180L, 45526486L, 265463699L, 265848143L, 265863062L,
265392591L, 265877490L, 265823665L, 265828884L, 265753032L], 60)
def mutate_dict(orig_dict, random_source):
new_dict = dict(orig_dict)
for key, value in new_dict.items():
rand_val = random_source.random() * sys.maxint
if isinstance(key, (int, long)):
new_dict[key] = long(rand_val)
elif isinstance(value, str):
new_dict[key] = str(rand_val)
elif isinstance(key, unicode):
new_dict[key] = unicode(rand_val)
return new_dict
random_source = random.Random(5) # Fixed seed.
DICT_GROUP = [mutate_dict(DICT, random_source) for _ in range(3)]
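# Worked illustration of the note in the module docstring (a hedged sketch):
# in something like
#     pickle.dumps([dict(DICT) for _ in xrange(100)], 2)
# the copies share their key and value objects, so after the first copy the
# repeated strings are emitted as short memo references rather than in full.
# Pickling DICT on its own, as the loops below do, avoids that effect, which
# is why this benchmark operates on single objects at a time.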
def test_pickle(num_obj_copies, pickle, options):
# Warm-up runs.
pickle.dumps(DICT, options.protocol)
pickle.dumps(TUPLE, options.protocol)
pickle.dumps(DICT_GROUP, options.protocol)
loops = num_obj_copies / 20 # We do 20 runs per loop.
times = []
for _ in xrange(options.num_runs):
t0 = time.time()
for _ in xrange(loops):
pickle.dumps(DICT, options.protocol)
pickle.dumps(DICT, options.protocol)
pickle.dumps(DICT, options.protocol)
pickle.dumps(DICT, options.protocol)
pickle.dumps(DICT, options.protocol)
pickle.dumps(DICT, options.protocol)
pickle.dumps(DICT, options.protocol)
pickle.dumps(DICT, options.protocol)
pickle.dumps(DICT, options.protocol)
pickle.dumps(DICT, options.protocol)
pickle.dumps(DICT, options.protocol)
pickle.dumps(DICT, options.protocol)
pickle.dumps(DICT, options.protocol)
pickle.dumps(DICT, options.protocol)
pickle.dumps(DICT, options.protocol)
pickle.dumps(DICT, options.protocol)
pickle.dumps(DICT, options.protocol)
pickle.dumps(DICT, options.protocol)
pickle.dumps(DICT, options.protocol)
pickle.dumps(DICT, options.protocol)
pickle.dumps(TUPLE, options.protocol)
pickle.dumps(TUPLE, options.protocol)
pickle.dumps(TUPLE, options.protocol)
pickle.dumps(TUPLE, options.protocol)
pickle.dumps(TUPLE, options.protocol)
pickle.dumps(TUPLE, options.protocol)
pickle.dumps(TUPLE, options.protocol)
pickle.dumps(TUPLE, options.protocol)
pickle.dumps(TUPLE, options.protocol)
pickle.dumps(TUPLE, options.protocol)
pickle.dumps(TUPLE, options.protocol)
pickle.dumps(TUPLE, options.protocol)
pickle.dumps(TUPLE, options.protocol)
pickle.dumps(TUPLE, options.protocol)
pickle.dumps(TUPLE, options.protocol)
pickle.dumps(TUPLE, options.protocol)
pickle.dumps(TUPLE, options.protocol)
pickle.dumps(TUPLE, options.protocol)
pickle.dumps(TUPLE, options.protocol)
pickle.dumps(TUPLE, options.protocol)
pickle.dumps(DICT_GROUP, options.protocol)
pickle.dumps(DICT_GROUP, options.protocol)
pickle.dumps(DICT_GROUP, options.protocol)
pickle.dumps(DICT_GROUP, options.protocol)
pickle.dumps(DICT_GROUP, options.protocol)
pickle.dumps(DICT_GROUP, options.protocol)
pickle.dumps(DICT_GROUP, options.protocol)
pickle.dumps(DICT_GROUP, options.protocol)
pickle.dumps(DICT_GROUP, options.protocol)
pickle.dumps(DICT_GROUP, options.protocol)
pickle.dumps(DICT_GROUP, options.protocol)
pickle.dumps(DICT_GROUP, options.protocol)
pickle.dumps(DICT_GROUP, options.protocol)
pickle.dumps(DICT_GROUP, options.protocol)
pickle.dumps(DICT_GROUP, options.protocol)
pickle.dumps(DICT_GROUP, options.protocol)
pickle.dumps(DICT_GROUP, options.protocol)
pickle.dumps(DICT_GROUP, options.protocol)
pickle.dumps(DICT_GROUP, options.protocol)
pickle.dumps(DICT_GROUP, options.protocol)
t1 = time.time()
times.append(t1 - t0)
return times
def test_unpickle(num_obj_copies, pickle, options):
pickled_dict = pickle.dumps(DICT, options.protocol)
pickled_tuple = pickle.dumps(TUPLE, options.protocol)
pickled_dict_group = pickle.dumps(DICT_GROUP, options.protocol)
# Warm-up runs.
pickle.loads(pickled_dict)
pickle.loads(pickled_tuple)
pickle.loads(pickled_dict_group)
loops = num_obj_copies / 20 # We do 20 runs per loop.
times = []
for _ in xrange(options.num_runs):
t0 = time.time()
for _ in xrange(loops):
pickle.loads(pickled_dict)
pickle.loads(pickled_dict)
pickle.loads(pickled_dict)
pickle.loads(pickled_dict)
pickle.loads(pickled_dict)
pickle.loads(pickled_dict)
pickle.loads(pickled_dict)
pickle.loads(pickled_dict)
pickle.loads(pickled_dict)
pickle.loads(pickled_dict)
pickle.loads(pickled_dict)
pickle.loads(pickled_dict)
pickle.loads(pickled_dict)
pickle.loads(pickled_dict)
pickle.loads(pickled_dict)
pickle.loads(pickled_dict)
pickle.loads(pickled_dict)
pickle.loads(pickled_dict)
pickle.loads(pickled_dict)
pickle.loads(pickled_dict)
pickle.loads(pickled_tuple)
pickle.loads(pickled_tuple)
pickle.loads(pickled_tuple)
pickle.loads(pickled_tuple)
pickle.loads(pickled_tuple)
pickle.loads(pickled_tuple)
pickle.loads(pickled_tuple)
pickle.loads(pickled_tuple)
pickle.loads(pickled_tuple)
pickle.loads(pickled_tuple)
pickle.loads(pickled_tuple)
pickle.loads(pickled_tuple)
pickle.loads(pickled_tuple)
pickle.loads(pickled_tuple)
pickle.loads(pickled_tuple)
pickle.loads(pickled_tuple)
pickle.loads(pickled_tuple)
pickle.loads(pickled_tuple)
pickle.loads(pickled_tuple)
pickle.loads(pickled_tuple)
pickle.loads(pickled_dict_group)
pickle.loads(pickled_dict_group)
pickle.loads(pickled_dict_group)
pickle.loads(pickled_dict_group)
pickle.loads(pickled_dict_group)
pickle.loads(pickled_dict_group)
pickle.loads(pickled_dict_group)
pickle.loads(pickled_dict_group)
pickle.loads(pickled_dict_group)
pickle.loads(pickled_dict_group)
pickle.loads(pickled_dict_group)
pickle.loads(pickled_dict_group)
pickle.loads(pickled_dict_group)
pickle.loads(pickled_dict_group)
pickle.loads(pickled_dict_group)
pickle.loads(pickled_dict_group)
pickle.loads(pickled_dict_group)
pickle.loads(pickled_dict_group)
pickle.loads(pickled_dict_group)
pickle.loads(pickled_dict_group)
t1 = time.time()
times.append(t1 - t0)
return times
LIST = [[range(10), range(10)] for _ in xrange(10)]
def test_pickle_list(loops, pickle, options):
# Warm-up runs.
pickle.dumps(LIST, options.protocol)
pickle.dumps(LIST, options.protocol)
loops = loops / 5 # Scale to compensate for the workload.
times = []
for _ in xrange(options.num_runs):
t0 = time.time()
for _ in xrange(loops):
pickle.dumps(LIST, options.protocol)
pickle.dumps(LIST, options.protocol)
pickle.dumps(LIST, options.protocol)
pickle.dumps(LIST, options.protocol)
pickle.dumps(LIST, options.protocol)
pickle.dumps(LIST, options.protocol)
pickle.dumps(LIST, options.protocol)
pickle.dumps(LIST, options.protocol)
pickle.dumps(LIST, options.protocol)
pickle.dumps(LIST, options.protocol)
t1 = time.time()
times.append(t1 - t0)
return times
def test_unpickle_list(loops, pickle, options):
pickled_list = pickle.dumps(LIST, options.protocol)
# Warm-up runs.
pickle.loads(pickled_list)
pickle.loads(pickled_list)
loops = loops / 5 # Scale to compensate for the workload.
times = []
for _ in xrange(options.num_runs):
t0 = time.time()
for _ in xrange(loops):
pickle.loads(pickled_list)
pickle.loads(pickled_list)
pickle.loads(pickled_list)
pickle.loads(pickled_list)
pickle.loads(pickled_list)
pickle.loads(pickled_list)
pickle.loads(pickled_list)
pickle.loads(pickled_list)
pickle.loads(pickled_list)
pickle.loads(pickled_list)
t1 = time.time()
times.append(t1 - t0)
return times
MICRO_DICT = dict((key, dict.fromkeys(range(10))) for key in xrange(100))
def test_pickle_dict(loops, pickle, options):
# Warm-up runs.
pickle.dumps(MICRO_DICT, options.protocol)
pickle.dumps(MICRO_DICT, options.protocol)
loops = max(1, loops / 10)
times = []
for _ in xrange(options.num_runs):
t0 = time.time()
for _ in xrange(loops):
pickle.dumps(MICRO_DICT, options.protocol)
pickle.dumps(MICRO_DICT, options.protocol)
pickle.dumps(MICRO_DICT, options.protocol)
pickle.dumps(MICRO_DICT, options.protocol)
pickle.dumps(MICRO_DICT, options.protocol)
t1 = time.time()
times.append(t1 - t0)
return times
if __name__ == "__main__":
parser = optparse.OptionParser(
usage="%prog [pickle|unpickle] [options]",
description=("Test the performance of pickling."))
parser.add_option("--use_cpickle", action="store_true",
help="Use the C version of pickle.")
parser.add_option("--protocol", action="store", default=2, type="int",
help="Which protocol to use (0, 1, 2).")
util.add_standard_options_to(parser)
options, args = parser.parse_args()
benchmarks = ["pickle", "unpickle", "pickle_list", "unpickle_list",
"pickle_dict"]
for bench_name in benchmarks:
if bench_name in args:
benchmark = globals()["test_" + bench_name]
break
else:
raise RuntimeError("Need to specify one of %s" % benchmarks)
if options.use_cpickle:
num_obj_copies = 8000
import cPickle as pickle
else:
num_obj_copies = 200
import pickle
if options.protocol > 0:
num_obj_copies *= 2 # Compensate for faster protocols.
util.run_benchmark(options, num_obj_copies, benchmark, pickle, options)
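# Illustrative invocation (a hedged sketch, not part of the original script):
# assuming this file is saved as bm_pickle.py next to the benchmark-suite
# `util` module it imports, a run could look like
#     python bm_pickle.py pickle --use_cpickle --protocol=2
# where the positional argument selects one of the benchmarks listed above and
# util.add_standard_options_to() supplies the remaining options, such as the
# number of runs.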
|
'''
Created on 01.10.2012
@author: vlkv
'''
import os
import PyQt4.QtGui as QtGui
import reggata
import reggata.helpers as helpers
import reggata.consts as consts
import reggata.statistics as stats
from reggata.helpers import show_exc_info
from reggata.consts import STATUSBAR_TIMEOUT
from reggata.errors import MsgException, LoginError
from reggata.ui.ui_aboutdialog import Ui_AboutDialog # TODO: gui imports should be removed from logic package!
from reggata.data.db_schema import User
from reggata.data.commands import SaveNewUserCommand, ChangeUserPasswordCommand
from reggata.data.repo_mgr import RepoMgr
from reggata.logic.ext_app_mgr import ExtAppMgr
from reggata.logic.handler_signals import HandlerSignals
from reggata.logic.worker_threads import ImportItemsThread
from reggata.logic.action_handlers import AbstractActionHandler
from reggata.gui.external_apps_dialog import ExternalAppsDialog
from reggata.gui.user_dialogs_facade import UserDialogsFacade
from reggata.gui.user_dialog import UserDialog
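# Most of the handlers below follow the same structure: check the required
# preconditions on the model (an open repository and/or a logged-in user),
# gather input from the user through a dialog facade, execute a command inside
# a unit of work against the repository, notify the GUI via handler signals,
# and finally report a usage event through the stats module. Any failure is
# caught and shown to the user with show_exc_info().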
class CreateUserActionHandler(AbstractActionHandler):
def __init__(self, model):
super(CreateUserActionHandler, self).__init__(model)
def handle(self):
try:
self._model.checkActiveRepoIsNotNone()
user = User()
dialogs = UserDialogsFacade()
if not dialogs.execUserDialog(
user=user, gui=self._model.gui, dialogMode=UserDialog.CREATE_MODE):
return
uow = self._model.repo.createUnitOfWork()
try:
uow.executeCommand(SaveNewUserCommand(user))
self._model.user = user
finally:
uow.close()
stats.sendEvent("main_window.create_user")
except Exception as ex:
show_exc_info(self._model.gui, ex)
class LoginUserActionHandler(AbstractActionHandler):
def __init__(self, model):
super(LoginUserActionHandler, self).__init__(model)
def handle(self):
try:
self._model.checkActiveRepoIsNotNone()
user = User()
dialogs = UserDialogsFacade()
if not dialogs.execUserDialog(
user=user, gui=self._model.gui, dialogMode=UserDialog.LOGIN_MODE):
return
self._model.loginUser(user.login, user.password)
stats.sendEvent("main_window.login_user")
except Exception as ex:
show_exc_info(self._model.gui, ex)
class LogoutUserActionHandler(AbstractActionHandler):
def __init__(self, model):
super(LogoutUserActionHandler, self).__init__(model)
def handle(self):
try:
self._model.user = None
stats.sendEvent("main_window.logout_user")
except Exception as ex:
show_exc_info(self._model.gui, ex)
class ChangeUserPasswordActionHandler(AbstractActionHandler):
def __init__(self, model):
super(ChangeUserPasswordActionHandler, self).__init__(model)
def handle(self):
try:
self._model.checkActiveRepoIsNotNone()
self._model.checkActiveUserIsNotNone()
user = self._model.user
dialogs = UserDialogsFacade()
dialogExecOk, newPasswordHash = \
dialogs.execChangeUserPasswordDialog(user=user, gui=self._model.gui)
if not dialogExecOk:
return
uow = self._model.repo.createUnitOfWork()
try:
command = ChangeUserPasswordCommand(user.login, newPasswordHash)
uow.executeCommand(command)
finally:
uow.close()
user.password = newPasswordHash
stats.sendEvent("main_window.change_user_password")
except Exception as ex:
show_exc_info(self._model.gui, ex)
else:
self._emitHandlerSignal(HandlerSignals.STATUS_BAR_MESSAGE,
self.tr("Operation completed."), STATUSBAR_TIMEOUT)
class CreateRepoActionHandler(AbstractActionHandler):
def __init__(self, model):
super(CreateRepoActionHandler, self).__init__(model)
def handle(self):
try:
dialogs = UserDialogsFacade()
basePath = dialogs.getExistingDirectory(
self._model.gui, self.tr("Choose a base path for new repository"))
if not basePath:
raise MsgException(
self.tr("You haven't chosen existent directory. Operation canceled."))
# QFileDialog returns forward slashes in windows! Because of this
# the path should be normalized
basePath = os.path.normpath(basePath)
self._model.repo = RepoMgr.createNewRepo(basePath)
self._model.user = self.__createDefaultUser()
stats.sendEvent("main_window.create_repo")
except Exception as ex:
show_exc_info(self._model.gui, ex)
def __createDefaultUser(self):
self._model.checkActiveRepoIsNotNone()
defaultLogin = consts.DEFAULT_USER_LOGIN
defaultPassword = helpers.computePasswordHash(consts.DEFAULT_USER_PASSWORD)
user = User(login=defaultLogin, password=defaultPassword)
uow = self._model.repo.createUnitOfWork()
try:
uow.executeCommand(SaveNewUserCommand(user))
finally:
uow.close()
return user
class CloseRepoActionHandler(AbstractActionHandler):
def __init__(self, model):
super(CloseRepoActionHandler, self).__init__(model)
def handle(self):
try:
self._model.checkActiveRepoIsNotNone()
self._model.repo = None
self._model.user = None
stats.sendEvent("main_window.close_repo")
except Exception as ex:
show_exc_info(self._model.gui, ex)
class OpenRepoActionHandler(AbstractActionHandler):
def __init__(self, model):
super(OpenRepoActionHandler, self).__init__(model)
def handle(self):
try:
dialogs = UserDialogsFacade()
basePath = dialogs.getExistingDirectory(
self._model.gui, self.tr("Choose a repository base path"))
if not basePath:
raise Exception(
self.tr("You haven't chosen existent directory. Operation canceled."))
#QFileDialog returns forward slashes in windows! Because of this path should be normalized
basePath = os.path.normpath(basePath)
self._model.repo = RepoMgr(basePath)
self._model.user = None
self._model.loginRecentUser()
stats.sendEvent("main_window.open_repo")
except LoginError:
self.__letUserLoginByHimself()
except Exception as ex:
show_exc_info(self._model.gui, ex)
def __letUserLoginByHimself(self):
user = User()
dialogs = UserDialogsFacade()
if not dialogs.execUserDialog(
user=user, gui=self._model.gui, dialogMode=UserDialog.LOGIN_MODE):
return
try:
self._model.loginUser(user.login, user.password)
except Exception as ex:
show_exc_info(self._model.gui, ex)
class AddCurrentRepoToFavoritesActionHandler(AbstractActionHandler):
def __init__(self, model, favoriteReposStorage):
super(AddCurrentRepoToFavoritesActionHandler, self).__init__(model)
self.__favoriteReposStorage = favoriteReposStorage
def handle(self):
try:
self._model.checkActiveRepoIsNotNone()
self._model.checkActiveUserIsNotNone()
repoBasePath = self._model.repo.base_path
userLogin = self._model.user.login
#TODO: Maybe ask user for a repoAlias...
self.__favoriteReposStorage.addRepoToFavorites(userLogin,
repoBasePath,
os.path.basename(repoBasePath))
self._emitHandlerSignal(HandlerSignals.STATUS_BAR_MESSAGE,
self.tr("Current repository saved in favorites list."), STATUSBAR_TIMEOUT)
self._emitHandlerSignal(HandlerSignals.LIST_OF_FAVORITE_REPOS_CHANGED)
stats.sendEvent("main_window.add_repo_to_favorites")
except Exception as ex:
show_exc_info(self._model.gui, ex)
class RemoveCurrentRepoFromFavoritesActionHandler(AbstractActionHandler):
def __init__(self, model, favoriteReposStorage):
super(RemoveCurrentRepoFromFavoritesActionHandler, self).__init__(model)
self.__favoriteReposStorage = favoriteReposStorage
def handle(self):
try:
self._model.checkActiveRepoIsNotNone()
self._model.checkActiveUserIsNotNone()
repoBasePath = self._model.repo.base_path
userLogin = self._model.user.login
self.__favoriteReposStorage.removeRepoFromFavorites(userLogin, repoBasePath)
self._emitHandlerSignal(HandlerSignals.STATUS_BAR_MESSAGE,
self.tr("Current repository removed from favorites list."), STATUSBAR_TIMEOUT)
self._emitHandlerSignal(HandlerSignals.LIST_OF_FAVORITE_REPOS_CHANGED)
stats.sendEvent("main_window.remove_repo_from_favorites")
except Exception as ex:
show_exc_info(self._model.gui, ex)
class ImportItemsActionHandler(AbstractActionHandler):
'''
Imports previously exported items.
'''
def __init__(self, model, dialogs):
super(ImportItemsActionHandler, self).__init__(model)
self._dialogs = dialogs
def handle(self):
try:
self._model.checkActiveRepoIsNotNone()
self._model.checkActiveUserIsNotNone()
importFromFilename = self._dialogs.getOpenFileName(
self._model.gui,
self.tr("Open Reggata Archive File"),
self.tr("Reggata Archive File (*.raf)"))
if not importFromFilename:
raise MsgException(self.tr("You haven't chosen a file. Operation canceled."))
thread = ImportItemsThread(self, self._model.repo, importFromFilename,
self._model.user.login)
self._dialogs.startThreadWithWaitDialog(thread, self._model.gui, indeterminate=False)
self._emitHandlerSignal(HandlerSignals.ITEM_CREATED)
#TODO: display information about how many items were imported
self._emitHandlerSignal(HandlerSignals.STATUS_BAR_MESSAGE,
self.tr("Operation completed."), STATUSBAR_TIMEOUT)
stats.sendEvent("main_window.import_items")
except Exception as ex:
show_exc_info(self._model.gui, ex)
class ExitReggataActionHandler(AbstractActionHandler):
def __init__(self, tool):
super(ExitReggataActionHandler, self).__init__(tool)
def handle(self):
try:
self._tool.gui.close()
stats.sendEvent("main_window.exit_reggata")
except Exception as ex:
show_exc_info(self._tool.gui, ex)
class ManageExternalAppsActionHandler(AbstractActionHandler):
def __init__(self, model, dialogs):
super(ManageExternalAppsActionHandler, self).__init__(model)
self._dialogs = dialogs
def handle(self):
try:
extAppMgrState = ExtAppMgr.readCurrentState()
dialog = ExternalAppsDialog(self._model.gui, extAppMgrState, self._dialogs)
if dialog.exec_() != QtGui.QDialog.Accepted:
return
ExtAppMgr.setCurrentState(dialog.extAppMgrState())
self._emitHandlerSignal(HandlerSignals.REGGATA_CONF_CHANGED)
self._emitHandlerSignal(HandlerSignals.STATUS_BAR_MESSAGE,
self.tr("Operation completed."), STATUSBAR_TIMEOUT)
stats.sendEvent("main_window.manage_external_apps")
except Exception as ex:
show_exc_info(self._model.gui, ex)
class ShowAboutDialogActionHandler(AbstractActionHandler):
def __init__(self, model):
super(ShowAboutDialogActionHandler, self).__init__(model)
def handle(self):
try:
ad = AboutDialog(self._model.gui)
ad.exec_()
stats.sendEvent("main_window.show_about_dialog")
except Exception as ex:
show_exc_info(self._model.gui, ex)
else:
self._emitHandlerSignal(HandlerSignals.STATUS_BAR_MESSAGE,
self.tr("Operation completed."), STATUSBAR_TIMEOUT)
about_message = \
'''
<h1>Reggata</h1>
<p>Version: {0}</p>
<p>Reggata is a tagging system for local files.</p>
<p>Copyright 2012 Vitaly Volkov, <font color="blue">[email protected]</font></p>
<p>Home page: <font color="blue">http://github.com/vlkv/reggata</font></p>
<p>Reggata is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
</p>
<p>Reggata is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
</p>
<p>You should have received a copy of the GNU General Public License
along with Reggata. If not, see <font color="blue">http://www.gnu.org/licenses</font>.
</p>
'''.format(reggata.__version__)
class AboutDialog(QtGui.QDialog):
def __init__(self, parent=None):
super(AboutDialog, self).__init__(parent)
self.ui = Ui_AboutDialog()
self.ui.setupUi(self)
self.ui.textEdit.setHtml(about_message)
class OpenFavoriteRepoActionHandler(AbstractActionHandler):
def __init__(self, model):
super(OpenFavoriteRepoActionHandler, self).__init__(model)
def handle(self):
try:
action = self.sender()
repoBasePath = action.repoBasePath
currentUser = self._model.user
assert currentUser is not None
self._model.repo = RepoMgr(repoBasePath)
try:
self._model.loginUser(currentUser.login, currentUser.password)
self._emitHandlerSignal(HandlerSignals.STATUS_BAR_MESSAGE,
self.tr("Repository opened. Login succeded."), STATUSBAR_TIMEOUT)
except LoginError:
self._model.user = None
self._emitHandlerSignal(HandlerSignals.STATUS_BAR_MESSAGE,
self.tr("Repository opened. Login failed."), STATUSBAR_TIMEOUT)
stats.sendEvent("main_window.open_favorite_repo")
except Exception as ex:
show_exc_info(self._model.gui, ex)
|
"""
Form classes
"""
from __future__ import unicode_literals
from collections import OrderedDict
import copy
import warnings
from django.core.exceptions import ValidationError
from django.forms.fields import Field, FileField
from django.forms.util import flatatt, ErrorDict, ErrorList
from django.forms.widgets import Media, media_property, TextInput, Textarea
from django.utils.html import conditional_escape, format_html
from django.utils.encoding import smart_text, force_text, python_2_unicode_compatible
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from django.utils import six
__all__ = ('BaseForm', 'Form')
NON_FIELD_ERRORS = '__all__'
def pretty_name(name):
"""Converts 'first_name' to 'First name'"""
if not name:
return ''
return name.replace('_', ' ').capitalize()
def get_declared_fields(bases, attrs, with_base_fields=True):
"""
Create a list of form field instances from the passed in 'attrs', plus any
similar fields on the base classes (in 'bases'). This is used by both the
Form and ModelForm metaclasses.
If 'with_base_fields' is True, all fields from the bases are used.
Otherwise, only fields in the 'declared_fields' attribute on the bases are
used. The distinction is useful in ModelForm subclassing.
Also integrates any additional media definitions.
"""
fields = [(field_name, attrs.pop(field_name)) for field_name, obj in list(six.iteritems(attrs)) if isinstance(obj, Field)]
fields.sort(key=lambda x: x[1].creation_counter)
# If this class is subclassing another Form, add that Form's fields.
# Note that we loop over the bases in *reverse*. This is necessary in
# order to preserve the correct order of fields.
if with_base_fields:
for base in bases[::-1]:
if hasattr(base, 'base_fields'):
fields = list(six.iteritems(base.base_fields)) + fields
else:
for base in bases[::-1]:
if hasattr(base, 'declared_fields'):
fields = list(six.iteritems(base.declared_fields)) + fields
return OrderedDict(fields)
class DeclarativeFieldsMetaclass(type):
"""
Metaclass that converts Field attributes to a dictionary called
'base_fields', taking into account parent class 'base_fields' as well.
"""
def __new__(cls, name, bases, attrs):
attrs['base_fields'] = get_declared_fields(bases, attrs)
new_class = super(DeclarativeFieldsMetaclass,
cls).__new__(cls, name, bases, attrs)
if 'media' not in attrs:
new_class.media = media_property(new_class)
return new_class
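# Illustrative note (not part of the original module): with the metaclass
# above, a declaratively defined form such as
#     class ContactForm(Form):
#         subject = CharField()                  # e.g. django.forms.fields.CharField
#         message = CharField(widget=Textarea)
# ends up with ContactForm.base_fields being an OrderedDict that maps
# 'subject' and 'message' to their Field instances, ordered by each field's
# creation_counter; BaseForm instances then deep-copy this into self.fields.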
@python_2_unicode_compatible
class BaseForm(object):
# This is the main implementation of all the Form logic. Note that this
# class is different than Form. See the comments by the Form class for more
# information. Any improvements to the form API should be made to *this*
# class, not to the Form class.
def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
initial=None, error_class=ErrorList, label_suffix=None,
empty_permitted=False):
self.is_bound = data is not None or files is not None
self.data = data or {}
self.files = files or {}
self.auto_id = auto_id
self.prefix = prefix
self.initial = initial or {}
self.error_class = error_class
# Translators: This is the default suffix added to form field labels
self.label_suffix = label_suffix if label_suffix is not None else _(':')
self.empty_permitted = empty_permitted
self._errors = None # Stores the errors after clean() has been called.
self._changed_data = None
# The base_fields class attribute is the *class-wide* definition of
# fields. Because a particular *instance* of the class might want to
# alter self.fields, we create self.fields here by copying base_fields.
# Instances should always modify self.fields; they should not modify
# self.base_fields.
self.fields = copy.deepcopy(self.base_fields)
def __str__(self):
return self.as_table()
def __iter__(self):
for name in self.fields:
yield self[name]
def __getitem__(self, name):
"Returns a BoundField with the given name."
try:
field = self.fields[name]
except KeyError:
raise KeyError('Key %r not found in Form' % name)
return BoundField(self, field, name)
@property
def errors(self):
"Returns an ErrorDict for the data provided for the form"
if self._errors is None:
self.full_clean()
return self._errors
def is_valid(self):
"""
Returns True if the form has no errors. Otherwise, False. If errors are
being ignored, returns False.
"""
return self.is_bound and not bool(self.errors)
def add_prefix(self, field_name):
"""
Returns the field name with a prefix appended, if this Form has a
prefix set.
Subclasses may wish to override.
"""
return '%s-%s' % (self.prefix, field_name) if self.prefix else field_name
def add_initial_prefix(self, field_name):
"""
        Add an 'initial' prefix for checking dynamic initial values
"""
return 'initial-%s' % self.add_prefix(field_name)
def _html_output(self, normal_row, error_row, row_ender, help_text_html, errors_on_separate_row):
"Helper function for outputting HTML. Used by as_table(), as_ul(), as_p()."
top_errors = self.non_field_errors() # Errors that should be displayed above all fields.
output, hidden_fields = [], []
for name, field in self.fields.items():
html_class_attr = ''
bf = self[name]
# Escape and cache in local variable.
bf_errors = self.error_class([conditional_escape(error) for error in bf.errors])
if bf.is_hidden:
if bf_errors:
top_errors.extend(
[_('(Hidden field %(name)s) %(error)s') % {'name': name, 'error': force_text(e)}
for e in bf_errors])
hidden_fields.append(six.text_type(bf))
else:
# Create a 'class="..."' atribute if the row should have any
# CSS classes applied.
css_classes = bf.css_classes()
if css_classes:
html_class_attr = ' class="%s"' % css_classes
if errors_on_separate_row and bf_errors:
output.append(error_row % force_text(bf_errors))
if bf.label:
label = conditional_escape(force_text(bf.label))
label = bf.label_tag(label) or ''
else:
label = ''
if field.help_text:
help_text = help_text_html % force_text(field.help_text)
else:
help_text = ''
output.append(normal_row % {
'errors': force_text(bf_errors),
'label': force_text(label),
'field': six.text_type(bf),
'help_text': help_text,
'html_class_attr': html_class_attr
})
if top_errors:
output.insert(0, error_row % force_text(top_errors))
if hidden_fields: # Insert any hidden fields in the last row.
str_hidden = ''.join(hidden_fields)
if output:
last_row = output[-1]
# Chop off the trailing row_ender (e.g. '</td></tr>') and
# insert the hidden fields.
if not last_row.endswith(row_ender):
# This can happen in the as_p() case (and possibly others
# that users write): if there are only top errors, we may
# not be able to conscript the last row for our purposes,
# so insert a new, empty row.
last_row = (normal_row % {'errors': '', 'label': '',
'field': '', 'help_text':'',
'html_class_attr': html_class_attr})
output.append(last_row)
output[-1] = last_row[:-len(row_ender)] + str_hidden + row_ender
else:
# If there aren't any rows in the output, just append the
# hidden fields.
output.append(str_hidden)
return mark_safe('\n'.join(output))
def as_table(self):
"Returns this form rendered as HTML <tr>s -- excluding the <table></table>."
return self._html_output(
normal_row = '<tr%(html_class_attr)s><th>%(label)s</th><td>%(errors)s%(field)s%(help_text)s</td></tr>',
error_row = '<tr><td colspan="2">%s</td></tr>',
row_ender = '</td></tr>',
help_text_html = '<br /><span class="helptext">%s</span>',
errors_on_separate_row = False)
def as_ul(self):
"Returns this form rendered as HTML <li>s -- excluding the <ul></ul>."
return self._html_output(
normal_row = '<li%(html_class_attr)s>%(errors)s%(label)s %(field)s%(help_text)s</li>',
error_row = '<li>%s</li>',
row_ender = '</li>',
help_text_html = ' <span class="helptext">%s</span>',
errors_on_separate_row = False)
def as_p(self):
"Returns this form rendered as HTML <p>s."
return self._html_output(
normal_row = '<p%(html_class_attr)s>%(label)s %(field)s%(help_text)s</p>',
error_row = '%s',
row_ender = '</p>',
help_text_html = ' <span class="helptext">%s</span>',
errors_on_separate_row = True)
def non_field_errors(self):
"""
Returns an ErrorList of errors that aren't associated with a particular
field -- i.e., from Form.clean(). Returns an empty ErrorList if there
are none.
"""
return self.errors.get(NON_FIELD_ERRORS, self.error_class())
def _raw_value(self, fieldname):
"""
Returns the raw_value for a particular field name. This is just a
convenient wrapper around widget.value_from_datadict.
"""
field = self.fields[fieldname]
prefix = self.add_prefix(fieldname)
return field.widget.value_from_datadict(self.data, self.files, prefix)
def full_clean(self):
"""
Cleans all of self.data and populates self._errors and
self.cleaned_data.
"""
self._errors = ErrorDict()
if not self.is_bound: # Stop further processing.
return
self.cleaned_data = {}
# If the form is permitted to be empty, and none of the form data has
# changed from the initial data, short circuit any validation.
if self.empty_permitted and not self.has_changed():
return
self._clean_fields()
self._clean_form()
self._post_clean()
def _clean_fields(self):
for name, field in self.fields.items():
# value_from_datadict() gets the data from the data dictionaries.
# Each widget type knows how to retrieve its own data, because some
# widgets split data over several HTML fields.
value = field.widget.value_from_datadict(self.data, self.files, self.add_prefix(name))
try:
if isinstance(field, FileField):
initial = self.initial.get(name, field.initial)
value = field.clean(value, initial)
else:
value = field.clean(value)
self.cleaned_data[name] = value
if hasattr(self, 'clean_%s' % name):
value = getattr(self, 'clean_%s' % name)()
self.cleaned_data[name] = value
except ValidationError as e:
self._errors[name] = self.error_class(e.messages)
if name in self.cleaned_data:
del self.cleaned_data[name]
def _clean_form(self):
try:
cleaned_data = self.clean()
except ValidationError as e:
self._errors[NON_FIELD_ERRORS] = self.error_class(e.messages)
else:
if cleaned_data is not None:
self.cleaned_data = cleaned_data
def _post_clean(self):
"""
An internal hook for performing additional cleaning after form cleaning
is complete. Used for model validation in model forms.
"""
pass
def clean(self):
"""
        Hook for doing any extra form-wide cleaning after Field.clean() has been
called on every field. Any ValidationError raised by this method will
not be associated with a particular field; it will have a special-case
association with the field named '__all__'.
"""
return self.cleaned_data
def has_changed(self):
"""
Returns True if data differs from initial.
"""
return bool(self.changed_data)
@property
def changed_data(self):
if self._changed_data is None:
self._changed_data = []
# XXX: For now we're asking the individual widgets whether or not the
# data has changed. It would probably be more efficient to hash the
# initial data, store it in a hidden field, and compare a hash of the
# submitted data, but we'd need a way to easily get the string value
# for a given field. Right now, that logic is embedded in the render
# method of each widget.
for name, field in self.fields.items():
prefixed_name = self.add_prefix(name)
data_value = field.widget.value_from_datadict(self.data, self.files, prefixed_name)
if not field.show_hidden_initial:
initial_value = self.initial.get(name, field.initial)
if callable(initial_value):
initial_value = initial_value()
else:
initial_prefixed_name = self.add_initial_prefix(name)
hidden_widget = field.hidden_widget()
try:
initial_value = field.to_python(hidden_widget.value_from_datadict(
self.data, self.files, initial_prefixed_name))
except ValidationError:
# Always assume data has changed if validation fails.
self._changed_data.append(name)
continue
if hasattr(field.widget, '_has_changed'):
warnings.warn("The _has_changed method on widgets is deprecated,"
" define it at field level instead.",
DeprecationWarning, stacklevel=2)
if field.widget._has_changed(initial_value, data_value):
self._changed_data.append(name)
elif field._has_changed(initial_value, data_value):
self._changed_data.append(name)
return self._changed_data
@property
def media(self):
"""
Provide a description of all media required to render the widgets on this form
"""
media = Media()
for field in self.fields.values():
media = media + field.widget.media
return media
def is_multipart(self):
"""
Returns True if the form needs to be multipart-encoded, i.e. it has
FileInput. Otherwise, False.
"""
for field in self.fields.values():
if field.widget.needs_multipart_form:
return True
return False
def hidden_fields(self):
"""
Returns a list of all the BoundField objects that are hidden fields.
Useful for manual form layout in templates.
"""
return [field for field in self if field.is_hidden]
def visible_fields(self):
"""
Returns a list of BoundField objects that aren't hidden fields.
The opposite of the hidden_fields() method.
"""
return [field for field in self if not field.is_hidden]
class Form(six.with_metaclass(DeclarativeFieldsMetaclass, BaseForm)):
"A collection of Fields, plus their associated data."
# This is a separate class from BaseForm in order to abstract the way
# self.fields is specified. This class (Form) is the one that does the
# fancy metaclass stuff purely for the semantic sugar -- it allows one
# to define a form using declarative syntax.
# BaseForm itself has no way of designating self.fields.
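# Illustrative usage (a sketch, not part of the original module): the typical
# bound/unbound cycle with any Form subclass looks like
#     form = ContactForm(request.POST)   # bound form
#     if form.is_valid():                # triggers full_clean() lazily
#         handle(form.cleaned_data)
#     else:
#         print(form.errors)             # ErrorDict keyed by field name
# while ContactForm() with no arguments yields an unbound form that can only
# be rendered, not validated.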
@python_2_unicode_compatible
class BoundField(object):
"A Field plus data"
def __init__(self, form, field, name):
self.form = form
self.field = field
self.name = name
self.html_name = form.add_prefix(name)
self.html_initial_name = form.add_initial_prefix(name)
self.html_initial_id = form.add_initial_prefix(self.auto_id)
if self.field.label is None:
self.label = pretty_name(name)
else:
self.label = self.field.label
self.help_text = field.help_text or ''
def __str__(self):
"""Renders this field as an HTML widget."""
if self.field.show_hidden_initial:
return self.as_widget() + self.as_hidden(only_initial=True)
return self.as_widget()
def __iter__(self):
"""
Yields rendered strings that comprise all widgets in this BoundField.
This really is only useful for RadioSelect widgets, so that you can
iterate over individual radio buttons in a template.
"""
id_ = self.field.widget.attrs.get('id') or self.auto_id
attrs = {'id': id_} if id_ else {}
for subwidget in self.field.widget.subwidgets(self.html_name, self.value(), attrs):
yield subwidget
def __len__(self):
return len(list(self.__iter__()))
def __getitem__(self, idx):
return list(self.__iter__())[idx]
@property
def errors(self):
"""
Returns an ErrorList for this field. Returns an empty ErrorList
if there are none.
"""
return self.form.errors.get(self.name, self.form.error_class())
def as_widget(self, widget=None, attrs=None, only_initial=False):
"""
Renders the field by rendering the passed widget, adding any HTML
attributes passed as attrs. If no widget is specified, then the
field's default widget will be used.
"""
if not widget:
widget = self.field.widget
attrs = attrs or {}
auto_id = self.auto_id
if auto_id and 'id' not in attrs and 'id' not in widget.attrs:
if not only_initial:
attrs['id'] = auto_id
else:
attrs['id'] = self.html_initial_id
if not only_initial:
name = self.html_name
else:
name = self.html_initial_name
return widget.render(name, self.value(), attrs=attrs)
def as_text(self, attrs=None, **kwargs):
"""
Returns a string of HTML for representing this as an <input type="text">.
"""
return self.as_widget(TextInput(), attrs, **kwargs)
def as_textarea(self, attrs=None, **kwargs):
"Returns a string of HTML for representing this as a <textarea>."
return self.as_widget(Textarea(), attrs, **kwargs)
def as_hidden(self, attrs=None, **kwargs):
"""
Returns a string of HTML for representing this as an <input type="hidden">.
"""
return self.as_widget(self.field.hidden_widget(), attrs, **kwargs)
@property
def data(self):
"""
Returns the data for this BoundField, or None if it wasn't given.
"""
return self.field.widget.value_from_datadict(self.form.data, self.form.files, self.html_name)
def value(self):
"""
Returns the value for this BoundField, using the initial value if
the form is not bound or the data otherwise.
"""
if not self.form.is_bound:
data = self.form.initial.get(self.name, self.field.initial)
if callable(data):
data = data()
else:
data = self.field.bound_data(
self.data, self.form.initial.get(self.name, self.field.initial)
)
return self.field.prepare_value(data)
def label_tag(self, contents=None, attrs=None, label_suffix=None):
"""
Wraps the given contents in a <label>, if the field has an ID attribute.
contents should be 'mark_safe'd to avoid HTML escaping. If contents
aren't given, uses the field's HTML-escaped label.
If attrs are given, they're used as HTML attributes on the <label> tag.
label_suffix allows overriding the form's label_suffix.
"""
contents = contents or self.label
# Only add the suffix if the label does not end in punctuation.
label_suffix = label_suffix if label_suffix is not None else self.form.label_suffix
# Translators: If found as last label character, these punctuation
        # characters will prevent the default label_suffix from being appended to the label
if label_suffix and contents and contents[-1] not in _(':?.!'):
contents = format_html('{0}{1}', contents, label_suffix)
widget = self.field.widget
id_ = widget.attrs.get('id') or self.auto_id
if id_:
id_for_label = widget.id_for_label(id_)
if id_for_label:
attrs = dict(attrs or {}, **{'for': id_for_label})
attrs = flatatt(attrs) if attrs else ''
contents = format_html('<label{0}>{1}</label>', attrs, contents)
else:
contents = conditional_escape(contents)
return mark_safe(contents)
def css_classes(self, extra_classes=None):
"""
Returns a string of space-separated CSS classes for this field.
"""
if hasattr(extra_classes, 'split'):
extra_classes = extra_classes.split()
extra_classes = set(extra_classes or [])
if self.errors and hasattr(self.form, 'error_css_class'):
extra_classes.add(self.form.error_css_class)
if self.field.required and hasattr(self.form, 'required_css_class'):
extra_classes.add(self.form.required_css_class)
return ' '.join(extra_classes)
@property
def is_hidden(self):
"Returns True if this BoundField's widget is hidden."
return self.field.widget.is_hidden
@property
def auto_id(self):
"""
Calculates and returns the ID attribute for this BoundField, if the
associated Form has specified auto_id. Returns an empty string otherwise.
"""
auto_id = self.form.auto_id
if auto_id and '%s' in smart_text(auto_id):
return smart_text(auto_id) % self.html_name
elif auto_id:
return self.html_name
return ''
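    # For example, with the default auto_id='id_%s' and a field named 'subject'
    # on a form without a prefix, the property above evaluates to 'id_subject'.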
@property
def id_for_label(self):
"""
Wrapper around the field widget's `id_for_label` method.
Useful, for example, for focusing on this field regardless of whether
        it has a single widget or a MultiWidget.
"""
widget = self.field.widget
id_ = widget.attrs.get('id') or self.auto_id
return widget.id_for_label(id_)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Radim Rehurek <[email protected]>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
#
# Parts of the LDA inference code come from Dr. Hoffman's `onlineldavb.py` script,
# (C) 2010 Matthew D. Hoffman, GNU GPL 3.0
"""
Latent Dirichlet Allocation (LDA) in Python.
This module allows both LDA model estimation from a training corpus and inference of topic
distribution on new, unseen documents. The model can also be updated with new documents
for online training.
The core estimation code is based on the `onlineldavb.py` script by M. Hoffman [1]_, see
**Hoffman, Blei, Bach: Online Learning for Latent Dirichlet Allocation, NIPS 2010.**
The algorithm:
* is **streamed**: training documents may come in sequentially, no random access required,
* runs in **constant memory** w.r.t. the number of documents: size of the
training corpus does not affect memory footprint, can process corpora larger than RAM, and
* is **distributed**: makes use of a cluster of machines, if available, to
speed up model estimation.
.. [1] http://www.cs.princeton.edu/~mdhoffma
"""
import logging
import itertools
logger = logging.getLogger('gensim.models.ldamodel')
import numpy # for arrays, array broadcasting etc.
#numpy.seterr(divide='ignore') # ignore 0*log(0) errors
from scipy.special import gammaln, digamma, psi # gamma function utils
from scipy.special import gamma as gammafunc
from scipy.special import polygamma
try:
from scipy.maxentropy import logsumexp # log(sum(exp(x))) that tries to avoid overflow
except ImportError: # scipy.maxentropy was removed in newer scipy releases
from scipy.misc import logsumexp
from gensim import interfaces, utils
from six.moves import xrange
def dirichlet_expectation(alpha):
"""
For a vector `theta~Dir(alpha)`, compute `E[log(theta)]`.
"""
if (len(alpha.shape) == 1):
result = psi(alpha) - psi(numpy.sum(alpha))
else:
result = psi(alpha) - psi(numpy.sum(alpha, 1))[:, numpy.newaxis]
return result.astype(alpha.dtype) # keep the same precision as input
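# The identity used above: for theta ~ Dirichlet(alpha),
#     E[log theta_k] = psi(alpha_k) - psi(sum_j alpha_j),
# where psi is the digamma function. As a quick sanity check (illustrative,
# not part of the original module): psi(1) - psi(2) = -1.0 exactly, so
# dirichlet_expectation(numpy.array([1.0, 1.0])) returns array([-1., -1.]).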
class LdaState(utils.SaveLoad):
"""
Encapsulate information for distributed computation of LdaModel objects.
Objects of this class are sent over the network, so try to keep them lean to
reduce traffic.
"""
def __init__(self, eta, shape):
self.eta = eta
self.sstats = numpy.zeros(shape)
self.numdocs = 0
def reset(self):
"""
Prepare the state for a new EM iteration (reset sufficient stats).
"""
self.sstats[:] = 0.0
self.numdocs = 0
def merge(self, other):
"""
Merge the result of an E step from one node with that of another node
(summing up sufficient statistics).
The merging is trivial and after merging all cluster nodes, we have the
exact same result as if the computation was run on a single node (no
approximation).
"""
assert other is not None
self.sstats += other.sstats
self.numdocs += other.numdocs
def blend(self, rhot, other, targetsize=None):
"""
Given LdaState `other`, merge it with the current state. Stretch both to
`targetsize` documents before merging, so that they are of comparable
magnitude.
Merging is done by average weighting: in the extremes, `rhot=0.0` means
`other` is completely ignored; `rhot=1.0` means `self` is completely ignored.
This procedure corresponds to the stochastic gradient update from Hoffman
et al., algorithm 2 (eq. 14).
"""
assert other is not None
if targetsize is None:
targetsize = self.numdocs
# stretch the current model's expected n*phi counts to target size
if self.numdocs == 0 or targetsize == self.numdocs:
scale = 1.0
else:
scale = 1.0 * targetsize / self.numdocs
self.sstats *= (1.0 - rhot) * scale
# stretch the incoming n*phi counts to target size
if other.numdocs == 0 or targetsize == other.numdocs:
scale = 1.0
else:
logger.info("merging changes from %i documents into a model of %i documents" %
(other.numdocs, targetsize))
scale = 1.0 * targetsize / other.numdocs
self.sstats += rhot * scale * other.sstats
self.numdocs = targetsize
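    # In other words, blend() performs the weighted update
    #     sstats_new = (1 - rhot) * scale_self * sstats_self
    #                  + rhot * scale_other * sstats_other,
    # where each scale factor stretches the respective counts to `targetsize`
    # documents before the two states are averaged.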
def blend2(self, rhot, other, targetsize=None):
"""
        Alternative, simpler blend.
"""
assert other is not None
if targetsize is None:
targetsize = self.numdocs
# merge the two matrices by summing
self.sstats += other.sstats
self.numdocs = targetsize
def get_lambda(self):
return self.eta + self.sstats
def get_Elogbeta(self):
return dirichlet_expectation(self.get_lambda())
#endclass LdaState
class LdaModel(interfaces.TransformationABC):
"""
The constructor estimates Latent Dirichlet Allocation model parameters based
on a training corpus:
>>> lda = LdaModel(corpus, num_topics=10)
You can then infer topic distributions on new, unseen documents, with
>>> doc_lda = lda[doc_bow]
The model can be updated (trained) with new documents via
>>> lda.update(other_corpus)
Model persistency is achieved through its `load`/`save` methods.
"""
def __init__(self, corpus=None, num_topics=100, id2word=None, distributed=False,
chunksize=2000, passes=1, update_every=1, alpha='symmetric', eta=None, decay=0.5,
eval_every=10, iterations=50, gamma_threshold=0.001):
"""
If given, start training from the iterable `corpus` straight away. If not given,
the model is left untrained (presumably because you want to call `update()` manually).
`num_topics` is the number of requested latent topics to be extracted from
the training corpus.
`id2word` is a mapping from word ids (integers) to words (strings). It is
used to determine the vocabulary size, as well as for debugging and topic
printing.
`alpha` and `eta` are hyperparameters that affect sparsity of the document-topic
(theta) and topic-word (lambda) distributions. Both default to a symmetric
1.0/num_topics prior.
        `alpha` can also be set to an explicit array, i.e. a prior of your choice. It also
        supports the special values 'asymmetric' and 'auto': the former uses a fixed
normalized asymmetric 1.0/topicno prior, the latter learns an asymmetric
prior directly from your data.
Turn on `distributed` to force distributed computing (see the `web tutorial <http://radimrehurek.com/gensim/distributed.html>`_
on how to set up a cluster of machines for gensim).
Calculate and log perplexity estimate from the latest mini-batch every
`eval_every` model updates (setting this to 1 slows down training ~2x;
default is 10 for better performance). Set to None to disable perplexity estimation.
Example:
>>> lda = LdaModel(corpus, num_topics=100) # train model
>>> print(lda[doc_bow]) # get topic probability distribution for a document
>>> lda.update(corpus2) # update the LDA model with additional documents
>>> print(lda[doc_bow])
>>> lda = LdaModel(corpus, num_topics=50, alpha='auto', eval_every=5) # train asymmetric alpha from data
"""
# store user-supplied parameters
self.id2word = id2word
if corpus is None and self.id2word is None:
raise ValueError('at least one of corpus/id2word must be specified, to establish input space dimensionality')
if self.id2word is None:
logger.warning("no word id mapping provided; initializing from corpus, assuming identity")
self.id2word = utils.dict_from_corpus(corpus)
self.num_terms = len(self.id2word)
elif len(self.id2word) > 0:
self.num_terms = 1 + max(self.id2word.keys())
else:
self.num_terms = 0
if self.num_terms == 0:
raise ValueError("cannot compute LDA over an empty collection (no terms)")
self.distributed = bool(distributed)
self.num_topics = int(num_topics)
self.chunksize = chunksize
self.decay = decay
self.num_updates = 0
self.passes = passes
self.update_every = update_every
self.eval_every = eval_every
self.optimize_alpha = alpha == 'auto'
if alpha == 'symmetric' or alpha is None:
logger.info("using symmetric alpha at %s" % (1.0 / num_topics))
self.alpha = numpy.asarray([1.0 / num_topics for i in xrange(num_topics)])
elif alpha == 'asymmetric':
self.alpha = numpy.asarray([1.0 / (i + numpy.sqrt(num_topics)) for i in xrange(num_topics)])
self.alpha /= self.alpha.sum()
logger.info("using asymmetric alpha %s" % list(self.alpha))
elif alpha == 'auto':
self.alpha = numpy.asarray([1.0 / num_topics for i in xrange(num_topics)])
logger.info("using autotuned alpha, starting with %s" % list(self.alpha))
else:
# must be either float or an array of floats, of size num_topics
self.alpha = alpha if isinstance(alpha, numpy.ndarray) else numpy.asarray([alpha] * num_topics)
if len(self.alpha) != num_topics:
raise RuntimeError("invalid alpha shape (must match num_topics)")
if eta is None:
self.eta = 1.0 / num_topics
else:
self.eta = eta
# VB constants
self.iterations = iterations
self.gamma_threshold = gamma_threshold
# set up distributed environment if necessary
if not distributed:
logger.info("using serial LDA version on this node")
self.dispatcher = None
self.numworkers = 1
else:
if self.optimize_alpha:
raise NotImplementedError("auto-optimizing alpha not implemented in distributed LDA")
# set up distributed version
try:
import Pyro4
dispatcher = Pyro4.Proxy('PYRONAME:gensim.lda_dispatcher')
dispatcher._pyroOneway.add("exit")
logger.debug("looking for dispatcher at %s" % str(dispatcher._pyroUri))
dispatcher.initialize(id2word=self.id2word, num_topics=num_topics,
chunksize=chunksize, alpha=alpha, eta=eta, distributed=False)
self.dispatcher = dispatcher
self.numworkers = len(dispatcher.getworkers())
logger.info("using distributed version with %i workers" % self.numworkers)
except Exception as err:
logger.error("failed to initialize distributed LDA (%s)" % err)
raise RuntimeError("failed to initialize distributed LDA (%s)" % err)
# Initialize the variational distribution q(beta|lambda)
self.state = LdaState(self.eta, (self.num_topics, self.num_terms))
self.state.sstats = numpy.random.gamma(100., 1. / 100., (self.num_topics, self.num_terms))
self.sync_state()
# if a training corpus was provided, start estimating the model right away
if corpus is not None:
self.update(corpus)
def __str__(self):
return "LdaModel(num_terms=%s, num_topics=%s, decay=%s, chunksize=%s, alpha=%s)" % \
(self.num_terms, self.num_topics, self.decay, self.chunksize, self.alpha)
def sync_state(self):
self.expElogbeta = numpy.exp(self.state.get_Elogbeta())
def clear(self):
"""Clear model state (free up some memory). Used in the distributed algo."""
self.state = None
self.Elogbeta = None
def inference(self, chunk, collect_sstats=False):
"""
Given a chunk of sparse document vectors, estimate gamma (parameters
controlling the topic weights) for each document in the chunk.
This function does not modify the model (=is read-only aka const). The
        whole input chunk of documents is assumed to fit in RAM; chunking of a
large corpus must be done earlier in the pipeline.
If `collect_sstats` is True, also collect sufficient statistics needed
to update the model's topic-word distributions, and return a 2-tuple
`(gamma, sstats)`. Otherwise, return `(gamma, None)`. `gamma` is of shape
`len(chunk) x self.num_topics`.
"""
try:
_ = len(chunk)
except:
chunk = list(chunk) # convert iterators/generators to plain list, so we have len() etc.
if len(chunk) > 1:
logger.debug("performing inference on a chunk of %i documents" % len(chunk))
# Initialize the variational distribution q(theta|gamma) for the chunk
gamma = numpy.random.gamma(100., 1. / 100., (len(chunk), self.num_topics))
Elogtheta = dirichlet_expectation(gamma)
expElogtheta = numpy.exp(Elogtheta)
if collect_sstats:
sstats = numpy.zeros_like(self.expElogbeta)
else:
sstats = None
converged = 0
# Now, for each document d update that document's gamma and phi
# Inference code copied from Hoffman's `onlineldavb.py` (esp. the
# Lee&Seung trick which speeds things up by an order of magnitude, compared
# to Blei's original LDA-C code, cool!).
for d, doc in enumerate(chunk):
ids = [id for id, _ in doc]
cts = numpy.array([cnt for _, cnt in doc])
gammad = gamma[d, :]
Elogthetad = Elogtheta[d, :]
expElogthetad = expElogtheta[d, :]
expElogbetad = self.expElogbeta[:, ids]
# The optimal phi_{dwk} is proportional to expElogthetad_k * expElogbetad_w.
# phinorm is the normalizer.
phinorm = numpy.dot(expElogthetad, expElogbetad) + 1e-100 # TODO treat zeros explicitly, instead of adding eps?
# Iterate between gamma and phi until convergence
for _ in xrange(self.iterations):
lastgamma = gammad
# We represent phi implicitly to save memory and time.
# Substituting the value of the optimal phi back into
# the update for gamma gives this update. Cf. Lee&Seung 2001.
gammad = self.alpha + expElogthetad * numpy.dot(cts / phinorm, expElogbetad.T)
Elogthetad = dirichlet_expectation(gammad)
expElogthetad = numpy.exp(Elogthetad)
phinorm = numpy.dot(expElogthetad, expElogbetad) + 1e-100
# If gamma hasn't changed much, we're done.
meanchange = numpy.mean(abs(gammad - lastgamma))
if (meanchange < self.gamma_threshold):
converged += 1
break
gamma[d, :] = gammad
if collect_sstats:
# Contribution of document d to the expected sufficient
# statistics for the M step.
sstats[:, ids] += numpy.outer(expElogthetad.T, cts / phinorm)
if len(chunk) > 1:
logger.debug("%i/%i documents converged within %i iterations" %
(converged, len(chunk), self.iterations))
if collect_sstats:
# This step finishes computing the sufficient statistics for the
# M step, so that
# sstats[k, w] = \sum_d n_{dw} * phi_{dwk}
# = \sum_d n_{dw} * exp{Elogtheta_{dk} + Elogbeta_{kw}} / phinorm_{dw}.
sstats *= self.expElogbeta
return gamma, sstats
def do_estep(self, chunk, state=None):
"""
Perform inference on a chunk of documents, and accumulate the collected
sufficient statistics in `state` (or `self.state` if None).
"""
if state is None:
state = self.state
gamma, sstats = self.inference(chunk, collect_sstats=True)
state.sstats += sstats
state.numdocs += gamma.shape[0] # avoid calling len(chunk), might be a generator
return gamma
def update_alpha(self, gammat, rho):
"""
Update parameters for the Dirichlet prior on the per-document
topic weights `alpha` given the last `gammat`.
Uses Newton's method: http://www.stanford.edu/~jhuang11/research/dirichlet/dirichlet.pdf
"""
N = float(len(gammat))
logphat = sum(dirichlet_expectation(gamma) for gamma in gammat) / N
dalpha = numpy.copy(self.alpha)
gradf = N * (psi(numpy.sum(self.alpha)) - psi(self.alpha) + logphat)
c = N * polygamma(1, numpy.sum(self.alpha))
q = -N * polygamma(1, self.alpha)
b = numpy.sum(gradf / q) / ( 1 / c + numpy.sum(1 / q))
dalpha = -(gradf - b) / q
if all(rho() * dalpha + self.alpha > 0):
self.alpha += rho() * dalpha
else:
logger.warning("updated alpha not positive")
logger.info("optimized alpha %s" % list(self.alpha))
return self.alpha
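    # The step above is Newton's method exploiting the special structure of the
    # Dirichlet Hessian (a diagonal matrix given by q plus a rank-one correction
    # via c), so the update is simply
    #     alpha_new = alpha + rho * dalpha,   with   dalpha = -(gradf - b) / q,
    # and the full Hessian never has to be formed or inverted (cf. the Huang
    # note linked in the docstring).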
def log_perplexity(self, chunk, total_docs=None):
if total_docs is None:
total_docs = len(chunk)
corpus_words = sum(cnt for document in chunk for _, cnt in document)
subsample_ratio = 1.0 * total_docs / len(chunk)
perwordbound = self.bound(chunk, subsample_ratio=subsample_ratio) / (subsample_ratio * corpus_words)
logger.info("%.3f per-word bound, %.1f perplexity estimate based on a held-out corpus of %i documents with %i words" %
(perwordbound, numpy.exp2(-perwordbound), len(chunk), corpus_words))
return perwordbound
def update(self, corpus, chunksize=None, decay=None, passes=None, update_every=None, eval_every=None,
iterations=None, gamma_threshold=None):
"""
Train the model with new documents, by EM-iterating over `corpus` until
the topics converge (or until the maximum number of allowed iterations
        is reached). `corpus` must be an iterable (repeatable stream of documents).
In distributed mode, the E step is distributed over a cluster of machines.
This update also supports updating an already trained model (`self`)
with new documents from `corpus`; the two models are then merged in
proportion to the number of old vs. new documents. This feature is still
experimental for non-stationary input streams.
For stationary input (no topic drift in new documents), on the other hand,
this equals the online update of Hoffman et al. and is guaranteed to
        converge for any `decay` in (0.5, 1.0].
"""
# use parameters given in constructor, unless user explicitly overrode them
if chunksize is None:
chunksize = self.chunksize
if decay is None:
decay = self.decay
if passes is None:
passes = self.passes
if update_every is None:
update_every = self.update_every
if eval_every is None:
eval_every = self.eval_every
if iterations is None:
iterations = self.iterations
if gamma_threshold is None:
gamma_threshold = self.gamma_threshold
# rho is the "speed" of updating; TODO try other fncs
rho = lambda: pow(1.0 + self.num_updates, -decay)
try:
lencorpus = len(corpus)
except:
logger.warning("input corpus stream has no len(); counting documents")
lencorpus = sum(1 for _ in corpus)
if lencorpus == 0:
logger.warning("LdaModel.update() called with an empty corpus")
return
self.state.numdocs += lencorpus
if update_every:
updatetype = "online"
updateafter = min(lencorpus, update_every * self.numworkers * chunksize)
else:
updatetype = "batch"
updateafter = lencorpus
evalafter = min(lencorpus, (eval_every or 0) * self.numworkers * chunksize)
updates_per_pass = max(1, lencorpus / updateafter)
logger.info("running %s LDA training, %s topics, %i passes over "
"the supplied corpus of %i documents, updating model once "
"every %i documents, evaluating perplexity every %i documents, "
"iterating %ix with a convergence threshold of %f" %
(updatetype, self.num_topics, passes, lencorpus,
updateafter, evalafter, iterations,
gamma_threshold))
if updates_per_pass * passes < 10:
logger.warning("too few updates, training might not converge; consider "
"increasing the number of passes or iterations to improve accuracy")
for pass_ in xrange(passes):
if self.dispatcher:
logger.info('initializing %s workers' % self.numworkers)
self.dispatcher.reset(self.state)
else:
other = LdaState(self.eta, self.state.sstats.shape)
dirty = False
reallen = 0
for chunk_no, chunk in enumerate(utils.grouper(corpus, chunksize, as_numpy=True)):
reallen += len(chunk) # keep track of how many documents we've processed so far
if eval_every and ((reallen == lencorpus) or ((chunk_no + 1) % (eval_every * self.numworkers) == 0)):
self.log_perplexity(chunk, total_docs=lencorpus)
if self.dispatcher:
# add the chunk to dispatcher's job queue, so workers can munch on it
logger.info('PROGRESS: pass %i, dispatching documents up to #%i/%i' %
(pass_, chunk_no * chunksize + len(chunk), lencorpus))
# this will eventually block until some jobs finish, because the queue has a small finite length
self.dispatcher.putjob(chunk)
else:
logger.info('PROGRESS: pass %i, at document #%i/%i' %
(pass_, chunk_no * chunksize + len(chunk), lencorpus))
gammat = self.do_estep(chunk, other)
if self.optimize_alpha:
self.update_alpha(gammat, rho)
dirty = True
del chunk
# perform an M step. determine when based on update_every, don't do this after every chunk
if update_every and (chunk_no + 1) % (update_every * self.numworkers) == 0:
if self.dispatcher:
# distributed mode: wait for all workers to finish
logger.info("reached the end of input; now waiting for all remaining jobs to finish")
other = self.dispatcher.getstate()
self.do_mstep(rho(), other)
del other # free up some mem
if self.dispatcher:
logger.info('initializing workers')
self.dispatcher.reset(self.state)
else:
other = LdaState(self.eta, self.state.sstats.shape)
dirty = False
#endfor single corpus iteration
if reallen != lencorpus:
raise RuntimeError("input corpus size changed during training (don't use generators as input)")
if dirty:
# finish any remaining updates
if self.dispatcher:
# distributed mode: wait for all workers to finish
logger.info("reached the end of input; now waiting for all remaining jobs to finish")
other = self.dispatcher.getstate()
self.do_mstep(rho(), other)
del other
dirty = False
#endfor entire corpus update
def do_mstep(self, rho, other):
"""
M step: use linear interpolation between the existing topics and
collected sufficient statistics in `other` to update the topics.
"""
logger.debug("updating topics")
# update self with the new blend; also keep track of how much did
# the topics change through this update, to assess convergence
diff = numpy.log(self.expElogbeta)
self.state.blend(rho, other)
del other
diff -= self.state.get_Elogbeta()
self.sync_state()
self.print_topics(15) # print out some debug info at the end of each EM iteration
logger.info("topic diff=%f, rho=%f" % (numpy.mean(numpy.abs(diff)), rho))
self.num_updates += 1
def bound(self, corpus, gamma=None, subsample_ratio=1.0):
"""
Estimate the variational bound of documents from `corpus`:
E_q[log p(corpus)] - E_q[log q(corpus)]
`gamma` are the variational parameters on topic weights for each `corpus`
document (=2d matrix=what comes out of `inference()`).
If not supplied, will be inferred from the model.
"""
score = 0.0
_lambda = self.state.get_lambda()
Elogbeta = dirichlet_expectation(_lambda)
for d, doc in enumerate(corpus): # stream the input doc-by-doc, in case it's too large to fit in RAM
if d % self.chunksize == 0:
logger.debug("bound: at document #%i" % d)
if gamma is None:
gammad, _ = self.inference([doc])
else:
gammad = gamma[d]
Elogthetad = dirichlet_expectation(gammad)
# E[log p(doc | theta, beta)]
score += numpy.sum(cnt * logsumexp(Elogthetad + Elogbeta[:, id]) for id, cnt in doc)
# E[log p(theta | alpha) - log q(theta | gamma)]; assumes alpha is a vector
score += numpy.sum((self.alpha - gammad) * Elogthetad)
score += numpy.sum(gammaln(gammad) - gammaln(self.alpha))
score += gammaln(numpy.sum(self.alpha)) - gammaln(numpy.sum(gammad))
# compensate likelihood for when `corpus` above is only a sample of the whole corpus
score *= subsample_ratio
# E[log p(beta | eta) - log q (beta | lambda)]; assumes eta is a scalar
score += numpy.sum((self.eta - _lambda) * Elogbeta)
score += numpy.sum(gammaln(_lambda) - gammaln(self.eta))
score += numpy.sum(gammaln(self.eta * self.num_terms) - gammaln(numpy.sum(_lambda, 1)))
return score
def print_topics(self, num_topics=10, num_words=10):
return self.show_topics(num_topics, num_words, log=True)
def show_topics(self, num_topics=10, num_words=10, log=False, formatted=True):
"""
For `num_topics` number of topics, return `num_words` most significant words
(10 words per topic, by default).
The topics are returned as a list -- a list of strings if `formatted` is
True, or a list of (probability, word) 2-tuples if False.
If `log` is True, also output this result to log.
Unlike LSA, there is no natural ordering between the topics in LDA.
The returned `num_topics <= self.num_topics` subset of all topics is therefore
arbitrary and may change between two LDA training runs.
"""
if num_topics < 0 or num_topics >= self.num_topics:
num_topics = self.num_topics
chosen_topics = range(num_topics)
else:
num_topics = min(num_topics, self.num_topics)
sort_alpha = self.alpha + 0.0001 * numpy.random.rand(len(self.alpha)) # add a little random jitter, to randomize results around the same alpha
sorted_topics = list(numpy.argsort(sort_alpha))
chosen_topics = sorted_topics[:num_topics//2] + sorted_topics[-num_topics//2:]
shown = []
for i in chosen_topics:
if formatted:
topic = self.print_topic(i, topn=num_words)
else:
topic = self.show_topic(i, topn=num_words)
shown.append(topic)
if log:
logger.info("topic #%i (%.3f): %s" % (i, self.alpha[i], topic))
return shown
def show_topic(self, topicid, topn=10):
topic = self.state.get_lambda()[topicid]
topic = topic / topic.sum() # normalize to probability dist
bestn = numpy.argsort(topic)[::-1][:topn]
beststr = [(topic[id], self.id2word[id]) for id in bestn]
return beststr
def print_topic(self, topicid, topn=10):
return ' + '.join(['%.3f*%s' % v for v in self.show_topic(topicid, topn)])
def __getitem__(self, bow, eps=0.01):
"""
Return topic distribution for the given document `bow`, as a list of
(topic_id, topic_probability) 2-tuples.
Ignore topics with very low probability (below `eps`).
"""
# if the input vector is in fact a corpus, return a transformed corpus as result
is_corpus, corpus = utils.is_corpus(bow)
if is_corpus:
return self._apply(corpus)
gamma, _ = self.inference([bow])
topic_dist = gamma[0] / sum(gamma[0]) # normalize to proper distribution
return [(topicid, topicvalue) for topicid, topicvalue in enumerate(topic_dist)
if topicvalue >= eps] # ignore document's topics that have prob < eps
def save(self, fname, *args, **kwargs):
"""
Save the model to file.
Large internal arrays may be stored into separate files, with `fname` as prefix.
"""
if self.state is not None:
self.state.save(fname + '.state', *args, **kwargs)
super(LdaModel, self).save(fname, *args, ignore=['state', 'dispatcher'], **kwargs)
@classmethod
def load(cls, fname, *args, **kwargs):
"""
Load a previously saved object from file (also see `save`).
Large arrays are mmap'ed back as read-only (shared memory).
"""
kwargs['mmap'] = kwargs.get('mmap', 'r')
result = super(LdaModel, cls).load(fname, *args, **kwargs)
try:
result.state = super(LdaModel, cls).load(fname + '.state', *args, **kwargs)
except Exception as e:
logging.warning("failed to load state from %s: %s" % (fname + '.state', e))
return result
#endclass LdaModel
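# Usage sketch (illustrative only, not part of the module above): shows how a
# trained LdaModel might be queried with the methods defined above. `lda` and
# `bow` are hypothetical stand-ins for a trained model and a bag-of-words
# document supplied by the caller.
def _example_lda_usage(lda, bow):
    # top words for (a subset of) the topics; strings because formatted=True
    topics = lda.show_topics(num_topics=5, num_words=10, formatted=True)
    # per-document topic distribution; __getitem__ drops topics below `eps`
    doc_topics = lda[bow]
    # persist the model; large state goes into a companion '.state' file
    lda.save('/tmp/example.lda')
    return topics, doc_topics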
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.forms.widgets import Media
import operator
from cms.utils.placeholder import PlaceholderNoAction
class PlaceholderManager(models.Manager):
def _orphans(self):
"""
Private method because it should never actually return anything.
"""
from cms.models import CMSPlugin
m2m = self.model._meta.get_all_related_many_to_many_objects()
fks = self.model._meta.get_all_related_objects()
kwargs = {}
for rel in m2m:
kwargs[rel.var_name] = None
for rel in fks:
if rel.model == CMSPlugin:
continue
kwargs[rel.var_name] = None
return self.filter(**kwargs)
class Placeholder(models.Model):
slot = models.CharField(_("slot"), max_length=50, db_index=True, editable=False)
default_width = models.PositiveSmallIntegerField(_("width"), null=True, editable=False)
objects = PlaceholderManager()
def __unicode__(self):
return self.slot
class Meta:
app_label = 'cms'
def has_change_permission(self, request):
opts = self._meta
if request.user.is_superuser:
return True
return request.user.has_perm(opts.app_label + '.' + opts.get_change_permission())
def render(self, context, width):
from cms.plugin_rendering import render_placeholder
        if 'request' not in context:
return '<!-- missing request -->'
context.update({'width': width or self.default_width})
return render_placeholder(self, context)
def get_media(self, request, context):
from cms.plugins.utils import get_plugin_media
media_classes = [get_plugin_media(request, context, plugin) for plugin in self.cmsplugin_set.all()]
if media_classes:
return reduce(operator.add, media_classes)
return Media()
def _get_attached_field(self):
from cms.models import CMSPlugin
if not hasattr(self, '_attached_field_cache'):
self._attached_field_cache = None
for rel in self._meta.get_all_related_objects():
                if issubclass(rel.model, CMSPlugin):
continue
field = getattr(self, rel.get_accessor_name())
if field.count():
self._attached_field_cache = rel.field
return self._attached_field_cache
def _get_attached_field_name(self):
field = self._get_attached_field()
if field:
return field.name
return None
def _get_attached_model(self):
field = self._get_attached_field()
if field:
return field.model
return None
def get_plugins_list(self):
return list(self.get_plugins())
def get_plugins(self):
return self.cmsplugin_set.all().order_by('tree_id', '-rght')
@property
def actions(self):
if not hasattr(self, '_actions_cache'):
field = self._get_attached_field()
self._actions_cache = getattr(field, 'actions', PlaceholderNoAction())
return self._actions_cache
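# Usage sketch (illustrative only, not part of the module above): rendering a
# Placeholder requires a template context that carries the current request,
# otherwise render() short-circuits to an HTML comment. `placeholder` and
# `request` are hypothetical objects supplied by the caller.
def _example_render_placeholder(placeholder, request):
    from django.template import RequestContext
    context = RequestContext(request, {'request': request})
    return placeholder.render(context, width=None)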
|
import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from java.util import Vector
def addTemplate(core):
mobileTemplate = MobileTemplate()
mobileTemplate.setCreatureName('gondula_veteran')
mobileTemplate.setLevel(79)
mobileTemplate.setDifficulty(Difficulty.NORMAL)
mobileTemplate.setMinSpawnDistance(3)
mobileTemplate.setMaxSpawnDistance(5)
mobileTemplate.setDeathblow(False)
mobileTemplate.setScale(1)
mobileTemplate.setSocialGroup('gondula tribe')
mobileTemplate.setAssistRange(1)
mobileTemplate.setOptionsBitmask(128)
mobileTemplate.setStalker(True)
templates = Vector()
templates.add('object/mobile/shared_dressed_ewok_f_01.iff')
templates.add('object/mobile/shared_dressed_ewok_f_02.iff')
templates.add('object/mobile/shared_dressed_ewok_f_03.iff')
templates.add('object/mobile/shared_dressed_ewok_f_04.iff')
templates.add('object/mobile/shared_dressed_ewok_f_05.iff')
templates.add('object/mobile/shared_dressed_ewok_f_06.iff')
templates.add('object/mobile/shared_dressed_ewok_f_07.iff')
templates.add('object/mobile/shared_dressed_ewok_f_08.iff')
templates.add('object/mobile/shared_dressed_ewok_f_09.iff')
templates.add('object/mobile/shared_dressed_ewok_f_10.iff')
templates.add('object/mobile/shared_dressed_ewok_f_11.iff')
templates.add('object/mobile/shared_dressed_ewok_f_12.iff')
templates.add('object/mobile/shared_dressed_ewok_m_01.iff')
templates.add('object/mobile/shared_dressed_ewok_m_02.iff')
templates.add('object/mobile/shared_dressed_ewok_m_03.iff')
templates.add('object/mobile/shared_dressed_ewok_m_04.iff')
templates.add('object/mobile/shared_dressed_ewok_m_05.iff')
templates.add('object/mobile/shared_dressed_ewok_m_06.iff')
templates.add('object/mobile/shared_dressed_ewok_m_07.iff')
templates.add('object/mobile/shared_dressed_ewok_m_08.iff')
templates.add('object/mobile/shared_dressed_ewok_m_09.iff')
templates.add('object/mobile/shared_dressed_ewok_m_10.iff')
templates.add('object/mobile/shared_dressed_ewok_m_11.iff')
templates.add('object/mobile/shared_dressed_ewok_m_12.iff')
mobileTemplate.setTemplates(templates)
weaponTemplates = Vector()
weapontemplate = WeaponTemplate('object/weapon/melee/unarmed/shared_unarmed_default.iff', WeaponType.UNARMED, 1.0, 6, 'kinetic')
weaponTemplates.add(weapontemplate)
mobileTemplate.setWeaponTemplateVector(weaponTemplates)
attacks = Vector()
mobileTemplate.setDefaultAttack('meleeHit')
mobileTemplate.setAttacks(attacks)
lootPoolNames_1 = ['Junk']
lootPoolChances_1 = [100]
lootGroupChance_1 = 65
mobileTemplate.addToLootGroups(lootPoolNames_1,lootPoolChances_1,lootGroupChance_1)
lootPoolNames_2 = ['random_loot_primitives']
lootPoolChances_2 = [100]
lootGroupChance_2 = 35
mobileTemplate.addToLootGroups(lootPoolNames_2,lootPoolChances_2,lootGroupChance_2)
core.spawnService.addMobileTemplate('gondula_veteran', mobileTemplate)
return
|
""" Contains functions to fetch info from different simple online APIs."""
import util.web
def urbandictionary_search(search):
"""
Searches urbandictionary's API for a given search term.
:param search: The search term str to search for.
    :return: definition str or None on no match or error.
"""
if str(search).strip():
urban_api_url = 'http://api.urbandictionary.com/v0/define?term=%s' % search
response = util.web.http_get(url=urban_api_url, json=True)
if response['json'] is not None:
try:
definition = response['json']['list'][0]['definition']
return definition.encode('ascii', 'ignore')
except (KeyError, IndexError):
return None
else:
return None
def weather_search(city):
"""
Searches worldweatheronline's API for weather data for a given city.
You must have a working API key to be able to use this function.
:param city: The city str to search for.
:return: weather data str or None on no match or error.
"""
if str(city).strip():
api_key = ''
if not api_key:
return 'Missing api key.'
else:
weather_api_url = 'http://api.worldweatheronline.com/premium/v1/weather.ashx?key=%s&q=%s&format=json' % \
(api_key, city)
response = util.web.http_get(url=weather_api_url, json=True)
if response['json'] is not None:
try:
pressure = response['json']['data']['current_condition'][0]['pressure']
temp_c = response['json']['data']['current_condition'][0]['temp_C']
temp_f = response['json']['data']['current_condition'][0]['temp_F']
query = response['json']['data']['request'][0]['query'].encode('ascii', 'ignore')
result = '%s. Temperature: %sC (%sF) Pressure: %s millibars' % (query, temp_c, temp_f, pressure)
return result
except (IndexError, KeyError):
return None
else:
return None
def whois(ip):
"""
Searches ip-api for information about a given IP.
:param ip: The ip str to search for.
:return: information str or None on error.
"""
if str(ip).strip():
url = 'http://ip-api.com/json/%s' % ip
response = util.web.http_get(url=url, json=True)
if response['json'] is not None:
try:
city = response['json']['city']
country = response['json']['country']
isp = response['json']['isp']
org = response['json']['org']
region = response['json']['regionName']
zipcode = response['json']['zip']
info = country + ', ' + city + ', ' + region + ', Zipcode: ' + zipcode + ' Isp: ' + isp + '/' + org
return info
except KeyError:
return None
else:
return None
def chuck_norris():
"""
Finds a random Chuck Norris joke/quote.
:return: joke str or None on failure.
"""
url = 'http://api.icndb.com/jokes/random/?escape=javascript'
response = util.web.http_get(url=url, json=True)
if response['json'] is not None:
if response['json']['type'] == 'success':
joke = response['json']['value']['joke']
return joke
return None
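# Usage sketch (illustrative only): the helpers above all return a str on
# success and None (or a short error message) otherwise, so callers only need
# a simple None check. The lookup values below are placeholders.
def _example_api_lookups():
    results = [
        urbandictionary_search('python'),
        weather_search('London'),  # returns 'Missing api key.' until a key is set
        whois('8.8.8.8'),
        chuck_norris(),
    ]
    return [r for r in results if r is not None]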
|
"""Analysis Category - the category of the analysis service
"""
from AccessControl import ClassSecurityInfo
from bika.lims import bikaMessageFactory as _
from bika.lims.utils import t
from bika.lims.config import PROJECTNAME
from bika.lims.content.bikaschema import BikaSchema
from bika.lims.interfaces import IAnalysisCategory
from plone.indexer import indexer
from Products.Archetypes.public import *
from Products.Archetypes.references import HoldingReference
from Products.CMFCore.utils import getToolByName
from Products.CMFCore.WorkflowCore import WorkflowException
from zope.interface import implements
import sys
import transaction
@indexer(IAnalysisCategory)
def sortable_title_with_sort_key(instance):
sort_key = instance.getSortKey()
if sort_key:
return "{:010.3f}{}".format(sort_key, instance.Title())
return instance.Title()
schema = BikaSchema.copy() + Schema((
TextField('Comments',
default_output_type = 'text/plain',
allowable_content_types = ('text/plain',),
widget=TextAreaWidget (
description = _("To be displayed below each Analysis "
"Category section on results reports."),
label = _("Comments")),
),
ReferenceField('Department',
required=1,
vocabulary='getDepartments',
vocabulary_display_path_bound=sys.maxsize,
allowed_types=('Department',),
relationship='AnalysisCategoryDepartment',
referenceClass=HoldingReference,
widget=ReferenceWidget(
checkbox_bound=0,
label = _("Department"),
description = _("The laboratory department"),
),
),
ComputedField('DepartmentTitle',
expression="context.getDepartment() and context.getDepartment().Title() or ''",
widget=ComputedWidget(
visible=False,
),
),
FloatField('SortKey',
validators=('SortKeyValidator',),
widget=DecimalWidget(
label = _("Sort Key"),
description = _("Float value from 0.0 - 1000.0 indicating the sort order. Duplicate values are ordered alphabetically."),
),
),
))
schema['description'].widget.visible = True
schema['description'].schemata = 'default'
class AnalysisCategory(BaseContent):
implements(IAnalysisCategory)
security = ClassSecurityInfo()
displayContentsTab = False
schema = schema
_at_rename_after_creation = True
def _renameAfterCreation(self, check_auto_id=False):
from bika.lims.idserver import renameAfterCreation
renameAfterCreation(self)
def getDepartments(self):
bsc = getToolByName(self, 'bika_setup_catalog')
deps = []
for d in bsc(portal_type='Department',
inactive_state='active'):
deps.append((d.UID, d.Title))
return DisplayList(deps)
    def workflow_script_deactivate(self):
        # An instance cannot be deactivated if it contains services
pu = getToolByName(self, 'plone_utils')
bsc = getToolByName(self, 'bika_setup_catalog')
ars = bsc(portal_type='AnalysisService', getCategoryUID=self.UID())
if ars:
message = _("Category cannot be deactivated because "
"it contains Analysis Services")
pu.addPortalMessage(message, 'error')
transaction.get().abort()
raise WorkflowException
registerType(AnalysisCategory, PROJECTNAME)
|
from contextlib2 import contextmanager, closing
import urllib2
import tempfile
import os
import logging
import shutil
import httplib
import ftplib
import socket
from time import sleep
# Custom error wrapper for (known) exceptions thrown by the download module.
class DownloadFailedError(Exception):
pass
@contextmanager
def get(url, options={}):
"""
Download a file to a temporary directory, returning it.
The options provided will control the behaviour of the download algorithm.
* 'tries' - The maximum number of tries to download the file before
giving up and raising an exception.
* 'timeout' - Timeout in seconds before considering the connection to
have failed.
* 'verifier' - A function which is called with a filelike object. It
should return True if the file is okay and appears to be fully
downloaded.
"""
logger = logging.getLogger('download')
with closing(tempfile.NamedTemporaryFile()) as tmp:
# current file position = number of bytes read
filepos = 0
# file size when downloaded, if known
filesize = None
# number of attempts so far
tries = 0
# last try which resulted in some forward progress (i.e: filepos got
# bigger)
last_successful_try = 0
# maximum number of attempts to make
max_tries = options.get('tries', 1)
# timeout for blocking operations (e.g: connect) in seconds
timeout = options.get('timeout', 60)
# verifier function
verifier = options.get('verifier')
# backoff function - to delay between retries
backoff = options.get('backoff')
# whether the server supports Range headers (if it doesn't we'll have
# to restart from the beginning every time).
accept_range = False
# we need to download _something_ if the file position is less than the
# known size, or the size is unknown.
while filesize is None or filepos < filesize:
# explode if we've exceeded the number of allowed attempts
if tries >= max_tries:
raise DownloadFailedError("Max tries exceeded (%d) while "
"downloading file %r"
% (max_tries, url))
else:
if backoff and tries > last_successful_try:
backoff(tries - last_successful_try)
tries += 1
req = urllib2.Request(url)
# if the server supports accept range, and we have a partial
            # download then attempt to resume it.
if accept_range and filepos > 0:
logger.info("Continuing (try %d/%d) at %d bytes: %r"
% (tries, max_tries, filepos, url))
assert filesize is not None
req.headers['Range'] = 'bytes=%s-%s' % (filepos, filesize - 1)
else:
# otherwise, truncate the file in readiness to download from
# scratch.
logger.info("Downloading (try %d/%d) %r"
% (tries, max_tries, url))
filepos = 0
tmp.seek(0, os.SEEK_SET)
tmp.truncate(0)
try:
f = urllib2.urlopen(req, timeout=timeout)
# try to get the filesize, if the server reports it.
if filesize is None:
content_length = f.info().get('Content-Length')
if content_length is not None:
try:
filesize = int(content_length)
except ValueError:
pass
# detect whether the server accepts Range requests.
accept_range = f.info().get('Accept-Ranges') == 'bytes'
# copy data from the server
shutil.copyfileobj(f, tmp)
except (IOError, httplib.HTTPException) as e:
logger.debug("Got HTTP error: %s" % str(e))
continue
except ftplib.all_errors as e:
logger.debug("Got FTP error: %s" % str(e))
continue
except socket.timeout as e:
logger.debug("Got socket timeout: %s" % str(e))
continue
# update number of bytes read (this would be nicer if copyfileobj
            # returned it).
old_filepos = filepos
filepos = tmp.tell()
if filepos > old_filepos:
last_successful_try = tries
# if we don't know how large the file is supposed to be, then
# verify it every time.
if filesize is None and verifier is not None:
# reset tmp file to beginning for verification
tmp.seek(0, os.SEEK_SET)
if verifier(tmp):
break
                # no need to reset here - since filesize is None, then we'll be
# downloading from scratch, which will truncate the file.
# verify the file, if it hasn't been verified before
if filesize is not None and verifier is not None:
# reset tmp file to beginning for verification
tmp.seek(0, os.SEEK_SET)
if not verifier(tmp):
raise DownloadFailedError("File downloaded from %r failed "
"verification" % url)
tmp.seek(0, os.SEEK_SET)
yield tmp
def _exponential_backoff(try_num):
"""
Backoff exponentially, with each request backing off 2x from the previous
attempt. The time limits at 10 minutes maximum back-off. This is generally
a good default if nothing else is known about the upstream rate-limiter.
"""
secs = min((1 << try_num) - 1, 600)
sleep(secs)
def options(in_opts={}):
"""
Extract a set of options from the input and augment them with some
defaults.
"""
out_opts = dict()
backoff = in_opts.get('backoff', 'exponential')
if backoff == 'exponential':
out_opts['backoff'] = _exponential_backoff
else:
raise NotImplementedError("Configuration backoff=%r not understood."
% backoff)
timeout = in_opts.get('timeout', 60)
out_opts['timeout'] = int(timeout)
tries = in_opts.get('tries', 10)
out_opts['tries'] = int(tries)
return out_opts
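# Usage sketch (illustrative only): options() normalises a raw configuration
# dict and get() yields a temporary file object that disappears when the
# `with` block exits. The URL and the trivial verifier are placeholders.
def _example_download(url='http://example.com/data.bin'):
    opts = options({'tries': 3, 'timeout': 30, 'backoff': 'exponential'})
    opts['verifier'] = lambda fileobj: len(fileobj.read()) > 0
    with get(url, opts) as tmp:
        data = tmp.read()
    return data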
|
from __future__ import absolute_import
from django.db import transaction
from rest_framework import status
from sentry import features
from sentry.api.base import DocSection
from sentry.api.bases.project import ProjectEndpoint
from sentry.api.serializers import serialize
from sentry.api.validators import ServiceHookValidator
from sentry.models import AuditLogEntryEvent, ObjectStatus, ServiceHook
from sentry.utils.apidocs import scenario, attach_scenarios
@scenario('ListServiceHooks')
def list_hooks_scenario(runner):
runner.request(
method='GET', path='/projects/%s/%s/hooks/' % (runner.org.slug, runner.default_project.slug)
)
@scenario('CreateServiceHook')
def create_hook_scenario(runner):
runner.request(
method='POST',
path='/projects/%s/%s/hooks/' % (runner.org.slug, runner.default_project.slug),
data={'url': 'https://example.com/sentry-hook', 'events': ['event.alert', 'event.created']}
)
class ProjectServiceHooksEndpoint(ProjectEndpoint):
doc_section = DocSection.PROJECTS
def has_feature(self, request, project):
return features.has(
'projects:servicehooks',
project=project,
actor=request.user,
)
@attach_scenarios([list_hooks_scenario])
def get(self, request, project):
"""
List a Project's Service Hooks
``````````````````````````````
Return a list of service hooks bound to a project.
:pparam string organization_slug: the slug of the organization the
client keys belong to.
:pparam string project_slug: the slug of the project the client keys
belong to.
"""
if not self.has_feature(request, project):
return self.respond({
'error_type': 'unavailable_feature',
'detail': ['You do not have that feature enabled']
}, status=403)
queryset = ServiceHook.objects.filter(
project_id=project.id,
)
status = request.GET.get('status')
if status == 'active':
queryset = queryset.filter(
status=ObjectStatus.ACTIVE,
)
elif status == 'disabled':
queryset = queryset.filter(
status=ObjectStatus.DISABLED,
)
elif status:
queryset = queryset.none()
return self.paginate(
request=request,
queryset=queryset,
order_by='-id',
on_results=lambda x: serialize(x, request.user),
)
@attach_scenarios([create_hook_scenario])
def post(self, request, project):
"""
Register a new Service Hook
```````````````````````````
Register a new service hook on a project.
Events include:
- event.alert: An alert is generated for an event (via rules).
- event.created: A new event has been processed.
:pparam string organization_slug: the slug of the organization the
client keys belong to.
:pparam string project_slug: the slug of the project the client keys
belong to.
:param string url: the url for the webhook
:param array[string] events: the events to subscribe to
"""
if not request.user.is_authenticated():
return self.respond(status=401)
if not self.has_feature(request, project):
return self.respond({
'error_type': 'unavailable_feature',
'detail': ['You do not have that feature enabled']
}, status=403)
validator = ServiceHookValidator(data=request.DATA)
if not validator.is_valid():
return self.respond(validator.errors, status=status.HTTP_400_BAD_REQUEST)
result = validator.object
with transaction.atomic():
hook = ServiceHook.objects.create(
project_id=project.id,
url=result['url'],
actor_id=request.user.id,
events=result.get('events'),
application=getattr(request.auth, 'application', None) if request.auth else None,
)
self.create_audit_entry(
request=request,
organization=project.organization,
target_object=hook.id,
event=AuditLogEntryEvent.SERVICEHOOK_ADD,
data=hook.get_audit_log_data(),
)
return self.respond(serialize(hook, request.user), status=201)
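# Usage sketch (illustrative only, not part of the endpoint above): the POST
# body documented in post() only needs `url` and `events`. The slugs are
# placeholders, and the full request path depends on how the API is mounted.
def _example_register_hook_payload(org_slug='my-org', project_slug='my-project'):
    path = '/projects/%s/%s/hooks/' % (org_slug, project_slug)
    payload = {
        'url': 'https://example.com/sentry-hook',
        'events': ['event.alert', 'event.created'],
    }
    return path, payload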
|
### A custom item example
import sys
import gobject
import gtk
import goocanvas
class CustomItem(gobject.GObject, goocanvas.Item):
__gproperties__ = {
'title': (str, None, None, '', gobject.PARAM_READWRITE),
'description': (str, None, None, '', gobject.PARAM_READWRITE),
'can-focus': (bool, None, None, False, gobject.PARAM_READWRITE),
'visibility-threshold': (float, None, None, 0, 10e6, 0, gobject.PARAM_READWRITE),
'visibility': (goocanvas.ItemVisibility, None, None, goocanvas.ITEM_VISIBLE, gobject.PARAM_READWRITE),
'pointer-events': (goocanvas.PointerEvents, None, None, goocanvas.EVENTS_NONE, gobject.PARAM_READWRITE),
'transform': (goocanvas.TYPE_CAIRO_MATRIX, None, None, gobject.PARAM_READWRITE),
'parent': (gobject.GObject, None, None, gobject.PARAM_READWRITE),
}
def __init__(self, **kwargs):
self.bounds = goocanvas.Bounds()
self.view = None
self.parent = None
## default values for properties
#self.title = None
#self.description = None
#self.can_focus = False
#self.visibility = goocanvas.ITEM_VISIBLE
#self.visibility_threshold = 0.0
#self.pointer_events = goocanvas.EVENTS_NONE
#self.transform = None
## chain to parent constructor
gobject.GObject.__init__(self, **kwargs)
def do_set_parent(self, parent):
assert self.parent is None
self.parent = parent
def do_set_property(self, pspec, value):
if pspec.name == 'title':
self.title = value
elif pspec.name == 'description':
self.description = value
elif pspec.name == 'can-focus':
self.can_focus = value
elif pspec.name == 'visibility':
self.visibility = value
elif pspec.name == 'visibility-threshold':
self.visibility_threshold = value
elif pspec.name == 'pointer-events':
self.pointer_events = value
elif pspec.name == 'transform':
self.transform = value
elif pspec.name == 'parent':
self.parent = value
else:
raise AttributeError, 'unknown property %s' % pspec.name
def do_get_property(self, pspec):
if pspec.name == 'title':
return self.title
elif pspec.name == 'description':
return self.description
elif pspec.name == 'can-focus':
return self.can_focus
elif pspec.name == 'visibility':
return self.visibility
elif pspec.name == 'visibility-threshold':
return self.visibility_threshold
elif pspec.name == 'pointer-events':
return self.pointer_events
elif pspec.name == 'transform':
return self.transform
elif pspec.name == 'parent':
return self.parent
else:
raise AttributeError, 'unknown property %s' % pspec.name
## optional methods
def do_get_bounds(self):
return self.bounds
def do_get_item_at(self, x, y, cr, is_pointer_event, parent_is_visible):
return None
## mandatory methods
def do_update(self, entire_tree, cr):
raise NotImplementedError
def do_paint(self, cr, bounds, scale):
raise NotImplementedError
class CustomRectItem(CustomItem):
def __init__(self, x, y, width, height, line_width, **kwargs):
CustomItem.__init__(self, **kwargs)
self.x = x
self.y = y
self.width = width
self.height = height
self.line_width = line_width
def do_update(self, entire_tree, cr):
half_lw = self.line_width/2
self.bounds.x1 = float(self.x - half_lw)
self.bounds.y1 = float(self.y - half_lw)
self.bounds.x2 = float(self.x + self.width + half_lw)
self.bounds.y2 = float(self.y + self.height + half_lw)
return self.bounds
def do_paint(self, cr, bounds, scale):
cr.rectangle(self.x, self.y, self.width, self.height)
cr.set_line_width(self.line_width)
cr.set_source_rgb(0, 0, 0)
cr.stroke()
gobject.type_register(CustomRectItem)
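# Usage sketch (illustrative only): because CustomItem declares its keys in
# __gproperties__ and routes them through do_set_property/do_get_property,
# registered items can be driven with the standard GObject property API.
def _example_property_access():
    item = CustomRectItem(x=10, y=10, width=50, height=50, line_width=2)
    item.set_property('title', 'example rectangle')
    return item.get_property('title')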
def main(argv):
window = gtk.Window()
window.set_default_size(640, 600)
window.show()
window.connect("destroy", lambda w: gtk.main_quit())
scrolled_win = gtk.ScrolledWindow()
scrolled_win.set_shadow_type(gtk.SHADOW_IN)
scrolled_win.show()
window.add(scrolled_win)
canvas = goocanvas.Canvas()
canvas.set_size_request(600, 450)
canvas.set_bounds(0, 0, 1000, 1000)
root = canvas.get_root_item()
item = CustomRectItem(x=100, y=100, width=400, height=400, line_width=20)
root.add_child(item)
item = goocanvas.Text(text="Hello World",
x=300, y=300,
anchor=gtk.ANCHOR_CENTER,
font="Sans 24")
root.add_child(item)
item.rotate(45, 300, 300)
canvas.show()
scrolled_win.add(canvas)
gtk.main()
if __name__ == "__main__":
main(sys.argv)
|
#!/usr/bin/env python2.7
import sys
import json
import unicodedata
import re
import string
if(len(sys.argv) < 3):
k = 3
elif(sys.argv[1] == '-k'):
k = int(sys.argv[2])
else:
k = 3
def clean(input):
v1 = unicodedata.normalize('NFC', input)
v2 = filter(lambda x: x in string.printable, v1)
remap = {
ord('\t') : u' ',
ord('\f') : u' ',
ord('\r') : None,
ord('\n') : u' ',
        ord('!') : None,
ord('"') : None,
ord('#') : None,
ord('$') : None,
ord('%') : None,
ord('&') : None,
ord('\\') : None,
ord('\'') : None,
ord('(') : None,
ord(')') : None,
ord('*') : None,
ord('+') : None,
ord(',') : None,
ord('-') : None,
ord('.') : None,
ord('/') : None,
ord(':') : None,
ord(';') : None,
ord('<') : None,
ord('=') : None,
ord('>') : None,
ord('?') : None,
ord('@') : None,
ord('[') : None,
ord(']') : None,
ord('^') : None,
ord('_') : None,
ord('`') : None,
ord('{') : None,
ord('|') : None,
ord('}') : None,
ord('~') : None
}
v3 = v2.translate(remap)
v4 = re.sub("\s\s+", " ", v3)
v5 = v4.upper().strip()
return v5
def dateToWeekNumber(unixTime):
return str(int(unixTime)/604800)
for line in sys.stdin:
window = [None] * k
index = 0
post = json.loads(line)
body = clean(post['body']).split()
for word in body:
window[index % k] = word
if(index >= k - 1):
ngram = ' '.join(window[(index + 1) % k:] + window[:(index + 1) % k])
print(dateToWeekNumber(post['created_utc']) + '\t' + ngram)
index += 1
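# Note on the loop above (illustrative sketch, not part of the original
# script): `window` is a circular buffer of the last k words; slot index % k
# is overwritten each step, and slicing at (index + 1) % k rotates the buffer
# back into reading order before joining. Standalone version of the same idea:
def _example_ngrams(words, k=3):
    window = [None] * k
    grams = []
    for index, word in enumerate(words):
        window[index % k] = word
        if index >= k - 1:
            grams.append(' '.join(window[(index + 1) % k:] + window[:(index + 1) % k]))
    return grams
# e.g. _example_ngrams(['A', 'B', 'C', 'D'], k=3) -> ['A B C', 'B C D']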
|
import numpy as np
import skimage.io
from scipy.ndimage import zoom
from skimage.transform import resize
try:
# Python3 will most likely not be able to load protobuf
from caffe.proto import caffe_pb2
except:
import sys
if sys.version_info >= (3, 0):
print("Failed to include caffe_pb2, things might go wrong!")
else:
raise
## proto / datum / ndarray conversion
def blobproto_to_array(blob, return_diff=False):
"""
    Convert a blob proto to an array. By default, we will just return the data,
unless return_diff is True, in which case we will return the diff.
"""
# Read the data into an array
if return_diff:
data = np.array(blob.diff)
else:
data = np.array(blob.data)
# Reshape the array
if blob.HasField('num') or blob.HasField('channels') or blob.HasField('height') or blob.HasField('width'):
# Use legacy 4D shape
return data.reshape(blob.num, blob.channels, blob.height, blob.width)
else:
return data.reshape(blob.shape.dim)
def array_to_blobproto(arr, diff=None):
"""Converts a N-dimensional array to blob proto. If diff is given, also
convert the diff. You need to make sure that arr and diff have the same
shape, and this function does not do sanity check.
"""
blob = caffe_pb2.BlobProto()
blob.shape.dim.extend(arr.shape)
blob.data.extend(arr.astype(float).flat)
if diff is not None:
blob.diff.extend(diff.astype(float).flat)
return blob
def arraylist_to_blobprotovector_str(arraylist):
"""Converts a list of arrays to a serialized blobprotovec, which could be
then passed to a network for processing.
"""
vec = caffe_pb2.BlobProtoVector()
vec.blobs.extend([array_to_blobproto(arr) for arr in arraylist])
return vec.SerializeToString()
def blobprotovector_str_to_arraylist(str):
"""Converts a serialized blobprotovec to a list of arrays.
"""
vec = caffe_pb2.BlobProtoVector()
vec.ParseFromString(str)
return [blobproto_to_array(blob) for blob in vec.blobs]
def array_to_datum(arr, label=0):
"""Converts a 3-dimensional array to datum. If the array has dtype uint8,
the output data will be encoded as a string. Otherwise, the output data
will be stored in float format.
"""
if arr.ndim != 3:
raise ValueError('Incorrect array shape.')
datum = caffe_pb2.Datum()
datum.channels, datum.height, datum.width = arr.shape
if arr.dtype == np.uint8:
datum.data = arr.tostring()
else:
datum.float_data.extend(arr.flat)
datum.label = label
return datum
def datum_to_array(datum):
"""Converts a datum to an array. Note that the label is not returned,
as one can easily get it by calling datum.label.
"""
if len(datum.data):
return np.fromstring(datum.data, dtype=np.uint8).reshape(
datum.channels, datum.height, datum.width)
else:
return np.array(datum.float_data).astype(float).reshape(
datum.channels, datum.height, datum.width)
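# Usage sketch (illustrative only, not part of the module above): round-trips
# a small uint8 (channels x height x width) array through the Datum helpers;
# with uint8 input the pixels travel as a byte string, otherwise as float_data.
def _example_datum_roundtrip():
    arr = (np.random.rand(3, 4, 5) * 255).astype(np.uint8)
    datum = array_to_datum(arr, label=7)
    restored = datum_to_array(datum)
    assert restored.shape == arr.shape and datum.label == 7
    return restored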
## Pre-processing
class Transformer:
"""
Transform input for feeding into a Net.
Note: this is mostly for illustrative purposes and it is likely better
to define your own input preprocessing routine for your needs.
Parameters
----------
net : a Net for which the input should be prepared
"""
def __init__(self, inputs):
self.inputs = inputs
self.transpose = {}
self.channel_swap = {}
self.raw_scale = {}
self.mean = {}
self.input_scale = {}
def __check_input(self, in_):
if in_ not in self.inputs:
raise Exception('{} is not one of the net inputs: {}'.format(
in_, self.inputs))
def preprocess(self, in_, data):
"""
Format input for Caffe:
- convert to single
- resize to input dimensions (preserving number of channels)
- transpose dimensions to K x H x W
- reorder channels (for instance color to BGR)
- scale raw input (e.g. from [0, 1] to [0, 255] for ImageNet models)
- subtract mean
- scale feature
Parameters
----------
in_ : name of input blob to preprocess for
data : (H' x W' x K) ndarray
Returns
-------
caffe_in : (K x H x W) ndarray for input to a Net
"""
self.__check_input(in_)
caffe_in = data.astype(np.float32, copy=False)
transpose = self.transpose.get(in_)
channel_swap = self.channel_swap.get(in_)
raw_scale = self.raw_scale.get(in_)
mean = self.mean.get(in_)
input_scale = self.input_scale.get(in_)
in_dims = self.inputs[in_][2:]
if caffe_in.shape[:2] != in_dims:
caffe_in = resize_image(caffe_in, in_dims)
if transpose is not None:
caffe_in = caffe_in.transpose(transpose)
if channel_swap is not None:
caffe_in = caffe_in[channel_swap, :, :]
if raw_scale is not None:
caffe_in *= raw_scale
if mean is not None:
caffe_in -= mean
if input_scale is not None:
caffe_in *= input_scale
return caffe_in
def deprocess(self, in_, data):
"""
Invert Caffe formatting; see preprocess().
"""
self.__check_input(in_)
decaf_in = data.copy().squeeze()
transpose = self.transpose.get(in_)
channel_swap = self.channel_swap.get(in_)
raw_scale = self.raw_scale.get(in_)
mean = self.mean.get(in_)
input_scale = self.input_scale.get(in_)
if input_scale is not None:
decaf_in /= input_scale
if mean is not None:
decaf_in += mean
if raw_scale is not None:
decaf_in /= raw_scale
if channel_swap is not None:
decaf_in = decaf_in[np.argsort(channel_swap), :, :]
if transpose is not None:
decaf_in = decaf_in.transpose(np.argsort(transpose))
return decaf_in
def set_transpose(self, in_, order):
"""
        Set the order in which the input's dimensions are transposed,
        e.g. (2, 0, 1) maps an H x W x K image to the K x H x W layout Caffe expects.
        Parameters
        ----------
        in_ : which input to assign this transpose order
order : the order to transpose the dimensions
"""
self.__check_input(in_)
if len(order) != len(self.inputs[in_]) - 1:
raise Exception('Transpose order needs to have the same number of '
'dimensions as the input.')
self.transpose[in_] = order
def set_channel_swap(self, in_, order):
"""
Set the input channel order for e.g. RGB to BGR conversion
as needed for the reference ImageNet model.
N.B. this assumes the channels are the first dimension AFTER transpose.
Parameters
----------
in_ : which input to assign this channel order
order : the order to take the channels.
(2,1,0) maps RGB to BGR for example.
"""
self.__check_input(in_)
if len(order) != self.inputs[in_][1]:
raise Exception('Channel swap needs to have the same number of '
'dimensions as the input channels.')
self.channel_swap[in_] = order
def set_raw_scale(self, in_, scale):
"""
Set the scale of raw features s.t. the input blob = input * scale.
While Python represents images in [0, 1], certain Caffe models
like CaffeNet and AlexNet represent images in [0, 255] so the raw_scale
of these models must be 255.
Parameters
----------
in_ : which input to assign this scale factor
scale : scale coefficient
"""
self.__check_input(in_)
self.raw_scale[in_] = scale
def set_mean(self, in_, mean):
"""
Set the mean to subtract for centering the data.
Parameters
----------
in_ : which input to assign this mean.
mean : mean ndarray (input dimensional or broadcastable)
"""
self.__check_input(in_)
ms = mean.shape
if mean.ndim == 1:
# broadcast channels
if ms[0] != self.inputs[in_][1]:
raise ValueError('Mean channels incompatible with input.')
mean = mean[:, np.newaxis, np.newaxis]
else:
# elementwise mean
if len(ms) == 2:
ms = (1,) + ms
if len(ms) != 3:
raise ValueError('Mean shape invalid')
if ms != self.inputs[in_][1:]:
raise ValueError('Mean shape incompatible with input shape.')
self.mean[in_] = mean
def set_input_scale(self, in_, scale):
"""
Set the scale of preprocessed inputs s.t. the blob = blob * scale.
N.B. input_scale is done AFTER mean subtraction and other preprocessing
while raw_scale is done BEFORE.
Parameters
----------
in_ : which input to assign this scale factor
scale : scale coefficient
"""
self.__check_input(in_)
self.input_scale[in_] = scale
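# Usage sketch (illustrative only): configures a Transformer the way the
# preprocess() docstring describes -- transpose H x W x K input to K x H x W,
# swap RGB to BGR, rescale to [0, 255] and subtract a per-channel mean. The
# blob name 'data', the 227 x 227 input size and the mean values are
# placeholders, not values mandated by any particular model.
def _example_transformer():
    transformer = Transformer({'data': (1, 3, 227, 227)})
    transformer.set_transpose('data', (2, 0, 1))
    transformer.set_channel_swap('data', (2, 1, 0))
    transformer.set_raw_scale('data', 255.0)
    transformer.set_mean('data', np.array([104.0, 117.0, 123.0]))
    image = np.random.rand(300, 400, 3).astype(np.float32)
    return transformer.preprocess('data', image)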
## Image IO
def load_image(filename, color=True):
"""
Load an image converting from grayscale or alpha as needed.
Parameters
----------
filename : string
color : boolean
flag for color format. True (default) loads as RGB while False
loads as intensity (if image is already grayscale).
Returns
-------
image : an image with type np.float32 in range [0, 1]
of size (H x W x 3) in RGB or
of size (H x W x 1) in grayscale.
"""
img = skimage.img_as_float(skimage.io.imread(filename)).astype(np.float32)
if img.ndim == 2:
img = img[:, :, np.newaxis]
if color:
img = np.tile(img, (1, 1, 3))
elif img.shape[2] == 4:
img = img[:, :, :3]
return img
def resize_image(im, new_dims, interp_order=1):
"""
Resize an image array with interpolation.
Parameters
----------
im : (H x W x K) ndarray
new_dims : (height, width) tuple of new dimensions.
interp_order : interpolation order, default is linear.
Returns
-------
im : resized ndarray with shape (new_dims[0], new_dims[1], K)
"""
if im.shape[-1] == 1 or im.shape[-1] == 3:
im_min, im_max = im.min(), im.max()
if im_max > im_min:
# skimage is fast but only understands {1,3} channel images
# in [0, 1].
im_std = (im - im_min) / (im_max - im_min)
resized_std = resize(im_std, new_dims, order=interp_order)
resized_im = resized_std * (im_max - im_min) + im_min
else:
# the image is a constant -- avoid divide by 0
ret = np.empty((new_dims[0], new_dims[1], im.shape[-1]),
dtype=np.float32)
ret.fill(im_min)
return ret
else:
# ndimage interpolates anything but more slowly.
scale = tuple(np.array(new_dims, dtype=float) / np.array(im.shape[:2]))
resized_im = zoom(im, scale + (1,), order=interp_order)
return resized_im.astype(np.float32)
def oversample(images, crop_dims):
"""
Crop images into the four corners, center, and their mirrored versions.
Parameters
----------
image : iterable of (H x W x K) ndarrays
crop_dims : (height, width) tuple for the crops.
Returns
-------
crops : (10*N x H x W x K) ndarray of crops for number of inputs N.
"""
# Dimensions and center.
im_shape = np.array(images[0].shape)
crop_dims = np.array(crop_dims)
im_center = im_shape[:2] / 2.0
# Make crop coordinates
h_indices = (0, im_shape[0] - crop_dims[0])
w_indices = (0, im_shape[1] - crop_dims[1])
crops_ix = np.empty((5, 4), dtype=int)
curr = 0
for i in h_indices:
for j in w_indices:
crops_ix[curr] = (i, j, i + crop_dims[0], j + crop_dims[1])
curr += 1
crops_ix[4] = np.tile(im_center, (1, 2)) + np.concatenate([
-crop_dims / 2.0,
crop_dims / 2.0
])
crops_ix = np.tile(crops_ix, (2, 1))
# Extract crops
crops = np.empty((10 * len(images), crop_dims[0], crop_dims[1],
im_shape[-1]), dtype=np.float32)
ix = 0
for im in images:
for crop in crops_ix:
crops[ix] = im[crop[0]:crop[2], crop[1]:crop[3], :]
ix += 1
crops[ix-5:ix] = crops[ix-5:ix, :, ::-1, :] # flip for mirrors
return crops
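# Usage sketch (illustrative only): oversample() above yields 10 crops per
# input image (four corners plus center, each with its horizontal mirror).
# The image and crop sizes below are placeholders.
def _example_oversample():
    images = [np.random.rand(256, 256, 3).astype(np.float32)]
    crops = oversample(images, (227, 227))
    assert crops.shape == (10, 227, 227, 3)
    return crops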
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from pafy import Pafy
url = "http://www.youtube.com/watch?v=cyMHZVT91Dw"
# create a video instance
video = Pafy(url)
# get certain attributes
print("\n\n")
print("Title, Rating, Length...")
print("------------------------")
print(video.title)
print(video.rating) # out of 5
print(video.length) # seconds
print("\n")
# get video metadata
print("Video meta info...")
print("------------------")
print(video)
print("\n")
# show all formats for a video:
print("All available formats...")
print("------------------------")
streams = video.streams
print([(s.resolution, s.extension) for s in streams])
print("\n")
# show all formats and their download/stream url:
print("All available streams...")
print("------------------------")
for s in streams:
print(s.resolution, s.extension, s.url)
print("\n")
# get best resolution regardless of file format
print("Best available quality...")
print("-------------------------")
best = video.getbest()
print(best.resolution, best.extension)
print("\n")
# get best resolution for a specified file format
# (mp4, webm, flv or 3gp)
print("Best available mp4 quality...")
print("-----------------------------")
best = video.getbest(preftype="mp4")
print(best.resolution, best.extension)
print("\n")
# get best resolution for specified file format, or return a different format
# if one happens to have a better resolution than the specified format
print("Best available quality, mp4 if exists as best")
print("---------------------------------------------")
best = video.getbest(preftype="mp4", ftypestrict=False)
print(best.resolution, best.extension)
print("\n")
# get url - for download or for streaming in mplayer / vlc
print("Best available quality url")
print("--------------------------")
print(best.url)
print("\n")
# download video, show progress
print("Download video, show progress")
print("-----------------------------")
print("Uncomment line in example.py source file to enable")
#best.download(quiet=False)
print("\n")
# download, specify output filepath
print("Download video, specify filepath")
print("--------------------------------")
print("Uncomment line in example.py source file to enable")
filename = "/tmp/" + best.title + best.extension
#best.download(quiet=False, filepath=filename)
|
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2019, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
import json
import pytest
from django.test import override_settings
from django.utils.translation import activate, get_language
from shuup.admin.views.select import MultiselectAjaxView
from shuup.core.models import (
Category, CompanyContact, PersonContact, Product, ProductMode,
SalesUnit, ShopProduct, ShopProductVisibility, CategoryStatus, Supplier
)
from shuup.testing.factories import create_product, get_default_shop, get_shop, create_random_user
from shuup.testing.utils import apply_request_middleware
from shuup_tests.utils.fixtures import regular_user
def _get_search_results(rf, view, model_name, search_str, user, search_mode=None, sales_units=None, shop=None):
data = {
"model": model_name,
"search": search_str
}
if search_mode:
data.update({"searchMode": search_mode})
if sales_units:
data.update({"salesUnits": sales_units})
if shop:
data.update({"shop": shop.pk})
request = apply_request_middleware(rf.get("sa/search", data), user=user)
response = view(request)
assert response.status_code == 200
return json.loads(response.content.decode("utf-8")).get("results")
@pytest.mark.django_db
def test_ajax_select_view_with_products(rf, admin_user):
shop = get_default_shop()
activate("en")
view = MultiselectAjaxView.as_view()
# No products, no results
results = _get_search_results(rf, view, "shuup.Product", "some str", admin_user)
assert len(results) == 0
product_name_en = "The Product"
product = create_product("the product", shop=shop, **{"name": product_name_en})
shop_product = product.get_shop_instance(shop)
product_name_fi = "tuote"
product.set_current_language("fi")
# Making sure we are not getting duplicates from translations
    product.name = product_name_fi  # making sure the Finnish translation does not simply mirror the English name
product.save()
view = MultiselectAjaxView.as_view()
results = _get_search_results(rf, view, "shuup.Product", "some str", admin_user)
assert len(results) == 0
results = _get_search_results(rf, view, "shuup.Product", None, admin_user)
assert len(results) == 0
results = _get_search_results(rf, view, "shuup.Product", "product", admin_user)
assert len(results) == 1
assert results[0].get("id") == product.id
assert results[0].get("name") == product_name_en
results = _get_search_results(rf, view, "shuup.ShopProduct", "product", admin_user)
assert len(results) == 1
assert results[0].get("id") == shop_product.id
assert results[0].get("name") == product_name_en
activate("fi")
results = _get_search_results(rf, view, "shuup.Product", "product", admin_user)
assert get_language() == 'fi'
assert len(results) == 1
assert results[0].get("id") == product.id
assert results[0].get("name") == product_name_fi
results = _get_search_results(rf, view, "shuup.Product", " product ", admin_user)
assert len(results) == 1
assert results[0].get("id") == product.id
assert results[0].get("name") == product_name_fi
product.soft_delete()
results = _get_search_results(rf, view, "shuup.Product", "product", admin_user)
assert len(results) == 0
supplier1 = Supplier.objects.create(name="supplier1", enabled=True)
supplier1.shops.add(shop)
product = create_product(
"test-product", shop, default_price="200", supplier=supplier1, mode=ProductMode.SIMPLE_VARIATION_PARENT)
results = _get_search_results(rf, view, "shuup.Product", " product ", admin_user, "parent_product")
assert len(results) == 1
shop2 = get_shop(identifier="shop2")
supplier2 = Supplier.objects.create(name="supplier2", enabled=False)
supplier2.shops.add(shop2)
product2 = create_product(
"test-product-two", shop2, default_price="200", supplier=supplier2, mode=ProductMode.SIMPLE_VARIATION_PARENT)
results = _get_search_results(rf, view, "shuup.Product", " product ", admin_user, "parent_product")
assert len(results) == 1
@pytest.mark.django_db
def test_multi_select_with_main_products(rf, admin_user):
shop = get_default_shop()
activate("en")
view = MultiselectAjaxView.as_view()
var1 = "size"
var2 = "color"
parent = create_product("test", shop=shop, **{"name": "test"})
for a in range(4):
for b in range(3):
product_name = "test-%s-%s" % (a, b)
child = create_product(product_name, shop=shop, **{"name": product_name})
child.link_to_parent(parent, variables={var1: a, var2: b})
assert child.mode == ProductMode.VARIATION_CHILD
assert parent.variation_children.count() == 4 * 3
assert Product.objects.count() == 4*3 + 1
results = _get_search_results(rf, view, "shuup.Product", "test", admin_user)
assert len(results) == Product.objects.count()
results = _get_search_results(rf, view, "shuup.Product", "test", admin_user, "main")
assert len(results) == 1
create_product("test1", shop=shop, **{"name": "test 123"})
results = _get_search_results(rf, view, "shuup.Product", "test", admin_user, "main")
assert len(results) == 2
create_product("2", shop=shop, **{"name": "something that doesn not match with the search term"})
results = _get_search_results(rf, view, "shuup.Product", "test", admin_user, "main")
assert len(results) == 2
@pytest.mark.django_db
def test_multi_select_with_sellable_only_products(rf, admin_user):
shop = get_default_shop()
activate("en")
view = MultiselectAjaxView.as_view()
var1 = "size"
var2 = "color"
parent = create_product("test", shop=shop, **{"name": "test"})
for a in range(4):
for b in range(3):
product_name = "test-%s-%s" % (a, b)
child = create_product(product_name, shop=shop, **{"name": product_name})
child.link_to_parent(parent, variables={var1: a, var2: b})
assert child.mode == ProductMode.VARIATION_CHILD
assert parent.variation_children.count() == 4 * 3
assert Product.objects.count() == 4 * 3 + 1
results = _get_search_results(rf, view, "shuup.Product", "test", admin_user)
assert len(results) == Product.objects.count()
results = _get_search_results(rf, view, "shuup.Product", "test", admin_user, "sellable_mode_only")
assert len(results) == Product.objects.count() - 1
create_product("test1", shop=shop, **{"name": "test 123"})
results = _get_search_results(rf, view, "shuup.Product", "test", admin_user, "sellable_mode_only")
assert len(results) == Product.objects.count() - 1 # Still only the parent is excluded
assert Product.objects.count() == 4 * 3 + 2
# hide all shop products
ShopProduct.objects.all().update(visibility=ShopProductVisibility.NOT_VISIBLE)
results = _get_search_results(rf, view, "shuup.Product", "test", admin_user, "sellable_mode_only")
assert len(results) == 0
# show them again
ShopProduct.objects.all().update(visibility=ShopProductVisibility.ALWAYS_VISIBLE)
results = _get_search_results(rf, view, "shuup.Product", "test", admin_user, "sellable_mode_only")
assert len(results) == Product.objects.count() - 1
# delete all products
[product.soft_delete() for product in Product.objects.all()]
results = _get_search_results(rf, view, "shuup.Product", "test", admin_user, "sellable_mode_only")
assert len(results) == 0
@pytest.mark.django_db
def test_multi_select_with_product_sales_unit(rf, admin_user):
shop = get_default_shop()
activate("en")
gram = SalesUnit.objects.create(symbol="g", name="Grams")
create_product("gram", shop=shop, **{"name": "Gram Product", "sales_unit": gram})
pieces = SalesUnit.objects.create(symbol="pcs", name="Pieces")
create_product("pcs", shop=shop, **{"name": "Pieces Product", "sales_unit": pieces})
kg = SalesUnit.objects.create(symbol="kg", name="Kilograms")
create_product("kg", shop=shop, **{"name": "Kilogram Product", "sales_unit": kg})
oz = SalesUnit.objects.create(symbol="oz", name="Ounce")
create_product("oz", shop=shop, **{"name": "Ounce Product", "sales_unit": oz})
view = MultiselectAjaxView.as_view()
results = _get_search_results(rf, view, "shuup.Product", "Product", admin_user)
assert len(results) == 4
assert len(_get_search_results(rf, view, "shuup.Product", "Product", admin_user, sales_units="g")) == 1
assert len(_get_search_results(rf, view, "shuup.Product", "Product", admin_user, sales_units="pcs")) == 1
assert len(_get_search_results(rf, view, "shuup.Product", "Product", admin_user, sales_units="kg")) == 1
assert len(_get_search_results(rf, view, "shuup.Product", "Product", admin_user, sales_units="oz")) == 1
assert len(_get_search_results(rf, view, "shuup.Product", "Product", admin_user, sales_units="g,oz")) == 2
assert len(_get_search_results(rf, view, "shuup.Product", "Product", admin_user, sales_units="g,kg,pcs")) == 3
assert len(_get_search_results(rf, view, "shuup.Product", "Product", admin_user, sales_units="oz,pcs,g,kg")) == 4
@pytest.mark.django_db
@pytest.mark.parametrize("contact_cls", [
PersonContact, CompanyContact
])
def test_ajax_select_view_with_contacts(rf, contact_cls, admin_user):
shop = get_default_shop()
view = MultiselectAjaxView.as_view()
results = _get_search_results(rf, view, "", "some str", admin_user)
assert len(results) == 0
model_name = "shuup.%s" % contact_cls._meta.model_name
results = _get_search_results(rf, view, model_name, "some str", admin_user)
assert len(results) == 0
# customer doesn't belong to shop
customer = contact_cls.objects.create(name="Michael Jackson", email="[email protected]")
results = _get_search_results(rf, view, model_name, "michael", admin_user)
assert len(results) == 0
customer.add_to_shop(shop)
results = _get_search_results(rf, view, model_name, "michael", admin_user)
assert len(results) == 1
assert results[0].get("id") == customer.id
assert results[0].get("name") == customer.name
results = _get_search_results(rf, view, model_name, "jacks", admin_user)
assert len(results) == 1
assert results[0].get("id") == customer.id
assert results[0].get("name") == customer.name
results = _get_search_results(rf, view, model_name, "el@ex", admin_user)
assert len(results) == 1
assert results[0].get("id") == customer.id
assert results[0].get("name") == customer.name
results = _get_search_results(rf, view, model_name, "random", admin_user) # Shouldn't find anything with this
assert len(results) == 0
@pytest.mark.django_db
@pytest.mark.parametrize("contact_cls", [
PersonContact, CompanyContact
])
def test_ajax_select_view_with_contacts_multipleshop(rf, contact_cls):
shop1 = get_default_shop()
shop2 = get_shop(identifier="shop2")
staff = create_random_user(is_staff=True)
shop1.staff_members.add(staff)
shop2.staff_members.add(staff)
view = MultiselectAjaxView.as_view()
model_name = "shuup.%s" % contact_cls._meta.model_name
customer = contact_cls.objects.create(name="Michael Jackson", email="[email protected]")
customer_shop1 = contact_cls.objects.create(name="Roberto", email="[email protected]")
customer_shop2 = contact_cls.objects.create(name="Maria", email="[email protected]")
results = _get_search_results(rf, view, model_name, "michael", staff)
assert len(results) == 0
customer.add_to_shop(shop1)
customer.add_to_shop(shop2)
customer_shop1.add_to_shop(shop1)
customer_shop2.add_to_shop(shop2)
for shop in [shop1, shop2]:
results = _get_search_results(rf, view, model_name, "michael", staff, shop=shop)
assert len(results) == 1
assert results[0].get("id") == customer.id
assert results[0].get("name") == customer.name
results = _get_search_results(rf, view, model_name, "roberto", staff, shop=shop)
if shop == shop1:
assert len(results) == 1
assert results[0].get("id") == customer_shop1.id
assert results[0].get("name") == customer_shop1.name
else:
assert len(results) == 0
results = _get_search_results(rf, view, model_name, "maria", staff, shop=shop)
if shop == shop2:
assert len(results) == 1
assert results[0].get("id") == customer_shop2.id
assert results[0].get("name") == customer_shop2.name
else:
assert len(results) == 0
@pytest.mark.django_db
def test_ajax_select_view_with_categories(rf, admin_user):
activate("en")
shop = get_default_shop()
view = MultiselectAjaxView.as_view()
# No categories, no results
results = _get_search_results(rf, view, "shuup.Category", "some str", admin_user)
assert len(results) == 0
category = Category.objects.create(
parent=None,
identifier="test",
name="test",
)
category.shops.add(shop)
results = _get_search_results(rf, view, "shuup.Category", "some str", admin_user)
assert len(results) == 0
results = _get_search_results(rf, view, "shuup.Category", category.name, admin_user)
assert len(results) == 1
category.soft_delete()
results = _get_search_results(rf, view, "shuup.Category", category.name, admin_user)
assert len(results) == 0
@pytest.mark.django_db
def test_multiselect_inactive_users_and_contacts(rf, regular_user, admin_user):
"""
Make sure inactive users and contacts are filtered from search results.
"""
shop = get_default_shop()
view = MultiselectAjaxView.as_view()
assert "joe" in regular_user.username
results = _get_search_results(rf, view, "auth.User", "joe", admin_user)
assert len(results) == 1
assert results[0].get("id") == regular_user.id
assert results[0].get("name") == regular_user.username
contact = PersonContact.objects.create(first_name="Joe", last_name="Somebody")
# contact not in shop
results = _get_search_results(rf, view, "shuup.PersonContact", "joe", admin_user)
assert len(results) == 0
contact.add_to_shop(shop)
results = _get_search_results(rf, view, "shuup.PersonContact", "joe", admin_user)
assert len(results) == 1
assert results[0].get("id") == contact.id
assert results[0].get("name") == contact.name
contact.is_active = False
contact.save()
results = _get_search_results(rf, view, "shuup.PersonContact", "joe", admin_user)
assert len(results) == 0
@pytest.mark.django_db
def test_select_category(rf, admin_user):
shop = get_default_shop()
activate("en")
view = MultiselectAjaxView.as_view()
category1 = Category.objects.create(name="category", status=CategoryStatus.VISIBLE)
category2 = Category.objects.create(name="category", status=CategoryStatus.INVISIBLE)
Category.objects.create(name="category")
category1.shops.add(shop)
category2.shops.add(shop)
results = _get_search_results(rf, view, "shuup.Category", "category", admin_user)
assert len(results) == 2
# only visible
results = _get_search_results(rf, view, "shuup.Category", "category", admin_user, search_mode="visible")
assert len(results) == 1
@pytest.mark.django_db
def test_select_supplier(rf, admin_user):
shop = get_default_shop()
activate("en")
view = MultiselectAjaxView.as_view()
supplier1 = Supplier.objects.create(name="supplier1", enabled=True)
supplier2 = Supplier.objects.create(name="supplier2", enabled=False)
Supplier.objects.create(name="supplier3", enabled=True)
supplier1.shops.add(shop)
supplier2.shops.add(shop)
results = _get_search_results(rf, view, "shuup.supplier", "supplier", admin_user)
assert len(results) == 2
# only enabled
results = _get_search_results(rf, view, "shuup.supplier", "supplier", admin_user, search_mode="enabled")
assert len(results) == 1
@pytest.mark.django_db
def test_shop_products_with_supplier_filter(rf, admin_user):
shop = get_default_shop()
activate("en")
view = MultiselectAjaxView.as_view()
superuser1 = create_random_user(is_superuser=True, is_staff=True)
supplier1 = Supplier.objects.create(identifier=superuser1.username)
superuser2 = create_random_user(is_superuser=True, is_staff=True)
supplier2 = Supplier.objects.create(identifier=superuser2.username)
product_name_en = "ok"
product = create_product("test1", shop=shop, supplier=supplier1, **{"name": product_name_en})
shop_product = product.get_shop_instance(shop)
assert shop_product.suppliers.filter(pk=supplier1.pk).exists()
supplier_provider = "shuup.testing.supplier_provider.UsernameSupplierProvider"
with override_settings(SHUUP_ADMIN_SUPPLIER_PROVIDER_SPEC=supplier_provider):
results = _get_search_results(rf, view, "shuup.ShopProduct", "ok", superuser1)
assert len(results) == 1
assert results[0].get("id") == shop_product.id
assert results[0].get("name") == product_name_en
results = _get_search_results(rf, view, "shuup.ShopProduct", "ok", superuser2)
assert len(results) == 0
|
#!/usr/bin/python
import numpy as np
import sys
from sim2 import * ## where all the BF stuff is; for the four 'a' matrices
import galsim
import matplotlib
matplotlib.use('Pdf')
import matplotlib.cm as cm # color bar, to plot
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.patches as patches
pp=PdfPages("wfirst_psf.pdf")
def my_imshow(im, ax=None, **kwargs):
if ax is None:
ax = plt.gca()
def format_coord(x, y):
x = int(x + 0.5)
y = int(y + 0.5)
try:
return '%8e @ [%4i, %4i]' % (im[y, x], x, y)
except IndexError:
return ''
img = ax.imshow(im, **kwargs)
ax.format_coord=format_coord
return img
def measurement_function(profile, noise=None, beta=3.566e-7, base_size=1024, type='nl', n=3, offset=(0., 0.), pixel_scale=0.11, new_params=galsim.hsm.HSMParams(max_amoment=60000000, max_mom2_iter=10000000000, max_moment_nsig2=25)):
"""
This function receives a GSObject profile and applies one of the two sensor effects:
1) NL (cuadratic, with a single /beta parameter)
2) BF (cdModel implemented in GalSim for CCDs. Antilogus et al. 2014)
Return: None, it is a void function. But the input vectors e1_inter_vec=[], e2_inter_vec=[], size_inter_vec=[] should be filled.
"""
#print "INSIDE meas. function: ", beta
# Figure out how many times we are going to go through the whole rendering process
# Even if n_offsets = 0, we are going to draw once.
#if n_offsets == 0:
# n_iter = 1
#else:
# n_iter = n_offsets
draw_wfirst_psf=False
#offset_input=(0.0, 0.0)
if type == 'nl':
method='oversampling'
#method='interleaving' # Just temporal
#f=lambda x,beta : x - beta*x*x*x*x
f=lambda x,beta : x - beta*x*x
#f=lambda x,beta : x - beta*x*x
#f=lambda x, (b,g,d) : x + b*x*x + g*x*x*x + d*x*x*x*x
#f=lambda x, b : x + b*x*x*x*x #+ g*x*x*x + d*x*x*x*x
elif type == 'bf':
method='interleaving'
else:
print "ERROR in call to 'measurement_function': wrong type (nor 'nl' nor 'bf')"
sys.exit(1)
if method == 'oversampling': ## NL does not need interleaving
print "METHOD: oversampling"
#### Calculate moments without effect
print "Applied FLUX in electrons: ", profile.getFlux()
        # Do several realizations at different centroid offsets
"""
vec_ud=[]
for ind in range(n_iter):
ud=galsim.UniformDeviate()
vec_ud.append(ud)
if n_offsets > 0:
offset=(ud(), ud())
# For the high-res image, have to find how many high-res pixels the offset is, and then take
# only the sub-pixel part.
offset_highres = (offset[0]*n % 1, offset[1]*n % 1)
else:
offset = (0., 0.)
offset_highres = (0., 0.)
image=profile.drawImage(image=galsim.Image(base_size, base_size, dtype=np.float64 ), scale=pixel_scale/n, method='no_pixel', offset=offset_highres)
#print "Maximum flux: ", np.amax(image.array)
#print "Flux of image after being drawn (using np.sum(image.array)): ", np.sum(image.array)
#print image.array.shape
image_mult=(n*n)*image
#print "Maximum flux: ", np.amax(image.array)
#print "Flux of image after adjusting n*n(using np.sum(image.array)): ", np.sum(image.array)
if ind == 0:
image_sum= image_mult
else:
image_sum+=image_mult
image=image_sum/float(n_iter)
"""
offset= (0.0, 0.0)
print "Offset: ", offset
image=profile.drawImage(image=galsim.Image(base_size, base_size, dtype=np.float64), scale=pixel_scale/n, method='no_pixel', offset=offset)
image=(n*n)*image
#image
#sys.exit()
#if not noise == None:
# read_noise = galsim.GaussianNoise(sigma=noise/(n**2))
# image.addNoise(read_noise)
# IMAGE
if draw_wfirst_psf == True:
k=64
delta=15
bm, bp = 0.5*(1.5*k)-delta, 0.5*(1.5*k) + delta
bounds=galsim.BoundsI(bm,bp,bm,bp)
before=image[bounds].array
fig=plt.figure()
ax=fig.add_subplot(223)
plt.imshow(before, cmap='cubehelix', norm=LogNorm(), interpolation='nearest' )
ax.get_xaxis().set_ticks([])
ax.get_yaxis().set_ticks([])
ax.set_title ('Y106: no NL (core)')
plt.colorbar()
before_all=image.array
ax=fig.add_subplot(221)
plt.imshow((before_all), cmap='cubehelix', norm=LogNorm(), interpolation='nearest')
plt.colorbar()
ax.get_xaxis().set_ticks([])
ax.get_yaxis().set_ticks([])
ax.set_title ('Y106: no NL (full stamp)')
ax.add_patch( patches.Rectangle( (0.5*(1.5*k) - delta, 0.5*(1.5*k) - delta),2*delta,2*delta, fill=False))
#IMAGE
results=image.FindAdaptiveMom(hsmparams=new_params)
ref_e1=results.observed_shape.e1
ref_e2=results.observed_shape.e2
ref_s=results.moments_sigma
#print "Image shape, before interleave: ", image.array.shape
print "ref_e1, ref_e2, ref_s", ref_e1, ref_e2, ref_s
#### Calculate moments with the effect
"""
# Do several realizations at differen centroid offsets
for ind in range(n_iter):
#ud=galsim.UniformDeviate()
ud=vec_ud[ind]
if n_offsets > 0:
offset=(ud(), ud())
# For the high-res image, have to find how many high-res pixels the offset is, and then take
# only the sub-pixel part.
offset_highres = (offset[0]*n % 1, offset[1]*n % 1)
else:
offset = (0., 0.)
offset_highres = (0., 0.)
image=profile.drawImage(image=galsim.Image(base_size, base_size), scale=pixel_scale/n, method='no_pixel', offset=offset_highres)
#print "Maximum flux: ", np.amax(image.array)
#print "Flux of image after being drawn (using np.sum(image.array)): ", np.sum(image.array)
#print image.array.shape
image_mult=(n*n)*image
#print "Maximum flux: ", np.amax(image.array)
#print "Flux of image after adjusting n*n(using np.sum(image.array)): ", np.sum(image.array)
if ind == 0:
image_sum= image_mult
else:
image_sum+=image_mult
image=image_sum/float(n_iter)
"""
image=profile.drawImage(image=galsim.Image(base_size, base_size, dtype=np.float64), scale=pixel_scale/n, method='no_pixel', offset=offset)
image=(n*n)*image
image.applyNonlinearity(f,beta)
#sys.exit()
print "Flux of image after VNL (using np.sum(image.array)): ", np.sum(image.array)
if draw_wfirst_psf == True:
# IMAGE
after=image[bounds].array
print "drawing fractional difference "
diff= (before - after)/before # VNL attenuates
ax=fig.add_subplot(122)
#ax=plt.subplot2grid ( (2,2), (1,1), colspan=2 )
#ax.set_position ([0.1, 0.5, 0.5, 0.5])
#print "diff: ", diff
#sys.exit()
plt.imshow((diff), cmap='gnuplot2', norm=LogNorm(), interpolation='nearest')
plt.colorbar()
ax.get_xaxis().set_ticks([])
ax.get_yaxis().set_ticks([])
ax.set_title ('NL vs no NL: \n fractional difference (core)')
plt.tight_layout()
pp.savefig()
pp.close()
sys.exit()
#IMAGE
#results=image.FindAdaptiveMom(hsmparams=new_params)
#print "results.observed_shape.e1, results.observed_shape.e2, results.moments_sigma ", results.observed_shape.e1, results.observed_shape.e2, results.moments_sigma
#print "Differences: ", results.observed_shape.e1 - ref_e1, results.observed_shape.e2 - ref_e2, (results.moments_sigma - ref_s) / ref_s
# Output values
#e1_out=results.observed_shape.e1 - ref_e1
#e2_out=results.observed_shape.e2 - ref_e2
#size_out=(results.moments_sigma - ref_s) / ref_s
#return e1_out, e2_out, size_out
results=image.FindAdaptiveMom(hsmparams=new_params)
obs_e1=(results.observed_shape.e1)
obs_e2=(results.observed_shape.e2)
obs_s=(results.moments_sigma)
d_e1=(obs_e1 - ref_e1)
d_e2=(obs_e2 - ref_e2)
d_s=(obs_s/ref_s -1.)
print "obs_e1: %.16g, obs_e2 : %.16g, obs_s: %.16g" %(obs_e1, obs_e2, obs_s)
print "Differences: d_e1: %.16g, d_e2 : %.16g, d_s: %.16g" %(d_e1, d_e2, d_s)
# Output values
e1_out= d_e1
e2_out= d_e2
size_out= d_s
return e1_out, e2_out, size_out
if method == 'interleaving':
print "METHOD: Interleaving"
        ## Interleave the profiles with NO EFFECT
        offset_input = offset   # keep the function argument; 'offset' is reused as the loop variable below
        im_list=[]
        offsets_list=[]
        # Create the list of images to interleave (no effect applied):
        # draw each of the n^2 sub-images at its own sub-pixel offset.
for j in xrange(n):
for i in xrange(n):
im=galsim.Image(base_size, base_size, dtype=np.float64)
                offset=galsim.PositionD(offset_input[0] - (i+0.5)/n+0.5, offset_input[1] - (j+0.5)/n+0.5)  ## interleaving grid offset plus the requested centroid offset
offsets_list.append(offset)
#print "Offset: ", offset
profile.drawImage(image=im, scale=pixel_scale, offset=offset, method='no_pixel')
im_list.append(im)
image=galsim.utilities.interleaveImages(im_list=im_list, N=(n,n), offsets=offsets_list, add_flux=True)
print "Image shape, after interleave, no effect: ", image.array.shape
print "Flux of image after interleave (using np.sum(image.array)): ", np.sum(image.array)
        if noise is not None:
read_noise = galsim.GaussianNoise(sigma=noise)
image.addNoise(read_noise)
results=image.FindAdaptiveMom(hsmparams=new_params)
ref_e1=results.observed_shape.e1
ref_e2=results.observed_shape.e2
ref_s=results.moments_sigma
print "ref_e1, ref_e2, ref_s", ref_e1, ref_e2, ref_s
## Interleave the profiles with the effect
im_list=[]
offsets_list=[]
        # Create the list of images to interleave (effect applied)
for j in xrange(n):
for i in xrange(n):
im=galsim.Image(base_size, base_size, dtype=np.float64)
offset=galsim.PositionD(offset_input[0] -(i+0.5)/n+0.5, offset_input[1] - (j+0.5)/n+0.5)
offsets_list.append(offset)
#print "Offset: ", offset
im=profile.drawImage(image=im, scale=pixel_scale, offset=offset, method='no_pixel')
if type == 'bf':
#cd = PowerLawCD(5, 5.e-7, 5.e-7, 1.5e-7, 1.5e-7, 2.5e-7, 2.5e-7, 1.3)
(aL,aR,aB,aT) = readmeanmatrices()
cd = galsim.cdmodel.BaseCDModel (aL,aR,aB,aT)
im=cd.applyForward(im)
elif type == 'nl':
im.applyNonlinearity(f,beta)
im_list.append(im)
image2=galsim.utilities.interleaveImages(im_list=im_list, N=(n,n), offsets=offsets_list, add_flux=True)
print "Image shape, after interleave: ", image2.array.shape
print "Flux of image after interleave (using np.sum(image.array)): ", np.sum(image.array)
        if noise is not None:
read_noise = galsim.GaussianNoise(sigma=noise)
image2.addNoise(read_noise)
results=image2.FindAdaptiveMom(hsmparams=new_params)
print "results.observed_shape.e1, results.observed_shape.e2, results.moments_sigma ", results.observed_shape.e1, results.observed_shape.e2, results.moments_sigma
print "Differences: ", results.observed_shape.e1 - ref_e1, results.observed_shape.e2 - ref_e2, (results.moments_sigma - ref_s) / ref_s
#e1_inter_vec.append (results.observed_shape.e1 - ref_e1)
#e2_inter_vec.append (results.observed_shape.e2 - ref_e2)
#size_inter_vec.append ( (results.moments_sigma - ref_s) / ref_s)
# Output values
e1_out=results.observed_shape.e1 - ref_e1
e2_out=results.observed_shape.e2 - ref_e2
size_out=(results.moments_sigma - ref_s) / ref_s
return e1_out, e2_out, size_out
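# Hedged usage sketch (not part of the original script): exercise the NL branch of
# measurement_function() with a simple Gaussian profile. The profile, flux, stamp
# size and oversampling factor below are illustrative assumptions only; the 'bf'
# branch additionally needs readmeanmatrices() from sim2.
if __name__ == "__main__":
    test_profile = galsim.Gaussian(fwhm=0.18, flux=1.e5)   # hypothetical PSF-like profile
    d_e1, d_e2, d_s = measurement_function(test_profile, beta=3.566e-7, base_size=64,
                                           type='nl', n=3, pixel_scale=0.11)
    print "NL-induced biases: d_e1=%g, d_e2=%g, d_s=%g" % (d_e1, d_e2, d_s)
    pp.close()   # close the PdfPages handle opened at module level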
|
# -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2015, 2016 CERN.
#
# INSPIRE is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Model for WorkflowsAudit."""
from __future__ import absolute_import, print_function
import requests
import json
from flask import current_app
from .models import WorkflowsAudit
def json_api_request(url, data, headers=None):
"""Make JSON API request and return JSON response."""
final_headers = {
"Content-Type": "application/json",
"Accept": "application/json"
}
if headers:
final_headers.update(headers)
current_app.logger.debug("POST {0} with \n{1}".format(
url, json.dumps(data, indent=4)
))
try:
response = requests.post(
url=url,
headers=final_headers,
data=json.dumps(data),
timeout=30
)
except requests.exceptions.RequestException as err:
current_app.logger.exception(err)
raise
if response.status_code == 200:
return response.json()
def log_workflows_action(action, prediction_results,
object_id, user_id,
source, user_action=""):
"""Log the action taken by user compared to a prediction."""
if prediction_results:
score = prediction_results.get("max_score") # returns 0.222113
decision = prediction_results.get("decision") # returns "Rejected"
# Map actions to align with the prediction format
action_map = {
'accept': 'Non-CORE',
'accept_core': 'CORE',
'reject': 'Rejected'
}
logging_info = {
'object_id': object_id,
'user_id': user_id,
'score': score,
'user_action': action_map.get(user_action, ""),
'decision': decision,
'source': source,
'action': action
}
audit = WorkflowsAudit(**logging_info)
audit.save()
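# Hedged usage sketch (illustrative only, not part of the original module). Since
# this module uses relative imports it is meant to be imported from its package,
# so the example stays commented out. The endpoint URL, payload and ids below are
# made-up placeholders:
#
#     prediction = json_api_request(
#         "http://localhost:5000/api/coreness",      # placeholder endpoint
#         {"title": "Some record title"})
#     if prediction:
#         log_workflows_action(
#             action="resolve",
#             prediction_results=prediction,          # {"max_score": ..., "decision": ...}
#             object_id=1, user_id=1,                 # placeholder ids
#             source="holdingpen",
#             user_action="accept")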
|
# -*- coding: utf-8 -*-
# -- This file is part of the Apio project
# -- (C) 2016-2018 FPGAwars
# -- Author Jesús Arroyo
# -- Licence GPLv2
import click
import requests
from apio import util
requests.packages.urllib3.disable_warnings()
def api_request(command, organization='FPGAwars'):
result = None
r = None
try:
r = requests.get(
'https://api.github.com/repos/{0}/{1}'.format(
organization, command),
headers=_get_headers())
result = r.json()
r.raise_for_status()
except requests.exceptions.ConnectionError as e:
error_message = str(e)
if 'NewConnectionError' in error_message:
click.secho('Error: could not connect to GitHub API.\n'
'Check your internet connection and try again',
fg='red')
else:
click.secho(error_message, fg='red')
exit(1)
except Exception as e:
click.secho('Error: ' + str(e), fg='red')
exit(1)
finally:
if r:
r.close()
if result is None:
click.secho('Error: wrong data from GitHub API', fg='red')
exit(1)
return result
def _get_headers():
enc = 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJwdWJsaWNfdG9rZW4iOiJ0' + \
'b2tlbiBhNTk2OTUwNjFhYzRkMjBkZjEwNTFlZDljOWZjNGI4M2Q0NzAyYzA3I' + \
'n0.POR6Iae_pSt0m6h-AaRi1X6QaRcnnfl9aZbTSV0BUJw'
return {'Authorization': util.decode(enc).get('public_token')}
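# Hedged usage sketch (illustrative only, not part of the original module): query
# the GitHub API for the latest release of a repository under the FPGAwars
# organization. The repository path is an assumption and this performs a live
# HTTPS request.
if __name__ == '__main__':
    release = api_request('apio-examples/releases/latest')
    click.secho('Latest apio-examples release: {0}'.format(
        release.get('tag_name', 'unknown')), fg='green')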
|
# Copyright (c) 2010-2017 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A standard ring built using the :ref:`ring-builder <ring_builder>` will attempt
to randomly disperse replicas or erasure-coded fragments across failure
domains, but does not provide any guarantees such as placing at least one
replica of every partition into each region. Composite rings are intended to
provide operators with greater control over the dispersion of object replicas
or fragments across a cluster, in particular when there is a desire to
have strict guarantees that some replicas or fragments are placed in certain
failure domains. This is particularly important for policies with duplicated
erasure-coded fragments.
A composite ring comprises two or more component rings that are combined to
form a single ring with a replica count equal to the sum of replica counts
from the component rings. The component rings are built independently, using
distinct devices in distinct regions, which means that the dispersion of
replicas between the components can be guaranteed. The ``composite_builder``
utilities may then be used to combine components into a composite ring.
For example, consider a normal ring ``ring0`` with replica count of 4 and
devices in two regions ``r1`` and ``r2``. Despite the best efforts of the
ring-builder, it is possible for there to be three replicas of a particular
partition placed in one region and only one replica placed in the other region.
For example::
part_n -> r1z1h110/sdb r1z2h12/sdb r1z3h13/sdb r2z1h21/sdb
Now consider two normal rings each with replica count of 2: ``ring1`` has
devices in only ``r1``; ``ring2`` has devices in only ``r2``.
When these rings are combined into a composite ring then every partition is
guaranteed to be mapped to two devices in each of ``r1`` and ``r2``, for
example::
part_n -> r1z1h10/sdb r1z2h20/sdb r2z1h21/sdb r2z2h22/sdb
|_____________________| |_____________________|
| |
ring1 ring2
The dispersion of partition replicas across failure domains within each of the
two component rings may change as they are modified and rebalanced, but the
dispersion of replicas between the two regions is guaranteed by the use of a
composite ring.
For rings to be formed into a composite they must satisfy the following
requirements:
* All component rings must have the same part power (and therefore number of
partitions)
* All component rings must have an integer replica count
* Each region may only be used in one component ring
* Each device may only be used in one component ring
Under the hood, the composite ring has a ``_replica2part2dev_id`` table that is
the union of the tables from the component rings. Whenever the component rings
are rebalanced, the composite ring must be rebuilt. There is no dynamic
rebuilding of the composite ring.
.. note::
The order in which component rings are combined into a composite ring is
very significant because it determines the order in which the
Ring.get_part_nodes() method will provide primary nodes for the composite
ring and consequently the node indexes assigned to the primary nodes. For
an erasure-coded policy, inadvertent changes to the primary node indexes
could result in large amounts of data movement due to fragments being moved
to their new correct primary.
The ``id`` of each component RingBuilder is therefore stored in metadata of
the composite and used to check for the component ordering when the same
composite ring is re-composed. RingBuilder ``id``\s are normally assigned
when a RingBuilder instance is first saved. Older RingBuilder instances
loaded from file may not have an ``id`` assigned and will need to be saved
before they can be used as components of a composite ring. This can be
achieved by, for example::
swift-ring-builder <builder-file> rebalance --force
"""
import copy
import json
import os
from swift.common.ring import RingBuilder
from swift.common.ring import RingData
from collections import defaultdict
from itertools import combinations
MUST_MATCH_ATTRS = (
'part_power',
)
def pre_validate_all_builders(builders):
"""
Pre-validation for all component ring builders that are to be included in
the composite ring. Checks that all component rings are valid with respect
to each other.
:param builders: a list of :class:`swift.common.ring.builder.RingBuilder`
instances
:raises ValueError: if the builders are invalid with respect to each other
"""
if len(builders) < 2:
raise ValueError('Two or more component builders are required.')
# all ring builders should be consistent for each MUST_MATCH_ATTRS
for attr in MUST_MATCH_ATTRS:
attr_dict = defaultdict(list)
for i, builder in enumerate(builders):
value = getattr(builder, attr, None)
attr_dict[value].append(i)
if len(attr_dict) > 1:
variations = ['%s=%s found at indexes %s' %
(attr, val, indexes)
for val, indexes in attr_dict.items()]
raise ValueError(
'All builders must have same value for %r.\n%s'
% (attr, '\n '.join(variations)))
# all ring builders should have int replica count and not have dirty mods
errors = []
for index, builder in enumerate(builders):
if int(builder.replicas) != builder.replicas:
errors.append(
'Non integer replica count %s found at index %s' %
(builder.replicas, index))
if builder.devs_changed:
errors.append(
'Builder needs rebalance to apply changes at index %s' %
index)
if errors:
raise ValueError(
'Problem with builders.\n%s' % ('\n '.join(errors)))
# check regions
regions_info = {}
for builder in builders:
regions_info[builder] = set(
dev['region'] for dev in builder._iter_devs())
for first_region_set, second_region_set in combinations(
regions_info.values(), 2):
inter = first_region_set & second_region_set
if inter:
raise ValueError('Same region found in different rings')
# check device uniqueness
check_for_dev_uniqueness(builders)
def check_for_dev_uniqueness(builders):
"""
Check that no device appears in more than one of the given list of
builders.
:param builders: a list of :class:`swift.common.ring.builder.RingBuilder`
instances
:raises ValueError: if the same device is found in more than one builder
"""
builder2devs = []
for i, builder in enumerate(builders):
dev_set = set()
for dev in builder._iter_devs():
ip, port, device = (dev['ip'], dev['port'], dev['device'])
for j, (other_builder, devs) in enumerate(builder2devs):
if (ip, port, device) in devs:
raise ValueError(
'Duplicate ip/port/device combination %s/%s/%s found '
'in builders at indexes %s and %s' %
(ip, port, device, j, i)
)
dev_set.add((ip, port, device))
builder2devs.append((builder, dev_set))
def _make_composite_ring(builders):
"""
Given a list of component ring builders, return a composite RingData
instance.
:param builders: a list of
:class:`swift.common.ring.builder.RingBuilder` instances
:return: a new RingData instance built from the component builders
:raises ValueError: if the builders are invalid with respect to each other
"""
composite_r2p2d = []
composite_devs = []
device_offset = 0
for builder in builders:
# copy all devs list and replica2part2dev table to be able
# to modify the id for each dev
devs = copy.deepcopy(builder.devs)
r2p2d = copy.deepcopy(builder._replica2part2dev)
for part2dev in r2p2d:
for part, dev in enumerate(part2dev):
part2dev[part] += device_offset
for dev in [d for d in devs if d]:
# note that some devs may not be referenced in r2p2d but update
# their dev id nonetheless
dev['id'] += device_offset
composite_r2p2d.extend(r2p2d)
composite_devs.extend(devs)
device_offset += len(builder.devs)
return RingData(composite_r2p2d, composite_devs, builders[0].part_shift)
def compose_rings(builders):
"""
Given a list of component ring builders, perform validation on the list of
builders and return a composite RingData instance.
:param builders: a list of
:class:`swift.common.ring.builder.RingBuilder` instances
:return: a new RingData instance built from the component builders
:raises ValueError: if the builders are invalid with respect to each other
"""
pre_validate_all_builders(builders)
rd = _make_composite_ring(builders)
return rd
def _make_component_meta(builder):
"""
Return a dict of selected builder attributes to save in composite meta. The
dict has keys ``version``, ``replicas`` and ``id``.
:param builder: a :class:`swift.common.ring.builder.RingBuilder`
instance
:return: a dict of component metadata
"""
attrs = ['version', 'replicas', 'id']
metadata = dict((attr, getattr(builder, attr)) for attr in attrs)
return metadata
def _make_composite_metadata(builders):
"""
Return a dict with key ``components`` that maps to a list of dicts, each
dict being of the form returned by :func:`_make_component_meta`.
:param builders: a list of
:class:`swift.common.ring.builder.RingBuilder` instances
:return: a dict of composite metadata
"""
component_meta = [_make_component_meta(builder) for builder in builders]
return {'components': component_meta}
def check_same_builder(old_component, new_component):
"""
Check that the given new_component metadata describes the same builder as
the given old_component metadata. The new_component builder does not
necessarily need to be in the same state as when the old_component metadata
was created to satisfy this check e.g. it may have changed devs and been
rebalanced.
:param old_component: a dict of metadata describing a component builder
:param new_component: a dict of metadata describing a component builder
:raises ValueError: if the new_component is not the same as that described
by the old_component
"""
for key in ['replicas', 'id']:
if old_component[key] != new_component[key]:
raise ValueError("Attribute mismatch for %s: %r != %r" %
(key, old_component[key], new_component[key]))
def is_builder_newer(old_component, new_component):
"""
Return True if the given builder has been modified with respect to its
state when the given component_meta was created.
:param old_component: a dict of metadata describing a component ring
:param new_component: a dict of metadata describing a component ring
:return: True if the builder has been modified, False otherwise.
:raises ValueError: if the version of the new_component is older than the
version of the existing component.
"""
if new_component['version'] < old_component['version']:
raise ValueError('Older builder version: %s < %s' %
(new_component['version'], old_component['version']))
return old_component['version'] < new_component['version']
def check_against_existing(old_composite_meta, new_composite_meta):
"""
Check that the given builders and their order are the same as that
used to build an existing composite ring. Return True if any of the given
builders has been modified with respect to its state when the given
component_meta was created.
    :param old_composite_meta: a dict of the form returned by
        :func:`_make_composite_metadata`
    :param new_composite_meta: a dict of the form returned by
        :func:`_make_composite_metadata`
:return: True if any of the components has been modified, False otherwise.
    :raises ValueError: if proposed new components do not match any existing
components.
"""
errors = []
newer = False
old_components = old_composite_meta['components']
new_components = new_composite_meta['components']
for i, old_component in enumerate(old_components):
try:
new_component = new_components[i]
except IndexError:
errors.append("Missing builder at index %d" % i)
continue
try:
# check we have same component builder in this position vs existing
check_same_builder(old_component, new_component)
newer |= is_builder_newer(old_component, new_component)
except ValueError as err:
errors.append("Invalid builder change at index %d: %s" % (i, err))
for j, new_component in enumerate(new_components[i + 1:], start=i + 1):
errors.append("Unexpected extra builder at index %d: %r" %
(j, new_component))
if errors:
raise ValueError('\n'.join(errors))
return newer
def check_builder_ids(builders):
"""
Check that all builders in the given list have id's assigned and that no
id appears more than once in the list.
    :param builders: a list of instances of
        :class:`swift.common.ring.builder.RingBuilder`
    :raises ValueError: if any builder id is missing or repeated
"""
id2index = defaultdict(list)
errors = []
for i, builder in enumerate(builders):
try:
id2index[builder.id].append(str(i))
except AttributeError as err:
errors.append("Problem with builder at index %d: %s" % (i, err))
for builder_id, index in id2index.items():
if len(index) > 1:
errors.append("Builder id %r used at indexes %s" %
(builder_id, ', '.join(index)))
if errors:
raise ValueError('\n'.join(errors))
class CompositeRingBuilder(object):
"""
Provides facility to create, persist, load and update composite rings, for
example::
# create a CompositeRingBuilder instance with a list of
# component builder files
crb = CompositeRingBuilder(["region1.builder", "region2.builder"])
# call compose which will make a new RingData instance
ring_data = crb.compose()
# save the composite ring file
ring_data.save("composite_ring.gz")
# save the composite metadata file
crb.save("composite_builder.composite")
# load the persisted composite metadata file
crb = CompositeRingBuilder.load("composite_builder.composite")
# compose (optionally update the paths to the component builder files)
crb.compose(["/path/to/region1.builder", "/path/to/region2.builder"])
Composite ring metadata is persisted to file in JSON format. The metadata
has the structure shown below (using example values)::
        {
            "version": 4,
            "components": [
                {
                    "version": 3,
                    "id": "8e56f3b692d43d9a666440a3d945a03a",
                    "replicas": 1
                },
                {
                    "version": 5,
                    "id": "96085923c2b644999dbfd74664f4301b",
                    "replicas": 1
                }
            ],
            "component_builder_files": {
                "8e56f3b692d43d9a666440a3d945a03a": "/etc/swift/region1.builder",
                "96085923c2b644999dbfd74664f4301b": "/etc/swift/region2.builder"
            },
            "serialization_version": 1,
            "saved_path": "/etc/swift/multi-ring-1.composite"
        }
`version` is an integer representing the current version of the composite
ring, which increments each time the ring is successfully (re)composed.
`components` is a list of dicts, each of which describes relevant
properties of a component ring
`component_builder_files` is a dict that maps component ring builder ids to
the file from which that component ring builder was loaded.
`serialization_version` is an integer constant.
`saved_path` is the path to which the metadata was written.
    :param builder_files: a list of paths to builder files that will be used
as components of the composite ring.
"""
def __init__(self, builder_files=None):
self.version = 0
self.components = []
self.ring_data = None
self._builder_files = None
self._set_builder_files(builder_files or [])
def _set_builder_files(self, builder_files):
self._builder_files = [os.path.abspath(bf) for bf in builder_files]
@classmethod
def load(cls, path_to_file):
"""
Load composite ring metadata.
:param path_to_file: Absolute path to a composite ring JSON file.
:return: an instance of :class:`CompositeRingBuilder`
:raises IOError: if there is a problem opening the file
:raises ValueError: if the file does not contain valid composite ring
metadata
"""
try:
with open(path_to_file, 'rb') as fp:
metadata = json.load(fp)
builder_files = [metadata['component_builder_files'][comp['id']]
for comp in metadata['components']]
builder = CompositeRingBuilder(builder_files)
builder.components = metadata['components']
builder.version = metadata['version']
except (ValueError, TypeError, KeyError):
raise ValueError("File does not contain valid composite ring data")
return builder
def to_dict(self):
"""
Transform the composite ring attributes to a dict. See
:class:`CompositeRingBuilder` for details of the persisted metadata
format.
:return: a composite ring metadata dict
"""
id2builder_file = dict((component['id'], self._builder_files[i])
for i, component in enumerate(self.components))
return {'components': self.components,
'component_builder_files': id2builder_file,
'version': self.version}
def save(self, path_to_file):
"""
Save composite ring metadata to given file. See
:class:`CompositeRingBuilder` for details of the persisted metadata
format.
:param path_to_file: Absolute path to a composite ring file
:raises ValueError: if no composite ring has been built yet with this
instance
"""
if not self.components or not self._builder_files:
raise ValueError("No composed ring to save.")
# persist relative paths to builder files
with open(path_to_file, 'wb') as fp:
metadata = self.to_dict()
# future-proofing:
# - saving abs path to component builder files and this file should
# allow the relative paths to be derived if required when loading
# a set of {composite builder file, component builder files} that
# has been moved, so long as their relative locations are
# unchanged.
# - save a serialization format version number
metadata['saved_path'] = os.path.abspath(path_to_file)
metadata['serialization_version'] = 1
json.dump(metadata, fp)
def compose(self, builder_files=None, force=False):
"""
Builds a composite ring using component ring builders loaded from a
list of builder files.
If a list of component ring builder files is given then that will be
used to load component ring builders. Otherwise, component ring
builders will be loaded using the list of builder files that was set
when the instance was constructed.
In either case, if metadata for an existing composite ring has been
loaded then the component ring builders are verified for consistency
with the existing composition of builders, unless the optional
        ``force`` flag is set True.
:param builder_files: Optional list of paths to ring builder
files that will be used to load the component ring builders.
Typically the list of component builder files will have been set
when the instance was constructed, for example when using the
load() class method. However, this parameter may be used if the
component builder file paths have moved, or, in conjunction with
the ``force`` parameter, if a new list of component builders is to
be used.
:param force: if True then do not verify given builders are consistent
with any existing composite ring.
:return: An instance of :class:`swift.common.ring.ring.RingData`
:raises: ValueError if the component ring builders are not suitable for
composing with each other, or are inconsistent with any existing
composite ring, or if there has been no change with respect to the
existing ring.
"""
builder_files = builder_files or self._builder_files
builders = [RingBuilder.load(f) for f in builder_files]
check_builder_ids(builders)
new_metadata = _make_composite_metadata(builders)
if self.components and self._builder_files and not force:
modified = check_against_existing(self.to_dict(), new_metadata)
if not modified:
raise ValueError(
"None of the component builders has been modified"
" since the existing composite ring was built.")
self.ring_data = compose_rings(builders)
self.version += 1
self.components = new_metadata['components']
self._set_builder_files(builder_files)
return self.ring_data
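# Hedged usage sketch (illustrative only): compose two prebuilt component builder
# files programmatically, mirroring the example in the class docstring above. The
# file names are placeholders and must point at real RingBuilder files with
# integer replica counts and disjoint regions/devices.
if __name__ == '__main__':
    crb = CompositeRingBuilder(['region1.builder', 'region2.builder'])
    ring_data = crb.compose()
    ring_data.save('composite_ring.gz')
    crb.save('composite_builder.composite')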
|
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
#
# Author: Carlos Garcia Gomez
# Date: 29-11-2013
# web: http://www.facturaScripts.com
import time, http.server, os
from urllib.request import urlopen
from subprocess import call
HOST_NAME = 'localhost'
PORT_NUMBER = 10080
class MyHandler(http.server.BaseHTTPRequestHandler):
def do_HEAD(s):
s.send_response(200)
s.send_header("Content-type", "text/html")
s.end_headers()
def do_GET(s):
s.send_response(200)
s.send_header("Content-type", "text/html")
s.end_headers()
global api_url
extra_url = ''
if len(s.path) > 2:
extra_url = '&'+s.path[2:]
response = urlopen(api_url+'?v=2&f=remote_printer'+extra_url)
html = response.read()
if html:
f = open('ticket.txt', 'wb')
f.write( html + b'\n' )
f.close()
global printer_name
call(['lpr', '-P', printer_name, 'ticket.txt'])
if __name__ == '__main__':
    # ask for the configuration
global api_url
global printer_name
if os.path.isfile('config.txt'):
f = open('config.txt', 'r')
line = f.readline()
api_url = line[5:].rstrip()
line = f.readline()
printer_name = line[9:].rstrip()
f.close()
else:
api_url = input('URL de la api: ')
printer_name = input('Nombre de la impresora: ')
f = open('config.txt', 'w')
f.write('api: '+api_url+"\nprinter: "+printer_name)
f.close()
    # start the web server
server_class = http.server.HTTPServer
httpd = server_class((HOST_NAME, PORT_NUMBER), MyHandler)
print( time.asctime(), "Server Starts - %s:%s" % (HOST_NAME, PORT_NUMBER) )
try:
httpd.serve_forever()
except KeyboardInterrupt:
pass
httpd.server_close()
print( time.asctime(), "Server Stops - %s:%s" % (HOST_NAME, PORT_NUMBER) )
    # delete the ticket file
if os.path.isfile('ticket.txt'):
os.remove('ticket.txt')
|
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# streamondemand - XBMC Plugin
# Connector for vidspot
# http://www.mimediacenter.info/foro/viewforum.php?f=36
# ------------------------------------------------------------
import re
from core import logger
from core import scrapertools
def test_video_exists(page_url):
logger.info("streamondemand.servers.vidspot test_video_exists(page_url='%s')" % page_url)
    # Does not exist / deleted: http://vidspot.net/8jcgbrzhujri
data = scrapertools.cache_page("http://anonymouse.org/cgi-bin/anon-www.cgi/" + page_url)
if "File Not Found" in data or "Archivo no encontrado" in data or '<b class="err">Deleted' in data \
or '<b class="err">Removed' in data or '<font class="err">No such' in data:
return False, "No existe o ha sido borrado de vidspot"
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("streamondemand.servers.vidspot url=%s" % page_url)
    # Normalize the URL
videoid = scrapertools.get_match(page_url, "http://vidspot.net/([a-z0-9A-Z]+)")
page_url = "http://vidspot.net/embed-%s-728x400.html" % videoid
data = scrapertools.cachePage(page_url)
if "Access denied" in data:
geobloqueo = True
else:
geobloqueo = False
if geobloqueo:
url = "http://www.videoproxy.co/hide.php"
post = "go=%s" % page_url
location = scrapertools.get_header_from_response(url, post=post, header_to_get="location")
url = "http://www.videoproxy.co/%s" % location
data = scrapertools.cachePage(url)
    # Extract the URL
media_url = scrapertools.find_single_match(data, '"file" : "([^"]+)",')
video_urls = []
if media_url != "":
if geobloqueo:
url = "http://www.videoproxy.co/hide.php"
post = "go=%s" % media_url
location = scrapertools.get_header_from_response(url, post=post, header_to_get="location")
media_url = "http://www.videoproxy.co/%s&direct=false" % location
else:
media_url += "&direct=false"
video_urls.append([scrapertools.get_filename_from_url(media_url)[-4:] + " [vidspot]", media_url])
for video_url in video_urls:
logger.info("[vidspot.py] %s - %s" % (video_url[0], video_url[1]))
return video_urls
# Find videos from this server in the given text
def find_videos(data):
    # Manually add some known-bad URLs so they are skipped
encontrados = set()
encontrados.add("http://vidspot.net/embed-theme.html")
encontrados.add("http://vidspot.net/embed-jquery.html")
encontrados.add("http://vidspot.net/embed-s.html")
encontrados.add("http://vidspot.net/embed-images.html")
encontrados.add("http://vidspot.net/embed-faq.html")
encontrados.add("http://vidspot.net/embed-embed.html")
encontrados.add("http://vidspot.net/embed-ri.html")
encontrados.add("http://vidspot.net/embed-d.html")
encontrados.add("http://vidspot.net/embed-css.html")
encontrados.add("http://vidspot.net/embed-js.html")
encontrados.add("http://vidspot.net/embed-player.html")
encontrados.add("http://vidspot.net/embed-cgi.html")
encontrados.add("http://vidspot.net/embed-i.html")
encontrados.add("http://vidspot.net/images")
encontrados.add("http://vidspot.net/theme")
encontrados.add("http://vidspot.net/xupload")
encontrados.add("http://vidspot.net/s")
encontrados.add("http://vidspot.net/js")
encontrados.add("http://vidspot.net/jquery")
encontrados.add("http://vidspot.net/login")
encontrados.add("http://vidspot.net/make")
encontrados.add("http://vidspot.net/i")
encontrados.add("http://vidspot.net/faq")
encontrados.add("http://vidspot.net/tos")
encontrados.add("http://vidspot.net/premium")
encontrados.add("http://vidspot.net/checkfiles")
encontrados.add("http://vidspot.net/privacy")
encontrados.add("http://vidspot.net/refund")
encontrados.add("http://vidspot.net/links")
encontrados.add("http://vidspot.net/contact")
devuelve = []
# http://vidspot.net/3sw6tewl21sn
# http://vidspot.net/embed-3sw6tewl21sn.html
# http://vidspot.net/embed-3sw6tewl21sn-728x400.html
# http://www.cinetux.org/video/vidspot.php?id=3sw6tewl21sn
patronvideos = 'vidspot.(?:net/|php\?id=)(?:embed-|)([a-z0-9]+)'
logger.info("streamondemand.servers.vidspot find_videos #" + patronvideos + "#")
matches = re.compile(patronvideos, re.DOTALL).findall(data)
if len(matches) > 0:
for match in matches:
titulo = "[vidspot]"
url = "http://vidspot.net/" + match
if url not in encontrados:
logger.info(" url=" + url)
devuelve.append([titulo, url, 'vidspot'])
encontrados.add(url)
else:
logger.info(" url duplicada=" + url)
return devuelve
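# Hedged usage sketch (illustrative only, not part of the original connector):
# run find_videos() over a small sample text containing the URL patterns listed
# above. Requires the streamondemand "core" package to be importable.
if __name__ == "__main__":
    sample = ('<a href="http://vidspot.net/3sw6tewl21sn">mirror 1</a> '
              '<iframe src="http://vidspot.net/embed-3sw6tewl21sn-728x400.html">')
    for titulo, url, server in find_videos(sample):
        print titulo, url, server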
|
"""Logical Clocks and Synchronization."""
from __future__ import absolute_import, unicode_literals
from threading import Lock
from itertools import islice
from operator import itemgetter
from .five import python_2_unicode_compatible, zip
__all__ = ['LamportClock', 'timetuple']
R_CLOCK = '_lamport(clock={0}, timestamp={1}, id={2} {3!r})'
@python_2_unicode_compatible
class timetuple(tuple):
"""Tuple of event clock information.
Can be used as part of a heap to keep events ordered.
Arguments:
clock (int): Event clock value.
timestamp (float): Event UNIX timestamp value.
id (str): Event host id (e.g. ``hostname:pid``).
obj (Any): Optional obj to associate with this event.
"""
__slots__ = ()
def __new__(cls, clock, timestamp, id, obj=None):
return tuple.__new__(cls, (clock, timestamp, id, obj))
def __repr__(self):
return R_CLOCK.format(*self)
def __getnewargs__(self):
return tuple(self)
def __lt__(self, other):
        # 0: clock 1: timestamp 2: process id
try:
A, B = self[0], other[0]
# uses logical clock value first
if A and B: # use logical clock if available
if A == B: # equal clocks use lower process id
return self[2] < other[2]
return A < B
return self[1] < other[1] # ... or use timestamp
except IndexError:
return NotImplemented
def __gt__(self, other):
return other < self
def __le__(self, other):
return not other < self
def __ge__(self, other):
return not self < other
clock = property(itemgetter(0))
timestamp = property(itemgetter(1))
id = property(itemgetter(2))
obj = property(itemgetter(3))
@python_2_unicode_compatible
class LamportClock(object):
"""Lamport's logical clock.
From Wikipedia:
A Lamport logical clock is a monotonically incrementing software counter
maintained in each process. It follows some simple rules:
* A process increments its counter before each event in that process;
* When a process sends a message, it includes its counter value with
the message;
* On receiving a message, the receiver process sets its counter to be
greater than the maximum of its own value and the received value
before it considers the message received.
Conceptually, this logical clock can be thought of as a clock that only
has meaning in relation to messages moving between processes. When a
process receives a message, it resynchronizes its logical clock with
the sender.
See Also:
* `Lamport timestamps`_
* `Lamports distributed mutex`_
.. _`Lamport Timestamps`: http://en.wikipedia.org/wiki/Lamport_timestamps
.. _`Lamports distributed mutex`: http://bit.ly/p99ybE
*Usage*
When sending a message use :meth:`forward` to increment the clock,
when receiving a message use :meth:`adjust` to sync with
the time stamp of the incoming message.
"""
    #: The clock's current value.
value = 0
def __init__(self, initial_value=0, Lock=Lock):
self.value = initial_value
self.mutex = Lock()
def adjust(self, other):
with self.mutex:
value = self.value = max(self.value, other) + 1
return value
def forward(self):
with self.mutex:
self.value += 1
return self.value
def sort_heap(self, h):
"""Sort heap of events.
List of tuples containing at least two elements, representing
an event, where the first element is the event's scalar clock value,
and the second element is the id of the process (usually
``"hostname:pid"``): ``sh([(clock, processid, ...?), (...)])``
The list must already be sorted, which is why we refer to it as a
heap.
The tuple will not be unpacked, so more than two elements can be
present.
Will return the latest event.
"""
if h[0][0] == h[1][0]:
same = []
for PN in zip(h, islice(h, 1, None)):
if PN[0][0] != PN[1][0]:
break # Prev and Next's clocks differ
same.append(PN[0])
# return first item sorted by process id
return sorted(same, key=lambda event: event[1])[0]
# clock values unique, return first item
return h[0]
def __str__(self):
return str(self.value)
def __repr__(self):
return '<LamportClock: {0.value}>'.format(self)
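# Hedged demo (not part of the original module): the forward/adjust rules from the
# LamportClock docstring applied to two toy processes exchanging one message, plus
# ordering of timetuple events. Because of the relative import above, run it as a
# module (e.g. ``python -m kombu.clocks`` if this file lives at kombu/clocks.py).
if __name__ == '__main__':  # pragma: no cover
    sender, receiver = LamportClock(), LamportClock()
    message_clock = sender.forward()     # sender ticks and stamps the outgoing message
    receiver.adjust(message_clock)       # receiver syncs: max(local, received) + 1
    print('sender=%s receiver=%s' % (sender, receiver))
    a = timetuple(1, 100.0, 'host1:1', 'event-a')
    b = timetuple(1, 200.0, 'host2:2', 'event-b')
    print(sorted([b, a])[0].obj)         # equal clocks: lower process id wins -> 'event-a'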
|
import socket, sys, time
from zeroconf import ServiceInfo, ServiceBrowser, ServiceStateChange, Zeroconf
def get_hosts():
"""
Returns a list of available hosts in the network
"""
hosts = []
def search_hostnames(zeroconf, service_type, name, state_change):
"""
Prints the hostname to stdout
"""
if state_change is ServiceStateChange.Added:
hostname = name.split('.')
hosts.append(hostname[0])
zeroconf = Zeroconf()
browser = ServiceBrowser(zeroconf,
'_lanshare._tcp.local.',
handlers=[search_hostnames])
    # Give the browser a short time to discover services before closing.
    time.sleep(1)
    zeroconf.close()
return hosts
def list_files(address, port):
"""
Returns a list of files shared by the given host
"""
files = []
received = ""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
# Connect to server
sock.connect((address, port))
# Request the list of files
sock.sendall(b"LIST")
buff = sock.recv(1024)
while buff:
received += buff.decode('utf-8')
buff = sock.recv(1024)
finally:
sock.close()
    if len(received) == 0 or received[0] != '2':
        raise Exception("Invalid or empty response from server")
files = received.split("\n")
files.pop()
return files
def browse_host(hostname):
"""
Gets the info of the hostname from mDNS
and then retrieves a list of files hosted
in that server and returns them
"""
filenames = []
found = False
zeroconf = Zeroconf()
fqdn = '{0}.local.'.format(hostname)
    def get_hostnames(zeroconf, service_type, name, state_change):
        nonlocal found, filenames
        if state_change is ServiceStateChange.Added:
            host = zeroconf.get_service_info(service_type, name)
            if host is not None and host.server == fqdn:
                found = True
                address = socket.inet_ntoa(host.address)
                filenames = list_files(address, host.port)
    browser = ServiceBrowser(zeroconf,
                             '_lanshare._tcp.local.',
                             handlers=[get_hostnames])
    # Give the browser a short time to resolve the service before checking.
    time.sleep(1)
    zeroconf.close()
    if not found:
print("Couldn't find {0} in the network".format(hostname), file=sys.stderr)
return filenames
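# Hedged usage sketch (illustrative only, not part of the original module):
# discover peers advertising the _lanshare._tcp.local. service and list the files
# shared by the first one found. Requires a peer running on the local network.
if __name__ == '__main__':
    peers = get_hosts()
    print('Hosts found: {0}'.format(peers))
    if peers:
        for shared_file in browse_host(peers[0]):
            print(shared_file)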
|
# -*- coding: utf-8 -*-
from flask import render_template, redirect, url_for, abort, flash, request,\
current_app, make_response
from flask_login import login_required, current_user
from flask_sqlalchemy import get_debug_queries
from .. import files
from . import main
from ..models import Post, Tag, Category, Spc, User
from ..post.posts import Archive
from ..tools.jinja_keys import JinjaKeys
from ..decorators import admin_required
import os
@main.route('/')
def index():
page = request.args.get('page', 1, type=int)
pagination = Post.query.order_by(Post.timestamp.desc()).paginate(
page, per_page=current_app.config['FLASKY_POSTS_PER_PAGE'],
error_out=False)
posts = pagination.items
value = JinjaKeys()
value.add_keys({'posts': posts, 'pagination': pagination})
my_dict = value.keys()
return render_template('index.html', **my_dict)
@main.route('/admin-images/', defaults={'page': 1})
@main.route('/admin-images/<int:page>')
@admin_required
def admin_images(page):
from ..tools.pagination import Pagination, PageItem
PER_PAGE = 20
all_file = os.listdir(current_app.config['UPLOADED_FILES_DEST'])
count = len(all_file)
page_item = PageItem(page, PER_PAGE, all_file, count)
files_list = page_item.get_item()
pagination = Pagination(page, PER_PAGE, count)
return render_template('admin/admin_images.html', pagination=pagination,
files_list=files_list, files=files)
@main.route('/delete-image/<filename>')
@admin_required
def delete_image(filename):
file_path = files.path(filename)
os.remove(file_path)
return redirect(url_for('main.admin_images'))
@main.route('/baidu_verify_ruyZUdOLs5.html')
def baidu_api():
return render_template('baidu_verify_ruyZUdOLs5.html')
|
import pytest
import re
import subprocess
from pynetlinux import ifconfig
def interface(request, name):
i = ifconfig.Interface(name)
ip = i.ip
mac = i.mac
netmask = i.netmask
i.up()
def cleanup():
i.ip = ip
i.mac = mac
i.netmask = netmask
i.up()
request.addfinalizer(cleanup)
return i
@pytest.fixture
def if1(request):
return interface(request, b'eth1')
@pytest.fixture
def if2(request):
return interface(request, b'eth2')
def check_output(shell_cmd, regex=[], substr=[], not_regex=[], not_substr=[],
debug=False):
assert regex or substr or not_regex or not_substr
output = subprocess.check_output(shell_cmd, stderr=subprocess.STDOUT,
shell=True)
if debug:
print(regex, substr, not_regex, not_substr)
print(output)
for s in substr:
assert s in output
for r in regex:
assert re.search(r, output, re.MULTILINE)
for s in not_substr:
assert s not in output
for r in not_regex:
assert not re.search(r, output, re.MULTILINE)
|
"""
TextTable is used to generate a pretty table in text format,
which can be easily printed to the console or written to a text file
Sample:
    Name  Age  Gender  Desc                  Nationality
    You   10   male    You are a boy         China
    Me    100  male    I am an old man       Japan
    She   18   female  She is a pretty girl  America
    He    1    male    He is a little baby   British
"""
import textwrap
from pylib.exceptions import Error
class TextTable(object):
def __init__(self, field_names, **kwargs):
'''
Arguments:
field_names - list or tuple of field names
            vertical_str - vertical separator between columns
'''
self._field_names = field_names
self._rows = []
self._sequence = [False, '', 0]
self._max_widths = {}
self._vertical_str = ' '
self._padding_width = 0
supported_options = ('vertical_str',)
for key, value in kwargs.items():
if key not in supported_options:
raise Error('unsupported option: ' + key)
setattr(self, '_'+key, value)
def set_sequence(self, enable, field_name='Seq', start=1):
'''
        set whether a sequence number column is added to each row.
        Arguments:
            enable - whether a sequence number is needed for each row
field_name - the name of sequence field
start - the start number of sequence
'''
self._sequence = [enable, field_name, start]
def set_max_width(self, field_name, max_width):
'''
        set the max width of the specified column; if the max width is shorter than the
        length of the field name, the length of the field name is used instead
        Arguments:
            field_name - specify the field
            max_width - max width of the specified field;
                        if the actual value exceeds the max width, it will be split into multiple lines
'''
self._max_widths[field_name] = max_width
def _format_rows(self, rows):
'''
convert each column to string
'''
formatted_rows = []
for index, row in enumerate(rows):
formatted_row = [str(col) for col in row]
if self._sequence[0]:
formatted_row.insert(0, str(index+self._sequence[2]))
formatted_rows.append(formatted_row)
return formatted_rows
def _calculate_widths(self, field_names, rows):
'''
calculate max width of each column
'''
widths = [len(field) for field in field_names]
for row in rows:
for index, value in enumerate(row):
lines = value.split('\n')
max_len = max([len(line) for line in lines])
field_name = field_names[index]
if field_name in self._max_widths:
widths[index] = max(widths[index], min(max_len, self._max_widths[field_name]))
else:
widths[index] = max(widths[index], max_len)
return widths
def _get_row_string(self, field_names, row, widths):
'''
get formatted row string
'''
lines = []
total_width = 0
padding = self._padding_width * ' '
        for index, field, value, width in zip(range(0, len(row)), field_names, row, widths):
last_column = True if index == len(row) - 1 else False
col_lines = value.split('\n')
final_col_lines = []
for line in col_lines:
final_col_lines += textwrap.wrap(line, width)
for index, line in enumerate(final_col_lines):
if len(lines) <= index:
column = total_width*' ' + line + (width-len(line))*' '
lines.append(padding + column + padding)
if not last_column:
lines[index] += self._vertical_str
else:
column = (total_width-len(lines[index]))*' ' + line + (width-len(line))*' '
lines[index] += padding + column + padding
if not last_column:
lines[index] += self._vertical_str
total_width += width + self._padding_width*2 + len(self._vertical_str)
return '\n'.join(lines)
def to_string(self, ignore_field_names=False):
'''
get formatted result
'''
return '\n'.join(self.to_lines(ignore_field_names))
def to_lines(self, ignore_field_names=False):
'''
get formatted result
'''
field_names = [self._sequence[1]] + list(self._field_names) if self._sequence[0] else self._field_names
formatted_rows = self._format_rows(self._rows)
widths = self._calculate_widths(field_names, formatted_rows)
lines = []
if not ignore_field_names:
lines.append(self._get_row_string(field_names, field_names, widths))
for row in formatted_rows:
lines.append(self._get_row_string(field_names, row, widths))
return lines
def add_row(self, row):
'''
Arguments:
row - list or tuple of field values
'''
if len(row) != len(self._field_names):
raise Error("Row has different number of values with field names, (row) %d!=%d (field)" \
% (len(row), len(self._field_names)))
new_row = [col if col is not None else '' for col in row]
self._rows.append(new_row)
def add_rows(self, rows):
for row in rows:
self.add_row(row)
if __name__ == "__main__":
table = TextTable(['Name', 'Age', 'Gender', 'Desc', 'Nationality'], vertical_str=' ')
table.add_row(('You', 10, 'male', 'You are a boy', 'China'))
table.add_row(('Me', 100, 'male', 'I am an old man', 'Japan'))
table.add_row(('She', 18, 'female', 'She is a pretty girl', 'America'))
table.add_row(('He', 1, 'male', 'He is a little baby', 'British'))
#table.set_sequence(True)
print(table.to_string())
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import os
import eventlet
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_utils import importutils
from neutron.agent.linux import dhcp
from neutron.agent.linux import external_process
from neutron.agent.linux import utils as linux_utils
from neutron.agent.metadata import driver as metadata_driver
from neutron.agent import rpc as agent_rpc
from neutron.common import constants
from neutron.common import exceptions
from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron.common import utils
from neutron import context
from neutron.i18n import _LE, _LI, _LW
from neutron import manager
from neutron.openstack.common import loopingcall
LOG = logging.getLogger(__name__)
class DhcpAgent(manager.Manager):
"""DHCP agent service manager.
Note that the public methods of this class are exposed as the server side
of an rpc interface. The neutron server uses
neutron.api.rpc.agentnotifiers.dhcp_rpc_agent_api.DhcpAgentNotifyApi as the
client side to execute the methods here. For more information about
changing rpc interfaces, see doc/source/devref/rpc_api.rst.
"""
target = oslo_messaging.Target(version='1.0')
def __init__(self, host=None):
super(DhcpAgent, self).__init__(host=host)
self.needs_resync_reasons = collections.defaultdict(list)
self.conf = cfg.CONF
self.cache = NetworkCache()
self.dhcp_driver_cls = importutils.import_class(self.conf.dhcp_driver)
ctx = context.get_admin_context_without_session()
self.plugin_rpc = DhcpPluginApi(topics.PLUGIN,
ctx, self.conf.use_namespaces)
# create dhcp dir to store dhcp info
dhcp_dir = os.path.dirname("/%s/dhcp/" % self.conf.state_path)
linux_utils.ensure_dir(dhcp_dir)
self.dhcp_version = self.dhcp_driver_cls.check_version()
self._populate_networks_cache()
self._process_monitor = external_process.ProcessMonitor(
config=self.conf,
resource_type='dhcp')
def _populate_networks_cache(self):
"""Populate the networks cache when the DHCP-agent starts."""
try:
existing_networks = self.dhcp_driver_cls.existing_dhcp_networks(
self.conf
)
for net_id in existing_networks:
net = dhcp.NetModel(self.conf.use_namespaces,
{"id": net_id,
"subnets": [],
"ports": []})
self.cache.put(net)
except NotImplementedError:
# just go ahead with an empty networks cache
LOG.debug("The '%s' DHCP-driver does not support retrieving of a "
"list of existing networks",
self.conf.dhcp_driver)
def after_start(self):
self.run()
LOG.info(_LI("DHCP agent started"))
def run(self):
"""Activate the DHCP agent."""
self.sync_state()
self.periodic_resync()
def call_driver(self, action, network, **action_kwargs):
"""Invoke an action on a DHCP driver instance."""
LOG.debug('Calling driver for network: %(net)s action: %(action)s',
{'net': network.id, 'action': action})
try:
# the Driver expects something that is duck typed similar to
# the base models.
driver = self.dhcp_driver_cls(self.conf,
network,
self._process_monitor,
self.dhcp_version,
self.plugin_rpc)
getattr(driver, action)(**action_kwargs)
return True
except exceptions.Conflict:
# No need to resync here, the agent will receive the event related
# to a status update for the network
LOG.warning(_LW('Unable to %(action)s dhcp for %(net_id)s: there '
'is a conflict with its current state; please '
'check that the network and/or its subnet(s) '
'still exist.'),
{'net_id': network.id, 'action': action})
except Exception as e:
if getattr(e, 'exc_type', '') != 'IpAddressGenerationFailure':
# Don't resync if port could not be created because of an IP
# allocation failure. When the subnet is updated with a new
# allocation pool or a port is deleted to free up an IP, this
# will automatically be retried on the notification
self.schedule_resync(e, network.id)
if (isinstance(e, oslo_messaging.RemoteError)
and e.exc_type == 'NetworkNotFound'
or isinstance(e, exceptions.NetworkNotFound)):
LOG.warning(_LW("Network %s has been deleted."), network.id)
else:
LOG.exception(_LE('Unable to %(action)s dhcp for %(net_id)s.'),
{'net_id': network.id, 'action': action})
def schedule_resync(self, reason, network=None):
"""Schedule a resync for a given network and reason. If no network is
specified, resync all networks.
"""
self.needs_resync_reasons[network].append(reason)
@utils.synchronized('dhcp-agent')
def sync_state(self, networks=None):
"""Sync the local DHCP state with Neutron. If no networks are passed,
or 'None' is one of the networks, sync all of the networks.
"""
only_nets = set([] if (not networks or None in networks) else networks)
LOG.info(_LI('Synchronizing state'))
pool = eventlet.GreenPool(cfg.CONF.num_sync_threads)
known_network_ids = set(self.cache.get_network_ids())
try:
active_networks = self.plugin_rpc.get_active_networks_info()
active_network_ids = set(network.id for network in active_networks)
for deleted_id in known_network_ids - active_network_ids:
try:
self.disable_dhcp_helper(deleted_id)
except Exception as e:
self.schedule_resync(e, deleted_id)
LOG.exception(_LE('Unable to sync network state on '
'deleted network %s'), deleted_id)
for network in active_networks:
if (not only_nets or # specifically resync all
network.id not in known_network_ids or # missing net
network.id in only_nets): # specific network to sync
pool.spawn(self.safe_configure_dhcp_for_network, network)
pool.waitall()
LOG.info(_LI('Synchronizing state complete'))
except Exception as e:
self.schedule_resync(e)
LOG.exception(_LE('Unable to sync network state.'))
@utils.exception_logger()
def _periodic_resync_helper(self):
"""Resync the dhcp state at the configured interval."""
while True:
eventlet.sleep(self.conf.resync_interval)
if self.needs_resync_reasons:
# be careful to avoid a race with additions to list
# from other threads
reasons = self.needs_resync_reasons
self.needs_resync_reasons = collections.defaultdict(list)
for net, r in reasons.items():
if not net:
net = "*"
LOG.debug("resync (%(network)s): %(reason)s",
{"reason": r, "network": net})
self.sync_state(reasons.keys())
def periodic_resync(self):
"""Spawn a thread to periodically resync the dhcp state."""
eventlet.spawn(self._periodic_resync_helper)
def safe_get_network_info(self, network_id):
try:
network = self.plugin_rpc.get_network_info(network_id)
if not network:
LOG.warn(_LW('Network %s has been deleted.'), network_id)
return network
except Exception as e:
self.schedule_resync(e, network_id)
LOG.exception(_LE('Network %s info call failed.'), network_id)
def enable_dhcp_helper(self, network_id):
"""Enable DHCP for a network that meets enabling criteria."""
network = self.safe_get_network_info(network_id)
if network:
self.configure_dhcp_for_network(network)
@utils.exception_logger()
def safe_configure_dhcp_for_network(self, network):
try:
self.configure_dhcp_for_network(network)
except (exceptions.NetworkNotFound, RuntimeError):
LOG.warn(_LW('Network %s may have been deleted and its resources '
'may have already been disposed.'), network.id)
def configure_dhcp_for_network(self, network):
if not network.admin_state_up:
return
enable_metadata = self.dhcp_driver_cls.should_enable_metadata(
self.conf, network)
dhcp_network_enabled = False
for subnet in network.subnets:
if subnet.enable_dhcp:
if self.call_driver('enable', network):
dhcp_network_enabled = True
self.cache.put(network)
break
if enable_metadata and dhcp_network_enabled:
for subnet in network.subnets:
if subnet.ip_version == 4 and subnet.enable_dhcp:
self.enable_isolated_metadata_proxy(network)
break
def disable_dhcp_helper(self, network_id):
"""Disable DHCP for a network known to the agent."""
network = self.cache.get_network_by_id(network_id)
if network:
if (self.conf.use_namespaces and
self.conf.enable_isolated_metadata):
# NOTE(jschwarz): In the case where a network is deleted, all
# the subnets and ports are deleted before this function is
# called, so checking if 'should_enable_metadata' is True
# for any subnet is false logic here.
self.disable_isolated_metadata_proxy(network)
if self.call_driver('disable', network):
self.cache.remove(network)
def refresh_dhcp_helper(self, network_id):
"""Refresh or disable DHCP for a network depending on the current state
of the network.
"""
old_network = self.cache.get_network_by_id(network_id)
if not old_network:
            # DHCP is not currently running for this network.
return self.enable_dhcp_helper(network_id)
network = self.safe_get_network_info(network_id)
if not network:
return
old_cidrs = set(s.cidr for s in old_network.subnets if s.enable_dhcp)
new_cidrs = set(s.cidr for s in network.subnets if s.enable_dhcp)
if new_cidrs and old_cidrs == new_cidrs:
self.call_driver('reload_allocations', network)
self.cache.put(network)
elif new_cidrs:
if self.call_driver('restart', network):
self.cache.put(network)
else:
self.disable_dhcp_helper(network.id)
@utils.synchronized('dhcp-agent')
def network_create_end(self, context, payload):
"""Handle the network.create.end notification event."""
network_id = payload['network']['id']
self.enable_dhcp_helper(network_id)
@utils.synchronized('dhcp-agent')
def network_update_end(self, context, payload):
"""Handle the network.update.end notification event."""
network_id = payload['network']['id']
if payload['network']['admin_state_up']:
self.enable_dhcp_helper(network_id)
else:
self.disable_dhcp_helper(network_id)
@utils.synchronized('dhcp-agent')
def network_delete_end(self, context, payload):
"""Handle the network.delete.end notification event."""
self.disable_dhcp_helper(payload['network_id'])
@utils.synchronized('dhcp-agent')
def subnet_update_end(self, context, payload):
"""Handle the subnet.update.end notification event."""
network_id = payload['subnet']['network_id']
self.refresh_dhcp_helper(network_id)
# Use the update handler for the subnet create event.
subnet_create_end = subnet_update_end
@utils.synchronized('dhcp-agent')
def subnet_delete_end(self, context, payload):
"""Handle the subnet.delete.end notification event."""
subnet_id = payload['subnet_id']
network = self.cache.get_network_by_subnet_id(subnet_id)
if network:
self.refresh_dhcp_helper(network.id)
@utils.synchronized('dhcp-agent')
def port_update_end(self, context, payload):
"""Handle the port.update.end notification event."""
updated_port = dhcp.DictModel(payload['port'])
network = self.cache.get_network_by_id(updated_port.network_id)
if network:
driver_action = 'reload_allocations'
if self._is_port_on_this_agent(updated_port):
orig = self.cache.get_port_by_id(updated_port['id'])
# assume IP change if not in cache
old_ips = {i['ip_address'] for i in orig['fixed_ips'] or []}
new_ips = {i['ip_address'] for i in updated_port['fixed_ips']}
if old_ips != new_ips:
driver_action = 'restart'
self.cache.put_port(updated_port)
self.call_driver(driver_action, network)
def _is_port_on_this_agent(self, port):
thishost = utils.get_dhcp_agent_device_id(
port['network_id'], self.conf.host)
return port['device_id'] == thishost
# Use the update handler for the port create event.
port_create_end = port_update_end
@utils.synchronized('dhcp-agent')
def port_delete_end(self, context, payload):
"""Handle the port.delete.end notification event."""
port = self.cache.get_port_by_id(payload['port_id'])
if port:
network = self.cache.get_network_by_id(port.network_id)
self.cache.remove_port(port)
self.call_driver('reload_allocations', network)
def enable_isolated_metadata_proxy(self, network):
# The proxy might work for either a single network
# or all the networks connected via a router
# to the one passed as a parameter
kwargs = {'network_id': network.id}
# When the metadata network is enabled, the proxy might
# be started for the router attached to the network
if self.conf.enable_metadata_network:
router_ports = [port for port in network.ports
if (port.device_owner in
constants.ROUTER_INTERFACE_OWNERS)]
if router_ports:
# Multiple router ports should not be allowed
if len(router_ports) > 1:
LOG.warning(_LW("%(port_num)d router ports found on the "
"metadata access network. Only the port "
"%(port_id)s, for router %(router_id)s "
"will be considered"),
{'port_num': len(router_ports),
'port_id': router_ports[0].id,
'router_id': router_ports[0].device_id})
kwargs = {'router_id': router_ports[0].device_id}
metadata_driver.MetadataDriver.spawn_monitored_metadata_proxy(
self._process_monitor, network.namespace, dhcp.METADATA_PORT,
self.conf, **kwargs)
def disable_isolated_metadata_proxy(self, network):
metadata_driver.MetadataDriver.destroy_monitored_metadata_proxy(
self._process_monitor, network.id, network.namespace, self.conf)
class DhcpPluginApi(object):
"""Agent side of the dhcp rpc API.
This class implements the client side of an rpc interface. The server side
of this interface can be found in
neutron.api.rpc.handlers.dhcp_rpc.DhcpRpcCallback. For more information
about changing rpc interfaces, see doc/source/devref/rpc_api.rst.
API version history:
1.0 - Initial version.
1.1 - Added get_active_networks_info, create_dhcp_port,
and update_dhcp_port methods.
"""
def __init__(self, topic, context, use_namespaces):
self.context = context
self.host = cfg.CONF.host
self.use_namespaces = use_namespaces
target = oslo_messaging.Target(
topic=topic,
namespace=constants.RPC_NAMESPACE_DHCP_PLUGIN,
version='1.0')
self.client = n_rpc.get_client(target)
def get_active_networks_info(self):
"""Make a remote process call to retrieve all network info."""
cctxt = self.client.prepare(version='1.1')
networks = cctxt.call(self.context, 'get_active_networks_info',
host=self.host)
return [dhcp.NetModel(self.use_namespaces, n) for n in networks]
def get_network_info(self, network_id):
"""Make a remote process call to retrieve network info."""
cctxt = self.client.prepare()
network = cctxt.call(self.context, 'get_network_info',
network_id=network_id, host=self.host)
if network:
return dhcp.NetModel(self.use_namespaces, network)
def get_dhcp_port(self, network_id, device_id):
"""Make a remote process call to get the dhcp port."""
cctxt = self.client.prepare()
port = cctxt.call(self.context, 'get_dhcp_port',
network_id=network_id, device_id=device_id,
host=self.host)
if port:
return dhcp.DictModel(port)
def create_dhcp_port(self, port):
"""Make a remote process call to create the dhcp port."""
cctxt = self.client.prepare(version='1.1')
port = cctxt.call(self.context, 'create_dhcp_port',
port=port, host=self.host)
if port:
return dhcp.DictModel(port)
def update_dhcp_port(self, port_id, port):
"""Make a remote process call to update the dhcp port."""
cctxt = self.client.prepare(version='1.1')
port = cctxt.call(self.context, 'update_dhcp_port',
port_id=port_id, port=port, host=self.host)
if port:
return dhcp.DictModel(port)
def release_dhcp_port(self, network_id, device_id):
"""Make a remote process call to release the dhcp port."""
cctxt = self.client.prepare()
return cctxt.call(self.context, 'release_dhcp_port',
network_id=network_id, device_id=device_id,
host=self.host)
def release_port_fixed_ip(self, network_id, device_id, subnet_id):
"""Make a remote process call to release a fixed_ip on the port."""
cctxt = self.client.prepare()
return cctxt.call(self.context, 'release_port_fixed_ip',
network_id=network_id, subnet_id=subnet_id,
device_id=device_id, host=self.host)
class NetworkCache(object):
"""Agent cache of the current network state."""
def __init__(self):
self.cache = {}
self.subnet_lookup = {}
self.port_lookup = {}
def get_network_ids(self):
return self.cache.keys()
def get_network_by_id(self, network_id):
return self.cache.get(network_id)
def get_network_by_subnet_id(self, subnet_id):
return self.cache.get(self.subnet_lookup.get(subnet_id))
def get_network_by_port_id(self, port_id):
return self.cache.get(self.port_lookup.get(port_id))
def put(self, network):
if network.id in self.cache:
self.remove(self.cache[network.id])
self.cache[network.id] = network
for subnet in network.subnets:
self.subnet_lookup[subnet.id] = network.id
for port in network.ports:
self.port_lookup[port.id] = network.id
def remove(self, network):
del self.cache[network.id]
for subnet in network.subnets:
del self.subnet_lookup[subnet.id]
for port in network.ports:
del self.port_lookup[port.id]
def put_port(self, port):
network = self.get_network_by_id(port.network_id)
for index in range(len(network.ports)):
if network.ports[index].id == port.id:
network.ports[index] = port
break
else:
network.ports.append(port)
self.port_lookup[port.id] = network.id
def remove_port(self, port):
network = self.get_network_by_port_id(port.id)
for index in range(len(network.ports)):
if network.ports[index] == port:
del network.ports[index]
del self.port_lookup[port.id]
break
def get_port_by_id(self, port_id):
network = self.get_network_by_port_id(port_id)
if network:
for port in network.ports:
if port.id == port_id:
return port
def get_state(self):
net_ids = self.get_network_ids()
num_nets = len(net_ids)
num_subnets = 0
num_ports = 0
for net_id in net_ids:
network = self.get_network_by_id(net_id)
num_subnets += len(network.subnets)
num_ports += len(network.ports)
return {'networks': num_nets,
'subnets': num_subnets,
'ports': num_ports}
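# Illustrative note (not executed anywhere): NetworkCache keeps reverse lookups
# from subnet and port ids back to their network id, so a subnet or port
# notification can be resolved to the cached network without another RPC call.
# Roughly (the ids below are hypothetical):
#
#   cache = NetworkCache()
#   cache.put(net)                              # net.id == 'net-1'
#   cache.get_network_by_subnet_id('subnet-1')  # -> net, via subnet_lookup
#   cache.get_network_by_port_id('port-1')      # -> net, via port_lookup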
class DhcpAgentWithStateReport(DhcpAgent):
def __init__(self, host=None):
super(DhcpAgentWithStateReport, self).__init__(host=host)
self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN)
self.agent_state = {
'binary': 'neutron-dhcp-agent',
'host': host,
'topic': topics.DHCP_AGENT,
'configurations': {
'dhcp_driver': cfg.CONF.dhcp_driver,
'use_namespaces': cfg.CONF.use_namespaces,
'dhcp_lease_duration': cfg.CONF.dhcp_lease_duration},
'start_flag': True,
'agent_type': constants.AGENT_TYPE_DHCP}
report_interval = cfg.CONF.AGENT.report_interval
self.use_call = True
if report_interval:
self.heartbeat = loopingcall.FixedIntervalLoopingCall(
self._report_state)
self.heartbeat.start(interval=report_interval)
def _report_state(self):
try:
self.agent_state.get('configurations').update(
self.cache.get_state())
ctx = context.get_admin_context_without_session()
self.state_rpc.report_state(ctx, self.agent_state, self.use_call)
self.use_call = False
except AttributeError:
# This means the server does not support report_state
LOG.warn(_LW("Neutron server does not support state report."
" State report for this agent will be disabled."))
self.heartbeat.stop()
self.run()
return
except Exception:
LOG.exception(_LE("Failed reporting state!"))
return
if self.agent_state.pop('start_flag', None):
self.run()
def agent_updated(self, context, payload):
"""Handle the agent_updated notification event."""
self.schedule_resync(_("Agent updated: %(payload)s") %
{"payload": payload})
LOG.info(_LI("agent_updated by server side %s!"), payload)
def after_start(self):
LOG.info(_LI("DHCP agent started"))
|
# Copyright (C) 2016 - Oscar Campos <[email protected]>
# This program is Free Software see LICENSE file for details
from anaconda_go.lib import go
from anaconda_go.lib.helpers import get_settings
from anaconda_go.lib.plugin import completion, Worker, Callback, is_code
import sublime
class GoCompletionEventListener(completion.AnacondaCompletionEventListener):
"""Completion listener for anaconda_go
"""
def on_query_completions(self, view, prefix, locations):
"""Fired directly from Sublime Text 3 events systems
"""
if not is_code(view, lang='go'):
return
if not go.ANAGONDA_PRESENT:
if go.AVAILABLE:
go.init()
else:
return
if self.ready_from_defer is True:
completion_flags = 0
if get_settings(view, 'suppress_word_completions', False):
completion_flags = sublime.INHIBIT_WORD_COMPLETIONS
if get_settings(view, 'suppress_explicit_completions', False):
completion_flags |= sublime.INHIBIT_EXPLICIT_COMPLETIONS
cpl = self.completions
self.completions = []
self.ready_from_defer = False
return (cpl, completion_flags)
code = view.substr(sublime.Region(0, view.size()))
row, col = view.rowcol(locations[0])
data = {
'vid': view.id(),
'path': view.file_name(),
'code': code,
'offset': view.text_point(row, col),
'add_params': get_settings(
view, 'anaconda_go_add_completion_params', True),
'go_env': {
'GOROOT': go.GOROOT,
'GOPATH': go.GOPATH,
'CGO_ENABLED': go.CGO_ENABLED
},
'method': 'autocomplete',
'handler': 'anaGonda'
}
Worker.execute(
Callback(
on_success=self._complete,
on_failure=self._on_failure,
on_timeout=self._on_timeout
),
**data
)
def _on_timeout(self, _):
"""Called when request times out
"""
print('anaconda_go completion timed out...')
def _on_failure(self, data):
"""Called when request fails
"""
print('anaconda_go error: {}'.format(data['error']))
def _on_modified(self, view):
"""Just override anaconda superclass func
"""
return
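# Rough flow note (inferred from the handler above; _complete and
# ready_from_defer come from the anaconda completion base class): the first
# on_query_completions call only dispatches an 'autocomplete' request to the
# anaGonda worker and returns nothing; when the worker answers, the base class
# stores the results and re-triggers completion, so the follow-up call returns
# the cached list and resets ready_from_defer.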
|
from utils import *
# Q2
def if_this_not_that(i_list, this):
"""
>>> original_list = [1, 2, 3, 4, 5]
>>> if_this_not_that(original_list, 3)
that
that
that
4
5
"""
"*** YOUR CODE HERE ***"
for elem in i_list:
if elem <= this:
print('that')
else:
print(elem)
# Q3
def reverse_iter(lst):
"""Returns the reverse of the given list.
>>> reverse_iter([1, 2, 3, 4])
[4, 3, 2, 1]
"""
"*** YOUR CODE HERE ***"
rev = []
for elem in lst:
rev = [elem] + rev
return rev
# Q4
def closer_city(lat, lon, city1, city2):
""" Returns the name of either city1 or city2, whichever is closest
to coordinate (lat, lon).
>>> berkeley = make_city('Berkeley', 37.87, 112.26)
>>> stanford = make_city('Stanford', 34.05, 118.25)
>>> closer_city(38.33, 121.44, berkeley, stanford)
'Stanford'
>>> bucharest = make_city('Bucharest', 44.43, 26.10)
>>> vienna = make_city('Vienna', 48.20, 16.37)
>>> closer_city(41.29, 174.78, bucharest, vienna)
'Bucharest'
"""
my_city = make_city('MYCITY', lat, lon)
distance1, distance2 = distance(my_city, city1), distance(my_city, city2)
return get_name(city1) if distance1 < distance2 else get_name(city2)
# Connect N: Q5-11
######################
### Connect N Game ###
######################
def create_row(size):
"""Returns a single, empty row with the given size. Each empty spot is
represented by the string '-'.
>>> create_row(5)
['-', '-', '-', '-', '-']
"""
return ['-'] * size
def create_board(rows, columns):
"""Returns a board with the given dimensions.
>>> create_board(3, 5)
[['-', '-', '-', '-', '-'], ['-', '-', '-', '-', '-'], ['-', '-', '-', '-', '-']]
"""
"*** YOUR CODE HERE ***"
    return [create_row(columns) for _ in range(rows)]  # build a fresh row for each board row
def replace_elem(lst, index, elem):
"""Create and return a new list whose elements are the same as those in
LST except at index INDEX, which should contain element ELEM instead.
>>> old = [1, 2, 3, 4, 5, 6, 7]
>>> new = replace_elem(old, 2, 8)
>>> new
[1, 2, 8, 4, 5, 6, 7]
>>> new is old # check that replace_elem outputs a new list
False
"""
assert index >= 0 and index < len(lst), 'Index is out of bounds'
"*** YOUR CODE HERE ***"
return lst[:index] + [elem] + lst[index + 1:]
def get_piece(board, row, column):
"""Returns the piece at location (row, column) in the board.
>>> rows, columns = 2, 2
>>> board = create_board(rows, columns)
>>> board = put_piece(board, rows, 0, 'X')[1]
>>> board = put_piece(board, rows, 0, 'O')[1]
>>> get_piece(board, 1, 0)
'X'
>>> get_piece(board, 1, 1)
'-'
"""
"*** YOUR CODE HERE ***"
return board[row][column]
def put_piece(board, max_rows, column, player):
"""Puts PLAYER's piece in the bottommost empty spot in the given column of
the board. Returns a tuple of two elements:
1. The index of the row the piece ends up in, or -1 if the column
is full.
2. The new board
>>> rows, columns = 2, 2
>>> board = create_board(rows, columns)
>>> row, new_board = put_piece(board, rows, 0, 'X')
>>> row
1
>>> row, new_board = put_piece(new_board, rows, 0, 'O')
>>> row
0
>>> row, new_board = put_piece(new_board, rows, 0, 'X')
>>> row
-1
"""
"*** YOUR CODE HERE ***"
row = 0
while row < max_rows and get_piece(board, row, column) == '-':
row = row + 1
if row != 0:
new_row = replace_elem(board[row - 1], column, player)
board = replace_elem(board, row - 1, new_row)
return row - 1, board
def make_move(board, max_rows, max_cols, col, player):
"""Put player's piece in column COL of the board, if it is a valid move.
Return a tuple of two values:
1. If the move is valid, make_move returns the index of the row the
piece is placed in. Otherwise, it returns -1.
2. The updated board
>>> rows, columns = 2, 2
>>> board = create_board(rows, columns)
>>> row, board = make_move(board, rows, columns, 0, 'X')
>>> row
1
>>> get_piece(board, 1, 0)
'X'
>>> row, board = make_move(board, rows, columns, 0, 'O')
>>> row
0
>>> row, board = make_move(board, rows, columns, 0, 'X')
>>> row
-1
>>> row, board = make_move(board, rows, columns, -4, '0')
>>> row
-1
"""
"*** YOUR CODE HERE ***"
if col < 0 or col >= max_cols:
return -1, board
else:
row, new_board = put_piece(board, max_rows, col, player)
return row, new_board
def print_board(board, max_rows, max_cols):
"""Prints the board. Row 0 is at the top, and column 0 at the far left.
>>> rows, columns = 2, 2
>>> board = create_board(rows, columns)
>>> print_board(board, rows, columns)
- -
- -
>>> new_board = make_move(board, rows, columns, 0, 'X')[1]
>>> print_board(new_board, rows, columns)
- -
X -
"""
"*** YOUR CODE HERE ***"
assert max_rows > 0 and max_cols > 0
row = 0
while row < max_rows:
col = 1
line = get_piece(board, row, 0)
while col < max_cols:
line = line + ' ' + get_piece(board, row, col)
col = col + 1
print(line)
row = row + 1
def check_win_row(board, max_rows, max_cols, num_connect, row, player):
""" Returns True if the given player has a horizontal win
in the given row, and otherwise False.
>>> rows, columns, num_connect = 4, 4, 2
>>> board = create_board(rows, columns)
>>> board = make_move(board, rows, columns, 0, 'X')[1]
>>> board = make_move(board, rows, columns, 0, 'O')[1]
>>> check_win_row(board, rows, columns, num_connect, 3, 'O')
False
>>> board = make_move(board, rows, columns, 2, 'X')[1]
>>> board = make_move(board, rows, columns, 0, 'O')[1]
>>> check_win_row(board, rows, columns, num_connect, 3, 'X')
False
>>> board = make_move(board, rows, columns, 1, 'X')[1]
>>> check_win_row(board, rows, columns, num_connect, 3, 'X')
True
>>> check_win_row(board, rows, columns, 4, 3, 'X') # A win depends on the value of num_connect
False
>>> check_win_row(board, rows, columns, num_connect, 3, 'O') # We only detect wins for the given player
False
"""
"*** YOUR CODE HERE ***"
assert row >= 0 and row < max_rows
max_consec, cur_consec = 0, 0
for col in range(0, max_cols):
if get_piece(board, row, col) == player:
cur_consec += 1
if cur_consec > max_consec:
max_consec = cur_consec
else:
cur_consec = 0
return max_consec >= num_connect
def check_win_column(board, max_rows, max_cols, num_connect, col, player):
""" Returns True if the given player has a vertical win in the given column,
and otherwise False.
>>> rows, columns, num_connect = 5, 5, 2
>>> board = create_board(rows, columns)
>>> board = make_move(board, rows, columns, 0, 'X')[1]
>>> board = make_move(board, rows, columns, 1, 'O')[1]
>>> check_win_column(board, rows, columns, num_connect, 0, 'X')
False
>>> board = make_move(board, rows, columns, 1, 'X')[1]
>>> board = make_move(board, rows, columns, 1, 'O')[1]
>>> check_win_column(board, rows, columns, num_connect, 1, 'O')
False
>>> board = make_move(board, rows, columns, 2, 'X')[1]
>>> board = make_move(board, rows, columns, 1, 'O')[1]
>>> check_win_column(board, rows, columns, num_connect, 1, 'O')
True
>>> check_win_column(board, rows, columns, 4, 1, 'O')
False
>>> check_win_column(board, rows, columns, num_connect, 1, 'X')
False
"""
"*** YOUR CODE HERE ***"
assert col >= 0 and col < max_cols
max_consec, cur_consec = 0, 0
    for row in range(0, max_rows):  # scan every row of this column
if get_piece(board, row, col) == player:
cur_consec += 1
if cur_consec > max_consec:
max_consec = cur_consec
else:
cur_consec = 0
return max_consec >= num_connect
def check_win(board, max_rows, max_cols, num_connect, row, col, player):
""" Returns True if the given player has any kind of win after placing a
piece at (row, col), and False otherwise.
>>> rows, columns, num_connect = 2, 2, 2
>>> board = create_board(rows, columns)
>>> board = make_move(board, rows, columns, 0, 'X')[1]
>>> board = make_move(board, rows, columns, 1, 'O')[1]
>>> board = make_move(board, rows, columns, 0, 'X')[1]
>>> check_win(board, rows, columns, num_connect, 0, 0, 'O')
False
>>> check_win(board, rows, columns, num_connect, 0, 0, 'X')
True
>>> board = create_board(rows, columns)
>>> board = make_move(board, rows, columns, 0, 'X')[1]
>>> board = make_move(board, rows, columns, 0, 'O')[1]
>>> board = make_move(board, rows, columns, 1, 'X')[1]
>>> check_win(board, rows, columns, num_connect, 1, 0, 'X')
True
>>> check_win(board, rows, columns, num_connect, 0, 0, 'X')
False
>>> board = create_board(rows, columns)
>>> board = make_move(board, rows, columns, 0, 'X')[1]
>>> board = make_move(board, rows, columns, 1, 'O')[1]
>>> board = make_move(board, rows, columns, 1, 'X')[1]
>>> check_win(board, rows, columns, num_connect, 0, 0, 'X')
False
>>> check_win(board, rows, columns, num_connect, 1, 0, 'X')
True
"""
diagonal_win = check_win_diagonal(board, max_rows, max_cols, num_connect,
row, col, player)
"*** YOUR CODE HERE ***"
return diagonal_win or check_win_row(board, max_rows, max_cols, num_connect, row, player) or \
check_win_column(board, max_rows, max_cols, num_connect, col, player)
###############################################################
### Functions for reference when solving the other problems ###
###############################################################
def check_win_diagonal(board, max_rows, max_cols, num_connect, row, col, player):
""" Returns True if the given player has a diagonal win passing the spot
(row, column), and False otherwise.
"""
# Find top left of diagonal passing through the newly placed piece.
adjacent = 0
row_top_left, col_top_left = row, col
while row_top_left > 0 and col_top_left > 0:
row_top_left -= 1
col_top_left -= 1
# Loop through top left to bottom right diagonal and check for win.
while row_top_left < max_rows and col_top_left < max_cols:
piece = get_piece(board, row_top_left, col_top_left)
if piece == player:
adjacent += 1
else:
adjacent = 0
if adjacent >= num_connect:
return True
row_top_left += 1
col_top_left += 1
# Find top right of diagonal passing through the newly placed piece.
adjacent = 0
row_top_right, col_top_right = row, col
while row_top_right > 0 and col_top_right < max_cols - 1:
row_top_right -= 1
col_top_right += 1
# Loop through top right to bottom left diagonal and check for win.
while row_top_right < max_rows and col_top_right >= 0:
piece = get_piece(board, row_top_right, col_top_right)
if piece == player:
adjacent += 1
else:
adjacent = 0
if adjacent >= num_connect:
return True
row_top_right += 1
col_top_right -= 1
return False
#####################################################################################
### You do not need to read or understand the following code for this assignment. ###
#####################################################################################
import sys
def other(player):
""" Returns the given player's opponent.
"""
if player == 'X':
return 'O'
return 'X'
def play(board, max_rows, max_cols, num_connect):
max_turns = max_rows * max_cols
playing = True
print("Player 'X' starts")
who = 'X'
turns = 0
while True:
turns += 1
if turns > max_turns:
print("No more moves. It's a tie!")
sys.exit()
while True:
try:
col_index = int(input('Which column, player {}? '.format(who)))
except ValueError as e:
print('Invalid input. Please try again.')
continue
row_index, board = make_move(board, max_rows, max_cols, col_index, who)
if row_index != -1:
break
print("Oops, you can't put a piece there")
print_board(board, max_rows, max_cols)
if check_win(board, max_rows, max_cols, num_connect, row_index, col_index, who):
print("Player {} wins!".format(who))
sys.exit()
who = other(who)
def start_game():
# Get all parameters for the game from user.
while True:
# Get num_connect from user.
while True:
try:
num_connect = int(input('How many to connect (e.g. 4 for Connect 4)? '))
except ValueError as e:
print('Invalid input. Please try again.')
continue
break
# Get number of rows for board from user.
while True:
try:
max_rows = int(input('How many rows? '))
except ValueError as e:
print('Invalid input. Please try again.')
continue
break
# Get number of columns for board from user.
while True:
try:
max_cols = int(input('How many columns? '))
except ValueError as e:
print('Invalid input. Please try again.')
continue
break
if max_rows >= num_connect or max_cols >= num_connect:
break
print("Invalid dimensions for connect {0}. Please try again.".format(num_connect))
board = create_board(max_rows, max_cols)
play(board, max_rows, max_cols, num_connect)
|
#
# IIT Kharagpur - Hall Management System
# System to manage Halls of residences, Warden grant requests, student complaints
# hall worker attendances and salary payments
#
# MIT License
#
"""
@ authors: Madhav Datt, Avikalp Srivastava
"""
import password_validation as pv
import re
import db_rebuild as dbr
from ..workers import clerk, mess_manager
def is_valid(password):
"""
Check if passed plain-text string is a valid password
Valid passwords - minimum criteria:
8 characters
1 capital letter
1 numerical value
no spaces
"""
present_capital = re.search(r'[A-Z]', password, re.M)
present_num = re.search(r'\d', password, re.M)
if (len(password) >= 8) and (" " not in password) and present_capital and present_num:
return True
return False
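# A minimal illustration of the rules above (hypothetical passwords, not used
# by the application code):
def _example_password_checks():
    """Sketch showing which strings is_valid() accepts."""
    assert is_valid('Secret123')       # 8+ chars, a capital, a digit, no space
    assert not is_valid('secret123')   # no capital letter
    assert not is_valid('Short1A')     # fewer than 8 characters
    assert not is_valid('Secret 123')  # contains a space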
def authenticate(table, user_ID, password):
"""
    Authenticate a login attempt with the entered user_ID and password.
    Look the user up in the given table and return True if the password matches.
"""
if table == "clerk":
table_data = dbr.rebuild("worker")
if user_ID not in table_data:
return False
if isinstance(table_data[user_ID], clerk.Clerk):
if pv.check_password(password, table_data[user_ID].password):
return True
elif table == "mess_manager":
table_data = dbr.rebuild("worker")
if user_ID not in table_data:
return False
if isinstance(table_data[user_ID], mess_manager.MessManager):
if pv.check_password(password, table_data[user_ID].password):
return True
elif table == "student":
table_data = dbr.rebuild(table)
if user_ID not in table_data:
return False
if pv.check_password(password, table_data[user_ID].password):
return True
elif table == "warden":
table_data = dbr.rebuild(table)
if user_ID not in table_data:
return False
if pv.check_password(password, table_data[user_ID].password):
return True
elif table == "hmc":
table_data = dbr.rebuild(table)
for key in table_data:
if pv.check_password(password, table_data[key].password):
return True
return False
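# Hypothetical refactoring sketch (not wired in anywhere): the two worker
# branches above differ only in the class they check, so a small dispatch
# table could express the same logic. Names below mirror the imports at the
# top of this module.
def _authenticate_worker_sketch(table, user_ID, password):
    """Behavior-equivalent sketch of the 'clerk'/'mess_manager' branches."""
    worker_classes = {"clerk": clerk.Clerk, "mess_manager": mess_manager.MessManager}
    if table not in worker_classes:
        return False
    table_data = dbr.rebuild("worker")
    record = table_data.get(user_ID)
    return (isinstance(record, worker_classes[table]) and
            pv.check_password(password, record.password))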
|
import csv
import pandas as pd
from time import time, sleep
import numpy as np
import nltk
import string
import ast
import re
import os
import sys
import multiprocessing
from os import listdir
from os.path import isfile, join
csv.field_size_limit(sys.maxsize)
#os.system("taskset -p 0xff %d" % os.getpid())
def IsNotNull(value):
return value is not None and len(value) > 0
totallist = []
#create +/- word-dict
#Bing Liu's dictionary
dict_p = []
f = open('positive-words.txt', 'r')
for line in f:
t = line.strip().lower()
if IsNotNull(t):
dict_p.append(t)
f.close()
dict_n = []
f = open('negative-words.txt', 'r')
for line in f:
t = line.strip().lower()
if IsNotNull(t):
dict_n.append(t)
f.close()
#change to MASTER DICTIONARY
dict_n2 = []
f = open('negative - master dictionary.txt', 'r')
for line in f:
t = line.strip().lower()
if IsNotNull(t):
dict_n2.append(t)
f.close()
dict_p2 = []
f = open('positive - master dictionary.txt', 'r')
for line in f:
t = line.strip().lower()
if IsNotNull(t):
dict_p2.append(t)
f.close()
#EXTENDED SENTIMENT
dict_uncertainty = []
f = open('uncertainty - master dictionary.txt', 'r')
for line in f:
t = line.strip().lower()
if IsNotNull(t):
dict_uncertainty.append(t)
f.close()
dict_litigious = []
f = open('litigious - master dictionary.txt', 'r')
for line in f:
t = line.strip().lower()
if IsNotNull(t):
dict_litigious.append(t)
f.close()
dict_constraining = []
f = open('constraining - master dictionary.txt', 'r')
for line in f:
t = line.strip().lower()
if IsNotNull(t):
dict_constraining.append(t)
f.close()
dict_superfluous = []
f = open('superfluous - master dictionary.txt', 'r')
for line in f:
t = line.strip().lower()
if IsNotNull(t):
dict_superfluous.append(t)
f.close()
dict_interesting = []
f = open('interesting - master dictionary.txt', 'r')
for line in f:
t = line.strip().lower()
if IsNotNull(t):
dict_interesting.append(t)
f.close()
dict_modal = []
f = open('modal - master dictionary.txt', 'r')
for line in f:
t = line.strip().lower()
if IsNotNull(t):
dict_modal.append(t)
f.close()
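# The loading blocks above all repeat the same pattern; a helper like the
# sketch below could replace them (the name load_word_list is illustrative and
# is not used elsewhere in this script):
def load_word_list(path):
    """Return the lowercased, non-empty lines of a word-list file."""
    words = []
    with open(path, 'r') as handle:
        for line in handle:
            t = line.strip().lower()
            if IsNotNull(t):
                words.append(t)
    return words
# e.g. dict_p = load_word_list('positive-words.txt')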
rowlist = []
rowlist2 = []
newlist = []
netcnt2 = 0
netcnt = 0
counti = 1
qa = 0
qb = 0
qa2 = 0
qb2 = 0
unc = 0
lit = 0
con = 0
sup = 0
inte = 0
mod = 0
mypath = '/Users/francis/Documents/FORDHAM/2nd Term/Text Analytics/' #path where files are located
onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]
for i in onlyfiles:
    qa = 0
    qb = 0
    # reset the remaining per-file counters too, so the master-dictionary and
    # extended-sentiment counts do not carry over from previous files
    qa2 = 0
    qb2 = 0
    unc = 0
    lit = 0
    con = 0
    sup = 0
    inte = 0
    mod = 0
if i.endswith('.txt'):
# get code
j = i.replace('.txt','')
# string filename
file = mypath + str(i)
        f = open(file, 'rU')
        raw = f.read()
        f.close()  # close the handle once the file has been read
        raw = raw.replace('\n', ' ')
#raw = raw.decode('utf8')
for word in dict_p:
if word in raw:
qa += 1
for word in dict_n:
if word in raw:
qb += 1
qc = qa - qb
if qc > 0:
sentiment = 'POSITIVE'
elif qc == 0:
sentiment = 'NEUTRAL'
else:
sentiment = 'NEGATIVE'
#version 2 - dictionaries
for word in dict_p2:
if word in raw:
qa2 += 1
for word in dict_n2:
if word in raw:
qb2 += 1
qc2 = qa2 - qb2
if qc2 > 0:
sentiment2 = 'POSITIVE'
elif qc2 == 0:
sentiment2 = 'NEUTRAL'
else:
sentiment2 = 'NEGATIVE'
#extended
for word in dict_uncertainty:
if word in raw:
unc += 1
for word in dict_litigious:
if word in raw:
lit += 1
for word in dict_constraining:
if word in raw:
con += 1
for word in dict_superfluous:
if word in raw:
sup += 1
for word in dict_interesting:
if word in raw:
inte += 1
for word in dict_modal:
if word in raw:
mod += 1
rowlist.append(i)
rowlist.append(qa)
rowlist.append(qb)
rowlist.append(qc)
rowlist.append(sentiment)
rowlist.append(qa2)
rowlist.append(qb2)
rowlist.append(qc2)
rowlist.append(sentiment2)
rowlist.append(unc)
rowlist.append(lit)
rowlist.append(con)
rowlist.append(sup)
rowlist.append(inte)
rowlist.append(mod)
        print(counti)
counti += 1
totallist.append(rowlist)
rowlist2 = []
rowlist = []
labels = ('file', 'BL_P', 'BL_N', 'BL_NET', 'BL_SENTIMENT','M_P', 'M_N', 'M_NET', 'M_SENTIMENT','M_UNCERTAINTY','M_LITIGIOUS','M_CONSTRAINING','M_SUPERFLUOUS','M_INTERESTING','M_MODAL')
df = pd.DataFrame.from_records(totallist, columns = labels)
df.to_csv('allsentiment.csv', index = False)
# netcnt += qc
# netcnt2 += qc2
# if netcnt > 0:
# print "V1 - TOTAL +"
# elif netcnt == 0:
# print "V1 - TOTAL ~"
# else:
# print "V1 - TOTAL -"
# netcnt = 0
# if netcnt2 > 0:
# print "V2 - TOTAL +"
# elif netcnt2 == 0:
# print "V2 - TOTAL ~"
# else:
# print "V2 - TOTAL -"
# netcnt2 = 0
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#===============================================================================
# T9 message problem
#===============================================================================
from __future__ import unicode_literals
from codejam.common import CodeJamIO, Problem, ProblemInstance
#------------------------------------------------------------------------------
T9 = {
' ': '0',
'a': '2', 'b': '22', 'c': '222',
'd': '3', 'e': '33', 'f': '333',
'g': '4', 'h': '44', 'i': '444',
'j': '5', 'k': '55', 'l': '555',
'm': '6', 'n': '66', 'o': '666',
'p': '7', 'q': '77', 'r': '777', 's': '7777',
't': '8', 'u': '88', 'v': '888',
'w': '9', 'x': '99', 'y': '999', 'z': '9999'
}
class T9Message(ProblemInstance):
def __init__(self):
self.msg = CodeJamIO.read_input(strip=False)
def solve(self):
t9 = ''
for l in self.msg:
v = T9[l]
t9 += ' ' + v if (t9 and t9[-1] == v[0]) else v
return t9
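# Quick self-check of the pause rule above (hypothetical helper, mirrors
# T9Message.solve without the CodeJam I/O plumbing):
def _encode_t9(msg):
    out = ''
    for letter in msg:
        keys = T9[letter]
        # a pause (space) is needed when the next letter reuses the same key
        out += ' ' + keys if (out and out[-1] == keys[0]) else keys
    return out
# _encode_t9('hi') == '44 444'
# _encode_t9('hello world') == '4433555 555666096667775553'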
#------------------------------------------------------------------------------
if __name__ == '__main__':
p = Problem(T9Message)
p.solve()
|
#!/usr/bin/env python3
# Copyright (c) 2015-2019 The Starwels developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test transaction signing using the signrawtransaction RPC."""
from test_framework.test_framework import StarwelsTestFramework
from test_framework.util import *
class SignRawTransactionsTest(StarwelsTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def successful_signing_test(self):
"""Create and sign a valid raw transaction with one input.
Expected results:
1) The transaction has a complete set of signatures
2) No script verification error occurred"""
privKeys = ['cUeKHd5orzT3mz8P9pxyREHfsWtVfgsfDjiZZBcjUBAaGk1BTj7N', 'cVKpPfVKSJxKqVpE9awvXNWuLHCa5j5tiE7K6zbUSptFpTEtiFrA']
inputs = [
# Valid pay-to-pubkey scripts
{'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0,
'scriptPubKey': '76a91460baa0f494b38ce3c940dea67f3804dc52d1fb9488ac'},
{'txid': '83a4f6a6b73660e13ee6cb3c6063fa3759c50c9b7521d0536022961898f4fb02', 'vout': 0,
'scriptPubKey': '76a914669b857c03a5ed269d5d85a1ffac9ed5d663072788ac'},
]
outputs = {'mpLQjfK79b7CCV4VMJWEWAj5Mpx8Up5zxB': 0.1}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
rawTxSigned = self.nodes[0].signrawtransaction(rawTx, inputs, privKeys)
# 1) The transaction has a complete set of signatures
assert 'complete' in rawTxSigned
assert_equal(rawTxSigned['complete'], True)
# 2) No script verification error occurred
assert 'errors' not in rawTxSigned
def script_verification_error_test(self):
"""Create and sign a raw transaction with valid (vin 0), invalid (vin 1) and one missing (vin 2) input script.
Expected results:
3) The transaction has no complete set of signatures
4) Two script verification errors occurred
5) Script verification errors have certain properties ("txid", "vout", "scriptSig", "sequence", "error")
6) The verification errors refer to the invalid (vin 1) and missing input (vin 2)"""
privKeys = ['cUeKHd5orzT3mz8P9pxyREHfsWtVfgsfDjiZZBcjUBAaGk1BTj7N']
inputs = [
# Valid pay-to-pubkey script
{'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0},
# Invalid script
{'txid': '5b8673686910442c644b1f4993d8f7753c7c8fcb5c87ee40d56eaeef25204547', 'vout': 7},
# Missing scriptPubKey
{'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 1},
]
scripts = [
# Valid pay-to-pubkey script
{'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0,
'scriptPubKey': '76a91460baa0f494b38ce3c940dea67f3804dc52d1fb9488ac'},
# Invalid script
{'txid': '5b8673686910442c644b1f4993d8f7753c7c8fcb5c87ee40d56eaeef25204547', 'vout': 7,
'scriptPubKey': 'badbadbadbad'}
]
outputs = {'mpLQjfK79b7CCV4VMJWEWAj5Mpx8Up5zxB': 0.1}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
# Make sure decoderawtransaction is at least marginally sane
decodedRawTx = self.nodes[0].decoderawtransaction(rawTx)
for i, inp in enumerate(inputs):
assert_equal(decodedRawTx["vin"][i]["txid"], inp["txid"])
assert_equal(decodedRawTx["vin"][i]["vout"], inp["vout"])
# Make sure decoderawtransaction throws if there is extra data
assert_raises_rpc_error(-22, "TX decode failed", self.nodes[0].decoderawtransaction, rawTx + "00")
rawTxSigned = self.nodes[0].signrawtransaction(rawTx, scripts, privKeys)
# 3) The transaction has no complete set of signatures
assert 'complete' in rawTxSigned
assert_equal(rawTxSigned['complete'], False)
# 4) Two script verification errors occurred
assert 'errors' in rawTxSigned
assert_equal(len(rawTxSigned['errors']), 2)
# 5) Script verification errors have certain properties
assert 'txid' in rawTxSigned['errors'][0]
assert 'vout' in rawTxSigned['errors'][0]
assert 'witness' in rawTxSigned['errors'][0]
assert 'scriptSig' in rawTxSigned['errors'][0]
assert 'sequence' in rawTxSigned['errors'][0]
assert 'error' in rawTxSigned['errors'][0]
# 6) The verification errors refer to the invalid (vin 1) and missing input (vin 2)
assert_equal(rawTxSigned['errors'][0]['txid'], inputs[1]['txid'])
assert_equal(rawTxSigned['errors'][0]['vout'], inputs[1]['vout'])
assert_equal(rawTxSigned['errors'][1]['txid'], inputs[2]['txid'])
assert_equal(rawTxSigned['errors'][1]['vout'], inputs[2]['vout'])
assert not rawTxSigned['errors'][0]['witness']
# Now test signing failure for transaction with input witnesses
p2wpkh_raw_tx = "01000000000102fff7f7881a8099afa6940d42d1e7f6362bec38171ea3edf433541db4e4ad969f00000000494830450221008b9d1dc26ba6a9cb62127b02742fa9d754cd3bebf337f7a55d114c8e5cdd30be022040529b194ba3f9281a99f2b1c0a19c0489bc22ede944ccf4ecbab4cc618ef3ed01eeffffffef51e1b804cc89d182d279655c3aa89e815b1b309fe287d9b2b55d57b90ec68a0100000000ffffffff02202cb206000000001976a9148280b37df378db99f66f85c95a783a76ac7a6d5988ac9093510d000000001976a9143bde42dbee7e4dbe6a21b2d50ce2f0167faa815988ac000247304402203609e17b84f6a7d30c80bfa610b5b4542f32a8a0d5447a12fb1366d7f01cc44a0220573a954c4518331561406f90300e8f3358f51928d43c212a8caed02de67eebee0121025476c2e83188368da1ff3e292e7acafcdb3566bb0ad253f62fc70f07aeee635711000000"
rawTxSigned = self.nodes[0].signrawtransaction(p2wpkh_raw_tx)
# 7) The transaction has no complete set of signatures
assert 'complete' in rawTxSigned
assert_equal(rawTxSigned['complete'], False)
# 8) Two script verification errors occurred
assert 'errors' in rawTxSigned
assert_equal(len(rawTxSigned['errors']), 2)
# 9) Script verification errors have certain properties
assert 'txid' in rawTxSigned['errors'][0]
assert 'vout' in rawTxSigned['errors'][0]
assert 'witness' in rawTxSigned['errors'][0]
assert 'scriptSig' in rawTxSigned['errors'][0]
assert 'sequence' in rawTxSigned['errors'][0]
assert 'error' in rawTxSigned['errors'][0]
# Non-empty witness checked here
assert_equal(rawTxSigned['errors'][1]['witness'], ["304402203609e17b84f6a7d30c80bfa610b5b4542f32a8a0d5447a12fb1366d7f01cc44a0220573a954c4518331561406f90300e8f3358f51928d43c212a8caed02de67eebee01", "025476c2e83188368da1ff3e292e7acafcdb3566bb0ad253f62fc70f07aeee6357"])
assert not rawTxSigned['errors'][0]['witness']
def run_test(self):
self.successful_signing_test()
self.script_verification_error_test()
if __name__ == '__main__':
SignRawTransactionsTest().main()
|
import os
import unittest
from avro import schema
from avro import io
from avro import datafile
class TestAvro(unittest.TestCase):
def test_container(self):
writer = open('data.avro', 'wb')
datum_writer = io.DatumWriter()
schema_object = schema.parse("""\
{ "type": "record",
"name": "StringPair",
"doc": "A pair of strings.",
"fields": [
{"name": "left", "type": "string"},
{"name": "right", "type": "string"}
]
}
""")
dfw = datafile.DataFileWriter(writer, datum_writer, schema_object)
datum = {'left':'L', 'right':'R'}
dfw.append(datum)
dfw.close()
reader = open('data.avro', 'rb')
datum_reader = io.DatumReader()
dfr = datafile.DataFileReader(reader, datum_reader)
data = []
for datum in dfr:
data.append(datum)
        self.assertEqual(1, len(data))
        self.assertEqual(datum, data[0])
def test_write_data(self):
writer = open('pairs.avro', 'wb')
datum_writer = io.DatumWriter()
schema_object = schema.parse(open('/Users/tom/workspace/hadoop-book-avro/src/main/java/Pair.avsc').read())
dfw = datafile.DataFileWriter(writer, datum_writer, schema_object)
dfw.append({'left':'a', 'right':'1'})
dfw.append({'left':'c', 'right':'2'})
dfw.append({'left':'b', 'right':'3'})
dfw.append({'left':'b', 'right':'2'})
dfw.close()
if __name__ == '__main__':
unittest.main()
|
'''Basic test cases for QGraphicsScene'''
import unittest
import gc
from PySide.QtCore import QPointF
from PySide.QtGui import QApplication, QPushButton, QPolygonF, QPixmap
from PySide.QtGui import QGraphicsScene, QPainterPath
from PySide.QtGui import QGraphicsEllipseItem, QGraphicsLineItem
from PySide.QtGui import QGraphicsPathItem, QGraphicsPixmapItem
from PySide.QtGui import QGraphicsPolygonItem, QGraphicsRectItem
from PySide.QtGui import QGraphicsSimpleTextItem, QGraphicsTextItem
from PySide.QtGui import QGraphicsProxyWidget
from helper import UsesQApplication
class Constructor(unittest.TestCase):
'''QGraphicsScene constructor'''
def testConstructor(self):
#QGraphicsScene constructor
obj = QGraphicsScene()
self.assertTrue(isinstance(obj, QGraphicsScene))
class ConstructorWithRect(unittest.TestCase):
'''QGraphicsScene qrect constructor and related sizes'''
def setUp(self):
#Acquire resources
# PyQt4 doesn't accept a QRect as argument to constructor
self.scene = QGraphicsScene(0, 200, 150, 175)
def tearDown(self):
#Release resources
del self.scene
def testHeight(self):
#QGraphicsScene.height()
self.assertEqual(self.scene.height(), 175)
def testWidth(self):
#QGraphicsScene.width()
self.assertEqual(self.scene.width(), 150)
class AddItem(UsesQApplication):
'''Tests for QGraphicsScene.add*'''
qapplication = True
def setUp(self):
#Acquire resources
super(AddItem, self).setUp()
self.scene = QGraphicsScene()
        # While the scene does not inherit from QWidget, it requires
        # an application to make the internals work.
def tearDown(self):
#Release resources
del self.scene
super(AddItem, self).tearDown()
def testEllipse(self):
#QGraphicsScene.addEllipse
item = self.scene.addEllipse(100, 100, 100, 100)
self.assertTrue(isinstance(item, QGraphicsEllipseItem))
def testLine(self):
#QGraphicsScene.addLine
item = self.scene.addLine(100, 100, 200, 200)
self.assertTrue(isinstance(item, QGraphicsLineItem))
def testPath(self):
#QGraphicsScene.addPath
item = self.scene.addPath(QPainterPath())
self.assertTrue(isinstance(item, QGraphicsPathItem))
def testPixmap(self):
#QGraphicsScene.addPixmap
item = self.scene.addPixmap(QPixmap())
self.assertTrue(isinstance(item, QGraphicsPixmapItem))
def testPolygon(self):
#QGraphicsScene.addPolygon
points = [QPointF(0, 0), QPointF(100, 100), QPointF(0, 100)]
item = self.scene.addPolygon(QPolygonF(points))
self.assertTrue(isinstance(item, QGraphicsPolygonItem))
def testRect(self):
#QGraphicsScene.addRect
item = self.scene.addRect(100, 100, 100, 100)
self.assertTrue(isinstance(item, QGraphicsRectItem))
def testSimpleText(self):
#QGraphicsScene.addSimpleText
item = self.scene.addSimpleText('Monty Python 42')
self.assertTrue(isinstance(item, QGraphicsSimpleTextItem))
def testText(self):
#QGraphicsScene.addText
item = self.scene.addText('Monty Python 42')
self.assertTrue(isinstance(item, QGraphicsTextItem))
def testWidget(self):
#QGraphicsScene.addWidget
        # XXX: prints some X11 error when used under PyQt4
item = self.scene.addWidget(QPushButton())
self.assertTrue(isinstance(item, QGraphicsProxyWidget))
class ItemRetrieve(UsesQApplication):
'''Tests for QGraphicsScene item retrieval methods'''
qapplication = True
def setUp(self):
#Acquire resources
super(ItemRetrieve, self).setUp()
self.scene = QGraphicsScene()
self.topleft = QGraphicsRectItem(0, 0, 100, 100)
self.topright = QGraphicsRectItem(100, 0, 100, 100)
self.bottomleft = QGraphicsRectItem(0, 100, 100, 100)
self.bottomright = QGraphicsRectItem(100, 100, 100, 100)
self.items = [self.topleft, self.topright, self.bottomleft,
self.bottomright]
for item in self.items:
self.scene.addItem(item)
def tearDown(self):
#Release resources
del self.scene
super(ItemRetrieve, self).tearDown()
def testItems(self):
#QGraphicsScene.items()
items = self.scene.items()
for i in items:
self.assertTrue(i in self.items)
def testItemAt(self):
#QGraphicsScene.itemAt()
self.assertEqual(self.scene.itemAt(50, 50), self.topleft)
self.assertEqual(self.scene.itemAt(150, 50), self.topright)
self.assertEqual(self.scene.itemAt(50, 150), self.bottomleft)
self.assertEqual(self.scene.itemAt(150, 150), self.bottomright)
class TestGraphicsGroup(UsesQApplication):
def testIt(self):
scene = QGraphicsScene()
i1 = QGraphicsRectItem()
scene.addItem(i1)
i2 = QGraphicsRectItem(i1)
i3 = QGraphicsRectItem()
i4 = QGraphicsRectItem()
group = scene.createItemGroup((i2, i3, i4))
scene.removeItem(i1)
del i1 # this shouldn't delete i2
self.assertEqual(i2.scene(), scene)
scene.destroyItemGroup(group)
self.assertRaises(RuntimeError, group.type)
if __name__ == '__main__':
unittest.main()
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2011 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
WebAuthorProfile daemon
"""
from sys import stdout
import bibtask
from invenio.bibauthorid_dbinterface import get_existing_authors
from invenio.webauthorprofile_dbapi import get_expired_person_ids
from invenio.webauthorprofile_corefunctions import _compute_cache_for_person
def webauthorprofile_daemon():
""" Constructs the webauthorprofile bibtask. """
bibtask.task_init(authorization_action='runbibclassify',
authorization_msg="WebAuthorProfile Task Submission",
description="""
Purpose:
Precompute WebAuthorProfile caches.
Examples:
$webauthorprofile -u admin --all
""",
help_specific_usage="""
webauthorprofile [OPTIONS]
OPTIONS
Options for update personid
(default) Computes all caches for all persons with at least one expired cache
--all Computes all caches for all persons
--mp Enables multiprocessing computation
""",
version="Invenio WebAuthorProfile v 1.0",
specific_params=("i:", ["all", "mp"]),
task_submit_elaborate_specific_parameter_fnc=_task_submit_elaborate_specific_parameter,
task_submit_check_options_fnc=_task_submit_check_options,
task_run_fnc=_task_run_core)
def _task_submit_elaborate_specific_parameter(key, value, opts, args):
"""
    Given the string key, check its meaning, possibly using the value.
    Usually this fills some key in the options dict.
    It must return True if it has handled the key, and False if it does not
    know that key.
"""
if key in ("--all",):
bibtask.task_set_option("all_pids", True)
elif key in ("--mp",):
bibtask.task_set_option("mp", True)
else:
return False
return True
def _task_run_core():
""" Runs the requested task in the bibsched environment. """
def compute_cache_f(mp):
if mp:
return compute_cache_mp
else:
return compute_cache
all_pids = bibtask.task_get_option('all_pids', False)
mp = bibtask.task_get_option('mp', False)
if all_pids:
pids = list(get_existing_authors(with_papers_only=True))
compute_cache_f(mp)(pids)
else:
pids = get_expired_person_ids()
if pids:
compute_cache_f(mp)(pids)
return 1
def _task_submit_check_options():
""" Required by bibtask. Checks the options. """
return True
def compute_cache(pids):
bibtask.write_message("WebAuthorProfile: %s persons to go" % len(pids),
stream=stdout, verbose=0)
    for i, p in enumerate(pids):
        bibtask.write_message("WebAuthorProfile: doing %s out of %s (personid: %s)" % (i + 1, len(pids), p))
        bibtask.task_update_progress("WebAuthorProfile: doing %s out of %s (personid: %s)" % (i + 1, len(pids), p))
_compute_cache_for_person(p)
bibtask.task_sleep_now_if_required(can_stop_too=True)
def compute_cache_mp(pids):
from multiprocessing import Pool
p = Pool()
bibtask.write_message("WebAuthorProfileMP: %s persons to go" % len(pids),
stream=stdout, verbose=0)
sl = 100
ss = [pids[i: i + sl] for i in range(0, len(pids), sl)]
for i, bunch in enumerate(ss):
bibtask.write_message("WebAuthorProfileMP: doing bunch %s out of %s" % (str(i + 1), len(ss)))
bibtask.task_update_progress("WebAuthorProfileMP: doing bunch %s out of %s" % (str(i + 1), len(ss)))
p.map(_compute_cache_for_person, bunch)
bibtask.task_sleep_now_if_required(can_stop_too=True)
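# Illustration of the bunching above (hypothetical counts): with 250 person ids
# and sl = 100, the slicing yields bunches of 100, 100 and 50 ids, i.e.
#   [len(b) for b in [list(range(250))[i:i + 100] for i in range(0, 250, 100)]]
#   == [100, 100, 50]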
|
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2015 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
# pylint: disable=unused-import,import-error
"""The qutebrowser test suite conftest file."""
import os
import sys
import collections
import itertools
import logging
import textwrap
import warnings
import pytest
import helpers.stubs as stubsmod
from helpers import logfail
from helpers.logfail import fail_on_logging
from helpers.messagemock import message_mock
from qutebrowser.config import config
from qutebrowser.utils import objreg
from PyQt5.QtNetwork import QNetworkCookieJar
import xvfbwrapper
def _apply_platform_markers(item):
"""Apply a skip marker to a given item."""
markers = [
('posix', os.name != 'posix', "Requires a POSIX os"),
('windows', os.name != 'nt', "Requires Windows"),
('linux', not sys.platform.startswith('linux'), "Requires Linux"),
('osx', sys.platform != 'darwin', "Requires OS X"),
('not_osx', sys.platform == 'darwin', "Skipped on OS X"),
('not_frozen', getattr(sys, 'frozen', False),
"Can't be run when frozen"),
('frozen', not getattr(sys, 'frozen', False),
"Can only run when frozen"),
('not_xvfb', item.config.xvfb_display is not None,
"Can't be run with Xvfb."),
]
for searched_marker, condition, default_reason in markers:
marker = item.get_marker(searched_marker)
if not marker or not condition:
continue
if 'reason' in marker.kwargs:
reason = '{}: {}'.format(default_reason,
marker.kwargs['reason'])
del marker.kwargs['reason']
else:
reason = default_reason + '.'
skipif_marker = pytest.mark.skipif(condition, *marker.args,
reason=reason, **marker.kwargs)
item.add_marker(skipif_marker)
def pytest_collection_modifyitems(items):
"""Handle custom markers.
pytest hook called after collection has been performed.
Adds a marker named "gui" which can be used to filter gui tests from the
command line.
For example:
py.test -m "not gui" # run all tests except gui tests
py.test -m "gui" # run only gui tests
It also handles the platform specific markers by translating them to skipif
markers.
Args:
items: list of _pytest.main.Node items, where each item represents
a python test that will be executed.
Reference:
http://pytest.org/latest/plugins.html
"""
for item in items:
if 'qapp' in getattr(item, 'fixturenames', ()):
item.add_marker('gui')
if sys.platform == 'linux' and not os.environ.get('DISPLAY', ''):
if ('CI' in os.environ and
not os.environ.get('QUTE_NO_DISPLAY_OK', '')):
raise Exception("No display available on CI!")
skip_marker = pytest.mark.skipif(
True, reason="No DISPLAY available")
item.add_marker(skip_marker)
if hasattr(item, 'module'):
module_path = os.path.relpath(
item.module.__file__,
os.path.commonprefix([__file__, item.module.__file__]))
module_root_dir = os.path.split(module_path)[0]
if module_root_dir == 'integration':
item.add_marker(pytest.mark.integration)
_apply_platform_markers(item)
def pytest_ignore_collect(path):
"""Ignore BDD tests during collection if frozen."""
rel_path = path.relto(os.path.dirname(__file__))
return (rel_path == os.path.join('integration', 'features') and
hasattr(sys, 'frozen'))
@pytest.fixture(scope='session')
def qapp(qapp):
"""Change the name of the QApplication instance."""
qapp.setApplicationName('qute_test')
return qapp
class WinRegistryHelper:
"""Helper class for win_registry."""
FakeWindow = collections.namedtuple('FakeWindow', ['registry'])
def __init__(self):
self._ids = []
def add_window(self, win_id):
assert win_id not in objreg.window_registry
registry = objreg.ObjectRegistry()
window = self.FakeWindow(registry)
objreg.window_registry[win_id] = window
self._ids.append(win_id)
def cleanup(self):
for win_id in self._ids:
del objreg.window_registry[win_id]
@pytest.yield_fixture
def win_registry():
"""Fixture providing a window registry for win_id 0 and 1."""
helper = WinRegistryHelper()
helper.add_window(0)
yield helper
helper.cleanup()
@pytest.yield_fixture
def tab_registry(win_registry):
"""Fixture providing a tab registry for win_id 0."""
registry = objreg.ObjectRegistry()
objreg.register('tab-registry', registry, scope='window', window=0)
yield registry
objreg.delete('tab-registry', scope='window', window=0)
def _generate_cmdline_tests():
"""Generate testcases for test_split_binding."""
# pylint: disable=invalid-name
TestCase = collections.namedtuple('TestCase', 'cmd, valid')
separators = [';;', ' ;; ', ';; ', ' ;;']
invalid = ['foo', '']
valid = ['leave-mode', 'hint all']
# Valid command only -> valid
for item in valid:
yield TestCase(''.join(item), True)
# Invalid command only -> invalid
for item in invalid:
yield TestCase(''.join(item), False)
# Invalid command combined with invalid command -> invalid
for item in itertools.product(invalid, separators, invalid):
yield TestCase(''.join(item), False)
# Valid command combined with valid command -> valid
for item in itertools.product(valid, separators, valid):
yield TestCase(''.join(item), True)
# Valid command combined with invalid command -> invalid
for item in itertools.product(valid, separators, invalid):
yield TestCase(''.join(item), False)
# Invalid command combined with valid command -> invalid
for item in itertools.product(invalid, separators, valid):
yield TestCase(''.join(item), False)
# Command with no_cmd_split combined with an "invalid" command -> valid
for item in itertools.product(['bind x open'], separators, invalid):
yield TestCase(''.join(item), True)
@pytest.fixture(params=_generate_cmdline_tests(), ids=lambda e: e.cmd)
def cmdline_test(request):
"""Fixture which generates tests for things validating commandlines."""
# Import qutebrowser.app so all cmdutils.register decorators get run.
import qutebrowser.app # pylint: disable=unused-variable
return request.param
@pytest.yield_fixture
def config_stub(stubs):
"""Fixture which provides a fake config object."""
stub = stubs.ConfigStub()
objreg.register('config', stub)
yield stub
objreg.delete('config')
@pytest.yield_fixture
def default_config():
"""Fixture that provides and registers an empty default config object."""
config_obj = config.ConfigManager(configdir=None, fname=None, relaxed=True)
objreg.register('config', config_obj)
yield config_obj
objreg.delete('config')
@pytest.yield_fixture
def key_config_stub(stubs):
"""Fixture which provides a fake key config object."""
stub = stubs.KeyConfigStub()
objreg.register('key-config', stub)
yield stub
objreg.delete('key-config')
@pytest.yield_fixture
def host_blocker_stub(stubs):
"""Fixture which provides a fake host blocker object."""
stub = stubs.HostBlockerStub()
objreg.register('host-blocker', stub)
yield stub
objreg.delete('host-blocker')
@pytest.fixture(scope='session')
def stubs():
"""Provide access to stub objects useful for testing."""
return stubsmod
@pytest.fixture(scope='session')
def unicode_encode_err():
"""Provide a fake UnicodeEncodeError exception."""
return UnicodeEncodeError('ascii', # codec
'', # object
0, # start
2, # end
'fake exception') # reason
@pytest.fixture(scope='session')
def qnam(qapp):
"""Session-wide QNetworkAccessManager."""
from PyQt5.QtNetwork import QNetworkAccessManager
nam = QNetworkAccessManager()
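    # Keep tests offline: the shared manager refuses all network access.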
nam.setNetworkAccessible(QNetworkAccessManager.NotAccessible)
return nam
@pytest.fixture
def webpage(qnam):
"""Get a new QWebPage object."""
from PyQt5.QtWebKitWidgets import QWebPage
page = QWebPage()
page.networkAccessManager().deleteLater()
page.setNetworkAccessManager(qnam)
return page
@pytest.fixture
def webview(qtbot, webpage):
"""Get a new QWebView object."""
from PyQt5.QtWebKitWidgets import QWebView
view = QWebView()
qtbot.add_widget(view)
view.page().deleteLater()
view.setPage(webpage)
view.resize(640, 480)
return view
@pytest.fixture
def webframe(webpage):
"""Convenience fixture to get a mainFrame of a QWebPage."""
return webpage.mainFrame()
@pytest.fixture
def fake_keyevent_factory():
"""Fixture that when called will return a mock instance of a QKeyEvent."""
from unittest import mock
from PyQt5.QtGui import QKeyEvent
def fake_keyevent(key, modifiers=0, text=''):
"""Generate a new fake QKeyPressEvent."""
evtmock = mock.create_autospec(QKeyEvent, instance=True)
evtmock.key.return_value = key
evtmock.modifiers.return_value = modifiers
evtmock.text.return_value = text
return evtmock
return fake_keyevent
@pytest.yield_fixture
def cookiejar_and_cache(stubs):
"""Fixture providing a fake cookie jar and cache."""
jar = QNetworkCookieJar()
cache = stubs.FakeNetworkCache()
objreg.register('cookie-jar', jar)
objreg.register('cache', cache)
yield
objreg.delete('cookie-jar')
objreg.delete('cache')
@pytest.fixture
def py_proc():
"""Get a python executable and args list which executes the given code."""
def func(code):
return (sys.executable, ['-c', textwrap.dedent(code.strip('\n'))])
return func
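# Usage sketch (an assumption, not an existing test): the tuple returned by
# py_proc can be fed straight into subprocess, e.g.:
#   def test_print(py_proc):
#       import subprocess
#       executable, args = py_proc("print('hello')")
#       subprocess.run([executable] + args, check=True)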
@pytest.yield_fixture(autouse=True)
def fail_tests_on_warnings():
warnings.simplefilter('error')
# https://github.com/pytest-dev/pytest-bdd/issues/153
warnings.filterwarnings('ignore', message=r'inspect.getargspec\(\) is '
r'deprecated, use inspect.signature\(\) instead',
category=DeprecationWarning)
yield
warnings.resetwarnings()
def pytest_addoption(parser):
parser.addoption('--no-xvfb', action='store_true', default=False,
help='Disable xvfb in tests.')
parser.addoption('--qute-delay', action='store', default=0, type=int,
help="Delay between qutebrowser commands.")
def pytest_configure(config):
"""Start Xvfb if we're on Linux, not on a CI and Xvfb is available.
This is a lot nicer than having windows popping up.
"""
config.xvfb_display = None
if os.environ.get('DISPLAY', None) == '':
# xvfbwrapper doesn't handle DISPLAY="" correctly
del os.environ['DISPLAY']
if sys.platform.startswith('linux') and not config.getoption('--no-xvfb'):
assert 'QUTE_BUILDBOT' not in os.environ
try:
disp = xvfbwrapper.Xvfb(width=800, height=600, colordepth=16)
disp.start()
except FileNotFoundError:
# We run without Xvfb if it's unavailable.
pass
else:
config.xvfb_display = disp
def pytest_unconfigure(config):
if config.xvfb_display is not None:
config.xvfb_display.stop()
|
import numpy as np
from numpy.linalg import LinAlgError
from .blas import get_blas_funcs
from .lapack import get_lapack_funcs
__all__ = ['LinAlgError', 'LinAlgWarning', 'norm']
class LinAlgWarning(RuntimeWarning):
"""
    The warning emitted when a linear-algebra operation is close to the
    failure conditions of the algorithm or a loss of accuracy is expected.
"""
pass
def norm(a, ord=None, axis=None, keepdims=False, check_finite=True):
"""
Matrix or vector norm.
This function is able to return one of eight different matrix norms,
or one of an infinite number of vector norms (described below), depending
on the value of the ``ord`` parameter. For tensors with rank different from
1 or 2, only `ord=None` is supported.
Parameters
----------
a : array_like
Input array. If `axis` is None, `a` must be 1-D or 2-D, unless `ord`
is None. If both `axis` and `ord` are None, the 2-norm of
``a.ravel`` will be returned.
ord : {int, inf, -inf, 'fro', 'nuc', None}, optional
Order of the norm (see table under ``Notes``). inf means NumPy's
`inf` object.
axis : {int, 2-tuple of ints, None}, optional
If `axis` is an integer, it specifies the axis of `a` along which to
compute the vector norms. If `axis` is a 2-tuple, it specifies the
axes that hold 2-D matrices, and the matrix norms of these matrices
are computed. If `axis` is None then either a vector norm (when `a`
is 1-D) or a matrix norm (when `a` is 2-D) is returned.
keepdims : bool, optional
If this is set to True, the axes which are normed over are left in the
result as dimensions with size one. With this option the result will
broadcast correctly against the original `a`.
check_finite : bool, optional
Whether to check that the input matrix contains only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
n : float or ndarray
Norm of the matrix or vector(s).
Notes
-----
For values of ``ord <= 0``, the result is, strictly speaking, not a
mathematical 'norm', but it may still be useful for various numerical
purposes.
The following norms can be calculated:
===== ============================ ==========================
ord norm for matrices norm for vectors
===== ============================ ==========================
None Frobenius norm 2-norm
'fro' Frobenius norm --
'nuc' nuclear norm --
inf max(sum(abs(a), axis=1)) max(abs(a))
-inf min(sum(abs(a), axis=1)) min(abs(a))
0 -- sum(a != 0)
1 max(sum(abs(a), axis=0)) as below
-1 min(sum(abs(a), axis=0)) as below
2 2-norm (largest sing. value) as below
-2 smallest singular value as below
other -- sum(abs(a)**ord)**(1./ord)
===== ============================ ==========================
The Frobenius norm is given by [1]_:
:math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}`
The nuclear norm is the sum of the singular values.
Both the Frobenius and nuclear norm orders are only defined for
matrices.
References
----------
.. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,
Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15
Examples
--------
>>> from scipy.linalg import norm
>>> a = np.arange(9) - 4.0
>>> a
array([-4., -3., -2., -1., 0., 1., 2., 3., 4.])
>>> b = a.reshape((3, 3))
>>> b
array([[-4., -3., -2.],
[-1., 0., 1.],
[ 2., 3., 4.]])
>>> norm(a)
7.745966692414834
>>> norm(b)
7.745966692414834
>>> norm(b, 'fro')
7.745966692414834
>>> norm(a, np.inf)
4
>>> norm(b, np.inf)
9
>>> norm(a, -np.inf)
0
>>> norm(b, -np.inf)
2
>>> norm(a, 1)
20
>>> norm(b, 1)
7
>>> norm(a, -1)
-4.6566128774142013e-010
>>> norm(b, -1)
6
>>> norm(a, 2)
7.745966692414834
>>> norm(b, 2)
7.3484692283495345
>>> norm(a, -2)
0
>>> norm(b, -2)
1.8570331885190563e-016
>>> norm(a, 3)
5.8480354764257312
>>> norm(a, -3)
0
"""
# Differs from numpy only in non-finite handling and the use of blas.
if check_finite:
a = np.asarray_chkfinite(a)
else:
a = np.asarray(a)
if a.size and a.dtype.char in 'fdFD' and axis is None and not keepdims:
if ord in (None, 2) and (a.ndim == 1):
# use blas for fast and stable euclidean norm
nrm2 = get_blas_funcs('nrm2', dtype=a.dtype, ilp64='preferred')
return nrm2(a)
if a.ndim == 2:
# Use lapack for a couple fast matrix norms.
# For some reason the *lange frobenius norm is slow.
lange_args = None
# Make sure this works if the user uses the axis keywords
# to apply the norm to the transpose.
if ord == 1:
if np.isfortran(a):
lange_args = '1', a
elif np.isfortran(a.T):
lange_args = 'i', a.T
elif ord == np.inf:
if np.isfortran(a):
lange_args = 'i', a
elif np.isfortran(a.T):
lange_args = '1', a.T
if lange_args:
lange = get_lapack_funcs('lange', dtype=a.dtype, ilp64='preferred')
return lange(*lange_args)
# fall back to numpy in every other case
return np.linalg.norm(a, ord=ord, axis=axis, keepdims=keepdims)
def _datacopied(arr, original):
"""
Strict check for `arr` not sharing any data with `original`,
under the assumption that arr = asarray(original)
"""
if arr is original:
return False
if not isinstance(original, np.ndarray) and hasattr(original, '__array__'):
return False
return arr.base is None
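# Illustrative sketch (not part of the module's public API): _datacopied answers
# whether asarray() had to allocate fresh memory for its input.
#   x = np.array([1.0, 2.0])
#   _datacopied(np.asarray(x), x)                      # -> False, same object
#   _datacopied(np.asarray([1.0, 2.0]), [1.0, 2.0])    # -> True, the list was copied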
|
#!/usr/bin/python
#
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
import pytest
from selenium.webdriver.common.by import By
class PageLoadingTests(unittest.TestCase):
def testShouldWaitForDocumentToBeLoaded(self):
self._loadSimplePage()
self.assertEqual(self.driver.title, "Hello WebDriver")
# Disabled till Java WebServer is used
#def testShouldFollowRedirectsSentInTheHttpResponseHeaders(self):
# self.driver.get(pages.redirectPage);
# self.assertEqual(self.driver.title, "We Arrive Here")
# Disabled till the Java WebServer is used
#def testShouldFollowMetaRedirects(self):
# self._loadPage("metaRedirect")
# self.assertEqual(self.driver.title, "We Arrive Here")
def testShouldBeAbleToGetAFragmentOnTheCurrentPage(self):
self._loadPage("xhtmlTest")
location = self.driver.current_url
self.driver.get(location + "#text")
self.driver.find_element(by=By.ID, value="id1")
@pytest.mark.ignore_safari
def testShouldReturnWhenGettingAUrlThatDoesNotResolve(self):
try:
# Of course, we're up the creek if this ever does get registered
self.driver.get("http://www.thisurldoesnotexist.comx/")
except ValueError:
pass
@pytest.mark.ignore_safari
def testShouldReturnWhenGettingAUrlThatDoesNotConnect(self):
# Here's hoping that there's nothing here. There shouldn't be
self.driver.get("http://localhost:3001")
#@Ignore({IE, IPHONE, SELENESE})
#def testShouldBeAbleToLoadAPageWithFramesetsAndWaitUntilAllFramesAreLoaded() {
# self.driver.get(pages.framesetPage);
# self.driver.switchTo().frame(0);
# WebElement pageNumber = self.driver.findElement(By.xpath("#span[@id='pageNumber']"));
# self.assertEqual((pageNumber.getText().trim(), equalTo("1"));
# self.driver.switchTo().defaultContent().switchTo().frame(1);
# pageNumber = self.driver.findElement(By.xpath("#span[@id='pageNumber']"));
# self.assertEqual((pageNumber.getText().trim(), equalTo("2"));
#Need to implement this decorator
#@NeedsFreshDriver
#def testSouldDoNothingIfThereIsNothingToGoBackTo() {
# String originalTitle = self.driver.getTitle();
# self.driver.get(pages.formPage);
# self.driver.back();
# We may have returned to the browser's home page
# self.assertEqual(self.driver.title, anyOf(equalTo(originalTitle), equalTo("We Leave From Here")));
def testShouldBeAbleToNavigateBackInTheBrowserHistory(self):
self._loadPage("formPage")
self.driver.find_element(by=By.ID, value="imageButton").submit()
self.assertEqual(self.driver.title, "We Arrive Here")
self.driver.back()
self.assertEqual(self.driver.title, "We Leave From Here")
def testShouldBeAbleToNavigateBackInTheBrowserHistoryInPresenceOfIframes(self):
self._loadPage("xhtmlTest")
        self.driver.find_element(by=By.NAME, value="sameWindow").click()
self.assertEqual(self.driver.title, "This page has iframes")
self.driver.back()
self.assertEqual(self.driver.title, "XHTML Test Page")
def testShouldBeAbleToNavigateForwardsInTheBrowserHistory(self):
self._loadPage("formPage")
self.driver.find_element(by=By.ID, value="imageButton").submit()
self.assertEqual(self.driver.title, "We Arrive Here")
self.driver.back()
self.assertEqual(self.driver.title, "We Leave From Here")
self.driver.forward()
self.assertEqual(self.driver.title, "We Arrive Here")
@pytest.mark.ignore_ie
def testShouldNotHangifDocumentOpenCallIsNeverFollowedByDocumentCloseCall(self):
self._loadPage("document_write_in_onload")
self.driver.find_element(By.XPATH, "//body")
def testShouldBeAbleToRefreshAPage(self):
self._loadPage("xhtmlTest")
self.driver.refresh()
self.assertEqual(self.driver.title, "XHTML Test Page")
def _pageURL(self, name):
return self.webserver.where_is(name + '.html')
def _loadSimplePage(self):
self._loadPage("simpleTest")
def _loadPage(self, name):
self.driver.get(self._pageURL(name))
|
# gLifestream Copyright (C) 2009, 2010, 2014, 2015 Wojciech Polak
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
import feedparser
from glifestream.utils import httpclient
from glifestream.utils.time import mtime, now
from glifestream.utils.html import strip_script
from glifestream.stream.models import Entry
from glifestream.stream import media
class API:
name = 'Webfeed API'
limit_sec = 3600
fetch_only = False
payload = None
def __init__(self, service, verbose=0, force_overwrite=False):
self.service = service
self.verbose = verbose
self.force_overwrite = force_overwrite
if self.verbose:
print('%s: %s' % (self.name, self.service))
def get_urls(self):
return (self.service.url,)
def run(self):
for url in self.get_urls():
try:
self.fetch(url)
except Exception:
pass
def fetch(self, url):
self.fp_error = False
if not self.payload:
try:
hs = httpclient.gen_auth(self.service)
r = httpclient.get(url, auth=hs)
alturl = httpclient.get_alturl_if_html(r)
if alturl:
r = httpclient.get(alturl, auth=hs)
self.fp = feedparser.parse(r.text)
self.fp.etag = r.headers.get('etag')
self.fp.modified = r.headers.get('last-modified')
except (IOError, httpclient.HTTPError) as e:
self.fp_error = True
if self.verbose:
# pylint: disable=no-member
error = e.message if hasattr(e, 'message') else ''
print('%s (%d) HTTPError: %s' % (self.service.api,
self.service.id,
error))
return
else:
self.fp = feedparser.parse(self.payload)
if hasattr(self.fp, 'bozo') and self.fp.bozo:
self.fp_error = True
if isinstance(self.fp.bozo_exception,
feedparser.CharacterEncodingOverride):
self.fp_error = False
if self.verbose:
print('%s (%d) Bozo: %s' % (self.service.api,
self.service.id, self.fp))
if not self.fp_error:
self.service.etag = self.fp.get('etag', '')
if self.service.etag is None:
self.service.etag = ''
try:
self.service.last_modified = mtime(self.fp.modified)
except Exception:
pass
self.service.last_checked = now()
if not self.service.link:
self.service.link = self.fp.feed.get('link', '')
self.service.save()
if not self.fetch_only:
self.process()
def process(self):
for ent in self.fp.entries:
guid = ent.id if 'id' in ent else ent.link
if self.verbose:
print('ID: %s' % guid)
try:
e = Entry.objects.get(service=self.service, guid=guid)
if not self.force_overwrite and 'updated_parsed' in ent:
if e.date_updated and \
mtime(ent.updated_parsed) <= e.date_updated:
continue
if e.protected:
continue
except Entry.DoesNotExist:
e = Entry(service=self.service, guid=guid)
e.title = ent.title
e.link = ent.get('feedburner_origlink', ent.get('link', ''))
if 'author_detail' in ent:
e.author_name = ent.author_detail.get('name', '')
e.author_email = ent.author_detail.get('email', '')
e.author_uri = ent.author_detail.get('href', '')
else:
e.author_name = ent.get('author', ent.get('creator', ''))
if not e.author_name and 'author_detail' in self.fp.feed:
e.author_name = self.fp.feed.author_detail.get('name', '')
e.author_email = self.fp.feed.author_detail.get(
'email', '')
e.author_uri = self.fp.feed.author_detail.get('href', '')
try:
e.content = ent.content[0].value
except Exception:
e.content = ent.get('summary', ent.get('description', ''))
if 'published_parsed' in ent:
e.date_published = mtime(ent.published_parsed)
elif 'updated_parsed' in ent:
e.date_published = mtime(ent.updated_parsed)
if 'updated_parsed' in ent:
e.date_updated = mtime(ent.updated_parsed)
if 'geo_lat' in ent and 'geo_long' in ent:
e.geolat = ent.geo_lat
e.geolng = ent.geo_long
elif 'georss_point' in ent:
geo = ent['georss_point'].split(' ')
e.geolat = geo[0]
e.geolng = geo[1]
if 'image' in self.fp.feed:
e.link_image = media.save_image(self.fp.feed.image.url)
else:
for link in ent.links:
if link.rel == 'image' or link.rel == 'photo':
e.link_image = media.save_image(link.href)
if hasattr(self, 'custom_process'):
self.custom_process(e, ent) # pylint: disable=no-member
if hasattr(e, 'custom_mblob'):
e.mblob = e.custom_mblob
else:
e.mblob = None
mblob = media.mrss_init(e.mblob)
if 'media_content' in ent:
mblob['content'].append(ent.media_content)
e.mblob = media.mrss_gen_json(mblob)
e.content = strip_script(e.content)
try:
e.save()
media.extract_and_register(e)
except Exception:
pass
def filter_title(entry):
return entry.title
def filter_content(entry):
return entry.content
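# Usage sketch (assumption; `service` stands for a glifestream Service model
# whose url field points at an RSS/Atom feed):
#   api = API(service, verbose=1)
#   api.run()   # fetch every URL from get_urls() and store new/updated entries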
|
# CubETL
# Copyright (c) 2013-2019 Jose Juan Montes
# This is a CubETL example
# See: https://github.com/jjmontesl/cubetl
import datetime
from cubetl import text, flow, fs, script, olap, pcaxis, table, util
from cubetl.cubes import cubes10
from cubetl.olap import sqlschema
from cubetl.olap.sql import TableMapper
from cubetl.sql import sql, schemaimport
from cubetl.table import cache
from cubetl.util import log
from cubetl.sdmx import sdmx
from cubetl.sql.sql import SQLTable, SQLColumn
def cubetl_config(ctx):
# Input database connection
ctx.add('example.sql.connection',
sql.Connection(url='sqlite:///Chinook_Sqlite.sqlite'))
# Read database schema
schemaimport.DBToSQL.db2sql(ctx, ctx.get("example.sql.connection"))
# Add output database and schema
ctx.add('example.sql.connection_out',
sql.Connection(url='sqlite:///chinook-aggregated.sqlite3'))
ctx.add('example.agg.table', SQLTable(
name='example_aggregates',
label='Album Sales',
connection=ctx.get('example.sql.connection_out'),
columns=[
SQLColumn(name='album_id', type='Integer', pk=True, label='AlbumId'),
SQLColumn(name='album_title', type='String', label='Title'),
SQLColumn(name='total_sales', type='Float', label='Sales')]))
# Process
ctx.add('example.process', flow.Chain(steps=[
sql.Transaction(connection=ctx.get('example.sql.connection_out')),
# Query album sales
sql.Query(connection=ctx.get('example.sql.connection'),
query="""
select Album.AlbumId as album_id,
Album.Title as album_title,
sum(InvoiceLine.UnitPrice * InvoiceLine.Quantity) as total_sales,
sum(InvoiceLine.Quantity) as total_count
from InvoiceLine
join Track on InvoiceLine.TrackId = Track.TrackId
join Album on Track.AlbumId = Album.AlbumId
group by Album.AlbumId
"""),
util.Print(),
sql.StoreRow(sqltable=ctx.get('example.agg.table'), store_mode=sql.SQLTable.STORE_MODE_UPSERT),
log.LogPerformance(),
]))
|
# Copyright 2016 - 2020 Ternaris.
# SPDX-License-Identifier: AGPL-3.0-only
# pylint: disable=blacklisted-name,unused-variable,unused-argument,too-few-public-methods
import marv_api as marv
from marv_api.dag import Model
from marv_api.decorators import getdag
@marv.node()
def source1():
yield # pragma: nocoverage
@marv.node()
def source2():
yield # pragma: nocoverage
@marv.node()
@marv.input('foo', type=int)
@marv.input('stream', default=source1)
def consumer(foo, stream):
yield # pragma: nocoverage
class Foo(Model):
xyz: int
class Bar(Model):
xyz: int
def test_hashable():
assert hash(Foo(xyz=1)) != hash(Bar(xyz=1))
assert hash(getdag(source1)) != hash(getdag(source2))
assert hash(getdag(consumer)) != hash(consumer.clone(foo=1))
assert hash(consumer.clone(foo=1)) == hash(consumer.clone(foo=1))
assert hash(getdag(consumer)) != hash(consumer.clone(stream=source2))
assert hash(consumer.clone(stream=source2)) == hash(consumer.clone(stream=source2))
assert hash(consumer.clone(stream=marv.select(source2, name='foo'))) != \
hash(consumer.clone(stream=marv.select(source2, name='bar')))
assert hash(consumer.clone(stream=marv.select(source2, name='foo'))) == \
hash(consumer.clone(stream=marv.select(source2, name='foo')))
|
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import authenticate, login, logout
from django.http import HttpResponseRedirect
from django.utils.translation import ugettext_lazy as _lazy
from kitsune.sumo.urlresolvers import reverse
class TokenLoginMiddleware(object):
"""Allows users to be logged in via one time tokens."""
def process_request(self, request):
try:
auth = request.GET.get('auth')
except IOError:
# Django can throw an IOError when trying to read the GET
# data.
return
if auth is None or (request.user and request.user.is_authenticated()):
return
user = authenticate(auth=auth)
if user and user.is_active:
login(request, user)
msg = _lazy(u'You have been automatically logged in.')
messages.success(request, msg)
class LogoutDeactivatedUsersMiddleware(object):
"""Verifies that user.is_active == True.
If a user has been deactivated, we log them out.
If a user isn't active but is in the AAQ process, we let them be.
"""
def process_request(self, request):
user = request.user
if (user.is_authenticated() and not user.is_active and
not request.session.get('in-aaq', False)):
# The user is auth'd, not active and not in AAQ. /KICK
logout(request)
res = HttpResponseRedirect(reverse('home'))
res.delete_cookie(settings.SESSION_EXISTS_COOKIE)
return res
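# Wiring sketch (assumption; the dotted path below is illustrative, not
# necessarily the project's real setting):
#   MIDDLEWARE_CLASSES = (
#       'kitsune.users.middleware.TokenLoginMiddleware',
#       ...
#   )
# A request such as /questions/?auth=<one-time-token> would then log the user
# in automatically, provided an authentication backend accepts the token.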
|
#!/usr/bin/env python
# Copyright 2013 The Mozilla Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import gzip
import inspect
import sys
from urllib import urlretrieve
from wsgiref.simple_server import make_server
import nose
from webob import Request
from geodude import load_geodude
parser = argparse.ArgumentParser(description=globals()['__doc__'],
formatter_class=argparse.RawTextHelpFormatter)
subparsers = parser.add_subparsers(title='Commands')
def command(func):
"""
Decorator that turns a function into a sub-command.
The command will be named after the function, and the help text will be
taken from the docstring. Command arguments are automatically set up based
on the function arguments.
"""
cmd_parser = subparsers.add_parser(func.__name__, help=func.__doc__)
cmd_parser.set_defaults(func=func) # Set which function this command runs.
# Inspect the function arguments and create them on the parser.
spec = inspect.getargspec(func)
for idx, arg in enumerate(spec.args):
try:
# First try treating this is a kwarg.
default_index = idx - (len(spec.args) - len(spec.defaults))
cmd_parser.add_argument(arg, default=spec.defaults[default_index],
nargs='?')
except (TypeError, IndexError):
# Required, positional argument.
cmd_parser.add_argument(arg)
return func
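# For example (a sketch, assuming this script is invoked as manage.py):
#   python manage.py runserver           # optional argument, defaults to port 8000
#   python manage.py test_ip 127.0.0.1   # required positional argument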
@command
def runserver(port=8000):
"""Run a development instance of the geodude server."""
application = load_geodude()
server = make_server('', int(port), application)
print 'Serving HTTP on port {0}...'.format(port)
try:
server.serve_forever()
except KeyboardInterrupt:
print 'Exiting server...'
@command
def test_ip(ip_address, path='/country.js'):
"""Run a mock request against the service."""
application = load_geodude()
request = Request.blank(path, remote_addr=ip_address)
response = request.get_response(application)
print response.status
for header in response.headers:
print header, ':', response.headers[header]
print '\n', response.body
@command
def download_db():
"""Download MaxMind's free GeoLite Country database."""
urlretrieve('http://geolite.maxmind.com/download/geoip/database/'
'GeoLiteCountry/GeoIP.dat.gz', 'GeoIP.dat.gz')
# `with` doesn't work with GzipFiles in Python 2.6. :(
infile = gzip.open('GeoIP.dat.gz')
with open('GeoIP.dat', 'w+b') as outfile:
outfile.write(infile.read())
infile.close()
@command
def test():
"""Run the test suite."""
argv = sys.argv
argv.pop(1)
nose.main(argv=argv)
def main():
"""Parses command-line arguments and delegates to the specified command."""
args = vars(parser.parse_args())
func = args.pop('func')
func(**args)
if __name__ == '__main__':
main()
|
from __future__ import division
import numpy as np
from sklearn import linear_model
from sklearn.cross_validation import KFold
from sklearn import preprocessing as prep
from sklearn.metrics import r2_score
from scipy.stats import pearsonr
from collections import namedtuple
import warnings
from sklearn.utils import ConvergenceWarning
Md = namedtuple('Md', ['model', 'idx', 'cor', 'r2'])
def LassoSelector(x, y, cv, njob):
cor_score = lambda x, y: pearsonr(x, y)[0]
lr = linear_model.LinearRegression(n_jobs=njob)
skf = KFold(len(y), n_folds=cv)
model = linear_model.LassoLarsCV(fit_intercept=False, cv=cv, n_jobs=njob)
with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
warnings.simplefilter('ignore', ConvergenceWarning)
model.fit(x, y)
columns = np.arange(x.shape[1])[model.coef_ != 0]
mdl_eval = lambda func: lambda idx_tr, idx_te: func(y[idx_te], lr.fit(x[idx_tr][:,columns], y[idx_tr]).predict(x[idx_te][:,columns]))
res_eval = lambda func: np.average(map(mdl_eval(func), *zip(*[(idx_tr, idx_te) for idx_tr, idx_te in skf])))
l1r2 = res_eval(r2_score)
l1cor = res_eval(cor_score)
lr.fit(x[:,columns], y)
return Md(model=lr, idx=columns, cor=l1cor, r2=l1r2)
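# Usage sketch (assumption; shapes are illustrative only):
#   x = np.random.randn(100, 20)
#   y = 2.0 * x[:, 3] + 0.1 * np.random.randn(100)
#   md = LassoSelector(x, y, cv=5, njob=1)
#   md.idx   # columns kept by LassoLarsCV (non-zero coefficients)
#   md.r2    # cross-validated R^2 of the refit plain linear model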
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/home/maarten/programming/subdownloader_old/scripts/gui/ui/uploadWidget.ui'
#
# Created by: PyQt5 UI code generator 5.13.0
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_UploadWidget(object):
def setupUi(self, UploadWidget):
UploadWidget.setObjectName("UploadWidget")
UploadWidget.resize(935, 725)
self.verticalLayout_2 = QtWidgets.QVBoxLayout(UploadWidget)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.groupBox_2 = QtWidgets.QGroupBox(UploadWidget)
self.groupBox_2.setEnabled(True)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.MinimumExpanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.groupBox_2.sizePolicy().hasHeightForWidth())
self.groupBox_2.setSizePolicy(sizePolicy)
self.groupBox_2.setLayoutDirection(QtCore.Qt.LeftToRight)
self.groupBox_2.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.groupBox_2.setObjectName("groupBox_2")
self.verticalLayout = QtWidgets.QVBoxLayout(self.groupBox_2)
self.verticalLayout.setContentsMargins(-1, 1, -1, 1)
self.verticalLayout.setSpacing(0)
self.verticalLayout.setObjectName("verticalLayout")
self.horizontalLayout_5 = QtWidgets.QHBoxLayout()
self.horizontalLayout_5.setObjectName("horizontalLayout_5")
self.buttonUploadBrowseFolder = QtWidgets.QToolButton(self.groupBox_2)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(":/images/openfolder.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.buttonUploadBrowseFolder.setIcon(icon)
self.buttonUploadBrowseFolder.setIconSize(QtCore.QSize(24, 24))
self.buttonUploadBrowseFolder.setObjectName("buttonUploadBrowseFolder")
self.horizontalLayout_5.addWidget(self.buttonUploadBrowseFolder)
self.line_3 = QtWidgets.QFrame(self.groupBox_2)
self.line_3.setFrameShape(QtWidgets.QFrame.VLine)
self.line_3.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_3.setObjectName("line_3")
self.horizontalLayout_5.addWidget(self.line_3)
self.buttonUploadPlusRow = QtWidgets.QToolButton(self.groupBox_2)
self.buttonUploadPlusRow.setEnabled(True)
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap(":/images/plus.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.buttonUploadPlusRow.setIcon(icon1)
self.buttonUploadPlusRow.setIconSize(QtCore.QSize(24, 24))
self.buttonUploadPlusRow.setObjectName("buttonUploadPlusRow")
self.horizontalLayout_5.addWidget(self.buttonUploadPlusRow)
self.buttonUploadMinusRow = QtWidgets.QToolButton(self.groupBox_2)
self.buttonUploadMinusRow.setEnabled(False)
icon2 = QtGui.QIcon()
icon2.addPixmap(QtGui.QPixmap(":/images/minus.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.buttonUploadMinusRow.setIcon(icon2)
self.buttonUploadMinusRow.setIconSize(QtCore.QSize(24, 24))
self.buttonUploadMinusRow.setObjectName("buttonUploadMinusRow")
self.horizontalLayout_5.addWidget(self.buttonUploadMinusRow)
self.buttonUploadDeleteAllRow = QtWidgets.QToolButton(self.groupBox_2)
self.buttonUploadDeleteAllRow.setEnabled(True)
icon3 = QtGui.QIcon()
icon3.addPixmap(QtGui.QPixmap(":/images/delete_all.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.buttonUploadDeleteAllRow.setIcon(icon3)
self.buttonUploadDeleteAllRow.setIconSize(QtCore.QSize(24, 24))
self.buttonUploadDeleteAllRow.setObjectName("buttonUploadDeleteAllRow")
self.horizontalLayout_5.addWidget(self.buttonUploadDeleteAllRow)
self.line_2 = QtWidgets.QFrame(self.groupBox_2)
self.line_2.setFrameShape(QtWidgets.QFrame.VLine)
self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_2.setObjectName("line_2")
self.horizontalLayout_5.addWidget(self.line_2)
self.buttonUploadUpRow = QtWidgets.QToolButton(self.groupBox_2)
self.buttonUploadUpRow.setEnabled(False)
icon4 = QtGui.QIcon()
icon4.addPixmap(QtGui.QPixmap(":/images/up.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.buttonUploadUpRow.setIcon(icon4)
self.buttonUploadUpRow.setIconSize(QtCore.QSize(24, 24))
self.buttonUploadUpRow.setObjectName("buttonUploadUpRow")
self.horizontalLayout_5.addWidget(self.buttonUploadUpRow)
self.buttonUploadDownRow = QtWidgets.QToolButton(self.groupBox_2)
self.buttonUploadDownRow.setEnabled(False)
icon5 = QtGui.QIcon()
icon5.addPixmap(QtGui.QPixmap(":/images/down.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.buttonUploadDownRow.setIcon(icon5)
self.buttonUploadDownRow.setIconSize(QtCore.QSize(24, 24))
self.buttonUploadDownRow.setObjectName("buttonUploadDownRow")
self.horizontalLayout_5.addWidget(self.buttonUploadDownRow)
spacerItem = QtWidgets.QSpacerItem(401, 33, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_5.addItem(spacerItem)
self.verticalLayout.addLayout(self.horizontalLayout_5)
self.uploadView = UploadListView(self.groupBox_2)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.MinimumExpanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.uploadView.sizePolicy().hasHeightForWidth())
self.uploadView.setSizePolicy(sizePolicy)
self.uploadView.setMinimumSize(QtCore.QSize(0, 0))
self.uploadView.setAutoScrollMargin(16)
self.uploadView.setObjectName("uploadView")
self.verticalLayout.addWidget(self.uploadView)
self.uploadDetailsWidget = QtWidgets.QWidget(self.groupBox_2)
self.uploadDetailsWidget.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.uploadDetailsWidget.setObjectName("uploadDetailsWidget")
self.verticalLayout_7 = QtWidgets.QVBoxLayout(self.uploadDetailsWidget)
self.verticalLayout_7.setObjectName("verticalLayout_7")
self.gridLayout = QtWidgets.QGridLayout()
self.gridLayout.setVerticalSpacing(0)
self.gridLayout.setObjectName("gridLayout")
self.labelReleaseName = QtWidgets.QLabel(self.uploadDetailsWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.labelReleaseName.sizePolicy().hasHeightForWidth())
self.labelReleaseName.setSizePolicy(sizePolicy)
self.labelReleaseName.setObjectName("labelReleaseName")
self.gridLayout.addWidget(self.labelReleaseName, 0, 0, 1, 1)
self.comboAutomaticTranslation = QtWidgets.QCheckBox(self.uploadDetailsWidget)
self.comboAutomaticTranslation.setText("")
self.comboAutomaticTranslation.setObjectName("comboAutomaticTranslation")
self.gridLayout.addWidget(self.comboAutomaticTranslation, 7, 2, 1, 1)
self.labelTranslator = QtWidgets.QLabel(self.uploadDetailsWidget)
self.labelTranslator.setObjectName("labelTranslator")
self.gridLayout.addWidget(self.labelTranslator, 9, 0, 1, 1)
self.comboHearingImpaired = QtWidgets.QCheckBox(self.uploadDetailsWidget)
self.comboHearingImpaired.setText("")
self.comboHearingImpaired.setObjectName("comboHearingImpaired")
self.gridLayout.addWidget(self.comboHearingImpaired, 6, 2, 1, 1)
self.horizontalLayout_12 = QtWidgets.QHBoxLayout()
self.horizontalLayout_12.setObjectName("horizontalLayout_12")
self.uploadLanguages = LanguageComboBox(self.uploadDetailsWidget)
self.uploadLanguages.setObjectName("uploadLanguages")
self.horizontalLayout_12.addWidget(self.uploadLanguages)
spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_12.addItem(spacerItem1)
self.gridLayout.addLayout(self.horizontalLayout_12, 2, 2, 1, 2)
self.labelHearingImpaired = QtWidgets.QLabel(self.uploadDetailsWidget)
self.labelHearingImpaired.setObjectName("labelHearingImpaired")
self.gridLayout.addWidget(self.labelHearingImpaired, 6, 0, 1, 1)
self.label = QtWidgets.QLabel(self.uploadDetailsWidget)
self.label.setObjectName("label")
self.gridLayout.addWidget(self.label, 12, 0, 1, 1)
self.buttonUploadFindIMDB = QtWidgets.QPushButton(self.uploadDetailsWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.buttonUploadFindIMDB.sizePolicy().hasHeightForWidth())
self.buttonUploadFindIMDB.setSizePolicy(sizePolicy)
self.buttonUploadFindIMDB.setMinimumSize(QtCore.QSize(0, 0))
self.buttonUploadFindIMDB.setMaximumSize(QtCore.QSize(120, 16777215))
icon6 = QtGui.QIcon()
icon6.addPixmap(QtGui.QPixmap(":/images/search.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.buttonUploadFindIMDB.setIcon(icon6)
self.buttonUploadFindIMDB.setObjectName("buttonUploadFindIMDB")
self.gridLayout.addWidget(self.buttonUploadFindIMDB, 1, 3, 1, 1)
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.gridLayout.addLayout(self.horizontalLayout_2, 13, 0, 1, 4)
self.labelMovieTitle = QtWidgets.QLabel(self.uploadDetailsWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.labelMovieTitle.sizePolicy().hasHeightForWidth())
self.labelMovieTitle.setSizePolicy(sizePolicy)
self.labelMovieTitle.setObjectName("labelMovieTitle")
self.gridLayout.addWidget(self.labelMovieTitle, 1, 0, 1, 1)
self.buttonUpload = QtWidgets.QPushButton(self.uploadDetailsWidget)
self.buttonUpload.setEnabled(True)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.buttonUpload.setFont(font)
icon7 = QtGui.QIcon()
icon7.addPixmap(QtGui.QPixmap(":/images/upload.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.buttonUpload.setIcon(icon7)
self.buttonUpload.setIconSize(QtCore.QSize(24, 24))
self.buttonUpload.setObjectName("buttonUpload")
self.gridLayout.addWidget(self.buttonUpload, 12, 3, 1, 1)
self.uploadTranslator = QtWidgets.QLineEdit(self.uploadDetailsWidget)
self.uploadTranslator.setObjectName("uploadTranslator")
self.gridLayout.addWidget(self.uploadTranslator, 9, 2, 1, 2)
self.labelSubtitleLanguage = QtWidgets.QLabel(self.uploadDetailsWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.labelSubtitleLanguage.sizePolicy().hasHeightForWidth())
self.labelSubtitleLanguage.setSizePolicy(sizePolicy)
self.labelSubtitleLanguage.setObjectName("labelSubtitleLanguage")
self.gridLayout.addWidget(self.labelSubtitleLanguage, 2, 0, 1, 1)
self.comboHighDefinition = QtWidgets.QCheckBox(self.uploadDetailsWidget)
self.comboHighDefinition.setText("")
self.comboHighDefinition.setObjectName("comboHighDefinition")
self.gridLayout.addWidget(self.comboHighDefinition, 5, 2, 1, 1)
self.uploadReleaseText = QtWidgets.QLineEdit(self.uploadDetailsWidget)
self.uploadReleaseText.setObjectName("uploadReleaseText")
self.gridLayout.addWidget(self.uploadReleaseText, 0, 2, 1, 2)
self.uploadComments = QtWidgets.QTextEdit(self.uploadDetailsWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.uploadComments.sizePolicy().hasHeightForWidth())
self.uploadComments.setSizePolicy(sizePolicy)
self.uploadComments.setMaximumSize(QtCore.QSize(16777215, 50))
self.uploadComments.setObjectName("uploadComments")
self.gridLayout.addWidget(self.uploadComments, 11, 2, 1, 2)
self.uploadIMDB = QtWidgets.QComboBox(self.uploadDetailsWidget)
self.uploadIMDB.setObjectName("uploadIMDB")
self.uploadIMDB.addItem("")
self.gridLayout.addWidget(self.uploadIMDB, 1, 2, 1, 1)
self.labelAutomaticTranslation = QtWidgets.QLabel(self.uploadDetailsWidget)
self.labelAutomaticTranslation.setObjectName("labelAutomaticTranslation")
self.gridLayout.addWidget(self.labelAutomaticTranslation, 7, 0, 1, 1)
self.labelHighDefinition = QtWidgets.QLabel(self.uploadDetailsWidget)
self.labelHighDefinition.setObjectName("labelHighDefinition")
self.gridLayout.addWidget(self.labelHighDefinition, 5, 0, 1, 1)
self.labelComments = QtWidgets.QLabel(self.uploadDetailsWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.labelComments.sizePolicy().hasHeightForWidth())
self.labelComments.setSizePolicy(sizePolicy)
self.labelComments.setObjectName("labelComments")
self.gridLayout.addWidget(self.labelComments, 11, 0, 1, 1)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.comboProvider = ProviderComboBox(self.uploadDetailsWidget)
self.comboProvider.setObjectName("comboProvider")
self.horizontalLayout.addWidget(self.comboProvider)
spacerItem2 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem2)
self.gridLayout.addLayout(self.horizontalLayout, 12, 2, 1, 1)
self.verticalLayout_7.addLayout(self.gridLayout)
self.verticalLayout.addWidget(self.uploadDetailsWidget)
self.verticalLayout_2.addWidget(self.groupBox_2)
self.retranslateUi(UploadWidget)
QtCore.QMetaObject.connectSlotsByName(UploadWidget)
def retranslateUi(self, UploadWidget):
_translate = QtCore.QCoreApplication.translate
UploadWidget.setWindowTitle(_("Form"))
self.groupBox_2.setTitle(_("Select the videos and subtitles (only subtitles will be uploaded):"))
self.labelReleaseName.setText(_("Release name:"))
self.labelTranslator.setText(_("Translator"))
self.labelHearingImpaired.setText(_("Hearing impaired"))
self.label.setText(_("Provider:"))
self.buttonUploadFindIMDB.setText(_("Find"))
self.labelMovieTitle.setText(_("Movie Title:"))
self.buttonUpload.setText(_("Upload"))
self.labelSubtitleLanguage.setText(_("Subtitle Language:"))
self.uploadIMDB.setItemText(0, _("Click on the Find button to identify the movie"))
self.labelAutomaticTranslation.setText(_("Automatic translation"))
self.labelHighDefinition.setText(_("High Definition"))
self.labelComments.setText(_("Comments:"))
from subdownloader.client.gui.views.language import LanguageComboBox
from subdownloader.client.gui.views.provider import ProviderComboBox
from subdownloader.client.gui.views.upload import UploadListView
|
"""Test game scheduler"""
import asyncio
import numpy as np
import numpy.random as rand
import pytest
from gameanalysis import gamegen
from gameanalysis import rsgame
from egta import gamesched
@pytest.mark.asyncio
async def test_basic_profile():
"""Test basic profile"""
game = gamegen.game([4, 3], [3, 4])
profs = game.random_profiles(20)
sched = gamesched.gamesched(game)
assert rsgame.empty_copy(sched) == rsgame.empty_copy(game)
paylist = await asyncio.gather(*[sched.sample_payoffs(p) for p in profs])
pays = np.stack(paylist)
assert np.allclose(pays[profs == 0], 0)
assert str(sched) == repr(game)
@pytest.mark.asyncio
async def test_basic_profile_sample():
"""Test basic profile in sample game"""
sgame = gamegen.samplegame([4, 3], [3, 4])
profs = sgame.random_profiles(20)
sched = gamesched.samplegamesched(sgame)
assert rsgame.empty_copy(sched) == rsgame.empty_copy(sgame)
paylist = await asyncio.gather(*[sched.sample_payoffs(p) for p in profs])
pays = np.stack(paylist)
assert np.allclose(pays[profs == 0], 0)
assert str(sched) == repr(sgame)
@pytest.mark.asyncio
async def test_duplicate_profile_sample():
"""Test duplicate profile in sample game"""
sgame = gamegen.samplegame([4, 3], [3, 4], 0)
profs = sgame.random_profiles(20)
sched = gamesched.samplegamesched(sgame)
paylist1 = await asyncio.gather(*[sched.sample_payoffs(p) for p in profs])
pays1 = np.stack(paylist1)
paylist2 = await asyncio.gather(*[sched.sample_payoffs(p) for p in profs])
pays2 = np.stack(paylist2)
assert np.allclose(pays1[profs == 0], 0)
assert np.allclose(pays2[profs == 0], 0)
assert np.allclose(pays1, pays2)
@pytest.mark.asyncio
async def test_basic_profile_aggfn():
"""Test using an action graph game"""
agame = gamegen.normal_aggfn([4, 3], [3, 4], 5)
profs = agame.random_profiles(20)
sched = gamesched.gamesched(agame)
paylist = await asyncio.gather(*[sched.sample_payoffs(p) for p in profs])
pays = np.stack(paylist)
assert np.allclose(pays[profs == 0], 0)
@pytest.mark.asyncio
async def test_noise_profile():
"""Test adding noise"""
sgame = gamegen.samplegame([4, 3], [3, 4])
profs = sgame.random_profiles(20)
sched = gamesched.samplegamesched(
sgame, lambda w: rand.normal(0, w, sgame.num_strats), lambda: (rand.random(),)
)
paylist = await asyncio.gather(*[sched.sample_payoffs(p) for p in profs])
pays = np.stack(paylist)
assert np.allclose(pays[profs == 0], 0)
@pytest.mark.asyncio
async def test_duplicate_prof():
"""Test that duplicate profiles can be scheduled"""
game = gamegen.game([4, 3], [3, 4])
profs = game.random_profiles(20)
sched = gamesched.gamesched(game)
paylist1 = await asyncio.gather(*[sched.sample_payoffs(p) for p in profs])
pays1 = np.stack(paylist1)
paylist2 = await asyncio.gather(*[sched.sample_payoffs(p) for p in profs])
pays2 = np.stack(paylist2)
assert np.allclose(pays1[profs == 0], 0)
assert np.allclose(pays2[profs == 0], 0)
assert np.allclose(pays1, pays2)
|
# -*- encoding: utf-8 -*-
# Yuuno - IPython + VapourSynth
# Copyright (C) 2017 StuxCrystal (Roland Netzsch <[email protected]>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import ast
import time
import hashlib
import linecache
from typing import Callable
from yuuno.yuuno import Yuuno
EXECUTE_CODE_LINENO = 0
RESULT_VAR = '_yuuno_exec_last_'
def _code_name(code, file, number=0):
hash_digest = hashlib.sha1(code.encode("utf-8")).hexdigest()
return f"<{file}-{number}-{hash_digest[:12]}>"
def compile_with_cache(ipython, code, ast, file, symbol):
# Increment the cache name.
global EXECUTE_CODE_LINENO
exec_no = EXECUTE_CODE_LINENO
EXECUTE_CODE_LINENO += 1
# Directly drop the fake python file into the cache.
name = _code_name(code, file, exec_no)
entry = (len(code), time.time(), [line + '\n' for line in code.splitlines()], name)
linecache.cache[name] = entry
if hasattr(linecache, '_ipython_cache'):
linecache._ipython_cache[name] = entry
# Compile the code
return ipython.compile(ast, name, symbol)
def execute_code(expr, file, fail_on_error=True, ns=None):
ipy = Yuuno.instance().environment.ipython
expr = ipy.input_transformer_manager.transform_cell(expr)
expr_ast = ipy.compile.ast_parse(expr)
expr_ast = ipy.transform_ast(expr_ast)
if len(expr_ast.body) == 0:
# There is no code to execute.
# Take the fast path and skip executing.
return None
elif isinstance(expr_ast.body[-1], ast.Expr):
last_expr = expr_ast.body[-1]
assign = ast.Assign( # _yuuno_exec_last_ = <LAST_EXPR>
targets=[ast.Name(
id=RESULT_VAR,
ctx=ast.Store()
)],
value=last_expr.value
)
expr_ast.body[-1] = assign
else:
assign = ast.Assign( # _yuuno_exec_last_ = None
targets=[ast.Name(
id=RESULT_VAR,
ctx=ast.Store(),
)],
value=ast.NameConstant(
value=None
)
)
expr_ast.body.append(assign)
ast.fix_missing_locations(expr_ast)
code = compile_with_cache(ipy, expr, expr_ast, file, "exec")
if ns is None:
ns = ipy.user_ns
try:
exec(code, ipy.user_ns, ns)
result = ipy.user_ns.get(RESULT_VAR, None)
finally:
ns.pop(RESULT_VAR, None)
return result
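# Usage sketch (assumption; requires a Yuuno-managed IPython environment):
#   result = execute_code("a = 1\na + 1", "<yuuno-cell>")
#   # result would be 2: the trailing expression is captured through the
#   # injected _yuuno_exec_last_ assignment above.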
|
#!/usr/bin/env python
""" Stream-based publishing and subscribing """
__author__ = 'Luke Campbell <[email protected]>, Michael Meisinger'
import gevent
from pyon.core.bootstrap import get_sys_name, CFG
from pyon.core.exception import BadRequest
from pyon.net.endpoint import Publisher, Subscriber
from pyon.ion.identifier import create_simple_unique_id
from pyon.ion.service import BaseService
from pyon.util.log import log
from interface.objects import StreamRoute
DEFAULT_SYSTEM_XS = "system"
DEFAULT_DATA_XP = "data"
class StreamPublisher(Publisher):
"""
Publishes outgoing messages on "streams", while setting proper message headers.
"""
def __init__(self, process, stream, **kwargs):
"""
Creates a StreamPublisher which publishes to the specified stream
and is attached to the specified process.
@param process The IonProcess to attach to.
@param stream Name of the stream or StreamRoute object
"""
super(StreamPublisher, self).__init__()
if not isinstance(process, BaseService):
raise BadRequest("No valid process provided.")
if isinstance(stream, basestring):
self.stream_route = StreamRoute(routing_key=stream)
elif isinstance(stream, StreamRoute):
self.stream_route = stream
else:
raise BadRequest("No valid stream information provided.")
self.container = process.container
self.xp_name = get_streaming_xp(self.stream_route.exchange_point) # Fully qualified
self.xp = self.container.ex_manager.create_xp(self.stream_route.exchange_point or DEFAULT_DATA_XP)
self.xp_route = self.xp.create_route(self.stream_route.routing_key)
Publisher.__init__(self, to_name=self.xp_route, **kwargs)
def publish(self, msg, *args, **kwargs):
"""
Encapsulates and publishes a message; the message is sent to either the specified
stream/route or the stream/route specified at instantiation
"""
pub_hdrs = self._get_publish_headers(msg, kwargs)
super(StreamPublisher, self).publish(msg, to_name=self._send_name, headers=pub_hdrs)
def _get_publish_headers(self, msg, kwargs):
headers = {}
if "headers" in kwargs:
headers.update(kwargs["headers"])
headers.update({'exchange_point': self.xp_name,
'stream': self.stream_route.routing_key})
return headers
class StreamSubscriber(Subscriber):
"""
StreamSubscriber is a subscribing class to be attached to an ION process.
The callback should accept three parameters:
      message      The incoming message
      stream_route The route the message came from
      stream_name  The identifier of the stream
"""
def __init__(self, process, exchange_name=None, stream=None, exchange_point=None, callback=None):
"""
Creates a new StreamSubscriber which will listen on the specified queue (exchange_name).
@param process The IonProcess to attach to.
@param exchange_name The subscribing queue name.
@param stream (optional) Name of the stream or StreamRoute object, to subscribe to
@param callback The callback to execute upon receipt of a packet.
"""
if not isinstance(process, BaseService):
raise BadRequest("No valid process provided.")
self.queue_name = exchange_name or ("subsc_" + create_simple_unique_id())
self.streams = []
self.container = process.container
exchange_point = exchange_point or DEFAULT_DATA_XP
self.xp_name = get_streaming_xp(exchange_point)
self.xp = self.container.ex_manager.create_xp(exchange_point)
self.xn = self.container.ex_manager.create_queue_xn(self.queue_name, xs=self.xp)
self.started = False
self.callback = callback or process.call_process
super(StreamSubscriber, self).__init__(from_name=self.xn, callback=self.preprocess)
if stream:
self.add_stream_subscription(stream)
def add_stream_subscription(self, stream):
if isinstance(stream, basestring):
stream_route = StreamRoute(routing_key=stream)
elif isinstance(stream, StreamRoute):
stream_route = stream
else:
raise BadRequest("No valid stream information provided.")
xp = self.container.ex_manager.create_xp(stream_route.exchange_point or DEFAULT_DATA_XP)
self.xn.bind(stream_route.routing_key, xp)
self.streams.append(stream_route)
def remove_stream_subscription(self, stream):
if isinstance(stream, basestring):
stream_route = StreamRoute(routing_key=stream)
elif isinstance(stream, StreamRoute):
stream_route = stream
else:
raise BadRequest("No valid stream information provided.")
existing_st = None
for st in self.streams:
if st.routing_key == stream_route.routing_key and st.exchange_point == stream_route.exchange_point:
self.streams.remove(st)
existing_st = st
break
if existing_st:
xp = get_streaming_xp(stream_route.exchange_point)
self.xn.unbind(existing_st.routing_key, xp)
else:
raise BadRequest("Stream was not a subscription")
def preprocess(self, msg, headers):
"""
        Unwraps the incoming message and calls the callback.
@param msg The incoming packet.
@param headers The headers of the incoming message.
"""
route = StreamRoute(headers['exchange_point'], headers['routing_key'])
self.callback(msg, route, headers['stream'])
def start(self):
"""
Begins consuming on the queue.
"""
if self.started:
raise BadRequest("Subscriber already started")
self.started = True
self.greenlet = gevent.spawn(self.listen)
self.greenlet._glname = "StreamSubscriber"
def stop(self):
"""
Ceases consuming on the queue.
"""
if not self.started:
raise BadRequest("Subscriber is not running.")
self.close()
self.greenlet.join(timeout=10)
self.greenlet.kill()
self.started = False
def get_streaming_xp(streaming_xp_name=None):
root_xs = CFG.get_safe("exchange.core.system_xs", DEFAULT_SYSTEM_XS)
events_xp = streaming_xp_name or CFG.get_safe("exchange.core.data_streams", DEFAULT_DATA_XP)
return "%s.%s.%s" % (get_sys_name(), root_xs, events_xp)
|
# coding: utf-8
# Copyright (c) 2013 Jorge Javier Araya Navarro <[email protected]>
#
# This file is free software: you may copy, redistribute and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This file incorporates work covered by the following copyright and
# permission notice:
#
# cocos2d
# Copyright (c) 2008-2012 Daniel Moisset, Ricardo Quesada, Rayentray Tappa,
# Lucio Torre
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of cocos2d nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
import math
import cPickle
import summa
from summa import euclid
import pyglet
from pyglet.gl import *  # GL functions and constants below are used unqualified
import copy
class Skin(summa.summanode.SummaNode):
def __init__(self, skeleton):
super(Skin, self).__init__()
self.skeleton = skeleton
class ColorSkin(Skin):
def __init__(self, skeleton, color):
super(ColorSkin, self).__init__(skeleton)
self.color = color
def draw(self):
self.skeleton.propagate_matrix()
glPushMatrix()
self.transform()
self.skeleton.visit_children( lambda bone: self.draw_bone( bone ) )
bones = self.skeleton.visit_children(
lambda bone: (bone.label, bone.parent_matrix*bone.matrix))
bones = dict(bones)
glPopMatrix()
def draw_bone(self, bone):
p1 = bone.get_start()
p2 = bone.get_end()
glColor4ub(*self.color)
glLineWidth(5)
glBegin(GL_LINES)
glVertex2f(*p1)
glVertex2f(*p2)
glEnd()
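# A skin that draws a textured image for each bone, positioned by the bone's
# accumulated transformation matrix.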
class BitmapSkin(Skin):
skin_parts = []
def __init__(self, skeleton, skin_def, alpha=255):
super(BitmapSkin, self).__init__(skeleton)
self.alpha = alpha
self.skin_parts = skin_def
self.regenerate()
def move(self, idx, dx, dy):
sp = self.skin_parts
pos = sp[idx][1]
sp[idx] = sp[idx][0], (pos[0]+dx, pos[1]+dy), sp[idx][2], \
sp[idx][3], sp[idx][4], sp[idx][5]
self.regenerate()
def get_control_points(self):
return [ (i, p[0]) for i,p in enumerate(self.skin_parts) ]
def regenerate(self):
# print self.skin_parts
self.parts = [ (name, position, scale,\
pyglet.resource.image(image,flip_y=flip_y, flip_x=flip_x)) \
for name, position, image, flip_x, flip_y, scale
in self.skin_parts ]
def draw(self):
self.skeleton.propagate_matrix()
glPushMatrix()
self.transform()
bones = self.skeleton.visit_children(
lambda bone: (bone.label, bone.parent_matrix*bone.matrix))
bones = dict(bones)
for bname, position, scale, image in self.parts:
matrix = bones[bname]
self.blit_image(matrix, position, scale, image)
glPopMatrix()
def blit_image(self, matrix, position, scale, image):
x, y = image.width*scale, image.height*scale
#dx = self.x + position[0]
#dy = self.y + position[1]
dx, dy = position
glEnable(image.target)
glBindTexture(image.target, image.id)
glPushAttrib(GL_COLOR_BUFFER_BIT)
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
# blit img
points = [
(-dx, -dy),
(x-dx, -dy),
(x-dx, y-dy),
(-dx, y-dy)
]
a,b,_,c,d,_,e,f,_,g,h,_ = image.texture.tex_coords
textures = [ a,b,c,d,e,f,g,h ]
np = [ matrix*euclid.Point2(*p) for p in points ]
glColor4ub(255,255,255,self.alpha)
glBegin(GL_QUADS)
glTexCoord2f(a,b)
glVertex2f(*np[0])
glTexCoord2f(c,d)
glVertex2f(*np[1])
glTexCoord2f(e,f)
glVertex2f(*np[2])
glTexCoord2f(g,h)
glVertex2f(*np[3])
glEnd()
glColor4ub(255,255,255,255)
#pyglet.graphics.draw(4, GL_QUADS,
# ("v2f", new_points),
# ("t2f", textures),
# ("c4B", [255,255,255,self.alpha]*4),
# )
glPopAttrib()
glDisable(image.target)
def flip(self):
nsp = []
for name, position, image, flip_x, flip_y, scale in self.skin_parts:
im = pyglet.resource.image(image,flip_y=flip_y, flip_x=flip_x)
x = im.width*scale - position[0]
y = position[1]
nsp.append( (name, (x,y), image, not flip_x, flip_y, scale))
self.skin_parts = nsp
self.regenerate()
self.skeleton = self.skeleton.flipped()
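# IntervalAction that poses the target node's skeleton from an Animation,
# optionally recentering the node on the skeleton's translation.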
class Animate(summa.actions.IntervalAction):
def init(self, animation, recenter=False, recenter_x=False, recenter_y=False):
if recenter:
recenter_x = recenter_y = True
self.recenter_x = recenter_x
self.recenter_y = recenter_y
self.duration = animation.get_duration()
self.animation = animation
def start(self):
nsk = copy.deepcopy(self.target.skeleton)
if self.recenter_x:
self.target.x += nsk.translation.x
nsk.translation.x = 0
if self.recenter_y:
self.target.y += nsk.translation.y
nsk.translation.y = 0
self.start_skeleton = nsk
def update(self, t):
self.animation.pose(self.target.skeleton, t, self.start_skeleton)
def __reversed__(self):
raise NotImplementedError("gimme some time")
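# Root of the bone hierarchy; keeps the skeleton-level translation and matrix
# and offers traversal, interpolation and pickling (save) helpers.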
class Skeleton(object):
def __init__(self, bone):
super(Skeleton, self).__init__()
self.bone = bone
self.matrix = euclid.Matrix3.new_identity()
self.translation = euclid.Vector2(0,0)
def flipped(self):
sk = Skeleton(self.bone.flipped())
sk.translation.x = -self.translation.x
sk.translation.y = self.translation.y
sk.matrix = euclid.Matrix3.new_translate( *sk.translation )
return sk
def save(self, name):
f = open(name, "w")
cPickle.dump(self, f)
f.close()
def move(self, dx, dy):
self.matrix.translate(dx, dy)
self.translation.x += dx
self.translation.y += dy
def propagate_matrix(self):
def visit(matrix, child):
child.parent_matrix = matrix
matrix = matrix * child.matrix
for c in child.children:
visit(matrix, c)
visit(self.matrix, self.bone)
def visit_children(self, func):
result = []
def inner(bone):
result.append( func( bone ) )
for b in bone.children:
inner(b)
inner(self.bone)
return result
def get_control_points(self):
points = [self]
self.propagate_matrix()
points += self.visit_children( lambda bone: bone )
return points
def interpolated_to(self, next, delta):
sk = Skeleton(self.bone.interpolated_to(next.bone, delta))
sk.translation = (next.translation-self.translation) * delta + self.translation
sk.matrix = euclid.Matrix3.new_translate( *sk.translation )
return sk
def pose_from(self, other):
self.matrix = other.matrix
self.translation = other.translation
self.bone = copy.deepcopy(other.bone)
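# A single bone: a label, a length ('size') and a local transform built from a
# translation plus a rotation; children inherit the parent's accumulated matrix.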
class Bone(object):
def __init__(self, label, size, rotation, translation):
self.size = size
self.label = label
self.children = []
self.matrix = euclid.Matrix3.new_translate(*translation) * \
euclid.Matrix3.new_rotate( math.radians(rotation) )
self.parent_matrix = euclid.Matrix3.new_identity()
self.translation = euclid.Point2(*translation)
self.rotation = math.radians(rotation)
def move(self, dx, dy):
self.translation.x += dx
self.translation.y += dy
self.matrix = euclid.Matrix3.new_translate(*self.translation) * \
euclid.Matrix3.new_rotate( self.rotation)
def flipped(self):
bone = Bone(self.label, self.size, -math.degrees(self.rotation),
(-self.translation[0], self.translation[1]))
for b in self.children:
bone.add( b.flipped() )
return bone
def rotate(self, angle):
self.rotation += angle
self.matrix.rotate( angle )
def add(self, bone):
self.children.append(bone)
return self
def get_end(self):
return self.parent_matrix * self.matrix * euclid.Point2(0, -self.size)
def get_start(self):
return self.parent_matrix * self.matrix * euclid.Point2(0, 0)
def interpolated_to(self, next, delta):
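# Interpolate the rotation along the shortest angular path (wrapping at +/-pi),
# then recurse into the children; both keyframe skeletons are assumed to share
# the same bone topology.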
ea = next.rotation%(math.pi*2)
sa = self.rotation %(math.pi*2)
angle = ((ea%(math.pi*2)) - (sa%(math.pi*2)))
if angle > math.pi:
angle = -math.pi*2+angle
if angle < -math.pi:
angle = math.pi*2+angle
nr = ( sa + angle * delta ) % (math.pi*2)
nr = math.degrees( nr )
bone = Bone(self.label, self.size, nr, self.translation)
for i, c in enumerate(self.children):
nc = c.interpolated_to(next.children[i], delta)
bone.add( nc )
return bone
def dump(self, depth=0):
print "-"*depth, self
for c in self.children:
c.dump(depth+1)
def repr(self, depth=0):
repr = " "*depth*4 + "Bone('%s', %s, %s, %s)"%(
self.label, self.size, math.degrees(self.rotation), self.translation
)
for c in self.children:
repr += " "*depth*4 +".add(\n" + c.repr(depth+1) + ")"
repr += "\n"
return repr
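# Keyframe container: maps time markers to Skeleton snapshots and poses a
# skeleton by interpolating between the surrounding keyframes.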
class Animation(object):
def __init__(self, skeleton):
self.frames = {}
self.position = 0
self.skeleton = skeleton
def flipped(self):
c = copy.deepcopy(self)
for t, sk in c.frames.items():
c.frames[t] = sk.flipped()
return c
def pose(self, who, t, start):
dt = t * self.get_duration()
self.position = dt
ct, curr = self.get_keyframe()
#print who.translation
# if we are in a keyframe, pose that
if curr:
who.pose_from( curr )
return
# find previous, if not, use start
pt, prev = self.get_keyframe(-1)
if not prev:
prev = start
pt = 0
# find next, if not, pose at prev
nt, next = self.get_keyframe(1)
if not next:
who.pose_from( prev )
return
# find the interpolation factor between prev and next and pose from it;
# ft goes from 1 at prev to 0 at next, so we interpolate from next toward prev
ft = (nt-dt)/(nt-pt)
who.pose_from( next.interpolated_to( prev, ft ) )
def get_duration(self):
if self.frames:
return max(max( self.frames ), self.position )
else:
return self.position
def get_markers(self):
return self.frames.keys()
def get_position(self):
return self.position
def get_keyframe(self, offset=0):
if offset == 0:
if self.position in self.frames:
return self.position, self.frames[self.position]
else:
return None, None
elif offset < 0:
prevs = [ t for t in self.frames if t < self.position ]
prevs.sort()
if abs(offset) <= len(prevs):
return prevs[offset], self.frames[prevs[offset]]
else:
return None, None
elif offset > 0:
next = [ t for t in self.frames if t > self.position ]
next.sort()
if abs(offset) <= len(next):
return next[offset-1], self.frames[next[offset-1]]
else:
return None, None
def next_keyframe(self):
next = [ t for t in self.frames if t > self.position ]
if not next:
return False
self.position = min(next)
return True
def prev_keyframe(self):
prevs = [ t for t in self.frames if t < self.position ]
if not prevs:
return False
self.position = max(prevs)
return True
def move_position(self, delta):
self.position = max(self.position+delta, 0)
return True
def move_start(self):
self.position = 0
return True
def move_end(self):
if self.frames:
self.position = max( self.frames )
else:
self.position = 0
return True
def insert_keyframe(self):
if self.position not in self.frames:
t, sk = self.get_keyframe(-1)
if not sk:
sk = self.skeleton
self.frames[ self.position ] = copy.deepcopy(sk)
return True
return False
def remove_keyframe(self):
if self.position in self.frames:
del self.frames[ self.position ]
return True
return False
def insert_time(self, delta):
new_frames = {}
for t, sk in sorted(self.frames.items()):
if t >= self.position:
t += delta
new_frames[ t ] = sk
self.frames = new_frames
def delete_time(self, delta):
for t in self.frames:
if self.position <= t < self.position + delta:
return False
new_frames = {}
for t, sk in sorted(self.frames.items()):
if t > self.position:
t -= delta
new_frames[ t ] = sk
self.frames = new_frames
|
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter, FileType
from importlib.util import find_spec
from .main_cross_validation import cross_validation_core
from .main_predict import predict_core
from .main_retraining import retrain_core
from .main_train import train_core
from ..version import version
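# With argparse's action='append', values given on the command line would be
# appended to the shared default list; returning an empty list from __copy__
# makes explicit values replace the default instead (assuming argparse copies
# the default before appending, as recent CPython versions do).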
class DefaultList(list):
@staticmethod
def __copy__(*_):
return []
def _common(parser):
parser.add_argument("--type_model", "-tm", type=str, default="nb",
choices=['nb', 'mlp', 'keras'],
help="Model type used for training / prediction AAM:"
"'nb' - used naive Bayes classifier;"
"'mlp' - used MLPClassifier;"
"'keras' - used Keras_MLPClassifier.")
parser.add_argument("--mlp_hls", "-hls", action='append', type=int, default=DefaultList([100]),
help="If the model type is 'mlp', then the following hyper-parameters 'hidden_layer_sizes'."
"Example, write -hls 100 -hls 100 => [100, 100].")
parser.add_argument("--mlp_a", "-a", type=str, default="relu",
choices=['identity', 'logistic', 'tanh', 'relu'],
help="If the model type is 'mlp', then the following hyper-parameters 'activation'.")
parser.add_argument("--mlp_s", "-s", type=str, default="adam",
choices=['lbfgs', 'sgd', 'adam'],
help="If the model type is 'mlp', then the following hyper-parameters 'solver'.")
parser.add_argument("--mlp_alpha", "-alpha", type=float, default=0.0001,
help="If the model type is 'mlp', then the following hyper-parameters 'alpha'.")
parser.add_argument("--mlp_bs", "-bs", type=int, default=200,
help="If the model type is 'mlp', then the following hyper-parameters 'batch_size'.")
parser.add_argument("--mlp_lr", "-lr", type=str, default="constant",
choices=['constant', 'invscaling', 'adaptive'],
help="If the model type is 'mlp', then the following hyper-parameters 'learning_rate'.")
parser.add_argument("--mlp_mi", "-mi", type=int, default=200,
help="If the model type is 'mlp', then the following hyper-parameters 'max_iter'.")
parser.add_argument("--mlp_es", "-es", type=bool, default=False,
help="If the model type is 'mlp', then the following hyper-parameters 'early_stopping'.")
parser.add_argument("--keras_dropout", "-do", action='append', type=int, default=DefaultList([]),
help="If the model type is 'keras', then the following hyper-parameters 'dropout'."
"Example, write -do 0 -do 0.5 => [0, 0.5].")
parser.add_argument("--batch_chunk", "-bc", type=int, default=1,
help="Breakdown by the count of reactions (for model training).")
parser.add_argument("--pairs", "-p", type=int, default=0,
help="Type of union of atoms pairs:\n"
"0 = 'sim' - uniting atoms with the same name (in periodic table),\n"
"1 = 'eqv' - uniting same name with atoms symmetries refinement.")
parser.add_argument("--duplicate", "-d", type=bool, default=True,
help="Accounted the atomic pairs information duplicates:\n"
"True - doesn't duplicate,\n"
"False - does all duplicate.") # "2-does 'False' duplicate")
parser.add_argument("--fragment_type", "-ft", type=str, default='augSeq',
choices=['seq', 'aug', 'augSeq', 'fSeq', 'fAug'],
help="Method of fragmentation of a molecule:\n"
"'seq' - sequenced fragments,\n"
"'aug' - augmented fragments,\n"
"'augSeq' - sequenced and augmented fragments,\n"
"'fSeq' - fuzzy sequenced fragments,\n"
"'fAug' - fuzzy augmented fragments.")
parser.add_argument("--min", "-m", type=int, default=1,
help="The minimal sequenced fragments length.")
parser.add_argument("--min2", "-m2", type=int, default=3,
help="The minimal fuzzy sequenced fragments length.")
parser.add_argument("--max", "-M", type=int, default=8,
help="The maximal sequenced fragments length.")
parser.add_argument("--max2", "-M2", type=int, default=8,
help="The maximal fuzzy sequenced fragments length.")
parser.add_argument("--deep", "-deep", type=int, default=3,
help="The maximum number of levels of augmented fragments.")
parser.add_argument("--fuzzy", "-fl", type=int, default=2,
help="The count of fuzzy first N-bonds.")
parser.add_argument("--fragment_count", "-fc", type=bool, default=False,
help="Accounted for the number of fragments of each type:\n"
"False - to ignored,\n"
"True - to account.")
parser.add_argument("--bitstring", "-b", type=int, default=0,
help="Type of union bit-strings the reagents (A) and the products (B):\n"
"0 = 'and' - intersection of information [A & B],\n"
"1 = [A+B+(A*B)],\n"
"2 = [(A!B)+(B!A)+(A*B)],\n"
"3 = [A+B],\n"
"4 = 'xor_&_and' - United 'symmetric difference' and 'intersection' [(A ^ B) + (A & B)],\n"
"5 = 'kron' - Tensor product of information.")
parser.add_argument("--length", "-l", type=int, default=2048,
help="Length the bit-strings.")
parser.add_argument("--chunk", "-c", type=int, default=None,
help="Necessary partitioning of the process of creating bit strings, "
"if the LENGTH value exceeding 100,000.")
def train(subparsers):
parser = subparsers.add_parser('train', help='The stage of learning the mapping on the reaction sets',
formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument("--input", "-i", default="input.rdf", type=str,
help="RDF input file from which the mapping is learned")
parser.add_argument("--model", "-n", default="model.dat", type=FileType('wb'),
help="File with trained model")
parser.add_argument("--model_filename", "-n2", default="trained_keras_model.h5", type=str,
help="File with trained keras-model")
parser.add_argument("--debug", action='store_true', help="debug mod")
_common(parser)
parser.set_defaults(func=train_core)
def predict(subparsers):
parser = subparsers.add_parser('predict', help='The stage of mapping prediction on new reaction sets',
formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument("--input", "-i", default="input.rdf", type=str,
help="RDF input file for which the mapping is created")
parser.add_argument("--model", "-n", default="model.dat", type=FileType('rb'),
help="File with trained model")
parser.add_argument("--output", "-o", default="output.rdf", type=str,
help="RDF outputfile")
parser.add_argument("--dfs", "-dfs", type=int, default=0,
help="Choice of the revision method (Depth-first search):\n"
"0 - by the symmetrically equivalent groups,\n"
"1 - by the values of probabilities.")
'''parser.add_argument("--rank", "-ro", default="rank/rank.txt", type=FileType('w'),
help="The debug file with average values of the mapping probability a reaction atoms "
"at the mappings value True/False")'''
parser.add_argument("--debug", action='store_true', help="debug mod")
parser.set_defaults(func=predict_core)
def cross_validation(subparsers):
parser = subparsers.add_parser('cross_validation', help='The cross-validation stage',
formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument("--input", "-i", default="input.rdf", type=str,
help="RDF input file")
parser.add_argument("--output", "-o", default="cross_v", type=str,
help="The path to the directory with service/output files.")
parser.add_argument("--fold", "-k", type=int, default=5,
help="Split the data into k consecutive folds.")
parser.add_argument("--repeat", "-r", type=int, default=1,
help="The number of repetitions of the cross-validation procedure.")
parser.add_argument("--weights", "-w_dfs2", action='append', type=float, default=None,
help="Selection of the weights parameters, for 'trimming a tree' in dfs2. "
"Needed 3 float parameters. "
"Example, write -w_dfs2 0.1 -w_dfs2 0.1 -w_dfs2 1.0 => [0.1, 0.1, 1.0].")
parser.add_argument("--debug", action='store_true', help="debug mod")
_common(parser)
parser.set_defaults(func=cross_validation_core)
def retrain(subparsers):
parser = subparsers.add_parser('retrain', help='The stage of retraining the mapping on new reaction sets',
formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument("--input", "-i", default="input.rdf", type=str,
help="RDF input file from which the mapping is learned")
parser.add_argument("--model", "-n", default="model.dat", type=FileType('rb'),
help="File with the previously trained model")
parser.add_argument("--model2", "-n2", default="model2.dat", type=FileType('wb'),
help="Output file for the retrained model")
parser.add_argument("--debug", action='store_true', help="debug mode")
_common(parser)
parser.set_defaults(func=retrain_core)
def argparser():
parser = ArgumentParser(description="NaiveMapper", epilog="(c) A-Deal1993", prog='naivemapper')
parser.add_argument("--version", "-v", action="version", version=version(), default=False)
subparsers = parser.add_subparsers(title='subcommands', description='available utilities')
train(subparsers)
predict(subparsers)
cross_validation(subparsers)
retrain(subparsers)
if find_spec('argcomplete'):
from argcomplete import autocomplete
autocomplete(parser)
return parser
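# Illustrative sketch, not part of the original module: one way the parser
# could be driven, assuming each *_core callable registered through
# set_defaults(func=...) accepts the parsed Namespace. '_example_main' is a
# hypothetical helper added here only for illustration.
def _example_main():
    parser = argparser()
    args = parser.parse_args()
    if hasattr(args, 'func'):
        # dispatch to the selected subcommand's core function
        args.func(args)
    else:
        parser.print_help()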
|
# coding=utf-8
# A script that finds strings in a project that have not been internationalized
import os
import re
# Needed so that Chinese text can be written to the output file
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
# Path of the project to scan
DESPATH = "/Users/wangsuyan/Desktop/Kmart"
# Path where the scan results are written
WDESPATH = "/Users/wangsuyan/Desktop/unlocalized.log"
# Directory blacklist: every file under these directories is ignored
BLACKDIRLIST = [
DESPATH + '/Classes/Personal/PERSetting/PERAccount', # nested directory
DESPATH + '/Utils', # every file under Utils is ignored
'PREPhoneNumResetViewController.m', # a bare file name ignores just that file
]
# Output separator
SEPREATE = ' <=> '
def isInBlackList(filePath):
if os.path.isfile(filePath):
return fileNameAtPath(filePath) in BLACKDIRLIST
if filePath:
return filePath in BLACKDIRLIST
return False
def fileNameAtPath(filePath):
return os.path.split(filePath)[1]
def isSignalNote(str):
if '//' in str:
return True
if str.startswith('#pragma'):
return True
return False
def isLogMsg(str):
if str.startswith('NSLog') or str.startswith('FLOG'):
return True
return False
def unlocalizedStrs(filePath):
f = open(filePath)
fileName = fileNameAtPath(filePath)
isMutliNote = False
isHaveWriteFileName = False
for index, line in enumerate(f):
# multi-line comments
line = line.strip()
if '/*' in line:
isMutliNote = True
if '*/' in line:
isMutliNote = False
if isMutliNote:
continue
# single-line comments
if isSignalNote(line):
continue
# log statements
if isLogMsg(line):
continue
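# Match Objective-C string literals containing CJK characters (U+4E00-U+9FFF)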
matchList = re.findall(u'@"[\u4e00-\u9fff]+', line.decode('utf-8'))
if matchList:
if not isHaveWriteFileName:
wf.write('\n' + fileName + '\n')
isHaveWriteFileName = True
for item in matchList:
wf.write(str(index + 1) + ':' + item[2 : len(item)] + SEPREATE + line + '\n')
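# Recursively walk the directory tree and scan every .m file that is not blacklisted.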
def findFromFile(path):
paths = os.listdir(path)
for aCompent in paths:
aPath = os.path.join(path, aCompent)
if isInBlackList(aPath):
print('In the blacklist, skipping: ' + aPath)
continue
if os.path.isdir(aPath):
findFromFile(aPath)
elif os.path.isfile(aPath) and os.path.splitext(aPath)[1]=='.m':
unlocalizedStrs(aPath)
if __name__ == '__main__':
wf = open(WDESPATH, 'w')
findFromFile(DESPATH)
wf.close()
|